author     Artur Grabowski <art@cvs.openbsd.org>    2011-04-05 01:28:06 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>    2011-04-05 01:28:06 +0000
commit     2a3ab330ddb05b7dff5318f755a1ac17599d2711 (patch)
tree       c31f1bb159de21585699690b2442bb61618fd92a    /sys/uvm
parent     37b467e371103c787228faa5bb157ef6ac72c36a (diff)
- Change pool constraints to use kmem_pa_mode instead of uvm_constraint_range.
- Use km_alloc for all backend allocations in pools.
- Use km_alloc for the emergency kentry allocations in uvm_mapent_alloc.
- Garbage collect uvm_km_getpage, uvm_km_getpage_pla and uvm_km_putpage.

ok ariane@
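For orientation, a minimal before/after sketch of the call that the uvm_map.c hunk below converts. It assumes the km_alloc() interface of this era (size, kmem_va_mode, kmem_pa_mode, kmem_dyn_mode); the identifiers are taken from uvm_mapent_alloc and are shown out of context, so treat this as an illustration rather than a drop-in patch.

	/*
	 * Before: the single-page helper being garbage collected; the
	 * second argument returned a "slowdown" hint to the caller.
	 */
	ne = uvm_km_getpage(0, &slowdown);

	/*
	 * After: the generic allocator with explicit modes, as in the
	 * uvm_map.c hunk below.  Roughly: kv_page asks for page-sized
	 * kernel VA, kp_dirty for physical pages with no constraints
	 * and no zeroing, kd_nowait for a non-sleeping allocation.
	 */
	ne = km_alloc(PAGE_SIZE, &kv_page, &kp_dirty, &kd_nowait);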
Diffstat (limited to 'sys/uvm')
-rw-r--r--   sys/uvm/uvm_extern.h   10
-rw-r--r--   sys/uvm/uvm_km.c       83
-rw-r--r--   sys/uvm/uvm_map.c       7
3 files changed, 6 insertions, 94 deletions
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 5b9af4a913f..cd57aaf39eb 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.92 2011/04/04 11:56:12 art Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.93 2011/04/05 01:28:05 art Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -533,14 +533,6 @@ vaddr_t uvm_km_valloc_wait(vm_map_t, vsize_t);
vaddr_t uvm_km_valloc_align(struct vm_map *, vsize_t, vsize_t, int);
vaddr_t uvm_km_valloc_prefer_wait(vm_map_t, vsize_t,
voff_t);
-void *uvm_km_getpage_pla(boolean_t, int *, paddr_t, paddr_t,
- paddr_t, paddr_t);
-/* Wrapper around old function prototype. */
-#define uvm_km_getpage(waitok, slowdown) \
- uvm_km_getpage_pla(((waitok) ? 0 : UVM_KMF_NOWAIT), (slowdown), \
- (paddr_t)0, (paddr_t)-1, 0, 0)
-
-void uvm_km_putpage(void *);
struct vm_map *uvm_km_suballoc(vm_map_t, vaddr_t *,
vaddr_t *, vsize_t, int,
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 7a770d62fa4..38590e2624c 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.c,v 1.91 2011/04/04 21:16:31 art Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.92 2011/04/05 01:28:05 art Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
/*
@@ -811,88 +811,7 @@ uvm_km_thread(void *arg)
}
}
}
-#endif
-
-void *
-uvm_km_getpage_pla(int flags, int *slowdown, paddr_t low, paddr_t high,
- paddr_t alignment, paddr_t boundary)
-{
- struct pglist pgl;
- int pla_flags;
- struct vm_page *pg;
- vaddr_t va;
-
- *slowdown = 0;
- pla_flags = (flags & UVM_KMF_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
- if (flags & UVM_KMF_ZERO)
- pla_flags |= UVM_PLA_ZERO;
- TAILQ_INIT(&pgl);
- if (uvm_pglistalloc(PAGE_SIZE, low, high, alignment, boundary, &pgl,
- 1, pla_flags) != 0)
- return NULL;
- pg = TAILQ_FIRST(&pgl);
- KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);
- TAILQ_REMOVE(&pgl, pg, pageq);
-
-#ifdef __HAVE_PMAP_DIRECT
- va = pmap_map_direct(pg);
- if (__predict_false(va == 0))
- uvm_pagefree(pg);
-
-#else /* !__HAVE_PMAP_DIRECT */
- mtx_enter(&uvm_km_pages.mtx);
- while (uvm_km_pages.free == 0) {
- if (flags & UVM_KMF_NOWAIT) {
- mtx_leave(&uvm_km_pages.mtx);
- uvm_pagefree(pg);
- return NULL;
- }
- msleep(&uvm_km_pages.free, &uvm_km_pages.mtx, PVM, "getpage",
- 0);
- }
-
- va = uvm_km_pages.page[--uvm_km_pages.free];
- if (uvm_km_pages.free < uvm_km_pages.lowat &&
- curproc != uvm_km_pages.km_proc) {
- *slowdown = 1;
- wakeup(&uvm_km_pages.km_proc);
- }
- mtx_leave(&uvm_km_pages.mtx);
-
-
- atomic_setbits_int(&pg->pg_flags, PG_FAKE);
- UVM_PAGE_OWN(pg, NULL);
-
- pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
- pmap_update(kernel_map->pmap);
-
-#endif /* !__HAVE_PMAP_DIRECT */
- return ((void *)va);
-}
-
-void
-uvm_km_putpage(void *v)
-{
-#ifdef __HAVE_PMAP_DIRECT
- vaddr_t va = (vaddr_t)v;
- struct vm_page *pg;
-
- pg = pmap_unmap_direct(va);
-
- uvm_pagefree(pg);
-#else /* !__HAVE_PMAP_DIRECT */
- struct uvm_km_free_page *fp = v;
-
- mtx_enter(&uvm_km_pages.mtx);
- fp->next = uvm_km_pages.freelist;
- uvm_km_pages.freelist = fp;
- if (uvm_km_pages.freelistlen++ > 16)
- wakeup(&uvm_km_pages.km_proc);
- mtx_leave(&uvm_km_pages.mtx);
-#endif /* !__HAVE_PMAP_DIRECT */
-}
-#ifndef __HAVE_PMAP_DIRECT
struct uvm_km_free_page *
uvm_km_doputpage(struct uvm_km_free_page *fp)
{
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 91d5db39e64..e5bdfd94b37 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.131 2010/12/24 21:49:04 tedu Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.132 2011/04/05 01:28:05 art Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -396,7 +396,7 @@ uvm_mapent_alloc(struct vm_map *map, int flags)
{
struct vm_map_entry *me, *ne;
int s, i;
- int slowdown, pool_flags;
+ int pool_flags;
UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
pool_flags = PR_WAITOK;
@@ -408,7 +408,8 @@ uvm_mapent_alloc(struct vm_map *map, int flags)
simple_lock(&uvm.kentry_lock);
me = uvm.kentry_free;
if (me == NULL) {
- ne = uvm_km_getpage(0, &slowdown);
+ ne = km_alloc(PAGE_SIZE, &kv_page, &kp_dirty,
+ &kd_nowait);
if (ne == NULL)
panic("uvm_mapent_alloc: cannot allocate map "
"entry");