From 812654822957b2b2463f49a9d3665ced4d1709e9 Mon Sep 17 00:00:00 2001
From: "Thordur I. Bjornsson"
Date: Sun, 27 Jun 2010 17:45:21 +0000
Subject: doh! Use pmap_kenter/pmap_kremove in the backend page allocator to
 prevent recursion in pmap_enter as seen on zaurus.

ok art@

also, release the uvm_km_pages.mtx before calling uvm_km_kmemalloc as we
can sleep there.

ok oga@
---
 sys/uvm/uvm_km.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'sys')

diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 19d22bb269e..c488c7f55ee 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_km.c,v 1.77 2010/06/27 03:03:49 thib Exp $	*/
+/*	$OpenBSD: uvm_km.c,v 1.78 2010/06/27 17:45:20 thib Exp $	*/
 /*	$NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $	*/
 
 /*
@@ -789,6 +789,7 @@ uvm_km_thread(void *arg)
 			msleep(&uvm_km_pages.km_proc, &uvm_km_pages.mtx, PVM,
 			    "kmalloc", 0);
 		}
+		mtx_leave(&uvm_km_pages.mtx);
 
 		for (i = 0; i < nitems(pg); i++) {
 			pg[i] = (vaddr_t)uvm_km_kmemalloc(kernel_map, NULL,
@@ -863,8 +864,7 @@ uvm_km_getpage_pla(int flags, int *slowdown, paddr_t low, paddr_t high,
 	atomic_setbits_int(&pg->pg_flags, PG_FAKE);
 	UVM_PAGE_OWN(pg, NULL);
 
-	pmap_enter(kernel_map->pmap, va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW,
-	    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
+	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
 	pmap_update(kernel_map->pmap);
 #endif	/* !__HAVE_PMAP_DIRECT */
 
@@ -887,7 +887,7 @@ uvm_km_putpage(void *v)
 
 	KASSERT(pg != NULL);
 
-	pmap_remove(kernel_map->pmap, va, va + PAGE_SIZE);
+	pmap_kremove(va, PAGE_SIZE);
 	pmap_update(kernel_map->pmap);
 
 	mtx_enter(&uvm_km_pages.mtx);
-- 
cgit v1.2.3
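
Note (illustrative, not part of the patch): the mtx_leave() change follows the
general rule that a mutex must not be held across a call that can sleep, such
as uvm_km_kmemalloc().  A minimal user-space sketch of the same
drop-the-lock-before-blocking pattern, using pthreads and malloc() as
stand-ins for the kernel primitives (all names below are hypothetical, not
OpenBSD code):

	#include <pthread.h>
	#include <stdlib.h>

	#define POOL_SLOTS	16

	static pthread_mutex_t pool_mtx = PTHREAD_MUTEX_INITIALIZER;
	static void *pool[POOL_SLOTS];
	static int pool_cnt;

	/*
	 * Refill the free-page pool.  The lock protects the pool bookkeeping
	 * only; it is released before the allocation, which may block, and
	 * reacquired to insert the new entries.
	 */
	static void
	pool_refill(int want)
	{
		void *pg[POOL_SLOTS];
		int i, n;

		pthread_mutex_lock(&pool_mtx);
		n = want < POOL_SLOTS - pool_cnt ? want : POOL_SLOTS - pool_cnt;
		pthread_mutex_unlock(&pool_mtx);	/* drop before the blocking call */

		for (i = 0; i < n; i++)
			pg[i] = malloc(4096);		/* stand-in for uvm_km_kmemalloc() */

		pthread_mutex_lock(&pool_mtx);
		for (i = 0; i < n; i++)
			if (pg[i] != NULL)
				pool[pool_cnt++] = pg[i];
		pthread_mutex_unlock(&pool_mtx);
	}

The pmap_kenter/pmap_kremove half of the change has no user-space analogue:
per the commit message, those interfaces map and unmap wired kernel pages
directly, avoiding the pmap_enter path that could recurse back into the
allocator.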