author     Artur Grabowski <art@cvs.openbsd.org>  2007-04-11 12:10:43 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>  2007-04-11 12:10:43 +0000
commit     45b5f5562300b1b582a88e47df07ff5c316e8f68 (patch)
tree       2e986c1b5df798d88908e4d0e3fa145b6577c355 /sys
parent     249cd96ad0ce1ab417c7c97f8dde0be8604cce01 (diff)
Instead of managing pages for intrsafe maps in special objects (a.k.a. kmem_object)
just so that we can remove them later, use pmap_extract to find the pages to free,
and simplify a lot of code that no longer has to deal with the list of intrsafe
maps, intrsafe objects, etc.
miod@ ok
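
For orientation, the core of the change is the new uvm_km_pgremove_intrsafe() in the uvm_km.c hunk below: instead of walking a per-map kernel object's page list, the kernel walks the virtual range, asks the pmap which physical page backs each address, and frees that page directly. Reproduced here from the patch (with comments added), not an independent implementation:

void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	vaddr_t va;
	paddr_t pa;

	for (va = start; va < end; va += PAGE_SIZE) {
		/* ask the kernel pmap for the physical page behind this VA */
		if (!pmap_extract(pmap_kernel(), va, &pa))
			continue;	/* panic? */
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			panic("uvm_km_pgremove_intrsafe: no page");
		uvm_pagefree(pg);
	}
}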
Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/arm/arm/pmap.c       |    4
-rw-r--r--  sys/arch/i386/i386/pmap.c     |   45
-rw-r--r--  sys/kern/kern_malloc.c        |    8
-rw-r--r--  sys/kern/kern_malloc_debug.c  |    8
-rw-r--r--  sys/kern/subr_pool.c          |    5
-rw-r--r--  sys/uvm/uvm_extern.h          |    8
-rw-r--r--  sys/uvm/uvm_fault.c           |   32
-rw-r--r--  sys/uvm/uvm_fault_i.h         |   36
-rw-r--r--  sys/uvm/uvm_km.c              |  179
-rw-r--r--  sys/uvm/uvm_km.h              |    4
-rw-r--r--  sys/uvm/uvm_map.c             |   23
-rw-r--r--  sys/uvm/uvm_map.h             |   30
-rw-r--r--  sys/uvm/uvm_map_i.h           |   24
-rw-r--r--  sys/uvm/uvm_object.h          |    9
-rw-r--r--  sys/uvm/uvm_page.c            |    5
-rw-r--r--  sys/uvm/uvm_stat.c            |    5
16 files changed, 76 insertions, 349 deletions
diff --git a/sys/arch/arm/arm/pmap.c b/sys/arch/arm/arm/pmap.c
index 6f234a8f70b..77fe39dbd37 100644
--- a/sys/arch/arm/arm/pmap.c
+++ b/sys/arch/arm/arm/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.9 2007/02/03 16:48:23 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.10 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
/*
@@ -4071,7 +4071,7 @@ pmap_init(void)
/*
* Now we need to free enough pv_entry structures to allow us to get
- * the kmem_map/kmem_object allocated and inited (done after this
+ * the kmem_map allocated and inited (done after this
* function is finished). to do this we allocate one bootstrap page out
* of kernel_map and use it to provide an initial pool of pv_entry
* structures. we never free this page.
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index cf71ffb5b7a..36c87ba741c 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.100 2007/04/04 17:44:45 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.101 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/*
@@ -170,9 +170,9 @@
* => success: we have an unmapped VA, continue to [b]
* => failure: unable to lock kmem_map or out of VA in it.
* move on to plan 3.
- * [b] allocate a page in kmem_object for the VA
+ * [b] allocate a page for the VA
* => success: map it in, free the pv_entry's, DONE!
- * => failure: kmem_object locked, no free vm_pages, etc.
+ * => failure: no free vm_pages, etc.
* save VA for later call to [a], go to plan 3.
* If we fail, we simply let pmap_enter() tell UVM about it.
*/
@@ -1074,9 +1074,9 @@ pmap_init()
/*
* now we need to free enough pv_entry structures to allow us to get
- * the kmem_map/kmem_object allocated and inited (done after this
- * function is finished). to do this we allocate one bootstrap page out
- * of kernel_map and use it to provide an initial pool of pv_entry
+ * the kmem_map allocated and inited (done after this function is
+ * finished). to do this we allocate one bootstrap page out of
+ * kernel_map and use it to provide an initial pool of pv_entry
* structures. we never free this page.
*/
@@ -1222,37 +1222,19 @@ pmap_alloc_pvpage(struct pmap *pmap, int mode)
* if not, try to allocate one.
*/
- s = splvm(); /* must protect kmem_map/kmem_object with splvm! */
+ s = splvm(); /* must protect kmem_map with splvm! */
if (pv_cachedva == 0) {
- pv_cachedva = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
+ pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
NBPG, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
- if (pv_cachedva == 0) {
- splx(s);
- goto steal_one;
- }
}
-
- /*
- * we have a VA, now let's try and allocate a page in the object
- * note: we are still holding splvm to protect kmem_object
- */
-
- if (!simple_lock_try(&uvmexp.kmem_object->vmobjlock)) {
- splx(s);
+ splx(s);
+ if (pv_cachedva == 0)
goto steal_one;
- }
- pg = uvm_pagealloc(uvmexp.kmem_object, pv_cachedva -
- vm_map_min(kernel_map),
- NULL, UVM_PGA_USERESERVE);
+ pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
if (pg)
pg->pg_flags &= ~PG_BUSY; /* never busy */
-
- simple_unlock(&uvmexp.kmem_object->vmobjlock);
- splx(s);
- /* splvm now dropped */
-
- if (pg == NULL)
+ else
goto steal_one;
/*
@@ -1505,9 +1487,6 @@ pmap_free_pvs(struct pmap *pmap, struct pv_entry *pvs)
* => assume caller is holding the pvalloc_lock and that
* there is a page on the pv_unusedpgs list
* => if we can't get a lock on the kmem_map we try again later
- * => note: analysis of MI kmem_map usage [i.e. malloc/free] shows
- * that if we can lock the kmem_map then we are not already
- * holding kmem_object's lock.
*/
void
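
Pulling the +/- lines of the pmap_alloc_pvpage() hunk above together, the allocation path after this patch looks roughly like the sketch below: the VA still comes from kmem_map, but the backing page is now an ordinary unmanaged page instead of one hung off kmem_object. This is a reconstruction from the hunk, not the verbatim file contents:

	s = splvm();	/* must protect kmem_map with splvm! */
	if (pv_cachedva == 0) {
		pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
		    NBPG, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
	}
	splx(s);
	if (pv_cachedva == 0)
		goto steal_one;

	/* no object, no offset: the page is not tracked in kmem_object */
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (pg)
		pg->pg_flags &= ~PG_BUSY;	/* never busy */
	else
		goto steal_one;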
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 8b913a9a10a..42291b0c7e6 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_malloc.c,v 1.67 2007/03/25 02:38:11 tedu Exp $ */
+/* $OpenBSD: kern_malloc.c,v 1.68 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $ */
/*
@@ -43,7 +43,7 @@
#include <uvm/uvm_extern.h>
-static struct vm_map_intrsafe kmem_map_store;
+static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;
#ifdef NKMEMCLUSTERS
@@ -195,7 +195,7 @@ malloc(unsigned long size, int type, int flags)
else
allocsize = 1 << indx;
npg = btoc(allocsize);
- va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
+ va = (caddr_t) uvm_km_kmemalloc(kmem_map, NULL,
(vsize_t)ctob(npg),
((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0));
@@ -515,7 +515,7 @@ kmeminit(void)
base = vm_map_min(kernel_map);
kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
(vsize_t)(nkmempages * PAGE_SIZE), VM_MAP_INTRSAFE, FALSE,
- &kmem_map_store.vmi_map);
+ &kmem_map_store);
kmembase = (char *)base;
kmemlimit = (char *)limit;
kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
diff --git a/sys/kern/kern_malloc_debug.c b/sys/kern/kern_malloc_debug.c
index f6ec3efd608..f5a2bb48f1c 100644
--- a/sys/kern/kern_malloc_debug.c
+++ b/sys/kern/kern_malloc_debug.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_malloc_debug.c,v 1.24 2007/04/04 17:44:45 art Exp $ */
+/* $OpenBSD: kern_malloc_debug.c,v 1.25 2007/04/11 12:10:42 art Exp $ */
/*
* Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org>
@@ -240,7 +240,7 @@ debug_malloc_allocate_free(int wait)
if (md == NULL)
return;
- va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object, PAGE_SIZE * 2,
+ va = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE * 2,
UVM_KMF_VALLOC | (wait ? 0: UVM_KMF_NOWAIT));
if (va == 0) {
pool_put(&debug_malloc_pool, md);
@@ -249,13 +249,11 @@ debug_malloc_allocate_free(int wait)
offset = va - vm_map_min(kernel_map);
for (;;) {
- simple_lock(&uvmexp.kmem_object->vmobjlock);
- pg = uvm_pagealloc(uvmexp.kmem_object, offset, NULL, 0);
+ pg = uvm_pagealloc(NULL, 0, NULL, 0);
if (pg) {
pg->pg_flags &= ~PG_BUSY; /* new page */
UVM_PAGE_OWN(pg, NULL);
}
- simple_unlock(&uvmexp.kmem_object->vmobjlock);
if (pg)
break;
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index c92d4f7b447..6d08f849656 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.48 2006/11/17 11:50:09 jmc Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.49 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
/*-
@@ -2032,8 +2032,7 @@ pool_page_alloc_kmem(struct pool *pp, int flags)
{
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
- return ((void *)uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object,
- waitok));
+ return ((void *)uvm_km_alloc_poolpage1(kmem_map, NULL, waitok));
}
void
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 3d051013137..488cd33468c 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.63 2006/11/29 12:39:50 miod Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.64 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -356,9 +356,6 @@ struct uvmexp {
int pdrevnode; /* vnode pages reactivated due to min threshold */
int pdrevtext; /* vtext pages reactivated due to min threshold */
- /* kernel memory objects: managed by uvm_km_kmemalloc() only! */
- struct uvm_object *kmem_object;
-
int fpswtch; /* FPU context switches */
int kmapent; /* number of kernel map entries */
};
@@ -507,9 +504,6 @@ vaddr_t uvm_km_alloc_poolpage1(vm_map_t,
struct uvm_object *, boolean_t);
void uvm_km_free_poolpage1(vm_map_t, vaddr_t);
-#define uvm_km_alloc_poolpage(waitok) uvm_km_alloc_poolpage1(kmem_map, \
- uvmexp.kmem_object, (waitok))
-#define uvm_km_free_poolpage(addr) uvm_km_free_poolpage1(kmem_map, (addr))
void *uvm_km_getpage(boolean_t);
void uvm_km_putpage(void *);
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index b791efe8499..1039c38428a 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault.c,v 1.44 2007/04/04 17:44:45 art Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.45 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */
/*
@@ -599,19 +599,6 @@ uvm_fault(orig_map, vaddr, fault_type, access_type)
narrow = FALSE; /* normal fault */
/*
- * before we do anything else, if this is a fault on a kernel
- * address, check to see if the address is managed by an
- * interrupt-safe map. If it is, we fail immediately. Intrsafe
- * maps are never pageable, and this approach avoids an evil
- * locking mess.
- */
- if (orig_map == kernel_map && uvmfault_check_intrsafe(&ufi)) {
- UVMHIST_LOG(maphist, "<- VA 0x%lx in intrsafe map %p",
- ufi.orig_rvaddr, ufi.map, 0, 0);
- return (EFAULT);
- }
-
- /*
* "goto ReFault" means restart the page fault from ground zero.
*/
ReFault:
@@ -626,6 +613,12 @@ ReFault:
}
/* locked: maps(read) */
+#ifdef DIAGNOSTIC
+ if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0)
+ panic("uvm_fault: fault on non-pageable map (%p, 0x%lx)",
+ ufi.map, vaddr);
+#endif
+
/*
* check protection
*/
@@ -639,17 +632,6 @@ ReFault:
}
/*
- * if the map is not a pageable map, a page fault always fails.
- */
-
- if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0) {
- UVMHIST_LOG(maphist,
- "<- map %p not pageable", ufi.map, 0, 0, 0);
- uvmfault_unlockmaps(&ufi, FALSE);
- return (EFAULT);
- }
-
- /*
* "enter_prot" is the protection we want to enter the page in at.
* for certain pages (e.g. copy-on-write pages) this protection can
* be more strict than ufi.entry->protection. "wired" means either
diff --git a/sys/uvm/uvm_fault_i.h b/sys/uvm/uvm_fault_i.h
index 1505bb746e6..fc264cd4410 100644
--- a/sys/uvm/uvm_fault_i.h
+++ b/sys/uvm/uvm_fault_i.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault_i.h,v 1.10 2002/03/14 01:27:18 millert Exp $ */
+/* $OpenBSD: uvm_fault_i.h,v 1.11 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_fault_i.h,v 1.11 2000/06/26 14:21:17 mrg Exp $ */
/*
@@ -41,7 +41,6 @@
/*
* uvm_fault_i.h: fault inline functions
*/
-static boolean_t uvmfault_check_intrsafe(struct uvm_faultinfo *);
static boolean_t uvmfault_lookup(struct uvm_faultinfo *, boolean_t);
static boolean_t uvmfault_relock(struct uvm_faultinfo *);
static void uvmfault_unlockall(struct uvm_faultinfo *, struct vm_amap *,
@@ -97,39 +96,6 @@ uvmfault_unlockall(ufi, amap, uobj, anon)
}
/*
- * uvmfault_check_intrsafe: check for a virtual address managed by
- * an interrupt-safe map.
- *
- * => caller must provide a uvm_faultinfo structure with the IN
- * params properly filled in
- * => if we find an intersafe VA, we fill in ufi->map, and return TRUE
- */
-
-static __inline boolean_t
-uvmfault_check_intrsafe(ufi)
- struct uvm_faultinfo *ufi;
-{
- struct vm_map_intrsafe *vmi;
- int s;
-
- s = vmi_list_lock();
- for (vmi = LIST_FIRST(&vmi_list); vmi != NULL;
- vmi = LIST_NEXT(vmi, vmi_list)) {
- if (ufi->orig_rvaddr >= vm_map_min(&vmi->vmi_map) &&
- ufi->orig_rvaddr < vm_map_max(&vmi->vmi_map))
- break;
- }
- vmi_list_unlock(s);
-
- if (vmi != NULL) {
- ufi->map = &vmi->vmi_map;
- return (TRUE);
- }
-
- return (FALSE);
-}
-
-/*
* uvmfault_lookup: lookup a virtual address in a map
*
* => caller must provide a uvm_faultinfo structure with the IN
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 595eb71204b..4273e866a72 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.c,v 1.56 2007/04/04 17:44:45 art Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.57 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
/*
@@ -107,8 +107,7 @@
*
* most kernel private memory lives in kernel_object. the only exception
* to this is for memory that belongs to submaps that must be protected
- * by splvm(). each of these submaps has their own private kernel
- * object (e.g. kmem_object).
+ * by splvm(). each of these submaps manages their own pages.
*
* note that just because a kernel object spans the entire kernel virtual
* address space doesn't mean that it has to be mapped into the entire space.
@@ -127,12 +126,6 @@
* then that means that the page at offset 0x235000 in kernel_object is
* mapped at 0xf8235000.
*
- * note that the offsets in kmem_object also follow this rule.
- * this means that the offsets for kmem_object must fall in the
- * range of [vm_map_min(kmem_object) - vm_map_min(kernel_map)] to
- * [vm_map_max(kmem_object) - vm_map_min(kernel_map)], so the offsets
- * in those objects will typically not start at zero.
- *
* kernel objects have one other special property: when the kernel virtual
* memory mapping them is unmapped, the backing memory in the object is
* freed right away. this is done with the uvm_km_pgremove() function.
@@ -153,22 +146,11 @@
vm_map_t kernel_map = NULL;
-struct vmi_list vmi_list;
-simple_lock_data_t vmi_list_slock;
-
/*
* local data structues
*/
static struct vm_map kernel_map_store;
-static struct uvm_object kmem_object_store;
-
-/*
- * All pager operations here are NULL, but the object must have
- * a pager ops vector associated with it; various places assume
- * it to be so.
- */
-static struct uvm_pagerops km_pager;
/*
* uvm_km_init: init kernel maps and objects to reflect reality (i.e.
@@ -186,12 +168,6 @@ uvm_km_init(start, end)
vaddr_t base = VM_MIN_KERNEL_ADDRESS;
/*
- * first, initialize the interrupt-safe map list.
- */
- LIST_INIT(&vmi_list);
- simple_lock_init(&vmi_list_slock);
-
- /*
* next, init kernel memory objects.
*/
@@ -201,19 +177,6 @@ uvm_km_init(start, end)
VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
/*
- * kmem_object: for use by the kernel malloc(). Memory is always
- * wired, and this object (and the kmem_map) can be accessed at
- * interrupt time.
- */
- simple_lock_init(&kmem_object_store.vmobjlock);
- kmem_object_store.pgops = &km_pager;
- TAILQ_INIT(&kmem_object_store.memq);
- kmem_object_store.uo_npages = 0;
- /* we are special. we never die */
- kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
- uvmexp.kmem_object = &kmem_object_store;
-
- /*
* init the map and reserve already allocated kernel space
* before installing.
*/
@@ -301,30 +264,14 @@ uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
* => when you unmap a part of anonymous kernel memory you want to toss
* the pages right away. (this gets called from uvm_unmap_...).
*/
-
-#define UKM_HASH_PENALTY 4 /* a guess */
-
void
-uvm_km_pgremove(uobj, start, end)
- struct uvm_object *uobj;
- vaddr_t start, end;
+uvm_km_pgremove(struct uvm_object *uobj, vaddr_t start, vaddr_t end)
{
- boolean_t by_list;
- struct vm_page *pp, *ppnext;
+ struct vm_page *pp;
vaddr_t curoff;
UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);
KASSERT(uobj->pgops == &aobj_pager);
- simple_lock(&uobj->vmobjlock);
-
- /* choose cheapest traversal */
- by_list = (uobj->uo_npages <=
- ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);
-
- if (by_list)
- goto loop_by_list;
-
- /* by hash */
for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
pp = uvm_pagelookup(uobj, curoff);
@@ -334,7 +281,6 @@ uvm_km_pgremove(uobj, start, end)
UVMHIST_LOG(maphist," page %p, busy=%ld", pp,
pp->pg_flags & PG_BUSY, 0, 0);
- /* now do the actual work */
if (pp->pg_flags & PG_BUSY) {
/* owner must check for this when done */
pp->pg_flags |= PG_RELEASED;
@@ -351,37 +297,6 @@ uvm_km_pgremove(uobj, start, end)
uvm_unlock_pageq();
}
}
- simple_unlock(&uobj->vmobjlock);
- return;
-
-loop_by_list:
-
- for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL; pp = ppnext) {
- ppnext = TAILQ_NEXT(pp, listq);
- if (pp->offset < start || pp->offset >= end) {
- continue;
- }
-
- UVMHIST_LOG(maphist," page %p, busy=%ld", pp,
- pp->pg_flags & PG_BUSY, 0, 0);
-
- if (pp->pg_flags & PG_BUSY) {
- /* owner must check for this when done */
- pp->pg_flags |= PG_RELEASED;
- } else {
- /* free the swap slot... */
- uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
-
- /*
- * ...and free the page; note it may be on the
- * active or inactive queues.
- */
- uvm_lock_pageq();
- uvm_pagefree(pp);
- uvm_unlock_pageq();
- }
- }
- simple_unlock(&uobj->vmobjlock);
}
@@ -397,59 +312,20 @@ loop_by_list:
*/
void
-uvm_km_pgremove_intrsafe(uobj, start, end)
- struct uvm_object *uobj;
- vaddr_t start, end;
+uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
- boolean_t by_list;
- struct vm_page *pp, *ppnext;
- vaddr_t curoff;
- UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);
-
- KASSERT(UVM_OBJ_IS_INTRSAFE_OBJECT(uobj));
- simple_lock(&uobj->vmobjlock); /* lock object */
-
- /* choose cheapest traversal */
- by_list = (uobj->uo_npages <=
- ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);
-
- if (by_list)
- goto loop_by_list;
-
- /* by hash */
-
- for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
- pp = uvm_pagelookup(uobj, curoff);
- if (pp == NULL) {
- continue;
- }
-
- UVMHIST_LOG(maphist," page %p, busy=%ld", pp,
- pp->pg_flags & PG_BUSY, 0, 0);
- KASSERT((pp->pg_flags & PG_BUSY) == 0);
- KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
- KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
- uvm_pagefree(pp);
- }
- simple_unlock(&uobj->vmobjlock);
- return;
-
-loop_by_list:
-
- for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL; pp = ppnext) {
- ppnext = TAILQ_NEXT(pp, listq);
- if (pp->offset < start || pp->offset >= end) {
- continue;
- }
-
- UVMHIST_LOG(maphist," page %p, busy=%ld", pp,
- pp->flags & PG_BUSY, 0, 0);
- KASSERT((pp->pg_flags & PG_BUSY) == 0);
- KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
- KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
- uvm_pagefree(pp);
+ struct vm_page *pg;
+ vaddr_t va;
+ paddr_t pa;
+
+ for (va = start; va < end; va += PAGE_SIZE) {
+ if (!pmap_extract(pmap_kernel(), va, &pa))
+ continue; /* panic? */
+ pg = PHYS_TO_VM_PAGE(pa);
+ if (pg == NULL)
+ panic("uvm_km_pgremove_intrsafe: no page");
+ uvm_pagefree(pg);
}
- simple_unlock(&uobj->vmobjlock);
}
@@ -512,7 +388,11 @@ uvm_km_kmemalloc(map, obj, size, flags)
* recover object offset from virtual address
*/
- offset = kva - vm_map_min(kernel_map);
+ if (obj != NULL)
+ offset = kva - vm_map_min(kernel_map);
+ else
+ offset = 0;
+
UVMHIST_LOG(maphist, " kva=0x%lx, offset=0x%lx", kva, offset,0,0);
/*
@@ -522,18 +402,12 @@ uvm_km_kmemalloc(map, obj, size, flags)
loopva = kva;
while (size) {
- simple_lock(&obj->vmobjlock);
pg = uvm_pagealloc(obj, offset, NULL, 0);
if (pg) {
pg->pg_flags &= ~PG_BUSY; /* new page */
UVM_PAGE_OWN(pg, NULL);
}
- simple_unlock(&obj->vmobjlock);
- /*
- * out of memory?
- */
-
if (__predict_false(pg == NULL)) {
if ((flags & UVM_KMF_NOWAIT) ||
((flags & UVM_KMF_CANFAIL) &&
@@ -549,12 +423,10 @@ uvm_km_kmemalloc(map, obj, size, flags)
/*
* map it in: note that we call pmap_enter with the map and
- * object unlocked in case we are kmem_map/kmem_object
- * (because if pmap_enter wants to allocate out of kmem_object
- * it will need to lock it itself!)
+ * object unlocked in case we are kmem_map.
*/
- if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
+ if (obj == NULL) {
pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
UVM_PROT_RW);
} else {
@@ -577,10 +449,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
*/
void
-uvm_km_free(map, addr, size)
- vm_map_t map;
- vaddr_t addr;
- vsize_t size;
+uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size)
{
uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}
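
With kmem_object gone, callers that used to pass uvmexp.kmem_object to uvm_km_kmemalloc() now pass NULL (see the kern_malloc.c, kern_malloc_debug.c and subr_pool.c hunks above), and inside uvm_km_kmemalloc() a NULL object selects the wired, interrupt-safe path. Roughly, as reconstructed from the hunks above, with the surrounding allocation loop and error handling elided:

	if (obj != NULL)
		offset = kva - vm_map_min(kernel_map);
	else
		offset = 0;

	/* ... per-page allocation loop ... */
	pg = uvm_pagealloc(obj, offset, NULL, 0);

	/*
	 * map it in: pmap_enter is called with the map and object
	 * unlocked in case we are kmem_map.
	 */
	if (obj == NULL) {
		/* unmanaged mapping, later torn down via pmap_extract() */
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
	} else {
		/* ... regular pmap_enter() path, unchanged ... */
	}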
diff --git a/sys/uvm/uvm_km.h b/sys/uvm/uvm_km.h
index 1162b4dc29a..66d00e5bddd 100644
--- a/sys/uvm/uvm_km.h
+++ b/sys/uvm/uvm_km.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.h,v 1.8 2004/04/19 22:52:33 tedu Exp $ */
+/* $OpenBSD: uvm_km.h,v 1.9 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_km.h,v 1.9 1999/06/21 17:25:11 thorpej Exp $ */
/*
@@ -51,7 +51,7 @@
void uvm_km_init(vaddr_t, vaddr_t);
void uvm_km_page_init(void);
void uvm_km_pgremove(struct uvm_object *, vaddr_t, vaddr_t);
-void uvm_km_pgremove_intrsafe(struct uvm_object *, vaddr_t, vaddr_t);
+void uvm_km_pgremove_intrsafe(vaddr_t, vaddr_t);
#endif /* _KERNEL */
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 55e8ba7b95a..83fa927a828 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.86 2007/04/04 17:44:45 art Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.87 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -1474,7 +1474,10 @@ uvm_unmap_remove(map, start, end, entry_list, p)
* special case: handle mappings to anonymous kernel objects.
* we want to free these pages right away...
*/
- if (UVM_ET_ISOBJ(entry) &&
+ if (map->flags & VM_MAP_INTRSAFE) {
+ uvm_km_pgremove_intrsafe(entry->start, entry->end);
+ pmap_kremove(entry->start, len);
+ } else if (UVM_ET_ISOBJ(entry) &&
UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
KASSERT(vm_map_pmap(map) == pmap_kernel());
@@ -1513,18 +1516,10 @@ uvm_unmap_remove(map, start, end, entry_list, p)
* from the object. offsets are always relative
* to vm_map_min(kernel_map).
*/
- if (UVM_OBJ_IS_INTRSAFE_OBJECT(entry->object.uvm_obj)) {
- pmap_kremove(entry->start, len);
- uvm_km_pgremove_intrsafe(entry->object.uvm_obj,
- entry->start - vm_map_min(kernel_map),
- entry->end - vm_map_min(kernel_map));
- } else {
- pmap_remove(pmap_kernel(), entry->start,
- entry->end);
- uvm_km_pgremove(entry->object.uvm_obj,
- entry->start - vm_map_min(kernel_map),
- entry->end - vm_map_min(kernel_map));
- }
+ pmap_remove(pmap_kernel(), entry->start, entry->end);
+ uvm_km_pgremove(entry->object.uvm_obj,
+ entry->start - vm_map_min(kernel_map),
+ entry->end - vm_map_min(kernel_map));
/*
* null out kernel_object reference, we've just
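
Putting the unmap side together: after the two uvm_map.c hunks above, uvm_unmap_remove() tells interrupt-safe maps apart from kernel-object mappings by map flag rather than by object type. An abridged reconstruction, with the code between the two hunks elided:

		if (map->flags & VM_MAP_INTRSAFE) {
			/* intrsafe map: find pages via pmap_extract and free them */
			uvm_km_pgremove_intrsafe(entry->start, entry->end);
			pmap_kremove(entry->start, len);
		} else if (UVM_ET_ISOBJ(entry) &&
		    UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
			KASSERT(vm_map_pmap(map) == pmap_kernel());

			/* ... */

			/*
			 * remove mappings, then drop the pages from the
			 * object.  offsets are always relative to
			 * vm_map_min(kernel_map).
			 */
			pmap_remove(pmap_kernel(), entry->start, entry->end);
			uvm_km_pgremove(entry->object.uvm_obj,
			    entry->start - vm_map_min(kernel_map),
			    entry->end - vm_map_min(kernel_map));

			/* ... null out the kernel_object reference ... */
		}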
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index 7de3f38393b..5ed91a71a72 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.h,v 1.37 2007/04/04 18:02:59 art Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.38 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -258,34 +258,6 @@ struct vm_map_intrsafe {
LIST_ENTRY(vm_map_intrsafe) vmi_list;
};
-LIST_HEAD(vmi_list, vm_map_intrsafe);
-#ifdef _KERNEL
-extern simple_lock_data_t vmi_list_slock;
-extern struct vmi_list vmi_list;
-
-static __inline int vmi_list_lock(void);
-static __inline void vmi_list_unlock(int);
-
-static __inline int
-vmi_list_lock()
-{
- int s;
-
- s = splhigh();
- simple_lock(&vmi_list_slock);
- return (s);
-}
-
-static __inline void
-vmi_list_unlock(s)
- int s;
-{
-
- simple_unlock(&vmi_list_slock);
- splx(s);
-}
-#endif /* _KERNEL */
-
/*
* handle inline options
*/
diff --git a/sys/uvm/uvm_map_i.h b/sys/uvm/uvm_map_i.h
index 21bc39cea41..70dd13e2a2d 100644
--- a/sys/uvm/uvm_map_i.h
+++ b/sys/uvm/uvm_map_i.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map_i.h,v 1.20 2007/04/04 18:02:59 art Exp $ */
+/* $OpenBSD: uvm_map_i.h,v 1.21 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_map_i.h,v 1.18 2000/11/27 08:40:04 chs Exp $ */
/*
@@ -92,10 +92,7 @@ uvm_map_create(pmap, min, max, flags)
{
vm_map_t result;
- MALLOC(result, vm_map_t,
- (flags & VM_MAP_INTRSAFE) ? sizeof(struct vm_map_intrsafe) :
- sizeof(struct vm_map),
- M_VMMAP, M_WAITOK);
+ MALLOC(result, vm_map_t, sizeof(struct vm_map), M_VMMAP, M_WAITOK);
uvm_map_setup(result, min, max, flags);
result->pmap = pmap;
return(result);
@@ -128,23 +125,6 @@ uvm_map_setup(map, min, max, flags)
rw_init(&map->lock, "vmmaplk");
simple_lock_init(&map->ref_lock);
simple_lock_init(&map->hint_lock);
-
- /*
- * If the map is interrupt safe, place it on the list
- * of interrupt safe maps, for uvm_fault().
- *
- * We almost never set up an interrupt-safe map, but we set
- * up quite a few regular ones (at every fork!), so put
- * interrupt-safe map setup in the slow path.
- */
- if (__predict_false(flags & VM_MAP_INTRSAFE)) {
- struct vm_map_intrsafe *vmi = (struct vm_map_intrsafe *)map;
- int s;
-
- s = vmi_list_lock();
- LIST_INSERT_HEAD(&vmi_list, vmi, vmi_list);
- vmi_list_unlock(s);
- }
}
diff --git a/sys/uvm/uvm_object.h b/sys/uvm/uvm_object.h
index 7dda1ae55a0..5992700e11d 100644
--- a/sys/uvm/uvm_object.h
+++ b/sys/uvm/uvm_object.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_object.h,v 1.9 2005/07/26 07:11:55 art Exp $ */
+/* $OpenBSD: uvm_object.h,v 1.10 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_object.h,v 1.11 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -74,14 +74,9 @@ struct uvm_object {
* maps.
*/
#define UVM_OBJ_KERN (-2)
-#define UVM_OBJ_KERN_INTRSAFE (-3)
#define UVM_OBJ_IS_KERN_OBJECT(uobj) \
- ((uobj)->uo_refs == UVM_OBJ_KERN || \
- (uobj)->uo_refs == UVM_OBJ_KERN_INTRSAFE)
-
-#define UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) \
- ((uobj)->uo_refs == UVM_OBJ_KERN_INTRSAFE)
+ ((uobj)->uo_refs == UVM_OBJ_KERN)
#ifdef _KERNEL
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index c37dd3ca8ab..deaf13ccc9c 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.c,v 1.57 2007/04/04 17:44:45 art Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.58 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
/*
@@ -1122,8 +1122,7 @@ uvm_pagerealloc(pg, newobj, newoff)
*/
void
-uvm_pagefree(pg)
- struct vm_page *pg;
+uvm_pagefree(struct vm_page *pg)
{
int s;
int saved_loan_count = pg->loan_count;
diff --git a/sys/uvm/uvm_stat.c b/sys/uvm/uvm_stat.c
index f8884695399..f8e0f7c167c 100644
--- a/sys/uvm/uvm_stat.c
+++ b/sys/uvm/uvm_stat.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_stat.c,v 1.17 2005/12/10 11:45:43 miod Exp $ */
+/* $OpenBSD: uvm_stat.c,v 1.18 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_stat.c,v 1.18 2001/03/09 01:02:13 chs Exp $ */
/*
@@ -255,7 +255,6 @@ uvmexp_print(int (*pr)(const char *, ...))
uvmexp.swpages, uvmexp.swpginuse, uvmexp.swpgonly, uvmexp.paging);
(*pr)(" kernel pointers:\n");
- (*pr)(" objs(kern/kmem)=%p/%p\n", uvm.kernel_object,
- uvmexp.kmem_object);
+ (*pr)(" objs(kern)=%p\n", uvm.kernel_object);
}
#endif