Diffstat (limited to 'sys/uvm')
-rw-r--r--  sys/uvm/uvm_extern.h   |   8
-rw-r--r--  sys/uvm/uvm_fault.c    |  32
-rw-r--r--  sys/uvm/uvm_fault_i.h  |  36
-rw-r--r--  sys/uvm/uvm_km.c       | 179
-rw-r--r--  sys/uvm/uvm_km.h       |   4
-rw-r--r--  sys/uvm/uvm_map.c      |  23
-rw-r--r--  sys/uvm/uvm_map.h      |  30
-rw-r--r--  sys/uvm/uvm_map_i.h    |  24
-rw-r--r--  sys/uvm/uvm_object.h   |   9
-rw-r--r--  sys/uvm/uvm_page.c     |   5
-rw-r--r--  sys/uvm/uvm_stat.c     |   5
11 files changed, 53 insertions, 302 deletions
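The heart of the change: interrupt-safe kernel submaps (kmem_map and friends) no longer carry a private kernel object such as kmem_object. When part of such a map is torn down, the backing pages are found by asking the kernel pmap what sits behind each virtual address rather than by looking them up in an object. A minimal sketch of that lookup, following the rewritten uvm_km_pgremove_intrsafe() shown in the diff below (the logic and the panic message come from the patch; the comments are illustrative):

/* sketch: free the physical pages backing [start, end) in an intrsafe submap */
void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	paddr_t pa;
	vaddr_t va;

	for (va = start; va < end; va += PAGE_SIZE) {
		/* ask the kernel pmap which physical page backs this VA */
		if (!pmap_extract(pmap_kernel(), va, &pa))
			continue;	/* nothing mapped at this address */
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			panic("uvm_km_pgremove_intrsafe: no page");
		/* intrsafe pages are wired and never on the paging queues */
		uvm_pagefree(pg);
	}
}

In the patched uvm_unmap_remove() this is paired with pmap_kremove(entry->start, len), so the translations and the pages are dropped together.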
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 3d051013137..488cd33468c 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.63 2006/11/29 12:39:50 miod Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.64 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */

/*
@@ -356,9 +356,6 @@ struct uvmexp {
	int pdrevnode;	/* vnode pages reactivated due to min threshold */
	int pdrevtext;	/* vtext pages reactivated due to min threshold */

-	/* kernel memory objects: managed by uvm_km_kmemalloc() only! */
-	struct uvm_object *kmem_object;
-
	int fpswtch;	/* FPU context switches */
	int kmapent;	/* number of kernel map entries */
};
@@ -507,9 +504,6 @@
vaddr_t		uvm_km_alloc_poolpage1(vm_map_t,
			struct uvm_object *, boolean_t);
void		uvm_km_free_poolpage1(vm_map_t, vaddr_t);

-#define uvm_km_alloc_poolpage(waitok) uvm_km_alloc_poolpage1(kmem_map, \
-					uvmexp.kmem_object, (waitok))
-#define uvm_km_free_poolpage(addr) uvm_km_free_poolpage1(kmem_map, (addr))
void		*uvm_km_getpage(boolean_t);
void		uvm_km_putpage(void *);
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index b791efe8499..1039c38428a 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault.c,v 1.44 2007/04/04 17:44:45 art Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.45 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */

/*
@@ -599,19 +599,6 @@ uvm_fault(orig_map, vaddr, fault_type, access_type)
	narrow = FALSE;		/* normal fault */

	/*
-	 * before we do anything else, if this is a fault on a kernel
-	 * address, check to see if the address is managed by an
-	 * interrupt-safe map.  If it is, we fail immediately.  Intrsafe
-	 * maps are never pageable, and this approach avoids an evil
-	 * locking mess.
-	 */
-	if (orig_map == kernel_map && uvmfault_check_intrsafe(&ufi)) {
-		UVMHIST_LOG(maphist, "<- VA 0x%lx in intrsafe map %p",
-		    ufi.orig_rvaddr, ufi.map, 0, 0);
-		return (EFAULT);
-	}
-
-	/*
	 * "goto ReFault" means restart the page fault from ground zero.
	 */
ReFault:
@@ -626,6 +613,12 @@ ReFault:
	}
	/* locked: maps(read) */

+#ifdef DIAGNOSTIC
+	if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0)
+		panic("uvm_fault: fault on non-pageable map (%p, 0x%lx)",
+		    ufi.map, vaddr);
+#endif
+
	/*
	 * check protection
	 */
@@ -639,17 +632,6 @@ ReFault:
	}

	/*
-	 * if the map is not a pageable map, a page fault always fails.
-	 */
-
-	if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0) {
-		UVMHIST_LOG(maphist,
-		    "<- map %p not pageable", ufi.map, 0, 0, 0);
-		uvmfault_unlockmaps(&ufi, FALSE);
-		return (EFAULT);
-	}
-
-	/*
	 * "enter_prot" is the protection we want to enter the page in at.
	 * for certain pages (e.g. copy-on-write pages) this protection can
	 * be more strict than ufi.entry->protection.  "wired" means either
diff --git a/sys/uvm/uvm_fault_i.h b/sys/uvm/uvm_fault_i.h
index 1505bb746e6..fc264cd4410 100644
--- a/sys/uvm/uvm_fault_i.h
+++ b/sys/uvm/uvm_fault_i.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault_i.h,v 1.10 2002/03/14 01:27:18 millert Exp $ */
+/* $OpenBSD: uvm_fault_i.h,v 1.11 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_fault_i.h,v 1.11 2000/06/26 14:21:17 mrg Exp $ */

/*
@@ -41,7 +41,6 @@
/*
 * uvm_fault_i.h: fault inline functions
 */
-static boolean_t uvmfault_check_intrsafe(struct uvm_faultinfo *);
static boolean_t uvmfault_lookup(struct uvm_faultinfo *, boolean_t);
static boolean_t uvmfault_relock(struct uvm_faultinfo *);
static void uvmfault_unlockall(struct uvm_faultinfo *, struct vm_amap *,
@@ -97,39 +96,6 @@ uvmfault_unlockall(ufi, amap, uobj, anon)
}

/*
- * uvmfault_check_intrsafe: check for a virtual address managed by
- * an interrupt-safe map.
- *
- * => caller must provide a uvm_faultinfo structure with the IN
- *	params properly filled in
- * => if we find an intersafe VA, we fill in ufi->map, and return TRUE
- */
-
-static __inline boolean_t
-uvmfault_check_intrsafe(ufi)
-	struct uvm_faultinfo *ufi;
-{
-	struct vm_map_intrsafe *vmi;
-	int s;
-
-	s = vmi_list_lock();
-	for (vmi = LIST_FIRST(&vmi_list); vmi != NULL;
-	    vmi = LIST_NEXT(vmi, vmi_list)) {
-		if (ufi->orig_rvaddr >= vm_map_min(&vmi->vmi_map) &&
-		    ufi->orig_rvaddr < vm_map_max(&vmi->vmi_map))
-			break;
-	}
-	vmi_list_unlock(s);
-
-	if (vmi != NULL) {
-		ufi->map = &vmi->vmi_map;
-		return (TRUE);
-	}
-
-	return (FALSE);
-}
-
-/*
 * uvmfault_lookup: lookup a virtual address in a map
 *
 * => caller must provide a uvm_faultinfo structure with the IN
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 595eb71204b..4273e866a72 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.c,v 1.56 2007/04/04 17:44:45 art Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.57 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */

/*
@@ -107,8 +107,7 @@
 *
 * most kernel private memory lives in kernel_object.   the only exception
 * to this is for memory that belongs to submaps that must be protected
- * by splvm().  each of these submaps has their own private kernel
- * object (e.g. kmem_object).
+ * by splvm().  each of these submaps manages their own pages.
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
@@ -127,12 +126,6 @@
 * then that means that the page at offset 0x235000 in kernel_object is
 * mapped at 0xf8235000.
 *
- * note that the offsets in kmem_object also follow this rule.
- * this means that the offsets for kmem_object must fall in the
- * range of [vm_map_min(kmem_object) - vm_map_min(kernel_map)] to
- * [vm_map_max(kmem_object) - vm_map_min(kernel_map)], so the offsets
- * in those objects will typically not start at zero.
- *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
@@ -153,22 +146,11 @@
vm_map_t kernel_map = NULL;

-struct vmi_list vmi_list;
-simple_lock_data_t vmi_list_slock;
-
/*
 * local data structues
 */

static struct vm_map		kernel_map_store;
-static struct uvm_object	kmem_object_store;
-
-/*
- * All pager operations here are NULL, but the object must have
- * a pager ops vector associated with it; various places assume
- * it to be so.
- */
-static struct uvm_pagerops km_pager;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
@@ -186,12 +168,6 @@ uvm_km_init(start, end)
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
-	 * first, initialize the interrupt-safe map list.
-	 */
-	LIST_INIT(&vmi_list);
-	simple_lock_init(&vmi_list_slock);
-
-	/*
	 * next, init kernel memory objects.
	 */
@@ -201,19 +177,6 @@ uvm_km_init(start, end)
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
-	 * kmem_object: for use by the kernel malloc().  Memory is always
-	 * wired, and this object (and the kmem_map) can be accessed at
-	 * interrupt time.
-	 */
-	simple_lock_init(&kmem_object_store.vmobjlock);
-	kmem_object_store.pgops = &km_pager;
-	TAILQ_INIT(&kmem_object_store.memq);
-	kmem_object_store.uo_npages = 0;
-	/* we are special.  we never die */
-	kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
-	uvmexp.kmem_object = &kmem_object_store;
-
-	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */
@@ -301,30 +264,14 @@ uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */
-
-#define UKM_HASH_PENALTY 4	/* a guess */
-
void
-uvm_km_pgremove(uobj, start, end)
-	struct uvm_object *uobj;
-	vaddr_t start, end;
+uvm_km_pgremove(struct uvm_object *uobj, vaddr_t start, vaddr_t end)
{
-	boolean_t by_list;
-	struct vm_page *pp, *ppnext;
+	struct vm_page *pp;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(uobj->pgops == &aobj_pager);
-	simple_lock(&uobj->vmobjlock);
-
-	/* choose cheapest traversal */
-	by_list = (uobj->uo_npages <=
-	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);
-
-	if (by_list)
-		goto loop_by_list;
-
-	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
@@ -334,7 +281,6 @@ uvm_km_pgremove(uobj, start, end)
		UVMHIST_LOG(maphist," page %p, busy=%ld", pp,
		    pp->pg_flags & PG_BUSY, 0, 0);

-		/* now do the actual work */
		if (pp->pg_flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->pg_flags |= PG_RELEASED;
@@ -351,37 +297,6 @@ uvm_km_pgremove(uobj, start, end)
			uvm_unlock_pageq();
		}
	}
-	simple_unlock(&uobj->vmobjlock);
-	return;
-
-loop_by_list:
-
-	for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL; pp = ppnext) {
-		ppnext = TAILQ_NEXT(pp, listq);
-		if (pp->offset < start || pp->offset >= end) {
-			continue;
-		}
-
-		UVMHIST_LOG(maphist," page %p, busy=%ld", pp,
-		    pp->pg_flags & PG_BUSY, 0, 0);
-
-		if (pp->pg_flags & PG_BUSY) {
-			/* owner must check for this when done */
-			pp->pg_flags |= PG_RELEASED;
-		} else {
-			/* free the swap slot... */
-			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
-
-			/*
-			 * ...and free the page; note it may be on the
-			 * active or inactive queues.
-			 */
-			uvm_lock_pageq();
-			uvm_pagefree(pp);
-			uvm_unlock_pageq();
-		}
-	}
-	simple_unlock(&uobj->vmobjlock);
}
@@ -397,59 +312,20 @@ loop_by_list:
 */

void
-uvm_km_pgremove_intrsafe(uobj, start, end)
-	struct uvm_object *uobj;
-	vaddr_t start, end;
+uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
-	boolean_t by_list;
-	struct vm_page *pp, *ppnext;
-	vaddr_t curoff;
-	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);
-
-	KASSERT(UVM_OBJ_IS_INTRSAFE_OBJECT(uobj));
-	simple_lock(&uobj->vmobjlock);		/* lock object */
-
-	/* choose cheapest traversal */
-	by_list = (uobj->uo_npages <=
-	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);
-
-	if (by_list)
-		goto loop_by_list;
-
-	/* by hash */
-
-	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
-		pp = uvm_pagelookup(uobj, curoff);
-		if (pp == NULL) {
-			continue;
-		}
-
-		UVMHIST_LOG(maphist," page %p, busy=%ld", pp,
-		    pp->pg_flags & PG_BUSY, 0, 0);
-		KASSERT((pp->pg_flags & PG_BUSY) == 0);
-		KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
-		KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
-		uvm_pagefree(pp);
-	}
-	simple_unlock(&uobj->vmobjlock);
-	return;
-
-loop_by_list:
-
-	for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL; pp = ppnext) {
-		ppnext = TAILQ_NEXT(pp, listq);
-		if (pp->offset < start || pp->offset >= end) {
-			continue;
-		}
-
-		UVMHIST_LOG(maphist," page %p, busy=%ld", pp,
-		    pp->flags & PG_BUSY, 0, 0);
-		KASSERT((pp->pg_flags & PG_BUSY) == 0);
-		KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
-		KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
-		uvm_pagefree(pp);
+	struct vm_page *pg;
+	vaddr_t va;
+	paddr_t pa;
+
+	for (va = start; va < end; va += PAGE_SIZE) {
+		if (!pmap_extract(pmap_kernel(), va, &pa))
+			continue; /* panic? */
+		pg = PHYS_TO_VM_PAGE(pa);
+		if (pg == NULL)
+			panic("uvm_km_pgremove_intrsafe: no page");
+		uvm_pagefree(pg);
	}
-	simple_unlock(&uobj->vmobjlock);
}
@@ -512,7 +388,11 @@ uvm_km_kmemalloc(map, obj, size, flags)
	 * recover object offset from virtual address
	 */

-	offset = kva - vm_map_min(kernel_map);
+	if (obj != NULL)
+		offset = kva - vm_map_min(kernel_map);
+	else
+		offset = 0;
+
	UVMHIST_LOG(maphist, " kva=0x%lx, offset=0x%lx", kva, offset,0,0);

	/*
@@ -522,18 +402,12 @@ uvm_km_kmemalloc(map, obj, size, flags)

	loopva = kva;
	while (size) {
-		simple_lock(&obj->vmobjlock);
		pg = uvm_pagealloc(obj, offset, NULL, 0);
		if (pg) {
			pg->pg_flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
-		simple_unlock(&obj->vmobjlock);
-
-		/*
-		 * out of memory?
-		 */
-
		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) &&
@@ -549,12 +423,10 @@ uvm_km_kmemalloc(map, obj, size, flags)

		/*
		 * map it in: note that we call pmap_enter with the map and
-		 * object unlocked in case we are kmem_map/kmem_object
-		 * (because if pmap_enter wants to allocate out of kmem_object
-		 * it will need to lock it itself!)
+		 * object unlocked in case we are kmem_map.
		 */

-		if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
+		if (obj == NULL) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_RW);
		} else {
@@ -577,10 +449,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
 */

void
-uvm_km_free(map, addr, size)
-	vm_map_t map;
-	vaddr_t addr;
-	vsize_t size;
+uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size)
{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}
diff --git a/sys/uvm/uvm_km.h b/sys/uvm/uvm_km.h
index 1162b4dc29a..66d00e5bddd 100644
--- a/sys/uvm/uvm_km.h
+++ b/sys/uvm/uvm_km.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.h,v 1.8 2004/04/19 22:52:33 tedu Exp $ */
+/* $OpenBSD: uvm_km.h,v 1.9 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_km.h,v 1.9 1999/06/21 17:25:11 thorpej Exp $ */

/*
@@ -51,7 +51,7 @@
void uvm_km_init(vaddr_t, vaddr_t);
void uvm_km_page_init(void);
void uvm_km_pgremove(struct uvm_object *, vaddr_t, vaddr_t);
-void uvm_km_pgremove_intrsafe(struct uvm_object *, vaddr_t, vaddr_t);
+void uvm_km_pgremove_intrsafe(vaddr_t, vaddr_t);

#endif /* _KERNEL */
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 55e8ba7b95a..83fa927a828 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.86 2007/04/04 17:44:45 art Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.87 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */

/*
@@ -1474,7 +1474,10 @@ uvm_unmap_remove(map, start, end, entry_list, p)
		 * special case: handle mappings to anonymous kernel objects.
		 * we want to free these pages right away...
		 */
-		if (UVM_ET_ISOBJ(entry) &&
+		if (map->flags & VM_MAP_INTRSAFE) {
+			uvm_km_pgremove_intrsafe(entry->start, entry->end);
+			pmap_kremove(entry->start, len);
+		} else if (UVM_ET_ISOBJ(entry) &&
		    UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
			KASSERT(vm_map_pmap(map) == pmap_kernel());

@@ -1513,18 +1516,10 @@ uvm_unmap_remove(map, start, end, entry_list, p)
			 * from the object.  offsets are always relative
			 * to vm_map_min(kernel_map).
			 */
-			if (UVM_OBJ_IS_INTRSAFE_OBJECT(entry->object.uvm_obj)) {
-				pmap_kremove(entry->start, len);
-				uvm_km_pgremove_intrsafe(entry->object.uvm_obj,
-				    entry->start - vm_map_min(kernel_map),
-				    entry->end - vm_map_min(kernel_map));
-			} else {
-				pmap_remove(pmap_kernel(), entry->start,
-				    entry->end);
-				uvm_km_pgremove(entry->object.uvm_obj,
-				    entry->start - vm_map_min(kernel_map),
-				    entry->end - vm_map_min(kernel_map));
-			}
+			pmap_remove(pmap_kernel(), entry->start, entry->end);
+			uvm_km_pgremove(entry->object.uvm_obj,
+			    entry->start - vm_map_min(kernel_map),
+			    entry->end - vm_map_min(kernel_map));

			/*
			 * null out kernel_object reference, we've just
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index 7de3f38393b..5ed91a71a72 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.h,v 1.37 2007/04/04 18:02:59 art Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.38 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */

/*
@@ -258,34 +258,6 @@ struct vm_map_intrsafe {
	LIST_ENTRY(vm_map_intrsafe)	vmi_list;
};

-LIST_HEAD(vmi_list, vm_map_intrsafe);
-#ifdef _KERNEL
-extern simple_lock_data_t vmi_list_slock;
-extern struct vmi_list vmi_list;
-
-static __inline int vmi_list_lock(void);
-static __inline void vmi_list_unlock(int);
-
-static __inline int
-vmi_list_lock()
-{
-	int s;
-
-	s = splhigh();
-	simple_lock(&vmi_list_slock);
-	return (s);
-}
-
-static __inline void
-vmi_list_unlock(s)
-	int s;
-{
-
-	simple_unlock(&vmi_list_slock);
-	splx(s);
-}
-#endif /* _KERNEL */
-
/*
 * handle inline options
 */
diff --git a/sys/uvm/uvm_map_i.h b/sys/uvm/uvm_map_i.h
index 21bc39cea41..70dd13e2a2d 100644
--- a/sys/uvm/uvm_map_i.h
+++ b/sys/uvm/uvm_map_i.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map_i.h,v 1.20 2007/04/04 18:02:59 art Exp $ */
+/* $OpenBSD: uvm_map_i.h,v 1.21 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_map_i.h,v 1.18 2000/11/27 08:40:04 chs Exp $ */

/*
@@ -92,10 +92,7 @@ uvm_map_create(pmap, min, max, flags)
{
	vm_map_t result;

-	MALLOC(result, vm_map_t,
-	    (flags & VM_MAP_INTRSAFE) ? sizeof(struct vm_map_intrsafe) :
-	    sizeof(struct vm_map),
-	    M_VMMAP, M_WAITOK);
+	MALLOC(result, vm_map_t, sizeof(struct vm_map), M_VMMAP, M_WAITOK);
	uvm_map_setup(result, min, max, flags);
	result->pmap = pmap;
	return(result);
@@ -128,23 +125,6 @@ uvm_map_setup(map, min, max, flags)
	rw_init(&map->lock, "vmmaplk");
	simple_lock_init(&map->ref_lock);
	simple_lock_init(&map->hint_lock);
-
-	/*
-	 * If the map is interrupt safe, place it on the list
-	 * of interrupt safe maps, for uvm_fault().
-	 *
-	 * We almost never set up an interrupt-safe map, but we set
-	 * up quite a few regular ones (at every fork!), so put
-	 * interrupt-safe map setup in the slow path.
-	 */
-	if (__predict_false(flags & VM_MAP_INTRSAFE)) {
-		struct vm_map_intrsafe *vmi = (struct vm_map_intrsafe *)map;
-		int s;
-
-		s = vmi_list_lock();
-		LIST_INSERT_HEAD(&vmi_list, vmi, vmi_list);
-		vmi_list_unlock(s);
-	}
}
diff --git a/sys/uvm/uvm_object.h b/sys/uvm/uvm_object.h
index 7dda1ae55a0..5992700e11d 100644
--- a/sys/uvm/uvm_object.h
+++ b/sys/uvm/uvm_object.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_object.h,v 1.9 2005/07/26 07:11:55 art Exp $ */
+/* $OpenBSD: uvm_object.h,v 1.10 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_object.h,v 1.11 2001/03/09 01:02:12 chs Exp $ */

/*
@@ -74,14 +74,9 @@ struct uvm_object {
 * maps.
 */

#define UVM_OBJ_KERN		(-2)
-#define UVM_OBJ_KERN_INTRSAFE	(-3)

#define UVM_OBJ_IS_KERN_OBJECT(uobj)					\
-	((uobj)->uo_refs == UVM_OBJ_KERN ||				\
-	 (uobj)->uo_refs == UVM_OBJ_KERN_INTRSAFE)
-
-#define UVM_OBJ_IS_INTRSAFE_OBJECT(uobj)				\
-	((uobj)->uo_refs == UVM_OBJ_KERN_INTRSAFE)
+	((uobj)->uo_refs == UVM_OBJ_KERN)

#ifdef _KERNEL
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index c37dd3ca8ab..deaf13ccc9c 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.c,v 1.57 2007/04/04 17:44:45 art Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.58 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */

/*
@@ -1122,8 +1122,7 @@ uvm_pagerealloc(pg, newobj, newoff)
 */

void
-uvm_pagefree(pg)
-	struct vm_page *pg;
+uvm_pagefree(struct vm_page *pg)
{
	int s;
	int saved_loan_count = pg->loan_count;
diff --git a/sys/uvm/uvm_stat.c b/sys/uvm/uvm_stat.c
index f8884695399..f8e0f7c167c 100644
--- a/sys/uvm/uvm_stat.c
+++ b/sys/uvm/uvm_stat.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_stat.c,v 1.17 2005/12/10 11:45:43 miod Exp $ */
+/* $OpenBSD: uvm_stat.c,v 1.18 2007/04/11 12:10:42 art Exp $ */
/* $NetBSD: uvm_stat.c,v 1.18 2001/03/09 01:02:13 chs Exp $ */

/*
@@ -255,7 +255,6 @@ uvmexp_print(int (*pr)(const char *, ...))
	    uvmexp.swpages, uvmexp.swpginuse, uvmexp.swpgonly, uvmexp.paging);

	(*pr)(" kernel pointers:\n");
-	(*pr)(" objs(kern/kmem)=%p/%p\n", uvm.kernel_object,
-	    uvmexp.kmem_object);
+	(*pr)(" objs(kern)=%p\n", uvm.kernel_object);
}
#endif
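For reference, the offset rule that the surviving kernel_object code depends on is the one spelled out in the uvm_km.c comment above: an object offset is simply the virtual address minus the start of kernel_map, which is why uvm_km_kmemalloc() now computes offset = kva - vm_map_min(kernel_map) only when a real object was passed. A tiny worked example using the illustrative addresses from that comment (not real values):

/* kernel_object offsets are relative to vm_map_min(kernel_map) */
vaddr_t kva    = 0xf8235000;	/* VA returned by uvm_km_alloc() in the comment's example */
vaddr_t base   = 0xf8000000;	/* vm_map_min(kernel_map) in the same example */
vaddr_t offset = kva - base;	/* 0x235000: the page's offset within kernel_object */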