Diffstat (limited to 'sys/uvm')
-rw-r--r--  sys/uvm/uvm_amap.c    |  4
-rw-r--r--  sys/uvm/uvm_bio.c     | 10
-rw-r--r--  sys/uvm/uvm_device.c  |  8
-rw-r--r--  sys/uvm/uvm_extern.h  |  9
-rw-r--r--  sys/uvm/uvm_fault.c   | 12
-rw-r--r--  sys/uvm/uvm_glue.c    |  6
-rw-r--r--  sys/uvm/uvm_km.c      | 10
-rw-r--r--  sys/uvm/uvm_loan.c    | 32
-rw-r--r--  sys/uvm/uvm_map.c     | 84
-rw-r--r--  sys/uvm/uvm_map.h     |  5
-rw-r--r--  sys/uvm/uvm_mmap.c    |  9
-rw-r--r--  sys/uvm/uvm_page.c    |  6
-rw-r--r--  sys/uvm/uvm_page.h    |  6
-rw-r--r--  sys/uvm/uvm_pager.c   |  8
-rw-r--r--  sys/uvm/uvm_param.h   |  6
-rw-r--r--  sys/uvm/uvm_pglist.h  | 10
-rw-r--r--  sys/uvm/uvm_pmap.h    |  6
-rw-r--r--  sys/uvm/uvm_swap.c    |  4
-rw-r--r--  sys/uvm/uvm_vnode.c   | 11
19 files changed, 126 insertions, 120 deletions
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index 9717ac38a10..a8a1a527367 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_amap.c,v 1.16 2001/11/28 19:28:14 art Exp $ */
-/* $NetBSD: uvm_amap.c,v 1.32 2001/06/02 18:09:25 chs Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.17 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.33 2001/07/22 13:34:12 wiz Exp $ */
 /*
  *
diff --git a/sys/uvm/uvm_bio.c b/sys/uvm/uvm_bio.c
index 571d0932e7c..ba119958317 100644
--- a/sys/uvm/uvm_bio.c
+++ b/sys/uvm/uvm_bio.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_bio.c,v 1.16 2001/07/18 16:44:39 thorpej Exp $ */
+/* $NetBSD: uvm_bio.c,v 1.17 2001/09/10 21:19:43 chris Exp $ */
 /*
  * Copyright (c) 1998 Chuck Silvers.
@@ -334,7 +334,7 @@ again:
 		UVM_PAGE_OWN(pg, NULL);
 	}
 	simple_unlock(&uobj->vmobjlock);
-	pmap_update();
+	pmap_update(ufi->orig_map->pmap);
 	return 0;
 }
@@ -420,7 +420,7 @@ again:
 		va = (vaddr_t)(ubc_object.kva + ((umap - ubc_object.umap) << ubc_winshift));
 		pmap_remove(pmap_kernel(), va, va + ubc_winsize);
-		pmap_update();
+		pmap_update(pmap_kernel());
 	}
 	if (umap->refcount == 0) {
@@ -492,7 +492,7 @@ ubc_release(va, wlen)
 		va = (vaddr_t)(ubc_object.kva + ((umap - ubc_object.umap) << ubc_winshift));
 		pmap_remove(pmap_kernel(), va, va + ubc_winsize);
-		pmap_update();
+		pmap_update(pmap_kernel());
 		LIST_REMOVE(umap, hash);
 		umap->uobj = NULL;
 		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
@@ -546,7 +546,7 @@ ubc_flush(uobj, start, end)
 			va = (vaddr_t)(ubc_object.kva + ((umap - ubc_object.umap) << ubc_winshift));
 			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
-			pmap_update();
+			pmap_update(pmap_kernel());
 			LIST_REMOVE(umap, hash);
 			umap->uobj = NULL;
diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c
index b03086bc5c7..0f5f2214ec8 100644
--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_device.c,v 1.19 2001/11/28 19:28:14 art Exp $ */
-/* $NetBSD: uvm_device.c,v 1.36 2001/05/26 21:27:21 chs Exp $ */
+/* $OpenBSD: uvm_device.c,v 1.20 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_device.c,v 1.37 2001/09/10 21:19:42 chris Exp $ */
 /*
  *
@@ -460,13 +460,13 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
 			 */
 			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
-			pmap_update();	/* sync what we have so far */
+			pmap_update(ufi->orig_map->pmap);	/* sync what we have so far */
 			uvm_wait("udv_fault");
 			return (ERESTART);
 		}
 	}
 	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
-	pmap_update();
+	pmap_update(ufi->orig_map->pmap);
 	return (retval);
 }
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index b0bfa189072..bb9f28b6c71 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_extern.h,v 1.37 2001/11/30 17:24:19 art Exp $ */
-/* $NetBSD: uvm_extern.h,v 1.65 2001/06/02 18:09:26 chs Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.38 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.66 2001/08/16 01:37:50 chs Exp $ */
 /*
  *
@@ -544,10 +544,9 @@
 boolean_t	uvm_map_checkprot __P((struct vm_map *, vaddr_t, vaddr_t, vm_prot_t));
 int		uvm_map_protect __P((struct vm_map *, vaddr_t, vaddr_t, vm_prot_t, boolean_t));
-struct vmspace	*uvmspace_alloc __P((vaddr_t, vaddr_t,
-			boolean_t));
+struct vmspace	*uvmspace_alloc __P((vaddr_t, vaddr_t));
 void		uvmspace_init __P((struct vmspace *, struct pmap *,
-			vaddr_t, vaddr_t, boolean_t));
+			vaddr_t, vaddr_t));
 void		uvmspace_exec __P((struct proc *, vaddr_t, vaddr_t));
 struct vmspace	*uvmspace_fork __P((struct vmspace *));
 void		uvmspace_free __P((struct vmspace *));
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index 8b47ada9019..4e08eaa63a4 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_fault.c,v 1.27 2001/11/28 19:28:14 art Exp $ */
-/* $NetBSD: uvm_fault.c,v 1.67 2001/06/26 17:55:14 thorpej Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.28 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.68 2001/09/10 21:19:42 chris Exp $ */
 /*
  *
@@ -845,7 +845,7 @@ ReFault:
 		    (VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0));
 		}
 		simple_unlock(&anon->an_lock);
-		pmap_update();
+		pmap_update(ufi.orig_map->pmap);
 	}
 	/* locked: maps(read), amap(if there) */
@@ -985,7 +985,7 @@ ReFault:
 			pages[lcv]->flags &= ~(PG_BUSY);	/* un-busy! */
 			UVM_PAGE_OWN(pages[lcv], NULL);
 		}	/* for "lcv" loop */
-		pmap_update();
+		pmap_update(ufi.orig_map->pmap);
 		}	/* "gotpages" != 0 */
 		/* note: object still _locked_ */
 	} else {
@@ -1284,7 +1284,7 @@ ReFault:
 	if (anon != oanon)
 		simple_unlock(&anon->an_lock);
 	uvmfault_unlockall(&ufi, amap, uobj, oanon);
-	pmap_update();
+	pmap_update(ufi.orig_map->pmap);
 	return 0;
@@ -1761,7 +1761,7 @@ Case2:
 	UVM_PAGE_OWN(pg, NULL);
 	uvmfault_unlockall(&ufi, amap, uobj, anon);
-	pmap_update();
+	pmap_update(ufi.orig_map->pmap);
 	UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
 	return 0;
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index 8349434d0f8..7e6057194f1 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_glue.c,v 1.29 2001/11/28 19:28:14 art Exp $ */
-/* $NetBSD: uvm_glue.c,v 1.50 2001/06/02 18:09:26 chs Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.30 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.51 2001/09/10 21:19:42 chris Exp $ */
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -196,7 +196,7 @@ uvm_chgkprot(addr, len, rw)
 			panic("chgkprot: invalid page");
 		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
 	}
-	pmap_update();
+	pmap_update(pmap_kernel());
 }
 #endif
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 584a3eeadf7..afc2ac92d10 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_km.c,v 1.25 2001/11/28 19:28:14 art Exp $ */
-/* $NetBSD: uvm_km.c,v 1.50 2001/06/26 17:55:15 thorpej Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.26 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_km.c,v 1.51 2001/09/10 21:19:42 chris Exp $ */
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -572,7 +572,9 @@ uvm_km_kmemalloc(map, obj, size, flags)
 		offset += PAGE_SIZE;
 		loopsize -= PAGE_SIZE;
 	}
-	pmap_update();
+
+	pmap_update(pmap_kernel());
+
 	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
 	return(kva);
 }
@@ -701,7 +703,7 @@ uvm_km_alloc1(map, size, zeroit)
 		size -= PAGE_SIZE;
 	}
-	pmap_update();
+	pmap_update(map->pmap);
 	/*
 	 * zero on request (note that "size" is now zero due to the above loop
diff --git a/sys/uvm/uvm_loan.c b/sys/uvm/uvm_loan.c
index 630460b1316..cc82286e91b 100644
--- a/sys/uvm/uvm_loan.c
+++ b/sys/uvm/uvm_loan.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_loan.c,v 1.15 2001/11/28 19:28:14 art Exp $ */
-/* $NetBSD: uvm_loan.c,v 1.29 2001/05/25 04:06:14 chs Exp $ */
+/* $OpenBSD: uvm_loan.c,v 1.16 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_loan.c,v 1.31 2001/08/27 02:34:29 chuck Exp $ */
 /*
  *
@@ -120,13 +120,14 @@ static int uvm_loanzero __P((struct uvm_faultinfo *, void ***, int));
  * uvm_loanentry: loan out pages in a map entry (helper fn for uvm_loan())
  *
  * => "ufi" is the result of a successful map lookup (meaning that
- *	the map is locked by the caller)
+ *	on entry the map is locked by the caller)
  * => we may unlock and then relock the map if needed (for I/O)
  * => we put our output result in "output"
+ * => we always return with the map unlocked
 * => possible return values:
 *	-1 == error, map is unlocked
 *	0 == map relock error (try again!), map is unlocked
-*	>0 == number of pages we loaned, map remain locked
+*	>0 == number of pages we loaned, map is unlocked
 */
 static __inline int
@@ -175,15 +176,15 @@ uvm_loanentry(ufi, output, flags)
 		} else {
 			rv = -1;	/* null map entry... fail now */
 		}
-		/* locked: if (rv > 0) => map, amap, uobj */
+		/* locked: if (rv > 0) => map, amap, uobj  [o.w. unlocked] */
 		/* total failure */
 		if (rv < 0)
-			return(-1);
+			return(-1);	/* everything unlocked */
 		/* relock failed, need to do another lookup */
 		if (rv == 0)
-			return(result);
+			return(result);	/* everything unlocked */
 		/*
 		 * got it... advance to next page
@@ -194,12 +195,13 @@
 	}
 	/*
-	 * unlock what we locked and return (with map still locked)
+	 * unlock what we locked, unlock the maps and return
 	 */
 	if (aref->ar_amap)
 		amap_unlock(aref->ar_amap);
 	if (uobj)
 		simple_unlock(&uobj->vmobjlock);
+	uvmfault_unlockmaps(ufi, FALSE);
 	return(result);
 }
@@ -281,18 +283,16 @@ uvm_loan(map, start, len, result, flags)
 		}
 		/*
-		 * done! the map is locked only if rv > 0. if that
-		 * is the case, advance and unlock.
+		 * done! the map is unlocked. advance, if possible.
 		 *
-		 * XXXCDC: could avoid the unlock with smarter code
-		 * (but it only happens on map entry boundaries,
-		 * so it isn't that bad).
+		 * XXXCDC: could be recoded to hold the map lock with
+		 * smarter code (but it only happens on map entry
+		 * boundaries, so it isn't that bad).
 		 */
 		if (rv) {
 			rv <<= PAGE_SHIFT;
 			len -= rv;
 			start += rv;
-			uvmfault_unlockmaps(&ufi, FALSE);
 		}
 	}
@@ -468,7 +468,7 @@ uvm_loanuobj(ufi, output, flags, va)
 	npages = 1;
 	/* locked: uobj */
 	result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start,
-	    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, 0);
+	    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO);
 	/* locked: <nothing> */
 	/*
@@ -761,7 +761,7 @@ uvm_unloanpage(ploans, npages)
 			panic("uvm_unloanpage: page %p isn't loaned", pg);
 		pg->loan_count--;		/* drop loan */
-		uvm_pageunwire(pg);		/* and wire */
+		uvm_pageunwire(pg);		/* and unwire */
 		/*
 		 * if page is unowned and we killed last loan, then we can
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 67b856277aa..f2ebe948eb9 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map.c,v 1.33 2001/11/28 19:28:14 art Exp $ */
-/* $NetBSD: uvm_map.c,v 1.99 2001/06/02 18:09:26 chs Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.34 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_map.c,v 1.105 2001/09/10 21:19:42 chris Exp $ */
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -77,6 +77,7 @@
 #include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/pool.h>
+#include <sys/kernel.h>
 #ifdef SYSVSHM
 #include <sys/shm.h>
@@ -105,6 +106,7 @@ struct pool uvm_vmspace_pool;
  */
 struct pool uvm_map_entry_pool;
+struct pool uvm_map_entry_kmem_pool;
 #ifdef PMAP_GROWKERNEL
 /*
@@ -189,8 +191,6 @@ static void uvm_map_unreference_amap __P((struct vm_map_entry *, int));
 /*
  * uvm_mapent_alloc: allocate a map entry
- *
- * => XXX: static pool for kernel map?
 */
 static __inline struct vm_map_entry *
@@ -199,36 +199,36 @@ uvm_mapent_alloc(map)
 {
 	struct vm_map_entry *me;
 	int s;
-	UVMHIST_FUNC("uvm_mapent_alloc");
-	UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
-	if ((map->flags & VM_MAP_INTRSAFE) == 0 &&
-	    map != kernel_map && kernel_map != NULL /* XXX */) {
-		me = pool_get(&uvm_map_entry_pool, PR_WAITOK);
-		me->flags = 0;
-		/* me can't be null, wait ok */
-	} else {
-		s = splvm();	/* protect kentry_free list with splvm */
+	if (map->flags & VM_MAP_INTRSAFE || cold) {
+		s = splvm();
 		simple_lock(&uvm.kentry_lock);
 		me = uvm.kentry_free;
 		if (me)
			uvm.kentry_free = me->next;
 		simple_unlock(&uvm.kentry_lock);
 		splx(s);
-		if (!me)
-			panic("mapent_alloc: out of static map entries, check MAX_KMAPENT");
+		if (me == NULL) {
+			panic("uvm_mapent_alloc: out of static map entries, "
+			    "check MAX_KMAPENT (currently %d)",
+			    MAX_KMAPENT);
+		}
 		me->flags = UVM_MAP_STATIC;
+	} else if (map == kernel_map) {
+		me = pool_get(&uvm_map_entry_kmem_pool, PR_WAITOK);
+		me->flags = UVM_MAP_KMEM;
+	} else {
+		me = pool_get(&uvm_map_entry_pool, PR_WAITOK);
+		me->flags = 0;
 	}
-	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]",
-	    me, ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map)
-	    ? TRUE : FALSE, 0, 0);
+	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
+	    ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
 	return(me);
 }
 /*
  * uvm_mapent_free: free map entry
- *
- * => XXX: static pool for kernel map?
 */
 static __inline void
@@ -236,19 +236,21 @@ uvm_mapent_free(me)
 	struct vm_map_entry *me;
 {
 	int s;
-	UVMHIST_FUNC("uvm_mapent_free");
-	UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
+
 	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]", me, me->flags, 0, 0);
-	if ((me->flags & UVM_MAP_STATIC) == 0) {
-		pool_put(&uvm_map_entry_pool, me);
-	} else {
-		s = splvm();	/* protect kentry_free list with splvm */
+	if (me->flags & UVM_MAP_STATIC) {
+		s = splvm();
 		simple_lock(&uvm.kentry_lock);
 		me->next = uvm.kentry_free;
 		uvm.kentry_free = me;
 		simple_unlock(&uvm.kentry_lock);
 		splx(s);
+	} else if (me->flags & UVM_MAP_KMEM) {
+		pool_put(&uvm_map_entry_kmem_pool, me);
+	} else {
+		pool_put(&uvm_map_entry_pool, me);
 	}
 }
@@ -359,6 +361,8 @@ uvm_map_init()
 	pool_init(&uvm_map_entry_pool, sizeof(struct vm_map_entry), 0, 0, 0,
 	    "vmmpepl", 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMMAP);
+	pool_init(&uvm_map_entry_kmem_pool, sizeof(struct vm_map_entry),
+	    0, 0, 0, "vmmpekpl", 0, NULL, NULL, M_VMMAP);
 }
@@ -1125,7 +1129,7 @@ uvm_unmap_remove(map, start, end, entry_list)
 		first_entry = entry;
 		entry = next;		/* next entry, please */
 	}
-	pmap_update();
+	pmap_update(vm_map_pmap(map));
 	/*
 	 * now we've cleaned up the map and are ready for the caller to drop
@@ -1621,7 +1625,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
 		/* end of 'while' loop */
 		fudge = 0;
 	}
-	pmap_update();
+	pmap_update(srcmap->pmap);
 	/*
 	 * unlock dstmap. we will dispose of deadentry in
@@ -1833,7 +1837,7 @@ uvm_map_protect(map, start, end, new_prot, set_max)
 		}
 		current = current->next;
 	}
-	pmap_update();
+	pmap_update(map->pmap);
 out:
 	vm_map_unlock(map);
@@ -2600,7 +2604,7 @@ uvm_map_clean(map, start, end, flags)
 			continue;
 		default:
-			panic("uvm_map_clean: wierd flags");
+			panic("uvm_map_clean: weird flags");
 		}
 	}
 	amap_unlock(amap);
@@ -2682,15 +2686,14 @@ uvm_map_checkprot(map, start, end, protection)
 * - refcnt set to 1, rest must be init'd by caller
 */
 struct vmspace *
-uvmspace_alloc(min, max, pageable)
+uvmspace_alloc(min, max)
 	vaddr_t min, max;
-	int pageable;
 {
 	struct vmspace *vm;
 	UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
 	vm = pool_get(&uvm_vmspace_pool, PR_WAITOK);
-	uvmspace_init(vm, NULL, min, max, pageable);
+	uvmspace_init(vm, NULL, min, max);
 	UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
 	return (vm);
 }
@@ -2702,16 +2705,15 @@ uvmspace_alloc(min, max, pageable)
 * - refcnt set to 1, rest must me init'd by caller
 */
 void
-uvmspace_init(vm, pmap, min, max, pageable)
+uvmspace_init(vm, pmap, min, max)
 	struct vmspace *vm;
 	struct pmap *pmap;
 	vaddr_t min, max;
-	boolean_t pageable;
 {
 	UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
 	memset(vm, 0, sizeof(*vm));
-	uvm_map_setup(&vm->vm_map, min, max, pageable ? VM_MAP_PAGEABLE : 0);
+	uvm_map_setup(&vm->vm_map, min, max, VM_MAP_PAGEABLE);
 	if (pmap)
 		pmap_reference(pmap);
 	else
@@ -2832,8 +2834,7 @@ uvmspace_exec(p, start, end)
 	 * for p
 	 */
-	nvm = uvmspace_alloc(start, end,
-	    (map->flags & VM_MAP_PAGEABLE) ? TRUE : FALSE);
+	nvm = uvmspace_alloc(start, end);
 	/*
 	 * install new vmspace and drop our ref to the old one.
@@ -2914,8 +2915,7 @@ uvmspace_fork(vm1)
 	vm_map_lock(old_map);
-	vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset,
-	    (old_map->flags & VM_MAP_PAGEABLE) ? TRUE : FALSE);
+	vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset);
 	memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
 	new_map = &vm2->vm_map;		  /* XXX */
@@ -3127,7 +3127,7 @@ uvmspace_fork(vm1)
 			    old_entry->end,
 			    old_entry->protection & ~VM_PROT_WRITE);
-			pmap_update();
+			pmap_update(old_map->pmap);
 			}
 			old_entry->etype |= UVM_ET_NEEDSCOPY;
@@ -3167,7 +3167,7 @@ uvmspace_fork(vm1)
 			    new_entry->end,
 			    new_entry->protection & ~VM_PROT_WRITE);
-			pmap_update();
+			pmap_update(new_pmap);
 			}
 		}
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index d16b48b4509..d0d1509fc4c 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map.h,v 1.18 2001/11/28 19:28:15 art Exp $ */
-/* $NetBSD: uvm_map.h,v 1.29 2001/06/26 17:55:15 thorpej Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.19 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_map.h,v 1.30 2001/09/09 19:38:23 chs Exp $ */
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -139,6 +139,7 @@ struct vm_map_entry {
 	u_int8_t flags;			/* flags */
 #define UVM_MAP_STATIC	0x01		/* static map entry */
+#define UVM_MAP_KMEM	0x02		/* from kmem entry pool */
 };
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index 1543303d5aa..05c786f6666 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_mmap.c,v 1.30 2001/11/28 19:28:15 art Exp $ */
-/* $NetBSD: uvm_mmap.c,v 1.54 2001/06/14 20:32:49 thorpej Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.31 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_mmap.c,v 1.55 2001/08/17 05:52:46 chs Exp $ */
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -1063,6 +1063,11 @@ uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
 	} else {
 		vp = (struct vnode *)handle;
 		if (vp->v_type != VCHR) {
+			error = VOP_MMAP(vp, 0, curproc->p_ucred, curproc);
+			if (error) {
+				return error;
+			}
+
 			uobj = uvn_attach((void *)vp, (flags & MAP_SHARED) ?
 			    maxprot : (maxprot & ~VM_PROT_WRITE));
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index cba06a965b2..0da38fc51ed 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_page.c,v 1.36 2001/11/30 17:24:19 art Exp $ */
-/* $NetBSD: uvm_page.c,v 1.65 2001/06/27 23:57:16 thorpej Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.37 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_page.c,v 1.66 2001/09/10 21:19:43 chris Exp $ */
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -506,7 +506,7 @@ uvm_pageboot_alloc(size)
 		 */
 		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
 	}
-	pmap_update();
+	pmap_update(pmap_kernel());
 	return(addr);
 #endif	/* PMAP_STEAL_MEMORY */
 }
diff --git a/sys/uvm/uvm_page.h b/sys/uvm/uvm_page.h
index f3d11ab646f..45b26021f3e 100644
--- a/sys/uvm/uvm_page.h
+++ b/sys/uvm/uvm_page.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_page.h,v 1.15 2001/11/30 17:37:43 art Exp $ */
-/* $NetBSD: uvm_page.h,v 1.27 2001/06/28 00:26:38 thorpej Exp $ */
+/* $OpenBSD: uvm_page.h,v 1.16 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_page.h,v 1.30 2001/07/25 23:05:04 thorpej Exp $ */
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -110,7 +110,7 @@
 * fields were dumped and all the flags were lumped into one short.
 * that is fine for a single threaded uniprocessor OS, but bad if you
 * want to actual make use of locking (simple_lock's). so, we've
-* seperated things back out again.
+* separated things back out again.
 *
 * note the page structure has no lock of its own.
 */
diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c
index 5d7c5e4dab2..8259df56237 100644
--- a/sys/uvm/uvm_pager.c
+++ b/sys/uvm/uvm_pager.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pager.c,v 1.27 2001/11/30 05:45:33 csapuntz Exp $ */
-/* $NetBSD: uvm_pager.c,v 1.48 2001/06/23 20:47:44 chs Exp $ */
+/* $OpenBSD: uvm_pager.c,v 1.28 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_pager.c,v 1.49 2001/09/10 21:19:43 chris Exp $ */
 /*
 *
@@ -186,7 +186,7 @@ enter:
 		    prot, PMAP_WIRED | ((pp->flags & PG_FAKE) ? prot : VM_PROT_READ));
 	}
-	pmap_update();
+	pmap_update(vm_map_pmap(pager_map));
 	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
 	return(kva);
@@ -237,7 +237,7 @@ remove:
 	pmap_remove(pmap_kernel(), kva, kva + (npages << PAGE_SHIFT));
 	if (entries)
 		uvm_unmap_detach(entries, 0);
-	pmap_update();
+	pmap_update(pmap_kernel());
 	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
 }
diff --git a/sys/uvm/uvm_param.h b/sys/uvm/uvm_param.h
index c183c97f500..5cc1be262cc 100644
--- a/sys/uvm/uvm_param.h
+++ b/sys/uvm/uvm_param.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_param.h,v 1.5 2001/11/28 19:28:15 art Exp $ */
-/* $NetBSD: uvm_param.h,v 1.11 2001/07/14 06:36:03 matt Exp $ */
+/* $OpenBSD: uvm_param.h,v 1.6 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_param.h,v 1.12 2001/08/05 03:33:16 matt Exp $ */
 /*
  * Copyright (c) 1991, 1993
@@ -72,7 +72,9 @@
 #ifndef _VM_PARAM_
 #define _VM_PARAM_
+#ifdef _KERNEL
 #include <machine/vmparam.h>
+#endif
 /*
  * This belongs in types.h, but breaks too many existing programs.
diff --git a/sys/uvm/uvm_pglist.h b/sys/uvm/uvm_pglist.h
index 62d48fe7437..883171ebb86 100644
--- a/sys/uvm/uvm_pglist.h
+++ b/sys/uvm/uvm_pglist.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pglist.h,v 1.3 2001/11/28 19:28:15 art Exp $ */
-/* $NetBSD: uvm_pglist.h,v 1.4 2001/05/25 04:06:17 chs Exp $ */
+/* $OpenBSD: uvm_pglist.h,v 1.4 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_pglist.h,v 1.5 2001/08/25 20:37:46 chs Exp $ */
 /*-
  * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
@@ -37,8 +37,8 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
-#ifndef _PGLIST_H_
-#define _PGLIST_H_
+#ifndef _UVM_UVM_PGLIST_H_
+#define _UVM_UVM_PGLIST_H_
 /*
  * This defines the type of a page queue, e.g. active list, inactive
@@ -62,4 +62,4 @@ struct pgfreelist {
 	struct pgflbucket *pgfl_buckets;
 };
-#endif /* _PGLIST_H */
+#endif /* _UVM_UVM_PGLIST_H_ */
diff --git a/sys/uvm/uvm_pmap.h b/sys/uvm/uvm_pmap.h
index 4033c140e4d..f4f2e4ce0ea 100644
--- a/sys/uvm/uvm_pmap.h
+++ b/sys/uvm/uvm_pmap.h
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pmap.h,v 1.7 2001/05/25 04:06:17 chs Exp $ */
+/* $NetBSD: uvm_pmap.h,v 1.9 2001/09/10 21:19:43 chris Exp $ */
 /*
  * Copyright (c) 1991, 1993
@@ -86,7 +86,9 @@ struct pmap_statistics {
 };
 typedef struct pmap_statistics	*pmap_statistics_t;
+#ifdef _KERNEL
 #include <machine/pmap.h>
+#endif
 /*
  * Flags passed to pmap_enter().  Note the bottom 3 bits are VM_PROT_*
@@ -147,7 +149,7 @@
 void		pmap_protect __P((pmap_t, vaddr_t, vaddr_t, vm_prot_t));
 void		pmap_reference __P((pmap_t));
 void		pmap_remove __P((pmap_t, vaddr_t, vaddr_t));
-void		pmap_update __P((void));
+void		pmap_update __P((pmap_t));
 #if !defined(pmap_resident_count)
 long		pmap_resident_count __P((pmap_t));
 #endif
diff --git a/sys/uvm/uvm_swap.c b/sys/uvm/uvm_swap.c
index 2fb9de02cde..02d7901ba9f 100644
--- a/sys/uvm/uvm_swap.c
+++ b/sys/uvm/uvm_swap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_swap.c,v 1.45 2001/11/30 05:45:33 csapuntz Exp $ */
-/* $NetBSD: uvm_swap.c,v 1.52 2001/05/26 16:32:47 chs Exp $ */
+/* $OpenBSD: uvm_swap.c,v 1.46 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_swap.c,v 1.53 2001/08/26 00:43:53 chs Exp $ */
 /*
  * Copyright (c) 1995, 1996, 1997 Matthew R. Green
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index 10e75532731..180acc16e66 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_vnode.c,v 1.28 2001/12/02 23:37:52 art Exp $ */
-/* $NetBSD: uvm_vnode.c,v 1.50 2001/05/26 21:27:21 chs Exp $ */
+/* $OpenBSD: uvm_vnode.c,v 1.29 2001/12/04 23:22:42 art Exp $ */
+/* $NetBSD: uvm_vnode.c,v 1.51 2001/08/17 05:53:02 chs Exp $ */
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -161,12 +161,7 @@ uvn_attach(arg, accessprot)
 		UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0);
 		return(NULL);
 	}
-
-#ifdef DIAGNOSTIC
-	if (vp->v_type != VREG) {
-		panic("uvn_attach: vp %p not VREG", vp);
-	}
-#endif
+	KASSERT(vp->v_type == VREG || vp->v_type == VBLK);
 	/*
 	 * set up our idea of the size