Diffstat (limited to 'sys/uvm')
-rw-r--r--  sys/uvm/uvm_anon.c     |  14
-rw-r--r--  sys/uvm/uvm_aobj.c     |  22
-rw-r--r--  sys/uvm/uvm_bio.c      |  48
-rw-r--r--  sys/uvm/uvm_device.c   |  14
-rw-r--r--  sys/uvm/uvm_extern.h   |   8
-rw-r--r--  sys/uvm/uvm_fault.c    | 121
-rw-r--r--  sys/uvm/uvm_glue.c     |  20
-rw-r--r--  sys/uvm/uvm_io.c       |   7
-rw-r--r--  sys/uvm/uvm_km.c       |  23
-rw-r--r--  sys/uvm/uvm_loan.c     |  50
-rw-r--r--  sys/uvm/uvm_map.c      | 161
-rw-r--r--  sys/uvm/uvm_map.h      |   8
-rw-r--r--  sys/uvm/uvm_map_i.h    |  10
-rw-r--r--  sys/uvm/uvm_mmap.c     | 308
-rw-r--r--  sys/uvm/uvm_page.c     |   4
-rw-r--r--  sys/uvm/uvm_pager.c    |  62
-rw-r--r--  sys/uvm/uvm_pager.h    |  26
-rw-r--r--  sys/uvm/uvm_param.h    |  20
-rw-r--r--  sys/uvm/uvm_pdaemon.c  | 171
-rw-r--r--  sys/uvm/uvm_swap.c     |  30
-rw-r--r--  sys/uvm/uvm_unix.c     |  31
-rw-r--r--  sys/uvm/uvm_user.c     |  16
-rw-r--r--  sys/uvm/uvm_vnode.c    |  62
23 files changed, 434 insertions, 802 deletions
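The change is the same throughout: UVM's Mach-style return codes, both the VM_PAGER_* pager codes and the KERN_* map codes, are replaced by standard errno values, and the uvm_errno2vmerror() translation function is removed from uvm_pager.c along with the VM_PAGER_* documentation block in uvm_pager.h. The correspondence can be read off the removed translation function and the converted call sites; the sketch below is illustrative only. The helper name vm_pager_code_to_errno is hypothetical and is not part of the commit.

/*
 * Illustrative sketch only (not in the tree): the errno value this
 * commit substitutes for each old VM_PAGER_* code, as implied by the
 * removed uvm_errno2vmerror() and the call-site changes above.
 * Pre-commit, the VM_PAGER_* constants lived in <uvm/uvm_pager.h>.
 */
#include <sys/errno.h>

static int
vm_pager_code_to_errno(int code)
{
        switch (code) {
        case VM_PAGER_OK:      return 0;           /* success */
        case VM_PAGER_BAD:     return EINVAL;      /* out of accepted range */
        case VM_PAGER_PEND:    return EINPROGRESS; /* async i/o initiated */
        case VM_PAGER_ERROR:   return EIO;         /* i/o error */
        case VM_PAGER_AGAIN:   return EAGAIN;      /* transient shortage, retry */
        case VM_PAGER_UNLOCK:  return EBUSY;       /* unlock map and try again */
        case VM_PAGER_REFAULT: return ERESTART;    /* caller must refault */
        default:               return EIO;
        }
}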
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c index 8478141a72c..4089ca48991 100644 --- a/sys/uvm/uvm_anon.c +++ b/sys/uvm/uvm_anon.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_anon.c,v 1.16 2001/11/27 05:27:12 art Exp $ */ -/* $NetBSD: uvm_anon.c,v 1.15 2001/02/18 21:19:08 chs Exp $ */ +/* $OpenBSD: uvm_anon.c,v 1.17 2001/11/28 13:47:39 art Exp $ */ +/* $NetBSD: uvm_anon.c,v 1.16 2001/03/10 22:46:47 chs Exp $ */ /* * @@ -482,20 +482,20 @@ anon_pagein(anon) rv = uvmfault_anonget(NULL, NULL, anon); /* - * if rv == VM_PAGER_OK, anon is still locked, else anon + * if rv == 0, anon is still locked, else anon * is unlocked */ switch (rv) { - case VM_PAGER_OK: + case 0: break; - case VM_PAGER_ERROR: - case VM_PAGER_REFAULT: + case EIO: + case ERESTART: /* * nothing more to do on errors. - * VM_PAGER_REFAULT can only mean that the anon was freed, + * ERESTART can only mean that the anon was freed, * so again there's nothing to do. */ diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c index 0ebf53c3502..0d7d7c3aa3f 100644 --- a/sys/uvm/uvm_aobj.c +++ b/sys/uvm/uvm_aobj.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_aobj.c,v 1.21 2001/11/27 05:27:12 art Exp $ */ -/* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */ +/* $OpenBSD: uvm_aobj.c,v 1.22 2001/11/28 13:47:39 art Exp $ */ +/* $NetBSD: uvm_aobj.c,v 1.40 2001/03/10 22:46:47 chs Exp $ */ /* * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and @@ -935,7 +935,7 @@ uao_flush(uobj, start, stop, flags) * * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot. * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES), - * then we will need to return VM_PAGER_UNLOCK. + * then we will need to return EBUSY. * * => prefer map unlocked (not required) * => object must be locked! we will _unlock_ it before starting any I/O. @@ -1040,10 +1040,10 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags) *npagesp = gotpages; if (done) /* bingo! */ - return(VM_PAGER_OK); + return(0); else /* EEK! Need to unlock and I/O */ - return(VM_PAGER_UNLOCK); + return(EBUSY); } /* @@ -1177,7 +1177,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags) /* * I/O done. check for errors. */ - if (rv != VM_PAGER_OK) + if (rv != 0) { UVMHIST_LOG(pdhist, "<- done (error=%d)", rv,0,0,0); @@ -1228,7 +1228,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags) simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0); - return(VM_PAGER_OK); + return(0); } /* @@ -1488,14 +1488,14 @@ uao_pagein_page(aobj, pageidx) simple_lock(&aobj->u_obj.vmobjlock); switch (rv) { - case VM_PAGER_OK: + case 0: break; - case VM_PAGER_ERROR: - case VM_PAGER_REFAULT: + case EIO: + case ERESTART: /* * nothing more to do on errors. - * VM_PAGER_REFAULT can only mean that the anon was freed, + * ERESTART can only mean that the anon was freed, * so again there's nothing to do. */ return FALSE; diff --git a/sys/uvm/uvm_bio.c b/sys/uvm/uvm_bio.c index fccf51b8ece..9ba758f2680 100644 --- a/sys/uvm/uvm_bio.c +++ b/sys/uvm/uvm_bio.c @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_bio.c,v 1.7 2001/02/02 01:55:52 enami Exp $ */ +/* $NetBSD: uvm_bio.c,v 1.11 2001/03/19 00:29:04 chs Exp $ */ /* * Copyright (c) 1998 Chuck Silvers. 
@@ -64,7 +64,7 @@ static struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t)); (((u_long)(offset)) >> PAGE_SHIFT)) & \ ubc_object.hashmask) -#define UBC_QUEUE(offset) (&ubc_object.inactive[((offset) / ubc_winsize) & \ +#define UBC_QUEUE(offset) (&ubc_object.inactive[((offset) >> ubc_winshift) & \ (UBC_NQUEUES - 1)]) struct ubc_map @@ -103,7 +103,8 @@ struct uvm_pagerops ubc_pager = }; int ubc_nwins = UBC_NWINS; -int ubc_winsize = UBC_WINSIZE; +int ubc_winshift = UBC_WINSHIFT; +int ubc_winsize; #ifdef PMAP_PREFER int ubc_nqueues; boolean_t ubc_release_unmap = FALSE; @@ -150,14 +151,15 @@ ubc_init(void) va = (vaddr_t)1L; #ifdef PMAP_PREFER PMAP_PREFER(0, &va); - if (va < ubc_winsize) { - va = ubc_winsize; + ubc_nqueues = va >> ubc_winshift; + if (ubc_nqueues == 0) { + ubc_nqueues = 1; } - ubc_nqueues = va / ubc_winsize; if (ubc_nqueues != 1) { ubc_release_unmap = TRUE; } #endif + ubc_winsize = 1 << ubc_winshift; ubc_object.inactive = malloc(UBC_NQUEUES * sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT); @@ -179,10 +181,9 @@ ubc_init(void) } if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva, - ubc_nwins * ubc_winsize, &ubc_object.uobj, 0, (vsize_t)va, + ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, - UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) - != KERN_SUCCESS) { + UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) { panic("ubc_init: failed to map ubc_object\n"); } UVMHIST_INIT(ubchist, 300); @@ -192,7 +193,7 @@ ubc_init(void) /* * ubc_fault: fault routine for ubc mapping */ -static int +int ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags) struct uvm_faultinfo *ufi; vaddr_t ign1; @@ -207,7 +208,7 @@ ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags) struct ubc_map *umap; vaddr_t va, eva, ubc_offset, slot_offset; int i, error, rv, npages; - struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg; + struct vm_page *pgs[(1 << ubc_winshift) >> PAGE_SHIFT], *pg; UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist); /* @@ -217,7 +218,7 @@ ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags) */ if (flags & PGO_LOCKED) { #if 0 - return VM_PAGER_UNLOCK; + return EBUSY; #else uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL); flags &= ~PGO_LOCKED; @@ -230,7 +231,7 @@ ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags) UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx at %d", va, ubc_offset, access_type,0); - umap = &ubc_object.umap[ubc_offset / ubc_winsize]; + umap = &ubc_object.umap[ubc_offset >> ubc_winshift]; KASSERT(umap->refcount != 0); slot_offset = trunc_page(ubc_offset & (ubc_winsize - 1)); @@ -287,10 +288,10 @@ again: goto again; } if (error) { - return VM_PAGER_ERROR; + return error; } if (npages == 0) { - return VM_PAGER_OK; + return 0; } va = ufi->orig_rvaddr; @@ -328,14 +329,14 @@ again: UVM_PAGE_OWN(pg, NULL); } simple_unlock(&uobj->vmobjlock); - return VM_PAGER_OK; + return 0; } /* * local functions */ -static struct ubc_map * +struct ubc_map * ubc_find_mapping(uobj, offset) struct uvm_object *uobj; voff_t offset; @@ -411,7 +412,7 @@ again: umap, hash); va = (vaddr_t)(ubc_object.kva + - (umap - ubc_object.umap) * ubc_winsize); + ((umap - ubc_object.umap) << ubc_winshift)); pmap_remove(pmap_kernel(), va, va + ubc_winsize); } @@ -435,10 +436,11 @@ again: splx(s); UVMHIST_LOG(ubchist, "umap %p refs %d va %p", umap, umap->refcount, - ubc_object.kva + (umap - ubc_object.umap) * ubc_winsize,0); + ubc_object.kva + ((umap - ubc_object.umap) << 
ubc_winshift), + 0); return ubc_object.kva + - (umap - ubc_object.umap) * ubc_winsize + slot_offset; + ((umap - ubc_object.umap) << ubc_winshift) + slot_offset; } @@ -457,7 +459,7 @@ ubc_release(va, wlen) s = splbio(); simple_lock(&ubc_object.uobj.vmobjlock); - umap = &ubc_object.umap[((char *)va - ubc_object.kva) / ubc_winsize]; + umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift]; uobj = umap->uobj; KASSERT(uobj != NULL); @@ -481,7 +483,7 @@ ubc_release(va, wlen) */ va = (vaddr_t)(ubc_object.kva + - (umap - ubc_object.umap) * ubc_winsize); + ((umap - ubc_object.umap) << ubc_winshift)); pmap_remove(pmap_kernel(), va, va + ubc_winsize); LIST_REMOVE(umap, hash); umap->uobj = NULL; @@ -534,7 +536,7 @@ ubc_flush(uobj, start, end) */ va = (vaddr_t)(ubc_object.kva + - (umap - ubc_object.umap) * ubc_winsize); + ((umap - ubc_object.umap) << ubc_winshift)); pmap_remove(pmap_kernel(), va, va + ubc_winsize); LIST_REMOVE(umap, hash); diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c index 932fdfd5ec3..2704d728c7d 100644 --- a/sys/uvm/uvm_device.c +++ b/sys/uvm/uvm_device.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_device.c,v 1.17 2001/11/07 02:55:50 art Exp $ */ -/* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */ +/* $OpenBSD: uvm_device.c,v 1.18 2001/11/28 13:47:39 art Exp $ */ +/* $NetBSD: uvm_device.c,v 1.32 2001/03/15 06:10:56 chs Exp $ */ /* * @@ -401,7 +401,7 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags) UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%x)", entry->etype, 0,0,0); uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL); - return(VM_PAGER_ERROR); + return(EIO); } /* @@ -427,7 +427,7 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags) * loop over the page range entering in as needed */ - retval = VM_PAGER_OK; + retval = 0; for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE, curr_va += PAGE_SIZE) { if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx) @@ -438,7 +438,7 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags) mdpgno = (*mapfn)(device, curr_offset, access_type); if (mdpgno == -1) { - retval = VM_PAGER_ERROR; + retval = EIO; break; } paddr = pmap_phys_address(mdpgno); @@ -447,7 +447,7 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags) " MAPPING: device: pm=0x%x, va=0x%x, pa=0x%lx, at=%d", ufi->orig_map->pmap, curr_va, paddr, mapprot); if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, - mapprot, PMAP_CANFAIL | mapprot) != KERN_SUCCESS) { + mapprot, PMAP_CANFAIL | mapprot) != 0) { /* * pmap_enter() didn't have the resource to * enter this mapping. 
Unlock everything, @@ -461,7 +461,7 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags) uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL); uvm_wait("udv_fault"); - return (VM_PAGER_REFAULT); + return (ERESTART); } } diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h index bb6b841f0ca..3e3d255a5a9 100644 --- a/sys/uvm/uvm_extern.h +++ b/sys/uvm/uvm_extern.h @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_extern.h,v 1.34 2001/11/27 05:27:12 art Exp $ */ -/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */ +/* $OpenBSD: uvm_extern.h,v 1.35 2001/11/28 13:47:39 art Exp $ */ +/* $NetBSD: uvm_extern.h,v 1.58 2001/03/15 06:10:56 chs Exp $ */ /* * @@ -569,7 +569,7 @@ void uvm_total __P((struct vmtotal *)); /* uvm_mmap.c */ int uvm_mmap __P((vm_map_t, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t, int, - caddr_t, voff_t, vsize_t)); + void *, voff_t, vsize_t)); /* uvm_page.c */ struct vm_page *uvm_pagealloc_strat __P((struct uvm_object *, @@ -610,7 +610,7 @@ int uvm_coredump __P((struct proc *, struct vnode *, int uvm_grow __P((struct proc *, vaddr_t)); /* uvm_user.c */ -int uvm_deallocate __P((vm_map_t, vaddr_t, vsize_t)); +void uvm_deallocate __P((vm_map_t, vaddr_t, vsize_t)); /* uvm_vnode.c */ void uvm_vnp_setsize __P((struct vnode *, voff_t)); diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c index 0e4103fe49b..3be2966ea58 100644 --- a/sys/uvm/uvm_fault.c +++ b/sys/uvm/uvm_fault.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_fault.c,v 1.25 2001/11/27 05:27:12 art Exp $ */ -/* $NetBSD: uvm_fault.c,v 1.56 2001/02/18 21:19:08 chs Exp $ */ +/* $OpenBSD: uvm_fault.c,v 1.26 2001/11/28 13:47:39 art Exp $ */ +/* $NetBSD: uvm_fault.c,v 1.60 2001/04/01 16:45:53 chs Exp $ */ /* * @@ -276,7 +276,7 @@ uvmfault_amapcopy(ufi) * page in that anon. * * => maps, amap, and anon locked by caller. - * => if we fail (result != VM_PAGER_OK) we unlock everything. + * => if we fail (result != 0) we unlock everything. * => if we are successful, we return with everything still locked. * => we don't move the page on the queues [gets moved later] * => if we allocate a new page [we_own], it gets put on the queues. @@ -296,12 +296,12 @@ uvmfault_anonget(ufi, amap, anon) boolean_t we_own; /* we own anon's page? */ boolean_t locked; /* did we relock? */ struct vm_page *pg; - int result; + int error; UVMHIST_FUNC("uvmfault_anonget"); UVMHIST_CALLED(maphist); LOCK_ASSERT(simple_lock_held(&anon->an_lock)); - result = 0; /* XXX shut up gcc */ + error = 0; uvmexp.fltanget++; /* bump rusage counters */ if (anon->u.an_page) @@ -342,7 +342,7 @@ uvmfault_anonget(ufi, amap, anon) if ((pg->flags & (PG_BUSY|PG_RELEASED)) == 0) { UVMHIST_LOG(maphist, "<- OK",0,0,0,0); - return (VM_PAGER_OK); + return (0); } pg->flags |= PG_WANTED; uvmexp.fltpgwait++; @@ -398,7 +398,7 @@ uvmfault_anonget(ufi, amap, anon) * we hold PG_BUSY on the page. */ uvmexp.pageins++; - result = uvm_swap_get(pg, anon->an_swslot, + error = uvm_swap_get(pg, anon->an_swslot, PGO_SYNCIO); /* @@ -455,12 +455,10 @@ uvmfault_anonget(ufi, amap, anon) NULL); uvmexp.fltpgrele++; UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0); - return (VM_PAGER_REFAULT); /* refault! */ + return (ERESTART); /* refault! 
*/ } - if (result != VM_PAGER_OK) { - KASSERT(result != VM_PAGER_PEND); - + if (error) { /* remove page from anon */ anon->u.an_page = NULL; @@ -488,7 +486,7 @@ uvmfault_anonget(ufi, amap, anon) else simple_unlock(&anon->an_lock); UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0); - return (VM_PAGER_ERROR); + return error; } /* @@ -509,7 +507,7 @@ uvmfault_anonget(ufi, amap, anon) if (!locked) { UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0); - return (VM_PAGER_REFAULT); + return (ERESTART); } /* @@ -522,7 +520,7 @@ uvmfault_anonget(ufi, amap, anon) uvmfault_unlockall(ufi, amap, NULL, anon); UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0); - return (VM_PAGER_REFAULT); + return (ERESTART); } /* @@ -565,7 +563,7 @@ uvm_fault(orig_map, vaddr, fault_type, access_type) struct uvm_faultinfo ufi; vm_prot_t enter_prot; boolean_t wired, narrow, promote, locked, shadowed; - int npages, nback, nforw, centeridx, result, lcv, gotpages; + int npages, nback, nforw, centeridx, error, lcv, gotpages; vaddr_t startva, objaddr, currva, offset, uoff; paddr_t pa; struct vm_amap *amap; @@ -602,10 +600,11 @@ uvm_fault(orig_map, vaddr, fault_type, access_type) * maps are never pageable, and this approach avoids an evil * locking mess. */ + if (orig_map == kernel_map && uvmfault_check_intrsafe(&ufi)) { UVMHIST_LOG(maphist, "<- VA 0x%lx in intrsafe map %p", ufi.orig_rvaddr, ufi.map, 0, 0); - return (KERN_FAILURE); + return EFAULT; } /* @@ -619,10 +618,12 @@ ReFault: if (uvmfault_lookup(&ufi, FALSE) == FALSE) { UVMHIST_LOG(maphist, "<- no mapping @ 0x%x", vaddr, 0,0,0); - return (KERN_INVALID_ADDRESS); + return (EFAULT); } /* locked: maps(read) */ + KASSERT(ufi.map->flags & VM_MAP_PAGEABLE); + /* * check protection */ @@ -632,18 +633,7 @@ ReFault: "<- protection failure (prot=0x%x, access=0x%x)", ufi.entry->protection, access_type, 0, 0); uvmfault_unlockmaps(&ufi, FALSE); - return (KERN_PROTECTION_FAILURE); - } - - /* - * if the map is not a pageable map, a page fault always fails. - */ - - if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0) { - UVMHIST_LOG(maphist, - "<- map %p not pageable", ufi.map, 0, 0, 0); - uvmfault_unlockmaps(&ufi, FALSE); - return (KERN_FAILURE); + return EACCES; } /* @@ -702,7 +692,7 @@ ReFault: if (amap == NULL && uobj == NULL) { uvmfault_unlockmaps(&ufi, FALSE); UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0); - return (KERN_INVALID_ADDRESS); + return (EFAULT); } /* @@ -887,18 +877,14 @@ ReFault: simple_lock(&uobj->vmobjlock); /* locked: maps(read), amap (if there), uobj */ - result = uobj->pgops->pgo_fault(&ufi, startva, pages, npages, - centeridx, fault_type, access_type, - PGO_LOCKED|PGO_SYNCIO); + error = uobj->pgops->pgo_fault(&ufi, startva, pages, npages, + centeridx, fault_type, access_type, PGO_LOCKED|PGO_SYNCIO); /* locked: nothing, pgo_fault has unlocked everything */ - if (result == VM_PAGER_OK) - return (KERN_SUCCESS); /* pgo_fault did pmap enter */ - else if (result == VM_PAGER_REFAULT) + if (error == ERESTART) goto ReFault; /* try again! */ - else - return (KERN_PROTECTION_FAILURE); + return error; } /* @@ -1058,24 +1044,20 @@ ReFault: * lock that object for us if it does not fail. 
*/ - result = uvmfault_anonget(&ufi, amap, anon); - switch (result) { - case VM_PAGER_OK: + error = uvmfault_anonget(&ufi, amap, anon); + switch (error) { + case 0: break; - case VM_PAGER_REFAULT: + case ERESTART: goto ReFault; - case VM_PAGER_AGAIN: + case EAGAIN: tsleep(&lbolt, PVM, "fltagain1", 0); goto ReFault; default: -#ifdef DIAGNOSTIC - panic("uvm_fault: uvmfault_anonget -> %d", result); -#else - return (KERN_PROTECTION_FAILURE); -#endif + return error; } /* @@ -1199,7 +1181,7 @@ ReFault: UVMHIST_LOG(maphist, "<- failed. out of VM",0,0,0,0); uvmexp.fltnoanon++; - return (KERN_RESOURCE_SHORTAGE); + return ENOMEM; } uvmexp.fltnoram++; @@ -1247,7 +1229,7 @@ ReFault: ufi.orig_map->pmap, ufi.orig_rvaddr, pg, 0); if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg), enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0)) - != KERN_SUCCESS) { + != 0) { /* * No need to undo what we did; we can simply think of * this as the pmap throwing away the mapping information. @@ -1263,7 +1245,7 @@ ReFault: UVMHIST_LOG(maphist, "<- failed. out of VM",0,0,0,0); /* XXX instrumentation */ - return (KERN_RESOURCE_SHORTAGE); + return ENOMEM; } /* XXX instrumentation */ uvm_wait("flt_pmfail1"); @@ -1302,7 +1284,7 @@ ReFault: if (anon != oanon) simple_unlock(&anon->an_lock); uvmfault_unlockall(&ufi, amap, uobj, oanon); - return (KERN_SUCCESS); + return 0; Case2: @@ -1356,29 +1338,27 @@ Case2: uvmexp.fltget++; gotpages = 1; uoff = (ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset; - result = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages, + error = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages, 0, access_type & MASK(ufi.entry), ufi.entry->advice, PGO_SYNCIO); - /* locked: uobjpage(if result OK) */ + /* locked: uobjpage(if no error) */ /* * recover from I/O */ - if (result != VM_PAGER_OK) { - KASSERT(result != VM_PAGER_PEND); - - if (result == VM_PAGER_AGAIN) { + if (error) { + if (error == EAGAIN) { UVMHIST_LOG(maphist, " pgo_get says TRY AGAIN!",0,0,0,0); - tsleep((caddr_t)&lbolt, PVM, "fltagain2", 0); + tsleep(&lbolt, PVM, "fltagain2", 0); goto ReFault; } UVMHIST_LOG(maphist, "<- pgo_get failed (code %d)", - result, 0,0,0); - return (KERN_PROTECTION_FAILURE); /* XXX i/o error */ + error, 0,0,0); + return error; } /* locked: uobjpage */ @@ -1629,7 +1609,7 @@ Case2: UVMHIST_LOG(maphist, " promote: out of VM", 0,0,0,0); uvmexp.fltnoanon++; - return (KERN_RESOURCE_SHORTAGE); + return ENOMEM; } UVMHIST_LOG(maphist, " out of RAM, waiting for more", @@ -1710,8 +1690,7 @@ Case2: KASSERT(access_type == VM_PROT_READ || (pg->flags & PG_RDONLY) == 0); if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg), pg->flags & PG_RDONLY ? VM_PROT_READ : enter_prot, - access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0)) - != KERN_SUCCESS) { + access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0)) != 0) { /* * No need to undo what we did; we can simply think of @@ -1737,7 +1716,7 @@ Case2: UVMHIST_LOG(maphist, "<- failed. 
out of VM",0,0,0,0); /* XXX instrumentation */ - return (KERN_RESOURCE_SHORTAGE); + return ENOMEM; } /* XXX instrumentation */ uvm_wait("flt_pmfail2"); @@ -1779,7 +1758,7 @@ Case2: uvmfault_unlockall(&ufi, amap, uobj, anon); UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0); - return (KERN_SUCCESS); + return 0; } @@ -1800,7 +1779,7 @@ uvm_fault_wire(map, start, end, access_type) { vaddr_t va; pmap_t pmap; - int rv; + int error; pmap = vm_map_pmap(map); @@ -1811,16 +1790,16 @@ uvm_fault_wire(map, start, end, access_type) */ for (va = start ; va < end ; va += PAGE_SIZE) { - rv = uvm_fault(map, va, VM_FAULT_WIRE, access_type); - if (rv) { + error = uvm_fault(map, va, VM_FAULT_WIRE, access_type); + if (error) { if (va != start) { uvm_fault_unwire(map, start, va); } - return (rv); + return error; } } - return (KERN_SUCCESS); + return 0; } /* diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c index 78e881bef6e..4eb6e146a83 100644 --- a/sys/uvm/uvm_glue.c +++ b/sys/uvm/uvm_glue.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_glue.c,v 1.26 2001/11/10 19:20:39 art Exp $ */ -/* $NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $ */ +/* $OpenBSD: uvm_glue.c,v 1.27 2001/11/28 13:47:39 art Exp $ */ +/* $NetBSD: uvm_glue.c,v 1.45 2001/03/15 06:10:57 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -218,15 +218,13 @@ uvm_vslock(p, addr, len, access_type) { vm_map_t map; vaddr_t start, end; - int rv; + int error; map = &p->p_vmspace->vm_map; start = trunc_page((vaddr_t)addr); end = round_page((vaddr_t)addr + len); - - rv = uvm_fault_wire(map, start, end, access_type); - - return (rv); + error = uvm_fault_wire(map, start, end, access_type); + return error; } /* @@ -271,7 +269,7 @@ uvm_fork(p1, p2, shared, stack, stacksize, func, arg) void *arg; { struct user *up = p2->p_addr; - int rv; + int error; if (shared == TRUE) { p2->p_vmspace = NULL; @@ -288,10 +286,10 @@ uvm_fork(p1, p2, shared, stack, stacksize, func, arg) * Note the kernel stack gets read/write accesses right off * the bat. */ - rv = uvm_fault_wire(kernel_map, (vaddr_t)up, + error = uvm_fault_wire(kernel_map, (vaddr_t)up, (vaddr_t)up + USPACE, VM_PROT_READ | VM_PROT_WRITE); - if (rv != KERN_SUCCESS) - panic("uvm_fork: uvm_fault_wire failed: %d", rv); + if (error) + panic("uvm_fork: uvm_fault_wire failed: %d", error); /* * p_stats currently points at a field in the user struct. Copy diff --git a/sys/uvm/uvm_io.c b/sys/uvm/uvm_io.c index 5cb29a07715..cd64da0ac95 100644 --- a/sys/uvm/uvm_io.c +++ b/sys/uvm/uvm_io.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_io.c,v 1.10 2001/11/06 01:35:04 art Exp $ */ -/* $NetBSD: uvm_io.c,v 1.12 2000/06/27 17:29:23 mrg Exp $ */ +/* $OpenBSD: uvm_io.c,v 1.11 2001/11/28 13:47:39 art Exp $ */ +/* $NetBSD: uvm_io.c,v 1.13 2001/03/15 06:10:57 chs Exp $ */ /* * @@ -138,8 +138,7 @@ uvm_io(map, uio) */ vm_map_lock(kernel_map); - (void)uvm_unmap_remove(kernel_map, kva, kva+chunksz, - &dead_entries); + uvm_unmap_remove(kernel_map, kva, kva + chunksz, &dead_entries); vm_map_unlock(kernel_map); if (dead_entries != NULL) diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c index b15ba7cdbef..43922f24541 100644 --- a/sys/uvm/uvm_km.c +++ b/sys/uvm/uvm_km.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_km.c,v 1.22 2001/11/11 01:16:56 art Exp $ */ -/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */ +/* $OpenBSD: uvm_km.c,v 1.23 2001/11/28 13:47:39 art Exp $ */ +/* $NetBSD: uvm_km.c,v 1.43 2001/03/15 06:10:57 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. 
@@ -236,7 +236,7 @@ uvm_km_init(start, end) kernel_map_store.pmap = pmap_kernel(); if (uvm_map(&kernel_map_store, &base, start - base, NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, - UVM_INH_NONE, UVM_ADV_RANDOM,UVM_FLAG_FIXED)) != KERN_SUCCESS) + UVM_INH_NONE, UVM_ADV_RANDOM,UVM_FLAG_FIXED)) != 0) panic("uvm_km_init: could not reserve space for kernel"); /* @@ -275,7 +275,7 @@ uvm_km_suballoc(map, min, max, size, flags, fixed, submap) if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, - UVM_ADV_RANDOM, mapflags)) != KERN_SUCCESS) { + UVM_ADV_RANDOM, mapflags)) != 0) { panic("uvm_km_suballoc: unable to allocate space in parent map"); } @@ -303,7 +303,7 @@ uvm_km_suballoc(map, min, max, size, flags, fixed, submap) * now let uvm_map_submap plug in it... */ - if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS) + if (uvm_map_submap(map, *min, *max, submap) != 0) panic("uvm_km_suballoc: submap allocation failed"); return(submap); @@ -509,7 +509,7 @@ uvm_km_kmemalloc(map, obj, size, flags) if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) - != KERN_SUCCESS)) { + != 0)) { UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0); return(0); } @@ -612,11 +612,10 @@ uvm_km_free_wakeup(map, addr, size) vm_map_entry_t dead_entries; vm_map_lock(map); - (void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size), - &dead_entries); + uvm_unmap_remove(map, trunc_page(addr), round_page(addr + size), + &dead_entries); wakeup(map); vm_map_unlock(map); - if (dead_entries != NULL) uvm_unmap_detach(dead_entries, 0); } @@ -650,7 +649,7 @@ uvm_km_alloc1(map, size, zeroit) if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, - 0)) != KERN_SUCCESS)) { + 0)) != 0)) { UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0); return(0); } @@ -756,7 +755,7 @@ uvm_km_valloc_align(map, size, align) if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, - 0)) != KERN_SUCCESS)) { + 0)) != 0)) { UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0); return(0); } @@ -800,7 +799,7 @@ uvm_km_valloc_prefer_wait(map, size, prefer) if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object, prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0)) - == KERN_SUCCESS)) { + == 0)) { UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0); return(kva); } diff --git a/sys/uvm/uvm_loan.c b/sys/uvm/uvm_loan.c index 481c6bec810..1c0d926ae76 100644 --- a/sys/uvm/uvm_loan.c +++ b/sys/uvm/uvm_loan.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_loan.c,v 1.12 2001/11/11 01:16:56 art Exp $ */ -/* $NetBSD: uvm_loan.c,v 1.23 2001/01/23 02:27:39 thorpej Exp $ */ +/* $OpenBSD: uvm_loan.c,v 1.13 2001/11/28 13:47:39 art Exp $ */ +/* $NetBSD: uvm_loan.c,v 1.27 2001/04/09 06:21:03 jdolecek Exp $ */ /* * @@ -220,21 +220,15 @@ uvm_loan(map, start, len, result, flags) { struct uvm_faultinfo ufi; void **output; - int rv; - -#ifdef DIAGNOSTIC - if (map->flags & VM_MAP_INTRSAFE) - panic("uvm_loan: intrsafe map"); -#endif + int rv, error; /* * ensure that one and only one of the flags is set */ - if ((flags & (UVM_LOAN_TOANON|UVM_LOAN_TOPAGE)) == - (UVM_LOAN_TOANON|UVM_LOAN_TOPAGE) || - (flags & (UVM_LOAN_TOANON|UVM_LOAN_TOPAGE)) == 0) - 
return(KERN_FAILURE); + KASSERT(((flags & UVM_LOAN_TOANON) == 0) ^ + ((flags & UVM_LOAN_TOPAGE) == 0)); + KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); /* * "output" is a pointer to the current place to put the loaned @@ -262,15 +256,19 @@ uvm_loan(map, start, len, result, flags) * an unmapped region (an error) */ - if (!uvmfault_lookup(&ufi, FALSE)) + if (!uvmfault_lookup(&ufi, FALSE)) { + error = ENOENT; goto fail; + } /* * now do the loanout */ rv = uvm_loanentry(&ufi, &output, flags); - if (rv < 0) + if (rv < 0) { + error = EINVAL; goto fail; + } /* * done! advance pointers and unlock. @@ -285,7 +283,7 @@ uvm_loan(map, start, len, result, flags) * got it! return success. */ - return(KERN_SUCCESS); + return 0; fail: /* @@ -299,7 +297,7 @@ fail: uvm_unloanpage((struct vm_page **)result, output - result); } - return(KERN_FAILURE); + return (error); } /* @@ -354,15 +352,15 @@ uvm_loananon(ufi, output, flags, anon) * unlocked everything and returned an error code. */ - if (result != VM_PAGER_OK) { + if (result != 0) { /* need to refault (i.e. refresh our lookup) ? */ - if (result == VM_PAGER_REFAULT) + if (result == ERESTART) return(0); /* "try again"? sleep a bit and retry ... */ - if (result == VM_PAGER_AGAIN) { - tsleep((caddr_t)&lbolt, PVM, "loanagain", 0); + if (result == EAGAIN) { + tsleep(&lbolt, PVM, "loanagain", 0); return(0); } @@ -427,7 +425,7 @@ uvm_loanuobj(ufi, output, flags, va) result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start, &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED); } else { - result = VM_PAGER_ERROR; + result = EIO; } /* @@ -435,7 +433,7 @@ uvm_loanuobj(ufi, output, flags, va) * then we fail the loan. */ - if (result != VM_PAGER_OK && result != VM_PAGER_UNLOCK) { + if (result != 0 && result != EBUSY) { uvmfault_unlockall(ufi, amap, uobj, NULL); return(-1); } @@ -444,7 +442,7 @@ uvm_loanuobj(ufi, output, flags, va) * if we need to unlock for I/O, do so now. */ - if (result == VM_PAGER_UNLOCK) { + if (result == EBUSY) { uvmfault_unlockall(ufi, amap, NULL, NULL); npages = 1; @@ -457,9 +455,9 @@ uvm_loanuobj(ufi, output, flags, va) * check for errors */ - if (result != VM_PAGER_OK) { - if (result == VM_PAGER_AGAIN) { - tsleep((caddr_t)&lbolt, PVM, "fltagain2", 0); + if (result != 0) { + if (result == EAGAIN) { + tsleep(&lbolt, PVM, "fltagain2", 0); return(0); /* redo the lookup and try again */ } return(-1); /* total failure */ diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c index bcefa88942c..da4bdd44f9f 100644 --- a/sys/uvm/uvm_map.c +++ b/sys/uvm/uvm_map.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_map.c,v 1.31 2001/11/12 01:26:09 art Exp $ */ -/* $NetBSD: uvm_map.c,v 1.93 2001/02/11 01:34:23 eeh Exp $ */ +/* $OpenBSD: uvm_map.c,v 1.32 2001/11/28 13:47:39 art Exp $ */ +/* $NetBSD: uvm_map.c,v 1.94 2001/03/15 06:10:57 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -525,7 +525,7 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags) if ((prot & maxprot) != prot) { UVMHIST_LOG(maphist, "<- prot. 
failure: prot=0x%x, max=0x%x", prot, maxprot,0,0); - return(KERN_PROTECTION_FAILURE); + return EACCES; } /* @@ -534,14 +534,14 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags) if (vm_map_lock_try(map) == FALSE) { if (flags & UVM_FLAG_TRYLOCK) - return(KERN_FAILURE); + return EAGAIN; vm_map_lock(map); /* could sleep here */ } if ((prev_entry = uvm_map_findspace(map, *startp, size, startp, uobj, uoffset, align, flags)) == NULL) { UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",0,0,0,0); vm_map_unlock(map); - return (KERN_NO_SPACE); + return ENOMEM; } #ifdef PMAP_GROWKERNEL @@ -644,7 +644,7 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags) UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0); vm_map_unlock(map); - return (KERN_SUCCESS); + return 0; } step3: @@ -715,7 +715,7 @@ step3: UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0); vm_map_unlock(map); - return(KERN_SUCCESS); + return 0; } /* @@ -954,7 +954,7 @@ uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags) * in "entry_list" */ -int +void uvm_unmap_remove(map, start, end, entry_list) vm_map_t map; vaddr_t start,end; @@ -1122,7 +1122,6 @@ uvm_unmap_remove(map, start, end, entry_list) *entry_list = first_entry; UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0); - return(KERN_SUCCESS); } /* @@ -1212,7 +1211,7 @@ uvm_map_reserve(map, size, offset, align, raddr) if (uvm_map(map, raddr, size, NULL, offset, 0, UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) { + UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) { UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0); return (FALSE); } @@ -1685,7 +1684,7 @@ uvm_map_submap(map, start, end, submap) vaddr_t start, end; { vm_map_entry_t entry; - int result; + int error; vm_map_lock(map); @@ -1706,12 +1705,12 @@ uvm_map_submap(map, start, end, submap) entry->object.sub_map = submap; entry->offset = 0; uvm_map_reference(submap); - result = KERN_SUCCESS; + error = 0; } else { - result = KERN_INVALID_ARGUMENT; + error = EINVAL; } vm_map_unlock(map); - return(result); + return error; } @@ -1724,7 +1723,6 @@ uvm_map_submap(map, start, end, submap) #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \ ~VM_PROT_WRITE : VM_PROT_ALL) -#define max(a,b) ((a) > (b) ? (a) : (b)) int uvm_map_protect(map, start, end, new_prot, set_max) @@ -1734,15 +1732,13 @@ uvm_map_protect(map, start, end, new_prot, set_max) boolean_t set_max; { vm_map_entry_t current, entry; - int rv = KERN_SUCCESS; + int error = 0; UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist); UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)", map, start, end, new_prot); vm_map_lock(map); - VM_MAP_RANGE_CHECK(map, start, end); - if (uvm_map_lookup_entry(map, start, &entry)) { UVM_MAP_CLIP_START(map, entry, start); } else { @@ -1756,11 +1752,11 @@ uvm_map_protect(map, start, end, new_prot, set_max) current = entry; while ((current != &map->header) && (current->start < end)) { if (UVM_ET_ISSUBMAP(current)) { - rv = KERN_INVALID_ARGUMENT; + error = EINVAL; goto out; } if ((new_prot & current->max_protection) != new_prot) { - rv = KERN_PROTECTION_FAILURE; + error = EACCES; goto out; } current = current->next; @@ -1769,12 +1765,10 @@ uvm_map_protect(map, start, end, new_prot, set_max) /* go back and fix up protections (no need to clip this time). 
*/ current = entry; - while ((current != &map->header) && (current->start < end)) { vm_prot_t old_prot; UVM_MAP_CLIP_END(map, current, end); - old_prot = current->protection; if (set_max) current->protection = @@ -1805,13 +1799,13 @@ uvm_map_protect(map, start, end, new_prot, set_max) new_prot != VM_PROT_NONE) { if (uvm_map_pageable(map, entry->start, entry->end, FALSE, - UVM_LK_ENTER|UVM_LK_EXIT) != KERN_SUCCESS) { + UVM_LK_ENTER|UVM_LK_EXIT) != 0) { /* * If locking the entry fails, remember the * error if it's the first one. Note we * still continue setting the protection in - * the map, but will return the resource - * shortage condition regardless. + * the map, but will return the error + * condition regardless. * * XXX Ignore what the actual error is, * XXX just call it a resource shortage @@ -1819,7 +1813,7 @@ uvm_map_protect(map, start, end, new_prot, set_max) * XXX what uvm_map_protect() itself would * XXX normally return. */ - rv = KERN_RESOURCE_SHORTAGE; + error = ENOMEM; } } @@ -1828,11 +1822,10 @@ uvm_map_protect(map, start, end, new_prot, set_max) out: vm_map_unlock(map); - UVMHIST_LOG(maphist, "<- done, rv=%d",rv,0,0,0); - return (rv); + UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0); + return error; } -#undef max #undef MASK /* @@ -1862,7 +1855,7 @@ uvm_map_inherit(map, start, end, new_inheritance) break; default: UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0); - return (KERN_INVALID_ARGUMENT); + return EINVAL; } vm_map_lock(map); @@ -1884,7 +1877,7 @@ uvm_map_inherit(map, start, end, new_inheritance) vm_map_unlock(map); UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0); - return(KERN_SUCCESS); + return 0; } /* @@ -1931,7 +1924,7 @@ uvm_map_advice(map, start, end, new_advice) default: vm_map_unlock(map); UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0); - return (KERN_INVALID_ARGUMENT); + return EINVAL; } entry->advice = new_advice; entry = entry->next; @@ -1939,7 +1932,7 @@ uvm_map_advice(map, start, end, new_advice) vm_map_unlock(map); UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0); - return (KERN_SUCCESS); + return 0; } /* @@ -1975,7 +1968,6 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags) if ((lockflags & UVM_LK_ENTER) == 0) vm_map_lock(map); - VM_MAP_RANGE_CHECK(map, start, end); /* @@ -1990,8 +1982,8 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags) if ((lockflags & UVM_LK_EXIT) == 0) vm_map_unlock(map); - UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0); - return (KERN_INVALID_ADDRESS); + UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0); + return EFAULT; } entry = start_entry; @@ -2014,9 +2006,8 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags) entry->next->start > entry->end))) { if ((lockflags & UVM_LK_EXIT) == 0) vm_map_unlock(map); - UVMHIST_LOG(maphist, - "<- done (INVALID UNWIRE ARG)",0,0,0,0); - return (KERN_INVALID_ARGUMENT); + UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0); + return EINVAL; } entry = entry->next; } @@ -2037,7 +2028,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags) if ((lockflags & UVM_LK_EXIT) == 0) vm_map_unlock(map); UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0); - return(KERN_SUCCESS); + return 0; } /* @@ -2107,7 +2098,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags) if ((lockflags & UVM_LK_EXIT) == 0) vm_map_unlock(map); UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0); - return (KERN_INVALID_ARGUMENT); + return EINVAL; } entry = entry->next; } @@ -2129,11 +2120,13 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags) rv = uvm_fault_wire(map, 
entry->start, entry->end, entry->protection); if (rv) { + /* * wiring failed. break out of the loop. * we'll clean up the map below, once we * have a write lock again. */ + break; } } @@ -2198,7 +2191,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags) } UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0); - return(KERN_SUCCESS); + return 0; } /* @@ -2246,7 +2239,7 @@ uvm_map_pageable_all(map, flags, limit) vm_map_modflags(map, 0, VM_MAP_WIREFUTURE); vm_map_unlock(map); UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0); - return (KERN_SUCCESS); + return 0; /* * end of unwire case! @@ -2266,7 +2259,7 @@ uvm_map_pageable_all(map, flags, limit) */ UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0); vm_map_unlock(map); - return (KERN_SUCCESS); + return 0; } /* @@ -2302,7 +2295,7 @@ uvm_map_pageable_all(map, flags, limit) if (atop(size) + uvmexp.wired > uvmexp.wiredmax) { vm_map_unlock(map); - return (KERN_NO_SPACE); /* XXX overloaded */ + return ENOMEM; } /* XXX non-pmap_wired_count case must be handled by caller */ @@ -2310,7 +2303,7 @@ uvm_map_pageable_all(map, flags, limit) if (limit != 0 && (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) { vm_map_unlock(map); - return (KERN_NO_SPACE); /* XXX overloaded */ + return ENOMEM; } #endif @@ -2353,7 +2346,7 @@ uvm_map_pageable_all(map, flags, limit) vm_map_busy(map); vm_map_downgrade(map); - rv = KERN_SUCCESS; + rv = 0; for (entry = map->header.next; entry != &map->header; entry = entry->next) { if (entry->wired_count == 1) { @@ -2420,7 +2413,7 @@ uvm_map_pageable_all(map, flags, limit) vm_map_unlock_read(map); UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0); - return (KERN_SUCCESS); + return 0; } /* @@ -2464,7 +2457,7 @@ uvm_map_clean(map, start, end, flags) VM_MAP_RANGE_CHECK(map, start, end); if (uvm_map_lookup_entry(map, start, &entry) == FALSE) { vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + return EFAULT; } /* @@ -2474,19 +2467,18 @@ uvm_map_clean(map, start, end, flags) for (current = entry; current->start < end; current = current->next) { if (UVM_ET_ISSUBMAP(current)) { vm_map_unlock_read(map); - return (KERN_INVALID_ARGUMENT); + return EINVAL; } if (end <= current->end) { break; } if (current->end != current->next->start) { vm_map_unlock_read(map); - return (KERN_INVALID_ADDRESS); + return EFAULT; } } - error = KERN_SUCCESS; - + error = 0; for (current = entry; start < end; current = current->next) { amap = current->aref.ar_amap; /* top layer */ uobj = current->object.uvm_obj; /* bottom layer */ @@ -2605,7 +2597,7 @@ uvm_map_clean(map, start, end, flags) simple_unlock(&uobj->vmobjlock); if (rv == FALSE) - error = KERN_FAILURE; + error = EIO; } start += size; } @@ -2627,40 +2619,40 @@ uvm_map_checkprot(map, start, end, protection) vaddr_t start, end; vm_prot_t protection; { - vm_map_entry_t entry; - vm_map_entry_t tmp_entry; - - if (!uvm_map_lookup_entry(map, start, &tmp_entry)) { - return(FALSE); - } - entry = tmp_entry; - while (start < end) { - if (entry == &map->header) { - return(FALSE); - } + vm_map_entry_t entry; + vm_map_entry_t tmp_entry; + + if (!uvm_map_lookup_entry(map, start, &tmp_entry)) { + return(FALSE); + } + entry = tmp_entry; + while (start < end) { + if (entry == &map->header) { + return(FALSE); + } /* * no holes allowed */ - if (start < entry->start) { - return(FALSE); - } + if (start < entry->start) { + return(FALSE); + } /* * check protection associated with entry */ - if ((entry->protection & protection) != protection) { - return(FALSE); - } + if ((entry->protection & protection) 
!= protection) { + return(FALSE); + } - /* go to next entry */ + /* go to next entry */ - start = entry->end; - entry = entry->next; - } - return(TRUE); + start = entry->end; + entry = entry->next; + } + return(TRUE); } /* @@ -2700,15 +2692,12 @@ uvmspace_init(vm, pmap, min, max, pageable) UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist); memset(vm, 0, sizeof(*vm)); - uvm_map_setup(&vm->vm_map, min, max, pageable ? VM_MAP_PAGEABLE : 0); - if (pmap) pmap_reference(pmap); else pmap = pmap_create(); vm->vm_map.pmap = pmap; - vm->vm_refcnt = 1; UVMHIST_LOG(maphist,"<- done",0,0,0,0); } @@ -2812,8 +2801,6 @@ uvmspace_exec(p, start, end) map->min_offset = start; map->max_offset = end; vm_map_unlock(map); - - } else { /* @@ -2863,7 +2850,7 @@ uvmspace_free(vm) #endif vm_map_lock(&vm->vm_map); if (vm->vm_map.nentries) { - (void)uvm_unmap_remove(&vm->vm_map, + uvm_unmap_remove(&vm->vm_map, vm->vm_map.min_offset, vm->vm_map.max_offset, &dead_entries); if (dead_entries != NULL) @@ -2919,13 +2906,9 @@ uvmspace_fork(vm1) /* * first, some sanity checks on the old entry */ - if (UVM_ET_ISSUBMAP(old_entry)) - panic("fork: encountered a submap during fork (illegal)"); - - if (!UVM_ET_ISCOPYONWRITE(old_entry) && - UVM_ET_ISNEEDSCOPY(old_entry)) - panic("fork: non-copy_on_write map entry marked needs_copy (illegal)"); - + KASSERT(!UVM_ET_ISSUBMAP(old_entry)); + KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) || + !UVM_ET_ISNEEDSCOPY(old_entry)); switch (old_entry->inheritance) { case MAP_INHERIT_NONE: diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h index 2c95aff1607..fa135af21d5 100644 --- a/sys/uvm/uvm_map.h +++ b/sys/uvm/uvm_map.h @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_map.h,v 1.16 2001/11/27 05:27:12 art Exp $ */ -/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */ +/* $OpenBSD: uvm_map.h,v 1.17 2001/11/28 13:47:40 art Exp $ */ +/* $NetBSD: uvm_map.h,v 1.25 2001/03/15 06:10:57 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -343,9 +343,9 @@ int uvm_map_reserve __P((vm_map_t, vsize_t, vaddr_t, vsize_t, void uvm_map_setup __P((vm_map_t, vaddr_t, vaddr_t, int)); int uvm_map_submap __P((vm_map_t, vaddr_t, vaddr_t, vm_map_t)); MAP_INLINE -int uvm_unmap __P((vm_map_t, vaddr_t, vaddr_t)); +void uvm_unmap __P((vm_map_t, vaddr_t, vaddr_t)); void uvm_unmap_detach __P((vm_map_entry_t,int)); -int uvm_unmap_remove __P((vm_map_t, vaddr_t, vaddr_t, +void uvm_unmap_remove __P((vm_map_t, vaddr_t, vaddr_t, vm_map_entry_t *)); #endif /* _KERNEL */ diff --git a/sys/uvm/uvm_map_i.h b/sys/uvm/uvm_map_i.h index c8f75e5f91a..2f4578f2a22 100644 --- a/sys/uvm/uvm_map_i.h +++ b/sys/uvm/uvm_map_i.h @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_map_i.h,v 1.9 2001/11/09 03:32:23 art Exp $ */ -/* $NetBSD: uvm_map_i.h,v 1.18 2000/11/27 08:40:04 chs Exp $ */ +/* $OpenBSD: uvm_map_i.h,v 1.10 2001/11/28 13:47:40 art Exp $ */ +/* $NetBSD: uvm_map_i.h,v 1.19 2001/03/15 06:10:57 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -159,12 +159,11 @@ uvm_map_setup(map, min, max, flags) * => map must be unlocked (we will lock it) */ -MAP_INLINE int +MAP_INLINE void uvm_unmap(map, start, end) vm_map_t map; vaddr_t start,end; { - int result; vm_map_entry_t dead_entries; UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist); @@ -175,14 +174,13 @@ uvm_unmap(map, start, end) * detach from the dead entries... 
*/ vm_map_lock(map); - result = uvm_unmap_remove(map, start, end, &dead_entries); + uvm_unmap_remove(map, start, end, &dead_entries); vm_map_unlock(map); if (dead_entries != NULL) uvm_unmap_detach(dead_entries, 0); UVMHIST_LOG(maphist, "<- done", 0,0,0,0); - return(result); } diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c index 3c4c4bdf961..32203733178 100644 --- a/sys/uvm/uvm_mmap.c +++ b/sys/uvm/uvm_mmap.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_mmap.c,v 1.28 2001/11/27 05:27:12 art Exp $ */ -/* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */ +/* $OpenBSD: uvm_mmap.c,v 1.29 2001/11/28 13:47:40 art Exp $ */ +/* $NetBSD: uvm_mmap.c,v 1.50 2001/03/15 06:10:57 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -165,8 +165,8 @@ sys_mincore(p, v, retval) * Lock down vec, so our returned status isn't outdated by * storing the status byte for a page. */ - uvm_vslock(p, vec, npgs, VM_PROT_WRITE); + uvm_vslock(p, vec, npgs, VM_PROT_WRITE); vm_map_lock_read(map); if (uvm_map_lookup_entry(map, start, &entry) == FALSE) { @@ -194,6 +194,7 @@ sys_mincore(p, v, retval) * Special case for objects with no "real" pages. Those * are always considered resident (mapped devices). */ + if (UVM_ET_ISOBJ(entry)) { KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)); if (entry->object.uvm_obj->pgops->pgo_releasepg @@ -221,30 +222,31 @@ sys_mincore(p, v, retval) start - entry->start); /* Don't need to lock anon here. */ if (anon != NULL && anon->u.an_page != NULL) { + /* * Anon has the page for this entry * offset. */ + pgi = 1; } } - if (uobj != NULL && pgi == 0) { /* Check the bottom layer. */ m = uvm_pagelookup(uobj, entry->offset + (start - entry->start)); if (m != NULL) { + /* * Object has the page for this entry * offset. */ + pgi = 1; } } - (void) subyte(vec, pgi); } - if (uobj != NULL) simple_unlock(&uobj->vmobjlock); if (amap != NULL) @@ -291,15 +293,15 @@ sys_mmap(p, v, retval) struct filedesc *fdp = p->p_fd; struct file *fp; struct vnode *vp; - caddr_t handle; + void *handle; int error; /* * first, extract syscall args from the uap. */ - addr = (vaddr_t) SCARG(uap, addr); - size = (vsize_t) SCARG(uap, len); + addr = (vaddr_t)SCARG(uap, addr); + size = (vsize_t)SCARG(uap, len); prot = SCARG(uap, prot) & VM_PROT_ALL; flags = SCARG(uap, flags); fd = SCARG(uap, fd); @@ -321,7 +323,7 @@ sys_mmap(p, v, retval) pageoff = (pos & PAGE_MASK); pos -= pageoff; size += pageoff; /* add offset */ - size = (vsize_t) round_page(size); /* round up */ + size = (vsize_t)round_page(size); /* round up */ if ((ssize_t) size < 0) return (EINVAL); /* don't allow wrap */ @@ -351,10 +353,8 @@ sys_mmap(p, v, retval) * we will refine our guess later (e.g. 
to account for VAC, etc) */ - if (addr < round_page((vaddr_t)p->p_vmspace->vm_daddr + - MAXDSIZ)) - addr = round_page((vaddr_t)p->p_vmspace->vm_daddr + - MAXDSIZ); + addr = MAX(addr, round_page((vaddr_t)p->p_vmspace->vm_daddr + + MAXDSIZ)); } /* @@ -446,12 +446,7 @@ sys_mmap(p, v, retval) /* MAP_PRIVATE mappings can always write to */ maxprot |= VM_PROT_WRITE; } - - /* - * set handle to vnode - */ - - handle = (caddr_t)vp; + handle = vp; } else { /* MAP_ANON case */ /* @@ -476,7 +471,8 @@ sys_mmap(p, v, retval) if ((flags & MAP_ANON) != 0 || ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) { if (size > - (p->p_rlimit[RLIMIT_DATA].rlim_cur - ctob(p->p_vmspace->vm_dsize))) { + (p->p_rlimit[RLIMIT_DATA].rlim_cur - + ctob(p->p_vmspace->vm_dsize))) { return (ENOMEM); } } @@ -513,7 +509,7 @@ sys_msync(p, v, retval) vaddr_t addr; vsize_t size, pageoff; vm_map_t map; - int rv, flags, uvmflags; + int error, rv, flags, uvmflags; /* * extract syscall args from the uap @@ -532,13 +528,13 @@ sys_msync(p, v, retval) flags |= MS_SYNC; /* - * align the address to a page boundary, and adjust the size accordingly + * align the address to a page boundary and adjust the size accordingly. */ pageoff = (addr & PAGE_MASK); addr -= pageoff; size += pageoff; - size = (vsize_t) round_page(size); + size = (vsize_t)round_page(size); /* disallow wrap-around. */ if (addr + size < addr) @@ -560,6 +556,7 @@ sys_msync(p, v, retval) * This can be incorrect if the region splits or is coalesced * with a neighbor. */ + if (size == 0) { vm_map_entry_t entry; @@ -577,6 +574,7 @@ sys_msync(p, v, retval) /* * translate MS_ flags into PGO_ flags */ + uvmflags = PGO_CLEANIT; if (flags & MS_INVALIDATE) uvmflags |= PGO_FREE; @@ -585,27 +583,8 @@ sys_msync(p, v, retval) else uvmflags |= PGO_SYNCIO; /* XXXCDC: force sync for now! */ - /* - * doit! - */ - rv = uvm_map_clean(map, addr, addr+size, uvmflags); - - /* - * and return... - */ - switch (rv) { - case KERN_SUCCESS: - return(0); - case KERN_INVALID_ADDRESS: - return (ENOMEM); - case KERN_FAILURE: - return (EIO); - case KERN_PAGES_LOCKED: /* XXXCDC: uvm doesn't return this */ - return (EBUSY); - default: - return (EINVAL); - } - /*NOTREACHED*/ + error = uvm_map_clean(map, addr, addr+size, uvmflags); + return error; } /* @@ -629,20 +608,20 @@ sys_munmap(p, v, retval) struct vm_map_entry *dead_entries; /* - * get syscall args... + * get syscall args. */ - addr = (vaddr_t) SCARG(uap, addr); - size = (vsize_t) SCARG(uap, len); + addr = (vaddr_t)SCARG(uap, addr); + size = (vsize_t)SCARG(uap, len); /* - * align the address to a page boundary, and adjust the size accordingly + * align the address to a page boundary and adjust the size accordingly. */ pageoff = (addr & PAGE_MASK); addr -= pageoff; size += pageoff; - size = (vsize_t) round_page(size); + size = (vsize_t)round_page(size); if ((int)size < 0) return (EINVAL); @@ -661,29 +640,20 @@ sys_munmap(p, v, retval) return (EINVAL); map = &p->p_vmspace->vm_map; - - vm_map_lock(map); /* lock map so we can checkprot */ - /* * interesting system call semantic: make sure entire range is * allocated before allowing an unmap. */ + vm_map_lock(map); if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) { vm_map_unlock(map); return (EINVAL); } - - /* - * doit! 
- */ - (void) uvm_unmap_remove(map, addr, addr + size, &dead_entries); - - vm_map_unlock(map); /* and unlock */ - + uvm_unmap_remove(map, addr, addr + size, &dead_entries); + vm_map_unlock(map); if (dead_entries != NULL) uvm_unmap_detach(dead_entries, 0); - return (0); } @@ -705,7 +675,7 @@ sys_mprotect(p, v, retval) vaddr_t addr; vsize_t size, pageoff; vm_prot_t prot; - int rv; + int error; /* * extract syscall args from uap @@ -716,27 +686,19 @@ sys_mprotect(p, v, retval) prot = SCARG(uap, prot) & VM_PROT_ALL; /* - * align the address to a page boundary, and adjust the size accordingly + * align the address to a page boundary and adjust the size accordingly. */ + pageoff = (addr & PAGE_MASK); addr -= pageoff; size += pageoff; - size = (vsize_t) round_page(size); + size = (vsize_t)round_page(size); + if ((int)size < 0) return (EINVAL); - - /* - * doit - */ - - rv = uvm_map_protect(&p->p_vmspace->vm_map, - addr, addr+size, prot, FALSE); - - if (rv == KERN_SUCCESS) - return (0); - if (rv == KERN_PROTECTION_FAILURE) - return (EACCES); - return (EINVAL); + error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot, + FALSE); + return error; } /* @@ -757,30 +719,26 @@ sys_minherit(p, v, retval) vaddr_t addr; vsize_t size, pageoff; vm_inherit_t inherit; + int error; addr = (vaddr_t)SCARG(uap, addr); size = (vsize_t)SCARG(uap, len); inherit = SCARG(uap, inherit); + /* - * align the address to a page boundary, and adjust the size accordingly + * align the address to a page boundary and adjust the size accordingly. */ pageoff = (addr & PAGE_MASK); addr -= pageoff; size += pageoff; - size = (vsize_t) round_page(size); + size = (vsize_t)round_page(size); if ((int)size < 0) return (EINVAL); - - switch (uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr+size, - inherit)) { - case KERN_SUCCESS: - return (0); - case KERN_PROTECTION_FAILURE: - return (EACCES); - } - return (EINVAL); + error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size, + inherit); + return error; } /* @@ -801,7 +759,7 @@ sys_madvise(p, v, retval) } */ *uap = v; vaddr_t addr; vsize_t size, pageoff; - int advice, rv;; + int advice, error; addr = (vaddr_t)SCARG(uap, addr); size = (vsize_t)SCARG(uap, len); @@ -810,10 +768,11 @@ sys_madvise(p, v, retval) /* * align the address to a page boundary, and adjust the size accordingly */ + pageoff = (addr & PAGE_MASK); addr -= pageoff; size += pageoff; - size = (vsize_t) round_page(size); + size = (vsize_t)round_page(size); if ((ssize_t)size <= 0) return (EINVAL); @@ -822,11 +781,12 @@ sys_madvise(p, v, retval) case MADV_NORMAL: case MADV_RANDOM: case MADV_SEQUENTIAL: - rv = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size, + error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size, advice); break; case MADV_WILLNEED: + /* * Activate all these pages, pre-faulting them in if * necessary. @@ -836,29 +796,35 @@ sys_madvise(p, v, retval) * Should invent a "weak" mode for uvm_fault() * which would only do the PGO_LOCKED pgo_get(). */ + return (0); case MADV_DONTNEED: + /* * Deactivate all these pages. We don't need them * any more. We don't, however, toss the data in * the pages. */ - rv = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size, + + error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size, PGO_DEACTIVATE); break; case MADV_FREE: + /* * These pages contain no valid data, and may be * garbage-collected. Toss all resources, including * any swap space in use. 
*/ - rv = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size, + + error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size, PGO_FREE); break; case MADV_SPACEAVAIL: + /* * XXXMRG What is this? I think it's: * @@ -869,24 +835,14 @@ sys_madvise(p, v, retval) * as it will free swap space allocated to pages in core. * There's also what to do for device/file/anonymous memory. */ + return (EINVAL); default: return (EINVAL); } - switch (rv) { - case KERN_SUCCESS: - return (0); - case KERN_NO_SPACE: - return (EAGAIN); - case KERN_INVALID_ADDRESS: - return (ENOMEM); - case KERN_FAILURE: - return (EIO); - } - - return (EINVAL); + return error; } /* @@ -910,19 +866,21 @@ sys_mlock(p, v, retval) /* * extract syscall args from uap */ + addr = (vaddr_t)SCARG(uap, addr); size = (vsize_t)SCARG(uap, len); /* * align the address to a page boundary and adjust the size accordingly */ + pageoff = (addr & PAGE_MASK); addr -= pageoff; size += pageoff; - size = (vsize_t) round_page(size); + size = (vsize_t)round_page(size); /* disallow wrap-around. */ - if (addr + (int)size < addr) + if (addr + size < addr) return (EINVAL); if (atop(size) + uvmexp.wired > uvmexp.wiredmax) @@ -939,7 +897,7 @@ sys_mlock(p, v, retval) error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE, 0); - return (error == KERN_SUCCESS ? 0 : ENOMEM); + return error; } /* @@ -970,13 +928,14 @@ sys_munlock(p, v, retval) /* * align the address to a page boundary, and adjust the size accordingly */ + pageoff = (addr & PAGE_MASK); addr -= pageoff; size += pageoff; - size = (vsize_t) round_page(size); + size = (vsize_t)round_page(size); /* disallow wrap-around. */ - if (addr + (int)size < addr) + if (addr + size < addr) return (EINVAL); #ifndef pmap_wired_count @@ -986,7 +945,7 @@ sys_munlock(p, v, retval) error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE, 0); - return (error == KERN_SUCCESS ? 0 : ENOMEM); + return error; } /* @@ -1017,23 +976,6 @@ sys_mlockall(p, v, retval) error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur); - switch (error) { - case KERN_SUCCESS: - error = 0; - break; - - case KERN_NO_SPACE: /* XXX overloaded */ - error = ENOMEM; - break; - - default: - /* - * "Some or all of the memory could not be locked when - * the call was made." - */ - error = EAGAIN; - } - return (error); } @@ -1068,13 +1010,13 @@ uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit) vsize_t size; vm_prot_t prot, maxprot; int flags; - caddr_t handle; /* XXX: VNODE? */ + void *handle; voff_t foff; vsize_t locklimit; { struct uvm_object *uobj; struct vnode *vp; - int retval; + int error; int advice = UVM_ADV_NORMAL; uvm_flag_t uvmflag = 0; @@ -1097,7 +1039,6 @@ uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit) if ((flags & MAP_FIXED) == 0) { *addr = round_page(*addr); /* round */ } else { - if (*addr & PAGE_MASK) return(EINVAL); uvmflag |= UVM_FLAG_FIXED; @@ -1120,10 +1061,9 @@ uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit) uvmflag |= UVM_FLAG_OVERLAY; } else { - - vp = (struct vnode *) handle; /* get vnode */ + vp = (struct vnode *)handle; if (vp->v_type != VCHR) { - uobj = uvn_attach((void *) vp, (flags & MAP_SHARED) ? + uobj = uvn_attach((void *)vp, (flags & MAP_SHARED) ? 
maxprot : (maxprot & ~VM_PROT_WRITE)); /* XXX for now, attach doesn't gain a ref */ @@ -1139,97 +1079,67 @@ uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit) */ if (uobj == NULL && (prot & PROT_EXEC) == 0) { maxprot &= ~VM_PROT_EXECUTE; - uobj = udv_attach((void *) &vp->v_rdev, + uobj = udv_attach((void *)&vp->v_rdev, (flags & MAP_SHARED) ? maxprot : (maxprot & ~VM_PROT_WRITE), foff, size); } advice = UVM_ADV_RANDOM; } - if (uobj == NULL) return((vp->v_type == VREG) ? ENOMEM : EINVAL); - if ((flags & MAP_SHARED) == 0) uvmflag |= UVM_FLAG_COPYONW; } - /* - * set up mapping flags - */ - uvmflag = UVM_MAPFLAG(prot, maxprot, (flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY, advice, uvmflag); + error = uvm_map(map, addr, size, uobj, foff, 0, uvmflag); + if (error) { + if (uobj) + uobj->pgops->pgo_detach(uobj); + return error; + } /* - * do it! + * POSIX 1003.1b -- if our address space was configured + * to lock all future mappings, wire the one we just made. */ - retval = uvm_map(map, addr, size, uobj, foff, 0, uvmflag); + if (prot == VM_PROT_NONE) { - if (retval == KERN_SUCCESS) { /* - * POSIX 1003.1b -- if our address space was configured - * to lock all future mappings, wire the one we just made. + * No more work to do in this case. */ - if (prot == VM_PROT_NONE) { - /* - * No more work to do in this case. - */ - return (0); - } - - vm_map_lock(map); - if (map->flags & VM_MAP_WIREFUTURE) { - if ((atop(size) + uvmexp.wired) > uvmexp.wiredmax + return (0); + } + vm_map_lock(map); + if (map->flags & VM_MAP_WIREFUTURE) { + if ((atop(size) + uvmexp.wired) > uvmexp.wiredmax #ifdef pmap_wired_count - || (locklimit != 0 && (size + - ptoa(pmap_wired_count(vm_map_pmap(map)))) > - locklimit) + || (locklimit != 0 && (size + + ptoa(pmap_wired_count(vm_map_pmap(map)))) > + locklimit) #endif - ) { - retval = KERN_RESOURCE_SHORTAGE; - vm_map_unlock(map); - /* unmap the region! */ - (void) uvm_unmap(map, *addr, *addr + size); - goto bad; - } - /* - * uvm_map_pageable() always returns the map - * unlocked. - */ - retval = uvm_map_pageable(map, *addr, *addr + size, - FALSE, UVM_LK_ENTER); - if (retval != KERN_SUCCESS) { - /* unmap the region! */ - (void) uvm_unmap(map, *addr, *addr + size); - goto bad; - } - return (0); + ) { + vm_map_unlock(map); + uvm_unmap(map, *addr, *addr + size); + return ENOMEM; } - vm_map_unlock(map); + /* + * uvm_map_pageable() always returns the map unlocked. + */ + error = uvm_map_pageable(map, *addr, *addr + size, + FALSE, UVM_LK_ENTER); + if (error) { + uvm_unmap(map, *addr, *addr + size); + return error; + } return (0); } - - /* - * errors: first detach from the uobj, if any. 
- */ - - if (uobj) - uobj->pgops->pgo_detach(uobj); - - bad: - switch (retval) { - case KERN_INVALID_ADDRESS: - case KERN_NO_SPACE: - return(ENOMEM); - case KERN_RESOURCE_SHORTAGE: - return (EAGAIN); - case KERN_PROTECTION_FAILURE: - return(EACCES); - } - return(EINVAL); + vm_map_unlock(map); + return 0; } diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c index f7ebbd77f80..217ae468046 100644 --- a/sys/uvm/uvm_page.c +++ b/sys/uvm/uvm_page.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_page.c,v 1.32 2001/11/27 05:27:12 art Exp $ */ +/* $OpenBSD: uvm_page.c,v 1.33 2001/11/28 13:47:40 art Exp $ */ /* $NetBSD: uvm_page.c,v 1.51 2001/03/09 01:02:12 chs Exp $ */ /* @@ -1071,7 +1071,7 @@ uvm_pagealloc_contig(size, low, high, alignment) addr = vm_map_min(kernel_map); if (uvm_map(kernel_map, &addr, size, NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, - UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) { + UVM_ADV_RANDOM, 0))) { uvm_pglistfree(&pglist); return 0; } diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c index 2fded9caf08..2c7619d6c04 100644 --- a/sys/uvm/uvm_pager.c +++ b/sys/uvm/uvm_pager.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_pager.c,v 1.23 2001/11/27 05:27:12 art Exp $ */ -/* $NetBSD: uvm_pager.c,v 1.41 2001/02/18 19:26:50 chs Exp $ */ +/* $OpenBSD: uvm_pager.c,v 1.24 2001/11/28 13:47:40 art Exp $ */ +/* $NetBSD: uvm_pager.c,v 1.43 2001/03/15 06:10:58 chs Exp $ */ /* * @@ -149,7 +149,7 @@ ReStart: kva = 0; /* let system choose VA */ if (uvm_map(pager_map, &kva, size, NULL, - UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != KERN_SUCCESS) { + UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != 0) { if (curproc == uvm.pagedaemon_proc) { simple_lock(&pager_map_wanted_lock); if (emerginuse) { @@ -223,7 +223,7 @@ uvm_pagermapout(kva, npages) } vm_map_lock(pager_map); - (void) uvm_unmap_remove(pager_map, kva, kva + size, &entries); + uvm_unmap_remove(pager_map, kva, kva + size, &entries); simple_lock(&pager_map_wanted_lock); if (pager_map_wanted) { pager_map_wanted = FALSE; @@ -231,11 +231,11 @@ uvm_pagermapout(kva, npages) } simple_unlock(&pager_map_wanted_lock); vm_map_unlock(pager_map); + remove: pmap_remove(pmap_kernel(), kva, kva + (npages << PAGE_SHIFT)); if (entries) uvm_unmap_detach(entries, 0); - UVMHIST_LOG(maphist,"<- done",0,0,0,0); } @@ -396,22 +396,22 @@ uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi) * => flags (first two for non-swap-backed pages) * PGO_ALLPAGES: all pages in uobj are valid targets * PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets - * PGO_SYNCIO: do SYNC I/O (no async) + * PGO_SYNCIO: wait for i/o to complete * PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range * if (!uobj) start is the (daddr_t) of the starting swapblk * => return state: - * 1. we return the VM_PAGER status code of the pageout + * 1. we return the error code of the pageout * 2. we return with the page queues unlocked * 3. if (uobj != NULL) [!swap_backed] we return with * uobj locked _only_ if PGO_PDFREECLUST is set - * AND result != VM_PAGER_PEND. in all other cases + * AND result == 0 AND async. in all other cases * we return with uobj unlocked. [this is a hack * that allows the pagedaemon to save one lock/unlock * pair in the !swap_backed case since we have to * lock the uobj to drop the cluster anyway] * 4. on errors we always drop the cluster. 
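The rewritten uvm_mmap() tail above returns errnos directly: a failed uvm_map() detaches the object it was handed, and a failed wire unmaps the fresh mapping before bailing out, with no KERN_* translation switch at the end. A compressed sketch of that unwind shape, with stand-in stubs (map_region, wire_region, unmap_region and drop_object are hypothetical names, not UVM interfaces):

#include <errno.h>
#include <stddef.h>

struct object { int refs; };

/* stand-ins for uvm_map(), uvm_map_pageable(), uvm_unmap(), pgo_detach() */
static int map_region(struct object *obj) { (void)obj; return (0); }
static int wire_region(void) { return (ENOMEM); }
static void unmap_region(void) { }
static void drop_object(struct object *obj) { obj->refs--; }

/*
 * Shape of the converted mapping path: errors propagate as errnos,
 * and each failure point unwinds exactly what it created.
 */
static int
map_and_maybe_wire(struct object *obj, int wire_future)
{
	int error;

	error = map_region(obj);
	if (error) {
		if (obj != NULL)
			drop_object(obj);	/* detach before bailing out */
		return (error);			/* no translation step */
	}
	if (wire_future) {
		error = wire_region();
		if (error) {
			unmap_region();		/* take the mapping back down */
			return (error);
		}
	}
	return (0);
}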
thus, if we return - * !PEND, !OK, then the caller only has to worry about + * an error, then the caller only has to worry about * un-busying the main page (not the cluster pages). * 5. on success, if !PGO_PDFREECLUST, we return the cluster * with all pages busy (caller must un-busy and check @@ -428,6 +428,7 @@ uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop) { int result; daddr_t swblk; + boolean_t async = (flags & PGO_SYNCIO) == 0; struct vm_page **ppsp = *ppsp_ptr; UVMHIST_FUNC("uvm_pager_put"); UVMHIST_CALLED(ubchist); @@ -502,20 +503,21 @@ ReTry: * i/o is done...] */ - if (result == VM_PAGER_PEND || result == VM_PAGER_OK) { - if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) { + if (result == 0) { + if (flags & PGO_PDFREECLUST && !async) { + /* - * drop cluster and relock object (only if I/O is - * not pending) + * drop cluster and relock object for sync i/o. */ + if (uobj) /* required for dropcluster */ simple_lock(&uobj->vmobjlock); if (*npages > 1 || pg == NULL) uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_PDFREECLUST); - /* if (uobj): object still locked, as per - * return-state item #3 */ + + /* if (uobj): object still locked, as per #3 */ } return (result); } @@ -540,7 +542,7 @@ ReTry: */ if (uobj == NULL && pg != NULL) { - int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0; + int nswblk = (result == EAGAIN) ? swblk : 0; if (pg->pqflags & PQ_ANON) { simple_lock(&pg->uanon->an_lock); pg->uanon->an_swslot = nswblk; @@ -553,7 +555,7 @@ ReTry: simple_unlock(&pg->uobject->vmobjlock); } } - if (result == VM_PAGER_AGAIN) { + if (result == EAGAIN) { /* * for transient failures, free all the swslots that @@ -878,29 +880,3 @@ freed: pool_put(&bufpool, bp); splx(s); } - -/* - * translate unix errno values to VM_PAGER_*. - */ - -int -uvm_errno2vmerror(errno) - int errno; -{ - switch (errno) { - case 0: - return VM_PAGER_OK; - case EINVAL: - return VM_PAGER_BAD; - case EINPROGRESS: - return VM_PAGER_PEND; - case EIO: - return VM_PAGER_ERROR; - case EAGAIN: - return VM_PAGER_AGAIN; - case EBUSY: - return VM_PAGER_UNLOCK; - default: - return VM_PAGER_ERROR; - } -} diff --git a/sys/uvm/uvm_pager.h b/sys/uvm/uvm_pager.h index b246e815e04..a826ada04de 100644 --- a/sys/uvm/uvm_pager.h +++ b/sys/uvm/uvm_pager.h @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_pager.h,v 1.14 2001/11/10 18:42:31 art Exp $ */ -/* $NetBSD: uvm_pager.h,v 1.20 2000/11/27 08:40:05 chs Exp $ */ +/* $OpenBSD: uvm_pager.h,v 1.15 2001/11/28 13:47:40 art Exp $ */ +/* $NetBSD: uvm_pager.h,v 1.21 2001/03/10 22:46:50 chs Exp $ */ /* * @@ -166,7 +166,6 @@ void uvm_pagermapout __P((vaddr_t, int)); struct vm_page **uvm_mk_pcluster __P((struct uvm_object *, struct vm_page **, int *, struct vm_page *, int, voff_t, voff_t)); -int uvm_errno2vmerror __P((int)); /* Flags to uvm_pagermapin() */ #define UVMPAGER_MAPIN_WAITOK 0x01 /* it's okay to wait */ @@ -174,27 +173,6 @@ int uvm_errno2vmerror __P((int)); #define UVMPAGER_MAPIN_WRITE 0x00 /* device -> host (pseudo flag) */ /* - * get/put return values - * OK operation was successful - * BAD specified data was out of the accepted range - * FAIL specified data was in range, but doesn't exist - * PEND operations was initiated but not completed - * ERROR error while accessing data that is in range and exists - * AGAIN temporary resource shortage prevented operation from happening - * UNLOCK unlock the map and try again - * REFAULT [uvm_fault internal use only!] 
unable to relock data structures, - * thus the mapping needs to be reverified before we can procede - */ -#define VM_PAGER_OK 0 -#define VM_PAGER_BAD 1 -#define VM_PAGER_FAIL 2 -#define VM_PAGER_PEND 3 -#define VM_PAGER_ERROR 4 -#define VM_PAGER_AGAIN 5 -#define VM_PAGER_UNLOCK 6 -#define VM_PAGER_REFAULT 7 - -/* * XXX * this is needed until the device strategy interface * is changed to do physically-addressed i/o. diff --git a/sys/uvm/uvm_param.h b/sys/uvm/uvm_param.h index 78b3f1bc5ba..46b0b1a79e0 100644 --- a/sys/uvm/uvm_param.h +++ b/sys/uvm/uvm_param.h @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_param.h,v 1.3 2001/11/27 05:27:12 art Exp $ */ -/* $NetBSD: uvm_param.h,v 1.5 2001/03/09 01:02:12 chs Exp $ */ +/* $OpenBSD: uvm_param.h,v 1.4 2001/11/28 13:47:40 art Exp $ */ +/* $NetBSD: uvm_param.h,v 1.7 2001/03/21 03:16:06 chs Exp $ */ /* * Copyright (c) 1991, 1993 @@ -135,20 +135,6 @@ struct _ps_strings { #define SWAPSKIPBYTES 8192 /* never use at the start of a swap space */ -/* - * Return values from the VM routines. - */ -#define KERN_SUCCESS 0 -#define KERN_INVALID_ADDRESS 1 -#define KERN_PROTECTION_FAILURE 2 -#define KERN_NO_SPACE 3 -#define KERN_INVALID_ARGUMENT 4 -#define KERN_FAILURE 5 -#define KERN_RESOURCE_SHORTAGE 6 -#define KERN_NOT_RECEIVER 7 -#define KERN_NO_ACCESS 8 -#define KERN_PAGES_LOCKED 9 - #ifndef ASSEMBLER /* * Convert addresses to pages and vice versa. @@ -167,7 +153,7 @@ struct _ps_strings { extern psize_t mem_size; /* size of physical memory (bytes) */ extern int ubc_nwins; /* number of UBC mapping windows */ -extern int ubc_winsize; /* size of a UBC mapping window */ +extern int ubc_winshift; /* shift for a UBC mapping window */ #else /* out-of-kernel versions of round_page and trunc_page */ diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c index 5708b89a507..d25cd2d6119 100644 --- a/sys/uvm/uvm_pdaemon.c +++ b/sys/uvm/uvm_pdaemon.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_pdaemon.c,v 1.18 2001/11/12 01:26:10 art Exp $ */ -/* $NetBSD: uvm_pdaemon.c,v 1.30 2001/03/09 01:02:12 chs Exp $ */ +/* $OpenBSD: uvm_pdaemon.c,v 1.19 2001/11/28 13:47:40 art Exp $ */ +/* $NetBSD: uvm_pdaemon.c,v 1.31 2001/03/10 22:46:50 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -767,18 +767,14 @@ uvmpd_scan_inactive(pglst) * * note locking semantics of uvm_pager_put with PGO_PDFREECLUST: * IN: locked: uobj (if !swap_backed), page queues - * OUT: locked: uobj (if !swap_backed && result !=VM_PAGER_PEND) - * !locked: pageqs, uobj (if swap_backed || VM_PAGER_PEND) - * - * [the bit about VM_PAGER_PEND saves us one lock-unlock pair] + * OUT:!locked: pageqs, uobj */ /* locked: uobj (if !swap_backed), page queues */ uvmexp.pdpageouts++; result = uvm_pager_put(swap_backed ? NULL : uobj, p, &ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0); - /* locked: uobj (if !swap_backed && result != PEND) */ - /* unlocked: pageqs, object (if swap_backed ||result == PEND) */ + /* unlocked: pageqs, uobj */ /* * if we did i/o to swap, zero swslot to indicate that we are @@ -789,35 +785,10 @@ uvmpd_scan_inactive(pglst) swslot = 0; /* done with this cluster */ /* - * first, we check for VM_PAGER_PEND which means that the - * async I/O is in progress and the async I/O done routine - * will clean up after us. in this case we move on to the - * next page. - * - * there is a very remote chance that the pending async i/o can - * finish _before_ we get here. if that happens, our page "p" - * may no longer be on the inactive queue. 
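For reference, the retired VM_PAGER_* codes and the errnos that replace them, reconstructed from the conversions visible in these hunks. VM_PAGER_FAIL never appears in this diff, so its mapping below is an assumption; ERESTART is kernel-internal on BSD, hence the portability shim:

#include <errno.h>

#ifndef ERESTART
#define ERESTART EINTR		/* userland stand-in; kernel-only on BSD */
#endif

/* the retired codes, kept only for this reference table */
enum {
	VM_PAGER_OK = 0, VM_PAGER_BAD, VM_PAGER_FAIL, VM_PAGER_PEND,
	VM_PAGER_ERROR, VM_PAGER_AGAIN, VM_PAGER_UNLOCK, VM_PAGER_REFAULT
};

/*
 * How each retired code shows up after the conversion.  PEND has no
 * errno at all: "started but not finished" is now expressed as a 0
 * return from an async request, checked alongside an async flag.
 */
static int
old_pager_code_to_errno(int code)
{
	switch (code) {
	case VM_PAGER_OK:	return (0);
	case VM_PAGER_PEND:	return (0);	/* plus async context */
	case VM_PAGER_BAD:	return (EINVAL);
	case VM_PAGER_ERROR:	return (EIO);
	case VM_PAGER_AGAIN:	return (EAGAIN);
	case VM_PAGER_UNLOCK:	return (EBUSY);
	case VM_PAGER_REFAULT:	return (ERESTART);
	default:		return (EIO);	/* FAIL: assumed, not in diff */
	}
}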
so we verify this - * when determining the next page (starting over at the head if - * we've lost our inactive page). + * if the pageout failed, reactivate the page and continue. */ - if (result == VM_PAGER_PEND) { - uvmexp.paging += npages; - uvm_lock_pageq(); - uvmexp.pdpending++; - if (p) { - if (p->pqflags & PQ_INACTIVE) - nextpg = TAILQ_NEXT(p, pageq); - else - nextpg = TAILQ_FIRST(pglst); - } else { - nextpg = NULL; - } - continue; - } - - if (result == VM_PAGER_ERROR && - curproc == uvm.pagedaemon_proc) { + if (result == EIO && curproc == uvm.pagedaemon_proc) { uvm_lock_pageq(); nextpg = TAILQ_NEXT(p, pageq); uvm_pageactivate(p); @@ -825,134 +796,20 @@ uvmpd_scan_inactive(pglst) } /* - * clean up "p" if we have one + * the pageout is in progress. bump counters and set up + * for the next loop. */ + uvm_lock_pageq(); + uvmexp.paging += npages; + uvmexp.pdpending++; if (p) { - /* - * the I/O request to "p" is done and uvm_pager_put - * has freed any cluster pages it may have allocated - * during I/O. all that is left for us to do is - * clean up page "p" (which is still PG_BUSY). - * - * our result could be one of the following: - * VM_PAGER_OK: successful pageout - * - * VM_PAGER_AGAIN: tmp resource shortage, we skip - * to next page - * VM_PAGER_{FAIL,ERROR,BAD}: an error. we - * "reactivate" page to get it out of the way (it - * will eventually drift back into the inactive - * queue for a retry). - * VM_PAGER_UNLOCK: should never see this as it is - * only valid for "get" operations - */ - - /* relock p's object: page queues not lock yet, so - * no need for "try" */ - - /* !swap_backed case: already locked... */ - if (swap_backed) { - if (anon) - simple_lock(&anon->an_lock); - else - simple_lock(&uobj->vmobjlock); - } - - /* handle PG_WANTED now */ - if (p->flags & PG_WANTED) - /* still holding object lock */ - wakeup(p); - - p->flags &= ~(PG_BUSY|PG_WANTED); - UVM_PAGE_OWN(p, NULL); - - /* released during I/O? */ - if (p->flags & PG_RELEASED) { - if (anon) { - /* remove page so we can get nextpg */ - anon->u.an_page = NULL; - - simple_unlock(&anon->an_lock); - uvm_anfree(anon); /* kills anon */ - pmap_page_protect(p, VM_PROT_NONE); - anon = NULL; - uvm_lock_pageq(); - nextpg = TAILQ_NEXT(p, pageq); - /* free released page */ - uvm_pagefree(p); - - } else { - - /* - * pgo_releasepg nukes the page and - * gets "nextpg" for us. it returns - * with the page queues locked (when - * given nextpg ptr). - */ - - if (!uobj->pgops->pgo_releasepg(p, - &nextpg)) - /* uobj died after release */ - uobj = NULL; - - /* - * lock page queues here so that they're - * always locked at the end of the loop. - */ - - uvm_lock_pageq(); - } - } else { /* page was not released during I/O */ - uvm_lock_pageq(); + if (p->pqflags & PQ_INACTIVE) nextpg = TAILQ_NEXT(p, pageq); - if (result != VM_PAGER_OK) { - /* pageout was a failure... */ - if (result != VM_PAGER_AGAIN) - uvm_pageactivate(p); - pmap_clear_reference(p); - /* XXXCDC: if (swap_backed) FREE p's - * swap block? */ - } else { - /* pageout was a success... */ - pmap_clear_reference(p); - pmap_clear_modify(p); - p->flags |= PG_CLEAN; - } - } - - /* - * drop object lock (if there is an object left). do - * a safety check of nextpg to make sure it is on the - * inactive queue (it should be since PG_BUSY pages on - * the inactive queue can't be re-queued [note: not - * true for active queue]). 
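The slimmed-down pagedaemon loop above keeps one subtlety from the deleted block: while the page was unlocked it may have left the inactive queue, so the successor is taken only if the page is still queued, and the scan otherwise restarts at the head. The same walk-a-mutable-queue pattern in miniature, using <sys/queue.h>; the item structure and ON_QUEUE flag are illustrative:

#include <sys/queue.h>
#include <stddef.h>

#define ON_QUEUE 0x01		/* stand-in for PQ_INACTIVE */

struct item {
	int flags;
	TAILQ_ENTRY(item) link;
};
TAILQ_HEAD(itemq, item);

/*
 * Pick the next item to visit after processing "cur", which may have
 * been pulled off the queue while its lock was dropped: successor if
 * still queued, else start over from the head.
 */
static struct item *
next_item(struct itemq *q, struct item *cur)
{
	if (cur == NULL)
		return (NULL);
	if (cur->flags & ON_QUEUE)
		return (TAILQ_NEXT(cur, link));
	return (TAILQ_FIRST(q));
}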
- */ - - if (anon) - simple_unlock(&anon->an_lock); - else if (uobj) - simple_unlock(&uobj->vmobjlock); - + else + nextpg = TAILQ_FIRST(pglst); } else { - - /* - * if p is null in this loop, make sure it stays null - * in the next loop. - */ - nextpg = NULL; - - /* - * lock page queues here just so they're always locked - * at the end of the loop. - */ - - uvm_lock_pageq(); - } - - if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) { - nextpg = TAILQ_FIRST(pglst); /* reload! */ } } return (retval); diff --git a/sys/uvm/uvm_swap.c b/sys/uvm/uvm_swap.c index c4298200688..fc1d6861de1 100644 --- a/sys/uvm/uvm_swap.c +++ b/sys/uvm/uvm_swap.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_swap.c,v 1.42 2001/11/27 05:27:12 art Exp $ */ -/* $NetBSD: uvm_swap.c,v 1.46 2001/02/18 21:19:08 chs Exp $ */ +/* $OpenBSD: uvm_swap.c,v 1.43 2001/11/28 13:47:40 art Exp $ */ +/* $NetBSD: uvm_swap.c,v 1.47 2001/03/10 22:46:51 chs Exp $ */ /* * Copyright (c) 1995, 1996, 1997 Matthew R. Green @@ -1754,12 +1754,13 @@ uvm_swap_get(page, swslot, flags) uvmexp.nswget++; KASSERT(flags & PGO_SYNCIO); if (swslot == SWSLOT_BAD) { - return VM_PAGER_ERROR; + return EIO; } /* * this page is (about to be) no longer only in swap. */ + simple_lock(&uvm.swap_data_lock); uvmexp.swpgonly--; simple_unlock(&uvm.swap_data_lock); @@ -1767,10 +1768,12 @@ uvm_swap_get(page, swslot, flags) result = uvm_swap_io(&page, swslot, 1, B_READ | ((flags & PGO_SYNCIO) ? 0 : B_ASYNC)); - if (result != VM_PAGER_OK && result != VM_PAGER_PEND) { + if (result != 0) { + /* * oops, the read failed so it really is still only in swap. */ + simple_lock(&uvm.swap_data_lock); uvmexp.swpgonly++; simple_unlock(&uvm.swap_data_lock); @@ -1791,7 +1794,7 @@ uvm_swap_io(pps, startslot, npages, flags) daddr_t startblk; struct buf *bp; vaddr_t kva; - int result, s, mapinflags, pflag; + int error, s, mapinflags, pflag; boolean_t write, async; #ifdef UVM_SWAP_ENCRYPT vaddr_t dstkva; @@ -1821,7 +1824,7 @@ uvm_swap_io(pps, startslot, npages, flags) mapinflags |= UVMPAGER_MAPIN_WAITOK; kva = uvm_pagermapin(pps, npages, mapinflags); if (kva == 0) - return (VM_PAGER_AGAIN); + return (EAGAIN); #ifdef UVM_SWAP_ENCRYPT if (write) { @@ -1867,14 +1870,14 @@ uvm_swap_io(pps, startslot, npages, flags) if (!uvm_swap_allocpages(tpps, npages)) { uvm_pagermapout(kva, npages); - return (VM_PAGER_AGAIN); + return (EAGAIN); } dstkva = uvm_pagermapin(tpps, npages, swmapflags); if (dstkva == NULL) { uvm_pagermapout(kva, npages); uvm_swap_freepages(tpps, npages); - return (VM_PAGER_AGAIN); + return (EAGAIN); } src = (caddr_t) kva; @@ -1928,7 +1931,7 @@ uvm_swap_io(pps, startslot, npages, flags) uvm_swap_freepages(tpps, npages); } #endif - return (VM_PAGER_AGAIN); + return (EAGAIN); } #ifdef UVM_SWAP_ENCRYPT @@ -1992,13 +1995,12 @@ uvm_swap_io(pps, startslot, npages, flags) */ VOP_STRATEGY(bp); if (async) - return (VM_PAGER_PEND); + return 0; /* * must be sync i/o. wait for it to finish */ - (void) biowait(bp); - result = (bp->b_flags & B_ERROR) ? VM_PAGER_ERROR : VM_PAGER_OK; + error = biowait(bp); #ifdef UVM_SWAP_ENCRYPT /* @@ -2050,8 +2052,8 @@ uvm_swap_io(pps, startslot, npages, flags) /* * finally return. 
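uvm_swap_io() above now returns 0 as soon as an async request is queued and otherwise returns biowait()'s errno unchanged, instead of synthesizing VM_PAGER_PEND or VM_PAGER_ERROR. The convention, boiled down to a sketch (start_io and wait_io are stand-ins for VOP_STRATEGY() and biowait()):

#include <errno.h>

struct request { int error; };

static void start_io(struct request *r) { r->error = 0; }   /* queue the I/O */
static int wait_io(struct request *r) { return (r->error); } /* biowait() */

/*
 * Post-conversion convention: an async request returns 0 once queued
 * (completion is reported elsewhere, at interrupt time); a sync
 * request waits and hands back whatever errno the wait produced.
 */
static int
do_io(struct request *r, int async)
{
	start_io(r);
	if (async)
		return (0);	/* "pending" is no longer a return code */
	return (wait_io(r));	/* e.g. 0 or EIO, straight through */
}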
*/ - UVMHIST_LOG(pdhist, "<- done (sync) result=%d", result, 0, 0, 0); - return (result); + UVMHIST_LOG(pdhist, "<- done (sync) error=%d", error, 0, 0, 0); + return (error); } static void diff --git a/sys/uvm/uvm_unix.c b/sys/uvm/uvm_unix.c index 44863db3e2d..a2fde83db35 100644 --- a/sys/uvm/uvm_unix.c +++ b/sys/uvm/uvm_unix.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_unix.c,v 1.17 2001/11/07 02:55:51 art Exp $ */ -/* $NetBSD: uvm_unix.c,v 1.18 2000/09/13 15:00:25 thorpej Exp $ */ +/* $OpenBSD: uvm_unix.c,v 1.18 2001/11/28 13:47:40 art Exp $ */ +/* $NetBSD: uvm_unix.c,v 1.20 2001/03/19 02:25:33 simonb Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -78,7 +78,7 @@ sys_obreak(p, v, retval) struct vmspace *vm = p->p_vmspace; vaddr_t new, old; ssize_t diff; - int rv; + int error; old = (vaddr_t)vm->vm_daddr; new = round_page((vaddr_t)SCARG(uap, nsize)); @@ -95,26 +95,23 @@ sys_obreak(p, v, retval) * grow or shrink? */ if (diff > 0) { - rv = uvm_map(&vm->vm_map, &old, diff, NULL, UVM_UNKNOWN_OFFSET, - 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY, + error = uvm_map(&vm->vm_map, &old, diff, NULL, + UVM_UNKNOWN_OFFSET, 0, + UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY, UVM_ADV_NORMAL, UVM_FLAG_AMAPPAD|UVM_FLAG_FIXED| UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW)); - if (rv == KERN_SUCCESS) { - vm->vm_dsize += atop(diff); - return (0); + if (error) { + uprintf("sbrk: grow %ld failed, error = %d\n", + (long)diff, error); + return error; } + vm->vm_dsize += atop(diff); } else { - rv = uvm_deallocate(&vm->vm_map, new, -diff); - if (rv == KERN_SUCCESS) { - vm->vm_dsize -= atop(-diff); - return (0); - } + uvm_deallocate(&vm->vm_map, new, -diff); + vm->vm_dsize -= atop(-diff); } - uprintf("sbrk: %s %ld failed, return = %d\n", - diff > 0 ? "grow" : "shrink", - (long)(diff > 0 ? diff : -diff), rv); - return (ENOMEM); + return (0); } /* diff --git a/sys/uvm/uvm_user.c b/sys/uvm/uvm_user.c index cd2cacbe837..e6a6ba1d738 100644 --- a/sys/uvm/uvm_user.c +++ b/sys/uvm/uvm_user.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_user.c,v 1.6 2001/11/06 01:35:04 art Exp $ */ -/* $NetBSD: uvm_user.c,v 1.8 2000/06/27 17:29:37 mrg Exp $ */ +/* $OpenBSD: uvm_user.c,v 1.7 2001/11/28 13:47:40 art Exp $ */ +/* $NetBSD: uvm_user.c,v 1.9 2001/03/15 06:10:58 chs Exp $ */ /* * @@ -50,19 +50,15 @@ * uvm_deallocate: deallocate memory (unmap) */ -int +void uvm_deallocate(map, start, size) vm_map_t map; vaddr_t start; vsize_t size; { - if (map == NULL) - panic("uvm_deallocate with null map"); - - if (size == (vaddr_t) 0) - return (KERN_SUCCESS); - - return(uvm_unmap(map, trunc_page(start), round_page(start+size))); + if (size == 0) + return; + uvm_unmap(map, trunc_page(start), round_page(start + size)); } diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c index 667cbc5b458..cef3499f281 100644 --- a/sys/uvm/uvm_vnode.c +++ b/sys/uvm/uvm_vnode.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_vnode.c,v 1.25 2001/11/27 05:27:12 art Exp $ */ -/* $NetBSD: uvm_vnode.c,v 1.47 2001/03/09 01:02:13 chs Exp $ */ +/* $OpenBSD: uvm_vnode.c,v 1.26 2001/11/28 13:47:40 art Exp $ */ +/* $NetBSD: uvm_vnode.c,v 1.48 2001/03/10 22:46:51 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -247,8 +247,6 @@ uvn_reference(uobj) * remove a reference to a VM object. * * => caller must call with object unlocked and map locked. - * => this starts the detach process, but doesn't have to finish it - * (async i/o could still be pending). 
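In the sys_obreak() rewrite above only the grow path can fail, which is also why uvm_deallocate() loses its return value in the uvm_user.c hunk. The resulting control flow, reduced to its bookkeeping (grow_map, shrink_map and dsize_pages are stand-ins for the uvm_map()/uvm_deallocate() calls and vm_dsize):

#include <errno.h>
#include <sys/types.h>

struct space { ssize_t dsize_pages; };

static int grow_map(struct space *vm, ssize_t pages)
	{ (void)vm; (void)pages; return (0); }
static void shrink_map(struct space *vm, ssize_t pages)
	{ (void)vm; (void)pages; }

static int
obreak_like(struct space *vm, ssize_t diff_pages)
{
	int error;

	if (diff_pages == 0)
		return (0);
	if (diff_pages > 0) {
		error = grow_map(vm, diff_pages);
		if (error)
			return (error);	/* only growing can fail */
		vm->dsize_pages += diff_pages;
	} else {
		shrink_map(vm, -diff_pages);	/* void: cannot fail */
		vm->dsize_pages += diff_pages;	/* diff_pages is negative */
	}
	return (0);
}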
*/ static void uvn_detach(uobj) @@ -300,7 +298,7 @@ uvn_releasepg(pg, nextpgp) * there are two tailq's in the uvm. structure... one for pending async * i/o and one for "done" async i/o. to do an async i/o one puts * a buf on the "pending" list (protected by splbio()), starts the - * i/o and returns VM_PAGER_PEND. when the i/o is done, we expect + * i/o and returns 0. when the i/o is done, we expect * some sort of "i/o done" function to be called (at splbio(), interrupt * time). this function should remove the buf from the pending list * and place it on the "done" list and wakeup the daemon. the daemon @@ -391,6 +389,7 @@ uvn_flush(uobj, start, stop, flags) int s; int npages, result, lcv; boolean_t retval, need_iosync, by_list, needs_clean, all, wasclean; + boolean_t async = (flags & PGO_SYNCIO) == 0; voff_t curoff; u_short pp_version; UVMHIST_FUNC("uvn_flush"); UVMHIST_CALLED(maphist); @@ -522,7 +521,7 @@ uvn_flush(uobj, start, stop, flags) if ((flags & PGO_CLEANIT) == 0 || (pp->flags & PG_BUSY) != 0) { needs_clean = FALSE; - if (flags & PGO_SYNCIO) + if (!async) need_iosync = TRUE; } else { @@ -587,7 +586,6 @@ uvn_flush(uobj, start, stop, flags) UVM_PAGE_OWN(pp, "uvn_flush"); pmap_page_protect(pp, VM_PROT_READ); pp_version = pp->version; -ReTry: ppsp = pps; npages = sizeof(pps) / sizeof(struct vm_page *); @@ -609,34 +607,10 @@ ReTry: uvm_lock_pageq(); /* - * VM_PAGER_AGAIN: given the structure of this pager, this - * can only happen when we are doing async I/O and can't - * map the pages into kernel memory (pager_map) due to lack - * of vm space. if this happens we drop back to sync I/O. - */ - - if (result == VM_PAGER_AGAIN) { - - /* - * it is unlikely, but page could have been released - * while we had the object lock dropped. we ignore - * this now and retry the I/O. we will detect and - * handle the released page after the syncio I/O - * completes. - */ -#ifdef DIAGNOSTIC - if (flags & PGO_SYNCIO) - panic("uvn_flush: PGO_SYNCIO return 'try again' error (impossible)"); -#endif - flags |= PGO_SYNCIO; - goto ReTry; - } - - /* - * the cleaning operation is now done. finish up. note that - * on error (!OK, !PEND) uvm_pager_put drops the cluster for us. - * if success (OK, PEND) then uvm_pager_put returns the cluster - * to us in ppsp/npages. + * the cleaning operation is now done. finish up. note that + * on error uvm_pager_put drops the cluster for us. + * on success uvm_pager_put returns the cluster to us in + * ppsp/npages. */ /* @@ -644,7 +618,7 @@ ReTry: * we can move on to the next page. */ - if (result == VM_PAGER_PEND && + if (result == 0 && async && (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) { /* @@ -701,17 +675,17 @@ ReTry: * verify the page wasn't moved while obj was * unlocked */ - if (result == VM_PAGER_PEND && ptmp->uobject != uobj) + if (result == 0 && async && ptmp->uobject != uobj) continue; /* * unbusy the page if I/O is done. note that for - * pending I/O it is possible that the I/O op + * async I/O it is possible that the I/O op * finished before we relocked the object (in * which case the page is no longer busy). 
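uvn_flush() above derives an async flag from PGO_SYNCIO once and then tests "result == 0 && async" everywhere the old code compared against VM_PAGER_PEND. The predicate, distilled (OP_SYNC stands in for PGO_SYNCIO):

#include <stdbool.h>

#define OP_SYNC	0x01		/* stand-in for PGO_SYNCIO */

/*
 * "The operation will complete later, not here": true exactly where
 * the old code saw VM_PAGER_PEND.  An error (result != 0) or a
 * synchronous request means the I/O is already fully resolved.
 */
static bool
io_still_pending(int result, int flags)
{
	bool async = (flags & OP_SYNC) == 0;

	return (result == 0 && async);
}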
*/ - if (result != VM_PAGER_PEND) { + if (result != 0 || !async) { if (ptmp->flags & PG_WANTED) { /* still holding object lock */ wakeup(ptmp); @@ -730,7 +704,7 @@ ReTry: continue; } else { if ((flags & PGO_WEAK) == 0 && - !(result == VM_PAGER_ERROR && + !(result == EIO && curproc == uvm.pagedaemon_proc)) { ptmp->flags |= (PG_CLEAN|PG_CLEANCHK); @@ -753,12 +727,12 @@ ReTry: uvm_pagedeactivate(ptmp); } } else if (flags & PGO_FREE) { - if (result == VM_PAGER_PEND) { + if (result == 0 && async) { if ((ptmp->flags & PG_BUSY) != 0) /* signal for i/o done */ ptmp->flags |= PG_RELEASED; } else { - if (result != VM_PAGER_OK) { + if (result != 0) { printf("uvn_flush: obj=%p, " "offset=0x%llx. error %d\n", pp->uobject, @@ -852,7 +826,7 @@ uvn_put(uobj, pps, npages, flags) int error; error = VOP_PUTPAGES(vp, pps, npages, flags, NULL); - return uvm_errno2vmerror(error); + return error; } @@ -884,7 +858,7 @@ uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags) UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0); error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx, access_type, advice, flags); - return uvm_errno2vmerror(error); + return error; } |
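With uvn_put() and uvn_get() passing VOP errors through untranslated, callers across the tree now switch on plain errnos, as the anon_pagein() and uao_pagein_page() hunks earlier in this diff already do. The shape of such a caller, as a sketch (fetch_page stands in for a pager call like uvmfault_anonget(); the ERESTART shim covers userland builds, where that value is kernel-only on BSD):

#include <errno.h>
#include <stdbool.h>

#ifndef ERESTART
#define ERESTART EINTR		/* userland stand-in */
#endif

static int fetch_page(void) { return (0); }	/* stand-in pager call */

/*
 * Post-conversion caller: 0 means success (object still locked),
 * EIO a hard failure, ERESTART "the anon was freed, give up",
 * anything else (e.g. EAGAIN) "retry later".
 */
static bool
pagein_one(void)
{
	switch (fetch_page()) {
	case 0:
		break;			/* page resident, still locked */
	case EIO:
	case ERESTART:
		return (false);		/* nothing more to do on errors */
	default:
		return (true);		/* transient: try again later */
	}
	/* ... mark the page clean/active and release it ... */
	return (true);
}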