author    | Artur Grabowski <art@cvs.openbsd.org> | 2001-11-09 04:34:28 +0000
committer | Artur Grabowski <art@cvs.openbsd.org> | 2001-11-09 04:34:28 +0000
commit    | 6be3aec4395751b4d576ebe34081f9290062620f
tree      | 6b3ce70d9bea36bb240b4c47bfca5e8d3d46f386 /sys/uvm
parent    | fd127db91b4066b807938ac60a8212295d0c06f5
various style fixes.
Diffstat (limited to 'sys/uvm')
-rw-r--r-- | sys/uvm/uvm_fault.c | 154
1 file changed, 52 insertions, 102 deletions
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index 98d3be5024a..4d9d28bd6cb 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault.c,v 1.22 2001/11/06 13:36:52 art Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.23 2001/11/09 04:34:27 art Exp $ */
 /* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */

 /*
@@ -457,12 +457,8 @@ uvmfault_anonget(ufi, amap, anon)
                 }

                 if (result != VM_PAGER_OK) {
-#ifdef DIAGNOSTIC
-                        if (result == VM_PAGER_PEND) {
-                                panic("uvmfault_anonget: "
-                                    "got PENDING for non-async I/O");
-                        }
-#endif
+                        KASSERT(result != VM_PAGER_PEND);
+
                         /* remove page from anon */
                         anon->u.an_page = NULL;
@@ -568,7 +564,7 @@ uvm_fault(orig_map, vaddr, fault_type, access_type)
         vm_prot_t enter_prot;
         boolean_t wired, narrow, promote, locked, shadowed;
         int npages, nback, nforw, centeridx, result, lcv, gotpages;
-        vaddr_t startva, objaddr, currva, offset;
+        vaddr_t startva, objaddr, currva, offset, uoff;
         paddr_t pa;
         struct vm_amap *amap;
         struct uvm_object *uobj;
@@ -579,7 +575,8 @@ uvm_fault(orig_map, vaddr, fault_type, access_type)
         UVMHIST_LOG(maphist, "(map=0x%x, vaddr=0x%x, ft=%d, at=%d)",
             orig_map, vaddr, fault_type, access_type);

-        anon = NULL;    /* XXX: shut up gcc */
+        anon = NULL;
+        pg = NULL;

         uvmexp.faults++;        /* XXX: locking? */
@@ -716,10 +713,8 @@ ReFault:
         if (narrow == FALSE) {

                 /* wide fault (!narrow) */
-#ifdef DIAGNOSTIC
-                if (uvmadvice[ufi.entry->advice].advice != ufi.entry->advice)
-                        panic("fault: advice mismatch!");
-#endif
+                KASSERT(uvmadvice[ufi.entry->advice].advice ==
+                    ufi.entry->advice);
                 nback = min(uvmadvice[ufi.entry->advice].nback,
                     (ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
                 startva = ufi.orig_rvaddr - (nback << PAGE_SHIFT);
@@ -792,7 +787,7 @@ ReFault:
                 /* now forget about the backpages */
                 if (amap)
                         anons += nback;
-                startva = startva + (nback << PAGE_SHIFT);
+                startva += (nback << PAGE_SHIFT);
                 npages -= nback;
                 nback = centeridx = 0;
         }
@@ -813,12 +808,10 @@ ReFault:
                  * dont play with VAs that are already mapped
                  * (except for center)
                  */
-                if (lcv != centeridx) {
-                        if (pmap_extract(ufi.orig_map->pmap, currva, &pa) ==
-                            TRUE) {
-                                pages[lcv] = PGO_DONTCARE;
-                                continue;
-                        }
+                if (lcv != centeridx &&
+                    pmap_extract(ufi.orig_map->pmap, currva, &pa)) {
+                        pages[lcv] = PGO_DONTCARE;
+                        continue;
                 }

                 /*
@@ -850,11 +843,13 @@ ReFault:
                             " MAPPING: n anon: pm=0x%x, va=0x%x, pg=0x%x",
                             ufi.orig_map->pmap, currva, anon->u.an_page, 0);
                         uvmexp.fltnamap++;
+
                         /*
                          * Since this isn't the page that's actually faulting,
                          * ignore pmap_enter() failures; it's not critical
                          * that we enter these right now.
                          */
+
                         (void) pmap_enter(ufi.orig_map->pmap, currva,
                             VM_PAGE_TO_PHYS(anon->u.an_page),
                             (anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
@@ -945,29 +940,22 @@ ReFault:
                             pages[lcv] == PGO_DONTCARE)
                                 continue;

-#ifdef DIAGNOSTIC
-                        /*
-                         * pager sanity check: pgo_get with
-                         * PGO_LOCKED should never return a
-                         * released page to us.
-                         */
-                        if (pages[lcv]->flags & PG_RELEASED)
-                                panic("uvm_fault: pgo_get PGO_LOCKED gave us a RELEASED page");
-#endif
+                        KASSERT((pages[lcv]->flags & PG_RELEASED) == 0);

-                        /*
-                         * if center page is resident and not
-                         * PG_BUSY|PG_RELEASED then pgo_get
-                         * made it PG_BUSY for us and gave
-                         * us a handle to it. remember this
-                         * page as "uobjpage." (for later use).
-                         */
-
-                        if (lcv == centeridx) {
-                                uobjpage = pages[lcv];
-                                UVMHIST_LOG(maphist, " got uobjpage (0x%x) with locked get",
+                        /*
+                         * if center page is resident and not
+                         * PG_BUSY|PG_RELEASED then pgo_get
+                         * made it PG_BUSY for us and gave
+                         * us a handle to it. remember this
+                         * page as "uobjpage." (for later use).
+                         */
+
+                        if (lcv == centeridx) {
+                                uobjpage = pages[lcv];
+                                UVMHIST_LOG(maphist, " got uobjpage "
+                                    "(0x%x) with locked get",
                                         uobjpage, 0,0,0);
-                                continue;
+                                continue;
                         }

                         /*
@@ -1003,18 +991,14 @@ ReFault:
                          * because we've held the lock the whole time
                          * we've had the handle.
                          */
+
                         pages[lcv]->flags &= ~(PG_BUSY);        /* un-busy! */
                         UVM_PAGE_OWN(pages[lcv], NULL);
-
-                        /* done! */
                 }       /* for "lcv" loop */
         }       /* "gotpages" != 0 */
-        /* note: object still _locked_ */
         } else {
-
                 uobjpage = NULL;
-
         }

         /* locked (shadowed): maps(read), amap */
@@ -1104,6 +1088,7 @@ ReFault:
         /*
         * special handling for loaned pages
         */
+
         if (anon->u.an_page->loan_count) {

                 if ((access_type & VM_PROT_WRITE) == 0) {
@@ -1197,21 +1182,13 @@ ReFault:
                 anon = uvm_analloc();
                 if (anon)
                         pg = uvm_pagealloc(NULL, 0, anon, 0);
-#ifdef __GNUC__
-                else
-                        pg = NULL;      /* XXX: gcc */
-#endif

                 /* check for out of RAM */
                 if (anon == NULL || pg == NULL) {
                         if (anon)
                                 uvm_anfree(anon);
                         uvmfault_unlockall(&ufi, amap, uobj, oanon);
-#ifdef DIAGNOSTIC
-                        if (uvmexp.swpgonly > uvmexp.swpages) {
-                                panic("uvmexp.swpgonly botch");
-                        }
-#endif
+                        KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
                         if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
                                 UVMHIST_LOG(maphist,
                                     "<- failed. out of VM",0,0,0,0);
@@ -1242,7 +1219,7 @@ ReFault:
                  */

         } else {
-
+
                 uvmexp.flt_anon++;
                 oanon = anon;           /* old, locked anon is same as anon */
                 pg = anon->u.an_page;
@@ -1251,7 +1228,7 @@ ReFault:

         }

-        /* locked: maps(read), amap, anon */
+        /* locked: maps(read), amap, oanon */

         /*
         * now map the page in ...
@@ -1273,10 +1250,7 @@ ReFault:
                  * as the map may change while we're asleep.
                  */
                 uvmfault_unlockall(&ufi, amap, uobj, oanon);
-#ifdef DIAGNOSTIC
-                if (uvmexp.swpgonly > uvmexp.swpages)
-                        panic("uvmexp.swpgonly botch");
-#endif
+                KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
                 if (uvmexp.swpgonly == uvmexp.swpages) {
                         UVMHIST_LOG(maphist, "<- failed. out of VM",0,0,0,0);
@@ -1342,7 +1316,7 @@ Case2:
                 uobjpage = PGO_DONTCARE;
                 promote = TRUE;         /* always need anon here */
         } else {
-                /* assert(uobjpage != PGO_DONTCARE) */
+                KASSERT(uobjpage != PGO_DONTCARE);
                 promote = (access_type & VM_PROT_WRITE) &&
                     UVM_ET_ISCOPYONWRITE(ufi.entry);
         }
@@ -1371,24 +1345,19 @@ Case2:

                 uvmexp.fltget++;
                 gotpages = 1;
-                result = uobj->pgops->pgo_get(uobj,
-                    (ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset,
-                    &uobjpage, &gotpages, 0,
-                    access_type & MASK(ufi.entry),
-                    ufi.entry->advice, 0);
+                uoff = (ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset;
+                result = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages,
+                    0, access_type & MASK(ufi.entry), ufi.entry->advice,
+                    PGO_SYNCIO);

                 /* locked: uobjpage(if result OK) */
-
+
                 /*
                  * recover from I/O
                  */

                 if (result != VM_PAGER_OK) {
-#ifdef DIAGNOSTIC
-                        if (result == VM_PAGER_PEND)
-                                panic("uvm_fault: pgo_get got PENDing "
-                                    "on non-async I/O");
-#endif
+                        KASSERT(result != VM_PAGER_PEND);

                         if (result == VM_PAGER_AGAIN) {
                                 UVMHIST_LOG(maphist,
@@ -1447,11 +1416,8 @@ Case2:
                         if (uobjpage->flags & PG_RELEASED) {
                                 uvmexp.fltpgrele++;
-#ifdef DIAGNOSTIC
-                                if (uobj->pgops->pgo_releasepg == NULL)
-                                        panic("uvm_fault: object has no "
-                                            "releasepg function");
-#endif
+                                KASSERT(uobj->pgops->pgo_releasepg != NULL);
+
                                 /* frees page */
                                 if (uobj->pgops->pgo_releasepg(uobjpage,NULL))
                                         /* unlock if still alive */
@@ -1478,7 +1444,6 @@ Case2:
                  */

                 /* locked: maps(read), amap(if !null), uobj, uobjpage */
-
         }

         /*
@@ -1615,10 +1580,6 @@ Case2:
                         pg = uvm_pagealloc(NULL, 0, anon,
                             (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0);
                 }
-#ifdef __GNUC__
-                else
-                        pg = NULL;      /* XXX: gcc */
-#endif

                 /*
                  * out of memory resources?
@@ -1634,21 +1595,15 @@ Case2:
                                         wakeup(uobjpage);

                                 uvm_lock_pageq();
-                                /* make sure it is in queues */
                                 uvm_pageactivate(uobjpage);
                                 uvm_unlock_pageq();
-                                /* un-busy! (still locked) */
                                 uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
                                 UVM_PAGE_OWN(uobjpage, NULL);
                         }

                         /* unlock and fail ... */
                         uvmfault_unlockall(&ufi, amap, uobj, NULL);
-#ifdef DIAGNOSTIC
-                        if (uvmexp.swpgonly > uvmexp.swpages) {
-                                panic("uvmexp.swpgonly botch");
-                        }
-#endif
+                        KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
                         if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
                                 UVMHIST_LOG(maphist, " promote: out of VM",
                                     0,0,0,0);
@@ -1683,8 +1638,8 @@ Case2:

                         /*
                          * dispose of uobjpage. it can't be PG_RELEASED
-                         * since we still hold the object lock. drop
-                         * handle to uobj as well.
+                         * since we still hold the object lock.
+                         * drop handle to uobj as well.
                          */

                         if (uobjpage->flags & PG_WANTED)
@@ -1693,10 +1648,11 @@ Case2:
                         uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
                         UVM_PAGE_OWN(uobjpage, NULL);
                         uvm_lock_pageq();
-                        uvm_pageactivate(uobjpage);     /* put it back */
+                        uvm_pageactivate(uobjpage);
                         uvm_unlock_pageq();
                         simple_unlock(&uobj->vmobjlock);
                         uobj = NULL;
+
                         UVMHIST_LOG(maphist,
                             " promote uobjpage 0x%x to anon/page 0x%x/0x%x",
                             uobjpage, anon, pg, 0);
@@ -1741,6 +1697,7 @@ Case2:
                  * We do, however, have to go through the ReFault path,
                  * as the map may change while we're asleep.
                  */
+
                 if (pg->flags & PG_WANTED)
                         wakeup(pg);     /* lock still held */

@@ -1752,10 +1709,7 @@ Case2:
                 pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
                 UVM_PAGE_OWN(pg, NULL);
                 uvmfault_unlockall(&ufi, amap, uobj, NULL);
-#ifdef DIAGNOSTIC
-                if (uvmexp.swpgonly > uvmexp.swpages)
-                        panic("uvmexp.swpgonly botch");
-#endif
+                KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
                 if (uvmexp.swpgonly == uvmexp.swpages) {
                         UVMHIST_LOG(maphist, "<- failed. out of VM",0,0,0,0);
@@ -1787,7 +1741,6 @@ Case2:
                 /* activate it */
                 uvm_pageactivate(pg);
         }
-
         uvm_unlock_pageq();

         if (pg->flags & PG_WANTED)
@@ -1879,10 +1832,7 @@ uvm_fault_unwire_locked(map, start, end)
         paddr_t pa;
         struct vm_page *pg;

-#ifdef DIAGNOSTIC
-        if (map->flags & VM_MAP_INTRSAFE)
-                panic("uvm_fault_unwire_locked: intrsafe map");
-#endif
+        KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

         /*
         * we assume that the area we are unwiring has actually been wired
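
Most of the churn in this diff is one repeated transformation: #ifdef DIAGNOSTIC blocks that open-code a sanity check and call panic() are collapsed into a single KASSERT(). The sketch below is illustrative only, not taken from the commit; it builds in userland, so the panic() and KASSERT() definitions and the check_result_*() helpers are stand-ins of my own, not the kernel's (in the real tree KASSERT() comes from the kernel headers and is itself typically compiled out of non-DIAGNOSTIC kernels).

#include <stdio.h>
#include <stdlib.h>

/* stand-in for the kernel's panic(9) */
static void
panic(const char *msg)
{
	fprintf(stderr, "panic: %s\n", msg);
	abort();
}

/* stand-in for the kernel's KASSERT(9): panic when the condition is false */
#define KASSERT(cond)							\
	do {								\
		if (!(cond))						\
			panic("kernel assertion \"" #cond "\" failed");	\
	} while (0)

/* hypothetical pager return codes, standing in for VM_PAGER_* */
enum { PAGER_OK, PAGER_PEND, PAGER_ERROR };

/* old style: the check is open-coded and exists only in DIAGNOSTIC builds */
static void
check_result_old(int result)
{
#ifdef DIAGNOSTIC
	if (result == PAGER_PEND)
		panic("got PENDING for non-async I/O");
#endif
	(void)result;
}

/* new style: the same check as a one-line assertion */
static void
check_result_new(int result)
{
	KASSERT(result != PAGER_PEND);
}

int
main(void)
{
	check_result_old(PAGER_OK);	/* passes silently */
	check_result_new(PAGER_OK);	/* passes silently */
	check_result_new(PAGER_PEND);	/* prints the panic message and aborts */
	return 0;
}

The net effect is the one visible in the diffstat: the assertion condition is stated once, positively, and the surrounding #ifdef/panic scaffolding disappears, which is where most of the 102 deleted lines come from.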