author:    Owain Ainsworth <oga@cvs.openbsd.org>  2011-07-03 18:34:15 +0000
committer: Owain Ainsworth <oga@cvs.openbsd.org>  2011-07-03 18:34:15 +0000
commit:    9ab2af9380379bf897461fdb1e4d46997af234e1 (patch)
tree:      2a99473ad5967b9a05b9c5e2e2925f2c84dcb14f /sys/uvm
parent:    298c7a6943ee533ae40ca5edeea32ed1ce315d2b (diff)
Rip out and burn support for UVM_HIST.
The vm hackers don't use it, don't maintain it and have to look at it all the
time. About time this 800 lines of code hit /dev/null.
"never liked it" tedu@. ariane@ was very happy when i told her i wrote
this diff.
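For readers unfamiliar with what is being deleted: UVMHIST recorded per-call trace events in fixed-size static ring buffers (note the `maphistbuf[100]`/`pdhistbuf[100]` arrays removed from `uvm_map_init()` below), and every instrumented function carried a `UVMHIST_FUNC`/`UVMHIST_CALLED` preamble plus `UVMHIST_LOG` calls that compiled away when the option was disabled. The standalone C sketch below illustrates that general compile-time ring-buffer pattern; it is illustrative only — the names `HIST_LOG`, `hist_dump`, and the entry layout are invented for this example and are not the kernel's actual UVMHIST definitions.

```c
/* hist.c -- build with: cc -DHIST hist.c (tracing on) or cc hist.c (off) */
#include <stdio.h>

#ifdef HIST
struct hist_ent {
	const char	*fn;	/* name of the logging function */
	const char	*fmt;	/* printf-style format string */
	unsigned long	 v[4];	/* fixed four argument words, UVMHIST-style */
};

#define HIST_SIZE	100
static struct hist_ent	hist_buf[HIST_SIZE];	/* static ring buffer */
static unsigned int	hist_cnt;

#define HIST_FUNC(name)	static const char *hist_fn = (name)
#define HIST_LOG(f, a, b, c, d) do {					\
	struct hist_ent *e = &hist_buf[hist_cnt++ % HIST_SIZE];		\
	e->fn = hist_fn; e->fmt = (f);					\
	e->v[0] = (unsigned long)(a); e->v[1] = (unsigned long)(b);	\
	e->v[2] = (unsigned long)(c); e->v[3] = (unsigned long)(d);	\
} while (0)

/* dump the recorded events, oldest first while the buffer hasn't wrapped */
static void
hist_dump(void)
{
	unsigned int i, n = hist_cnt < HIST_SIZE ? hist_cnt : HIST_SIZE;

	for (i = 0; i < n; i++) {
		printf("%s: ", hist_buf[i].fn);
		/* extra printf args beyond the format are ignored, per C */
		printf(hist_buf[i].fmt, hist_buf[i].v[0], hist_buf[i].v[1],
		    hist_buf[i].v[2], hist_buf[i].v[3]);
		putchar('\n');
	}
}
#else
/* with history disabled, every call site compiles to nothing */
#define HIST_FUNC(name)
#define HIST_LOG(f, a, b, c, d)	do { } while (0)
static void hist_dump(void) { }
#endif

/* hypothetical instrumented function, mirroring the deleted call pattern */
static unsigned long
alloc_thing(unsigned long sz)
{
	HIST_FUNC("alloc_thing");

	HIST_LOG("<- done, sz=%lu", sz, 0, 0, 0);
	return sz;
}

int
main(void)
{
	alloc_thing(4096);
	alloc_thing(8192);
	hist_dump();
	return 0;
}
```

The design point the commit message is making is visible here: even when compiled out, the macros leave a `HIST_FUNC`/`HIST_LOG` line in nearly every function, which is the clutter being sent to /dev/null.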
Diffstat (limited to 'sys/uvm')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | sys/uvm/uvm.h | 11 |
| -rw-r--r-- | sys/uvm/uvm_amap.c | 66 |
| -rw-r--r-- | sys/uvm/uvm_anon.c | 13 |
| -rw-r--r-- | sys/uvm/uvm_aobj.c | 39 |
| -rw-r--r-- | sys/uvm/uvm_device.c | 19 |
| -rw-r--r-- | sys/uvm/uvm_fault.c | 84 |
| -rw-r--r-- | sys/uvm/uvm_km.c | 24 |
| -rw-r--r-- | sys/uvm/uvm_map.c | 139 |
| -rw-r--r-- | sys/uvm/uvm_page.c | 32 |
| -rw-r--r-- | sys/uvm/uvm_pager.c | 27 |
| -rw-r--r-- | sys/uvm/uvm_pdaemon.c | 25 |
| -rw-r--r-- | sys/uvm/uvm_stat.c | 124 |
| -rw-r--r-- | sys/uvm/uvm_stat.h | 154 |
| -rw-r--r-- | sys/uvm/uvm_swap.c | 74 |
| -rw-r--r-- | sys/uvm/uvm_vnode.c | 57 |
15 files changed, 24 insertions, 864 deletions
diff --git a/sys/uvm/uvm.h b/sys/uvm/uvm.h index 77dadbc2726..c33fc983c38 100644 --- a/sys/uvm/uvm.h +++ b/sys/uvm/uvm.h @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm.h,v 1.43 2011/05/30 22:25:24 oga Exp $ */ +/* $OpenBSD: uvm.h,v 1.44 2011/07/03 18:34:14 oga Exp $ */ /* $NetBSD: uvm.h,v 1.24 2000/11/27 08:40:02 chs Exp $ */ /* @@ -155,15 +155,6 @@ struct uvm { extern struct uvm uvm; /* - * historys - */ -#ifdef UVMHIST -extern UVMHIST_DECL(maphist); -extern UVMHIST_DECL(pdhist); -extern UVMHIST_DECL(pghist); -#endif - -/* * UVM_UNLOCK_AND_WAIT: atomic unlock+wait... wrapper around the * interlocked tsleep() function. */ diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c index 2ba81cd27b8..9236427a12d 100644 --- a/sys/uvm/uvm_amap.c +++ b/sys/uvm/uvm_amap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_amap.c,v 1.45 2010/07/03 03:04:55 tedu Exp $ */ +/* $OpenBSD: uvm_amap.c,v 1.46 2011/07/03 18:34:14 oga Exp $ */ /* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */ /* @@ -231,7 +231,6 @@ amap_alloc(vaddr_t sz, vaddr_t padsz, int waitf) { struct vm_amap *amap; int slots, padslots; - UVMHIST_FUNC("amap_alloc"); UVMHIST_CALLED(maphist); AMAP_B2SLOT(slots, sz); /* load slots */ AMAP_B2SLOT(padslots, padsz); @@ -243,7 +242,6 @@ amap_alloc(vaddr_t sz, vaddr_t padsz, int waitf) amap_list_insert(amap); } - UVMHIST_LOG(maphist,"<- done, amap = %p, sz=%lu", amap, sz, 0, 0); return(amap); } @@ -257,7 +255,6 @@ amap_alloc(vaddr_t sz, vaddr_t padsz, int waitf) void amap_free(struct vm_amap *amap) { - UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist); KASSERT(amap->am_ref == 0 && amap->am_nused == 0); KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0); @@ -269,7 +266,6 @@ amap_free(struct vm_amap *amap) #endif pool_put(&uvm_amap_pool, amap); - UVMHIST_LOG(maphist,"<- done, freed amap = %p", amap, 0, 0, 0); } /* @@ -294,9 +290,6 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize) u_int *newsl, *newbck, *oldsl, *oldbck; struct vm_anon **newover, **oldover; int slotadded; - UVMHIST_FUNC("amap_extend"); UVMHIST_CALLED(maphist); - - UVMHIST_LOG(maphist, " (entry=%p, addsize=%lu)", entry, addsize, 0, 0); /* * first, determine how many slots we need in the amap. don't @@ -320,8 +313,6 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize) amap_pp_adjref(amap, slotoff + slotmapped, slotadd, 1); } #endif - UVMHIST_LOG(maphist,"<- done (case 1), amap = %p, sltneed=%ld", - amap, slotneed, 0, 0); return (0); } @@ -347,8 +338,6 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize) * no need to zero am_anon since that was done at * alloc time and we never shrink an allocation. */ - UVMHIST_LOG(maphist,"<- done (case 2), amap = %p, slotneed=%ld", - amap, slotneed, 0, 0); return (0); } @@ -440,8 +429,6 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize) if (oldppref && oldppref != PPREF_NONE) free(oldppref, M_UVMAMAP); #endif - UVMHIST_LOG(maphist,"<- done (case 3), amap = %p, slotneed=%ld", - amap, slotneed, 0, 0); return (0); } @@ -503,8 +490,6 @@ amap_wipeout(struct vm_amap *amap) { int lcv, slot; struct vm_anon *anon; - UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(amap=%p)", amap, 0,0,0); KASSERT(amap->am_ref == 0); @@ -527,9 +512,6 @@ amap_wipeout(struct vm_amap *amap) simple_lock(&anon->an_lock); /* lock anon */ - UVMHIST_LOG(maphist," processing anon %p, ref=%ld", anon, - anon->an_ref, 0, 0); - refs = --anon->an_ref; simple_unlock(&anon->an_lock); if (refs == 0) { @@ -547,7 +529,6 @@ amap_wipeout(struct vm_amap *amap) amap->am_ref = 0; /* ... 
was one */ amap->am_nused = 0; amap_free(amap); /* will unlock and free amap */ - UVMHIST_LOG(maphist,"<- done!", 0,0,0,0); } /* @@ -571,9 +552,6 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf, struct vm_amap *amap, *srcamap; int slots, lcv; vaddr_t chunksize; - UVMHIST_FUNC("amap_copy"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, " (map=%p, entry=%p, waitf=%ld)", - map, entry, waitf, 0); /* * is there a map to copy? if not, create one from scratch. @@ -593,17 +571,12 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf, chunksize = UVM_AMAP_CHUNK << PAGE_SHIFT; startva = (startva / chunksize) * chunksize; endva = roundup(endva, chunksize); - UVMHIST_LOG(maphist, " chunk amap ==> clip " - "0x%lx->0x%lx to 0x%lx->0x%lx", - entry->start, entry->end, startva, endva); UVM_MAP_CLIP_START(map, entry, startva); /* watch out for endva wrap-around! */ if (endva >= startva) UVM_MAP_CLIP_END(map, entry, endva); } - UVMHIST_LOG(maphist, "<- done [creating new amap 0x%lx->0x%lx]", - entry->start, entry->end, 0, 0); entry->aref.ar_pageoff = 0; entry->aref.ar_amap = amap_alloc(entry->end - entry->start, 0, waitf); @@ -624,8 +597,6 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf, if (entry->aref.ar_amap->am_ref == 1) { entry->etype &= ~UVM_ET_NEEDSCOPY; - UVMHIST_LOG(maphist, "<- done [ref cnt = 1, took it over]", - 0, 0, 0, 0); return; } @@ -633,14 +604,10 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf, * looks like we need to copy the map. */ - UVMHIST_LOG(maphist," amap=%p, ref=%ld, must copy it", - entry->aref.ar_amap, entry->aref.ar_amap->am_ref, 0, 0); AMAP_B2SLOT(slots, entry->end - entry->start); amap = amap_alloc1(slots, 0, waitf); - if (amap == NULL) { - UVMHIST_LOG(maphist, " amap_alloc1 failed", 0,0,0,0); + if (amap == NULL) return; - } srcamap = entry->aref.ar_amap; /* @@ -662,7 +629,6 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf, * we must copy it now. */ - UVMHIST_LOG(maphist, " copying amap now",0, 0, 0, 0); for (lcv = 0 ; lcv < slots; lcv++) { amap->am_anon[lcv] = srcamap->am_anon[entry->aref.ar_pageoff + lcv]; @@ -704,11 +670,6 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf, entry->etype &= ~UVM_ET_NEEDSCOPY; amap_list_insert(amap); - - /* - * done! 
- */ - UVMHIST_LOG(maphist, "<- done",0, 0, 0, 0); } /* @@ -1140,7 +1101,6 @@ amap_lookup(struct vm_aref *aref, vaddr_t offset) { int slot; struct vm_amap *amap = aref->ar_amap; - UVMHIST_FUNC("amap_lookup"); UVMHIST_CALLED(maphist); AMAP_B2SLOT(slot, offset); slot += aref->ar_pageoff; @@ -1148,8 +1108,6 @@ amap_lookup(struct vm_aref *aref, vaddr_t offset) if (slot >= amap->am_nslot) panic("amap_lookup: offset out of range"); - UVMHIST_LOG(maphist, "<- done (amap=%p, offset=0x%lx, result=%p)", - amap, offset, amap->am_anon[slot], 0); return(amap->am_anon[slot]); } @@ -1165,20 +1123,15 @@ amap_lookups(struct vm_aref *aref, vaddr_t offset, { int slot; struct vm_amap *amap = aref->ar_amap; - UVMHIST_FUNC("amap_lookups"); UVMHIST_CALLED(maphist); AMAP_B2SLOT(slot, offset); slot += aref->ar_pageoff; - UVMHIST_LOG(maphist, " slot=%ld, npages=%ld, nslot=%ld", slot, npages, - amap->am_nslot, 0); - if ((slot + (npages - 1)) >= amap->am_nslot) panic("amap_lookups: offset out of range"); memcpy(anons, &amap->am_anon[slot], npages * sizeof(struct vm_anon *)); - UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0); return; } @@ -1196,7 +1149,6 @@ amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon, { int slot; struct vm_amap *amap = aref->ar_amap; - UVMHIST_FUNC("amap_add"); UVMHIST_CALLED(maphist); AMAP_B2SLOT(slot, offset); slot += aref->ar_pageoff; @@ -1225,9 +1177,6 @@ amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon, amap->am_nused++; } amap->am_anon[slot] = anon; - UVMHIST_LOG(maphist, - "<- done (amap=%p, offset=0x%lx, anon=%p, rep=%ld)", - amap, offset, anon, replace); } /* @@ -1240,7 +1189,6 @@ amap_unadd(struct vm_aref *aref, vaddr_t offset) { int ptr, slot; struct vm_amap *amap = aref->ar_amap; - UVMHIST_FUNC("amap_unadd"); UVMHIST_CALLED(maphist); AMAP_B2SLOT(slot, offset); slot += aref->ar_pageoff; @@ -1259,7 +1207,6 @@ amap_unadd(struct vm_aref *aref, vaddr_t offset) amap->am_bckptr[amap->am_slots[ptr]] = ptr; /* back link */ } amap->am_nused--; - UVMHIST_LOG(maphist, "<- done (amap=%p, slot=%ld)", amap, slot,0, 0); } /* @@ -1272,7 +1219,6 @@ amap_unadd(struct vm_aref *aref, vaddr_t offset) void amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags) { - UVMHIST_FUNC("amap_ref"); UVMHIST_CALLED(maphist); amap->am_ref++; if (flags & AMAP_SHARED) @@ -1288,7 +1234,6 @@ amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags) amap_pp_adjref(amap, offset, len, 1); } #endif - UVMHIST_LOG(maphist,"<- done! amap=%p", amap, 0, 0, 0); } /* @@ -1303,10 +1248,6 @@ amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags) void amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, boolean_t all) { - UVMHIST_FUNC("amap_unref"); UVMHIST_CALLED(maphist); - - UVMHIST_LOG(maphist," amap=%p refs=%ld, nused=%ld", - amap, amap->am_ref, amap->am_nused, 0); /* * if we are the last reference, free the amap and return. 
@@ -1314,7 +1255,6 @@ amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, boolean_t all) if (amap->am_ref-- == 1) { amap_wipeout(amap); /* drops final ref and frees */ - UVMHIST_LOG(maphist,"<- done (was last ref)!", 0, 0, 0, 0); return; /* no need to unlock */ } @@ -1333,6 +1273,4 @@ amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, boolean_t all) amap_pp_adjref(amap, offset, len, -1); } #endif - - UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0); } diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c index 703822e55af..fb2b01d27f3 100644 --- a/sys/uvm/uvm_anon.c +++ b/sys/uvm/uvm_anon.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_anon.c,v 1.34 2009/06/16 23:54:57 oga Exp $ */ +/* $OpenBSD: uvm_anon.c,v 1.35 2011/07/03 18:34:14 oga Exp $ */ /* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */ /* @@ -91,8 +91,6 @@ void uvm_anfree(struct vm_anon *anon) { struct vm_page *pg; - UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(anon=%p)", anon, 0,0,0); /* * get page @@ -142,17 +140,12 @@ uvm_anfree(struct vm_anon *anon) if ((pg->pg_flags & PG_BUSY) != 0) { /* tell them to dump it when done */ atomic_setbits_int(&pg->pg_flags, PG_RELEASED); - UVMHIST_LOG(maphist, - " anon %p, page %p: BUSY (released!)", - anon, pg, 0, 0); return; } pmap_page_protect(pg, VM_PROT_NONE); uvm_lock_pageq(); /* lock out pagedaemon */ uvm_pagefree(pg); /* bye bye */ uvm_unlock_pageq(); /* free the daemon */ - UVMHIST_LOG(maphist,"anon %p, page %p: freed now!", - anon, pg, 0, 0); } } if (pg == NULL && anon->an_swslot != 0) { @@ -176,7 +169,6 @@ uvm_anfree(struct vm_anon *anon) KASSERT(anon->an_swslot == 0); pool_put(&uvm_anon_pool, anon); - UVMHIST_LOG(maphist,"<- done!",0,0,0,0); } /* @@ -187,13 +179,10 @@ uvm_anfree(struct vm_anon *anon) void uvm_anon_dropswap(struct vm_anon *anon) { - UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist); if (anon->an_swslot == 0) return; - UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%lx", - anon, anon->an_swslot, 0, 0); uvm_swap_free(anon->an_swslot, 1); anon->an_swslot = 0; } diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c index 527d1a27328..07bc46d397a 100644 --- a/sys/uvm/uvm_aobj.c +++ b/sys/uvm/uvm_aobj.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_aobj.c,v 1.53 2011/05/10 21:48:17 oga Exp $ */ +/* $OpenBSD: uvm_aobj.c,v 1.54 2011/07/03 18:34:14 oga Exp $ */ /* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */ /* @@ -306,9 +306,6 @@ uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot) { struct uvm_aobj *aobj = (struct uvm_aobj *)uobj; int oldslot; - UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist); - UVMHIST_LOG(pdhist, "aobj %p pageidx %ld slot %ld", - aobj, pageidx, slot, 0); /* * if noswap flag is set, then we can't set a slot @@ -594,7 +591,6 @@ uao_reference(struct uvm_object *uobj) void uao_reference_locked(struct uvm_object *uobj) { - UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist); /* * kernel_object already has plenty of references, leave it alone. @@ -604,8 +600,6 @@ uao_reference_locked(struct uvm_object *uobj) return; uobj->uo_refs++; /* bump! */ - UVMHIST_LOG(maphist, "<- done (uobj=%p, ref = %ld)", - uobj, uobj->uo_refs,0,0); } @@ -636,7 +630,6 @@ uao_detach_locked(struct uvm_object *uobj) { struct uvm_aobj *aobj = (struct uvm_aobj *)uobj; struct vm_page *pg; - UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist); /* * detaching from kernel_object is a noop. 
@@ -646,11 +639,9 @@ uao_detach_locked(struct uvm_object *uobj) return; } - UVMHIST_LOG(maphist," (uobj=%p) ref=%ld", uobj,uobj->uo_refs,0,0); uobj->uo_refs--; /* drop ref! */ if (uobj->uo_refs) { /* still more refs? */ simple_unlock(&uobj->vmobjlock); - UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0); return; } @@ -718,7 +709,6 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags) struct uvm_aobj *aobj = (struct uvm_aobj *) uobj; struct vm_page *pp; voff_t curoff; - UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist); if (flags & PGO_ALLPAGES) { start = 0; @@ -733,18 +723,12 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags) } } - UVMHIST_LOG(maphist, " flush start=0x%lx, stop=0x%lx, flags=0x%lx", - (u_long)start, (u_long)stop, flags, 0); - /* * Don't need to do any work here if we're not freeing * or deactivating pages. */ - if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) { - UVMHIST_LOG(maphist, - "<- done (no work to do)",0,0,0,0); + if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) return (TRUE); - } /* locked: uobj */ curoff = start; @@ -824,8 +808,6 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags) } } - UVMHIST_LOG(maphist, - "<- done, rv=TRUE",0,0,0,0); return (TRUE); } @@ -857,10 +839,6 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps, vm_page_t ptmp; int lcv, gotpages, maxpages, swslot, rv, pageidx; boolean_t done; - UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist); - - UVMHIST_LOG(pdhist, "aobj=%p offset=%ld, flags=%ld", - aobj, (u_long)offset, flags,0); /* * get number of pages @@ -936,8 +914,6 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps, * to unlock and do some waiting or I/O. */ - UVMHIST_LOG(pdhist, "<- done (done=%ld)", done, 0,0,0); - *npagesp = gotpages; if (done) /* bingo! */ @@ -996,8 +972,6 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps, /* out of RAM? */ if (ptmp == NULL) { simple_unlock(&uobj->vmobjlock); - UVMHIST_LOG(pdhist, - "sleeping, ptmp == NULL\n",0,0,0,0); uvm_wait("uao_getpage"); simple_lock(&uobj->vmobjlock); /* goto top of pps while loop */ @@ -1020,9 +994,6 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps, /* page is there, see if we need to wait on it */ if ((ptmp->pg_flags & PG_BUSY) != 0) { atomic_setbits_int(&ptmp->pg_flags, PG_WANTED); - UVMHIST_LOG(pdhist, - "sleeping, ptmp->flags 0x%lx\n", - ptmp->pg_flags,0,0,0); UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, FALSE, "uao_get", 0); simple_lock(&uobj->vmobjlock); @@ -1063,9 +1034,6 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps, */ uvm_pagezero(ptmp); } else { - UVMHIST_LOG(pdhist, "pagein from swslot %ld", - swslot, 0,0,0); - /* * page in the swapped-out page. * unlock object for i/o, relock when done. 
@@ -1079,8 +1047,6 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps, */ if (rv != VM_PAGER_OK) { - UVMHIST_LOG(pdhist, "<- done (error=%ld)", - rv,0,0,0); if (ptmp->pg_flags & PG_WANTED) wakeup(ptmp); @@ -1129,7 +1095,6 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps, */ simple_unlock(&uobj->vmobjlock); - UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0); return(VM_PAGER_OK); } diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c index 6dc97150008..2cb3aa00c3f 100644 --- a/sys/uvm/uvm_device.c +++ b/sys/uvm/uvm_device.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_device.c,v 1.39 2010/12/26 15:41:00 miod Exp $ */ +/* $OpenBSD: uvm_device.c,v 1.40 2011/07/03 18:34:14 oga Exp $ */ /* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */ /* @@ -105,9 +105,6 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size) dev_t device = *((dev_t *)arg); struct uvm_device *udv, *lcv; paddr_t (*mapfn)(dev_t, off_t, int); - UVMHIST_FUNC("udv_attach"); UVMHIST_CALLED(maphist); - - UVMHIST_LOG(maphist, "(device=0x%lx)", device,0,0,0); /* * before we do anything, ensure this device supports mmap @@ -253,12 +250,9 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size) static void udv_reference(struct uvm_object *uobj) { - UVMHIST_FUNC("udv_reference"); UVMHIST_CALLED(maphist); simple_lock(&uobj->vmobjlock); uobj->uo_refs++; - UVMHIST_LOG(maphist, "<- done (uobj=%p, ref = %ld)", - uobj, uobj->uo_refs,0,0); simple_unlock(&uobj->vmobjlock); } @@ -274,7 +268,6 @@ static void udv_detach(struct uvm_object *uobj) { struct uvm_device *udv = (struct uvm_device *)uobj; - UVMHIST_FUNC("udv_detach"); UVMHIST_CALLED(maphist); /* * loop until done @@ -284,8 +277,6 @@ again: if (uobj->uo_refs > 1) { uobj->uo_refs--; simple_unlock(&uobj->vmobjlock); - UVMHIST_LOG(maphist," <- done, uobj=%p, ref=%ld", - uobj,uobj->uo_refs,0,0); return; } KASSERT(uobj->uo_npages == 0 && RB_EMPTY(&uobj->memt)); @@ -316,7 +307,6 @@ again: mtx_leave(&udv_lock); simple_unlock(&uobj->vmobjlock); free(udv, M_TEMP); - UVMHIST_LOG(maphist," <- done, freed uobj=%p", uobj,0,0,0); } @@ -363,8 +353,6 @@ udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, int npages, dev_t device; paddr_t (*mapfn)(dev_t, off_t, int); vm_prot_t mapprot; - UVMHIST_FUNC("udv_fault"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist," flags=%ld", flags,0,0,0); /* * we do not allow device mappings to be mapped copy-on-write @@ -372,8 +360,6 @@ udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, int npages, */ if (UVM_ET_ISCOPYONWRITE(entry)) { - UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%lx)", - entry->etype, 0,0,0); uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL); return(VM_PAGER_ERROR); } @@ -416,9 +402,6 @@ udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, int npages, break; } mapprot = ufi->entry->protection; - UVMHIST_LOG(maphist, - " MAPPING: device: pm=%p, va=0x%lx, pa=0x%lx, at=%ld", - ufi->orig_map->pmap, curr_va, (u_long)paddr, mapprot); if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot, PMAP_CANFAIL | mapprot) != 0) { /* diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c index 6ea81b902b4..b699bba34c5 100644 --- a/sys/uvm/uvm_fault.c +++ b/sys/uvm/uvm_fault.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_fault.c,v 1.61 2011/06/23 21:52:42 oga Exp $ */ +/* $OpenBSD: uvm_fault.c,v 1.62 2011/07/03 18:34:14 oga Exp $ */ /* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */ /* @@ -297,7 +297,6 @@ 
uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap, boolean_t locked; /* did we relock? */ struct vm_page *pg; int result; - UVMHIST_FUNC("uvmfault_anonget"); UVMHIST_CALLED(maphist); result = 0; /* XXX shut up gcc */ uvmexp.fltanget++; @@ -339,7 +338,6 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap, */ if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0) { - UVMHIST_LOG(maphist, "<- OK",0,0,0,0); return (VM_PAGER_OK); } atomic_setbits_int(&pg->pg_flags, PG_WANTED); @@ -351,16 +349,12 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap, */ if (pg->uobject) { /* owner is uobject ? */ uvmfault_unlockall(ufi, amap, NULL, anon); - UVMHIST_LOG(maphist, " unlock+wait on uobj",0, - 0,0,0); UVM_UNLOCK_AND_WAIT(pg, &pg->uobject->vmobjlock, FALSE, "anonget1",0); } else { /* anon owns page */ uvmfault_unlockall(ufi, amap, NULL, NULL); - UVMHIST_LOG(maphist, " unlock+wait on anon",0, - 0,0,0); UVM_UNLOCK_AND_WAIT(pg,&anon->an_lock,0, "anonget2",0); } @@ -377,8 +371,6 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap, uvmfault_unlockall(ufi, amap, NULL, anon); uvmexp.fltnoram++; - UVMHIST_LOG(maphist, " noram -- UVM_WAIT",0, - 0,0,0); uvm_wait("flt_noram1"); /* ready to relock and try again */ @@ -450,7 +442,6 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap, uvmfault_unlockall(ufi, amap, NULL, NULL); uvmexp.fltpgrele++; - UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0); return (VM_PAGER_REFAULT); /* refault! */ } @@ -483,7 +474,6 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap, anon); else simple_unlock(&anon->an_lock); - UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0); return (VM_PAGER_ERROR); } @@ -503,10 +493,8 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap, * we were not able to relock. restart fault. */ - if (!locked) { - UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0); + if (!locked) return (VM_PAGER_REFAULT); - } /* * verify no one has touched the amap and moved the anon on us. 
@@ -517,7 +505,6 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap, ufi->orig_rvaddr - ufi->entry->start) != anon) { uvmfault_unlockall(ufi, amap, NULL, anon); - UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0); return (VM_PAGER_REFAULT); } @@ -566,10 +553,6 @@ uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type, struct uvm_object *uobj; struct vm_anon *anons_store[UVM_MAXRANGE], **anons, *anon, *oanon; struct vm_page *pages[UVM_MAXRANGE], *pg, *uobjpage; - UVMHIST_FUNC("uvm_fault"); UVMHIST_CALLED(maphist); - - UVMHIST_LOG(maphist, "(map=%p, vaddr=0x%lx, ft=%ld, at=%ld)", - orig_map, vaddr, fault_type, access_type); anon = NULL; pg = NULL; @@ -599,7 +582,6 @@ ReFault: */ if (uvmfault_lookup(&ufi, FALSE) == FALSE) { - UVMHIST_LOG(maphist, "<- no mapping @ 0x%lx", vaddr, 0,0,0); return (EFAULT); } /* locked: maps(read) */ @@ -615,9 +597,6 @@ ReFault: */ if ((ufi.entry->protection & access_type) != access_type) { - UVMHIST_LOG(maphist, - "<- protection failure (prot=0x%lx, access=0x%lx)", - ufi.entry->protection, access_type, 0, 0); uvmfault_unlockmaps(&ufi, FALSE); return (EACCES); } @@ -645,8 +624,6 @@ ReFault: if ((access_type & VM_PROT_WRITE) || (ufi.entry->object.uvm_obj == NULL)) { /* need to clear */ - UVMHIST_LOG(maphist, - " need to clear needs_copy and refault",0,0,0,0); uvmfault_unlockmaps(&ufi, FALSE); uvmfault_amapcopy(&ufi); uvmexp.fltamcopy++; @@ -677,7 +654,6 @@ ReFault: if (amap == NULL && uobj == NULL) { uvmfault_unlockmaps(&ufi, FALSE); - UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0); return (EFAULT); } @@ -719,10 +695,6 @@ ReFault: } /* locked: maps(read) */ - UVMHIST_LOG(maphist, " narrow=%ld, back=%ld, forw=%ld, startva=0x%lx", - narrow, nback, nforw, startva); - UVMHIST_LOG(maphist, " entry=%p, amap=%p, obj=%p", ufi.entry, - amap, uobj, 0); /* * if we've got an amap, lock it and extract current anons. @@ -745,8 +717,6 @@ ReFault: if (ufi.entry->advice == MADV_SEQUENTIAL && nback != 0) { - UVMHIST_LOG(maphist, " MADV_SEQUENTIAL: flushing backpages", - 0,0,0,0); /* flush back-page anons? 
*/ if (amap) uvmfault_anonflush(anons, nback); @@ -815,9 +785,6 @@ ReFault: uvm_lock_pageq(); uvm_pageactivate(anon->an_page); /* reactivate */ uvm_unlock_pageq(); - UVMHIST_LOG(maphist, - " MAPPING: n anon: pm=%p, va=0x%lx, pg=%p", - ufi.orig_map->pmap, currva, anon->an_page, 0); uvmexp.fltnamap++; /* @@ -839,8 +806,6 @@ ReFault: /* locked: maps(read), amap(if there) */ /* (shadowed == TRUE) if there is an anon at the faulting address */ - UVMHIST_LOG(maphist, " shadowed=%ld, will_get=%ld", shadowed, - (uobj && shadowed == FALSE),0,0); /* * note that if we are really short of RAM we could sleep in the above @@ -929,9 +894,6 @@ ReFault: if (lcv == centeridx) { uobjpage = pages[lcv]; - UVMHIST_LOG(maphist, " got uobjpage " - "(%p) with locked get", - uobjpage, 0,0,0); continue; } @@ -947,9 +909,6 @@ ReFault: uvm_lock_pageq(); uvm_pageactivate(pages[lcv]); /* reactivate */ uvm_unlock_pageq(); - UVMHIST_LOG(maphist, - " MAPPING: n obj: pm=%p, va=0x%lx, pg=%p", - ufi.orig_map->pmap, currva, pages[lcv], 0); uvmexp.fltnomap++; /* @@ -1015,7 +974,6 @@ ReFault: */ anon = anons[centeridx]; - UVMHIST_LOG(maphist, " case 1 fault: anon=%p", anon, 0,0,0); simple_lock(&anon->an_lock); /* locked: maps(read), amap, anon */ @@ -1159,8 +1117,6 @@ ReFault: */ if ((access_type & VM_PROT_WRITE) != 0 && anon->an_ref > 1) { - - UVMHIST_LOG(maphist, " case 1B: COW fault",0,0,0,0); uvmexp.flt_acow++; oanon = anon; /* oanon = old, locked anon */ anon = uvm_analloc(); @@ -1175,8 +1131,6 @@ ReFault: uvmfault_unlockall(&ufi, amap, uobj, oanon); KASSERT(uvmexp.swpgonly <= uvmexp.swpages); if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) { - UVMHIST_LOG(maphist, - "<- failed. out of VM",0,0,0,0); uvmexp.fltnoanon++; return (ENOMEM); } @@ -1223,8 +1177,6 @@ ReFault: * under us between the unlock and the pmap_enter. */ - UVMHIST_LOG(maphist, " MAPPING: anon: pm=%p, va=0x%lx, pg=%p", - ufi.orig_map->pmap, ufi.orig_rvaddr, pg, 0); if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg), enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0)) != 0) { @@ -1238,8 +1190,6 @@ ReFault: uvmfault_unlockall(&ufi, amap, uobj, oanon); KASSERT(uvmexp.swpgonly <= uvmexp.swpages); if (uvmexp.swpgonly == uvmexp.swpages) { - UVMHIST_LOG(maphist, - "<- failed. 
out of VM",0,0,0,0); /* XXX instrumentation */ return (ENOMEM); } @@ -1306,8 +1256,6 @@ Case2: promote = (access_type & VM_PROT_WRITE) && UVM_ET_ISCOPYONWRITE(ufi.entry); } - UVMHIST_LOG(maphist, " case 2 fault: promote=%ld, zfill=%ld", - promote, (uobj == NULL), 0,0); /* * if uobjpage is not null then we do not need to do I/O to get the @@ -1346,14 +1294,10 @@ Case2: KASSERT(result != VM_PAGER_PEND); if (result == VM_PAGER_AGAIN) { - UVMHIST_LOG(maphist, - " pgo_get says TRY AGAIN!",0,0,0,0); tsleep((caddr_t)&lbolt, PVM, "fltagain2", 0); goto ReFault; } - UVMHIST_LOG(maphist, "<- pgo_get failed (code %ld)", - result, 0,0,0); return (EACCES); /* XXX i/o error */ } @@ -1387,10 +1331,6 @@ Case2: */ if (locked == FALSE) { - - UVMHIST_LOG(maphist, - " wasn't able to relock after fault: retry", - 0,0,0,0); if (uobjpage->pg_flags & PG_WANTED) /* still holding object lock */ wakeup(uobjpage); @@ -1480,9 +1420,6 @@ Case2: uvm_unlock_pageq(); uvmfault_unlockall(&ufi, amap, uobj, NULL); - UVMHIST_LOG(maphist, - " out of RAM breaking loan, waiting", - 0,0,0,0); uvmexp.fltnoram++; uvm_wait("flt_noram4"); goto ReFault; @@ -1578,14 +1515,10 @@ Case2: uvmfault_unlockall(&ufi, amap, uobj, NULL); KASSERT(uvmexp.swpgonly <= uvmexp.swpages); if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) { - UVMHIST_LOG(maphist, " promote: out of VM", - 0,0,0,0); uvmexp.fltnoanon++; return (ENOMEM); } - UVMHIST_LOG(maphist, " out of RAM, waiting for more", - 0,0,0,0); uvm_anfree(anon); uvmexp.fltnoram++; uvm_wait("flt_noram5"); @@ -1624,19 +1557,12 @@ Case2: uvm_unlock_pageq(); simple_unlock(&uobj->vmobjlock); uobj = NULL; - - UVMHIST_LOG(maphist, - " promote uobjpage %p to anon/page %p/%p", - uobjpage, anon, pg, 0); - } else { uvmexp.flt_przero++; /* * Page is zero'd and marked dirty by uvm_pagealloc() * above. */ - UVMHIST_LOG(maphist," zero fill anon/page %p/%p", - anon, pg, 0, 0); } amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start, @@ -1655,9 +1581,6 @@ Case2: * resources. */ - UVMHIST_LOG(maphist, - " MAPPING: case2: pm=%p, va=0x%lx, pg=%p, promote=%ld", - ufi.orig_map->pmap, ufi.orig_rvaddr, pg, promote); if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg), enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0)) != 0) { @@ -1678,8 +1601,6 @@ Case2: uvmfault_unlockall(&ufi, amap, uobj, NULL); KASSERT(uvmexp.swpgonly <= uvmexp.swpages); if (uvmexp.swpgonly == uvmexp.swpages) { - UVMHIST_LOG(maphist, - "<- failed. 
out of VM",0,0,0,0); /* XXX instrumentation */ return (ENOMEM); } @@ -1717,7 +1638,6 @@ Case2: uvmfault_unlockall(&ufi, amap, uobj, NULL); pmap_update(ufi.orig_map->pmap); - UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0); return (0); } diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c index 24017d5811f..da5686d0881 100644 --- a/sys/uvm/uvm_km.c +++ b/sys/uvm/uvm_km.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_km.c,v 1.105 2011/06/23 21:54:56 oga Exp $ */ +/* $OpenBSD: uvm_km.c,v 1.106 2011/07/03 18:34:14 oga Exp $ */ /* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */ /* @@ -268,7 +268,6 @@ uvm_km_pgremove(struct uvm_object *uobj, vaddr_t start, vaddr_t end) struct vm_page *pp; voff_t curoff; int slot; - UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist); KASSERT(uobj->pgops == &aobj_pager); @@ -352,10 +351,7 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size, struct vm_page *pg; struct pglist pgl; int pla_flags; - UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist," (map=%p, obj=%p, size=0x%lx, flags=%d)", - map, obj, size, flags); KASSERT(vm_map_pmap(map) == pmap_kernel()); /* UVM_KMF_VALLOC => !UVM_KMF_ZERO */ KASSERT(!(flags & UVM_KMF_VALLOC) || @@ -377,7 +373,6 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size, if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET, valign, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) { - UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0); return(0); } @@ -386,7 +381,6 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size, */ if (flags & UVM_KMF_VALLOC) { - UVMHIST_LOG(maphist,"<- done valloc (kva=0x%lx)", kva,0,0,0); return(kva); } @@ -399,8 +393,6 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size, else offset = 0; - UVMHIST_LOG(maphist, " kva=0x%lx, offset=0x%lx", kva, offset,0,0); - /* * now allocate and map in the memory... note that we are the only ones * whom should ever get a handle on this area of VM. @@ -449,7 +441,6 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size, KASSERT(TAILQ_EMPTY(&pgl)); pmap_update(pmap_kernel()); - UVMHIST_LOG(maphist,"<- done (kva=0x%lx)", kva,0,0,0); return(kva); } @@ -497,9 +488,7 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit) vaddr_t kva, loopva; voff_t offset; struct vm_page *pg; - UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=%p, size=0x%lx)", map, size,0,0); KASSERT(vm_map_pmap(map) == pmap_kernel()); size = round_page(size); @@ -512,7 +501,6 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit) if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) { - UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0); return(0); } @@ -521,7 +509,6 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit) */ offset = kva - vm_map_min(kernel_map); - UVMHIST_LOG(maphist," kva=0x%lx, offset=0x%lx", kva, offset,0,0); /* * now allocate the memory. we must be careful about released pages. 
@@ -573,7 +560,6 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit) if (zeroit) memset((caddr_t)kva, 0, loopva - kva); - UVMHIST_LOG(maphist,"<- done (kva=0x%lx)", kva,0,0,0); return(kva); } @@ -599,9 +585,7 @@ vaddr_t uvm_km_valloc_align(struct vm_map *map, vsize_t size, vsize_t align, int flags) { vaddr_t kva; - UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, "(map=%p, size=0x%lx)", map, size, 0,0); KASSERT(vm_map_pmap(map) == pmap_kernel()); size = round_page(size); @@ -614,11 +598,9 @@ uvm_km_valloc_align(struct vm_map *map, vsize_t size, vsize_t align, int flags) if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, flags)) != 0)) { - UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0); return(0); } - UVMHIST_LOG(maphist, "<- done (kva=0x%lx)", kva,0,0,0); return(kva); } @@ -634,9 +616,7 @@ vaddr_t uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer) { vaddr_t kva; - UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, "(map=%p, size=0x%lx)", map, size, 0,0); KASSERT(vm_map_pmap(map) == pmap_kernel()); size = round_page(size); @@ -654,7 +634,6 @@ uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer) if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object, prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0)) == 0)) { - UVMHIST_LOG(maphist,"<- done (kva=0x%lx)", kva,0,0,0); return(kva); } @@ -662,7 +641,6 @@ uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer) * failed. sleep for a while (on map) */ - UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0); tsleep((caddr_t)map, PVM, "vallocwait", 0); } /*NOTREACHED*/ diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c index 5d35c3a15f5..06f971f4292 100644 --- a/sys/uvm/uvm_map.c +++ b/sys/uvm/uvm_map.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_map.c,v 1.142 2011/06/30 15:51:06 tedu Exp $ */ +/* $OpenBSD: uvm_map.c,v 1.143 2011/07/03 18:34:14 oga Exp $ */ /* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */ /* @@ -102,12 +102,6 @@ struct uvm_cnt uvm_mlk_call, uvm_mlk_hint; const char vmmapbsy[] = "vmmapbsy"; /* - * Da history books - */ -UVMHIST_DECL(maphist); -UVMHIST_DECL(pdhist); - -/* * pool for vmspace structures. */ @@ -397,7 +391,6 @@ uvm_mapent_alloc(struct vm_map *map, int flags) struct vm_map_entry *me, *ne; int s, i; int pool_flags; - UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist); pool_flags = PR_WAITOK; if (flags & UVM_FLAG_TRYLOCK) @@ -444,8 +437,6 @@ uvm_mapent_alloc(struct vm_map *map, int flags) } out: - UVMHIST_LOG(maphist, "<- new entry=%p [kentry=%ld]", me, - ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0); return(me); } @@ -459,10 +450,7 @@ void uvm_mapent_free(struct vm_map_entry *me) { int s; - UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"<- freeing map entry=%p [flags=%ld]", - me, me->flags, 0, 0); if (me->flags & UVM_MAP_STATIC) { s = splvm(); simple_lock(&uvm.kentry_lock); @@ -536,21 +524,12 @@ void uvm_map_init(void) { static struct vm_map_entry kernel_map_entry[MAX_KMAPENT]; -#if defined(UVMHIST) - static struct uvm_history_ent maphistbuf[100]; - static struct uvm_history_ent pdhistbuf[100]; -#endif int lcv; /* * first, init logging system. 
*/ - UVMHIST_FUNC("uvm_map_init"); - UVMHIST_INIT_STATIC(maphist, maphistbuf); - UVMHIST_INIT_STATIC(pdhist, pdhistbuf); - UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0); UVMCNT_INIT(uvm_map_call, UVMCNT_CNT, 0, "# uvm_map() successful calls", 0); UVMCNT_INIT(map_backmerge, UVMCNT_CNT, 0, "# uvm_map() back merges", 0); @@ -737,12 +716,6 @@ uvm_map_p(struct vm_map *map, vaddr_t *startp, vsize_t size, vm_inherit_t inherit = UVM_INHERIT(flags); int advice = UVM_ADVICE(flags); int error; - UVMHIST_FUNC("uvm_map"); - UVMHIST_CALLED(maphist); - - UVMHIST_LOG(maphist, "(map=%p, *startp=0x%lx, size=%ld, flags=0x%lx)", - map, *startp, size, flags); - UVMHIST_LOG(maphist, " uobj/offset %p/%ld", uobj, (u_long)uoffset,0,0); /* * Holes are incompatible with other types of mappings. @@ -796,8 +769,6 @@ uvm_map_p(struct vm_map *map, vaddr_t *startp, vsize_t size, */ if ((prot & maxprot) != prot) { - UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%lx, max=0x%lx", - prot, maxprot,0,0); return (EACCES); } @@ -812,7 +783,6 @@ uvm_map_p(struct vm_map *map, vaddr_t *startp, vsize_t size, } if ((prev_entry = uvm_map_findspace(map, *startp, size, startp, uobj, uoffset, align, flags)) == NULL) { - UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",0,0,0,0); vm_map_unlock(map); return (ENOMEM); } @@ -913,7 +883,6 @@ uvm_map_p(struct vm_map *map, vaddr_t *startp, vsize_t size, } UVMCNT_INCR(map_backmerge); - UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0); /* * drop our reference to uobj since we are extending a reference @@ -931,13 +900,11 @@ uvm_map_p(struct vm_map *map, vaddr_t *startp, vsize_t size, uvm_tree_sanity(map, "map leave 2"); - UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0); vm_map_unlock(map); return (0); } step3: - UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0); /* * check for possible forward merge (which we don't do) and count @@ -1044,7 +1011,6 @@ step3: uvm_tree_sanity(map, "map leave"); - UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0); vm_map_unlock(map); return (0); } @@ -1064,11 +1030,6 @@ uvm_map_lookup_entry(struct vm_map *map, vaddr_t address, struct vm_map_entry *cur; struct vm_map_entry *last; int use_tree = 0; - UVMHIST_FUNC("uvm_map_lookup_entry"); - UVMHIST_CALLED(maphist); - - UVMHIST_LOG(maphist,"(map=%p,addr=0x%lx,ent=%p)", - map, address, entry, 0); /* * start looking either from the head of the @@ -1099,8 +1060,6 @@ uvm_map_lookup_entry(struct vm_map *map, vaddr_t address, if ((cur != last) && (cur->end > address)) { UVMCNT_INCR(uvm_mlk_hint); *entry = cur; - UVMHIST_LOG(maphist,"<- got it via hint (%p)", - cur, 0, 0, 0); return (TRUE); } @@ -1138,7 +1097,6 @@ uvm_map_lookup_entry(struct vm_map *map, vaddr_t address, cur = RB_LEFT(cur, rb_entry); } *entry = prev; - UVMHIST_LOG(maphist,"<- failed!",0,0,0,0); return (FALSE); } @@ -1156,8 +1114,6 @@ uvm_map_lookup_entry(struct vm_map *map, vaddr_t address, *entry = cur; SAVE_HINT(map, map->hint, cur); - UVMHIST_LOG(maphist,"<- search got it (%p)", - cur, 0, 0, 0); return (TRUE); } break; @@ -1167,7 +1123,6 @@ uvm_map_lookup_entry(struct vm_map *map, vaddr_t address, *entry = cur->prev; SAVE_HINT(map, map->hint, *entry); - UVMHIST_LOG(maphist,"<- failed!",0,0,0,0); return (FALSE); } @@ -1296,13 +1251,8 @@ uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length, { struct vm_map_entry *entry, *next, *tmp; struct vm_map_entry *child, *prev = NULL; - vaddr_t end, orig_hint; - UVMHIST_FUNC("uvm_map_findspace"); - UVMHIST_CALLED(maphist); - 
UVMHIST_LOG(maphist, "(map=%p, hint=0x%lx, len=%ld, flags=0x%lx)", - map, hint, length, flags); KASSERT((align & (align - 1)) == 0); KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0); @@ -1317,14 +1267,11 @@ uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length, orig_hint = hint; if (hint < map->min_offset) { /* check ranges ... */ if (flags & UVM_FLAG_FIXED) { - UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0); return(NULL); } hint = map->min_offset; } if (hint > map->max_offset) { - UVMHIST_LOG(maphist,"<- VA 0x%lx > range [0x%lx->0x%lx]", - hint, map->min_offset, map->max_offset, 0); return(NULL); } @@ -1340,8 +1287,6 @@ uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length, if (uvm_map_lookup_entry(map, hint, &tmp)) { /* "hint" address already in use ... */ if (flags & UVM_FLAG_FIXED) { - UVMHIST_LOG(maphist,"<- fixed & VA in use", - 0, 0, 0, 0); return(NULL); } hint = tmp->end; @@ -1352,13 +1297,11 @@ uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length, if (flags & UVM_FLAG_FIXED) { end = hint + length; if (end > map->max_offset || end < hint) { - UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0); goto error; } next = entry->next; if (next == &map->header || next->start >= end) goto found; - UVMHIST_LOG(maphist,"<- fixed mapping failed", 0,0,0,0); return(NULL); /* only one shot at it ... */ } @@ -1478,7 +1421,6 @@ uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length, } end = hint + length; if (end > map->max_offset || end < hint) { - UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0); goto error; } next = entry->next; @@ -1488,14 +1430,10 @@ uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length, found: SAVE_HINT(map, map->hint, entry); *result = hint; - UVMHIST_LOG(maphist,"<- got it! (result=0x%lx)", hint, 0,0,0); return (entry); error: if (align != 0) { - UVMHIST_LOG(maphist, - "calling recursively, no align", - 0,0,0,0); return (uvm_map_findspace(map, orig_hint, length, result, uobj, uoffset, 0, flags)); } @@ -1516,10 +1454,6 @@ void uvm_unmap_p(vm_map_t map, vaddr_t start, vaddr_t end, struct proc *p) { vm_map_entry_t dead_entries; - UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist); - - UVMHIST_LOG(maphist, " (map=%p, start=0x%lx, end=0x%lx)", - map, start, end, 0); /* * work now done by helper functions. wipe the pmap's and then @@ -1532,7 +1466,6 @@ uvm_unmap_p(vm_map_t map, vaddr_t start, vaddr_t end, struct proc *p) if (dead_entries != NULL) uvm_unmap_detach(dead_entries, 0); - UVMHIST_LOG(maphist, "<- done", 0,0,0,0); } @@ -1555,11 +1488,6 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end, { struct vm_map_entry *entry, *first_entry, *next; vaddr_t len; - UVMHIST_FUNC("uvm_unmap_remove"); - UVMHIST_CALLED(maphist); - - UVMHIST_LOG(maphist,"(map=%p, start=0x%lx, end=0x%lx)", - map, start, end, 0); VM_MAP_RANGE_CHECK(map, start, end); @@ -1711,8 +1639,6 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end, * remove entry from map and put it on our list of entries * that we've nuked. then go do next entry. */ - UVMHIST_LOG(maphist, " removed map entry %p", entry, 0, 0,0); - /* critical! 
prevents stale hint */ SAVE_HINT(map, entry, entry->prev); @@ -1755,7 +1681,6 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end, */ *entry_list = first_entry; - UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0); } /* @@ -1768,15 +1693,9 @@ void uvm_unmap_detach(struct vm_map_entry *first_entry, int flags) { struct vm_map_entry *next_entry; - UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist); while (first_entry) { KASSERT(!VM_MAPENT_ISWIRED(first_entry)); - UVMHIST_LOG(maphist, - " detach 0x%lx: amap=%p, obj=%p, submap?=%ld", - first_entry, first_entry->aref.ar_amap, - first_entry->object.uvm_obj, - UVM_ET_ISSUBMAP(first_entry)); /* * drop reference to amap, if we've got one @@ -1803,7 +1722,6 @@ uvm_unmap_detach(struct vm_map_entry *first_entry, int flags) uvm_mapent_free(first_entry); first_entry = next_entry; } - UVMHIST_LOG(maphist, "<- done", 0,0,0,0); } /* @@ -1824,10 +1742,6 @@ int uvm_map_reserve(struct vm_map *map, vsize_t size, vaddr_t offset, vsize_t align, vaddr_t *raddr) { - UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist); - - UVMHIST_LOG(maphist, "(map=%p, size=0x%lx, offset=0x%lx,addr=0x%lx)", - map,size,offset,raddr); size = round_page(size); if (*raddr < vm_map_min(map)) @@ -1840,11 +1754,9 @@ uvm_map_reserve(struct vm_map *map, vsize_t size, vaddr_t offset, if (uvm_map(map, raddr, size, NULL, offset, 0, UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) { - UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0); return (FALSE); } - UVMHIST_LOG(maphist, "<- done (*raddr=0x%lx)", *raddr,0,0,0); return (TRUE); } @@ -2001,11 +1913,6 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len, struct vm_map_entry *deadentry, *oldentry; vsize_t elen; int nchain, error, copy_ok; - UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist); - - UVMHIST_LOG(maphist,"(srcmap=%p,start=0x%lx, len=0x%lx", srcmap, start, - len,0); - UVMHIST_LOG(maphist," ...,dstmap=%p, flags=0x%lx)", dstmap,flags,0,0); uvm_tree_sanity(srcmap, "map_extract src enter"); uvm_tree_sanity(dstmap, "map_extract dst enter"); @@ -2028,7 +1935,6 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len, if (uvm_map_reserve(dstmap, len, start, 0, &dstaddr) == FALSE) return(ENOMEM); *dstaddrp = dstaddr; /* pass address back to caller */ - UVMHIST_LOG(maphist, " dstaddr=0x%lx", dstaddr,0,0,0); /* * step 2: setup for the extraction process loop by init'ing the @@ -2382,9 +2288,6 @@ uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end, { struct vm_map_entry *current, *entry; int error = 0; - UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=%p,start=0x%lx,end=0x%lx,new_prot=0x%lx)", - map, start, end, new_prot); vm_map_lock(map); @@ -2478,7 +2381,6 @@ uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end, out: vm_map_unlock(map); - UVMHIST_LOG(maphist, "<- done, rv=%ld",error,0,0,0); return (error); } @@ -2498,9 +2400,6 @@ uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end, vm_inherit_t new_inheritance) { struct vm_map_entry *entry; - UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=%p,start=0x%lx,end=0x%lx,new_inh=0x%lx)", - map, start, end, new_inheritance); switch (new_inheritance) { case MAP_INHERIT_NONE: @@ -2508,7 +2407,6 @@ uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end, case MAP_INHERIT_SHARE: break; default: - UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0); return (EINVAL); } @@ -2529,7 +2427,6 @@ 
uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end, } vm_map_unlock(map); - UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0); return (0); } @@ -2543,9 +2440,6 @@ int uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice) { struct vm_map_entry *entry; - UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=%p,start=0x%lx,end=0x%lx,new_adv=0x%lx)", - map, start, end, new_advice); switch (new_advice) { case MADV_NORMAL: @@ -2555,7 +2449,6 @@ uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice) break; default: - UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0); return (EINVAL); } vm_map_lock(map); @@ -2578,7 +2471,6 @@ uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice) } vm_map_unlock(map); - UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0); return (0); } @@ -2605,9 +2497,6 @@ uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end, #ifdef DIAGNOSTIC u_int timestamp_save; #endif - UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=%p,start=0x%lx,end=0x%lx,new_pageable=0x%lx)", - map, start, end, new_pageable); KASSERT(map->flags & VM_MAP_PAGEABLE); if ((lockflags & UVM_LK_ENTER) == 0) @@ -2627,7 +2516,6 @@ uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end, if ((lockflags & UVM_LK_EXIT) == 0) vm_map_unlock(map); - UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0); return (EFAULT); } entry = start_entry; @@ -2651,8 +2539,6 @@ uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end, entry->next->start > entry->end))) { if ((lockflags & UVM_LK_EXIT) == 0) vm_map_unlock(map); - UVMHIST_LOG(maphist, - "<- done (INVALID UNWIRE ARG)",0,0,0,0); return (EINVAL); } entry = entry->next; @@ -2673,7 +2559,6 @@ uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end, } if ((lockflags & UVM_LK_EXIT) == 0) vm_map_unlock(map); - UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0); return (0); } @@ -2743,7 +2628,6 @@ uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end, } if ((lockflags & UVM_LK_EXIT) == 0) vm_map_unlock(map); - UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0); return (EINVAL); } entry = entry->next; @@ -2816,7 +2700,6 @@ uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end, } if ((lockflags & UVM_LK_EXIT) == 0) vm_map_unlock(map); - UVMHIST_LOG(maphist, "<- done (RV=%ld)", rv,0,0,0); return(rv); } @@ -2834,7 +2717,6 @@ uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end, vm_map_unbusy(map); } - UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0); return (0); } @@ -2856,8 +2738,6 @@ uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit) #ifdef DIAGNOSTIC u_int timestamp_save; #endif - UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=%p,flags=0x%lx)", map, flags, 0, 0); KASSERT(map->flags & VM_MAP_PAGEABLE); @@ -2879,7 +2759,6 @@ uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit) } vm_map_modflags(map, 0, VM_MAP_WIREFUTURE); vm_map_unlock(map); - UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0); return (0); /* @@ -2898,7 +2777,6 @@ uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit) /* * no more work to do! 
*/ - UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0); vm_map_unlock(map); return (0); } @@ -3037,7 +2915,6 @@ uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit) uvm_map_entry_unwire(map, entry); } vm_map_unlock(map); - UVMHIST_LOG(maphist,"<- done (RV=%ld)", error,0,0,0); return (error); } @@ -3045,7 +2922,6 @@ uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit) vm_map_unbusy(map); vm_map_unlock_read(map); - UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0); return (0); } @@ -3078,10 +2954,7 @@ uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags) vaddr_t offset; vsize_t size; int rv, error, refs; - UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=%p,start=0x%lx,end=0x%lx,flags=0x%lx)", - map, start, end, flags); KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) != (PGO_FREE|PGO_DEACTIVATE)); @@ -3312,11 +3185,9 @@ uvmspace_alloc(vaddr_t min, vaddr_t max, boolean_t pageable, boolean_t remove_holes) { struct vmspace *vm; - UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist); vm = pool_get(&uvm_vmspace_pool, PR_WAITOK | PR_ZERO); uvmspace_init(vm, NULL, min, max, pageable, remove_holes); - UVMHIST_LOG(maphist,"<- done (vm=%p)", vm,0,0,0); return (vm); } @@ -3330,7 +3201,6 @@ void uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t min, vaddr_t max, boolean_t pageable, boolean_t remove_holes) { - UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist); uvm_map_setup(&vm->vm_map, min, max, pageable ? VM_MAP_PAGEABLE : 0); @@ -3344,8 +3214,6 @@ uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t min, vaddr_t max, if (remove_holes) pmap_remove_holes(&vm->vm_map); - - UVMHIST_LOG(maphist,"<- done",0,0,0,0); } /* @@ -3458,9 +3326,7 @@ void uvmspace_free(struct vmspace *vm) { struct vm_map_entry *dead_entries; - UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(vm=%p) ref=%ld", vm, vm->vm_refcnt,0,0); if (--vm->vm_refcnt == 0) { /* * lock the map, to wait out all other references to it. 
delete @@ -3484,7 +3350,6 @@ uvmspace_free(struct vmspace *vm) vm->vm_map.pmap = NULL; pool_put(&uvm_vmspace_pool, vm); } - UVMHIST_LOG(maphist,"<- done", 0,0,0,0); } /* @@ -3588,7 +3453,6 @@ uvmspace_fork(struct vmspace *vm1) struct vm_map_entry *new_entry; pmap_t new_pmap; boolean_t protect_child; - UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist); vm_map_lock(old_map); @@ -3868,7 +3732,6 @@ uvmspace_fork(struct vmspace *vm1) pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap); #endif - UVMHIST_LOG(maphist,"<- done",0,0,0,0); return(vm2); } diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c index 2da2511c569..625608e663a 100644 --- a/sys/uvm/uvm_page.c +++ b/sys/uvm/uvm_page.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_page.c,v 1.110 2011/06/23 21:55:58 oga Exp $ */ +/* $OpenBSD: uvm_page.c,v 1.111 2011/07/03 18:34:14 oga Exp $ */ /* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */ /* @@ -129,11 +129,6 @@ static vaddr_t virtual_space_start; static vaddr_t virtual_space_end; /* - * History - */ -UVMHIST_DECL(pghist); - -/* * local prototypes */ @@ -157,7 +152,6 @@ __inline static void uvm_pageinsert(struct vm_page *pg) { struct vm_page *dupe; - UVMHIST_FUNC("uvm_pageinsert"); UVMHIST_CALLED(pghist); KASSERT((pg->pg_flags & PG_TABLED) == 0); dupe = RB_INSERT(uvm_objtree, &pg->uobject->memt, pg); @@ -177,7 +171,6 @@ uvm_pageinsert(struct vm_page *pg) static __inline void uvm_pageremove(struct vm_page *pg) { - UVMHIST_FUNC("uvm_pageremove"); UVMHIST_CALLED(pghist); KASSERT(pg->pg_flags & PG_TABLED); RB_REMOVE(uvm_objtree, &pg->uobject->memt, pg); @@ -201,13 +194,6 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp) vm_page_t pagearray; int lcv, i; paddr_t paddr; -#if defined(UVMHIST) - static struct uvm_history_ent pghistbuf[100]; -#endif - - UVMHIST_FUNC("uvm_page_init"); - UVMHIST_INIT_STATIC(pghist, pghistbuf); - UVMHIST_CALLED(pghist); /* * init the page queues and page queue locks @@ -463,7 +449,6 @@ boolean_t uvm_page_physget(paddr_t *paddrp) { int lcv, x; - UVMHIST_FUNC("uvm_page_physget"); UVMHIST_CALLED(pghist); /* pass 1: try allocating from a matching end */ #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \ @@ -807,7 +792,6 @@ int uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment, paddr_t boundary, struct pglist *rlist, int nsegs, int flags) { - UVMHIST_FUNC("uvm_pglistalloc"); UVMHIST_CALLED(pghist); KASSERT((alignment & (alignment - 1)) == 0); KASSERT((boundary & (boundary - 1)) == 0); @@ -854,7 +838,6 @@ uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment, void uvm_pglistfree(struct pglist *list) { - UVMHIST_FUNC("uvm_pglistfree"); UVMHIST_CALLED(pghist); uvm_pmr_freepageq(list); } @@ -903,7 +886,6 @@ uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon, struct pglist pgl; int pmr_flags; boolean_t use_reserve; - UVMHIST_FUNC("uvm_pagealloc"); UVMHIST_CALLED(pghist); KASSERT(obj == NULL || anon == NULL); KASSERT(off == trunc_page(off)); @@ -950,12 +932,9 @@ uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon, if (flags & UVM_PGA_ZERO) atomic_clearbits_int(&pg->pg_flags, PG_CLEAN); - UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg, - (u_long)VM_PAGE_TO_PHYS(pg), 0, 0); return(pg); fail: - UVMHIST_LOG(pghist, "failed!", 0, 0, 0, 0); return (NULL); } @@ -969,8 +948,6 @@ void uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff) { - UVMHIST_FUNC("uvm_pagerealloc"); UVMHIST_CALLED(pghist); - /* * remove it from the old object */ @@ -1006,7 +983,6 @@ void 
uvm_pagefree(struct vm_page *pg) { int saved_loan_count = pg->loan_count; - UVMHIST_FUNC("uvm_pagefree"); UVMHIST_CALLED(pghist); #ifdef DEBUG if (pg->uobject == (void *)0xdeadbeef && @@ -1015,8 +991,6 @@ uvm_pagefree(struct vm_page *pg) } #endif - UVMHIST_LOG(pghist, "freeing pg %p/%lx", pg, - (u_long)VM_PAGE_TO_PHYS(pg), 0, 0); KASSERT((pg->pg_flags & PG_DEV) == 0); /* @@ -1134,7 +1108,6 @@ uvm_page_unbusy(struct vm_page **pgs, int npgs) struct vm_page *pg; struct uvm_object *uobj; int i; - UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(pdhist); for (i = 0; i < npgs; i++) { pg = pgs[i]; @@ -1146,7 +1119,6 @@ uvm_page_unbusy(struct vm_page **pgs, int npgs) wakeup(pg); } if (pg->pg_flags & PG_RELEASED) { - UVMHIST_LOG(pdhist, "releasing pg %p", pg,0,0,0); uobj = pg->uobject; if (uobj != NULL) { uvm_lock_pageq(); @@ -1163,7 +1135,6 @@ uvm_page_unbusy(struct vm_page **pgs, int npgs) uvm_anfree(pg->uanon); } } else { - UVMHIST_LOG(pdhist, "unbusying pg %p", pg,0,0,0); atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY); UVM_PAGE_OWN(pg, NULL); } @@ -1221,7 +1192,6 @@ uvm_pageidlezero(void) struct vm_page *pg; struct pgfreelist *pgfl; int free_list; - UVMHIST_FUNC("uvm_pageidlezero"); UVMHIST_CALLED(pghist); do { uvm_lock_fpageq(); diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c index d146d819f91..389ab19725c 100644 --- a/sys/uvm/uvm_pager.c +++ b/sys/uvm/uvm_pager.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_pager.c,v 1.59 2011/04/15 22:00:46 oga Exp $ */ +/* $OpenBSD: uvm_pager.c,v 1.60 2011/07/03 18:34:14 oga Exp $ */ /* $NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $ */ /* @@ -250,11 +250,6 @@ uvm_pagermapin(struct vm_page **pps, int npages, int flags) vsize_t size; struct vm_page *pp; - UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist); - - UVMHIST_LOG(maphist,"(pps=%p, npages=%ld, flags=%d)", - pps, npages, flags,0); - prot = VM_PROT_READ; if (flags & UVMPAGER_MAPIN_READ) prot |= VM_PROT_WRITE; @@ -263,10 +258,8 @@ uvm_pagermapin(struct vm_page **pps, int npages, int flags) KASSERT(size <= MAXBSIZE); kva = uvm_pseg_get(flags); - if (kva == 0) { - UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0); + if (kva == 0) return 0; - } for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) { pp = *pps++; @@ -278,12 +271,10 @@ uvm_pagermapin(struct vm_page **pps, int npages, int flags) pmap_remove(pmap_kernel(), kva, cva); pmap_update(pmap_kernel()); uvm_pseg_release(kva); - UVMHIST_LOG(maphist,"<- pmap_enter failed", 0,0,0,0); return 0; } } pmap_update(pmap_kernel()); - UVMHIST_LOG(maphist, "<- done (KVA=0x%lx)", kva,0,0,0); return kva; } @@ -295,15 +286,11 @@ uvm_pagermapin(struct vm_page **pps, int npages, int flags) void uvm_pagermapout(vaddr_t kva, int npages) { - UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist); - - UVMHIST_LOG(maphist, " (kva=0x%lx, npages=%ld)", kva, npages,0,0); pmap_remove(pmap_kernel(), kva, kva + (npages << PAGE_SHIFT)); pmap_update(pmap_kernel()); uvm_pseg_release(kva); - UVMHIST_LOG(maphist,"<- done",0,0,0,0); } /* @@ -337,7 +324,6 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages, struct vm_page **ppsp, *pclust; voff_t lo, hi, curoff; int center_idx, forward, incr; - UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist); /* * center page should already be busy and write protected. XXX: @@ -451,7 +437,6 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages, * done! return the cluster array to the caller!!! 
 	 */
-	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
 	return(ppsp);
 }
 
@@ -504,7 +489,6 @@ uvm_pager_put(struct uvm_object *uobj, struct vm_page *pg,
 	int result;
 	daddr64_t swblk;
 	struct vm_page **ppsp = *ppsp_ptr;
-	UVMHIST_FUNC("uvm_pager_put"); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * note that uobj is null if we are doing a swap-backed pageout.
@@ -556,7 +540,6 @@ ReTry:
 	if (uobj) {
 		/* object is locked */
 		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
-		UVMHIST_LOG(pdhist, "put -> %ld", result, 0,0,0);
 		/* object is now unlocked */
 	} else {
 		/* nothing locked */
@@ -816,8 +799,6 @@ uvm_aio_aiodone(struct buf *bp)
 	struct uvm_object *uobj;
 	int i, error;
 	boolean_t write, swap;
-	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(pdhist);
-	UVMHIST_LOG(pdhist, "bp %p", bp, 0,0,0);
 
 	KASSERT(npages <= MAXPHYS >> PAGE_SHIFT);
 	splassert(IPL_BIO);
@@ -826,10 +807,8 @@ uvm_aio_aiodone(struct buf *bp)
 	write = (bp->b_flags & B_READ) == 0;
 
 	uobj = NULL;
-	for (i = 0; i < npages; i++) {
+	for (i = 0; i < npages; i++)
 		pgs[i] = uvm_atopg((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
-		UVMHIST_LOG(pdhist, "pgs[%ld] = %p", i, pgs[i],0,0);
-	}
 	uvm_pagermapout((vaddr_t)bp->b_data, npages);
 #ifdef UVM_SWAP_ENCRYPT
 	/*
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index cccee8373d3..4182fe8dbeb 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_pdaemon.c,v 1.57 2011/04/01 12:58:13 krw Exp $	*/
+/*	$OpenBSD: uvm_pdaemon.c,v 1.58 2011/07/03 18:34:14 oga Exp $	*/
 /*	$NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $	*/
 
 /*
@@ -158,7 +158,6 @@ uvm_wait(const char *wmsg)
 void
 uvmpd_tune(void)
 {
-	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);
 
 	uvmexp.freemin = uvmexp.npages / 30;
 
@@ -180,8 +179,6 @@ uvmpd_tune(void)
 	/* uvmexp.inactarg: computed in main daemon loop */
 
 	uvmexp.wiredmax = uvmexp.npages / 3;
-	UVMHIST_LOG(pdhist, "<- done, freemin=%ld, freetarg=%ld, wiredmax=%ld",
-	    uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
 }
 
 /*
@@ -192,9 +189,6 @@ void
 uvm_pageout(void *arg)
 {
 	int npages = 0;
-	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);
-
-	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);
 
 	/*
 	 * ensure correct priority and set paging parameters...
@@ -213,11 +207,9 @@ uvm_pageout(void *arg)
 	for (;;) {
 		uvm_lock_fpageq();
 
-		UVMHIST_LOG(pdhist," <<SLEEPING>>",0,0,0,0);
 		msleep(&uvm.pagedaemon, &uvm.fpageqlock, PVM | PNORELOCK,
 		    "pgdaemon", 0);
 		uvmexp.pdwoke++;
-		UVMHIST_LOG(pdhist," <<WOKE UP>>",0,0,0,0);
 
 		/*
 		 * now lock page queues and recompute inactive count
@@ -234,10 +226,6 @@ uvm_pageout(void *arg)
 			uvmexp.inactarg = uvmexp.freetarg + 1;
 		}
 
-		UVMHIST_LOG(pdhist," free/ftarg=%ld/%ld, inact/itarg=%ld/%ld",
-		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
-		    uvmexp.inactarg);
-
 		/*
 		 * get pages from the buffer cache, or scan if needed
 		 */
@@ -277,7 +265,6 @@ uvm_aiodone_daemon(void *arg)
 {
 	int s, free;
 	struct buf *bp, *nbp;
-	UVMHIST_FUNC("uvm_aiodoned"); UVMHIST_CALLED(pdhist);
 
 	uvm.aiodoned_proc = curproc;
 
@@ -345,7 +332,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
 	boolean_t swap_backed;
 	vaddr_t start;
 	int dirtyreacts;
-	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * note: we currently keep swap-backed pages on a separate inactive
@@ -386,8 +372,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
 
 			if (free + uvmexp.paging >= uvmexp.freetarg << 2 ||
 			    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
-				UVMHIST_LOG(pdhist," met free target: "
-				    "exit loop", 0, 0, 0, 0);
 				retval = TRUE;
 
 				if (swslot == 0) {
@@ -908,7 +892,6 @@ uvmpd_scan(void)
 	struct vm_page *p, *nextpg;
 	struct uvm_object *uobj;
 	boolean_t got_it;
-	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
 
 	uvmexp.pdrevs++;		/* counter */
 	uobj = NULL;
@@ -925,8 +908,6 @@ uvmpd_scan(void)
 	 */
 	if (free < uvmexp.freetarg) {
 		uvmexp.pdswout++;
-		UVMHIST_LOG(pdhist," free %ld < target %ld: swapout", free,
-		    uvmexp.freetarg, 0, 0);
 		uvm_unlock_pageq();
 		uvm_swapout_threads();
 		uvm_lock_pageq();
@@ -940,8 +921,6 @@ uvmpd_scan(void)
 	 * to inactive ones.
 	 */
 
-	UVMHIST_LOG(pdhist, " starting 'free' loop",0,0,0,0);
-
 	/*
 	 * alternate starting queue between swap and object based on the
 	 * low bit of uvmexp.pdrevs (which we bump by one each call).
@@ -977,8 +956,6 @@ uvmpd_scan(void)
 		swap_shortage = uvmexp.freetarg - uvmexp.free;
 	}
 
-	UVMHIST_LOG(pdhist, " loop 2: inactive_shortage=%ld swap_shortage=%ld",
-	    inactive_shortage, swap_shortage,0,0);
 	for (p = TAILQ_FIRST(&uvm.page_active);
 	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
 	     p = nextpg) {
diff --git a/sys/uvm/uvm_stat.c b/sys/uvm/uvm_stat.c
index c20518f562d..040b8dfb96c 100644
--- a/sys/uvm/uvm_stat.c
+++ b/sys/uvm/uvm_stat.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_stat.c,v 1.23 2011/06/30 15:51:07 tedu Exp $	*/
+/*	$OpenBSD: uvm_stat.c,v 1.24 2011/07/03 18:34:14 oga Exp $	*/
 /*	$NetBSD: uvm_stat.c,v 1.18 2001/03/09 01:02:13 chs Exp $	*/
 
 /*
@@ -51,13 +51,6 @@
 
 struct uvm_cnt *uvm_cnt_head = NULL;
 
-#ifdef UVMHIST
-struct uvm_history_head uvm_histories;
-#endif
-
-#ifdef UVMHIST_PRINT
-int uvmhist_print_enabled = 1;
-#endif
 
 #ifdef DDB
 
@@ -65,124 +58,9 @@ int uvmhist_print_enabled = 1;
  * prototypes
  */
 
-#ifdef UVMHIST
-void uvmhist_dump(struct uvm_history *);
-void uvm_hist(u_int32_t);
-static void uvmhist_dump_histories(struct uvm_history *[]);
-#endif
 void uvmcnt_dump(void);
 
-#ifdef UVMHIST
-/* call this from ddb */
-void
-uvmhist_dump(struct uvm_history *l)
-{
-	int lcv, s;
-
-	s = splhigh();
-	lcv = l->f;
-	do {
-		if (l->e[lcv].fmt)
-			uvmhist_print(&l->e[lcv]);
-		lcv = (lcv + 1) % l->n;
-	} while (lcv != l->f);
-	splx(s);
-}
-
-/*
- * print a merged list of uvm_history structures
- */
-static void
-uvmhist_dump_histories(struct uvm_history *hists[])
-{
-	struct timeval tv;
-	int cur[MAXHISTS];
-	int s, lcv, hi;
-
-	/* so we don't get corrupted lists! */
-	s = splhigh();
-
-	/* find the first of each list */
-	for (lcv = 0; hists[lcv]; lcv++)
-		cur[lcv] = hists[lcv]->f;
-
-	/*
-	 * here we loop "forever", finding the next earliest
-	 * history entry and printing it.  cur[X] is the current
-	 * entry to test for the history in hists[X].  if it is
-	 * -1, then this history is finished.
-	 */
-	for (;;) {
-		hi = -1;
-		tv.tv_sec = tv.tv_usec = 0;
-
-		/* loop over each history */
-		for (lcv = 0; hists[lcv]; lcv++) {
-restart:
-			if (cur[lcv] == -1)
-				continue;
-
-			/*
-			 * if the format is empty, go to the next entry
-			 * and retry.
-			 */
-			if (hists[lcv]->e[cur[lcv]].fmt == NULL) {
-				cur[lcv] = (cur[lcv] + 1) % (hists[lcv]->n);
-				if (cur[lcv] == hists[lcv]->f)
-					cur[lcv] = -1;
-				goto restart;
-			}
-
-			/*
-			 * if the time hasn't been set yet, or this entry is
-			 * earlier than the current tv, set the time and history
-			 * index.
-			 */
-			if (tv.tv_sec == 0 ||
-			    timercmp(&hists[lcv]->e[cur[lcv]].tv, &tv, <)) {
-				tv = hists[lcv]->e[cur[lcv]].tv;
-				hi = lcv;
-			}
-		}
-
-		/* if we didn't find any entries, we must be done */
-		if (hi == -1)
-			break;
-
-		/* print and move to the next entry */
-		uvmhist_print(&hists[hi]->e[cur[hi]]);
-		cur[hi] = (cur[hi] + 1) % (hists[hi]->n);
-		if (cur[hi] == hists[hi]->f)
-			cur[hi] = -1;
-	}
-
-	/* done! */
-	splx(s);
-}
-
-/*
- * call this from ddb.  `bitmask' is from <uvm/uvm_stat.h>.  it
- * merges the named histories.
- */
-void
-uvm_hist(u_int32_t bitmask)	/* XXX only support 32 hists */
-{
-	struct uvm_history *hists[MAXHISTS + 1];
-	int i = 0;
-
-	if ((bitmask & UVMHIST_MAPHIST) || bitmask == 0)
-		hists[i++] = &maphist;
-
-	if ((bitmask & UVMHIST_PDHIST) || bitmask == 0)
-		hists[i++] = &pdhist;
-
-	hists[i] = NULL;
-
-	uvmhist_dump_histories(hists);
-}
-#endif /* UVMHIST */
-
 void
 uvmcnt_dump(void)
 {
diff --git a/sys/uvm/uvm_stat.h b/sys/uvm/uvm_stat.h
index 8452948a188..c92ba6b0a19 100644
--- a/sys/uvm/uvm_stat.h
+++ b/sys/uvm/uvm_stat.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_stat.h,v 1.15 2007/09/07 15:00:20 art Exp $	*/
+/*	$OpenBSD: uvm_stat.h,v 1.16 2011/07/03 18:34:14 oga Exp $	*/
 /*	$NetBSD: uvm_stat.h,v 1.19 2001/02/04 10:55:58 mrg Exp $	*/
 
 /*
@@ -93,156 +93,4 @@ do { \
 
 #endif /* _KERNEL */
 
-/*
- * history/tracing
- */
-
-struct uvm_history_ent {
-	struct timeval tv;		/* time stamp */
-	char *fmt;			/* printf format */
-	size_t fmtlen;			/* length of printf format */
-	char *fn;			/* function name */
-	size_t fnlen;			/* length of function name */
-	u_long call;			/* function call number */
-	u_long v[4];			/* values */
-};
-
-struct uvm_history {
-	const char *name;		/* name of this this history */
-	size_t namelen;			/* length of name, not including null */
-	LIST_ENTRY(uvm_history) list;	/* link on list of all histories */
-	int n;				/* number of entries */
-	int f;				/* next free one */
-	simple_lock_data_t l;		/* lock on this history */
-	struct uvm_history_ent *e;	/* the malloc'd entries */
-};
-
-LIST_HEAD(uvm_history_head, uvm_history);
-
-/*
- * grovelling lists all at once.  we currently do not allow more than
- * 32 histories to exist, as the way to dump a number of them at once
- * is by calling uvm_hist() with a bitmask.
- */
-
-/* this is used to set the size of some arrays */
-#define	MAXHISTS	32	/* do not change this! */
-
-/* and these are the bit values of each history */
-#define	UVMHIST_MAPHIST	0x00000001	/* maphist */
-#define	UVMHIST_PDHIST	0x00000002	/* pdhist */
-#define	UVMHIST_UBCHIST	0x00000004	/* ubchist */
-#define	UVMHIST_PGHIST	0x00000008	/* pghist */
-
-#ifdef _KERNEL
-
-/*
- * macros to use the history/tracing code.  note that UVMHIST_LOG
- * must take 4 arguments (even if they are ignored by the format).
- */
-#ifndef UVMHIST
-#define UVMHIST_DECL(NAME)
-#define UVMHIST_INIT(NAME,N)
-#define UVMHIST_INIT_STATIC(NAME,BUF)
-#define UVMHIST_LOG(NAME,FMT,A,B,C,D)
-#define UVMHIST_CALLED(NAME)
-#define UVMHIST_FUNC(FNAME)
-#define uvmhist_dump(NAME)
-#else
-extern struct uvm_history_head uvm_histories;
-
-#define UVMHIST_DECL(NAME) struct uvm_history NAME
-
-#define UVMHIST_INIT(NAME,N) \
-do { \
-	(NAME).name = __STRING(NAME); \
-	(NAME).namelen = strlen((NAME).name); \
-	(NAME).n = (N); \
-	(NAME).f = 0; \
-	simple_lock_init(&(NAME).l); \
-	(NAME).e = (struct uvm_history_ent *) \
-	    malloc(sizeof(struct uvm_history_ent) * (N), M_TEMP, \
-	    M_WAITOK|M_ZERO); \
-	LIST_INSERT_HEAD(&uvm_histories, &(NAME), list); \
-} while (0)
-
-#define UVMHIST_INIT_STATIC(NAME,BUF) \
-do { \
-	(NAME).name = __STRING(NAME); \
-	(NAME).namelen = strlen((NAME).name); \
-	(NAME).n = sizeof(BUF) / sizeof(struct uvm_history_ent); \
-	(NAME).f = 0; \
-	simple_lock_init(&(NAME).l); \
-	(NAME).e = (struct uvm_history_ent *) (BUF); \
-	memset((NAME).e, 0, sizeof(struct uvm_history_ent) * (NAME).n); \
-	LIST_INSERT_HEAD(&uvm_histories, &(NAME), list); \
-} while (0)
-
-#if defined(UVMHIST_PRINT)
-extern int uvmhist_print_enabled;
-#define UVMHIST_PRINTNOW(E) \
-do { \
-	if (uvmhist_print_enabled) { \
-		uvmhist_print(E); \
-		DELAY(100000); \
-	} \
-} while (0)
-#else
-#define UVMHIST_PRINTNOW(E) /* nothing */
-#endif
-
-#define UVMHIST_LOG(NAME,FMT,A,B,C,D) \
-do { \
-	int _i_, _s_ = splhigh(); \
-	simple_lock(&(NAME).l); \
-	_i_ = (NAME).f; \
-	(NAME).f = (_i_ + 1) % (NAME).n; \
-	simple_unlock(&(NAME).l); \
-	splx(_s_); \
-	if (!cold) \
-		microtime(&(NAME).e[_i_].tv); \
-	(NAME).e[_i_].fmt = (FMT); \
-	(NAME).e[_i_].fmtlen = strlen((NAME).e[_i_].fmt); \
-	(NAME).e[_i_].fn = _uvmhist_name; \
-	(NAME).e[_i_].fnlen = strlen((NAME).e[_i_].fn); \
-	(NAME).e[_i_].call = _uvmhist_call; \
-	(NAME).e[_i_].v[0] = (u_long)(A); \
-	(NAME).e[_i_].v[1] = (u_long)(B); \
-	(NAME).e[_i_].v[2] = (u_long)(C); \
-	(NAME).e[_i_].v[3] = (u_long)(D); \
-	UVMHIST_PRINTNOW(&((NAME).e[_i_])); \
-} while (0)
-
-#define UVMHIST_CALLED(NAME) \
-do { \
-	{ \
-		int s = splhigh(); \
-		simple_lock(&(NAME).l); \
-		_uvmhist_call = _uvmhist_cnt++; \
-		simple_unlock(&(NAME).l); \
-		splx(s); \
-	} \
-	UVMHIST_LOG(NAME,"called!", 0, 0, 0, 0); \
-} while (0)
-
-#define UVMHIST_FUNC(FNAME) \
-	static int _uvmhist_cnt = 0; \
-	static char *_uvmhist_name = FNAME; \
-	int _uvmhist_call;
-
-static __inline void uvmhist_print(struct uvm_history_ent *);
-
-static __inline void
-uvmhist_print(e)
-	struct uvm_history_ent *e;
-{
-	printf("%06ld.%06ld ", e->tv.tv_sec, e->tv.tv_usec);
-	printf("%s#%ld: ", e->fn, e->call);
-	printf(e->fmt, e->v[0], e->v[1], e->v[2], e->v[3]);
-	printf("\n");
-}
-#endif /* UVMHIST */
-
-#endif /* _KERNEL */
-
 #endif /* _UVM_UVM_STAT_H_ */
diff --git a/sys/uvm/uvm_swap.c b/sys/uvm/uvm_swap.c
index c353edeabc0..f7d16492256 100644
--- a/sys/uvm/uvm_swap.c
+++ b/sys/uvm/uvm_swap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_swap.c,v 1.102 2011/04/17 19:19:47 deraadt Exp $	*/
+/*	$OpenBSD: uvm_swap.c,v 1.103 2011/07/03 18:34:14 oga Exp $	*/
 /*	$NetBSD: uvm_swap.c,v 1.40 2000/11/17 11:39:39 mrg Exp $	*/
 
 /*
@@ -270,9 +270,6 @@ void uvm_swap_initcrypt(struct swapdev *, int);
 void
 uvm_swap_init(void)
 {
-	UVMHIST_FUNC("uvm_swap_init");
-
-	UVMHIST_CALLED(pdhist);
 	/*
 	 * first, init the swap list, its counter, and its lock.
	 * then get a handle on the vnode for /dev/drum by using
@@ -316,7 +313,6 @@ uvm_swap_init(void)
 	/*
 	 * done!
 	 */
-	UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
 }
 
 #ifdef UVM_SWAP_ENCRYPT
@@ -488,7 +484,6 @@ void
 swaplist_insert(struct swapdev *sdp, struct swappri *newspp, int priority)
 {
 	struct swappri *spp, *pspp;
-	UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * find entry at or after which to insert the new device.
@@ -505,8 +500,6 @@ swaplist_insert(struct swapdev *sdp, struct swappri *newspp, int priority)
 	 */
 	if (spp == NULL || spp->spi_priority != priority) {
 		spp = newspp;  /* use newspp! */
-		UVMHIST_LOG(pdhist, "created new swappri = %ld",
-		    priority, 0, 0, 0);
 
 		spp->spi_priority = priority;
 		CIRCLEQ_INIT(&spp->spi_swapdev);
@@ -650,7 +643,6 @@ sys_swapctl(struct proc *p, void *v, register_t *retval)
 	size_t	len;
 	int	count, error, misc;
 	int	priority;
-	UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);
 
 	misc = SCARG(uap, misc);
 
@@ -666,8 +658,6 @@ sys_swapctl(struct proc *p, void *v, register_t *retval)
 	 * [can also be obtained with uvmexp sysctl]
 	 */
 	if (SCARG(uap, cmd) == SWAP_NSWAP) {
-		UVMHIST_LOG(pdhist, "<- done SWAP_NSWAP=%ld", uvmexp.nswapdev,
-		    0, 0, 0);
 		*retval = uvmexp.nswapdev;
 		error = 0;
 		goto out;
@@ -708,8 +698,6 @@ sys_swapctl(struct proc *p, void *v, register_t *retval)
 			}
 		}
 
-		UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);
-
 		*retval = count;
 		error = 0;
 		goto out;
@@ -877,7 +865,6 @@ sys_swapctl(struct proc *p, void *v, register_t *retval)
 out:
 	rw_exit_write(&swap_syscall_lock);
 
-	UVMHIST_LOG(pdhist, "<- done!  error=%ld", error, 0, 0, 0);
 	return (error);
 }
 
@@ -903,7 +890,6 @@ swap_on(struct proc *p, struct swapdev *sdp)
 	extern struct vops nfs_vops;
 #endif /* defined(NFSCLIENT) */
 	dev_t dev;
-	UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * we want to enable swapping on sdp.   the swd_vp contains
@@ -933,8 +919,6 @@ swap_on(struct proc *p, struct swapdev *sdp)
 	}
 
 	/* XXX this only works for block devices */
-	UVMHIST_LOG(pdhist, " dev=%ld, major(dev)=%ld", dev, major(dev), 0,0);
-
 	/*
 	 * we now need to determine the size of the swap area.   for
 	 * block specials we can call the d_psize function.
@@ -1010,14 +994,10 @@ swap_on(struct proc *p, struct swapdev *sdp)
 	 */
 
 	if (size < 1) {
-		UVMHIST_LOG(pdhist, " size <= 1!!", 0, 0, 0, 0);
 		error = EINVAL;
 		goto bad;
 	}
 
-	UVMHIST_LOG(pdhist, " dev=%lx: size=%ld addr=0x%lx\n",
-	    dev, size, addr, 0);
-
 	/*
 	 * now we need to allocate an extent to manage this swap device
 	 */
@@ -1100,8 +1080,6 @@ int
 swap_off(struct proc *p, struct swapdev *sdp)
 {
 	int error = 0;
-	UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
-	UVMHIST_LOG(pdhist, " dev=%lx", sdp->swd_dev,0,0,0);
 
 	/* disable the swap area being removed */
 	sdp->swd_flags &= ~SWF_ENABLE;
@@ -1176,7 +1154,6 @@ swstrategy(struct buf *bp)
 {
 	struct swapdev *sdp;
 	int s, pageno, bn;
-	UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * convert block number to swapdev.   note that swapdev can't
@@ -1193,7 +1170,6 @@ swstrategy(struct buf *bp)
 		s = splbio();
 		biodone(bp);
 		splx(s);
-		UVMHIST_LOG(pdhist, " failed to get swap device", 0, 0, 0, 0);
 		return;
 	}
 
@@ -1204,10 +1180,6 @@ swstrategy(struct buf *bp)
 	pageno -= sdp->swd_drumoffset;	/* page # on swapdev */
 	bn = btodb((u_int64_t)pageno << PAGE_SHIFT); /* convert to diskblock */
 
-	UVMHIST_LOG(pdhist, " %s: mapoff=%lx bn=0x%lx bcount=%ld",
-	    ((bp->b_flags & B_READ) == 0) ? "write" : "read",
-	    sdp->swd_drumoffset, bn, bp->b_bcount);
-
 	/*
 	 * for block devices we finish up here.
 	 * for regular files we have to do more work which we delegate
@@ -1254,7 +1226,6 @@ sw_reg_strategy(struct swapdev *sdp, struct buf *bp, int bn)
 	caddr_t		addr;
 	off_t		byteoff;
 	int		s, off, nra, error, sz, resid;
-	UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * allocate a vndxfer head for this transfer and point it to
@@ -1326,10 +1297,6 @@ sw_reg_strategy(struct swapdev *sdp, struct buf *bp, int bn)
 		if (sz > resid)
 			sz = resid;
 
-		UVMHIST_LOG(pdhist, "sw_reg_strategy: "
-		    "vp %p/%p offset 0x%lx/0x%llx",
-		    sdp->swd_vp, vp, (u_long)byteoff, nbn);
-
 		/*
 		 * now get a buf structure.   note that the vb_buf is
 		 * at the front of the nbp structure so that you can
@@ -1422,7 +1389,6 @@ void
 sw_reg_start(struct swapdev *sdp)
 {
 	struct buf	*bp;
-	UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);
 
 	/* XXX: recursion control */
 	if ((sdp->swd_flags & SWF_BUSY) != 0)
@@ -1437,9 +1403,6 @@ sw_reg_start(struct swapdev *sdp)
 
 		sdp->swd_active++;
 
-		UVMHIST_LOG(pdhist,
-		    "sw_reg_start:  bp %p vp %p blkno 0x%lx cnt 0x%lx",
-		    bp, bp->b_vp, bp->b_blkno, bp->b_bcount);
 		if ((bp->b_flags & B_READ) == 0)
 			bp->b_vp->v_numoutput++;
 
@@ -1477,12 +1440,6 @@ sw_reg_iodone_internal(void *arg0, void *unused)
 	struct buf *pbp = vnx->vx_bp;		/* parent buffer */
 	struct swapdev	*sdp = vnx->vx_sdp;
 	int resid, s;
-	UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);
-
-	UVMHIST_LOG(pdhist, " vbp=%p vp=%p blkno=0x%lx addr=%p",
-	    vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, vbp->vb_buf.b_data);
-	UVMHIST_LOG(pdhist, " cnt=%lx resid=%lx",
-	    vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);
 
 	s = splbio();
 
@@ -1490,13 +1447,9 @@ sw_reg_iodone_internal(void *arg0, void *unused)
 	pbp->b_resid -= resid;
 	vnx->vx_pending--;
 
-	if (vbp->vb_buf.b_error) {
-		UVMHIST_LOG(pdhist, " got error=%ld !",
-		    vbp->vb_buf.b_error, 0, 0, 0);
-
-		/* pass error upward */
+	/* pass error upward */
+	if (vbp->vb_buf.b_error)
 		vnx->vx_error = vbp->vb_buf.b_error;
-	}
 
 	/*
 	 * disassociate this buffer from the vnode (if any).
@@ -1525,8 +1478,6 @@ sw_reg_iodone_internal(void *arg0, void *unused)
 	} else if (pbp->b_resid == 0) {
 		KASSERT(vnx->vx_pending == 0);
 		if ((vnx->vx_flags & VX_BUSY) == 0) {
-			UVMHIST_LOG(pdhist, " iodone error=%ld !",
-			    pbp, vnx->vx_error, 0, 0);
 			putvndxfer(vnx);
 			biodone(pbp);
 		}
@@ -1557,7 +1508,6 @@ uvm_swap_alloc(int *nslots, boolean_t lessok)
 	struct swapdev *sdp;
 	struct swappri *spp;
 	u_long	result;
-	UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * no swap devices configured yet?   definite failure.
@@ -1596,9 +1546,6 @@ ReTry:	/* XXXMRG */
 			uvmexp.swpginuse += *nslots;
 			simple_unlock(&uvm.swap_data_lock);
 			/* done!  return drum slot number */
-			UVMHIST_LOG(pdhist,
-			    "success!  returning %ld slots starting at %ld",
-			    *nslots, result + sdp->swd_drumoffset, 0, 0);
 			return(result + sdp->swd_drumoffset);
 		}
 	}
@@ -1623,7 +1570,6 @@ void
 uvm_swap_markbad(int startslot, int nslots)
 {
 	struct swapdev *sdp;
-	UVMHIST_FUNC("uvm_swap_markbad"); UVMHIST_CALLED(pdhist);
 
 	simple_lock(&uvm.swap_data_lock);
 	sdp = swapdrum_getsdp(startslot);
@@ -1635,7 +1581,6 @@ uvm_swap_markbad(int startslot, int nslots)
 		 * one swap device.
 		 */
 		sdp->swd_npgbad += nslots;
-		UVMHIST_LOG(pdhist, "now %ld bad", sdp->swd_npgbad, 0,0,0);
 	}
 	simple_unlock(&uvm.swap_data_lock);
 }
@@ -1650,10 +1595,6 @@ void
 uvm_swap_free(int startslot, int nslots)
 {
 	struct swapdev *sdp;
-	UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);
-
-	UVMHIST_LOG(pdhist, "freeing %ld slots starting at %ld", nslots,
-	    startslot, 0, 0);
 
 	/*
 	 * ignore attempts to free the "bad" slot.
@@ -1778,10 +1719,6 @@ uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
 	struct swapdev *sdp;
 	int	encrypt = 0;
 #endif
-	UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
-
-	UVMHIST_LOG(pdhist, "<- called, startslot=%ld, npages=%ld, flags=%ld",
-	    startslot, npages, flags, 0);
 
 	write = (flags & B_READ) == 0;
 	async = (flags & B_ASYNC) != 0;
@@ -2000,11 +1937,7 @@ uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
 		bp->b_flags |= B_CALL | (curproc == uvm.pagedaemon_proc ?
 		    B_PDAEMON : 0);
 		bp->b_iodone = uvm_aio_biodone;
-		UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0);
 	}
-	UVMHIST_LOG(pdhist,
-	    "about to start io: data = %p blkno = 0x%lx, bcount = %ld",
-	    bp->b_data, bp->b_blkno, bp->b_bcount, 0);
 
 	/*
 	 * now we start the I/O, and if async, return.
@@ -2084,7 +2017,6 @@ uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
 	/*
 	 * finally return.
 	 */
-	UVMHIST_LOG(pdhist, "<- done (sync) result=%ld", result, 0, 0, 0);
 	return (result);
 }
 
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index a4ed3bd71d6..026ba38dfbf 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_vnode.c,v 1.73 2011/07/02 15:52:25 thib Exp $	*/
+/*	$OpenBSD: uvm_vnode.c,v 1.74 2011/07/03 18:34:14 oga Exp $	*/
 /*	$NetBSD: uvm_vnode.c,v 1.36 2000/11/24 20:34:01 chs Exp $	*/
 
 /*
@@ -154,9 +154,6 @@ uvn_attach(void *arg, vm_prot_t accessprot)
 	int oldflags, result;
 	struct partinfo pi;
 	u_quad_t used_vnode_size;
-	UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "(vn=%p)", arg,0,0,0);
 
 	used_vnode_size = (u_quad_t)0;	/* XXX gcc -Wuninitialized */
 
@@ -166,11 +163,9 @@ uvn_attach(void *arg, vm_prot_t accessprot)
 	simple_lock(&uvn->u_obj.vmobjlock);
 	while (uvn->u_flags & UVM_VNODE_BLOCKED) {
 		uvn->u_flags |= UVM_VNODE_WANTED;
-		UVMHIST_LOG(maphist, " SLEEPING on blocked vn",0,0,0,0);
 		UVM_UNLOCK_AND_WAIT(uvn, &uvn->u_obj.vmobjlock, FALSE,
 		    "uvn_attach", 0);
 		simple_lock(&uvn->u_obj.vmobjlock);
-		UVMHIST_LOG(maphist," WOKE UP",0,0,0,0);
 	}
 
 	/*
@@ -178,7 +173,6 @@ uvn_attach(void *arg, vm_prot_t accessprot)
 	 */
 	if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
 		simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */
-		UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0);
 		return(NULL);
 	}
 
@@ -193,8 +187,6 @@ uvn_attach(void *arg, vm_prot_t accessprot)
 		/* regain vref if we were persisting */
 		if (uvn->u_obj.uo_refs == 0) {
 			vref(vp);
-			UVMHIST_LOG(maphist," vref (reclaim persisting vnode)",
-			    0,0,0,0);
 		}
 		uvn->u_obj.uo_refs++;		/* bump uvn ref! */
 
@@ -208,8 +200,6 @@ uvn_attach(void *arg, vm_prot_t accessprot)
 
 		/* unlock and return */
 		simple_unlock(&uvn->u_obj.vmobjlock);
-		UVMHIST_LOG(maphist,"<- done, refcnt=%ld", uvn->u_obj.uo_refs,
-		    0, 0, 0);
 		return (&uvn->u_obj);
 	}
 
@@ -255,7 +245,6 @@ uvn_attach(void *arg, vm_prot_t accessprot)
 			wakeup(uvn);
 		uvn->u_flags = 0;
 		simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */
-		UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0);
 		return(NULL);
 	}
 
@@ -293,7 +282,6 @@ uvn_attach(void *arg, vm_prot_t accessprot)
 	if (oldflags & UVM_VNODE_WANTED)
 		wakeup(uvn);
 
-	UVMHIST_LOG(maphist,"<- done/vref, ret %p", &uvn->u_obj,0,0,0);
 	return(&uvn->u_obj);
 }
 
@@ -316,7 +304,6 @@ uvn_reference(struct uvm_object *uobj)
 #ifdef DEBUG
 	struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
 #endif
-	UVMHIST_FUNC("uvn_reference"); UVMHIST_CALLED(maphist);
 
 	simple_lock(&uobj->vmobjlock);
 #ifdef DEBUG
@@ -327,8 +314,6 @@ uvn_reference(struct uvm_object *uobj)
 	}
 #endif
 	uobj->uo_refs++;
-	UVMHIST_LOG(maphist, "<- done (uobj=%p, ref = %ld)",
-	    uobj, uobj->uo_refs,0,0);
 	simple_unlock(&uobj->vmobjlock);
 }
 
@@ -347,15 +332,11 @@ uvn_detach(struct uvm_object *uobj)
 	struct uvm_vnode *uvn;
 	struct vnode *vp;
 	int oldflags;
-	UVMHIST_FUNC("uvn_detach"); UVMHIST_CALLED(maphist);
 
 	simple_lock(&uobj->vmobjlock);
 
-	UVMHIST_LOG(maphist,"  (uobj=%p)  ref=%ld", uobj,uobj->uo_refs,0,0);
-
 	uobj->uo_refs--;			/* drop ref! */
 	if (uobj->uo_refs) {			/* still more refs */
 		simple_unlock(&uobj->vmobjlock);
-		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
 		return;
 	}
 
@@ -382,7 +363,6 @@ uvn_detach(struct uvm_object *uobj)
 		uvn_flush(uobj, 0, 0, PGO_DEACTIVATE|PGO_ALLPAGES);
 		simple_unlock(&uobj->vmobjlock);
 		vrele(vp);			/* drop vnode reference */
-		UVMHIST_LOG(maphist,"<- done/vrele!  (persist)", 0,0,0,0);
 		return;
 	}
 
@@ -390,8 +370,6 @@ uvn_detach(struct uvm_object *uobj)
 	 * its a goner!
 	 */
 
-	UVMHIST_LOG(maphist,"  its a goner (flushing)!", 0,0,0,0);
-
 	uvn->u_flags |= UVM_VNODE_DYING;
 
 	/*
@@ -406,8 +384,6 @@ uvn_detach(struct uvm_object *uobj)
 
 	(void) uvn_flush(uobj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES);
 
-	UVMHIST_LOG(maphist,"  its a goner (done flush)!", 0,0,0,0);
-
 	/*
 	 * given the structure of this pager, the above flush request will
 	 * create the following state: all the pages that were in the object
@@ -449,7 +425,6 @@ uvn_detach(struct uvm_object *uobj)
 	 * drop our reference to the vnode.
 	 */
 	vrele(vp);
-	UVMHIST_LOG(maphist,"<- done (vrele) final", 0,0,0,0);
 
 	return;
 }
 
@@ -485,17 +460,13 @@ uvm_vnp_terminate(struct vnode *vp)
 {
 	struct uvm_vnode *uvn = &vp->v_uvm;
 	int oldflags;
-	UVMHIST_FUNC("uvm_vnp_terminate"); UVMHIST_CALLED(maphist);
 
 	/*
 	 * lock object and check if it is valid
 	 */
 	simple_lock(&uvn->u_obj.vmobjlock);
-	UVMHIST_LOG(maphist, " vp=%p, ref=%ld, flag=0x%lx", vp,
-	    uvn->u_obj.uo_refs, uvn->u_flags, 0);
 	if ((uvn->u_flags & UVM_VNODE_VALID) == 0) {
 		simple_unlock(&uvn->u_obj.vmobjlock);
-		UVMHIST_LOG(maphist, "<- done (not active)", 0, 0, 0, 0);
 		return;
 	}
 
@@ -608,7 +579,6 @@ uvm_vnp_terminate(struct vnode *vp)
 		wakeup(uvn);		/* object lock still held */
 
 	simple_unlock(&uvn->u_obj.vmobjlock);
-	UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0);
 }
 
 
@@ -694,7 +664,6 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
 	int npages, result, lcv;
 	boolean_t retval, need_iosync, needs_clean;
 	voff_t curoff;
-	UVMHIST_FUNC("uvn_flush"); UVMHIST_CALLED(maphist);
 
 	/*
 	 * get init vals and determine how we are going to traverse object
@@ -710,10 +679,6 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
 		stop = MIN(round_page(stop), round_page(uvn->u_size));
 	}
 
-	UVMHIST_LOG(maphist,
-	    " flush start=0x%lx, stop=0x%lx, flags=0x%lx",
-	    (u_long)start, (u_long)stop, flags, 0);
-
 	/*
 	 * PG_CLEANCHK: this bit is used by the pgo_mk_pcluster function as
 	 * a _hint_ as to how up to date the PG_CLEAN bit is.   if the hint
@@ -957,8 +922,6 @@ ReTry:
 	 * now wait for all I/O if required.
 	 */
 	if (need_iosync) {
-
-		UVMHIST_LOG(maphist,"  <<DOING IOSYNC>>",0,0,0,0);
 		while (uvn->u_nio != 0) {
 			uvn->u_flags |= UVM_VNODE_IOSYNC;
 			UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock,
@@ -971,7 +934,6 @@ ReTry:
 	}
 
 	/* return, with object locked! */
-	UVMHIST_LOG(maphist,"<- done (retval=0x%lx)",retval,0,0,0);
 	return(retval);
 }
 
@@ -1048,8 +1010,6 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
 	struct vm_page *ptmp;
 	int lcv, result, gotpages;
 	boolean_t done;
-	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist, "flags=%ld", flags,0,0,0);
 
 	/*
 	 * step 1: handled the case where fault data structures are locked.
@@ -1296,9 +1256,6 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
 	off_t file_offset;
 	int waitf, result, mapinflags;
 	size_t got, wanted;
-	UVMHIST_FUNC("uvn_io"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "rw=%ld", rw,0,0,0);
 
 	/*
 	 * init values
@@ -1315,7 +1272,6 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
 	while (uvn->u_flags & UVM_VNODE_IOSYNC) {
 		if (waitf == M_NOWAIT) {
 			simple_unlock(&uvn->u_obj.vmobjlock);
-			UVMHIST_LOG(maphist,"<- try again (iosync)",0,0,0,0);
 			return(VM_PAGER_AGAIN);
 		}
 		uvn->u_flags |= UVM_VNODE_IOSYNCWANTED;
@@ -1329,9 +1285,8 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
 	 */
 
 	if (file_offset >= uvn->u_size) {
-		simple_unlock(&uvn->u_obj.vmobjlock);
-		UVMHIST_LOG(maphist,"<- BAD (size check)",0,0,0,0);
-		return(VM_PAGER_BAD);
+		simple_unlock(&uvn->u_obj.vmobjlock);
+		return(VM_PAGER_BAD);
 	}
 
 	/*
@@ -1344,7 +1299,6 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
 	kva = uvm_pagermapin(pps, npages, mapinflags);
 	if (kva == 0 && waitf == M_NOWAIT) {
 		simple_unlock(&uvn->u_obj.vmobjlock);
-		UVMHIST_LOG(maphist,"<- mapin failed (try again)",0,0,0,0);
 		return(VM_PAGER_AGAIN);
 	}
 
@@ -1388,8 +1342,6 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
 	 * do the I/O!  (XXX: curproc?)
 	 */
 
-	UVMHIST_LOG(maphist, "calling VOP",0,0,0,0);
-
 	/*
 	 * This process may already have this vnode locked, if we faulted in
 	 * copyin() or copyout() on a region backed by this vnode
@@ -1417,8 +1369,6 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
 
 	/* NOTE: vnode now unlocked (unless vnislocked) */
 
-	UVMHIST_LOG(maphist, "done calling VOP",0,0,0,0);
-
 	/*
 	 * result == unix style errno (0 == OK!)
 	 *
@@ -1459,7 +1409,6 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
 	 * done!
 	 */
 
-	UVMHIST_LOG(maphist, "<- done (result %ld)", result,0,0,0);
 	if (result == 0)
 		return(VM_PAGER_OK);
 	else