author | Miod Vallat <miod@cvs.openbsd.org> | 2007-11-20 21:48:18 +0000
committer | Miod Vallat <miod@cvs.openbsd.org> | 2007-11-20 21:48:18 +0000
commit | d2aac05e85cb60d1c8a4139f77d81c24ff95eaf8 (patch)
tree | 207236482316c1ce88f7a7b98e9b9eb56981ae56 /sys/arch
parent | 404c89ab454f40f47c1918e5410d1719b84bd4b5 (diff)
Overhaul of the pmap debug messages. They now provide slightly better
information and are easier to parse.
While there, a few checks for conditions that can never happen move from
option DIAGNOSTIC to option PMAPDEBUG.
Diffstat (limited to 'sys/arch')
-rw-r--r-- | sys/arch/m88k/m88k/pmap.c | 355
1 file changed, 167 insertions, 188 deletions
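The debug statements in this commit all follow one convention: a single global bitmask (renamed from pmap_con_dbg to pmap_debug) selects which pmap operations are traced, and OR-ing CD_FULL into the mask enables the more verbose messages guarded by `(pmap_debug & (CD_x | CD_FULL)) == (CD_x | CD_FULL)`. The standalone sketch below illustrates that pattern outside the kernel; it is not code from the commit, the CD_FULL value is an assumption made for illustration, and only the CD_* values that are visible in the hunks below are real.

```c
/*
 * Sketch of the pmap_debug bitmask convention used in the diff below.
 * Not kernel code: CD_FULL's value is assumed; the other flag values
 * are the ones defined in the hunks that follow.
 */
#include <stdio.h>

#define CD_FULL		0x0000002	/* assumed: "be verbose" modifier */
#define CD_CBIT		0x0040000	/* pmap_changebit */
#define CD_TBIT		0x0080000	/* pmap_testbit */
#define CD_USBIT	0x0100000	/* pmap_unsetbit */
#define CD_COPY		0x0200000	/* pmap_copy_page (new in this commit) */
#define CD_ZERO		0x0400000	/* pmap_zero_page (new in this commit) */

int pmap_debug = CD_ZERO | CD_COPY;	/* trace page zeroing and copying only */

static void
pmap_zero_page_traced(void *pg)
{
	/* terse trace: printed whenever the CD_ZERO bit is set */
	if (pmap_debug & CD_ZERO)
		printf("pmap_zero_page(%p)\n", pg);
}

static void
pmap_testbit_traced(void *pg, int bit)
{
	/* verbose trace: requires both the CD_TBIT and the CD_FULL bits */
	if ((pmap_debug & (CD_TBIT | CD_FULL)) == (CD_TBIT | CD_FULL))
		printf("pmap_testbit(%p, %x): detailed trace\n", pg, bit);
	else if (pmap_debug & CD_TBIT)
		printf("pmap_testbit(%p, %x)\n", pg, bit);
}

int
main(void)
{
	int dummy;

	pmap_zero_page_traced(&dummy);		/* prints: CD_ZERO is set */
	pmap_testbit_traced(&dummy, 0x1);	/* silent: CD_TBIT is not set */
	return 0;
}
```

Run as an ordinary userland program, the first call prints its trace line and the second stays silent, which mirrors how a kernel built with option PMAPDEBUG behaves once pmap_debug is set.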
diff --git a/sys/arch/m88k/m88k/pmap.c b/sys/arch/m88k/m88k/pmap.c
index d2f6e88e62d..f3346e70fff 100644
--- a/sys/arch/m88k/m88k/pmap.c
+++ b/sys/arch/m88k/m88k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.33 2007/11/14 23:12:46 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.34 2007/11/20 21:48:17 miod Exp $ */
 /*
  * Copyright (c) 2001-2004, Miodrag Vallat
  * Copyright (c) 1998-2001 Steve Murphree, Jr.
@@ -77,7 +77,7 @@ extern vaddr_t last_addr;
 #define SETBIT_CPUSET(cpu_number, cpuset) (*(cpuset)) |= (1 << (cpu_number));
 #define CLRBIT_CPUSET(cpu_number, cpuset) (*(cpuset)) &= ~(1 << (cpu_number));
 
-#ifdef DEBUG
+#ifdef PMAPDEBUG
 /*
  * Static variables, functions and variables for debugging
  */
@@ -105,11 +105,13 @@
 #define CD_CBIT		0x0040000	/* pmap_changebit */
 #define CD_TBIT		0x0080000	/* pmap_testbit */
 #define CD_USBIT	0x0100000	/* pmap_unsetbit */
-#define CD_ALL		0x0FFFFFC
+#define CD_COPY		0x0200000	/* pmap_copy_page */
+#define CD_ZERO		0x0400000	/* pmap_zero_page */
+#define CD_ALL		0x0fffffc
 
-int pmap_con_dbg = 0;
+int pmap_debug = 0;
 
-#endif /* DEBUG */
+#endif /* PMAPDEBUG */
 
 struct pool pmappool, pvpool;
 
@@ -295,16 +297,16 @@ pmap_expand_kmap(vaddr_t virt, vm_prot_t prot, int canfail)
 	sdt_entry_t template, *sdt;
 	kpdt_entry_t kpdt_ent;
 
-#ifdef DEBUG
-	if ((pmap_con_dbg & (CD_KMAP | CD_FULL)) == (CD_KMAP | CD_FULL))
-		printf("(pmap_expand_kmap: %x) v %x\n", curproc, virt);
+#ifdef PMAPDEBUG
+	if ((pmap_debug & (CD_KMAP | CD_FULL)) == (CD_KMAP | CD_FULL))
+		printf("pmap_expand_kmap(%p, %x, %d)\n", virt, prot, canfail);
 #endif
 
 	template = m88k_protection(prot) | PG_M | SG_V;
 
 	/* segment table entry derivate from map and virt. */
 	sdt = SDTENT(kernel_pmap, virt);
-#ifdef DEBUG
+#ifdef PMAPDEBUG
 	if (SDT_VALID(sdt))
 		panic("pmap_expand_kmap: segment table entry VALID");
 #endif
@@ -375,17 +377,16 @@ pmap_map(vaddr_t virt, paddr_t start, paddr_t end, vm_prot_t prot, u_int cmode)
 	pt_entry_t template, *pte;
 	paddr_t page;
 
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_MAP)
-		printf ("(pmap_map: %x) phys address from %x to %x mapped at virtual %x, prot %x cmode %x\n",
-		    curproc, start, end, virt, prot, cmode);
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_MAP)
+		printf("pmap_map(%p, %p, %p, %x, %x)\n",
+		    virt, start, end, prot, cmode);
 #endif
 
-#ifdef DEBUG
+#ifdef PMAPDEBUG
 	/* Check for zero if we map the very end of the address space... */
-	if (start > end && end != 0) {
+	if (start > end && end != 0)
 		panic("pmap_map: start greater than end address");
-	}
 #endif
 
 	template = m88k_protection(prot) | cmode | PG_V;
@@ -401,10 +402,10 @@ pmap_map(vaddr_t virt, paddr_t start, paddr_t end, vm_prot_t prot, u_int cmode)
 			pte = pmap_expand_kmap(virt,
 			    VM_PROT_READ | VM_PROT_WRITE, 0);
 
-#ifdef DEBUG
-		if ((pmap_con_dbg & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
+#ifdef PMAPDEBUG
+		if ((pmap_debug & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
 			if (PDT_VALID(pte))
-				printf("(pmap_map: %x) pte @ %p already valid\n", curproc, pte);
+				printf("pmap_map: pte @%p already valid\n", pte);
 #endif
 
 		*pte = template | page;
@@ -447,18 +448,10 @@ pmap_cache_ctrl(pmap_t pmap, vaddr_t s, vaddr_t e, u_int mode)
 	paddr_t pa;
 	cpuid_t cpu;
 
-#ifdef DEBUG
-	if ((mode & CACHE_MASK) != mode) {
-		printf("(cache_ctrl) illegal mode %x\n", mode);
-		return;
-	}
-	if (pmap_con_dbg & CD_CACHE) {
-		printf("(pmap_cache_ctrl: %x) pmap %x, va %x, mode %x\n", curproc, pmap, s, mode);
-	}
-
-	if (pmap == NULL)
-		panic("pmap_cache_ctrl: pmap is NULL");
-#endif /* DEBUG */
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_CACHE)
+		printf("pmap_cache_ctrl(%p, %p, %p, %x)\n", pmap, s, e, mode);
+#endif
 
 	spl = splvm();
 	PMAP_LOCK(pmap);
@@ -466,11 +459,10 @@ pmap_cache_ctrl(pmap_t pmap, vaddr_t s, vaddr_t e, u_int mode)
 	for (va = s; va != e; va += PAGE_SIZE) {
 		if ((pte = pmap_pte(pmap, va)) == NULL)
 			continue;
-#ifdef DEBUG
-		if (pmap_con_dbg & CD_CACHE) {
-			printf("(cache_ctrl) pte@%p\n", pte);
-		}
-#endif /* DEBUG */
+#ifdef PMAPDEBUG
+		if ((pmap_debug & (CD_CACHE | CD_FULL)) == (CD_CACHE | CD_FULL))
+			printf("cache_ctrl: pte@%p\n", pte);
+#endif
 		/*
 		 * Invalidate pte temporarily to avoid being written back
 		 * the modified bit and/or the reference bit by any other cpu.
@@ -590,8 +582,8 @@ pmap_bootstrap(vaddr_t load_start)
 	 */
 	kernel_pmap_size = 2 * SDT_SIZE;
 
-#ifdef DEBUG
-	printf("kernel segment table size = 0x%x\n", kernel_pmap_size);
+#ifdef PMAPDEBUG
+	printf("kernel segment table size = %p\n", kernel_pmap_size);
 #endif
 	/* init all segment descriptors to zero */
 	bzero(kernel_pmap->pm_stab, kernel_pmap_size);
@@ -619,13 +611,11 @@ pmap_bootstrap(vaddr_t load_start)
 	/* init all page descriptors to zero */
 	bzero((void *)kpdt_phys, pdt_size);
 
-#ifdef DEBUG
-	printf("--------------------------------------\n");
-	printf(" kernel page start = 0x%x\n", kpdt_phys);
-	printf(" kernel page table size = 0x%x\n", pdt_size);
-	printf(" kernel page end = 0x%x\n", avail_start);
-
-	printf("kpdt_virt = 0x%x\n", kpdt_virt);
+#ifdef PMAPDEBUG
+	printf("kernel page start = %p\n", kpdt_phys);
+	printf("kernel page table size = %p\n", pdt_size);
+	printf("kernel page end = %p\n", avail_start);
+	printf("kpdt_virt = %p\n", kpdt_virt);
 #endif
 	/*
 	 * init the kpdt queue
@@ -742,7 +732,7 @@ pmap_bootstrap_cpu(cpuid_t cpu)
 	/* Load supervisor pointer to segment table.
 	 */
 	cmmu_set_sapr(cpu, kernel_pmap->pm_apr);
-#ifdef DEBUG
+#ifdef PMAPDEBUG
 	printf("cpu%d: running virtual\n", cpu);
 #endif
 	SETBIT_CPUSET(cpu, &kernel_pmap->pm_cpus);
@@ -764,8 +754,8 @@ pmap_bootstrap_cpu(cpuid_t cpu)
 void
 pmap_init(void)
 {
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_INIT)
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_INIT)
 		printf("pmap_init()\n");
 #endif
 
@@ -802,6 +792,11 @@ pmap_zero_page(struct vm_page *pg)
 	int cpu = cpu_number();
 	pt_entry_t *pte;
 
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_ZERO)
+		printf("pmap_zero_page(%p) pa %p\n", pg, pa);
+#endif
+
 	va = (vaddr_t)(phys_map_vaddr + 2 * (cpu << PAGE_SHIFT));
 	pte = pmap_pte(kernel_pmap, va);
 
@@ -853,12 +848,6 @@ pmap_create(void)
 	 * Allocate memory for *actual* segment table and *shadow* table.
 	 */
 	s = round_page(2 * SDT_SIZE);
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_CREAT) {
-		printf("(pmap_create: %x) need %d pages for sdt\n",
-		    curproc, atop(s));
-	}
-#endif
 
 	segdt = (sdt_entry_t *)uvm_km_zalloc(kernel_map, s);
 	if (segdt == NULL)
@@ -873,15 +862,10 @@ pmap_create(void)
 		panic("pmap_create: pmap_extract failed!");
 	pmap->pm_apr = (atop(stpa) << PG_SHIFT) | CACHE_GLOBAL | APR_V;
 
-#ifdef DEBUG
-	if (stpa & PAGE_MASK)
-		panic("pmap_create: sdt_table 0x%x not aligned on page boundary",
-		    (int)stpa);
-
-	if (pmap_con_dbg & CD_CREAT) {
-		printf("(pmap_create: %x) pmap=%p, pm_stab=0x%x (pa 0x%x)\n",
-		    curproc, pmap, pmap->pm_stab, stpa);
-	}
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_CREAT)
+		printf("pmap_create() -> pmap %p, pm_stab %p (pa %p)\n",
+		    pmap, pmap->pm_stab, stpa);
 #endif
 
 	/* memory for page tables should not be writeback or local */
@@ -937,19 +921,19 @@ pmap_release(pmap_t pmap)
 	sdt_entry_t *sdttbl;	/* ptr to first entry in the segment table */
 	pt_entry_t *gdttbl;	/* ptr to first entry in a page table */
 
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_FREE)
-		printf("(pmap_release: %x) pmap %x\n", curproc, pmap);
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_FREE)
+		printf("pmap_release(%p)\n", pmap);
 #endif
 
 	/* segment table loop */
 	for (sdt = VM_MIN_ADDRESS >> SDT_SHIFT;
 	    sdt <= VM_MAX_ADDRESS >> SDT_SHIFT; sdt++) {
 		if ((gdttbl = pmap_pte(pmap, sdt << SDT_SHIFT)) != NULL) {
-#ifdef DEBUG
-			if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
-				printf("(pmap_release: %x) free page table = 0x%x\n",
-				    curproc, gdttbl);
+#ifdef PMAPDEBUG
+			if ((pmap_debug & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
+				printf("pmap_release(%p) free page table %p\n",
+				    pmap, gdttbl);
 #endif
 			uvm_km_free(kernel_map, (vaddr_t)gdttbl, PAGE_SIZE);
 		}
@@ -959,17 +943,12 @@ pmap_release(pmap_t pmap)
 	 * Freeing both *actual* and *shadow* segment tables
 	 */
 	sdttbl = pmap->pm_stab;		/* addr of segment table */
-#ifdef DEBUG
-	if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
-		printf("(pmap_release: %x) free segment table = 0x%x\n",
-		    curproc, sdttbl);
+#ifdef PMAPDEBUG
+	if ((pmap_debug & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
+		printf("(pmap_release(%p) free segment table %p\n",
+		    pmap, sdttbl);
 #endif
 	uvm_km_free(kernel_map, (vaddr_t)sdttbl, round_page(2 * SDT_SIZE));
-
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_FREE)
-		printf("(pmap_release: %x) pm_count = 0\n", curproc);
-#endif
 }
 
 /*
@@ -999,11 +978,6 @@ pmap_destroy(pmap_t pmap)
 {
 	int count;
 
-#ifdef DEBUG
-	if (pmap == kernel_pmap)
-		panic("pmap_destroy: Attempt to destroy kernel pmap");
-#endif
-
 	PMAP_LOCK(pmap);
 	count = --pmap->pm_count;
 	PMAP_UNLOCK(pmap);
@@ -1072,13 +1046,9 @@ pmap_remove_pte(pmap_t pmap, vaddr_t va, pt_entry_t *pte)
 	struct vm_page *pg;
 	paddr_t pa;
 
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_RM) {
-		if (pmap == kernel_pmap)
-			printf("(pmap_remove_pte: %x) pmap kernel va %x\n", curproc, va);
-		else
-			printf("(pmap_remove_pte: %x) pmap %x va %x\n", curproc, pmap, va);
-	}
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_RM)
+		printf("pmap_remove_pte(%p, %p)\n", pmap, va);
 #endif
 
 	if (pte == NULL || !PDT_VALID(pte)) {
@@ -1187,13 +1157,9 @@ pmap_remove_range(pmap_t pmap, vaddr_t s, vaddr_t e)
 {
 	vaddr_t va, eseg;
 
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_RM) {
-		if (pmap == kernel_pmap)
-			printf("(pmap_remove_range: %x) pmap kernel s %x e %x\n", curproc, s, e);
-		else
-			printf("(pmap_remove_range: %x) pmap %x s %x e %x\n", curproc, pmap, s, e);
-	}
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_RM)
+		printf("pmap_remove_range(%p, %p, %p)\n", pmap, s, e);
 #endif
 
 	/*
@@ -1251,11 +1217,6 @@ pmap_remove(pmap_t pmap, vaddr_t s, vaddr_t e)
 	if (pmap == NULL)
 		return;
 
-#ifdef DEBUG
-	if (s >= e)
-		panic("pmap_remove: start greater than end address");
-#endif
-
 	spl = splvm();
 	PMAP_LOCK(pmap);
 	pmap_remove_range(pmap, s, e);
@@ -1306,16 +1267,16 @@ pmap_remove_all(struct vm_page *pg)
 
 	if (pg == NULL) {
 		/* not a managed page. */
-#ifdef DEBUG
-		if (pmap_con_dbg & CD_RMAL)
-			printf("(pmap_remove_all: %x) vm page 0x%x not a managed page\n", curproc, pg);
+#ifdef PMAPDEBUG
+		if (pmap_debug & CD_RMAL)
+			printf("pmap_remove_all(%p): not a managed page\n", pg);
 #endif
 		return;
 	}
 
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_RMAL)
-		printf("(pmap_remove_all: %x) va %x\n", curproc, pg, pg_to_pvh(pg)->pv_va);
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_RMAL)
+		printf("pmap_remove_all(%p)\n", pg);
 #endif
 
 	spl = splvm();
@@ -1347,9 +1308,9 @@ remove_all_Retry:
 			goto next;	/* no page mapping */
 		}
 		if (pmap_pte_w(pte)) {
-#ifdef DEBUG
-			if (pmap_con_dbg & CD_RMAL)
-				printf("pmap_remove_all: wired mapping for %lx not removed\n",
+#ifdef PMAPDEBUG
+			if (pmap_debug & CD_RMAL)
+				printf("pmap_remove_all(%p): wired mapping not removed\n",
 				    pg);
 #endif
 			pvl = pvl->pv_next;
@@ -1492,9 +1453,9 @@ pmap_expand(pmap_t pmap, vaddr_t v)
 	sdt_entry_t *sdt;
 	pt_entry_t *pte;
 
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_EXP)
-		printf ("(pmap_expand: %x) map %x v %x\n", curproc, pmap, v);
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_EXP)
+		printf ("pmap_expand(%p, %p)\n", pmap, v);
 #endif
 
 	/* XXX */
@@ -1517,9 +1478,10 @@ pmap_expand(pmap_t pmap, vaddr_t v)
 		PMAP_UNLOCK(pmap);
 		uvm_km_free(kernel_map, pdt_vaddr, PAGE_SIZE);
 
-#ifdef DEBUG
-		if (pmap_con_dbg & CD_EXP)
-			printf("(pmap_expand: %x) table has already been allocated\n", curproc);
+#ifdef PMAPDEBUG
+		if (pmap_debug & CD_EXP)
+			printf("pmap_expand(%p, %p): table has already been allocated\n",
+			    pmap, v);
 #endif
 		splx(spl);
 		return;
@@ -1621,13 +1583,10 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 	boolean_t wired = (flags & PMAP_WIRED) != 0;
 	struct vm_page *pg;
 
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_ENT) {
-		if (pmap == kernel_pmap)
-			printf("(pmap_enter: %x) pmap kernel va %x pa %x\n", curproc, va, pa);
-		else
-			printf("(pmap_enter: %x) pmap %x va %x pa %x\n", curproc, pmap, va, pa);
-	}
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_ENT)
+		printf("pmap_enter(%p, %p, %p, %x, %x)\n",
+		    pmap, va, pa, prot, flags);
 #endif
 
 	template = m88k_protection(prot);
@@ -1642,8 +1601,13 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 	if (pmap == kernel_pmap) {
 		/* will only return NULL if PMAP_CANFAIL is set */
 		if (pmap_expand_kmap(va, VM_PROT_READ | VM_PROT_WRITE,
-		    flags & PMAP_CANFAIL) == NULL)
+		    flags & PMAP_CANFAIL) == NULL) {
+#ifdef PMAPDEBUG
+			if (pmap_debug & CD_ENT)
+				printf("failed (ENOMEM)\n");
+#endif
 			return (ENOMEM);
+		}
 	} else {
 		/*
 		 * Must unlock to expand the pmap.
@@ -1657,9 +1621,9 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 	 * Special case if the physical page is already mapped at this address.
 	 */
 	old_pa = ptoa(PG_PFNUM(*pte));
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_ENT)
-		printf("(pmap_enter) old_pa %x pte %x\n", old_pa, *pte);
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_ENT)
+		printf("pmap_enter: old_pa %p pte %p\n", old_pa, *pte);
 #endif
 
 	pg = PHYS_TO_VM_PAGE(pa);
@@ -1691,7 +1655,6 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 			pvl->pv_pmap = pmap;
 			pvl->pv_next = NULL;
 			pvl->pv_flags = 0;
-
 		} else {
 			/*
 			 * Add new pv_entry after header.
@@ -1745,9 +1708,9 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 	template |= invalidate_pte(pte) & PG_M_U;
 	*pte = template | pa;
 	flush_atc_entry(pmap, va);
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_ENT)
-		printf("(pmap_enter) set pte to %x\n", *pte);
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_ENT)
+		printf("pmap_enter: new pte %p\n", *pte);
 #endif
 
 	/*
@@ -1911,9 +1874,9 @@ pmap_collect(pmap_t pmap)
 					   in a page table's address range */
 	int spl;
 
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_COL)
-		printf ("(pmap_collect: %x) pmap %x\n", curproc, pmap);
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_COL)
+		printf ("pmap_collect(%p)\n", pmap);
 #endif
 
 	spl = splvm();
@@ -1964,9 +1927,9 @@ pmap_collect(pmap_t pmap)
 	PMAP_UNLOCK(pmap);
 	splx(spl);
 
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_COL)
-		printf("(pmap_collect: %x) done\n", curproc);
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_COL)
+		printf("pmap_collect(%p) done\n", pmap);
 #endif
 }
 /*
@@ -1993,9 +1956,9 @@ pmap_activate(struct proc *p)
 	pmap_t pmap = vm_map_pmap(&p->p_vmspace->vm_map);
 	int cpu = cpu_number();
 
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_ACTIVATE)
-		printf("(pmap_activate: %x) pmap %p\n", p, pmap);
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_ACTIVATE)
+		printf("pmap_activate(%p) pmap %p\n", p, pmap);
 #endif
 
 	if (pmap != kernel_pmap) {
@@ -2071,6 +2034,12 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
 	pt_entry_t *dstpte, *srcpte;
 	int cpu = cpu_number();
 
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_COPY)
+		printf("pmap_copy_page(%p,%p) pa %p %p\n",
+		    srcpg, dstpg, src, dst);
+#endif
+
 	dstva = (vaddr_t)(phys_map_vaddr + 2 * (cpu << PAGE_SHIFT));
 	srcva = dstva + PAGE_SIZE;
 	dstpte = pmap_pte(kernel_pmap, dstva);
@@ -2134,6 +2103,11 @@ pmap_changebit(struct vm_page *pg, int set, int mask)
 	int spl;
 	vaddr_t va;
 
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_CBIT)
+		printf("pmap_changebit(%p, %x, %x)\n", pg, set, mask);
+#endif
+
 	spl = splvm();
 
 #if defined(MULTIPROCESSOR) && 0
@@ -2146,15 +2120,8 @@ changebit_Retry:
 	 */
 	pvl->pv_flags &= mask;
 
-	if (pvl->pv_pmap == NULL) {
-#ifdef DEBUG
-		if (pmap_con_dbg & CD_CBIT)
-			printf("(pmap_changebit: %x) vm page 0x%x not mapped\n",
-			    curproc, pg);
-#endif
-		splx(spl);
-		return;
-	}
+	if (pvl->pv_pmap == NULL)
+		goto done;
 
 	/* for each listed pmap, update the affected bits */
 	for (pvep = pvl; pvep != NULL; pvep = pvep->pv_next) {
@@ -2173,7 +2140,7 @@ changebit_Retry:
 		if (pte == NULL || !PDT_VALID(pte)) {
 			goto next;	/* no page mapping */
 		}
-#ifdef DIAGNOSTIC
+#ifdef PMAPDEBUG
 		if (ptoa(PG_PFNUM(*pte)) != VM_PAGE_TO_PHYS(pg))
 			panic("pmap_changebit: pte %x in pmap %p doesn't point to page %p %lx",
 			    *pte, pmap, pg, VM_PAGE_TO_PHYS(pg));
@@ -2199,6 +2166,8 @@ changebit_Retry:
 next:
 		PMAP_UNLOCK(pmap);
 	}
+
+done:
 	splx(spl);
 }
 
@@ -2235,6 +2204,11 @@ pmap_testbit(struct vm_page *pg, int bit)
 	pmap_t pmap;
 	int spl;
 
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_CBIT)
+		printf("pmap_testbit(%p, %x): ", pg, bit);
+#endif
+
 	spl = splvm();
 
 #if defined(MULTIPROCESSOR) && 0
@@ -2245,24 +2219,16 @@ testbit_Retry:
 
 	if (pvl->pv_flags & bit) {
 		/* we've already cached this flag for this page, no use looking further... */
-#ifdef DEBUG
-		if (pmap_con_dbg & CD_TBIT)
-			printf("(pmap_testbit: %x) already cached a %x flag for this page\n",
-			    curproc, bit);
+#ifdef PMAPDEBUG
+		if (pmap_debug & CD_TBIT)
+			printf("cached\n");
 #endif
 		splx(spl);
 		return (TRUE);
 	}
 
-	if (pvl->pv_pmap == NULL) {
-#ifdef DEBUG
-		if (pmap_con_dbg & CD_TBIT)
-			printf("(pmap_testbit: %x) vm page 0x%x not mapped\n",
-			    curproc, pg);
-#endif
-		splx(spl);
-		return (FALSE);
-	}
+	if (pvl->pv_pmap == NULL)
+		goto done;
 
 	/* for each listed pmap, check modified bit for given page */
 	for (pvep = pvl; pvep != NULL; pvep = pvep->pv_next) {
@@ -2278,7 +2244,7 @@ testbit_Retry:
 			goto next;
 		}
 
-#ifdef DIAGNOSTIC
+#ifdef PMAPDEBUG
 		if (ptoa(PG_PFNUM(*pte)) != VM_PAGE_TO_PHYS(pg))
 			panic("pmap_testbit: pte %x in pmap %p %d doesn't point to page %p %lx",
 			    *pte, pmap, pmap == kernel_pmap ? 1 : 0, pg, VM_PAGE_TO_PHYS(pg));
@@ -2287,9 +2253,12 @@ testbit_Retry:
 		if ((*pte & bit) != 0) {
 			PMAP_UNLOCK(pmap);
 			pvl->pv_flags |= bit;
-#ifdef DEBUG
-			if ((pmap_con_dbg & (CD_TBIT | CD_FULL)) == (CD_TBIT | CD_FULL))
-				printf("(pmap_testbit: %x) true on page pte@%p\n", curproc, pte);
+#ifdef PMAPDEBUG
+			if ((pmap_debug & (CD_TBIT | CD_FULL)) == (CD_TBIT | CD_FULL))
+				printf("found in pte %p @%p\n",
+				    *pte, pte);
+			else if (pmap_debug & CD_TBIT)
+				printf("found\n");
 #endif
 			splx(spl);
 			return (TRUE);
@@ -2298,6 +2267,11 @@ next:
 		PMAP_UNLOCK(pmap);
 	}
 
+done:
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_TBIT)
+		printf("not found\n");
+#endif
 	splx(spl);
 	return (FALSE);
 }
@@ -2323,6 +2297,11 @@ pmap_unsetbit(struct vm_page *pg, int bit)
 	int spl;
 	vaddr_t va;
 
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_USBIT)
+		printf("pmap_unsetbit(%p, %x): ", pg, bit);
+#endif
+
 	spl = splvm();
 
#if defined(MULTIPROCESSOR) && 0
@@ -2333,18 +2312,14 @@ unsetbit_Retry:
 	/*
 	 * Clear saved attributes
 	 */
-	pvl->pv_flags &= ~bit;
-
-	if (pvl->pv_pmap == NULL) {
-#ifdef DEBUG
-		if (pmap_con_dbg & CD_USBIT)
-			printf("(pmap_unsetbit: %x) vm page 0x%x not mapped\n",
-			    curproc, pg);
-#endif
-		splx(spl);
-		return (FALSE);
+	if (pvl->pv_flags & bit) {
+		pvl->pv_flags ^= bit;
+		rv = TRUE;
 	}
 
+	if (pvl->pv_pmap == NULL)
+		goto done;
+
 	/* for each listed pmap, update the specified bit */
 	for (pvep = pvl; pvep != NULL; pvep = pvep->pv_next) {
@@ -2362,7 +2337,7 @@ unsetbit_Retry:
 		if (pte == NULL || !PDT_VALID(pte)) {
 			goto next;	/* no page mapping */
 		}
-#ifdef DIAGNOSTIC
+#ifdef PMAPDEBUG
 		if (ptoa(PG_PFNUM(*pte)) != VM_PAGE_TO_PHYS(pg))
 			panic("pmap_unsetbit: pte %x in pmap %p doesn't point to page %p %lx",
 			    *pte, pmap, pg, VM_PAGE_TO_PHYS(pg));
@@ -2382,13 +2357,18 @@ unsetbit_Retry:
 			invalidate_pte(pte);
 			*pte = opte ^ bit;
 			flush_atc_entry(pmap, va);
-		} else rv = TRUE;
+		}
 next:
 		PMAP_UNLOCK(pmap);
 	}
 
 	splx(spl);
 
+done:
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_USBIT)
+		printf(rv ? "TRUE\n" : "FALSE\n");
+#endif
 	return (rv);
 }
 
@@ -2449,10 +2429,9 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
 	int spl;
 	pt_entry_t template, *pte;
 
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_ENT) {
-		printf ("(pmap_kenter_pa: %x) va %x pa %x\n", curproc, va, pa);
-	}
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_ENT)
+		printf ("pmap_kenter_pa(%p, %p, %x)\n", va, pa, prot);
 #endif
 
 	spl = splvm();
@@ -2498,9 +2477,9 @@ pmap_kremove(vaddr_t va, vsize_t len)
 	int spl;
 	vaddr_t e, eseg;
 
-#ifdef DEBUG
-	if (pmap_con_dbg & CD_RM)
-		printf("(pmap_kremove: %x) va %x len %x\n", curproc, va, len);
+#ifdef PMAPDEBUG
+	if (pmap_debug & CD_RM)
+		printf("pmap_kremove(%p, %x)\n", va, len);
 #endif
 
 	spl = splvm();