| author | Miod Vallat <miod@cvs.openbsd.org> | 2001-06-16 22:31:51 +0000 |
|---|---|---|
| committer | Miod Vallat <miod@cvs.openbsd.org> | 2001-06-16 22:31:51 +0000 |
| commit | 8e00246ca02ee16eadfe6c1bca034513ad439708 (patch) | |
| tree | addaf119f85185d80be7fba8f2e5d3824d5ee999 | |
| parent | fc1797c26959affbceb1bbecbad12b89f452a884 (diff) | |
More cleaning of unused code and incorrect comments.
Replace a ton of english typos with (fewer) miodish typos, too...
-rw-r--r-- | sys/arch/mvme88k/mvme88k/pmap.c | 893 |
1 files changed, 131 insertions, 762 deletions
diff --git a/sys/arch/mvme88k/mvme88k/pmap.c b/sys/arch/mvme88k/mvme88k/pmap.c index bfc7d3ccb57..0909c603e9b 100644 --- a/sys/arch/mvme88k/mvme88k/pmap.c +++ b/sys/arch/mvme88k/mvme88k/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.29 2001/06/14 21:30:46 miod Exp $ */ +/* $OpenBSD: pmap.c,v 1.30 2001/06/16 22:31:50 miod Exp $ */ /* * Copyright (c) 1996 Nivas Madhur * All rights reserved. @@ -65,6 +65,7 @@ #include <machine/asm_macro.h> #include <machine/assert.h> #include <machine/board.h> +#include <machine/cmmu.h> #include <machine/cpu_number.h> #include <machine/m882xx.h> /* CMMU stuff */ #include <machine/pmap_table.h> @@ -78,29 +79,20 @@ extern vm_offset_t avail_start, avail_next, avail_end; extern vm_offset_t virtual_avail, virtual_end; -extern vm_offset_t pcc2consvaddr; -extern vm_offset_t clconsvaddr; - -extern void *iomapbase; -int iiomapsize; - -extern int max_cpus; /* * Macros to operate cpus_using field */ #define SETBIT_CPUSET(cpu_number, cpuset) (*(cpuset)) |= (1 << (cpu_number)); #define CLRBIT_CPUSET(cpu_number, cpuset) (*(cpuset)) &= ~(1 << (cpu_number)); +#ifdef DEBUG /* * Static variables, functions and variables for debugging */ -#ifdef DEBUG - #define STATIC - #define DBG + /* * conditional debugging */ - #define CD_NONE 0x00 #define CD_NORM 0x01 #define CD_FULL 0x02 @@ -131,11 +123,8 @@ extern int max_cpus; #define CD_CHKPM 0x2000000 /* check_pmap_consistency */ #define CD_CHKM 0x4000000 /* check_map */ #define CD_ALL 0x0FFFFFC -int pmap_con_dbg = CD_NONE; -#else - - #define STATIC static +int pmap_con_dbg = CD_NONE; #endif /* DEBUG */ caddr_t vmmap; @@ -151,7 +140,7 @@ struct kpdt_entry { }; #define KPDT_ENTRY_NULL ((kpdt_entry_t)0) -STATIC kpdt_entry_t kpdt_free; +kpdt_entry_t kpdt_free; /* * MAX_KERNEL_VA_SIZE must fit into the virtual address space between @@ -185,12 +174,10 @@ STATIC kpdt_entry_t kpdt_free; /* * Two pages of scratch space. - * Used in copy_to_phys(), pmap_copy_page() and pmap_zero_page(). + * Used in pmap_copy_page() and pmap_zero_page(). */ vm_offset_t phys_map_vaddr1, phys_map_vaddr2; -#define PMAP_MAX 512 - /* * The Modify List * @@ -334,11 +321,6 @@ static vm_offset_t pmap_phys_end = (vm_offset_t) 0; void *etherbuf=NULL; int etherlen; -/* - * This variable extract vax's pmap.c. - * pmap_init initialize this. - * '90.7.17 Fuzzy - */ boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */ /* @@ -379,22 +361,16 @@ extern vm_offset_t obiova; /* * Rooutine: FLUSH_ATC_ENTRY * - * Author: N. Sugai - * * Function: * Flush atc(TLB) which maps given virtual address, in the CPUs which - * are spcified by 'users', using the function 'flush'. + * are specified by 'users', for the operating mode specified by + * 'kernel'. * * Parameters: * users bit patterns of the CPUs which may hold the TLB, and * should be flushed * va virtual address that should be flushed - * flush pointer to the function actually flushes the TLB - * - * Special Assumptions: - * (*flush)() has two arguments, 1st one specifies the CPU number, - * and 2nd one specifies the virtual address should be flushed. - * + * kernel nonzero if supervisor mode, zero if user mode */ void flush_atc_entry(long users, vm_offset_t va, int kernel) @@ -417,18 +393,10 @@ flush_atc_entry(long users, vm_offset_t va, int kernel) } /* - * Author: Joe Uemura * Convert machine-independent protection code to M88K protection bits. - * - * History: - * '90.8.3 Fuzzy - * if defined TEST, 'static' undeclared. - * '90.8.30 Fuzzy - * delete "if defined TEST, 'static' undeclared." 
- * */ -STATIC unsigned int +unsigned int m88k_protection(pmap_t map, vm_prot_t prot) { pte_template_t p; @@ -467,10 +435,12 @@ pmap_pte(pmap_t map, vm_offset_t virt) { sdt_entry_t *sdt; +#ifdef DIAGNOSTIC /*XXX will this change if physical memory is not contiguous? */ /* take a look at PDTIDX XXXnivas */ if (map == PMAP_NULL) panic("pmap_pte: pmap is NULL"); +#endif sdt = SDTENT(map,virt); /* @@ -492,7 +462,7 @@ pmap_pte(pmap_t map, vm_offset_t virt) * segment table entry, returning pointer to page table entry. This is * much like 'pmap_expand', except that table space is acquired * from an area set up by pmap_bootstrap, instead of through - * kmem_alloc. (Obviously, because kmem_alloc uses the kernel map + * uvm_km_zalloc. (Obviously, because uvm_km_zalloc uses the kernel map * for allocation - which we can't do when trying to expand the * kernel map!) Note that segment tables for the kernel map were * all allocated at pmap_bootstrap time, so we only need to worry @@ -520,7 +490,7 @@ pmap_pte(pmap_t map, vm_offset_t virt) * */ -STATIC pt_entry_t * +pt_entry_t * pmap_expand_kmap(vm_offset_t virt, vm_prot_t prot) { int aprot; @@ -562,10 +532,10 @@ pmap_expand_kmap(vm_offset_t virt, vm_prot_t prot) * mapped are not managed and are never unmapped. * * Parameters: - * virt virtual address of range to map (IN) - * start physical address of range to map (IN) - * end physical address of end of range (IN) - * prot protection attributes (IN) + * virt virtual address of range to map + * start physical address of range to map + * end physical address of end of range + * prot protection attributes * * Calls: * pmap_pte @@ -592,7 +562,9 @@ pmap_expand_kmap(vm_offset_t virt, vm_prot_t prot) * } * */ -void m197_load_patc(int, vm_offset_t, vm_offset_t, int); +#ifdef MVME197 +extern void m197_load_patc(int, vm_offset_t, vm_offset_t, int); +#endif vm_offset_t pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, vm_prot_t prot) @@ -604,7 +576,7 @@ pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, vm_prot_t prot) pt_entry_t *pte; pte_template_t template; #ifdef MVME197 - static unsigned i = 0; + static mvme197_atc_initialized = FALSE; #endif /* * cache mode is passed in the top 16 bits. @@ -643,12 +615,13 @@ pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, vm_prot_t prot) *pte = template.pte; #ifdef MVME197 /* hack for MVME197 */ - if (cputyp == CPU_197) { - if (i < 32) { + if (cputyp == CPU_197 && m197_atc_initialized == FALSE) { + int i; + + for (i = 0; i < 32; i++) m197_load_patc(i, virt, (vm_offset_t)template.bits, 1); - i++; - } + m197_atc_initialized = TRUE; } #endif virt += PAGE_SIZE; @@ -667,14 +640,14 @@ pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, vm_prot_t prot) * mapped are not managed and are never unmapped. 
* * Parameters: - * virt virtual address of range to map (IN) - * start physical address of range to map (IN) - * end physical address of end of range (IN) - * prot protection attributes (IN) - * cmode cache control attributes (IN) + * virt virtual address of range to map + * start physical address of range to map + * end physical address of end of range + * prot protection attributes + * cmode cache control attributes * * External & Global: - * batc_used number of BATC used (IN/OUT) + * batc_used number of BATC used * * Calls: * m88k_protection @@ -683,7 +656,6 @@ pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, vm_prot_t prot) * pmap_pte * pmap_expand_kmap * - * * For each page that needs mapping: * If both virt and phys are on the BATC block boundary, map using BATC. * Else make mapping in the same manner as pmap_map. @@ -846,7 +818,7 @@ pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode) register pte_template_t opte; #ifdef DEBUG - if ( mode & CACHE_MASK ) { + if (mode & CACHE_MASK) { printf("(cache_ctrl) illegal mode %x\n",mode); return; } @@ -855,15 +827,12 @@ pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode) } #endif /* DEBUG */ - if ( pmap == PMAP_NULL ) { + if (pmap == PMAP_NULL) { panic("pmap_cache_ctrl: pmap is NULL"); } PMAP_LOCK(pmap, spl); - /* - * - */ users = pmap->cpus_using; if (pmap == kernel_pmap) { kflush = 1; @@ -909,25 +878,25 @@ pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode) * Routine: PMAP_BOOTSTRAP * * Function: - * Bootstarp the system enough to run with virtual memory. + * Bootstrap the system enough to run with virtual memory. * Map the kernel's code and data, allocate the kernel * translation table space, and map control registers * and other IO addresses. * * Parameters: - * load_start PA where kernel was loaded (IN) - * &phys_start PA of first available physical page (IN/OUT) - * &phys_end PA of last available physical page (IN) + * load_start PA where kernel was loaded + * &phys_start PA of first available physical page + * &phys_end PA of last available physical page * &virtual_avail VA of first available page (after kernel bss) * &virtual_end VA of last available page (end of kernel address space) * * Extern/Global: * - * PAGE_SIZE VM (software) page size (IN) - * kernelstart start symbol of kernel text (IN) - * etext end of kernel text (IN) - * phys_map_vaddr1 VA of page mapped arbitrarily for debug/IO (OUT) - * phys_map_vaddr2 VA of page mapped arbitrarily for debug/IO (OUT) + * PAGE_SIZE VM (software) page size + * kernelstart start symbol of kernel text + * etext end of kernel text + * phys_map_vaddr1 VA of page mapped arbitrarily for debug/IO + * phys_map_vaddr2 VA of page mapped arbitrarily for debug/IO * * Calls: * simple_lock_init @@ -937,7 +906,7 @@ pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode) * The physical address 'load_start' is mapped at * VM_MIN_KERNEL_ADDRESS, which maps the kernel code and data at the * virtual address for which it was (presumably) linked. Immediately - * following the end of the kernel code/data, sufficent page of + * following the end of the kernel code/data, sufficient page of * physical memory are reserved to hold translation tables for the kernel * address space. The 'phys_start' parameter is adjusted upward to * reflect this allocation. 
This space is mapped in virtual memory @@ -954,11 +923,11 @@ pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode) */ void -pmap_bootstrap(vm_offset_t load_start, /* IN */ - vm_offset_t *phys_start, /* IN/OUT */ - vm_offset_t *phys_end, /* IN */ - vm_offset_t *virt_start, /* OUT */ - vm_offset_t *virt_end) /* OUT */ +pmap_bootstrap(vm_offset_t load_start, + vm_offset_t *phys_start, + vm_offset_t *phys_end, + vm_offset_t *virt_start, + vm_offset_t *virt_end) { kpdt_entry_t kpdt_virt; sdt_entry_t *kmap; @@ -1068,7 +1037,7 @@ pmap_bootstrap(vm_offset_t load_start, /* IN */ * init the kpdt queue */ kpdt_free = kpdt_virt; - for (i = MAX_KERNEL_PDT_SIZE/PDT_SIZE; i>0; i--) { + for (i = MAX_KERNEL_PDT_SIZE/PDT_SIZE; i > 0; i--) { kpdt_virt->next = (kpdt_entry_t)((vm_offset_t)kpdt_virt + PDT_SIZE); kpdt_virt->phys = kpdt_phys; kpdt_virt = kpdt_virt->next; @@ -1230,7 +1199,7 @@ pmap_bootstrap(vm_offset_t load_start, /* IN */ * Allocate all the submaps we need. Note that SYSMAP just allocates * kernel virtual address with no physical backing memory. The idea * is physical memory will be mapped at this va before using that va. - * This means that if different physcal pages are going to be mapped + * This means that if different physical pages are going to be mapped * at different times, we better do a tlb flush before using it - * else we will be referencing the wrong page. */ @@ -1283,7 +1252,6 @@ pmap_bootstrap(vm_offset_t load_start, /* IN */ apr_data.field.te = 1; /* Translation enable */ #ifdef DEBUG if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) { - void show_apr(unsigned value); show_apr(apr_data.bits); } #endif @@ -1331,13 +1299,12 @@ pmap_bootstrap(vm_offset_t load_start, /* IN */ } #endif avail_next = *phys_start; - return; } /* pmap_bootstrap() */ /* * Bootstrap memory allocator. This function allows for early dynamic * memory allocation until the virtual memory system has been bootstrapped. - * After that point, either kmem_alloc or malloc should be used. This + * After that point, either uvm_km_zalloc or malloc should be used. This * function works by stealing pages from the (to be) managed page pool, * stealing virtual address space, then mapping the pages and zeroing them. * @@ -1382,16 +1349,15 @@ pmap_bootstrap_alloc(int size) * phys_end physical address of last available page * * Extern/Globals - * pv_head_table (OUT) - * pv_lock_table (OUT) - * pmap_modify_list (OUT) - * pmap_phys_start (OUT) - * pmap_phys_end (OUT) - * pmap_initialized(OUT) + * pv_head_table + * pv_lock_table + * pmap_modify_list + * pmap_phys_start + * pmap_phys_end + * pmap_initialized * * Calls: - * kmem_alloc - * zinit + * uvm_km_zalloc * * This routine does not really have much to do. It allocates space * for the pv_head_table, pv_lock_table, pmap_modify_list; and sets these @@ -1405,10 +1371,8 @@ pmap_bootstrap_alloc(int size) * wired.) 
* * - * kmem_alloc() memory for pv_table - * kmem_alloc() memory for modify_bits - * zinit(pmap_zone) - * zinit(segment zone) + * uvm_km_zalloc() memory for pv_table + * uvm_km_zalloc() memory for modify_bits * */ #ifdef MACHINE_NEW_NONCONTIG @@ -1559,18 +1523,8 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) /* * Routine: PMAP_ZERO_PAGE * - * History: - * '90.7.13 Fuzzy - * '90.9.05 Fuzzy - * Bug: template page invalid --> template page valid - * - * template = trunc_page(phys) - * | m88k_protection (kernel_pmap, VM_PROT_READ | VM_PROT_WRITE) - * | DT_VALID; - * ^^^^^^^^ add - * * Function: - * Zeros the specified (machine independent) page. + * Zeroes the specified page. * * Parameters: * phys PA of page to zero @@ -1579,7 +1533,6 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) * phys_map_vaddr1 * * Calls: - * trunc_page * m88k_protection * cmmu_sflush_page * bzero @@ -1587,7 +1540,7 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) * Special Assumptions: * no locking required * - * This routine maps the physical pages ath the 'phys_map' virtual + * This routine maps the physical pages at the 'phys_map' virtual * address set up in pmap_bootstrap. It flushes the TLB to make the new * mappings effective, and zeros all the bits. */ @@ -1597,11 +1550,11 @@ pmap_zero_page(vm_offset_t phys) vm_offset_t srcva; pte_template_t template; unsigned int spl_sav; - int my_cpu; + int cpu; pt_entry_t *srcpte; - my_cpu = cpu_number(); - srcva = (vm_offset_t)(phys_map_vaddr1 + (my_cpu * PAGE_SIZE)); + cpu = cpu_number(); + srcva = (vm_offset_t)(phys_map_vaddr1 + (cpu * PAGE_SIZE)); srcpte = pmap_pte(kernel_pmap, srcva); template.bits = trunc_page(phys) @@ -1614,32 +1567,19 @@ pmap_zero_page(vm_offset_t phys) splx(spl_sav); bzero((void*)srcva, PAGE_SIZE); /* force the data out */ - cmmu_flush_remote_data_cache(my_cpu,phys, PAGE_SIZE); + cmmu_flush_remote_data_cache(cpu, phys, PAGE_SIZE); } /* pmap_zero_page() */ /* * Routine: PMAP_CREATE * - * Author: Fuzzy - * - * History: - * '90.7.13 Fuzzy level 1 --> segment exchange - * '90.7.16 Fuzzy PT_ALIGNED --> PAGE_ALIGNED exchange - * l1_utemplate delete - * '90.7.20 Fuzzy kernel segment entries in segment table - * entries for user space address delete. - * copying kernel segment entries - * to user pmap segment entries delete. - * all user segment table entries initialize - * to zero (invalid). - * * Function: * Create and return a physical map. If the size specified for the * map is zero, the map is an actual physical map, and may be referenced * by the hardware. If the size specified is non-zero, the map will be * used in software only, and is bounded by that size. * - * Paramerters: + * Parameters: * size size of the map * * This routines allocates a pmap structure. @@ -1798,7 +1738,7 @@ pmap_pinit(pmap_t p) * ranges represented by the table group sizes(PDT_TABLE_GROUP_VA_SPACE). * */ -STATIC void +void pmap_free_tables(pmap_t pmap) { unsigned long sdt_va; /* outer loop index */ @@ -1877,7 +1817,7 @@ pmap_release(register pmap_t p) * CHECK_PMAP_CONSISTENCY * PMAP_LOCK, PMAP_UNLOCK * pmap_free_tables - * zfree + * free * * Special Assumptions: * Map contains no valid mappings. 
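
The pmap_zero_page() hunk above (and the pmap_copy_page() hunk later in the diff) relies on a per-CPU scratch window: each CPU owns one page-sized slot at phys_map_vaddr1/phys_map_vaddr2, rewrites that slot's PTE to point at the target physical page, flushes the stale TLB entry with cmmu_flush_tlb(), and then runs bzero()/bcopy() through the window. The sketch below is a small user-space simulation of that pattern, not the kernel code; the physmem array, the integer "PTE" table and the sim_*/scratch_* names are invented for illustration.

```c
/*
 * Minimal user-space simulation of the per-CPU scratch-window pattern
 * used by pmap_zero_page()/pmap_copy_page().  "Physical memory" is an
 * array of pages and a "PTE" is just an index into it; in the real pmap
 * the rebinding step must also flush the TLB so the CPU drops the stale
 * translation before the window is touched.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE       4096
#define NPHYSPAGES      8
#define NCPUS           4

static unsigned char physmem[NPHYSPAGES][PAGE_SIZE];   /* stand-in for physical pages */
static int scratch_pte[NCPUS];                          /* fake per-CPU PTE: just a pfn */

/* Rebind CPU 'cpu's scratch window to physical page 'pfn'. */
static unsigned char *
scratch_map(int cpu, int pfn)
{
        scratch_pte[cpu] = pfn;         /* write the new PTE */
        /* real pmap: cmmu_flush_tlb(...) here, so the old translation is dropped */
        return physmem[scratch_pte[cpu]];
}

/* Analogue of pmap_zero_page(): map the page through the window, then zero it. */
static void
sim_zero_page(int cpu, int pfn)
{
        unsigned char *va = scratch_map(cpu, pfn);

        memset(va, 0, PAGE_SIZE);       /* the kernel uses bzero() */
}

int
main(void)
{
        memset(physmem, 0xaa, sizeof physmem);
        sim_zero_page(0, 3);
        printf("page 3 byte 0 = %#x, page 2 byte 0 = %#x\n",
            physmem[3][0], physmem[2][0]);      /* prints 0 and 0xaa */
        return 0;
}
```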
@@ -1912,7 +1852,7 @@ pmap_destroy(pmap_t p) if (c == 0) { pmap_release(p); - free((caddr_t)p,M_VMPMAP); + free((caddr_t)p, M_VMPMAP); } } /* pmap_destroy() */ @@ -1970,13 +1910,12 @@ pmap_reference(pmap_t p) * SDT_NEXT * pmap_pte * PDT_VALID - * ptoa * PMAP_MANAGED * LOCK_PVH * UNLOCK_PVH * PA_TO_PVH * CHECK_PV_LIST - * zfree + * free * invalidate_pte * flush_atc_entry * vm_page_set_modified @@ -2000,7 +1939,7 @@ pmap_reference(pmap_t p) * its zone. */ -STATIC void +void pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e) { int pfn; @@ -2112,7 +2051,7 @@ pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e) CHECK_PV_LIST(pa, pvl, "pmap_remove_range after"); UNLOCK_PVH(pa); - } /* if PAGE_MANAGED */ + } /* if PMAP_MANAGED */ /* * Reflect modify bits to pager and zero (invalidate, @@ -2213,12 +2152,11 @@ pmap_remove(pmap_t map, vm_offset_t s, vm_offset_t e) * PA_TO_PVH * CHECK_PV_LIST * simple_lock - * ptoa * PDT_VALID * pmap_pte * vm_page_set_modified * PHYS_TO_VM_PAGE - * zfree + * free * * If the page specified by the given address is not a managed page, * this routine simply returns. Otherwise, the PV list associated with @@ -2356,7 +2294,6 @@ remove_all_Retry: * simple_lock, simple_unlock * panic * PDT_VALID - * ptoa * pmap_pte * * Special Assumptions: @@ -2366,7 +2303,7 @@ remove_all_Retry: * the page table entry (the PTE), and sets the hardware enforced * read-only bit. The TLB is appropriately flushed. */ -STATIC void +void pmap_copy_on_write(vm_offset_t phys) { register pv_entry_t pv_e; @@ -2378,7 +2315,7 @@ pmap_copy_on_write(vm_offset_t phys) if (!PMAP_MANAGED(phys)) { #ifdef DEBUG - if (pmap_con_dbg & CD_CMOD) + if (pmap_con_dbg & CD_COW) printf("(pmap_copy_on_write :%x) phys addr 0x%x not managed \n", curproc, phys); #endif return; @@ -2396,9 +2333,7 @@ copy_on_write_Retry: if ((pmap_con_dbg & (CD_COW | CD_NORM)) == (CD_COW | CD_NORM)) printf("(pmap_copy_on_write :%x) phys addr 0x%x not mapped\n", curproc, phys); #endif - UNLOCK_PVH(phys); - SPLX(spl); - return; /* no mappings */ + goto out; /* no mappings */ } /* @@ -2455,9 +2390,9 @@ copy_on_write_Retry: } CHECK_PV_LIST(phys, PA_TO_PVH(phys), "pmap_copy_on_write"); +out: UNLOCK_PVH(phys); SPLX(spl); - } /* pmap_copy_on_write */ /* @@ -2578,7 +2513,7 @@ pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot) * * Must be called with the pmap system and the pmap unlocked, since * these must be unlocked to use vm_allocate or vm_deallocate (via - * kmem_alloc, zalloc). Thus it must be called in a unlock/lock loop + * uvm_km_zalloc). Thus it must be called in a unlock/lock loop * that checks whether the map has been expanded enough. ( We won't loop * forever, since page table aren't shrunk.) * @@ -2595,7 +2530,7 @@ pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot) * kmem_alloc * kmem_free * zalloc - * zfree + * free * pmap_extract * * Special Assumptions @@ -2613,7 +2548,7 @@ pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot) * ptva = kmem_alloc(user_pt_map) * */ -STATIC void +void pmap_expand(pmap_t map, vm_offset_t v) { int i, spl; @@ -2714,17 +2649,6 @@ pmap_expand(pmap_t map, vm_offset_t v) /* * Routine: PMAP_ENTER * - * - * Update: - * July 13,90 - JUemura - * initial porting - * *****TO CHECK***** - * locks removed since we don't have to allocate - * level 2 tables anymore. locks needed? 
- * '90.7.26 Fuzzy VM_MIN_KERNEL_ADDRESS -> VM_MAX_USER_ADDRESS - * '90.8.17 Fuzzy Debug message added(PV no mapped at VA) - * '90.8.31 Sugai Remove redundant message output - * * Function: * Insert the given physical page (p) at the specified virtual * address (v) in the target phisical map with the protecton requested. @@ -2751,7 +2675,7 @@ pmap_expand(pmap_t map, vm_offset_t v) * pmap_pte * pmap_expand * pmap_remove_range - * zfree + * free * * This routine starts off by calling pmap_pte to obtain a (virtual) * pointer to the page table entry corresponding to given virtual @@ -2840,9 +2764,7 @@ Retry: users = pmap->cpus_using; /* - * Expand pmap to include this pte. Assume that - * pmap is always expanded to include enough M88K - * pages to map one VM page. + * Expand pmap to include this pte. */ while ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) { /* @@ -3015,13 +2937,11 @@ Retry: /* * Routine: pmap_unwire * - * Author: Fuzzy - * * Function: Change the wiring attributes for a map/virtual-address * Pair. - * Prameterts: + * Parameters: * pmap pointer to pmap structure - * v virtual address of page to be wired/unwired + * v virtual address of page to be unwired * * Extern/Global: * pte_per_vm_page @@ -3058,8 +2978,6 @@ pmap_unwire(pmap_t map, vm_offset_t v) /* * Routine: PMAP_EXTRACT * - * Author: Fuzzy - * * Function: * Extract the physical page address associoated * with the given map/virtual_address pair. @@ -3127,48 +3045,6 @@ fail: } /* pmap_extract() */ /* - a version for the kernel debugger -*/ - -vm_offset_t -pmap_extract_unlocked(pmap_t pmap, vm_offset_t va) -{ - pt_entry_t *pte; - vm_offset_t pa; - int i; - - if (pmap == PMAP_NULL) - panic("pmap_extract: pmap is NULL"); - - /* - * check BATC first - */ - if (pmap == kernel_pmap && batc_used > 0) - for (i = batc_used-1; i > 0; i--) - if (batc_entry[i].lba == M88K_BTOBLK(va)) { - pa = (batc_entry[i].pba << BATC_BLKSHIFT) | - (va & BATC_BLKMASK ); - return (pa); - } - - if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) - pa = (vm_offset_t) 0; - else { - if (PDT_VALID(pte)) - pa = ptoa(pte->pfn); - else - pa = (vm_offset_t) 0; - } - - if (pa) - pa |= (va & PAGE_MASK); /* offset within page */ - - return (pa); - -} /* pamp_extract_unlocked() */ - - -/* * Routine: PMAP_COPY * * Function: @@ -3187,14 +3063,11 @@ pmap_extract_unlocked(pmap_t pmap, vm_offset_t va) * function. Translation tables in the destination map will be allocated * at VM fault time. */ +/* ARGSUSED */ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { -#ifdef lint - dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++; -#endif - }/* pmap_copy() */ @@ -3365,7 +3238,7 @@ pmap_collect(pmap_t pmap) /* * we have to unlock before freeing the table, since PT_FREE - * calls kmem_free or zfree, which will invoke another + * calls uvm_km_free or free, which will invoke another * pmap routine */ PMAP_UNLOCK(pmap, spl); @@ -3390,7 +3263,7 @@ pmap_collect(pmap_t pmap) * Function: * Binds the given physical map to the given * processor, and returns a hardware map description. - * In a mono-processor implementation the my_cpu + * In a mono-processor implementation the cpu * argument is ignored, and the PMAP_ACTIVATE macro * simply sets the MMU root pointer element of the PCB * to the physical address of the segment descriptor table. 
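
The pmap_extract_unlocked() debugger helper deleted above spells out the translation that pmap_extract() performs: index the segment descriptor table (SDT) with the top VA bits, follow it to a page descriptor table (PDT), check PDT_VALID(), and combine the page frame number with the page offset (the kernel-map BATC shortcut is skipped here). The toy model below walks the same two levels; the 10/10/12-bit split and the toy_* types are assumptions made for the example, not the kernel's declarations.

```c
/*
 * Toy model of the SDT -> PDT -> physical-address walk performed by
 * pmap_extract().  Assumes the usual m88k layout of 4KB pages with
 * 1024-entry segment and page tables (10-bit SDT index, 10-bit PDT
 * index, 12-bit page offset).
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define PAGE_MASK       ((1u << PAGE_SHIFT) - 1)
#define SDT_ENTRIES     1024
#define PDT_ENTRIES     1024

#define SDTIDX(va)      (((va) >> 22) & 0x3ff)
#define PDTIDX(va)      (((va) >> 12) & 0x3ff)

struct toy_pte { uint32_t pfn; int valid; };            /* page descriptor    */
struct toy_sde { struct toy_pte *pdt; int valid; };     /* segment descriptor */

/* Return the physical address for 'va', or 0 if it is unmapped. */
static uint32_t
toy_extract(struct toy_sde *sdt, uint32_t va)
{
        struct toy_sde *sde = &sdt[SDTIDX(va)];
        struct toy_pte *pte;

        if (!sde->valid)                /* no page table for this segment */
                return 0;
        pte = &sde->pdt[PDTIDX(va)];
        if (!pte->valid)                /* the PDT_VALID() check           */
                return 0;
        /* ptoa(pte->pfn) | (va & PAGE_MASK) in the real code */
        return (pte->pfn << PAGE_SHIFT) | (va & PAGE_MASK);
}

int
main(void)
{
        static struct toy_pte pdt[PDT_ENTRIES];
        static struct toy_sde sdt[SDT_ENTRIES];
        uint32_t va = 0x00803234;       /* SDTIDX 2, PDTIDX 3, offset 0x234 */

        pdt[PDTIDX(va)] = (struct toy_pte){ .pfn = 0x1234, .valid = 1 };
        sdt[SDTIDX(va)] = (struct toy_sde){ .pdt = pdt, .valid = 1 };

        printf("va %#x -> pa %#x\n", (unsigned)va, (unsigned)toy_extract(sdt, va));
        return 0;
}
```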
@@ -3417,7 +3290,7 @@ pmap_activate(struct proc *p) #endif #endif pmap_t pmap = p->p_vmspace->vm_map.pmap; - int my_cpu = cpu_number(); + int cpu = cpu_number(); #ifdef DEBUG if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM)) @@ -3444,7 +3317,7 @@ pmap_activate(struct proc *p) * ABOUT THE BATC ENTRIES, THE SUPERVISOR TLBs SHOULB BE * FLUSHED AS WELL. */ - cmmu_pmap_activate(my_cpu, apr_data.bits, + cmmu_pmap_activate(cpu, apr_data.bits, pmap->i_batc, pmap->d_batc); for (n = 0; n < BATC_MAX; n++) *(unsigned*)&batc_entry[n] = pmap->i_batc[n].bits; @@ -3463,7 +3336,7 @@ pmap_activate(struct proc *p) /* * Mark that this cpu is using the pmap. */ - SETBIT_CPUSET(my_cpu, &(pmap->cpus_using)); + SETBIT_CPUSET(cpu, &(pmap->cpus_using)); simple_unlock(&pmap->lock); } else { @@ -3490,14 +3363,14 @@ pmap_activate(struct proc *p) * Parameters: * p pointer to proc structure * - * _pmap_deactive simply clears the cpus_using field in given pmap structure. + * pmap_deactive simply clears the cpus_using field in given pmap structure. * */ void pmap_deactivate(struct proc *p) { pmap_t pmap = p->p_vmspace->vm_map.pmap; - int my_cpu = cpu_number(); + int cpu = cpu_number(); if (pmap != kernel_pmap) { @@ -3505,7 +3378,7 @@ pmap_deactivate(struct proc *p) * we expect the spl is already raised to sched level. */ simple_lock(&pmap->lock); - CLRBIT_CPUSET(my_cpu, &(pmap->cpus_using)); + CLRBIT_CPUSET(cpu, &(pmap->cpus_using)); simple_unlock(&pmap->lock); } } /* pmap_deactivate() */ @@ -3514,7 +3387,7 @@ pmap_deactivate(struct proc *p) * Routine: PMAP_COPY_PAGE * * Function: - * Copies the specified (machine independent) pages. + * Copies the specified pages. * * Parameters: * src PA of source page @@ -3526,14 +3399,13 @@ pmap_deactivate(struct proc *p) * * Calls: * m88kprotection - * trunc_page * cmmu_sflush_page * bcopy * * Special Assumptions: * no locking reauired * - * This routine maps the phsical pages at the 'phys_map' virtual + * This routine maps the physical pages at the 'phys_map' virtual * addresses set up in pmap_bootstrap. It flushes the TLB to make the * new mappings effective, and performs the copy. */ @@ -3545,15 +3417,15 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst) int aprot; pte_template_t template; pt_entry_t *dstpte, *srcpte; - int my_cpu = cpu_number(); + int cpu = cpu_number(); /* * Map source physical address. */ aprot = m88k_protection (kernel_pmap, VM_PROT_READ | VM_PROT_WRITE); - srcva = (vm_offset_t)(phys_map_vaddr1 + (cpu_number() * PAGE_SIZE)); - dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE)); + srcva = (vm_offset_t)(phys_map_vaddr1 + (cpu << PAGE_SHIFT)); + dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu << PAGE_SHIFT)); srcpte = pmap_pte(kernel_pmap, srcva); dstpte = pmap_pte(kernel_pmap, dstva); @@ -3577,192 +3449,12 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst) bcopy((void*)srcva, (void*)dstva, PAGE_SIZE); /* flush source, dest out of cache? */ - cmmu_flush_remote_data_cache(my_cpu, src, PAGE_SIZE); - cmmu_flush_remote_data_cache(my_cpu, dst, PAGE_SIZE); + cmmu_flush_remote_data_cache(cpu, src, PAGE_SIZE); + cmmu_flush_remote_data_cache(cpu, dst, PAGE_SIZE); } /* pmap_copy_page() */ /* - * copy_to_phys - * - * Copy virtual memory to physical memory by mapping the physical - * memory into virtual memory and then doing a virtual to virtual - * copy with bcopy. 
- * - * Parameters: - * srcva VA of source page - * dstpa PA of destination page - * bytecount copy byte size - * - * Extern/Global: - * phys_map_vaddr2 - * - * Calls: - * m88kprotection - * trunc_page - * cmmu_sflush_page - * bcopy - * - */ -void -copy_to_phys(vm_offset_t srcva, vm_offset_t dstpa, int bytecount) -{ - vm_offset_t dstva; - pt_entry_t *dstpte; - int copy_size, - offset, - aprot; - pte_template_t template; - - dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE)); - dstpte = pmap_pte(kernel_pmap, dstva); - copy_size = PAGE_SIZE; - offset = dstpa - trunc_page(dstpa); - dstpa -= offset; - - aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE); - while (bytecount > 0) { - copy_size = PAGE_SIZE - offset; - if (copy_size > bytecount) - copy_size = bytecount; - /* - * Map destination physical address. - */ - template.bits = trunc_page(dstpa) | aprot | CACHE_WT | DT_VALID; - cmmu_flush_tlb(1, dstva, PAGE_SIZE); - *dstpte = template.pte; - - dstva += offset; - bcopy((void*)srcva, (void*)dstva, copy_size); - srcva += copy_size; - dstva += copy_size; - dstpa += PAGE_SIZE; - bytecount -= copy_size; - offset = 0; - } -} - -/* - * copy_from_phys - * - * Copy physical memory to virtual memory by mapping the physical - * memory into virtual memory and then doing a virtual to virtual - * copy with bcopy. - * - * Parameters: - * srcpa PA of source page - * dstva VA of destination page - * bytecount copy byte size - * - * Extern/Global: - * phys_map_vaddr2 - * - * Calls: - * m88kprotection - * trunc_page - * cmmu_sflush_page - * bcopy - * - */ -void -copy_from_phys(vm_offset_t srcpa, vm_offset_t dstva, int bytecount) -{ - register vm_offset_t srcva; - register pt_entry_t *srcpte; - register int copy_size, offset; - int aprot; - pte_template_t template; - - srcva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE)); - srcpte = pmap_pte(kernel_pmap, srcva); - copy_size = PAGE_SIZE; - offset = srcpa - trunc_page(srcpa); - srcpa -= offset; - - aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE); - while (bytecount > 0) { - copy_size = PAGE_SIZE - offset; - if (copy_size > bytecount) - copy_size = bytecount; - /* - * Map destination physical address. - */ - template.bits = trunc_page(srcpa) | aprot | CACHE_WT | DT_VALID; - cmmu_flush_tlb(1, srcva, PAGE_SIZE); - *srcpte = template.pte; - - srcva += offset; - bcopy((void*)srcva, (void*)dstva, copy_size); - srcpa += PAGE_SIZE; - dstva += copy_size; - srcva += copy_size; - bytecount -= copy_size; - offset = 0; - /* cache flush source? */ - } -} - -/* - * Routine: PMAP_REDZONE - * - * Function: - * Give the kernel read-only access to the specified address. This - * is used to detect stack overflows. It is assumed that the address - * specified is the last possible kernel stack address. Therefore, we - * round up to the nearest machine dependent page. - * - * Parameters: - * pmap pointer to pmap structure - * addr virtual address of page to which access should - * be restricted to read-only - * - * Calls: - * round_page - * PMAP_LOCK - * pmap_pte - * PDT_VALID - * - * This function calls pmap_pte to obtain a pointer to the page - * table entry associated with the given virtual address. If there is a - * page entry, and it is valid, its write protect bit will be set. 
- */ -void -pmap_redzone(pmap_t pmap, vm_offset_t va) -{ - pt_entry_t *pte; - int spl, spl_sav; - unsigned users; - pte_template_t opte; - int kflush; - - va = round_page(va); - PMAP_LOCK(pmap, spl); - - users = pmap->cpus_using; - if (pmap == kernel_pmap) { - kflush = 1; - } else { - kflush = 0; - } - - if ((pte = pmap_pte(pmap, va)) != PT_ENTRY_NULL && PDT_VALID(pte)) { - /* - * Invalidate pte temporarily to avoid being written back - * the modified bit and/or the reference bit by other cpu. - */ - spl_sav = splimp(); - opte.bits = invalidate_pte(pte); - opte.pte.prot = M88K_RO; - ((pte_template_t *)pte)->bits = opte.bits; - flush_atc_entry(users, va, kflush); - splx(spl_sav); - } - - PMAP_UNLOCK(pmap, spl); - -} /* pmap_redzone() */ - -/* * Routine: PMAP_CLEAR_MODIFY * * Function: @@ -3818,7 +3510,7 @@ clear_modify_Retry: CHECK_PV_LIST (phys, pvl, "pmap_clear_modify"); LOCK_PVH(phys); - /* update correspoinding pmap_modify_list element */ + /* update corresponding pmap_modify_list element */ SET_ATTRIB(phys, 0); if (pvl->pmap == PMAP_NULL) { @@ -3831,7 +3523,7 @@ clear_modify_Retry: return; } - /* for each listed pmap, trun off the page modified bit */ + /* for each listed pmap, turn off the page modified bit */ pvep = pvl; while (pvep != PV_ENTRY_NULL) { pmap = pvep->pmap; @@ -3926,7 +3618,7 @@ pmap_is_modified(vm_offset_t phys) pvl = PA_TO_PVH(phys); CHECK_PV_LIST (phys, pvl, "pmap_is_modified"); - is_mod_Retry: +is_mod_Retry: if ((boolean_t) PA_TO_ATTRIB(phys)) { /* we've already cached a modify flag for this page, @@ -3989,12 +3681,6 @@ pmap_is_modified(vm_offset_t phys) /* * Routine: PMAP_CLEAR_REFERECE * - * History: - * '90. 7.16 Fuzzy unchanged - * '90. 7.19 Fuzzy comment "Calls:' add - * '90. 8.21 Fuzzy Debugging message add - * '93. 3. 1 jfriedl Added call to LOCK_PVH - * * Function: * Clear the reference bits on the specified physical page. * @@ -4016,7 +3702,7 @@ pmap_is_modified(vm_offset_t phys) * For managed pages, the coressponding PV list will be traversed. * For each pmap/va the hardware 'used' bit in the page table entry * inspected - and turned off if necessary. If any of the inspected bits - * werw found on, an TLB flush will be performed. + * were found on, a TLB flush will be performed. */ void pmap_clear_reference(vm_offset_t phys) @@ -4100,12 +3786,8 @@ pmap_clear_reference(vm_offset_t phys) /* * Routine: PMAP_IS_REFERENCED * - * History: - * '90. 7.16 Fuzzy - * '90. 7.19 Fuzzy comment 'Calls:' add - * * Function: - * Retrun whether or not the specifeid physical page is referenced by + * Return whether or not the specified physical page is referenced by * any physical maps. That is, whether the hardware has touched the page. 
* * Parameters: @@ -4147,8 +3829,7 @@ pmap_is_referenced(vm_offset_t phys) pvl = PA_TO_PVH(phys); CHECK_PV_LIST(phys, pvl, "pmap_is_referenced"); - is_ref_Retry: - +is_ref_Retry: if (pvl->pmap == PMAP_NULL) { SPLX(spl); return (FALSE); @@ -4206,7 +3887,7 @@ pmap_page_protect(vm_offset_t phys, vm_prot_t prot) } } -#if FUTURE_MAYBE +#ifdef FUTURE_MAYBE /* * Routine: PAGEMOVE * @@ -4215,7 +3896,7 @@ pmap_page_protect(vm_offset_t phys, vm_prot_t prot) * * Parameters: * from kernel virtual address of source - * to kernel virtual address of distination + * to kernel virtual address of destination * size size in bytes * * Calls: @@ -4252,7 +3933,7 @@ pagemove(vm_offset_t from, vm_offset_t to, int size) */ if ((srcpte = pmap_pte(kernel_pmap, (vm_offset_t)from)) == PT_ENTRY_NULL) { printf("pagemove: source vaddr 0x%x\n", from); - panic("pagemove: Source addr not mapped"); + panic("pagemove: source addr not mapped"); } /* @@ -4261,13 +3942,13 @@ pagemove(vm_offset_t from, vm_offset_t to, int size) if ((dstpte = pmap_pte(kernel_pmap, (vm_offset_t)to)) == PT_ENTRY_NULL) if ((dstpte = pmap_expand_kmap((vm_offset_t)to, VM_PROT_READ | VM_PROT_WRITE)) == PT_ENTRY_NULL) - panic("pagemove: Cannot allocate distination pte"); + panic("pagemove: Cannot allocate destination pte"); /* * */ if (dstpte->dtype == DT_VALID) { - printf("pagemove: distination vaddr 0x%x, pte = 0x%x\n", to, *((unsigned *)dstpte)); - panic("pagemove: Distination pte already valid"); + printf("pagemove: destination vaddr 0x%x, pte = 0x%x\n", to, *((unsigned *)dstpte)); + panic("pagemove: destination pte already valid"); } #ifdef DEBUG @@ -4308,76 +3989,8 @@ pagemove(vm_offset_t from, vm_offset_t to, int size) } /* pagemove */ #endif /* FUTURE_MAYBE */ -/* - * Routine: icache_flush - * - * Function: - * Invalidate instruction cache for all CPUs on specified - * physical address. Called when a page is removed from a - * vm_map. This is done because the Instruction CMMUs are not - * snooped, and if a page is subsequently used as a text page, - * we want the CMMUs to re-load the cache for the page. - * - * Parameters: - * pa physical address of the (vm) page - * - * Calls: - * cachefall - * - * Called by: - * vm_remove_page - * - */ -void -icache_flush(vm_offset_t pa) -{ - int cpu = 0; - for (cpu=0; cpu<max_cpus; cpu++) { - if (cpu_sets[cpu]) { - cmmu_flush_remote_inst_cache(cpu, pa, - PAGE_SIZE); - } - } -} /* icache_flush */ - -/* - * Routine: pmap_dcache_flush - * - * Function: - * Flush DATA cache on specified virtual address. - * - * Parameters: - * pmap specify pmap - * va virtual address of the (vm) page to be flushed - * - * Extern/globals: - * pmap_pte - * - * Calls: - * dcacheflush - * - */ void -pmap_dcache_flush(pmap_t pmap, vm_offset_t va) -{ - vm_offset_t pa; - int spl; - - if (pmap == PMAP_NULL) - panic("pmap_dcache_flush: pmap is NULL"); - - PMAP_LOCK(pmap, spl); - - pa = ptoa((pmap_pte(pmap, va))->pfn); - cmmu_flush_data_cache(pa, PAGE_SIZE); - - PMAP_UNLOCK(pmap, spl); - - -} /* pmap_dcache_flush */ - -STATIC void cache_flush_loop(int mode, vm_offset_t pa, int size) { int i; @@ -4431,37 +4044,6 @@ cache_flush_loop(int mode, vm_offset_t pa, int size) } } -/* - * pmap_cache_flush - * Internal function. - */ -void -pmap_cache_flush(pmap_t pmap, vm_offset_t virt, int bytes, int mode) -{ - vm_offset_t pa; - vm_offset_t va; - int spl; - - if (pmap == PMAP_NULL) - panic("pmap_cache_flush: NULL pmap"); - - /* - * If it is more than a couple of pages, just blow the whole cache - * because of the number of cycles involved. 
- */ - if (bytes > 2*PAGE_SIZE) { - cache_flush_loop(mode, 0, -1); - return; - } - - PMAP_LOCK(pmap, spl); - for (va = virt; bytes > 0; bytes -= PAGE_SIZE,va += PAGE_SIZE) { - pa = ptoa((pmap_pte(pmap, va))->pfn); - cache_flush_loop(mode, pa, PAGE_SIZE); - } - PMAP_UNLOCK(pmap, spl); -} /* pmap_cache_flush */ - #ifdef DEBUG /* * DEBUGGING ROUTINES - check_pv_list and check_pmp_consistency are used @@ -4496,11 +4078,11 @@ pmap_cache_flush(pmap_t pmap, vm_offset_t virt, int bytes, int mode) * * This function walks the given PV list. For each pmap/va pair, * pmap_extract is called to obtain the physical address of the page from - * the pmap in question. If the retruned physical address does not match + * the pmap in question. If the returned physical address does not match * that for the PV list being perused, the function panics. */ -STATIC void +void check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who) { pv_entry_t pv_e; @@ -4549,7 +4131,7 @@ check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who) } /* check_pv_list() */ /* - * Routine: CHECK_MAP (itnernal) + * Routine: CHECK_MAP (internal) * * Function: * Debug mode routine to check consistency of map. @@ -4574,12 +4156,11 @@ check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who) * This function sequences through the given range of addresses. For * each page, pmap_pte is called to obtain the page table entry. If * its valid, and the physical page it maps is managed, the PV list is - * searched for the corresponding pmap/va entry. If not found, the - * function panics. If duplicate PV list entries are found, the function - * panics. + * searched for the corresponding pmap/va entry. If not found, or if + * duplicate PV list entries are found, the function panics. */ -STATIC void +void check_map(pmap_t map, vm_offset_t s, vm_offset_t e, char *who) { vm_offset_t va, @@ -4716,15 +4297,14 @@ check_map(pmap_t map, vm_offset_t s, vm_offset_t e, char *who) * Once all pmaps have been checked, check_pv_list is called to check * consistency of the PV lists for each managed page. * - * NOTE: Added by Sugai 10/29/90 - * There are some pages do not appaer in PV list. These pages are - * allocated for pv structures by kmem_alloc called in pmap_init. + * There are some pages which do not appear in PV list. These pages + * are allocated for pv structures by uvm_km_zalloc called in pmap_init. * Though they are in the range of pmap_phys_start to pmap_phys_end, - * PV maniupulations had not been activated when these pages were alloceted. + * PV manipulations had not been activated when these pages were alloceted. * */ -STATIC void +void check_pmap_consistency(char *who) { pmap_t p; @@ -4751,10 +4331,10 @@ check_pmap_consistency(char *who) check_map(p, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS, who); /* run through all pmaps. check consistency of each one... */ - i = PMAP_MAX; - for (p = kernel_pmap->next;p != kernel_pmap; p = p->next) { + i = 512; + for (p = kernel_pmap->next;p != kernel_pmap; p = p->next, i--) { if (i == 0) { /* can not read pmap list */ - printf("check_pmap_consistency: pmap strcut loop error.\n"); + printf("check_pmap_consistency: pmap struct loop error.\n"); panic(who); } check_map(p, VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS, who); @@ -4807,217 +4387,6 @@ check_pmap_consistency(char *who) p->modified, \ p->pg_used); -/* - * Routine: PMAP_PRINT - * - * History: - * - * Function: - * Print pmap stucture, including segment table. 
- * - * Parameters: - * pmap pointer to pmap structure - * - * Special Assumptions: - * No locking required. - * - * This function prints the fields of the pmap structure, then - * iterates through the segment translation table, printing each entry. - */ -void -pmap_print(pmap_t pmap) -{ - sdt_entry_t *sdtp; - sdt_entry_t *sdtv; - int i; - - printf("Pmap @ 0x%x:\n", (unsigned)pmap); - sdtp = pmap->sdt_paddr; - sdtv = pmap->sdt_vaddr; - printf(" sdt_paddr: 0x%x; sdt_vaddr: 0x%x; ref_count: %d;\n", - (unsigned)sdtp, (unsigned)sdtv, - pmap->ref_count); - -#ifdef statistics_not_yet_maintained - printf(" statistics: pagesize %d: free_count %d; " - "active_count %d; inactive_count %d; wire_count %d\n", - pmap->stats.pagesize, - pmap->stats.free_count, - pmap->stats.active_count, - pmap->stats.inactive_count, - pmap->stats.wire_count); - - printf(" zero_fill_count %d; reactiveations %d; " - "pageins %d; pageouts %d; faults %d\n", - pmap->stats.zero_fill_count, - pmap->stats.reactivations, - pmap->stats.pageins, - pmap->stats.pageouts, - pmap->stats.fault); - - printf(" cow_faults %d, lookups %d, hits %d\n", - pmap->stats.cow_faults, - pmap->stats.loopups, - pmap->stats.faults); -#endif - sdtp = (sdt_entry_t *) pmap->sdt_vaddr; /* addr of physical table */ - sdtv = sdtp + SDT_ENTRIES; /* shadow table with virt address */ - if (sdtp == (sdt_entry_t *)0) - printf("Error in pmap - sdt_paddr is null.\n"); - else { - int count = 0; - printf(" Segment table at 0x%x (0x%x):\n", - (unsigned)sdtp, (unsigned)sdtv); - for (i = 0; i < SDT_ENTRIES; i++, sdtp++, sdtv++) { - if ((sdtp->table_addr != 0 ) || (sdtv->table_addr != 0)) { - if (count != 0) - printf("sdt entry %d skip !!\n", count); - count = 0; - printf(" (%x)phys: ", i); - PRINT_SDT(sdtp); - printf(" (%x)virt: ", i); - PRINT_SDT(sdtv); - } else - count++; - } - if (count != 0) - printf("sdt entry %d skip !!\n", count); - } - -} /* pmap_print() */ - -/* - * Routine: PMAP_PRINT_TRACE - * - * Function: - * Using virt addr, derive phys addr, printing pmap tables along the way. - * - * Parameters: - * pmap pointer to pmap strucuture - * va virtual address whose translation is to be trace - * long_format flag indicating long from output is desired - * - * Special Assumptions: - * No locking required. - * - * This function chases down through the translation tree as - * appropriate for the given virtual address. each table entry - * encoutered is printed. If the long_format is desired, all entries of - * each table are printed, with special indication of the entries used in - * the translation. - */ -void -pmap_print_trace (pmap_t pmap, vm_offset_t va, boolean_t long_format) -{ - sdt_entry_t *sdtp; /* ptr to sdt table of physical addresses */ - sdt_entry_t *sdtv; /* ptr to sdt shadow table of virtual addresses */ - pt_entry_t *ptep; /* ptr to pte table of physical page addresses */ - - int i; /* table loop index */ - unsigned long prev_entry; /* keep track of value of previous table entry */ - int n_dup_entries; /* count contiguous duplicate entries */ - - printf("Trace of virtual address 0x%08x. 
Pmap @ 0x%08x.\n", - va, (unsigned)pmap); - - /*** SDT TABLES ***/ - /* get addrs of sdt tables */ - sdtp = (sdt_entry_t *)pmap->sdt_vaddr; - sdtv = sdtp + SDT_ENTRIES; - - if (sdtp == SDT_ENTRY_NULL) { - printf(" Segment table pointer (pmap.sdt_paddr) null, trace stops.\n"); - return; - } - - n_dup_entries = 0; - prev_entry = 0xFFFFFFFF; - - if (long_format) { - printf(" Segment table at 0x%08x (virt shadow at 0x%08x)\n", - (unsigned)sdtp, (unsigned)sdtv); - for (i = 0; i < SDT_ENTRIES; i++, sdtp++, sdtv++) { - if (prev_entry == ((sdt_entry_template_t *)sdtp)->bits - && SDTIDX(va) != i && i != SDT_ENTRIES-1) { - n_dup_entries++; - continue; /* suppress duplicate entry */ - } - if (n_dup_entries != 0) { - printf(" - %d duplicate entries skipped -\n",n_dup_entries); - n_dup_entries = 0; - } - prev_entry = ((pte_template_t *)sdtp)->bits; - if (SDTIDX(va) == i) { - printf(" >> (%x)phys: ", i); - } else { - printf(" (%x)phys: ", i); - } - PRINT_SDT(sdtp); - if (SDTIDX(va) == i) { - printf(" >> (%x)virt: ", i); - } else { - printf(" (%x)virt: ", i); - } - PRINT_SDT(sdtv); - } /* for */ - } else { - /* index into both tables for given VA */ - sdtp += SDTIDX(va); - sdtv += SDTIDX(va); - printf(" SDT entry index 0x%x at 0x%x (virt shadow at 0x%x)\n", - SDTIDX(va), (unsigned)sdtp, (unsigned)sdtv); - printf(" phys: "); - PRINT_SDT(sdtp); - printf(" virt: "); - PRINT_SDT(sdtv); - } - - /*** PTE TABLES ***/ - /* get addrs of page (pte) table (no shadow table) */ - - sdtp = ((sdt_entry_t *)pmap->sdt_vaddr) + SDTIDX(va); -#ifdef DEBUG - printf("*** DEBUG (sdtp) "); - PRINT_SDT(sdtp); -#endif - sdtv = sdtp + SDT_ENTRIES; - ptep = (pt_entry_t *)(ptoa(sdtv->table_addr)); - if (sdtp->dtype != DT_VALID) { - printf(" segment table entry invlid, trace stops.\n"); - return; - } - - n_dup_entries = 0; - prev_entry = 0xFFFFFFFF; - if (long_format) { - printf(" page table (ptes) at 0x%x\n", (unsigned)ptep); - for (i = 0; i < PDT_ENTRIES; i++, ptep++) { - if (prev_entry == ((pte_template_t *)ptep)->bits - && PDTIDX(va) != i && i != PDT_ENTRIES-1) { - n_dup_entries++; - continue; /* suppress suplicate entry */ - } - if (n_dup_entries != 0) { - printf(" - %d duplicate entries skipped -\n",n_dup_entries); - n_dup_entries = 0; - } - prev_entry = ((pte_template_t *)ptep)->bits; - if (PDTIDX(va) == i) { - printf(" >> (%x)pte: ", i); - } else { - printf(" (%x)pte: ", i); - } - PRINT_PDT(ptep); - } /* for */ - } else { - /* index into page table */ - ptep += PDTIDX(va); - printf(" pte index 0x%x\n", PDTIDX(va)); - printf(" pte: "); - PRINT_PDT(ptep); - } -} /* pmap_print_trace() */ - void pmap_virtual_space(vm_offset_t *startp, vm_offset_t *endp) { @@ -5046,7 +4415,7 @@ pmap_next_page(vm_offset_t *addrp) #endif -#if USING_BATC +#ifdef USING_BATC #ifdef OMRON_PMAP /* * Set BATC @@ -5109,7 +4478,7 @@ use_batc(task_t task, #endif #endif /* USING_BATC */ -#if FUTURE_MAYBE +#ifdef FUTURE_MAYBE /* * Machine-level page attributes * |
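
A small but recurring change in this diff is turning option tests such as `#if FUTURE_MAYBE` and `#if USING_BATC` into `#ifdef`. The two are not equivalent: `#ifdef` only asks whether the name is defined, while `#if` macro-expands the name and evaluates it, so an undefined option silently becomes 0 and an option defined with an empty body is a preprocessing error. A minimal demonstration (the OPTION name is made up for the example):

```c
/*
 * #ifdef vs. #if with option-style macros.
 *
 *   cc demo.c             -> prints nothing: OPTION is undefined, so
 *                            #ifdef is false and #if treats OPTION as 0
 *                            (often with a -Wundef warning).
 *   cc -DOPTION=1 demo.c  -> prints both lines.
 *
 * If OPTION were defined with an empty body ("#define OPTION"),
 * "#ifdef OPTION" would still be true, but "#if OPTION" would become
 * "#if" with no expression and fail to preprocess.
 */
#include <stdio.h>

int
main(void)
{
#ifdef OPTION
        puts("#ifdef OPTION is true");
#endif
#if OPTION
        puts("#if OPTION is nonzero");
#endif
        return 0;
}
```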