author     Niklas Hallqvist <niklas@cvs.openbsd.org>    2003-04-07 06:14:31 +0000
committer  Niklas Hallqvist <niklas@cvs.openbsd.org>    2003-04-07 06:14:31 +0000
commit     6576661903b5745dc7089f4a6a117d28cc3aa0c4 (patch)
tree       c0c9607ed5edd54bbd470ca31c8016ccc0972709 /sys/arch/i386
parent     0e5bcbfa6c0b83de7bd2013d2c0b74a1e4affa9d (diff)
Spring cleaning: remove unused code.
Diffstat (limited to 'sys/arch/i386')
-rw-r--r--  sys/arch/i386/i386/pmap.c     | 452
-rw-r--r--  sys/arch/i386/include/pmap.h  |  16
2 files changed, 3 insertions, 465 deletions
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index a27c936996a..bbcb0416df1 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.65 2002/10/13 18:26:12 krw Exp $ */
+/* $OpenBSD: pmap.c,v 1.66 2003/04/07 06:14:30 niklas Exp $ */
 /* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
 
 /*
@@ -404,11 +404,6 @@ static vaddr_t pmap_tmpmap_pa(paddr_t);
 static pt_entry_t *pmap_tmpmap_pvepte(struct pv_entry *);
 static void pmap_tmpunmap_pa(void);
 static void pmap_tmpunmap_pvepte(struct pv_entry *);
-static boolean_t pmap_transfer_ptes(struct pmap *,
-                    struct pmap_transfer_location *,
-                    struct pmap *,
-                    struct pmap_transfer_location *,
-                    int, boolean_t);
 static boolean_t pmap_try_steal_pv(struct pv_head *,
                    struct pv_entry *,
                    struct pv_entry *);
@@ -2944,449 +2939,6 @@ pmap_collect(pmap)
 }
 
 /*
- * pmap_transfer: transfer (move or copy) mapping from one pmap
- *      to another.
- *
- * => this function is optional, it doesn't have to do anything
- * => we assume that the mapping in the src pmap is valid (i.e. that
- *      it doesn't run off the end of the map's virtual space).
- * => we assume saddr, daddr, and len are page aligned/lengthed
- */
-
-void
-pmap_transfer(dstpmap, srcpmap, daddr, len, saddr, move)
-        struct pmap *dstpmap, *srcpmap;
-        vaddr_t daddr, saddr;
-        vsize_t len;
-        boolean_t move;
-{
-        /* base address of PTEs, dst could be NULL */
-        pt_entry_t *srcptes, *dstptes;
-
-        struct pmap_transfer_location srcl, dstl;
-        int dstvalid;             /* # of PTEs left in dst's current PTP */
-        struct pmap *mapped_pmap; /* the pmap we passed to pmap_map_ptes */
-        vsize_t blklen;
-        int blkpgs, toxfer;
-        boolean_t ok;
-
-#ifdef DIAGNOSTIC
-        /*
-         * sanity check: let's make sure our len doesn't overflow our dst
-         * space.
-         */
-
-        if (daddr < VM_MAXUSER_ADDRESS) {
-                if (VM_MAXUSER_ADDRESS - daddr < len) {
-                        printf("pmap_transfer: no room in user pmap "
-                            "(addr=0x%lx, len=0x%lx)\n", daddr, len);
-                        return;
-                }
-        } else if (daddr < VM_MIN_KERNEL_ADDRESS ||
-            daddr >= VM_MAX_KERNEL_ADDRESS) {
-                printf("pmap_transfer: invalid transfer address 0x%lx\n",
-                    daddr);
-        } else {
-                if (VM_MAX_KERNEL_ADDRESS - daddr < len) {
-                        printf("pmap_transfer: no room in kernel pmap "
-                            "(addr=0x%lx, len=0x%lx)\n", daddr, len);
-                        return;
-                }
-        }
-#endif
-
-        /*
-         * ideally we would like to have either src or dst pmap's be the
-         * current pmap so that we can map the other one in APTE space
-         * (if needed... one of the maps could be the kernel's pmap).
-         *
-         * however, if we can't get this, then we have to use the tmpmap
-         * (alternately we could punt).
-         */
-
-        if (!pmap_is_curpmap(dstpmap) && !pmap_is_curpmap(srcpmap)) {
-                dstptes = NULL;                   /* dstptes NOT mapped */
-                srcptes = pmap_map_ptes(srcpmap); /* let's map the source */
-                mapped_pmap = srcpmap;
-        } else {
-                if (!pmap_is_curpmap(srcpmap)) {
-                        srcptes = pmap_map_ptes(srcpmap); /* possible APTE */
-                        dstptes = PTE_BASE;
-                        mapped_pmap = srcpmap;
-                } else {
-                        dstptes = pmap_map_ptes(dstpmap); /* possible APTE */
-                        srcptes = PTE_BASE;
-                        mapped_pmap = dstpmap;
-                }
-        }
-
-        /*
-         * at this point we know that the srcptes are mapped.  the dstptes
-         * are mapped if (dstptes != NULL).  if (dstptes == NULL) then we
-         * will have to map the dst PTPs page at a time using the tmpmap.
-         * [XXX: is it worth the effort, or should we just punt?]
-         */
-
-        srcl.addr = saddr;
-        srcl.pte = &srcptes[i386_btop(srcl.addr)];
-        srcl.ptp = NULL;
-        dstl.addr = daddr;
-        if (dstptes)
-                dstl.pte = &dstptes[i386_btop(dstl.addr)];
-        else
-                dstl.pte = NULL;        /* we map page at a time */
-        dstl.ptp = NULL;
-        dstvalid = 0;   /* force us to load a new dst PTP to start */
-
-        while (len) {
-
-                /*
-                 * compute the size of this block.
-                 */
-
-                /* length in bytes */
-                blklen = i386_round_pdr(srcl.addr+1) - srcl.addr;
-                if (blklen > len)
-                        blklen = len;
-                blkpgs = i386_btop(blklen);
-
-                /*
-                 * if the block is not valid in the src pmap,
-                 * then we can skip it!
-                 */
-
-                if (!pmap_valid_entry(srcpmap->pm_pdir[pdei(srcl.addr)])) {
-                        len = len - blklen;
-                        srcl.pte = srcl.pte + blkpgs;
-                        srcl.addr += blklen;
-                        dstl.addr += blklen;
-                        if (blkpgs > dstvalid) {
-                                dstvalid = 0;
-                                dstl.ptp = NULL;
-                        } else {
-                                dstvalid = dstvalid - blkpgs;
-                        }
-                        if (dstptes == NULL && (len == 0 || dstvalid == 0)) {
-                                if (dstl.pte) {
-                                        pmap_tmpunmap_pa();
-                                        dstl.pte = NULL;
-                                }
-                        } else {
-                                dstl.pte += blkpgs;
-                        }
-                        continue;
-                }
-
-                /*
-                 * we have a valid source block of "blkpgs" PTEs to transfer.
-                 * if we don't have any dst PTEs ready, then get some.
-                 */
-
-                if (dstvalid == 0) {
-                        if (!pmap_valid_entry(dstpmap->
-                            pm_pdir[pdei(dstl.addr)])) {
-#ifdef DIAGNOSTIC
-                                if (dstl.addr >= VM_MIN_KERNEL_ADDRESS)
-                                        panic("pmap_transfer: missing kernel "
-                                            "PTP at 0x%lx", dstl.addr);
-#endif
-                                dstl.ptp = pmap_get_ptp(dstpmap,
-                                    pdei(dstl.addr), TRUE);
-                                if (dstl.ptp == NULL)  /* out of RAM?  punt. */
-                                        break;
-                        } else {
-                                dstl.ptp = NULL;
-                        }
-                        dstvalid = i386_btop(i386_round_pdr(dstl.addr+1) -
-                            dstl.addr);
-                        if (dstptes == NULL) {
-                                dstl.pte = (pt_entry_t *)
-                                    pmap_tmpmap_pa(dstpmap->
-                                    pm_pdir[pdei(dstl.addr)]
-                                    & PG_FRAME);
-                                dstl.pte = dstl.pte + (PTES_PER_PTP - dstvalid);
-                        }
-                }
-
-                /*
-                 * we have a valid source block of "blkpgs" PTEs to transfer.
-                 * we have a valid dst block of "dstvalid" PTEs ready.
-                 * thus we can transfer min(blkpgs, dstvalid) PTEs now.
-                 */
-
-                srcl.ptp = NULL;        /* don't know source PTP yet */
-                if (dstvalid < blkpgs)
-                        toxfer = dstvalid;
-                else
-                        toxfer = blkpgs;
-
-                if (toxfer > 0) {
-                        ok = pmap_transfer_ptes(srcpmap, &srcl, dstpmap, &dstl,
-                            toxfer, move);
-
-                        if (!ok)                /* memory shortage?  punt. */
-                                break;
-
-                        dstvalid -= toxfer;
-                        blkpgs -= toxfer;
-                        len -= i386_ptob(toxfer);
-                        if (blkpgs == 0)        /* out of src PTEs?  restart */
-                                continue;
-                }
-
-                /*
-                 * we have a valid source block of "blkpgs" PTEs left
-                 * to transfer.  we have just used up our "dstvalid"
-                 * PTEs, and thus must obtain more dst PTEs to finish
-                 * off the src block.  since we are now going to
-                 * obtain a brand new dst PTP, we know we can finish
-                 * the src block in one more transfer.
-                 */
-
-#ifdef DIAGNOSTIC
-                if (dstvalid)
-                        panic("pmap_transfer: dstvalid non-zero after drain");
-                if ((dstl.addr & (NBPD-1)) != 0)
-                        panic("pmap_transfer: dstaddr not on PD boundary "
-                            "(0x%lx)", dstl.addr);
-#endif
-
-                if (dstptes == NULL && dstl.pte != NULL) {
-                        /* dispose of old PT mapping */
-                        pmap_tmpunmap_pa();
-                        dstl.pte = NULL;
-                }
-
-                /*
-                 * get new dst PTP
-                 */
-                if (!pmap_valid_entry(dstpmap->pm_pdir[pdei(dstl.addr)])) {
-#ifdef DIAGNOSTIC
-                        if (dstl.addr >= VM_MIN_KERNEL_ADDRESS)
-                                panic("pmap_transfer: missing kernel PTP at "
-                                    "0x%lx", dstl.addr);
-#endif
-                        dstl.ptp = pmap_get_ptp(dstpmap, pdei(dstl.addr), TRUE);
-                        if (dstl.ptp == NULL) /* out of free RAM?  punt. */
-                                break;
-                } else {
-                        dstl.ptp = NULL;
-                }
-
-                dstvalid = PTES_PER_PTP;   /* new PTP */
-
-                /*
-                 * if the dstptes are un-mapped, then we need to tmpmap in the
-                 * dstl.ptp.
-                 */
-
-                if (dstptes == NULL) {
-                        dstl.pte = (pt_entry_t *)
-                            pmap_tmpmap_pa(dstpmap->pm_pdir[pdei(dstl.addr)]
-                            & PG_FRAME);
-                }
-
-                /*
-                 * we have a valid source block of "blkpgs" PTEs left
-                 * to transfer.  we just got a brand new dst PTP to
-                 * receive these PTEs.
-                 */
-
-#ifdef DIAGNOSTIC
-                if (dstvalid < blkpgs)
-                        panic("pmap_transfer: too many blkpgs?");
-#endif
-                toxfer = blkpgs;
-                ok = pmap_transfer_ptes(srcpmap, &srcl, dstpmap, &dstl, toxfer,
-                    move);
-
-                if (!ok)                /* memory shortage?  punt. */
-                        break;
-
-                dstvalid -= toxfer;
-                blkpgs -= toxfer;
-                len -= i386_ptob(toxfer);
-
-                /*
-                 * done src pte block
-                 */
-        }
-        if (dstptes == NULL && dstl.pte != NULL)
-                pmap_tmpunmap_pa();        /* dst PTP still mapped? */
-        pmap_unmap_ptes(mapped_pmap);
-}
-
-/*
- * pmap_transfer_ptes: transfer PTEs from one pmap to another
- *
- * => we assume that the needed PTPs are mapped and that we will
- *      not cross a block boundary.
- * => we return TRUE if we transfered all PTEs, FALSE if we were
- *      unable to allocate a pv_entry
- */
-
-static boolean_t
-pmap_transfer_ptes(srcpmap, srcl, dstpmap, dstl, toxfer, move)
-        struct pmap *srcpmap, *dstpmap;
-        struct pmap_transfer_location *srcl, *dstl;
-        int toxfer;
-        boolean_t move;
-{
-        pt_entry_t dstproto, opte;
-        int bank, off;
-        struct pv_head *pvh;
-        struct pv_entry *pve, *lpve;
-
-        /*
-         * generate "prototype" dst PTE
-         */
-
-        if (dstl->addr < VM_MAX_ADDRESS)
-                dstproto = PG_u;        /* "user" page */
-        else
-                dstproto = pmap_pg_g;   /* kernel page */
-
-        /*
-         * ensure we have dst PTP for user addresses.
-         */
-
-        if (dstl->ptp == NULL && dstl->addr < VM_MAXUSER_ADDRESS)
-                dstl->ptp = PHYS_TO_VM_PAGE(dstpmap->pm_pdir[pdei(dstl->addr)] &
-                    PG_FRAME);
-
-        /*
-         * main loop over range
-         */
-
-        for (/*null*/; toxfer > 0 ; toxfer--,
-            srcl->addr += NBPG, dstl->addr += NBPG,
-            srcl->pte++, dstl->pte++) {
-
-                if (!pmap_valid_entry(*srcl->pte)) /* skip invalid entrys */
-                        continue;
-
-#ifdef DIAGNOSTIC
-                if (pmap_valid_entry(*dstl->pte))
-                        panic("pmap_transfer_ptes: attempt to overwrite "
-                            "active entry");
-#endif
-
-                /*
-                 * let's not worry about non-pvlist mappings (typically device
-                 * pager mappings).
-                 */
-
-                opte = *srcl->pte;
-
-                if ((opte & PG_PVLIST) == 0)
-                        continue;
-
-                /*
-                 * if we are moving the mapping, then we can just adjust the
-                 * current pv_entry.  if we are copying the mapping, then we
-                 * need to allocate a new pv_entry to account for it.
-                 */
-
-                if (move == FALSE) {
-                        pve = pmap_alloc_pv(dstpmap, ALLOCPV_TRY);
-                        if (pve == NULL)
-                                return(FALSE);  /* punt! */
-                } else {
-                        pve = NULL;     /* XXX: quiet gcc warning */
-                }
-
-                /*
-                 * find the pv_head for this mapping.  since our mapping is
-                 * on the pvlist (PG_PVLIST), there must be a pv_head.
-                 */
-
-                bank = vm_physseg_find(atop(opte & PG_FRAME), &off);
-#ifdef DIAGNOSTIC
-                if (bank == -1)
-                        panic("pmap_transfer_ptes: PG_PVLIST PTE and "
-                            "no pv_head!");
-#endif
-                pvh = &vm_physmem[bank].pmseg.pvhead[off];
-
-                /*
-                 * now lock down the pvhead and find the current entry (there
-                 * must be one).
-                 */
-
-                simple_lock(&pvh->pvh_lock);
-                for (lpve = pvh->pvh_list ; lpve ; lpve = lpve->pv_next)
-                        if (lpve->pv_pmap == srcpmap &&
-                            lpve->pv_va == srcl->addr)
-                                break;
-#ifdef DIAGNOSTIC
-                if (lpve == NULL)
-                        panic("pmap_transfer_ptes: PG_PVLIST PTE, but "
-                            "entry not found");
-#endif
-
-                /*
-                 * update src ptp.  if the ptp is null in the pventry, then
-                 * we are not counting valid entrys for this ptp (this is only
-                 * true for kernel PTPs).
-                 */
-
-                if (srcl->ptp == NULL)
-                        srcl->ptp = lpve->pv_ptp;
-#ifdef DIAGNOSTIC
-                if (srcl->ptp &&
-                    (srcpmap->pm_pdir[pdei(srcl->addr)] & PG_FRAME) !=
-                    VM_PAGE_TO_PHYS(srcl->ptp))
-                        panic("pmap_transfer_ptes: pm_pdir - pv_ptp mismatch!");
-#endif
-
-                /*
-                 * for move, update the pve we just found (lpve) to
-                 * point to its new mapping.  for copy, init the new
-                 * pve and put it in the list.
-                 */
-
-                if (move == TRUE) {
-                        pve = lpve;
-                }
-                pve->pv_pmap = dstpmap;
-                pve->pv_va = dstl->addr;
-                pve->pv_ptp = dstl->ptp;
-                if (move == FALSE) {    /* link in copy */
-                        pve->pv_next = lpve->pv_next;
-                        lpve->pv_next = pve;
-                }
-
-                /*
-                 * sync the R/M bits while we are here.
-                 */
-
-                vm_physmem[bank].pmseg.attrs[off] |= (opte & (PG_U|PG_M));
-
-                /*
-                 * now actually update the ptes and unlock the pvlist.
-                 */
-
-                if (move) {
-                        *srcl->pte = 0;         /* zap! */
-                        if (pmap_is_curpmap(srcpmap))
-                                pmap_update_pg(srcl->addr);
-                        if (srcl->ptp)
-                                /* don't bother trying to free PTP */
-                                srcl->ptp->wire_count--;
-                        srcpmap->pm_stats.resident_count--;
-                        if (opte & PG_W)
-                                srcpmap->pm_stats.wired_count--;
-                }
-                *dstl->pte = (opte & ~(PG_u|PG_U|PG_M|PG_G|PG_W)) | dstproto;
-                dstpmap->pm_stats.resident_count++;
-                if (dstl->ptp)
-                        dstl->ptp->wire_count++;
-                simple_unlock(&pvh->pvh_lock);
-        }
-        return(TRUE);
-}
-
-/*
  * pmap_copy: copy mappings from one pmap to another
  *
  * => optional function
@@ -3394,7 +2946,7 @@ pmap_transfer_ptes(srcpmap, srcl, dstpmap, dstl, toxfer, move)
  */
 
 /*
- * defined as macro call in pmap.h
+ * defined as macro in pmap.h
  */
 
 /*
diff --git a/sys/arch/i386/include/pmap.h b/sys/arch/i386/include/pmap.h
index e03106c9187..361c7ac28e1 100644
--- a/sys/arch/i386/include/pmap.h
+++ b/sys/arch/i386/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.29 2002/11/24 19:54:54 pb Exp $ */
+/* $OpenBSD: pmap.h,v 1.30 2003/04/07 06:14:30 niklas Exp $ */
 /* $NetBSD: pmap.h,v 1.44 2000/04/24 17:18:18 thorpej Exp $ */
 
 /*
@@ -342,18 +342,6 @@ struct pmap_remove_record {
 };
 
 /*
- * pmap_transfer_location: used to pass the current location in the
- * pmap between pmap_transfer and pmap_transfer_ptes [e.g. during
- * a pmap_copy].
- */
-
-struct pmap_transfer_location {
-        vaddr_t addr;           /* the address (page-aligned) */
-        pt_entry_t *pte;        /* the PTE that maps address */
-        struct vm_page *ptp;    /* the PTP that the PTE lives in */
-};
-
-/*
  * global kernel variables
  */
 
@@ -395,8 +383,6 @@ static void pmap_protect(struct pmap *, vaddr_t,
                         vaddr_t, vm_prot_t);
 void pmap_remove(struct pmap *, vaddr_t, vaddr_t);
 boolean_t pmap_test_attrs(struct vm_page *, int);
-void pmap_transfer(struct pmap *, struct pmap *, vaddr_t,
-                   vsize_t, vaddr_t, boolean_t);
 static void pmap_update_pg(vaddr_t);
 static void pmap_update_2pg(vaddr_t,vaddr_t);
 void pmap_write_protect(struct pmap *, vaddr_t,