Diffstat (limited to 'sys')
33 files changed, 432 insertions(+), 514 deletions(-)
diff --git a/sys/arch/amiga/amiga/machdep.c b/sys/arch/amiga/amiga/machdep.c index f517b51e97f..794e44a9036 100644 --- a/sys/arch/amiga/amiga/machdep.c +++ b/sys/arch/amiga/amiga/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.47 2001/07/05 07:17:52 art Exp $ */ +/* $OpenBSD: machdep.c,v 1.48 2001/07/18 10:47:04 art Exp $ */ /* $NetBSD: machdep.c,v 1.95 1997/08/27 18:31:17 is Exp $ */ /* @@ -460,13 +460,7 @@ again: if (pg == NULL) panic("cpu_startup: not enough memory for " "buffer cache"); -#if defined(PMAP_NEW) pmap_kenter_pgs(curbuf, &pg, 1); -#else - pmap_enter(kernel_map->pmap, curbuf, - VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE, - VM_PROT_READ|VM_PROT_WRITE); -#endif curbuf += PAGE_SIZE; curbufsize -= PAGE_SIZE; } diff --git a/sys/arch/amiga/amiga/pmap.c b/sys/arch/amiga/amiga/pmap.c index 27a1d1c1064..ae23ce73391 100644 --- a/sys/arch/amiga/amiga/pmap.c +++ b/sys/arch/amiga/amiga/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.32 2001/06/27 03:54:13 art Exp $ */ +/* $OpenBSD: pmap.c,v 1.33 2001/07/18 10:47:04 art Exp $ */ /* $NetBSD: pmap.c,v 1.68 1999/06/19 19:44:09 is Exp $ */ /*- @@ -788,28 +788,17 @@ pmap_map(virt, start, end, prot) * the map will be used in software only, and * is bounded by that size. */ -pmap_t -pmap_create(size) - vsize_t size; +struct pmap * +pmap_create(void) { - pmap_t pmap; + struct pmap *pmap; #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) printf("pmap_create(%lx)\n", size); #endif - /* - * Software use map does not need a pmap - */ - if (size) - return(NULL); - /* XXX: is it ok to wait here? */ - pmap = (pmap_t)malloc(sizeof *pmap, M_VMPMAP, M_WAITOK); -#ifdef notifwewait - if (pmap == NULL) - panic("pmap_create: cannot allocate a pmap"); -#endif + pmap = (struct pmap *)malloc(sizeof *pmap, M_VMPMAP, M_WAITOK); bzero(pmap, sizeof(*pmap)); pmap_pinit(pmap); return (pmap); @@ -971,13 +960,14 @@ pmap_remove(pmap, sva, eva) * Lower the permission for all mappings to a given page. */ void -pmap_page_protect(pa, prot) - paddr_t pa; - vm_prot_t prot; +pmap_page_protect(struct vm_page *pg, vm_prot_t prot) { + paddr_t pa; pv_entry_t pv; int s; + pa = VM_PAGE_TO_PHYS(pg); + #ifdef DEBUG if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) @@ -1735,15 +1725,21 @@ pmap_copy_page(src, dst) * Clear the modify bits on the specified physical page. */ -void -pmap_clear_modify(pa) - paddr_t pa; +boolean_t +pmap_clear_modify(struct vm_page *pg) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); + boolean_t ret; + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) printf("pmap_clear_modify(%lx)\n", pa); #endif + ret = pmap_is_modified(pg); + pmap_changebit(pa, PG_M, FALSE); + + return (ret); } /* @@ -1752,14 +1748,19 @@ pmap_clear_modify(pa) * Clear the reference bit on the specified physical page. 
*/ -void pmap_clear_reference(pa) - paddr_t pa; +boolean_t +pmap_clear_reference(struct vm_page *pg) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); + boolean_t ret; #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) printf("pmap_clear_reference(%lx)\n", pa); #endif + ret = pmap_is_referenced(pg); pmap_changebit(pa, PG_U, FALSE); + + return (ret); } /* @@ -1770,9 +1771,9 @@ void pmap_clear_reference(pa) */ boolean_t -pmap_is_referenced(pa) - paddr_t pa; +pmap_is_referenced(struct vm_page *pg) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) { boolean_t rv = pmap_testbit(pa, PG_U); @@ -1791,9 +1792,9 @@ pmap_is_referenced(pa) */ boolean_t -pmap_is_modified(pa) - paddr_t pa; +pmap_is_modified(struct vm_page *pg) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) { boolean_t rv = pmap_testbit(pa, PG_M); @@ -2594,3 +2595,29 @@ pmap_virtual_space(vstartp, vendp) *vstartp = virtual_avail; *vendp = virtual_end; } + +void +pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) +{ + pmap_enter(pmap_kernel(), va, pa, prot, 1, VM_PROT_READ|VM_PROT_WRITE); +} + +void +pmap_kenter_pgs(vaddr_t va, struct vm_page **pgs, int npgs) +{ + int i; + + for (i = 0; i < npgs; i++, va += PAGE_SIZE) { + pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]), + VM_PROT_READ|VM_PROT_WRITE, 1, + VM_PROT_READ|VM_PROT_WRITE); + } +} + +void +pmap_kremove(vaddr_t va, vsize_t len) +{ + for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) { + pmap_remove(pmap_kernel(), va, va + PAGE_SIZE); + } +} diff --git a/sys/arch/hp300/hp300/machdep.c b/sys/arch/hp300/hp300/machdep.c index 06f2fa01f87..0c840515cb2 100644 --- a/sys/arch/hp300/hp300/machdep.c +++ b/sys/arch/hp300/hp300/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.53 2001/07/05 10:12:06 art Exp $ */ +/* $OpenBSD: machdep.c,v 1.54 2001/07/18 10:47:04 art Exp $ */ /* $NetBSD: machdep.c,v 1.121 1999/03/26 23:41:29 mycroft Exp $ */ /* @@ -327,13 +327,7 @@ cpu_startup() if (pg == NULL) panic("cpu_startup: not enough memory for " "buffer cache"); -#if defined(PMAP_NEW) pmap_kenter_pgs(curbuf, &pg, 1); -#else - pmap_enter(kernel_map->pmap, curbuf, - VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE, - TRUE, VM_PROT_READ|VM_PROT_WRITE); -#endif curbuf += PAGE_SIZE; curbufsize -= PAGE_SIZE; } diff --git a/sys/arch/hp300/hp300/pmap.c b/sys/arch/hp300/hp300/pmap.c index 459b76b906e..7a7de7b2d5a 100644 --- a/sys/arch/hp300/hp300/pmap.c +++ b/sys/arch/hp300/hp300/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.25 2001/06/27 04:05:45 art Exp $ */ +/* $OpenBSD: pmap.c,v 1.26 2001/07/18 10:47:04 art Exp $ */ /* $NetBSD: pmap.c,v 1.80 1999/09/16 14:52:06 chs Exp $ */ /*- @@ -707,7 +707,6 @@ pmap_map(va, spa, epa, prot) * * Note: no locking is necessary in this function. */ -#ifdef PMAP_NEW pmap_t pmap_create() { @@ -721,29 +720,6 @@ pmap_create() pmap_pinit(pmap); return (pmap); } -#else -pmap_t -pmap_create(size) - vsize_t size; -{ - pmap_t pmap; - - PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE, - ("pmap_create(%lx)\n", size)); - - /* - * Software use map does not need a pmap - */ - if (size) - return (NULL); - - pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); - - bzero(pmap, sizeof(*pmap)); - pmap_pinit(pmap); - return (pmap); -} -#endif /* * pmap_pinit: @@ -995,18 +971,11 @@ pmap_remove(pmap, sva, eva) * the permissions specified. 
*/ void -#ifdef PMAP_NEW pmap_page_protect(pg, prot) struct vm_page *pg; vm_prot_t prot; { paddr_t pa = VM_PAGE_TO_PHYS(pg); -#else -pmap_page_protect(pa, prot) - paddr_t pa; - vm_prot_t prot; -{ -#endif struct pv_entry *pv; int s; @@ -1450,7 +1419,6 @@ validate: #endif } -#ifdef PMAP_NEW void pmap_kenter_pa(va, pa, prot) vaddr_t va; @@ -1483,7 +1451,6 @@ pmap_kremove(va, len) pmap_remove(pmap_kernel(), va, va + PAGE_SIZE); } } -#endif /* * pmap_unwire: [ INTERFACE] @@ -1886,7 +1853,6 @@ pmap_copy_page(src, dst) * * Clear the modify bits on the specified physical page. */ -#ifdef PMAP_NEW boolean_t pmap_clear_modify(pg) struct vm_page *pg; @@ -1900,24 +1866,12 @@ pmap_clear_modify(pg) pmap_changebit(pa, 0, ~PG_M); return rv; } -#else -void -pmap_clear_modify(pa) - paddr_t pa; -{ - - PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%lx)\n", pa)); - - pmap_changebit(pa, 0, ~PG_M); -} -#endif /* * pmap_clear_reference: [ INTERFACE ] * * Clear the reference bit on the specified physical page. */ -#ifdef PMAP_NEW boolean_t pmap_clear_reference(pg) struct vm_page *pg; @@ -1931,17 +1885,6 @@ pmap_clear_reference(pg) pmap_changebit(pa, 0, ~PG_U); return rv; } -#else -void -pmap_clear_reference(pa) - paddr_t pa; -{ - - PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%lx)\n", pa)); - - pmap_changebit(pa, 0, ~PG_U); -} -#endif /* * pmap_is_referenced: [ INTERFACE ] @@ -1950,16 +1893,10 @@ pmap_clear_reference(pa) * by any physical maps. */ boolean_t -#ifdef PMAP_NEW pmap_is_referenced(pg) struct vm_page *pg; { paddr_t pa = VM_PAGE_TO_PHYS(pg); -#else -pmap_is_referenced(pa) - paddr_t pa; -{ -#endif #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) { boolean_t rv = pmap_testbit(pa, PG_U); @@ -1977,16 +1914,10 @@ pmap_is_referenced(pa) * by any physical maps. */ boolean_t -#ifdef PMAP_NEW pmap_is_modified(pg) struct vm_page *pg; { paddr_t pa = VM_PAGE_TO_PHYS(pg); -#else -pmap_is_modified(pa) - paddr_t pa; -{ -#endif #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) { boolean_t rv = pmap_testbit(pa, PG_M); diff --git a/sys/arch/hp300/hp300/vm_machdep.c b/sys/arch/hp300/hp300/vm_machdep.c index 3ba7f2d6c29..471db3600e3 100644 --- a/sys/arch/hp300/hp300/vm_machdep.c +++ b/sys/arch/hp300/hp300/vm_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: vm_machdep.c,v 1.27 2001/06/27 04:05:45 art Exp $ */ +/* $OpenBSD: vm_machdep.c,v 1.28 2001/07/18 10:47:04 art Exp $ */ /* $NetBSD: vm_machdep.c,v 1.47 1999/03/26 23:41:29 mycroft Exp $ */ /* @@ -238,16 +238,8 @@ pagemove(from, to, size) if (pmap_extract(pmap_kernel(), (vaddr_t)to, NULL) == TRUE) panic("pagemove 3"); #endif -#ifdef PMAP_NEW pmap_kremove((vaddr_t)from, PAGE_SIZE); pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE); -#else - pmap_remove(pmap_kernel(), - (vaddr_t)from, (vaddr_t)from + PAGE_SIZE); - pmap_enter(pmap_kernel(), - (vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE, 1, - VM_PROT_READ|VM_PROT_WRITE); -#endif from += PAGE_SIZE; to += PAGE_SIZE; size -= PAGE_SIZE; diff --git a/sys/arch/hp300/include/param.h b/sys/arch/hp300/include/param.h index 1a5e31b2a06..e2a2fb05e49 100644 --- a/sys/arch/hp300/include/param.h +++ b/sys/arch/hp300/include/param.h @@ -1,4 +1,4 @@ -/* $OpenBSD: param.h,v 1.15 2001/06/27 04:05:45 art Exp $ */ +/* $OpenBSD: param.h,v 1.16 2001/07/18 10:47:04 art Exp $ */ /* $NetBSD: param.h,v 1.35 1997/07/10 08:22:38 veego Exp $ */ /* @@ -105,6 +105,4 @@ void _delay __P((u_int)); ((unsigned)(v) & ~HPMMMASK) #endif -#define PMAP_NEW - #endif /* !_MACHINE_PARAM_H_ */ diff --git a/sys/arch/hppa/include/vmparam.h b/sys/arch/hppa/include/vmparam.h index 
327034c445e..090350eb780 100644 --- a/sys/arch/hppa/include/vmparam.h +++ b/sys/arch/hppa/include/vmparam.h @@ -1,4 +1,4 @@ -/* $OpenBSD: vmparam.h,v 1.14 2001/06/27 06:19:45 art Exp $ */ +/* $OpenBSD: vmparam.h,v 1.15 2001/07/18 10:47:04 art Exp $ */ /* * Copyright (c) 1988-1994, The University of Utah and @@ -126,8 +126,6 @@ #define VM_FREELIST_DEFAULT 0 #define VM_FREELIST_FIRST16 1 -#define PMAP_NEW - #ifdef _KERNEL struct pmap_physseg { struct pv_entry *pvent; diff --git a/sys/arch/i386/include/param.h b/sys/arch/i386/include/param.h index cbfeca72c22..c218be4b785 100644 --- a/sys/arch/i386/include/param.h +++ b/sys/arch/i386/include/param.h @@ -1,4 +1,4 @@ -/* $OpenBSD: param.h,v 1.17 2001/07/06 02:07:41 provos Exp $ */ +/* $OpenBSD: param.h,v 1.18 2001/07/18 10:47:04 art Exp $ */ /* $NetBSD: param.h,v 1.29 1996/03/04 05:04:26 cgd Exp $ */ /*- @@ -95,8 +95,6 @@ #define MSGBUFSIZE 2*NBPG /* default message buffer size */ #endif -#define PMAP_NEW - /* * Constants related to network buffer management. * MCLBYTES must be no larger than the software page size, and, diff --git a/sys/arch/mac68k/mac68k/pmap.c b/sys/arch/mac68k/mac68k/pmap.c index a97f05d39d0..72b9f2655d3 100644 --- a/sys/arch/mac68k/mac68k/pmap.c +++ b/sys/arch/mac68k/mac68k/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.25 2001/06/27 04:22:38 art Exp $ */ +/* $OpenBSD: pmap.c,v 1.26 2001/07/18 10:47:04 art Exp $ */ /* $NetBSD: pmap.c,v 1.55 1999/04/22 04:24:53 chs Exp $ */ /* @@ -663,21 +663,14 @@ pmap_map(va, spa, epa, prot) * * Note: no locking is necessary in this function. */ -pmap_t -pmap_create(size) - vsize_t size; +struct pmap * +pmap_create(void) { - pmap_t pmap; + struct pmap *pmap; PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE, ("pmap_create(%lx)\n", size)); - /* - * Software use map does not need a pmap - */ - if (size) - return (NULL); - pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); bzero(pmap, sizeof(*pmap)); @@ -896,13 +889,14 @@ pmap_remove(pmap, sva, eva) * the permissions specified. */ void -pmap_page_protect(pa, prot) - paddr_t pa; - vm_prot_t prot; +pmap_page_protect(struct vm_page *pg, vm_prot_t prot) { + paddr_t pa; struct pv_entry *pv; int s; + pa = VM_PAGE_TO_PHYS(pg); + #ifdef DEBUG if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) @@ -1654,14 +1648,19 @@ pmap_copy_page(src, dst) * * Clear the modify bits on the specified physical page. */ -void -pmap_clear_modify(pa) - paddr_t pa; +boolean_t +pmap_clear_modify(struct vm_page *pg) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); + boolean_t ret; + + ret = pmap_is_modified(pg); PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%lx)\n", pa)); pmap_changebit(pa, 0, ~PG_M); + + return (ret); } /* @@ -1670,13 +1669,18 @@ pmap_clear_modify(pa) * Clear the reference bit on the specified physical page. */ void -pmap_clear_reference(pa) - paddr_t pa; +pmap_clear_reference(struct vm_page *pg) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); + boolean_t ret; + + ret = pmap_is_referenced(pg); PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%lx)\n", pa)); pmap_changebit(pa, 0, ~PG_U); + + return (ret); } /* @@ -1686,9 +1690,10 @@ pmap_clear_reference(pa) * by any physical maps. */ boolean_t -pmap_is_referenced(pa) - paddr_t pa; +pmap_is_referenced(struct vm_page *pg) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) { boolean_t rv = pmap_testbit(pa, PG_U); @@ -1706,9 +1711,10 @@ pmap_is_referenced(pa) * by any physical maps. 
*/ boolean_t -pmap_is_modified(pa) - paddr_t pa; +pmap_is_modified(struct vm_page *pg) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) { boolean_t rv = pmap_testbit(pa, PG_M); @@ -2352,3 +2358,29 @@ pmap_check_wiring(str, va) str, va, entry->wired_count, count); } #endif /* DEBUG */ + +void +pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) +{ + pmap_enter(pmap_kernel(), va, pa, prot, 1, VM_PROT_READ|VM_PROT_WRITE); +} + +void +pmap_kenter_pgs(vaddr_t va, struct vm_page **pgs, int npgs) +{ + int i; + + for (i = 0; i < npgs; i++, va += PAGE_SIZE) { + pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]), + VM_PROT_READ|VM_PROT_WRITE, 1, + VM_PROT_READ|VM_PROT_WRITE); + } +} + +void +pmap_kremove(vaddr_t va, vsize_t len) +{ + for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) { + pmap_remove(pmap_kernel(), va, va + PAGE_SIZE); + } +} diff --git a/sys/arch/mvme68k/mvme68k/pmap.c b/sys/arch/mvme68k/mvme68k/pmap.c index 9f8d23476ba..de0741e52b6 100644 --- a/sys/arch/mvme68k/mvme68k/pmap.c +++ b/sys/arch/mvme68k/mvme68k/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.24 2001/06/27 06:19:49 art Exp $ */ +/* $OpenBSD: pmap.c,v 1.25 2001/07/18 10:47:04 art Exp $ */ /* * Copyright (c) 1995 Theo de Raadt @@ -676,29 +676,17 @@ pmap_map(va, spa, epa, prot) * the map will be used in software only, and * is bounded by that size. */ -pmap_t -pmap_create(size) - vm_size_t size; +struct pmap * +pmap_create(void) { - register pmap_t pmap; + struct pmap *pmap; #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) printf("pmap_create(%x)\n", size); #endif - /* - * Software use map does not need a pmap - */ - if (size) - return (NULL); - - /* XXX: is it ok to wait here? */ - pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK); -#ifdef notifwewait - if (pmap == NULL) - panic("pmap_create: cannot allocate a pmap"); -#endif + pmap = (struct pmap *) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK); bzero(pmap, sizeof(*pmap)); pmap_pinit(pmap); return (pmap); @@ -901,11 +889,10 @@ pmap_remove(pmap, sva, eva) * Lower the permission for all mappings to a given page. */ void -pmap_page_protect(pa, prot) - vm_offset_t pa; - vm_prot_t prot; +pmap_page_protect(struct vm_page *pg, vm_prot_t prot) { - register struct pv_entry *pv; + paddr_t pa = VM_PAGE_TO_PHYS(pg); + struct pv_entry *pv; int s; #ifdef DEBUG @@ -1584,14 +1571,20 @@ pmap_copy_page(src, dst) */ void -pmap_clear_modify(pa) - vm_offset_t pa; +pmap_clear_modify(struct vm_page *pg) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); + boolean_t ret; + + ret = pmap_is_modified(pg); + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) printf("pmap_clear_modify(%x)\n", pa); #endif pmap_changebit(pa, PG_M, FALSE); + + return (ret); } /* @@ -1600,14 +1593,21 @@ pmap_clear_modify(pa) * Clear the reference bit on the specified physical page. 
*/ -void pmap_clear_reference(pa) - vm_offset_t pa; +boolean_t +pmap_clear_reference(struct vm_page *pg) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); + boolean_t ret; + + ret = pmap_is_referenced(pg); + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) printf("pmap_clear_reference(%x)\n", pa); #endif pmap_changebit(pa, PG_U, FALSE); + + return (ret); } /* @@ -1618,9 +1618,10 @@ void pmap_clear_reference(pa) */ boolean_t -pmap_is_referenced(pa) - vm_offset_t pa; +pmap_is_referenced(struct vm_page *pg) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) { boolean_t rv = pmap_testbit(pa, PG_U); @@ -1639,9 +1640,9 @@ pmap_is_referenced(pa) */ boolean_t -pmap_is_modified(pa) - vm_offset_t pa; +pmap_is_modified(struct vm_page *pg) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) { boolean_t rv = pmap_testbit(pa, PG_M); @@ -2410,3 +2411,29 @@ pmap_check_wiring(str, va) str, va, entry->wired_count, count); } #endif + +void +pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) +{ + pmap_enter(pmap_kernel(), va, pa, prot, 1, VM_PROT_READ|VM_PROT_WRITE); +} + +void +pmap_kenter_pgs(vaddr_t va, struct vm_page **pgs, int npgs) +{ + int i; + + for (i = 0; i < npgs; i++, va += PAGE_SIZE) { + pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]), + VM_PROT_READ|VM_PROT_WRITE, 1, + VM_PROT_READ|VM_PROT_WRITE); + } +} + +void +pmap_kremove(vaddr_t va, vsize_t len) +{ + for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) { + pmap_remove(pmap_kernel(), va, va + PAGE_SIZE); + } +} diff --git a/sys/arch/mvme88k/mvme88k/pmap.c b/sys/arch/mvme88k/mvme88k/pmap.c index cf54862a50e..77f0be10d53 100644 --- a/sys/arch/mvme88k/mvme88k/pmap.c +++ b/sys/arch/mvme88k/mvme88k/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.34 2001/07/05 07:20:45 art Exp $ */ +/* $OpenBSD: pmap.c,v 1.35 2001/07/18 10:47:04 art Exp $ */ /* * Copyright (c) 1996 Nivas Madhur * All rights reserved. @@ -1452,20 +1452,14 @@ pmap_zero_page(vm_offset_t phys) * * This routines allocates a pmap structure. */ -pmap_t -pmap_create(vm_size_t size) +struct pmap * +pmap_create(void) { - pmap_t p; - - /* - * A software use-only map doesn't even need a map. - */ - if (size != 0) - return (PMAP_NULL); + struct pmap *p; CHECK_PMAP_CONSISTENCY("pmap_create"); - p = (pmap_t)malloc(sizeof(*p), M_VMPMAP, M_WAITOK); + p = (struct pmap *)malloc(sizeof(*p), M_VMPMAP, M_WAITOK); bzero(p, sizeof(*p)); pmap_pinit(p); @@ -3302,7 +3296,7 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst) * Clear the modify bits on the specified physical page. * * Parameters: - * phys physical address of page + * pg vm_page * * Extern/Global: * pv_head_table, pv_lists @@ -3317,14 +3311,14 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst) * pmap_pte * panic * - * For managed pages, the modify_list entry corresponding to the + * The modify_list entry corresponding to the * page's frame index will be zeroed. The PV list will be traversed. * For each pmap/va the hardware 'modified' bit in the page descripter table * entry inspected - and turned off if necessary. If any of the * inspected bits were found on, an TLB flush will be performed. 
*/ void -pmap_clear_modify(vm_offset_t phys) +pmap_clear_modify(struct vm_page *pg) { pv_entry_t pvl; pv_entry_t pvep; @@ -3335,14 +3329,16 @@ pmap_clear_modify(vm_offset_t phys) unsigned users; pte_template_t opte; int kflush; + paddr_t phys = VM_PAGE_TO_PHYS(pg); + boolean_t ret; + + ret = pmap_is_modified(pg); +#ifdef DIAGNOSTIC if (!PMAP_MANAGED(phys)) { -#ifdef DEBUG - if (pmap_con_dbg & CD_CMOD) - printf("(pmap_clear_modify :%x) phys addr 0x%x not managed \n", curproc, phys); -#endif - return; + panic("pmap_clear_modify: not managed?"); } +#endif SPLVM(spl); @@ -3361,7 +3357,7 @@ clear_modify_Retry: #endif UNLOCK_PVH(phys); SPLX(spl); - return; + return (ret); } /* for each listed pmap, turn off the page modified bit */ @@ -3401,6 +3397,8 @@ clear_modify_Retry: } UNLOCK_PVH(phys); SPLX(spl); + + return (ret); } /* pmap_clear_modify() */ /* @@ -3412,7 +3410,7 @@ clear_modify_Retry: * stored data into the page. * * Parameters: - * phys physical address og a page + * pg vm_page * * Extern/Global: * pv_head_array, pv lists @@ -3425,9 +3423,6 @@ clear_modify_Retry: * PA_TO_PVH * pmap_pte * - * If the physical address specified is not a managed page, this - * routine simply returns TRUE (looks like it is returning FALSE XXX). - * * If the entry in the modify list, corresponding to the given page, * is TRUE, this routine return TRUE. (This means at least one mapping * has been invalidated where the MMU had set the modified bit in the @@ -3439,21 +3434,20 @@ clear_modify_Retry: * immediately (doesn't need to walk remainder of list). */ boolean_t -pmap_is_modified(vm_offset_t phys) +pmap_is_modified(struct vm_page *pg) { pv_entry_t pvl; pv_entry_t pvep; pt_entry_t *ptep; int spl; boolean_t modified_flag; + paddr_t phys = VM_PAGE_TO_PHYS(pg); +#ifdef DIAGNOSTIC if (!PMAP_MANAGED(phys)) { -#ifdef DEBUG - if (pmap_con_dbg & CD_IMOD) - printf("(pmap_is_modified :%x) phys addr 0x%x not managed\n", curproc, phys); -#endif - return (FALSE); + panic("pmap_is_modified: not managed?"); } +#endif SPLVM(spl); @@ -3526,7 +3520,7 @@ is_mod_Retry: * Clear the reference bits on the specified physical page. * * Parameters: - * phys physical address of page + * pg vm_page * * Calls: * PMAP_MANAGED @@ -3540,32 +3534,33 @@ is_mod_Retry: * Extern/Global: * pv_head_array, pv lists * - * For managed pages, the coressponding PV list will be traversed. * For each pmap/va the hardware 'used' bit in the page table entry * inspected - and turned off if necessary. If any of the inspected bits * were found on, a TLB flush will be performed. 
*/ -void -pmap_clear_reference(vm_offset_t phys) +boolean_t +pmap_clear_reference(struct vm_page *pg) { - pv_entry_t pvl; - pv_entry_t pvep; - pt_entry_t *pte; - pmap_t pmap; - int spl, spl_sav; - vm_offset_t va; - unsigned users; - pte_template_t opte; - int kflush; + pv_entry_t pvl; + pv_entry_t pvep; + pt_entry_t *pte; + pmap_t pmap; + int spl, spl_sav; + vm_offset_t va; + unsigned users; + pte_template_t opte; + int kflush; + paddr_t phys; + boolean_t ret; + + phys = VM_PAGE_TO_PHYS(pg); +#ifdef DIAGNOSTIC if (!PMAP_MANAGED(phys)) { -#ifdef DEBUG - if (pmap_con_dbg & CD_CREF) { - printf("(pmap_clear_reference :%x) phys addr 0x%x not managed\n", curproc,phys); - } -#endif - return; + panic("pmap_clear_reference: not managed?"); } +#endif + ret = pmap_is_referenced(pg); SPLVM(spl); @@ -3582,7 +3577,7 @@ pmap_clear_reference(vm_offset_t phys) #endif UNLOCK_PVH(phys); SPLX(spl); - return; + return (ret); } /* for each listed pmap, turn off the page refrenced bit */ @@ -3622,6 +3617,8 @@ pmap_clear_reference(vm_offset_t phys) } UNLOCK_PVH(phys); SPLX(spl); + + return (ret); } /* pmap_clear_reference() */ /* @@ -3632,7 +3629,7 @@ pmap_clear_reference(vm_offset_t phys) * any physical maps. That is, whether the hardware has touched the page. * * Parameters: - * phys physical address of a page + * pg vm_page * * Extern/Global: * pv_head_array, pv lists @@ -3645,25 +3642,25 @@ pmap_clear_reference(vm_offset_t phys) * simple_lock * pmap_pte * - * If the physical address specified is not a managed page, this - * routine simply returns TRUE. - * - * Otherwise, this routine walks the PV list corresponding to the + * This routine walks the PV list corresponding to the * given page. For each pmap/va/ pair, the page descripter table entry is * examined. If a used bit is found on, the function returns TRUE * immediately (doesn't need to walk remainder of list). */ boolean_t -pmap_is_referenced(vm_offset_t phys) +pmap_is_referenced(struct vm_page *pg) { pv_entry_t pvl; pv_entry_t pvep; pt_entry_t *ptep; int spl; + paddr_t phys = VM_PAGE_TO_PHYS(pg); +#ifdef DIAGNOSTIC if (!PMAP_MANAGED(phys)) - return (FALSE); + panic("pmap_is_referenced: not managed?"); +#endif SPLVM(spl); @@ -3713,8 +3710,10 @@ is_ref_Retry: * Lower the permission for all mappings to a given page. 
*/ void -pmap_page_protect(vm_offset_t phys, vm_prot_t prot) +pmap_page_protect(struct vm_page *pg, vm_prot_t prot) { + paddr_t phys = VM_PAGE_TO_PHYS(pg); + switch (prot) { case VM_PROT_READ: case VM_PROT_READ|VM_PROT_EXECUTE: @@ -4426,3 +4425,29 @@ pmap_range_remove(pmap_range_t *ranges, vm_offset_t start, vm_offset_t end) range->start = end; } #endif /* FUTURE_MAYBE */ + +void +pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) +{ + pmap_enter(pmap_kernel(), va, pa, prot, 1, VM_PROT_READ|VM_PROT_WRITE); +} + +void +pmap_kenter_pgs(vaddr_t va, struct vm_page **pgs, int npgs) +{ + int i; + + for (i = 0; i < npgs; i++, va += PAGE_SIZE) { + pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]), + VM_PROT_READ|VM_PROT_WRITE, 1, + VM_PROT_READ|VM_PROT_WRITE); + } +} + +void +pmap_kremove(vaddr_t va, vsize_t len) +{ + for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) { + pmap_remove(pmap_kernel(), va, va + PAGE_SIZE); + } +} diff --git a/sys/arch/mvmeppc/include/pmap.h b/sys/arch/mvmeppc/include/pmap.h index 9491d97aadd..a0a0b898f89 100644 --- a/sys/arch/mvmeppc/include/pmap.h +++ b/sys/arch/mvmeppc/include/pmap.h @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.h,v 1.1 2001/06/26 21:57:47 smurph Exp $ */ +/* $OpenBSD: pmap.h,v 1.2 2001/07/18 10:47:05 art Exp $ */ /* $NetBSD: pmap.h,v 1.1 1996/09/30 16:34:29 ws Exp $ */ /*- @@ -77,10 +77,10 @@ typedef struct pmap *pmap_t; extern struct pmap kernel_pmap_; #define pmap_kernel() (&kernel_pmap_) -#define pmap_clear_modify(pa) (ptemodify((pa), PTE_CHG, 0)) -#define pmap_clear_reference(pa) (ptemodify((pa), PTE_REF, 0)) -#define pmap_is_modified(pa) (ptebits((pa), PTE_CHG)) -#define pmap_is_referenced(pa) (ptebits((pa), PTE_REF)) +#define pmap_clear_modify(pa) (ptemodify(VM_PAGE_TO_PHYS(pa), PTE_CHG, 0)) +#define pmap_clear_reference(pa) (ptemodify(VM_PAGE_TO_PHYS(pa), PTE_REF, 0)) +#define pmap_is_modified(pg) (ptebits(VM_PAGE_TO_PHYS(pg), PTE_CHG)) +#define pmap_is_referenced(pg) (ptebits(VM_PAGE_TO_PHYS(pg), PTE_REF)) #define pmap_change_wiring(pm, va, wired) #define pmap_unwire(pm, va) diff --git a/sys/arch/mvmeppc/mvmeppc/pmap.c b/sys/arch/mvmeppc/mvmeppc/pmap.c index fd04d8590c8..84edaf00974 100644 --- a/sys/arch/mvmeppc/mvmeppc/pmap.c +++ b/sys/arch/mvmeppc/mvmeppc/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.4 2001/07/06 05:14:30 smurph Exp $ */ +/* $OpenBSD: pmap.c,v 1.5 2001/07/18 10:47:05 art Exp $ */ /* $NetBSD: pmap.c,v 1.1 1996/09/30 16:34:52 ws Exp $ */ /* @@ -806,14 +806,8 @@ pmap_next_page(paddr) /* * Create and return a physical map. */ -#if defined(PMAP_NEW) struct pmap * pmap_create() -#else -struct pmap * -pmap_create(size) - vsize_t size; -#endif { struct pmap *pm; @@ -1426,9 +1420,9 @@ pmap_protect(pm, sva, eva, prot) pmap_remove(pm, sva, eva); } -void +boolean_t ptemodify(pa, mask, val) - vm_offset_t pa; + paddr_t pa; u_int mask; u_int val; { @@ -1437,10 +1431,13 @@ ptemodify(pa, mask, val) struct pte_ovfl *po; int i, s; char * pattr; + boolean_t ret; + + ret = ptebits(pa, mask); pv = pmap_find_pv(pa); if (pv == NULL) - return; + return (ret); pattr = pmap_find_attr(pa); /* @@ -1450,7 +1447,7 @@ ptemodify(pa, mask, val) *pattr |= val >> ATTRSHFT; if (pv->pv_idx < 0) - return; + return (ret); s = splimp(); for (; pv; pv = pv->pv_next) { @@ -1485,6 +1482,8 @@ ptemodify(pa, mask, val) } } splx(s); + + return (ret); } int @@ -1553,21 +1552,12 @@ ptebits(pa, bit) * There are only two cases: either the protection is going to 0, * or it is going to read-only. 
*/ -#if defined(PMAP_NEW) void pmap_page_protect(pg, prot) struct vm_page *pg; vm_prot_t prot; -#else -void -pmap_page_protect(pa, prot) - vm_offset_t pa; - vm_prot_t prot; -#endif { -#if defined(PMAP_NEW) vm_offset_t pa = VM_PAGE_TO_PHYS(pg); -#endif vm_offset_t va; pte_t *ptp; struct pte_ovfl *po, *npo; @@ -1577,7 +1567,7 @@ pmap_page_protect(pa, prot) pa &= ~ADDR_POFF; if (prot & VM_PROT_READ) { - ptemodify(pa, PTE_PP, PTE_RO); + ptemodify(pg, PTE_PP, PTE_RO); return; } diff --git a/sys/arch/powerpc/include/pmap.h b/sys/arch/powerpc/include/pmap.h index 1bcba5c2b3e..4d1f268427d 100644 --- a/sys/arch/powerpc/include/pmap.h +++ b/sys/arch/powerpc/include/pmap.h @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.h,v 1.13 2001/07/10 01:34:32 drahn Exp $ */ +/* $OpenBSD: pmap.h,v 1.14 2001/07/18 10:47:05 art Exp $ */ /* $NetBSD: pmap.h,v 1.1 1996/09/30 16:34:29 ws Exp $ */ /*- @@ -38,10 +38,6 @@ #include <machine/pte.h> /* - * FUCK -#define PMAP_NEW - */ -/* * Segment registers */ #ifndef _LOCORE @@ -84,22 +80,11 @@ extern struct pmap kernel_pmap_; int ptebits(paddr_t pa, int bit); -#ifdef PMAP_NEW -#define pmap_clear_modify(page) (ptemodify((page)->phys_addr, PTE_CHG, 0)) -#define pmap_clear_reference(page) (ptemodify((page)->phys_addr, PTE_REF, 0)) -#define pmap_is_modified(page) (ptebits((page)->phys_addr, PTE_CHG)) -#define pmap_is_referenced(page) (ptebits((page)->phys_addr, PTE_REF)) +#define pmap_clear_modify(page) (ptemodify(VM_PAGE_TO_PHYS(page), PTE_CHG, 0)) +#define pmap_clear_reference(page) (ptemodify(VM_PAGE_TO_PHYS(page), PTE_REF, 0)) +#define pmap_is_modified(page) (ptebits(VM_PAGE_TO_PHYS(page), PTE_CHG)) +#define pmap_is_referenced(page) (ptebits(VM_PAGE_TO_PHYS(page), PTE_REF)) #define pmap_unwire(pm, va) -#else -#define pmap_clear_modify(pa) (ptemodify((pa), PTE_CHG, 0)) -#define pmap_clear_reference(pa) (ptemodify((pa), PTE_REF, 0)) -#define pmap_is_modified(pa) (ptebits((pa), PTE_CHG)) -#define pmap_is_referenced(pa) (ptebits((pa), PTE_REF)) -#define pmap_unwire(pm, va) -/* XXX */ -void pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot); -#endif - #define pmap_phys_address(x) (x) #define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c index 7938f0fea2d..44c104b4edf 100644 --- a/sys/arch/powerpc/powerpc/pmap.c +++ b/sys/arch/powerpc/powerpc/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.35 2001/07/09 02:14:05 mickey Exp $ */ +/* $OpenBSD: pmap.c,v 1.36 2001/07/18 10:47:05 art Exp $ */ /* $NetBSD: pmap.c,v 1.1 1996/09/30 16:34:52 ws Exp $ */ /* @@ -825,14 +825,8 @@ pmap_next_page(paddr) /* * Create and return a physical map. 
*/ -#if defined(PMAP_NEW) struct pmap * pmap_create() -#else -struct pmap * -pmap_create(size) - vsize_t size; -#endif { struct pmap *pm; @@ -1442,9 +1436,9 @@ pmap_protect(pm, sva, eva, prot) pmap_remove(pm, sva, eva); } -void +boolean_t ptemodify(pa, mask, val) - vm_offset_t pa; + paddr_t pa; u_int mask; u_int val; { @@ -1453,10 +1447,13 @@ ptemodify(pa, mask, val) struct pte_ovfl *po; int i, s; char * pattr; + boolean_t ret; + + ret = ptebits(pa, mask); pv = pmap_find_pv(pa); if (pv == NULL) - return; + return (ret); pattr = pmap_find_attr(pa); /* @@ -1466,7 +1463,7 @@ ptemodify(pa, mask, val) *pattr |= val >> ATTRSHFT; if (pv->pv_idx < 0) - return; + return (ret); s = splimp(); for (; pv; pv = pv->pv_next) { @@ -1501,6 +1498,8 @@ ptemodify(pa, mask, val) } } splx(s); + + return (ret); } int @@ -1569,21 +1568,12 @@ ptebits(pa, bit) * There are only two cases: either the protection is going to 0, * or it is going to read-only. */ -#if defined(PMAP_NEW) void pmap_page_protect(pg, prot) struct vm_page *pg; vm_prot_t prot; -#else -void -pmap_page_protect(pa, prot) - vm_offset_t pa; - vm_prot_t prot; -#endif { -#if defined(PMAP_NEW) vm_offset_t pa = VM_PAGE_TO_PHYS(pg); -#endif vm_offset_t va; int s; struct pmap *pm; diff --git a/sys/arch/sparc/include/param.h b/sys/arch/sparc/include/param.h index a101f8e57af..d743bb76d18 100644 --- a/sys/arch/sparc/include/param.h +++ b/sys/arch/sparc/include/param.h @@ -1,4 +1,4 @@ -/* $OpenBSD: param.h,v 1.19 2001/07/06 02:07:43 provos Exp $ */ +/* $OpenBSD: param.h,v 1.20 2001/07/18 10:47:05 art Exp $ */ /* $NetBSD: param.h,v 1.29 1997/03/10 22:50:37 pk Exp $ */ /* @@ -133,8 +133,6 @@ extern int nbpg, pgofset, pgshift; #define NKMEMCLUSTERS (6 * 1024 * 1024 / PAGE_SIZE) #endif -#define PMAP_NEW - /* pages ("clicks") to disk blocks */ #define ctod(x) ((x) << (PGSHIFT - DEV_BSHIFT)) #define dtoc(x) ((x) >> (PGSHIFT - DEV_BSHIFT)) diff --git a/sys/arch/sun3/sun3/pmap.c b/sys/arch/sun3/sun3/pmap.c index fd4b9f7a218..f838fdcec4d 100644 --- a/sys/arch/sun3/sun3/pmap.c +++ b/sys/arch/sun3/sun3/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.26 2001/06/27 04:44:03 art Exp $ */ +/* $OpenBSD: pmap.c,v 1.27 2001/07/18 10:47:05 art Exp $ */ /* $NetBSD: pmap.c,v 1.64 1996/11/20 18:57:35 gwr Exp $ */ /*- @@ -1678,16 +1678,12 @@ pmap_page_upload() * the map will be used in software only, and * is bounded by that size. */ -pmap_t -pmap_create(size) - vm_size_t size; +struct pmap * +pmap_create(void) { - pmap_t pmap; - - if (size) - return NULL; + struct pmap *pmap; - pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK); + pmap = (struct pmap *) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK); pmap_common_init(pmap); pmap_user_pmap_init(pmap); return pmap; @@ -1748,11 +1744,12 @@ pmap_destroy(pmap) * Lower the permission for all mappings to a given page. */ void -pmap_page_protect(pa, prot) - vm_offset_t pa; - vm_prot_t prot; +pmap_page_protect(struct vm_page *pg, vm_prot_t prot) { int s; + paddr_t pa; + + pa = VM_PAGE_TO_PHYS(pg); PMAP_LOCK(); @@ -2584,35 +2581,36 @@ int pmap_fault_reload(pmap, va, ftype) /* * Clear the modify bit for the given physical page. 
*/ -void -pmap_clear_modify(pa) - register vm_offset_t pa; +boolean_t +pmap_clear_modify(struct vm_page *pg) { - register pv_entry_t pvhead; + pv_entry_t pvhead; + paddr_t pa = VM_PAGE_TO_PHYS(pg); + boolean_t ret; if (!pv_initialized) - return; - if (!managed(pa)) - return; + return (0); pvhead = pa_to_pvp(pa); pv_syncflags(pvhead); + ret = pvhead->pv_flags & PV_MOD; pvhead->pv_flags &= ~PV_MOD; + + return (ret); } /* * Tell whether the given physical page has been modified. */ int -pmap_is_modified(pa) - register vm_offset_t pa; +pmap_is_modified(struct vm_page *pg) { - register pv_entry_t pvhead; + pv_entry_t pvhead; + paddr_t pa = VM_PAGE_TO_PHYS(pg); if (!pv_initialized) return (0); - if (!managed(pa)) - return (0); + pvhead = pa_to_pvp(pa); if ((pvhead->pv_flags & PV_MOD) == 0) pv_syncflags(pvhead); @@ -2623,20 +2621,24 @@ pmap_is_modified(pa) * Clear the reference bit for the given physical page. * It's OK to just remove mappings if that's easier. */ -void -pmap_clear_reference(pa) - register vm_offset_t pa; +boolean_t +pmap_clear_reference(struct vm_page *pg) { - register pv_entry_t pvhead; + pv_entry_t pvhead; + paddr_t pa; + boolean_t ret; + + pa = VM_PAGE_TO_PHYS(pg); if (!pv_initialized) - return; - if (!managed(pa)) - return; + return (0); pvhead = pa_to_pvp(pa); pv_syncflags(pvhead); + ret = pvhead->pv_flags & PV_REF; pvhead->pv_flags &= ~PV_REF; + + return (ret); } /* @@ -2644,15 +2646,16 @@ pmap_clear_reference(pa) * It's OK to just return FALSE if page is not mapped. */ int -pmap_is_referenced(pa) - vm_offset_t pa; +pmap_is_referenced(struct vm_page *pg) { - register pv_entry_t pvhead; + pv_entry_t pvhead; + paddr_t pa; + + pa = VM_PAGE_TO_PHYS(pg); if (!pv_initialized) return (0); - if (!managed(pa)) - return (0); + pvhead = pa_to_pvp(pa); if ((pvhead->pv_flags & PV_REF) == 0) pv_syncflags(pvhead); @@ -3358,3 +3361,29 @@ pmap_deactivate(p) { /* not implemented. 
*/ } + +void +pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) +{ + pmap_enter(pmap_kernel(), va, pa, prot, 1, VM_PROT_READ|VM_PROT_WRITE); +} + +void +pmap_kenter_pgs(vaddr_t va, struct vm_page **pgs, int npgs) +{ + int i; + + for (i = 0; i < npgs; i++, va += PAGE_SIZE) { + pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]), + VM_PROT_READ|VM_PROT_WRITE, 1, + VM_PROT_READ|VM_PROT_WRITE); + } +} + +void +pmap_kremove(vaddr_t va, vsize_t len) +{ + for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) { + pmap_remove(pmap_kernel(), va, va + PAGE_SIZE); + } +} diff --git a/sys/arch/vax/include/vmparam.h b/sys/arch/vax/include/vmparam.h index 77896851c95..4b2f0f810b3 100644 --- a/sys/arch/vax/include/vmparam.h +++ b/sys/arch/vax/include/vmparam.h @@ -1,4 +1,4 @@ -/* $OpenBSD: vmparam.h,v 1.13 2001/06/27 06:19:57 art Exp $ */ +/* $OpenBSD: vmparam.h,v 1.14 2001/07/18 10:47:05 art Exp $ */ /* $NetBSD: vmparam.h,v 1.32 2000/03/07 00:05:59 matt Exp $ */ /*- @@ -148,5 +148,4 @@ struct pmap_physseg { #define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES) -#define PMAP_NEW #endif diff --git a/sys/kern/kern_malloc_debug.c b/sys/kern/kern_malloc_debug.c index 75c61259a19..ffb3835f635 100644 --- a/sys/kern/kern_malloc_debug.c +++ b/sys/kern/kern_malloc_debug.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_malloc_debug.c,v 1.7 2001/07/17 15:49:22 art Exp $ */ +/* $OpenBSD: kern_malloc_debug.c,v 1.8 2001/07/18 10:47:05 art Exp $ */ /* * Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org> @@ -149,12 +149,7 @@ debug_malloc(size, type, flags, addr) splx(s); -#ifdef PMAP_NEW pmap_kenter_pa(md->md_va, md->md_pa, VM_PROT_ALL); -#else - pmap_enter(pmap_kernel(), md->md_va, md->md_pa, VM_PROT_ALL, TRUE, - VM_PROT_READ|VM_PROT_WRITE); -#endif md->md_size = size; md->md_type = type; @@ -213,11 +208,7 @@ debug_free(addr, type) /* * unmap the page. 
*/ -#ifdef PMAP_NEW pmap_kremove(md->md_va, PAGE_SIZE); -#else - pmap_remove(pmap_kernel(), md->md_va, md->md_va + PAGE_SIZE); -#endif splx(s); return 1; diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c index 94e7bd7999f..f447b1b4819 100644 --- a/sys/uvm/uvm_amap.c +++ b/sys/uvm/uvm_amap.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_amap.c,v 1.8 2001/03/15 11:48:17 art Exp $ */ -/* $NetBSD: uvm_amap.c,v 1.21 1999/07/06 02:15:53 cgd Exp $ */ +/* $OpenBSD: uvm_amap.c,v 1.9 2001/07/18 10:47:05 art Exp $ */ +/* $NetBSD: uvm_amap.c,v 1.22 1999/09/12 01:17:33 chs Exp $ */ /* * @@ -478,9 +478,8 @@ amap_share_protect(entry, prot) if (amap->am_anon[lcv] == NULL) continue; if (amap->am_anon[lcv]->u.an_page != NULL) - pmap_page_protect( - PMAP_PGARG(amap->am_anon[lcv]->u.an_page), - prot); + pmap_page_protect(amap->am_anon[lcv]->u.an_page, + prot); } return; } @@ -491,8 +490,7 @@ amap_share_protect(entry, prot) if (slot < entry->aref.ar_pageoff || slot >= stop) continue; if (amap->am_anon[slot]->u.an_page != NULL) - pmap_page_protect( - PMAP_PGARG(amap->am_anon[slot]->u.an_page), prot); + pmap_page_protect(amap->am_anon[slot]->u.an_page, prot); } return; } diff --git a/sys/uvm/uvm_amap_i.h b/sys/uvm/uvm_amap_i.h index bc8f43e3fb6..8b6d76848ec 100644 --- a/sys/uvm/uvm_amap_i.h +++ b/sys/uvm/uvm_amap_i.h @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_amap_i.h,v 1.8 2001/05/10 14:51:21 art Exp $ */ -/* $NetBSD: uvm_amap_i.h,v 1.13 1999/07/07 05:31:40 thorpej Exp $ */ +/* $OpenBSD: uvm_amap_i.h,v 1.9 2001/07/18 10:47:05 art Exp $ */ +/* $NetBSD: uvm_amap_i.h,v 1.14 1999/09/12 01:17:34 chs Exp $ */ /* * @@ -137,8 +137,7 @@ amap_add(aref, offset, anon, replace) panic("amap_add: replacing null anon"); if (amap->am_anon[slot]->u.an_page != NULL && (amap->am_flags & AMAP_SHARED) != 0) { - pmap_page_protect( - PMAP_PGARG(amap->am_anon[slot]->u.an_page), + pmap_page_protect(amap->am_anon[slot]->u.an_page, VM_PROT_NONE); /* * XXX: suppose page is supposed to be wired somewhere? diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c index b393d701ddb..c94fd494ac6 100644 --- a/sys/uvm/uvm_anon.c +++ b/sys/uvm/uvm_anon.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_anon.c,v 1.7 2001/06/23 19:24:33 smart Exp $ */ -/* $NetBSD: uvm_anon.c,v 1.3 1999/08/14 06:25:48 ross Exp $ */ +/* $OpenBSD: uvm_anon.c,v 1.8 2001/07/18 10:47:05 art Exp $ */ +/* $NetBSD: uvm_anon.c,v 1.4 1999/09/12 01:17:34 chs Exp $ */ /* * @@ -212,7 +212,7 @@ uvm_anfree(anon) return; } - pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE); + pmap_page_protect(pg, VM_PROT_NONE); uvm_lock_pageq(); /* lock out pagedaemon */ uvm_pagefree(pg); /* bye bye */ uvm_unlock_pageq(); /* free the daemon */ diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c index 84e8a3539f5..82b1986f2d3 100644 --- a/sys/uvm/uvm_aobj.c +++ b/sys/uvm/uvm_aobj.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_aobj.c,v 1.10 2001/06/23 19:24:33 smart Exp $ */ -/* $NetBSD: uvm_aobj.c,v 1.25 1999/08/21 02:19:05 thorpej Exp $ */ +/* $OpenBSD: uvm_aobj.c,v 1.11 2001/07/18 10:47:05 art Exp $ */ +/* $NetBSD: uvm_aobj.c,v 1.26 1999/09/12 01:17:34 chs Exp $ */ /* * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and @@ -681,7 +681,7 @@ uao_detach(uobj) } /* zap the mappings, free the swap slot, free the page */ - pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE); + pmap_page_protect(pg, VM_PROT_NONE); uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT); uvm_lock_pageq(); uvm_pagefree(pg); @@ -859,8 +859,7 @@ uao_flush(uobj, start, stop, flags) continue; /* zap all mappings for the page. 
*/ - pmap_page_protect(PMAP_PGARG(pp), - VM_PROT_NONE); + pmap_page_protect(pp, VM_PROT_NONE); /* ...and deactivate the page. */ uvm_pagedeactivate(pp); @@ -889,8 +888,7 @@ uao_flush(uobj, start, stop, flags) } /* zap all mappings for the page. */ - pmap_page_protect(PMAP_PGARG(pp), - VM_PROT_NONE); + pmap_page_protect(pp, VM_PROT_NONE); uao_dropswap(uobj, pp->offset >> PAGE_SHIFT); uvm_pagefree(pp); @@ -1193,7 +1191,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags) */ ptmp->flags &= ~PG_FAKE; /* data is valid ... */ - pmap_clear_modify(PMAP_PGARG(ptmp)); /* ... and clean */ + pmap_clear_modify(ptmp); /* ... and clean */ pps[lcv] = ptmp; } /* lcv loop */ @@ -1237,7 +1235,7 @@ static boolean_t uao_releasepg(pg, nextpgp) /* * dispose of the page [caller handles PG_WANTED] and swap slot. */ - pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE); + pmap_page_protect(pg, VM_PROT_NONE); uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT); uvm_lock_pageq(); if (nextpgp) diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c index 2db0ebe05e1..0d82a99b2ae 100644 --- a/sys/uvm/uvm_fault.c +++ b/sys/uvm/uvm_fault.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_fault.c,v 1.14 2001/06/23 19:24:33 smart Exp $ */ -/* $NetBSD: uvm_fault.c,v 1.44 1999/07/22 22:58:38 thorpej Exp $ */ +/* $OpenBSD: uvm_fault.c,v 1.15 2001/07/18 10:47:05 art Exp $ */ +/* $NetBSD: uvm_fault.c,v 1.45 1999/09/12 01:17:35 chs Exp $ */ /* * @@ -208,7 +208,7 @@ uvmfault_anonflush(anons, n) if (pg && (pg->flags & PG_BUSY) == 0 && pg->loan_count == 0) { uvm_lock_pageq(); if (pg->wire_count == 0) { - pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE); + pmap_page_protect(pg, VM_PROT_NONE); uvm_pagedeactivate(pg); } uvm_unlock_pageq(); @@ -448,8 +448,7 @@ int uvmfault_anonget(ufi, amap, anon) * anon and try again. */ if (pg->flags & PG_RELEASED) { - pmap_page_protect(PMAP_PGARG(pg), - VM_PROT_NONE); /* to be safe */ + pmap_page_protect(pg, VM_PROT_NONE); simple_unlock(&anon->an_lock); uvm_anfree(anon); /* frees page for us */ if (locked) @@ -489,7 +488,7 @@ int uvmfault_anonget(ufi, amap, anon) * must be OK, clear modify (already PG_CLEAN) * and activate */ - pmap_clear_modify(PMAP_PGARG(pg)); + pmap_clear_modify(pg); uvm_lock_pageq(); uvm_pageactivate(pg); uvm_unlock_pageq(); @@ -858,7 +857,6 @@ ReFault: /* * note that if we are really short of RAM we could sleep in the above * call to pmap_enter with everything locked. bad? 
- * XXXCDC: this is fixed in PMAP_NEW (no sleep alloc's in pmap) */ /* @@ -1112,8 +1110,8 @@ ReFault: uvm_pagecopy(anon->u.an_page, pg); /* force reload */ - pmap_page_protect(PMAP_PGARG(anon->u.an_page), - VM_PROT_NONE); + pmap_page_protect(anon->u.an_page, + VM_PROT_NONE); uvm_lock_pageq(); /* KILL loan */ if (uobj) /* if we were loaning */ @@ -1501,8 +1499,7 @@ Case2: */ uvm_pagecopy(uobjpage, pg); /* old -> new */ pg->flags &= ~(PG_FAKE|PG_CLEAN); - pmap_page_protect(PMAP_PGARG(uobjpage), - VM_PROT_NONE); + pmap_page_protect(uobjpage, VM_PROT_NONE); if (uobjpage->flags & PG_WANTED) wakeup(uobjpage); /* uobj still locked */ @@ -1609,8 +1606,7 @@ Case2: * procs see it */ if ((amap_flags(amap) & AMAP_SHARED) != 0) { - pmap_page_protect(PMAP_PGARG(uobjpage), - VM_PROT_NONE); + pmap_page_protect(uobjpage, VM_PROT_NONE); } /* diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c index 8841554be91..394732d081d 100644 --- a/sys/uvm/uvm_km.c +++ b/sys/uvm/uvm_km.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_km.c,v 1.10 2001/06/23 19:24:33 smart Exp $ */ -/* $NetBSD: uvm_km.c,v 1.31 1999/07/22 22:58:38 thorpej Exp $ */ +/* $OpenBSD: uvm_km.c,v 1.11 2001/07/18 10:47:05 art Exp $ */ +/* $NetBSD: uvm_km.c,v 1.32 1999/09/12 01:17:36 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -601,13 +601,8 @@ uvm_km_kmemalloc(map, obj, size, flags) * it will need to lock it itself!) */ if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) { -#if defined(PMAP_NEW) pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL); -#else - pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), - UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE); -#endif } else { pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE); diff --git a/sys/uvm/uvm_loan.c b/sys/uvm/uvm_loan.c index 6ec2a96d307..64fff56afad 100644 --- a/sys/uvm/uvm_loan.c +++ b/sys/uvm/uvm_loan.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_loan.c,v 1.6 2001/03/08 15:21:37 smart Exp $ */ -/* $NetBSD: uvm_loan.c,v 1.18 1999/07/22 22:58:38 thorpej Exp $ */ +/* $OpenBSD: uvm_loan.c,v 1.7 2001/07/18 10:47:05 art Exp $ */ +/* $NetBSD: uvm_loan.c,v 1.19 1999/09/12 01:17:36 chs Exp $ */ /* * @@ -336,7 +336,7 @@ uvm_loananon(ufi, output, flags, anon) pg = anon->u.an_page; if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1) /* read protect it */ - pmap_page_protect(PMAP_PGARG(pg), VM_PROT_READ); + pmap_page_protect(pg, VM_PROT_READ); anon->an_ref++; **output = anon; *output = (*output) + 1; @@ -381,7 +381,7 @@ uvm_loananon(ufi, output, flags, anon) pg = anon->u.an_page; uvm_lock_pageq(); if (pg->loan_count == 0) - pmap_page_protect(PMAP_PGARG(pg), VM_PROT_READ); + pmap_page_protect(pg, VM_PROT_READ); pg->loan_count++; uvm_pagewire(pg); /* always wire it */ uvm_unlock_pageq(); @@ -533,7 +533,7 @@ uvm_loanuobj(ufi, output, flags, va) if ((flags & UVM_LOAN_TOANON) == 0) { /* loan to wired-kernel page? 
*/ uvm_lock_pageq(); if (pg->loan_count == 0) - pmap_page_protect(PMAP_PGARG(pg), VM_PROT_READ); + pmap_page_protect(pg, VM_PROT_READ); pg->loan_count++; uvm_pagewire(pg); uvm_unlock_pageq(); @@ -587,7 +587,7 @@ uvm_loanuobj(ufi, output, flags, va) pg->uanon = anon; uvm_lock_pageq(); if (pg->loan_count == 0) - pmap_page_protect(PMAP_PGARG(pg), VM_PROT_READ); + pmap_page_protect(pg, VM_PROT_READ); pg->loan_count++; uvm_pageactivate(pg); uvm_unlock_pageq(); @@ -751,7 +751,7 @@ uvm_unloanpage(ploans, npages) panic("uvm_unloanpage: page %p unowned but PG_BUSY!", pg); /* be safe */ - pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE); + pmap_page_protect(pg, VM_PROT_NONE); uvm_pagefree(pg); /* pageq locked above */ } diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c index 13f15ea07dc..5fb1a9ddeb1 100644 --- a/sys/uvm/uvm_map.c +++ b/sys/uvm/uvm_map.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_map.c,v 1.19 2001/07/17 10:55:02 mts Exp $ */ +/* $OpenBSD: uvm_map.c,v 1.20 2001/07/18 10:47:05 art Exp $ */ /* $NetBSD: uvm_map.c,v 1.68 1999/08/21 02:19:05 thorpej Exp $ */ /* @@ -1021,12 +1021,7 @@ uvm_unmap_remove(map, start, end, entry_list) * to vm_map_min(kernel_map). */ if (UVM_OBJ_IS_INTRSAFE_OBJECT(entry->object.uvm_obj)) { -#if defined(PMAP_NEW) pmap_kremove(entry->start, len); -#else - pmap_remove(pmap_kernel(), entry->start, - entry->start + len); -#endif uvm_km_pgremove_intrsafe(entry->object.uvm_obj, entry->start - vm_map_min(kernel_map), entry->end - vm_map_min(kernel_map)); @@ -2571,8 +2566,7 @@ uvm_map_clean(map, start, end, flags) #endif /* zap all mappings for the page. */ - pmap_page_protect(PMAP_PGARG(pg), - VM_PROT_NONE); + pmap_page_protect(pg, VM_PROT_NONE); /* ...and deactivate the page. */ uvm_pagedeactivate(pg); @@ -2730,11 +2724,7 @@ uvmspace_init(vm, pmap, min, max, pageable) if (pmap) pmap_reference(pmap); else -#if defined(PMAP_NEW) pmap = pmap_create(); -#else - pmap = pmap_create(0); -#endif vm->vm_map.pmap = pmap; vm->vm_refcnt = 1; diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c index 14fadaac3b9..db387595171 100644 --- a/sys/uvm/uvm_page.c +++ b/sys/uvm/uvm_page.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_page.c,v 1.16 2001/04/10 06:59:12 niklas Exp $ */ -/* $NetBSD: uvm_page.c,v 1.24 1999/07/22 22:58:38 thorpej Exp $ */ +/* $OpenBSD: uvm_page.c,v 1.17 2001/07/18 10:47:05 art Exp $ */ +/* $NetBSD: uvm_page.c,v 1.25 1999/09/12 01:17:38 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -438,19 +438,11 @@ uvm_pageboot_alloc(size) if (!uvm_page_physget(&paddr)) panic("uvm_pageboot_alloc: out of memory"); - /* XXX: should be wired, but some pmaps don't like that ... */ -#if defined(PMAP_NEW) /* * Note this memory is no longer managed, so using * pmap_kenter is safe. */ pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE); -#else - pmap_enter(pmap_kernel(), vaddr, paddr, - VM_PROT_READ|VM_PROT_WRITE, FALSE, - VM_PROT_READ|VM_PROT_WRITE); -#endif - } return(addr); #endif /* PMAP_STEAL_MEMORY */ diff --git a/sys/uvm/uvm_page_i.h b/sys/uvm/uvm_page_i.h index 6e5b493df8c..d191471984f 100644 --- a/sys/uvm/uvm_page_i.h +++ b/sys/uvm/uvm_page_i.h @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_page_i.h,v 1.5 2001/01/29 02:07:47 niklas Exp $ */ -/* $NetBSD: uvm_page_i.h,v 1.10 1999/05/24 19:10:57 thorpej Exp $ */ +/* $OpenBSD: uvm_page_i.h,v 1.6 2001/07/18 10:47:05 art Exp $ */ +/* $NetBSD: uvm_page_i.h,v 1.11 1999/09/12 01:17:38 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. 
@@ -226,8 +226,8 @@ uvm_pagedeactivate(pg) TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq); pg->pqflags |= PQ_INACTIVE; uvmexp.inactive++; - pmap_clear_reference(PMAP_PGARG(pg)); - if (pmap_is_modified(PMAP_PGARG(pg))) + pmap_clear_reference(pg); + if (pmap_is_modified(pg)) pg->flags &= ~PG_CLEAN; } } diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c index b88cb44ea8a..66f7c7f3309 100644 --- a/sys/uvm/uvm_pager.c +++ b/sys/uvm/uvm_pager.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_pager.c,v 1.9 2001/06/08 08:09:40 art Exp $ */ -/* $NetBSD: uvm_pager.c,v 1.21 1999/07/08 01:02:44 thorpej Exp $ */ +/* $OpenBSD: uvm_pager.c,v 1.10 2001/07/18 10:47:05 art Exp $ */ +/* $NetBSD: uvm_pager.c,v 1.23 1999/09/12 01:17:41 chs Exp $ */ /* * @@ -327,7 +327,7 @@ uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi) if ((pclust->flags & PG_CLEANCHK) == 0) { if ((pclust->flags & (PG_CLEAN|PG_BUSY)) == PG_CLEAN && - pmap_is_modified(PMAP_PGARG(pclust))) + pmap_is_modified(pclust)) pclust->flags &= ~PG_CLEAN; /* now checked */ pclust->flags |= PG_CLEANCHK; @@ -341,7 +341,7 @@ uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi) pclust->flags |= PG_BUSY; /* busy! */ UVM_PAGE_OWN(pclust, "uvm_mk_pcluster"); /* XXX: protect wired page? see above comment. */ - pmap_page_protect(PMAP_PGARG(pclust), VM_PROT_READ); + pmap_page_protect(pclust, VM_PROT_READ); if (!forward) { ppsp--; /* back up one page */ *ppsp = pclust; @@ -391,7 +391,7 @@ uvm_shareprot(entry, prot) for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) { if (pp->offset >= start && pp->offset < stop) - pmap_page_protect(PMAP_PGARG(pp), prot); + pmap_page_protect(pp, prot); } UVMHIST_LOG(maphist, "<- done",0,0,0,0); } @@ -656,8 +656,7 @@ uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags, swblk) ppsp[lcv]->flags &= ~(PG_BUSY); UVM_PAGE_OWN(ppsp[lcv], NULL); - pmap_page_protect(PMAP_PGARG(ppsp[lcv]), - VM_PROT_NONE); /* be safe */ + pmap_page_protect(ppsp[lcv], VM_PROT_NONE); simple_unlock(&ppsp[lcv]->uanon->an_lock); /* kills anon and frees pg */ uvm_anfree(ppsp[lcv]->uanon); @@ -707,8 +706,8 @@ uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags, swblk) * had a successful pageout update the page! */ if (flags & PGO_PDFREECLUST) { - pmap_clear_reference(PMAP_PGARG(ppsp[lcv])); - pmap_clear_modify(PMAP_PGARG(ppsp[lcv])); + pmap_clear_reference(ppsp[lcv]); + pmap_clear_modify(ppsp[lcv]); ppsp[lcv]->flags |= PG_CLEAN; } diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c index 9f492be2a87..7cfa18d5d76 100644 --- a/sys/uvm/uvm_pdaemon.c +++ b/sys/uvm/uvm_pdaemon.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_pdaemon.c,v 1.9 2001/03/22 03:05:56 smart Exp $ */ -/* $NetBSD: uvm_pdaemon.c,v 1.17 1999/07/22 22:58:39 thorpej Exp $ */ +/* $OpenBSD: uvm_pdaemon.c,v 1.10 2001/07/18 10:47:05 art Exp $ */ +/* $NetBSD: uvm_pdaemon.c,v 1.18 1999/09/12 01:17:41 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -402,7 +402,7 @@ uvmpd_scan_inactive(pglst) * inactive pages shouldn't have any valid mappings * and we cleared reference before deactivating). */ - if (pmap_is_referenced(PMAP_PGARG(p))) { + if (pmap_is_referenced(p)) { uvm_pageactivate(p); uvmexp.pdreact++; continue; @@ -498,7 +498,7 @@ uvmpd_scan_inactive(pglst) } /* zap all mappings with pmap_page_protect... 
*/ - pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE); + pmap_page_protect(p, VM_PROT_NONE); uvm_pagefree(p); uvmexp.pdfreed++; @@ -595,7 +595,7 @@ uvmpd_scan_inactive(pglst) swap_backed = ((p->pqflags & PQ_SWAPBACKED) != 0); p->flags |= PG_BUSY; /* now we own it */ UVM_PAGE_OWN(p, "scan_inactive"); - pmap_page_protect(PMAP_PGARG(p), VM_PROT_READ); + pmap_page_protect(p, VM_PROT_READ); uvmexp.pgswapout++; /* @@ -833,8 +833,7 @@ uvmpd_scan_inactive(pglst) simple_unlock(&anon->an_lock); uvm_anfree(anon); /* kills anon */ - pmap_page_protect(PMAP_PGARG(p), - VM_PROT_NONE); + pmap_page_protect(p, VM_PROT_NONE); anon = NULL; uvm_lock_pageq(); nextpg = p->pageq.tqe_next; @@ -877,15 +876,15 @@ uvmpd_scan_inactive(pglst) /* pageout was a failure... */ if (result != VM_PAGER_AGAIN) uvm_pageactivate(p); - pmap_clear_reference(PMAP_PGARG(p)); + pmap_clear_reference(p); /* XXXCDC: if (swap_backed) FREE p's * swap block? */ } else { /* pageout was a success... */ - pmap_clear_reference(PMAP_PGARG(p)); - pmap_clear_modify(PMAP_PGARG(p)); + pmap_clear_reference(p); + pmap_clear_modify(p); p->flags |= PG_CLEAN; /* XXX: could free page here, but old * pagedaemon does not */ @@ -1092,7 +1091,7 @@ uvmpd_scan() * inactive pages. */ if (inactive_shortage > 0) { - pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE); + pmap_page_protect(p, VM_PROT_NONE); /* no need to check wire_count as pg is "active" */ uvm_pagedeactivate(p); uvmexp.pddeact++; diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c index 12327f97836..9d035720307 100644 --- a/sys/uvm/uvm_vnode.c +++ b/sys/uvm/uvm_vnode.c @@ -1,5 +1,5 @@ -/* $OpenBSD: uvm_vnode.c,v 1.13 2001/06/23 19:24:34 smart Exp $ */ -/* $NetBSD: uvm_vnode.c,v 1.25 1999/07/22 22:58:39 thorpej Exp $ */ +/* $OpenBSD: uvm_vnode.c,v 1.14 2001/07/18 10:47:05 art Exp $ */ +/* $NetBSD: uvm_vnode.c,v 1.26 1999/09/12 01:17:42 chs Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. 
@@ -698,7 +698,7 @@ uvn_releasepg(pg, nextpgp) /* * dispose of the page [caller handles PG_WANTED] */ - pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE); + pmap_page_protect(pg, VM_PROT_NONE); uvm_lock_pageq(); if (nextpgp) *nextpgp = pg->pageq.tqe_next; /* next page for daemon */ @@ -973,9 +973,9 @@ uvn_flush(uobj, start, stop, flags) if ((pp->flags & PG_CLEAN) != 0 && (flags & PGO_FREE) != 0 && (pp->pqflags & PQ_ACTIVE) != 0) - pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE); + pmap_page_protect(pp, VM_PROT_NONE); if ((pp->flags & PG_CLEAN) != 0 && - pmap_is_modified(PMAP_PGARG(pp))) + pmap_is_modified(pp)) pp->flags &= ~(PG_CLEAN); pp->flags |= PG_CLEANCHK; /* update "hint" */ @@ -998,8 +998,7 @@ uvn_flush(uobj, start, stop, flags) if (flags & PGO_DEACTIVATE) { if ((pp->pqflags & PQ_INACTIVE) == 0 && pp->wire_count == 0) { - pmap_page_protect(PMAP_PGARG(pp), - VM_PROT_NONE); + pmap_page_protect(pp, VM_PROT_NONE); uvm_pagedeactivate(pp); } @@ -1008,8 +1007,7 @@ uvn_flush(uobj, start, stop, flags) /* release busy pages */ pp->flags |= PG_RELEASED; } else { - pmap_page_protect(PMAP_PGARG(pp), - VM_PROT_NONE); + pmap_page_protect(pp, VM_PROT_NONE); /* removed page from object */ uvm_pagefree(pp); } @@ -1029,7 +1027,7 @@ uvn_flush(uobj, start, stop, flags) pp->flags |= PG_BUSY; /* we 'own' page now */ UVM_PAGE_OWN(pp, "uvn_flush"); - pmap_page_protect(PMAP_PGARG(pp), VM_PROT_READ); + pmap_page_protect(pp, VM_PROT_READ); pp_version = pp->version; ReTry: ppsp = pps; @@ -1178,8 +1176,7 @@ ReTry: } else { ptmp->flags |= (PG_CLEAN|PG_CLEANCHK); if ((flags & PGO_FREE) == 0) - pmap_clear_modify( - PMAP_PGARG(ptmp)); + pmap_clear_modify(ptmp); } } @@ -1190,8 +1187,7 @@ ReTry: if (flags & PGO_DEACTIVATE) { if ((pp->pqflags & PQ_INACTIVE) == 0 && pp->wire_count == 0) { - pmap_page_protect(PMAP_PGARG(ptmp), - VM_PROT_NONE); + pmap_page_protect(ptmp, VM_PROT_NONE); uvm_pagedeactivate(ptmp); } @@ -1211,8 +1207,7 @@ ReTry: "lost!\n"); retval = FALSE; } - pmap_page_protect(PMAP_PGARG(ptmp), - VM_PROT_NONE); + pmap_page_protect(ptmp, VM_PROT_NONE); uvm_pagefree(ptmp); } } @@ -1544,7 +1539,7 @@ uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags) */ ptmp->flags &= ~PG_FAKE; /* data is valid ... */ - pmap_clear_modify(PMAP_PGARG(ptmp)); /* ... and clean */ + pmap_clear_modify(ptmp); /* ... and clean */ pps[lcv] = ptmp; } /* lcv loop */ diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h index fae9746cb92..9de119abdc4 100644 --- a/sys/vm/pmap.h +++ b/sys/vm/pmap.h @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.h,v 1.18 2001/06/27 06:19:58 art Exp $ */ +/* $OpenBSD: pmap.h,v 1.19 2001/07/18 10:47:05 art Exp $ */ /* $NetBSD: pmap.h,v 1.16 1996/03/31 22:15:32 pk Exp $ */ /* @@ -88,34 +88,12 @@ typedef struct pmap_statistics *pmap_statistics_t; #include <machine/pmap.h> /* - * PMAP_PGARG hack - * - * operations that take place on managed pages used to take PAs. - * this caused us to translate the PA back to a page (or pv_head). - * PMAP_NEW avoids this by passing the vm_page in (pv_head should be - * pointed to by vm_page (or be a part of it)). - * - * applies to: pmap_page_protect, pmap_is_referenced, pmap_is_modified, - * pmap_clear_reference, pmap_clear_modify. - * - * the latter two functions are boolean_t in PMAP_NEW. they return - * TRUE if something was cleared. - */ -#if defined(PMAP_NEW) -#define PMAP_PGARG(PG) (PG) -#else -#define PMAP_PGARG(PG) (VM_PAGE_TO_PHYS(PG)) -#endif - -#ifdef PMAP_NEW -/* * Flags passed to pmap_enter(). 
Note the bottom 3 bits are VM_PROT_* * bits, used to indicate the access type that was made (to seed modified * and referenced information). */ #define PMAP_WIRED 0x00000010 /* wired mapping */ #define PMAP_CANFAIL 0x00000020 /* can fail if resource shortage */ -#endif #ifndef PMAP_EXCLUDE_DECLS /* Used in Sparc port to virtualize pmap mod */ #ifdef _KERNEL @@ -123,28 +101,19 @@ __BEGIN_DECLS void *pmap_bootstrap_alloc __P((int)); void pmap_unwire __P((pmap_t, vaddr_t)); -#if defined(PMAP_NEW) #if !defined(pmap_clear_modify) boolean_t pmap_clear_modify __P((struct vm_page *)); #endif #if !defined(pmap_clear_reference) boolean_t pmap_clear_reference __P((struct vm_page *)); #endif -#else /* PMAP_NEW */ -void pmap_clear_modify __P((paddr_t pa)); -void pmap_clear_reference __P((paddr_t pa)); -#endif /* PMAP_NEW */ void pmap_collect __P((pmap_t)); void pmap_copy __P((pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t)); void pmap_copy_page __P((paddr_t, paddr_t)); -#if defined(PMAP_NEW) struct pmap *pmap_create __P((void)); -#else -pmap_t pmap_create __P((vsize_t)); -#endif void pmap_destroy __P((pmap_t)); -#if defined(PMAP_NEW) && defined(__i386__) +#if defined(__i386__) #ifdef notyet int pmap_enter __P((pmap_t, vaddr_t, paddr_t, vm_prot_t, int)); #else @@ -158,13 +127,12 @@ void pmap_enter __P((pmap_t, #endif boolean_t pmap_extract __P((pmap_t, vaddr_t, paddr_t *)); -#if defined(PMAP_NEW) && defined(PMAP_GROWKERNEL) +#if defined(PMAP_GROWKERNEL) vaddr_t pmap_growkernel __P((vaddr_t)); #endif void pmap_init __P((void)); -#if defined(PMAP_NEW) void pmap_kenter_pa __P((vaddr_t, paddr_t, vm_prot_t)); void pmap_kenter_pgs __P((vaddr_t, struct vm_page **, int)); void pmap_kremove __P((vaddr_t, vsize_t)); @@ -174,16 +142,7 @@ boolean_t pmap_is_modified __P((struct vm_page *)); #if !defined(pmap_is_referenced) boolean_t pmap_is_referenced __P((struct vm_page *)); #endif -#else /* PMAP_NEW */ -boolean_t pmap_is_modified __P((paddr_t pa)); -boolean_t pmap_is_referenced __P((paddr_t pa)); -#endif /* PMAP_NEW */ - -#if defined(PMAP_NEW) void pmap_page_protect __P((struct vm_page *, vm_prot_t)); -#else -void pmap_page_protect __P((paddr_t, vm_prot_t)); -#endif #if !defined(pmap_phys_address) paddr_t pmap_phys_address __P((int)); |
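For quick reference, below is a condensed sketch of the machine-independent pmap interface as it reads once this commit drops the PMAP_NEW conditionals. The prototypes are copied from the sys/vm/pmap.h hunk above; the grouping and comments are editorial, and this is not a complete header (the usual kernel types such as vaddr_t, paddr_t, vm_prot_t and boolean_t are assumed to be in scope).

	/* pmap_create() no longer takes a size argument. */
	struct pmap *pmap_create(void);

	/* Managed-page operations take a struct vm_page * instead of a
	 * paddr_t; the PMAP_PGARG() wrapper at call sites goes away. */
	void      pmap_page_protect(struct vm_page *, vm_prot_t);
	boolean_t pmap_is_modified(struct vm_page *);
	boolean_t pmap_is_referenced(struct vm_page *);

	/* The clear routines now return boolean_t: the per-architecture
	 * implementations above record the old state (typically via
	 * pmap_is_modified/pmap_is_referenced) before clearing the bit
	 * and return it. */
	boolean_t pmap_clear_modify(struct vm_page *);
	boolean_t pmap_clear_reference(struct vm_page *);

	/* Unmanaged kernel mappings, formerly available only under
	 * PMAP_NEW; architectures without a native implementation gain
	 * wrappers built on pmap_enter()/pmap_remove(), as added at the
	 * end of the affected pmap.c files above. */
	void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t);
	void pmap_kenter_pgs(vaddr_t, struct vm_page **, int);
	void pmap_kremove(vaddr_t, vsize_t);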