author     Artur Grabowski <art@cvs.openbsd.org>	2001-07-18 10:47:06 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>	2001-07-18 10:47:06 +0000
commit     7014e9bdb5c2bbd8611c221808ec547e916bbb79 (patch)
tree       696ce0ce037cffc3cf643967ca0d7b194de7df7a /sys/arch
parent     33fbe5eea52c9938f6f888b886db7cbe2583b9d3 (diff)
Get rid of the PMAP_NEW option by making it mandatory for all archs.
The archs that didn't have a proper PMAP_NEW now have a dummy implementation with wrappers around the old functions.
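For the archs that only gain the dummy implementation, the new kernel-mapping entry points are thin wrappers over the old pmap_enter()/pmap_remove() interface. A condensed sketch of that wrapper shape, lifted from the per-arch pmap.c additions in this diff (the arch's own headers supply the prototypes and types):

void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	/* Enter a wired kernel mapping through the old interface. */
	pmap_enter(pmap_kernel(), va, pa, prot, 1, VM_PROT_READ|VM_PROT_WRITE);
}

void
pmap_kremove(vaddr_t va, vsize_t len)
{
	/* Tear the range down one page at a time with pmap_remove(). */
	for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE)
		pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
}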
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/amiga/amiga/machdep.c    |    8
-rw-r--r--  sys/arch/amiga/amiga/pmap.c       |   83
-rw-r--r--  sys/arch/hp300/hp300/machdep.c    |    8
-rw-r--r--  sys/arch/hp300/hp300/pmap.c       |   71
-rw-r--r--  sys/arch/hp300/hp300/vm_machdep.c |   10
-rw-r--r--  sys/arch/hp300/include/param.h    |    4
-rw-r--r--  sys/arch/hppa/include/vmparam.h   |    4
-rw-r--r--  sys/arch/i386/include/param.h     |    4
-rw-r--r--  sys/arch/mac68k/mac68k/pmap.c     |   78
-rw-r--r--  sys/arch/mvme68k/mvme68k/pmap.c   |   85
-rw-r--r--  sys/arch/mvme88k/mvme88k/pmap.c   |  141
-rw-r--r--  sys/arch/mvmeppc/include/pmap.h   |   10
-rw-r--r--  sys/arch/mvmeppc/mvmeppc/pmap.c   |   32
-rw-r--r--  sys/arch/powerpc/include/pmap.h   |   25
-rw-r--r--  sys/arch/powerpc/powerpc/pmap.c   |   30
-rw-r--r--  sys/arch/sparc/include/param.h    |    4
-rw-r--r--  sys/arch/sun3/sun3/pmap.c         |  101
-rw-r--r--  sys/arch/vax/include/vmparam.h    |    3
18 files changed, 354 insertions, 347 deletions
diff --git a/sys/arch/amiga/amiga/machdep.c b/sys/arch/amiga/amiga/machdep.c
index f517b51e97f..794e44a9036 100644
--- a/sys/arch/amiga/amiga/machdep.c
+++ b/sys/arch/amiga/amiga/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.47 2001/07/05 07:17:52 art Exp $ */
+/* $OpenBSD: machdep.c,v 1.48 2001/07/18 10:47:04 art Exp $ */
/* $NetBSD: machdep.c,v 1.95 1997/08/27 18:31:17 is Exp $ */
/*
@@ -460,13 +460,7 @@ again:
if (pg == NULL)
panic("cpu_startup: not enough memory for "
"buffer cache");
-#if defined(PMAP_NEW)
pmap_kenter_pgs(curbuf, &pg, 1);
-#else
- pmap_enter(kernel_map->pmap, curbuf,
- VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE,
- VM_PROT_READ|VM_PROT_WRITE);
-#endif
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
diff --git a/sys/arch/amiga/amiga/pmap.c b/sys/arch/amiga/amiga/pmap.c
index 27a1d1c1064..ae23ce73391 100644
--- a/sys/arch/amiga/amiga/pmap.c
+++ b/sys/arch/amiga/amiga/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.32 2001/06/27 03:54:13 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.33 2001/07/18 10:47:04 art Exp $ */
/* $NetBSD: pmap.c,v 1.68 1999/06/19 19:44:09 is Exp $ */
/*-
@@ -788,28 +788,17 @@ pmap_map(virt, start, end, prot)
* the map will be used in software only, and
* is bounded by that size.
*/
-pmap_t
-pmap_create(size)
- vsize_t size;
+struct pmap *
+pmap_create(void)
{
- pmap_t pmap;
+ struct pmap *pmap;
#ifdef DEBUG
if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
printf("pmap_create(%lx)\n", size);
#endif
- /*
- * Software use map does not need a pmap
- */
- if (size)
- return(NULL);
- /* XXX: is it ok to wait here? */
- pmap = (pmap_t)malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
-#ifdef notifwewait
- if (pmap == NULL)
- panic("pmap_create: cannot allocate a pmap");
-#endif
+ pmap = (struct pmap *)malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
bzero(pmap, sizeof(*pmap));
pmap_pinit(pmap);
return (pmap);
@@ -971,13 +960,14 @@ pmap_remove(pmap, sva, eva)
* Lower the permission for all mappings to a given page.
*/
void
-pmap_page_protect(pa, prot)
- paddr_t pa;
- vm_prot_t prot;
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
+ paddr_t pa;
pv_entry_t pv;
int s;
+ pa = VM_PAGE_TO_PHYS(pg);
+
#ifdef DEBUG
if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
(prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
@@ -1735,15 +1725,21 @@ pmap_copy_page(src, dst)
* Clear the modify bits on the specified physical page.
*/
-void
-pmap_clear_modify(pa)
- paddr_t pa;
+boolean_t
+pmap_clear_modify(struct vm_page *pg)
{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
+
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_clear_modify(%lx)\n", pa);
#endif
+ ret = pmap_is_modified(pg);
+
pmap_changebit(pa, PG_M, FALSE);
+
+ return (ret);
}
/*
@@ -1752,14 +1748,19 @@ pmap_clear_modify(pa)
* Clear the reference bit on the specified physical page.
*/
-void pmap_clear_reference(pa)
- paddr_t pa;
+boolean_t
+pmap_clear_reference(struct vm_page *pg)
{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_clear_reference(%lx)\n", pa);
#endif
+ ret = pmap_is_referenced(pg);
pmap_changebit(pa, PG_U, FALSE);
+
+ return (ret);
}
/*
@@ -1770,9 +1771,9 @@ void pmap_clear_reference(pa)
*/
boolean_t
-pmap_is_referenced(pa)
- paddr_t pa;
+pmap_is_referenced(struct vm_page *pg)
{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
boolean_t rv = pmap_testbit(pa, PG_U);
@@ -1791,9 +1792,9 @@ pmap_is_referenced(pa)
*/
boolean_t
-pmap_is_modified(pa)
- paddr_t pa;
+pmap_is_modified(struct vm_page *pg)
{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
boolean_t rv = pmap_testbit(pa, PG_M);
@@ -2594,3 +2595,29 @@ pmap_virtual_space(vstartp, vendp)
*vstartp = virtual_avail;
*vendp = virtual_end;
}
+
+void
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
+{
+ pmap_enter(pmap_kernel(), va, pa, prot, 1, VM_PROT_READ|VM_PROT_WRITE);
+}
+
+void
+pmap_kenter_pgs(vaddr_t va, struct vm_page **pgs, int npgs)
+{
+ int i;
+
+ for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
+ pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
+ VM_PROT_READ|VM_PROT_WRITE, 1,
+ VM_PROT_READ|VM_PROT_WRITE);
+ }
+}
+
+void
+pmap_kremove(vaddr_t va, vsize_t len)
+{
+ for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
+ pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
+ }
+}
diff --git a/sys/arch/hp300/hp300/machdep.c b/sys/arch/hp300/hp300/machdep.c
index 06f2fa01f87..0c840515cb2 100644
--- a/sys/arch/hp300/hp300/machdep.c
+++ b/sys/arch/hp300/hp300/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.53 2001/07/05 10:12:06 art Exp $ */
+/* $OpenBSD: machdep.c,v 1.54 2001/07/18 10:47:04 art Exp $ */
/* $NetBSD: machdep.c,v 1.121 1999/03/26 23:41:29 mycroft Exp $ */
/*
@@ -327,13 +327,7 @@ cpu_startup()
if (pg == NULL)
panic("cpu_startup: not enough memory for "
"buffer cache");
-#if defined(PMAP_NEW)
pmap_kenter_pgs(curbuf, &pg, 1);
-#else
- pmap_enter(kernel_map->pmap, curbuf,
- VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE,
- TRUE, VM_PROT_READ|VM_PROT_WRITE);
-#endif
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
diff --git a/sys/arch/hp300/hp300/pmap.c b/sys/arch/hp300/hp300/pmap.c
index 459b76b906e..7a7de7b2d5a 100644
--- a/sys/arch/hp300/hp300/pmap.c
+++ b/sys/arch/hp300/hp300/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.25 2001/06/27 04:05:45 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.26 2001/07/18 10:47:04 art Exp $ */
/* $NetBSD: pmap.c,v 1.80 1999/09/16 14:52:06 chs Exp $ */
/*-
@@ -707,7 +707,6 @@ pmap_map(va, spa, epa, prot)
*
* Note: no locking is necessary in this function.
*/
-#ifdef PMAP_NEW
pmap_t
pmap_create()
{
@@ -721,29 +720,6 @@ pmap_create()
pmap_pinit(pmap);
return (pmap);
}
-#else
-pmap_t
-pmap_create(size)
- vsize_t size;
-{
- pmap_t pmap;
-
- PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
- ("pmap_create(%lx)\n", size));
-
- /*
- * Software use map does not need a pmap
- */
- if (size)
- return (NULL);
-
- pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
-
- bzero(pmap, sizeof(*pmap));
- pmap_pinit(pmap);
- return (pmap);
-}
-#endif
/*
* pmap_pinit:
@@ -995,18 +971,11 @@ pmap_remove(pmap, sva, eva)
* the permissions specified.
*/
void
-#ifdef PMAP_NEW
pmap_page_protect(pg, prot)
struct vm_page *pg;
vm_prot_t prot;
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
-#else
-pmap_page_protect(pa, prot)
- paddr_t pa;
- vm_prot_t prot;
-{
-#endif
struct pv_entry *pv;
int s;
@@ -1450,7 +1419,6 @@ validate:
#endif
}
-#ifdef PMAP_NEW
void
pmap_kenter_pa(va, pa, prot)
vaddr_t va;
@@ -1483,7 +1451,6 @@ pmap_kremove(va, len)
pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
}
}
-#endif
/*
* pmap_unwire: [ INTERFACE]
@@ -1886,7 +1853,6 @@ pmap_copy_page(src, dst)
*
* Clear the modify bits on the specified physical page.
*/
-#ifdef PMAP_NEW
boolean_t
pmap_clear_modify(pg)
struct vm_page *pg;
@@ -1900,24 +1866,12 @@ pmap_clear_modify(pg)
pmap_changebit(pa, 0, ~PG_M);
return rv;
}
-#else
-void
-pmap_clear_modify(pa)
- paddr_t pa;
-{
-
- PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%lx)\n", pa));
-
- pmap_changebit(pa, 0, ~PG_M);
-}
-#endif
/*
* pmap_clear_reference: [ INTERFACE ]
*
* Clear the reference bit on the specified physical page.
*/
-#ifdef PMAP_NEW
boolean_t
pmap_clear_reference(pg)
struct vm_page *pg;
@@ -1931,17 +1885,6 @@ pmap_clear_reference(pg)
pmap_changebit(pa, 0, ~PG_U);
return rv;
}
-#else
-void
-pmap_clear_reference(pa)
- paddr_t pa;
-{
-
- PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%lx)\n", pa));
-
- pmap_changebit(pa, 0, ~PG_U);
-}
-#endif
/*
* pmap_is_referenced: [ INTERFACE ]
@@ -1950,16 +1893,10 @@ pmap_clear_reference(pa)
* by any physical maps.
*/
boolean_t
-#ifdef PMAP_NEW
pmap_is_referenced(pg)
struct vm_page *pg;
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
-#else
-pmap_is_referenced(pa)
- paddr_t pa;
-{
-#endif
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
boolean_t rv = pmap_testbit(pa, PG_U);
@@ -1977,16 +1914,10 @@ pmap_is_referenced(pa)
* by any physical maps.
*/
boolean_t
-#ifdef PMAP_NEW
pmap_is_modified(pg)
struct vm_page *pg;
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
-#else
-pmap_is_modified(pa)
- paddr_t pa;
-{
-#endif
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
boolean_t rv = pmap_testbit(pa, PG_M);
diff --git a/sys/arch/hp300/hp300/vm_machdep.c b/sys/arch/hp300/hp300/vm_machdep.c
index 3ba7f2d6c29..471db3600e3 100644
--- a/sys/arch/hp300/hp300/vm_machdep.c
+++ b/sys/arch/hp300/hp300/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.27 2001/06/27 04:05:45 art Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.28 2001/07/18 10:47:04 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.47 1999/03/26 23:41:29 mycroft Exp $ */
/*
@@ -238,16 +238,8 @@ pagemove(from, to, size)
if (pmap_extract(pmap_kernel(), (vaddr_t)to, NULL) == TRUE)
panic("pagemove 3");
#endif
-#ifdef PMAP_NEW
pmap_kremove((vaddr_t)from, PAGE_SIZE);
pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE);
-#else
- pmap_remove(pmap_kernel(),
- (vaddr_t)from, (vaddr_t)from + PAGE_SIZE);
- pmap_enter(pmap_kernel(),
- (vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE, 1,
- VM_PROT_READ|VM_PROT_WRITE);
-#endif
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
diff --git a/sys/arch/hp300/include/param.h b/sys/arch/hp300/include/param.h
index 1a5e31b2a06..e2a2fb05e49 100644
--- a/sys/arch/hp300/include/param.h
+++ b/sys/arch/hp300/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.15 2001/06/27 04:05:45 art Exp $ */
+/* $OpenBSD: param.h,v 1.16 2001/07/18 10:47:04 art Exp $ */
/* $NetBSD: param.h,v 1.35 1997/07/10 08:22:38 veego Exp $ */
/*
@@ -105,6 +105,4 @@ void _delay __P((u_int));
((unsigned)(v) & ~HPMMMASK)
#endif
-#define PMAP_NEW
-
#endif /* !_MACHINE_PARAM_H_ */
diff --git a/sys/arch/hppa/include/vmparam.h b/sys/arch/hppa/include/vmparam.h
index 327034c445e..090350eb780 100644
--- a/sys/arch/hppa/include/vmparam.h
+++ b/sys/arch/hppa/include/vmparam.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.14 2001/06/27 06:19:45 art Exp $ */
+/* $OpenBSD: vmparam.h,v 1.15 2001/07/18 10:47:04 art Exp $ */
/*
* Copyright (c) 1988-1994, The University of Utah and
@@ -126,8 +126,6 @@
#define VM_FREELIST_DEFAULT 0
#define VM_FREELIST_FIRST16 1
-#define PMAP_NEW
-
#ifdef _KERNEL
struct pmap_physseg {
struct pv_entry *pvent;
diff --git a/sys/arch/i386/include/param.h b/sys/arch/i386/include/param.h
index cbfeca72c22..c218be4b785 100644
--- a/sys/arch/i386/include/param.h
+++ b/sys/arch/i386/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.17 2001/07/06 02:07:41 provos Exp $ */
+/* $OpenBSD: param.h,v 1.18 2001/07/18 10:47:04 art Exp $ */
/* $NetBSD: param.h,v 1.29 1996/03/04 05:04:26 cgd Exp $ */
/*-
@@ -95,8 +95,6 @@
#define MSGBUFSIZE 2*NBPG /* default message buffer size */
#endif
-#define PMAP_NEW
-
/*
* Constants related to network buffer management.
* MCLBYTES must be no larger than the software page size, and,
diff --git a/sys/arch/mac68k/mac68k/pmap.c b/sys/arch/mac68k/mac68k/pmap.c
index a97f05d39d0..72b9f2655d3 100644
--- a/sys/arch/mac68k/mac68k/pmap.c
+++ b/sys/arch/mac68k/mac68k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.25 2001/06/27 04:22:38 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.26 2001/07/18 10:47:04 art Exp $ */
/* $NetBSD: pmap.c,v 1.55 1999/04/22 04:24:53 chs Exp $ */
/*
@@ -663,21 +663,14 @@ pmap_map(va, spa, epa, prot)
*
* Note: no locking is necessary in this function.
*/
-pmap_t
-pmap_create(size)
- vsize_t size;
+struct pmap *
+pmap_create(void)
{
- pmap_t pmap;
+ struct pmap *pmap;
PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
("pmap_create(%lx)\n", size));
- /*
- * Software use map does not need a pmap
- */
- if (size)
- return (NULL);
-
pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
bzero(pmap, sizeof(*pmap));
@@ -896,13 +889,14 @@ pmap_remove(pmap, sva, eva)
* the permissions specified.
*/
void
-pmap_page_protect(pa, prot)
- paddr_t pa;
- vm_prot_t prot;
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
+ paddr_t pa;
struct pv_entry *pv;
int s;
+ pa = VM_PAGE_TO_PHYS(pg);
+
#ifdef DEBUG
if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
(prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
@@ -1654,14 +1648,19 @@ pmap_copy_page(src, dst)
*
* Clear the modify bits on the specified physical page.
*/
-void
-pmap_clear_modify(pa)
- paddr_t pa;
+boolean_t
+pmap_clear_modify(struct vm_page *pg)
{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
+
+ ret = pmap_is_modified(pg);
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%lx)\n", pa));
pmap_changebit(pa, 0, ~PG_M);
+
+ return (ret);
}
/*
@@ -1670,13 +1669,18 @@ pmap_clear_modify(pa)
* Clear the reference bit on the specified physical page.
*/
void
-pmap_clear_reference(pa)
- paddr_t pa;
+pmap_clear_reference(struct vm_page *pg)
{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
+
+ ret = pmap_is_referenced(pg);
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%lx)\n", pa));
pmap_changebit(pa, 0, ~PG_U);
+
+ return (ret);
}
/*
@@ -1686,9 +1690,10 @@ pmap_clear_reference(pa)
* by any physical maps.
*/
boolean_t
-pmap_is_referenced(pa)
- paddr_t pa;
+pmap_is_referenced(struct vm_page *pg)
{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
boolean_t rv = pmap_testbit(pa, PG_U);
@@ -1706,9 +1711,10 @@ pmap_is_referenced(pa)
* by any physical maps.
*/
boolean_t
-pmap_is_modified(pa)
- paddr_t pa;
+pmap_is_modified(struct vm_page *pg)
{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
boolean_t rv = pmap_testbit(pa, PG_M);
@@ -2352,3 +2358,29 @@ pmap_check_wiring(str, va)
str, va, entry->wired_count, count);
}
#endif /* DEBUG */
+
+void
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
+{
+ pmap_enter(pmap_kernel(), va, pa, prot, 1, VM_PROT_READ|VM_PROT_WRITE);
+}
+
+void
+pmap_kenter_pgs(vaddr_t va, struct vm_page **pgs, int npgs)
+{
+ int i;
+
+ for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
+ pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
+ VM_PROT_READ|VM_PROT_WRITE, 1,
+ VM_PROT_READ|VM_PROT_WRITE);
+ }
+}
+
+void
+pmap_kremove(vaddr_t va, vsize_t len)
+{
+ for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
+ pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
+ }
+}
diff --git a/sys/arch/mvme68k/mvme68k/pmap.c b/sys/arch/mvme68k/mvme68k/pmap.c
index 9f8d23476ba..de0741e52b6 100644
--- a/sys/arch/mvme68k/mvme68k/pmap.c
+++ b/sys/arch/mvme68k/mvme68k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.24 2001/06/27 06:19:49 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.25 2001/07/18 10:47:04 art Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -676,29 +676,17 @@ pmap_map(va, spa, epa, prot)
* the map will be used in software only, and
* is bounded by that size.
*/
-pmap_t
-pmap_create(size)
- vm_size_t size;
+struct pmap *
+pmap_create(void)
{
- register pmap_t pmap;
+ struct pmap *pmap;
#ifdef DEBUG
if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
printf("pmap_create(%x)\n", size);
#endif
- /*
- * Software use map does not need a pmap
- */
- if (size)
- return (NULL);
-
- /* XXX: is it ok to wait here? */
- pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
-#ifdef notifwewait
- if (pmap == NULL)
- panic("pmap_create: cannot allocate a pmap");
-#endif
+ pmap = (struct pmap *) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
bzero(pmap, sizeof(*pmap));
pmap_pinit(pmap);
return (pmap);
@@ -901,11 +889,10 @@ pmap_remove(pmap, sva, eva)
* Lower the permission for all mappings to a given page.
*/
void
-pmap_page_protect(pa, prot)
- vm_offset_t pa;
- vm_prot_t prot;
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
- register struct pv_entry *pv;
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ struct pv_entry *pv;
int s;
#ifdef DEBUG
@@ -1584,14 +1571,20 @@ pmap_copy_page(src, dst)
*/
void
-pmap_clear_modify(pa)
- vm_offset_t pa;
+pmap_clear_modify(struct vm_page *pg)
{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
+
+ ret = pmap_is_modified(pg);
+
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_clear_modify(%x)\n", pa);
#endif
pmap_changebit(pa, PG_M, FALSE);
+
+ return (ret);
}
/*
@@ -1600,14 +1593,21 @@ pmap_clear_modify(pa)
* Clear the reference bit on the specified physical page.
*/
-void pmap_clear_reference(pa)
- vm_offset_t pa;
+boolean_t
+pmap_clear_reference(struct vm_page *pg)
{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
+
+ ret = pmap_is_referenced(pg);
+
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_clear_reference(%x)\n", pa);
#endif
pmap_changebit(pa, PG_U, FALSE);
+
+ return (ret);
}
/*
@@ -1618,9 +1618,10 @@ void pmap_clear_reference(pa)
*/
boolean_t
-pmap_is_referenced(pa)
- vm_offset_t pa;
+pmap_is_referenced(struct vm_page *pg)
{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
boolean_t rv = pmap_testbit(pa, PG_U);
@@ -1639,9 +1640,9 @@ pmap_is_referenced(pa)
*/
boolean_t
-pmap_is_modified(pa)
- vm_offset_t pa;
+pmap_is_modified(struct vm_page *pg)
{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
boolean_t rv = pmap_testbit(pa, PG_M);
@@ -2410,3 +2411,29 @@ pmap_check_wiring(str, va)
str, va, entry->wired_count, count);
}
#endif
+
+void
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
+{
+ pmap_enter(pmap_kernel(), va, pa, prot, 1, VM_PROT_READ|VM_PROT_WRITE);
+}
+
+void
+pmap_kenter_pgs(vaddr_t va, struct vm_page **pgs, int npgs)
+{
+ int i;
+
+ for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
+ pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
+ VM_PROT_READ|VM_PROT_WRITE, 1,
+ VM_PROT_READ|VM_PROT_WRITE);
+ }
+}
+
+void
+pmap_kremove(vaddr_t va, vsize_t len)
+{
+ for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
+ pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
+ }
+}
diff --git a/sys/arch/mvme88k/mvme88k/pmap.c b/sys/arch/mvme88k/mvme88k/pmap.c
index cf54862a50e..77f0be10d53 100644
--- a/sys/arch/mvme88k/mvme88k/pmap.c
+++ b/sys/arch/mvme88k/mvme88k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.34 2001/07/05 07:20:45 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.35 2001/07/18 10:47:04 art Exp $ */
/*
* Copyright (c) 1996 Nivas Madhur
* All rights reserved.
@@ -1452,20 +1452,14 @@ pmap_zero_page(vm_offset_t phys)
*
* This routines allocates a pmap structure.
*/
-pmap_t
-pmap_create(vm_size_t size)
+struct pmap *
+pmap_create(void)
{
- pmap_t p;
-
- /*
- * A software use-only map doesn't even need a map.
- */
- if (size != 0)
- return (PMAP_NULL);
+ struct pmap *p;
CHECK_PMAP_CONSISTENCY("pmap_create");
- p = (pmap_t)malloc(sizeof(*p), M_VMPMAP, M_WAITOK);
+ p = (struct pmap *)malloc(sizeof(*p), M_VMPMAP, M_WAITOK);
bzero(p, sizeof(*p));
pmap_pinit(p);
@@ -3302,7 +3296,7 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst)
* Clear the modify bits on the specified physical page.
*
* Parameters:
- * phys physical address of page
+ * pg vm_page
*
* Extern/Global:
* pv_head_table, pv_lists
@@ -3317,14 +3311,14 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst)
* pmap_pte
* panic
*
- * For managed pages, the modify_list entry corresponding to the
+ * The modify_list entry corresponding to the
* page's frame index will be zeroed. The PV list will be traversed.
* For each pmap/va the hardware 'modified' bit in the page descripter table
* entry inspected - and turned off if necessary. If any of the
* inspected bits were found on, an TLB flush will be performed.
*/
void
-pmap_clear_modify(vm_offset_t phys)
+pmap_clear_modify(struct vm_page *pg)
{
pv_entry_t pvl;
pv_entry_t pvep;
@@ -3335,14 +3329,16 @@ pmap_clear_modify(vm_offset_t phys)
unsigned users;
pte_template_t opte;
int kflush;
+ paddr_t phys = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
+
+ ret = pmap_is_modified(pg);
+#ifdef DIAGNOSTIC
if (!PMAP_MANAGED(phys)) {
-#ifdef DEBUG
- if (pmap_con_dbg & CD_CMOD)
- printf("(pmap_clear_modify :%x) phys addr 0x%x not managed \n", curproc, phys);
-#endif
- return;
+ panic("pmap_clear_modify: not managed?");
}
+#endif
SPLVM(spl);
@@ -3361,7 +3357,7 @@ clear_modify_Retry:
#endif
UNLOCK_PVH(phys);
SPLX(spl);
- return;
+ return (ret);
}
/* for each listed pmap, turn off the page modified bit */
@@ -3401,6 +3397,8 @@ clear_modify_Retry:
}
UNLOCK_PVH(phys);
SPLX(spl);
+
+ return (ret);
} /* pmap_clear_modify() */
/*
@@ -3412,7 +3410,7 @@ clear_modify_Retry:
* stored data into the page.
*
* Parameters:
- * phys physical address og a page
+ * pg vm_page
*
* Extern/Global:
* pv_head_array, pv lists
@@ -3425,9 +3423,6 @@ clear_modify_Retry:
* PA_TO_PVH
* pmap_pte
*
- * If the physical address specified is not a managed page, this
- * routine simply returns TRUE (looks like it is returning FALSE XXX).
- *
* If the entry in the modify list, corresponding to the given page,
* is TRUE, this routine return TRUE. (This means at least one mapping
* has been invalidated where the MMU had set the modified bit in the
@@ -3439,21 +3434,20 @@ clear_modify_Retry:
* immediately (doesn't need to walk remainder of list).
*/
boolean_t
-pmap_is_modified(vm_offset_t phys)
+pmap_is_modified(struct vm_page *pg)
{
pv_entry_t pvl;
pv_entry_t pvep;
pt_entry_t *ptep;
int spl;
boolean_t modified_flag;
+ paddr_t phys = VM_PAGE_TO_PHYS(pg);
+#ifdef DIAGNOSTIC
if (!PMAP_MANAGED(phys)) {
-#ifdef DEBUG
- if (pmap_con_dbg & CD_IMOD)
- printf("(pmap_is_modified :%x) phys addr 0x%x not managed\n", curproc, phys);
-#endif
- return (FALSE);
+ panic("pmap_is_modified: not managed?");
}
+#endif
SPLVM(spl);
@@ -3526,7 +3520,7 @@ is_mod_Retry:
* Clear the reference bits on the specified physical page.
*
* Parameters:
- * phys physical address of page
+ * pg vm_page
*
* Calls:
* PMAP_MANAGED
@@ -3540,32 +3534,33 @@ is_mod_Retry:
* Extern/Global:
* pv_head_array, pv lists
*
- * For managed pages, the coressponding PV list will be traversed.
* For each pmap/va the hardware 'used' bit in the page table entry
* inspected - and turned off if necessary. If any of the inspected bits
* were found on, a TLB flush will be performed.
*/
-void
-pmap_clear_reference(vm_offset_t phys)
+boolean_t
+pmap_clear_reference(struct vm_page *pg)
{
- pv_entry_t pvl;
- pv_entry_t pvep;
- pt_entry_t *pte;
- pmap_t pmap;
- int spl, spl_sav;
- vm_offset_t va;
- unsigned users;
- pte_template_t opte;
- int kflush;
+ pv_entry_t pvl;
+ pv_entry_t pvep;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ int spl, spl_sav;
+ vm_offset_t va;
+ unsigned users;
+ pte_template_t opte;
+ int kflush;
+ paddr_t phys;
+ boolean_t ret;
+
+ phys = VM_PAGE_TO_PHYS(pg);
+#ifdef DIAGNOSTIC
if (!PMAP_MANAGED(phys)) {
-#ifdef DEBUG
- if (pmap_con_dbg & CD_CREF) {
- printf("(pmap_clear_reference :%x) phys addr 0x%x not managed\n", curproc,phys);
- }
-#endif
- return;
+ panic("pmap_clear_reference: not managed?");
}
+#endif
+ ret = pmap_is_referenced(pg);
SPLVM(spl);
@@ -3582,7 +3577,7 @@ pmap_clear_reference(vm_offset_t phys)
#endif
UNLOCK_PVH(phys);
SPLX(spl);
- return;
+ return (ret);
}
/* for each listed pmap, turn off the page refrenced bit */
@@ -3622,6 +3617,8 @@ pmap_clear_reference(vm_offset_t phys)
}
UNLOCK_PVH(phys);
SPLX(spl);
+
+ return (ret);
} /* pmap_clear_reference() */
/*
@@ -3632,7 +3629,7 @@ pmap_clear_reference(vm_offset_t phys)
* any physical maps. That is, whether the hardware has touched the page.
*
* Parameters:
- * phys physical address of a page
+ * pg vm_page
*
* Extern/Global:
* pv_head_array, pv lists
@@ -3645,25 +3642,25 @@ pmap_clear_reference(vm_offset_t phys)
* simple_lock
* pmap_pte
*
- * If the physical address specified is not a managed page, this
- * routine simply returns TRUE.
- *
- * Otherwise, this routine walks the PV list corresponding to the
+ * This routine walks the PV list corresponding to the
* given page. For each pmap/va/ pair, the page descripter table entry is
* examined. If a used bit is found on, the function returns TRUE
* immediately (doesn't need to walk remainder of list).
*/
boolean_t
-pmap_is_referenced(vm_offset_t phys)
+pmap_is_referenced(struct vm_page *pg)
{
pv_entry_t pvl;
pv_entry_t pvep;
pt_entry_t *ptep;
int spl;
+ paddr_t phys = VM_PAGE_TO_PHYS(pg);
+#ifdef DIAGNOSTIC
if (!PMAP_MANAGED(phys))
- return (FALSE);
+ panic("pmap_is_referenced: not managed?");
+#endif
SPLVM(spl);
@@ -3713,8 +3710,10 @@ is_ref_Retry:
* Lower the permission for all mappings to a given page.
*/
void
-pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
+ paddr_t phys = VM_PAGE_TO_PHYS(pg);
+
switch (prot) {
case VM_PROT_READ:
case VM_PROT_READ|VM_PROT_EXECUTE:
@@ -4426,3 +4425,29 @@ pmap_range_remove(pmap_range_t *ranges, vm_offset_t start, vm_offset_t end)
range->start = end;
}
#endif /* FUTURE_MAYBE */
+
+void
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
+{
+ pmap_enter(pmap_kernel(), va, pa, prot, 1, VM_PROT_READ|VM_PROT_WRITE);
+}
+
+void
+pmap_kenter_pgs(vaddr_t va, struct vm_page **pgs, int npgs)
+{
+ int i;
+
+ for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
+ pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
+ VM_PROT_READ|VM_PROT_WRITE, 1,
+ VM_PROT_READ|VM_PROT_WRITE);
+ }
+}
+
+void
+pmap_kremove(vaddr_t va, vsize_t len)
+{
+ for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
+ pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
+ }
+}
diff --git a/sys/arch/mvmeppc/include/pmap.h b/sys/arch/mvmeppc/include/pmap.h
index 9491d97aadd..a0a0b898f89 100644
--- a/sys/arch/mvmeppc/include/pmap.h
+++ b/sys/arch/mvmeppc/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.1 2001/06/26 21:57:47 smurph Exp $ */
+/* $OpenBSD: pmap.h,v 1.2 2001/07/18 10:47:05 art Exp $ */
/* $NetBSD: pmap.h,v 1.1 1996/09/30 16:34:29 ws Exp $ */
/*-
@@ -77,10 +77,10 @@ typedef struct pmap *pmap_t;
extern struct pmap kernel_pmap_;
#define pmap_kernel() (&kernel_pmap_)
-#define pmap_clear_modify(pa) (ptemodify((pa), PTE_CHG, 0))
-#define pmap_clear_reference(pa) (ptemodify((pa), PTE_REF, 0))
-#define pmap_is_modified(pa) (ptebits((pa), PTE_CHG))
-#define pmap_is_referenced(pa) (ptebits((pa), PTE_REF))
+#define pmap_clear_modify(pa) (ptemodify(VM_PAGE_TO_PHYS(pa), PTE_CHG, 0))
+#define pmap_clear_reference(pa) (ptemodify(VM_PAGE_TO_PHYS(pa), PTE_REF, 0))
+#define pmap_is_modified(pg) (ptebits(VM_PAGE_TO_PHYS(pg), PTE_CHG))
+#define pmap_is_referenced(pg) (ptebits(VM_PAGE_TO_PHYS(pg), PTE_REF))
#define pmap_change_wiring(pm, va, wired)
#define pmap_unwire(pm, va)
diff --git a/sys/arch/mvmeppc/mvmeppc/pmap.c b/sys/arch/mvmeppc/mvmeppc/pmap.c
index fd04d8590c8..84edaf00974 100644
--- a/sys/arch/mvmeppc/mvmeppc/pmap.c
+++ b/sys/arch/mvmeppc/mvmeppc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.4 2001/07/06 05:14:30 smurph Exp $ */
+/* $OpenBSD: pmap.c,v 1.5 2001/07/18 10:47:05 art Exp $ */
/* $NetBSD: pmap.c,v 1.1 1996/09/30 16:34:52 ws Exp $ */
/*
@@ -806,14 +806,8 @@ pmap_next_page(paddr)
/*
* Create and return a physical map.
*/
-#if defined(PMAP_NEW)
struct pmap *
pmap_create()
-#else
-struct pmap *
-pmap_create(size)
- vsize_t size;
-#endif
{
struct pmap *pm;
@@ -1426,9 +1420,9 @@ pmap_protect(pm, sva, eva, prot)
pmap_remove(pm, sva, eva);
}
-void
+boolean_t
ptemodify(pa, mask, val)
- vm_offset_t pa;
+ paddr_t pa;
u_int mask;
u_int val;
{
@@ -1437,10 +1431,13 @@ ptemodify(pa, mask, val)
struct pte_ovfl *po;
int i, s;
char * pattr;
+ boolean_t ret;
+
+ ret = ptebits(pa, mask);
pv = pmap_find_pv(pa);
if (pv == NULL)
- return;
+ return (ret);
pattr = pmap_find_attr(pa);
/*
@@ -1450,7 +1447,7 @@ ptemodify(pa, mask, val)
*pattr |= val >> ATTRSHFT;
if (pv->pv_idx < 0)
- return;
+ return (ret);
s = splimp();
for (; pv; pv = pv->pv_next) {
@@ -1485,6 +1482,8 @@ ptemodify(pa, mask, val)
}
}
splx(s);
+
+ return (ret);
}
int
@@ -1553,21 +1552,12 @@ ptebits(pa, bit)
* There are only two cases: either the protection is going to 0,
* or it is going to read-only.
*/
-#if defined(PMAP_NEW)
void
pmap_page_protect(pg, prot)
struct vm_page *pg;
vm_prot_t prot;
-#else
-void
-pmap_page_protect(pa, prot)
- vm_offset_t pa;
- vm_prot_t prot;
-#endif
{
-#if defined(PMAP_NEW)
vm_offset_t pa = VM_PAGE_TO_PHYS(pg);
-#endif
vm_offset_t va;
pte_t *ptp;
struct pte_ovfl *po, *npo;
@@ -1577,7 +1567,7 @@ pmap_page_protect(pa, prot)
pa &= ~ADDR_POFF;
if (prot & VM_PROT_READ) {
- ptemodify(pa, PTE_PP, PTE_RO);
+ ptemodify(pg, PTE_PP, PTE_RO);
return;
}
diff --git a/sys/arch/powerpc/include/pmap.h b/sys/arch/powerpc/include/pmap.h
index 1bcba5c2b3e..4d1f268427d 100644
--- a/sys/arch/powerpc/include/pmap.h
+++ b/sys/arch/powerpc/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.13 2001/07/10 01:34:32 drahn Exp $ */
+/* $OpenBSD: pmap.h,v 1.14 2001/07/18 10:47:05 art Exp $ */
/* $NetBSD: pmap.h,v 1.1 1996/09/30 16:34:29 ws Exp $ */
/*-
@@ -38,10 +38,6 @@
#include <machine/pte.h>
/*
- * FUCK
-#define PMAP_NEW
- */
-/*
* Segment registers
*/
#ifndef _LOCORE
@@ -84,22 +80,11 @@ extern struct pmap kernel_pmap_;
int ptebits(paddr_t pa, int bit);
-#ifdef PMAP_NEW
-#define pmap_clear_modify(page) (ptemodify((page)->phys_addr, PTE_CHG, 0))
-#define pmap_clear_reference(page) (ptemodify((page)->phys_addr, PTE_REF, 0))
-#define pmap_is_modified(page) (ptebits((page)->phys_addr, PTE_CHG))
-#define pmap_is_referenced(page) (ptebits((page)->phys_addr, PTE_REF))
+#define pmap_clear_modify(page) (ptemodify(VM_PAGE_TO_PHYS(page), PTE_CHG, 0))
+#define pmap_clear_reference(page) (ptemodify(VM_PAGE_TO_PHYS(page), PTE_REF, 0))
+#define pmap_is_modified(page) (ptebits(VM_PAGE_TO_PHYS(page), PTE_CHG))
+#define pmap_is_referenced(page) (ptebits(VM_PAGE_TO_PHYS(page), PTE_REF))
#define pmap_unwire(pm, va)
-#else
-#define pmap_clear_modify(pa) (ptemodify((pa), PTE_CHG, 0))
-#define pmap_clear_reference(pa) (ptemodify((pa), PTE_REF, 0))
-#define pmap_is_modified(pa) (ptebits((pa), PTE_CHG))
-#define pmap_is_referenced(pa) (ptebits((pa), PTE_REF))
-#define pmap_unwire(pm, va)
-/* XXX */
-void pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot);
-#endif
-
#define pmap_phys_address(x) (x)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index 7938f0fea2d..44c104b4edf 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.35 2001/07/09 02:14:05 mickey Exp $ */
+/* $OpenBSD: pmap.c,v 1.36 2001/07/18 10:47:05 art Exp $ */
/* $NetBSD: pmap.c,v 1.1 1996/09/30 16:34:52 ws Exp $ */
/*
@@ -825,14 +825,8 @@ pmap_next_page(paddr)
/*
* Create and return a physical map.
*/
-#if defined(PMAP_NEW)
struct pmap *
pmap_create()
-#else
-struct pmap *
-pmap_create(size)
- vsize_t size;
-#endif
{
struct pmap *pm;
@@ -1442,9 +1436,9 @@ pmap_protect(pm, sva, eva, prot)
pmap_remove(pm, sva, eva);
}
-void
+boolean_t
ptemodify(pa, mask, val)
- vm_offset_t pa;
+ paddr_t pa;
u_int mask;
u_int val;
{
@@ -1453,10 +1447,13 @@ ptemodify(pa, mask, val)
struct pte_ovfl *po;
int i, s;
char * pattr;
+ boolean_t ret;
+
+ ret = ptebits(pa, mask);
pv = pmap_find_pv(pa);
if (pv == NULL)
- return;
+ return (ret);
pattr = pmap_find_attr(pa);
/*
@@ -1466,7 +1463,7 @@ ptemodify(pa, mask, val)
*pattr |= val >> ATTRSHFT;
if (pv->pv_idx < 0)
- return;
+ return (ret);
s = splimp();
for (; pv; pv = pv->pv_next) {
@@ -1501,6 +1498,8 @@ ptemodify(pa, mask, val)
}
}
splx(s);
+
+ return (ret);
}
int
@@ -1569,21 +1568,12 @@ ptebits(pa, bit)
* There are only two cases: either the protection is going to 0,
* or it is going to read-only.
*/
-#if defined(PMAP_NEW)
void
pmap_page_protect(pg, prot)
struct vm_page *pg;
vm_prot_t prot;
-#else
-void
-pmap_page_protect(pa, prot)
- vm_offset_t pa;
- vm_prot_t prot;
-#endif
{
-#if defined(PMAP_NEW)
vm_offset_t pa = VM_PAGE_TO_PHYS(pg);
-#endif
vm_offset_t va;
int s;
struct pmap *pm;
diff --git a/sys/arch/sparc/include/param.h b/sys/arch/sparc/include/param.h
index a101f8e57af..d743bb76d18 100644
--- a/sys/arch/sparc/include/param.h
+++ b/sys/arch/sparc/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.19 2001/07/06 02:07:43 provos Exp $ */
+/* $OpenBSD: param.h,v 1.20 2001/07/18 10:47:05 art Exp $ */
/* $NetBSD: param.h,v 1.29 1997/03/10 22:50:37 pk Exp $ */
/*
@@ -133,8 +133,6 @@ extern int nbpg, pgofset, pgshift;
#define NKMEMCLUSTERS (6 * 1024 * 1024 / PAGE_SIZE)
#endif
-#define PMAP_NEW
-
/* pages ("clicks") to disk blocks */
#define ctod(x) ((x) << (PGSHIFT - DEV_BSHIFT))
#define dtoc(x) ((x) >> (PGSHIFT - DEV_BSHIFT))
diff --git a/sys/arch/sun3/sun3/pmap.c b/sys/arch/sun3/sun3/pmap.c
index fd4b9f7a218..f838fdcec4d 100644
--- a/sys/arch/sun3/sun3/pmap.c
+++ b/sys/arch/sun3/sun3/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.26 2001/06/27 04:44:03 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.27 2001/07/18 10:47:05 art Exp $ */
/* $NetBSD: pmap.c,v 1.64 1996/11/20 18:57:35 gwr Exp $ */
/*-
@@ -1678,16 +1678,12 @@ pmap_page_upload()
* the map will be used in software only, and
* is bounded by that size.
*/
-pmap_t
-pmap_create(size)
- vm_size_t size;
+struct pmap *
+pmap_create(void)
{
- pmap_t pmap;
-
- if (size)
- return NULL;
+ struct pmap *pmap;
- pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
+ pmap = (struct pmap *) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
pmap_common_init(pmap);
pmap_user_pmap_init(pmap);
return pmap;
@@ -1748,11 +1744,12 @@ pmap_destroy(pmap)
* Lower the permission for all mappings to a given page.
*/
void
-pmap_page_protect(pa, prot)
- vm_offset_t pa;
- vm_prot_t prot;
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
int s;
+ paddr_t pa;
+
+ pa = VM_PAGE_TO_PHYS(pg);
PMAP_LOCK();
@@ -2584,35 +2581,36 @@ int pmap_fault_reload(pmap, va, ftype)
/*
* Clear the modify bit for the given physical page.
*/
-void
-pmap_clear_modify(pa)
- register vm_offset_t pa;
+boolean_t
+pmap_clear_modify(struct vm_page *pg)
{
- register pv_entry_t pvhead;
+ pv_entry_t pvhead;
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
if (!pv_initialized)
- return;
- if (!managed(pa))
- return;
+ return (0);
pvhead = pa_to_pvp(pa);
pv_syncflags(pvhead);
+ ret = pvhead->pv_flags & PV_MOD;
pvhead->pv_flags &= ~PV_MOD;
+
+ return (ret);
}
/*
* Tell whether the given physical page has been modified.
*/
int
-pmap_is_modified(pa)
- register vm_offset_t pa;
+pmap_is_modified(struct vm_page *pg)
{
- register pv_entry_t pvhead;
+ pv_entry_t pvhead;
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
if (!pv_initialized)
return (0);
- if (!managed(pa))
- return (0);
+
pvhead = pa_to_pvp(pa);
if ((pvhead->pv_flags & PV_MOD) == 0)
pv_syncflags(pvhead);
@@ -2623,20 +2621,24 @@ pmap_is_modified(pa)
* Clear the reference bit for the given physical page.
* It's OK to just remove mappings if that's easier.
*/
-void
-pmap_clear_reference(pa)
- register vm_offset_t pa;
+boolean_t
+pmap_clear_reference(struct vm_page *pg)
{
- register pv_entry_t pvhead;
+ pv_entry_t pvhead;
+ paddr_t pa;
+ boolean_t ret;
+
+ pa = VM_PAGE_TO_PHYS(pg);
if (!pv_initialized)
- return;
- if (!managed(pa))
- return;
+ return (0);
pvhead = pa_to_pvp(pa);
pv_syncflags(pvhead);
+ ret = pvhead->pv_flags & PV_REF;
pvhead->pv_flags &= ~PV_REF;
+
+ return (ret);
}
/*
@@ -2644,15 +2646,16 @@ pmap_clear_reference(pa)
* It's OK to just return FALSE if page is not mapped.
*/
int
-pmap_is_referenced(pa)
- vm_offset_t pa;
+pmap_is_referenced(struct vm_page *pg)
{
- register pv_entry_t pvhead;
+ pv_entry_t pvhead;
+ paddr_t pa;
+
+ pa = VM_PAGE_TO_PHYS(pg);
if (!pv_initialized)
return (0);
- if (!managed(pa))
- return (0);
+
pvhead = pa_to_pvp(pa);
if ((pvhead->pv_flags & PV_REF) == 0)
pv_syncflags(pvhead);
@@ -3358,3 +3361,29 @@ pmap_deactivate(p)
{
/* not implemented. */
}
+
+void
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
+{
+ pmap_enter(pmap_kernel(), va, pa, prot, 1, VM_PROT_READ|VM_PROT_WRITE);
+}
+
+void
+pmap_kenter_pgs(vaddr_t va, struct vm_page **pgs, int npgs)
+{
+ int i;
+
+ for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
+ pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
+ VM_PROT_READ|VM_PROT_WRITE, 1,
+ VM_PROT_READ|VM_PROT_WRITE);
+ }
+}
+
+void
+pmap_kremove(vaddr_t va, vsize_t len)
+{
+ for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
+ pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
+ }
+}
diff --git a/sys/arch/vax/include/vmparam.h b/sys/arch/vax/include/vmparam.h
index 77896851c95..4b2f0f810b3 100644
--- a/sys/arch/vax/include/vmparam.h
+++ b/sys/arch/vax/include/vmparam.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.13 2001/06/27 06:19:57 art Exp $ */
+/* $OpenBSD: vmparam.h,v 1.14 2001/07/18 10:47:05 art Exp $ */
/* $NetBSD: vmparam.h,v 1.32 2000/03/07 00:05:59 matt Exp $ */
/*-
@@ -148,5 +148,4 @@ struct pmap_physseg {
#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)
-#define PMAP_NEW
#endif