Diffstat (limited to 'sys/arch/sun3/sun3/pmap.c')
-rw-r--r--  sys/arch/sun3/sun3/pmap.c  101
1 file changed, 65 insertions(+), 36 deletions(-)
diff --git a/sys/arch/sun3/sun3/pmap.c b/sys/arch/sun3/sun3/pmap.c
index fd4b9f7a218..f838fdcec4d 100644
--- a/sys/arch/sun3/sun3/pmap.c
+++ b/sys/arch/sun3/sun3/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.26 2001/06/27 04:44:03 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.27 2001/07/18 10:47:05 art Exp $ */
/* $NetBSD: pmap.c,v 1.64 1996/11/20 18:57:35 gwr Exp $ */
/*-
@@ -1678,16 +1678,12 @@ pmap_page_upload()
* the map will be used in software only, and
* is bounded by that size.
*/
-pmap_t
-pmap_create(size)
- vm_size_t size;
+struct pmap *
+pmap_create(void)
{
- pmap_t pmap;
-
- if (size)
- return NULL;
+ struct pmap *pmap;
- pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
+ pmap = (struct pmap *) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
pmap_common_init(pmap);
pmap_user_pmap_init(pmap);
return pmap;
@@ -1748,11 +1744,12 @@ pmap_destroy(pmap)
* Lower the permission for all mappings to a given page.
*/
void
-pmap_page_protect(pa, prot)
- vm_offset_t pa;
- vm_prot_t prot;
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
int s;
+ paddr_t pa;
+
+ pa = VM_PAGE_TO_PHYS(pg);
PMAP_LOCK();
@@ -2584,35 +2581,36 @@ int pmap_fault_reload(pmap, va, ftype)
/*
* Clear the modify bit for the given physical page.
*/
-void
-pmap_clear_modify(pa)
- register vm_offset_t pa;
+boolean_t
+pmap_clear_modify(struct vm_page *pg)
{
- register pv_entry_t pvhead;
+ pv_entry_t pvhead;
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
if (!pv_initialized)
- return;
- if (!managed(pa))
- return;
+ return (0);
pvhead = pa_to_pvp(pa);
pv_syncflags(pvhead);
+ ret = pvhead->pv_flags & PV_MOD;
pvhead->pv_flags &= ~PV_MOD;
+
+ return (ret);
}
/*
* Tell whether the given physical page has been modified.
*/
int
-pmap_is_modified(pa)
- register vm_offset_t pa;
+pmap_is_modified(struct vm_page *pg)
{
- register pv_entry_t pvhead;
+ pv_entry_t pvhead;
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
if (!pv_initialized)
return (0);
- if (!managed(pa))
- return (0);
+
pvhead = pa_to_pvp(pa);
if ((pvhead->pv_flags & PV_MOD) == 0)
pv_syncflags(pvhead);
@@ -2623,20 +2621,24 @@ pmap_is_modified(pa)
* Clear the reference bit for the given physical page.
* It's OK to just remove mappings if that's easier.
*/
-void
-pmap_clear_reference(pa)
- register vm_offset_t pa;
+boolean_t
+pmap_clear_reference(struct vm_page *pg)
{
- register pv_entry_t pvhead;
+ pv_entry_t pvhead;
+ paddr_t pa;
+ boolean_t ret;
+
+ pa = VM_PAGE_TO_PHYS(pg);
if (!pv_initialized)
- return;
- if (!managed(pa))
- return;
+ return (0);
pvhead = pa_to_pvp(pa);
pv_syncflags(pvhead);
+ ret = pvhead->pv_flags & PV_REF;
pvhead->pv_flags &= ~PV_REF;
+
+ return (ret);
}
/*
@@ -2644,15 +2646,16 @@ pmap_clear_reference(pa)
* It's OK to just return FALSE if page is not mapped.
*/
int
-pmap_is_referenced(pa)
- vm_offset_t pa;
+pmap_is_referenced(struct vm_page *pg)
{
- register pv_entry_t pvhead;
+ pv_entry_t pvhead;
+ paddr_t pa;
+
+ pa = VM_PAGE_TO_PHYS(pg);
if (!pv_initialized)
return (0);
- if (!managed(pa))
- return (0);
+
pvhead = pa_to_pvp(pa);
if ((pvhead->pv_flags & PV_REF) == 0)
pv_syncflags(pvhead);
@@ -3358,3 +3361,29 @@ pmap_deactivate(p)
{
/* not implemented. */
}
+
+void
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
+{
+ pmap_enter(pmap_kernel(), va, pa, prot, 1, VM_PROT_READ|VM_PROT_WRITE);
+}
+
+void
+pmap_kenter_pgs(vaddr_t va, struct vm_page **pgs, int npgs)
+{
+ int i;
+
+ for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
+ pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
+ VM_PROT_READ|VM_PROT_WRITE, 1,
+ VM_PROT_READ|VM_PROT_WRITE);
+ }
+}
+
+void
+pmap_kremove(vaddr_t va, vsize_t len)
+{
+ for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
+ pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
+ }
+}
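
For reference: with this change pmap_clear_modify() and pmap_clear_reference() return the previous state of the PV_MOD/PV_REF flag instead of returning void, so a caller can test and clear in a single pmap call. Below is a minimal caller sketch under that assumption; the page_was_dirty()/page_was_referenced() helpers are hypothetical, and only the pmap prototypes and their return semantics follow the diff above.

/*
 * Hypothetical caller sketch (not part of this commit): test-and-clear
 * a managed page's modified/referenced state in one pmap call.
 */
#include <sys/types.h>		/* boolean_t; kernel build assumed */

struct vm_page;			/* opaque here; defined in the VM headers */

boolean_t	pmap_clear_modify(struct vm_page *);	/* new prototype, per the diff */
boolean_t	pmap_clear_reference(struct vm_page *);	/* new prototype, per the diff */

boolean_t
page_was_dirty(struct vm_page *pg)
{
	/* Nonzero if PV_MOD was set; the flag is clear on return. */
	return (pmap_clear_modify(pg));
}

boolean_t
page_was_referenced(struct vm_page *pg)
{
	/* Same pattern for the reference bit (PV_REF). */
	return (pmap_clear_reference(pg));
}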
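
The new pmap_kenter_pa() and pmap_kremove() entry points are thin wrappers around pmap_enter() and pmap_remove() for wired kernel mappings. A minimal usage sketch follows; the map_one_page_sketch() helper is hypothetical and assumes kva and pa are page-aligned, and only the two pmap calls themselves come from the diff.

/*
 * Hypothetical usage sketch (not part of this commit): enter a wired
 * kernel mapping for a single physical page, use it, then tear it down.
 */
#include <sys/param.h>		/* PAGE_SIZE; kernel build assumed */
#include <uvm/uvm_extern.h>	/* vaddr_t, paddr_t, VM_PROT_*; assumed header */

void	pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t);	/* per the diff */
void	pmap_kremove(vaddr_t, vsize_t);			/* per the diff */

void
map_one_page_sketch(vaddr_t kva, paddr_t pa)
{
	/* Enter a wired read/write mapping at kva. */
	pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE);

	/* ... access the page through kva ... */

	/* Remove exactly the range entered above. */
	pmap_kremove(kva, PAGE_SIZE);
}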