author     Artur Grabowski <art@cvs.openbsd.org>    1999-12-09 16:11:49 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>    1999-12-09 16:11:49 +0000
commit     816f2526fc8aa16fb14c8bea006807bbaf90656d (patch)
tree       69bb32eb97d34486e20d1b1ab9d3a64de5d2fed9 /sys/arch/sparc
parent     427461a6a4a128cee08e2a88cb5faf5e536ed06f (diff)
Change the kvm_uncache interface to a kvm_setcache that can uncache
a memory range and allow the range to be cached again. Make kvm_uncache and kvm_recache macros that call kvm_setcache. (Also in this commit: fix the protection bits for pmap_kenter* and remove a redundant call to uvm_setpagesize.)
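For reference, a minimal sketch of the reworked interface as it appears in the pmap.h hunk below: kvm_setcache takes an extra flag selecting cached or uncached, and the old kvm_uncache plus the new kvm_recache become thin macro wrappers around it. The pmap_dma_buf variable and the page count in the usage comment are hypothetical, for illustration only.

/* New interface (see the pmap.h hunk below); __P() is the K&R-compat prototype macro. */
void	kvm_setcache __P((caddr_t, int, int));
#define	kvm_uncache(addr, npages)	kvm_setcache(addr, npages, 0)
#define	kvm_recache(addr, npages)	kvm_setcache(addr, npages, 1)

/*
 * Hypothetical caller: make a locked kernel buffer uncached while a device
 * accesses it, then make it cacheable again afterwards.
 */
kvm_uncache(pmap_dma_buf, 4);	/* mark 4 pages non-cacheable */
/* ... device I/O ... */
kvm_recache(pmap_dma_buf, 4);	/* mark them cacheable again */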
Diffstat (limited to 'sys/arch/sparc')
-rw-r--r--   sys/arch/sparc/include/pmap.h     6
-rw-r--r--   sys/arch/sparc/sparc/pmap.c      41
2 files changed, 27 insertions, 20 deletions
diff --git a/sys/arch/sparc/include/pmap.h b/sys/arch/sparc/include/pmap.h
index f3bd9b09faa..4ac517b47f1 100644
--- a/sys/arch/sparc/include/pmap.h
+++ b/sys/arch/sparc/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.13 1999/12/08 10:44:48 art Exp $ */
+/* $OpenBSD: pmap.h,v 1.14 1999/12/09 16:11:48 art Exp $ */
/* $NetBSD: pmap.h,v 1.30 1997/08/04 20:00:47 pk Exp $ */
/*
@@ -305,7 +305,9 @@ void pmap_init __P((void));
int pmap_page_index __P((paddr_t));
void pmap_virtual_space __P((vaddr_t *, vaddr_t *));
void pmap_redzone __P((void));
-void kvm_uncache __P((caddr_t, int));
+void kvm_setcache __P((caddr_t, int, int));
+#define kvm_uncache(addr, npages) kvm_setcache(addr, npages, 0)
+#define kvm_recache(addr, npages) kvm_setcache(addr, npages, 1)
struct user;
void switchexit __P((vm_map_t, struct user *, int));
int mmu_pagein __P((struct pmap *pm, vaddr_t, int));
diff --git a/sys/arch/sparc/sparc/pmap.c b/sys/arch/sparc/sparc/pmap.c
index 13d6e50a7bd..9bd9aa02223 100644
--- a/sys/arch/sparc/sparc/pmap.c
+++ b/sys/arch/sparc/sparc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.53 1999/12/09 14:26:04 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.54 1999/12/09 16:11:48 art Exp $ */
/* $NetBSD: pmap.c,v 1.118 1998/05/19 19:00:18 thorpej Exp $ */
/*
@@ -2763,14 +2763,6 @@ pmap_bootstrap4_4c(nctx, nregion, nsegment)
}
}
-#if defined(UVM)
- uvmexp.pagesize = NBPG;
- uvm_setpagesize();
-#else
- cnt.v_page_size = NBPG;
- vm_set_page_size();
-#endif
-
#if defined(SUN4)
/*
* set up the segfixmask to mask off invalid bits
@@ -5741,8 +5733,8 @@ pmap_kenter_pa4m(va, pa, prot)
int pteproto, ctx;
pteproto = ((pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0) |
- PMAP_T2PTE_SRMMU(pa) | SRMMU_TEPTE | PPROT_RX_RX | PPROT_S |
- ((prot & VM_PROT_WRITE) ? PPROT_WRITE : 0);
+ PMAP_T2PTE_SRMMU(pa) | SRMMU_TEPTE |
+ ((prot & VM_PROT_WRITE) ? PPROT_N_RWX : PPROT_N_RX);
pa &= ~PMAP_TNC_SRMMU;
@@ -5768,7 +5760,7 @@ pmap_kenter_pgs4m(va, pgs, npgs)
* The pages will always be "normal" so they can always be
* cached.
*/
- pteproto = SRMMU_PG_C | SRMMU_TEPTE | PPROT_RX_RX | PPROT_S;
+ pteproto = SRMMU_PG_C | SRMMU_TEPTE | PPROT_N_RX;
#if 0
/*
* XXX - make the pages read-only until we know what protection they
@@ -6508,15 +6500,16 @@ pmap_phys_address(x)
}
/*
- * Turn off cache for a given (va, number of pages).
+ * Turn on/off cache for a given (va, number of pages).
*
* We just assert PG_NC for each PTE; the addresses must reside
* in locked kernel space. A cache flush is also done.
*/
void
-kvm_uncache(va, npages)
+kvm_setcache(va, npages, cached)
caddr_t va;
int npages;
+ int cached;
{
int pte;
struct pvlist *pv;
@@ -6535,9 +6528,15 @@ kvm_uncache(va, npages)
pv = pvhead((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
/* XXX - we probably don't need check for OBMEM */
if ((pte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM && pv) {
- pv_changepte4m(pv, 0, SRMMU_PG_C);
+ if (cached)
+ pv_changepte4m(pv, SRMMU_PG_C, 0);
+ else
+ pv_changepte4m(pv, 0, SRMMU_PG_C);
}
- pte &= ~SRMMU_PG_C;
+ if (cached)
+ pte |= SRMMU_PG_C;
+ else
+ pte &= ~SRMMU_PG_C;
setpte4m((vaddr_t) va, pte);
if ((pte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM)
@@ -6557,9 +6556,15 @@ kvm_uncache(va, npages)
pv = pvhead(pte & PG_PFNUM);
/* XXX - we probably don't need to check for OBMEM */
if ((pte & PG_TYPE) == PG_OBMEM && pv) {
- pv_changepte4_4c(pv, PG_NC, 0);
+ if (cached)
+ pv_changepte4_4c(pv, 0, PG_NC);
+ else
+ pv_changepte4_4c(pv, PG_NC, 0);
}
- pte |= PG_NC;
+ if (cached)
+ pte &= ~PG_NC;
+ else
+ pte |= PG_NC;
setpte4(va, pte);
if ((pte & PG_TYPE) == PG_OBMEM)
cache_flush_page((int)va);
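As the last two hunks show, the cached/uncached polarity differs between MMU families: the sun4m (SRMMU) PTE carries a cacheable bit (SRMMU_PG_C) that kvm_setcache sets when caching is enabled, while the sun4/sun4c PTE carries a non-cacheable bit (PG_NC) that it clears. A minimal standalone sketch of that bit manipulation follows; the constant values here are placeholders, not the real definitions from the sparc pte headers.

/* Placeholder values; the real definitions live in the sparc pte headers. */
#define SRMMU_PG_C	0x80	/* sun4m: page is cacheable */
#define PG_NC		0x100	/* sun4/sun4c: page is NOT cacheable */

/* sun4m: the cacheable bit is set when caching is enabled. */
static int
setcache_pte4m(int pte, int cached)
{
	return cached ? (pte | SRMMU_PG_C) : (pte & ~SRMMU_PG_C);
}

/* sun4/sun4c: the non-cacheable bit is cleared when caching is enabled. */
static int
setcache_pte4_4c(int pte, int cached)
{
	return cached ? (pte & ~PG_NC) : (pte | PG_NC);
}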