path: root/sys
author	Miod Vallat <miod@cvs.openbsd.org>	2014-03-10 21:17:59 +0000
committer	Miod Vallat <miod@cvs.openbsd.org>	2014-03-10 21:17:59 +0000
commit	683800fdaa7922d20c840dade377b3cc138078c8 (patch)
tree	b3d3ac4baab2be4ac86e78c8a609ce2979f87645 /sys
parent	818f5de7a8d1b822002472c3603619b1290a874f (diff)
Support PMAP_NOCACHE in pmap_enter() flags. If set when mapping a managed
page, the pte is created uncached.
Make sure pmap_enter_pv() honours the cache bits of the pte, instead of
assuming it will only get called for cached pages. Have it set PV_UNCACHED
in the pv flags for the page, if this is the first use of this page and the
mapping is not cached.
Only check for a virtual aliasing cache condition if the new mapping is
cached.
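
A minimal usage sketch, not part of this commit: a hypothetical caller
requesting an uncached mapping of a managed page through the new flag.
The variables `pg' and `va' are assumed to come from the caller's context.

	/* Map a managed page uncached: PMAP_NOCACHE makes pmap_enter()
	 * build the pte with PG_UNCACHED instead of PG_CACHED. */
	paddr_t pa = VM_PAGE_TO_PHYS(pg);

	pmap_enter(pmap_kernel(), va, pa, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED | PMAP_NOCACHE);
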
Diffstat (limited to 'sys')
-rw-r--r--	sys/arch/mips64/include/pmap.h	4
-rw-r--r--	sys/arch/mips64/mips64/pmap.c	14
2 files changed, 14 insertions, 4 deletions
diff --git a/sys/arch/mips64/include/pmap.h b/sys/arch/mips64/include/pmap.h
index 1e73c953a39..3ab8e93f22c 100644
--- a/sys/arch/mips64/include/pmap.h
+++ b/sys/arch/mips64/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.31 2014/02/08 09:34:04 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.32 2014/03/10 21:17:58 miod Exp $ */
/*
* Copyright (c) 1987 Carnegie-Mellon University
@@ -133,6 +133,8 @@ typedef struct pmap {
#define PV_ATTR_REF PG_PMAP3
#define PV_PRESERVE (PV_ATTR_MOD | PV_ATTR_REF)
+#define PMAP_NOCACHE PMAP_MD0
+
extern struct pmap *const kernel_pmap_ptr;
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
diff --git a/sys/arch/mips64/mips64/pmap.c b/sys/arch/mips64/mips64/pmap.c
index 6b30afed2d0..21ef6dd20ba 100644
--- a/sys/arch/mips64/mips64/pmap.c
+++ b/sys/arch/mips64/mips64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.68 2014/02/08 09:34:04 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.69 2014/03/10 21:17:58 miod Exp $ */
/*
* Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -916,6 +916,10 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
}
}
}
+ if (flags & PMAP_NOCACHE) {
+ npte &= ~PG_CACHED;
+ npte |= PG_UNCACHED;
+ }
/* Set page referenced/modified status based on flags */
if (flags & VM_PROT_WRITE)
@@ -1541,7 +1545,10 @@ pmap_enter_pv(pmap_t pmap, vaddr_t va, vm_page_t pg, pt_entry_t *npte)
stat_count(enter_stats.firstpv);
pv->pv_va = va;
- atomic_setbits_int(&pg->pg_flags, PV_CACHED);
+ if (*npte & PG_CACHED)
+ atomic_setbits_int(&pg->pg_flags, PV_CACHED);
+ if (*npte & PG_UNCACHED)
+ atomic_setbits_int(&pg->pg_flags, PV_UNCACHED);
pv->pv_pmap = pmap;
pv->pv_next = NULL;
} else {
@@ -1580,7 +1587,8 @@ pmap_enter_pv(pmap_t pmap, vaddr_t va, vm_page_t pg, pt_entry_t *npte)
return ENOMEM;
}
- if ((pg->pg_flags & PV_CACHED) != 0 && cache_valias_mask != 0) {
+ if ((*npte & PG_CACHED) != 0 &&
+ (pg->pg_flags & PV_CACHED) != 0 && cache_valias_mask != 0) {
/*
* We have a VAC possibility. Check if virtual
* address of current mappings are compatible
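
For context only, not part of the diff above: a sketch of the virtual-alias
test the truncated comment refers to, assuming cache_valias_mask holds the
virtual-address bits in which two cached mappings of the same physical page
must agree to avoid aliasing in a virtually-indexed cache. The helper name
vac_conflict is hypothetical.

	/* Two cached mappings of one physical page may alias when their
	 * virtual addresses differ in the bits of cache_valias_mask. */
	static inline int
	vac_conflict(vaddr_t va1, vaddr_t va2)
	{
		return ((va1 ^ va2) & cache_valias_mask) != 0;
	}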