author    Dale Rahn <drahn@cvs.openbsd.org>  2010-07-01 22:40:11 +0000
committer Dale Rahn <drahn@cvs.openbsd.org>  2010-07-01 22:40:11 +0000
commit    dc46e3aab069890452d5923b1fbb4dea446deb37 (patch)
tree      d23deb0aa0170d00017331afe0c6b83a3c8f0935 /sys
parent    781c885708c5bd7d1339d1021e0fd04286a8be60 (diff)
Add more support bits for ARMv7, including framework for VIPT caches (pmap_prefer).
Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/arm/arm/cpufunc.c            | 108
-rw-r--r--  sys/arch/arm/arm/cpufunc_asm_armv5.S  |   4
-rw-r--r--  sys/arch/arm/arm/cpufunc_asm_armv7.S  | 130
-rw-r--r--  sys/arch/arm/arm/pmap.c               |  32
-rw-r--r--  sys/arch/arm/include/pmap.h           |  11
5 files changed, 271 insertions(+), 14 deletions(-)
diff --git a/sys/arch/arm/arm/cpufunc.c b/sys/arch/arm/arm/cpufunc.c
index 3f09ae0c8a9..697f80ea327 100644
--- a/sys/arch/arm/arm/cpufunc.c
+++ b/sys/arch/arm/arm/cpufunc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpufunc.c,v 1.12 2010/02/22 16:45:29 kevlo Exp $ */
+/* $OpenBSD: cpufunc.c,v 1.13 2010/07/01 22:40:10 drahn Exp $ */
/* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
/*
@@ -49,7 +49,9 @@
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
+#include <uvm/uvm.h>
#include <machine/cpu.h>
+#include <machine/pmap.h>
#include <machine/bootconfig.h>
#include <uvm/uvm.h>
@@ -1085,10 +1087,31 @@ log2(int size)
void
arm_get_cachetype_cp15v7(void)
{
+ extern int pmap_cachevivt;
uint32_t cachereg;
uint32_t cache_level_id;
uint32_t line_size, ways, sets, size;
uint32_t sel;
+ uint32_t ctr;
+
+ __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
+ : "=r" (ctr) :);
+
+ switch ((ctr >> 14) & 3) {
+ case 2:
+ pmap_cachevivt = 0;
+ #if 0
+ pmap_alias_dist = 0x4000;
+ pmap_alias_bits = 0x3000;
+ #endif
+ break;
+ case 3:
+ pmap_cachevivt = 0;
+ break;
+ default:
+ break;
+ }
+
__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
: "=r" (cache_level_id) :);
@@ -1110,8 +1133,10 @@ arm_get_cachetype_cp15v7(void)
switch (cachereg & 0xc0000000) {
case 0x00000000:
arm_pcache_type = 0;
+ break;
case 0x40000000:
arm_pcache_type = CPU_CT_CTYPE_WT;
+ break;
case 0x80000000:
case 0xc0000000:
arm_pcache_type = CPU_CT_CTYPE_WB1;
@@ -1148,7 +1173,88 @@ arm_get_cachetype_cp15v7(void)
arm_dcache_l2_assoc = log2(ways);
arm_dcache_l2_linesize = log2(line_size);
}
+
+/*
+ * Write back and invalidate both the I and D caches.
+ */
+void
+armv7_idcache_wbinv_all()
+{
+ uint32_t arg;
+ arg = 0;
+ __asm __volatile("mcr p15, 0, %0, c7, c5, 0" :: "r" (arg)); /* ICIALLU */
+ armv7_dcache_wbinv_all();
+}
+
+/* Brute force cache flushing: walk every set/way of each level. */
+void
+armv7_dcache_wbinv_all()
+{
+ int sets, ways, lvl;
+ int nincr, nsets, nways;
+ uint32_t wayincr, setincr;
+ uint32_t wayval, setval;
+ uint32_t word;
+
+ nsets = arm_picache_size / arm_picache_ways / arm_picache_line_size;
+ nways = arm_picache_ways;
+ nincr = arm_picache_line_size;
+
+ wayincr = 1 << (32 - log2(arm_picache_ways));
+ setincr = arm_picache_line_size;
+
+#if 0
+ printf("l1 nsets %d nways %d nincr %d wayincr %x setincr %x\n",
+ nsets, nways, nincr, wayincr, setincr);
+#endif
+
+ lvl = 0; /* L1 */
+ setval = 0;
+ for (sets = 0; sets < nsets; sets++) {
+ wayval = 0;
+ for (ways = 0; ways < nways; ways++) {
+ word = wayval | setval | lvl;
+
+ /* Clean and invalidate D cache SE with Set/Index (DCCISW) */
+ __asm __volatile("mcr p15, 0, %0, c7, c14, 2"
+ : : "r" (word));
+ wayval += wayincr;
+ }
+ setval += setincr;
+ }
+ /* drain the write buffer */
+ __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
+
+ /* L2 */
+ nsets = 1 << arm_dcache_l2_nsets;
+ nways = 1 << arm_dcache_l2_assoc;
+ nincr = 1 << arm_dcache_l2_linesize;
+
+ wayincr = 1 << (32 - arm_dcache_l2_assoc);
+ setincr = nincr;
+
+#if 0
+ printf("l2 nsets %d nways %d nincr %d wayincr %x setincr %x\n",
+ nsets, nways, nincr, wayincr, setincr);
+#endif
+
+ lvl = 1 << 1; /* L2 */
+ setval = 0;
+ for (sets = 0; sets < nsets; sets++) {
+ wayval = 0;
+ for (ways = 0; ways < nways; ways++) {
+ word = wayval | setval | lvl;
+
+ /* Clean and invalidate D cache SE with Set/Index (DCCISW) */
+ __asm __volatile("mcr p15, 0, %0, c7, c14, 2"
+ : : "r" (word));
+ wayval += wayincr;
+ }
+ setval += setincr;
+ }
+ /* drain the write buffer */
+ __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
+
+}
+#endif /* CPU_ARMv7 */
/*
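
The set/way loops above build operands for the clean-by-set/way operation.
Architecturally the operand packs the way index into the top log2(ways) bits,
the set index starting at bit log2(line size), and the cache level minus one
into bits [3:1]; the wayincr and setincr strides step exactly those fields.
A hedged sketch of the encoding (ilog2 and setway_word are illustrative
names, not kernel API):

    #include <stdint.h>

    /* log2 for exact powers of two */
    static uint32_t
    ilog2(uint32_t v)
    {
    	uint32_t r = 0;
    	while (v >>= 1)
    		r++;
    	return r;
    }

    /*
     * Build a v7 set/way operand: way in bits [31 : 32 - log2(assoc)],
     * set starting at bit log2(linesize), (level - 1) in bits [3:1].
     */
    static uint32_t
    setway_word(uint32_t way, uint32_t set, uint32_t level,
        uint32_t assoc, uint32_t linesize)
    {
    	return (way << (32 - ilog2(assoc))) |
    	    (set << ilog2(linesize)) | ((level - 1) << 1);
    }
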
diff --git a/sys/arch/arm/arm/cpufunc_asm_armv5.S b/sys/arch/arm/arm/cpufunc_asm_armv5.S
index 8040e3be81d..d220f9072b2 100644
--- a/sys/arch/arm/arm/cpufunc_asm_armv5.S
+++ b/sys/arch/arm/arm/cpufunc_asm_armv5.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpufunc_asm_armv5.S,v 1.1 2008/09/11 02:38:14 kevlo Exp $ */
+/* $OpenBSD: cpufunc_asm_armv5.S,v 1.2 2010/07/01 22:40:10 drahn Exp $ */
/* $NetBSD: cpufunc_asm_armv5.S,v 1.3 2007/01/06 00:50:54 christos Exp $ */
/*
@@ -228,7 +228,7 @@ ENTRY(armv5_dcache_wbinv_all)
* four variables is assumed in the code above. Hence the reason for
* declaring them in the assembler file.
*/
- .align 0
+ .align 2
C_OBJECT(armv5_dcache_sets_max)
.space 4
C_OBJECT(armv5_dcache_index_max)
diff --git a/sys/arch/arm/arm/cpufunc_asm_armv7.S b/sys/arch/arm/arm/cpufunc_asm_armv7.S
index 5cb98b82ee1..0dfcea6845b 100644
--- a/sys/arch/arm/arm/cpufunc_asm_armv7.S
+++ b/sys/arch/arm/arm/cpufunc_asm_armv7.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpufunc_asm_armv7.S,v 1.1 2009/05/09 02:28:22 drahn Exp $ */
+/* $OpenBSD: cpufunc_asm_armv7.S,v 1.2 2010/07/01 22:40:10 drahn Exp $ */
/*
* Copyright (c) 2008 Dale Rahn <drahn@openbsd.org>
*
@@ -85,16 +85,131 @@ ENTRY(armv7_tlb_flushD_SE)
mov pc, lr
-/* STUBS - implement these */
-ENTRY(armv7_icache_sync_all)
+/*
+ * Cache operations. For the entire cache we use the set/index
+ * operations.
+ */
+ s_max .req r0
+ i_max .req r1
+ s_inc .req r2
+ i_inc .req r3
ENTRY(armv7_icache_sync_range)
-ENTRY(armv7_dcache_wbinv_all)
-ENTRY(armv7_dcache_wbinv_range)
+ ldr ip, .Larmv7_line_size
+ cmp r1, #0x4000
+ bcs .Larmv7_icache_sync_all
+ ldr ip, [ip]
+ sub r1, r1, #1 /* Don't overrun */
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+1:
+ mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
+ mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl 1b
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ mov pc, lr
+
+ENTRY(armv7_icache_sync_all)
+.Larmv7_icache_sync_all:
+ /*
+ * We assume that the code here can never be out of sync with the
+ * dcache, so that we can safely flush the Icache and fall through
+ * into the Dcache cleaning code.
+ */
+ mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
+ /* Fall through to clean Dcache. */
+
+.Larmv7_dcache_wb:
+ ldr ip, .Larmv7_cache_data
+ ldmia ip, {s_max, i_max, s_inc, i_inc}
+1:
+ orr ip, s_max, i_max
+2:
+ mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
+ sub ip, ip, i_inc
+ tst ip, i_max /* Index 0 is last one */
+ bne 2b /* Next index */
+ mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
+ subs s_max, s_max, s_inc
+ bpl 1b /* Next set */
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ mov pc, lr
+
+.Larmv7_line_size:
+ .word _C_LABEL(arm_pdcache_line_size)
+
ENTRY(armv7_dcache_wb_range)
-ENTRY(armv7_idcache_wbinv_all)
+#if 1
+ /* until pmap can invalidate before unmapping */
+ /* XXX this also invalidates */
+ b _C_LABEL(armv7_dcache_wbinv_all)
+#else
+ ldr ip, .Larmv7_line_size
+ cmp r1, #0x4000
+ bcs .Larmv7_dcache_wb
+ ldr ip, [ip]
+ sub r1, r1, #1 /* Don't overrun */
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+1:
+ mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl 1b
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+#endif
+ mov pc, lr
+
ENTRY(armv7_idcache_wbinv_range)
+ ldr ip, .Larmv7_line_size
+ cmp r1, #0x4000
+#if 0
+ bcs .Larmv7_idcache_wbinv_all
+#else
+ bcc 1f
+ mov r1, #0x4000
+1:
+#endif
+ ldr ip, [ip]
+ sub r1, r1, #1 /* Don't overrun */
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+1:
+ mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
+ mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl 1b
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
mov pc, lr
+ENTRY(armv7_dcache_wbinv_range)
+ ldr ip, .Larmv7_line_size
+ cmp r1, #0x4000
+ bcs _C_LABEL(armv7_dcache_wbinv_all)
+ ldr ip, [ip]
+ sub r1, r1, #1 /* Don't overrun */
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+1:
+ mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
+ mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl 1b
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ mov pc, lr
+
+
/*
* Context switch.
*
@@ -123,6 +238,9 @@ ENTRY(armv7_context_switch)
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x) _DATA_OBJECT(_C_LABEL(x))
+.Larmv7_cache_data:
+ .word _C_LABEL(armv7_dcache_sets_max)
+
.align 2
C_OBJECT(armv7_dcache_sets_max)
.word 0
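
All of the range routines above follow one pattern: align the start address
down to a cache-line boundary, stretch the length to cover the partial tail
line, issue one maintenance operation per line, then drain the write buffer.
A minimal C rendering of that loop under an assumed 32-byte line size
(LINE_SIZE and dcache_wb_range are illustrative, not kernel API):

    #include <stdint.h>

    #define LINE_SIZE 32u	/* assumed L1 D-cache line size */

    /* Clean the D cache by MVA over [va, va + len). */
    static void
    dcache_wb_range(uintptr_t va, uint32_t len)
    {
    	uintptr_t end = va + len;

    	va &= ~(uintptr_t)(LINE_SIZE - 1);	/* align down to a line */
    	for (; va < end; va += LINE_SIZE)
    		__asm volatile("mcr p15, 0, %0, c7, c10, 1"	/* DCCMVAC */
    		    :: "r" (va) : "memory");
    	__asm volatile("mcr p15, 0, %0, c7, c10, 4"	/* drain write buffer */
    	    :: "r" (0) : "memory");
    }

The 0x4000 threshold in the assembly serves the same purpose a size check
would here: past 16 KB it is cheaper to flush the whole cache than to walk
the range line by line.
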
diff --git a/sys/arch/arm/arm/pmap.c b/sys/arch/arm/arm/pmap.c
index 96df3c0a0e8..0cd9647cc2a 100644
--- a/sys/arch/arm/arm/pmap.c
+++ b/sys/arch/arm/arm/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.25 2010/03/31 19:46:26 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.26 2010/07/01 22:40:10 drahn Exp $ */
/* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
/*
@@ -229,7 +229,7 @@ int pmap_debug_level = 0;
#define PDB_KREMOVE 0x40000
int debugmap = 1;
-int pmapdebug = 0;
+int pmapdebug = PDB_ENTER|PDB_PVDUMP;
#define NPDEBUG(_lev_,_stat_) \
if (pmapdebug & (_lev_)) \
((_stat_))
@@ -299,6 +299,8 @@ extern caddr_t msgbufaddr;
*/
boolean_t pmap_initialized;
+int pmap_cachevivt = 1;
+
/*
* Misc. locking data structures
*/
@@ -2343,8 +2345,6 @@ pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
cleanlist_idx++;
pm->pm_remove_all = TRUE;
} else {
- *ptep = 0;
- PTE_SYNC(ptep);
if (pm->pm_remove_all == FALSE) {
if (is_exec)
pmap_tlb_flushID_SE(pm, sva);
@@ -2352,6 +2352,8 @@ pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
if (is_refd)
pmap_tlb_flushD_SE(pm, sva);
}
+ *ptep = 0;
+ PTE_SYNC(ptep);
}
sva += PAGE_SIZE;
@@ -2365,6 +2367,10 @@ pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
total += cleanlist_idx;
for (cnt = 0; cnt < cleanlist_idx; cnt++) {
+ if (pmap_cachevivt == 0 &&
+ curproc->p_vmspace->vm_map.pmap != pm) {
+ pmap_idcache_wbinv_all(pm);
+ } else
if (pm->pm_cstate.cs_all != 0) {
vaddr_t clva = cleanlist[cnt].va & ~1;
if (cleanlist[cnt].va & 1) {
@@ -5188,3 +5194,21 @@ pmap_dump_ncpg(pmap_t pm)
}
}
#endif
+
+uint32_t pmap_alias_dist;
+uint32_t pmap_alias_bits;
+
+void
+pmap_prefer(vaddr_t foff, vaddr_t *vap)
+{
+ vaddr_t va = *vap;
+ long d, m;
+
+ m = pmap_alias_dist;
+ if (m == 0) /* m=0 => no cache aliasing */
+ return;
+
+ d = foff - va;
+ d &= (m - 1);
+ *vap = va + d;
+}
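
pmap_prefer() works because a VIPT cache indexes with virtual-address bits
below pmap_alias_dist; if every mapping of a page agrees in those bits, all
mappings land in the same cache set and cannot alias. A worked example of the
arithmetic, assuming a 16 KB alias distance (all values made up for
illustration):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
    	uint32_t m = 0x4000;		/* assumed pmap_alias_dist: 16 KB */
    	uint32_t foff = 0x15234;	/* file offset being mapped */
    	uint32_t va = 0x20000;		/* candidate address from uvm */
    	uint32_t d = (foff - va) & (m - 1);

    	/* prints 0x21234; now (va & 0x3fff) == (foff & 0x3fff) */
    	printf("preferred va 0x%x\n", va + d);
    	return 0;
    }
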
diff --git a/sys/arch/arm/include/pmap.h b/sys/arch/arm/include/pmap.h
index fdb0ba8b476..3b7f30031cb 100644
--- a/sys/arch/arm/include/pmap.h
+++ b/sys/arch/arm/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.11 2009/05/24 04:56:19 drahn Exp $ */
+/* $OpenBSD: pmap.h,v 1.12 2010/07/01 22:40:10 drahn Exp $ */
/* $NetBSD: pmap.h,v 1.76 2003/09/06 09:10:46 rearnsha Exp $ */
/*
@@ -615,6 +615,15 @@ extern void (*pmap_zero_page_func)(struct vm_page *);
#define L2_L_MAPPABLE_P(va, pa, size) \
((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
+#ifndef _LOCORE
+/* pmap_prefer bits for VIPT ARMv7 */
+#define PMAP_PREFER(fo, ap) pmap_prefer((fo), (ap))
+void pmap_prefer(vaddr_t, vaddr_t *);
+
+extern uint32_t pmap_alias_dist;
+extern uint32_t pmap_alias_bits;
+#endif /* _LOCORE */
+
#endif /* _KERNEL */
#endif /* _ARM32_PMAP_H_ */
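
UVM consults PMAP_PREFER when it searches for a free virtual address, letting
the pmap nudge the candidate within the alias window before the mapping is
entered. A simplified caller-side sketch (pick_va is illustrative; the real
logic lives in uvm_map.c):

    /* How an address-selection path might apply the hook. */
    static vaddr_t
    pick_va(vaddr_t hint, voff_t uoffset)
    {
    	vaddr_t va = hint;
    #ifdef PMAP_PREFER
    	PMAP_PREFER(uoffset, &va);	/* avoid VIPT cache aliases */
    #endif
    	return va;
    }
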