summaryrefslogtreecommitdiff
path: root/sys/arch/octeon/include
diff options
context:
space:
mode:
authorMiod Vallat <miod@cvs.openbsd.org>2014-03-31 20:21:20 +0000
committerMiod Vallat <miod@cvs.openbsd.org>2014-03-31 20:21:20 +0000
commit72c9714ab09dc9052b686f0defa49cc3a4cae679 (patch)
tree64fdc0fe21d20728c565d5443365474183b6423f /sys/arch/octeon/include
parent72f1073bb0d71241439897a02415c267f9aa4430 (diff)
Due to the virtually indexed nature of the L1 instruction cache on most mips
processors, every time a new text page is mapped in a pmap, the L1 I$ is flushed for the va spanned by this page. Since we map pages of our binaries upon demand, as they get faulted in, but uvm_fault() tries to map the few neighbour pages, this can end up in a bunch of pmap_enter() calls in a row, for executable mappings. If the L1 I$ is small enough, this can cause the whole L1 I$ cache to be flushed several times. Change pmap_enter() to postpone these flushes by only registering the pending flushes, and have pmap_update() perform them. The cpu-specific cache code can then optimize this to avoid unnecessary operations. Tested on R4000SC, R4600SC, R5000SC, RM7000, R10000 with 4KB and 16KB page sizes (coherent and non-coherent designs), and Loongson 2F by mikeb@ and me. Should not affect anything on Octeon since there is no way to flush a subset of I$ anyway.
Diffstat (limited to 'sys/arch/octeon/include')
-rw-r--r--sys/arch/octeon/include/cpu.h10
1 files changed, 7 insertions, 3 deletions
diff --git a/sys/arch/octeon/include/cpu.h b/sys/arch/octeon/include/cpu.h
index 91fcb5dd270..da18f81f9eb 100644
--- a/sys/arch/octeon/include/cpu.h
+++ b/sys/arch/octeon/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.5 2014/03/09 10:12:17 miod Exp $ */
+/* $OpenBSD: cpu.h,v 1.6 2014/03/31 20:21:19 miod Exp $ */
/*-
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
@@ -65,11 +65,15 @@ void hw_ipi_intr_clear(u_long);
Octeon_SyncCache((ci))
#define Mips_InvalidateICache(ci, va, l) \
Octeon_InvalidateICache((ci), (va), (l))
+#define Mips_InvalidateICachePage(ci, va) \
+ Octeon_InvalidateICachePage((ci), (va))
+#define Mips_SyncICache(ci) \
+ Octeon_SyncICache((ci))
#define Mips_SyncDCachePage(ci, va, pa) \
Octeon_SyncDCachePage((ci), (va), (pa))
-#define Mips_HitSyncDCache(ci, va, l) \
+#define Mips_HitSyncDCache(ci, va, l) \
Octeon_HitSyncDCache((ci), (va), (l))
-#define Mips_IOSyncDCache(ci, va, l, h) \
+#define Mips_IOSyncDCache(ci, va, l, h) \
Octeon_IOSyncDCache((ci), (va), (l), (h))
#define Mips_HitInvalidateDCache(ci, va, l) \
Octeon_HitInvalidateDCache((ci), (va), (l))