author      Miod Vallat <miod@cvs.openbsd.org>    2009-12-03 06:02:39 +0000
committer   Miod Vallat <miod@cvs.openbsd.org>    2009-12-03 06:02:39 +0000
commit      1e2a1f061c3c0090c1cafe2aedc73ea9e7acab5b (patch)
tree        69070db45a16cf0922d228f5b7f0f49e92aaea19
parent      9de6f691ee225583c25b89f80bcac8f26d9693e6 (diff)
RM7000 processors with 64 TLB pairs instead of the usual 48 use an external
control bit to enable use of the extra 16, so that they can be used as drop-in R5000 replacements without modifying RM7000-unaware software. Because of this, when a 64 TLB RM7000 processor is detected, check that the upper 16 entries really will be used before deciding how many TLB entries the kernel needs to invalidate when the ASID wraps.
-rw-r--r--  sys/arch/sgi/sgi/machdep.c  |  57
1 file changed, 41 insertions(+), 16 deletions(-)
diff --git a/sys/arch/sgi/sgi/machdep.c b/sys/arch/sgi/sgi/machdep.c
index bbe75a18722..769e6804d61 100644
--- a/sys/arch/sgi/sgi/machdep.c
+++ b/sys/arch/sgi/sgi/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.90 2009/11/19 20:16:27 miod Exp $ */
+/* $OpenBSD: machdep.c,v 1.91 2009/12/03 06:02:38 miod Exp $ */
/*
* Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -156,16 +156,6 @@ mips_init(int argc, void *argv, caddr_t boot_esym)
*/
setsr(getsr() | SR_KX | SR_UX);
-#ifdef notyet
- /*
- * Make sure CKSEG0 cacheability match what we intend to use.
- *
- * XXX This does not work as expected on IP30. Does ARCBios
- * XXX depend on this?
- */
- cp0_setcfg((cp0_getcfg() & ~0x07) | CCA_CACHED);
-#endif
-
/*
* Clear the compiled BSS segment in OpenBSD code.
*/
@@ -366,11 +356,46 @@ mips_init(int argc, void *argv, caddr_t boot_esym)
*/
switch(sys_config.cpu[0].type) {
case MIPS_RM7000:
- /* Rev A (version >= 2) CPU's have 64 TLB entries. */
- if (sys_config.cpu[0].vers_maj < 2) {
- sys_config.cpu[0].tlbsize = 48;
- } else {
- sys_config.cpu[0].tlbsize = 64;
+ /*
+ * Rev A (version >= 2) CPU's have 64 TLB entries.
+ *
+ * However, the last 16 are only enabled if one
+ * particular configuration bit (mode bit #24)
+ * is set on cpu reset, so check whether the
+ * extra TLB entries are really usable.
+ *
+ * If they are disabled, they are nevertheless
+ * writable, but random TLB insert operations
+ * will never use any of them. This can be
+ * checked by inserting dummy entries and checking
+ * whether any of the last 16 entries have been used.
+ *
+ * Of course, due to the way the random replacement
+ * works (hashing various parts of the TLB data,
+ * such as address bits and ASID), not all the
+ * available TLB will be used; we simply check
+ * the highest valid TLB entry we can find and
+ * see if it is in the upper 16 entries or not.
+ */
+ sys_config.cpu[0].tlbsize = 48;
+ if (sys_config.cpu[0].vers_maj >= 2) {
+ struct tlb_entry te;
+ int e, lastvalid;
+
+ tlb_set_wired(0);
+ tlb_flush(64);
+ for (e = 0; e < 64 * 8; e++)
+ tlb_update(XKSSEG_BASE + ptoa(2 * e),
+ pfn_to_pad(0) | PG_ROPAGE);
+ lastvalid = 0;
+ for (e = 0; e < 64; e++) {
+ tlb_read(e, &te);
+ if ((te.tlb_lo0 & PG_V) != 0)
+ lastvalid = e;
+ }
+ tlb_flush(64);
+ if (lastvalid >= 48)
+ sys_config.cpu[0].tlbsize = 64;
}
break;
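
For readers following the new logic outside the context of machdep.c, the probe can be read as a small standalone routine. The following is a minimal sketch only, assuming the mips64 TLB primitives (tlb_set_wired, tlb_flush, tlb_update, tlb_read) and the PTE constants and macros (XKSSEG_BASE, PG_ROPAGE, PG_V, ptoa, pfn_to_pad) declared in the kernel's machine-dependent headers; the function name rm7k_tlb_probe_size is invented for this example and does not appear in the patch.

/*
 * Sketch of the RM7000 TLB-size probe added above, as a helper.
 * Returns the number of usable TLB entries (48 or 64).
 */
int
rm7k_tlb_probe_size(void)
{
	struct tlb_entry te;
	int e, lastvalid;

	/* Start from a clean, fully invalidated 64-entry TLB. */
	tlb_set_wired(0);
	tlb_flush(64);

	/*
	 * Insert far more read-only dummy mappings than there are
	 * TLB entries, so the random replacement logic gets a chance
	 * to touch every entry it is allowed to use.
	 */
	for (e = 0; e < 64 * 8; e++)
		tlb_update(XKSSEG_BASE + ptoa(2 * e),
		    pfn_to_pad(0) | PG_ROPAGE);

	/* Find the highest entry the hardware actually wrote. */
	lastvalid = 0;
	for (e = 0; e < 64; e++) {
		tlb_read(e, &te);
		if ((te.tlb_lo0 & PG_V) != 0)
			lastvalid = e;
	}

	/* Leave no stale dummy mappings behind. */
	tlb_flush(64);

	/* If the upper 16 entries were used, the full 64-entry TLB is live. */
	return (lastvalid >= 48 ? 64 : 48);
}

Inserting eight times as many dummy mappings as there are TLB entries gives the random replacement logic, which (as the comment in the patch notes) hashes address bits and ASID, enough opportunities to land in the upper entries when they are enabled; only the highest valid index matters for the decision.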