author    Miod Vallat <miod@cvs.openbsd.org>    2014-03-22 00:01:05 +0000
committer Miod Vallat <miod@cvs.openbsd.org>    2014-03-22 00:01:05 +0000
commit    ed1fc07629682217b4262db530476fa21391c3ca (patch)
tree      3c12a739377e08409d6ae7265b4e9dc023eb6154 /sys
parent    7a22ae4eeaa65944653755a8ca9c78e4374c2e53 (diff)
Second draft of my attempt to work around the infamous R4000 end-of-page errata, affecting R4000 processors revision 2.x and below (found on most R4000 Indigo and a few R4000 Indy).

Since this errata gets triggered by TLB misses when the code flow crosses a page boundary, this code attempts to identify code pages prone to triggering the errata, and forces the next page to be mapped for at least as long as the current pc lies in the troublesome page, by wiring extra TLB entries. These entries get recycled in a lazy-but-aggressive-enough way, either because of context switches, or because of further tlb exceptions reaching trap().

The errata workaround code is only compiled on R4000-capable kernels (i.e. sgi GENERIC-IP22 and nothing else), and only enabled on affected processors (i.e. not on R4000 revision 3, or on R4400).

There is still room for improvement in unlucky cases, but in this simple enough incarnation, this allows my R4000 2.2 Indigo to finally reliably boot multiuser, even though both /sbin/init and /bin/sh contain code pages which can trigger the errata.
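The trigger condition being tested for, as a minimal C sketch (this mirrors the eop_page_check() routine added below; classify_insn() and the INSNCLASS_* constants already exist in <machine/cpu.h>, while page_va is a hypothetical cached mapping of the page under test):

	/*
	 * A page is vulnerable when its very last word holds a branch or
	 * jump: the delay slot instruction then lives in the next page,
	 * and fetching it may take a TLB miss at the worst possible time.
	 */
	uint32_t insn = *(uint32_t *)(page_va + PAGE_SIZE - 4);
	int vulnerable = (classify_insn(insn) != INSNCLASS_NEUTRAL);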
Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/mips64/conf/files.mips64        3
-rw-r--r--  sys/arch/mips64/include/cpu.h           14
-rw-r--r--  sys/arch/mips64/include/pcb.h            5
-rw-r--r--  sys/arch/mips64/include/pmap.h          40
-rw-r--r--  sys/arch/mips64/mips64/context.S        26
-rw-r--r--  sys/arch/mips64/mips64/genassym.cf       3
-rw-r--r--  sys/arch/mips64/mips64/pmap.c           35
-rw-r--r--  sys/arch/mips64/mips64/r4000_errata.c  245
-rw-r--r--  sys/arch/mips64/mips64/tlbhandler.S     86
-rw-r--r--  sys/arch/mips64/mips64/trap.c           51
-rw-r--r--  sys/arch/sgi/sgi/machdep.c              10
11 files changed, 468 insertions, 50 deletions
diff --git a/sys/arch/mips64/conf/files.mips64 b/sys/arch/mips64/conf/files.mips64
index f9540f1f7ef..6a62a71753c 100644
--- a/sys/arch/mips64/conf/files.mips64
+++ b/sys/arch/mips64/conf/files.mips64
@@ -1,4 +1,4 @@
-# $OpenBSD: files.mips64,v 1.21 2012/09/29 21:37:01 miod Exp $
+# $OpenBSD: files.mips64,v 1.22 2014/03/22 00:01:04 miod Exp $
file arch/mips64/mips64/arcbios.c arcbios
file arch/mips64/mips64/clock.c clock
@@ -28,6 +28,7 @@ file arch/mips64/mips64/exception_tfp.S cpu_r8000
file arch/mips64/mips64/fp_emulate.c
file arch/mips64/mips64/lcore_access.S
file arch/mips64/mips64/lcore_float.S
+file arch/mips64/mips64/r4000_errata.c cpu_r4000
file arch/mips64/mips64/tlbhandler.S !cpu_r8000
file arch/mips64/mips64/tlb_tfp.S cpu_r8000
diff --git a/sys/arch/mips64/include/cpu.h b/sys/arch/mips64/include/cpu.h
index dadb80c45e6..4ce540bc839 100644
--- a/sys/arch/mips64/include/cpu.h
+++ b/sys/arch/mips64/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.97 2014/03/21 23:05:41 miod Exp $ */
+/* $OpenBSD: cpu.h,v 1.98 2014/03/22 00:01:04 miod Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -400,11 +400,13 @@ void tlb_asid_wrap(struct cpu_info *);
void tlb_flush(int);
void tlb_flush_addr(vaddr_t);
void tlb_init(unsigned int);
+int64_t tlb_probe(vaddr_t);
void tlb_set_gbase(vaddr_t, vsize_t);
void tlb_set_page_mask(uint32_t);
void tlb_set_pid(u_int);
void tlb_set_wired(uint32_t);
int tlb_update(vaddr_t, register_t);
+void tlb_update_indexed(vaddr_t, register_t, register_t, uint);
void build_trampoline(vaddr_t, vaddr_t);
void cpu_switchto_asm(struct proc *, struct proc *);
@@ -434,6 +436,16 @@ int classify_insn(uint32_t);
#define INSNCLASS_BRANCH 2
/*
+ * R4000 end-of-page errata workaround routines
+ */
+
+extern int r4000_errata;
+u_int eop_page_check(paddr_t);
+int eop_tlb_miss_handler(struct trap_frame *, struct cpu_info *,
+ struct proc *);
+void eop_cleanup(struct trap_frame *, struct proc *);
+
+/*
* Low level access routines to CPU registers
*/
diff --git a/sys/arch/mips64/include/pcb.h b/sys/arch/mips64/include/pcb.h
index 6243b028b24..701ca2ae375 100644
--- a/sys/arch/mips64/include/pcb.h
+++ b/sys/arch/mips64/include/pcb.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pcb.h,v 1.5 2011/03/23 16:54:36 pirofti Exp $ */
+/* $OpenBSD: pcb.h,v 1.6 2014/03/22 00:01:04 miod Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -52,6 +52,9 @@ struct pcb {
} pcb_context; /* kernel context for resume */
int pcb_onfault; /* for copyin/copyout faults */
void *pcb_segtab; /* copy of pmap pm_segtab */
+ uint pcb_nwired; /* number of extra wired TLB entries */
+ vaddr_t pcb_wiredva; /* va of above */
+ vaddr_t pcb_wiredpc; /* last tracked pc value within above */
};
/*
diff --git a/sys/arch/mips64/include/pmap.h b/sys/arch/mips64/include/pmap.h
index 4150157c7d2..7390b4e2988 100644
--- a/sys/arch/mips64/include/pmap.h
+++ b/sys/arch/mips64/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.33 2014/03/21 21:49:45 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.34 2014/03/22 00:01:04 miod Exp $ */
/*
* Copyright (c) 1987 Carnegie-Mellon University
@@ -87,15 +87,15 @@
#else
#define SEGSHIFT (PAGE_SHIFT + PMAP_L2SHIFT - 2)
#endif
-#define NBSEG (1UL << SEGSHIFT)
+#define NBSEG (1UL << SEGSHIFT)
#define SEGOFSET (NBSEG - 1)
-#define mips_trunc_seg(x) ((vaddr_t)(x) & ~SEGOFSET)
-#define mips_round_seg(x) (((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)
-#define pmap_segmap(m, v) ((m)->pm_segtab->seg_tab[((v) >> SEGSHIFT)])
+#define mips_trunc_seg(x) ((vaddr_t)(x) & ~SEGOFSET)
+#define mips_round_seg(x) (((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)
+#define pmap_segmap(m, v) ((m)->pm_segtab->seg_tab[((v) >> SEGSHIFT)])
/* number of segments entries */
-#define PMAP_SEGTABSIZE (PMAP_L2SIZE / sizeof(void *))
+#define PMAP_SEGTABSIZE (PMAP_L2SIZE / sizeof(void *))
struct segtab {
pt_entry_t *seg_tab[PMAP_SEGTABSIZE];
@@ -131,26 +131,28 @@ typedef struct pmap {
#define PGF_CACHED PG_PMAP1 /* Page is currently cached */
#define PGF_ATTR_MOD PG_PMAP2
#define PGF_ATTR_REF PG_PMAP3
-#define PGF_PRESERVE (PGF_ATTR_MOD | PGF_ATTR_REF)
+#define PGF_EOP_CHECKED PG_PMAP4
+#define PGF_EOP_VULN PG_PMAP5
+#define PGF_PRESERVE (PGF_ATTR_MOD | PGF_ATTR_REF | PGF_EOP_CHECKED | PGF_EOP_VULN)
#define PMAP_NOCACHE PMAP_MD0
extern struct pmap *const kernel_pmap_ptr;
-#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
-#define pmap_kernel() (kernel_pmap_ptr)
+#define pmap_kernel() (kernel_pmap_ptr)
#define PMAP_STEAL_MEMORY /* Enable 'stealing' during boot */
-#define PMAP_PREFER(pa, va) pmap_prefer(pa, va)
+#define PMAP_PREFER(pa, va) pmap_prefer(pa, va)
extern vaddr_t pmap_prefer_mask;
/* pmap prefer alignment */
-#define PMAP_PREFER_ALIGN() \
+#define PMAP_PREFER_ALIGN() \
(pmap_prefer_mask ? pmap_prefer_mask + 1 : 0)
/* pmap prefer offset in alignment */
-#define PMAP_PREFER_OFFSET(of) ((of) & pmap_prefer_mask)
+#define PMAP_PREFER_OFFSET(of) ((of) & pmap_prefer_mask)
#define pmap_update(x) do { /* nothing */ } while (0)
@@ -159,17 +161,17 @@ int pmap_is_page_ro( pmap_t, vaddr_t, pt_entry_t);
void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cache);
vaddr_t pmap_prefer(vaddr_t, vaddr_t);
void pmap_set_modify(vm_page_t);
-void pmap_page_cache(vm_page_t, int);
+void pmap_page_cache(vm_page_t, u_int);
#define pmap_collect(x) do { /* nothing */ } while (0)
-#define pmap_unuse_final(p) do { /* nothing yet */ } while (0)
+#define pmap_unuse_final(p) do { /* nothing yet */ } while (0)
#define pmap_remove_holes(map) do { /* nothing */ } while (0)
-void pmap_update_user_page(pmap_t, vaddr_t, pt_entry_t);
+void pmap_update_user_page(pmap_t, vaddr_t, pt_entry_t);
#ifdef MULTIPROCESSOR
-void pmap_update_kernel_page(vaddr_t, pt_entry_t);
+void pmap_update_kernel_page(vaddr_t, pt_entry_t);
#else
-#define pmap_update_kernel_page(va, entry) tlb_update(va, entry)
+#define pmap_update_kernel_page(va, entry) tlb_update(va, entry)
#endif
/*
@@ -192,13 +194,13 @@ vm_page_t pmap_unmap_direct(vaddr_t);
* MD flags to pmap_enter:
*/
-#define PMAP_PA_MASK ~((paddr_t)PAGE_MASK)
+#define PMAP_PA_MASK ~((paddr_t)PAGE_MASK)
/* Kernel virtual address to page table entry */
#define kvtopte(va) \
(Sysmap + (((vaddr_t)(va) - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT))
/* User virtual address to pte page entry */
-#define uvtopte(va) (((va) >> PAGE_SHIFT) & (NPTEPG -1))
+#define uvtopte(va) (((va) >> PAGE_SHIFT) & (NPTEPG -1))
extern pt_entry_t *Sysmap; /* kernel pte table */
extern u_int Sysmapsize; /* number of pte's in Sysmap */
diff --git a/sys/arch/mips64/mips64/context.S b/sys/arch/mips64/mips64/context.S
index ff3226265ca..b19316cb26e 100644
--- a/sys/arch/mips64/mips64/context.S
+++ b/sys/arch/mips64/mips64/context.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: context.S,v 1.52 2014/02/08 09:34:04 miod Exp $ */
+/* $OpenBSD: context.S,v 1.53 2014/03/22 00:01:04 miod Exp $ */
/*
* Copyright (c) 2002-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -176,6 +176,25 @@ NON_LEAF(cpu_switchto_asm, FRAMESZ(CF_SZ), ra)
MTC0_HAZARD
#endif
+#ifdef CPU_R4000
+ /*
+ * Restore the number of wired TLB entries to the minimal possible
+ * value, in case the EOP errata workaround has caused more wired
+ * entries to be created.
+ */
+ lw a0, PCB_NWIRED(t3)
+ beqz a0, 1f
+ sw zero, PCB_NWIRED(t3)
+
+ li a1, UPAGES / 2
+ move s0, v0 # save asid
+ mtc0 a1, COP_0_TLB_WIRED
+ jal tlb_flush # clear formerly wired entries
+ addu a0, a1, a0
+ move v0, s0
+1:
+#endif
+
#if UPAGES > 1 /* { */
#ifdef CPU_R8000
#error Your processor has just been eaten by a grue.
@@ -193,7 +212,7 @@ NON_LEAF(cpu_switchto_asm, FRAMESZ(CF_SZ), ra)
LA t1, CKSEG0_BASE
PTR_SUBU t2, t3, t1
bgez t2, ctx3 # in CKSEG0
- LA t1, VM_MIN_KERNEL_ADDRESS # safe if expands to > 1 insn
+ LA t1, VM_MIN_KERNEL_ADDRESS # (safe if expands to > 1 insn)
PTR_SUBU t2, t3, t1
bltz t2, ctx3 # not mapped.
PTR_SRL t2, PGSHIFT+1
@@ -263,6 +282,7 @@ ctx2:
tlbwi
TLB_HAZARD
#endif /* } UPAGES > 2 */
+ctx3:
#else /* } UPAGES > 1 { */
#if PG_ASID_SHIFT != 0
dsll v0, PG_ASID_SHIFT
@@ -270,8 +290,6 @@ ctx2:
dmtc0 v0, COP_0_TLB_HI # init high entry (tlbid)
#endif /* } UPAGES > 1 */
-ctx3:
-
#ifdef CPU_LOONGSON2
li v0, COP_0_DIAG_ITLB_CLEAR | COP_0_DIAG_BTB_CLEAR | COP_0_DIAG_RAS_DISABLE
dmtc0 v0, COP_0_DIAG
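The recycling step the new assembly performs at every context switch is the same operation as eop_cleanup() in the new r4000_errata.c below; as a C-level sketch (tlb_set_wired() and tlb_flush() are the existing mips64 primitives, pcb the pcb whose transient wired entries are being recycled):

	/* Drop any transient wired entries left by the EOP workaround. */
	if (pcb->pcb_nwired != 0) {
		tlb_set_wired(UPAGES / 2);	/* keep only the u-area pairs */
		tlb_flush((UPAGES / 2) + pcb->pcb_nwired);
		pcb->pcb_nwired = 0;
	}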
diff --git a/sys/arch/mips64/mips64/genassym.cf b/sys/arch/mips64/mips64/genassym.cf
index ff531bb3493..9de943a6ceb 100644
--- a/sys/arch/mips64/mips64/genassym.cf
+++ b/sys/arch/mips64/mips64/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.9 2012/06/24 16:26:04 miod Exp $
+# $OpenBSD: genassym.cf,v 1.10 2014/03/22 00:01:04 miod Exp $
#
# Copyright (c) 1997 Per Fogelstrom / Opsycon AB
#
@@ -56,6 +56,7 @@ member PCB_FPREGS pcb_regs.f0
member pcb_context
member pcb_onfault
member pcb_segtab
+member pcb_nwired
struct cpu_info
member ci_cpuid
diff --git a/sys/arch/mips64/mips64/pmap.c b/sys/arch/mips64/mips64/pmap.c
index 131e981d78d..7f2cac196b6 100644
--- a/sys/arch/mips64/mips64/pmap.c
+++ b/sys/arch/mips64/mips64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.71 2014/03/21 21:49:45 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.72 2014/03/22 00:01:04 miod Exp $ */
/*
* Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -59,7 +59,6 @@ int pmap_pv_lowat = PMAP_PV_LOWAT;
uint pmap_alloc_tlbpid(struct proc *);
int pmap_enter_pv(pmap_t, vaddr_t, vm_page_t, pt_entry_t *);
-void pmap_page_cache(vm_page_t, int);
void pmap_remove_pv(pmap_t, vaddr_t, paddr_t);
void *pmap_pg_alloc(struct pool *, int, int *);
void pmap_pg_free(struct pool *, void *);
@@ -1036,11 +1035,29 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
DPRINTF(PDB_ENTER, ("pmap_enter: new pte 0x%08x\n", npte));
}
+#ifdef CPU_R4000
+ /*
+ * If mapping an executable page, check for the R4000 EOP bug, and
+ * flag it in the pte.
+ */
+ if (r4000_errata != 0) {
+ if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
+ if ((pg->pg_flags & PGF_EOP_CHECKED) == 0)
+ atomic_setbits_int(&pg->pg_flags,
+ PGF_EOP_CHECKED |
+ eop_page_check(pa));
+
+ if (pg->pg_flags & PGF_EOP_VULN)
+ npte |= PG_SP;
+ }
+ }
+#endif
+
*pte = npte;
pmap_update_user_page(pmap, va, npte);
/*
- * If mapping a memory space address invalidate ICache.
+ * If mapping an executable page, invalidate ICache.
*/
if (pg != NULL && (prot & VM_PROT_EXECUTE))
Mips_InvalidateICache(ci, va, PAGE_SIZE);
@@ -1248,6 +1265,10 @@ pmap_zero_page(struct vm_page *pg)
mem_zero_page(va);
if (df || cache_valias_mask != 0)
Mips_HitSyncDCache(ci, va, PAGE_SIZE);
+
+#ifdef CPU_R4000
+ atomic_clearbits_int(&pg->pg_flags, PGF_EOP_CHECKED | PGF_EOP_VULN);
+#endif
}
/*
@@ -1297,6 +1318,12 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
Mips_HitInvalidateDCache(ci, s, PAGE_SIZE);
if (df || cache_valias_mask != 0)
Mips_HitSyncDCache(ci, d, PAGE_SIZE);
+
+#ifdef CPU_R4000
+ atomic_clearbits_int(&dstpg->pg_flags, PGF_EOP_CHECKED | PGF_EOP_VULN);
+ atomic_setbits_int(&dstpg->pg_flags,
+ srcpg->pg_flags & (PGF_EOP_CHECKED | PGF_EOP_VULN));
+#endif
}
/*
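A note on the asymmetry between the two hunks above: zeroing a page destroys whatever was in its last word, so its cached EOP classification must be dropped and recomputed on next use; copying a page reproduces the source's last word exactly, so the source's PGF_EOP_CHECKED / PGF_EOP_VULN bits can simply be carried over to the destination.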
@@ -1427,7 +1454,7 @@ pmap_is_page_ro(pmap_t pmap, vaddr_t va, pt_entry_t entry)
* mappings to cached or uncached.
*/
void
-pmap_page_cache(vm_page_t pg, int mode)
+pmap_page_cache(vm_page_t pg, u_int mode)
{
pv_entry_t pv;
pt_entry_t *pte, entry;
diff --git a/sys/arch/mips64/mips64/r4000_errata.c b/sys/arch/mips64/mips64/r4000_errata.c
new file mode 100644
index 00000000000..c246c1731ac
--- /dev/null
+++ b/sys/arch/mips64/mips64/r4000_errata.c
@@ -0,0 +1,245 @@
+/* $OpenBSD: r4000_errata.c,v 1.1 2014/03/22 00:01:04 miod Exp $ */
+
+/*
+ * Copyright (c) 2014 Miodrag Vallat.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * The following routines attempt to work around the `end-of-page' errata
+ * affecting R4000 processors rev < 3.
+ *
+ * This particular errata, scarcely documented as errata #4 and #14 in the
+ * `R4000PC, R4000SC Errata, Processor Revision 2.2 and 3.0' document,
+ * is not recoverable.
+ *
+ *
+ * This errata is triggered by branch instructions in the last word of a
+ * page, when the next page (containing the delay slot instruction) causes
+ * a TLB miss. The only safe way to avoid it is to have the toolchain
+ * force all branch instructions to be aligned on 8-byte boundaries, but
+ * that wouldn't prevent a rogue binary (or just-in-time compilation)
+ * from violating this requirement.
+ *
+ * The following logic is a ``best effort'' (well, ok, ``lazy man's effort'')
+ * at trying to prevent the errata from triggering. It will not be enough
+ * when confronted with a carefully crafted binary (but then, there are easier
+ * ways to get kernel mode privileges from userland, when running on the R4000
+ * processors vulnerable to the end-of-page errata, so why bother?). Yet,
+ * experience has shown this code is surprisingly good enough to allow for
+ * regular binaries to run, with a minimal performance hit.
+ *
+ *
+ * The idea behind this code is simple:
+ * - executable pages are checked - with eop_page_check() - for a branch in
+ * their last word. If they are vulnerable to this errata, page table entries
+ * for these pages get the `special' bit set.
+ * - tlb miss handlers will check for the `special' bit set in the pte and
+ * will always defer to the C code in trap() in that case. trap() will
+ * then invoke eop_tlb_miss_handler(), which will 1) force the next page
+ * to be faulted in, and 2) set up wired TLB entries for both the vulnerable
+ * page and the next page (and their neighbors if they do not share the same
+ * TLB pair), so that there is no risk of a TLB miss when the branch
+ * instruction is reached.
+ * - context switches will remove these wired entries.
+ * - tlb modification handlers will check for the current exception PC, and
+ * will remove the wired entries if the exception PC is no longer in the
+ * vulnerable page.
+ *
+ *
+ * There are a few limitations:
+ * - heavy paging may cause the page next to a vulnerable page to be swapped
+ * out (this code does not attempt to wire the vm_page). It would be worth
+ * mapping a page full of special break instructions when the page gets
+ * swapped out.
+ * - there might be other vulnerable pages in the wired tlb entries being
+ * set up. It should be simple enough to walk the next pages until the last
+ * would-be-wired TLB pair contains two safe pages. However, the number of
+ * TLB entries is quite limited, so a limit has to be set at some point.
+ * - no effort has been made to catch executable pages being temporarily made
+ * writable, then vulnerable (by putting a branch instruction in the last
+ * word). This is unlikely to happen (except for just-in-time compilation).
+ *
+ *
+ * Note that, by using 16KB page sizes, the number of vulnerable pages is
+ * reduced.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+
+#include <machine/cpu.h>
+
+#include <uvm/uvm.h>
+
+int r4000_errata;
+
+/*
+ * Check for an R4000 end-of-page errata condition in an executable code page.
+ * Returns a bitmask to set in the given page's pg_flags.
+ */
+u_int
+eop_page_check(paddr_t pa)
+{
+ uint32_t insn;
+
+ insn = *(uint32_t *)PHYS_TO_XKPHYS(pa + PAGE_SIZE - 4, CCA_CACHED);
+ if (classify_insn(insn) != INSNCLASS_NEUTRAL)
+ return PGF_EOP_VULN;
+
+ return 0;
+}
+
+/*
+ * Handle a TLB miss exception for a page marked as able to trigger the
+ * end-of-page errata.
+ * Returns nonzero if the exception has been completely serviced, and no
+ * further processing in the trap handler is necessary.
+ */
+int
+eop_tlb_miss_handler(struct trap_frame *trapframe, struct cpu_info *ci,
+ struct proc *p)
+{
+ struct pcb *pcb;
+ vaddr_t va, faultva;
+ struct vmspace *vm;
+ vm_map_t map;
+ pmap_t pmap;
+ pt_entry_t *pte, entry;
+ int onfault;
+ u_long asid;
+ uint i, npairs;
+ int64_t tlbidx;
+
+ /*
+ * Check for a valid pte with the `special' bit set (PG_SP)
+ * in order to apply the end-of-page errata workaround.
+ */
+
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+ faultva = trunc_page((vaddr_t)trapframe->badvaddr);
+ pmap = map->pmap;
+
+ pte = pmap_segmap(pmap, faultva);
+ if (pte == NULL)
+ return 0;
+
+ pte += uvtopte(faultva);
+ entry = *pte;
+ if ((entry & PG_SP) == 0)
+ return 0;
+
+ pcb = &p->p_addr->u_pcb;
+ asid = pmap->pm_asid[ci->ci_cpuid].pma_asid << PG_ASID_SHIFT;
+
+ /*
+ * For now, only allow one EOP vulnerable page to get a wired TLB
+ * entry. We will aggressively attempt to recycle the wired TLB
+ * entries created for that purpose, as soon as we no longer need
+ * the EOP page resident in the TLB.
+ */
+
+ /*
+ * Figure out how many pages to wire in the TLB.
+ */
+
+ if ((faultva & PG_ODDPG) != 0) {
+ /* odd page: its successor lies in the next TLB pair, need two pairs */
+ npairs = 2;
+ } else {
+ /* even page: its successor is the odd half of the same pair */
+ npairs = 1;
+ }
+
+ /*
+ * Fault-in the next page.
+ */
+
+ va = faultva + PAGE_SIZE;
+ pte = pmap_segmap(pmap, va);
+ if (pte != NULL)
+ pte += uvtopte(va);
+
+ if (pte == NULL || (*pte & PG_V) == 0) {
+ onfault = pcb->pcb_onfault;
+ pcb->pcb_onfault = 0;
+ KERNEL_LOCK();
+ (void)uvm_fault(map, va, 0, VM_PROT_READ | VM_PROT_EXECUTE);
+ KERNEL_UNLOCK();
+ pcb->pcb_onfault = onfault;
+ }
+
+ /*
+ * Clear possible TLB entries for the pages we're about to wire.
+ */
+
+ for (i = npairs * 2, va = faultva & PG_HVPN; i != 0;
+ i--, va += PAGE_SIZE) {
+ tlbidx = tlb_probe(va | asid);
+ if (tlbidx >= 0)
+ tlb_update_indexed(CKSEG0_BASE, PG_NV, PG_NV, tlbidx);
+ }
+
+ /*
+ * Reserve the extra wired TLB entries, and fill them with the existing ptes.
+ */
+
+ tlb_set_wired((UPAGES / 2) + npairs);
+ for (i = 0, va = faultva & PG_HVPN; i != npairs;
+ i++, va += 2 * PAGE_SIZE) {
+ pte = pmap_segmap(pmap, va);
+ if (pte == NULL)
+ tlb_update_indexed(va | asid,
+ PG_NV, PG_NV, (UPAGES / 2) + i);
+ else {
+ pte += uvtopte(va);
+ tlb_update_indexed(va | asid,
+ pte[0], pte[1], (UPAGES / 2) + i);
+ }
+ }
+
+ /*
+ * Save the base address of the EOP vulnerable page, to be able to
+ * figure out when the wired entry is no longer necessary.
+ */
+
+ pcb->pcb_nwired = npairs;
+ pcb->pcb_wiredva = faultva & PG_HVPN;
+ pcb->pcb_wiredpc = faultva;
+
+ return 1;
+}
+
+/*
+ * Attempt to clean up the current end-of-page errata workaround, if the
+ * current pc is no longer in an errata-vulnerable page.
+ */
+void
+eop_cleanup(struct trap_frame *trapframe, struct proc *p)
+{
+ struct pcb *pcb;
+
+ pcb = &p->p_addr->u_pcb;
+ if (pcb->pcb_nwired != 0) {
+ if (trunc_page(trapframe->pc) != pcb->pcb_wiredpc) {
+ tlb_set_wired(UPAGES / 2);
+ tlb_flush((UPAGES / 2) + pcb->pcb_nwired);
+ pcb->pcb_nwired = 0;
+ }
+ }
+}
+
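While the workaround is engaged for a process, the wired part of the TLB is laid out as sketched below (the first UPAGES / 2 pairs wiring the u-area are pre-existing mips64 behaviour; npairs is 1 or 2 as computed in eop_tlb_miss_handler()):

	pair 0 .. UPAGES/2 - 1                 u-area mappings (always wired)
	pair UPAGES/2 .. UPAGES/2 + npairs - 1 EOP page and its successor (transient)

The transient pairs are torn down either by cpu_switchto_asm() or by eop_cleanup(), whichever first notices they are no longer needed.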
diff --git a/sys/arch/mips64/mips64/tlbhandler.S b/sys/arch/mips64/mips64/tlbhandler.S
index 41bd083083c..63d30e3ae5a 100644
--- a/sys/arch/mips64/mips64/tlbhandler.S
+++ b/sys/arch/mips64/mips64/tlbhandler.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: tlbhandler.S,v 1.40 2014/02/08 09:34:04 miod Exp $ */
+/* $OpenBSD: tlbhandler.S,v 1.41 2014/03/22 00:01:04 miod Exp $ */
/*
* Copyright (c) 1995-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -153,11 +153,20 @@ xtlb_miss:
PTR_ADDU k1, k1, k0
PTE_LOAD k0, 0(k1)
PTE_LOAD k1, PTE_OFFS(k1)
+#ifdef CPU_R4000
+ dsll k0, k0, (64 - PG_FRAMEBITS - 1) # clear bits left of PG_FRAME...
+ dsll k1, k1, (64 - PG_FRAMEBITS - 1) # ...but PG_SP
+ bltz k0, _inv_seg # defer to trap() if PG_SP set.
+ bltz k1, _inv_seg
+ dsll k0, k0, 1
+ dsll k1, k1, 1
+#else
dsll k0, k0, (64 - PG_FRAMEBITS) # clear bits left of PG_FRAME
- dsrl k0, k0, (64 - PG_FRAMEBITS)
- dmtc0 k0, COP_0_TLB_LO0
dsll k1, k1, (64 - PG_FRAMEBITS)
+#endif
+ dsrl k0, k0, (64 - PG_FRAMEBITS)
dsrl k1, k1, (64 - PG_FRAMEBITS)
+ dmtc0 k0, COP_0_TLB_LO0
dmtc0 k1, COP_0_TLB_LO1
TLB_HAZARD
tlbwr # update TLB
@@ -175,7 +184,9 @@ _k_miss:
b k_tlb_miss # kernel tlbmiss.
nop
-_inv_seg: # No page table for this segment.
+_inv_seg: # No page table for this segment,
+ # or processing is too complex to be
+ # done here: invoke C code.
mfc0 k0, COP_0_STATUS_REG
andi k0, SR_KSU_USER
bne k0, zero, go_u_general
@@ -290,6 +301,7 @@ NLEAF(k_tlb_miss, 0)
andi k1, SR_KSU_USER
bne k1, zero, go_u_general
LA k1, (VM_MIN_KERNEL_ADDRESS) # compute index
+ # (safe if expands to > 1 insn)
PTR_SUBU k0, k0, k1
lw k1, Sysmapsize # index within range?
PTR_SRL k0, k0, PGSHIFT
@@ -547,6 +559,72 @@ LEAF(tlb_update, 0)
li v0, 0
END(tlb_update)
+/*
+ * int64_t tlb_probe(vaddr_t a0);
+ * Probe for a TLB entry covering the given address, and return its index
+ * (< 0 if no match)
+ */
+LEAF(tlb_probe, 0)
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ ori v0, v1, SR_INT_ENAB
+ xori v0, v0, SR_INT_ENAB
+ mtc0 v0, COP_0_STATUS_REG # Disable interrupts
+ MTC0_SR_IE_HAZARD
+ dli v0, (PG_HVPN | PG_ASID_MASK)
+ and a0, a0, v0
+ dmfc0 ta0, COP_0_TLB_HI # Save current PID
+ dmtc0 a0, COP_0_TLB_HI # Init high reg
+ TLB_HAZARD
+ tlbp # Probe for the entry.
+ TLB_HAZARD # necessary?
+ dmtc0 ta0, COP_0_TLB_HI # restore PID
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register
+ MTC0_SR_IE_HAZARD
+ j ra
+ mfc0 v0, COP_0_TLB_INDEX # Return index
+END(tlb_probe)
+
+/*
+ * void tlb_update_indexed(vaddr_t a0, register_t a1, register_t a2, uint a3);
+ * Update a TLB entry pair.
+ */
+LEAF(tlb_update_indexed, 0)
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ ori v0, v1, SR_INT_ENAB
+ xori v0, v0, SR_INT_ENAB
+ mtc0 v0, COP_0_STATUS_REG # Disable interrupts
+ MTC0_SR_IE_HAZARD
+ dli v0, (PG_HVPN | PG_ASID_MASK)
+ and a0, a0, v0
+ dmfc0 ta0, COP_0_TLB_HI # Save current PID
+ li ta1, TLB_PAGE_MASK
+
+ mtc0 a3, COP_0_TLB_INDEX
+ TLB_HAZARD
+ tlbr
+ TLB_HAZARD # necessary?
+
+ mtc0 ta1, COP_0_TLB_PG_MASK # init mask
+ dmtc0 a0, COP_0_TLB_HI # init high reg.
+
+ dsll a1, a1, (64 - PG_FRAMEBITS) # clear bits left of PG_FRAME
+ dsrl a1, a1, (64 - PG_FRAMEBITS)
+ dmtc0 a1, COP_0_TLB_LO0 # init low reg0.
+ dsll a2, a2, (64 - PG_FRAMEBITS) # clear bits left of PG_FRAME
+ dsrl a2, a2, (64 - PG_FRAMEBITS)
+ dmtc0 a2, COP_0_TLB_LO1 # init low reg1.
+
+ TLB_HAZARD
+ tlbwi # update slot
+ TLB_HAZARD
+
+ dmtc0 ta0, COP_0_TLB_HI # restore PID
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register
+ MTC0_SR_IE_HAZARD
+ j ra
+ nop
+END(tlb_update_indexed)
+
/*---------------------------------------------------------------- tlb_read
* Read the TLB entry.
*/
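The two new primitives combine as in this condensed excerpt of eop_tlb_miss_handler() above (not additional code, just the call pattern):

	int64_t idx;

	idx = tlb_probe(va | asid);		/* any stale entry for this va? */
	if (idx >= 0)				/* yes: neutralize it in place */
		tlb_update_indexed(CKSEG0_BASE, PG_NV, PG_NV, idx);

tlb_update_indexed() is then reused with the real ptes to fill the freshly reserved wired slots.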
diff --git a/sys/arch/mips64/mips64/trap.c b/sys/arch/mips64/mips64/trap.c
index 87def00ef27..100dc2a647d 100644
--- a/sys/arch/mips64/mips64/trap.c
+++ b/sys/arch/mips64/mips64/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.88 2012/10/03 11:18:23 miod Exp $ */
+/* $OpenBSD: trap.c,v 1.89 2014/03/22 00:01:04 miod Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -269,6 +269,7 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
int onfault;
int typ = 0;
union sigval sv;
+ struct pcb *pcb;
switch (type) {
case T_TLB_MOD:
@@ -288,6 +289,7 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
trunc_page(trapframe->badvaddr), entry)) {
/* write to read only page in the kernel */
ftype = VM_PROT_WRITE;
+ pcb = &p->p_addr->u_pcb;
goto kernel_fault;
}
entry |= PG_M;
@@ -324,7 +326,8 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
trunc_page(trapframe->badvaddr), entry)) {
/* write to read only page */
ftype = VM_PROT_WRITE;
- goto fault_common;
+ pcb = &p->p_addr->u_pcb;
+ goto fault_common_no_miss;
}
entry |= PG_M;
*pte = entry;
@@ -343,6 +346,7 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
case T_TLB_LD_MISS:
case T_TLB_ST_MISS:
ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
+ pcb = &p->p_addr->u_pcb;
/* check for kernel address */
if (trapframe->badvaddr < 0) {
vaddr_t va;
@@ -350,16 +354,16 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
kernel_fault:
va = trunc_page((vaddr_t)trapframe->badvaddr);
- onfault = p->p_addr->u_pcb.pcb_onfault;
- p->p_addr->u_pcb.pcb_onfault = 0;
+ onfault = pcb->pcb_onfault;
+ pcb->pcb_onfault = 0;
KERNEL_LOCK();
rv = uvm_fault(kernel_map, trunc_page(va), 0, ftype);
KERNEL_UNLOCK();
- p->p_addr->u_pcb.pcb_onfault = onfault;
+ pcb->pcb_onfault = onfault;
if (rv == 0)
return;
if (onfault != 0) {
- p->p_addr->u_pcb.pcb_onfault = 0;
+ pcb->pcb_onfault = 0;
trapframe->pc = onfault_table[onfault];
return;
}
@@ -369,7 +373,7 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
* It is an error for the kernel to access user space except
* through the copyin/copyout routines.
*/
- if (p->p_addr->u_pcb.pcb_onfault != 0) {
+ if (pcb->pcb_onfault != 0) {
/*
* We want to resolve the TLB fault before invoking
* pcb_onfault if necessary.
@@ -381,11 +385,29 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
case T_TLB_LD_MISS+T_USER:
ftype = VM_PROT_READ;
+ pcb = &p->p_addr->u_pcb;
goto fault_common;
case T_TLB_ST_MISS+T_USER:
ftype = VM_PROT_WRITE;
+ pcb = &p->p_addr->u_pcb;
fault_common:
+
+#ifdef CPU_R4000
+ if (r4000_errata != 0) {
+ if (eop_tlb_miss_handler(trapframe, ci, p) != 0)
+ return;
+ }
+#endif
+
+fault_common_no_miss:
+
+#ifdef CPU_R4000
+ if (r4000_errata != 0) {
+ eop_cleanup(trapframe, p);
+ }
+#endif
+
{
vaddr_t va;
struct vmspace *vm;
@@ -396,12 +418,12 @@ fault_common:
map = &vm->vm_map;
va = trunc_page((vaddr_t)trapframe->badvaddr);
- onfault = p->p_addr->u_pcb.pcb_onfault;
- p->p_addr->u_pcb.pcb_onfault = 0;
+ onfault = pcb->pcb_onfault;
+ pcb->pcb_onfault = 0;
KERNEL_LOCK();
- rv = uvm_fault(map, trunc_page(va), 0, ftype);
- p->p_addr->u_pcb.pcb_onfault = onfault;
+ rv = uvm_fault(map, va, 0, ftype);
+ pcb->pcb_onfault = onfault;
/*
* If this was a stack access we keep track of the maximum
@@ -421,7 +443,7 @@ fault_common:
return;
if (!USERMODE(trapframe->sr)) {
if (onfault != 0) {
- p->p_addr->u_pcb.pcb_onfault = 0;
+ pcb->pcb_onfault = 0;
trapframe->pc = onfault_table[onfault];
return;
}
@@ -757,8 +779,9 @@ fault_common:
case T_ADDR_ERR_LD: /* misaligned access */
case T_ADDR_ERR_ST: /* misaligned access */
case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */
- if ((onfault = p->p_addr->u_pcb.pcb_onfault) != 0) {
- p->p_addr->u_pcb.pcb_onfault = 0;
+ pcb = &p->p_addr->u_pcb;
+ if ((onfault = pcb->pcb_onfault) != 0) {
+ pcb->pcb_onfault = 0;
trapframe->pc = onfault_table[onfault];
return;
}
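Condensed, the resulting fault routing in itsa() reads as follows (a paraphrase of the hunks above, not additional code):

	fault_common:			/* genuine TLB misses land here */
	#ifdef CPU_R4000
		if (r4000_errata != 0 &&
		    eop_tlb_miss_handler(trapframe, ci, p) != 0)
			return;		/* miss fully serviced by the workaround */
	#endif
	fault_common_no_miss:		/* TLB modification faults join here */
	#ifdef CPU_R4000
		if (r4000_errata != 0)
			eop_cleanup(trapframe, p);
	#endif
		/* ... regular uvm_fault() processing ... */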
diff --git a/sys/arch/sgi/sgi/machdep.c b/sys/arch/sgi/sgi/machdep.c
index 5e964d2ba75..a4e1c1c4f3e 100644
--- a/sys/arch/sgi/sgi/machdep.c
+++ b/sys/arch/sgi/sgi/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.135 2014/03/13 03:52:55 dlg Exp $ */
+/* $OpenBSD: machdep.c,v 1.136 2014/03/22 00:01:04 miod Exp $ */
/*
* Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -571,6 +571,14 @@ mips_init(int argc, void *argv, caddr_t boot_esym)
build_trampoline(XTLB_MISS_EXC_VEC, xtlb_handler);
#endif /* } */
+#ifdef CPU_R4000
+ /*
+ * Enable R4000 EOP errata workaround code if necessary.
+ */
+ if (cpufamily == MIPS_R4000 && ((cp0_get_prid() >> 4) & 0x0f) < 3)
+ r4000_errata = 1;
+#endif
+
/*
* Allocate U page(s) for proc[0], pm_tlbpid 1.
*/
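For reference, the revision test above follows the standard MIPS CP0 PrId register layout (bits 15-8: implementation, bits 7-4: major revision, bits 3-0: minor revision); a sketch of the decoding, under that assumption:

	uint32_t prid = cp0_get_prid();
	u_int maj = (prid >> 4) & 0x0f;	/* major revision number */

	/* R4000 rev 2.x and below are affected; rev 3.0 and R4400 are not. */
	if (cpufamily == MIPS_R4000 && maj < 3)
		r4000_errata = 1;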