diff options
author | Mark Kettenis <kettenis@cvs.openbsd.org> | 2020-09-05 19:21:11 +0000 |
---|---|---|
committer | Mark Kettenis <kettenis@cvs.openbsd.org> | 2020-09-05 19:21:11 +0000 |
commit | b3c0699047682992f135e2aa6b5c53e3cfbb2ee5 (patch) | |
tree | 2c8b84854a8410b6a873553980864cd467e7d0e0 /sys/arch | |
parent | db623d5c60c8e8ae1df02e649acce2d1c855f9e0 (diff) |
Implement spilling of kernel SLB entries. Mostly from FreeBSD.
This makes it possible to use more SLB entries for the kernel than the
hardware supports. The design is such that a subset of the hardware SLB
entries can be replaced when needed. This makes sure the entries
mapping kernel code and data and the page tables are always present.
Traps for missing SLB entries are handled in real-mode and on a special
stack such that it doesn't have to rely on SLB entries mapping kernel
stacks.
With this in place we can increase KVA to 32GB. Hopefully that's enough
to support large memory configurations.
Diffstat (limited to 'sys/arch')
-rw-r--r-- | sys/arch/powerpc64/include/cpu.h | 4 | ||||
-rw-r--r-- | sys/arch/powerpc64/include/trap.h | 3 | ||||
-rw-r--r-- | sys/arch/powerpc64/include/vmparam.h | 2 | ||||
-rw-r--r-- | sys/arch/powerpc64/powerpc64/genassym.cf | 8 | ||||
-rw-r--r-- | sys/arch/powerpc64/powerpc64/machdep.c | 8 | ||||
-rw-r--r-- | sys/arch/powerpc64/powerpc64/pmap.c | 45 | ||||
-rw-r--r-- | sys/arch/powerpc64/powerpc64/trap_subr.S | 154 |
7 files changed, 214 insertions, 10 deletions
diff --git a/sys/arch/powerpc64/include/cpu.h b/sys/arch/powerpc64/include/cpu.h index 2fbf638ba3f..02df3a9a365 100644 --- a/sys/arch/powerpc64/include/cpu.h +++ b/sys/arch/powerpc64/include/cpu.h @@ -1,4 +1,4 @@ -/* $OpenBSD: cpu.h,v 1.25 2020/09/01 20:06:49 gkoehler Exp $ */ +/* $OpenBSD: cpu.h,v 1.26 2020/09/05 19:21:10 kettenis Exp $ */ /* * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org> @@ -64,6 +64,8 @@ struct cpu_info { struct slb ci_kernel_slb[32]; paddr_t ci_user_slb_pa; + register_t ci_slbsave[18]; + char ci_slbstack[1024]; #define CPUSAVE_LEN 9 register_t ci_tempsave[CPUSAVE_LEN]; diff --git a/sys/arch/powerpc64/include/trap.h b/sys/arch/powerpc64/include/trap.h index 6fd3b25d377..e79a967b078 100644 --- a/sys/arch/powerpc64/include/trap.h +++ b/sys/arch/powerpc64/include/trap.h @@ -148,8 +148,9 @@ #define DSISR_STORE (1UL << 25) -/* Magic pointer to store trap handler entry point */ +/* Magic pointers to store trap handler entry points */ #define TRAP_ENTRY 0x1f8 #define TRAP_HVENTRY 0x1f0 +#define TRAP_SLBENTRY 0x1e8 #endif /* _MACHINE_TRAP_H_ */ diff --git a/sys/arch/powerpc64/include/vmparam.h b/sys/arch/powerpc64/include/vmparam.h index 85cba6cdf68..0a7d4f8b753 100644 --- a/sys/arch/powerpc64/include/vmparam.h +++ b/sys/arch/powerpc64/include/vmparam.h @@ -47,4 +47,4 @@ #define VM_MAXUSER_ADDRESS 0xbffffffffffff000UL #define VM_MAX_ADDRESS 0xffffffffffffffffUL #define VM_MIN_KERNEL_ADDRESS 0xc000000000000000UL -#define VM_MAX_KERNEL_ADDRESS 0xc00000017fffffffUL +#define VM_MAX_KERNEL_ADDRESS 0xc0000007ffffffffUL diff --git a/sys/arch/powerpc64/powerpc64/genassym.cf b/sys/arch/powerpc64/powerpc64/genassym.cf index f90bbead19f..96988f4c900 100644 --- a/sys/arch/powerpc64/powerpc64/genassym.cf +++ b/sys/arch/powerpc64/powerpc64/genassym.cf @@ -1,4 +1,4 @@ -# $OpenBSD: genassym.cf,v 1.12 2020/07/21 21:36:58 kettenis Exp $ +# $OpenBSD: genassym.cf,v 1.13 2020/09/05 19:21:10 kettenis Exp $ # # Copyright (c) 1982, 1990 The Regents of the University 
of California. # All rights reserved. @@ -35,11 +35,14 @@ include <sys/proc.h> include <machine/intr.h> include <machine/pcb.h> +include <machine/pte.h> struct cpu_info member ci_curpcb member ci_curproc member ci_tempsave +member ci_slbsave +member ci_slbstack member ci_kernel_slb member ci_user_slb_pa define CPUSAVE_SRR0 0 @@ -138,3 +141,6 @@ member sf_r31 member sf_cr export IPL_NONE + +export SEGMENT_MASK +export USER_ADDR diff --git a/sys/arch/powerpc64/powerpc64/machdep.c b/sys/arch/powerpc64/powerpc64/machdep.c index c4635cb6a9e..d7d9625dce8 100644 --- a/sys/arch/powerpc64/powerpc64/machdep.c +++ b/sys/arch/powerpc64/powerpc64/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.59 2020/09/01 20:06:49 gkoehler Exp $ */ +/* $OpenBSD: machdep.c,v 1.60 2020/09/05 19:21:10 kettenis Exp $ */ /* * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org> @@ -76,8 +76,10 @@ extern uint64_t opal_entry; extern char trapcode[], trapcodeend[]; extern char hvtrapcode[], hvtrapcodeend[]; +extern char slbtrapcode[], slbtrapcodeend[]; extern char generictrap[]; extern char generichvtrap[]; +extern char kern_slbtrap[]; extern char initstack[]; @@ -161,8 +163,12 @@ init_powernv(void *fdt, void *tocbase) memcpy((void *)EXC_HFAC, hvtrapcode, hvtrapcodeend - hvtrapcode); memcpy((void *)EXC_HVI, hvtrapcode, hvtrapcodeend - hvtrapcode); + /* SLB trap needs special handling as well. */ + memcpy((void *)EXC_DSE, slbtrapcode, slbtrapcodeend - slbtrapcode); + *((void **)TRAP_ENTRY) = generictrap; *((void **)TRAP_HVENTRY) = generichvtrap; + *((void **)TRAP_SLBENTRY) = kern_slbtrap; /* Make the stubs visible to the CPU. 
*/ __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD); diff --git a/sys/arch/powerpc64/powerpc64/pmap.c b/sys/arch/powerpc64/powerpc64/pmap.c index 6538432dbfb..e4501e85094 100644 --- a/sys/arch/powerpc64/powerpc64/pmap.c +++ b/sys/arch/powerpc64/powerpc64/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.46 2020/09/04 17:27:42 kettenis Exp $ */ +/* $OpenBSD: pmap.c,v 1.47 2020/09/05 19:21:10 kettenis Exp $ */ /* * Copyright (c) 2015 Martin Pieuchot @@ -152,7 +152,8 @@ struct slb_desc { struct pmapvp1 *slbd_vp; }; -struct slb_desc kernel_slb_desc[32]; +/* Preallocated SLB entries for the kernel. */ +struct slb_desc kernel_slb_desc[16 + VM_KERNEL_SPACE_SIZE / SEGMENT_SIZE]; struct slb_desc *pmap_slbd_lookup(pmap_t, vaddr_t); @@ -1432,18 +1433,54 @@ pmap_set_kernel_slb(vaddr_t va) esid = va >> ADDR_ESID_SHIFT; - for (idx = 0; idx < 31; idx++) { + for (idx = 0; idx < nitems(kernel_slb_desc); idx++) { if (kernel_slb_desc[idx].slbd_vsid == 0) break; if (kernel_slb_desc[idx].slbd_esid == esid) return; } - KASSERT(idx < 31); + KASSERT(idx < nitems(kernel_slb_desc)); kernel_slb_desc[idx].slbd_esid = esid; kernel_slb_desc[idx].slbd_vsid = pmap_kernel_vsid(esid); } +/* + * Handle SLB entry spills for the kernel. This function runs without + * belt and suspenders in real-mode on a small per-CPU stack. + */ +void +pmap_spill_kernel_slb(vaddr_t va) +{ + struct cpu_info *ci = curcpu(); + uint64_t esid; + uint64_t slbe, slbv; + int idx; + + esid = va >> ADDR_ESID_SHIFT; + + for (idx = 0; idx < 31; idx++) { + if (ci->ci_kernel_slb[idx].slb_slbe == 0) + break; + slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID | idx; + if (ci->ci_kernel_slb[idx].slb_slbe == slbe) + return; + } + + /* + * If no free slot was found, randomly replace an entry in + * slot 15-30. 
+ */ + if (idx == 31) + idx = 15 + mftb() % 16; + + slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID | idx; + slbv = pmap_kernel_vsid(esid) << SLBV_VSID_SHIFT; + + ci->ci_kernel_slb[idx].slb_slbe = slbe; + ci->ci_kernel_slb[idx].slb_slbv = slbv; +} + void pmap_bootstrap_cpu(void) { diff --git a/sys/arch/powerpc64/powerpc64/trap_subr.S b/sys/arch/powerpc64/powerpc64/trap_subr.S index 07791734ec0..3eefabc6698 100644 --- a/sys/arch/powerpc64/powerpc64/trap_subr.S +++ b/sys/arch/powerpc64/powerpc64/trap_subr.S @@ -1,4 +1,4 @@ -/* $OpenBSD: trap_subr.S,v 1.15 2020/09/05 18:36:37 kettenis Exp $ */ +/* $OpenBSD: trap_subr.S,v 1.16 2020/09/05 19:21:10 kettenis Exp $ */ /* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $ */ /*- @@ -307,6 +307,158 @@ hvtrapcode: hvtrapcodeend: /* + * For SLB misses: do special things for the kernel + * + * Note: SPRG1 is always safe to overwrite any time the MMU was on, which is + * the only time this can be called. + */ + .globl slbtrapcode, slbtrapcodeend +slbtrapcode: + /* 0x00 */ + mtsprg1 %r1 /* save SP */ + GET_CPUINFO(%r1) + std %r2, (CI_SLBSAVE+16)(%r1) /* save r2 */ + mfcr %r2 + /* 0x10 */ + std %r2, (CI_SLBSAVE+104)(%r1) /* save CR */ + mfsrr1 %r2 /* test kernel mode */ + mtcr %r2 + bf 17, 1f /* branch if PSL_PR is false */ + /* 0x20 */ + /* User mode */ + ld %r2, (CI_SLBSAVE+104)(%r1) + mtcr %r2 /* restore CR */ + ld %r2, (CI_SLBSAVE+16)(%r1) /* restore r2 */ + mflr %r1 + /* 0x30 */ + mtsprg2 %r1 /* save LR in SPRG2 */ + ld %r1, TRAP_ENTRY(0) + mtlr %r1 + li %r1, 0x80 /* How to get the vector from LR */ + /* 0x40 */ + blrl /* Branch to generictrap */ +1: mflr %r2 /* Save the old LR in r2 */ + /* Kernel mode */ + ld %r1, TRAP_SLBENTRY(0) + mtlr %r1 + /* 0x50 */ + GET_CPUINFO(%r1) + blrl /* Branch to kern_slbtrap */ +/* must fit in 128 bytes! 
*/ +slbtrapcodeend: + +/* + * On entry: + * SPRG1: SP + * r1: pcpu + * r2: LR + * LR: branch address in trap region + */ + .globl kern_slbtrap +kern_slbtrap: + std %r2, (CI_SLBSAVE+136)(%r1) /* old LR */ + std %r3, (CI_SLBSAVE+24)(%r1) /* save R3 */ + + /* Check if this needs to be handled as a regular trap (userseg miss) */ + mfdar %r2 + lis %r3, SEGMENT_MASK@h + ori %r3, %r3, SEGMENT_MASK@l + andc %r2, %r2, %r3 /* R2 = segment base address */ + lis %r3, USER_ADDR@highesta + ori %r3, %r3, USER_ADDR@highera + sldi %r3, %r3, 32 + oris %r3, %r3, USER_ADDR@ha + ori %r3, %r3, USER_ADDR@l + cmpd %r2, %r3 /* Compare fault base to USER_ADDR */ + bne 1f + + /* User seg miss, handle as a regular trap */ + ld %r2, (CI_SLBSAVE+104)(%r1) /* Restore CR */ + mtcr %r2 + ld %r2, (CI_SLBSAVE+16)(%r1) /* Restore R2,R3 */ + ld %r3, (CI_SLBSAVE+24)(%r1) + ld %r1, (CI_SLBSAVE+136)(%r1) /* Save the old LR in r1 */ + mtsprg2 %r1 /* And then in SPRG2 */ + li %r1, 0x80 /* How to get the vector from LR */ + b generictrap /* Retain old LR using b */ + +1: /* Real kernel SLB miss */ + std %r0, (CI_SLBSAVE+0)(%r1) /* free all volatile regs */ + mfsprg1 %r2 /* Old R1 */ + std %r2, (CI_SLBSAVE+8)(%r1) + /* R2, R3 already saved */ + std %r4, (CI_SLBSAVE+32)(%r1) + std %r5, (CI_SLBSAVE+40)(%r1) + std %r6, (CI_SLBSAVE+48)(%r1) + std %r7, (CI_SLBSAVE+56)(%r1) + std %r8, (CI_SLBSAVE+64)(%r1) + std %r9, (CI_SLBSAVE+72)(%r1) + std %r10, (CI_SLBSAVE+80)(%r1) + std %r11, (CI_SLBSAVE+88)(%r1) + std %r12, (CI_SLBSAVE+96)(%r1) + /* CR already saved */ + mfxer %r2 /* save XER */ + std %r2, (CI_SLBSAVE+112)(%r1) + mflr %r2 /* save LR (SP already saved) */ + std %r2, (CI_SLBSAVE+120)(%r1) + mfctr %r2 /* save CTR */ + std %r2, (CI_SLBSAVE+128)(%r1) + + /* Call handler */ + addi %r1, %r1, CI_SLBSTACK-48+1024 + li %r2, ~15 + and %r1, %r1, %r2 + GET_TOCBASE(%r2) + mfdar %r3 + bl pmap_spill_kernel_slb + nop + + /* Save r28-31, restore r4-r12 */ + GET_CPUINFO(%r1) + ld %r4, (CI_SLBSAVE+32)(%r1) + ld %r5, 
(CI_SLBSAVE+40)(%r1) + ld %r6, (CI_SLBSAVE+48)(%r1) + ld %r7, (CI_SLBSAVE+56)(%r1) + ld %r8, (CI_SLBSAVE+64)(%r1) + ld %r9, (CI_SLBSAVE+72)(%r1) + ld %r10, (CI_SLBSAVE+80)(%r1) + ld %r11, (CI_SLBSAVE+88)(%r1) + ld %r12, (CI_SLBSAVE+96)(%r1) + std %r28, (CI_SLBSAVE+64)(%r1) + std %r29, (CI_SLBSAVE+72)(%r1) + std %r30, (CI_SLBSAVE+80)(%r1) + std %r31, (CI_SLBSAVE+88)(%r1) + + /* Restore kernel mapping */ + bl restore_kernsrs + + /* Restore remaining registers */ + ld %r28, (CI_SLBSAVE+64)(%r1) + ld %r29, (CI_SLBSAVE+72)(%r1) + ld %r30, (CI_SLBSAVE+80)(%r1) + ld %r31, (CI_SLBSAVE+88)(%r1) + + ld %r2, (CI_SLBSAVE+104)(%r1) + mtcr %r2 + ld %r2, (CI_SLBSAVE+112)(%r1) + mtxer %r2 + ld %r2, (CI_SLBSAVE+120)(%r1) + mtlr %r2 + ld %r2, (CI_SLBSAVE+128)(%r1) + mtctr %r2 + ld %r2, (CI_SLBSAVE+136)(%r1) + mtlr %r2 + + /* Restore r0-r3 */ + ld %r0, (CI_SLBSAVE+0)(%r1) + ld %r2, (CI_SLBSAVE+16)(%r1) + ld %r3, (CI_SLBSAVE+24)(%r1) + mfsprg1 %r1 + + /* Back to whatever we were doing */ + rfid + +/* * generichvtrap makes a hypervisor trap look like a normal trap. */ |