| author | Philip Guenther <guenther@cvs.openbsd.org> | 2018-06-05 05:04:32 +0000 |
|---|---|---|
| committer | Philip Guenther <guenther@cvs.openbsd.org> | 2018-06-05 05:04:32 +0000 |
| commit | 9fba89d2eba4539aaaaff1969bf0adee8800017c (patch) | |
| tree | 104aae4796ed7c1cf251dee67fb558efe6ea905a | |
| parent | 513e2731542e974ac2aee94beb05dead3e76d5b2 (diff) | |
Split "return to userspace via iretq" from intr_fast_exit into intr_user_exit.
Move AST handling from the bottom of alltraps and Xdoreti to the
top of the new routine.
syscall-return-via-iretq and the FPU #DNA trap jump into intr_user_exit after
the AST check (already performed for the former, skipped for the latter).
Delete a couple debugging hooks mlarkin@ and I used during Meltdown work
tested by many in snaps; thanks to brynet@ for spurious interrupt testing
earlier reviews and comments kettenis@ mlarkin@; prodding from deraadt@
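
For orientation, here is a minimal C sketch of the control flow the new intr_user_exit implements (the real code is the locore.S hunk in the diff below). It is illustrative only, not kernel code: astpending_is_set(), clear_astpending(), enable_intr(), disable_intr() and restore_and_iretq() are hypothetical stand-ins for the CHECK_ASTPENDING/CLEAR_ASTPENDING macros, the sti/cli instructions, and the intr_user_exit_post_ast register-restore/iretq path.

```c
/*
 * Illustrative sketch only -- not the kernel's code.
 * The stubs simulate a single pending AST so the loop structure is visible.
 * Build: cc -o sketch sketch.c
 */
#include <stdio.h>

struct trapframe { int dummy; };	/* stand-in for the real trapframe */

static int ast_pending = 1;		/* pretend one AST was posted */

static int  astpending_is_set(void) { return ast_pending; }	/* CHECK_ASTPENDING */
static void clear_astpending(void)  { ast_pending = 0; }	/* CLEAR_ASTPENDING */
static void enable_intr(void)  { }				/* sti */
static void disable_intr(void) { }				/* cli */

static void ast(struct trapframe *tf)				/* C-level AST handler */
{
	(void)tf;
	printf("ast(): reschedule, deliver signals, ...\n");
}

static void restore_and_iretq(struct trapframe *tf)		/* intr_user_exit_post_ast */
{
	(void)tf;
	/* restore registers, switch to the trampoline stack and the user
	 * page tables (Meltdown workaround), swapgs, iretq */
	printf("iretq to userspace\n");
}

/* Control flow of the new intr_user_exit, expressed in C. */
static void intr_user_exit_sketch(struct trapframe *tf)
{
	/* ast() runs with interrupts enabled, so another AST may be posted
	 * before we get back here -- hence the loop (jmp intr_user_exit). */
	while (astpending_is_set()) {
		clear_astpending();
		enable_intr();
		ast(tf);
		disable_intr();
	}
	restore_and_iretq(tf);
}

int main(void)
{
	struct trapframe tf = { 0 };
	intr_user_exit_sketch(&tf);
	return 0;
}
```

The loop mirrors the assembly's `jmp intr_user_exit`: each pass through ast() can leave another AST pending, so the check is redone with interrupts blocked before committing to the iretq path.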
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | sys/arch/amd64/amd64/locore.S | 113 |
| -rw-r--r-- | sys/arch/amd64/amd64/spl.S | 18 |
| -rw-r--r-- | sys/arch/amd64/amd64/vector.S | 25 |
3 files changed, 106 insertions, 50 deletions
diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index 1ca6250afdb..8e94a2aae9b 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: locore.S,v 1.96 2018/05/20 18:14:01 guenther Exp $	*/
+/*	$OpenBSD: locore.S,v 1.97 2018/06/05 05:04:31 guenther Exp $	*/
 /*	$NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $	*/
 
 /*
@@ -589,7 +589,7 @@ NENTRY(Xsyscall_untramp)
 	/* Could registers have been changed that require an iretq? */
 	testl	$MDP_IRET, P_MD_FLAGS(%r14)
-	jne	intr_fast_exit
+	jne	intr_user_exit_post_ast
 
 	movq	TF_RDI(%rsp),%rdi
 	movq	TF_RSI(%rsp),%rsi
@@ -675,15 +675,45 @@ NENTRY(proc_trampoline)
 
 /*
- * Return via iretq, for real interrupts and signal returns
+ * Returning to userspace via iretq.  We do things in this order:
+ * - check for ASTs
+ * - DIAGNOSTIC: no more C calls after this, so check the SPL
+ * - restore most registers
+ * - restore FS.base if it's not already in the CPU
+ * - update the iret frame from the trapframe
+ * - finish reading from the trapframe
+ * - switch to the trampoline stack	\
+ * - jump to the .kutext segment	 |-- Meltdown workaround
+ * - switch to the user page tables	/
+ * - swapgs
+ * - iretq
  */
-NENTRY(intr_fast_exit)
+NENTRY(intr_user_exit)
 #ifdef DIAGNOSTIC
 	pushfq
 	popq	%rdx
 	testq	$PSL_I,%rdx
-	jnz	.Lintr_exit_not_blocked
+	jnz	.Lintr_user_exit_not_blocked
+#endif /* DIAGNOSTIC */
+
+	/* Check for ASTs */
+	CHECK_ASTPENDING(%r11)
+	je	intr_user_exit_post_ast
+	CLEAR_ASTPENDING(%r11)
+	sti
+	movq	%rsp,%rdi
+	call	_C_LABEL(ast)
+	cli
+	jmp	intr_user_exit
+
+	.global	intr_user_exit_post_ast
+intr_user_exit_post_ast:
+#ifdef DIAGNOSTIC
+	/* no more C calls after this, so check the SPL */
+	cmpl	$0,CPUVAR(ILEVEL)
+	jne	.Luser_spl_not_lowered
 #endif /* DIAGNOSTIC */
+
 	movq	TF_RDI(%rsp),%rdi
 	movq	TF_RSI(%rsp),%rsi
 	movq	TF_R8(%rsp),%r8
@@ -696,11 +726,6 @@ NENTRY(intr_fast_exit)
 	movq	TF_RBP(%rsp),%rbp
 	movq	TF_RBX(%rsp),%rbx
 
-	testq	$SEL_RPL,TF_CS(%rsp)
-	je	intr_exit_recurse	/* returning back to kernel? */
-
-	/* returning to userspace.  XXX fix up iret frame here */
-
 	/* restore FS.base if it's not already in the CPU */
 	btsl	$CPUF_USERSEGS_BIT,CPUVAR(FLAGS)
 	jc	99f
@@ -712,14 +737,6 @@ NENTRY(intr_fast_exit)
 	wrmsr
 99:	/*
-	 * Returning to userspace.  We need to go things in this order:
-	 * - update the iret frame from the trapframe
-	 * - finish reading from the trapframe
-	 * - switch to the trampoline stack
-	 * - jump to the .kutext segment
-	 * - switch to the user page tables
-	 * - swapgs
-	 * - iretq
 	 * To get the final value for the register that was used
 	 * for the mov to %cr3, we need access to somewhere accessible
 	 * on the user page tables, so we save it in CPUVAR(SCRATCH)
@@ -757,7 +774,64 @@ KUENTRY(iretq_tramp)
 _C_LABEL(doreti_iret):
 	iretq
 
-NENTRY(intr_exit_recurse)
+#ifdef DIAGNOSTIC
+	.text
+.Lintr_user_exit_not_blocked:
+	movl	warn_once(%rip),%edi
+	testl	%edi,%edi
+	jnz	1f
+	incl	%edi
+	movl	%edi,warn_once(%rip)
+	leaq	.Lnot_blocked(%rip),%rdi
+	call	_C_LABEL(printf)
+#ifdef DDB
+	int	$3
+#endif /* DDB */
+1:	cli
+	jmp	intr_user_exit
+
+.Luser_spl_not_lowered:
+	sti
+	leaq	intr_spl_lowered(%rip),%rdi
+	movl	CPUVAR(ILEVEL),%esi
+	xorl	%edx,%edx	/* always SPL zero for userspace */
+	xorl	%eax,%eax
+	call	_C_LABEL(printf)
+#ifdef DDB
+	int	$3
+#endif /* DDB */
+	movl	$0,CPUVAR(ILEVEL)
+	cli
+	jmp	intr_user_exit
+
+	.section .rodata
+intr_spl_lowered:
+	.asciz	"WARNING: SPL NOT LOWERED ON TRAP EXIT %x %x\n"
+	.text
+#endif /* DIAGNOSTIC */
+
+
+/*
+ * Return to supervisor mode from trap or interrupt
+ */
+NENTRY(intr_fast_exit)
+#ifdef DIAGNOSTIC
+	pushfq
+	popq	%rdx
+	testq	$PSL_I,%rdx
+	jnz	.Lintr_exit_not_blocked
+#endif /* DIAGNOSTIC */
+	movq	TF_RDI(%rsp),%rdi
+	movq	TF_RSI(%rsp),%rsi
+	movq	TF_R8(%rsp),%r8
+	movq	TF_R9(%rsp),%r9
+	movq	TF_R10(%rsp),%r10
+	movq	TF_R12(%rsp),%r12
+	movq	TF_R13(%rsp),%r13
+	movq	TF_R14(%rsp),%r14
+	movq	TF_R15(%rsp),%r15
+	movq	TF_RBP(%rsp),%rbp
+	movq	TF_RBX(%rsp),%rbx
 	movq	TF_RDX(%rsp),%rdx
 	movq	TF_RCX(%rsp),%rcx
 	movq	TF_R11(%rsp),%r11
@@ -812,7 +886,6 @@ NENTRY(intr_exit_recurse)
 
 #ifdef DIAGNOSTIC
 .Lintr_exit_not_blocked:
-	xchgw	%bx, %bx
 	movl	warn_once(%rip),%edi
 	testl	%edi,%edi
 	jnz	1f
diff --git a/sys/arch/amd64/amd64/spl.S b/sys/arch/amd64/amd64/spl.S
index 2ea315f2fb5..1fee60d110c 100644
--- a/sys/arch/amd64/amd64/spl.S
+++ b/sys/arch/amd64/amd64/spl.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: spl.S,v 1.12 2018/02/21 19:24:15 guenther Exp $	*/
+/*	$OpenBSD: spl.S,v 1.13 2018/06/05 05:04:31 guenther Exp $	*/
 /*	$NetBSD: spl.S,v 1.3 2004/06/28 09:13:11 fvdl Exp $	*/
 
 /*
@@ -158,18 +158,6 @@ KIDTVEC(doreti)
 	jmp	*IS_RESUME(%rax)
 2:	/* Check for ASTs on exit to user mode. */
 	movl	%ebx,CPUVAR(ILEVEL)
-5:	CHECK_ASTPENDING(%r11)
-	je	3f
-	testb	$SEL_RPL,TF_CS(%rsp)
-	jz	3f
-4:	CLEAR_ASTPENDING(%r11)
-	sti
-	movq	%rsp, %rdi
-	call	_C_LABEL(ast)
-	cli
-	jmp	5b
-3:
-#ifdef DIAGNOSTIC
-	movl	$254,%esi
-#endif /* DIAGNOSTIC */
+	testb	$SEL_RPL,TF_CS(%rsp)
+	jnz	intr_user_exit
 	INTRFASTEXIT
diff --git a/sys/arch/amd64/amd64/vector.S b/sys/arch/amd64/amd64/vector.S
index d4b1f44c7e1..a4c337971f8 100644
--- a/sys/arch/amd64/amd64/vector.S
+++ b/sys/arch/amd64/amd64/vector.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vector.S,v 1.58 2018/05/26 23:10:13 guenther Exp $	*/
+/*	$OpenBSD: vector.S,v 1.59 2018/06/05 05:04:31 guenther Exp $	*/
 /*	$NetBSD: vector.S,v 1.5 2004/06/28 09:13:11 fvdl Exp $	*/
 
 /*
@@ -189,7 +189,7 @@ IDTVEC(trap07)
 	movq	%rsp, %rsi
 	call	_C_LABEL(fpudna)
 	cli
-	INTRFASTEXIT
+	jmp	intr_user_exit_post_ast
 IDTVEC(trap08)
 	pushq	$T_DOUBLEFLT
 	jmp	calltrap_specstk
@@ -325,15 +325,8 @@ KUENTRY(alltraps)
 	call	_C_LABEL(trap)
 2:	/* Check for ASTs on exit to user mode. */
 	cli
-	CHECK_ASTPENDING(%r11)
-	je	1f
 	testb	$SEL_RPL,TF_CS(%rsp)
-	jz	1f
-	CLEAR_ASTPENDING(%r11)
-	sti
-	movq	%rsp, %rdi
-	call	_C_LABEL(ast)
-	jmp	2b
+	jnz	intr_user_exit
 #ifndef DIAGNOSTIC
 1:	INTRFASTEXIT
 #else /* DIAGNOSTIC */
@@ -623,7 +616,7 @@ IDTVEC(intr_##name##num)				;\
 	SMAP_CLAC						;\
 	incl	CPUVAR(IDEPTH)					;\
 	movq	IS_HANDLERS(%r14),%rbx				;\
-6:								\
+6:	/* loop, walking chain of handlers */			\
 	movl	IH_LEVEL(%rbx),%r12d				;\
 	cmpl	%r13d,%r12d					;\
 	jle	7f						;\
@@ -641,13 +634,13 @@ IDTVEC(intr_##name##num)				;\
 4:	movq	IH_NEXT(%rbx),%rbx	/* next handler in chain */	;\
 	testq	%rbx,%rbx					;\
 	jnz	6b						;\
-5:								\
+5:	/* successfully handled */				\
 	cli							;\
 	unmask(num)		/* unmask it in hardware */	;\
 	late_ack(num)						;\
 	sti							;\
 	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */	;\
-7:								\
+7:	/* current IPL > handler's ih_level */			\
 	cli							;\
 	movq	$(1 << num),%rax				;\
 	orq	%rax,CPUVAR(IPENDING)				;\
@@ -655,16 +648,18 @@ IDTVEC(intr_##name##num)				;\
 	late_ack(num)						;\
 	sti							;\
 	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */	;\
-10:								\
+10:	/* currently masked */					\
 	cli							;\
 	movq	$(1 << num),%rax				;\
 	orq	%rax,CPUVAR(IPENDING)				;\
 	level_mask(num)						;\
 	late_ack(num)						;\
 	INTRFASTEXIT						;\
-9:								\
+9:	/* spurious interrupt */				\
 	unmask(num)						;\
 	late_ack(num)						;\
+	testb	$SEL_RPL,TF_CS(%rsp)				;\
+	jnz	intr_user_exit					;\
 	INTRFASTEXIT
 
 #define ICUADDR		IO_ICU1
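
Every exit point touched above (Xdoreti in spl.S, alltraps and the spurious-interrupt path in vector.S) now makes the same decision: if the saved %cs has its RPL bits set, the trap came from user mode and must leave through intr_user_exit; otherwise it takes the lighter intr_fast_exit. A hedged C sketch of that dispatch follows; the SEL_RPL value and the stub functions are illustrative stand-ins, not the kernel's definitions.

```c
/* Illustrative sketch of the "testb $SEL_RPL,TF_CS(%rsp); jnz intr_user_exit"
 * dispatch -- not kernel code.  Build: cc -o dispatch dispatch.c */
#include <stdio.h>
#include <stdint.h>

#define SEL_RPL		0x3	/* requested-privilege-level bits of a selector
				 * (illustrative; the real constant is in the
				 * amd64 segments.h) */

static void intr_user_exit(void) { printf("intr_user_exit: ASTs, trampoline, iretq\n"); }
static void intr_fast_exit(void) { printf("intr_fast_exit: restore registers, iretq\n"); }

static void trap_exit_dispatch(uint64_t tf_cs)
{
	if (tf_cs & SEL_RPL)	/* trapped while running user code? */
		intr_user_exit();
	else
		intr_fast_exit();
}

int main(void)
{
	trap_exit_dispatch(0x03);	/* example selector with RPL 3 (user) */
	trap_exit_dispatch(0x08);	/* example selector with RPL 0 (kernel) */
	return 0;
}
```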