| author | Philip Guenther <guenther@cvs.openbsd.org> | 2018-07-23 17:54:05 +0000 |
|---|---|---|
| committer | Philip Guenther <guenther@cvs.openbsd.org> | 2018-07-23 17:54:05 +0000 |
| commit | 3ab7002ec7a4428cb7748725912d58bf1f9e264f (patch) | |
| tree | 52e82788ef82ed18020dcf8debbf443454928617 /sys/arch | |
| parent | 190743775fde29c99c1cc550651d63006feb8550 (diff) | |
Do "Return stack refilling", based on the "Return stack underflow" discussion
and its associated appendix at https://support.google.com/faqs/answer/7625886
This should address at least some cases of "SpectreRSB" and earlier
Spectre variants; more commits to follow.
The refilling is done in the enter-kernel-from-userspace and
return-to-userspace-from-kernel paths, making sure to do it before
unblocking interrupts so that a successive interrupt can't get the
CPU to C code without doing this refill. Per the link above, it
also does it immediately after mwait, apparently in case the low-power
CPU states of idle-via-mwait flush the RSB.
ok mlarkin@ deraadt@
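For readers unfamiliar with the technique, here is a minimal standalone sketch of the refill idiom that the diff below adds as the RET_STACK_REFILL_WITH_RCX macro and open-codes after mwait. It is illustrative only: the function name rsb_refill and the comments are not part of the commit, and it assumes an amd64 target, a GNU-style inline assembler, and (as in the kernel) code built with -mno-red-zone, since the calls briefly push below the current stack pointer.

```c
/*
 * Illustrative sketch of the return-stack refill idiom; mirrors the
 * RET_STACK_REFILL_WITH_RCX macro from the diff below.  Assumes amd64,
 * GNU-style extended inline asm, and -mno-red-zone.
 */
static inline void
rsb_refill(void)
{
	__asm volatile(
	    "	mov	$8,%%rcx	;"	/* 8 loop iterations, 2 calls each */
	    "	.align	16,0x90		;"
	    "3:	call	5f		;"	/* push a benign return address */
	    "4:	pause			;"	/* a speculated 'ret' lands here */
	    "	call	4b		;"	/*   and spins harmlessly */
	    "	.align	16,0xcc		;"
	    "5:	call	7f		;"	/* second call of this iteration */
	    "6:	pause			;"
	    "	call	6b		;"
	    "	.align	16,0xcc		;"
	    "7:	loop	3b		;"	/* decrement %rcx, repeat while nonzero */
	    "	add	$(16*8),%%rsp	"	/* discard the pushed return addresses */
	    : /* no outputs */ : : "rcx", "memory");
}
```

In the kernel the same sequence is emitted by the RET_STACK_REFILL_WITH_RCX assembler macro on the syscall/interrupt entry and exit paths, and is open-coded after mwait, as shown in the diff below.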
Diffstat (limited to 'sys/arch')
| -rw-r--r-- | sys/arch/amd64/amd64/locore.S | 11 |
| -rw-r--r-- | sys/arch/amd64/include/asm.h | 17 |
| -rw-r--r-- | sys/arch/amd64/include/cpufunc.h | 18 |
| -rw-r--r-- | sys/arch/amd64/include/frameasm.h | 3 |
4 files changed, 42 insertions, 7 deletions
```diff
diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index e3cfbdc8d36..1318b32f850 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: locore.S,v 1.105 2018/07/12 14:11:11 guenther Exp $	*/
+/*	$OpenBSD: locore.S,v 1.106 2018/07/23 17:54:04 guenther Exp $	*/
 /*	$NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $	*/
 
 /*
@@ -585,6 +585,9 @@ IDTVEC_NOALIGN(syscall)
 	movq	%rax,CPUVAR(SCRATCH)
 	movq	CPUVAR(KERN_RSP),%rax
 	xchgq	%rax,%rsp
+	movq	%rcx,TF_RCX(%rsp)
+	movq	%rcx,TF_RIP(%rsp)
+	RET_STACK_REFILL_WITH_RCX
 	sti
 
 	/*
@@ -598,10 +601,8 @@ IDTVEC_NOALIGN(syscall)
 	movq	%rax,TF_RSP(%rsp)
 	movq	CPUVAR(SCRATCH),%rax
 	INTR_SAVE_MOST_GPRS_NO_ADJ
-	movq	%rcx,TF_RCX(%rsp)
 	movq	%r11, TF_RFLAGS(%rsp)	/* old rflags from syscall insn */
 	movq	$(GSEL(GUCODE_SEL, SEL_UPL)), TF_CS(%rsp)
-	movq	%rcx,TF_RIP(%rsp)
 	movq	%rax,TF_ERR(%rsp)	/* stash syscall # for SPL check */
 	INTR_CLEAR_GPRS
 
@@ -641,6 +642,8 @@ IDTVEC_NOALIGN(syscall)
 	jz	.Lsyscall_restore_fsbase
 
 .Lsyscall_restore_registers:
+	RET_STACK_REFILL_WITH_RCX
+
 	movq	TF_RDI(%rsp),%rdi
 	movq	TF_RSI(%rsp),%rsi
 	movq	TF_R8(%rsp),%r8
@@ -804,6 +807,8 @@ intr_user_exit_post_ast:
 	jz	.Lintr_restore_fsbase
 
 .Lintr_restore_registers:
+	RET_STACK_REFILL_WITH_RCX
+
 	movq	TF_RDI(%rsp),%rdi
 	movq	TF_RSI(%rsp),%rsi
 	movq	TF_R8(%rsp),%r8
diff --git a/sys/arch/amd64/include/asm.h b/sys/arch/amd64/include/asm.h
index 5c29eae0995..583e88829a3 100644
--- a/sys/arch/amd64/include/asm.h
+++ b/sys/arch/amd64/include/asm.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: asm.h,v 1.15 2018/07/12 14:11:11 guenther Exp $	*/
+/*	$OpenBSD: asm.h,v 1.16 2018/07/23 17:54:04 guenther Exp $	*/
 /*	$NetBSD: asm.h,v 1.2 2003/05/02 18:05:47 yamt Exp $	*/
 
 /*-
@@ -90,6 +90,21 @@
 #define KUENTRY(x) \
 	KUTEXT; _ALIGN_TRAPS; _GENTRY(x)
 
+/* Return stack refill, to prevent speculation attacks on natural returns */
+#define	RET_STACK_REFILL_WITH_RCX	\
+		mov	$8,%rcx		; \
+		_ALIGN_TEXT		; \
+	3:	call	5f		; \
+	4:	pause			; \
+		call	4b		; \
+		_ALIGN_TRAPS		; \
+	5:	call	7f		; \
+	6:	pause			; \
+		call	6b		; \
+		_ALIGN_TRAPS		; \
+	7:	loop	3b		; \
+		add	$(16*8),%rsp
+
 #endif /* _KERNEL */
 
 #ifdef __STDC__
diff --git a/sys/arch/amd64/include/cpufunc.h b/sys/arch/amd64/include/cpufunc.h
index 60c9de76725..7c7e391f88d 100644
--- a/sys/arch/amd64/include/cpufunc.h
+++ b/sys/arch/amd64/include/cpufunc.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpufunc.h,v 1.26 2018/06/30 10:16:35 kettenis Exp $	*/
+/*	$OpenBSD: cpufunc.h,v 1.27 2018/07/23 17:54:04 guenther Exp $	*/
 /*	$NetBSD: cpufunc.h,v 1.3 2003/05/08 10:27:43 fvdl Exp $	*/
 
 /*-
@@ -289,7 +289,21 @@ static __inline void
 mwait(u_long extensions, u_int hints)
 {
-	__asm volatile("mwait" : : "a" (hints), "c" (extensions));
+	__asm volatile(
+		"	mwait			;"
+		"	mov	$8,%%rcx	;"
+		"	.align	16,0x90		;"
+		"3:	call	5f		;"
+		"4:	pause			;"
+		"	call	4b		;"
+		"	.align	16,0xcc		;"
+		"5:	call	7f		;"
+		"6:	pause			;"
+		"	call	6b		;"
+		"	.align	16,0xcc		;"
+		"7:	loop	3b		;"
+		"	add	$(16*8),%%rsp	"
+		: "=c" (extensions) : "a" (hints));
 }
 
 static __inline void
diff --git a/sys/arch/amd64/include/frameasm.h b/sys/arch/amd64/include/frameasm.h
index 33ab8b4b347..3d4eade3762 100644
--- a/sys/arch/amd64/include/frameasm.h
+++ b/sys/arch/amd64/include/frameasm.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: frameasm.h,v 1.19 2018/07/21 02:19:54 guenther Exp $	*/
+/*	$OpenBSD: frameasm.h,v 1.20 2018/07/23 17:54:04 guenther Exp $	*/
 /*	$NetBSD: frameasm.h,v 1.1 2003/04/26 18:39:40 fvdl Exp $	*/
 
 #ifndef _AMD64_MACHINE_FRAMEASM_H
@@ -93,6 +93,7 @@ INTRENTRY_LABEL(label):	/* from kernel */	\
 	movq	CPUVAR(KERN_RSP),%rax	; \
 	xchgq	%rax,%rsp		; \
 	movq	%rcx,TF_RCX(%rsp)	; \
+	RET_STACK_REFILL_WITH_RCX	; \
 	/* copy trapno+err to the trap frame */ \
 	movq	0(%rax),%rcx		; \
 	movq	%rcx,TF_TRAPNO(%rsp)	; \
```
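Worked out, the stack arithmetic in the macro is: 8 loop iterations with two calls per iteration push 16 return addresses that are never returned through; at 8 bytes each that is 16 * 8 = 128 bytes, which the final `add $(16*8),%rsp` discards so the stack pointer is unchanged overall (16 matches a common return stack buffer depth). As the commit message notes, on the entry and exit paths the refill sits before interrupts are re-enabled with `sti`, so a pending interrupt cannot reach C code, and its `ret` instructions, with a user-primed RSB; after `mwait` it guards against low-power idle states having flushed the RSB.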