summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPhilip Guenther <guenther@cvs.openbsd.org>2019-05-17 19:07:17 +0000
committerPhilip Guenther <guenther@cvs.openbsd.org>2019-05-17 19:07:17 +0000
commitceca1553ac9dbf319a9ca2cb3fe29b0d376713cf (patch)
tree003229ce3f9ee38623e5165aa19f35096e915d5f
parentecff3b858a6269d46f6bd402e35f6e55a64d6296 (diff)
Mitigate Intel's Microarchitectural Data Sampling vulnerability.
If the CPU has the new VERW behavior then that is used; otherwise the proper sequence from Intel's "Deep Dive" doc is used in the return-to-userspace and enter-VMM-guest paths. The enter-C3-idle path is not mitigated because it's only a problem when SMT/HT is enabled: mitigating everything when that's enabled would be a _huge_ set of changes that we see no point in doing. Update vmm(4) to pass through the MSR bits so that guests can apply the optimal mitigation. VMM help and specific feedback from mlarkin@; vendor-portability help from jsg@ and kettenis@. ok kettenis@ mlarkin@ deraadt@ jsg@
-rw-r--r--sys/arch/amd64/amd64/cpu.c140
-rw-r--r--sys/arch/amd64/amd64/genassym.cf4
-rw-r--r--sys/arch/amd64/amd64/identcpu.c4
-rw-r--r--sys/arch/amd64/amd64/locore.S22
-rw-r--r--sys/arch/amd64/amd64/mainbus.c7
-rw-r--r--sys/arch/amd64/amd64/mds.S192
-rw-r--r--sys/arch/amd64/amd64/vmm.c4
-rw-r--r--sys/arch/amd64/amd64/vmm_support.S16
-rw-r--r--sys/arch/amd64/conf/Makefile.amd644
-rw-r--r--sys/arch/amd64/conf/files.amd643
-rw-r--r--sys/arch/amd64/include/codepatch.h4
-rw-r--r--sys/arch/amd64/include/cpu.h6
-rw-r--r--sys/arch/amd64/include/specialreg.h5
-rw-r--r--sys/arch/amd64/include/vmmvar.h5
14 files changed, 397 insertions, 19 deletions
diff --git a/sys/arch/amd64/amd64/cpu.c b/sys/arch/amd64/amd64/cpu.c
index 2619995dda6..9e6c7b854b7 100644
--- a/sys/arch/amd64/amd64/cpu.c
+++ b/sys/arch/amd64/amd64/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.134 2019/03/25 20:29:25 guenther Exp $ */
+/* $OpenBSD: cpu.c,v 1.135 2019/05/17 19:07:15 guenther Exp $ */
/* $NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*-
@@ -146,6 +146,7 @@ struct cpu_softc {
void replacesmap(void);
void replacemeltdown(void);
+void replacemds(void);
extern long _stac;
extern long _clac;
@@ -190,6 +191,130 @@ replacemeltdown(void)
splx(s);
}
+/*
+ * Select and install the MDS (Microarchitectural Data Sampling)
+ * mitigation for this machine.  Called once after all CPUs have
+ * attached (from mainbus_attach()) so per-CPU flush buffers can be
+ * allocated for every cpu_info.  Three outcomes:
+ *   - CPU unaffected (non-Intel, or ARCH_CAPABILITIES reports
+ *     RDCL_NO/MDS_NO): NOP out the codepatch sites.
+ *   - Microcode advertises MD_CLEAR: leave the inline VERW sequence
+ *     at the codepatch sites in place.
+ *   - Otherwise: patch in a model-specific software sequence from
+ *     Intel's "Deep Dive" doc; buffer sizes and the model/stepping
+ *     table below follow that document -- TODO confirm against the
+ *     current revision when updating.
+ */
+void
+replacemds(void)
+{
+ static int replacedone = 0;
+ extern long mds_handler_bdw, mds_handler_ivb, mds_handler_skl;
+ extern long mds_handler_skl_sse, mds_handler_skl_avx;
+ extern long mds_handler_silvermont, mds_handler_knights;
+ struct cpu_info *ci = &cpu_info_primary;
+ CPU_INFO_ITERATOR cii;
+ void *handler = NULL, *vmm_handler = NULL;
+ const char *type;
+ int has_verw, s;
+
+ /* ci_mds_tmp must be 32byte aligned for AVX instructions */
+ CTASSERT((offsetof(struct cpu_info, ci_mds_tmp) -
+ offsetof(struct cpu_info, ci_PAGEALIGN)) % 32 == 0);
+
+ if (replacedone)
+ return;
+ replacedone = 1;
+
+ if (strcmp(cpu_vendor, "GenuineIntel") != 0 ||
+ ((ci->ci_feature_sefflags_edx & SEFF0EDX_ARCH_CAP) &&
+ (rdmsr(MSR_ARCH_CAPABILITIES) &
+ (ARCH_CAPABILITIES_RDCL_NO | ARCH_CAPABILITIES_MDS_NO)))) {
+ /* Unaffected, nop out the handling code */
+ has_verw = 0;
+ } else if (ci->ci_feature_sefflags_edx & SEFF0EDX_MD_CLEAR) {
+ /* new firmware, use VERW */
+ has_verw = 1;
+ } else {
+ int family = ci->ci_family;
+ int model = ci->ci_model;
+ int stepping = CPUID2STEPPING(ci->ci_signature);
+
+ has_verw = 0;
+ if (family == 0x6 &&
+ (model == 0x2e || model == 0x1e || model == 0x1f ||
+ model == 0x1a || model == 0x2f || model == 0x25 ||
+ model == 0x2c || model == 0x2d || model == 0x2a ||
+ model == 0x3e || model == 0x3a)) {
+ /* Nehalem, SandyBridge, IvyBridge */
+ handler = vmm_handler = &mds_handler_ivb;
+ type = "IvyBridge";
+ CPU_INFO_FOREACH(cii, ci) {
+ ci->ci_mds_buf = malloc(672, M_DEVBUF,
+ M_WAITOK);
+ /* handler reads the first 16 bytes; they must be zero */
+ memset(ci->ci_mds_buf, 0, 16);
+ }
+ } else if (family == 0x6 &&
+ (model == 0x3f || model == 0x3c || model == 0x45 ||
+ model == 0x46 || model == 0x56 || model == 0x4f ||
+ model == 0x47 || model == 0x3d)) {
+ /* Haswell and Broadwell */
+ handler = vmm_handler = &mds_handler_bdw;
+ type = "Broadwell";
+ CPU_INFO_FOREACH(cii, ci) {
+ ci->ci_mds_buf = malloc(1536, M_DEVBUF,
+ M_WAITOK);
+ }
+ } else if (family == 0x6 &&
+ ((model == 0x55 && stepping <= 5) || model == 0x4e ||
+ model == 0x5e || (model == 0x8e && stepping <= 0xb) ||
+ (model == 0x9e && stepping <= 0xc))) {
+ /*
+ * Skylake, KabyLake, CoffeeLake, WhiskeyLake,
+ * CascadeLake
+ */
+ /* XXX mds_handler_skl_avx512 */
+ if (xgetbv(0) & XCR0_AVX) {
+ handler = &mds_handler_skl_avx;
+ type = "Skylake AVX";
+ } else {
+ handler = &mds_handler_skl_sse;
+ type = "Skylake SSE";
+ }
+ /* VMM path picks SSE vs AVX at runtime via xgetbv */
+ vmm_handler = &mds_handler_skl;
+ CPU_INFO_FOREACH(cii, ci) {
+ vaddr_t b64;
+ /* 6kB flush area + 64-byte load area, aligned to 64 by hand */
+ b64 = (vaddr_t)malloc(6 * 1024 + 64 + 63,
+ M_DEVBUF, M_WAITOK);
+ ci->ci_mds_buf = (void *)((b64 + 63) & ~63);
+ memset(ci->ci_mds_buf, 0, 64);
+ }
+ } else if (family == 0x6 &&
+ (model == 0x37 || model == 0x4a || model == 0x4c ||
+ model == 0x4d || model == 0x5a || model == 0x5d ||
+ model == 0x6e || model == 0x65 || model == 0x75)) {
+ /* Silvermont, Airmont */
+ handler = vmm_handler = &mds_handler_silvermont;
+ type = "Silvermont";
+ CPU_INFO_FOREACH(cii, ci) {
+ ci->ci_mds_buf = malloc(256, M_DEVBUF,
+ M_WAITOK);
+ memset(ci->ci_mds_buf, 0, 16);
+ }
+ } else if (family == 0x6 && (model == 0x85 || model == 0x57)) {
+ handler = vmm_handler = &mds_handler_knights;
+ type = "KnightsLanding";
+ CPU_INFO_FOREACH(cii, ci) {
+ vaddr_t b64;
+ /* 1152 bytes, aligned to 64 by hand */
+ b64 = (vaddr_t)malloc(1152 + 63, M_DEVBUF,
+ M_WAITOK);
+ ci->ci_mds_buf = (void *)((b64 + 63) & ~63);
+ }
+ }
+ }
+
+ /* install the chosen mitigation at the codepatch sites */
+ if (handler != NULL) {
+ printf("cpu0: using %s MDS workaround\n", type);
+ s = splhigh();
+ codepatch_call(CPTAG_MDS, handler);
+ codepatch_call(CPTAG_MDS_VMM, vmm_handler);
+ splx(s);
+ } else if (has_verw)
+ printf("cpu0: using %s MDS workaround\n", "VERW");
+ else {
+ s = splhigh();
+ codepatch_nop(CPTAG_MDS);
+ codepatch_nop(CPTAG_MDS_VMM);
+ splx(s);
+ }
+}
+
#ifdef MULTIPROCESSOR
int mp_cpu_start(struct cpu_info *);
void mp_cpu_start_cleanup(struct cpu_info *);
@@ -910,6 +1035,9 @@ extern vector Xsyscall_meltdown, Xsyscall, Xsyscall32;
void
cpu_init_msrs(struct cpu_info *ci)
{
+ uint64_t msr;
+ int family;
+
wrmsr(MSR_STAR,
((uint64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
((uint64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48));
@@ -922,6 +1050,16 @@ cpu_init_msrs(struct cpu_info *ci)
wrmsr(MSR_GSBASE, (u_int64_t)ci);
wrmsr(MSR_KERNELGSBASE, 0);
+ family = ci->ci_family;
+ if (strcmp(cpu_vendor, "GenuineIntel") == 0 &&
+ (family > 6 || (family == 6 && ci->ci_model >= 0xd)) &&
+ rdmsr_safe(MSR_MISC_ENABLE, &msr) == 0 &&
+ (msr & MISC_ENABLE_FAST_STRINGS) == 0) {
+ msr |= MISC_ENABLE_FAST_STRINGS;
+ wrmsr(MSR_MISC_ENABLE, msr);
+ DPRINTF("%s: enabled fast strings\n", ci->ci_dev->dv_xname);
+ }
+
patinit(ci);
}
diff --git a/sys/arch/amd64/amd64/genassym.cf b/sys/arch/amd64/amd64/genassym.cf
index de40400d03c..2df3df69b43 100644
--- a/sys/arch/amd64/amd64/genassym.cf
+++ b/sys/arch/amd64/amd64/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.39 2018/10/04 05:00:40 guenther Exp $
+# $OpenBSD: genassym.cf,v 1.40 2019/05/17 19:07:15 guenther Exp $
# Written by Artur Grabowski art@openbsd.org, Public Domain
include <sys/param.h>
@@ -129,6 +129,8 @@ member CPU_INFO_KERN_CR3 ci_kern_cr3
member CPU_INFO_USER_CR3 ci_user_cr3
member CPU_INFO_KERN_RSP ci_kern_rsp
member CPU_INFO_INTR_RSP ci_intr_rsp
+member CPU_INFO_MDS_BUF ci_mds_buf
+member CPU_INFO_MDS_TMP ci_mds_tmp
export CPUF_USERSEGS
export CPUF_USERXSTATE
diff --git a/sys/arch/amd64/amd64/identcpu.c b/sys/arch/amd64/amd64/identcpu.c
index 7641ff7fe97..f1c5dba931e 100644
--- a/sys/arch/amd64/amd64/identcpu.c
+++ b/sys/arch/amd64/amd64/identcpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: identcpu.c,v 1.110 2018/10/20 20:40:54 kettenis Exp $ */
+/* $OpenBSD: identcpu.c,v 1.111 2019/05/17 19:07:15 guenther Exp $ */
/* $NetBSD: identcpu.c,v 1.1 2003/04/26 18:39:28 fvdl Exp $ */
/*
@@ -207,6 +207,8 @@ const struct {
}, cpu_seff0_edxfeatures[] = {
{ SEFF0EDX_AVX512_4FNNIW, "AVX512FNNIW" },
{ SEFF0EDX_AVX512_4FMAPS, "AVX512FMAPS" },
+ { SEFF0EDX_MD_CLEAR, "MD_CLEAR" },
+ { SEFF0EDX_TSXFA, "TSXFA" },
{ SEFF0EDX_IBRS, "IBRS,IBPB" },
{ SEFF0EDX_STIBP, "STIBP" },
{ SEFF0EDX_L1DF, "L1DF" },
diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index 2b58ca2a7e0..01cac6f47e6 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.117 2019/05/12 22:23:38 guenther Exp $ */
+/* $OpenBSD: locore.S,v 1.118 2019/05/17 19:07:15 guenther Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
@@ -643,8 +643,6 @@ IDTVEC_NOALIGN(syscall)
.Lsyscall_restore_registers:
RET_STACK_REFILL_WITH_RCX
- movq TF_RDI(%rsp),%rdi
- movq TF_RSI(%rsp),%rsi
movq TF_R8(%rsp),%r8
movq TF_R9(%rsp),%r9
movq TF_R10(%rsp),%r10
@@ -652,6 +650,14 @@ IDTVEC_NOALIGN(syscall)
movq TF_R13(%rsp),%r13
movq TF_R14(%rsp),%r14
movq TF_R15(%rsp),%r15
+
+ CODEPATCH_START
+ movw %ds,TF_R8(%rsp)
+ verw TF_R8(%rsp)
+ CODEPATCH_END(CPTAG_MDS)
+
+ movq TF_RDI(%rsp),%rdi
+ movq TF_RSI(%rsp),%rsi
movq TF_RBP(%rsp),%rbp
movq TF_RBX(%rsp),%rbx
@@ -810,8 +816,6 @@ intr_user_exit_post_ast:
.Lintr_restore_registers:
RET_STACK_REFILL_WITH_RCX
- movq TF_RDI(%rsp),%rdi
- movq TF_RSI(%rsp),%rsi
movq TF_R8(%rsp),%r8
movq TF_R9(%rsp),%r9
movq TF_R10(%rsp),%r10
@@ -819,6 +823,14 @@ intr_user_exit_post_ast:
movq TF_R13(%rsp),%r13
movq TF_R14(%rsp),%r14
movq TF_R15(%rsp),%r15
+
+ CODEPATCH_START
+ movw %ds,TF_R8(%rsp)
+ verw TF_R8(%rsp)
+ CODEPATCH_END(CPTAG_MDS)
+
+ movq TF_RDI(%rsp),%rdi
+ movq TF_RSI(%rsp),%rsi
movq TF_RBP(%rsp),%rbp
movq TF_RBX(%rsp),%rbx
diff --git a/sys/arch/amd64/amd64/mainbus.c b/sys/arch/amd64/amd64/mainbus.c
index b3da7975883..6d2080ee2b1 100644
--- a/sys/arch/amd64/amd64/mainbus.c
+++ b/sys/arch/amd64/amd64/mainbus.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mainbus.c,v 1.46 2019/05/04 11:34:47 kettenis Exp $ */
+/* $OpenBSD: mainbus.c,v 1.47 2019/05/17 19:07:15 guenther Exp $ */
/* $NetBSD: mainbus.c,v 1.1 2003/04/26 18:39:29 fvdl Exp $ */
/*
@@ -73,6 +73,8 @@
#include <machine/efifbvar.h>
#endif
+void replacemds(void);
+
int mainbus_match(struct device *, void *, void *);
void mainbus_attach(struct device *, struct device *, void *);
@@ -205,6 +207,9 @@ mainbus_attach(struct device *parent, struct device *self, void *aux)
config_found(self, &caa, mainbus_print);
}
+ /* All CPUs are attached, handle MDS */
+ replacemds();
+
#if NACPI > 0
if (!acpi_hasprocfvs)
#endif
diff --git a/sys/arch/amd64/amd64/mds.S b/sys/arch/amd64/amd64/mds.S
new file mode 100644
index 00000000000..991a4ae7f76
--- /dev/null
+++ b/sys/arch/amd64/amd64/mds.S
@@ -0,0 +1,192 @@
+/* $OpenBSD: mds.S,v 1.1 2019/05/17 19:07:15 guenther Exp $ */
+/*
+ * Copyright (c) 2019 Philip Guenther <guenther@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * ASM sequences for mitigating MDS on different Intel CPU models, taken from
+ * https://software.intel.com/security-software-guidance/insights/deep-dive-intel-analysis-microarchitectural-data-sampling
+ * and adjusted to fit OpenBSD style and kernel usage.
+ * Some naming inspired by FreeBSD's usage of these sequences.
+ */
+
+#include "assym.h"
+
+#include <machine/asm.h>
+#include <machine/specialreg.h>
+
+/*
+ * Nehalem/SandyBridge/IvyBridge sequence: save %xmm0 to the per-CPU
+ * MDS_TMP area and zero it, perform two fenced loads from MDS_BUF,
+ * then issue 40 non-temporal 16-byte stores starting at MDS_BUF+16,
+ * fencing again before restoring %xmm0.  Instruction order and the
+ * lfence/mfence placement follow Intel's "Deep Dive" doc -- do not
+ * reorder.
+ */
+ENTRY(mds_handler_ivb)
+ RETGUARD_SETUP(mds_handler_ivb, r11)
+ movq CPUVAR(MDS_BUF),%rax
+ movdqa %xmm0,CPUVAR(MDS_TMP)
+ pxor %xmm0,%xmm0
+
+ lfence
+ orpd (%rax),%xmm0
+ orpd (%rax),%xmm0
+ mfence
+ movl $40,%ecx
+ addq $16,%rax
+1: movntdq %xmm0,(%rax)
+ addq $16,%rax
+ decl %ecx
+ jnz 1b
+ mfence
+
+ movdqa CPUVAR(MDS_TMP),%xmm0
+ RETGUARD_CHECK(mds_handler_ivb, r11)
+ retq
+END(mds_handler_ivb)
+
+/*
+ * Haswell/Broadwell sequence: save %xmm0 to MDS_TMP and zero it,
+ * issue 40 non-temporal 16-byte stores into MDS_BUF, then copy the
+ * 1536-byte buffer onto itself with rep movsb (%rsi == %rdi ==
+ * MDS_BUF) before restoring %xmm0.  Fence placement per Intel's
+ * "Deep Dive" doc -- do not reorder.
+ */
+ENTRY(mds_handler_bdw)
+ RETGUARD_SETUP(mds_handler_bdw, r11)
+ movq CPUVAR(MDS_BUF),%rax
+ movdqa %xmm0,CPUVAR(MDS_TMP)
+ pxor %xmm0,%xmm0
+
+ movq %rax,%rdi
+ movq %rax,%rsi
+ movl $40,%ecx
+1: movntdq %xmm0,(%rax)
+ addq $16,%rax
+ decl %ecx
+ jnz 1b
+ mfence
+ movl $1536,%ecx
+ rep movsb
+ lfence
+
+ movdqa CPUVAR(MDS_TMP),%xmm0
+ RETGUARD_CHECK(mds_handler_bdw, r11)
+ retq
+END(mds_handler_bdw)
+
+/*
+ * Skylake-family dispatch stub: read XCR0 via xgetbv and tail-jump
+ * to the AVX variant if AVX state is enabled, else to the SSE one.
+ * Used as the CPTAG_MDS_VMM patch target (see replacemds()), where
+ * the SSE/AVX choice cannot be baked in at patch time.
+ */
+ENTRY(mds_handler_skl)
+ xorl %ecx,%ecx
+ xgetbv
+ testb $XCR0_AVX,%al
+ jne mds_handler_skl_avx
+ jmp mds_handler_skl_sse
+END(mds_handler_skl)
+
+/*
+ * Skylake-family sequence, SSE flavor: save %xmm0 to MDS_TMP and
+ * zero it, do two fenced loads from MDS_BUF, clflushopt twelve
+ * cache lines within the buffer, then zero 6144 bytes starting at
+ * MDS_BUF+64 with rep stosb.  MDS_BUF is the 64-byte-aligned 6kB+64
+ * region set up in replacemds().  Fence placement per Intel's
+ * "Deep Dive" doc -- do not reorder.
+ */
+ENTRY(mds_handler_skl_sse)
+ RETGUARD_SETUP(mds_handler_skl_sse, r11)
+ movq CPUVAR(MDS_BUF),%rax
+ leaq 64(%rax),%rdi
+ movdqa %xmm0,CPUVAR(MDS_TMP)
+ pxor %xmm0,%xmm0
+
+ lfence
+ orpd (%rax),%xmm0
+ orpd (%rax),%xmm0
+ xorl %eax,%eax
+1: clflushopt 5376(%rdi,%rax,8)
+ addl $8,%eax
+ cmpl $8*12,%eax
+ jb 1b
+ sfence
+ movl $6144,%ecx
+ xorl %eax,%eax
+ rep stosb
+ mfence
+
+ movdqa CPUVAR(MDS_TMP),%xmm0
+ RETGUARD_CHECK(mds_handler_skl_sse, r11)
+ retq
+END(mds_handler_skl_sse)
+
+/*
+ * Skylake-family sequence, AVX flavor: same structure as the SSE
+ * variant above but saves/zeroes the 32-byte %ymm0 instead of
+ * %xmm0 -- this is why ci_mds_tmp must be 32-byte aligned (asserted
+ * in replacemds()).  Fence placement per Intel's "Deep Dive" doc --
+ * do not reorder.
+ */
+ENTRY(mds_handler_skl_avx)
+ RETGUARD_SETUP(mds_handler_skl_avx, r11)
+ movq CPUVAR(MDS_BUF),%rax
+ leaq 64(%rax),%rdi
+ vmovdqa %ymm0,CPUVAR(MDS_TMP)
+ vpxor %ymm0,%ymm0,%ymm0
+
+ lfence
+ vorpd (%rax),%ymm0,%ymm0
+ vorpd (%rax),%ymm0,%ymm0
+ xorl %eax,%eax
+1: clflushopt 5376(%rdi,%rax,8)
+ addl $8,%eax
+ cmpl $8*12,%eax
+ jb 1b
+ sfence
+ movl $6144,%ecx
+ xorl %eax,%eax
+ rep stosb
+ mfence
+
+ vmovdqa CPUVAR(MDS_TMP),%ymm0
+ RETGUARD_CHECK(mds_handler_skl_avx, r11)
+ retq
+END(mds_handler_skl_avx)
+
+/* we don't support AVX512 yet */
+/*
+ * %zmm0/AVX-512 analogue of the Skylake handlers above, compiled
+ * out.  NOTE(review): ci_mds_tmp is only 32 bytes (see cpu.h hunk),
+ * so enabling this would also need a 64-byte, 64-byte-aligned save
+ * area for vmovdqa64 -- confirm before removing the #if 0.
+ */
+#if 0
+ENTRY(mds_handler_skl_avx512)
+ RETGUARD_SETUP(mds_handler_skl_avx512, r11)
+ movq CPUVAR(MDS_BUF),%rax
+ leaq 64(%rax),%rdi
+ vmovdqa64 %zmm0,CPUVAR(MDS_TMP)
+ vpxor %zmm0,%zmm0,%zmm0
+
+ lfence
+ vorpd (%rax),%zmm0,%zmm0
+ vorpd (%rax),%zmm0,%zmm0
+ xorl %eax,%eax
+1: clflushopt 5376(%rdi,%rax,8)
+ addl $8,%eax
+ cmpl $8*12,%eax
+ jb 1b
+ sfence
+ movl $6144,%ecx
+ xorl %eax,%eax
+ rep stosb
+ mfence
+
+ vmovdqa64 CPUVAR(MDS_TMP),%zmm0
+ RETGUARD_CHECK(mds_handler_skl_avx512, r11)
+ retq
+END(mds_handler_skl_avx512)
+#endif
+
+/*
+ * Silvermont/Airmont (Atom) sequence: save %xmm0 to MDS_TMP and
+ * zero it, then issue 16 non-temporal 16-byte stores (256 bytes)
+ * into MDS_BUF and fence -- no load phase is used on these cores,
+ * per Intel's "Deep Dive" doc.
+ */
+ENTRY(mds_handler_silvermont)
+ RETGUARD_SETUP(mds_handler_silvermont, r11)
+ movq CPUVAR(MDS_BUF),%rax
+ movdqa %xmm0,CPUVAR(MDS_TMP)
+ pxor %xmm0,%xmm0
+
+ movl $16,%ecx
+1: movntdq %xmm0,(%rax)
+ addq $16,%rax
+ decl %ecx
+ jnz 1b
+ mfence
+
+ movdqa CPUVAR(MDS_TMP),%xmm0
+ RETGUARD_CHECK(mds_handler_silvermont, r11)
+ retq
+END(mds_handler_silvermont)
+
+/*
+ * Knights Landing/Mill sequence: zero the buffer with rep stosq in
+ * two passes (16 qwords, then 128 more qwords continuing at %rdi --
+ * 1152 bytes total, matching the allocation in replacemds()), then
+ * fence.  No vector registers are touched, so nothing is saved.
+ */
+ENTRY(mds_handler_knights)
+ RETGUARD_SETUP(mds_handler_knights, r11)
+ movq CPUVAR(MDS_BUF),%rdi
+ xorl %eax,%eax
+ movl $16,%ecx
+ rep stosq
+ movl $128,%ecx
+ rep stosq
+ mfence
+ RETGUARD_CHECK(mds_handler_knights, r11)
+ retq	/* was missing: without it execution falls off the end of the handler */
+END(mds_handler_knights)
diff --git a/sys/arch/amd64/amd64/vmm.c b/sys/arch/amd64/amd64/vmm.c
index bdbecb9560b..4ffb2ff899f 100644
--- a/sys/arch/amd64/amd64/vmm.c
+++ b/sys/arch/amd64/amd64/vmm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmm.c,v 1.244 2019/05/13 15:40:34 pd Exp $ */
+/* $OpenBSD: vmm.c,v 1.245 2019/05/17 19:07:15 guenther Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@@ -6334,7 +6334,7 @@ vmm_handle_cpuid(struct vcpu *vcpu)
*rax = 0; /* Highest subleaf supported */
*rbx = curcpu()->ci_feature_sefflags_ebx & VMM_SEFF0EBX_MASK;
*rcx = curcpu()->ci_feature_sefflags_ecx & VMM_SEFF0ECX_MASK;
- *rdx = 0;
+ *rdx = curcpu()->ci_feature_sefflags_edx & VMM_SEFF0EDX_MASK;
} else {
/* Unsupported subleaf */
DPRINTF("%s: function 0x07 (SEFF) unsupported subleaf "
diff --git a/sys/arch/amd64/amd64/vmm_support.S b/sys/arch/amd64/amd64/vmm_support.S
index c8cb97ba7f4..404cc2e9a62 100644
--- a/sys/arch/amd64/amd64/vmm_support.S
+++ b/sys/arch/amd64/amd64/vmm_support.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmm_support.S,v 1.15 2019/02/20 06:59:16 mlarkin Exp $ */
+/* $OpenBSD: vmm_support.S,v 1.16 2019/05/17 19:07:16 guenther Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@@ -18,6 +18,7 @@
#include "assym.h"
#include <machine/param.h>
#include <machine/asm.h>
+#include <machine/codepatch.h>
#include <machine/psl.h>
#include <machine/specialreg.h>
@@ -247,6 +248,19 @@ skip_init:
pushq %rbx
pushq %rsi /* Guest Regs Pointer */
+ /*
+ * XXX this MDS mitigation and the L1TF mitigation are believed
+ * XXX to overlap in some cases, but Intel hasn't provided the
+ * XXX information yet to make the correct choices.
+ */
+ CODEPATCH_START
+ subq $8, %rsp
+ movw %ds, (%rsp)
+ verw (%rsp)
+ addq $8, %rsp
+ CODEPATCH_END(CPTAG_MDS_VMM)
+ movq (%rsp),%rsi /* reload now that it's mucked with */
+
movq $VMCS_HOST_IA32_RSP, %rdi
movq %rsp, %rax
vmwrite %rax, %rdi /* Host RSP */
diff --git a/sys/arch/amd64/conf/Makefile.amd64 b/sys/arch/amd64/conf/Makefile.amd64
index 52c328d836d..f4f7a5f6e18 100644
--- a/sys/arch/amd64/conf/Makefile.amd64
+++ b/sys/arch/amd64/conf/Makefile.amd64
@@ -1,4 +1,4 @@
-# $OpenBSD: Makefile.amd64,v 1.113 2019/04/14 14:07:06 deraadt Exp $
+# $OpenBSD: Makefile.amd64,v 1.114 2019/05/17 19:07:16 guenther Exp $
# For instructions on building kernels consult the config(8) and options(4)
# manual pages.
@@ -173,7 +173,7 @@ cleandir: clean
depend obj:
locore0.o: ${_machdir}/${_mach}/locore0.S assym.h
-mutex.o vector.o copy.o spl.o: assym.h
+mutex.o vector.o copy.o spl.o mds.o: assym.h
mptramp.o acpi_wakecode.o vmm_support.o: assym.h
hardlink-obsd:
diff --git a/sys/arch/amd64/conf/files.amd64 b/sys/arch/amd64/conf/files.amd64
index 260fd500bad..15e7d6bf137 100644
--- a/sys/arch/amd64/conf/files.amd64
+++ b/sys/arch/amd64/conf/files.amd64
@@ -1,4 +1,4 @@
-# $OpenBSD: files.amd64,v 1.101 2018/10/26 20:26:19 kettenis Exp $
+# $OpenBSD: files.amd64,v 1.102 2019/05/17 19:07:16 guenther Exp $
maxpartitions 16
maxusers 2 16 128
@@ -32,6 +32,7 @@ file arch/amd64/amd64/cacheinfo.c
file arch/amd64/amd64/vector.S
file arch/amd64/amd64/copy.S
file arch/amd64/amd64/spl.S
+file arch/amd64/amd64/mds.S
file arch/amd64/amd64/intr.c
file arch/amd64/amd64/bus_space.c
diff --git a/sys/arch/amd64/include/codepatch.h b/sys/arch/amd64/include/codepatch.h
index 74f8f198113..62348a98dbd 100644
--- a/sys/arch/amd64/include/codepatch.h
+++ b/sys/arch/amd64/include/codepatch.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: codepatch.h,v 1.8 2018/10/04 05:00:40 guenther Exp $ */
+/* $OpenBSD: codepatch.h,v 1.9 2019/05/17 19:07:16 guenther Exp $ */
/*
* Copyright (c) 2014-2015 Stefan Fritsch <sf@sfritsch.de>
*
@@ -59,6 +59,8 @@ void codepatch_disable(void);
#define CPTAG_XSAVE 5
#define CPTAG_MELTDOWN_NOP 6
#define CPTAG_PCID_SET_REUSE 7
+#define CPTAG_MDS 8
+#define CPTAG_MDS_VMM 9
/*
* As stac/clac SMAP instructions are 3 bytes, we want the fastest
diff --git a/sys/arch/amd64/include/cpu.h b/sys/arch/amd64/include/cpu.h
index e0297a488fb..161aa6e465b 100644
--- a/sys/arch/amd64/include/cpu.h
+++ b/sys/arch/amd64/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.130 2019/05/12 22:23:38 guenther Exp $ */
+/* $OpenBSD: cpu.h,v 1.131 2019/05/17 19:07:16 guenther Exp $ */
/* $NetBSD: cpu.h,v 1.1 2003/04/26 18:39:39 fvdl Exp $ */
/*-
@@ -117,6 +117,10 @@ struct cpu_info {
u_int64_t ci_intr_rsp; /* U<-->K trampoline stack */
u_int64_t ci_user_cr3; /* U-K page table */
+ /* bits for mitigating Micro-architectural Data Sampling */
+ char ci_mds_tmp[32]; /* 32byte aligned */
+ void *ci_mds_buf;
+
struct pcb *ci_curpcb;
struct pcb *ci_idle_pcb;
diff --git a/sys/arch/amd64/include/specialreg.h b/sys/arch/amd64/include/specialreg.h
index fdf2ad77d12..a34d39e14d5 100644
--- a/sys/arch/amd64/include/specialreg.h
+++ b/sys/arch/amd64/include/specialreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: specialreg.h,v 1.83 2019/05/02 07:00:46 mlarkin Exp $ */
+/* $OpenBSD: specialreg.h,v 1.84 2019/05/17 19:07:16 guenther Exp $ */
/* $NetBSD: specialreg.h,v 1.1 2003/04/26 18:39:48 fvdl Exp $ */
/* $NetBSD: x86/specialreg.h,v 1.2 2003/04/25 21:54:30 fvdl Exp $ */
@@ -220,6 +220,8 @@
/* SEFF EDX bits */
#define SEFF0EDX_AVX512_4FNNIW 0x00000004 /* AVX-512 neural network insns */
#define SEFF0EDX_AVX512_4FMAPS 0x00000008 /* AVX-512 mult accum single prec */
+#define SEFF0EDX_MD_CLEAR 0x00000400 /* Microarch Data Clear */
+#define SEFF0EDX_TSXFA 0x00002000 /* TSX Forced Abort */
#define SEFF0EDX_IBRS 0x04000000 /* IBRS / IBPB Speculation Control */
#define SEFF0EDX_STIBP 0x08000000 /* STIBP Speculation Control */
#define SEFF0EDX_L1DF 0x10000000 /* L1D_FLUSH */
@@ -374,6 +376,7 @@
#define ARCH_CAPABILITIES_RSBA (1 << 2) /* RSB Alternate */
#define ARCH_CAPABILITIES_SKIP_L1DFL_VMENTRY (1 << 3)
#define ARCH_CAPABILITIES_SSB_NO (1 << 4) /* Spec St Byp safe */
+#define ARCH_CAPABILITIES_MDS_NO (1 << 5) /* microarch data-sampling */
#define MSR_FLUSH_CMD 0x10b
#define FLUSH_CMD_L1D_FLUSH 0x1 /* (1ULL << 0) */
#define MSR_BBL_CR_ADDR 0x116 /* PII+ only */
diff --git a/sys/arch/amd64/include/vmmvar.h b/sys/arch/amd64/include/vmmvar.h
index a6dfab50d00..e4df09f6f92 100644
--- a/sys/arch/amd64/include/vmmvar.h
+++ b/sys/arch/amd64/include/vmmvar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmmvar.h,v 1.65 2019/05/13 15:40:34 pd Exp $ */
+/* $OpenBSD: vmmvar.h,v 1.66 2019/05/17 19:07:16 guenther Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@@ -625,6 +625,9 @@ struct vm_rwregs_params {
SEFF0EBX_AVX512BW | SEFF0EBX_AVX512VL)
#define VMM_SEFF0ECX_MASK ~(SEFF0ECX_AVX512VBMI)
+/* EDX mask contains the bits to include */
+#define VMM_SEFF0EDX_MASK (SEFF0EDX_MD_CLEAR)
+
/*
* Extended function flags - copy from host minus:
* 0x80000001 EDX:RDTSCP Support