author		Dave Voutila <dv@cvs.openbsd.org>	2023-01-30 02:32:02 +0000
committer	Dave Voutila <dv@cvs.openbsd.org>	2023-01-30 02:32:02 +0000
commit		73fc9051051cba0e3dbf47bc0894e11ce65d013a (patch)
tree		586251b0d851731f9c60effc8539cc5089773d77 /sys/arch/amd64
parent		273aa7ad605d718b02de623b27f66be07c0fb695 (diff)
vmm(4): save and restore guest pkru.
Take a simple approach to saving and restoring PKRU when the host
has PKE support enabled: for now, use explicit rdpkru/wrpkru
instructions instead of xsave.
This functionality is still gated behind the amd64 pmap's check for
operation under a hypervisor, as well as vmm masking the cpuid bit
for PKU.
"if your diff is good, then commit it" -deraadt@
Diffstat (limited to 'sys/arch/amd64')
-rw-r--r--	sys/arch/amd64/amd64/vmm.c		| 27
-rw-r--r--	sys/arch/amd64/include/cpufunc.h	|  9
-rw-r--r--	sys/arch/amd64/include/vmmvar.h		|  5
3 files changed, 38 insertions, 3 deletions
diff --git a/sys/arch/amd64/amd64/vmm.c b/sys/arch/amd64/amd64/vmm.c
index 969f184a1dc..55244baa3e5 100644
--- a/sys/arch/amd64/amd64/vmm.c
+++ b/sys/arch/amd64/amd64/vmm.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vmm.c,v 1.335 2023/01/13 14:15:49 dv Exp $	*/
+/*	$OpenBSD: vmm.c,v 1.336 2023/01/30 02:32:01 dv Exp $	*/
 /*
  * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
  *
@@ -128,6 +128,7 @@ struct vmm_softc {
 	uint32_t		nr_svm_cpus;	/* [I] */
 	uint32_t		nr_rvi_cpus;	/* [I] */
 	uint32_t		nr_ept_cpus;	/* [I] */
+	uint8_t			pkru_enabled;	/* [I] */
 
 	/* Managed VMs */
 	struct vmlist_head	vm_list;	/* [v] */
@@ -429,6 +430,10 @@ vmm_attach(struct device *parent, struct device *self, void *aux)
 			sc->nr_ept_cpus++;
 	}
 
+	sc->pkru_enabled = 0;
+	if (rcr4() & CR4_PKE)
+		sc->pkru_enabled = 1;
+
 	SLIST_INIT(&sc->vm_list);
 	rw_init(&sc->vm_lock, "vm_list");
 
@@ -5029,11 +5034,21 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *vrp)
 
 		TRACEPOINT(vmm, guest_enter, vcpu, vrp);
 
+		/* Restore any guest PKRU state. */
+		if (vmm_softc->pkru_enabled)
+			wrpkru(vcpu->vc_pkru);
+
 		ret = vmx_enter_guest(&vcpu->vc_control_pa,
 		    &vcpu->vc_gueststate,
 		    (vcpu->vc_vmx_vmcs_state == VMCS_LAUNCHED),
 		    ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr);
 
+		/* Restore host PKRU state. */
+		if (vmm_softc->pkru_enabled) {
+			vcpu->vc_pkru = rdpkru(0);
+			wrpkru(PGK_VALUE);
+		}
+
 		bare_lgdt(&gdtr);
 		lidt(&idtr);
 		lldt(ldt_sel);
@@ -7331,12 +7346,22 @@ vcpu_run_svm(struct vcpu *vcpu, struct vm_run_params *vrp)
 			break;
 		}
 
+		/* Restore any guest PKRU state. */
+		if (vmm_softc->pkru_enabled)
+			wrpkru(vcpu->vc_pkru);
+
 		KASSERT(vmcb->v_intercept1 & SVM_INTERCEPT_INTR);
 
 		wrmsr(MSR_AMD_VM_HSAVE_PA, vcpu->vc_svm_hsa_pa);
 
 		ret = svm_enter_guest(vcpu->vc_control_pa,
 		    &vcpu->vc_gueststate, &gdt);
 
+		/* Restore host PKRU state. */
+		if (vmm_softc->pkru_enabled) {
+			vcpu->vc_pkru = rdpkru(0);
+			wrpkru(PGK_VALUE);
+		}
+
 		/*
 		 * On exit, interrupts are disabled, and we are running with
 		 * the guest FPU state still possibly on the CPU. Save the FPU
diff --git a/sys/arch/amd64/include/cpufunc.h b/sys/arch/amd64/include/cpufunc.h
index cd2d2db753d..0db2d135380 100644
--- a/sys/arch/amd64/include/cpufunc.h
+++ b/sys/arch/amd64/include/cpufunc.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpufunc.h,v 1.38 2023/01/20 16:01:04 deraadt Exp $	*/
+/*	$OpenBSD: cpufunc.h,v 1.39 2023/01/30 02:32:01 dv Exp $	*/
 /*	$NetBSD: cpufunc.h,v 1.3 2003/05/08 10:27:43 fvdl Exp $	*/
 
 /*-
@@ -241,6 +241,13 @@ rdpkru(u_int ecx)
 }
 
 static __inline void
+wrpkru(uint32_t pkru)
+{
+	uint32_t ecx = 0, edx = 0;
+	__asm volatile("wrpkru" : : "a" (pkru), "c" (ecx), "d" (edx));
+}
+
+static __inline void
 wrmsr(u_int msr, u_int64_t newval)
 {
 	__asm volatile("wrmsr" :
diff --git a/sys/arch/amd64/include/vmmvar.h b/sys/arch/amd64/include/vmmvar.h
index 344261fce1e..4e020bcafc9 100644
--- a/sys/arch/amd64/include/vmmvar.h
+++ b/sys/arch/amd64/include/vmmvar.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vmmvar.h,v 1.88 2023/01/28 14:40:53 dv Exp $	*/
+/*	$OpenBSD: vmmvar.h,v 1.89 2023/01/30 02:32:01 dv Exp $	*/
 /*
  * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
  *
@@ -957,6 +957,9 @@ struct vcpu {
 	/* Shadowed MSRs */
 	uint64_t vc_shadow_pat;		/* [v] */
 
+	/* Userland Protection Keys */
+	uint32_t vc_pkru;		/* [v] */
+
 	/* VMX only (all requiring [v]) */
 	uint64_t vc_vmx_basic;
 	uint64_t vc_vmx_entry_ctls;
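
A note on the register itself: PKRU is a 32-bit register holding two bits per
protection key, where bit 2k is the access-disable (AD) bit and bit 2k+1 is
the write-disable (WD) bit for key k. Both rdpkru and wrpkru require %ecx to
be zero (and wrpkru additionally requires %edx to be zero), which is why the
new wrpkru() helper zeroes those registers and rdpkru() is called with 0. The
sketch below shows how a "deny everything except key 0" value is composed;
pkru_deny_all_but_key0() is a hypothetical helper for illustration, not part
of this commit, and the host's actual value, PGK_VALUE, is defined elsewhere
in the amd64 pmap code.

/*
 * Illustration only: compose a PKRU value that denies all access
 * through protection keys 1-15 while leaving key 0 unrestricted.
 * PKRU layout: bit 2k = AD (access disable), bit 2k+1 = WD (write
 * disable) for key k.
 */
static uint32_t
pkru_deny_all_but_key0(void)
{
	uint32_t pkru = 0;
	int key;

	for (key = 1; key < 16; key++)
		pkru |= (1U << (2 * key)) | (1U << (2 * key + 1));

	return (pkru);		/* 0xfffffffc */
}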