diffstat:

 sys/arch/amd64/amd64/vmm.c      | 12 ++++++++----
 sys/arch/amd64/include/vmmvar.h |  4 +++-
 2 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/sys/arch/amd64/amd64/vmm.c b/sys/arch/amd64/amd64/vmm.c index 36623121521..6c1c39486c0 100644 --- a/sys/arch/amd64/amd64/vmm.c +++ b/sys/arch/amd64/amd64/vmm.c @@ -1,4 +1,4 @@ -/* $OpenBSD: vmm.c,v 1.47 2016/04/05 09:33:05 mlarkin Exp $ */ +/* $OpenBSD: vmm.c,v 1.48 2016/04/06 06:15:06 mlarkin Exp $ */ /* * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org> * @@ -1894,17 +1894,21 @@ vcpu_reset_regs_vmx(struct vcpu *vcpu, struct vcpu_init_state *vis) msr_store[6].vms_index = MSR_KERNELGSBASE; msr_store[6].vms_data = 0ULL; /* Initial value */ - if (vmwrite(VMCS_EXIT_MSR_STORE_COUNT, 0x7)) { + /* + * Currently we have the same count of entry/exit MSRs loads/stores + * but this is not an architectural requirement. + */ + if (vmwrite(VMCS_EXIT_MSR_STORE_COUNT, VMX_NUM_MSR_STORE)) { ret = EINVAL; goto exit; } - if (vmwrite(VMCS_EXIT_MSR_LOAD_COUNT, 0x7)) { + if (vmwrite(VMCS_EXIT_MSR_LOAD_COUNT, VMX_NUM_MSR_STORE)) { ret = EINVAL; goto exit; } - if (vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, 0x7)) { + if (vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, VMX_NUM_MSR_STORE)) { ret = EINVAL; goto exit; } diff --git a/sys/arch/amd64/include/vmmvar.h b/sys/arch/amd64/include/vmmvar.h index f770968dd8b..ce15c3e1a4d 100644 --- a/sys/arch/amd64/include/vmmvar.h +++ b/sys/arch/amd64/include/vmmvar.h @@ -1,4 +1,4 @@ -/* $OpenBSD: vmmvar.h,v 1.11 2016/03/13 13:11:47 stefan Exp $ */ +/* $OpenBSD: vmmvar.h,v 1.12 2016/04/06 06:15:06 mlarkin Exp $ */ /* * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org> * @@ -282,6 +282,8 @@ struct vm_intr_params { #define VMX_FAIL_LAUNCH_INVALID_VMCS 2 #define VMX_FAIL_LAUNCH_VALID_VMCS 3 +#define VMX_NUM_MSR_STORE 7 + enum { VMM_MODE_UNKNOWN, VMM_MODE_VMX, |