summaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorMike Larkin <mlarkin@cvs.openbsd.org>2017-01-19 23:18:12 +0000
committerMike Larkin <mlarkin@cvs.openbsd.org>2017-01-19 23:18:12 +0000
commit1309bbf159e50db75ae5f992927c9bedaee6bd59 (patch)
tree0fcd3f342a0bab010599f41fa88c3916a93aeed8 /sys
parentd38b16970ac4f10991656aef68a4be84c9d63aa2 (diff)
SVM: register reset and intercept setup code
Diffstat (limited to 'sys')
-rw-r--r--sys/arch/amd64/amd64/vmm.c283
-rw-r--r--sys/arch/i386/i386/vmm.c275
2 files changed, 550 insertions, 8 deletions
diff --git a/sys/arch/amd64/amd64/vmm.c b/sys/arch/amd64/amd64/vmm.c
index 47c9af5a4a6..8fddce0e942 100644
--- a/sys/arch/amd64/amd64/vmm.c
+++ b/sys/arch/amd64/amd64/vmm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmm.c,v 1.110 2017/01/19 05:53:40 mlarkin Exp $ */
+/* $OpenBSD: vmm.c,v 1.111 2017/01/19 23:18:11 mlarkin Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@@ -162,6 +162,9 @@ int vmx_handle_np_fault(struct vcpu *);
const char *vcpu_state_decode(u_int);
const char *vmx_exit_reason_decode(uint32_t);
const char *vmx_instruction_error_decode(uint32_t);
+void svm_setmsrbr(struct vcpu *, uint32_t);
+void svm_setmsrbw(struct vcpu *, uint32_t);
+void svm_setmsrbrw(struct vcpu *, uint32_t);
void vmx_setmsrbr(struct vcpu *, uint32_t);
void vmx_setmsrbw(struct vcpu *, uint32_t);
void vmx_setmsrbrw(struct vcpu *, uint32_t);
@@ -1527,24 +1530,296 @@ out:
/*
* vcpu_writeregs_svm
*
- * XXX - unimplemented
+ * Writes 'vcpu's registers
+ *
+ * Parameters:
+ * vcpu: the vcpu that has to get its registers written to
+ * regmask: the types of registers to write
+ * vrs: the register values to write
+ *
+ * Return values:
+ * 0: if successful
+ *  EINVAL: an error writing registers occurred
*/
int
vcpu_writeregs_svm(struct vcpu *vcpu, uint64_t regmask,
struct vcpu_reg_state *vrs)
{
+ uint64_t *gprs = vrs->vrs_gprs;
+ uint64_t *crs = vrs->vrs_crs;
+ struct vcpu_segment_info *sregs = vrs->vrs_sregs;
+ struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
+
+ if (regmask & VM_RWREGS_GPRS) {
+ vcpu->vc_gueststate.vg_rax = gprs[VCPU_REGS_RAX];
+ vcpu->vc_gueststate.vg_rbx = gprs[VCPU_REGS_RBX];
+ vcpu->vc_gueststate.vg_rcx = gprs[VCPU_REGS_RCX];
+ vcpu->vc_gueststate.vg_rdx = gprs[VCPU_REGS_RDX];
+ vcpu->vc_gueststate.vg_rsi = gprs[VCPU_REGS_RSI];
+ vcpu->vc_gueststate.vg_rdi = gprs[VCPU_REGS_RDI];
+ vcpu->vc_gueststate.vg_r8 = gprs[VCPU_REGS_R8];
+ vcpu->vc_gueststate.vg_r9 = gprs[VCPU_REGS_R9];
+ vcpu->vc_gueststate.vg_r10 = gprs[VCPU_REGS_R10];
+ vcpu->vc_gueststate.vg_r11 = gprs[VCPU_REGS_R11];
+ vcpu->vc_gueststate.vg_r12 = gprs[VCPU_REGS_R12];
+ vcpu->vc_gueststate.vg_r13 = gprs[VCPU_REGS_R13];
+ vcpu->vc_gueststate.vg_r14 = gprs[VCPU_REGS_R14];
+ vcpu->vc_gueststate.vg_r15 = gprs[VCPU_REGS_R15];
+ vcpu->vc_gueststate.vg_rbp = gprs[VCPU_REGS_RBP];
+ vcpu->vc_gueststate.vg_rip = gprs[VCPU_REGS_RIP];
+
+ vmcb->v_rip = gprs[VCPU_REGS_RIP];
+ vmcb->v_rsp = gprs[VCPU_REGS_RSP];
+ vmcb->v_rflags = gprs[VCPU_REGS_RFLAGS];
+ DPRINTF("%s: set vcpu GPRs (rip=0x%llx rsp=0x%llx)\n",
+ __func__, vmcb->v_rip, vmcb->v_rsp);
+ }
+
+ if (regmask & VM_RWREGS_SREGS) {
+ vmcb->v_cs.vs_sel = sregs[VCPU_REGS_CS].vsi_sel;
+ vmcb->v_cs.vs_lim = sregs[VCPU_REGS_CS].vsi_limit;
+ vmcb->v_cs.vs_attr = sregs[VCPU_REGS_CS].vsi_ar;
+ vmcb->v_cs.vs_base = sregs[VCPU_REGS_CS].vsi_base;
+ vmcb->v_ds.vs_sel = sregs[VCPU_REGS_DS].vsi_sel;
+ vmcb->v_ds.vs_lim = sregs[VCPU_REGS_DS].vsi_limit;
+ vmcb->v_ds.vs_attr = sregs[VCPU_REGS_DS].vsi_ar;
+ vmcb->v_ds.vs_base = sregs[VCPU_REGS_DS].vsi_base;
+ vmcb->v_es.vs_sel = sregs[VCPU_REGS_ES].vsi_sel;
+ vmcb->v_es.vs_lim = sregs[VCPU_REGS_ES].vsi_limit;
+ vmcb->v_es.vs_attr = sregs[VCPU_REGS_ES].vsi_ar;
+ vmcb->v_es.vs_base = sregs[VCPU_REGS_ES].vsi_base;
+ vmcb->v_fs.vs_sel = sregs[VCPU_REGS_FS].vsi_sel;
+ vmcb->v_fs.vs_lim = sregs[VCPU_REGS_FS].vsi_limit;
+ vmcb->v_fs.vs_attr = sregs[VCPU_REGS_FS].vsi_ar;
+ vmcb->v_fs.vs_base = sregs[VCPU_REGS_FS].vsi_base;
+ vmcb->v_gs.vs_sel = sregs[VCPU_REGS_GS].vsi_sel;
+ vmcb->v_gs.vs_lim = sregs[VCPU_REGS_GS].vsi_limit;
+ vmcb->v_gs.vs_attr = sregs[VCPU_REGS_GS].vsi_ar;
+ vmcb->v_gs.vs_base = sregs[VCPU_REGS_GS].vsi_base;
+ vmcb->v_ss.vs_sel = sregs[VCPU_REGS_SS].vsi_sel;
+ vmcb->v_ss.vs_lim = sregs[VCPU_REGS_SS].vsi_limit;
+ vmcb->v_ss.vs_attr = sregs[VCPU_REGS_SS].vsi_ar;
+ vmcb->v_ss.vs_base = sregs[VCPU_REGS_SS].vsi_base;
+ vmcb->v_ldtr.vs_sel = sregs[VCPU_REGS_LDTR].vsi_sel;
+ vmcb->v_ldtr.vs_lim = sregs[VCPU_REGS_LDTR].vsi_limit;
+ vmcb->v_ldtr.vs_attr = sregs[VCPU_REGS_LDTR].vsi_ar;
+ vmcb->v_ldtr.vs_base = sregs[VCPU_REGS_LDTR].vsi_base;
+ vmcb->v_tr.vs_sel = sregs[VCPU_REGS_TR].vsi_sel;
+ vmcb->v_tr.vs_lim = sregs[VCPU_REGS_TR].vsi_limit;
+ vmcb->v_tr.vs_attr = sregs[VCPU_REGS_TR].vsi_ar;
+ vmcb->v_tr.vs_base = sregs[VCPU_REGS_TR].vsi_base;
+ vmcb->v_gdtr.vs_lim = vrs->vrs_gdtr.vsi_limit;
+ vmcb->v_gdtr.vs_base = vrs->vrs_gdtr.vsi_base;
+ vmcb->v_idtr.vs_lim = vrs->vrs_idtr.vsi_limit;
+ vmcb->v_idtr.vs_base = vrs->vrs_idtr.vsi_base;
+
+ DPRINTF("%s: set vcpu seg regs (gdt.base=0x%llx, "
+ "cs.sel=0x%llx)\n", __func__, vmcb->v_gdtr.vs_base,
+ (uint64_t)vmcb->v_cs.vs_sel);
+ }
+
+ if (regmask & VM_RWREGS_CRS) {
+ vmcb->v_cr0 = crs[VCPU_REGS_CR0];
+ vmcb->v_cr3 = crs[VCPU_REGS_CR3];
+ vmcb->v_cr4 = crs[VCPU_REGS_CR4];
+
+ DPRINTF("%s: set vcpu CRs (cr0=0x%llx cr3=0x%llx "
+ "cr4=0x%llx)\n", __func__, vmcb->v_cr0, vmcb->v_cr3,
+ vmcb->v_cr4);
+ }
+
return (0);
}
/*
* vcpu_reset_regs_svm
*
- * XXX - unimplemented
+ * Initializes 'vcpu's registers to supplied state
+ *
+ * Parameters:
+ * vcpu: the vcpu whose register state is to be initialized
+ * vrs: the register state to set
+ *
+ * Return values:
+ * 0: registers init'ed successfully
+ * EINVAL: an error occurred setting register state
*/
int
vcpu_reset_regs_svm(struct vcpu *vcpu, struct vcpu_reg_state *vrs)
{
- return (0);
+ struct vmcb *vmcb;
+ int ret;
+
+ vmcb = (struct vmcb *)vcpu->vc_control_va;
+
+ /*
+ * Intercept controls
+ *
+ * External Interrupt exiting (SVM_INTERCEPT_INTR)
+ * External NMI exiting (SVM_INTERCEPT_NMI)
+ * CPUID instruction (SVM_INTERCEPT_CPUID)
+ * HLT instruction (SVM_INTERCEPT_HLT)
+ * I/O instructions (SVM_INTERCEPT_INOUT)
+ * MSR access (SVM_INTERCEPT_MSR)
+ *
+ * VMRUN instruction (SVM_INTERCEPT_VMRUN)
+ * VMMCALL instruction (SVM_INTERCEPT_VMMCALL)
+ * VMLOAD instruction (SVM_INTERCEPT_VMLOAD)
+ * VMSAVE instruction (SVM_INTERCEPT_VMSAVE)
+ * STGI instruction (SVM_INTERCEPT_STGI)
+ * CLGI instruction (SVM_INTERCEPT_CLGI)
+ * SKINIT instruction (SVM_INTERCEPT_SKINIT)
+ * ICEBP instruction (SVM_INTERCEPT_ICEBP)
+ * MWAIT instruction (SVM_INTERCEPT_MWAIT_UNCOND)
+ */
+ vmcb->v_intercept1 = SVM_INTERCEPT_INTR | SVM_INTERCEPT_NMI |
+ SVM_INTERCEPT_CPUID | SVM_INTERCEPT_HLT | SVM_INTERCEPT_INOUT |
+ SVM_INTERCEPT_MSR;
+
+ vmcb->v_intercept2 = SVM_INTERCEPT_VMRUN | SVM_INTERCEPT_VMMCALL |
+ SVM_INTERCEPT_VMLOAD | SVM_INTERCEPT_VMSAVE | SVM_INTERCEPT_STGI |
+ SVM_INTERCEPT_CLGI | SVM_INTERCEPT_SKINIT | SVM_INTERCEPT_ICEBP |
+ SVM_INTERCEPT_MWAIT_UNCOND;
+
+ /* Setup I/O bitmap */
+ memset((uint8_t *)vcpu->vc_svm_ioio_va, 0xFF, 3 * PAGE_SIZE);
+ vmcb->v_iopm_pa = (uint64_t)(vcpu->vc_svm_ioio_pa);
+
+ /* Setup MSR bitmap */
+ memset((uint8_t *)vcpu->vc_msr_bitmap_va, 0xFF, 2 * PAGE_SIZE);
+	vmcb->v_msrpm_pa = (uint64_t)(vcpu->vc_msr_bitmap_pa);
+ svm_setmsrbrw(vcpu, MSR_IA32_FEATURE_CONTROL);
+ svm_setmsrbrw(vcpu, MSR_SYSENTER_CS);
+ svm_setmsrbrw(vcpu, MSR_SYSENTER_ESP);
+ svm_setmsrbrw(vcpu, MSR_SYSENTER_EIP);
+ svm_setmsrbrw(vcpu, MSR_EFER);
+ svm_setmsrbrw(vcpu, MSR_STAR);
+ svm_setmsrbrw(vcpu, MSR_LSTAR);
+ svm_setmsrbrw(vcpu, MSR_CSTAR);
+ svm_setmsrbrw(vcpu, MSR_SFMASK);
+ svm_setmsrbrw(vcpu, MSR_FSBASE);
+ svm_setmsrbrw(vcpu, MSR_GSBASE);
+ svm_setmsrbrw(vcpu, MSR_KERNELGSBASE);
+
+ /* Guest VCPU ASID */
+ vmcb->v_asid = vcpu->vc_parent->vm_id;
+
+ /* TLB Control */
+ vmcb->v_tlb_control = 2; /* Flush this guest's TLB entries */
+
+ /* NPT */
+ if (vmm_softc->mode == VMM_MODE_RVI) {
+ vmcb->v_np_enable = 1;
+ vmcb->v_n_cr3 = vcpu->vc_parent->vm_map->pmap->pm_pdirpa;
+ }
+
+ ret = vcpu_writeregs_svm(vcpu, VM_RWREGS_ALL, vrs);
+
+ return ret;
+}
+
+/*
+ * svm_setmsrbr
+ *
+ * Allow read access to the specified msr on the supplied vcpu.
+ *
+ * Parameters:
+ * vcpu: the VCPU to allow access
+ * msr: the MSR number to allow access to
+ */
+void
+svm_setmsrbr(struct vcpu *vcpu, uint32_t msr)
+{
+ uint8_t *msrs;
+ uint16_t idx;
+
+ msrs = (uint8_t *)vcpu->vc_msr_bitmap_va;
+
+ /*
+ * MSR Read bitmap layout:
+ * Pentium MSRs (0x0 - 0x1fff) @ 0x0
+ * Gen6 and Syscall MSRs (0xc0000000 - 0xc0001fff) @ 0x800
+ * Gen7 and Gen8 MSRs (0xc0010000 - 0xc0011fff) @ 0x1000
+ *
+ * Read enable bit is low order bit of 2-bit pair
+	 * per MSR (eg, MSR 0x0 read bit is at bit 0 @ 0x0)
+ */
+ if (msr <= 0x1fff) {
+ idx = SVM_MSRIDX(msr);
+ msrs[idx] &= ~(SVM_MSRBIT_R(msr));
+ } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
+ idx = SVM_MSRIDX(msr - 0xc0000000) + 0x800;
+ msrs[idx] &= ~(SVM_MSRBIT_R(msr - 0xc0000000));
+ } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
+ idx = SVM_MSRIDX(msr - 0xc0010000) + 0x1000;
+		msrs[idx] &= ~(SVM_MSRBIT_R(msr - 0xc0010000));
+ } else {
+ printf("%s: invalid msr 0x%x\n", __func__, msr);
+ return;
+ }
+
+ DPRINTF("%s: set msr read bitmap, msr=0x%x, idx=0x%x, "
+ "msrs[0x%x]=0x%x\n", __func__, msr, idx, idx, msrs[idx]);
+}
+
+/*
+ * svm_setmsrbw
+ *
+ * Allow write access to the specified msr on the supplied vcpu
+ *
+ * Parameters:
+ * vcpu: the VCPU to allow access
+ * msr: the MSR number to allow access to
+ */
+void
+svm_setmsrbw(struct vcpu *vcpu, uint32_t msr)
+{
+ uint8_t *msrs;
+ uint16_t idx;
+
+ msrs = (uint8_t *)vcpu->vc_msr_bitmap_va;
+
+ /*
+ * MSR Write bitmap layout:
+ * Pentium MSRs (0x0 - 0x1fff) @ 0x0
+ * Gen6 and Syscall MSRs (0xc0000000 - 0xc0001fff) @ 0x800
+ * Gen7 and Gen8 MSRs (0xc0010000 - 0xc0011fff) @ 0x1000
+ *
+ * Write enable bit is high order bit of 2-bit pair
+ * per MSR (eg, MSR 0x0 write bit is at bit 1 @ 0x0)
+ */
+ if (msr <= 0x1fff) {
+ idx = SVM_MSRIDX(msr);
+ msrs[idx] &= ~(SVM_MSRBIT_W(msr));
+ } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
+ idx = SVM_MSRIDX(msr - 0xc0000000) + 0x800;
+ msrs[idx] &= ~(SVM_MSRBIT_W(msr - 0xc0000000));
+ } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
+		idx = SVM_MSRIDX(msr - 0xc0010000) + 0x1000;
+ msrs[idx] &= ~(SVM_MSRBIT_W(msr - 0xc0010000));
+ } else {
+ printf("%s: invalid msr 0x%x\n", __func__, msr);
+ return;
+ }
+
+ DPRINTF("%s: set msr write bitmap, msr=0x%x, idx=0x%x, "
+ "msrs[0x%x]=0x%x\n", __func__, msr, idx, idx, msrs[idx]);
+}
+
+/*
+ * svm_setmsrbrw
+ *
+ * Allow read/write access to the specified msr on the supplied vcpu
+ *
+ * Parameters:
+ * vcpu: the VCPU to allow access
+ * msr: the MSR number to allow access to
+ */
+void
+svm_setmsrbrw(struct vcpu *vcpu, uint32_t msr)
+{
+ svm_setmsrbr(vcpu, msr);
+ svm_setmsrbw(vcpu, msr);
}
/*
diff --git a/sys/arch/i386/i386/vmm.c b/sys/arch/i386/i386/vmm.c
index a610a00649c..59d44eced99 100644
--- a/sys/arch/i386/i386/vmm.c
+++ b/sys/arch/i386/i386/vmm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmm.c,v 1.18 2017/01/19 05:53:40 mlarkin Exp $ */
+/* $OpenBSD: vmm.c,v 1.19 2017/01/19 23:18:11 mlarkin Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@@ -162,6 +162,9 @@ int vmx_handle_np_fault(struct vcpu *);
const char *vcpu_state_decode(u_int);
const char *vmx_exit_reason_decode(uint32_t);
const char *vmx_instruction_error_decode(uint32_t);
+void svm_setmsrbr(struct vcpu *, uint32_t);
+void svm_setmsrbw(struct vcpu *, uint32_t);
+void svm_setmsrbrw(struct vcpu *, uint32_t);
void vmx_setmsrbr(struct vcpu *, uint32_t);
void vmx_setmsrbw(struct vcpu *, uint32_t);
void vmx_setmsrbrw(struct vcpu *, uint32_t);
@@ -1522,24 +1525,288 @@ out:
/*
* vcpu_writeregs_svm
*
- * XXX - unimplemented
+ * Writes 'vcpu's registers
+ *
+ * Parameters:
+ * vcpu: the vcpu that has to get its registers written to
+ * regmask: the types of registers to write
+ * vrs: the register values to write
+ *
+ * Return values:
+ * 0: if successful
+ *  EINVAL: an error writing registers occurred
*/
int
vcpu_writeregs_svm(struct vcpu *vcpu, uint64_t regmask,
struct vcpu_reg_state *vrs)
{
+ uint32_t *gprs = vrs->vrs_gprs;
+ uint32_t *crs = vrs->vrs_crs;
+ struct vcpu_segment_info *sregs = vrs->vrs_sregs;
+ struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
+
+ if (regmask & VM_RWREGS_GPRS) {
+ vcpu->vc_gueststate.vg_eax = gprs[VCPU_REGS_EAX];
+ vcpu->vc_gueststate.vg_ebx = gprs[VCPU_REGS_EBX];
+ vcpu->vc_gueststate.vg_ecx = gprs[VCPU_REGS_ECX];
+ vcpu->vc_gueststate.vg_edx = gprs[VCPU_REGS_EDX];
+ vcpu->vc_gueststate.vg_esi = gprs[VCPU_REGS_ESI];
+ vcpu->vc_gueststate.vg_edi = gprs[VCPU_REGS_EDI];
+ vcpu->vc_gueststate.vg_ebp = gprs[VCPU_REGS_EBP];
+ vcpu->vc_gueststate.vg_eip = gprs[VCPU_REGS_EIP];
+
+ vmcb->v_rip = gprs[VCPU_REGS_EIP];
+ vmcb->v_rsp = gprs[VCPU_REGS_ESP];
+ vmcb->v_rflags = gprs[VCPU_REGS_EFLAGS];
+ DPRINTF("%s: set vcpu GPRs (eip=0x%llx esp=0x%llx)\n",
+ __func__, vmcb->v_rip, vmcb->v_rsp);
+ }
+
+ if (regmask & VM_RWREGS_SREGS) {
+ vmcb->v_cs.vs_sel = sregs[VCPU_REGS_CS].vsi_sel;
+ vmcb->v_cs.vs_lim = sregs[VCPU_REGS_CS].vsi_limit;
+ vmcb->v_cs.vs_attr = sregs[VCPU_REGS_CS].vsi_ar;
+ vmcb->v_cs.vs_base = sregs[VCPU_REGS_CS].vsi_base;
+ vmcb->v_ds.vs_sel = sregs[VCPU_REGS_DS].vsi_sel;
+ vmcb->v_ds.vs_lim = sregs[VCPU_REGS_DS].vsi_limit;
+ vmcb->v_ds.vs_attr = sregs[VCPU_REGS_DS].vsi_ar;
+ vmcb->v_ds.vs_base = sregs[VCPU_REGS_DS].vsi_base;
+ vmcb->v_es.vs_sel = sregs[VCPU_REGS_ES].vsi_sel;
+ vmcb->v_es.vs_lim = sregs[VCPU_REGS_ES].vsi_limit;
+ vmcb->v_es.vs_attr = sregs[VCPU_REGS_ES].vsi_ar;
+ vmcb->v_es.vs_base = sregs[VCPU_REGS_ES].vsi_base;
+ vmcb->v_fs.vs_sel = sregs[VCPU_REGS_FS].vsi_sel;
+ vmcb->v_fs.vs_lim = sregs[VCPU_REGS_FS].vsi_limit;
+ vmcb->v_fs.vs_attr = sregs[VCPU_REGS_FS].vsi_ar;
+ vmcb->v_fs.vs_base = sregs[VCPU_REGS_FS].vsi_base;
+ vmcb->v_gs.vs_sel = sregs[VCPU_REGS_GS].vsi_sel;
+ vmcb->v_gs.vs_lim = sregs[VCPU_REGS_GS].vsi_limit;
+ vmcb->v_gs.vs_attr = sregs[VCPU_REGS_GS].vsi_ar;
+ vmcb->v_gs.vs_base = sregs[VCPU_REGS_GS].vsi_base;
+ vmcb->v_ss.vs_sel = sregs[VCPU_REGS_SS].vsi_sel;
+ vmcb->v_ss.vs_lim = sregs[VCPU_REGS_SS].vsi_limit;
+ vmcb->v_ss.vs_attr = sregs[VCPU_REGS_SS].vsi_ar;
+ vmcb->v_ss.vs_base = sregs[VCPU_REGS_SS].vsi_base;
+ vmcb->v_ldtr.vs_sel = sregs[VCPU_REGS_LDTR].vsi_sel;
+ vmcb->v_ldtr.vs_lim = sregs[VCPU_REGS_LDTR].vsi_limit;
+ vmcb->v_ldtr.vs_attr = sregs[VCPU_REGS_LDTR].vsi_ar;
+ vmcb->v_ldtr.vs_base = sregs[VCPU_REGS_LDTR].vsi_base;
+ vmcb->v_tr.vs_sel = sregs[VCPU_REGS_TR].vsi_sel;
+ vmcb->v_tr.vs_lim = sregs[VCPU_REGS_TR].vsi_limit;
+ vmcb->v_tr.vs_attr = sregs[VCPU_REGS_TR].vsi_ar;
+ vmcb->v_tr.vs_base = sregs[VCPU_REGS_TR].vsi_base;
+ vmcb->v_gdtr.vs_lim = vrs->vrs_gdtr.vsi_limit;
+ vmcb->v_gdtr.vs_base = vrs->vrs_gdtr.vsi_base;
+ vmcb->v_idtr.vs_lim = vrs->vrs_idtr.vsi_limit;
+ vmcb->v_idtr.vs_base = vrs->vrs_idtr.vsi_base;
+
+ DPRINTF("%s: set vcpu seg regs (gdt.base=0x%llx, "
+ "cs.sel=0x%llx)\n", __func__, vmcb->v_gdtr.vs_base,
+ (uint64_t)vmcb->v_cs.vs_sel);
+ }
+
+ if (regmask & VM_RWREGS_CRS) {
+ vmcb->v_cr0 = crs[VCPU_REGS_CR0];
+ vmcb->v_cr3 = crs[VCPU_REGS_CR3];
+ vmcb->v_cr4 = crs[VCPU_REGS_CR4];
+
+ DPRINTF("%s: set vcpu CRs (cr0=0x%llx cr3=0x%llx "
+ "cr4=0x%llx)\n", __func__, vmcb->v_cr0, vmcb->v_cr3,
+ vmcb->v_cr4);
+ }
+
return (0);
}
/*
* vcpu_reset_regs_svm
*
- * XXX - unimplemented
+ * Initializes 'vcpu's registers to supplied state
+ *
+ * Parameters:
+ * vcpu: the vcpu whose register state is to be initialized
+ * vrs: the register state to set
+ *
+ * Return values:
+ * 0: registers init'ed successfully
+ * EINVAL: an error occurred setting register state
*/
int
vcpu_reset_regs_svm(struct vcpu *vcpu, struct vcpu_reg_state *vrs)
{
- return (0);
+ struct vmcb *vmcb;
+ int ret;
+
+ vmcb = (struct vmcb *)vcpu->vc_control_va;
+
+ /*
+ * Intercept controls
+ *
+ * External Interrupt exiting (SVM_INTERCEPT_INTR)
+ * External NMI exiting (SVM_INTERCEPT_NMI)
+ * CPUID instruction (SVM_INTERCEPT_CPUID)
+ * HLT instruction (SVM_INTERCEPT_HLT)
+ * I/O instructions (SVM_INTERCEPT_INOUT)
+ * MSR access (SVM_INTERCEPT_MSR)
+ *
+ * VMRUN instruction (SVM_INTERCEPT_VMRUN)
+ * VMMCALL instruction (SVM_INTERCEPT_VMMCALL)
+ * VMLOAD instruction (SVM_INTERCEPT_VMLOAD)
+ * VMSAVE instruction (SVM_INTERCEPT_VMSAVE)
+ * STGI instruction (SVM_INTERCEPT_STGI)
+ * CLGI instruction (SVM_INTERCEPT_CLGI)
+ * SKINIT instruction (SVM_INTERCEPT_SKINIT)
+ * ICEBP instruction (SVM_INTERCEPT_ICEBP)
+ * MWAIT instruction (SVM_INTERCEPT_MWAIT_UNCOND)
+ */
+ vmcb->v_intercept1 = SVM_INTERCEPT_INTR | SVM_INTERCEPT_NMI |
+ SVM_INTERCEPT_CPUID | SVM_INTERCEPT_HLT | SVM_INTERCEPT_INOUT |
+ SVM_INTERCEPT_MSR;
+
+ vmcb->v_intercept2 = SVM_INTERCEPT_VMRUN | SVM_INTERCEPT_VMMCALL |
+ SVM_INTERCEPT_VMLOAD | SVM_INTERCEPT_VMSAVE | SVM_INTERCEPT_STGI |
+ SVM_INTERCEPT_CLGI | SVM_INTERCEPT_SKINIT | SVM_INTERCEPT_ICEBP |
+ SVM_INTERCEPT_MWAIT_UNCOND;
+
+ /* Setup I/O bitmap */
+ memset((uint8_t *)vcpu->vc_svm_ioio_va, 0xFF, 3 * PAGE_SIZE);
+ vmcb->v_iopm_pa = (uint64_t)(vcpu->vc_svm_ioio_pa);
+
+ /* Setup MSR bitmap */
+ memset((uint8_t *)vcpu->vc_msr_bitmap_va, 0xFF, 2 * PAGE_SIZE);
+	vmcb->v_msrpm_pa = (uint64_t)(vcpu->vc_msr_bitmap_pa);
+ svm_setmsrbrw(vcpu, MSR_IA32_FEATURE_CONTROL);
+ svm_setmsrbrw(vcpu, MSR_SYSENTER_CS);
+ svm_setmsrbrw(vcpu, MSR_SYSENTER_ESP);
+ svm_setmsrbrw(vcpu, MSR_SYSENTER_EIP);
+ svm_setmsrbrw(vcpu, MSR_EFER);
+ svm_setmsrbrw(vcpu, MSR_STAR);
+ svm_setmsrbrw(vcpu, MSR_LSTAR);
+ svm_setmsrbrw(vcpu, MSR_CSTAR);
+ svm_setmsrbrw(vcpu, MSR_SFMASK);
+ svm_setmsrbrw(vcpu, MSR_FSBASE);
+ svm_setmsrbrw(vcpu, MSR_GSBASE);
+ svm_setmsrbrw(vcpu, MSR_KERNELGSBASE);
+
+ /* Guest VCPU ASID */
+ vmcb->v_asid = vcpu->vc_parent->vm_id;
+
+ /* TLB Control */
+ vmcb->v_tlb_control = 2; /* Flush this guest's TLB entries */
+
+ /* NPT */
+ if (vmm_softc->mode == VMM_MODE_RVI) {
+ vmcb->v_np_enable = 1;
+ vmcb->v_n_cr3 = vcpu->vc_parent->vm_map->pmap->pm_pdirpa;
+ }
+
+ ret = vcpu_writeregs_svm(vcpu, VM_RWREGS_ALL, vrs);
+
+ return ret;
+}
+
+/*
+ * svm_setmsrbr
+ *
+ * Allow read access to the specified msr on the supplied vcpu.
+ *
+ * Parameters:
+ * vcpu: the VCPU to allow access
+ * msr: the MSR number to allow access to
+ */
+void
+svm_setmsrbr(struct vcpu *vcpu, uint32_t msr)
+{
+ uint8_t *msrs;
+ uint16_t idx;
+
+ msrs = (uint8_t *)vcpu->vc_msr_bitmap_va;
+
+ /*
+ * MSR Read bitmap layout:
+ * Pentium MSRs (0x0 - 0x1fff) @ 0x0
+ * Gen6 and Syscall MSRs (0xc0000000 - 0xc0001fff) @ 0x800
+ * Gen7 and Gen8 MSRs (0xc0010000 - 0xc0011fff) @ 0x1000
+ *
+ * Read enable bit is low order bit of 2-bit pair
+	 * per MSR (eg, MSR 0x0 read bit is at bit 0 @ 0x0)
+ */
+ if (msr <= 0x1fff) {
+ idx = SVM_MSRIDX(msr);
+ msrs[idx] &= ~(SVM_MSRBIT_R(msr));
+ } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
+ idx = SVM_MSRIDX(msr - 0xc0000000) + 0x800;
+ msrs[idx] &= ~(SVM_MSRBIT_R(msr - 0xc0000000));
+ } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
+ idx = SVM_MSRIDX(msr - 0xc0010000) + 0x1000;
+		msrs[idx] &= ~(SVM_MSRBIT_R(msr - 0xc0010000));
+ } else {
+ printf("%s: invalid msr 0x%x\n", __func__, msr);
+ return;
+ }
+
+ DPRINTF("%s: set msr read bitmap, msr=0x%x, idx=0x%x, "
+ "msrs[0x%x]=0x%x\n", __func__, msr, idx, idx, msrs[idx]);
+}
+
+/*
+ * svm_setmsrbw
+ *
+ * Allow write access to the specified msr on the supplied vcpu
+ *
+ * Parameters:
+ * vcpu: the VCPU to allow access
+ * msr: the MSR number to allow access to
+ */
+void
+svm_setmsrbw(struct vcpu *vcpu, uint32_t msr)
+{
+ uint8_t *msrs;
+ uint16_t idx;
+
+ msrs = (uint8_t *)vcpu->vc_msr_bitmap_va;
+
+ /*
+ * MSR Write bitmap layout:
+ * Pentium MSRs (0x0 - 0x1fff) @ 0x0
+ * Gen6 and Syscall MSRs (0xc0000000 - 0xc0001fff) @ 0x800
+ * Gen7 and Gen8 MSRs (0xc0010000 - 0xc0011fff) @ 0x1000
+ *
+ * Write enable bit is high order bit of 2-bit pair
+ * per MSR (eg, MSR 0x0 write bit is at bit 1 @ 0x0)
+ */
+ if (msr <= 0x1fff) {
+ idx = SVM_MSRIDX(msr);
+ msrs[idx] &= ~(SVM_MSRBIT_W(msr));
+ } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
+ idx = SVM_MSRIDX(msr - 0xc0000000) + 0x800;
+ msrs[idx] &= ~(SVM_MSRBIT_W(msr - 0xc0000000));
+ } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
+		idx = SVM_MSRIDX(msr - 0xc0010000) + 0x1000;
+ msrs[idx] &= ~(SVM_MSRBIT_W(msr - 0xc0010000));
+ } else {
+ printf("%s: invalid msr 0x%x\n", __func__, msr);
+ return;
+ }
+
+ DPRINTF("%s: set msr write bitmap, msr=0x%x, idx=0x%x, "
+ "msrs[0x%x]=0x%x\n", __func__, msr, idx, idx, msrs[idx]);
+}
+
+/*
+ * svm_setmsrbrw
+ *
+ * Allow read/write access to the specified msr on the supplied vcpu
+ *
+ * Parameters:
+ * vcpu: the VCPU to allow access
+ * msr: the MSR number to allow access to
+ */
+void
+svm_setmsrbrw(struct vcpu *vcpu, uint32_t msr)
+{
+ svm_setmsrbr(vcpu, msr);
+ svm_setmsrbw(vcpu, msr);
}
/*