author	Mike Larkin <mlarkin@cvs.openbsd.org>	2017-01-19 01:34:22 +0000
committer	Mike Larkin <mlarkin@cvs.openbsd.org>	2017-01-19 01:34:22 +0000
commit	eae54df15f11f57aed58ed371607308e69fdfd29 (patch)
tree	4438ef08a571d3704572c0549fba2ba3d9863a97 /sys/arch
parent	4173f9968a9d46996a14a0f82331923cfd4198c7 (diff)
SVM: vcpu_init_svm - allocate memory for control structures (VMCB, MSR
bitmap, IOIO bitmap, and host state save area); matches the amd64 version
previously committed.
Diffstat (limited to 'sys/arch')
-rw-r--r--	sys/arch/i386/i386/vmm.c	131
-rw-r--r--	sys/arch/i386/include/vmmvar.h	6
2 files changed, 134 insertions, 3 deletions
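
For context, the single-page regions in this change (the VMCB and the host
state save area) use the stock km_alloc(9) kv_page/kp_zero modes and then
pmap_extract(9) to obtain the physical address that the hardware is given.
A minimal sketch of that pattern follows; the helper name is hypothetical,
and the vcpu fields are the ones used by this diff:

	/*
	 * Sketch only: allocate one zeroed, kernel-mapped page for the
	 * VMCB and record both its virtual and physical addresses.
	 */
	int
	alloc_vmcb_page(struct vcpu *vcpu)	/* hypothetical helper */
	{
		vcpu->vc_control_va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_page,
		    &kp_zero, &kd_waitok);
		if (vcpu->vc_control_va == 0)
			return (ENOMEM);

		/* The CPU consumes the VMCB by physical address. */
		if (!pmap_extract(pmap_kernel(), vcpu->vc_control_va,
		    (paddr_t *)&vcpu->vc_control_pa)) {
			km_free((void *)vcpu->vc_control_va, PAGE_SIZE,
			    &kv_page, &kp_zero);
			vcpu->vc_control_va = 0;
			return (ENOMEM);
		}
		return (0);
	}
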
diff --git a/sys/arch/i386/i386/vmm.c b/sys/arch/i386/i386/vmm.c
index ce716e1ced4..b64974054b4 100644
--- a/sys/arch/i386/i386/vmm.c
+++ b/sys/arch/i386/i386/vmm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmm.c,v 1.15 2017/01/19 00:04:38 mlarkin Exp $ */
+/* $OpenBSD: vmm.c,v 1.16 2017/01/19 01:34:21 mlarkin Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@@ -195,6 +195,13 @@ struct vmm_reg_debug_info {
const char *vmm_hv_signature = VMM_HV_SIGNATURE;
+const struct kmem_pa_mode vmm_kp_contig = {
+	.kp_constraint = &no_constraint,
+	.kp_maxseg = 1,
+	.kp_align = 4096,
+	.kp_zero = 1,
+};
+
struct cfdriver vmm_cd = {
NULL, "vmm", DV_DULL
};
@@ -1176,6 +1183,8 @@ vm_impl_init_svm(struct vm *vm, struct proc *p)
return (ENOMEM);
}
+	DPRINTF("%s: RVI pmap allocated @ %p\n", __func__, pmap);
+
/*
* Create a new UVM map for this VM, and assign it the pmap just
* created.
@@ -2081,6 +2090,13 @@ exit:
*
* This function allocates various per-VCPU memory regions, sets up initial
* VCPU VMCS controls, and sets initial register values.
+ * Parameters:
+ * vcpu: the VCPU structure being initialized
+ *
+ * Return values:
+ * 0: the VCPU was initialized successfully
+ * ENOMEM: insufficient resources
+ * EINVAL: an error occurred during VCPU initialization
*/
int
vcpu_init_vmx(struct vcpu *vcpu)
@@ -2304,12 +2320,121 @@ vcpu_reset_regs(struct vcpu *vcpu, struct vcpu_reg_state *vrs)
* vcpu_init_svm
*
* AMD SVM specific VCPU initialization routine.
+ *
+ * This function allocates various per-VCPU memory regions, sets up initial
+ * VCPU VMCB controls, and sets initial register values.
+ *
+ * Parameters:
+ * vcpu: the VCPU structure being initialized
+ *
+ * Return values:
+ * 0: the VCPU was initialized successfully
+ * ENOMEM: insufficient resources
+ * EINVAL: an error occurred during VCPU initialization
*/
int
vcpu_init_svm(struct vcpu *vcpu)
{
-	/* XXX removed due to rot */
-	return (0);
+	int ret;
+
+	ret = 0;
+
+	/* Allocate VMCB VA */
+	vcpu->vc_control_va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_page, &kp_zero,
+	    &kd_waitok);
+
+	if (!vcpu->vc_control_va)
+		return (ENOMEM);
+
+	/* Compute VMCB PA */
+	if (!pmap_extract(pmap_kernel(), vcpu->vc_control_va,
+	    (paddr_t *)&vcpu->vc_control_pa)) {
+		ret = ENOMEM;
+		goto exit;
+	}
+
+	DPRINTF("%s: VMCB va @ 0x%x, pa @ 0x%x\n", __func__,
+	    (uint32_t)vcpu->vc_control_va,
+	    (uint32_t)vcpu->vc_control_pa);
+
+
+	/* Allocate MSR bitmap VA (2 pages) */
+	vcpu->vc_msr_bitmap_va = (vaddr_t)km_alloc(2 * PAGE_SIZE, &kv_any,
+	    &vmm_kp_contig, &kd_waitok);
+
+	if (!vcpu->vc_msr_bitmap_va) {
+		ret = ENOMEM;
+		goto exit;
+	}
+
+	/* Compute MSR bitmap PA */
+	if (!pmap_extract(pmap_kernel(), vcpu->vc_msr_bitmap_va,
+	    (paddr_t *)&vcpu->vc_msr_bitmap_pa)) {
+		ret = ENOMEM;
+		goto exit;
+	}
+
+	DPRINTF("%s: MSR bitmap va @ 0x%x, pa @ 0x%x\n", __func__,
+	    (uint32_t)vcpu->vc_msr_bitmap_va,
+	    (uint32_t)vcpu->vc_msr_bitmap_pa);
+
+	/* Allocate host state area VA */
+	vcpu->vc_svm_hsa_va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_page,
+	    &kp_zero, &kd_waitok);
+
+	if (!vcpu->vc_svm_hsa_va) {
+		ret = ENOMEM;
+		goto exit;
+	}
+
+	/* Compute host state area PA */
+	if (!pmap_extract(pmap_kernel(), vcpu->vc_svm_hsa_va,
+	    &vcpu->vc_svm_hsa_pa)) {
+		ret = ENOMEM;
+		goto exit;
+	}
+
+	DPRINTF("%s: HSA va @ 0x%x, pa @ 0x%x\n", __func__,
+	    (uint32_t)vcpu->vc_svm_hsa_va,
+	    (uint32_t)vcpu->vc_svm_hsa_pa);
+
+	/* Allocate IOIO area VA (3 pages) */
+	vcpu->vc_svm_ioio_va = (vaddr_t)km_alloc(3 * PAGE_SIZE, &kv_any,
+	    &vmm_kp_contig, &kd_waitok);
+
+	if (!vcpu->vc_svm_ioio_va) {
+		ret = ENOMEM;
+		goto exit;
+	}
+
+	/* Compute IOIO area PA */
+	if (!pmap_extract(pmap_kernel(), vcpu->vc_svm_ioio_va,
+	    &vcpu->vc_svm_ioio_pa)) {
+		ret = ENOMEM;
+		goto exit;
+	}
+
+	DPRINTF("%s: IOIO va @ 0x%x, pa @ 0x%x\n", __func__,
+	    (uint32_t)vcpu->vc_svm_ioio_va,
+	    (uint32_t)vcpu->vc_svm_ioio_pa);
+
+exit:
+	if (ret) {
+		if (vcpu->vc_control_va)
+			km_free((void *)vcpu->vc_control_va, PAGE_SIZE,
+			    &kv_page, &kp_zero);
+		if (vcpu->vc_msr_bitmap_va)
+			km_free((void *)vcpu->vc_msr_bitmap_va, 2 * PAGE_SIZE,
+			    &kv_any, &vmm_kp_contig);
+		if (vcpu->vc_svm_hsa_va)
+			km_free((void *)vcpu->vc_svm_hsa_va, PAGE_SIZE,
+			    &kv_page, &kp_zero);
+		if (vcpu->vc_svm_ioio_va)
+			km_free((void *)vcpu->vc_svm_ioio_va,
+			    3 * PAGE_SIZE, &kv_any, &vmm_kp_contig);
+	}
+
+	return (ret);
}
/*
diff --git a/sys/arch/i386/include/vmmvar.h b/sys/arch/i386/include/vmmvar.h
index 73985a587f7..5a818eac990 100644
--- a/sys/arch/i386/include/vmmvar.h
+++ b/sys/arch/i386/include/vmmvar.h
@@ -670,6 +670,12 @@ struct vcpu {
paddr_t vc_vmx_msr_exit_load_pa;
vaddr_t vc_vmx_msr_entry_load_va;
paddr_t vc_vmx_msr_entry_load_pa;
+
+	/* SVM only */
+	vaddr_t vc_svm_hsa_va;
+	paddr_t vc_svm_hsa_pa;
+	vaddr_t vc_svm_ioio_va;
+	paddr_t vc_svm_ioio_pa;
};
SLIST_HEAD(vcpu_head, vcpu);
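
The multi-page regions (the 2-page MSR bitmap and the 3-page IOIO bitmap)
must be physically contiguous, which is what the vmm_kp_contig allocation
mode added above provides: a single physical segment, page aligned and
zeroed. A minimal sketch of that pattern, with a hypothetical helper name
and a pa_mode that mirrors vmm_kp_contig:

	/* Mirrors vmm_kp_contig: one physical segment, page aligned, zeroed. */
	const struct kmem_pa_mode kp_contig_sketch = {
		.kp_constraint = &no_constraint,
		.kp_maxseg = 1,
		.kp_align = 4096,
		.kp_zero = 1,
	};

	/*
	 * Sketch only: allocate npages of physically contiguous memory,
	 * returning its kernel VA and the PA of the first (base) page.
	 */
	int
	alloc_contig_pages(vaddr_t *vap, paddr_t *pap, size_t npages)
	{
		*vap = (vaddr_t)km_alloc(npages * PAGE_SIZE, &kv_any,
		    &kp_contig_sketch, &kd_waitok);
		if (*vap == 0)
			return (ENOMEM);

		/* One segment, so the first page's PA is the region base. */
		if (!pmap_extract(pmap_kernel(), *vap, pap)) {
			km_free((void *)*vap, npages * PAGE_SIZE, &kv_any,
			    &kp_contig_sketch);
			*vap = 0;
			return (ENOMEM);
		}
		return (0);
	}

The error path in vcpu_init_svm frees with the same kv/kp modes that were
used to allocate, which is required for km_free(9) to return the pages to
the right pool.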