author		Mike Larkin <mlarkin@cvs.openbsd.org>	2017-02-20 07:36:21 +0000
committer	Mike Larkin <mlarkin@cvs.openbsd.org>	2017-02-20 07:36:21 +0000
commit		20397d08c7d28053b3c17ca7b9b6944d5b053a92 (patch)
tree		b3639a8ebe545fe4b969758dcb635a2a77eca373 /sys/arch/amd64
parent		3c36ac3c775cc860659e4b1ae1ded2a43ffa10a3 (diff)
SVM: asm support for SVM/RVI
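The new svm_enter_guest() entry point is prototyped in vmmvar.h below. As a rough illustration only, a caller might look like the following sketch; the vcpu field names are assumptions for illustration, not part of this diff:

	/*
	 * Hypothetical caller, sketched from the svm_enter_guest()
	 * prototype added below; vc_control_pa and vc_gueststate are
	 * assumed field names, not taken from this diff.
	 */
	int
	svm_run_vcpu_sketch(struct vcpu *vcpu, struct region_descriptor *gdt)
	{
		int ret;

		/* first argument is the VMCB physical address */
		ret = svm_enter_guest(vcpu->vc_control_pa,
		    &vcpu->vc_gueststate, gdt);

		/* 0 means the guest ran and control returned via #VMEXIT */
		return (ret);
	}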
Diffstat (limited to 'sys/arch/amd64')
-rw-r--r--	sys/arch/amd64/amd64/vmm_support.S	205
-rw-r--r--	sys/arch/amd64/include/vmmvar.h	4
2 files changed, 207 insertions, 2 deletions
diff --git a/sys/arch/amd64/amd64/vmm_support.S b/sys/arch/amd64/amd64/vmm_support.S
index 268e84b2b78..6980dc0b827 100644
--- a/sys/arch/amd64/amd64/vmm_support.S
+++ b/sys/arch/amd64/amd64/vmm_support.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmm_support.S,v 1.5 2016/12/20 07:05:24 kettenis Exp $ */
+/* $OpenBSD: vmm_support.S,v 1.6 2017/02/20 07:36:20 mlarkin Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@@ -38,6 +38,7 @@
.global _C_LABEL(invept)
.global _C_LABEL(vmx_enter_guest)
.global _C_LABEL(vmm_dispatch_intr)
+ .global _C_LABEL(svm_enter_guest)
.text
.code64
@@ -384,3 +385,205 @@ restore_host:
movq %rdi, %rax
ret
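+/*
+ * svm_enter_guest(vmcb_pa, guest_regs, gdt_desc)
+ *  %rdi: VMCB physical address
+ *  %rsi: pointer to guest register state
+ *  %rdx: pointer to the host GDT's region_descriptor
+ * Returns 0 after a #VMEXIT (%rdi is zeroed below on the exit path).
+ */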
+_C_LABEL(svm_enter_guest):
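+	/* Assert GIF=0: block all interrupts while host state is swapped */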
+ clgi
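+	/* Move the VMCB PA aside; it lands in %rax for vmload/vmrun/vmsave */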
+ movq %rdi, %r8
+ pushfq
+
+ pushq %rdx /* gdt pointer */
+
+ /*
+ * Save (possibly) lazy-switched selectors
+ */
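+	/* %tr is saved so the host TSS can be reloaded, busy bit cleared, on exit */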
+ strw %ax
+ pushw %ax
+ movw %es, %ax
+ pushw %ax
+ movw %ds, %ax
+ pushw %ax
+ movw %ss, %ax
+ pushw %ax
+
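+	/* rdmsr returns each MSR in %edx:%eax; push both halves */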
+ movq $MSR_FSBASE, %rcx
+ rdmsr
+ pushq %rax
+ pushq %rdx
+ pushw %fs
+ movq $MSR_GSBASE, %rcx
+ rdmsr
+ pushq %rax
+ pushq %rdx
+ pushw %gs
+ movq $MSR_KERNELGSBASE, %rcx
+ rdmsr
+ pushq %rax
+ pushq %rdx
+
+ /*
+ * Save various MSRs
+ */
+ movq $MSR_STAR, %rcx
+ rdmsr
+ pushq %rax
+ pushq %rdx
+
+ movq $MSR_LSTAR, %rcx
+ rdmsr
+ pushq %rax
+ pushq %rdx
+
+ /* XXX - unused? */
+ movq $MSR_CSTAR, %rcx
+ rdmsr
+ pushq %rax
+ pushq %rdx
+
+ movq $MSR_SFMASK, %rcx
+ rdmsr
+ pushq %rax
+ pushq %rdx
+
+ /* Preserve callee-preserved registers as per AMD64 ABI */
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbp
+ pushq %rbx
+ pushq %rsi /* Guest Regs Pointer */
+
+ /* Restore guest registers */
+ movq %r8, %rax /* rax = vmcb pa */
+ movq 0x78(%rsi), %r8
+ movq %r8, %cr2
+ movq 0x70(%rsi), %r15
+ movq 0x68(%rsi), %r14
+ movq 0x60(%rsi), %r13
+ movq 0x58(%rsi), %r12
+ movq 0x50(%rsi), %r11
+ movq 0x48(%rsi), %r10
+ movq 0x40(%rsi), %r9
+ movq 0x38(%rsi), %r8
+ movq 0x30(%rsi), %rbp
+ movq 0x28(%rsi), %rdi
+ movq 0x20(%rsi), %rdx
+ movq 0x18(%rsi), %rcx
+ movq 0x10(%rsi), %rbx
+ /* %rax at 0x08(%rsi) is not needed in SVM */
+ movq 0x00(%rsi), %rsi
+
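+	/*
+	 * %rax holds the VMCB PA: vmload pulls in the guest FS/GS/TR/LDTR
+	 * and syscall MSR state, vmrun enters the guest, and execution
+	 * resumes here on #VMEXIT, where vmsave writes the guest's copy
+	 * of that same state back to the VMCB.
+	 */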
+ vmload
+ vmrun
+ vmsave
+
+ /* Preserve guest registers not saved in VMCB */
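+	/*
+	 * The guest regs pointer is still on the stack from before entry;
+	 * reach past the two pushes to fetch it, store the guest's %rsi
+	 * at offset 0x00, then recover the guest's %rdi.
+	 */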
+ pushq %rsi
+ pushq %rdi
+ movq 0x10(%rsp), %rdi
+ movq 0x8(%rsp), %rsi
+ movq %rsi, (%rdi)
+ popq %rdi
+ popq %rsi /* discard */
+
+ popq %rsi
+ /* %rax at 0x08(%rsi) is not needed in SVM */
+ movq %rbx, 0x10(%rsi)
+ movq %rcx, 0x18(%rsi)
+ movq %rdx, 0x20(%rsi)
+ movq %rdi, 0x28(%rsi)
+ movq %rbp, 0x30(%rsi)
+ movq %r8, 0x38(%rsi)
+ movq %r9, 0x40(%rsi)
+ movq %r10, 0x48(%rsi)
+ movq %r11, 0x50(%rsi)
+ movq %r12, 0x58(%rsi)
+ movq %r13, 0x60(%rsi)
+ movq %r14, 0x68(%rsi)
+ movq %r15, 0x70(%rsi)
+ movq %cr2, %rax
+ movq %rax, 0x78(%rsi)
+
+ /* %rdi = 0 means we took an exit */
+ xorq %rdi, %rdi
+
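+	/* Unwind everything pushed on entry, in reverse order */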
+restore_host_svm:
+ popq %rbx
+ popq %rbp
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+
+ /*
+ * Restore saved MSRs
+ */
+ popq %rdx
+ popq %rax
+ movq $MSR_SFMASK, %rcx
+ wrmsr
+
+ /* XXX - unused? */
+ popq %rdx
+ popq %rax
+ movq $MSR_CSTAR, %rcx
+ wrmsr
+
+ popq %rdx
+ popq %rax
+ movq $MSR_LSTAR, %rcx
+ wrmsr
+
+ popq %rdx
+ popq %rax
+ movq $MSR_STAR, %rcx
+ wrmsr
+
+ /*
+ * popw %gs will reset gsbase to 0, so preserve it
+ * first. This is to accommodate possibly lazy-switched
+ * selectors from above
+ */
+	cli	/* XXX not needed on AMD due to implicit clgi on #VMEXIT */
+ popq %rdx
+ popq %rax
+ movq $MSR_KERNELGSBASE, %rcx
+ wrmsr
+
+ popw %gs
+ popq %rdx
+ popq %rax
+ movq $MSR_GSBASE, %rcx
+ wrmsr
+
+ popw %fs
+ popq %rdx
+ popq %rax
+ movq $MSR_FSBASE, %rcx
+ wrmsr
+
+ popw %ax
+ movw %ax, %ss
+ popw %ax
+ movw %ax, %ds
+ popw %ax
+ movw %ax, %es
+
+ xorq %rax, %rax
+ popw %ax /* ax = saved TR */
+
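+	/*
+	 * The saved %rdx points at the GDT region_descriptor; skip the
+	 * 16-bit limit field to load the GDT base, then clear the busy
+	 * bit in the TSS descriptor's access byte so ltrw does not fault.
+	 */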
+ popq %rdx
+ addq $0x2, %rdx
+ movq (%rdx), %rdx
+
+ /* rdx = GDTR base addr */
+ andb $0xF9, 5(%rdx, %rax)
+
+ ltrw %ax
+
+ popfq
+
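+	/* %rdi (zeroed above) is the return value; stgi/sti re-enable interrupts */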
+ movq %rdi, %rax
+ stgi
+ sti
+
+ ret
diff --git a/sys/arch/amd64/include/vmmvar.h b/sys/arch/amd64/include/vmmvar.h
index 5dcbe493599..d707c412d53 100644
--- a/sys/arch/amd64/include/vmmvar.h
+++ b/sys/arch/amd64/include/vmmvar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmmvar.h,v 1.30 2017/01/24 09:50:54 mlarkin Exp $ */
+/* $OpenBSD: vmmvar.h,v 1.31 2017/02/20 07:36:20 mlarkin Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@@ -713,6 +713,8 @@ int vmread(uint64_t, uint64_t *);
void invvpid(uint64_t, struct vmx_invvpid_descriptor *);
void invept(uint64_t, struct vmx_invept_descriptor *);
int vmx_enter_guest(uint64_t *, struct vmx_gueststate *, int);
+int svm_enter_guest(uint64_t, struct vmx_gueststate *,
+ struct region_descriptor *);
void start_vmm_on_cpu(struct cpu_info *);
void stop_vmm_on_cpu(struct cpu_info *);