path: root/sys/arch/amd64/stand
author     Alexander Bluhm <bluhm@cvs.openbsd.org>    2024-10-04 22:21:29 +0000
committer  Alexander Bluhm <bluhm@cvs.openbsd.org>    2024-10-04 22:21:29 +0000
commit     8045a8742ed864d29185fb30bc8666217bd34208 (patch)
tree       fa79ce2fb86f48aff574e0bd87c2f1a85fc10429 /sys/arch/amd64/stand
parent     fed3216ff87eaa3713d70a981f3e2bd2e91eab14 (diff)
Allow boot loader to run as AMD SEV guest on QEMU with EFI.
When efibooting amd64, the boot loader rewrites the page table built by the EFI firmware to ensure that there are no read-only mappings. The rewrite is needed for some HP EFI BIOS that map the computrace section read-only. When efibooting on SEV-enabled QEMU, we would have to ensure the crypt bit is set when changing page tables. However, there is no need for the HP workaround when booting on QEMU (or any other VM), so just do not modify the page table when SEV guest mode is detected.
from Sebastian Sturm; via hshoexer@; OK kettenis@
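For context, the "crypt bit" mentioned above is the SEV C-bit, whose position in a page table entry is reported by CPUID leaf 0x8000001F in EBX bits 5:0. The following is a minimal illustrative sketch of how a loader could derive the PTE mask it would need to preserve if it did rewrite SEV guest page tables; the cpuid() helper and sev_cbit_mask() name are hypothetical and not part of this commit.

#include <stdint.h>

/*
 * Sketch only: compute the SEV C-bit ("crypt bit") mask that would
 * have to be kept set in page table entries of an SEV guest.
 * CPUID leaf 0x8000001F returns the C-bit position in EBX[5:0]
 * (per AMD's SEV documentation).
 */
static inline void
cpuid(uint32_t leaf, uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	__asm volatile("cpuid"
	    : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
	    : "0" (leaf));
}

static uint64_t
sev_cbit_mask(void)
{
	uint32_t eax, ebx, ecx, edx;

	cpuid(0x8000001F, &eax, &ebx, &ecx, &edx);
	/* bit position of the C-bit in a page table entry */
	return (uint64_t)1 << (ebx & 0x3f);
}

Since the commit avoids touching the page table at all when SEV is detected, no such mask handling is needed in the boot loader itself.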
Diffstat (limited to 'sys/arch/amd64/stand')
-rw-r--r--  sys/arch/amd64/stand/efiboot/exec_i386.c  32
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/sys/arch/amd64/stand/efiboot/exec_i386.c b/sys/arch/amd64/stand/efiboot/exec_i386.c
index b84476a2288..75451897aca 100644
--- a/sys/arch/amd64/stand/efiboot/exec_i386.c
+++ b/sys/arch/amd64/stand/efiboot/exec_i386.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: exec_i386.c,v 1.11 2023/07/22 10:11:19 jsg Exp $ */
+/* $OpenBSD: exec_i386.c,v 1.12 2024/10/04 22:21:28 bluhm Exp $ */
/*
* Copyright (c) 1997-1998 Michael Shalayeff
@@ -239,6 +239,33 @@ ucode_load(void)
}
#ifdef __amd64__
+int
+detect_sev(void)
+{
+ uint32_t max_ex_leaf, sev_feat;
+ uint32_t vendor[4];
+ uint32_t sev_status, dummy;
+
+ /* check whether we have SEV feature cpuid leaf */
+ CPUID(0x80000000, max_ex_leaf, vendor[0], vendor[2], vendor[1]);
+ vendor[3] = 0; /* NULL-terminate */
+ if (strcmp((char *)vendor, "AuthenticAMD") != 0 ||
+ max_ex_leaf < 0x8000001F)
+ return -ENODEV;
+
+ CPUID(0x8000001F, sev_feat, dummy, dummy, dummy);
+ /* check that SEV is supported */
+ if ((sev_feat & CPUIDEAX_SEV) == 0)
+ return -ENODEV;
+
+ __asm volatile ("rdmsr" : "=a" (sev_status), "=d"(dummy) : "c"(MSR_SEV_STATUS));
+ /* check whether SEV is enabled */
+ if ((sev_status & SEV_STAT_ENABLED) == 0)
+ return -ENODEV;
+
+ return 0;
+}
+
void
protect_writeable(uint64_t addr, size_t len)
{
@@ -247,6 +274,9 @@ protect_writeable(uint64_t addr, size_t len)
uint64_t cr0;
size_t idx;
+ if (detect_sev() == 0)
+ return;
+
__asm volatile("movq %%cr0, %0;" : "=r"(cr0) : :);
if ((cr0 & CR0_PG) == 0)
return;