From 02c03184b3a77cc7142aa12091094759d56da8f6 Mon Sep 17 00:00:00 2001
From: bluhm
Date: Fri, 4 Oct 2024 22:21:28 +0000
Subject: [PATCH] Allow boot loader to run as AMD SEV guest on QEMU with EFI.

When efibooting amd64, the boot loader rewrites the page table built
by the EFI firmware to ensure that there are no read-only mappings.
The rewrite is needed for some HP EFI BIOSes, which map the Computrace
section read-only.  When efibooting on SEV-enabled QEMU, we would have
to ensure that the crypt bit is set when changing page tables.
However, there is no need for the HP workaround when booting on QEMU
(or any other VM), so just do not modify the page table when SEV
guest mode is detected.

from Sebastian Sturm; via hshoexer@; OK kettenis@
---
 sys/arch/amd64/stand/efiboot/exec_i386.c | 32 +++++++++++++++++++++++-
 1 file changed, 31 insertions(+), 1 deletion(-)

diff --git a/sys/arch/amd64/stand/efiboot/exec_i386.c b/sys/arch/amd64/stand/efiboot/exec_i386.c
index b84476a2288..75451897aca 100644
--- a/sys/arch/amd64/stand/efiboot/exec_i386.c
+++ b/sys/arch/amd64/stand/efiboot/exec_i386.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: exec_i386.c,v 1.11 2023/07/22 10:11:19 jsg Exp $	*/
+/*	$OpenBSD: exec_i386.c,v 1.12 2024/10/04 22:21:28 bluhm Exp $	*/
 
 /*
  * Copyright (c) 1997-1998 Michael Shalayeff
@@ -239,6 +239,33 @@ ucode_load(void)
 }
 
 #ifdef __amd64__
+int
+detect_sev(void)
+{
+	uint32_t max_ex_leaf, sev_feat;
+	uint32_t vendor[4];
+	uint32_t sev_status, dummy;
+
+	/* check whether we have SEV feature cpuid leaf */
+	CPUID(0x80000000, max_ex_leaf, vendor[0], vendor[2], vendor[1]);
+	vendor[3] = 0;	/* NULL-terminate */
+	if (strcmp((char *)vendor, "AuthenticAMD") != 0 ||
+	    max_ex_leaf < 0x8000001F)
+		return -ENODEV;
+
+	CPUID(0x8000001F, sev_feat, dummy, dummy, dummy);
+	/* check that SEV is supported */
+	if ((sev_feat & CPUIDEAX_SEV) == 0)
+		return -ENODEV;
+
+	__asm volatile ("rdmsr" : "=a" (sev_status), "=d"(dummy) : "c"(MSR_SEV_STATUS));
+	/* check whether SEV is enabled */
+	if ((sev_status & SEV_STAT_ENABLED) == 0)
+		return -ENODEV;
+
+	return 0;
+}
+
 void
 protect_writeable(uint64_t addr, size_t len)
 {
@@ -247,6 +274,9 @@ protect_writeable(uint64_t addr, size_t len)
 	uint64_t cr0;
 	size_t idx;
 
+	if (detect_sev() == 0)
+		return;
+
 	__asm volatile("movq %%cr0, %0;" : "=r"(cr0) : :);
 	if ((cr0 & CR0_PG) == 0)
 		return;
-- 
2.20.1
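
A note on the detection sequence (commentary, not part of the patch):
detect_sev() follows AMD's documented probe order.  CPUID leaf
0x80000000 yields the vendor string and the maximum extended leaf,
bit 1 of EAX in leaf 0x8000001F (CPUIDEAX_SEV) reports SEV support,
and bit 0 of MSR 0xC0010131 (MSR_SEV_STATUS) reports whether SEV is
active in the running guest; a return value of 0 means "SEV guest",
so protect_writeable() bails out before touching the page table.
The same leaf's EBX bits 5:0 give the position of the crypt bit
(C-bit) mentioned in the commit message: with SEV enabled, every
page-table entry the boot loader rewrote would have to keep that bit
set, which is why skipping the rewrite entirely is the simpler fix.
Below is a minimal userland sketch of the CPUID half of the probe;
the cpuid() helper and the messages are invented for the example, and
the MSR check is omitted because RDMSR traps outside ring 0.

/*
 * sevprobe.c -- userland sketch of the CPUID part of detect_sev().
 * Hypothetical example code; constants mirror AMD's APM and OpenBSD's
 * sys/arch/amd64/include/specialreg.h.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CPUIDEAX_SEV	(1 << 1)	/* leaf 0x8000001F, EAX: SEV supported */

static void
cpuid(uint32_t leaf, uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	__asm volatile ("cpuid"
	    : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
	    : "a" (leaf));
}

int
main(void)
{
	uint32_t max_ex_leaf, eax, ebx, ecx, edx;
	char vendor[13];

	/* the vendor string is returned in the order ebx, edx, ecx */
	cpuid(0x80000000, &max_ex_leaf, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
	if (strcmp(vendor, "AuthenticAMD") != 0 ||
	    max_ex_leaf < 0x8000001F) {
		printf("no SEV feature leaf (vendor %s)\n", vendor);
		return 1;
	}

	cpuid(0x8000001F, &eax, &ebx, &ecx, &edx);
	if ((eax & CPUIDEAX_SEV) == 0) {
		printf("SEV not supported\n");
		return 1;
	}
	/* EBX bits 5:0 hold the C-bit position in a page-table entry. */
	printf("SEV supported, C-bit is PTE bit %u\n", ebx & 0x3f);
	return 0;
}

Built with "cc -o sevprobe sevprobe.c", this prints the C-bit position
on SEV-capable AMD hardware and reports why the probe stopped
everywhere else.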