vmm(4): save and restore guest pkru.
author: dv <dv@openbsd.org>
Mon, 30 Jan 2023 02:32:01 +0000 (02:32 +0000)
committer: dv <dv@openbsd.org>
Mon, 30 Jan 2023 02:32:01 +0000 (02:32 +0000)
Take a simple approach for saving and restoring PKRU if the host
has PKE support enabled. Uses explicit rdpkru/wrpkru instructions
for now instead of xsave.

This functionality is still gated behind amd64 pmap checking for
operation under a hypervisor as well as vmm masking the cpuid bit
for PKU.

"if your diff is good, then commit it" -deraadt@

sys/arch/amd64/amd64/vmm.c
sys/arch/amd64/include/cpufunc.h
sys/arch/amd64/include/vmmvar.h

index 969f184..55244ba 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: vmm.c,v 1.335 2023/01/13 14:15:49 dv Exp $    */
+/*     $OpenBSD: vmm.c,v 1.336 2023/01/30 02:32:01 dv Exp $    */
 /*
  * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
  *
@@ -128,6 +128,7 @@ struct vmm_softc {
        uint32_t                nr_svm_cpus;    /* [I] */
        uint32_t                nr_rvi_cpus;    /* [I] */
        uint32_t                nr_ept_cpus;    /* [I] */
+       uint8_t                 pkru_enabled;   /* [I] */
 
        /* Managed VMs */
        struct vmlist_head      vm_list;        /* [v] */
@@ -429,6 +430,10 @@ vmm_attach(struct device *parent, struct device *self, void *aux)
                        sc->nr_ept_cpus++;
        }
 
+       sc->pkru_enabled = 0;
+       if (rcr4() & CR4_PKE)
+               sc->pkru_enabled = 1;
+
        SLIST_INIT(&sc->vm_list);
        rw_init(&sc->vm_lock, "vm_list");
 
@@ -5029,11 +5034,21 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *vrp)
 
                TRACEPOINT(vmm, guest_enter, vcpu, vrp);
 
+               /* Restore any guest PKRU state. */
+               if (vmm_softc->pkru_enabled)
+                       wrpkru(vcpu->vc_pkru);
+
                ret = vmx_enter_guest(&vcpu->vc_control_pa,
                    &vcpu->vc_gueststate,
                    (vcpu->vc_vmx_vmcs_state == VMCS_LAUNCHED),
                    ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr);
 
+               /* Restore host PKRU state. */
+               if (vmm_softc->pkru_enabled) {
+                       vcpu->vc_pkru = rdpkru(0);
+                       wrpkru(PGK_VALUE);
+               }
+
                bare_lgdt(&gdtr);
                lidt(&idtr);
                lldt(ldt_sel);
@@ -7331,12 +7346,22 @@ vcpu_run_svm(struct vcpu *vcpu, struct vm_run_params *vrp)
                        break;
                }
 
+               /* Restore any guest PKRU state. */
+               if (vmm_softc->pkru_enabled)
+                       wrpkru(vcpu->vc_pkru);
+
                KASSERT(vmcb->v_intercept1 & SVM_INTERCEPT_INTR);
                wrmsr(MSR_AMD_VM_HSAVE_PA, vcpu->vc_svm_hsa_pa);
 
                ret = svm_enter_guest(vcpu->vc_control_pa,
                    &vcpu->vc_gueststate, &gdt);
 
+               /* Restore host PKRU state. */
+               if (vmm_softc->pkru_enabled) {
+                       vcpu->vc_pkru = rdpkru(0);
+                       wrpkru(PGK_VALUE);
+               }
+
                /*
                 * On exit, interrupts are disabled, and we are running with
                 * the guest FPU state still possibly on the CPU. Save the FPU
index cd2d2db..0db2d13 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: cpufunc.h,v 1.38 2023/01/20 16:01:04 deraadt Exp $    */
+/*     $OpenBSD: cpufunc.h,v 1.39 2023/01/30 02:32:01 dv Exp $ */
 /*     $NetBSD: cpufunc.h,v 1.3 2003/05/08 10:27:43 fvdl Exp $ */
 
 /*-
@@ -240,6 +240,13 @@ rdpkru(u_int ecx)
        return pkru;
 }
 
+static __inline void
+wrpkru(uint32_t pkru)
+{
+       uint32_t ecx = 0, edx = 0;
+       __asm volatile("wrpkru" : : "a" (pkru), "c" (ecx), "d" (edx));
+}
+
 static __inline void
 wrmsr(u_int msr, u_int64_t newval)
 {
index 344261f..4e020bc 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: vmmvar.h,v 1.88 2023/01/28 14:40:53 dv Exp $  */
+/*     $OpenBSD: vmmvar.h,v 1.89 2023/01/30 02:32:01 dv Exp $  */
 /*
  * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
  *
@@ -957,6 +957,9 @@ struct vcpu {
        /* Shadowed MSRs */
        uint64_t vc_shadow_pat;                 /* [v] */
 
+       /* Userland Protection Keys */
+       uint32_t vc_pkru;                       /* [v] */
+
        /* VMX only (all requiring [v]) */
        uint64_t vc_vmx_basic;
        uint64_t vc_vmx_entry_ctls;