vmm: add support for send / receive for AMD SVM hosts
author pd <pd@openbsd.org>
Fri, 2 Feb 2018 08:24:28 +0000 (08:24 +0000)
committer pd <pd@openbsd.org>
Fri, 2 Feb 2018 08:24:28 +0000 (08:24 +0000)
ok mlarkin@

sys/arch/amd64/amd64/vmm.c

index a689ddb..f5a6fc0 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: vmm.c,v 1.182 2017/12/19 18:06:41 patrick Exp $       */
+/*     $OpenBSD: vmm.c,v 1.183 2018/02/02 08:24:28 pd Exp $    */
 /*
  * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
  *
@@ -1492,12 +1492,128 @@ out:
 /*
  * vcpu_readregs_svm
  *
- * XXX - unimplemented
+ * Reads 'vcpu's registers
+ *
+ * Parameters:
+ *  vcpu: the vcpu to read register values from
+ *  regmask: the types of registers to read
+ *  vrs: output parameter where register values are stored
+ *
+ * Return values:
+ *  0: if successful
  */
 int
 vcpu_readregs_svm(struct vcpu *vcpu, uint64_t regmask,
-    struct vcpu_reg_state *regs)
+    struct vcpu_reg_state *vrs)
 {
+       uint64_t *gprs = vrs->vrs_gprs;
+       uint64_t *crs = vrs->vrs_crs;
+       uint64_t *msrs = vrs->vrs_msrs;
+       uint32_t attr;
+       struct vcpu_segment_info *sregs = vrs->vrs_sregs;
+       struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
+
+       if (regmask & VM_RWREGS_GPRS) {
+               gprs[VCPU_REGS_RAX] = vcpu->vc_gueststate.vg_rax;
+               gprs[VCPU_REGS_RBX] = vcpu->vc_gueststate.vg_rbx;
+               gprs[VCPU_REGS_RCX] = vcpu->vc_gueststate.vg_rcx;
+               gprs[VCPU_REGS_RDX] = vcpu->vc_gueststate.vg_rdx;
+               gprs[VCPU_REGS_RSI] = vcpu->vc_gueststate.vg_rsi;
+               gprs[VCPU_REGS_RDI] = vcpu->vc_gueststate.vg_rdi;
+               gprs[VCPU_REGS_R8] = vcpu->vc_gueststate.vg_r8;
+               gprs[VCPU_REGS_R9] = vcpu->vc_gueststate.vg_r9;
+               gprs[VCPU_REGS_R10] = vcpu->vc_gueststate.vg_r10;
+               gprs[VCPU_REGS_R11] = vcpu->vc_gueststate.vg_r11;
+               gprs[VCPU_REGS_R12] = vcpu->vc_gueststate.vg_r12;
+               gprs[VCPU_REGS_R13] = vcpu->vc_gueststate.vg_r13;
+               gprs[VCPU_REGS_R14] = vcpu->vc_gueststate.vg_r14;
+               gprs[VCPU_REGS_R15] = vcpu->vc_gueststate.vg_r15;
+               gprs[VCPU_REGS_RBP] = vcpu->vc_gueststate.vg_rbp;
+               gprs[VCPU_REGS_RIP] = vmcb->v_rip;
+               gprs[VCPU_REGS_RSP] = vmcb->v_rsp;
+               gprs[VCPU_REGS_RFLAGS] = vmcb->v_rflags;
+       }
+
+       if (regmask & VM_RWREGS_SREGS) {
+               sregs[VCPU_REGS_CS].vsi_sel = vmcb->v_cs.vs_sel;
+               sregs[VCPU_REGS_CS].vsi_limit = vmcb->v_cs.vs_lim;
+               attr = vmcb->v_cs.vs_attr;
+               sregs[VCPU_REGS_CS].vsi_ar = (attr & 0xff) | ((attr << 4) &
+                   0xf000);
+               sregs[VCPU_REGS_CS].vsi_base = vmcb->v_cs.vs_base;
+
+               sregs[VCPU_REGS_DS].vsi_sel = vmcb->v_ds.vs_sel;
+               sregs[VCPU_REGS_DS].vsi_limit = vmcb->v_ds.vs_lim;
+               attr = vmcb->v_ds.vs_attr;
+               sregs[VCPU_REGS_DS].vsi_ar = (attr & 0xff) | ((attr << 4) &
+                   0xf000);
+               sregs[VCPU_REGS_DS].vsi_base = vmcb->v_ds.vs_base;
+
+               sregs[VCPU_REGS_ES].vsi_sel = vmcb->v_es.vs_sel;
+               sregs[VCPU_REGS_ES].vsi_limit = vmcb->v_es.vs_lim;
+               attr = vmcb->v_es.vs_attr;
+               sregs[VCPU_REGS_ES].vsi_ar = (attr & 0xff) | ((attr << 4) &
+                   0xf000);
+               sregs[VCPU_REGS_ES].vsi_base = vmcb->v_es.vs_base;
+
+               sregs[VCPU_REGS_FS].vsi_sel = vmcb->v_fs.vs_sel;
+               sregs[VCPU_REGS_FS].vsi_limit = vmcb->v_fs.vs_lim;
+               attr = vmcb->v_fs.vs_attr;
+               sregs[VCPU_REGS_FS].vsi_ar = (attr & 0xff) | ((attr << 4) &
+                   0xf000);
+               sregs[VCPU_REGS_FS].vsi_base = vmcb->v_fs.vs_base;
+
+               sregs[VCPU_REGS_GS].vsi_sel = vmcb->v_gs.vs_sel;
+               sregs[VCPU_REGS_GS].vsi_limit = vmcb->v_gs.vs_lim;
+               attr = vmcb->v_gs.vs_attr;
+               sregs[VCPU_REGS_GS].vsi_ar = (attr & 0xff) | ((attr << 4) &
+                   0xf000);
+               sregs[VCPU_REGS_GS].vsi_base = vmcb->v_gs.vs_base;
+
+               sregs[VCPU_REGS_SS].vsi_sel = vmcb->v_ss.vs_sel;
+               sregs[VCPU_REGS_SS].vsi_limit = vmcb->v_ss.vs_lim;
+               attr = vmcb->v_ss.vs_attr;
+               sregs[VCPU_REGS_SS].vsi_ar = (attr & 0xff) | ((attr << 4) &
+                   0xf000);
+               sregs[VCPU_REGS_SS].vsi_base = vmcb->v_ss.vs_base;
+
+               sregs[VCPU_REGS_LDTR].vsi_sel = vmcb->v_ldtr.vs_sel;
+               sregs[VCPU_REGS_LDTR].vsi_limit = vmcb->v_ldtr.vs_lim;
+               attr = vmcb->v_ldtr.vs_attr;
+               sregs[VCPU_REGS_LDTR].vsi_ar = (attr & 0xff) | ((attr << 4)
+                   & 0xf000);
+               sregs[VCPU_REGS_LDTR].vsi_base = vmcb->v_ldtr.vs_base;
+
+               sregs[VCPU_REGS_TR].vsi_sel = vmcb->v_tr.vs_sel;
+               sregs[VCPU_REGS_TR].vsi_limit = vmcb->v_tr.vs_lim;
+               attr = vmcb->v_tr.vs_attr;
+               sregs[VCPU_REGS_TR].vsi_ar = (attr & 0xff) | ((attr << 4) &
+                   0xf000);
+               sregs[VCPU_REGS_TR].vsi_base = vmcb->v_tr.vs_base;
+
+               vrs->vrs_gdtr.vsi_limit = vmcb->v_gdtr.vs_lim;
+               vrs->vrs_gdtr.vsi_base = vmcb->v_gdtr.vs_base;
+               vrs->vrs_idtr.vsi_limit = vmcb->v_idtr.vs_lim;
+               vrs->vrs_idtr.vsi_base = vmcb->v_idtr.vs_base;
+       }
+
+       if (regmask & VM_RWREGS_CRS) {
+               crs[VCPU_REGS_CR0] = vmcb->v_cr0;
+               crs[VCPU_REGS_CR3] = vmcb->v_cr3;
+               crs[VCPU_REGS_CR4] = vmcb->v_cr4;
+               crs[VCPU_REGS_CR2] = vcpu->vc_gueststate.vg_cr2;
+               crs[VCPU_REGS_XCR0] = vcpu->vc_gueststate.vg_xcr0;
+       }
+
+       if (regmask & VM_RWREGS_MSRS) {
+                msrs[VCPU_REGS_EFER] = vmcb->v_efer;
+                msrs[VCPU_REGS_STAR] = vmcb->v_star;
+                msrs[VCPU_REGS_LSTAR] = vmcb->v_lstar;
+                msrs[VCPU_REGS_CSTAR] = vmcb->v_cstar;
+                msrs[VCPU_REGS_SFMASK] = vmcb->v_sfmask;
+                msrs[VCPU_REGS_KGSBASE] = vmcb->v_kgsbase;
+       }
+
        return (0);
 }
 
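The access-rights expression repeated for each segment above unpacks SVM's
attribute encoding: the VMCB stores the descriptor's access byte (type, S,
DPL, P) in bits 0-7 and the flags nibble (AVL, L, D/B, G) in bits 8-11, while
struct vcpu_segment_info keeps the VMX-style layout with that nibble at bits
12-15. A minimal sketch of the mapping; the helper name svm_attr_to_ar is
chosen here for illustration only, the diff open-codes the expression instead:

	/* Expand a VMCB segment attribute into VMX-style access rights. */
	static inline uint32_t
	svm_attr_to_ar(uint16_t attr)
	{
		/* bits 0-7 (type, S, DPL, P) stay in place;       */
		/* bits 8-11 (AVL, L, D/B, G) move up to bits 12-15 */
		return ((attr & 0xff) | (((uint32_t)attr << 4) & 0xf000));
	}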
@@ -1725,11 +1841,18 @@ vcpu_writeregs_svm(struct vcpu *vcpu, uint64_t regmask,
                vmcb->v_cr0 = crs[VCPU_REGS_CR0];
                vmcb->v_cr3 = crs[VCPU_REGS_CR3];
                vmcb->v_cr4 = crs[VCPU_REGS_CR4];
+               vcpu->vc_gueststate.vg_cr2 = crs[VCPU_REGS_CR2];
+               vcpu->vc_gueststate.vg_xcr0 = crs[VCPU_REGS_XCR0];
        }
 
-       /* The only presettable MSR via this function in SVM is EFER */
-       if (regmask & VM_RWREGS_MSRS)
+       if (regmask & VM_RWREGS_MSRS) {
                vmcb->v_efer |= msrs[VCPU_REGS_EFER];
+               vmcb->v_star = msrs[VCPU_REGS_STAR];
+               vmcb->v_lstar = msrs[VCPU_REGS_LSTAR];
+               vmcb->v_cstar = msrs[VCPU_REGS_CSTAR];
+               vmcb->v_sfmask = msrs[VCPU_REGS_SFMASK];
+               vmcb->v_kgsbase = msrs[VCPU_REGS_KGSBASE];
+       }
 
        return (0);
 }
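Taken together, the two functions give the send / receive path a full
round-trip of guest register state on SVM hosts: the sending side fills a
struct vcpu_reg_state via vcpu_readregs_svm(), the state is shipped to the
destination, and vcpu_writeregs_svm() loads it into the new VMCB. A rough
caller-side sketch, assuming a hypothetical transport function send_vrs();
the real plumbing lives in the vmm ioctl path and vmd and is not part of
this diff:

	struct vcpu_reg_state vrs;
	uint64_t mask = VM_RWREGS_GPRS | VM_RWREGS_SREGS |
	    VM_RWREGS_CRS | VM_RWREGS_MSRS;

	/* sending host: snapshot every register class */
	if (vcpu_readregs_svm(vcpu, mask, &vrs))
		return (EINVAL);
	send_vrs(&vrs);		/* hypothetical transport step */

	/* receiving host: restore the same state into the new vcpu */
	if (vcpu_writeregs_svm(new_vcpu, mask, &vrs))
		return (EINVAL);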