sanity check vm create and run args earlier
author	mlarkin <mlarkin@openbsd.org>
Thu, 7 Jul 2016 00:58:31 +0000 (00:58 +0000)
committer	mlarkin <mlarkin@openbsd.org>
Thu, 7 Jul 2016 00:58:31 +0000 (00:58 +0000)
usr.sbin/vmd/virtio.c
usr.sbin/vmd/vmm.c
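
The vmm.c half of this change amounts to validating the vm_create_params counts up front, before they are used for allocations or handed to the kernel. Below is a small standalone sketch of that pattern; the MAX_* limits and the struct are simplified stand-ins for vmm.h's VMM_MAX_* constants and struct vm_create_params, not the real definitions.

/*
 * Standalone sketch of the "validate before use" checks added to
 * vmm_create_vm() and run_vm().  Simplified stand-in types/limits,
 * not vmd's real vm_create_params or VMM_MAX_* values.
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_VCPUS	4
#define MAX_DISKS	2
#define MAX_NICS	2
#define MAX_MEMRANGES	16

struct create_params {
	size_t	ncpus, ndisks, nnics, nmemranges;
};

static int
check_params(const struct create_params *p, const int *disks, const int *taps)
{
	if (p == NULL)
		return (EINVAL);
	if (disks == NULL && p->ndisks != 0)
		return (EINVAL);
	if (taps == NULL && p->nnics != 0)
		return (EINVAL);
	if (p->ncpus > MAX_VCPUS || p->ndisks > MAX_DISKS ||
	    p->nnics > MAX_NICS)
		return (EINVAL);
	if (p->nmemranges == 0 || p->nmemranges > MAX_MEMRANGES)
		return (EINVAL);
	return (0);
}

int
main(void)
{
	struct create_params ok = { 1, 1, 1, 1 };
	struct create_params bad = { 99, 0, 0, 1 };
	int disk = 3, tap = 4;

	printf("ok:  %d\n", check_params(&ok, &disk, &tap));	/* 0 */
	printf("bad: %d\n", check_params(&bad, NULL, NULL));	/* EINVAL */
	return (0);
}

Rejecting bad counts before the ncpus-sized malloc() calls in run_vm() addresses what the removed XXX comments were asking for (capping vcp_ncpus so the allocation sizes cannot be oversized).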

diff --git a/usr.sbin/vmd/virtio.c b/usr.sbin/vmd/virtio.c
index 8185c89..516af7e 100644
--- a/usr.sbin/vmd/virtio.c
+++ b/usr.sbin/vmd/virtio.c
@@ -1,4 +1,4 @@
-/*     $OpenBSD: virtio.c,v 1.13 2016/07/04 23:03:52 mlarkin Exp $     */
+/*     $OpenBSD: virtio.c,v 1.14 2016/07/07 00:58:31 mlarkin Exp $     */
 
 /*
  * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org>
@@ -1148,96 +1148,100 @@ virtio_init(struct vm_create_params *vcp, int *child_disks, int *child_taps)
            sizeof(struct vring_desc) * VIORND_QUEUE_SIZE
            + sizeof(uint16_t) * (2 + VIORND_QUEUE_SIZE));
 
-
-       vioblk = malloc(sizeof(struct vioblk_dev) * vcp->vcp_ndisks);
-       if (vioblk == NULL) {
-               log_warn("%s: malloc failure allocating vioblks",
-                   __progname);
-               return;
-       }
-
-       memset(vioblk, 0, sizeof(struct vioblk_dev) * vcp->vcp_ndisks);
-
-       /* One virtio block device for each disk defined in vcp */
-       for (i = 0; i < vcp->vcp_ndisks; i++) {
-               if ((sz = lseek(child_disks[i], 0, SEEK_END)) == -1)
-                       continue;
-
-               if (pci_add_device(&id, PCI_VENDOR_QUMRANET,
-                   PCI_PRODUCT_QUMRANET_VIO_BLOCK, PCI_CLASS_MASS_STORAGE,
-                   PCI_SUBCLASS_MASS_STORAGE_SCSI,
-                   PCI_VENDOR_OPENBSD,
-                   PCI_PRODUCT_VIRTIO_BLOCK, 1, NULL)) {
-                       log_warnx("%s: can't add PCI virtio block "
-                           "device", __progname);
-                       return;
-               }
-               if (pci_add_bar(id, PCI_MAPREG_TYPE_IO, virtio_blk_io,
-                   &vioblk[i])) {
-                       log_warnx("%s: can't add bar for virtio block "
-                           "device", __progname);
+       if (vcp->vcp_ndisks > 0) {
+               vioblk = malloc(sizeof(struct vioblk_dev) * vcp->vcp_ndisks);
+               if (vioblk == NULL) {
+                       log_warn("%s: malloc failure allocating vioblks",
+                           __progname);
                        return;
                }
-               vioblk[i].vq[0].qs = VIOBLK_QUEUE_SIZE;
-               vioblk[i].vq[0].vq_availoffset = sizeof(struct vring_desc) *
-                   VIORND_QUEUE_SIZE;
-               vioblk[i].vq[0].vq_usedoffset = VIRTQUEUE_ALIGN(
-                   sizeof(struct vring_desc) * VIOBLK_QUEUE_SIZE
-                   + sizeof(uint16_t) * (2 + VIOBLK_QUEUE_SIZE));
-               vioblk[i].vq[0].last_avail = 0;
-               vioblk[i].fd = child_disks[i];
-               vioblk[i].sz = sz / 512;
-       }
 
-       vionet = malloc(sizeof(struct vionet_dev) * vcp->vcp_nnics);
-       if (vionet == NULL) {
-               log_warn("%s: malloc failure allocating vionets",
-                   __progname);
-               return;
+               memset(vioblk, 0, sizeof(struct vioblk_dev) * vcp->vcp_ndisks);
+
+               /* One virtio block device for each disk defined in vcp */
+               for (i = 0; i < vcp->vcp_ndisks; i++) {
+                       if ((sz = lseek(child_disks[i], 0, SEEK_END)) == -1)
+                               continue;
+
+                       if (pci_add_device(&id, PCI_VENDOR_QUMRANET,
+                           PCI_PRODUCT_QUMRANET_VIO_BLOCK,
+                           PCI_CLASS_MASS_STORAGE,
+                           PCI_SUBCLASS_MASS_STORAGE_SCSI,
+                           PCI_VENDOR_OPENBSD,
+                           PCI_PRODUCT_VIRTIO_BLOCK, 1, NULL)) {
+                               log_warnx("%s: can't add PCI virtio block "
+                                   "device", __progname);
+                               return;
+                       }
+                       if (pci_add_bar(id, PCI_MAPREG_TYPE_IO, virtio_blk_io,
+                           &vioblk[i])) {
+                               log_warnx("%s: can't add bar for virtio block "
+                                   "device", __progname);
+                               return;
+                       }
+                       vioblk[i].vq[0].qs = VIOBLK_QUEUE_SIZE;
+                       vioblk[i].vq[0].vq_availoffset =
+                           sizeof(struct vring_desc) * VIORND_QUEUE_SIZE;
+                       vioblk[i].vq[0].vq_usedoffset = VIRTQUEUE_ALIGN(
+                           sizeof(struct vring_desc) * VIOBLK_QUEUE_SIZE
+                           + sizeof(uint16_t) * (2 + VIOBLK_QUEUE_SIZE));
+                       vioblk[i].vq[0].last_avail = 0;
+                       vioblk[i].fd = child_disks[i];
+                       vioblk[i].sz = sz / 512;
+               }
        }
 
-       memset(vionet, 0, sizeof(struct vionet_dev) * vcp->vcp_nnics);
-
-       nr_vionet = vcp->vcp_nnics;
-       /* Virtio network */
-       for (i = 0; i < vcp->vcp_nnics; i++) {
-               if (pci_add_device(&id, PCI_VENDOR_QUMRANET,
-                   PCI_PRODUCT_QUMRANET_VIO_NET, PCI_CLASS_SYSTEM,
-                   PCI_SUBCLASS_SYSTEM_MISC,
-                   PCI_VENDOR_OPENBSD,
-                   PCI_PRODUCT_VIRTIO_NETWORK, 1, NULL)) {
-                       log_warnx("%s: can't add PCI virtio net device",
+       if (vcp->vcp_nnics > 0) {
+               vionet = malloc(sizeof(struct vionet_dev) * vcp->vcp_nnics);
+               if (vionet == NULL) {
+                       log_warn("%s: malloc failure allocating vionets",
                            __progname);
                        return;
                }
 
-               if (pci_add_bar(id, PCI_MAPREG_TYPE_IO, virtio_net_io,
-                   &vionet[i])) {
-                       log_warnx("%s: can't add bar for virtio net "
-                           "device", __progname);
-                       return;
-               }
+               memset(vionet, 0, sizeof(struct vionet_dev) * vcp->vcp_nnics);
+
+               nr_vionet = vcp->vcp_nnics;
+               /* Virtio network */
+               for (i = 0; i < vcp->vcp_nnics; i++) {
+                       if (pci_add_device(&id, PCI_VENDOR_QUMRANET,
+                           PCI_PRODUCT_QUMRANET_VIO_NET, PCI_CLASS_SYSTEM,
+                           PCI_SUBCLASS_SYSTEM_MISC,
+                           PCI_VENDOR_OPENBSD,
+                           PCI_PRODUCT_VIRTIO_NETWORK, 1, NULL)) {
+                               log_warnx("%s: can't add PCI virtio net device",
+                                   __progname);
+                               return;
+                       }
 
-               vionet[i].vq[0].qs = VIONET_QUEUE_SIZE;
-               vionet[i].vq[0].vq_availoffset = sizeof(struct vring_desc) *
-                   VIONET_QUEUE_SIZE;
-               vionet[i].vq[0].vq_usedoffset = VIRTQUEUE_ALIGN(
-                   sizeof(struct vring_desc) * VIONET_QUEUE_SIZE
-                   + sizeof(uint16_t) * (2 + VIONET_QUEUE_SIZE));
-               vionet[i].vq[0].last_avail = 0;
-               vionet[i].vq[1].qs = VIONET_QUEUE_SIZE;
-               vionet[i].vq[1].vq_availoffset = sizeof(struct vring_desc) *
-                   VIONET_QUEUE_SIZE;
-               vionet[i].vq[1].vq_usedoffset = VIRTQUEUE_ALIGN(
-                   sizeof(struct vring_desc) * VIONET_QUEUE_SIZE
-                   + sizeof(uint16_t) * (2 + VIONET_QUEUE_SIZE));
-               vionet[i].vq[1].last_avail = 0;
-               vionet[i].fd = child_taps[i];
+                       if (pci_add_bar(id, PCI_MAPREG_TYPE_IO, virtio_net_io,
+                           &vionet[i])) {
+                               log_warnx("%s: can't add bar for virtio net "
+                                   "device", __progname);
+                               return;
+                       }
+
+                       vionet[i].vq[0].qs = VIONET_QUEUE_SIZE;
+                       vionet[i].vq[0].vq_availoffset =
+                           sizeof(struct vring_desc) * VIONET_QUEUE_SIZE;
+                       vionet[i].vq[0].vq_usedoffset = VIRTQUEUE_ALIGN(
+                           sizeof(struct vring_desc) * VIONET_QUEUE_SIZE
+                           + sizeof(uint16_t) * (2 + VIONET_QUEUE_SIZE));
+                       vionet[i].vq[0].last_avail = 0;
+                       vionet[i].vq[1].qs = VIONET_QUEUE_SIZE;
+                       vionet[i].vq[1].vq_availoffset =
+                           sizeof(struct vring_desc) * VIONET_QUEUE_SIZE;
+                       vionet[i].vq[1].vq_usedoffset = VIRTQUEUE_ALIGN(
+                           sizeof(struct vring_desc) * VIONET_QUEUE_SIZE
+                           + sizeof(uint16_t) * (2 + VIONET_QUEUE_SIZE));
+                       vionet[i].vq[1].last_avail = 0;
+                       vionet[i].fd = child_taps[i];
 
 #if 0
-               /* User defined MAC */
-               vionet[i].cfg.device_feature = VIRTIO_NET_F_MAC;
-               bcopy(&vcp->vcp_macs[i], &vionet[i].mac, 6);
+                       /* User defined MAC */
+                       vionet[i].cfg.device_feature = VIRTIO_NET_F_MAC;
+                       bcopy(&vcp->vcp_macs[i], &vionet[i].mac, 6);
 #endif
+               }
        }
 }
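
The virtio.c hunk above is mostly re-indentation: the disk and NIC setup is wrapped in if (vcp->vcp_ndisks > 0) and if (vcp->vcp_nnics > 0), so a VM with no disks or interfaces never allocates zero-length device arrays or runs the PCI/virtqueue setup loops. A small illustrative sketch of that guard follows; the names are made up for the example, not vmd's.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev {
	int	fd;
};

/* Allocate and zero per-device state only when devices were requested. */
static struct dev *
alloc_devs(size_t n)
{
	struct dev *d;

	if (n == 0)
		return (NULL);	/* nothing configured: skip malloc entirely */
	if ((d = malloc(sizeof(*d) * n)) == NULL)
		return (NULL);
	memset(d, 0, sizeof(*d) * n);
	return (d);
}

int
main(void)
{
	struct dev *d;

	printf("0 devs: %s\n", alloc_devs(0) ? "allocated" : "skipped");
	d = alloc_devs(2);
	printf("2 devs: %s\n", d ? "allocated" : "failed");
	free(d);
	return (0);
}
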
diff --git a/usr.sbin/vmd/vmm.c b/usr.sbin/vmd/vmm.c
index ba54944..f8f37a2 100644
--- a/usr.sbin/vmd/vmm.c
+++ b/usr.sbin/vmd/vmm.c
@@ -1,4 +1,4 @@
-/*     $OpenBSD: vmm.c,v 1.29 2016/07/06 07:09:15 mlarkin Exp $        */
+/*     $OpenBSD: vmm.c,v 1.30 2016/07/07 00:58:31 mlarkin Exp $        */
 
 /*
  * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org>
@@ -785,6 +785,9 @@ vmm_create_vm(struct vm_create_params *vcp)
        if (vcp->vcp_ndisks > VMM_MAX_DISKS_PER_VM)
                return (EINVAL);
 
+       if (vcp->vcp_nnics > VMM_MAX_NICS_PER_VM)
+               return (EINVAL);
+
        if (ioctl(env->vmd_fd, VMM_IOC_CREATE, vcp) < 0)
                return (errno);
 
@@ -862,13 +865,30 @@ run_vm(int *child_disks, int *child_taps, struct vm_create_params *vcp,
        struct vm_run_params **vrp;
        struct vm_terminate_params vtp;
 
+       if (vcp == NULL)
+               return (EINVAL);
+
+       if (child_disks == NULL && vcp->vcp_ndisks != 0)
+               return (EINVAL);
+
+       if (child_taps == NULL && vcp->vcp_nnics != 0)
+               return (EINVAL);
+
+       if (vcp->vcp_ncpus > VMM_MAX_VCPUS_PER_VM)
+               return (EINVAL);
+
+       if (vcp->vcp_ndisks > VMM_MAX_DISKS_PER_VM)
+               return (EINVAL);
+
+       if (vcp->vcp_nnics > VMM_MAX_NICS_PER_VM)
+               return (EINVAL);
+
+       if (vcp->vcp_nmemranges == 0 ||
+           vcp->vcp_nmemranges > VMM_MAX_MEM_RANGES)
+               return (EINVAL);
+
        ret = 0;
 
-       /* XXX cap vcp_ncpus to avoid overflow here */
-       /*
-        * XXX ensure nvcpus in vcp is same as vm, or fix vmm to return einval
-        * on bad vcpu id
-        */
        tid = malloc(sizeof(pthread_t) * vcp->vcp_ncpus);
        vrp = malloc(sizeof(struct vm_run_params *) * vcp->vcp_ncpus);
        if (tid == NULL || vrp == NULL) {