-/* $OpenBSD: vioblk.c,v 1.4 2017/05/26 15:26:28 sf Exp $ */
+/* $OpenBSD: vioblk.c,v 1.5 2017/05/30 12:47:47 krw Exp $ */
/*
* Copyright (c) 2012 Stefan Fritsch.
int vr_len;
bus_dmamap_t vr_cmdsts;
bus_dmamap_t vr_payload;
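+ /* Linkage for sc_freelist and the fixed descriptor slot of this request */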
+ SLIST_ENTRY(virtio_blk_req) vr_list;
+ int vr_qe_index;
};
struct vioblk_softc {
struct scsi_adapter sc_switch;
struct scsi_link sc_link;
struct scsi_iopool sc_iopool;
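+ /* sc_vr_mtx protects sc_freelist, the pool of idle requests */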
+ struct mutex sc_vr_mtx;
+ SLIST_HEAD(, virtio_blk_req) sc_freelist;
int sc_notify_on_empty;
};
qsize = sc->sc_vq[0].vq_num;
sc->sc_vq[0].vq_done = vioblk_vq_done;
- if (vioblk_alloc_reqs(sc, qsize) < 0) {
- printf("\nCan't alloc reqs\n");
- goto err;
- }
if (features & VIRTIO_F_NOTIFY_ON_EMPTY) {
virtio_stop_vq_intr(vsc, &sc->sc_vq[0]);
sc->sc_switch.dev_probe = vioblk_dev_probe;
sc->sc_switch.dev_free = vioblk_dev_free;
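+ /* Set up the free list of requests and the mutex guarding it. */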
+ SLIST_INIT(&sc->sc_freelist);
+ mtx_init(&sc->sc_vr_mtx, IPL_BIO);
scsi_iopool_init(&sc->sc_iopool, sc, vioblk_req_get, vioblk_req_put);
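+ /*
+ * vioblk_alloc_reqs() returns the number of requests it could set
+ * up, which bounds the number of concurrent openings; 0 means the
+ * attach fails.
+ */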
+ sc->sc_link.openings = vioblk_alloc_reqs(sc, qsize);
+ if (sc->sc_link.openings == 0) {
+ printf("\nCan't alloc reqs\n");
+ goto err;
+ }
+
sc->sc_link.adapter = &sc->sc_switch;
sc->sc_link.pool = &sc->sc_iopool;
sc->sc_link.adapter_softc = self;
sc->sc_link.adapter_buswidth = 2;
sc->sc_link.luns = 1;
sc->sc_link.adapter_target = 2;
- sc->sc_link.openings = qsize;
DBGPRINT("; qsize: %d", qsize);
if (features & VIRTIO_BLK_F_RO)
sc->sc_link.flags |= SDEV_READONLY;
vioblk_req_get(void *cookie)
{
struct vioblk_softc *sc = cookie;
- struct virtqueue *vq = &sc->sc_vq[0];
struct virtio_blk_req *vr = NULL;
- int r, s, slot;
-
- s = splbio();
- r = virtio_enqueue_prep(vq, &slot);
- if (r) {
- DBGPRINT("virtio_enqueue_prep: %d, vq_num: %d, sc_queued: %d",
- r, vq->vq_num, sc->sc_queued);
- splx(s);
- return NULL;
- }
- vr = &sc->sc_reqs[slot];
- r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
- if (r) {
- DBGPRINT("virtio_enqueue_reserve: %d", r);
- splx(s);
- return NULL;
- }
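+ /*
+ * Requests are pre-allocated with their descriptors already
+ * reserved, so getting one is just a free-list pop.
+ */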
+ mtx_enter(&sc->sc_vr_mtx);
+ vr = SLIST_FIRST(&sc->sc_freelist);
+ if (vr != NULL)
+ SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
+ mtx_leave(&sc->sc_vr_mtx);
- splx(s);
+ DBGPRINT("vioblk_req_get: %p\n", vr);
return vr;
}
vioblk_req_put(void *cookie, void *io)
{
struct vioblk_softc *sc = cookie;
- struct virtqueue *vq = &sc->sc_vq[0];
struct virtio_blk_req *vr = io;
- int s, slot = vr - sc->sc_reqs;
-
- s = splbio();
- virtio_enqueue_trim(vq, slot, ALLOC_SEGS);
- virtio_dequeue_commit(vq, slot);
+ DBGPRINT("vioblk_req_put: %p\n", vr);
- splx(s);
+ mtx_enter(&sc->sc_vr_mtx);
+ /*
+ * Do *NOT* call virtio_dequeue_commit()!
+ *
+ * Descriptors are permanently associated with the virtio_blk_req and
+ * must not be returned to the virtqueue; only the request itself
+ * goes back on sc_freelist.
+ */
+ SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
+ mtx_leave(&sc->sc_vr_mtx);
}
int
{
struct virtio_softc *vsc = vq->vq_owner;
struct vioblk_softc *sc = (struct vioblk_softc *)vsc->sc_child;
+ struct vq_entry *qe;
int slot;
int ret = 0;
if (virtio_dequeue(vsc, vq, &slot, NULL) != 0)
break;
}
- vioblk_vq_done1(sc, vsc, vq, slot);
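+ /* The ring slot is not the request index; map it via qe_vr_index. */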
+ qe = &vq->vq_entries[slot];
+ vioblk_vq_done1(sc, vsc, vq, qe->qe_vr_index);
ret = 1;
}
return ret;
s = splbio();
vr = xs->io;
- slot = vr - sc->sc_reqs;
+ slot = vr->vr_qe_index;
if (operation != VIRTIO_BLK_T_FLUSH) {
len = MIN(xs->datalen, sector_count * VIRTIO_BLK_SECTOR_SIZE);
ret = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
int
vioblk_alloc_reqs(struct vioblk_softc *sc, int qsize)
{
- int allocsize, r, rsegs, i;
+ struct virtqueue *vq = &sc->sc_vq[0];
+ struct vring_desc *vd;
+ int allocsize, nreqs, r, rsegs, slot, i;
void *vaddr;
- allocsize = sizeof(struct virtio_blk_req) * qsize;
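+ /*
+ * Without indirect descriptors each request permanently claims
+ * ALLOC_SEGS ring descriptors, so only qsize / ALLOC_SEGS requests
+ * fit in the ring.
+ */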
+ if (vq->vq_indirect != NULL)
+ nreqs = qsize;
+ else
+ nreqs = qsize / ALLOC_SEGS;
+
+ allocsize = sizeof(struct virtio_blk_req) * nreqs;
r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
&sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
if (r != 0) {
}
sc->sc_reqs = vaddr;
memset(vaddr, 0, allocsize);
- for (i = 0; i < qsize; i++) {
+ for (i = 0; i < nreqs; i++) {
+ /*
+ * Assign descriptors and create the DMA maps for each
+ * allocated request.
+ */
struct virtio_blk_req *vr = &sc->sc_reqs[i];
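+ /*
+ * Reserve a slot and its descriptor chain once, for the life of
+ * the request; on failure, return how many requests succeeded.
+ */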
+ r = virtio_enqueue_prep(vq, &slot);
+ if (r == 0)
+ r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
+ if (r != 0)
+ return i;
+
+ if (vq->vq_indirect == NULL) {
+ /*
+ * The reserved slots must be a contiguous block
+ * starting at vq_desc[slot].
+ */
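+ /* E.g. slot 4 with ALLOC_SEGS 3 must chain 4 -> 5 -> 6. */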
+ vd = &vq->vq_desc[slot];
+ for (r = 0; r < ALLOC_SEGS - 1; r++) {
+ DBGPRINT("vd[%d].next = %d should be %d\n",
+ r, vd[r].next, (slot + r + 1));
+ if (vd[r].next != (slot + r + 1))
+ return i;
+ }
+ if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
+ return i;
+ DBGPRINT("Reserved slots are contiguous as required!\n");
+ }
+
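+ /* Cross-link: the request records its slot, the slot its request. */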
+ vr->vr_qe_index = slot;
+ vq->vq_entries[slot].qe_vr_index = i;
vr->vr_len = VIOBLK_DONE;
+
r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
offsetof(struct virtio_blk_req, vr_xs), 1,
offsetof(struct virtio_blk_req, vr_xs), 0,
BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_cmdsts);
if (r != 0) {
printf("cmd dmamap creation failed, err %d\n", r);
+ nreqs = i;
goto err_reqs;
}
r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
BUS_DMA_NOWAIT);
if (r != 0) {
printf("command dmamap load failed, err %d\n", r);
+ nreqs = i;
goto err_reqs;
}
r = bus_dmamap_create(sc->sc_virtio->sc_dmat, MAX_XFER,
&vr->vr_payload);
if (r != 0) {
printf("payload dmamap creation failed, err %d\n", r);
+ nreqs = i;
goto err_reqs;
}
+ SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
}
- return 0;
+ return nreqs;
err_reqs:
- for (i = 0; i < qsize; i++) {
+ for (i = 0; i < nreqs; i++) {
struct virtio_blk_req *vr = &sc->sc_reqs[i];
if (vr->vr_cmdsts) {
bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
err_dmamem_alloc:
bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1);
err_none:
- return -1;
+ return 0;
}
-/* $OpenBSD: vioscsi.c,v 1.9 2017/05/27 08:35:55 krw Exp $ */
+/* $OpenBSD: vioscsi.c,v 1.10 2017/05/30 12:47:47 krw Exp $ */
/*
* Copyright (c) 2013 Google Inc.
*
enum { vioscsi_debug = 0 };
#define DPRINTF(f...) do { if (vioscsi_debug) printf(f); } while (0)
-#define STATE_ASSERT(vr, want) do { \
- if (vr->vr_state != want) { \
- panic("%s:%d: vr_state is %d should be %d\n", \
- __func__, __LINE__, vr->vr_state, want); \
- } \
- } while (0)
-
-
#define MAX_XFER MAX(MAXPHYS,MAXBSIZE)
/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX (MAX_XFER/PAGE_SIZE + 1)
/* In the virtqueue, we need space for header and footer, too */
#define ALLOC_SEGS (SEG_MAX + 2)
-enum vioscsi_req_state { FREE, ALLOC, INQUEUE, DONE };
-
struct vioscsi_req {
struct virtio_scsi_req_hdr vr_req;
struct virtio_scsi_res_hdr vr_res;
struct scsi_xfer *vr_xs;
bus_dmamap_t vr_control;
bus_dmamap_t vr_data;
- enum vioscsi_req_state vr_state;
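+ /* Linkage for sc_freelist and the fixed descriptor slot of this request */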
+ SLIST_ENTRY(vioscsi_req) vr_list;
+ int vr_qe_index;
};
struct vioscsi_softc {
struct scsi_link sc_link;
struct scsibus *sc_scsibus;
struct scsi_iopool sc_iopool;
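+ /* sc_vr_mtx protects sc_freelist, the pool of idle requests */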
+ struct mutex sc_vr_mtx;
struct virtqueue sc_vqs[3];
struct vioscsi_req *sc_reqs;
bus_dma_segment_t sc_reqs_segs[1];
+ SLIST_HEAD(, vioscsi_req) sc_freelist;
};
int vioscsi_match(struct device *, void *, void *);
void vioscsi_attach(struct device *, struct device *, void *);
int vioscsi_alloc_reqs(struct vioscsi_softc *,
- struct virtio_softc *, int, uint32_t);
+ struct virtio_softc *, int);
void vioscsi_scsi_cmd(struct scsi_xfer *);
int vioscsi_vq_done(struct virtqueue *);
void vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
ALLOC_SEGS, vioscsi_vq_names[i]);
if (rv) {
printf(": failed to allocate virtqueue %d\n", i);
- return;
+ goto err;
}
sc->sc_vqs[i].vq_done = vioscsi_vq_done;
}
int qsize = sc->sc_vqs[2].vq_num;
printf(": qsize %d\n", qsize);
- if (vioscsi_alloc_reqs(sc, vsc, qsize, seg_max))
- return;
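+ /* Set up the free list of requests and the mutex guarding it. */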
+ SLIST_INIT(&sc->sc_freelist);
+ mtx_init(&sc->sc_vr_mtx, IPL_BIO);
scsi_iopool_init(&sc->sc_iopool, sc, vioscsi_req_get, vioscsi_req_put);
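+ /*
+ * vioscsi_alloc_reqs() returns the number of requests it could set
+ * up; cap the openings at cmd_per_lun, and fail the attach if no
+ * requests could be allocated.
+ */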
+ sc->sc_link.openings = vioscsi_alloc_reqs(sc, vsc, qsize);
+ if (sc->sc_link.openings == 0) {
+ printf("\nCan't alloc reqs\n");
+ goto err;
+ } else if (sc->sc_link.openings > cmd_per_lun)
+ sc->sc_link.openings = cmd_per_lun;
+
sc->sc_link.adapter = &vioscsi_switch;
sc->sc_link.adapter_softc = sc;
sc->sc_link.adapter_target = max_target;
sc->sc_link.adapter_buswidth = max_target;
- sc->sc_link.openings = cmd_per_lun;
sc->sc_link.pool = &sc->sc_iopool;
bzero(&saa, sizeof(saa));
struct vioscsi_req *vr = xs->io;
struct virtio_scsi_req_hdr *req = &vr->vr_req;
struct virtqueue *vq = &sc->sc_vqs[2];
- int slot = vr - sc->sc_reqs;
+ int slot = vr->vr_qe_index;
DPRINTF("vioscsi_scsi_cmd: enter\n");
- STATE_ASSERT(vr, ALLOC);
// TODO(matthew): Support bidirectional SCSI commands?
if ((xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
virtio_enqueue(vq, slot, vr->vr_data, 0);
virtio_enqueue_commit(vsc, vq, slot, 1);
- vr->vr_state = INQUEUE;
if (ISSET(xs->flags, SCSI_POLL)) {
DPRINTF("vioscsi_scsi_cmd: polling...\n");
struct vioscsi_req *vr)
{
struct scsi_xfer *xs = vr->vr_xs;
- STATE_ASSERT(vr, INQUEUE);
DPRINTF("vioscsi_req_done: enter vr: %p xs: %p\n", vr, xs);
int isread = !!(xs->flags & SCSI_DATA_IN);
done:
vr->vr_xs = NULL;
- vr->vr_state = DONE;
scsi_done(xs);
}
{
struct virtio_softc *vsc = vq->vq_owner;
struct vioscsi_softc *sc = (struct vioscsi_softc *)vsc->sc_child;
+ struct vq_entry *qe;
+ struct vioscsi_req *vr;
int ret = 0;
DPRINTF("vioscsi_vq_done: enter\n");
break;
DPRINTF("vioscsi_vq_done: slot=%d\n", slot);
- vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot]);
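+ /* The ring slot is not the request index; map it via qe_vr_index. */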
+ qe = &vq->vq_entries[slot];
+ vr = &sc->sc_reqs[qe->qe_vr_index];
+ vioscsi_req_done(sc, vsc, vr);
ret = 1;
}
vioscsi_req_get(void *cookie)
{
struct vioscsi_softc *sc = cookie;
- struct virtqueue *vq = &sc->sc_vqs[2];
struct vioscsi_req *vr = NULL;
- int r, s, slot;
-
- s = splbio();
- r = virtio_enqueue_prep(vq, &slot);
- if (r == 0)
- r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
- splx(s);
-
- if (r != 0)
- return NULL;
- vr = &sc->sc_reqs[slot];
- if (vr == NULL)
- return NULL;
- STATE_ASSERT(vr, FREE);
- vr->vr_req.id = slot;
- vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;
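+ /*
+ * Requests are pre-allocated with their descriptors already
+ * reserved, so getting one is just a free-list pop.
+ */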
+ mtx_enter(&sc->sc_vr_mtx);
+ vr = SLIST_FIRST(&sc->sc_freelist);
+ if (vr != NULL)
+ SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
+ mtx_leave(&sc->sc_vr_mtx);
- DPRINTF("vioscsi_req_get: %p, %d\n", vr, slot);
- vr->vr_state = ALLOC;
+ DPRINTF("vioscsi_req_get: %p\n", vr);
return (vr);
}
vioscsi_req_put(void *cookie, void *io)
{
struct vioscsi_softc *sc = cookie;
- struct virtqueue *vq = &sc->sc_vqs[2];
struct vioscsi_req *vr = io;
- int slot = vr - sc->sc_reqs;
- DPRINTF("vioscsi_req_put: %p, %d\n", vr, slot);
+ DPRINTF("vioscsi_req_put: %p\n", vr);
- int s = splbio();
- virtio_enqueue_trim(vq, slot, ALLOC_SEGS);
- if (vr->vr_state == DONE) {
- virtio_dequeue_commit(vq, slot);
- } else if (vr->vr_state != ALLOC) {
- panic("invalid vr_state[%d]: %d", slot, vr->vr_state);
- }
- vr->vr_state = FREE;
- splx(s);
+ mtx_enter(&sc->sc_vr_mtx);
+ /*
+ * Do *NOT* call virtio_dequeue_commit()!
+ *
+ * Descriptors are permanently associated with the vioscsi_req and
+ * must not be returned to the virtqueue; only the request itself
+ * goes back on sc_freelist.
+ */
+ SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
+ mtx_leave(&sc->sc_vr_mtx);
}
int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
- int qsize, uint32_t seg_max)
+ int qsize)
{
+ struct virtqueue *vq = &sc->sc_vqs[2];
struct vioscsi_req *vr;
+ struct vring_desc *vd;
size_t allocsize;
- int i, r, rsegs;
+ int i, r, nreqs, rsegs, slot;
void *vaddr;
- allocsize = qsize * sizeof(struct vioscsi_req);
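+ /*
+ * Without indirect descriptors each request permanently claims
+ * ALLOC_SEGS ring descriptors, so only qsize / ALLOC_SEGS requests
+ * fit in the ring.
+ */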
+ if (vq->vq_indirect != NULL)
+ nreqs = qsize;
+ else
+ nreqs = qsize / ALLOC_SEGS;
+
+ allocsize = nreqs * sizeof(struct vioscsi_req);
r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
&sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
if (r != 0) {
printf("bus_dmamem_alloc, size %zd, error %d\n",
allocsize, r);
- return 1;
+ return 0;
}
r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
if (r != 0) {
printf("bus_dmamem_map failed, error %d\n", r);
bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
- return 1;
+ return 0;
}
sc->sc_reqs = vaddr;
memset(vaddr, 0, allocsize);
- for (i = 0; i < qsize; i++) {
- /* Create the DMA maps for each allocated request */
+ for (i = 0; i < nreqs; i++) {
+ /*
+ * Assign descriptors and create the DMA maps for each
+ * allocated request.
+ */
vr = &sc->sc_reqs[i];
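+ /*
+ * Reserve a slot and its descriptor chain once, for the life of
+ * the request; on failure, return how many requests succeeded.
+ */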
+ r = virtio_enqueue_prep(vq, &slot);
+ if (r == 0)
+ r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
+ if (r != 0)
+ return i;
+
+ if (vq->vq_indirect == NULL) {
+ /*
+ * The reserved slots must be a contiguous block
+ * starting at vq_desc[slot].
+ */
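+ /* E.g. slot 4 with ALLOC_SEGS 3 must chain 4 -> 5 -> 6. */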
+ vd = &vq->vq_desc[slot];
+ for (r = 0; r < ALLOC_SEGS - 1; r++) {
+ DPRINTF("vd[%d].next = %d should be %d\n",
+ r, vd[r].next, (slot + r + 1));
+ if (vd[r].next != (slot + r + 1))
+ return i;
+ }
+ if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
+ return i;
+ DPRINTF("Reserved slots are contiguous as required!\n");
+ }
+
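+ /*
+ * Cross-link request and slot; the slot number doubles as the
+ * virtio-scsi request id.
+ */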
+ vr->vr_qe_index = slot;
+ vr->vr_req.id = slot;
+ vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;
+ vq->vq_entries[slot].qe_vr_index = i;
+
r = bus_dmamap_create(vsc->sc_dmat,
offsetof(struct vioscsi_req, vr_xs), 1,
offsetof(struct vioscsi_req, vr_xs), 0,
BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
if (r != 0) {
printf("bus_dmamap_create vr_control failed, error %d\n", r);
- return 1;
+ return i;
}
r = bus_dmamap_create(vsc->sc_dmat, MAX_XFER, SEG_MAX,
MAX_XFER, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
if (r != 0) {
printf("bus_dmamap_create vr_data failed, error %d\n", r );
- return 1;
+ return i;
}
r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
vr, offsetof(struct vioscsi_req, vr_xs), NULL,
BUS_DMA_NOWAIT);
if (r != 0) {
printf("bus_dmamap_load vr_control failed, error %d\n", r );
- return 1;
+ return i;
}
+
+ SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
}
- return 0;
+ return nreqs;
}