-/* $OpenBSD: viogpu.c,v 1.6 2024/05/24 10:05:55 jsg Exp $ */
+/* $OpenBSD: viogpu.c,v 1.7 2024/08/01 11:13:19 sf Exp $ */
/*
* Copyright (c) 2021-2023 joshua stein <jcs@openbsd.org>
int viogpu_match(struct device *, void *, void *);
void viogpu_attach(struct device *, struct device *, void *);
int viogpu_send_cmd(struct viogpu_softc *, void *, size_t, void *, size_t);
-int viogpu_vq_wait(struct virtqueue *vq);
+int viogpu_vq_done(struct virtqueue *vq);
void viogpu_rx_soft(void *arg);
int viogpu_get_display_info(struct viogpu_softc *);
printf(": alloc_vq failed\n");
return;
}
- sc->sc_vqs[VQCTRL].vq_done = viogpu_vq_wait;
+ sc->sc_vqs[VQCTRL].vq_done = viogpu_vq_done;
if (virtio_alloc_vq(vsc, &sc->sc_vqs[VQCURS], VQCURS, NBPG, 1,
"cursor")) {
goto unmap;
}
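+ /*
+ * The virtio spec requires DRIVER_OK to be set before the driver
+ * notifies the device, and viogpu_get_display_info() below already
+ * submits a command on the control queue, so set the status first.
+ */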
+ virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
+
if (viogpu_get_display_info(sc) != 0)
goto unmap;
sc->sc_fb_dma_kva, sc->sc_fb_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
goto fb_unmap;
- virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
-
if (viogpu_create_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height) != 0)
goto fb_unmap;
}
int
-viogpu_vq_wait(struct virtqueue *vq)
+viogpu_vq_done(struct virtqueue *vq)
{
struct virtio_softc *vsc = vq->vq_owner;
struct viogpu_softc *sc = (struct viogpu_softc *)vsc->sc_child;
int slot, len;
- while (virtio_dequeue(vsc, vq, &slot, &len) != 0)
- ;
+ if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
+ return 0;
bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
BUS_DMASYNC_POSTREAD);
virtio_enqueue_p(vq, slot, sc->sc_dma_map, cmd_size, ret_size, 0);
virtio_enqueue_commit(vsc, vq, slot, 1);
- viogpu_vq_wait(vq);
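+ /*
+ * Poll the used ring through virtio_check_vq(), which does the
+ * bus_dmamap_sync() on the used ring and calls viogpu_vq_done()
+ * once the reply has landed; polling virtio_dequeue() directly
+ * would skip that sync (see the comment above virtio_dequeue()).
+ */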
+ while (virtio_check_vq(vsc, vq) == 0)
+ ;
bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, 0, cmd_size,
BUS_DMASYNC_POSTWRITE);
-/* $OpenBSD: virtio.c,v 1.28 2024/07/26 07:55:23 sf Exp $ */
+/* $OpenBSD: virtio.c,v 1.29 2024/08/01 11:13:19 sf Exp $ */
/* $NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $ */
/*
/*
* dmamap sync operations for a virtqueue.
+ *
+ * XXX These should be more fine-grained. Syncing the whole ring when we
+ * XXX only need a few bytes is inefficient if bounce buffers are in use.
*/
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
ops);
}
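+/*
+ * Sync only the used_event field, i.e. the uint16_t that follows
+ * avail->ring[vq_num] when VIRTIO_F_RING_EVENT_IDX is negotiated.
+ */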
+static inline void
+vq_sync_aring_used_event(struct virtio_softc *sc, struct virtqueue *vq, int ops)
+{
+ bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, vq->vq_availoffset +
+ offsetof(struct vring_avail, ring) + vq->vq_num * sizeof(uint16_t),
+ sizeof(uint16_t), ops);
+}
+
static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
sizeof(struct vring_used_elem), ops);
}
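+/*
+ * Sync only the avail_event field, i.e. the uint16_t that follows
+ * used->ring[vq_num] when VIRTIO_F_RING_EVENT_IDX is negotiated.
+ */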
+static inline void
+vq_sync_uring_avail_event(struct virtio_softc *sc, struct virtqueue *vq, int ops)
+{
+ bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
+ vq->vq_usedoffset + offsetof(struct vring_used, ring) +
+ vq->vq_num * sizeof(struct vring_used_elem), sizeof(uint16_t),
+ ops);
+}
+
static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
int ops)
static void
publish_avail_idx(struct virtio_softc *sc, struct virtqueue *vq)
{
+ /* first make sure the avail ring entries are visible to the device */
vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
virtio_membar_producer();
vq->vq_avail->idx = vq->vq_avail_idx;
- vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
+ /* make the avail idx visible to the device */
+ vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
vq->vq_queued = 1;
}
publish_avail_idx(sc, vq);
virtio_membar_sync();
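+ /* sync before reading the device-written avail_event */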
+ vq_sync_uring_avail_event(sc, vq, BUS_DMASYNC_POSTREAD);
t = VQ_AVAIL_EVENT(vq) + 1;
if ((uint16_t)(n - t) < (uint16_t)(n - o))
sc->sc_ops->kick(sc, vq->vq_index);
publish_avail_idx(sc, vq);
virtio_membar_sync();
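+ /* sync before reading the device-written used->flags */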
+ vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
sc->sc_ops->kick(sc, vq->vq_index);
}
* Dequeue a request.
*/
/*
- * dequeue: dequeue a request from uring; dmamap_sync for uring is
- * already done in the interrupt handler.
+ * dequeue: dequeue a request from uring; bus_dmamap_sync for uring must
+ * already have been done, usually by virtio_check_vq() in the interrupt
+ * handler. This means that polling virtio_dequeue() repeatedly until it
+ * returns 0 does not work.
*/
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
usedidx &= vq->vq_mask;
virtio_membar_consumer();
+ vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
slot = vq->vq_used->ring[usedidx].id;
qe = &vq->vq_entries[slot];
VQ_USED_EVENT(vq) = idx;
virtio_membar_sync();
- vq_sync_aring(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
+ vq_sync_aring_used_event(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
vq->vq_queued++;
if (nslots < virtio_nused(vq))
* interrupt.
*/
VQ_USED_EVENT(vq) = vq->vq_used_idx + 0x8000;
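+ /* push the new used_event value out to the device */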
+ vq_sync_aring_used_event(sc, vq, BUS_DMASYNC_PREWRITE);
} else {
vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
* interrupts is done through setting the latest
* consumed index in the used_event field
*/
- if (virtio_has_feature(sc, VIRTIO_F_RING_EVENT_IDX))
+ if (virtio_has_feature(sc, VIRTIO_F_RING_EVENT_IDX)) {
VQ_USED_EVENT(vq) = vq->vq_used_idx;
- else
+ vq_sync_aring_used_event(sc, vq, BUS_DMASYNC_PREWRITE);
+ } else {
vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
+ }
virtio_membar_sync();
- vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
vq->vq_queued++;
+ vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
if (vq->vq_used_idx != vq->vq_used->idx)
return 1;
{
uint16_t n;
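+ /* sync before reading the device-written used index */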
+ vq_sync_uring(vq->vq_owner, vq, BUS_DMASYNC_POSTREAD);
n = (uint16_t)(vq->vq_used->idx - vq->vq_used_idx);
VIRTIO_ASSERT(n <= vq->vq_num);