-/* $OpenBSD: nvme.c,v 1.108 2024/04/15 13:58:48 krw Exp $ */
+/* $OpenBSD: nvme.c,v 1.109 2024/04/15 14:25:10 krw Exp $ */
/*
* Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
void nvme_ccb_put(void *, void *);
int nvme_poll(struct nvme_softc *, struct nvme_queue *, struct nvme_ccb *,
- void (*)(struct nvme_softc *, struct nvme_ccb *, void *));
+ void (*)(struct nvme_softc *, struct nvme_ccb *, void *), u_int32_t);
void nvme_poll_fill(struct nvme_softc *, struct nvme_ccb *, void *);
void nvme_poll_done(struct nvme_softc *, struct nvme_ccb *,
struct nvme_cqe *);
.op_cq_done = nvme_op_cq_done,
};
+#define NVME_TIMO_QOP 5000 /* ms to create/delete queue */
+#define NVME_TIMO_IDENT 10000 /* ms to probe/identify */
+#define NVME_TIMO_DELAYUS 10 /* us to delay() in poll loop */
+
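+/*
+ * nvme_poll() takes its timeout in milliseconds; 0 means "no
+ * timeout", keeping the old spin-forever behaviour for callers
+ * that pass an unset value.  Each idle spin burns
+ * NVME_TIMO_DELAYUS microseconds of the budget, since delay(9)
+ * counts in microseconds.
+ */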
/*
* Some controllers, at least Apple NVMe, always require split
* transfers, so don't use bus_space_{read,write}_8() on LP64.
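+ *
+ * A 64-bit register is instead accessed as two 32-bit halves;
+ * illustratively, a read is composed along the lines of:
+ *
+ *	v = (u_int64_t)nvme_read4(sc, r) |
+ *	    (u_int64_t)nvme_read4(sc, r + 4) << 32;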
ccb->ccb_cookie = &sqe;
nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
- rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
+ rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_IDENT);
nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
scsi_io_put(&sc->sc_iopool, ccb);
}
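+ /* xs->timeout is in milliseconds, which nvme_poll() now takes directly. */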
if (ISSET(xs->flags, SCSI_POLL)) {
- nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_io_fill);
+ nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_io_fill, xs->timeout);
return;
}
ccb->ccb_cookie = xs;
if (ISSET(xs->flags, SCSI_POLL)) {
- nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_sync_fill);
+ nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_sync_fill, xs->timeout);
return;
}
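+/*
+ * Submit `ccb' on `q' and busy-wait for its completion for at
+ * most `ms' milliseconds; ms == 0 means wait forever.  A dead
+ * controller (CSTS reading all-ones or reporting a fatal status)
+ * ends the wait early with an internal device error.
+ */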
int
nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
- void (*fill)(struct nvme_softc *, struct nvme_ccb *, void *))
+ void (*fill)(struct nvme_softc *, struct nvme_ccb *, void *), u_int32_t ms)
{
struct nvme_poll_state state;
void (*done)(struct nvme_softc *, struct nvme_ccb *, struct nvme_cqe *);
void *cookie;
+ int64_t us;
u_int32_t csts;
u_int16_t flags;
ccb->ccb_cookie = &state;
nvme_q_submit(sc, q, ccb, nvme_poll_fill);
- while (!ISSET(state.c.flags, htole16(NVME_CQE_PHASE))) {
+ /* Spin for at most `ms' milliseconds; ms == 0 means no timeout. */
+ for (us = (int64_t)ms * 1000; ms == 0 || us > 0; us -= NVME_TIMO_DELAYUS) {
+ if (ISSET(state.c.flags, htole16(NVME_CQE_PHASE)))
+ break;
csts = nvme_read4(sc, NVME_CSTS);
if (csts == 0xffffffff || ISSET(csts, NVME_CSTS_CFS)) {
SET(state.c.flags, htole16(NVME_CQE_SC_INTERNAL_DEV_ERR));
break;
}
if (nvme_q_complete(sc, q) == 0)
- delay(10);
+ delay(NVME_TIMO_DELAYUS);
nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ);
-
- /* XXX no timeout? */
}
ccb->ccb_cookie = cookie;
ccb->ccb_cookie = mem;
nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
- rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify);
+ rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify,
+ NVME_TIMO_IDENT);
nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
nvme_ccb_put(sc, ccb);
htolem16(&sqe.qid, q->q_id);
sqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
- rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
+ rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
if (rv != 0)
goto fail;
htolem16(&sqe.cqid, q->q_id);
sqe.qflags = NVM_SQE_Q_PC;
- rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
+ rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
if (rv != 0)
goto fail;
sqe.opcode = NVM_ADMIN_DEL_IOSQ;
htolem16(&sqe.qid, q->q_id);
- rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
+ rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
if (rv != 0)
goto fail;
sqe.opcode = NVM_ADMIN_DEL_IOCQ;
htolem16(&sqe.qid, q->q_id);
- rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
+ rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
if (rv != 0)
goto fail;