-/* $OpenBSD: dwc2.c,v 1.53 2021/01/28 01:48:54 kurt Exp $ */
+/* $OpenBSD: dwc2.c,v 1.54 2021/07/22 18:32:33 mglocker Exp $ */
/* $NetBSD: dwc2.c,v 1.32 2014/09/02 23:26:20 macallan Exp $ */
/*-
* POSSIBILITY OF SUCH DAMAGE.
*/
-#if 0
-#include "opt_usb.h"
-#endif
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/endian.h>
-#if 0
-#include <sys/cpu.h>
-#endif
#include <machine/bus.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>
-#if 0
-#include <dev/usb/usbroothub_subr.h>
-#endif
-
#include <dev/usb/dwc2/dwc2.h>
#include <dev/usb/dwc2/dwc2var.h>
STATIC void dwc2_poll(struct usbd_bus *);
STATIC void dwc2_softintr(void *);
-#if 0
-STATIC usbd_status dwc2_allocm(struct usbd_bus *, struct usb_dma *, uint32_t);
-STATIC void dwc2_freem(struct usbd_bus *, struct usb_dma *);
-#endif
-
STATIC struct usbd_xfer *dwc2_allocx(struct usbd_bus *);
STATIC void dwc2_freex(struct usbd_bus *, struct usbd_xfer *);
-#if 0
-STATIC void dwc2_get_lock(struct usbd_bus *, struct mutex **);
-#endif
STATIC usbd_status dwc2_root_ctrl_transfer(struct usbd_xfer *);
STATIC usbd_status dwc2_root_ctrl_start(struct usbd_xfer *);
STATIC void dwc2_timeout(void *);
STATIC void dwc2_timeout_task(void *);
-STATIC_INLINE void
+static inline void
dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
struct usbd_xfer *xfer)
{
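	/* Bandwidth accounting is unimplemented; this stub (and
	 * dwc2_free_bus_bandwidth() below) is a deliberate no-op. */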
}
-STATIC_INLINE void
+static inline void
dwc2_free_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
struct usbd_xfer *xfer)
{
.dev_setaddr = dwc2_setaddr,
.soft_intr = dwc2_softintr,
.do_poll = dwc2_poll,
-#if 0
- .allocm = dwc2_allocm,
- .freem = dwc2_freem,
-#endif
.allocx = dwc2_allocx,
.freex = dwc2_freex,
-#if 0
- .get_lock = dwc2_get_lock,
-#endif
};
STATIC struct usbd_pipe_methods dwc2_root_ctrl_methods = {
.done = dwc2_device_isoc_done,
};
-#if 0
-STATIC usbd_status
-dwc2_allocm(struct usbd_bus *bus, struct usb_dma *dma, uint32_t size)
-{
- struct dwc2_softc *sc = DWC2_BUS2SC(bus);
- usbd_status status;
-
- DPRINTFN(10, "\n");
-
- status = usb_allocmem(&sc->sc_bus, size, 0, dma);
- if (status == USBD_NOMEM)
- status = usb_reserve_allocm(&sc->sc_dma_reserve, dma, size);
- return status;
-}
-
-STATIC void
-dwc2_freem(struct usbd_bus *bus, struct usb_dma *dma)
-{
- struct dwc2_softc *sc = DWC2_BUS2SC(bus);
-
- DPRINTFN(10, "\n");
-
- if (dma->block->flags & USB_DMA_RESERVE) {
- usb_reserve_freem(&sc->sc_dma_reserve, dma);
- return;
- }
- usb_freemem(&sc->sc_bus, dma);
-}
-#endif
-
/*
* Work around the half configured control (default) pipe when setting
* the address of a device.
DPRINTFN(10, "\n");
DWC2_EVCNT_INCR(sc->sc_ev_xferpoolget);
- dxfer = pool_get(&sc->sc_xferpool, PR_NOWAIT);
+ dxfer = pool_get(&sc->sc_xferpool, PR_WAITOK);
if (dxfer != NULL) {
memset(dxfer, 0, sizeof(*dxfer));
-
dxfer->urb = dwc2_hcd_urb_alloc(sc->sc_hsotg,
- DWC2_MAXISOCPACKETS, GFP_ATOMIC);
- if (dxfer->urb == NULL) {
- pool_put(&sc->sc_xferpool, dxfer);
- return NULL;
- }
-
-#ifdef DWC2_DEBUG
+ DWC2_MAXISOCPACKETS, M_NOWAIT);
+#ifdef DIAGNOSTIC
dxfer->xfer.busy_free = XFER_ONQU;
#endif
}
DPRINTFN(10, "\n");
-#ifdef DWC2_DEBUG
- if (xfer->busy_free != XFER_ONQU) {
+#ifdef DIAGNOSTIC
+ if (xfer->busy_free != XFER_ONQU &&
+ xfer->status != USBD_NOT_STARTED) {
DPRINTF("xfer=%p not busy, 0x%08x\n", xfer, xfer->busy_free);
}
xfer->busy_free = XFER_FREE;
pool_put(&sc->sc_xferpool, xfer);
}
-#if 0
-STATIC void
-dwc2_get_lock(struct usbd_bus *bus, struct mutex **lock)
-{
- struct dwc2_softc *sc = DWC2_BUS2SC(bus);
-
- *lock = &sc->sc_lock;
-}
-#endif
-
STATIC void
dwc2_rhc(void *addr)
{
u_char *p;
DPRINTF("\n");
+ mtx_enter(&sc->sc_lock);
xfer = sc->sc_intrxfer;
if (xfer == NULL) {
/* Just ignore the change. */
+ mtx_leave(&sc->sc_lock);
return;
}
+
/* set port bit */
p = KERNADDR(&xfer->dmabuf, 0);
xfer->status = USBD_NORMAL_COMPLETION;
usb_transfer_complete(xfer);
+ mtx_leave(&sc->sc_lock);
}
STATIC void
struct usbd_bus *bus = v;
struct dwc2_softc *sc = DWC2_BUS2SC(bus);
struct dwc2_hsotg *hsotg = sc->sc_hsotg;
- struct dwc2_xfer *dxfer;
+ struct dwc2_xfer *dxfer, *next;
+ TAILQ_HEAD(, dwc2_xfer) claimed = TAILQ_HEAD_INITIALIZER(claimed);
+ /*
+ * Grab all the xfers that have not been aborted or timed out.
+ * Do so under a single lock -- without dropping it to run
+ * usb_transfer_complete() as we go -- so that dwc2_abortx() can't
+ * remove the next xfer out from under us while we iterate.
+ */
mtx_enter(&hsotg->lock);
- while ((dxfer = TAILQ_FIRST(&sc->sc_complete)) != NULL) {
-
- KASSERTMSG(!timeout_pending(&dxfer->xfer.timeout_handle),
- "xfer %p pipe %p\n", dxfer, dxfer->xfer.pipe);
-
- /*
- * dwc2_abort_xfer will remove this transfer from the
- * sc_complete queue
- */
- /*XXXNH not tested */
- if (dxfer->flags & DWC2_XFER_ABORTING) {
- wakeup(&dxfer->flags);
- continue;
- }
-
+ TAILQ_FOREACH_SAFE(dxfer, &sc->sc_complete, xnext, next) {
+ KASSERT(dxfer->xfer.status == USBD_IN_PROGRESS);
+ KASSERT(dxfer->intr_status != USBD_CANCELLED);
+ KASSERT(dxfer->intr_status != USBD_TIMEOUT);
TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
+ TAILQ_INSERT_TAIL(&claimed, dxfer, xnext);
+ }
+ mtx_leave(&hsotg->lock);
- mtx_leave(&hsotg->lock);
+ /* Now complete them. */
+ while (!TAILQ_EMPTY(&claimed)) {
+ dxfer = TAILQ_FIRST(&claimed);
+ KASSERT(dxfer->xfer.status == USBD_IN_PROGRESS);
+ KASSERT(dxfer->intr_status != USBD_CANCELLED);
+ KASSERT(dxfer->intr_status != USBD_TIMEOUT);
+ TAILQ_REMOVE(&claimed, dxfer, xnext);
+
+ dxfer->xfer.status = dxfer->intr_status;
usb_transfer_complete(&dxfer->xfer);
- mtx_enter(&hsotg->lock);
}
- mtx_leave(&hsotg->lock);
}
STATIC void
dwc2_timeout(void *addr)
{
struct usbd_xfer *xfer = addr;
- struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
+ struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
DPRINTF("xfer=%p\n", xfer);
- if (sc->sc_dying) {
+ if (sc->sc_bus.dying) {
dwc2_timeout_task(addr);
return;
}
DPRINTF("pipe %p addr %d xfertype %d dir %s\n", pipe, addr, xfertype,
UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN ? "in" : "out");
- if (sc->sc_dying) {
+ if (sc->sc_bus.dying) {
return USBD_IOERROR;
}
err = usb_allocmem(&sc->sc_bus, sizeof(usb_device_request_t),
0, USB_DMA_COHERENT, &dpipe->req_dma);
if (err)
- return err;
+ return USBD_NOMEM;
break;
case UE_INTERRUPT:
pipe->methods = &dwc2_device_intr_methods;
return USBD_INVAL;
}
- dpipe->priv = NULL; /* QH */
+ /* QH */
+ dpipe->priv = NULL;
return USBD_NORMAL_COMPLETION;
}
DPRINTF("xfer=%p\n", xfer);
- if (sc->sc_dying) {
+ if (sc->sc_bus.dying) {
xfer->status = status;
timeout_del(&xfer->timeout_handle);
usb_rem_task(xfer->device, &xfer->abort_task);
},
};
-#define HSETW(ptr, val) ptr = { (uint8_t)(val), (uint8_t)((val) >> 8) }
-#if 0
-/* appears to be unused */
-STATIC const usb_hub_descriptor_t dwc2_hubd = {
- .bDescLength = USB_HUB_DESCRIPTOR_SIZE,
- .bDescriptorType = UDESC_HUB,
- .bNbrPorts = 1,
- HSETW(.wHubCharacteristics, (UHD_PWR_NO_SWITCH | UHD_OC_INDIVIDUAL)),
- .bPwrOn2PwrGood = 50,
- .bHubContrCurrent = 0,
- .DeviceRemovable = {0}, /* port is removable */
-};
-#endif
-
STATIC usbd_status
dwc2_root_ctrl_transfer(struct usbd_xfer *xfer)
{
int value, index, l, s, totlen;
usbd_status err = USBD_IOERROR;
- if (sc->sc_dying)
+ if (sc->sc_bus.dying)
return USBD_IOERROR;
req = &xfer->request;
switch (value) {
case C(0, UDESC_DEVICE):
l = min(len, USB_DEVICE_DESCRIPTOR_SIZE);
-// USETW(dwc2_devd.idVendor, sc->sc_id_vendor);
+// USETW(dwc2_devd.idVendor, sc->sc_id_vendor);
memcpy(buf, &dwc2_devd, l);
buf += l;
len -= l;
STATIC usbd_status
dwc2_root_intr_transfer(struct usbd_xfer *xfer)
{
+ struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
usbd_status err;
DPRINTF("\n");
/* Insert last in queue. */
+ mtx_enter(&sc->sc_lock);
err = usb_insert_transfer(xfer);
+ mtx_leave(&sc->sc_lock);
if (err)
return err;
dwc2_root_intr_start(struct usbd_xfer *xfer)
{
struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
+ const bool polling = sc->sc_bus.use_polling;
DPRINTF("\n");
- if (sc->sc_dying)
+ if (sc->sc_bus.dying)
return USBD_IOERROR;
+ if (!polling)
+ mtx_enter(&sc->sc_lock);
KASSERT(sc->sc_intrxfer == NULL);
sc->sc_intrxfer = xfer;
+ xfer->status = USBD_IN_PROGRESS;
+ if (!polling)
+ mtx_leave(&sc->sc_lock);
return USBD_IN_PROGRESS;
}
DPRINTF("xfer=%p\n", xfer);
- KASSERT(xfer->pipe->intrxfer == xfer);
-
- sc->sc_intrxfer = NULL;
+ /* If xfer has already completed, nothing to do here. */
+ if (sc->sc_intrxfer == NULL)
+ return;
+ /*
+ * Otherwise, sc->sc_intrxfer had better be this transfer.
+ * Cancel it.
+ */
+ KASSERT(sc->sc_intrxfer == xfer);
+ KASSERT(xfer->status == USBD_IN_PROGRESS);
xfer->status = USBD_CANCELLED;
usb_transfer_complete(xfer);
}
DPRINTF("\n");
- sc->sc_intrxfer = NULL;
+ /*
+ * Caller must guarantee the xfer has completed first, by
+ * closing the pipe only after normal completion or an abort.
+ */
+ KASSERT(sc->sc_intrxfer == NULL);
}
STATIC void
dwc2_root_intr_done(struct usbd_xfer *xfer)
{
+ struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
DPRINTF("\n");
+
+ /* Claim the xfer so it doesn't get completed again. */
+ KASSERT(sc->sc_intrxfer == xfer);
+ KASSERT(xfer->status != USBD_IN_PROGRESS);
+ sc->sc_intrxfer = NULL;
}
/***********************************************************************/
STATIC usbd_status
dwc2_device_ctrl_transfer(struct usbd_xfer *xfer)
{
+ struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
usbd_status err;
DPRINTF("\n");
/* Insert last in queue. */
+ mtx_enter(&sc->sc_lock);
err = usb_insert_transfer(xfer);
+ mtx_leave(&sc->sc_lock);
if (err)
return err;
STATIC usbd_status
dwc2_device_ctrl_start(struct usbd_xfer *xfer)
{
+ struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
usbd_status err;
+ const bool polling = sc->sc_bus.use_polling;
DPRINTF("\n");
+ if (!polling)
+ mtx_enter(&sc->sc_lock);
xfer->status = USBD_IN_PROGRESS;
err = dwc2_device_start(xfer);
+ if (!polling)
+ mtx_leave(&sc->sc_lock);
- return err;
+ if (err)
+ return err;
+
+ return USBD_IN_PROGRESS;
}
STATIC void
STATIC void
dwc2_device_ctrl_close(struct usbd_pipe *pipe)
{
+ struct dwc2_softc * const sc = DWC2_PIPE2SC(pipe);
+ struct dwc2_pipe * const dpipe = DWC2_PIPE2DPIPE(pipe);
DPRINTF("pipe=%p\n", pipe);
dwc2_close_pipe(pipe);
+
+ usb_freemem(&sc->sc_bus, &dpipe->req_dma);
}
STATIC void
STATIC usbd_status
dwc2_device_intr_transfer(struct usbd_xfer *xfer)
{
+ struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
usbd_status err;
DPRINTF("xfer=%p\n", xfer);
/* Insert last in queue. */
+ mtx_enter(&sc->sc_lock);
err = usb_insert_transfer(xfer);
+ mtx_leave(&sc->sc_lock);
if (err)
return err;
STATIC usbd_status
dwc2_device_intr_start(struct usbd_xfer *xfer)
{
+ struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
usbd_status err;
+ const bool polling = sc->sc_bus.use_polling;
+ if (!polling)
+ mtx_enter(&sc->sc_lock);
xfer->status = USBD_IN_PROGRESS;
err = dwc2_device_start(xfer);
+ if (!polling)
+ mtx_leave(&sc->sc_lock);
- return err;
+ if (err)
+ return err;
+
+ return USBD_IN_PROGRESS;
}
/* Abort a device interrupt request. */
uint32_t flags = 0;
uint32_t off = 0;
- int retval, err = USBD_IN_PROGRESS;
+ int retval, err;
int alloc_bandwidth = 0;
- int i;
DPRINTFN(1, "xfer=%p pipe=%p\n", xfer, xfer->pipe);
/* Copy request packet to our DMA buffer */
memcpy(KERNADDR(&dpipe->req_dma, 0), req, sizeof(*req));
usb_syncmem(&dpipe->req_dma, 0, sizeof(*req),
- BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREWRITE);
len = UGETW(req->wLength);
if ((req->bmRequestType & UT_READ) == UT_READ) {
dir = UE_DIR_IN;
KERNADDR(&dpipe->req_dma, 0),
(long long)DMAADDR(&dpipe->req_dma, 0),
len, dir == UE_DIR_IN ? "in" : "out");
- } else {
- DPRINTFN(3, "xfer=%p len=%d flags=%d addr=%d endpt=%d,"
- " mps=%d dir %s\n", xfer, xfer->length, xfer->flags, addr,
+ } else if (xfertype == UE_ISOCHRONOUS) {
+ DPRINTFN(3, "xfer=%p nframes=%d flags=%d addr=%d endpt=%d,"
+ " mps=%d dir %s\n", xfer, xfer->nframes, xfer->flags, addr,
epnum, mps, dir == UT_READ ? "in" :"out");
+#ifdef DIAGNOSTIC
+ len = 0;
+ for (size_t i = 0; i < xfer->nframes; i++)
+ len += xfer->frlengths[i];
+ if (len != xfer->length)
+ panic("len (%d) != xfer->length (%d)", len,
+ xfer->length);
+#endif
+ len = xfer->length;
+ } else {
+ DPRINTFN(3, "xfer=%p len=%d flags=%d addr=%d endpt=%d,"
+ " mps=%d dir %s\n", xfer, xfer->length, xfer->flags, addr,
+ epnum, mps, dir == UT_READ ? "in" :"out");
+
len = xfer->length;
}
if (!dwc2_urb)
return USBD_NOMEM;
+// KASSERT(dwc2_urb->packet_count == xfer->nframes);
memset(dwc2_urb, 0, sizeof(*dwc2_urb) +
sizeof(dwc2_urb->iso_descs[0]) * DWC2_MAXISOCPACKETS);
+ dwc2_urb->priv = xfer;
+ dwc2_urb->packet_count = xfer->nframes;
+
dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, addr, epnum, xfertype, dir,
- mps);
+ mps);
if (xfertype == UE_CONTROL) {
dwc2_urb->setup_usbdma = &dpipe->req_dma;
} else {
/* XXXNH - % mps required? */
if ((xfer->flags & USBD_FORCE_SHORT_XFER) && (len % mps) == 0)
- flags |= URB_SEND_ZERO_PACKET;
+ flags |= URB_SEND_ZERO_PACKET;
}
flags |= URB_GIVEBACK_ASAP;
dwc2_urb->usbdma = &xfer->dmabuf;
dwc2_urb->buf = KERNADDR(dwc2_urb->usbdma, 0);
dwc2_urb->dma = DMAADDR(dwc2_urb->usbdma, 0);
- usb_syncmem(&xfer->dmabuf, 0, xfer->length,
- usbd_xfer_isread(xfer) ?
- BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
+
+ usb_syncmem(&xfer->dmabuf, 0, len,
+ dir == UE_DIR_IN ?
+ BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
}
dwc2_urb->length = len;
dwc2_urb->flags = flags;
dwc2_urb->status = -EINPROGRESS;
- dwc2_urb->packet_count = xfer->nframes;
if (xfertype == UE_INTERRUPT ||
xfertype == UE_ISOCHRONOUS) {
dwc2_urb->interval = ival;
}
+ /* XXXNH bring down from callers?? */
+// mtx_enter(&sc->sc_lock);
+
xfer->actlen = 0;
KASSERT(xfertype != UE_ISOCHRONOUS ||
- xfer->nframes < DWC2_MAXISOCPACKETS);
+ xfer->nframes <= DWC2_MAXISOCPACKETS);
KASSERTMSG(xfer->nframes == 0 || xfertype == UE_ISOCHRONOUS,
"nframes %d xfertype %d\n", xfer->nframes, xfertype);
- for (off = i = 0; i < xfer->nframes; ++i) {
- DPRINTFN(3, "xfer=%p frame=%d offset=%d length=%d\n", xfer, i,
+ off = 0;
+ for (size_t i = 0; i < xfer->nframes; ++i) {
+ DPRINTFN(3, "xfer=%p frame=%zu offset=%d length=%d\n", xfer, i,
off, xfer->frlengths[i]);
dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, off,
off += xfer->frlengths[i];
}
- /* might need to check cpu_intr_p */
- mtx_enter(&hsotg->lock);
+ struct dwc2_qh *qh = dpipe->priv;
+ struct dwc2_qtd *qtd;
+ bool qh_allocated = false;
- if (xfer->timeout && !sc->sc_bus.use_polling) {
- timeout_reset(&xfer->timeout_handle, mstohz(xfer->timeout),
- dwc2_timeout, xfer);
+ /* Create QH for the endpoint if it doesn't exist */
+ if (!qh) {
+ qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, M_NOWAIT);
+ if (!qh) {
+ retval = -ENOMEM;
+ goto fail;
+ }
+ dpipe->priv = qh;
+ qh_allocated = true;
}
- dwc2_urb->priv = xfer;
- retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &dpipe->priv, 0);
+ qtd = pool_get(&sc->sc_qtdpool, PR_NOWAIT);
+ if (!qtd) {
+ retval = -ENOMEM;
+ goto fail1;
+ }
+ memset(qtd, 0, sizeof(*qtd));
+
+ /* might need to check cpu_intr_p */
+ mtx_enter(&hsotg->lock);
+ retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
if (retval)
- goto fail;
+ goto fail2;
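+
+	/* Arm the transfer timeout only once the URB has been queued. */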
+ if (xfer->timeout && !sc->sc_bus.use_polling) {
+ timeout_set(&xfer->timeout_handle, dwc2_timeout, xfer);
+ timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
+ }
+ xfer->status = USBD_IN_PROGRESS;
if (alloc_bandwidth) {
dwc2_allocate_bus_bandwidth(hsotg,
xfer);
}
-fail:
mtx_leave(&hsotg->lock);
+// mtx_leave(&sc->sc_lock);
+
+ return USBD_IN_PROGRESS;
+
+fail2:
+ dwc2_urb->priv = NULL;
+ mtx_leave(&hsotg->lock);
+ pool_put(&sc->sc_qtdpool, qtd);
+
+fail1:
+ if (qh_allocated) {
+ dpipe->priv = NULL;
+ dwc2_hcd_qh_free(hsotg, qh);
+ }
+fail:
switch (retval) {
- case 0:
- break;
+ case -EINVAL:
case -ENODEV:
err = USBD_INVAL;
break;
}
-void
-dwc2_worker(struct task *wk, void *priv)
-{
- struct dwc2_softc *sc = priv;
- struct dwc2_hsotg *hsotg = sc->sc_hsotg;
-
-/* db_enter(); */
-#if 0
- struct usbd_xfer *xfer = dwork->xfer;
- struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
-
- dwc2_hcd_endpoint_disable(sc->dwc_dev.hcd, dpipe->priv, 250);
- dwc_free(NULL, dpipe->urb);
-#endif
-
- if (wk == &hsotg->wf_otg) {
- dwc2_conn_id_status_change(wk);
- } else if (wk == &hsotg->start_work.work) {
- dwc2_hcd_start_func(wk);
- } else if (wk == &hsotg->reset_work.work) {
- dwc2_hcd_reset_func(wk);
- } else {
-#if 0
- KASSERT(dwork->xfer != NULL);
- KASSERT(dxfer->queued == true);
-
- if (!(dxfer->flags & DWC2_XFER_ABORTING)) {
- dwc2_start_standard_chain(xfer);
- }
- dxfer->queued = false;
- wakeup(&dxfer->flags);
-#endif
- }
-}
-
int
dwc2_intr(void *p)
{
struct dwc2_softc *sc = p;
- struct dwc2_hsotg *hsotg = sc->sc_hsotg;
+ struct dwc2_hsotg *hsotg;
int ret = 0;
+ if (sc == NULL)
+ return 0;
+
+ hsotg = sc->sc_hsotg;
mtx_enter(&hsotg->lock);
- if (sc->sc_dying)
+ if (sc->sc_bus.dying)
goto done;
if (sc->sc_bus.use_polling) {
sc->sc_bus.pipe_size = sizeof(struct dwc2_pipe);
sc->sc_hcdenabled = false;
+ mtx_init(&sc->sc_lock, IPL_SOFTUSB);
+
TAILQ_INIT(&sc->sc_complete);
sc->sc_rhc_si = softintr_establish(IPL_SOFTUSB, dwc2_rhc, sc);
-#if 0
- usb_setup_reserve(&sc->sc_bus, &sc->sc_dma_reserve, sc->sc_bus.dmatag,
- USB_MEM_RESERVE);
-#endif
-
pool_init(&sc->sc_xferpool, sizeof(struct dwc2_xfer), 0, IPL_USB, 0,
"dwc2xfer", NULL);
pool_init(&sc->sc_qhpool, sizeof(struct dwc2_qh), 0, IPL_USB, 0,
sc->sc_hsotg = malloc(sizeof(struct dwc2_hsotg), M_DEVBUF,
M_ZERO | M_WAITOK);
-
sc->sc_hsotg->hsotg_sc = sc;
sc->sc_hsotg->dev = &sc->sc_bus.bdev;
sc->sc_hcdenabled = true;
- err = dwc2_hcd_init(sc->sc_hsotg, sc->sc_params);
- if (err) {
- err = -err;
- goto fail;
+ struct dwc2_hsotg *hsotg = sc->sc_hsotg;
+ struct dwc2_core_params defparams;
+ int retval;
+
+ if (sc->sc_params == NULL) {
+ /* Default all params to autodetect */
+ dwc2_set_all_params(&defparams, -1);
+ sc->sc_params = &defparams;
+
+ /*
+ * Disable descriptor dma mode by default as the HW can support
+ * it, but does not support it for SPLIT transactions.
+ */
+ defparams.dma_desc_enable = 0;
}
+ hsotg->dr_mode = USB_DR_MODE_HOST;
+
+ /*
+ * Reset before dwc2_get_hwparams() so that it reads the power-on
+ * reset values from the registers.
+ */
+ dwc2_core_reset(hsotg);
+ usb_delay_ms(&sc->sc_bus, 500);
+
+ /* Detect config values from hardware */
+ retval = dwc2_get_hwparams(hsotg);
+ if (retval) {
+ goto fail2;
+ }
+
+ hsotg->core_params = malloc(sizeof(*hsotg->core_params), M_DEVBUF,
+ M_ZERO | M_WAITOK);
+ dwc2_set_all_params(hsotg->core_params, -1);
+
+ /* Validate parameter values */
+ dwc2_set_parameters(hsotg, sc->sc_params);
+
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ if (hsotg->dr_mode != USB_DR_MODE_HOST) {
+ retval = dwc2_gadget_init(hsotg);
+ if (retval)
+ goto fail2;
+ hsotg->gadget_enabled = 1;
+ }
+#endif
+#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
+ retval = dwc2_hcd_init(hsotg);
+ if (retval) {
+ if (hsotg->gadget_enabled)
+ dwc2_hsotg_remove(hsotg);
+ goto fail2;
+ }
+ hsotg->hcd_enabled = 1;
+ }
+#endif
+
+#ifdef DWC2_DEBUG
+ uint32_t snpsid = hsotg->hw_params.snpsid;
+ dev_dbg(hsotg->dev, "Core Release: %x.%x%x%x (snpsid=%x)\n",
+ snpsid >> 12 & 0xf, snpsid >> 8 & 0xf,
+ snpsid >> 4 & 0xf, snpsid & 0xf, snpsid);
+#endif
return 0;
-fail:
+fail2:
+ err = -retval;
free(sc->sc_hsotg, M_DEVBUF, sizeof(struct dwc2_hsotg));
softintr_disestablish(sc->sc_rhc_si);
return err;
}
-int
-dwc2_dma_config(struct dwc2_softc *sc, struct dwc2_core_dma_config *config)
-{
- sc->sc_dma_config = config;
- return dwc2_hcd_dma_config(sc->sc_hsotg, sc->sc_dma_config);
-}
-
#if 0
/*
* curmode is a mode indication bit 0 = device, 1 = host
struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
struct usbd_device *dev = dpipe->pipe.device;
- if (dev->myhsport != NULL) {
- *hub_addr = dev->myhsport->parent->address;
- *hub_port = dev->myhsport->portno;
- }
+ *hub_addr = dev->myhsport->parent->address;
+ *hub_port = dev->myhsport->portno;
}
int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
* Must be called with interrupt disabled and spinlock held
*/
void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
- int status)
+ int status)
{
struct usbd_xfer *xfer;
struct dwc2_xfer *dxfer;
ed = xfer->pipe->endpoint->edesc;
xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
- xfer->actlen = dwc2_hcd_urb_get_actual_length(qtd->urb);
+ struct dwc2_hcd_urb *urb = qtd->urb;
+ xfer->actlen = dwc2_hcd_urb_get_actual_length(urb);
DPRINTFN(3, "xfer=%p actlen=%d\n", xfer, xfer->actlen);
- if (xfertype == UE_ISOCHRONOUS && dbg_perio()) {
- int i;
-
- for (i = 0; i < xfer->nframes; i++)
- dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n",
- i, qtd->urb->iso_descs[i].status);
- }
-
if (xfertype == UE_ISOCHRONOUS) {
- int i;
-
xfer->actlen = 0;
- for (i = 0; i < xfer->nframes; ++i) {
+ for (size_t i = 0; i < xfer->nframes; ++i) {
xfer->frlengths[i] =
dwc2_hcd_urb_get_iso_desc_actual_length(
- qtd->urb, i);
+ urb, i);
+ DPRINTFN(1, "xfer=%p frame=%zu length=%d\n", xfer, i,
+ xfer->frlengths[i]);
xfer->actlen += xfer->frlengths[i];
}
+ DPRINTFN(1, "xfer=%p actlen=%d (isoc)\n", xfer, xfer->actlen);
+ }
+
+ if (xfertype == UE_ISOCHRONOUS && dbg_perio()) {
+ for (size_t i = 0; i < xfer->nframes; i++)
+ dev_vdbg(hsotg->dev, " ISO Desc %zu status %d\n",
+ i, urb->iso_descs[i].status);
}
if (!status) {
switch (status) {
case 0:
- xfer->status = USBD_NORMAL_COMPLETION;
+ dxfer->intr_status = USBD_NORMAL_COMPLETION;
break;
case -EPIPE:
- xfer->status = USBD_STALLED;
- break;
- case -ETIMEDOUT:
- xfer->status = USBD_TIMEOUT;
+ dxfer->intr_status = USBD_STALLED;
break;
case -EPROTO:
- xfer->status = USBD_INVAL;
+ dxfer->intr_status = USBD_INVAL;
break;
case -EIO:
- xfer->status = USBD_IOERROR;
+ dxfer->intr_status = USBD_IOERROR;
break;
case -EOVERFLOW:
- xfer->status = USBD_IOERROR;
+ dxfer->intr_status = USBD_IOERROR;
break;
default:
+ dxfer->intr_status = USBD_IOERROR;
printf("%s: unknown error status %d\n", __func__, status);
}
+ if (dxfer->intr_status == USBD_NORMAL_COMPLETION) {
+ /*
+ * control transfers with no data phase don't touch dmabuf, but
+ * everything else does.
+ */
+ if (!(xfertype == UE_CONTROL &&
+ xfer->length == 0) &&
+ xfer->actlen > 0 /* XXX PR/53503 */
+ ) {
+ int rd = usbd_xfer_isread(xfer);
+
+ usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
+ rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
+ }
+ }
+
if (xfertype == UE_ISOCHRONOUS ||
xfertype == UE_INTERRUPT) {
struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
xfer);
}
- if (xfer->status == USBD_NORMAL_COMPLETION) {
- if (xfertype == UE_ISOCHRONOUS)
- usb_syncmem(&xfer->dmabuf, 0, xfer->length,
- usbd_xfer_isread(xfer) ?
- BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
- else if (xfer->actlen)
- usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
- usbd_xfer_isread(xfer) ?
- BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
- }
-
qtd->urb = NULL;
timeout_del(&xfer->timeout_handle);
usb_rem_task(xfer->device, &xfer->abort_task);
-
MUTEX_ASSERT_LOCKED(&hsotg->lock);
TAILQ_INSERT_TAIL(&sc->sc_complete, dxfer, xnext);
mtx_enter(&hsotg->lock);
- hsotg->op_state = OTG_STATE_A_HOST;
-
- dwc2_hcd_reinit(hsotg);
+ hsotg->lx_state = DWC2_L0;
- /* Try to enable port. */
- dwc2_handle_hcd_intr(hsotg);
+ if (dwc2_is_device_mode(hsotg)) {
+ mtx_leave(&hsotg->lock);
+ return 0; /* why 0 ?? */
+ }
- /*XXXNH*/
- delay(50);
+ dwc2_hcd_reinit(hsotg);
mtx_leave(&hsotg->lock);
return 0;
-/* $OpenBSD: dwc2.h,v 1.14 2017/02/15 14:49:13 visa Exp $ */
+/* $OpenBSD: dwc2.h,v 1.15 2021/07/22 18:32:33 mglocker Exp $ */
/* $NetBSD: dwc2.h,v 1.4 2014/12/23 16:20:06 macallan Exp $ */
/*-
#include <sys/kernel.h>
#include <sys/task.h>
-#include <sys/timeout.h>
#include <lib/libkern/libkern.h>
-#if 0
-#include "opt_usb.h"
-#endif
-
-#define STATIC_INLINE static inline
-#define STATIC
+#define STATIC
// #define VERBOSE_DEBUG
// #define DWC2_DUMP_FRREM
// #define CONFIG_USB_DWC2_TRACK_MISSED_SOFS
+#define CONFIG_USB_DWC2_HOST 1
+#define CONFIG_USB_DWC2_DUAL_ROLE 0
+#define CONFIG_USB_DWC2_PERIPHERAL 0
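+
+/*
+ * Kconfig-style role selection: the IS_ENABLED() guards in the core
+ * code key off these to compile the host-only paths and stub out the
+ * gadget-only ones.
+ */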
+
typedef int irqreturn_t;
#define IRQ_NONE 0
#define IRQ_HANDLED 1
#define dma_addr_t bus_addr_t
+#define DWC2_READ_4(hsotg, reg) \
+ bus_space_read_4((hsotg)->hsotg_sc->sc_iot, (hsotg)->hsotg_sc->sc_ioh, (reg))
+#define DWC2_WRITE_4(hsotg, reg, data) \
+ bus_space_write_4((hsotg)->hsotg_sc->sc_iot, (hsotg)->hsotg_sc->sc_ioh, (reg), (data))
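+
+/*
+ * Register accessors wrapping bus_space(9); all core register access
+ * in the ported code goes through the softc's tag/handle pair.  A
+ * typical read-modify-write sequence (usage sketch):
+ *
+ *	u32 gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
+ *	gusbcfg |= GUSBCFG_FORCEHOSTMODE;
+ *	DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
+ */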
+
#ifdef DWC2_DEBUG
extern int dwc2debug;
#define WARN_ON(x) KASSERT(!(x))
#define dev_vdbg(...) do { } while (0)
#endif
-#define jiffies hardclock_ticks
-#define msecs_to_jiffies mstohz
-
-#define gfp_t int
-#define GFP_KERNEL M_WAITOK
-#define GFP_ATOMIC M_NOWAIT
-
enum usb_otg_state {
OTG_STATE_RESERVED = 0,
#define spinlock_t struct mutex
#define spin_lock_init(lock) mtx_init(lock, IPL_USB)
-#define spin_lock(l) do { mtx_enter(l); } while (0)
-#define spin_unlock(l) do { mtx_leave(l); } while (0)
+#define spin_lock(l) do { mtx_enter(l); } while (0)
+#define spin_unlock(l) do { mtx_leave(l); } while (0)
#define spin_lock_irqsave(l, f) \
do { mtx_enter(l); (void)(f); } while (0)
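
/*
 * The flags argument is unused: mtx_enter(9) already raises the IPL
 * recorded at mtx_init() time, so (f) is only evaluated to keep the
 * Linux-style call sites compiling without unused-variable warnings.
 */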
#define USB_PORT_STAT_C_RESET UPS_C_PORT_RESET
#define USB_PORT_STAT_C_L1 UPS_C_PORT_L1
-STATIC_INLINE void
+#define USB_DT_HUB UDESC_HUB
+
+/* See USB 2.0 spec Table 11-13, offset 3 */
+#define HUB_CHAR_LPSM UHD_PWR
+#define HUB_CHAR_COMMON_LPSM UHD_PWR_GANGED
+#define HUB_CHAR_INDV_PORT_LPSM UHD_PWR_INDIVIDUAL
+#define HUB_CHAR_NO_LPSM UHD_PWR_NO_SWITCH
+
+#define HUB_CHAR_COMPOUND UHD_COMPOUND
+
+#define HUB_CHAR_OCPM UHD_OC
+#define HUB_CHAR_COMMON_OCPM UHD_OC_GLOBAL
+#define HUB_CHAR_INDV_PORT_OCPM UHD_OC_INDIVIDUAL
+#define HUB_CHAR_NO_OCPM UHD_OC_NONE
+
+#define HUB_CHAR_TTTT UHD_TT_THINK
+#define HUB_CHAR_PORTIND UHD_PORT_IND
+
+enum usb_dr_mode {
+ USB_DR_MODE_UNKNOWN,
+ USB_DR_MODE_HOST,
+ USB_DR_MODE_PERIPHERAL,
+ USB_DR_MODE_OTG,
+};
+
+struct usb_phy;
+struct usb_hcd;
+
+static inline int
+usb_phy_set_suspend(struct usb_phy *x, int suspend)
+{
+
+ return 0;
+}
+
+static inline void
+usb_hcd_resume_root_hub(struct usb_hcd *hcd)
+{
+
+ return;
+}
+
+static inline int
+usb_disabled(void)
+{
+
+ return 0;
+}
+
+static inline void
udelay(unsigned long usecs)
{
+
DELAY(usecs);
}
+static inline void
+ndelay(unsigned long nsecs)
+{
+
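+	/*
+	 * DELAY(9) counts microseconds, so sub-microsecond requests
+	 * round down to DELAY(0); this shim provides at best
+	 * microsecond granularity.
+	 */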
+ DELAY(nsecs / 1000);
+}
+
#define EREMOTEIO EIO
#define ECOMM EIO
+#define ENOTSUPP ENOTSUP
#define NS_TO_US(ns) (((ns) + 500L) / 1000L)
void dw_timeout(void *);
-void dwc2_worker(struct task *, void *);
struct delayed_work {
struct task work;
void *dw_arg;
};
-STATIC_INLINE void
+static inline void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(void *), void *arg)
{
dw->dw_fn = fn;
timeout_set(&dw->dw_timer, dw_timeout, dw);
}
-STATIC_INLINE void
+static inline void
queue_delayed_work(struct taskq *wq, struct delayed_work *dw, int j)
{
dw->dw_wq = wq;
timeout_add(&dw->dw_timer, j);
}
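
/*
 * Usage sketch for the delayed-work shim above (hsotg->wq_otg is
 * assumed to be the driver's taskq; the delay is in ticks):
 *
 *	INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func, hsotg);
 *	queue_delayed_work(hsotg->wq_otg, &hsotg->start_work, mstohz(50));
 */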
+#define USB_RESUME_TIMEOUT 40 /* ms */
+
#endif
-/* $OpenBSD: dwc2_core.c,v 1.9 2017/09/08 05:36:52 deraadt Exp $ */
+/* $OpenBSD: dwc2_core.c,v 1.10 2021/07/22 18:32:33 mglocker Exp $ */
/* $NetBSD: dwc2_core.c,v 1.6 2014/04/03 06:34:58 skrll Exp $ */
/*
#include <dev/usb/dwc2/dwc2_core.h>
#include <dev/usb/dwc2/dwc2_hcd.h>
+#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+/**
+ * dwc2_backup_host_registers() - Backup controller host registers.
+ * When suspending the USB bus, host registers need to be backed up
+ * if controller power is disabled once suspended.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+STATIC int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hregs_backup *hr;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Backup Host regs */
+ hr = &hsotg->hr_backup;
+ hr->hcfg = DWC2_READ_4(hsotg, HCFG);
+ hr->haintmsk = DWC2_READ_4(hsotg, HAINTMSK);
+ for (i = 0; i < hsotg->core_params->host_channels; ++i)
+ hr->hcintmsk[i] = DWC2_READ_4(hsotg, HCINTMSK(i));
+
+ hr->hprt0 = DWC2_READ_4(hsotg, HPRT0);
+ hr->hfir = DWC2_READ_4(hsotg, HFIR);
+ hr->valid = true;
+
+ return 0;
+}
+
+/**
+ * dwc2_restore_host_registers() - Restore controller host registers.
+ * When resuming the USB bus, host registers need to be restored
+ * if controller power was disabled.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+STATIC int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hregs_backup *hr;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Restore host regs */
+ hr = &hsotg->hr_backup;
+ if (!hr->valid) {
+ dev_err(hsotg->dev, "%s: no host registers to restore\n",
+ __func__);
+ return -EINVAL;
+ }
+ hr->valid = false;
+
+ DWC2_WRITE_4(hsotg, HCFG, hr->hcfg);
+ DWC2_WRITE_4(hsotg, HAINTMSK, hr->haintmsk);
+
+ for (i = 0; i < hsotg->core_params->host_channels; ++i)
+ DWC2_WRITE_4(hsotg, HCINTMSK(i), hr->hcintmsk[i]);
+
+ DWC2_WRITE_4(hsotg, HPRT0, hr->hprt0);
+ DWC2_WRITE_4(hsotg, HFIR, hr->hfir);
+ hsotg->frame_number = 0;
+
+ return 0;
+}
+#else
+static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+
+static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+#endif
+
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+/**
+ * dwc2_backup_device_registers() - Backup controller device registers.
+ * When suspending the USB bus, device registers need to be backed up
+ * if controller power is disabled once suspended.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+STATIC int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_dregs_backup *dr;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Backup dev regs */
+ dr = &hsotg->dr_backup;
+
+ dr->dcfg = DWC2_READ_4(hsotg, DCFG);
+ dr->dctl = DWC2_READ_4(hsotg, DCTL);
+ dr->daintmsk = DWC2_READ_4(hsotg, DAINTMSK);
+ dr->diepmsk = DWC2_READ_4(hsotg, DIEPMSK);
+ dr->doepmsk = DWC2_READ_4(hsotg, DOEPMSK);
+
+ for (i = 0; i < hsotg->num_of_eps; i++) {
+ /* Backup IN EPs */
+ dr->diepctl[i] = DWC2_READ_4(hsotg, DIEPCTL(i));
+
+ /* Ensure DATA PID is correctly configured */
+ if (dr->diepctl[i] & DXEPCTL_DPID)
+ dr->diepctl[i] |= DXEPCTL_SETD1PID;
+ else
+ dr->diepctl[i] |= DXEPCTL_SETD0PID;
+
+ dr->dieptsiz[i] = DWC2_READ_4(hsotg, DIEPTSIZ(i));
+ dr->diepdma[i] = DWC2_READ_4(hsotg, DIEPDMA(i));
+
+ /* Backup OUT EPs */
+ dr->doepctl[i] = DWC2_READ_4(hsotg, DOEPCTL(i));
+
+ /* Ensure DATA PID is correctly configured */
+ if (dr->doepctl[i] & DXEPCTL_DPID)
+ dr->doepctl[i] |= DXEPCTL_SETD1PID;
+ else
+ dr->doepctl[i] |= DXEPCTL_SETD0PID;
+
+ dr->doeptsiz[i] = DWC2_READ_4(hsotg, DOEPTSIZ(i));
+ dr->doepdma[i] = DWC2_READ_4(hsotg, DOEPDMA(i));
+ }
+ dr->valid = true;
+ return 0;
+}
+
+/**
+ * dwc2_restore_device_registers() - Restore controller device registers.
+ * When resuming the USB bus, device registers need to be restored
+ * if controller power was disabled.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+STATIC int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_dregs_backup *dr;
+ u32 dctl;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Restore dev regs */
+ dr = &hsotg->dr_backup;
+ if (!dr->valid) {
+ dev_err(hsotg->dev, "%s: no device registers to restore\n",
+ __func__);
+ return -EINVAL;
+ }
+ dr->valid = false;
+
+ DWC2_WRITE_4(hsotg, DCFG, dr->dcfg);
+ DWC2_WRITE_4(hsotg, DCTL, dr->dctl);
+ DWC2_WRITE_4(hsotg, DAINTMSK, dr->daintmsk);
+ DWC2_WRITE_4(hsotg, DIEPMSK, dr->diepmsk);
+ DWC2_WRITE_4(hsotg, DOEPMSK, dr->doepmsk);
+
+ for (i = 0; i < hsotg->num_of_eps; i++) {
+ /* Restore IN EPs */
+ DWC2_WRITE_4(hsotg, DIEPCTL(i), dr->diepctl[i]);
+ DWC2_WRITE_4(hsotg, DIEPTSIZ(i), dr->dieptsiz[i]);
+ DWC2_WRITE_4(hsotg, DIEPDMA(i), dr->diepdma[i]);
+
+ /* Restore OUT EPs */
+ DWC2_WRITE_4(hsotg, DOEPCTL(i), dr->doepctl[i]);
+ DWC2_WRITE_4(hsotg, DOEPTSIZ(i), dr->doeptsiz[i]);
+ DWC2_WRITE_4(hsotg, DOEPDMA(i), dr->doepdma[i]);
+ }
+
+ /* Set the Power-On Programming done bit */
+ dctl = DWC2_READ_4(hsotg, DCTL);
+ dctl |= DCTL_PWRONPRGDONE;
+ DWC2_WRITE_4(hsotg, DCTL, dctl);
+
+ return 0;
+}
+#else
+static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+
+static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+#endif
+
+/**
+ * dwc2_backup_global_registers() - Backup global controller registers.
+ * When suspending the USB bus, global registers need to be backed up
+ * if controller power is disabled once suspended.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+STATIC int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_gregs_backup *gr;
+ int i;
+
+ /* Backup global regs */
+ gr = &hsotg->gr_backup;
+
+ gr->gotgctl = DWC2_READ_4(hsotg, GOTGCTL);
+ gr->gintmsk = DWC2_READ_4(hsotg, GINTMSK);
+ gr->gahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
+ gr->gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
+ gr->grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
+ gr->gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
+ gr->hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ);
+ gr->gdfifocfg = DWC2_READ_4(hsotg, GDFIFOCFG);
+ for (i = 0; i < MAX_EPS_CHANNELS; i++)
+ gr->dtxfsiz[i] = DWC2_READ_4(hsotg, DPTXFSIZN(i));
+
+ gr->valid = true;
+ return 0;
+}
+
+/**
+ * dwc2_restore_global_registers() - Restore controller global registers.
+ * When resuming the USB bus, global registers need to be restored
+ * if controller power was disabled.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+STATIC int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_gregs_backup *gr;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Restore global regs */
+ gr = &hsotg->gr_backup;
+ if (!gr->valid) {
+ dev_err(hsotg->dev, "%s: no global registers to restore\n",
+ __func__);
+ return -EINVAL;
+ }
+ gr->valid = false;
+
+ DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff);
+ DWC2_WRITE_4(hsotg, GOTGCTL, gr->gotgctl);
+ DWC2_WRITE_4(hsotg, GINTMSK, gr->gintmsk);
+ DWC2_WRITE_4(hsotg, GUSBCFG, gr->gusbcfg);
+ DWC2_WRITE_4(hsotg, GAHBCFG, gr->gahbcfg);
+ DWC2_WRITE_4(hsotg, GRXFSIZ, gr->grxfsiz);
+ DWC2_WRITE_4(hsotg, GNPTXFSIZ, gr->gnptxfsiz);
+ DWC2_WRITE_4(hsotg, HPTXFSIZ, gr->hptxfsiz);
+ DWC2_WRITE_4(hsotg, GDFIFOCFG, gr->gdfifocfg);
+ for (i = 0; i < MAX_EPS_CHANNELS; i++)
+ DWC2_WRITE_4(hsotg, DPTXFSIZN(i), gr->dtxfsiz[i]);
+
+ return 0;
+}
+
+/**
+ * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @restore: Controller registers need to be restored
+ */
+int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
+{
+ u32 pcgcctl;
+ int ret = 0;
+
+ if (!hsotg->core_params->hibernation)
+ return -ENOTSUP;
+
+ pcgcctl = DWC2_READ_4(hsotg, PCGCTL);
+ pcgcctl &= ~PCGCTL_STOPPCLK;
+ DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
+
+ pcgcctl = DWC2_READ_4(hsotg, PCGCTL);
+ pcgcctl &= ~PCGCTL_PWRCLMP;
+ DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
+
+ pcgcctl = DWC2_READ_4(hsotg, PCGCTL);
+ pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
+ DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
+
+ udelay(100);
+ if (restore) {
+ ret = dwc2_restore_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore registers\n",
+ __func__);
+ return ret;
+ }
+ if (dwc2_is_host_mode(hsotg)) {
+ ret = dwc2_restore_host_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore host registers\n",
+ __func__);
+ return ret;
+ }
+ } else {
+ ret = dwc2_restore_device_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore device registers\n",
+ __func__);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * dwc2_enter_hibernation() - Put controller in Partial Power Down.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
+{
+ u32 pcgcctl;
+ int ret = 0;
+
+ if (!hsotg->core_params->hibernation)
+ return -ENOTSUP;
+
+ /* Backup all registers */
+ ret = dwc2_backup_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup global registers\n",
+ __func__);
+ return ret;
+ }
+
+ if (dwc2_is_host_mode(hsotg)) {
+ ret = dwc2_backup_host_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup host registers\n",
+ __func__);
+ return ret;
+ }
+ } else {
+ ret = dwc2_backup_device_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup device registers\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ /*
+ * Clear any pending interrupts since dwc2 will not be able to
+ * clear them after entering hibernation.
+ */
+ DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff);
+
+ /* Put the controller in low power state */
+ pcgcctl = DWC2_READ_4(hsotg, PCGCTL);
+
+ pcgcctl |= PCGCTL_PWRCLMP;
+ DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
+ ndelay(20);
+
+ pcgcctl |= PCGCTL_RSTPDWNMODULE;
+ DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
+ ndelay(20);
+
+ pcgcctl |= PCGCTL_STOPPCLK;
+ DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
+
+ return ret;
+}
+
/**
* dwc2_enable_common_interrupts() - Initializes the common interrupts
* used in both device and host modes
if (hsotg->core_params->dma_enable <= 0)
intmsk |= GINTSTS_RXFLVL;
+ if (hsotg->core_params->external_id_pin_ctl <= 0)
+ intmsk |= GINTSTS_CONIDSTSCHNG;
- intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
+ intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
GINTSTS_SESSREQINT;
DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
* Do a soft reset of the core. Be careful with this because it
* resets all the internal state machines of the core.
*/
-STATIC int dwc2_core_reset(struct dwc2_hsotg *hsotg)
+int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
u32 greset;
int count = 0;
dev_vdbg(hsotg->dev, "%s()\n", __func__);
- /* Wait for AHB master IDLE state */
+ /* Core Soft Reset */
+ greset = DWC2_READ_4(hsotg, GRSTCTL);
+ greset |= GRSTCTL_CSFTRST;
+ DWC2_WRITE_4(hsotg, GRSTCTL, greset);
do {
- usleep_range(20000, 40000);
+ udelay(1);
greset = DWC2_READ_4(hsotg, GRSTCTL);
if (++count > 50) {
dev_warn(hsotg->dev,
- "%s() HANG! AHB Idle GRSTCTL=%0x\n",
+ "%s() HANG! Soft Reset GRSTCTL=%0x\n",
__func__, greset);
return -EBUSY;
}
- } while (!(greset & GRSTCTL_AHBIDLE));
+ } while (greset & GRSTCTL_CSFTRST);
- /* Core Soft Reset */
+ /* Wait for AHB master IDLE state */
count = 0;
- greset |= GRSTCTL_CSFTRST;
- DWC2_WRITE_4(hsotg, GRSTCTL, greset);
do {
- usleep_range(20000, 40000);
+ udelay(1);
greset = DWC2_READ_4(hsotg, GRSTCTL);
if (++count > 50) {
dev_warn(hsotg->dev,
- "%s() HANG! Soft Reset GRSTCTL=%0x\n",
+ "%s() HANG! AHB Idle GRSTCTL=%0x\n",
__func__, greset);
return -EBUSY;
}
- } while (greset & GRSTCTL_CSFTRST);
+ } while (!(greset & GRSTCTL_AHBIDLE));
+
+ return 0;
+}
+
+/*
+ * Force the mode of the controller.
+ *
+ * Forcing the mode is needed for two cases:
+ *
+ * 1) If the dr_mode is set to either HOST or PERIPHERAL we force the
+ * controller to stay in a particular mode regardless of ID pin
+ * changes. We usually do this after a core reset.
+ *
+ * 2) During probe we want to read reset values of the hw
+ * configuration registers that are only available in either host or
+ * device mode. We may need to force the mode if the current mode does
+ * not allow us to access the register in the mode that we want.
+ *
+ * In either case it only makes sense to force the mode if the
+ * controller hardware is OTG capable.
+ *
+ * Checks are done in this function to determine whether doing a force
+ * would be valid or not.
+ *
+ * If a force is done, it requires a 25ms delay to take effect.
+ *
+ * Returns true if the mode was forced.
+ */
+STATIC bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
+{
+ struct dwc2_softc *sc = hsotg->hsotg_sc;
+ u32 gusbcfg;
+ u32 set;
+ u32 clear;
+
+ dev_dbg(hsotg->dev, "Forcing mode to %s\n", host ? "host" : "device");
+
+ /*
+ * Force mode has no effect if the hardware is not OTG.
+ */
+ if (!dwc2_hw_is_otg(hsotg))
+ return false;
+
+ /*
+ * If dr_mode is either peripheral or host only, there is no
+ * need to ever force the mode to the opposite mode.
+ */
+ if (host && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
+ WARN_ON(1);
+ return false;
+ }
+
+ if (!host && hsotg->dr_mode == USB_DR_MODE_HOST) {
+ WARN_ON(1);
+ return false;
+ }
+
+ gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
+
+ set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
+ clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;
+
+ gusbcfg &= ~clear;
+ gusbcfg |= set;
+ DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
+
+ usb_delay_ms(&sc->sc_bus, 25);
+ return true;
+}
+
+/*
+ * Clears the force mode bits.
+ */
+STATIC void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_softc *sc = hsotg->hsotg_sc;
+ u32 gusbcfg;
+
+ gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
+ gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
+ gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
+ DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
/*
* NOTE: This long sleep is _very_ important, otherwise the core will
* not stay in host mode after a connector ID change!
*/
- usleep_range(150000, 200000);
+ usb_delay_ms(&sc->sc_bus, 25);
+}
+/*
+ * Sets or clears force mode based on the dr_mode parameter.
+ */
+void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
+{
+ switch (hsotg->dr_mode) {
+ case USB_DR_MODE_HOST:
+ dwc2_force_mode(hsotg, true);
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ dwc2_force_mode(hsotg, false);
+ break;
+ case USB_DR_MODE_OTG:
+ dwc2_clear_force_mode(hsotg);
+ break;
+ default:
+ dev_warn(hsotg->dev, "%s() Invalid dr_mode=%d\n",
+ __func__, hsotg->dr_mode);
+ break;
+ }
+}
+
+/*
+ * Do a soft reset of the core. Be careful with this because it
+ * resets all the internal state machines of the core.
+ *
+ * Additionally this will apply force mode as per the hsotg->dr_mode
+ * parameter.
+ */
+int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg)
+{
+ int retval;
+
+ retval = dwc2_core_reset(hsotg);
+ if (retval)
+ return retval;
+
+ dwc2_force_dr_mode(hsotg);
return 0;
}
*/
if (select_phy) {
dev_dbg(hsotg->dev, "FS PHY selected\n");
+
usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
- usbcfg |= GUSBCFG_PHYSEL;
- DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
+ if (!(usbcfg & GUSBCFG_PHYSEL)) {
+ usbcfg |= GUSBCFG_PHYSEL;
+ DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
- /* Reset after a PHY select */
- retval = dwc2_core_reset(hsotg);
- if (retval) {
- dev_err(hsotg->dev, "%s() Reset failed, aborting",
- __func__);
- return retval;
+ /* Reset after a PHY select */
+ retval = dwc2_core_reset_and_force_dr_mode(hsotg);
+
+ if (retval) {
+ dev_err(hsotg->dev,
+ "%s: Reset failed, aborting", __func__);
+ return retval;
+ }
}
}
STATIC int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
- u32 usbcfg;
+ u32 usbcfg, usbcfg_old;
int retval = 0;
if (!select_phy)
return 0;
- usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
+ usbcfg = usbcfg_old = DWC2_READ_4(hsotg, GUSBCFG);
/*
* HS PHY parameters. These parameters are preserved during soft reset
break;
}
- DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
+ if (usbcfg != usbcfg_old) {
+ DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
- /* Reset after setting the PHY parameters */
- retval = dwc2_core_reset(hsotg);
- if (retval) {
- dev_err(hsotg->dev, "%s() Reset failed, aborting",
- __func__);
- return retval;
+ /* Reset after setting the PHY parameters */
+ retval = dwc2_core_reset_and_force_dr_mode(hsotg);
+ if (retval) {
+ dev_err(hsotg->dev,
+ "%s: Reset failed, aborting", __func__);
+ return retval;
+ }
}
return retval;
STATIC int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
+ struct dwc2_softc *sc = hsotg->hsotg_sc;
u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
switch (hsotg->hw_params.arch) {
case GHWCFG2_EXT_DMA_ARCH:
- dev_err(hsotg->dev, "External DMA Mode\n");
+ dev_dbg(hsotg->dev, "External DMA Mode\n");
+ if (!sc->sc_set_dma_addr) {
+ dev_err(hsotg->dev, "External DMA Mode not supported\n");
+ return -EINVAL;
+ }
if (hsotg->core_params->ahbcfg != -1) {
ahbcfg &= GAHBCFG_CTRL_MASK;
ahbcfg |= hsotg->core_params->ahbcfg &
* dwc2_core_init() - Initializes the DWC_otg controller registers and
* prepares the core for device mode or host mode operation
*
- * @hsotg: Programming view of the DWC_otg controller
- * @select_phy: If true then also set the Phy type
- * @irq: If >= 0, the irq to register
+ * @hsotg: Programming view of the DWC_otg controller
+ * @initial_setup: If true then this is the first init for this instance.
*/
-int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy)
+int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
{
u32 usbcfg, otgctl;
int retval;
DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
- /* Reset the Controller */
- retval = dwc2_core_reset(hsotg);
- if (retval) {
- dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
- __func__);
- return retval;
+ /*
+ * Reset the Controller
+ *
+ * We only need to reset the controller if this is a re-init.
+ * For the first init we know for sure that earlier code reset us (it
+ * needed to in order to properly detect various parameters).
+ */
+ if (!initial_setup) {
+ retval = dwc2_core_reset_and_force_dr_mode(hsotg);
+ if (retval) {
+ dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
+ __func__);
+ return retval;
+ }
}
/*
* This needs to happen in FS mode before any other programming occurs
*/
- retval = dwc2_phy_init(hsotg, select_phy);
+ retval = dwc2_phy_init(hsotg, initial_setup);
if (retval)
return retval;
dwc2_enable_common_interrupts(hsotg);
/*
- * Do device or host intialization based on mode during PCD and
+ * Do device or host initialization based on mode during PCD and
* HCD initialization
*/
if (dwc2_is_host_mode(hsotg)) {
/* Disable host mode interrupts without disturbing common interrupts */
intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
- GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
+ GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
}
+/*
+ * dwc2_calculate_dynamic_fifo() - Calculates the default fifo sizes
+ * for systems that have a total fifo depth smaller than the default
+ * RX + TX fifo size.
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+STATIC void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *params = hsotg->core_params;
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
+
+ total_fifo_size = hw->total_fifo_size;
+ rxfsiz = params->host_rx_fifo_size;
+ nptxfsiz = params->host_nperio_tx_fifo_size;
+ ptxfsiz = params->host_perio_tx_fifo_size;
+
+ /*
+ * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
+ * allocation with support for high bandwidth endpoints. Synopsys
+ * defines MPS(Max Packet size) for a periodic EP=1024, and for
+ * non-periodic as 512.
+ */
+ if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
+ /*
+ * For Buffer DMA mode/Scatter Gather DMA mode
+ * 2 * ((Largest Packet size / 4) + 1 + 1) + n
+ * with n = number of host channels.
+ * 2 * ((1024/4) + 2) = 516
+ */
+ rxfsiz = 516 + hw->host_channels;
+
+ /*
+ * min non-periodic tx fifo depth
+ * 2 * (largest non-periodic USB packet used / 4)
+ * 2 * (512/4) = 256
+ */
+ nptxfsiz = 256;
+
+ /*
+ * min periodic tx fifo depth
+ * (largest packet size*MC)/4
+ * (1024 * 3)/4 = 768
+ */
+ ptxfsiz = 768;
+
+ params->host_rx_fifo_size = rxfsiz;
+ params->host_nperio_tx_fifo_size = nptxfsiz;
+ params->host_perio_tx_fifo_size = ptxfsiz;
+ }
+
+ /*
+ * If the summation of RX, NPTX and PTX fifo sizes is still
+ * bigger than the total_fifo_size, then we have a problem.
+ *
+ * We won't be able to allocate as many endpoints. Right now,
+ * we're just printing an error message, but ideally this FIFO
+ * allocation algorithm would be improved in the future.
+ *
+ * FIXME improve this FIFO allocation algorithm.
+ */
+ if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz))
+ dev_err(hsotg->dev, "invalid fifo sizes\n");
+}
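+
+/*
+ * Worked example: with 16 host channels the clamped minimums come to
+ * rxfsiz = 516 + 16 = 532, nptxfsiz = 256 and ptxfsiz = 768, so the
+ * core must provide at least 1556 FIFO entries to pass the final
+ * sanity check.
+ */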
+
STATIC void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *params = hsotg->core_params;
if (!params->enable_dynamic_fifo)
return;
+ dwc2_calculate_dynamic_fifo(hsotg);
+
/* Rx FIFO */
grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
grxfsiz |= params->host_rx_fifo_size <<
GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
DWC2_WRITE_4(hsotg, GRXFSIZ, grxfsiz);
- dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", DWC2_READ_4(hsotg, GRXFSIZ));
+ dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
+ DWC2_READ_4(hsotg, GRXFSIZ));
/* Non-periodic Tx FIFO */
dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
u32 hcchar;
u32 hctsiz = 0;
u16 num_packets;
+ u32 ec_mc;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
TSIZ_XFERSIZE_MASK;
+
+ /* For split set ec_mc for immediate retries */
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ ec_mc = 3;
+ else
+ ec_mc = 1;
} else {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "no split\n");
hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
TSIZ_XFERSIZE_MASK;
+
+ /* The ec_mc gets the multi_count for non-split */
+ ec_mc = chan->multi_count;
}
chan->start_pkt_count = num_packets;
} else {
dma_addr = chan->xfer_dma;
}
- struct dwc2_core_dma_config *dma_config =
- hsotg->core_dma_config;
- if (dma_config == NULL) {
+ if (hsotg->hsotg_sc->sc_set_dma_addr == NULL) {
DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num),
(u32)dma_addr);
if (dbg_hc(chan))
(unsigned long)dma_addr,
chan->hc_num);
} else {
- (void)(*dma_config->set_dma_addr)(
- dma_config->set_dma_addr_data, dma_addr,
- chan->hc_num);
+ (void)(*hsotg->hsotg_sc->sc_set_dma_addr)(
+ hsotg->dev, dma_addr, chan->hc_num);
}
}
hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
hcchar &= ~HCCHAR_MULTICNT_MASK;
- hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
- HCCHAR_MULTICNT_MASK;
+ hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
if (hcchar & HCCHAR_CHDIS)
struct dwc2_host_chan *chan)
{
u32 hcchar;
- u32 hc_dma;
u32 hctsiz = 0;
if (chan->do_ping)
DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
- hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
+ usb_syncmem(&chan->desc_list_usbdma, 0, chan->desc_list_sz,
+ BUS_DMASYNC_PREWRITE);
- /* Always start from first descriptor */
- hc_dma &= ~HCDMA_CTD_MASK;
- DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num), hc_dma);
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
- hc_dma, chan->hc_num);
+ if (hsotg->hsotg_sc->sc_set_dma_addr == NULL) {
+ DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num), chan->desc_list_addr);
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
+ &chan->desc_list_addr, chan->hc_num);
+ } else {
+ (void)(*hsotg->hsotg_sc->sc_set_dma_addr)(
+ hsotg->dev, chan->desc_list_addr, chan->hc_num);
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Wrote %pad to ext dma(%d)\n",
+ &chan->desc_list_addr, chan->hc_num);
+ }
hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
hcchar &= ~HCCHAR_MULTICNT_MASK;
if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
/* High speed case */
- return 125 * clock - 1;
+ return 125 * clock;
else
/* FS/LS case */
- return 1000 * clock - 1;
+ return 1000 * clock;
}
/**
hsotg->core_params->dma_desc_enable = val;
}
+void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
+ !hsotg->hw_params.dma_desc_enable))
+ valid = 0;
+ if (val < 0)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
+ val);
+ val = (hsotg->core_params->dma_enable > 0 &&
+ hsotg->hw_params.dma_desc_enable);
+ }
+
+ hsotg->core_params->dma_desc_fs_enable = val;
+ dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
+}
+
void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
int val)
{
hsotg->core_params->uframe_sched = val;
}
+STATIC void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter external_id_pin_ctl\n",
+ val);
+ dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
+ }
+
+ hsotg->core_params->external_id_pin_ctl = val;
+}
+
+STATIC void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter hibernation\n",
+ val);
+ dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
+ }
+
+ hsotg->core_params->hibernation = val;
+}
+
/*
* This function is called during module initialization to pass module parameters
* for the DWC_otg core.
dwc2_set_param_otg_cap(hsotg, params->otg_cap);
dwc2_set_param_dma_enable(hsotg, params->dma_enable);
dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
+ dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
dwc2_set_param_host_support_fs_ls_low_power(hsotg,
params->host_support_fs_ls_low_power);
dwc2_set_param_enable_dynamic_fifo(hsotg,
dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
dwc2_set_param_otg_ver(hsotg, params->otg_ver);
dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
+ dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
+ dwc2_set_param_hibernation(hsotg, params->hibernation);
+}
+
+/*
+ * Forces either host or device mode if the controller is not
+ * currently in that mode.
+ *
+ * Returns true if the mode was forced.
+ */
+STATIC bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
+{
+ if (host && dwc2_is_host_mode(hsotg))
+ return false;
+ else if (!host && dwc2_is_device_mode(hsotg))
+ return false;
+
+ return dwc2_force_mode(hsotg, host);
+}
+
+/*
+ * Gets host hardware parameters. Forces host mode if not currently in
+ * host mode. Should be called immediately after a core soft reset in
+ * order to get the reset values.
+ */
+STATIC void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ u32 gnptxfsiz;
+ u32 hptxfsiz;
+ bool forced;
+
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
+ return;
+
+ forced = dwc2_force_mode_if_needed(hsotg, true);
+
+ gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
+ hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ);
+ dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
+ dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
+
+ if (forced)
+ dwc2_clear_force_mode(hsotg);
+
+ hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+ hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+}
+
+/*
+ * Gets device hardware parameters. Forces device mode if not
+ * currently in device mode. Should be called immediately after a core
+ * soft reset in order to get the reset values.
+ */
+STATIC void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ bool forced;
+ u32 gnptxfsiz;
+
+ if (hsotg->dr_mode == USB_DR_MODE_HOST)
+ return;
+
+ forced = dwc2_force_mode_if_needed(hsotg, false);
+
+ gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
+ dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
+
+ if (forced)
+ dwc2_clear_force_mode(hsotg);
+
+ hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
}
/**
{
struct dwc2_hw_params *hw = &hsotg->hw_params;
unsigned width;
- u32 hwcfg2, hwcfg3, hwcfg4;
- u32 hptxfsiz, grxfsiz, gnptxfsiz;
- u32 gusbcfg;
+ u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
+ u32 grxfsiz;
/*
* Attempt to ensure this device is really a DWC_otg Controller.
hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
+ hwcfg1 = DWC2_READ_4(hsotg, GHWCFG1);
hwcfg2 = DWC2_READ_4(hsotg, GHWCFG2);
hwcfg3 = DWC2_READ_4(hsotg, GHWCFG3);
hwcfg4 = DWC2_READ_4(hsotg, GHWCFG4);
- gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
- dev_dbg(hsotg->dev, "hwcfg1=%08x\n", DWC2_READ_4(hsotg, GHWCFG1));
+ dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
- dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
- /* Force host mode to get HPTXFSIZ exact power on value */
- gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
- gusbcfg |= GUSBCFG_FORCEHOSTMODE;
- DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
- usleep_range(100000, 150000);
+ /*
+ * Host specific hardware parameters. Reading these parameters
+ * requires the controller to be in host mode. The mode will
+ * be forced, if necessary, to read these values.
+ */
+ dwc2_get_host_hwparams(hsotg);
+ dwc2_get_dev_hwparams(hsotg);
- hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ);
- dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
- gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
- gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
- DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
- usleep_range(100000, 150000);
+ /* hwcfg1 */
+ hw->dev_ep_dirs = hwcfg1;
/* hwcfg2 */
hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
hw->max_transfer_size = (1 << (width + 11)) - 1;
+ /*
+ * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
+ * coherent buffers with this size, and if it's too large we can
+ * exhaust the coherent DMA pool.
+ */
+ if (hw->max_transfer_size > 65535)
+ hw->max_transfer_size = 65535;
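+	/*
+	 * Worked example of the width formula above (values illustrative):
+	 * a counter width of 5 gives (1 << (5 + 11)) - 1 = 65535, so any
+	 * wider counter (e.g. width 8: (1 << 19) - 1 = 524287) is clipped
+	 * to 65535 here.
+	 */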
width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
hw->max_packet_count = (1 << (width + 4)) - 1;
/* fifo sizes */
hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
GRXFSIZ_DEPTH_SHIFT;
- hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
- FIFOSIZE_DEPTH_SHIFT;
- hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
- FIFOSIZE_DEPTH_SHIFT;
dev_dbg(hsotg->dev, "Detected values from hardware:\n");
dev_dbg(hsotg->dev, " op_mode=%d\n",
hw->hs_phy_type);
dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
hw->fs_phy_type);
- dev_dbg(hsotg->dev, " utmi_phy_data_wdith=%d\n",
+ dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
hw->utmi_phy_data_width);
dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
hw->num_dev_ep);
return 0;
}
+/*
+ * Sets all parameters to the given value.
+ *
+ * Assumes that the dwc2_core_params struct contains only integers.
+ */
+void dwc2_set_all_params(struct dwc2_core_params *params, int value)
+{
+ int *p = (int *)params;
+ size_t size = sizeof(*params) / sizeof(*p);
+ int i;
+
+ for (i = 0; i < size; i++)
+ p[i] = value;
+}
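+
+/*
+ * Illustrative usage (this mirrors how the HCD formerly seeded its
+ * parameter block before validating platform values): mark every
+ * field "not set" so the driver auto-detects anything the platform
+ * does not override.
+ *
+ *	dwc2_set_all_params(hsotg->core_params, -1);
+ *	dwc2_set_parameters(hsotg, params);
+ */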
+
u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
{
return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
}
+
+/* Returns the controller's GHWCFG2.OTG_MODE. */
+unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg)
+{
+ u32 ghwcfg2 = DWC2_READ_4(hsotg, GHWCFG2);
+
+ return (ghwcfg2 & GHWCFG2_OP_MODE_MASK) >>
+ GHWCFG2_OP_MODE_SHIFT;
+}
+
+/* Returns true if the controller is capable of dual-role (DRD) operation. */
+bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg)
+{
+ unsigned op_mode = dwc2_op_mode(hsotg);
+
+ return (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) ||
+ (op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE) ||
+ (op_mode == GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE);
+}
+
+/* Returns true if the controller is host-only. */
+bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg)
+{
+ unsigned op_mode = dwc2_op_mode(hsotg);
+
+ return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) ||
+ (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST);
+}
+
+/* Returns true if the controller is device-only. */
+bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg)
+{
+ unsigned op_mode = dwc2_op_mode(hsotg);
+
+ return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) ||
+ (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE);
+}
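+
+/*
+ * Illustrative sketch (not part of this driver) of how the three
+ * predicates above could pick a supportable dr_mode:
+ *
+ *	if (dwc2_hw_is_host(hsotg))
+ *		hsotg->dr_mode = USB_DR_MODE_HOST;
+ *	else if (dwc2_hw_is_device(hsotg))
+ *		hsotg->dr_mode = USB_DR_MODE_PERIPHERAL;
+ *	else if (dwc2_hw_is_otg(hsotg))
+ *		hsotg->dr_mode = USB_DR_MODE_OTG;
+ */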
-/* $OpenBSD: dwc2_core.h,v 1.9 2015/06/28 11:48:18 jmatthew Exp $ */
+/* $OpenBSD: dwc2_core.h,v 1.10 2021/07/22 18:32:33 mglocker Exp $ */
/* $NetBSD: dwc2_core.h,v 1.5 2014/04/03 06:34:58 skrll Exp $ */
/*
#include <dev/usb/dwc2/dwc2_hw.h>
+#include <dev/usb/dwc2/list.h>
+
/* Maximum number of Endpoints/HostChannels */
#define MAX_EPS_CHANNELS 16
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+
+/* dwc2-hsotg declarations */
+STATIC const char * const dwc2_hsotg_supply_names[] = {
+ "vusb_d", /* digital USB supply, 1.2V */
+ "vusb_a", /* analog USB supply, 1.1V */
+};
+
+/*
+ * EP0_MPS_LIMIT
+ *
+ * Unfortunately there seems to be a limit on the amount of data that can
+ * be transferred by IN transactions on EP0. This is either 127 bytes or 3
+ * packets (which practically means 1 packet and 63 bytes of data) when the
+ * MPS is set to 64.
+ *
+ * This means if we want to move >127 bytes of data, we need to split
+ * the transactions up, but just doing one packet at a time does not
+ * work (this may be due to an implicit DATA0 PID on the first packet
+ * of the transaction) and doing 2 packets is outside the controller's
+ * limits.
+ *
+ * If we try to lower the MPS size for EP0, then no transfers work properly
+ * for EP0, and the system will fail basic enumeration. As no cause for this
+ * has currently been found, we cannot support any large IN transfers for
+ * EP0.
+ */
+#define EP0_MPS_LIMIT 64
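+
+/*
+ * Worked example of the limit above, with MPS = 64: 127 bytes is one
+ * full 64-byte packet followed by a 63-byte short packet; larger IN
+ * transfers are simply not supported on EP0.
+ */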
+
+struct dwc2_hsotg;
+struct dwc2_hsotg_req;
+
+/**
+ * struct dwc2_hsotg_ep - driver endpoint definition.
+ * @ep: The gadget layer representation of the endpoint.
+ * @name: The driver generated name for the endpoint (this array is
+ * also passed to the USB core).
+ * @queue: Queue of requests for this endpoint.
+ * @parent: Reference back to the parent device structure.
+ * @req: The current request that the endpoint is processing. This is
+ * used to indicate a request has been loaded onto the endpoint
+ * and has yet to be completed (maybe due to data move, or simply
+ * awaiting an ack from the core that all of the data has been
+ * completed).
+ * @debugfs: File entry for debugfs file for this endpoint.
+ * @lock: State lock to protect contents of endpoint.
+ * @dir_in: Set to true if this endpoint is of the IN direction, which
+ * means that it is sending data to the Host.
+ * @index: The index for the endpoint registers.
+ * @mc: Multi Count - number of transactions per microframe
+ * @interval: Interval for periodic endpoints
+ * @halted: Set if the endpoint has been halted.
+ * @periodic: Set if this is a periodic ep, such as Interrupt
+ * @isochronous: Set if this is a isochronous ep
+ * @send_zlp: Set if we need to send a zero-length packet.
+ * @total_data: The total number of data bytes done.
+ * @fifo_size: The size of the FIFO (for periodic IN endpoints)
+ * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
+ * @last_load: The offset of data for the last start of request.
+ * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
+ *
+ * This is the driver's state for each registered endpoint, allowing it
+ * to keep track of transactions that need doing. Each endpoint has a
+ * lock to protect the state, to try and avoid using an overall lock
+ * for the host controller as much as possible.
+ *
+ * For periodic IN endpoints, we have fifo_size and fifo_load to try
+ * and keep track of the amount of data in the periodic FIFO for each
+ * of these as we don't have a status register that tells us how much
+ * is in each of them. (Note: this may actually be useless information,
+ * as in shared-FIFO mode the periodic IN path acts more like a
+ * single-frame packet buffer than a FIFO.)
+ */
+struct dwc2_hsotg_ep {
+ struct usb_ep ep;
+ struct list_head queue;
+ struct dwc2_hsotg *parent;
+ struct dwc2_hsotg_req *req;
+ struct dentry *debugfs;
+
+ unsigned long total_data;
+ unsigned int size_loaded;
+ unsigned int last_load;
+ unsigned int fifo_load;
+ unsigned short fifo_size;
+ unsigned short fifo_index;
+
+ unsigned char dir_in;
+ unsigned char index;
+ unsigned char mc;
+ unsigned char interval;
+
+ unsigned int halted:1;
+ unsigned int periodic:1;
+ unsigned int isochronous:1;
+ unsigned int send_zlp:1;
+ unsigned int has_correct_parity:1;
+
+ char name[10];
+};
+
+/**
+ * struct dwc2_hsotg_req - data transfer request
+ * @req: The USB gadget request
+ * @queue: The list of requests for the endpoint this is queued for.
+ * @saved_req_buf: variable to save req.buf when bounce buffers are used.
+ */
+struct dwc2_hsotg_req {
+ struct usb_request req;
+ struct list_head queue;
+ void *saved_req_buf;
+};
+
+#define call_gadget(_hs, _entry) \
+do { \
+ if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \
+ (_hs)->driver && (_hs)->driver->_entry) { \
+ spin_unlock(&_hs->lock); \
+ (_hs)->driver->_entry(&(_hs)->gadget); \
+ spin_lock(&_hs->lock); \
+ } \
+} while (0)
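+
+/*
+ * Example (as used by the core interrupt handlers): forward bus
+ * suspend/resume events to the bound gadget driver. The spinlock is
+ * dropped around the callback, so driver state must be consistent
+ * before invoking it.
+ *
+ *	call_gadget(hsotg, suspend);
+ *	call_gadget(hsotg, resume);
+ */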
+#else
+#define call_gadget(_hs, _entry) do {} while (0)
+#endif
+
struct dwc2_hsotg;
struct dwc2_host_chan;
DWC2_L3, /* Off state */
};
+/*
+ * Gadget periodic tx fifo sizes as used by the legacy driver;
+ * EP0 is not included.
+ */
+#define DWC2_G_P_LEGACY_TX_FIFO_SIZE {256, 256, 256, 256, 768, 768, 768, \
+ 768, 0, 0, 0, 0, 0, 0, 0}
+
+/* Gadget ep0 states */
+enum dwc2_ep0_state {
+ DWC2_EP0_SETUP,
+ DWC2_EP0_DATA_IN,
+ DWC2_EP0_DATA_OUT,
+ DWC2_EP0_STATUS_IN,
+ DWC2_EP0_STATUS_OUT,
+};
+
/**
* struct dwc2_core_params - Parameters for configuring the core
*
* value for this if none is specified.
* 0 - Address DMA
* 1 - Descriptor DMA (default, if available)
+ * @dma_desc_fs_enable: When DMA mode is enabled, specifies whether to use
+ * address DMA mode or descriptor DMA mode for accessing
+ * the data FIFOs in Full Speed mode only. The driver
+ * will automatically detect the value for this if none is
+ * specified.
+ * 0 - Address DMA
+ * 1 - Descriptor DMA in FS (default, if available)
* @speed: Specifies the maximum speed of operation in host and
* device mode. The actual speed depends on the speed of
* the attached device and the value of phy_type.
* by the driver and are ignored in this
* configuration value.
* @uframe_sched: True to enable the microframe scheduler
+ * @external_id_pin_ctl: Specifies whether the ID pin is handled
+ * externally; if so, the CONIDSTSCHNG controller
+ * interrupt is disabled.
+ * 0 - No (default)
+ * 1 - Yes
+ * @hibernation: Specifies whether the controller supports hibernation.
+ * If hibernation is enabled, the controller will enter
+ * hibernation in both peripheral and host mode when
+ * needed.
+ * 0 - No (default)
+ * 1 - Yes
*
* The following parameters may be specified when starting the module. These
* parameters define how the DWC_otg controller should be configured. A
int otg_ver;
int dma_enable;
int dma_desc_enable;
+ int dma_desc_fs_enable;
int speed;
int enable_dynamic_fifo;
int en_multiple_tx_fifo;
int reload_ctl;
int ahbcfg;
int uframe_sched;
+ int external_id_pin_ctl;
+ int hibernation;
};
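+
+/*
+ * A minimal sketch of a platform parameter block (field values are
+ * illustrative only; a real table sets every member, since fields
+ * left at 0 here would mean "disabled" rather than "auto-detect"):
+ *
+ *	static const struct dwc2_core_params example_params = {
+ *		.otg_ver		= -1,	// auto-detect
+ *		.dma_enable		= 1,	// use DMA
+ *		.dma_desc_enable	= 0,	// address DMA
+ *		.dma_desc_fs_enable	= 0,
+ *		.external_id_pin_ctl	= -1,
+ *		.hibernation		= -1,
+ *	};
+ */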
/**
* @power_optimized Are power optimizations enabled?
* @num_dev_ep Number of device endpoints available
* @num_dev_perio_in_ep Number of device periodic IN endpoints
- * avaialable
+ * available
* @dev_token_q_depth Device Mode IN Token Sequence Learning Queue
* Depth
* 0 to 30
* 1 - 16 bits
* 2 - 8 or 16 bits
* @snpsid: Value from SNPSID register
+ * @dev_ep_dirs: Direction of device endpoints (GHWCFG1)
*/
struct dwc2_hw_params {
unsigned op_mode:3;
unsigned arch:2;
unsigned dma_desc_enable:1;
+ unsigned dma_desc_fs_enable:1;
unsigned enable_dynamic_fifo:1;
unsigned en_multiple_tx_fifo:1;
unsigned host_rx_fifo_size:16;
unsigned host_nperio_tx_fifo_size:16;
+ unsigned dev_nperio_tx_fifo_size:16;
unsigned host_perio_tx_fifo_size:16;
unsigned nperio_tx_q_depth:3;
unsigned host_perio_tx_q_depth:3;
unsigned power_optimized:1;
unsigned utmi_phy_data_width:2;
u32 snpsid;
+ u32 dev_ep_dirs;
+};
+
+/* Size of control and EP0 buffers */
+#define DWC2_CTRL_BUFF_SIZE 8
+
+/**
+ * struct dwc2_gregs_backup - Holds global registers state before entering partial
+ * power down
+ * @gotgctl: Backup of GOTGCTL register
+ * @gintmsk: Backup of GINTMSK register
+ * @gahbcfg: Backup of GAHBCFG register
+ * @gusbcfg: Backup of GUSBCFG register
+ * @grxfsiz: Backup of GRXFSIZ register
+ * @gnptxfsiz: Backup of GNPTXFSIZ register
+ * @gi2cctl: Backup of GI2CCTL register
+ * @hptxfsiz: Backup of HPTXFSIZ register
+ * @pcgcctl: Backup of PCGCCTL register
+ * @gdfifocfg: Backup of GDFIFOCFG register
+ * @dtxfsiz: Backup of DTXFSIZ registers for each endpoint
+ * @gpwrdn: Backup of GPWRDN register
+ */
+struct dwc2_gregs_backup {
+ u32 gotgctl;
+ u32 gintmsk;
+ u32 gahbcfg;
+ u32 gusbcfg;
+ u32 grxfsiz;
+ u32 gnptxfsiz;
+ u32 gi2cctl;
+ u32 hptxfsiz;
+ u32 pcgcctl;
+ u32 gdfifocfg;
+ u32 dtxfsiz[MAX_EPS_CHANNELS];
+ u32 gpwrdn;
+ bool valid;
};
-struct dwc2_core_dma_config {
- int (*set_dma_addr)(void *, dma_addr_t, int);
- void *set_dma_addr_data;
+/**
+ * struct dwc2_dregs_backup - Holds device registers state before entering partial
+ * power down
+ * @dcfg: Backup of DCFG register
+ * @dctl: Backup of DCTL register
+ * @daintmsk: Backup of DAINTMSK register
+ * @diepmsk: Backup of DIEPMSK register
+ * @doepmsk: Backup of DOEPMSK register
+ * @diepctl: Backup of DIEPCTL register
+ * @dieptsiz: Backup of DIEPTSIZ register
+ * @diepdma: Backup of DIEPDMA register
+ * @doepctl: Backup of DOEPCTL register
+ * @doeptsiz: Backup of DOEPTSIZ register
+ * @doepdma: Backup of DOEPDMA register
+ */
+struct dwc2_dregs_backup {
+ u32 dcfg;
+ u32 dctl;
+ u32 daintmsk;
+ u32 diepmsk;
+ u32 doepmsk;
+ u32 diepctl[MAX_EPS_CHANNELS];
+ u32 dieptsiz[MAX_EPS_CHANNELS];
+ u32 diepdma[MAX_EPS_CHANNELS];
+ u32 doepctl[MAX_EPS_CHANNELS];
+ u32 doeptsiz[MAX_EPS_CHANNELS];
+ u32 doepdma[MAX_EPS_CHANNELS];
+ bool valid;
};
-TAILQ_HEAD(dwc2_qh_list, dwc2_qh);
+/**
+ * struct dwc2_hregs_backup - Holds host registers state before entering partial
+ * power down
+ * @hcfg: Backup of HCFG register
+ * @haintmsk: Backup of HAINTMSK register
+ * @hcintmsk: Backup of HCINTMSK register
+ * @hprt0: Backup of HPRT0 register
+ * @hfir: Backup of HFIR register
+ */
+struct dwc2_hregs_backup {
+ u32 hcfg;
+ u32 haintmsk;
+ u32 hcintmsk[MAX_EPS_CHANNELS];
+ u32 hprt0;
+ u32 hfir;
+ bool valid;
+};
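+
+/*
+ * The three *_backup structs above are filled in when entering
+ * hibernation (dwc2_enter_hibernation()) and restored on exit
+ * (dwc2_exit_hibernation()); @valid is presumably there to reject
+ * a restore when no backup has been taken.
+ */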
/**
* struct dwc2_hsotg - Holds the state of the driver, including the non-periodic
* and periodic schedules
*
+ * These are common for both host and peripheral modes:
+ *
* @dev: The struct device pointer
* @regs: Pointer to controller regs
- * @core_params: Parameters that define how the core should be configured
* @hw_params: Parameters that were autodetected from the
* hardware registers
+ * @core_params: Parameters that define how the core should be configured
* @op_state: The operational State, during transitions (a_host=>
* a_peripheral and b_device=>b_host) this may not match
* the core, but allows the software to determine
* transitions
+ * @dr_mode: Requested mode of operation, one of following:
+ * - USB_DR_MODE_PERIPHERAL
+ * - USB_DR_MODE_HOST
+ * - USB_DR_MODE_OTG
+ * @hcd_enabled: Host mode sub-driver initialization indicator.
+ * @gadget_enabled: Peripheral mode sub-driver initialization indicator.
+ * @ll_hw_enabled: Status of low-level hardware resources.
+ * @phy: The otg phy transceiver structure for phy control.
+ * @uphy: The otg phy transceiver structure for old USB phy control.
+ * @plat: The platform specific configuration data. This can be removed
+ * once all SoCs support the usb transceiver framework.
+ * @supplies: Definition of USB power supplies
+ * @phyif: PHY interface width
+ * @lock: Spinlock that protects all the driver data structures
+ * @priv: Stores a pointer to the struct usb_hcd
* @queuing_high_bandwidth: True if multiple packets of a high-bandwidth
* transfer are in process of being queued
* @srp_success: Stores status of SRP request in the case of a FS PHY
* interrupt
* @wkp_timer: Timer object for handling Wakeup Detected interrupt
* @lx_state: Lx state of connected device
+ * @gregs_backup: Backup of global registers during suspend
+ * @dregs_backup: Backup of device registers during suspend
+ * @hregs_backup: Backup of host registers during suspend
+ *
+ * These are for host mode:
+ *
* @flags: Flags for handling root port state changes
* @non_periodic_sched_inactive: Inactive QHs in the non-periodic schedule.
* Transfers associated with these QHs are not currently
* @status_buf_dma: DMA address for status_buf
* @start_work: Delayed work for handling host A-cable connection
* @reset_work: Delayed work for handling a port reset
- * @lock: Spinlock that protects all the driver data structures
- * @priv: Stores a pointer to the struct usb_hcd
* @otg_port: OTG port number
* @frame_list: Frame list
* @frame_list_dma: Frame list DMA address
+ * @frame_list_sz: Frame list size
+ * @desc_gen_cache: Kmem cache for generic descriptors
+ * @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors
+ *
+ * These are for peripheral mode:
+ *
+ * @driver: USB gadget driver
+ * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
+ * @num_of_eps: Number of available EPs (excluding EP0)
+ * @debug_root: Root directory for debugfs.
+ * @debug_file: Main status file for debugfs.
+ * @debug_testmode: Testmode status file for debugfs.
+ * @debug_fifo: FIFO status file for debugfs.
+ * @ep0_reply: Request used for ep0 reply.
+ * @ep0_buff: Buffer for EP0 reply data, if needed.
+ * @ctrl_buff: Buffer for EP0 control requests.
+ * @ctrl_req: Request for EP0 control packets.
+ * @ep0_state: EP0 control transfers state
+ * @test_mode: USB test mode requested by the host
+ * @eps: The endpoints being supplied to the gadget framework
+ * @g_using_dma: Indicates whether DMA usage is enabled
+ * @g_rx_fifo_sz: Contains rx fifo size value
+ * @g_np_g_tx_fifo_sz: Contains Non-Periodic tx fifo size value
+ * @g_tx_fifo_sz: Contains tx fifo size value per endpoints
*/
struct dwc2_hsotg {
struct device *dev;
struct dwc2_hw_params hw_params;
/** Params to actually use */
struct dwc2_core_params *core_params;
- struct dwc2_core_dma_config *core_dma_config;
enum usb_otg_state op_state;
+ enum usb_dr_mode dr_mode;
+ unsigned int hcd_enabled:1;
+ unsigned int gadget_enabled:1;
+ unsigned int ll_hw_enabled:1;
+
+ spinlock_t lock;
+ void *priv;
+ struct usb_phy *uphy;
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ struct phy *phy;
+ struct dwc2_hsotg_plat *plat;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(dwc2_hsotg_supply_names)];
+ u32 phyif;
+
+ int irq;
+ struct clk *clk;
+#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
unsigned int queuing_high_bandwidth:1;
unsigned int srp_success:1;
struct task wf_otg;
struct timeout wkp_timer;
enum dwc2_lx_state lx_state;
+ struct dwc2_gregs_backup gr_backup;
+ struct dwc2_dregs_backup dr_backup;
+ struct dwc2_hregs_backup hr_backup;
+
+ struct dentry *debug_root;
+ struct debugfs_regset32 *regset;
+
+ /* DWC OTG HW Release versions */
+#define DWC2_CORE_REV_2_71a 0x4f54271a
+#define DWC2_CORE_REV_2_90a 0x4f54290a
+#define DWC2_CORE_REV_2_92a 0x4f54292a
+#define DWC2_CORE_REV_2_94a 0x4f54294a
+#define DWC2_CORE_REV_3_00a 0x4f54300a
+#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
union dwc2_hcd_internal_flags {
u32 d32;
struct {
unsigned port_suspend_change:1;
unsigned port_over_current_change:1;
unsigned port_l1_change:1;
- unsigned reserved:26;
+ unsigned reserved:25;
} b;
} flags;
- struct dwc2_qh_list non_periodic_sched_inactive;
- struct dwc2_qh_list non_periodic_sched_active;
- struct dwc2_qh *non_periodic_qh_ptr;
- struct dwc2_qh_list periodic_sched_inactive;
- struct dwc2_qh_list periodic_sched_ready;
- struct dwc2_qh_list periodic_sched_assigned;
- struct dwc2_qh_list periodic_sched_queued;
+ struct list_head non_periodic_sched_inactive;
+ struct list_head non_periodic_sched_waiting;
+ struct list_head non_periodic_sched_active;
+ struct list_head *non_periodic_qh_ptr;
+ struct list_head periodic_sched_inactive;
+ struct list_head periodic_sched_ready;
+ struct list_head periodic_sched_assigned;
+ struct list_head periodic_sched_queued;
u16 periodic_usecs;
u16 frame_usecs[8];
u16 frame_number;
u16 periodic_qh_count;
+ bool bus_suspended;
+ bool new_connection;
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
#define FRAME_NUM_ARRAY_SIZE 1000
int dumped_frame_num_array;
#endif
- LIST_HEAD(, dwc2_host_chan) free_hc_list;
+ struct list_head free_hc_list;
int periodic_channels;
int non_periodic_channels;
int available_host_channels;
struct delayed_work start_work;
struct delayed_work reset_work;
- spinlock_t lock;
- void *priv;
u8 otg_port;
struct usb_dma frame_list_usbdma;
u32 *frame_list;
dma_addr_t frame_list_dma;
-
- /* DWC OTG HW Release versions */
-#define DWC2_CORE_REV_2_71a 0x4f54271a
-#define DWC2_CORE_REV_2_90a 0x4f54290a
-#define DWC2_CORE_REV_2_92a 0x4f54292a
-#define DWC2_CORE_REV_2_94a 0x4f54294a
-#define DWC2_CORE_REV_3_00a 0x4f54300a
+ u32 frame_list_sz;
+ struct kmem_cache *desc_gen_cache;
+ struct kmem_cache *desc_hsisoc_cache;
#ifdef DEBUG
u32 frrem_samples;
u32 hfnum_other_samples_b;
u64 hfnum_other_frrem_accum_b;
#endif
+#endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */
+
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ /* Gadget structures */
+ struct usb_gadget_driver *driver;
+ int fifo_mem;
+ unsigned int dedicated_fifos:1;
+ unsigned char num_of_eps;
+ u32 fifo_map;
+
+ struct usb_request *ep0_reply;
+ struct usb_request *ctrl_req;
+ void *ep0_buff;
+ void *ctrl_buff;
+ enum dwc2_ep0_state ep0_state;
+ u8 test_mode;
+
+ struct usb_gadget gadget;
+ unsigned int enabled:1;
+ unsigned int connected:1;
+ struct dwc2_hsotg_ep *eps_in[MAX_EPS_CHANNELS];
+ struct dwc2_hsotg_ep *eps_out[MAX_EPS_CHANNELS];
+ u32 g_using_dma;
+ u32 g_rx_fifo_sz;
+ u32 g_np_g_tx_fifo_sz;
+ u32 g_tx_fifo_sz[MAX_EPS_CHANNELS];
+#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
};
/* Reasons for halting a host channel */
* The following functions support initialization of the core driver component
* and the DWC_otg controller
*/
+extern int dwc2_core_reset(struct dwc2_hsotg *hsotg);
+extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg);
extern void dwc2_core_host_init(struct dwc2_hsotg *hsotg);
+extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg);
+extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore);
+
+void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg);
/*
* Host core Functions.
extern void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num);
extern void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg);
-extern int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy);
+extern int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup);
extern void dwc2_enable_global_interrupts(struct dwc2_hsotg *hcd);
extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd);
*/
extern void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val);
+/*
+ * When DMA mode is enabled, specifies whether to use
+ * address DMA or DMA descriptor mode with full speed devices
+ * for accessing the data FIFOs in host mode.
+ * 0 - Address DMA
+ * 1 - FS DMA Descriptor (default, if available)
+ */
+extern void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg,
+ int val);
+
/*
* Specifies the maximum speed of operation in host and device mode.
* The actual speed depends on the speed of the attached device and
extern void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
+ const struct dwc2_core_params *params);
+
+extern void dwc2_set_all_params(struct dwc2_core_params *params, int value);
+
+extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
+
+extern int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg);
+extern int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg);
+
+/*
+ * The following functions check the controller's OTG operation mode
+ * capability (GHWCFG2.OTG_MODE).
+ *
+ * These functions can be used before the internal hsotg->hw_params
+ * are read in and cached so they always read directly from the
+ * GHWCFG2 register.
+ */
+unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg);
+bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg);
+bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg);
+bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg);
+
+/*
+ * Returns the mode of operation, host or device
+ */
+static inline int dwc2_is_host_mode(struct dwc2_hsotg *hsotg)
+{
+ return (DWC2_READ_4(hsotg, GINTSTS) & GINTSTS_CURMODE_HOST) != 0;
+}
+
+static inline int dwc2_is_device_mode(struct dwc2_hsotg *hsotg)
+{
+ return (DWC2_READ_4(hsotg, GINTSTS) & GINTSTS_CURMODE_HOST) == 0;
+}
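+
+/*
+ * Note: unlike the dwc2_hw_is_*() predicates above, which report the
+ * synthesized capability from GHWCFG2, these two report the mode the
+ * core is in right now (GINTSTS.CurMod), which can change at runtime.
+ */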
+
/*
* Dump core registers and SPRAM
*/
*/
extern u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg);
+/* Gadget defines */
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+extern int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg);
+extern int dwc2_hsotg_suspend(struct dwc2_hsotg *dwc2);
+extern int dwc2_hsotg_resume(struct dwc2_hsotg *dwc2);
+extern int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq);
+extern void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2,
+ bool reset);
+extern void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg);
+extern void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2);
+extern int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
+#define dwc2_is_device_connected(hsotg) (hsotg->connected)
+#else
+static inline int dwc2_hsotg_remove(struct dwc2_hsotg *dwc2)
+{ return 0; }
+static inline int dwc2_hsotg_suspend(struct dwc2_hsotg *dwc2)
+{ return 0; }
+static inline int dwc2_hsotg_resume(struct dwc2_hsotg *dwc2)
+{ return 0; }
+static inline int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
+{ return 0; }
+static inline void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2,
+ bool reset) {}
+static inline void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2) {}
+static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
+ int testmode)
+{ return 0; }
+#define dwc2_is_device_connected(hsotg) (0)
+#endif
+
+#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg);
+extern void dwc2_hcd_connect(struct dwc2_hsotg *hsotg);
+extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force);
+extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg);
+#else
+static inline int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline void dwc2_hcd_connect(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force) {}
+static inline void dwc2_hcd_start(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) {}
+static inline int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
+{ return 0; }
+#endif
+
#endif /* __DWC2_CORE_H__ */
-/* $OpenBSD: dwc2_coreintr.c,v 1.10 2017/06/29 17:36:16 deraadt Exp $ */
+/* $OpenBSD: dwc2_coreintr.c,v 1.11 2021/07/22 18:32:33 mglocker Exp $ */
/* $NetBSD: dwc2_coreintr.c,v 1.8 2014/04/04 05:40:57 skrll Exp $ */
/*
hprt0 &= ~HPRT0_ENA;
DWC2_WRITE_4(hsotg, HPRT0, hprt0);
}
-
- /* Clear interrupt */
- DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_PRTINT);
}
/**
*/
STATIC void dwc2_handle_mode_mismatch_intr(struct dwc2_hsotg *hsotg)
{
- dev_warn(hsotg->dev, "Mode Mismatch Interrupt: currently in %s mode\n",
- dwc2_is_host_mode(hsotg) ? "Host" : "Device");
-
/* Clear interrupt */
DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_MODEMIS);
+
+ dev_warn(hsotg->dev, "Mode Mismatch Interrupt: currently in %s mode\n",
+ dwc2_is_host_mode(hsotg) ? "Host" : "Device");
}
/**
dwc2_op_state_str(hsotg));
gotgctl = DWC2_READ_4(hsotg, GOTGCTL);
+ if (dwc2_is_device_mode(hsotg))
+ dwc2_hsotg_disconnect(hsotg);
+
if (hsotg->op_state == OTG_STATE_B_HOST) {
hsotg->op_state = OTG_STATE_B_PERIPHERAL;
} else {
dev_dbg(hsotg->dev, "a_suspend->a_peripheral (%d)\n",
hsotg->op_state);
spin_unlock(&hsotg->lock);
- dwc2_hcd_disconnect(hsotg);
+ dwc2_hcd_disconnect(hsotg, false);
spin_lock(&hsotg->lock);
hsotg->op_state = OTG_STATE_A_PERIPHERAL;
} else {
*/
STATIC void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
{
- u32 gintmsk = DWC2_READ_4(hsotg, GINTMSK);
+ u32 gintmsk;
+
+ /* Clear interrupt */
+ DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_CONIDSTSCHNG);
/* Need to disable SOF interrupt immediately */
+ gintmsk = DWC2_READ_4(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_SOF;
DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
* Release lock before scheduling workq as it holds spinlock during
* scheduling.
*/
- spin_unlock(&hsotg->lock);
- task_set(&hsotg->wf_otg, dwc2_conn_id_status_change, hsotg);
- task_add(hsotg->wq_otg, &hsotg->wf_otg);
- spin_lock(&hsotg->lock);
-
- /* Clear interrupt */
- DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_CONIDSTSCHNG);
+ if (hsotg->wq_otg) {
+ spin_unlock(&hsotg->lock);
+ task_add(hsotg->wq_otg, &hsotg->wf_otg);
+ spin_lock(&hsotg->lock);
+ }
}
/**
*/
STATIC void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
{
- dev_dbg(hsotg->dev, "++Session Request Interrupt++\n");
+ int ret;
/* Clear interrupt */
DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_SESSREQINT);
+
+ dev_dbg(hsotg->dev, "Session request interrupt - lx_state=%d\n",
+ hsotg->lx_state);
+
+ if (dwc2_is_device_mode(hsotg)) {
+ if (hsotg->lx_state == DWC2_L2) {
+ ret = dwc2_exit_hibernation(hsotg, true);
+ if (ret && (ret != -ENOTSUP))
+ dev_err(hsotg->dev,
+ "exit hibernation failed\n");
+ }
+
+ /*
+ * Report disconnect if there is any previous session
+ * established
+ */
+ dwc2_hsotg_disconnect(hsotg);
+ }
}
/*
*/
STATIC void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
{
+ int ret;
+
+ /* Clear interrupt */
+ DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_WKUPINT);
+
dev_dbg(hsotg->dev, "++Resume or Remote Wakeup Detected Interrupt++\n");
dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state);
/* Clear Remote Wakeup Signaling */
dctl &= ~DCTL_RMTWKUPSIG;
DWC2_WRITE_4(hsotg, DCTL, dctl);
+ ret = dwc2_exit_hibernation(hsotg, true);
+ if (ret && (ret != -ENOTSUP))
+ dev_err(hsotg->dev, "exit hibernation failed\n");
+
+ call_gadget(hsotg, resume);
}
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
} else {
+ if (hsotg->core_params->hibernation)
+ return;
+
if (hsotg->lx_state != DWC2_L1) {
u32 pcgcctl = DWC2_READ_4(hsotg, PCGCTL);
/* Restart the Phy Clock */
pcgcctl &= ~PCGCTL_STOPPCLK;
DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
- timeout_reset(&hsotg->wkp_timer, mstohz(71),
- dwc2_wakeup_detected, hsotg);
+ timeout_set(&hsotg->wkp_timer, dwc2_wakeup_detected,
+ hsotg);
+ timeout_add_msec(&hsotg->wkp_timer, 71);
} else {
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
}
}
-
- /* Clear interrupt */
- DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_WKUPINT);
}
/*
*/
STATIC void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg)
{
+ DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_DISCONNINT);
+
dev_dbg(hsotg->dev, "++Disconnect Detected Interrupt++ (%s) %s\n",
dwc2_is_host_mode(hsotg) ? "Host" : "Device",
dwc2_op_state_str(hsotg));
- /* Change to L3 (OFF) state */
- hsotg->lx_state = DWC2_L3;
-
- DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_DISCONNINT);
+ if (hsotg->op_state == OTG_STATE_A_HOST)
+ dwc2_hcd_disconnect(hsotg, false);
}
/*
*/
STATIC void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
{
+ u32 dsts;
+ int ret;
+
+ /* Clear interrupt */
+ DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_USBSUSP);
+
dev_dbg(hsotg->dev, "USB SUSPEND\n");
if (dwc2_is_device_mode(hsotg)) {
-#ifdef DWC2_DEBUG
- u32 dsts;
-
/*
* Check the Device status register to determine if the Suspend
* state is active
"DSTS.Suspend Status=%d HWCFG4.Power Optimize=%d\n",
!!(dsts & DSTS_SUSPSTS),
hsotg->hw_params.power_optimized);
-#endif
+ if ((dsts & DSTS_SUSPSTS) && hsotg->hw_params.power_optimized) {
+ /* Ignore suspend request before enumeration */
+ if (!dwc2_is_device_connected(hsotg)) {
+ dev_dbg(hsotg->dev,
+ "ignore suspend request before enumeration\n");
+ return;
+ }
+
+ ret = dwc2_enter_hibernation(hsotg);
+ if (ret) {
+ if (ret != -ENOTSUP)
+ dev_err(hsotg->dev,
+ "enter hibernation failed\n");
+ goto skip_power_saving;
+ }
+
+ udelay(100);
+
+ /* Ask phy to be suspended */
+ if (hsotg->uphy != NULL)
+ usb_phy_set_suspend(hsotg->uphy, true);
+skip_power_saving:
+ /*
+ * Change to L2 (suspend) state before releasing
+ * spinlock
+ */
+ hsotg->lx_state = DWC2_L2;
+
+ /* Call gadget suspend callback */
+ call_gadget(hsotg, suspend);
+ }
} else {
if (hsotg->op_state == OTG_STATE_A_PERIPHERAL) {
dev_dbg(hsotg->dev, "a_peripheral->a_host\n");
+ /* Change to L2 (suspend) state */
+ hsotg->lx_state = DWC2_L2;
/* Clear the a_peripheral flag, back to a_host */
spin_unlock(&hsotg->lock);
dwc2_hcd_start(hsotg);
hsotg->op_state = OTG_STATE_A_HOST;
}
}
-
- /* Change to L2 (suspend) state */
- hsotg->lx_state = DWC2_L2;
-
- /* Clear interrupt */
- DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_USBSUSP);
}
#define GINTMSK_COMMON (GINTSTS_WKUPINT | GINTSTS_SESSREQINT | \
-/* $OpenBSD: dwc2_hcd.c,v 1.22 2020/03/21 12:08:31 patrick Exp $ */
+/* $OpenBSD: dwc2_hcd.c,v 1.23 2021/07/22 18:32:33 mglocker Exp $ */
/* $NetBSD: dwc2_hcd.c,v 1.15 2014/11/24 10:14:14 skrll Exp $ */
/*
dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
dev_dbg(hsotg->dev, " NP inactive sched:\n");
- TAILQ_FOREACH(qh, &hsotg->non_periodic_sched_inactive, qh_list_entry)
+ list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive,
+ qh_list_entry)
+ dev_dbg(hsotg->dev, " %p\n", qh);
+ dev_dbg(hsotg->dev, " NP waiting sched:\n");
+ list_for_each_entry(qh, &hsotg->non_periodic_sched_waiting,
+ qh_list_entry)
dev_dbg(hsotg->dev, " %p\n", qh);
dev_dbg(hsotg->dev, " NP active sched:\n");
- TAILQ_FOREACH(qh, &hsotg->non_periodic_sched_active, qh_list_entry)
+ list_for_each_entry(qh, &hsotg->non_periodic_sched_active,
+ qh_list_entry)
dev_dbg(hsotg->dev, " %p\n", qh);
dev_dbg(hsotg->dev, " Channels:\n");
for (i = 0; i < num_channels; i++) {
* Must be called with interrupt disabled and spinlock held
*/
STATIC void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
- struct dwc2_qh_list *qh_list)
+ struct list_head *qh_list)
{
struct dwc2_qh *qh, *qh_tmp;
struct dwc2_qtd *qtd, *qtd_tmp;
- TAILQ_FOREACH_SAFE(qh, qh_list, qh_list_entry, qh_tmp) {
- TAILQ_FOREACH_SAFE(qtd, &qh->qtd_list, qtd_list_entry, qtd_tmp) {
- dwc2_host_complete(hsotg, qtd, -ETIMEDOUT);
+ list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
+ list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
+ qtd_list_entry) {
+ dwc2_host_complete(hsotg, qtd, -ECONNRESET);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
}
}
}
STATIC void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
- struct dwc2_qh_list *qh_list)
+ struct list_head *qh_list)
{
struct dwc2_qtd *qtd, *qtd_tmp;
struct dwc2_qh *qh, *qh_tmp;
unsigned long flags;
- if (TAILQ_EMPTY(qh_list)) {
+ if (!qh_list->next)
/* The list hasn't been initialized yet */
return;
- }
spin_lock_irqsave(&hsotg->lock, flags);
/* Ensure there are no QTDs or URBs left */
dwc2_kill_urbs_in_qh_list(hsotg, qh_list);
- TAILQ_FOREACH_SAFE(qh, qh_list, qh_list_entry, qh_tmp) {
+ list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
dwc2_hcd_qh_unlink(hsotg, qh);
/* Free each QTD in the QH's QTD list */
- TAILQ_FOREACH_SAFE(qtd, &qh->qtd_list, qtd_list_entry, qtd_tmp) {
+ list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
+ qtd_list_entry)
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
- }
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_hcd_qh_free(hsotg, qh);
STATIC void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg)
{
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive);
+ dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_waiting);
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active);
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive);
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready);
/* Flush out any channel requests in slave mode */
for (i = 0; i < num_channels; i++) {
channel = hsotg->hc_ptr_array[i];
- if (channel->in_freelist == 0)
+ if (!list_empty(&channel->hc_list_entry))
continue;
hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
if (hcchar & HCCHAR_CHENA) {
for (i = 0; i < num_channels; i++) {
channel = hsotg->hc_ptr_array[i];
- if (channel->in_freelist != 0)
+ if (!list_empty(&channel->hc_list_entry))
continue;
hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
if (hcchar & HCCHAR_CHENA) {
}
dwc2_hc_cleanup(hsotg, channel);
- LIST_INSERT_HEAD(&hsotg->free_hc_list, channel, hc_list_entry);
- channel->in_freelist = 1;
+ list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list);
/*
* Added for Descriptor DMA to prevent channel double cleanup in
* release_channel_ddma(), which is called from ep_disable when
*/
channel->qh = NULL;
}
+ /* All channels have been freed, mark them available */
+ if (hsotg->core_params->uframe_sched > 0) {
+ hsotg->available_host_channels =
+ hsotg->core_params->host_channels;
+ } else {
+ hsotg->non_periodic_channels = 0;
+ hsotg->periodic_channels = 0;
+ }
+}
+
+/**
+ * dwc2_hcd_connect() - Handles connect of the HCD
+ *
+ * @hsotg: Pointer to struct dwc2_hsotg
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+void dwc2_hcd_connect(struct dwc2_hsotg *hsotg)
+{
+ if (hsotg->lx_state != DWC2_L0)
+ usb_hcd_resume_root_hub(hsotg->priv);
+
+ hsotg->flags.b.port_connect_status_change = 1;
+ hsotg->flags.b.port_connect_status = 1;
}
/**
* dwc2_hcd_disconnect() - Handles disconnect of the HCD
*
* @hsotg: Pointer to struct dwc2_hsotg
+ * @force: If true, we won't try to reconnect even if we see a device connected.
*
* Must be called with interrupt disabled and spinlock held
*/
-void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg)
+void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force)
{
u32 intr;
+ u32 hprt0;
/* Set status flags for the hub driver */
hsotg->flags.b.port_connect_status_change = 1;
dwc2_host_disconnect(hsotg);
dwc2_root_intr(hsotg->hsotg_sc);
+
+ /*
+ * Add an extra check here to see if we're actually connected but
+ * we don't have a detection interrupt pending. This can happen if:
+ * 1. hardware sees connect
+ * 2. hardware sees disconnect
+ * 3. hardware sees connect
+ * 4. dwc2_port_intr() - clears connect interrupt
+ * 5. dwc2_handle_common_intr() - calls here
+ *
+ * Without the extra check here we will end up calling disconnect
+ * and won't get any future interrupts to handle the connect.
+ */
+ if (!force) {
+ hprt0 = DWC2_READ_4(hsotg, HPRT0);
+ if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS))
+ dwc2_hcd_connect(hsotg);
+ }
}
/**
*/
STATIC void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
{
- if (hsotg->lx_state == DWC2_L2)
+ if (hsotg->bus_suspended) {
hsotg->flags.b.port_suspend_change = 1;
- else
+ usb_hcd_resume_root_hub(hsotg->priv);
+ }
+
+ if (hsotg->lx_state == DWC2_L1)
hsotg->flags.b.port_l1_change = 1;
dwc2_root_intr(hsotg->hsotg_sc);
DWC2_WRITE_4(hsotg, HPRT0, 0);
}
-int
-dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb,
- void **ep_handle, gfp_t mem_flags)
+/* Caller must hold driver lock */
+int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
+ struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
+ struct dwc2_qtd *qtd)
{
- struct dwc2_softc *sc = hsotg->hsotg_sc;
- struct dwc2_qtd *qtd;
u32 intr_mask;
int retval;
int dev_speed;
dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
- /* Some core configurations cannot support LS traffic on a FS root port */
+ /* Some configurations cannot support LS traffic on a FS root port */
if ((dev_speed == USB_SPEED_LOW) &&
(hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) &&
(hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) {
u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
if (prtspd == HPRT0_SPD_FULL_SPEED) {
+ dev_err(hsotg->dev,
+ "DWC OTG HCD URB Enqueue unsupported\n");
return -ENODEV;
}
}
- qtd = pool_get(&sc->sc_qtdpool, PR_NOWAIT);
if (!qtd)
- return -ENOMEM;
+ return -EINVAL;
memset(qtd, 0, sizeof(*qtd));
dwc2_hcd_qtd_init(qtd, urb);
- retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle,
- mem_flags);
+ retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
if (retval) {
dev_err(hsotg->dev,
"DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
retval);
- pool_put(&sc->sc_qtdpool, qtd);
return retval;
}
if (in_process) {
dwc2_hcd_qh_deactivate(hsotg, qh, 0);
qh->channel = NULL;
- } else if (TAILQ_EMPTY(&qh->qtd_list)) {
+ } else if (list_empty(&qh->qtd_list)) {
dwc2_hcd_qh_unlink(hsotg, qh);
}
} else {
int i;
hsotg->flags.d32 = 0;
- hsotg->non_periodic_qh_ptr = NULL;
+ hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
if (hsotg->core_params->uframe_sched > 0) {
hsotg->available_host_channels =
* Put all channels in the free channel list and clean up channel
* states
*/
- LIST_FOREACH_SAFE(chan, &hsotg->free_hc_list, hc_list_entry, chan_tmp) {
- LIST_REMOVE(chan, hc_list_entry);
- chan->in_freelist = 0;
- }
+ list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list,
+ hc_list_entry)
+ list_del_init(&chan->hc_list_entry);
num_channels = hsotg->core_params->host_channels;
for (i = 0; i < num_channels; i++) {
chan = hsotg->hc_ptr_array[i];
- LIST_INSERT_HEAD(&hsotg->free_hc_list, chan, hc_list_entry);
- chan->in_freelist = 1;
+ list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
dwc2_hc_cleanup(hsotg, chan);
}
}
STATIC int dwc2_hc_setup_align_buf(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
- struct dwc2_host_chan *chan, void *bufptr)
+ struct dwc2_host_chan *chan,
+ struct dwc2_hcd_urb *urb, void *bufptr)
{
u32 buf_size;
- if (chan->ep_type != USB_ENDPOINT_XFER_ISOC)
- buf_size = hsotg->core_params->max_transfer_size;
- else
- buf_size = 4096;
-
if (!qh->dw_align_buf) {
int err;
+ if (chan->ep_type != USB_ENDPOINT_XFER_ISOC)
+ buf_size = hsotg->core_params->max_transfer_size;
+ else
+ /* 3072 = 3 max-size Isoc packets */
+ buf_size = 3072;
+
qh->dw_align_buf = NULL;
qh->dw_align_buf_dma = 0;
- err = usb_allocmem(&hsotg->hsotg_sc->sc_bus, buf_size, buf_size,
- USB_DMA_COHERENT, &qh->dw_align_buf_usbdma);
+ err = usb_allocmem(&hsotg->hsotg_sc->sc_bus, buf_size, 0,
+ USB_DMA_COHERENT, &qh->dw_align_buf_usbdma);
if (!err) {
struct usb_dma *ud = &qh->dw_align_buf_usbdma;
}
if (!qh->dw_align_buf)
return -ENOMEM;
+ qh->dw_align_buf_size = buf_size;
}
- if (!chan->ep_is_in && chan->xfer_len) {
- usb_syncmem(chan->xfer_usbdma, 0, buf_size,
- BUS_DMASYNC_POSTWRITE);
- memcpy(qh->dw_align_buf, bufptr, chan->xfer_len);
- usb_syncmem(chan->xfer_usbdma, 0, buf_size,
- BUS_DMASYNC_PREWRITE);
+ if (chan->xfer_len) {
+ void *usb_urb = urb->priv;
+
+ dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
+
+ if (usb_urb) {
+ if (!chan->ep_is_in) {
+ memcpy(qh->dw_align_buf, bufptr,
+ chan->xfer_len);
+ }
+ } else {
+ dev_warn(hsotg->dev, "no URB in dwc2_urb\n");
+ }
}
+ usb_syncmem(&qh->dw_align_buf_usbdma, 0, qh->dw_align_buf_size,
+ chan->ep_is_in ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
+
chan->align_buf = qh->dw_align_buf_dma;
return 0;
}
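+
+/*
+ * Note: for IN transfers the bounce buffer set up above is copied back
+ * into the caller's buffer on completion, in the host channel interrupt
+ * path; only the PREREAD/PREWRITE syncs are done here.
+ */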
if (dbg_qh(qh))
dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh);
- if (TAILQ_EMPTY(&qh->qtd_list)) {
+ if (list_empty(&qh->qtd_list)) {
dev_dbg(hsotg->dev, "No QTDs in QH list\n");
return -ENOMEM;
}
- if (LIST_EMPTY(&hsotg->free_hc_list)) {
+ if (list_empty(&hsotg->free_hc_list)) {
dev_dbg(hsotg->dev, "No free channel to assign\n");
return -ENOMEM;
}
- chan = LIST_FIRST(&hsotg->free_hc_list);
+ chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan,
+ hc_list_entry);
/* Remove host channel from free list */
- LIST_REMOVE(chan, hc_list_entry);
- chan->in_freelist = 0;
+ list_del_init(&chan->hc_list_entry);
- qtd = TAILQ_FIRST(&qh->qtd_list);
+ qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
urb = qtd->urb;
qh->channel = chan;
qtd->in_process = 1;
/* Non DWORD-aligned buffer case */
if (bufptr) {
- dev_vdbg(hsotg->dev, "Non-aligned buffer%p\n", bufptr);
- if (dwc2_hc_setup_align_buf(hsotg, qh, chan, bufptr)) {
+ dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
+ if (dwc2_hc_setup_align_buf(hsotg, qh, chan, urb, bufptr)) {
dev_err(hsotg->dev,
"%s: Failed to allocate memory to handle non-dword aligned buffer\n",
__func__);
/* Add channel back to free list */
chan->align_buf = 0;
chan->multi_count = 0;
- LIST_INSERT_HEAD(&hsotg->free_hc_list, chan, hc_list_entry);
- chan->in_freelist = 1;
+ list_add_tail(&chan->hc_list_entry,
+ &hsotg->free_hc_list);
qtd->in_process = 0;
qh->channel = NULL;
return -ENOMEM;
*/
chan->multi_count = dwc2_hb_mult(qh->maxp);
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->core_params->dma_desc_enable > 0) {
+ chan->desc_list_usbdma = qh->desc_list_usbdma;
chan->desc_list_addr = qh->desc_list_dma;
+ chan->desc_list_sz = qh->desc_list_sz;
+ }
dwc2_hc_init(hsotg, chan);
chan->qh = qh;
struct dwc2_hsotg *hsotg)
{
enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE;
- struct dwc2_qh *qh, *qhn;
+ struct list_head *qh_ptr;
+ struct dwc2_qh *qh;
int num_channels;
-#ifdef DWC2_DEBUG
+#ifdef DWC2_DEBUG_SOF
dev_vdbg(hsotg->dev, " Select Transactions\n");
#endif
/* Process entries in the periodic ready list */
- /* TAILQ_FOREACH_SAFE? */
- qh = TAILQ_FIRST(&hsotg->periodic_sched_ready);
- while (qh != NULL) {
- if (LIST_EMPTY(&hsotg->free_hc_list))
+ qh_ptr = hsotg->periodic_sched_ready.next;
+ while (qh_ptr != &hsotg->periodic_sched_ready) {
+ if (list_empty(&hsotg->free_hc_list))
break;
if (hsotg->core_params->uframe_sched > 0) {
if (hsotg->available_host_channels <= 1)
break;
hsotg->available_host_channels--;
}
+ qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
if (dwc2_assign_and_init_hc(hsotg, qh))
break;
* Move the QH from the periodic ready schedule to the
* periodic assigned schedule
*/
- qhn = TAILQ_NEXT(qh, qh_list_entry);
- TAILQ_REMOVE(&hsotg->periodic_sched_ready, qh, qh_list_entry);
- TAILQ_INSERT_TAIL(&hsotg->periodic_sched_assigned, qh, qh_list_entry);
+ qh_ptr = qh_ptr->next;
+ list_move(&qh->qh_list_entry, &hsotg->periodic_sched_assigned);
ret_val = DWC2_TRANSACTION_PERIODIC;
- qh = qhn;
}
/*
* reserved for periodic transfers.
*/
num_channels = hsotg->core_params->host_channels;
- qh = TAILQ_FIRST(&hsotg->non_periodic_sched_inactive);
- while (qh != NULL) {
+ qh_ptr = hsotg->non_periodic_sched_inactive.next;
+ while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
if (hsotg->core_params->uframe_sched <= 0 &&
hsotg->non_periodic_channels >= num_channels -
hsotg->periodic_channels)
break;
- if (LIST_EMPTY(&hsotg->free_hc_list))
+ if (list_empty(&hsotg->free_hc_list))
break;
+ qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
/*
* Check to see if this is a NAK'd retransmit, in which case
if (qh->nak_frame != 0xffff &&
dwc2_full_frame_num(qh->nak_frame) ==
dwc2_full_frame_num(dwc2_hcd_get_frame_number(hsotg))) {
- qh = TAILQ_NEXT(qh, qh_list_entry);
+ qh_ptr = qh_ptr->next;
continue;
} else {
qh->nak_frame = 0xffff;
* Move the QH from the non-periodic inactive schedule to the
* non-periodic active schedule
*/
- qhn = TAILQ_NEXT(qh, qh_list_entry);
- TAILQ_REMOVE(&hsotg->non_periodic_sched_inactive, qh, qh_list_entry);
- TAILQ_INSERT_TAIL(&hsotg->non_periodic_sched_active, qh, qh_list_entry);
- qh = qhn;
+ qh_ptr = qh_ptr->next;
+ list_move(&qh->qh_list_entry,
+ &hsotg->non_periodic_sched_active);
if (ret_val == DWC2_TRANSACTION_NONE)
ret_val = DWC2_TRANSACTION_NON_PERIODIC;
*/
STATIC void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
{
- struct dwc2_qh *qh, *qhn;
+ struct list_head *qh_ptr;
+ struct dwc2_qh *qh;
u32 tx_status;
u32 fspcavail;
u32 gintmsk;
fspcavail);
}
- qh = TAILQ_FIRST(&hsotg->periodic_sched_assigned);
- while (qh != NULL) {
+ qh_ptr = hsotg->periodic_sched_assigned.next;
+ while (qh_ptr != &hsotg->periodic_sched_assigned) {
tx_status = DWC2_READ_4(hsotg, HPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
break;
}
+ qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
if (!qh->channel) {
- qh = TAILQ_NEXT(qh, qh_list_entry);
+ qh_ptr = qh_ptr->next;
continue;
}
/* Make sure EP's TT buffer is clean before queueing qtds */
if (qh->tt_buffer_dirty) {
- qh = TAILQ_NEXT(qh, qh_list_entry);
+ qh_ptr = qh_ptr->next;
continue;
}
*/
if (hsotg->core_params->dma_enable > 0 || status == 0 ||
qh->channel->requests == qh->channel->multi_count) {
- qhn = TAILQ_NEXT(qh, qh_list_entry);
+ qh_ptr = qh_ptr->next;
/*
* Move the QH from the periodic assigned schedule to
* the periodic queued schedule
*/
- TAILQ_REMOVE(&hsotg->periodic_sched_assigned, qh, qh_list_entry);
- TAILQ_INSERT_TAIL(&hsotg->periodic_sched_queued, qh, qh_list_entry);
+ list_move(&qh->qh_list_entry,
+ &hsotg->periodic_sched_queued);
/* done queuing high bandwidth */
hsotg->queuing_high_bandwidth = 0;
-
- qh = qhn;
}
}
fspcavail);
}
- if (!TAILQ_EMPTY(&hsotg->periodic_sched_assigned) ||
+ if (!list_empty(&hsotg->periodic_sched_assigned) ||
no_queue_space || no_fifo_space) {
/*
* May need to queue more transactions as the request
*/
STATIC void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
{
+ struct list_head *orig_qh_ptr;
struct dwc2_qh *qh;
u32 tx_status;
u32 qspcavail;
* Keep track of the starting point. Skip over the start-of-list
* entry.
*/
- if (hsotg->non_periodic_qh_ptr == NULL) {
- hsotg->non_periodic_qh_ptr = TAILQ_FIRST(&hsotg->non_periodic_sched_active);
- }
- qh = hsotg->non_periodic_qh_ptr;
+ if (hsotg->non_periodic_qh_ptr == &hsotg->non_periodic_sched_active)
+ hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
+ orig_qh_ptr = hsotg->non_periodic_qh_ptr;
/*
* Process once through the active list or until no more space is
break;
}
+ qh = list_entry(hsotg->non_periodic_qh_ptr, struct dwc2_qh,
+ qh_list_entry);
if (!qh->channel)
goto next;
break;
}
next:
- /* Advance to next QH, wrapping to the start if we hit the end */
- qh = TAILQ_NEXT(qh, qh_list_entry);
- if (qh == NULL)
- qh = TAILQ_FIRST(&hsotg->non_periodic_sched_active);
- } while ((qh != hsotg->non_periodic_qh_ptr) && (hsotg->non_periodic_qh_ptr != NULL));
- hsotg->non_periodic_qh_ptr = qh;
+ /* Advance to next QH, skipping start-of-list entry */
+ hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
+ if (hsotg->non_periodic_qh_ptr ==
+ &hsotg->non_periodic_sched_active)
+ hsotg->non_periodic_qh_ptr =
+ hsotg->non_periodic_qh_ptr->next;
+ } while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
if (hsotg->core_params->dma_enable <= 0) {
tx_status = DWC2_READ_4(hsotg, GNPTXSTS);
void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
enum dwc2_transaction_type tr_type)
{
-#ifdef DWC2_DEBUG
+#ifdef DWC2_DEBUG_SOF
dev_vdbg(hsotg->dev, "Queue Transactions\n");
#endif
/* Process host channels associated with periodic transfers */
if ((tr_type == DWC2_TRANSACTION_PERIODIC ||
tr_type == DWC2_TRANSACTION_ALL) &&
- !TAILQ_EMPTY(&hsotg->periodic_sched_assigned))
+ !list_empty(&hsotg->periodic_sched_assigned))
dwc2_process_periodic_channels(hsotg);
/* Process host channels associated with non-periodic transfers */
if (tr_type == DWC2_TRANSACTION_NON_PERIODIC ||
tr_type == DWC2_TRANSACTION_ALL) {
- if (!TAILQ_EMPTY(&hsotg->non_periodic_sched_active)) {
+ if (!list_empty(&hsotg->non_periodic_sched_active)) {
dwc2_process_non_periodic_channels(hsotg);
} else {
/*
}
-void
-dwc2_conn_id_status_change(void *data)
+STATIC void dwc2_conn_id_status_change(void *data)
{
struct dwc2_hsotg *hsotg = data;
u32 count = 0;
u32 gotgctl;
+ unsigned long flags;
dev_dbg(hsotg->dev, "%s()\n", __func__);
hsotg->op_state = OTG_STATE_B_PERIPHERAL;
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_hsotg_core_init_disconnected(hsotg, false);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ dwc2_hsotg_core_connect(hsotg);
} else {
/* A-Device connector (Host Mode) */
dev_dbg(hsotg->dev, "connId A\n");
}
}
-void dwc2_wakeup_detected(void * data)
+void dwc2_wakeup_detected(void *data)
{
struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
u32 hprt0;
DWC2_READ_4(hsotg, HPRT0));
dwc2_hcd_rem_wakeup(hsotg);
+ hsotg->bus_suspended = 0;
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
hprt0 |= HPRT0_SUSP;
DWC2_WRITE_4(hsotg, HPRT0, hprt0);
- /* Update lx_state */
- hsotg->lx_state = DWC2_L2;
+ hsotg->bus_suspended = 1;
- /* Suspend the Phy Clock */
- pcgctl = DWC2_READ_4(hsotg, PCGCTL);
- pcgctl |= PCGCTL_STOPPCLK;
- DWC2_WRITE_4(hsotg, PCGCTL, pcgctl);
- udelay(10);
+ /*
+ * If hibernation is supported, the PHY clock will be suspended
+ * after the registers are backed up.
+ */
+ if (!hsotg->core_params->hibernation) {
+ /* Suspend the Phy Clock */
+ pcgctl = DWC2_READ_4(hsotg, PCGCTL);
+ pcgctl |= PCGCTL_STOPPCLK;
+ DWC2_WRITE_4(hsotg, PCGCTL, pcgctl);
+ udelay(10);
+ }
/* For HNP the bus must be suspended for at least 200ms */
if (dwc2_host_is_b_hnp_enabled(hsotg)) {
}
}
+/* Must NOT be called with interrupt disabled or spinlock held */
+STATIC void dwc2_port_resume(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_softc *sc = hsotg->hsotg_sc;
+ unsigned long flags;
+ u32 hprt0;
+ u32 pcgctl;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ /*
+ * If hibernation is supported, the PHY clock has already been
+ * resumed after the registers were restored.
+ */
+ if (!hsotg->core_params->hibernation) {
+ pcgctl = DWC2_READ_4(hsotg, PCGCTL);
+ pcgctl &= ~PCGCTL_STOPPCLK;
+ DWC2_WRITE_4(hsotg, PCGCTL, pcgctl);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ usleep_range(20000, 40000);
+ spin_lock_irqsave(&hsotg->lock, flags);
+ }
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_RES;
+ hprt0 &= ~HPRT0_SUSP;
+ DWC2_WRITE_4(hsotg, HPRT0, hprt0);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
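+ /*
+ * Drive resume signaling for the required time before clearing
+ * HPRT0_RES below (the USB 2.0 spec requires at least 20 ms of
+ * resume signaling; USB_RESUME_TIMEOUT is assumed to satisfy that).
+ */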
+ usb_delay_ms(&sc->sc_bus, USB_RESUME_TIMEOUT);
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 &= ~(HPRT0_RES | HPRT0_SUSP);
+ DWC2_WRITE_4(hsotg, HPRT0, hprt0);
+ hsotg->bus_suspended = 0;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
/* Handles hub class-specific requests */
int
dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
- DWC2_WRITE_4(hsotg, PCGCTL, 0);
- usleep_range(20000, 40000);
-
- hprt0 = dwc2_read_hprt0(hsotg);
- hprt0 |= HPRT0_RES;
- DWC2_WRITE_4(hsotg, HPRT0, hprt0);
- hprt0 &= ~HPRT0_SUSP;
- usleep_range(100000, 150000);
-
- hprt0 &= ~HPRT0_RES;
- DWC2_WRITE_4(hsotg, HPRT0, hprt0);
+ if (hsotg->bus_suspended)
+ dwc2_port_resume(hsotg);
break;
case USB_PORT_FEAT_POWER:
dev_dbg(hsotg->dev, "GetHubDescriptor\n");
hub_desc = (usb_hub_descriptor_t *)buf;
hub_desc->bDescLength = 9;
- hub_desc->bDescriptorType = 0x29;
+ hub_desc->bDescriptorType = USB_DT_HUB;
hub_desc->bNbrPorts = 1;
- USETW(hub_desc->wHubCharacteristics, 0x08);
+ USETW(hub_desc->wHubCharacteristics, HUB_CHAR_COMMON_LPSM |
+ HUB_CHAR_INDV_PORT_OCPM);
hub_desc->bPwrOn2PwrGood = 1;
hub_desc->bHubContrCurrent = 0;
hub_desc->DeviceRemovable[0] = 0;
/* USB_PORT_FEAT_INDICATOR unsupported always 0 */
USETW(ps.wPortStatus, port_status);
+ if (hsotg->core_params->dma_desc_fs_enable) {
+ /*
+ * Enable descriptor DMA only if a full speed
+ * device is connected.
+ */
+ if (hsotg->new_connection &&
+ ((port_status &
+ (USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_HIGH_SPEED |
+ USB_PORT_STAT_LOW_SPEED)) ==
+ USB_PORT_STAT_CONNECTION)) {
+ u32 hcfg;
+
+ dev_info(hsotg->dev, "Enabling descriptor DMA mode\n");
+ hsotg->core_params->dma_desc_enable = 1;
+ hcfg = DWC2_READ_4(hsotg, HCFG);
+ hcfg |= HCFG_DESCDMA;
+ DWC2_WRITE_4(hsotg, HCFG, hcfg);
+ hsotg->new_connection = false;
+ }
+ }
dev_vdbg(hsotg->dev, "wPortStatus=%04x\n", port_status);
memcpy(buf, &ps, sizeof(ps));
break;
/* Not supported */
break;
+ case USB_PORT_FEAT_TEST:
+ hprt0 = dwc2_read_hprt0(hsotg);
+ dev_dbg(hsotg->dev,
+ "SetPortFeature - USB_PORT_FEAT_TEST\n");
+ hprt0 &= ~HPRT0_TSTCTL_MASK;
+ hprt0 |= (windex >> 8) << HPRT0_TSTCTL_SHIFT;
+ DWC2_WRITE_4(hsotg, HPRT0, hprt0);
+ break;
+
default:
retval = -EINVAL;
dev_err(hsotg->dev,
{
u32 hfnum = DWC2_READ_4(hsotg, HFNUM);
-#ifdef DWC2_DEBUG
+#ifdef DWC2_DEBUG_SOF
dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
(hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT);
#endif
if (!(chan->xfer_started && chan->qh))
continue;
- TAILQ_FOREACH(qtd, &chan->qh->qtd_list, qtd_list_entry) {
+ list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) {
if (!qtd->in_process)
break;
urb = qtd->urb;
// hcd->self.is_b_host = 0;
}
-
/*
* Work queue function for starting the HCD when A-Cable is connected
*/
-void
-dwc2_hcd_start_func(void *data)
+STATIC void dwc2_hcd_start_func(void *data)
{
struct dwc2_hsotg *hsotg = data;
/*
* Reset work queue function
*/
-void
-dwc2_hcd_reset_func(void *data)
+STATIC void dwc2_hcd_reset_func(void *data)
{
struct dwc2_hsotg *hsotg = data;
+ unsigned long flags;
u32 hprt0;
dev_dbg(hsotg->dev, "USB RESET function called\n");
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 &= ~HPRT0_RST;
DWC2_WRITE_4(hsotg, HPRT0, hprt0);
hsotg->flags.b.port_reset_change = 1;
dwc2_root_intr(hsotg->hsotg_sc);
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
}
/*
* error code on failure.
*/
-
/*
* Frees secondary storage associated with the dwc2_hsotg structure contained
* in the struct usb_hcd field
/* Free memory for QH/QTD lists */
dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive);
+ dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_waiting);
dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active);
dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive);
dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready);
dwc2_hcd_free(hsotg);
}
-/*
- * Sets all parameters to the given value.
- *
- * Assumes that the dwc2_core_params struct contains only integers.
- */
-void dwc2_set_all_params(struct dwc2_core_params *params, int value)
-{
- int *p = (int *)params;
- size_t size = sizeof(*params) / sizeof(*p);
- int i;
-
- for (i = 0; i < size; i++)
- p[i] = value;
-}
-
/*
* Initializes the HCD. This function allocates memory for and initializes the
* static parts of the usb_hcd and dwc2_hsotg structures. It also registers the
* USB bus with the core and calls the hc_driver->start() function. It returns
* a negative error on failure.
*/
-int dwc2_hcd_init(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params)
+int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
{
struct dwc2_host_chan *channel;
int i, num_channels;
int retval;
- dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n");
-
- /* Detect config values from hardware */
- retval = dwc2_get_hwparams(hsotg);
+ if (usb_disabled())
+ return -ENODEV;
- if (retval)
- return retval;
+ dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n");
retval = -ENOMEM;
hsotg->last_frame_num = HFNUM_MAX_FRNUM;
#endif
- hsotg->core_params = malloc(sizeof(*hsotg->core_params), M_DEVBUF,
- M_ZERO | M_WAITOK);
- if (!hsotg->core_params)
- goto error1;
-
- dwc2_set_all_params(hsotg->core_params, -1);
-
- /* Validate parameter values */
- dwc2_set_parameters(hsotg, params);
-
spin_lock_init(&hsotg->lock);
/*
/* Create new workqueue and init work */
retval = -ENOMEM;
hsotg->wq_otg = taskq_create("dwc2", 1, IPL_USB, 0);
- if (hsotg->wq_otg == NULL) {
+ if (!hsotg->wq_otg) {
dev_err(hsotg->dev, "Failed to create workqueue\n");
goto error2;
}
+ task_set(&hsotg->wf_otg, dwc2_conn_id_status_change, hsotg);
timeout_set(&hsotg->wkp_timer, dwc2_wakeup_detected, hsotg);
/* Initialize the non-periodic schedule */
- TAILQ_INIT(&hsotg->non_periodic_sched_inactive);
- TAILQ_INIT(&hsotg->non_periodic_sched_active);
+ INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
+ INIT_LIST_HEAD(&hsotg->non_periodic_sched_waiting);
+ INIT_LIST_HEAD(&hsotg->non_periodic_sched_active);
/* Initialize the periodic schedule */
- TAILQ_INIT(&hsotg->periodic_sched_inactive);
- TAILQ_INIT(&hsotg->periodic_sched_ready);
- TAILQ_INIT(&hsotg->periodic_sched_assigned);
- TAILQ_INIT(&hsotg->periodic_sched_queued);
+ INIT_LIST_HEAD(&hsotg->periodic_sched_inactive);
+ INIT_LIST_HEAD(&hsotg->periodic_sched_ready);
+ INIT_LIST_HEAD(&hsotg->periodic_sched_assigned);
+ INIT_LIST_HEAD(&hsotg->periodic_sched_queued);
/*
* Create a host channel descriptor for each host channel implemented
* in the controller. Initialize the channel descriptor array.
*/
- LIST_INIT(&hsotg->free_hc_list);
+ INIT_LIST_HEAD(&hsotg->free_hc_list);
num_channels = hsotg->core_params->host_channels;
memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array));
*/
hsotg->status_buf = NULL;
if (hsotg->core_params->dma_enable > 0) {
- retval = usb_allocmem(&hsotg->hsotg_sc->sc_bus,
- DWC2_HCD_STATUS_BUF_SIZE, 0,
- USB_DMA_COHERENT,
- &hsotg->status_buf_usbdma);
- if (!retval) {
+ int error = usb_allocmem(&hsotg->hsotg_sc->sc_bus,
+ DWC2_HCD_STATUS_BUF_SIZE, 0, USB_DMA_COHERENT,
+ &hsotg->status_buf_usbdma);
+ if (!error) {
hsotg->status_buf = KERNADDR(&hsotg->status_buf_usbdma, 0);
hsotg->status_buf_dma = DMAADDR(&hsotg->status_buf_usbdma, 0);
}
hsotg->status_buf = malloc(DWC2_HCD_STATUS_BUF_SIZE, M_DEVBUF,
M_ZERO | M_WAITOK);
+ /* retval is already -ENOMEM */
if (!hsotg->status_buf)
goto error3;
error3:
dwc2_hcd_release(hsotg);
error2:
-error1:
- free(hsotg->core_params, M_DEVBUF, sizeof(*hsotg->core_params));
+ if (hsotg->core_params != NULL)
+ free(hsotg->core_params, M_DEVBUF, sizeof(*hsotg->core_params));
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
- free(hsotg->last_frame_num_array, M_DEVBUF,
- sizeof(*hsotg->last_frame_num_array) * FRAME_NUM_ARRAY_SIZE);
- free(hsotg->frame_num_array, M_DEVBUF,
- sizeof(*hsotg->frame_num_array) * FRAME_NUM_ARRAY_SIZE);
+ if (hsotg->last_frame_num_array != NULL)
+ free(hsotg->last_frame_num_array, M_DEVBUF,
+ sizeof(*hsotg->last_frame_num_array) * FRAME_NUM_ARRAY_SIZE);
+ if (hsotg->frame_num_array != NULL)
+ free(hsotg->frame_num_array, M_DEVBUF,
+ sizeof(*hsotg->frame_num_array) * FRAME_NUM_ARRAY_SIZE);
#endif
dev_err(hsotg->dev, "%s() FAILED, returning %d\n", __func__, retval);
return retval;
}
-int dwc2_hcd_dma_config(struct dwc2_hsotg *hsotg,
- struct dwc2_core_dma_config *config)
+/*
+ * Removes the HCD.
+ * Frees memory and resources associated with the HCD and deregisters the bus.
+ */
+void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
{
- hsotg->core_dma_config = config;
- return 0;
+ struct usb_hcd *hcd;
+
+ dev_dbg(hsotg->dev, "DWC OTG HCD REMOVE\n");
+
+ hcd = dwc2_hsotg_to_hcd(hsotg);
+ dev_dbg(hsotg->dev, "hsotg->hcd = %p\n", hcd);
+
+ if (!hcd) {
+ dev_dbg(hsotg->dev, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n",
+ __func__);
+ return;
+ }
+ hsotg->priv = NULL;
+
+ dwc2_hcd_release(hsotg);
+
+#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
+	free(hsotg->last_frame_num_array, M_DEVBUF,
+	    sizeof(*hsotg->last_frame_num_array) * FRAME_NUM_ARRAY_SIZE);
+	free(hsotg->frame_num_array, M_DEVBUF,
+	    sizeof(*hsotg->frame_num_array) * FRAME_NUM_ARRAY_SIZE);
+#endif
}
-/* $OpenBSD: dwc2_hcd.h,v 1.13 2015/06/28 11:48:18 jmatthew Exp $ */
+/* $OpenBSD: dwc2_hcd.h,v 1.14 2021/07/22 18:32:33 mglocker Exp $ */
/* $NetBSD: dwc2_hcd.h,v 1.9 2014/09/03 10:00:08 skrll Exp $ */
/*
* @qh: QH for the transfer being processed by this channel
* @hc_list_entry: For linking to list of host channels
* @desc_list_addr: Current QH's descriptor list DMA address
+ * @desc_list_sz: Current QH's descriptor list size
*
* This structure represents the state of a single host channel when acting in
* host mode. It contains the data items needed to transfer packets to an
enum dwc2_halt_status halt_status;
u32 hcint;
struct dwc2_qh *qh;
- LIST_ENTRY(dwc2_host_chan) hc_list_entry;
+ struct list_head hc_list_entry;
+ struct usb_dma desc_list_usbdma;
dma_addr_t desc_list_addr;
- int in_freelist;
+ u32 desc_list_sz;
};
struct dwc2_hcd_pipe_info {
/**
* struct dwc2_qh - Software queue head structure
*
+ * @hsotg: The HCD state structure for the DWC OTG controller
* @ep_type: Endpoint type. One of the following values:
* - USB_ENDPOINT_XFER_CONTROL
* - USB_ENDPOINT_XFER_BULK
* @ntd: Actual number of transfer descriptors in a list
* @dw_align_buf: Used instead of original buffer if its physical address
* is not dword-aligned
- * @dw_align_buf_dma: DMA address for align_buf
+ * @dw_align_buf_size: Size of dw_align_buf
+ * @dw_align_buf_dma: DMA address for dw_align_buf
* @qtd_list: List of QTDs for this QH
* @channel: Host channel currently processing transfers for this QH
* @qh_list_entry: Entry for QH in either the periodic or non-periodic
* schedule
* @desc_list: List of transfer descriptors
* @desc_list_dma: Physical address of desc_list
+ * @desc_list_sz: Size of descriptors list
* @n_bytes: Xfer Bytes array. Each element corresponds to a transfer
* descriptor and indicates original XferSize value for the
* descriptor
+ * @wait_timer: Timer used to wait before re-queuing.
* @tt_buffer_dirty True if clear_tt_buffer_complete is pending
+ * @want_wait: We should wait before re-queuing; only matters for non-
+ * periodic transfers and is ignored for periodic ones.
+ * @wait_timer_cancel: Set to true to cancel the wait_timer.
*
* A Queue Head (QH) holds the static characteristics of an endpoint and
* maintains a list of transfers (QTDs) for that endpoint. A QH structure may
* be entered in either the non-periodic or periodic schedule.
*/
struct dwc2_qh {
+ struct dwc2_hsotg *hsotg;
u8 ep_type;
u8 ep_is_in;
u16 maxp;
u16 ntd;
struct usb_dma dw_align_buf_usbdma;
u8 *dw_align_buf;
+ int dw_align_buf_size;
dma_addr_t dw_align_buf_dma;
- TAILQ_HEAD(, dwc2_qtd) qtd_list;
+ struct list_head qtd_list;
struct dwc2_host_chan *channel;
- TAILQ_ENTRY(dwc2_qh) qh_list_entry;
+ struct list_head qh_list_entry;
struct usb_dma desc_list_usbdma;
struct dwc2_hcd_dma_desc *desc_list;
dma_addr_t desc_list_dma;
+ u32 desc_list_sz;
u32 *n_bytes;
+ /* XXX struct timer_list wait_timer; */
+ struct timeout wait_timer;
unsigned tt_buffer_dirty:1;
- unsigned linked:1;
+ unsigned want_wait:1;
+ unsigned wait_timer_cancel:1;
};
/**
* @n_desc: Number of DMA descriptors for this QTD
* @isoc_frame_index_last: Last activated frame (packet) index, used in
* descriptor DMA mode only
+ * @num_naks: Number of NAKs received on this QTD.
* @urb: URB for this transfer
* @qh: Queue head for this QTD
* @qtd_list_entry: For linking to the QH's list of QTDs
u8 isoc_split_pos;
u16 isoc_frame_index;
u16 isoc_split_offset;
+ u16 isoc_td_last;
+ u16 isoc_td_first;
u32 ssplit_out_xfer_count;
u8 error_count;
u8 n_desc;
u16 isoc_frame_index_last;
+ u16 num_naks;
struct dwc2_hcd_urb *urb;
struct dwc2_qh *qh;
- TAILQ_ENTRY(dwc2_qtd) qtd_list_entry;
+ struct list_head qtd_list_entry;
};
#ifdef DEBUG
#endif
/* Gets the struct usb_hcd that contains a struct dwc2_hsotg */
-STATIC_INLINE struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg)
+static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg)
{
return (struct usb_hcd *)hsotg->priv;
}
* channel is re-assigned. In fact, subsequent handling may cause crashes
* because the channel structures are cleaned up when the channel is released.
*/
-STATIC_INLINE void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr)
+static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr)
{
	u32 mask = DWC2_READ_4(hsotg, HCINTMSK(chnum));

	mask &= ~intr;
	DWC2_WRITE_4(hsotg, HCINTMSK(chnum), mask);
}
-/*
- * Returns the mode of operation, host or device
- */
-STATIC_INLINE int dwc2_is_host_mode(struct dwc2_hsotg *hsotg)
-{
- return (DWC2_READ_4(hsotg, GINTSTS) & GINTSTS_CURMODE_HOST) != 0;
-}
-
-STATIC_INLINE int dwc2_is_device_mode(struct dwc2_hsotg *hsotg)
-{
- return (DWC2_READ_4(hsotg, GINTSTS) & GINTSTS_CURMODE_HOST) == 0;
-}
-
/*
* Reads HPRT0 in preparation to modify. It keeps the WC bits 0 so that if they
* are read as 1, they won't clear when written back.
*/
-STATIC_INLINE u32 dwc2_read_hprt0(struct dwc2_hsotg *hsotg)
+static inline u32 dwc2_read_hprt0(struct dwc2_hsotg *hsotg)
{
	u32 hprt0 = DWC2_READ_4(hsotg, HPRT0);

	hprt0 &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
		   HPRT0_OVRCURRCHG);
	return hprt0;
}
-STATIC_INLINE u8 dwc2_hcd_get_ep_num(struct dwc2_hcd_pipe_info *pipe)
+static inline u8 dwc2_hcd_get_ep_num(struct dwc2_hcd_pipe_info *pipe)
{
return pipe->ep_num;
}
-STATIC_INLINE u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
+static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
{
return pipe->pipe_type;
}
-STATIC_INLINE u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe)
+static inline u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe)
{
return pipe->mps;
}
-STATIC_INLINE u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
+static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
{
return pipe->dev_addr;
}
-STATIC_INLINE u8 dwc2_hcd_is_pipe_isoc(struct dwc2_hcd_pipe_info *pipe)
+static inline u8 dwc2_hcd_is_pipe_isoc(struct dwc2_hcd_pipe_info *pipe)
{
return pipe->pipe_type == USB_ENDPOINT_XFER_ISOC;
}
-STATIC_INLINE u8 dwc2_hcd_is_pipe_int(struct dwc2_hcd_pipe_info *pipe)
+static inline u8 dwc2_hcd_is_pipe_int(struct dwc2_hcd_pipe_info *pipe)
{
return pipe->pipe_type == USB_ENDPOINT_XFER_INT;
}
-STATIC_INLINE u8 dwc2_hcd_is_pipe_bulk(struct dwc2_hcd_pipe_info *pipe)
+static inline u8 dwc2_hcd_is_pipe_bulk(struct dwc2_hcd_pipe_info *pipe)
{
return pipe->pipe_type == USB_ENDPOINT_XFER_BULK;
}
-STATIC_INLINE u8 dwc2_hcd_is_pipe_control(struct dwc2_hcd_pipe_info *pipe)
+static inline u8 dwc2_hcd_is_pipe_control(struct dwc2_hcd_pipe_info *pipe)
{
return pipe->pipe_type == USB_ENDPOINT_XFER_CONTROL;
}
-STATIC_INLINE u8 dwc2_hcd_is_pipe_in(struct dwc2_hcd_pipe_info *pipe)
+static inline u8 dwc2_hcd_is_pipe_in(struct dwc2_hcd_pipe_info *pipe)
{
return pipe->pipe_dir == USB_DIR_IN;
}
-STATIC_INLINE u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
+static inline u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
{
return !dwc2_hcd_is_pipe_in(pipe);
}
-extern int dwc2_hcd_init(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params);
-extern int dwc2_hcd_dma_config(struct dwc2_hsotg *hsotg,
- struct dwc2_core_dma_config *config);
+extern int dwc2_hcd_init(struct dwc2_hsotg *hsotg);
extern void dwc2_hcd_remove(struct dwc2_hsotg *hsotg);
-extern void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params);
-extern void dwc2_set_all_params(struct dwc2_core_params *params, int value);
-extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
/* Transaction Execution Functions */
extern enum dwc2_transaction_type dwc2_hcd_select_transactions(
/* Schedule Queue Functions */
/* Implemented in hcd_queue.c */
extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg);
+extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+ struct dwc2_hcd_urb *urb,
+ gfp_t mem_flags);
extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
- struct dwc2_qh **qh, int mem_flags);
+ struct dwc2_qh *qh);
/* Removes and frees a QTD */
extern void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
((_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_BULK || \
(_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_CONTROL)
-#ifdef DWC2_DEBUG
-STATIC_INLINE bool dbg_hc(struct dwc2_host_chan *hc) { return true; }
-STATIC_INLINE bool dbg_qh(struct dwc2_qh *qh) { return true; }
-STATIC_INLINE bool dbg_perio(void) { return true; }
-#else /* !DWC2_DEBUG */
-STATIC_INLINE bool dbg_hc(struct dwc2_host_chan *hc)
+#ifdef CONFIG_USB_DWC2_DEBUG_PERIODIC
+static inline bool dbg_hc(struct dwc2_host_chan *hc) { return true; }
+static inline bool dbg_qh(struct dwc2_qh *qh) { return true; }
+static inline bool dbg_perio(void) { return true; }
+#else /* !CONFIG_USB_DWC2_DEBUG_PERIODIC */
+static inline bool dbg_hc(struct dwc2_host_chan *hc)
{
return hc->ep_type == USB_ENDPOINT_XFER_BULK ||
hc->ep_type == USB_ENDPOINT_XFER_CONTROL;
}
-STATIC_INLINE bool dbg_qh(struct dwc2_qh *qh)
+static inline bool dbg_qh(struct dwc2_qh *qh)
{
return qh->ep_type == USB_ENDPOINT_XFER_BULK ||
qh->ep_type == USB_ENDPOINT_XFER_CONTROL;
}
-STATIC_INLINE bool dbg_perio(void) { return false; }
+static inline bool dbg_perio(void) { return false; }
#endif
/* High bandwidth multiplier as encoded in highspeed endpoint descriptors */
/* Packet size for any kind of endpoint descriptor */
#define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff)
+/*
+ * Returns true if frame1 index is greater than frame2 index. The comparison
+ * is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the
+ * frame number when the max index frame number is reached.
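+ *
+ * For example, with FRLISTEN_64_SIZE == 64: (3, 61) leaves a difference of
+ * 6 (mod 64) with bit 5 clear, so the result is true (index 3 is six slots
+ * past 61 after rollover), while (10, 20) leaves 54 (mod 64) with bit 5
+ * set, so the result is false.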
+ */
+static inline bool dwc2_frame_idx_num_gt(u16 fr_idx1, u16 fr_idx2)
+{
+ u16 diff = fr_idx1 - fr_idx2;
+ u16 sign = diff & (FRLISTEN_64_SIZE >> 1);
+
+ return diff && !sign;
+}
+
/*
* Returns true if frame1 is less than or equal to frame2. The comparison is
* done modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the
* frame number when the max frame number is reached.
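+ *
+ * Example: HFNUM_MAX_FRNUM is 0x3fff, so dwc2_frame_num_le(0x3ffe, 1)
+ * computes (1 - 0x3ffe) & 0x3fff == 3 <= 0x1fff and returns true: frame
+ * 0x3ffe precedes frame 1 of the next rollover period.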
*/
-STATIC_INLINE int dwc2_frame_num_le(u16 frame1, u16 frame2)
+static inline int dwc2_frame_num_le(u16 frame1, u16 frame2)
{
return ((frame2 - frame1) & HFNUM_MAX_FRNUM) <= (HFNUM_MAX_FRNUM >> 1);
}
* modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
* number when the max frame number is reached.
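+ *
+ * Example: dwc2_frame_num_gt(1, 0x3ffe) is true, since frame 1 is three
+ * frames past 0x3ffe, while dwc2_frame_num_gt(0x3ffe, 1) is false.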
*/
-STATIC_INLINE int dwc2_frame_num_gt(u16 frame1, u16 frame2)
+static inline int dwc2_frame_num_gt(u16 frame1, u16 frame2)
{
return (frame1 != frame2) &&
((frame1 - frame2) & HFNUM_MAX_FRNUM) < (HFNUM_MAX_FRNUM >> 1);
* Increments frame by the amount specified by inc. The addition is done
* modulo HFNUM_MAX_FRNUM. Returns the incremented value.
*/
-STATIC_INLINE u16 dwc2_frame_num_inc(u16 frame, u16 inc)
+static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc)
{
return (frame + inc) & HFNUM_MAX_FRNUM;
}
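+
+/*
+ * The low three bits of the (micro)frame number select the microframe;
+ * the rest is the full 1 ms frame. Example: frame 0x1234 has full frame
+ * number 0x246 and microframe 4.
+ */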
-STATIC_INLINE u16 dwc2_full_frame_num(u16 frame)
+static inline u16 dwc2_full_frame_num(u16 frame)
{
return (frame & HFNUM_MAX_FRNUM) >> 3;
}
-STATIC_INLINE u16 dwc2_micro_frame_num(u16 frame)
+static inline u16 dwc2_micro_frame_num(u16 frame)
{
return frame & 0x7;
}
* Returns the Core Interrupt Status register contents, ANDed with the Core
* Interrupt Mask register contents
*/
-STATIC_INLINE u32 dwc2_read_core_intr(struct dwc2_hsotg *hsotg)
+static inline u32 dwc2_read_core_intr(struct dwc2_hsotg *hsotg)
{
return DWC2_READ_4(hsotg, GINTSTS) & DWC2_READ_4(hsotg, GINTMSK);
}
-STATIC_INLINE u32 dwc2_hcd_urb_get_status(struct dwc2_hcd_urb *dwc2_urb)
+static inline u32 dwc2_hcd_urb_get_status(struct dwc2_hcd_urb *dwc2_urb)
{
return dwc2_urb->status;
}
-STATIC_INLINE u32 dwc2_hcd_urb_get_actual_length(
+static inline u32 dwc2_hcd_urb_get_actual_length(
struct dwc2_hcd_urb *dwc2_urb)
{
return dwc2_urb->actual_length;
}
-STATIC_INLINE u32 dwc2_hcd_urb_get_error_count(struct dwc2_hcd_urb *dwc2_urb)
+static inline u32 dwc2_hcd_urb_get_error_count(struct dwc2_hcd_urb *dwc2_urb)
{
return dwc2_urb->error_count;
}
-STATIC_INLINE void dwc2_hcd_urb_set_iso_desc_params(
+static inline void dwc2_hcd_urb_set_iso_desc_params(
struct dwc2_hcd_urb *dwc2_urb, int desc_num, u32 offset,
u32 length)
{
dwc2_urb->iso_descs[desc_num].length = length;
}
-STATIC_INLINE u32 dwc2_hcd_urb_get_iso_desc_status(
+static inline u32 dwc2_hcd_urb_get_iso_desc_status(
struct dwc2_hcd_urb *dwc2_urb, int desc_num)
{
return dwc2_urb->iso_descs[desc_num].status;
}
-STATIC_INLINE u32 dwc2_hcd_urb_get_iso_desc_actual_length(
+static inline u32 dwc2_hcd_urb_get_iso_desc_actual_length(
struct dwc2_hcd_urb *dwc2_urb, int desc_num)
{
return dwc2_urb->iso_descs[desc_num].actual_length;
}
-STATIC_INLINE int dwc2_hcd_is_bandwidth_allocated(struct dwc2_hsotg *hsotg,
+static inline int dwc2_hcd_is_bandwidth_allocated(struct dwc2_hsotg *hsotg,
struct usbd_xfer *xfer)
{
struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
struct dwc2_qh *qh = dpipe->priv;
- if (qh && qh->linked)
+ if (qh && !list_empty(&qh->qh_list_entry))
return 1;
return 0;
}
-STATIC_INLINE u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg,
+static inline u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg,
struct dwc2_pipe *dpipe)
{
struct dwc2_qh *qh = dpipe->priv;
*/
extern void dwc2_hcd_stop(struct dwc2_hsotg *hsotg);
-extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg);
-extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg);
-
/**
* dwc2_hcd_is_b_host() - Returns 1 if core currently is acting as B host,
* and 0 otherwise
*/
extern int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg);
-/**
- * dwc2_hcd_get_frame_number() - Returns current frame number
- *
- * @hsotg: The DWC2 HCD
- */
-extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg);
-
/**
* dwc2_hcd_dump_state() - Dumps hsotg state
*
qtd_list_entry); \
if (usb_pipeint(_qtd_->urb->pipe) && \
(_qh_)->start_split_frame != 0 && !_qtd_->complete_split) { \
- _hfnum_.d32 = DWC2_READ_4(hsotg, (_hcd_)->regs + HFNUM); \
+ _hfnum_.d32 = DWC2_READ_4((_hcd_), HFNUM); \
switch (_hfnum_.b.frnum & 0x7) { \
case 7: \
(_hcd_)->hfnum_7_samples_##_letter_++; \
void dwc2_hcd_reinit(struct dwc2_hsotg *);
int dwc2_hcd_hub_control(struct dwc2_hsotg *, u16, u16, u16, char *, u16);
struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *);
-int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *, struct dwc2_hcd_urb *, void **,
- gfp_t);
+int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
+ struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
+ struct dwc2_qtd *qtd);
void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *, struct dwc2_hcd_urb *,
u8 ,u8, u8, u8, u16);
-void dwc2_conn_id_status_change(void *);
-void dwc2_hcd_start_func(void *);
-void dwc2_hcd_reset_func(void *);
-
struct dwc2_hcd_urb * dwc2_hcd_urb_alloc(struct dwc2_hsotg *, int, gfp_t);
void dwc2_hcd_urb_free(struct dwc2_hsotg *, struct dwc2_hcd_urb *, int);
-/* $OpenBSD: dwc2_hcdddma.c,v 1.16 2020/03/21 12:08:31 patrick Exp $ */
+/* $OpenBSD: dwc2_hcdddma.c,v 1.17 2021/07/22 18:32:33 mglocker Exp $ */
/* $NetBSD: dwc2_hcdddma.c,v 1.6 2014/04/03 06:34:58 skrll Exp $ */
/*
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
-#if 0
-#include <sys/cpu.h>
-#endif
#include <machine/bus.h>
{
int err;
- //KASSERT(!cpu_intr_p() && !cpu_softintr_p());
-
qh->desc_list = NULL;
- err = usb_allocmem(&hsotg->hsotg_sc->sc_bus,
- sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh), 0,
- USB_DMA_COHERENT, &qh->desc_list_usbdma);
+ qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
+ dwc2_max_desc_num(qh);
- if (!err) {
- qh->desc_list = KERNADDR(&qh->desc_list_usbdma, 0);
- qh->desc_list_dma = DMAADDR(&qh->desc_list_usbdma, 0);
- }
+ err = usb_allocmem(&hsotg->hsotg_sc->sc_bus, qh->desc_list_sz, 0,
+ USB_DMA_COHERENT, &qh->desc_list_usbdma);
- if (!qh->desc_list)
+ if (err)
return -ENOMEM;
- memset(qh->desc_list, 0,
- sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh));
+ qh->desc_list = KERNADDR(&qh->desc_list_usbdma, 0);
+ qh->desc_list_dma = DMAADDR(&qh->desc_list_usbdma, 0);
- qh->n_bytes = mallocarray(dwc2_max_desc_num(qh), sizeof(u32), M_DEVBUF,
+ qh->n_bytes = malloc(sizeof(u32) * dwc2_max_desc_num(qh), M_DEVBUF,
M_ZERO | M_WAITOK);
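+	/*
+	 * Note: malloc(9) with M_WAITOK sleeps until memory is available
+	 * and never returns NULL, so the failure branch below is defensive.
+	 */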
+ if (!qh->n_bytes) {
+ usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->desc_list_usbdma);
+ qh->desc_list = NULL;
+ return -ENOMEM;
+ }
+
return 0;
}
STATIC void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
+
if (qh->desc_list) {
usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->desc_list_usbdma);
qh->desc_list = NULL;
if (hsotg->frame_list)
return 0;
- /* XXXNH - struct pool */
+ /* XXXNH - pool_cache_t */
+ hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
hsotg->frame_list = NULL;
- err = usb_allocmem(&hsotg->hsotg_sc->sc_bus, 4 * FRLISTEN_64_SIZE,
+ err = usb_allocmem(&hsotg->hsotg_sc->sc_bus, hsotg->frame_list_sz,
0, USB_DMA_COHERENT, &hsotg->frame_list_usbdma);
if (!err) {
if (!hsotg->frame_list)
return -ENOMEM;
- memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE);
return 0;
}
j = (j + inc) & (FRLISTEN_64_SIZE - 1);
} while (j != i);
+ /*
+ * Sync frame list since controller will access it if periodic
+ * channel is currently enabled.
+ */
+ usb_syncmem(&hsotg->frame_list_usbdma, 0, hsotg->frame_list_sz,
+ BUS_DMASYNC_PREWRITE);
+
if (!enable)
return;
hsotg->non_periodic_channels--;
} else {
dwc2_update_frame_list(hsotg, qh, 0);
+ hsotg->available_host_channels++;
}
/*
* device disconnect. See channel cleanup in dwc2_hcd_disconnect().
*/
if (chan->qh) {
- if (chan->in_freelist != 0)
- LIST_REMOVE(chan, hc_list_entry);
+ if (!list_empty(&chan->hc_list_entry))
+ list_del(&chan->hc_list_entry);
dwc2_hc_cleanup(hsotg, chan);
- LIST_INSERT_HEAD(&hsotg->free_hc_list, chan, hc_list_entry);
+ list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
chan->qh = NULL;
- chan->in_freelist = 1;
}
qh->channel = NULL;
*/
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
+ unsigned long flags;
+
dwc2_desc_list_free(hsotg, qh);
/*
* when it comes here from endpoint disable routine
* channel remains assigned.
*/
+ spin_lock_irqsave(&hsotg->lock, flags);
if (qh->channel)
dwc2_release_channel_ddma(hsotg, qh);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
qh->ep_type == USB_ENDPOINT_XFER_INT) &&
dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
HOST_DMA_ISOC_NBYTES_MASK;
+ /* Set active bit */
+ dma_desc->status |= HOST_DMA_A;
+
+ qh->ntd++;
+ qtd->isoc_frame_index_last++;
+
#ifdef ISOC_URB_GIVEBACK_ASAP
/* Set IOC for each descriptor corresponding to last frame of URB */
if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
dma_desc->status |= HOST_DMA_IOC;
#endif
- qh->ntd++;
- qtd->isoc_frame_index_last++;
+ usb_syncmem(&qh->desc_list_usbdma,
+ (idx * sizeof(struct dwc2_hcd_dma_desc)),
+ sizeof(struct dwc2_hcd_dma_desc),
+ BUS_DMASYNC_PREWRITE);
}
STATIC void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
{
struct dwc2_qtd *qtd;
u32 max_xfer_size;
- u16 idx, inc, n_desc, ntd_max = 0;
+ u16 idx, inc, n_desc = 0, ntd_max = 0;
+ u16 cur_idx;
+ u16 next_idx;
idx = qh->td_last;
inc = qh->interval;
- n_desc = 0;
+ hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
+ cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
+ next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
+
+ /*
+ * Ensure current frame number didn't overstep last scheduled
+ * descriptor. If it happens, the only way to recover is to move
+ * qh->td_last to current frame number + 1.
+ * So that next isoc descriptor will be scheduled on frame number + 1
+ * and not on a past frame.
+ */
+ if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
+ if (inc < 32) {
+ dev_vdbg(hsotg->dev,
+ "current frame number overstep last descriptor\n");
+ qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
+ qh->dev_speed);
+ idx = qh->td_last;
+ }
+ }
if (qh->interval) {
ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;
- TAILQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
+ if (qtd->in_process &&
+ qtd->isoc_frame_index_last ==
+ qtd->urb->packet_count)
+ continue;
+
+ qtd->isoc_td_first = idx;
while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
qtd->urb->packet_count) {
- if (n_desc > 1)
- qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
max_xfer_size, idx);
idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
n_desc++;
}
+ qtd->isoc_td_last = idx;
qtd->in_process = 1;
}
if (qh->ntd == ntd_max) {
idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
qh->desc_list[idx].status |= HOST_DMA_IOC;
+
+ usb_syncmem(&qh->desc_list_usbdma,
+ (idx * sizeof(struct dwc2_hcd_dma_desc)),
+ sizeof(struct dwc2_hcd_dma_desc),
+ BUS_DMASYNC_PREWRITE);
}
#else
/*
idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
qh->desc_list[idx].status |= HOST_DMA_IOC;
+ usb_syncmem(&qh->desc_list_usbdma,
+ (idx * sizeof(struct dwc2_hcd_dma_desc)),
+ sizeof(struct dwc2_hcd_dma_desc),
+ BUS_DMASYNC_PREWRITE);
#endif
-
- if (n_desc) {
- qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
- if (n_desc > 1)
- qh->desc_list[0].status |= HOST_DMA_A;
- }
}
STATIC void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
dma_desc->buf = (u32)chan->xfer_dma;
+ usb_syncmem(&qh->desc_list_usbdma,
+ (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
+ sizeof(struct dwc2_hcd_dma_desc),
+ BUS_DMASYNC_PREWRITE);
+
/*
* Last (or only) descriptor of IN transfer with actual size less
* than MaxPacket
* there is always one QTD active.
*/
- TAILQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);
if (n_desc) {
"set A bit in desc %d (%p)\n",
n_desc - 1,
&qh->desc_list[n_desc - 1]);
+ usb_syncmem(&qh->desc_list_usbdma,
+ ((n_desc - 1) *
+ sizeof(struct dwc2_hcd_dma_desc)),
+ sizeof(struct dwc2_hcd_dma_desc),
+ BUS_DMASYNC_PREWRITE);
}
dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
dev_vdbg(hsotg->dev,
HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
n_desc - 1, &qh->desc_list[n_desc - 1]);
+ usb_syncmem(&qh->desc_list_usbdma,
+ ((n_desc - 1) * sizeof(struct dwc2_hcd_dma_desc)),
+ sizeof(struct dwc2_hcd_dma_desc),
+ BUS_DMASYNC_PREWRITE);
if (n_desc > 1) {
qh->desc_list[0].status |= HOST_DMA_A;
dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
&qh->desc_list[0]);
+ usb_syncmem(&qh->desc_list_usbdma, 0,
+ sizeof(struct dwc2_hcd_dma_desc),
+ BUS_DMASYNC_PREWRITE);
}
chan->ntd = n_desc;
}
struct dwc2_qtd *qtd,
struct dwc2_qh *qh, u16 idx)
{
- struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
+ struct dwc2_hcd_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
u16 remain = 0;
int rc = 0;
if (!qtd->urb)
return -EINVAL;
+ usb_syncmem(&qh->desc_list_usbdma,
+ (idx * sizeof(struct dwc2_hcd_dma_desc)),
+ sizeof(struct dwc2_hcd_dma_desc),
+ BUS_DMASYNC_POSTREAD);
+
+ dma_desc = &qh->desc_list[idx];
+
frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
if (chan->ep_is_in)
idx = qh->td_first;
if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
- TAILQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry)
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
qtd->in_process = 0;
return;
}
int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
-EIO : -EOVERFLOW;
- TAILQ_FOREACH_SAFE(qtd, &qh->qtd_list, qtd_list_entry, qtd_tmp) {
+ list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
+ qtd_list_entry) {
if (qtd->urb) {
for (idx = 0; idx < qtd->urb->packet_count;
idx++) {
return;
}
- TAILQ_FOREACH_SAFE(qtd, &qh->qtd_list, qtd_list_entry, qtd_tmp) {
+ list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
if (!qtd->in_process)
break;
+
+ /*
+ * Ensure idx corresponds to descriptor where first urb of this
+ * qtd was added. In fact, during isoc desc init, dwc2 may skip
+ * an index if current frame number is already over this index.
+ */
+ if (idx != qtd->isoc_td_first) {
+ dev_vdbg(hsotg->dev,
+ "try to complete %d instead of %d\n",
+ idx, qtd->isoc_td_first);
+ idx = qtd->isoc_td_first;
+ }
+
do {
+ struct dwc2_qtd *qtd_next;
+ u16 cur_idx;
+
rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
idx);
if (rc < 0)
return;
idx = dwc2_desclist_idx_inc(idx, qh->interval,
chan->speed);
- if (rc == DWC2_CMPL_STOP)
- goto stop_scan;
+ if (!rc)
+ continue;
+
if (rc == DWC2_CMPL_DONE)
break;
+
+ /* rc == DWC2_CMPL_STOP */
+
+ if (qh->interval >= 32)
+ goto stop_scan;
+
+ qh->td_first = idx;
+ cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
+ qtd_next = list_first_entry(&qh->qtd_list,
+ struct dwc2_qtd,
+ qtd_list_entry);
+ if (dwc2_frame_idx_num_gt(cur_idx,
+ qtd_next->isoc_td_last))
+ break;
+
+ goto stop_scan;
+
} while (idx != qh->td_first);
}
if (!urb)
return -EINVAL;
+ usb_syncmem(&qh->desc_list_usbdma,
+ (desc_num * sizeof(struct dwc2_hcd_dma_desc)),
+ sizeof(struct dwc2_hcd_dma_desc),
+ BUS_DMASYNC_POSTREAD);
+
dma_desc = &qh->desc_list[desc_num];
n_bytes = qh->n_bytes[desc_num];
dev_vdbg(hsotg->dev,
failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
halt_status, n_bytes,
xfer_done);
- if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
+ if (*xfer_done && urb->status != -EINPROGRESS)
+ failed = 1;
+
+ if (failed) {
dwc2_host_complete(hsotg, qtd, urb->status);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
int chnum,
enum dwc2_halt_status halt_status)
{
+ struct list_head *qtd_item, *qtd_tmp;
struct dwc2_qh *qh = chan->qh;
- struct dwc2_qtd *qtd = NULL, *qtd_tmp;
+ struct dwc2_qtd *qtd = NULL;
int xfer_done;
int desc_num = 0;
if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
- TAILQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry)
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
qtd->in_process = 0;
return;
}
- TAILQ_FOREACH_SAFE(qtd, &qh->qtd_list, qtd_list_entry, qtd_tmp) {
+ list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
int i;
+ qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
xfer_done = 0;
for (i = 0; i < qtd->n_desc; i++) {
/* Release the channel if halted or session completed */
if (halt_status != DWC2_HC_XFER_COMPLETE ||
- TAILQ_EMPTY(&qh->qtd_list)) {
+ list_empty(&qh->qtd_list)) {
+ struct dwc2_qtd *qtd, *qtd_tmp;
+
+ /*
+			 * Kill all remaining QTDs since the channel has been
+ * halted.
+ */
+ list_for_each_entry_safe(qtd, qtd_tmp,
+ &qh->qtd_list,
+ qtd_list_entry) {
+ dwc2_host_complete(hsotg, qtd,
+ -ECONNRESET);
+ dwc2_hcd_qtd_unlink_and_free(hsotg,
+ qtd, qh);
+ }
+
/* Halt the channel if session completed */
if (halt_status == DWC2_HC_XFER_COMPLETE)
dwc2_hc_halt(hsotg, chan, halt_status);
dwc2_hcd_qh_unlink(hsotg, qh);
} else {
/* Keep in assigned schedule to continue transfer */
- TAILQ_REMOVE(&hsotg->periodic_sched_queued, qh, qh_list_entry);
- TAILQ_INSERT_TAIL(&hsotg->periodic_sched_assigned, qh, qh_list_entry);
- continue_isoc_xfer = 1;
+ list_move(&qh->qh_list_entry,
+ &hsotg->periodic_sched_assigned);
+ /*
+ * If channel has been halted during giveback of urb
+ * then prevent any new scheduling.
+ */
+ if (!chan->halt_status)
+ continue_isoc_xfer = 1;
}
/*
* Todo: Consider the case when period exceeds FrameList size.
dwc2_release_channel_ddma(hsotg, qh);
dwc2_hcd_qh_unlink(hsotg, qh);
- if (!TAILQ_EMPTY(&qh->qtd_list)) {
+ if (!list_empty(&qh->qtd_list)) {
/*
* Add back to inactive non-periodic schedule on normal
* completion
-/* $OpenBSD: dwc2_hcdintr.c,v 1.10 2017/09/08 05:36:53 deraadt Exp $ */
+/* $OpenBSD: dwc2_hcdintr.c,v 1.11 2021/07/22 18:32:33 mglocker Exp $ */
/* $NetBSD: dwc2_hcdintr.c,v 1.11 2014/11/24 10:14:14 skrll Exp $ */
/*
#include <dev/usb/dwc2/dwc2_core.h>
#include <dev/usb/dwc2/dwc2_hcd.h>
+/*
+ * If we get this many NAKs on a split transaction we'll slow down
+ * retransmission. A 1 here means delay after the first NAK.
+ */
+#define DWC2_NAKS_BEFORE_DELAY 3
+int dwc2_naks_before_delay = DWC2_NAKS_BEFORE_DELAY;
+
+#define DWC2_OUT_NAKS_BEFORE_DELAY 1
+int dwc2_out_naks_before_delay = DWC2_OUT_NAKS_BEFORE_DELAY;
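+
+/*
+ * Once a qtd accumulates that many NAKs, the NAK handler sets
+ * qh->want_wait; dwc2_hcd_qh_add() then parks the QH on the
+ * non_periodic_sched_waiting list until qh->wait_timer re-queues it
+ * (see dwc2_hcdqueue.c).
+ */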
+
/* This function is for debug only */
STATIC void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
*/
STATIC void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
{
- struct dwc2_qh *qh, *qhn;
+ struct list_head *qh_entry;
+ struct dwc2_qh *qh;
enum dwc2_transaction_type tr_type;
+ /* Clear interrupt */
+ DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_SOF);
+
#ifdef DEBUG_SOF
dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
#endif
dwc2_track_missed_sofs(hsotg);
/* Determine whether any periodic QHs should be executed */
- qh = TAILQ_FIRST(&hsotg->periodic_sched_inactive);
- while (qh != NULL) {
- qhn = TAILQ_NEXT(qh, qh_list_entry);
- if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number)) {
+ qh_entry = hsotg->periodic_sched_inactive.next;
+ while (qh_entry != &hsotg->periodic_sched_inactive) {
+ qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
+ qh_entry = qh_entry->next;
+ if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number))
/*
* Move QH to the ready list to be executed next
* (micro)frame
*/
- TAILQ_REMOVE(&hsotg->periodic_sched_inactive, qh, qh_list_entry);
- TAILQ_INSERT_TAIL(&hsotg->periodic_sched_ready, qh, qh_list_entry);
- }
- qh = qhn;
+ list_move(&qh->qh_list_entry,
+ &hsotg->periodic_sched_ready);
}
tr_type = dwc2_hcd_select_transactions(hsotg);
if (tr_type != DWC2_TRANSACTION_NONE)
dwc2_hcd_queue_transactions(hsotg, tr_type);
-
- /* Clear interrupt */
- DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_SOF);
}
/*
if (do_reset) {
*hprt0_modify |= HPRT0_RST;
+ DWC2_WRITE_4(hsotg, HPRT0, *hprt0_modify);
queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
msecs_to_jiffies(60));
} else {
* Set flag and clear if detected
*/
if (hprt0 & HPRT0_CONNDET) {
+ DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify | HPRT0_CONNDET);
+
dev_vdbg(hsotg->dev,
"--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
hprt0);
- hsotg->flags.b.port_connect_status_change = 1;
- hsotg->flags.b.port_connect_status = 1;
- hprt0_modify |= HPRT0_CONNDET;
+ dwc2_hcd_connect(hsotg);
/*
* The Hub driver asserts a reset when it sees port connect
* Clear if detected - Set internal flag if disabled
*/
if (hprt0 & HPRT0_ENACHG) {
+ DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify | HPRT0_ENACHG);
dev_vdbg(hsotg->dev,
" --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
hprt0, !!(hprt0 & HPRT0_ENA));
- hprt0_modify |= HPRT0_ENACHG;
- if (hprt0 & HPRT0_ENA)
+ if (hprt0 & HPRT0_ENA) {
+ hsotg->new_connection = true;
dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
- else
+ } else {
hsotg->flags.b.port_enable_change = 1;
+ if (hsotg->core_params->dma_desc_fs_enable) {
+ u32 hcfg;
+
+ hsotg->core_params->dma_desc_enable = 0;
+ hsotg->new_connection = false;
+ hcfg = DWC2_READ_4(hsotg, HCFG);
+ hcfg &= ~HCFG_DESCDMA;
+ DWC2_WRITE_4(hsotg, HCFG, hcfg);
+ }
+ }
}
/* Overcurrent Change Interrupt */
if (hprt0 & HPRT0_OVRCURRCHG) {
+ DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify | HPRT0_OVRCURRCHG);
dev_vdbg(hsotg->dev,
" --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
hprt0);
hsotg->flags.b.port_over_current_change = 1;
- hprt0_modify |= HPRT0_OVRCURRCHG;
}
- /* Clear Port Interrupts */
- DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify);
-
if (hsotg->flags.b.port_connect_status_change ||
hsotg->flags.b.port_enable_change ||
hsotg->flags.b.port_over_current_change)
}
/* Non DWORD-aligned buffer case handling */
- if (chan->align_buf && xfer_length && chan->ep_is_in) {
+ if (chan->align_buf && xfer_length) {
dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
- usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_POSTREAD);
- memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
- xfer_length);
- usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_PREREAD);
+		struct usb_dma *ud = &chan->qh->dw_align_buf_usbdma;
+
+		usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
+		    chan->ep_is_in ?
+		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
+		if (chan->ep_is_in)
+			memcpy(urb->buf + urb->actual_length,
+			    chan->qh->dw_align_buf, xfer_length);
+		usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
+		    chan->ep_is_in ?
+		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
}
dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
chan, chnum, qtd, halt_status, NULL);
/* Non DWORD-aligned buffer case handling */
- if (chan->align_buf && frame_desc->actual_length &&
- chan->ep_is_in) {
+ if (chan->align_buf && frame_desc->actual_length) {
dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
__func__);
- usb_syncmem(urb->usbdma, 0, urb->length,
- BUS_DMASYNC_POSTREAD);
- memcpy(urb->buf + frame_desc->offset +
- qtd->isoc_split_offset, chan->qh->dw_align_buf,
- frame_desc->actual_length);
- usb_syncmem(urb->usbdma, 0, urb->length,
- BUS_DMASYNC_PREREAD);
+ struct usb_dma *ud = &chan->qh->dw_align_buf_usbdma;
+
+ usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
+ chan->ep_is_in ?
+ BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
+ if (chan->ep_is_in)
+ memcpy(urb->buf + frame_desc->offset +
+ qtd->isoc_split_offset,
+ chan->qh->dw_align_buf,
+ frame_desc->actual_length);
+ usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
+ chan->ep_is_in ?
+ BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
}
break;
case DWC2_HC_XFER_FRAME_OVERRUN:
chan, chnum, qtd, halt_status, NULL);
/* Non DWORD-aligned buffer case handling */
- if (chan->align_buf && frame_desc->actual_length &&
- chan->ep_is_in) {
+ if (chan->align_buf && frame_desc->actual_length) {
dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
__func__);
- usb_syncmem(urb->usbdma, 0, urb->length,
- BUS_DMASYNC_POSTREAD);
- memcpy(urb->buf + frame_desc->offset +
- qtd->isoc_split_offset, chan->qh->dw_align_buf,
- frame_desc->actual_length);
- usb_syncmem(urb->usbdma, 0, urb->length,
- BUS_DMASYNC_PREREAD);
+ struct usb_dma *ud = &chan->qh->dw_align_buf_usbdma;
+
+ usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
+ chan->ep_is_in ?
+ BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
+ if (chan->ep_is_in)
+ memcpy(urb->buf + frame_desc->offset +
+ qtd->isoc_split_offset,
+ chan->qh->dw_align_buf,
+ frame_desc->actual_length);
+ usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
+ chan->ep_is_in ?
+ BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
}
/* Skip whole frame */
dev_vdbg(hsotg->dev, " %s(%p,%p,%d)\n", __func__,
hsotg, qh, free_qtd);
- if (TAILQ_EMPTY(&qh->qtd_list)) {
+ if (list_empty(&qh->qtd_list)) {
dev_dbg(hsotg->dev, "## QTD list empty ##\n");
goto no_qtd;
}
- qtd = TAILQ_FIRST(&qh->qtd_list);
+ qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
if (qtd->complete_split)
continue_split = 1;
no_qtd:
if (qh->channel)
qh->channel->align_buf = 0;
- dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
qh->channel = NULL;
+ dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
}
/**
* function clears the channel interrupt enables and conditions, so
* there's no need to clear the Channel Halted interrupt separately.
*/
- if (chan->in_freelist != 0)
- LIST_REMOVE(chan, hc_list_entry);
+ if (!list_empty(&chan->hc_list_entry))
+ list_del(&chan->hc_list_entry);
dwc2_hc_cleanup(hsotg, chan);
- LIST_INSERT_HEAD(&hsotg->free_hc_list, chan, hc_list_entry);
- chan->in_freelist = 1;
+ list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
if (hsotg->core_params->uframe_sched > 0) {
hsotg->available_host_channels++;
* halt to be queued when the periodic schedule is
* processed.
*/
- TAILQ_REMOVE(&hsotg->periodic_sched_queued, chan->qh, qh_list_entry);
- TAILQ_INSERT_TAIL(&hsotg->periodic_sched_assigned, chan->qh, qh_list_entry);
+ list_move(&chan->qh->qh_list_entry,
+ &hsotg->periodic_sched_assigned);
/*
* Make sure the Periodic Tx FIFO Empty interrupt is
if (chan->align_buf) {
dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
- usb_syncmem(qtd->urb->usbdma, 0, qtd->urb->length,
- BUS_DMASYNC_POSTREAD);
+		usb_syncmem(&chan->qh->dw_align_buf_usbdma, 0,
+		    chan->qh->dw_align_buf_size, BUS_DMASYNC_POSTREAD);
memcpy(qtd->urb->buf + frame_desc->offset +
qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
- usb_syncmem(qtd->urb->usbdma, 0, qtd->urb->length,
- BUS_DMASYNC_PREREAD);
+		usb_syncmem(&chan->qh->dw_align_buf_usbdma, 0,
+		    chan->qh->dw_align_buf_size, BUS_DMASYNC_PREREAD);
}
qtd->isoc_split_offset += len;
/* Non DWORD-aligned buffer case handling */
if (chan->align_buf && xfer_length && chan->ep_is_in) {
dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
- usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_POSTREAD);
- memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
- xfer_length);
- usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_PREREAD);
+
+ struct usb_dma *ud = &chan->qh->dw_align_buf_usbdma;
+
+ usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
+ chan->ep_is_in ?
+ BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
+ if (chan->ep_is_in)
+ memcpy(urb->buf + urb->actual_length,
+ chan->qh->dw_align_buf,
+ xfer_length);
+ usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
+ chan->ep_is_in ?
+ BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
}
urb->actual_length += xfer_length;
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
+ if (!qtd) {
+ dev_dbg(hsotg->dev, "%s: qtd is NULL\n", __func__);
+ return;
+ }
+
+ if (!qtd->urb) {
+ dev_dbg(hsotg->dev, "%s: qtd->urb is NULL\n", __func__);
+ return;
+ }
+
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
chnum);
/*
* Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
* interrupt. Re-start the SSPLIT transfer.
+ *
+ * Normally for non-periodic transfers we'll retry right away, but to
+ * avoid interrupt storms we'll wait before retrying if we've got
+ * several NAKs. If we didn't do this we'd retry directly from the
+ * interrupt handler and could end up quickly getting another
+ * interrupt (another NAK), which we'd retry.
+ *
+ * Note that in DMA mode software only gets involved to re-send NAKed
+ * transfers for split transactions unless the core is missing OUT NAK
+ * enhancement.
*/
if (chan->do_split) {
/*
if (chan->complete_split)
qtd->error_count = 0;
qtd->complete_split = 0;
+ qtd->num_naks++;
+ qtd->qh->want_wait = qtd->num_naks >= dwc2_naks_before_delay;
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
goto handle_nak_done;
}
*/
qtd->error_count = 0;
+ if (hsotg->core_params->dma_enable > 0 && !chan->ep_is_in) {
+ /*
+ * Avoid interrupt storms.
+ */
+ qtd->num_naks++;
+ qtd->qh->want_wait = qtd->num_naks >= dwc2_out_naks_before_delay;
+ }
if (!chan->qh->ping_state) {
dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
qtd, DWC2_HC_XFER_NAK);
"NYET/NAK/ACK/other in non-error case, 0x%08x\n",
chan->hcint);
error:
- /* use the 3-strikes rule */
+	/* Fall through: use the 3-strikes rule */
qtd->error_count++;
dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
- qtd, DWC2_HC_XFER_XACT_ERR);
+ qtd, DWC2_HC_XFER_XACT_ERR);
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
}
*/
STATIC bool dwc2_check_qtd_still_ok(struct dwc2_qtd *qtd, struct dwc2_qh *qh)
{
- if (!qh)
+ struct dwc2_qtd *cur_head;
+
+ if (qh == NULL)
return false;
- return (TAILQ_FIRST(&qh->qtd_list) == qtd);
+ cur_head = list_first_entry(&qh->qtd_list, struct dwc2_qtd,
+ qtd_list_entry);
+ return (cur_head == qtd);
}
/* Handles interrupt for a specific Host Channel */
return;
}
- if (TAILQ_EMPTY(&chan->qh->qtd_list)) {
+ if (list_empty(&chan->qh->qtd_list)) {
/*
* TODO: Will this ever happen with the
* DWC2_HC_XFER_URB_DEQUEUE handling above?
return;
}
- qtd = TAILQ_FIRST(&chan->qh->qtd_list);
+ qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
+ qtd_list_entry);
if (hsotg->core_params->dma_enable <= 0) {
if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
-/* $OpenBSD: dwc2_hcdqueue.c,v 1.9 2017/09/08 05:36:53 deraadt Exp $ */
+/* $OpenBSD: dwc2_hcdqueue.c,v 1.10 2021/07/22 18:32:33 mglocker Exp $ */
/* $NetBSD: dwc2_hcdqueue.c,v 1.11 2014/09/03 10:00:08 skrll Exp $ */
/*
#include <dev/usb/dwc2/dwc2_hcd.h>
STATIC u32 dwc2_calc_bus_time(struct dwc2_hsotg *, int, int, int, int);
+STATIC void dwc2_wait_timer_fn(void *);
+
+/* If we get a NAK, wait this long before retrying */
+#define DWC2_RETRY_WAIT_DELAY 1 /* msec */
/**
* dwc2_qh_init() - Initializes a QH structure
dev_vdbg(hsotg->dev, "%s()\n", __func__);
/* Initialize QH */
+ qh->hsotg = hsotg;
+ /* XXX timer_setup(&qh->wait_timer, dwc2_wait_timer_fn, 0); */
+ timeout_set(&qh->wait_timer, dwc2_wait_timer_fn, qh);
qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;
qh->data_toggle = DWC2_HC_PID_DATA0;
qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
- TAILQ_INIT(&qh->qtd_list);
- qh->linked = 0;
+ INIT_LIST_HEAD(&qh->qtd_list);
+ INIT_LIST_HEAD(&qh->qh_list_entry);
/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
qh->ep_type == USB_ENDPOINT_XFER_ISOC,
bytecount);
+
+ /* Ensure frame_number corresponds to the reality */
+ hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
/* Start in a slightly future (micro)frame */
qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
SCHEDULE_SLOP);
*
* Return: Pointer to the newly allocated QH, or NULL on error
*/
-STATIC struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
struct dwc2_hcd_urb *urb,
gfp_t mem_flags)
{
{
struct dwc2_softc *sc = hsotg->hsotg_sc;
- if (hsotg->core_params->dma_desc_enable > 0) {
+ /*
+ * We don't have the lock so we can safely wait until the wait timer
+ * finishes. Of course, at this point in time we'd better have set
+	 * wait_timer_cancel to true so if this timer was still pending it
+ * won't do anything anyway, but we want it to finish before we free
+ * memory.
+ */
+ /* XXX del_timer_sync(&qh->wait_timer); */
+
+ timeout_del(&qh->wait_timer);
+ if (qh->desc_list) {
dwc2_hcd_qh_free_ddma(hsotg, qh);
} else if (qh->dw_align_buf) {
- /* XXXNH */
- usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->dw_align_buf_usbdma);
+ usb_freemem(&sc->sc_bus, &qh->dw_align_buf_usbdma);
+ qh->dw_align_buf_dma = (dma_addr_t)0;
}
pool_put(&sc->sc_qhpool, qh);
if (hsotg->core_params->dma_desc_enable > 0)
/* Don't rely on SOF and start in ready schedule */
- TAILQ_INSERT_TAIL(&hsotg->periodic_sched_ready, qh, qh_list_entry);
+ list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
else
/* Always start in inactive schedule */
- TAILQ_INSERT_TAIL(&hsotg->periodic_sched_inactive, qh, qh_list_entry);
- qh->linked = 1;
+ list_add_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_inactive);
if (hsotg->core_params->uframe_sched <= 0)
/* Reserve periodic channel */
{
int i;
- qh->linked = 0;
+ list_del_init(&qh->qh_list_entry);
/* Update claimed usecs per (micro)frame */
hsotg->periodic_usecs -= qh->usecs;
}
}
+/**
+ * dwc2_wait_timer_fn() - Timer function to re-queue after waiting
+ *
+ * As per the spec, a NAK indicates that "a function is temporarily unable to
+ * transmit or receive data, but will eventually be able to do so without need
+ * of host intervention".
+ *
+ * That means that when we encounter a NAK we're supposed to retry.
+ *
+ * ...but if we retry right away (from the interrupt handler that saw the NAK)
+ * then we can end up with an interrupt storm (if the other side keeps NAKing
+ * us) because on slow enough CPUs it could take us longer to get out of the
+ * interrupt routine than it takes for the device to send another NAK. That
+ * leads to a constant stream of NAK interrupts and the CPU locks up.
+ *
+ * ...so instead of retrying right away in the case of a NAK we'll set a timer
+ * to retry some time later. This function handles that timer and moves the
+ * qh back to the "inactive" list, then queues transactions.
+ *
+ * @arg: Pointer to the qh whose wait_timer fired.
+ */
+STATIC void dwc2_wait_timer_fn(void *arg)
+{
+ struct dwc2_qh *qh = arg;
+ struct dwc2_hsotg *hsotg = qh->hsotg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ /*
+ * We'll set wait_timer_cancel to true if we want to cancel this
+ * operation in dwc2_hcd_qh_unlink().
+ */
+ if (!qh->wait_timer_cancel) {
+ enum dwc2_transaction_type tr_type;
+
+ qh->want_wait = false;
+
+ list_move(&qh->qh_list_entry,
+ &hsotg->non_periodic_sched_inactive);
+
+ tr_type = dwc2_hcd_select_transactions(hsotg);
+ if (tr_type != DWC2_TRANSACTION_NONE)
+ dwc2_hcd_queue_transactions(hsotg, tr_type);
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
/**
* dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
* schedule if it is not already in the schedule. If the QH is already in
if (dbg_qh(qh))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
- if (qh->linked != 0) {
+ if (!list_empty(&qh->qh_list_entry))
/* QH already in a schedule */
return 0;
+
+ if (!dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number) &&
+ !hsotg->frame_number) {
+ dev_dbg(hsotg->dev,
+ "reset frame number counter\n");
+ qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
+ SCHEDULE_SLOP);
}
/* Add the new QH to the appropriate schedule */
if (dwc2_qh_is_non_per(qh)) {
- /* Always start in inactive schedule */
- TAILQ_INSERT_TAIL(&hsotg->non_periodic_sched_inactive, qh, qh_list_entry);
- qh->linked = 1;
+ if (qh->want_wait) {
+ list_add_tail(&qh->qh_list_entry,
+ &hsotg->non_periodic_sched_waiting);
+ qh->wait_timer_cancel = false;
+ /* XXX mod_timer(&qh->wait_timer,
+ jiffies + DWC2_RETRY_WAIT_DELAY + 1); */
+ timeout_add_msec(&qh->wait_timer,
+ DWC2_RETRY_WAIT_DELAY);
+ } else {
+ list_add_tail(&qh->qh_list_entry,
+ &hsotg->non_periodic_sched_inactive);
+ }
return 0;
}
+
status = dwc2_schedule_periodic(hsotg, qh);
if (status)
return status;
dev_vdbg(hsotg->dev, "%s()\n", __func__);
- if (qh->linked == 0) {
+ /* If the wait_timer is pending, this will stop it from acting */
+ qh->wait_timer_cancel = true;
+
+ if (list_empty(&qh->qh_list_entry))
/* QH is not in a schedule */
return;
- }
if (dwc2_qh_is_non_per(qh)) {
- if (hsotg->non_periodic_qh_ptr == qh) {
+ if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
hsotg->non_periodic_qh_ptr =
- TAILQ_NEXT(qh, qh_list_entry);
- }
-
- if (qh->channel) {
- TAILQ_REMOVE(&hsotg->non_periodic_sched_active, qh, qh_list_entry);
- } else {
- TAILQ_REMOVE(&hsotg->non_periodic_sched_inactive, qh, qh_list_entry);
- }
- qh->linked = 0;
-
- if (hsotg->non_periodic_qh_ptr == NULL)
- hsotg->non_periodic_qh_ptr = TAILQ_FIRST(&hsotg->non_periodic_sched_active);
+ hsotg->non_periodic_qh_ptr->next;
+ list_del_init(&qh->qh_list_entry);
return;
}
+
dwc2_deschedule_periodic(hsotg, qh);
hsotg->periodic_qh_count--;
if (!hsotg->periodic_qh_count) {
if (dwc2_qh_is_non_per(qh)) {
dwc2_hcd_qh_unlink(hsotg, qh);
- if (!TAILQ_EMPTY(&qh->qtd_list))
- /* Add back to inactive non-periodic schedule */
+ if (!list_empty(&qh->qtd_list))
+ /* Add back to inactive/waiting non-periodic schedule */
dwc2_hcd_qh_add(hsotg, qh);
return;
}
qh->sched_frame = frame_number;
}
- if (TAILQ_EMPTY(&qh->qtd_list)) {
+ if (list_empty(&qh->qtd_list)) {
dwc2_hcd_qh_unlink(hsotg, qh);
return;
}
* Remove from periodic_sched_queued and move to
* appropriate queue
*/
- TAILQ_REMOVE(&hsotg->periodic_sched_queued, qh, qh_list_entry);
if ((hsotg->core_params->uframe_sched > 0 &&
dwc2_frame_num_le(qh->sched_frame, frame_number)) ||
(hsotg->core_params->uframe_sched <= 0 &&
- qh->sched_frame == frame_number)) {
- TAILQ_INSERT_TAIL(&hsotg->periodic_sched_ready, qh, qh_list_entry);
- } else {
- TAILQ_INSERT_TAIL(&hsotg->periodic_sched_inactive, qh, qh_list_entry);
- }
+ qh->sched_frame == frame_number))
+ list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
+ else
+ list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive);
}
/**
/**
* dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
+ * Caller must hold driver lock.
*
- * @hsotg: The DWC HCD structure
- * @qtd: The QTD to add
- * @qh: Out parameter to return queue head
- * @mem_flags: Flag to do atomic alloc if needed
+ * @hsotg: The DWC HCD structure
+ * @qtd: The QTD to add
+ * @qh: Queue head to add qtd to
*
* Return: 0 if successful, negative error code otherwise
*
- * Finds the correct QH to place the QTD into. If it does not find a QH, it
- * will create a new QH. If the QH to which the QTD is added is not currently
- * scheduled, it is placed into the proper schedule based on its EP type.
- *
- * HCD lock must be held and interrupts must be disabled on entry
+ * If the QH to which the QTD is added is not currently scheduled, it is placed
+ * into the proper schedule based on its EP type.
*/
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
- struct dwc2_qh **qh, gfp_t mem_flags)
+ struct dwc2_qh *qh)
{
- struct dwc2_hcd_urb *urb = qtd->urb;
- int allocated = 0;
	int retval;
+
+	MUTEX_ASSERT_LOCKED(&hsotg->lock);
- /*
- * Get the QH which holds the QTD-list to insert to. Create QH if it
- * doesn't exist.
- */
- if (*qh == NULL) {
- *qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
- if (*qh == NULL)
- return -ENOMEM;
- allocated = 1;
+ if (!qh) {
+ dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
+ retval = -EINVAL;
+ goto fail;
}
- retval = dwc2_hcd_qh_add(hsotg, *qh);
+ retval = dwc2_hcd_qh_add(hsotg, qh);
if (retval)
goto fail;
- qtd->qh = *qh;
- TAILQ_INSERT_TAIL(&(*qh)->qtd_list, qtd, qtd_list_entry);
+ qtd->qh = qh;
+ list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
return 0;
-
fail:
- if (allocated) {
- struct dwc2_qtd *qtd2, *qtd2_tmp;
- struct dwc2_qh *qh_tmp = *qh;
-
- *qh = NULL;
- dwc2_hcd_qh_unlink(hsotg, qh_tmp);
-
- /* Free each QTD in the QH's QTD list */
- TAILQ_FOREACH_SAFE(qtd2, &qh_tmp->qtd_list, qtd_list_entry, qtd2_tmp)
- dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);
-
- dwc2_hcd_qh_free(hsotg, qh_tmp);
- }
-
return retval;
}
{
struct dwc2_softc *sc = hsotg->hsotg_sc;
- TAILQ_REMOVE(&qh->qtd_list, qtd, qtd_list_entry);
+ list_del_init(&qtd->qtd_list_entry);
pool_put(&sc->sc_qtdpool, qtd);
}
-/* $OpenBSD: dwc2_hw.h,v 1.2 2015/02/10 12:58:47 uebayasi Exp $ */
+/* $OpenBSD: dwc2_hw.h,v 1.3 2021/07/22 18:32:33 mglocker Exp $ */
/* $NetBSD: dwc2_hw.h,v 1.2 2013/09/25 06:19:22 skrll Exp $ */
/*
#define GUSBCFG_FSINTF (1 << 5)
#define GUSBCFG_ULPI_UTMI_SEL (1 << 4)
#define GUSBCFG_PHYIF16 (1 << 3)
+#define GUSBCFG_PHYIF8 (0 << 3)
#define GUSBCFG_TOUTCAL_MASK (0x7 << 0)
#define GUSBCFG_TOUTCAL_SHIFT 0
#define GUSBCFG_TOUTCAL_LIMIT 0x7
#define GINTSTS_RESETDET (1 << 23)
#define GINTSTS_FET_SUSP (1 << 22)
#define GINTSTS_INCOMPL_IP (1 << 21)
+#define GINTSTS_INCOMPL_SOOUT (1 << 21)
#define GINTSTS_INCOMPL_SOIN (1 << 20)
#define GINTSTS_OEPINT (1 << 19)
#define GINTSTS_IEPINT (1 << 18)
#define GHWCFG4_NUM_IN_EPS_MASK (0xf << 26)
#define GHWCFG4_NUM_IN_EPS_SHIFT 26
#define GHWCFG4_DED_FIFO_EN (1 << 25)
+#define GHWCFG4_DED_FIFO_SHIFT 25
#define GHWCFG4_SESSION_END_FILT_EN (1 << 24)
#define GHWCFG4_B_VALID_FILT_EN (1 << 23)
#define GHWCFG4_A_VALID_FILT_EN (1 << 22)
#define FIFOSIZE_DEPTH_SHIFT 16
#define FIFOSIZE_STARTADDR_MASK (0xffff << 0)
#define FIFOSIZE_STARTADDR_SHIFT 0
+#define FIFOSIZE_DEPTH_GET(_x) (((_x) >> 16) & 0xffff)
/* Device mode registers */
#define DXEPCTL_STALL (1 << 21)
#define DXEPCTL_SNP (1 << 20)
#define DXEPCTL_EPTYPE_MASK (0x3 << 18)
-#define DXEPCTL_EPTYPE_SHIFT 18
-#define DXEPCTL_EPTYPE_CONTROL 0
-#define DXEPCTL_EPTYPE_ISO 1
-#define DXEPCTL_EPTYPE_BULK 2
-#define DXEPCTL_EPTYPE_INTTERUPT 3
+#define DXEPCTL_EPTYPE_CONTROL (0x0 << 18)
+#define DXEPCTL_EPTYPE_ISO (0x1 << 18)
+#define DXEPCTL_EPTYPE_BULK (0x2 << 18)
+#define DXEPCTL_EPTYPE_INTERRUPT (0x3 << 18)
+
#define DXEPCTL_NAKSTS (1 << 17)
#define DXEPCTL_DPID (1 << 16)
#define DXEPCTL_EOFRNUM (1 << 16)
#define DIEPINT(_a) HSOTG_REG(0x908 + ((_a) * 0x20))
#define DOEPINT(_a) HSOTG_REG(0xB08 + ((_a) * 0x20))
+#define DXEPINT_SETUP_RCVD (1 << 15)
#define DXEPINT_INEPNAKEFF (1 << 6)
#define DXEPINT_BACK2BACKSETUP (1 << 6)
#define DXEPINT_INTKNEPMIS (1 << 5)
#define TSIZ_XFERSIZE_SHIFT 0
#define HCDMA(_ch) HSOTG_REG(0x0514 + 0x20 * (_ch))
-#define HCDMA_DMA_ADDR_MASK (0x1fffff << 11)
-#define HCDMA_DMA_ADDR_SHIFT 11
-#define HCDMA_CTD_MASK (0xff << 3)
-#define HCDMA_CTD_SHIFT 3
#define HCDMAB(_ch) HSOTG_REG(0x051c + 0x20 * (_ch))
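
Two of the dwc2_hw.h changes affect how callers compose register values: the
DXEPCTL_EPTYPE_* constants now carry their shift, and FIFOSIZE_DEPTH_GET()
extracts the depth field directly. A sketch of the difference; the variables
ctl, fifosize, and depth are illustrative, and the old _SHIFT constant is the
one removed above:

/* Old style: raw field value (BULK == 2), shifted by the caller. */
ctl = (ctl & ~DXEPCTL_EPTYPE_MASK) |
    (DXEPCTL_EPTYPE_BULK << DXEPCTL_EPTYPE_SHIFT);

/* New style: the constant is pre-shifted into bits 19:18. */
ctl = (ctl & ~DXEPCTL_EPTYPE_MASK) | DXEPCTL_EPTYPE_BULK;

/* FIFOSIZE_DEPTH_GET() pulls bits 31:16 out of a FIFO size word. */
depth = FIFOSIZE_DEPTH_GET(fifosize);	/* == ((fifosize) >> 16) & 0xffff */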
-/* $OpenBSD: dwc2var.h,v 1.19 2017/09/05 14:01:03 otto Exp $ */
+/* $OpenBSD: dwc2var.h,v 1.20 2021/07/22 18:32:33 mglocker Exp $ */
/* $NetBSD: dwc2var.h,v 1.3 2013/10/22 12:57:40 skrll Exp $ */
/*-
#define _DWC2VAR_H_
#include <sys/pool.h>
+#include <sys/task.h>
-struct task;
-
-#define DWC2_MAXISOCPACKETS 16
struct dwc2_hsotg;
struct dwc2_qtd;
struct usbd_xfer xfer; /* Needs to be first */
struct dwc2_hcd_urb *urb;
- int packet_count;
TAILQ_ENTRY(dwc2_xfer) xnext; /* list of complete xfers */
-
+ usbd_status intr_status;
u_int32_t flags;
-#define DWC2_XFER_ABORTING 0x0001 /* xfer is aborting. */
-#define DWC2_XFER_ABORTWAIT 0x0002 /* abort completion is being awaited. */
+#define DWC2_XFER_ABORTING 0x0001 /* xfer is aborting. */
+#define DWC2_XFER_ABORTWAIT 0x0002 /* abort completion is being awaited. */
};
struct dwc2_pipe {
bus_space_tag_t sc_iot;
bus_space_handle_t sc_ioh;
struct dwc2_core_params *sc_params;
- struct dwc2_core_dma_config *sc_dma_config;
+ int (*sc_set_dma_addr)(struct device *, bus_addr_t, int);
/*
* Private
struct dwc2_hsotg *sc_hsotg;
+ struct mutex sc_lock;
+
bool sc_hcdenabled;
void *sc_rhc_si;
struct usbd_xfer *sc_intrxfer;
- struct device *sc_child; /* /dev/usb# device */
- char sc_dying;
-#if 0
- struct usb_dma_reserve sc_dma_reserve;
-#endif
+ struct device *sc_child; /* /dev/usb# device */
char sc_vendor[32]; /* vendor string for root hub */
- int sc_id_vendor; /* vendor ID for root hub */
TAILQ_HEAD(, dwc2_xfer) sc_complete; /* complete transfers */
- uint8_t sc_addr; /* device address */
- uint8_t sc_conf; /* device configuration */
-
struct pool sc_xferpool;
struct pool sc_qhpool;
struct pool sc_qtdpool;
+ uint8_t sc_addr; /* device address */
+ uint8_t sc_conf; /* device configuration */
+
} dwc2_softc_t;
int dwc2_init(struct dwc2_softc *);
-int dwc2_dma_config(struct dwc2_softc *,
- struct dwc2_core_dma_config *);
int dwc2_intr(void *);
int dwc2_detach(dwc2_softc_t *, int);
void dwc2_host_complete(struct dwc2_hsotg *, struct dwc2_qtd *,
int);
-#define DWC2_READ_4(hsotg, reg) \
- bus_space_read_4((hsotg)->hsotg_sc->sc_iot, (hsotg)->hsotg_sc->sc_ioh, \
- (reg))
-#define DWC2_WRITE_4(hsotg, reg, data) \
- bus_space_write_4((hsotg)->hsotg_sc->sc_iot, (hsotg)->hsotg_sc->sc_ioh, \
- (reg), (data))
-
static inline void
dwc2_root_intr(dwc2_softc_t *sc)
{
softintr_schedule(sc->sc_rhc_si);
}
-// XXX compat
-
-#define ENOSR 90
-
-#ifndef mstohz
+/*
+ * XXX Compat
+ */
+#define DWC2_MAXISOCPACKETS 16 /* XXX: Fix nframes handling */
+#define ENOSR 90
+#define device_xname(d) ((d)->dv_xname)
+#define jiffies hardclock_ticks
#define mstohz(ms) \
(__predict_false((ms) >= 0x20000) ? \
((ms +0u) / 1000u) * hz : \
((ms +0u) * hz) / 1000u)
-#endif
-
-#define timeout_reset(x, t, f, a) \
-do { \
- timeout_set((x), (f), (a)); \
- timeout_add((x), (t)); \
-} while (0)
-
-#define device_xname(d) ((d)->dv_xname)
+#define msecs_to_jiffies mstohz
+#define IS_ENABLED(option) (option)
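+/*
+ * Editorial sketch: with the common hz = 100, mstohz(250) yields
+ * (250u * 100) / 1000u = 25 ticks.  The 0x20000 guard above divides
+ * before multiplying for large arguments so the 32-bit product
+ * (ms * hz) cannot overflow, at the cost of millisecond rounding:
+ * mstohz(200000) = (200000u / 1000u) * 100 = 20000 ticks.
+ */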
#endif /* _DWC2VAR_H_ */
--- /dev/null
+/* $OpenBSD: list.h,v 1.1 2021/07/22 18:32:33 mglocker Exp $ */
+/*
+ * Linux list functions for the BSDs, cut down for dwctwo(4).
+ * Created: Mon Apr 7 14:30:16 1999 by anholt@FreeBSD.org
+ */
+/*-
+ * Copyright 2003 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <anholt@FreeBSD.org>
+ *
+ */
+
+#ifndef _DWC2_LINUX_LIST_H_
+#define _DWC2_LINUX_LIST_H_
+
+/*
+ * From <dev/pci/drm/include/linux/kernel.h>
+ */
+#define container_of(ptr, type, member) ({ \
+ const __typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
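+
+/*
+ * Editorial sketch (hypothetical "struct foo"): container_of() maps a
+ * pointer to an embedded member back to its enclosing structure by
+ * subtracting the member's byte offset:
+ *
+ *	struct foo {
+ *		int			val;
+ *		struct list_head	entry;
+ *	};
+ *
+ *	struct foo *f = container_of(lh, struct foo, entry);
+ */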
+
+/*
+ * From <dev/pci/drm/include/linux/types.h>
+ */
+typedef unsigned int gfp_t;
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+/*
+ * From <dev/pci/drm/include/linux/list.h>
+ */
+#define list_entry(ptr, type, member) container_of(ptr, type, member)
+
+static inline void
+INIT_LIST_HEAD(struct list_head *head)
+{
+ (head)->next = head;
+ (head)->prev = head;
+}
+
+static inline int
+list_empty(const struct list_head *head)
+{
+ return (head)->next == head;
+}
+
+static inline void
+list_add(struct list_head *new, struct list_head *head)
+{
+ (head)->next->prev = new;
+ (new)->next = (head)->next;
+ (new)->prev = head;
+ (head)->next = new;
+}
+
+static inline void
+list_add_tail(struct list_head *entry, struct list_head *head)
+{
+ (entry)->prev = (head)->prev;
+ (entry)->next = head;
+ (head)->prev->next = entry;
+ (head)->prev = entry;
+}
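+
+/*
+ * Editorial note: list_add() inserts right after the head (LIFO order),
+ * while list_add_tail() inserts just before the head (FIFO order);
+ * dwc2_hcd_qtd_add() above uses list_add_tail() so QTDs complete in
+ * submission order.
+ */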
+
+static inline void
+list_del(struct list_head *entry)
+{
+ (entry)->next->prev = (entry)->prev;
+ (entry)->prev->next = (entry)->next;
+}
+
+static inline void
+list_move(struct list_head *list, struct list_head *head)
+{
+ list_del(list);
+ list_add(list, head);
+}
+
+static inline void
+list_del_init(struct list_head *entry)
+{
+	list_del(entry);
+	INIT_LIST_HEAD(entry);
+}
+
+#define list_for_each(entry, head) \
+ for (entry = (head)->next; entry != head; entry = (entry)->next)
+
+#define list_for_each_safe(entry, temp, head) \
+ for (entry = (head)->next, temp = (entry)->next; \
+ entry != head; \
+ entry = temp, temp = entry->next)
+
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_entry((head)->next, __typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.next, __typeof(*pos), member))
+
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_entry((head)->next, __typeof(*pos), member), \
+ n = list_entry(pos->member.next, __typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, __typeof(*n), member))
+
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+#endif /* _DWC2_LINUX_LIST_H_ */
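
A self-contained usage sketch of the list API above, mirroring how the driver
unlinks and frees QTD lists. struct item, drain(), and the "list.h" include
path are hypothetical; __typeof in the macros assumes a GCC-compatible
compiler:

#include <stddef.h>	/* offsetof, used by container_of() */
#include <stdlib.h>	/* malloc, free */

#include "list.h"	/* the header above */

struct item {
	int			val;
	struct list_head	entry;	/* embedded linkage */
};

/* Unlink and free every element; "tmp" keeps the walk valid across
 * list_del_init(), which is why the _safe variant is required here. */
static void
drain(struct list_head *head)
{
	struct item *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, head, entry) {
		list_del_init(&pos->entry);
		free(pos);
	}
	/* list_empty(head) now holds. */
}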