/*	$OpenBSD: dwqe.c,v 1.6 2023/04/22 06:36:35 dlg Exp $	*/
/*
 * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
 * Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Synopsys DesignWare ethernet controller.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <net/if.h>
#include <net/if_media.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_gpio.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/ofw_pinctrl.h>
#include <dev/ofw/ofw_regulator.h>
#include <dev/ofw/fdt.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/ic/dwqevar.h>
#include <dev/ic/dwqereg.h>

struct cfdriver dwqe_cd = {
	NULL, "dwqe", DV_IFNET
};

uint32_t dwqe_read(struct dwqe_softc *, bus_addr_t);
void	dwqe_write(struct dwqe_softc *, bus_addr_t, uint32_t);

int	dwqe_ioctl(struct ifnet *, u_long, caddr_t);
void	dwqe_start(struct ifqueue *);
void	dwqe_watchdog(struct ifnet *);

int	dwqe_media_change(struct ifnet *);
void	dwqe_media_status(struct ifnet *, struct ifmediareq *);

int	dwqe_mii_readreg(struct device *, int, int);
void	dwqe_mii_writereg(struct device *, int, int, int);
void	dwqe_mii_statchg(struct device *);

void	dwqe_lladdr_read(struct dwqe_softc *, uint8_t *);
void	dwqe_lladdr_write(struct dwqe_softc *);

void	dwqe_tick(void *);
void	dwqe_rxtick(void *);

int	dwqe_intr(void *);
void	dwqe_tx_proc(struct dwqe_softc *);
void	dwqe_rx_proc(struct dwqe_softc *);

void	dwqe_up(struct dwqe_softc *);
void	dwqe_down(struct dwqe_softc *);
void	dwqe_iff(struct dwqe_softc *);
int	dwqe_encap(struct dwqe_softc *, struct mbuf *, int *, int *);

void	dwqe_reset(struct dwqe_softc *);

struct dwqe_dmamem *
	dwqe_dmamem_alloc(struct dwqe_softc *, bus_size_t, bus_size_t);
void	dwqe_dmamem_free(struct dwqe_softc *, struct dwqe_dmamem *);
struct mbuf *dwqe_alloc_mbuf(struct dwqe_softc *, bus_dmamap_t);
void	dwqe_fill_rx_ring(struct dwqe_softc *);

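/*
 * Common attachment code.  The bus glue (e.g. the FDT front-end) is
 * expected to have mapped the registers, initialized sc_dmat and filled
 * in sc_lladdr before calling dwqe_attach(): this routine prints the
 * core revision, caches the hardware feature words, programs the DMA
 * engine and channel 0, attaches the PHY and registers the interface.
 */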
int
dwqe_attach(struct dwqe_softc *sc)
{
	struct ifnet *ifp;
	uint32_t version, mode;
	int i, mii_flags = 0;

	version = dwqe_read(sc, GMAC_VERSION);
	printf(": rev 0x%02x, address %s\n", version & GMAC_VERSION_SNPS_MASK,
	    ether_sprintf(sc->sc_lladdr));

	for (i = 0; i < 4; i++)
		sc->sc_hw_feature[i] = dwqe_read(sc, GMAC_MAC_HW_FEATURE(i));

	timeout_set(&sc->sc_tick, dwqe_tick, sc);
	timeout_set(&sc->sc_rxto, dwqe_rxtick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = dwqe_ioctl;
	ifp->if_qstart = dwqe_start;
	ifp->if_watchdog = dwqe_watchdog;
	ifq_set_maxlen(&ifp->if_snd, DWQE_NTXDESC - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = dwqe_mii_readreg;
	sc->sc_mii.mii_writereg = dwqe_mii_writereg;
	sc->sc_mii.mii_statchg = dwqe_mii_statchg;

	ifmedia_init(&sc->sc_media, 0, dwqe_media_change, dwqe_media_status);

	dwqe_reset(sc);

	/* Configure DMA engine. */
	mode = dwqe_read(sc, GMAC_SYS_BUS_MODE);
	if (sc->sc_fixed_burst)
		mode |= GMAC_SYS_BUS_MODE_FB;
	if (sc->sc_mixed_burst)
		mode |= GMAC_SYS_BUS_MODE_MB;
	if (sc->sc_aal)
		mode |= GMAC_SYS_BUS_MODE_AAL;
	dwqe_write(sc, GMAC_SYS_BUS_MODE, mode);

	/* Configure channel 0. */
	mode = dwqe_read(sc, GMAC_CHAN_CONTROL(0));
	if (sc->sc_8xpbl)
		mode |= GMAC_CHAN_CONTROL_8XPBL;
	dwqe_write(sc, GMAC_CHAN_CONTROL(0), mode);

	mode = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
	mode &= ~GMAC_CHAN_TX_CONTROL_PBL_MASK;
	mode |= sc->sc_txpbl << GMAC_CHAN_TX_CONTROL_PBL_SHIFT;
	mode |= GMAC_CHAN_TX_CONTROL_OSP;
	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), mode);
	mode = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
	mode &= ~GMAC_CHAN_RX_CONTROL_RPBL_MASK;
	mode |= sc->sc_rxpbl << GMAC_CHAN_RX_CONTROL_RPBL_SHIFT;
	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), mode);

	/* Configure AXI master. */
	if (sc->sc_axi_config) {
		mode = dwqe_read(sc, GMAC_SYS_BUS_MODE);

		mode &= ~GMAC_SYS_BUS_MODE_EN_LPI;
		if (sc->sc_lpi_en)
			mode |= GMAC_SYS_BUS_MODE_EN_LPI;
		mode &= ~GMAC_SYS_BUS_MODE_LPI_XIT_FRM;
		if (sc->sc_xit_frm)
			mode |= GMAC_SYS_BUS_MODE_LPI_XIT_FRM;

		mode &= ~GMAC_SYS_BUS_MODE_WR_OSR_LMT_MASK;
		mode |= (sc->sc_wr_osr_lmt << GMAC_SYS_BUS_MODE_WR_OSR_LMT_SHIFT);
		mode &= ~GMAC_SYS_BUS_MODE_RD_OSR_LMT_MASK;
		mode |= (sc->sc_rd_osr_lmt << GMAC_SYS_BUS_MODE_RD_OSR_LMT_SHIFT);

		for (i = 0; i < nitems(sc->sc_blen); i++) {
			switch (sc->sc_blen[i]) {
			case 256:
				mode |= GMAC_SYS_BUS_MODE_BLEN_256;
				break;
			case 128:
				mode |= GMAC_SYS_BUS_MODE_BLEN_128;
				break;
			case 64:
				mode |= GMAC_SYS_BUS_MODE_BLEN_64;
				break;
			case 32:
				mode |= GMAC_SYS_BUS_MODE_BLEN_32;
				break;
			case 16:
				mode |= GMAC_SYS_BUS_MODE_BLEN_16;
				break;
			case 8:
				mode |= GMAC_SYS_BUS_MODE_BLEN_8;
				break;
			case 4:
				mode |= GMAC_SYS_BUS_MODE_BLEN_4;
				break;
			}
		}

		dwqe_write(sc, GMAC_SYS_BUS_MODE, mode);
	}

	switch (sc->sc_phy_mode) {
	case DWQE_PHY_MODE_RGMII:
		mii_flags |= MIIF_SETDELAY;
		break;
	case DWQE_PHY_MODE_RGMII_ID:
		mii_flags |= MIIF_SETDELAY | MIIF_RXID | MIIF_TXID;
		break;
	case DWQE_PHY_MODE_RGMII_RXID:
		mii_flags |= MIIF_SETDELAY | MIIF_RXID;
		break;
	case DWQE_PHY_MODE_RGMII_TXID:
		mii_flags |= MIIF_SETDELAY | MIIF_TXID;
		break;
	default:
		break;
	}

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
	    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, mii_flags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* Disable interrupts. */
	dwqe_write(sc, GMAC_INT_EN, 0);
	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0), 0);

	return 0;
}

uint32_t
dwqe_read(struct dwqe_softc *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, addr);
}

void
dwqe_write(struct dwqe_softc *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, addr, data);
}

void
dwqe_lladdr_read(struct dwqe_softc *sc, uint8_t *lladdr)
{
	uint32_t machi, maclo;

	machi = dwqe_read(sc, GMAC_MAC_ADDR0_HI);
	maclo = dwqe_read(sc, GMAC_MAC_ADDR0_LO);

	if (machi || maclo) {
		lladdr[0] = (maclo >> 0) & 0xff;
		lladdr[1] = (maclo >> 8) & 0xff;
		lladdr[2] = (maclo >> 16) & 0xff;
		lladdr[3] = (maclo >> 24) & 0xff;
		lladdr[4] = (machi >> 0) & 0xff;
		lladdr[5] = (machi >> 8) & 0xff;
	} else {
		ether_fakeaddr(&sc->sc_ac.ac_if);
	}
}

void
dwqe_lladdr_write(struct dwqe_softc *sc)
{
	dwqe_write(sc, GMAC_MAC_ADDR0_HI,
	    sc->sc_lladdr[5] << 8 | sc->sc_lladdr[4] << 0);
	dwqe_write(sc, GMAC_MAC_ADDR0_LO,
	    sc->sc_lladdr[3] << 24 | sc->sc_lladdr[2] << 16 |
	    sc->sc_lladdr[1] << 8 | sc->sc_lladdr[0] << 0);
}

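/*
 * Transmit start routine for the MP-safe ifq API.  Ring space is
 * claimed up front: a packet is only dequeued if the ring still has
 * room for a worst-case DWQE_NTXSEGS fragments plus one spare slot,
 * so a partially written descriptor chain never has to be unwound.
 */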
void
dwqe_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct dwqe_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int error, idx, left, used;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;

	idx = sc->sc_tx_prod;
	left = sc->sc_tx_cons;
	if (left <= idx)
		left += DWQE_NTXDESC;
	left -= idx;
	used = 0;

	for (;;) {
		if (used + DWQE_NTXSEGS + 1 > left) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		error = dwqe_encap(sc, m, &idx, &used);
		if (error == EFBIG) {
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}

int
dwqe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct dwqe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				dwqe_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dwqe_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			dwqe_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
dwqe_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}

int
dwqe_media_change(struct ifnet *ifp)
{
	struct dwqe_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return (0);
}

void
dwqe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dwqe_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii.mii_media_active;
		ifmr->ifm_status = sc->sc_mii.mii_media_status;
	}
}

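/*
 * MII access goes through the MAC's built-in MDIO master: an access is
 * started by writing the PHY address, register number and the GB
 * (busy) bit to GMAC_MAC_MDIO_ADDR, and completion is signalled by the
 * hardware clearing GB again, which both routines below poll for with
 * a bounded loop.
 */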
int
dwqe_mii_readreg(struct device *self, int phy, int reg)
{
	struct dwqe_softc *sc = (void *)self;
	int n;

	dwqe_write(sc, GMAC_MAC_MDIO_ADDR,
	    sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT |
	    (phy << GMAC_MAC_MDIO_ADDR_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDR_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDR_GOC_READ |
	    GMAC_MAC_MDIO_ADDR_GB);

	for (n = 0; n < 2000; n++) {
		delay(10);
		if ((dwqe_read(sc, GMAC_MAC_MDIO_ADDR) &
		    GMAC_MAC_MDIO_ADDR_GB) == 0)
			return dwqe_read(sc, GMAC_MAC_MDIO_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

void
dwqe_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct dwqe_softc *sc = (void *)self;
	int n;

	dwqe_write(sc, GMAC_MAC_MDIO_DATA, val);
	dwqe_write(sc, GMAC_MAC_MDIO_ADDR,
	    sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT |
	    (phy << GMAC_MAC_MDIO_ADDR_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDR_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDR_GOC_WRITE |
	    GMAC_MAC_MDIO_ADDR_GB);

	for (n = 0; n < 2000; n++) {
		delay(10);
		if ((dwqe_read(sc, GMAC_MAC_MDIO_ADDR) &
		    GMAC_MAC_MDIO_ADDR_GB) == 0)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

void
dwqe_mii_statchg(struct device *self)
{
	struct dwqe_softc *sc = (void *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t conf;

	conf = dwqe_read(sc, GMAC_MAC_CONF);
	conf &= ~(GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES);

	switch (ifp->if_baudrate) {
	case IF_Gbps(1):
		sc->sc_link = 1;
		break;
	case IF_Mbps(100):
		conf |= GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES;
		sc->sc_link = 1;
		break;
	case IF_Mbps(10):
		conf |= GMAC_MAC_CONF_PS;
		sc->sc_link = 1;
		break;
	default:
		sc->sc_link = 0;
		return;
	}

	if (sc->sc_link == 0)
		return;

	conf &= ~GMAC_MAC_CONF_DM;
	if (ifp->if_link_state == LINK_STATE_FULL_DUPLEX)
		conf |= GMAC_MAC_CONF_DM;

	dwqe_write(sc, GMAC_MAC_CONF, conf);
}

void
dwqe_tick(void *arg)
{
	struct dwqe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

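/*
 * The rxto timeout only fires when the RX ring has run completely dry
 * (see dwqe_fill_rx_ring()).  Recovery is heavy-handed: point the DMA
 * engine away from the ring, reset the producer and consumer indexes,
 * refill every slot and reprogram the ring base address.
 */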
void
dwqe_rxtick(void *arg)
{
	struct dwqe_softc *sc = arg;
	int s;

	s = splnet();

	/* TODO: disable RXQ? */
	printf("%s:%d\n", __func__, __LINE__);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
	    0, DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), 0);
	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), 0);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwqe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
	    0, DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0),
	    DWQE_DMA_DVA(sc->sc_rxring) >> 32);
	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring));

	/* TODO: re-enable RXQ? */

	splx(s);
}

int
dwqe_intr(void *arg)
{
	struct dwqe_softc *sc = arg;
	uint32_t reg;

	reg = dwqe_read(sc, GMAC_INT_STATUS);
	dwqe_write(sc, GMAC_INT_STATUS, reg);

	reg = dwqe_read(sc, GMAC_CHAN_STATUS(0));
	dwqe_write(sc, GMAC_CHAN_STATUS(0), reg);

	if (reg & GMAC_CHAN_STATUS_RI)
		dwqe_rx_proc(sc);

	if (reg & GMAC_CHAN_STATUS_TI)
		dwqe_tx_proc(sc);

	return (1);
}

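/*
 * Transmit completion: walk the ring from sc_tx_cons until a
 * descriptor that is still owned by the hardware (TDES3_OWN) is hit,
 * unloading and freeing the mbuf attached to each finished slot, then
 * restart the send queue if it had been marked oactive.
 */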
void
dwqe_tx_proc(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_desc *txd;
	struct dwqe_buf *txb;
	int idx, txfree;

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring), 0,
	    DWQE_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	txfree = 0;
	while (sc->sc_tx_cons != sc->sc_tx_prod) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < DWQE_NTXDESC);

		txd = &sc->sc_txdesc[idx];
		if (txd->sd_tdes3 & TDES3_OWN)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		txfree++;

		if (sc->sc_tx_cons == (DWQE_NTXDESC - 1))
			sc->sc_tx_cons = 0;
		else
			sc->sc_tx_cons++;

		txd->sd_tdes3 = 0;
	}

	if (sc->sc_tx_cons == sc->sc_tx_prod)
		ifp->if_timer = 0;

	if (txfree) {
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}

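/*
 * Receive processing: harvest filled descriptors into an mbuf list,
 * strip the trailing CRC, hand the whole list to the stack in one go
 * and replenish the ring.  The if_rxr accounting also drives livelock
 * detection via if_rxr_livelocked().
 */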
void
dwqe_rx_proc(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_desc *rxd;
	struct dwqe_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len, cnt, put;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring), 0,
	    DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	cnt = if_rxr_inuse(&sc->sc_rx_ring);
	put = 0;
	while (put < cnt) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < DWQE_NRXDESC);

		rxd = &sc->sc_rxdesc[idx];
		if (rxd->sd_tdes3 & RDES3_OWN)
			break;

		len = rxd->sd_tdes3 & RDES3_LENGTH;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->tb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		/* Strip off CRC. */
		len -= ETHER_CRC_LEN;
		KASSERT(len > 0);

		m = rxb->tb_m;
		rxb->tb_m = NULL;

		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		put++;
		if (sc->sc_rx_cons == (DWQE_NRXDESC - 1))
			sc->sc_rx_cons = 0;
		else
			sc->sc_rx_cons++;
	}

	if_rxr_put(&sc->sc_rx_ring, put);
	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	dwqe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring), 0,
	    DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

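/*
 * Bring the interface up: allocate and program both descriptor rings,
 * apply the MAC address and multicast filter, size the MTL FIFOs from
 * the hardware feature words and finally start DMA, flow control and
 * the per-channel interrupts.
 */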
void
dwqe_up(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_buf *txb, *rxb;
	uint32_t mode, reg, tqs, rqs;
	int i;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = dwqe_dmamem_alloc(sc,
	    DWQE_NTXDESC * sizeof(struct dwqe_desc), 8);
	sc->sc_txdesc = DWQE_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct dwqe_buf) * DWQE_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < DWQE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, DWQE_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
	    0, DWQE_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	dwqe_write(sc, GMAC_CHAN_TX_BASE_ADDR_HI(0),
	    DWQE_DMA_DVA(sc->sc_txring) >> 32);
	dwqe_write(sc, GMAC_CHAN_TX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_txring));
	dwqe_write(sc, GMAC_CHAN_TX_RING_LEN(0), DWQE_NTXDESC - 1);
	dwqe_write(sc, GMAC_CHAN_TX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_txring));

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = dwqe_dmamem_alloc(sc,
	    DWQE_NRXDESC * sizeof(struct dwqe_desc), 8);
	sc->sc_rxdesc = DWQE_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct dwqe_buf) * DWQE_NRXDESC,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < DWQE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	if_rxr_init(&sc->sc_rx_ring, 2, DWQE_NRXDESC);

	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0),
	    DWQE_DMA_DVA(sc->sc_rxring) >> 32);
	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring));
	dwqe_write(sc, GMAC_CHAN_RX_RING_LEN(0), DWQE_NRXDESC - 1);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwqe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
	    0, DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwqe_lladdr_write(sc);

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	dwqe_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	dwqe_write(sc, GMAC_MAC_1US_TIC_CTR, (sc->sc_clk / 1000000) - 1);

	/* Start receive DMA */
	reg = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
	reg |= GMAC_CHAN_RX_CONTROL_SR;
	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), reg);

	/* Start transmit DMA */
	reg = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
	reg |= GMAC_CHAN_TX_CONTROL_ST;
	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), reg);

	mode = dwqe_read(sc, GMAC_MTL_CHAN_RX_OP_MODE(0));
	if (sc->sc_force_thresh_dma_mode) {
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RSF;
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RTC_MASK;
		mode |= GMAC_MTL_CHAN_RX_OP_MODE_RTC_128;
	} else
		mode |= GMAC_MTL_CHAN_RX_OP_MODE_RSF;
	mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RQS_MASK;
	rqs = (128 << GMAC_MAC_HW_FEATURE1_RXFIFOSIZE(sc->sc_hw_feature[1]) /
	    256) - 1;
	mode |= rqs << GMAC_MTL_CHAN_RX_OP_MODE_RQS_SHIFT;
	dwqe_write(sc, GMAC_MTL_CHAN_RX_OP_MODE(0), mode);

	mode = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
	if (sc->sc_force_thresh_dma_mode) {
		mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TSF;
		mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TTC_MASK;
		mode |= GMAC_MTL_CHAN_TX_OP_MODE_TTC_128;
	} else
		mode |= GMAC_MTL_CHAN_TX_OP_MODE_TSF;
	mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TXQEN_MASK;
	mode |= GMAC_MTL_CHAN_TX_OP_MODE_TXQEN;
	mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TQS_MASK;
	tqs = (128 << GMAC_MAC_HW_FEATURE1_TXFIFOSIZE(sc->sc_hw_feature[1]) /
	    256) - 1;
	mode |= tqs << GMAC_MTL_CHAN_TX_OP_MODE_TQS_SHIFT;
	dwqe_write(sc, GMAC_MTL_CHAN_TX_OP_MODE(0), mode);

	reg = dwqe_read(sc, GMAC_QX_TX_FLOW_CTRL(0));
	reg |= 0xffffU << GMAC_QX_TX_FLOW_CTRL_PT_SHIFT;
	reg |= GMAC_QX_TX_FLOW_CTRL_TFE;
	dwqe_write(sc, GMAC_QX_TX_FLOW_CTRL(0), reg);
	reg = dwqe_read(sc, GMAC_RX_FLOW_CTRL);
	reg |= GMAC_RX_FLOW_CTRL_RFE;
	dwqe_write(sc, GMAC_RX_FLOW_CTRL, reg);

	dwqe_write(sc, GMAC_RXQ_CTRL0, GMAC_RXQ_CTRL0_DCB_QUEUE_EN(0));

	dwqe_write(sc, GMAC_MAC_CONF, dwqe_read(sc, GMAC_MAC_CONF) |
	    GMAC_MAC_CONF_BE | GMAC_MAC_CONF_JD | GMAC_MAC_CONF_JE |
	    GMAC_MAC_CONF_DCRS | GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE);

	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0),
	    GMAC_CHAN_INTR_ENA_NIE |
	    GMAC_CHAN_INTR_ENA_AIE |
	    GMAC_CHAN_INTR_ENA_FBE |
	    GMAC_CHAN_INTR_ENA_RIE |
	    GMAC_CHAN_INTR_ENA_TIE);

	timeout_add_sec(&sc->sc_tick, 1);
}

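/*
 * Tear-down order matters on the way down: stop the receiver first,
 * then both DMA engines, flush the TX FIFO, and only then disable the
 * transmitter and interrupts before the rings are torn down.
 */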
void
dwqe_down(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_buf *txb, *rxb;
	uint32_t reg;
	int i;

	timeout_del(&sc->sc_rxto);
	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Disable receiver */
	reg = dwqe_read(sc, GMAC_MAC_CONF);
	reg &= ~GMAC_MAC_CONF_RE;
	dwqe_write(sc, GMAC_MAC_CONF, reg);

	/* Stop receive DMA */
	reg = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
	reg &= ~GMAC_CHAN_RX_CONTROL_SR;
	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), reg);

	/* Stop transmit DMA */
	reg = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
	reg &= ~GMAC_CHAN_TX_CONTROL_ST;
	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), reg);

	/* Flush data in the TX FIFO */
	reg = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
	reg |= GMAC_MTL_CHAN_TX_OP_MODE_FTQ;
	dwqe_write(sc, GMAC_MTL_CHAN_TX_OP_MODE(0), reg);
	/* Wait for flush to complete */
	for (i = 10000; i > 0; i--) {
		reg = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
		if ((reg & GMAC_MTL_CHAN_TX_OP_MODE_FTQ) == 0)
			break;
		delay(1);
	}
	if (i == 0) {
		printf("%s: timeout flushing TX queue\n",
		    sc->sc_dev.dv_xname);
	}

	/* Disable transmitter */
	reg = dwqe_read(sc, GMAC_MAC_CONF);
	reg &= ~GMAC_MAC_CONF_TE;
	dwqe_write(sc, GMAC_MAC_CONF, reg);

	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0), 0);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	for (i = 0; i < DWQE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	dwqe_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < DWQE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	dwqe_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);
}

/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

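/*
 * Program the packet filter.  For multicast the MAC uses a 64-bit hash
 * table: the upper 6 bits of the bit-reversed CRC32 of the destination
 * address select one of 64 bits spread over two 32-bit registers.  For
 * example, a hash value of 0x23 (35) sets bit 3 (0x23 & 0x1f) in hash
 * register 1 (0x23 >> 5).
 */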
void
dwqe_iff(struct dwqe_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hash[2], hashbit, hashreg;
	uint32_t reg = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;
	bzero(hash, sizeof(hash));
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg |= GMAC_MAC_PACKET_FILTER_PM;
		if (ifp->if_flags & IFF_PROMISC)
			reg |= GMAC_MAC_PACKET_FILTER_PR |
			    GMAC_MAC_PACKET_FILTER_PCF_ALL;
	} else {
		reg |= GMAC_MAC_PACKET_FILTER_HMC;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & 0x7f;

			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	dwqe_lladdr_write(sc);

	dwqe_write(sc, GMAC_MAC_HASH_TAB_REG0, hash[0]);
	dwqe_write(sc, GMAC_MAC_HASH_TAB_REG1, hash[1]);

	dwqe_write(sc, GMAC_MAC_PACKET_FILTER, reg);
}

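/*
 * Map a packet for transmission starting at ring slot *idx.  Note the
 * ownership dance: TDES3_OWN is set on every descriptor except the
 * first while the chain is written out, and only handed over on the
 * first descriptor (txd_start) once all others are in place, so the
 * hardware never sees a partially built chain.
 */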
int
dwqe_encap(struct dwqe_softc *sc, struct mbuf *m, int *idx, int *used)
{
	struct dwqe_desc *txd, *txd_start;
	bus_dmamap_t map;
	int cur, frag, i;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
		if (m_defrag(m, M_DONTWAIT))
			return (EFBIG);
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
			return (EFBIG);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	txd = txd_start = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		/* TODO: check for 32-bit vs 64-bit support */
		KASSERT((map->dm_segs[i].ds_addr >> 32) == 0);

		txd->sd_tdes0 = (uint32_t)map->dm_segs[i].ds_addr;
		txd->sd_tdes1 = (uint32_t)(map->dm_segs[i].ds_addr >> 32);
		txd->sd_tdes2 = map->dm_segs[i].ds_len;
		txd->sd_tdes3 = m->m_pkthdr.len;
		if (i == 0)
			txd->sd_tdes3 |= TDES3_FS;
		if (i == (map->dm_nsegs - 1)) {
			txd->sd_tdes2 |= TDES2_IC;
			txd->sd_tdes3 |= TDES3_LS;
		}
		if (i != 0)
			txd->sd_tdes3 |= TDES3_OWN;

		bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		cur = frag;
		if (frag == (DWQE_NTXDESC - 1)) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		KASSERT(frag != sc->sc_tx_cons);
	}

	txd_start->sd_tdes3 |= TDES3_OWN;
	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

	dwqe_write(sc, GMAC_CHAN_TX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_txring) +
	    frag * sizeof(*txd));

	KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
	sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
	sc->sc_txbuf[cur].tb_map = map;
	sc->sc_txbuf[cur].tb_m = m;

	*idx = frag;
	*used += map->dm_nsegs;

	return (0);
}

void
dwqe_reset(struct dwqe_softc *sc)
{
	int n;

	dwqe_write(sc, GMAC_BUS_MODE, dwqe_read(sc, GMAC_BUS_MODE) |
	    GMAC_BUS_MODE_SWR);

	for (n = 0; n < 30000; n++) {
		if ((dwqe_read(sc, GMAC_BUS_MODE) &
		    GMAC_BUS_MODE_SWR) == 0)
			return;
		delay(10);
	}

	printf("%s: reset timeout\n", sc->sc_dev.dv_xname);
}

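/*
 * Allocate a single physically contiguous, coherent DMA buffer and
 * keep the map, segment and kernel mapping together so the rings can
 * be torn down again with dwqe_dmamem_free().
 */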
struct dwqe_dmamem *
dwqe_dmamem_alloc(struct dwqe_softc *sc, bus_size_t size, bus_size_t align)
{
	struct dwqe_dmamem *tdm;
	int nsegs;

	tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
	tdm->tdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
		goto tdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &tdm->tdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
	    &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(tdm->tdm_kva, size);

	return (tdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
tdmfree:
	free(tdm, M_DEVBUF, 0);

	return (NULL);
}

void
dwqe_dmamem_free(struct dwqe_softc *sc, struct dwqe_dmamem *tdm)
{
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF, 0);
}

struct mbuf *
dwqe_alloc_mbuf(struct dwqe_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (!m)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

	return (m);
}

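/*
 * Top up the RX ring from sc_rx_prod, handing each fresh cluster to
 * the hardware by setting RDES3_OWN, then nudge the DMA engine by
 * advancing the tail pointer.  If the ring could not be replenished at
 * all, schedule the rxto timeout as a fallback.
 */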
void
dwqe_fill_rx_ring(struct dwqe_softc *sc)
{
	struct dwqe_desc *rxd;
	struct dwqe_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, DWQE_NRXDESC);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = dwqe_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		/* TODO: check for 32-bit vs 64-bit support */
		KASSERT((rxb->tb_map->dm_segs[0].ds_addr >> 32) == 0);

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		rxd->sd_tdes0 = (uint32_t)rxb->tb_map->dm_segs[0].ds_addr;
		rxd->sd_tdes1 =
		    (uint32_t)(rxb->tb_map->dm_segs[0].ds_addr >> 32);
		rxd->sd_tdes2 = 0;
		rxd->sd_tdes3 = RDES3_OWN | RDES3_IC | RDES3_BUF1V;

		if (sc->sc_rx_prod == (DWQE_NRXDESC - 1))
			sc->sc_rx_prod = 0;
		else
			sc->sc_rx_prod++;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);

	dwqe_write(sc, GMAC_CHAN_RX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring) +
	    sc->sc_rx_prod * sizeof(*rxd));

	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		timeout_add(&sc->sc_rxto, 1);
}