--- /dev/null
+/* $OpenBSD: if_dwqe_fdt.c,v 1.1 2023/02/13 19:18:53 patrick Exp $ */
+/*
+ * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
+ * Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Driver for the Synopsys Designware ethernet controller.
+ */
+
+#include "bpfilter.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/timeout.h>
+
+#include <machine/bus.h>
+#include <machine/fdt.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_clock.h>
+#include <dev/ofw/ofw_gpio.h>
+#include <dev/ofw/ofw_misc.h>
+#include <dev/ofw/ofw_pinctrl.h>
+#include <dev/ofw/ofw_regulator.h>
+#include <dev/ofw/fdt.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <dev/ic/dwqevar.h>
+#include <dev/ic/dwqereg.h>
+
+int dwqe_fdt_match(struct device *, void *, void *);
+void dwqe_fdt_attach(struct device *, struct device *, void *);
+void dwqe_setup_rockchip(struct dwqe_softc *);
+
+/* Autoconf glue for the FDT front-end of the shared dwqe(4) core. */
+const struct cfattach dwqe_fdt_ca = {
+	sizeof(struct dwqe_softc), dwqe_fdt_match, dwqe_fdt_attach
+};
+
+void dwqe_reset_phy(struct dwqe_softc *);
+
+/* Match Synopsys DWC Ethernet QoS (EQOS) v4.20a cores. */
+int
+dwqe_fdt_match(struct device *parent, void *cfdata, void *aux)
+{
+	struct fdt_attach_args *fa = aux;
+
+	return OF_is_compatible(fa->fa_node, "snps,dwmac-4.20a");
+}
+
+/*
+ * FDT attach: map registers, parse the device tree (PHY handle,
+ * clocks, "snps,*" tuneables), then hand off to the shared
+ * dwqe_attach().  The revision banner and the HW feature registers
+ * are printed/read by dwqe_attach(); doing it here as well printed
+ * the ": rev" line twice, so that duplication has been removed.
+ */
+void
+dwqe_fdt_attach(struct device *parent, struct device *self, void *aux)
+{
+	struct dwqe_softc *sc = (void *)self;
+	struct fdt_attach_args *faa = aux;
+	uint32_t phy, phy_supply;
+	uint32_t axi_config;
+	int node;
+
+	sc->sc_node = faa->fa_node;
+	sc->sc_iot = faa->fa_iot;
+	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
+	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
+		printf(": cannot map registers\n");
+		return;
+	}
+	sc->sc_dmat = faa->fa_dmat;
+
+	/* Decide GMAC id through address */
+	switch (faa->fa_reg[0].addr) {
+	case 0xfe2a0000:
+		sc->sc_gmac_id = 0;
+		break;
+	case 0xfe010000:
+		sc->sc_gmac_id = 1;
+		break;
+	default:
+		printf(": unknown controller\n");
+		return;
+	}
+
+	/* Lookup PHY. */
+	phy = OF_getpropint(faa->fa_node, "phy", 0);
+	if (phy == 0)
+		phy = OF_getpropint(faa->fa_node, "phy-handle", 0);
+	node = OF_getnodebyphandle(phy);
+	if (node)
+		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
+	else
+		sc->sc_phyloc = MII_PHY_ANY;
+
+	pinctrl_byname(faa->fa_node, "default");
+
+	/* Enable clocks. */
+	clock_set_assigned(faa->fa_node);
+	clock_enable(faa->fa_node, "stmmaceth");
+	reset_deassert(faa->fa_node, "stmmaceth");
+	if (OF_is_compatible(faa->fa_node, "rockchip,rk3568-gmac")) {
+		clock_enable(faa->fa_node, "mac_clk_rx");
+		clock_enable(faa->fa_node, "mac_clk_tx");
+		clock_enable(faa->fa_node, "aclk_mac");
+		clock_enable(faa->fa_node, "pclk_mac");
+	}
+	/* Let the clocks settle before touching the core. */
+	delay(5000);
+
+	/* Power up PHY. */
+	phy_supply = OF_getpropint(faa->fa_node, "phy-supply", 0);
+	if (phy_supply)
+		regulator_enable(phy_supply);
+
+	/* Reset PHY */
+	dwqe_reset_phy(sc);
+
+	/* Derive the MDIO clock range selector from the CSR clock. */
+	sc->sc_clk = clock_get_frequency(faa->fa_node, "stmmaceth");
+	if (sc->sc_clk > 500000000)
+		sc->sc_clk = GMAC_MAC_MDIO_ADDR_CR_500_800;
+	else if (sc->sc_clk > 300000000)
+		sc->sc_clk = GMAC_MAC_MDIO_ADDR_CR_300_500;
+	else if (sc->sc_clk > 150000000)
+		sc->sc_clk = GMAC_MAC_MDIO_ADDR_CR_150_250;
+	else if (sc->sc_clk > 100000000)
+		sc->sc_clk = GMAC_MAC_MDIO_ADDR_CR_100_150;
+	else if (sc->sc_clk > 60000000)
+		sc->sc_clk = GMAC_MAC_MDIO_ADDR_CR_60_100;
+	else if (sc->sc_clk > 35000000)
+		sc->sc_clk = GMAC_MAC_MDIO_ADDR_CR_35_60;
+	else
+		sc->sc_clk = GMAC_MAC_MDIO_ADDR_CR_20_35;
+
+	/* Fall back to the hardware address if the DT has none. */
+	if (OF_getprop(faa->fa_node, "local-mac-address",
+	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
+		dwqe_lladdr_read(sc, sc->sc_lladdr);
+
+	sc->sc_force_thresh_dma_mode =
+	    OF_getpropbool(faa->fa_node, "snps,force_thresh_dma_mode");
+
+	dwqe_reset(sc);
+
+	/* Burst/PBL tuneables consumed by dwqe_attach(). */
+	sc->sc_fixed_burst = OF_getpropbool(faa->fa_node, "snps,fixed-burst");
+	sc->sc_mixed_burst = OF_getpropbool(faa->fa_node, "snps,mixed-burst");
+	sc->sc_aal = OF_getpropbool(faa->fa_node, "snps,aal");
+	sc->sc_8xpbl = !OF_getpropbool(faa->fa_node, "snps,no-pbl-x8");
+	sc->sc_pbl = OF_getpropint(faa->fa_node, "snps,pbl", 8);
+	sc->sc_txpbl = OF_getpropint(faa->fa_node, "snps,txpbl", sc->sc_pbl);
+	sc->sc_rxpbl = OF_getpropint(faa->fa_node, "snps,rxpbl", sc->sc_pbl);
+
+	/* Configure AXI master. */
+	axi_config = OF_getpropint(faa->fa_node, "snps,axi-config", 0);
+	node = OF_getnodebyphandle(axi_config);
+	if (node) {
+		sc->sc_axi_config = 1;
+		sc->sc_lpi_en = OF_getpropbool(node, "snps,lpi_en");
+		sc->sc_xit_frm = OF_getpropbool(node, "snps,xit_frm");
+
+		sc->sc_wr_osr_lmt = OF_getpropint(node, "snps,wr_osr_lmt", 1);
+		sc->sc_rd_osr_lmt = OF_getpropint(node, "snps,rd_osr_lmt", 1);
+
+		OF_getpropintarray(node, "snps,blen", sc->sc_blen, sizeof(sc->sc_blen));
+	}
+
+	if (dwqe_attach(sc) != 0)
+		return;
+
+	/* Do hardware specific initializations. */
+	if (OF_is_compatible(faa->fa_node, "rockchip,rk3568-gmac"))
+		dwqe_setup_rockchip(sc);
+
+	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
+	    dwqe_intr, sc, sc->sc_dev.dv_xname);
+	if (sc->sc_ih == NULL)
+		printf("%s: can't establish interrupt\n", sc->sc_dev.dv_xname);
+}
+
+/*
+ * Pulse the PHY reset GPIO described by the "snps,reset-gpio"
+ * property, honouring "snps,reset-active-low" and the three
+ * "snps,reset-delays-us" intervals (pre-assert, assert, post-assert).
+ */
+void
+dwqe_reset_phy(struct dwqe_softc *sc)
+{
+	uint32_t *gpio;
+	uint32_t delays[3];
+	int active = 1;
+	int len;
+
+	len = OF_getproplen(sc->sc_node, "snps,reset-gpio");
+	if (len <= 0)
+		return;
+
+	gpio = malloc(len, M_TEMP, M_WAITOK);
+
+	/* Gather information. */
+	OF_getpropintarray(sc->sc_node, "snps,reset-gpio", gpio, len);
+	/*
+	 * The stmmac binding spells this property with a comma, like
+	 * the other "snps," properties above.  The old hyphenated
+	 * spelling ("snps-reset-active-low") never matched, so
+	 * active-low reset GPIOs were pulsed with inverted polarity.
+	 */
+	if (OF_getpropbool(sc->sc_node, "snps,reset-active-low"))
+		active = 0;
+	delays[0] = delays[1] = delays[2] = 0;
+	OF_getpropintarray(sc->sc_node, "snps,reset-delays-us", delays,
+	    sizeof(delays));
+
+	/* Perform reset sequence: deassert, assert, deassert. */
+	gpio_controller_config_pin(gpio, GPIO_CONFIG_OUTPUT);
+	gpio_controller_set_pin(gpio, !active);
+	delay(delays[0]);
+	gpio_controller_set_pin(gpio, active);
+	delay(delays[1]);
+	gpio_controller_set_pin(gpio, !active);
+	delay(delays[2]);
+
+	free(gpio, M_TEMP, len);
+}
+
+/* RK3568 registers */
+/*
+ * These target the RK3568 GRF syscon.  The upper 16 bits of each
+ * value form a write-enable mask selecting which of the lower 16
+ * bits take effect (Rockchip GRF write-mask convention).
+ */
+#define RK3568_GRF_GMACx_CON0(x)	(0x0380 + (x) * 0x8)
+#define RK3568_GMAC_CLK_RX_DL_CFG(val)	((0x7f << 8) << 16 | ((val) << 8))
+#define RK3568_GMAC_CLK_TX_DL_CFG(val)	((0x7f << 0) << 16 | ((val) << 0))
+#define RK3568_GRF_GMACx_CON1(x)	(0x0384 + (x) * 0x8)
+#define RK3568_GMAC_PHY_INTF_SEL_RGMII	((0x7 << 4) << 16 | (0x1 << 4))
+#define RK3568_GMAC_PHY_INTF_SEL_RMII	((0x7 << 4) << 16 | (0x4 << 4))
+#define RK3568_GMAC_TXCLK_DLY_ENA	((1 << 0) << 16 | (1 << 0))
+#define RK3568_GMAC_RXCLK_DLY_ENA	((1 << 1) << 16 | (1 << 1))
+
+
+void dwqe_mii_statchg_rockchip(struct device *);
+
+/*
+ * Rockchip-specific setup: program the RGMII clock delay lines and
+ * interface selection through the GRF syscon, and install the
+ * Rockchip-aware link state callback.
+ */
+void
+dwqe_setup_rockchip(struct dwqe_softc *sc)
+{
+	struct regmap *rm;
+	uint32_t grf;
+	int tx_delay, rx_delay;
+
+	grf = OF_getpropint(sc->sc_node, "rockchip,grf", 0);
+	rm = regmap_byphandle(grf);
+	if (rm == NULL)
+		return;
+
+	/* Delay-line taps; DT may override the defaults. */
+	tx_delay = OF_getpropint(sc->sc_node, "tx_delay", 0x30);
+	rx_delay = OF_getpropint(sc->sc_node, "rx_delay", 0x10);
+
+	if (OF_is_compatible(sc->sc_node, "rockchip,rk3568-gmac")) {
+		/* Program clock delay lines. */
+		regmap_write_4(rm, RK3568_GRF_GMACx_CON0(sc->sc_gmac_id),
+		    RK3568_GMAC_CLK_TX_DL_CFG(tx_delay) |
+		    RK3568_GMAC_CLK_RX_DL_CFG(rx_delay));
+
+		/* Use RGMII interface and enable clock delay. */
+		regmap_write_4(rm, RK3568_GRF_GMACx_CON1(sc->sc_gmac_id),
+		    RK3568_GMAC_PHY_INTF_SEL_RGMII |
+		    RK3568_GMAC_TXCLK_DLY_ENA |
+		    RK3568_GMAC_RXCLK_DLY_ENA);
+	}
+
+	/* Adjust MAC clocks on link changes (see below). */
+	sc->sc_mii.mii_statchg = dwqe_mii_statchg_rockchip;
+}
+
+/*
+ * Rockchip link state hook: run the generic statchg first, then
+ * retune the MAC clock for the negotiated speed — via the clock tree
+ * ("clk_mac_speed") on RK3568, via a GRF clock-select register on
+ * RK3588.
+ */
+void
+dwqe_mii_statchg_rockchip(struct device *self)
+{
+	struct dwqe_softc *sc = (void *)self;
+	struct regmap *rm;
+	uint32_t grf;
+	uint32_t gmac_clk_sel = 0;
+
+	dwqe_mii_statchg(self);
+
+	grf = OF_getpropint(sc->sc_node, "rockchip,grf", 0);
+	rm = regmap_byphandle(grf);
+	if (rm == NULL)
+		return;
+
+	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
+	case IFM_10_T:
+		/* 10 Mb/s: 2.5 MHz MAC clock. */
+		gmac_clk_sel = sc->sc_clk_sel_2_5;
+		if (OF_is_compatible(sc->sc_node, "rockchip,rk3568-gmac"))
+			clock_set_frequency(sc->sc_node, "clk_mac_speed",
+			    2500000);
+		break;
+	case IFM_100_TX:
+		/* 100 Mb/s: 25 MHz MAC clock. */
+		gmac_clk_sel = sc->sc_clk_sel_25;
+		if (OF_is_compatible(sc->sc_node, "rockchip,rk3568-gmac"))
+			clock_set_frequency(sc->sc_node, "clk_mac_speed",
+			    25000000);
+		break;
+	case IFM_1000_T:
+		/* 1000 Mb/s: 125 MHz MAC clock. */
+		gmac_clk_sel = sc->sc_clk_sel_125;
+		if (OF_is_compatible(sc->sc_node, "rockchip,rk3568-gmac"))
+			clock_set_frequency(sc->sc_node, "clk_mac_speed",
+			    125000000);
+		break;
+	}
+
+	/* RK3588 selects the MAC clock through the GRF instead. */
+	if (OF_is_compatible(sc->sc_node, "rockchip,rk3588-gmac"))
+		regmap_write_4(rm, sc->sc_clk_sel, gmac_clk_sel);
+}
--- /dev/null
+/* $OpenBSD: dwqe.c,v 1.1 2023/02/13 19:18:53 patrick Exp $ */
+/*
+ * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
+ * Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Driver for the Synopsys Designware ethernet controller.
+ */
+
+#include "bpfilter.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/timeout.h>
+
+#include <machine/bus.h>
+#include <machine/fdt.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_clock.h>
+#include <dev/ofw/ofw_gpio.h>
+#include <dev/ofw/ofw_misc.h>
+#include <dev/ofw/ofw_pinctrl.h>
+#include <dev/ofw/ofw_regulator.h>
+#include <dev/ofw/fdt.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <dev/ic/dwqevar.h>
+#include <dev/ic/dwqereg.h>
+
+/* Machine-independent core of the dwqe(4) network driver. */
+struct cfdriver dwqe_cd = {
+	NULL, "dwqe", DV_IFNET
+};
+
+/* Register accessors. */
+uint32_t dwqe_read(struct dwqe_softc *, bus_addr_t);
+void dwqe_write(struct dwqe_softc *, bus_addr_t, uint32_t);
+
+/* ifnet entry points. */
+int dwqe_ioctl(struct ifnet *, u_long, caddr_t);
+void dwqe_start(struct ifqueue *);
+void dwqe_watchdog(struct ifnet *);
+
+int dwqe_media_change(struct ifnet *);
+void dwqe_media_status(struct ifnet *, struct ifmediareq *);
+
+/* MDIO/MII glue. */
+int dwqe_mii_readreg(struct device *, int, int);
+void dwqe_mii_writereg(struct device *, int, int, int);
+void dwqe_mii_statchg(struct device *);
+
+void dwqe_lladdr_read(struct dwqe_softc *, uint8_t *);
+void dwqe_lladdr_write(struct dwqe_softc *);
+
+/* Timeouts: MII tick and Rx stall recovery. */
+void dwqe_tick(void *);
+void dwqe_rxtick(void *);
+
+/* Interrupt and descriptor ring processing. */
+int dwqe_intr(void *);
+void dwqe_tx_proc(struct dwqe_softc *);
+void dwqe_rx_proc(struct dwqe_softc *);
+
+void dwqe_up(struct dwqe_softc *);
+void dwqe_down(struct dwqe_softc *);
+void dwqe_iff(struct dwqe_softc *);
+int dwqe_encap(struct dwqe_softc *, struct mbuf *, int *, int *);
+
+void dwqe_reset(struct dwqe_softc *);
+
+/* DMA memory helpers. */
+struct dwqe_dmamem *
+	dwqe_dmamem_alloc(struct dwqe_softc *, bus_size_t, bus_size_t);
+void dwqe_dmamem_free(struct dwqe_softc *, struct dwqe_dmamem *);
+struct mbuf *dwqe_alloc_mbuf(struct dwqe_softc *, bus_dmamap_t);
+void dwqe_fill_rx_ring(struct dwqe_softc *);
+
+/*
+ * Common attach path shared by the bus front-ends.  The caller has
+ * mapped the registers, filled in sc_clk/sc_lladdr and parsed the
+ * "snps,*" tuneables before calling in here.  Returns 0 on success.
+ */
+int
+dwqe_attach(struct dwqe_softc *sc)
+{
+	struct ifnet *ifp;
+	uint32_t version, mode;
+	int i;
+
+	version = dwqe_read(sc, GMAC_VERSION);
+	printf(": rev 0x%02x, address %s", version & GMAC_VERSION_SNPS_MASK,
+	    ether_sprintf(sc->sc_lladdr));
+
+	for (i = 0; i < 4; i++)
+		sc->sc_hw_feature[i] = dwqe_read(sc, GMAC_MAC_HW_FEATURE(i));
+
+	timeout_set(&sc->sc_tick, dwqe_tick, sc);
+	timeout_set(&sc->sc_rxto, dwqe_rxtick, sc);
+
+	ifp = &sc->sc_ac.ac_if;
+	ifp->if_softc = sc;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_xflags = IFXF_MPSAFE;
+	ifp->if_ioctl = dwqe_ioctl;
+	ifp->if_qstart = dwqe_start;
+	ifp->if_watchdog = dwqe_watchdog;
+	ifq_set_maxlen(&ifp->if_snd, DWQE_NTXDESC - 1);
+	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
+
+	ifp->if_capabilities = IFCAP_VLAN_MTU;
+
+	sc->sc_mii.mii_ifp = ifp;
+	sc->sc_mii.mii_readreg = dwqe_mii_readreg;
+	sc->sc_mii.mii_writereg = dwqe_mii_writereg;
+	sc->sc_mii.mii_statchg = dwqe_mii_statchg;
+
+	ifmedia_init(&sc->sc_media, 0, dwqe_media_change, dwqe_media_status);
+
+	dwqe_reset(sc);
+
+	/* Configure DMA engine. */
+	mode = dwqe_read(sc, GMAC_SYS_BUS_MODE);
+	if (sc->sc_fixed_burst)
+		mode |= GMAC_SYS_BUS_MODE_FB;
+	if (sc->sc_mixed_burst)
+		mode |= GMAC_SYS_BUS_MODE_MB;
+	if (sc->sc_aal)
+		mode |= GMAC_SYS_BUS_MODE_AAL;
+	dwqe_write(sc, GMAC_SYS_BUS_MODE, mode);
+
+	/* Configure channel 0. */
+	mode = dwqe_read(sc, GMAC_CHAN_CONTROL(0));
+	if (sc->sc_8xpbl)
+		mode |= GMAC_CHAN_CONTROL_8XPBL;
+	dwqe_write(sc, GMAC_CHAN_CONTROL(0), mode);
+
+	mode = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
+	mode &= ~GMAC_CHAN_TX_CONTROL_PBL_MASK;
+	mode |= sc->sc_txpbl << GMAC_CHAN_TX_CONTROL_PBL_SHIFT;
+	mode |= GMAC_CHAN_TX_CONTROL_OSP;
+	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), mode);
+
+	/*
+	 * Read the Rx channel control register *before* masking in the
+	 * Rx burst length.  The previous code modified the stale Tx
+	 * value and then clobbered it with a fresh read, so the
+	 * configured RPBL never reached the hardware.
+	 */
+	mode = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
+	mode &= ~GMAC_CHAN_RX_CONTROL_RPBL_MASK;
+	mode |= sc->sc_rxpbl << GMAC_CHAN_RX_CONTROL_RPBL_SHIFT;
+	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), mode);
+
+	/* Configure AXI master. */
+	if (sc->sc_axi_config) {
+		int i;
+
+		mode = dwqe_read(sc, GMAC_SYS_BUS_MODE);
+
+		mode &= ~GMAC_SYS_BUS_MODE_EN_LPI;
+		if (sc->sc_lpi_en)
+			mode |= GMAC_SYS_BUS_MODE_EN_LPI;
+		mode &= ~GMAC_SYS_BUS_MODE_LPI_XIT_FRM;
+		if (sc->sc_xit_frm)
+			mode |= GMAC_SYS_BUS_MODE_LPI_XIT_FRM;
+
+		mode &= ~GMAC_SYS_BUS_MODE_WR_OSR_LMT_MASK;
+		mode |= (sc->sc_wr_osr_lmt << GMAC_SYS_BUS_MODE_WR_OSR_LMT_SHIFT);
+		mode &= ~GMAC_SYS_BUS_MODE_RD_OSR_LMT_MASK;
+		mode |= (sc->sc_rd_osr_lmt << GMAC_SYS_BUS_MODE_RD_OSR_LMT_SHIFT);
+
+		/* Translate the "snps,blen" list into BLEN bits. */
+		for (i = 0; i < nitems(sc->sc_blen); i++) {
+			switch (sc->sc_blen[i]) {
+			case 256:
+				mode |= GMAC_SYS_BUS_MODE_BLEN_256;
+				break;
+			case 128:
+				mode |= GMAC_SYS_BUS_MODE_BLEN_128;
+				break;
+			case 64:
+				mode |= GMAC_SYS_BUS_MODE_BLEN_64;
+				break;
+			case 32:
+				mode |= GMAC_SYS_BUS_MODE_BLEN_32;
+				break;
+			case 16:
+				mode |= GMAC_SYS_BUS_MODE_BLEN_16;
+				break;
+			case 8:
+				mode |= GMAC_SYS_BUS_MODE_BLEN_8;
+				break;
+			case 4:
+				mode |= GMAC_SYS_BUS_MODE_BLEN_4;
+				break;
+			}
+		}
+
+		dwqe_write(sc, GMAC_SYS_BUS_MODE, mode);
+	}
+
+	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
+	    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, 0);
+	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
+		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
+		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
+		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
+	} else
+		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
+
+	if_attach(ifp);
+	ether_ifattach(ifp);
+
+	/* Disable interrupts until dwqe_up(). */
+	dwqe_write(sc, GMAC_INT_EN, 0);
+	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0), 0);
+
+	return 0;
+}
+
+/* Read a 32-bit MAC/MTL/DMA register. */
+uint32_t
+dwqe_read(struct dwqe_softc *sc, bus_addr_t off)
+{
+	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
+}
+
+/* Write a 32-bit MAC/MTL/DMA register. */
+void
+dwqe_write(struct dwqe_softc *sc, bus_addr_t off, uint32_t val)
+{
+	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
+}
+
+/*
+ * Recover the station address from the MAC address registers; if
+ * they were never programmed, fall back to a generated address.
+ */
+void
+dwqe_lladdr_read(struct dwqe_softc *sc, uint8_t *lladdr)
+{
+	uint32_t hi, lo;
+	int i;
+
+	hi = dwqe_read(sc, GMAC_MAC_ADDR0_HI);
+	lo = dwqe_read(sc, GMAC_MAC_ADDR0_LO);
+
+	if (hi == 0 && lo == 0) {
+		/* Registers unprogrammed; invent an address. */
+		ether_fakeaddr(&sc->sc_ac.ac_if);
+		return;
+	}
+
+	/* Bytes 0-3 live in the LO register, 4-5 in the HI register. */
+	for (i = 0; i < 4; i++)
+		lladdr[i] = (lo >> (i * 8)) & 0xff;
+	lladdr[4] = hi & 0xff;
+	lladdr[5] = (hi >> 8) & 0xff;
+}
+
+/* Program sc_lladdr into the MAC address registers. */
+void
+dwqe_lladdr_write(struct dwqe_softc *sc)
+{
+	uint32_t hi, lo;
+
+	hi = sc->sc_lladdr[5] << 8 | sc->sc_lladdr[4];
+	lo = sc->sc_lladdr[3] << 24 | sc->sc_lladdr[2] << 16 |
+	    sc->sc_lladdr[1] << 8 | sc->sc_lladdr[0];
+
+	dwqe_write(sc, GMAC_MAC_ADDR0_HI, hi);
+	dwqe_write(sc, GMAC_MAC_ADDR0_LO, lo);
+}
+
+/*
+ * ifq start routine: dequeue packets and post them on the Tx DMA
+ * ring.  Runs without the kernel lock (IFXF_MPSAFE).
+ */
+void
+dwqe_start(struct ifqueue *ifq)
+{
+	struct ifnet *ifp = ifq->ifq_if;
+	struct dwqe_softc *sc = ifp->if_softc;
+	struct mbuf *m;
+	int error, idx, left, used;
+
+	if (!(ifp->if_flags & IFF_RUNNING))
+		return;
+	if (ifq_is_oactive(&ifp->if_snd))
+		return;
+	if (ifq_empty(&ifp->if_snd))
+		return;
+	if (!sc->sc_link)
+		return;
+
+	/* Free descriptors between producer and consumer (ring wrap). */
+	idx = sc->sc_tx_prod;
+	left = sc->sc_tx_cons;
+	if (left <= idx)
+		left += DWQE_NTXDESC;
+	left -= idx;
+	used = 0;
+
+	for (;;) {
+		/* Stop if a maximally fragmented packet might not fit. */
+		if (used + DWQE_NTXSEGS + 1 > left) {
+			ifq_set_oactive(ifq);
+			break;
+		}
+
+		m = ifq_dequeue(ifq);
+		if (m == NULL)
+			break;
+
+		error = dwqe_encap(sc, m, &idx, &used);
+		if (error == EFBIG) {
+			m_freem(m); /* give up: drop it */
+			ifp->if_oerrors++;
+			continue;
+		}
+
+#if NBPFILTER > 0
+		if (ifp->if_bpf)
+			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
+#endif
+	}
+
+	if (sc->sc_tx_prod != idx) {
+		sc->sc_tx_prod = idx;
+
+		/* Set a timeout in case the chip goes out to lunch. */
+		ifp->if_timer = 5;
+	}
+}
+
+/*
+ * Interface ioctl handler: up/down transitions, media selection and
+ * Rx ring accounting.  Multicast filter changes come back from
+ * ether_ioctl() as ENETRESET and are applied via dwqe_iff().
+ */
+int
+dwqe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
+{
+	struct dwqe_softc *sc = ifp->if_softc;
+	struct ifreq *ifr = (struct ifreq *)addr;
+	int error = 0, s;
+
+	s = splnet();
+
+	switch (cmd) {
+	case SIOCSIFADDR:
+		ifp->if_flags |= IFF_UP;
+		/* FALLTHROUGH */
+	case SIOCSIFFLAGS:
+		if (ifp->if_flags & IFF_UP) {
+			/* Already running: just reprogram the filter. */
+			if (ifp->if_flags & IFF_RUNNING)
+				error = ENETRESET;
+			else
+				dwqe_up(sc);
+		} else {
+			if (ifp->if_flags & IFF_RUNNING)
+				dwqe_down(sc);
+		}
+		break;
+
+	case SIOCGIFMEDIA:
+	case SIOCSIFMEDIA:
+		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
+		break;
+
+	case SIOCGIFRXR:
+		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
+		    NULL, MCLBYTES, &sc->sc_rx_ring);
+		break;
+
+	default:
+		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
+		break;
+	}
+
+	if (error == ENETRESET) {
+		if (ifp->if_flags & IFF_RUNNING)
+			dwqe_iff(sc);
+		error = 0;
+	}
+
+	splx(s);
+	return (error);
+}
+
+/*
+ * Watchdog: the Tx ring failed to drain within the timeout armed in
+ * dwqe_start().  Only logs for now; no reset is attempted.
+ */
+void
+dwqe_watchdog(struct ifnet *ifp)
+{
+	printf("%s\n", __func__);
+}
+
+/* ifmedia change callback: push the new media setting to the PHY. */
+int
+dwqe_media_change(struct ifnet *ifp)
+{
+	struct dwqe_softc *sc = ifp->if_softc;
+
+	/* Only meaningful when a PHY actually attached. */
+	if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
+		mii_mediachg(&sc->sc_mii);
+
+	return 0;
+}
+
+/* ifmedia status callback: report the PHY's current link state. */
+void
+dwqe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+	struct dwqe_softc *sc = ifp->if_softc;
+
+	/* Nothing to report without a PHY. */
+	if (LIST_EMPTY(&sc->sc_mii.mii_phys))
+		return;
+
+	mii_pollstat(&sc->sc_mii);
+	ifmr->ifm_active = sc->sc_mii.mii_media_active;
+	ifmr->ifm_status = sc->sc_mii.mii_media_status;
+}
+
+/*
+ * Read a PHY register over MDIO.  Kicks off the transaction and then
+ * busy-waits for the controller to clear the GB (busy) bit.
+ */
+int
+dwqe_mii_readreg(struct device *self, int phy, int reg)
+{
+	struct dwqe_softc *sc = (void *)self;
+	int n;
+
+	/* sc_clk holds the pre-computed CR (clock range) field. */
+	dwqe_write(sc, GMAC_MAC_MDIO_ADDR,
+	    sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT |
+	    (phy << GMAC_MAC_MDIO_ADDR_PA_SHIFT) |
+	    (reg << GMAC_MAC_MDIO_ADDR_RDA_SHIFT) |
+	    GMAC_MAC_MDIO_ADDR_GOC_READ |
+	    GMAC_MAC_MDIO_ADDR_GB);
+	delay(10000);
+
+	/* Poll for completion. */
+	for (n = 0; n < 1000; n++) {
+		if ((dwqe_read(sc, GMAC_MAC_MDIO_ADDR) & GMAC_MAC_MDIO_ADDR_GB) == 0)
+			return dwqe_read(sc, GMAC_MAC_MDIO_DATA);
+		delay(10);
+	}
+
+	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
+	return (0);
+}
+
+/*
+ * Write a PHY register over MDIO.  Same busy-wait protocol as
+ * dwqe_mii_readreg().
+ */
+void
+dwqe_mii_writereg(struct device *self, int phy, int reg, int val)
+{
+	struct dwqe_softc *sc = (void *)self;
+	int n;
+
+	dwqe_write(sc, GMAC_MAC_MDIO_DATA, val);
+	dwqe_write(sc, GMAC_MAC_MDIO_ADDR,
+	    sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT |
+	    (phy << GMAC_MAC_MDIO_ADDR_PA_SHIFT) |
+	    (reg << GMAC_MAC_MDIO_ADDR_RDA_SHIFT) |
+	    GMAC_MAC_MDIO_ADDR_GOC_WRITE |
+	    GMAC_MAC_MDIO_ADDR_GB);
+	delay(10000);
+	/* Poll for completion. */
+	for (n = 0; n < 1000; n++) {
+		if ((dwqe_read(sc, GMAC_MAC_MDIO_ADDR) & GMAC_MAC_MDIO_ADDR_GB) == 0)
+			return;
+		delay(10);
+	}
+
+	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
+}
+
+/*
+ * MII link state change: program the MAC speed (PS/FES) and duplex
+ * (DM) bits to match the negotiated media, and record link state for
+ * dwqe_start().
+ */
+void
+dwqe_mii_statchg(struct device *self)
+{
+	struct dwqe_softc *sc = (void *)self;
+	uint32_t conf;
+
+	conf = dwqe_read(sc, GMAC_MAC_CONF);
+	conf &= ~(GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES);
+
+	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
+	case IFM_1000_SX:
+	case IFM_1000_LX:
+	case IFM_1000_CX:
+	case IFM_1000_T:
+		/* 1000 Mb/s: PS and FES stay clear. */
+		sc->sc_link = 1;
+		break;
+	case IFM_100_TX:
+		conf |= GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES;
+		sc->sc_link = 1;
+		break;
+	case IFM_10_T:
+		conf |= GMAC_MAC_CONF_PS;
+		sc->sc_link = 1;
+		break;
+	default:
+		sc->sc_link = 0;
+		return;
+	}
+
+	/* NOTE(review): unreachable — every case above sets sc_link = 1. */
+	if (sc->sc_link == 0)
+		return;
+
+	conf &= ~GMAC_MAC_CONF_DM;
+	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
+		conf |= GMAC_MAC_CONF_DM;
+
+	dwqe_write(sc, GMAC_MAC_CONF, conf);
+}
+
+/* Once-a-second timeout: poll the PHY for link changes. */
+void
+dwqe_tick(void *arg)
+{
+	struct dwqe_softc *sc = arg;
+	int spl;
+
+	spl = splnet();
+	mii_tick(&sc->sc_mii);
+	splx(spl);
+
+	/* Re-arm for the next second. */
+	timeout_add_sec(&sc->sc_tick, 1);
+}
+
+/*
+ * Rx stall recovery timeout: re-initialize the Rx DMA ring and point
+ * the hardware back at it.
+ */
+void
+dwqe_rxtick(void *arg)
+{
+	struct dwqe_softc *sc = arg;
+	int s;
+
+	s = splnet();
+
+	/* TODO: disable RXQ? */
+	printf("%s:%d\n", __func__, __LINE__);
+
+	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
+	    0, DWQE_DMA_LEN(sc->sc_rxring),
+	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+
+	/* Detach the hardware from the ring while we rebuild it. */
+	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), 0);
+	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), 0);
+
+	sc->sc_rx_prod = sc->sc_rx_cons = 0;
+	dwqe_fill_rx_ring(sc);
+
+	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
+	    0, DWQE_DMA_LEN(sc->sc_rxring),
+	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+	/* Re-attach the ring. */
+	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), DWQE_DMA_DVA(sc->sc_rxring) >> 32);
+	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring));
+
+	/* TODO: re-enable RXQ? */
+
+	splx(s);
+}
+
+/*
+ * Interrupt handler: acknowledge the MAC and DMA channel 0 status
+ * and run the Rx/Tx completion processing as indicated.
+ */
+int
+dwqe_intr(void *arg)
+{
+	struct dwqe_softc *sc = arg;
+	uint32_t mac_status, chan_status;
+
+	/* Read and acknowledge the global interrupt status. */
+	mac_status = dwqe_read(sc, GMAC_INT_STATUS);
+	dwqe_write(sc, GMAC_INT_STATUS, mac_status);
+
+	/* Read and acknowledge the DMA channel 0 status. */
+	chan_status = dwqe_read(sc, GMAC_CHAN_STATUS(0));
+	dwqe_write(sc, GMAC_CHAN_STATUS(0), chan_status);
+
+	if (chan_status & GMAC_CHAN_STATUS_RI)
+		dwqe_rx_proc(sc);
+
+	if (chan_status & GMAC_CHAN_STATUS_TI)
+		dwqe_tx_proc(sc);
+
+	return 1;
+}
+
+/*
+ * Tx completion: walk the ring from the consumer index, reclaim
+ * descriptors the hardware has released (OWN bit clear) and free the
+ * associated mbufs.
+ */
+void
+dwqe_tx_proc(struct dwqe_softc *sc)
+{
+	struct ifnet *ifp = &sc->sc_ac.ac_if;
+	struct dwqe_desc *txd;
+	struct dwqe_buf *txb;
+	int idx, txfree;
+
+	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring), 0,
+	    DWQE_DMA_LEN(sc->sc_txring),
+	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+	txfree = 0;
+	while (sc->sc_tx_cons != sc->sc_tx_prod) {
+		idx = sc->sc_tx_cons;
+		KASSERT(idx < DWQE_NTXDESC);
+
+		/* Stop at the first descriptor the chip still owns. */
+		txd = &sc->sc_txdesc[idx];
+		if (txd->sd_tdes3 & TDES3_OWN)
+			break;
+
+		txb = &sc->sc_txbuf[idx];
+		if (txb->tb_m) {
+			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
+			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
+
+			m_freem(txb->tb_m);
+			txb->tb_m = NULL;
+		}
+
+		txfree++;
+
+		/* Advance the consumer index with wrap. */
+		if (sc->sc_tx_cons == (DWQE_NTXDESC - 1))
+			sc->sc_tx_cons = 0;
+		else
+			sc->sc_tx_cons++;
+
+		txd->sd_tdes3 = 0;
+	}
+
+	/* Ring fully drained: disarm the watchdog. */
+	if (sc->sc_tx_cons == sc->sc_tx_prod)
+		ifp->if_timer = 0;
+
+	if (txfree) {
+		if (ifq_is_oactive(&ifp->if_snd))
+			ifq_restart(&ifp->if_snd);
+	}
+}
+
+/*
+ * Rx completion: harvest received packets from the ring (descriptors
+ * with the OWN bit clear), hand them to the network stack and refill
+ * the ring.
+ */
+void
+dwqe_rx_proc(struct dwqe_softc *sc)
+{
+	struct ifnet *ifp = &sc->sc_ac.ac_if;
+	struct dwqe_desc *rxd;
+	struct dwqe_buf *rxb;
+	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
+	struct mbuf *m;
+	int idx, len, cnt, put;
+
+	if ((ifp->if_flags & IFF_RUNNING) == 0)
+		return;
+
+	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring), 0,
+	    DWQE_DMA_LEN(sc->sc_rxring),
+	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+	/* Never consume more slots than if_rxr says are in use. */
+	cnt = if_rxr_inuse(&sc->sc_rx_ring);
+	put = 0;
+	while (put < cnt) {
+		idx = sc->sc_rx_cons;
+		KASSERT(idx < DWQE_NRXDESC);
+
+		rxd = &sc->sc_rxdesc[idx];
+		if (rxd->sd_tdes3 & RDES3_OWN)
+			break;
+
+		len = rxd->sd_tdes3 & RDES3_LENGTH;
+		rxb = &sc->sc_rxbuf[idx];
+		KASSERT(rxb->tb_m);
+
+		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
+		    len, BUS_DMASYNC_POSTREAD);
+		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
+
+		/* Strip off CRC. */
+		len -= ETHER_CRC_LEN;
+		KASSERT(len > 0);
+
+		m = rxb->tb_m;
+		rxb->tb_m = NULL;
+		m->m_pkthdr.len = m->m_len = len;
+
+		ml_enqueue(&ml, m);
+
+		put++;
+		/* Advance the consumer index with wrap. */
+		if (sc->sc_rx_cons == (DWQE_NRXDESC - 1))
+			sc->sc_rx_cons = 0;
+		else
+			sc->sc_rx_cons++;
+	}
+
+	if_rxr_put(&sc->sc_rx_ring, put);
+	if (ifiq_input(&ifp->if_rcv, &ml))
+		if_rxr_livelocked(&sc->sc_rx_ring);
+
+	dwqe_fill_rx_ring(sc);
+
+	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring), 0,
+	    DWQE_DMA_LEN(sc->sc_rxring),
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+}
+
+/*
+ * Bring the interface up: allocate and program the Tx/Rx DMA rings,
+ * configure the MTL queues and flow control, enable MAC Tx/Rx and
+ * interrupts, and start the MII tick.
+ */
+void
+dwqe_up(struct dwqe_softc *sc)
+{
+	struct ifnet *ifp = &sc->sc_ac.ac_if;
+	struct dwqe_buf *txb, *rxb;
+	uint32_t mode, reg, tqs, rqs;
+	int i;
+
+	/* Allocate Tx descriptor ring. */
+	sc->sc_txring = dwqe_dmamem_alloc(sc,
+	    DWQE_NTXDESC * sizeof(struct dwqe_desc), 8);
+	sc->sc_txdesc = DWQE_DMA_KVA(sc->sc_txring);
+
+	sc->sc_txbuf = malloc(sizeof(struct dwqe_buf) * DWQE_NTXDESC,
+	    M_DEVBUF, M_WAITOK);
+	for (i = 0; i < DWQE_NTXDESC; i++) {
+		txb = &sc->sc_txbuf[i];
+		bus_dmamap_create(sc->sc_dmat, MCLBYTES, DWQE_NTXSEGS,
+		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
+		txb->tb_m = NULL;
+	}
+
+	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
+	    0, DWQE_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);
+
+	sc->sc_tx_prod = sc->sc_tx_cons = 0;
+
+	dwqe_write(sc, GMAC_CHAN_TX_BASE_ADDR_HI(0), DWQE_DMA_DVA(sc->sc_txring) >> 32);
+	dwqe_write(sc, GMAC_CHAN_TX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_txring));
+	dwqe_write(sc, GMAC_CHAN_TX_RING_LEN(0), DWQE_NTXDESC - 1);
+	dwqe_write(sc, GMAC_CHAN_TX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_txring));
+
+	/* Allocate Rx descriptor ring. */
+	sc->sc_rxring = dwqe_dmamem_alloc(sc,
+	    DWQE_NRXDESC * sizeof(struct dwqe_desc), 8);
+	sc->sc_rxdesc = DWQE_DMA_KVA(sc->sc_rxring);
+
+	sc->sc_rxbuf = malloc(sizeof(struct dwqe_buf) * DWQE_NRXDESC,
+	    M_DEVBUF, M_WAITOK);
+
+	for (i = 0; i < DWQE_NRXDESC; i++) {
+		rxb = &sc->sc_rxbuf[i];
+		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
+		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
+		rxb->tb_m = NULL;
+	}
+
+	if_rxr_init(&sc->sc_rx_ring, 2, DWQE_NRXDESC);
+
+	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), DWQE_DMA_DVA(sc->sc_rxring) >> 32);
+	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring));
+	dwqe_write(sc, GMAC_CHAN_RX_RING_LEN(0), DWQE_NRXDESC - 1);
+
+	sc->sc_rx_prod = sc->sc_rx_cons = 0;
+	dwqe_fill_rx_ring(sc);
+
+	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
+	    0, DWQE_DMA_LEN(sc->sc_rxring),
+	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+	dwqe_lladdr_write(sc);
+
+	/* Configure media. */
+	if (LIST_FIRST(&sc->sc_mii.mii_phys))
+		mii_mediachg(&sc->sc_mii);
+
+	/* Program promiscuous mode and multicast filters. */
+	dwqe_iff(sc);
+
+	ifp->if_flags |= IFF_RUNNING;
+	ifq_clr_oactive(&ifp->if_snd);
+
+	dwqe_write(sc, GMAC_MAC_1US_TIC_CTR, (sc->sc_clk / 1000000) - 1);
+
+	/* Start receive DMA */
+	reg = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
+	reg |= GMAC_CHAN_RX_CONTROL_SR;
+	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), reg);
+
+	/* Start transmit DMA */
+	reg = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
+	reg |= GMAC_CHAN_TX_CONTROL_ST;
+	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), reg);
+
+	mode = dwqe_read(sc, GMAC_MTL_CHAN_RX_OP_MODE(0));
+	if (sc->sc_force_thresh_dma_mode) {
+		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RSF;
+		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RTC_MASK;
+		mode |= GMAC_MTL_CHAN_RX_OP_MODE_RTC_128;
+	} else {
+		mode |= GMAC_MTL_CHAN_RX_OP_MODE_RSF;
+	}
+	mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RQS_MASK;
+	/*
+	 * The RQS/TQS fields hold the queue size in 256-byte units,
+	 * minus one; the FIFO size is 128 << xxFIFOSIZE bytes.  Shift
+	 * before dividing: "/" binds tighter than "<<", so the old
+	 * form computed 128 << (xxFIFOSIZE / 256), i.e. always 127.
+	 */
+	rqs = (128 << GMAC_MAC_HW_FEATURE1_RXFIFOSIZE(sc->sc_hw_feature[1])) /
+	    256 - 1;
+	mode |= rqs << GMAC_MTL_CHAN_RX_OP_MODE_RQS_SHIFT;
+	dwqe_write(sc, GMAC_MTL_CHAN_RX_OP_MODE(0), mode);
+
+	mode = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
+	if (sc->sc_force_thresh_dma_mode) {
+		mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TSF;
+		mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TTC_MASK;
+		mode |= GMAC_MTL_CHAN_TX_OP_MODE_TTC_128;
+	} else {
+		mode |= GMAC_MTL_CHAN_TX_OP_MODE_TSF;
+	}
+	mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TXQEN_MASK;
+	mode |= GMAC_MTL_CHAN_TX_OP_MODE_TXQEN;
+	mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TQS_MASK;
+	tqs = (128 << GMAC_MAC_HW_FEATURE1_TXFIFOSIZE(sc->sc_hw_feature[1])) /
+	    256 - 1;
+	mode |= tqs << GMAC_MTL_CHAN_TX_OP_MODE_TQS_SHIFT;
+	dwqe_write(sc, GMAC_MTL_CHAN_TX_OP_MODE(0), mode);
+
+	/* Enable pause-frame based flow control in both directions. */
+	reg = dwqe_read(sc, GMAC_QX_TX_FLOW_CTRL(0));
+	reg |= 0xffffU << GMAC_QX_TX_FLOW_CTRL_PT_SHIFT;
+	reg |= GMAC_QX_TX_FLOW_CTRL_TFE;
+	dwqe_write(sc, GMAC_QX_TX_FLOW_CTRL(0), reg);
+	reg = dwqe_read(sc, GMAC_RX_FLOW_CTRL);
+	reg |= GMAC_RX_FLOW_CTRL_RFE;
+	dwqe_write(sc, GMAC_RX_FLOW_CTRL, reg);
+
+	dwqe_write(sc, GMAC_RXQ_CTRL0, GMAC_RXQ_CTRL0_DCB_QUEUE_EN(0));
+
+	dwqe_write(sc, GMAC_MAC_CONF, dwqe_read(sc, GMAC_MAC_CONF) |
+	    GMAC_MAC_CONF_BE | GMAC_MAC_CONF_JD | GMAC_MAC_CONF_JE |
+	    GMAC_MAC_CONF_DCRS | GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE);
+
+	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0),
+	    GMAC_CHAN_INTR_ENA_NIE |
+	    GMAC_CHAN_INTR_ENA_AIE |
+	    GMAC_CHAN_INTR_ENA_FBE |
+	    GMAC_CHAN_INTR_ENA_RIE |
+	    GMAC_CHAN_INTR_ENA_TIE);
+
+	timeout_add_sec(&sc->sc_tick, 1);
+}
+
+/*
+ * Bring the interface down: stop DMA and the MAC, flush the Tx FIFO,
+ * wait out the interrupt handler, then free the rings and buffers
+ * allocated by dwqe_up().
+ */
+void
+dwqe_down(struct dwqe_softc *sc)
+{
+	struct ifnet *ifp = &sc->sc_ac.ac_if;
+	struct dwqe_buf *txb, *rxb;
+	uint32_t reg;
+	int i;
+
+	timeout_del(&sc->sc_rxto);
+	timeout_del(&sc->sc_tick);
+
+	ifp->if_flags &= ~IFF_RUNNING;
+	ifq_clr_oactive(&ifp->if_snd);
+	ifp->if_timer = 0;
+
+	/* Disable receiver */
+	reg = dwqe_read(sc, GMAC_MAC_CONF);
+	reg &= ~GMAC_MAC_CONF_RE;
+	dwqe_write(sc, GMAC_MAC_CONF, reg);
+
+	/* Stop receive DMA */
+	reg = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
+	reg &= ~GMAC_CHAN_RX_CONTROL_SR;
+	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), reg);
+
+	/* Stop transmit DMA */
+	reg = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
+	reg &= ~GMAC_CHAN_TX_CONTROL_ST;
+	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), reg);
+
+	/* Flush data in the TX FIFO */
+	reg = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
+	reg |= GMAC_MTL_CHAN_TX_OP_MODE_FTQ;
+	dwqe_write(sc, GMAC_MTL_CHAN_TX_OP_MODE(0), reg);
+	/* Wait for flush to complete */
+	for (i = 10000; i > 0; i--) {
+		reg = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
+		if ((reg & GMAC_MTL_CHAN_TX_OP_MODE_FTQ) == 0)
+			break;
+		delay(1);
+	}
+	if (i == 0) {
+		printf("%s: timeout flushing TX queue\n",
+		    sc->sc_dev.dv_xname);
+	}
+
+	/* Disable transmitter */
+	reg = dwqe_read(sc, GMAC_MAC_CONF);
+	reg &= ~GMAC_MAC_CONF_TE;
+	dwqe_write(sc, GMAC_MAC_CONF, reg);
+
+	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0), 0);
+
+	/* Make sure no interrupt or start routine is still running. */
+	intr_barrier(sc->sc_ih);
+	ifq_barrier(&ifp->if_snd);
+
+	/* Release Tx resources. */
+	for (i = 0; i < DWQE_NTXDESC; i++) {
+		txb = &sc->sc_txbuf[i];
+		if (txb->tb_m) {
+			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
+			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
+			m_freem(txb->tb_m);
+		}
+		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
+	}
+
+	dwqe_dmamem_free(sc, sc->sc_txring);
+	free(sc->sc_txbuf, M_DEVBUF, 0);
+
+	/* Release Rx resources. */
+	for (i = 0; i < DWQE_NRXDESC; i++) {
+		rxb = &sc->sc_rxbuf[i];
+		if (rxb->tb_m) {
+			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
+			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
+			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
+			m_freem(rxb->tb_m);
+		}
+		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
+	}
+
+	dwqe_dmamem_free(sc, sc->sc_rxring);
+	free(sc->sc_rxbuf, M_DEVBUF, 0);
+}
+
+/* Reverse the bit order of a 32-bit word, one bit per iteration. */
+static uint32_t
+bitrev32(uint32_t x)
+{
+	uint32_t r = 0;
+	int i;
+
+	for (i = 0; i < 32; i++) {
+		r = (r << 1) | (x & 1);
+		x >>= 1;
+	}
+
+	return r;
+}
+
+/*
+ * Program the MAC receive filter: promiscuous / all-multicast modes,
+ * the 64-bit multicast hash table and the station address.
+ */
+void
+dwqe_iff(struct dwqe_softc *sc)
+{
+	struct arpcom *ac = &sc->sc_ac;
+	struct ifnet *ifp = &sc->sc_ac.ac_if;
+	struct ether_multi *enm;
+	struct ether_multistep step;
+	uint32_t bit, crc, filter, hash[2];
+
+	filter = 0;
+	hash[0] = hash[1] = 0;
+	ifp->if_flags &= ~IFF_ALLMULTI;
+
+	if ((ifp->if_flags & IFF_PROMISC) || ac->ac_multirangecnt > 0) {
+		/* Accept all multicast; pass everything when promisc. */
+		ifp->if_flags |= IFF_ALLMULTI;
+		filter |= GMAC_MAC_PACKET_FILTER_PM;
+		if (ifp->if_flags & IFF_PROMISC)
+			filter |= GMAC_MAC_PACKET_FILTER_PR |
+			    GMAC_MAC_PACKET_FILTER_PCF_ALL;
+	} else {
+		/* Hash-filtered multicast reception. */
+		filter |= GMAC_MAC_PACKET_FILTER_HMC;
+		ETHER_FIRST_MULTI(step, ac, enm);
+		while (enm != NULL) {
+			crc = ether_crc32_le(enm->enm_addrlo,
+			    ETHER_ADDR_LEN) & 0x7f;
+
+			/* Fold the reflected CRC into a 6-bit index. */
+			crc = bitrev32(~crc) >> 26;
+			bit = crc & 0x1f;
+			hash[crc >> 5] |= (1 << bit);
+
+			ETHER_NEXT_MULTI(step, enm);
+		}
+	}
+
+	dwqe_lladdr_write(sc);
+
+	dwqe_write(sc, GMAC_MAC_HASH_TAB_REG0, hash[0]);
+	dwqe_write(sc, GMAC_MAC_HASH_TAB_REG1, hash[1]);
+
+	dwqe_write(sc, GMAC_MAC_PACKET_FILTER, filter);
+}
+
+/*
+ * Queue one mbuf (chain) for transmission.
+ *
+ * Loads the packet into the DMA map parked at descriptor *idx, fills
+ * one TX descriptor per DMA segment and hands them to the hardware.
+ * On success *idx advances to the next free descriptor and *used is
+ * incremented by the number of descriptors consumed.  Returns EFBIG
+ * if the mbuf cannot be mapped even after defragmentation.
+ */
+int
+dwqe_encap(struct dwqe_softc *sc, struct mbuf *m, int *idx, int *used)
+{
+	struct dwqe_desc *txd, *txd_start;
+	bus_dmamap_t map;
+	int cur, frag, i;
+
+	cur = frag = *idx;
+	map = sc->sc_txbuf[cur].tb_map;
+
+	/* Try a straight load first; defragment and retry once. */
+	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
+		if (m_defrag(m, M_DONTWAIT))
+			return (EFBIG);
+		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
+			return (EFBIG);
+	}
+
+	/* Sync the DMA map. */
+	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
+	    BUS_DMASYNC_PREWRITE);
+
+	txd = txd_start = &sc->sc_txdesc[frag];
+	for (i = 0; i < map->dm_nsegs; i++) {
+		/* TODO: check for 32-bit vs 64-bit support */
+		KASSERT((map->dm_segs[i].ds_addr >> 32) == 0);
+
+		txd->sd_tdes0 = (uint32_t)map->dm_segs[i].ds_addr;
+		txd->sd_tdes1 = (uint32_t)(map->dm_segs[i].ds_addr >> 32);
+		txd->sd_tdes2 = map->dm_segs[i].ds_len;
+		txd->sd_tdes3 = m->m_pkthdr.len;
+		if (i == 0)
+			txd->sd_tdes3 |= TDES3_FS;
+		if (i == (map->dm_nsegs - 1)) {
+			txd->sd_tdes2 |= TDES2_IC;
+			txd->sd_tdes3 |= TDES3_LS;
+		}
+		/* Hold back OWN on the first descriptor until the whole
+		 * chain is written, so the hardware cannot start early. */
+		if (i != 0)
+			txd->sd_tdes3 |= TDES3_OWN;
+
+		bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
+		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);
+
+		cur = frag;
+		if (frag == (DWQE_NTXDESC - 1)) {
+			txd = &sc->sc_txdesc[0];
+			frag = 0;
+		} else {
+			txd++;
+			frag++;
+		}
+		KASSERT(frag != sc->sc_tx_cons);
+	}
+
+	/* Now hand the first descriptor to the hardware as well. */
+	txd_start->sd_tdes3 |= TDES3_OWN;
+	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
+	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);
+
+	/*
+	 * The TX tail pointer must point at the descriptor following
+	 * the last one queued (frag), not at the consumer index;
+	 * otherwise the DMA engine stops before this packet.
+	 */
+	dwqe_write(sc, GMAC_CHAN_TX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_txring) +
+	    frag * sizeof(*txd));
+
+	KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
+	/* Park the unused map on slot *idx and record the loaded map
+	 * and mbuf on the packet's last descriptor slot. */
+	sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
+	sc->sc_txbuf[cur].tb_map = map;
+	sc->sc_txbuf[cur].tb_m = m;
+
+	*idx = frag;
+	*used += map->dm_nsegs;
+
+	return (0);
+}
+
+/*
+ * Trigger a software reset of the controller and poll until the
+ * SWR bit self-clears; complain if it never does.
+ */
+void
+dwqe_reset(struct dwqe_softc *sc)
+{
+	int timo;
+
+	dwqe_write(sc, GMAC_BUS_MODE,
+	    dwqe_read(sc, GMAC_BUS_MODE) | GMAC_BUS_MODE_SWR);
+
+	timo = 30000;
+	while (timo-- > 0) {
+		if ((dwqe_read(sc, GMAC_BUS_MODE) & GMAC_BUS_MODE_SWR) == 0)
+			return;
+		delay(10);
+	}
+
+	printf("%s: reset timeout\n", sc->sc_dev.dv_xname);
+}
+
+/*
+ * Allocate a single-segment, DMA-addressable memory chunk of `size'
+ * bytes aligned to `align', map it into kernel VA and load it into a
+ * DMA map.  The memory is zeroed on success; returns NULL on failure.
+ * Undone by dwqe_dmamem_free().
+ */
+struct dwqe_dmamem *
+dwqe_dmamem_alloc(struct dwqe_softc *sc, bus_size_t size, bus_size_t align)
+{
+ struct dwqe_dmamem *tdm;
+ int nsegs;
+
+ tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
+ tdm->tdm_size = size;
+
+ /* One segment only, so the region is contiguous for the device. */
+ if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
+ BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
+ goto tdmfree;
+
+ if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &tdm->tdm_seg, 1,
+ &nsegs, BUS_DMA_WAITOK) != 0)
+ goto destroy;
+
+ if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
+ &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
+ goto free;
+
+ if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
+ NULL, BUS_DMA_WAITOK) != 0)
+ goto unmap;
+
+ bzero(tdm->tdm_kva, size);
+
+ return (tdm);
+
+ /* Error unwind: release resources in reverse acquisition order. */
+unmap:
+ bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
+free:
+ bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
+destroy:
+ bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
+tdmfree:
+ free(tdm, M_DEVBUF, 0);
+
+ return (NULL);
+}
+
+/*
+ * Release a chunk obtained from dwqe_dmamem_alloc(): unmap the kernel
+ * VA, free the DMA segment, destroy the map and free the bookkeeping
+ * structure itself.
+ * NOTE(review): the map is destroyed without an explicit
+ * bus_dmamap_unload(); confirm this is safe on all bus_dma backends.
+ */
+void
+dwqe_dmamem_free(struct dwqe_softc *sc, struct dwqe_dmamem *tdm)
+{
+ bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
+ bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
+ bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
+ free(tdm, M_DEVBUF, 0);
+}
+
+/*
+ * Allocate a receive mbuf cluster and load it into the given DMA map,
+ * synced for device writes.  The payload is shifted by ETHER_ALIGN so
+ * the network-layer header lands on a 4-byte boundary.  Returns NULL
+ * if no cluster is available or the map cannot be loaded.
+ */
+struct mbuf *
+dwqe_alloc_mbuf(struct dwqe_softc *sc, bus_dmamap_t map)
+{
+	struct mbuf *m = NULL;
+
+	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
+	if (!m)
+		return (NULL);
+	m->m_len = m->m_pkthdr.len = MCLBYTES;
+	m_adj(m, ETHER_ALIGN);
+
+	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
+		/* Fix: diagnostic was missing its trailing newline. */
+		printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
+		m_freem(m);
+		return (NULL);
+	}
+
+	bus_dmamap_sync(sc->sc_dmat, map, 0,
+	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
+
+	return (m);
+}
+
+/*
+ * Refill the RX ring with fresh mbufs, handing each descriptor back
+ * to the hardware, then bump the RX tail pointer.  If the ring could
+ * not be replenished at all, schedule a retry via sc_rxto.
+ */
+void
+dwqe_fill_rx_ring(struct dwqe_softc *sc)
+{
+	struct dwqe_desc *rxd;
+	struct dwqe_buf *rxb;
+	u_int slots;
+
+	for (slots = if_rxr_get(&sc->sc_rx_ring, DWQE_NRXDESC);
+	    slots > 0; slots--) {
+		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
+		rxb->tb_m = dwqe_alloc_mbuf(sc, rxb->tb_map);
+		if (rxb->tb_m == NULL)
+			break;
+
+		/* TODO: check for 32-bit vs 64-bit support */
+		KASSERT((rxb->tb_map->dm_segs[0].ds_addr >> 32) == 0);
+
+		/*
+		 * RDES0/RDES1 carry the low/high words of the buffer's
+		 * DMA address (not its length, which the previous code
+		 * wrongly wrote here).
+		 */
+		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
+		rxd->sd_tdes0 = (uint32_t)rxb->tb_map->dm_segs[0].ds_addr;
+		rxd->sd_tdes1 =
+		    (uint32_t)(rxb->tb_map->dm_segs[0].ds_addr >> 32);
+		rxd->sd_tdes2 = 0;
+		rxd->sd_tdes3 = RDES3_OWN | RDES3_IC | RDES3_BUF1V;
+
+		if (sc->sc_rx_prod == (DWQE_NRXDESC - 1))
+			sc->sc_rx_prod = 0;
+		else
+			sc->sc_rx_prod++;
+	}
+	if_rxr_put(&sc->sc_rx_ring, slots);
+
+	dwqe_write(sc, GMAC_CHAN_RX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring) +
+	    sc->sc_rx_prod * sizeof(*rxd));
+
+	/* No buffers at all?  Try again shortly. */
+	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
+		timeout_add(&sc->sc_rxto, 1);
+}
--- /dev/null
+/* $OpenBSD: dwqereg.h,v 1.1 2023/02/13 19:18:53 patrick Exp $ */
+/*
+ * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
+ * Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* MAC configuration, filtering and MDIO registers. */
+#define GMAC_MAC_CONF 0x0000
+#define GMAC_MAC_CONF_CST (1 << 21)
+#define GMAC_MAC_CONF_ACS (1 << 20)
+#define GMAC_MAC_CONF_BE (1 << 18)
+#define GMAC_MAC_CONF_JD (1 << 17)
+#define GMAC_MAC_CONF_JE (1 << 16)
+#define GMAC_MAC_CONF_PS (1 << 15)
+#define GMAC_MAC_CONF_FES (1 << 14)
+#define GMAC_MAC_CONF_DM (1 << 13)
+#define GMAC_MAC_CONF_DCRS (1 << 9)
+#define GMAC_MAC_CONF_TE (1 << 1)
+#define GMAC_MAC_CONF_RE (1 << 0)
+#define GMAC_MAC_PACKET_FILTER 0x0008
+#define GMAC_MAC_PACKET_FILTER_HPF (1 << 10)
+#define GMAC_MAC_PACKET_FILTER_PCF_MASK (3 << 6)
+#define GMAC_MAC_PACKET_FILTER_PCF_ALL (2 << 6)
+#define GMAC_MAC_PACKET_FILTER_DBF (1 << 5)
+#define GMAC_MAC_PACKET_FILTER_PM (1 << 4)
+#define GMAC_MAC_PACKET_FILTER_HMC (1 << 2)
+#define GMAC_MAC_PACKET_FILTER_HUC (1 << 1)
+#define GMAC_MAC_PACKET_FILTER_PR (1 << 0)
+#define GMAC_MAC_HASH_TAB_REG0 0x0010
+#define GMAC_MAC_HASH_TAB_REG1 0x0014
+#define GMAC_VERSION 0x0020
+#define GMAC_VERSION_SNPS_MASK 0xff
+#define GMAC_INT_MASK 0x003c
+#define GMAC_INT_MASK_LPIIM (1 << 10)
+#define GMAC_INT_MASK_PIM (1 << 3)
+#define GMAC_INT_MASK_RIM (1 << 0)
+#define GMAC_MAC_ADDR0_HI 0x0040
+#define GMAC_MAC_ADDR0_LO 0x0044
+#define GMAC_QX_TX_FLOW_CTRL(x) (0x0070 + (x) * 4)
+#define GMAC_QX_TX_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_QX_TX_FLOW_CTRL_TFE (1 << 0)
+#define GMAC_RX_FLOW_CTRL 0x0090
+#define GMAC_RX_FLOW_CTRL_RFE (1 << 0)
+#define GMAC_RXQ_CTRL0 0x00a0
+/* Two-bit enable field per RX queue; fixed missing closing paren. */
+#define GMAC_RXQ_CTRL0_QUEUE_CLR(x) (0x3 << ((x) * 2))
+#define GMAC_RXQ_CTRL0_AVB_QUEUE_EN(x) (1 << ((x) * 2))
+#define GMAC_RXQ_CTRL0_DCB_QUEUE_EN(x) (2 << ((x) * 2))
+#define GMAC_RXQ_CTRL1 0x00a4
+#define GMAC_RXQ_CTRL2 0x00a8
+#define GMAC_RXQ_CTRL3 0x00ac
+#define GMAC_INT_STATUS 0x00b0
+#define GMAC_INT_EN 0x00b4
+#define GMAC_MAC_1US_TIC_CTR 0x00dc
+#define GMAC_MAC_HW_FEATURE(x) (0x011c + (x) * 0x4)
+#define GMAC_MAC_HW_FEATURE1_TXFIFOSIZE(x) (((x) >> 6) & 0x1f)
+#define GMAC_MAC_HW_FEATURE1_RXFIFOSIZE(x) (((x) >> 0) & 0x3f)
+#define GMAC_MAC_MDIO_ADDR 0x0200
+#define GMAC_MAC_MDIO_ADDR_PA_SHIFT 21
+#define GMAC_MAC_MDIO_ADDR_RDA_SHIFT 16
+#define GMAC_MAC_MDIO_ADDR_CR_SHIFT 8
+#define GMAC_MAC_MDIO_ADDR_CR_60_100 0
+#define GMAC_MAC_MDIO_ADDR_CR_100_150 1
+#define GMAC_MAC_MDIO_ADDR_CR_20_35 2
+#define GMAC_MAC_MDIO_ADDR_CR_35_60 3
+#define GMAC_MAC_MDIO_ADDR_CR_150_250 4
+#define GMAC_MAC_MDIO_ADDR_CR_250_300 5
+#define GMAC_MAC_MDIO_ADDR_CR_300_500 6
+#define GMAC_MAC_MDIO_ADDR_CR_500_800 7
+#define GMAC_MAC_MDIO_ADDR_SKAP (1 << 4)
+#define GMAC_MAC_MDIO_ADDR_GOC_READ (3 << 2)
+#define GMAC_MAC_MDIO_ADDR_GOC_WRITE (1 << 2)
+#define GMAC_MAC_MDIO_ADDR_C45E (1 << 1)
+#define GMAC_MAC_MDIO_ADDR_GB (1 << 0)
+#define GMAC_MAC_MDIO_DATA 0x0204
+
+/* MTL (queue/FIFO layer) global operation mode. */
+#define GMAC_MTL_OPERATION_MODE 0x0c00
+#define GMAC_MTL_FRPE (1 << 15)
+#define GMAC_MTL_OPERATION_SCHALG_MASK (0x3 << 5)
+#define GMAC_MTL_OPERATION_SCHALG_WRR (0x0 << 5)
+#define GMAC_MTL_OPERATION_SCHALG_WFQ (0x1 << 5)
+#define GMAC_MTL_OPERATION_SCHALG_DWRR (0x2 << 5)
+#define GMAC_MTL_OPERATION_SCHALG_SP (0x3 << 5)
+#define GMAC_MTL_OPERATION_RAA_MASK (0x1 << 2)
+#define GMAC_MTL_OPERATION_RAA_SP (0x0 << 2)
+#define GMAC_MTL_OPERATION_RAA_WSP (0x1 << 2)
+
+/* Per-queue MTL registers, one 0x40-byte block per queue. */
+#define GMAC_MTL_CHAN_BASE_ADDR(x) (0x0d00 + (x) * 0x40)
+#define GMAC_MTL_CHAN_TX_OP_MODE(x) (GMAC_MTL_CHAN_BASE_ADDR(x) + 0x0)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TQS_MASK (0x1ffU << 16)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TQS_SHIFT 16
+#define GMAC_MTL_CHAN_TX_OP_MODE_TTC_MASK (0x7 << 4)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TTC_SHIFT 4
+#define GMAC_MTL_CHAN_TX_OP_MODE_TTC_32 0
+#define GMAC_MTL_CHAN_TX_OP_MODE_TTC_64 (1 << 4)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TTC_96 (2 << 4)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TTC_128 (3 << 4)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TTC_192 (4 << 4)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TTC_256 (5 << 4)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TTC_384 (6 << 4)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TTC_512 (7 << 4)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TXQEN_MASK (0x3 << 2)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TXQEN_AV (1 << 2)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TXQEN (2 << 2)
+#define GMAC_MTL_CHAN_TX_OP_MODE_TSF (1 << 1)
+#define GMAC_MTL_CHAN_TX_OP_MODE_FTQ (1 << 0)
+#define GMAC_MTL_CHAN_TX_DEBUG(x) (GMAC_MTL_CHAN_BASE_ADDR(x) + 0x8)
+#define GMAC_MTL_CHAN_INT_CTRL(x) (GMAC_MTL_CHAN_BASE_ADDR(x) + 0x2c)
+#define GMAC_MTL_CHAN_RX_OP_MODE(x) (GMAC_MTL_CHAN_BASE_ADDR(x) + 0x30)
+#define GMAC_MTL_CHAN_RX_OP_MODE_RQS_MASK (0x3ffU << 20)
+#define GMAC_MTL_CHAN_RX_OP_MODE_RQS_SHIFT 20
+#define GMAC_MTL_CHAN_RX_OP_MODE_RFD_MASK (0x3fU << 14)
+#define GMAC_MTL_CHAN_RX_OP_MODE_RFD_SHIFT 14
+#define GMAC_MTL_CHAN_RX_OP_MODE_RFA_MASK (0x3fU << 8)
+#define GMAC_MTL_CHAN_RX_OP_MODE_RFA_SHIFT 8
+#define GMAC_MTL_CHAN_RX_OP_MODE_EHFC (1 << 7)
+#define GMAC_MTL_CHAN_RX_OP_MODE_RSF (1 << 5)
+#define GMAC_MTL_CHAN_RX_OP_MODE_RTC_MASK (0x3 << 3)
+#define GMAC_MTL_CHAN_RX_OP_MODE_RTC_SHIFT 3
+#define GMAC_MTL_CHAN_RX_OP_MODE_RTC_32 (1 << 3)
+#define GMAC_MTL_CHAN_RX_OP_MODE_RTC_64 (0 << 3)
+#define GMAC_MTL_CHAN_RX_OP_MODE_RTC_96 (2 << 3)
+#define GMAC_MTL_CHAN_RX_OP_MODE_RTC_128 (3 << 3)
+#define GMAC_MTL_CHAN_RX_DEBUG(x) (GMAC_MTL_CHAN_BASE_ADDR(x) + 0x38)
+
+/* DMA bus mode and system bus settings. */
+#define GMAC_BUS_MODE 0x1000
+#define GMAC_BUS_MODE_DCHE (1 << 19)
+#define GMAC_BUS_MODE_SWR (1 << 0)
+#define GMAC_SYS_BUS_MODE 0x1004
+#define GMAC_SYS_BUS_MODE_EN_LPI (1U << 31)
+#define GMAC_SYS_BUS_MODE_LPI_XIT_FRM (1 << 30)
+#define GMAC_SYS_BUS_MODE_WR_OSR_LMT_MASK (0xf << 24)
+#define GMAC_SYS_BUS_MODE_WR_OSR_LMT_SHIFT 24
+#define GMAC_SYS_BUS_MODE_RD_OSR_LMT_MASK (0xf << 16)
+#define GMAC_SYS_BUS_MODE_RD_OSR_LMT_SHIFT 16
+#define GMAC_SYS_BUS_MODE_MB (1 << 14)
+#define GMAC_SYS_BUS_MODE_AAL (1 << 12)
+#define GMAC_SYS_BUS_MODE_EAME (1 << 11)
+#define GMAC_SYS_BUS_MODE_BLEN_256 (1 << 7)
+#define GMAC_SYS_BUS_MODE_BLEN_128 (1 << 6)
+#define GMAC_SYS_BUS_MODE_BLEN_64 (1 << 5)
+#define GMAC_SYS_BUS_MODE_BLEN_32 (1 << 4)
+#define GMAC_SYS_BUS_MODE_BLEN_16 (1 << 3)
+#define GMAC_SYS_BUS_MODE_BLEN_8 (1 << 2)
+#define GMAC_SYS_BUS_MODE_BLEN_4 (1 << 1)
+#define GMAC_SYS_BUS_MODE_FB (1 << 0)
+
+/* Per-channel DMA registers, one 0x80-byte block per channel.
+ * GMAC_CHAN_CONTROL(x) equals the channel base, so the later offsets
+ * expressed relative to it are equivalent to base-relative ones. */
+#define GMAC_CHAN_BASE_ADDR(x) (0x1100 + (x) * 0x80)
+#define GMAC_CHAN_CONTROL(x) (GMAC_CHAN_BASE_ADDR(x) + 0x0)
+#define GMAC_CHAN_CONTROL_8XPBL (1 << 16)
+#define GMAC_CHAN_TX_CONTROL(x) (GMAC_CHAN_BASE_ADDR(x) + 0x4)
+#define GMAC_CHAN_TX_CONTROL_PBL_MASK (0x3f << 8)
+#define GMAC_CHAN_TX_CONTROL_PBL_SHIFT 8
+#define GMAC_CHAN_TX_CONTROL_OSP (1 << 4)
+#define GMAC_CHAN_TX_CONTROL_ST (1 << 0)
+#define GMAC_CHAN_RX_CONTROL(x) (GMAC_CHAN_BASE_ADDR(x) + 0x8)
+#define GMAC_CHAN_RX_CONTROL_RPBL_MASK (0x3f << 17)
+#define GMAC_CHAN_RX_CONTROL_RPBL_SHIFT 17
+#define GMAC_CHAN_RX_CONTROL_SR (1 << 0)
+#define GMAC_CHAN_TX_BASE_ADDR_HI(x) (GMAC_CHAN_BASE_ADDR(x) + 0x10)
+#define GMAC_CHAN_TX_BASE_ADDR(x) (GMAC_CHAN_BASE_ADDR(x) + 0x14)
+#define GMAC_CHAN_RX_BASE_ADDR_HI(x) (GMAC_CHAN_BASE_ADDR(x) + 0x18)
+#define GMAC_CHAN_RX_BASE_ADDR(x) (GMAC_CHAN_BASE_ADDR(x) + 0x1c)
+#define GMAC_CHAN_TX_END_ADDR(x) (GMAC_CHAN_BASE_ADDR(x) + 0x20)
+#define GMAC_CHAN_RX_END_ADDR(x) (GMAC_CHAN_BASE_ADDR(x) + 0x28)
+#define GMAC_CHAN_TX_RING_LEN(x) (GMAC_CHAN_BASE_ADDR(x) + 0x2c)
+#define GMAC_CHAN_RX_RING_LEN(x) (GMAC_CHAN_BASE_ADDR(x) + 0x30)
+#define GMAC_CHAN_INTR_ENA(x) (GMAC_CHAN_BASE_ADDR(x) + 0x34)
+#define GMAC_CHAN_INTR_ENA_NIE (1 << 15)
+#define GMAC_CHAN_INTR_ENA_AIE (1 << 14)
+#define GMAC_CHAN_INTR_ENA_CDE (1 << 13)
+#define GMAC_CHAN_INTR_ENA_FBE (1 << 12)
+#define GMAC_CHAN_INTR_ENA_ERE (1 << 11)
+#define GMAC_CHAN_INTR_ENA_ETE (1 << 10)
+#define GMAC_CHAN_INTR_ENA_RWE (1 << 9)
+#define GMAC_CHAN_INTR_ENA_RSE (1 << 8)
+#define GMAC_CHAN_INTR_ENA_RBUE (1 << 7)
+#define GMAC_CHAN_INTR_ENA_RIE (1 << 6)
+#define GMAC_CHAN_INTR_ENA_TBUE (1 << 2)
+#define GMAC_CHAN_INTR_ENA_TSE (1 << 1)
+#define GMAC_CHAN_INTR_ENA_TIE (1 << 0)
+#define GMAC_CHAN_RX_WATCHDOG(x) (GMAC_CHAN_CONTROL(x) + 0x38)
+#define GMAC_CHAN_SLOT_CTRL_STATUS(x) (GMAC_CHAN_CONTROL(x) + 0x3c)
+#define GMAC_CHAN_CUR_TX_DESC(x) (GMAC_CHAN_CONTROL(x) + 0x44)
+#define GMAC_CHAN_CUR_RX_DESC(x) (GMAC_CHAN_CONTROL(x) + 0x4c)
+#define GMAC_CHAN_CUR_TX_BUF_ADDR(x) (GMAC_CHAN_CONTROL(x) + 0x54)
+#define GMAC_CHAN_CUR_RX_BUF_ADDR(x) (GMAC_CHAN_CONTROL(x) + 0x5c)
+#define GMAC_CHAN_STATUS(x) (GMAC_CHAN_CONTROL(x) + 0x60)
+#define GMAC_CHAN_STATUS_REB_MASK 0x7
+#define GMAC_CHAN_STATUS_REB_SHIFT 19
+#define GMAC_CHAN_STATUS_TEB_MASK 0x7
+#define GMAC_CHAN_STATUS_TEB_SHIFT 16
+#define GMAC_CHAN_STATUS_NIS (1 << 15)
+#define GMAC_CHAN_STATUS_AIS (1 << 14)
+#define GMAC_CHAN_STATUS_CDE (1 << 13)
+#define GMAC_CHAN_STATUS_FBE (1 << 12)
+#define GMAC_CHAN_STATUS_ERI (1 << 11)
+#define GMAC_CHAN_STATUS_ETI (1 << 10)
+#define GMAC_CHAN_STATUS_RWT (1 << 9)
+#define GMAC_CHAN_STATUS_RPS (1 << 8)
+#define GMAC_CHAN_STATUS_RBU (1 << 7)
+#define GMAC_CHAN_STATUS_RI (1 << 6)
+#define GMAC_CHAN_STATUS_TBU (1 << 2)
+#define GMAC_CHAN_STATUS_TPS (1 << 1)
+#define GMAC_CHAN_STATUS_TI (1 << 0)
+
+/*
+ * DWQE descriptors.
+ */
+
+/*
+ * One 16-byte descriptor, shared between the TX and RX rings.  As
+ * used by the driver: tdes0/tdes1 hold the low/high words of the
+ * buffer DMA address, tdes2 the segment length (TX), tdes3 flags and
+ * the packet length.
+ */
+struct dwqe_desc {
+ uint32_t sd_tdes0;
+ uint32_t sd_tdes1;
+ uint32_t sd_tdes2;
+ uint32_t sd_tdes3;
+};
+
+/* Tx bits */
+#define TDES2_IC (1U << 31)
+#define TDES3_ES (1 << 15)
+#define TDES3_DE (1 << 23)
+#define TDES3_LS (1 << 28)
+#define TDES3_FS (1 << 29)
+#define TDES3_OWN (1U << 31)
+
+/* Rx bits */
+#define RDES3_ES (1 << 15)
+#define RDES3_DE (1 << 19)
+#define RDES3_RE (1 << 20)
+#define RDES3_OE (1 << 21)
+#define RDES3_RWT (1 << 22)
+/* NOTE(review): CE and BUF1V share bit 24 — read vs. write-back
+ * descriptor formats overlap; confirm against the databook. */
+#define RDES3_CE (1 << 24)
+#define RDES3_BUF1V (1 << 24)
+#define RDES3_IC (1 << 30)
+#define RDES3_OWN (1U << 31)
+#define RDES3_LENGTH (0x7fff << 0)