-/* $OpenBSD: if_bnxt.c,v 1.31 2021/04/23 07:00:58 jmatthew Exp $ */
+/* $OpenBSD: if_bnxt.c,v 1.32 2021/04/24 09:37:46 jmatthew Exp $ */
/*-
* Broadcom NetXtreme-C/E network driver.
*
#include <sys/stdint.h>
#include <sys/sockio.h>
#include <sys/atomic.h>
+#include <sys/intrmap.h>
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_media.h>
+#include <net/toeplitz.h>
#if NBPFILTER > 0
#include <net/bpf.h>
struct bnxt_vnic_info sc_vnic;
struct bnxt_dmamem *sc_stats_ctx_mem;
- struct bnxt_dmamem *sc_rx_mcast;
+ struct bnxt_dmamem *sc_rx_cfg;
struct bnxt_cp_ring sc_cp_ring;
int bnxt_ioctl(struct ifnet *, u_long, caddr_t);
int bnxt_rxrinfo(struct bnxt_softc *, struct if_rxrinfo *);
void bnxt_start(struct ifqueue *);
+int bnxt_admin_intr(void *);
int bnxt_intr(void *);
void bnxt_watchdog(struct ifnet *);
void bnxt_media_status(struct ifnet *, struct ifmediareq *);
struct bnxt_vnic_info *);
int bnxt_hwrm_free_filter(struct bnxt_softc *,
struct bnxt_vnic_info *);
+int bnxt_hwrm_vnic_rss_cfg(struct bnxt_softc *,
+ struct bnxt_vnic_info *, uint32_t, daddr_t, daddr_t);
int bnxt_cfg_async_cr(struct bnxt_softc *, struct bnxt_cp_ring *);
int bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *, uint16_t *,
uint16_t *, uint32_t *, uint32_t *, uint32_t *, uint32_t *);
int bnxt_hwrm_port_qstats(struct bnxt_softc *softc);
-int bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
- uint32_t hash_type);
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc);
void bnxt_validate_hw_lro_settings(struct bnxt_softc *softc);
* devices advertise msi support, but there's no way to tell a
* completion queue to use msi mode, only legacy or msi-x.
*/
- sc->sc_nqueues = 1;
if (pci_intr_map_msix(pa, 0, &ih) == 0) {
+ int nmsix;
+
sc->sc_flags |= BNXT_FLAG_MSIX;
- } else if (pci_intr_map(pa, &ih) != 0) {
+ intrstr = pci_intr_string(sc->sc_pc, ih);
+
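+ /*
+  * With more than one MSI-X vector available, use vector 0 for
+  * async/admin events only and spread the rx/tx queue pairs
+  * across the remaining vectors via an interrupt map.
+  */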
+ nmsix = pci_intr_msix_count(pa->pa_pc, pa->pa_tag);
+ if (nmsix > 1) {
+ sc->sc_ih = pci_intr_establish(sc->sc_pc, ih,
+ IPL_NET | IPL_MPSAFE, bnxt_admin_intr, sc, DEVNAME(sc));
+ sc->sc_intrmap = intrmap_create(&sc->sc_dev,
+ nmsix - 1, BNXT_MAX_QUEUES, INTRMAP_POWEROF2);
+ sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
+ KASSERT(sc->sc_nqueues > 0);
+ KASSERT(powerof2(sc->sc_nqueues));
+ } else {
+ sc->sc_ih = pci_intr_establish(sc->sc_pc, ih,
+ IPL_NET | IPL_MPSAFE, bnxt_intr, &sc->sc_queues[0],
+ DEVNAME(sc));
+ sc->sc_nqueues = 1;
+ }
+ } else if (pci_intr_map(pa, &ih) == 0) {
+ intrstr = pci_intr_string(sc->sc_pc, ih);
+ sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET | IPL_MPSAFE,
+ bnxt_intr, &sc->sc_queues[0], DEVNAME(sc));
+ sc->sc_nqueues = 1;
+ } else {
printf(": unable to map interrupt\n");
goto free_resp;
}
- intrstr = pci_intr_string(sc->sc_pc, ih);
- sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET | IPL_MPSAFE,
- bnxt_intr, &sc->sc_queues[0], DEVNAME(sc));
if (sc->sc_ih == NULL) {
printf(": unable to establish interrupt");
if (intrstr != NULL)
	printf(" at %s", intrstr);
printf("\n");
goto deintr;
}
- printf("%s, address %s\n", intrstr, ether_sprintf(sc->sc_ac.ac_enaddr));
+ printf("%s, %d queues, address %s\n", intrstr, sc->sc_nqueues,
+ ether_sprintf(sc->sc_ac.ac_enaddr));
if (bnxt_hwrm_func_qcfg(sc) != 0) {
printf("%s: failed to query function config\n", DEVNAME(sc));
struct ifiqueue *ifiq = ifp->if_iqs[i];
struct ifqueue *ifq = ifp->if_ifqs[i];
struct bnxt_queue *bq = &sc->sc_queues[i];
+ struct bnxt_cp_ring *cp = &bq->q_cp;
struct bnxt_rx_queue *rx = &bq->q_rx;
struct bnxt_tx_queue *tx = &bq->q_tx;
tx->tx_softc = sc;
tx->tx_ifq = ifq;
ifq->ifq_softc = tx;
+
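+ /*
+  * In multi-queue mode each queue gets its own completion ring
+  * and MSI-X vector, pinned to a CPU from the interrupt map.
+  * Completion ring 0 is reserved for async events, so queue i
+  * uses ring i + 1; each ring's doorbell sits at a 0x80 byte
+  * stride in the doorbell BAR.
+  */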
+ if (sc->sc_nqueues > 1) {
+ cp->stats_ctx_id = HWRM_NA_SIGNATURE;
+ cp->ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
+ cp->ring.id = i + 1; /* first cp ring is async only */
+ cp->softc = sc;
+ cp->ring.doorbell = cp->ring.id * 0x80;
+ cp->ring.ring_size = (PAGE_SIZE * BNXT_CP_PAGES) /
+ sizeof(struct cmpl_base);
+ if (pci_intr_map_msix(pa, i + 1, &ih) != 0) {
+ printf("%s: unable to map queue interrupt %d\n",
+ DEVNAME(sc), i);
+ goto intrdisestablish;
+ }
+ snprintf(bq->q_name, sizeof(bq->q_name), "%s:%d",
+ DEVNAME(sc), i);
+ bq->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
+ IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
+ bnxt_intr, bq, bq->q_name);
+ if (bq->q_ihc == NULL) {
+ printf("%s: unable to establish interrupt %d\n",
+ DEVNAME(sc), i);
+ goto intrdisestablish;
+ }
+ }
}
bnxt_media_autonegotiate(sc);
bnxt_hwrm_port_phy_qcfg(sc, NULL);
return;
+intrdisestablish:
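+ /* tear down any per-queue interrupts established so far */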
+ for (i = 0; i < sc->sc_nqueues; i++) {
+ struct bnxt_queue *bq = &sc->sc_queues[i];
+ if (bq->q_ihc == NULL)
+ continue;
+ pci_intr_disestablish(sc->sc_pc, bq->q_ihc);
+ bq->q_ihc = NULL;
+ }
free_cp_mem:
bnxt_dmamem_free(sc, cpr->ring_mem);
deintr:
+ if (sc->sc_intrmap != NULL) {
+ intrmap_destroy(sc->sc_intrmap);
+ sc->sc_intrmap = NULL;
+ }
pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
sc->sc_ih = NULL;
free_resp:
return;
}
- sc->sc_rx_mcast = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
- if (sc->sc_rx_mcast == NULL) {
- printf("%s: failed to allocate multicast table\n",
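+ /*
+  * sc_rx_cfg holds the multicast filter list in its first page
+  * and the RSS redirection table and hash key in its second.
+  */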
+ sc->sc_rx_cfg = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
+ if (sc->sc_rx_cfg == NULL) {
+ printf("%s: failed to allocate rx config buffer\n",
DEVNAME(sc));
goto free_stats;
}
goto dealloc_vnic;
}
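+ /*
+  * With multiple rx rings, program RSS so the chip spreads flows
+  * across them: stripe the ring group ids through the redirection
+  * table and load the stack's Toeplitz key so the hardware hash
+  * matches what the kernel computes.
+  */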
+ if (sc->sc_nqueues > 1) {
+ uint16_t *rss_table = (BNXT_DMA_KVA(sc->sc_rx_cfg) + PAGE_SIZE);
+ uint8_t *hash_key = (uint8_t *)(rss_table + HW_HASH_INDEX_SIZE);
+
+ for (i = 0; i < HW_HASH_INDEX_SIZE; i++) {
+ struct bnxt_queue *bq;
+
+ bq = &sc->sc_queues[i % sc->sc_nqueues];
+ rss_table[i] = htole16(bq->q_rg.grp_id);
+ }
+ stoeplitz_to_key(hash_key, HW_HASH_KEY_SIZE);
+
+ if (bnxt_hwrm_vnic_rss_cfg(sc, &sc->sc_vnic,
+ HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
+ HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
+ HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
+ HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6,
+ BNXT_DMA_DVA(sc->sc_rx_cfg) + PAGE_SIZE,
+ BNXT_DMA_DVA(sc->sc_rx_cfg) + PAGE_SIZE +
+ (HW_HASH_INDEX_SIZE * sizeof(uint16_t))) != 0) {
+ printf("%s: failed to set RSS config\n", DEVNAME(sc));
+ goto dealloc_vnic;
+ }
+ }
+
bnxt_iff(sc);
SET(ifp->if_flags, IFF_RUNNING);
for (i = 0; i < sc->sc_nqueues; i++)
bnxt_queue_down(sc, &sc->sc_queues[i]);
- bnxt_dmamem_free(sc, sc->sc_rx_mcast);
- sc->sc_rx_mcast = NULL;
+ bnxt_dmamem_free(sc, sc->sc_rx_cfg);
+ sc->sc_rx_cfg = NULL;
free_stats:
bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
sc->sc_stats_ctx_mem = NULL;
for (i = 0; i < sc->sc_nqueues; i++)
bnxt_queue_down(sc, &sc->sc_queues[i]);
- bnxt_dmamem_free(sc, sc->sc_rx_mcast);
- sc->sc_rx_mcast = NULL;
+ bnxt_dmamem_free(sc, sc->sc_rx_cfg);
+ sc->sc_rx_cfg = NULL;
bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
sc->sc_stats_ctx_mem = NULL;
| HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST
| HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
- mc_list = BNXT_DMA_KVA(sc->sc_rx_mcast);
+ mc_list = BNXT_DMA_KVA(sc->sc_rx_cfg);
mc_count = 0;
if (ifp->if_flags & IFF_PROMISC) {
}
bnxt_hwrm_cfa_l2_set_rx_mask(sc, sc->sc_vnic.id, rx_mask,
- BNXT_DMA_DVA(sc->sc_rx_mcast), mc_count);
+ BNXT_DMA_DVA(sc->sc_rx_cfg), mc_count);
}
int
}
#endif
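+ /*
+  * If the chip computed an RSS hash for this packet, carry it
+  * up as the mbuf flow id for the rest of the stack to use.
+  */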
+ if (lemtoh16(&rxlo->flags_type) & RX_PKT_CMPL_FLAGS_RSS_VALID) {
+ m->m_pkthdr.ph_flowid = lemtoh32(&rxlo->rss_hash);
+ m->m_pkthdr.csum_flags |= M_FLOWID;
+ }
+
if (ag != NULL) {
bs = &rx->rx_ag_slots[ag->opaque];
bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0,
}
-#if 0
-
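+/*
+ * Program the VNIC's RSS context with the hash type and the DMA
+ * addresses of the redirection table and Toeplitz hash key.
+ */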
int
-bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
- uint32_t hash_type)
+bnxt_hwrm_vnic_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
+ uint32_t hash_type, daddr_t rss_table, daddr_t rss_key)
{
struct hwrm_vnic_rss_cfg_input req = {0};
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);
req.hash_type = htole32(hash_type);
- req.ring_grp_tbl_addr = htole64(vnic->rss_grp_tbl.idi_paddr);
- req.hash_key_tbl_addr = htole64(vnic->rss_hash_key_tbl.idi_paddr);
+ req.ring_grp_tbl_addr = htole64(rss_table);
+ req.hash_key_tbl_addr = htole64(rss_key);
req.rss_ctx_idx = htole16(vnic->rss_id);
return hwrm_send_message(softc, &req, sizeof(req));
}
-#endif
-
int
bnxt_cfg_async_cr(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
{