From d04289f55ff9924187bc56e6158fa417450862fa Mon Sep 17 00:00:00 2001
From: jmatthew
Date: Thu, 10 Mar 2022 11:35:13 +0000
Subject: [PATCH] Invalidate the nic's rx descriptor cache when taking the
 interface up or down, and turn off the global tx and rx enables when going
 down.

Without this, the nic can write to mbufs that were taken off the ring
when the interface was taken down, triggering mbuf cluster pool use
after free checks.

ok dlg@
---
 sys/dev/pci/if_aq_pci.c | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/sys/dev/pci/if_aq_pci.c b/sys/dev/pci/if_aq_pci.c
index d17b00df5f3..18e6d308ebd 100644
--- a/sys/dev/pci/if_aq_pci.c
+++ b/sys/dev/pci/if_aq_pci.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: if_aq_pci.c,v 1.5 2022/03/08 06:56:14 jmatthew Exp $	*/
+/*	$OpenBSD: if_aq_pci.c,v 1.6 2022/03/10 11:35:13 jmatthew Exp $	*/
 /*	$NetBSD: if_aq.c,v 1.27 2021/06/16 00:21:18 riastradh Exp $	*/
 
 /*
@@ -251,6 +251,9 @@
 #define RPB_RXB_XOFF_THRESH_HI			0x3FFF0000
 #define RPB_RXB_XOFF_THRESH_LO			0x3FFF
 
+#define RX_DMA_DESC_CACHE_INIT_REG		0x5a00
+#define RX_DMA_DESC_CACHE_INIT			(1 << 0)
+
 #define RX_DMA_INT_DESC_WRWB_EN_REG		0x5a30
 #define RX_DMA_INT_DESC_WRWB_EN			(1 << 2)
 #define RX_DMA_INT_DESC_MODERATE_EN		(1 << 3)
@@ -2482,12 +2485,24 @@ aq_queue_down(struct aq_softc *sc, struct aq_queues *aq)
 	aq_dmamem_free(sc, &rx->rx_mem);
 }
 
+void
+aq_invalidate_rx_desc_cache(struct aq_softc *sc)
+{
+	uint32_t cache;
+
+	cache = AQ_READ_REG(sc, RX_DMA_DESC_CACHE_INIT_REG);
+	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_CACHE_INIT_REG, RX_DMA_DESC_CACHE_INIT,
+	    (cache & RX_DMA_DESC_CACHE_INIT) ^ RX_DMA_DESC_CACHE_INIT);
+}
+
 int
 aq_up(struct aq_softc *sc)
 {
 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 	int i;
 
+	aq_invalidate_rx_desc_cache(sc);
+
 	for (i = 0; i < sc->sc_nqueues; i++) {
 		if (aq_queue_up(sc, &sc->sc_queues[i]) != 0)
 			goto downqueues;
@@ -2529,10 +2544,14 @@ aq_down(struct aq_softc *sc)
 	aq_enable_intr(sc, 1, 0);
 	intr_barrier(sc->sc_ih);
 
+	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_EN, 0);
+	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_BUF_EN, 0);
 	for (i = 0; i < sc->sc_nqueues; i++) {
 		/* queue intr barrier? */
 		aq_queue_down(sc, &sc->sc_queues[i]);
 	}
+
+	aq_invalidate_rx_desc_cache(sc);
 }
 
 void
-- 
2.20.1