-/* $OpenBSD: if_ixl.c,v 1.89 2023/09/29 19:44:47 bluhm Exp $ */
+/* $OpenBSD: if_ixl.c,v 1.90 2023/10/19 16:28:02 jan Exp $ */
/*
* Copyright (c) 2013-2015, Intel Corporation
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
+#include <net/route.h>
#include <net/toeplitz.h>
#if NBPFILTER > 0
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
+#include <netinet/tcp_timer.h>
+#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/if_ether.h>
#define IXL_TX_DESC_BSIZE_MASK \
(IXL_TX_DESC_BSIZE_MAX << IXL_TX_DESC_BSIZE_SHIFT)
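+/*
+ * Context descriptor fields used for TSO: the TSO command bit and the
+ * bit offsets of the TLEN (total L4 payload) and MSS fields.
+ */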
+#define IXL_TX_CTX_DESC_CMD_TSO 0x10
+#define IXL_TX_CTX_DESC_TLEN_SHIFT 30
+#define IXL_TX_CTX_DESC_MSS_SHIFT 50
+
#define IXL_TX_DESC_L2TAG1_SHIFT 48
} __packed __aligned(16);
uint64_t qword3;
} __packed __aligned(16);
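+/*
+ * A TSO packet may span up to MAXMCLBYTES, so allow enough
+ * descriptors for its DMA segments.
+ */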
-#define IXL_TX_PKT_DESCS 8
+#define IXL_TX_PKT_DESCS 32
#define IXL_TX_QUEUE_ALIGN 128
#define IXL_RX_QUEUE_ALIGN 128
#define IXL_HARDMTU 9712 /* 9726 - ETHER_HDR_LEN */
+#define IXL_TSO_SIZE ((255 * 1024) - 1)
+#define IXL_MAX_DMA_SEG_SIZE ((16 * 1024) - 1)
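+/*
+ * A single TX descriptor can address at most IXL_TX_DESC_BSIZE_MAX
+ * (16k - 1) bytes, so DMA segments are capped to that size.
+ */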
+
+/*
+ * Our TCP/IP stack cannot handle packets greater than MAXMCLBYTES.
+ * This interface cannot handle packets greater than IXL_TSO_SIZE.
+ */
+CTASSERT(MAXMCLBYTES < IXL_TSO_SIZE);
#define IXL_PCIREG PCI_MAPREG_START
ifp->if_capabilities |= IFCAP_CSUM_IPv4 |
IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
+ ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status);
txm = &maps[i];
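+		/*
+		 * Size TX maps for a full TSO packet: up to MAXMCLBYTES in
+		 * total, with each DMA segment capped at what a single TX
+		 * descriptor can address.
+		 */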
if (bus_dmamap_create(sc->sc_dmat,
- IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
+ MAXMCLBYTES, IXL_TX_PKT_DESCS, IXL_MAX_DMA_SEG_SIZE, 0,
BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
&txm->txm_map) != 0)
goto uncreate;
}
static uint64_t
-ixl_tx_setup_offload(struct mbuf *m0)
+ixl_tx_setup_offload(struct mbuf *m0, struct ixl_tx_ring *txr,
+ unsigned int prod)
{
struct ether_extracted ext;
uint64_t hlen;
}
if (!ISSET(m0->m_pkthdr.csum_flags,
- M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT))
+ M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT|M_TCP_TSO))
return (offload);
ether_extract_headers(m0, &ext);
offload |= (sizeof(*ext.udp) >> 2) << IXL_TX_DESC_L4LEN_SHIFT;
}
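+	/*
+	 * For TSO, fill a context descriptor carrying the TCP payload
+	 * length and MSS into the slot at prod; the caller reserves this
+	 * slot and places the packet's data descriptors after it.
+	 */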
+ if (ISSET(m0->m_pkthdr.csum_flags, M_TCP_TSO)) {
+ if (ext.tcp) {
+ struct ixl_tx_desc *ring, *txd;
+ uint64_t cmd = 0;
+
+ hlen += ext.tcp->th_off << 2;
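+			/*
+			 * Count the TCP header in hlen so that TLEN
+			 * below covers only the TCP payload.
+			 */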
+ ring = IXL_DMA_KVA(&txr->txr_mem);
+ txd = &ring[prod];
+
+ cmd |= IXL_TX_DESC_DTYPE_CONTEXT;
+ cmd |= IXL_TX_CTX_DESC_CMD_TSO;
+ cmd |= (uint64_t)(m0->m_pkthdr.len - ETHER_HDR_LEN
+ - hlen) << IXL_TX_CTX_DESC_TLEN_SHIFT;
+ cmd |= (uint64_t)(m0->m_pkthdr.ph_mss)
+ << IXL_TX_CTX_DESC_MSS_SHIFT;
+
+ htolem64(&txd->addr, 0);
+ htolem64(&txd->cmd, cmd);
+ } else
+ tcpstat_inc(tcps_outbadtso);
+ }
+
return (offload);
}
mask = sc->sc_tx_ring_ndescs - 1;
for (;;) {
- if (free <= IXL_TX_PKT_DESCS) {
+		/* TSO packets need one extra slot for a context descriptor. */
+ if (free <= (IXL_TX_PKT_DESCS + 1)) {
ifq_set_oactive(ifq);
break;
}
if (m == NULL)
break;
- offload = ixl_tx_setup_offload(m);
+ offload = ixl_tx_setup_offload(m, txr, prod);
txm = &txr->txr_maps[prod];
map = txm->txm_map;
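+		/*
+		 * For TSO packets, ixl_tx_setup_offload() wrote a context
+		 * descriptor at prod; step past it before loading the mbuf.
+		 */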
+ if (ISSET(m->m_pkthdr.csum_flags, M_TCP_TSO)) {
+ prod++;
+ prod &= mask;
+ free--;
+ }
+
if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) {
ifq->ifq_errors++;
m_freem(m);