-/* $OpenBSD: if_iwm.c,v 1.318 2021/03/12 16:27:10 stsp Exp $ */
+/* $OpenBSD: if_iwm.c,v 1.319 2021/04/25 15:32:21 stsp Exp $ */
/*
* Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_ra.h>
#include <net80211/ieee80211_radiotap.h>
+#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
+#undef DPRINTF /* defined in ieee80211_priv.h */
#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
void iwm_setup_ht_rates(struct iwm_softc *);
void iwm_htprot_task(void *);
void iwm_update_htprot(struct ieee80211com *, struct ieee80211_node *);
+void iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t,
+ uint16_t);
+void iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *);
int iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
uint8_t);
void iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
uint8_t);
+void iwm_rx_ba_session_expired(void *);
+void iwm_reorder_timer_expired(void *);
void iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
- uint16_t, uint16_t, int);
+ uint16_t, uint16_t, int, int);
#ifdef notyet
int iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
uint8_t);
void iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
struct iwm_rx_data *);
int iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
+int iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t,
+ struct ieee80211_rxinfo *);
int iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
- struct ieee80211_node *);
+ struct ieee80211_node *, struct ieee80211_rxinfo *);
void iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
void iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
#endif
void iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
struct mbuf_list *);
+void iwm_flip_address(uint8_t *);
+int iwm_detect_duplicate(struct iwm_softc *, struct mbuf *,
+ struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *);
+int iwm_is_sn_less(uint16_t, uint16_t, uint16_t);
+void iwm_release_frames(struct iwm_softc *, struct ieee80211_node *,
+ struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t,
+ struct mbuf_list *);
+int iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *,
+ int, struct iwm_reorder_buffer *, uint32_t, uint32_t);
+int iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int,
+ struct iwm_rx_mpdu_desc *, int, int, uint32_t,
+ struct ieee80211_rxinfo *, struct mbuf_list *);
+void iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t,
+ struct mbuf_list *);
int iwm_rx_pkt_valid(struct iwm_rx_packet *);
void iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
struct mbuf_list *);
ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
}
+void
+iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf,
+ uint16_t ssn, uint16_t buf_size)
+{
+ reorder_buf->head_sn = ssn;
+ reorder_buf->num_stored = 0;
+ reorder_buf->buf_size = buf_size;
+ reorder_buf->last_amsdu = 0;
+ reorder_buf->last_sub_index = 0;
+ reorder_buf->removed = 0;
+ reorder_buf->valid = 0;
+ reorder_buf->consec_oldsn_drops = 0;
+ reorder_buf->consec_oldsn_ampdu_gp2 = 0;
+ reorder_buf->consec_oldsn_prev_drop = 0;
+}
+
+void
+iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
+{
+ int i;
+ struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf;
+ struct iwm_reorder_buf_entry *entry;
+
+ for (i = 0; i < reorder_buf->buf_size; i++) {
+ entry = &rxba->entries[i];
+ ml_purge(&entry->frames);
+ timerclear(&entry->reorder_time);
+ }
+
+ reorder_buf->removed = 1;
+ timeout_del(&reorder_buf->reorder_timer);
+ timerclear(&rxba->last_rx);
+ timeout_del(&rxba->session_timer);
+ rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
+}
+
+#define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
+
+void
+iwm_rx_ba_session_expired(void *arg)
+{
+ struct iwm_rxba_data *rxba = arg;
+ struct iwm_softc *sc = rxba->sc;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = ic->ic_bss;
+ struct timeval now, timeout, expiry;
+ int s;
+
+ s = splnet();
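+	/*
+	 * If a frame was received within the reorder buffer timeout, keep
+	 * the BA session alive and re-arm the session timer. Otherwise ask
+	 * net80211 to tear the session down.
+	 */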
+ if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0 &&
+ ic->ic_state == IEEE80211_S_RUN &&
+ rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
+ getmicrouptime(&now);
+ USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
+ timeradd(&rxba->last_rx, &timeout, &expiry);
+ if (timercmp(&now, &expiry, <)) {
+ timeout_add_usec(&rxba->session_timer, rxba->timeout);
+ } else {
+ ic->ic_stats.is_ht_rx_ba_timeout++;
+ ieee80211_delba_request(ic, ni,
+ IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
+ }
+ }
+ splx(s);
+}
+
+void
+iwm_reorder_timer_expired(void *arg)
+{
+ struct mbuf_list ml = MBUF_LIST_INITIALIZER();
+ struct iwm_reorder_buffer *buf = arg;
+ struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf);
+ struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
+ struct iwm_softc *sc = rxba->sc;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = ic->ic_bss;
+ int i, s;
+ uint16_t sn = 0, index = 0;
+ int expired = 0;
+ int cont = 0;
+ struct timeval now, timeout, expiry;
+
+ if (!buf->num_stored || buf->removed)
+ return;
+
+ s = splnet();
+ getmicrouptime(&now);
+ USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
+
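+	/*
+	 * Walk the reorder window starting at head_sn and find buffered
+	 * frames whose reorder timeout has expired, remembering the SN
+	 * just past the last expired frame.
+	 */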
+	for (i = 0; i < buf->buf_size; i++) {
+ index = (buf->head_sn + i) % buf->buf_size;
+
+ if (ml_empty(&entries[index].frames)) {
+ /*
+ * If there is a hole and the next frame didn't expire
+ * we want to break and not advance SN.
+ */
+ cont = 0;
+ continue;
+ }
+ timeradd(&entries[index].reorder_time, &timeout, &expiry);
+ if (!cont && timercmp(&now, &expiry, <))
+ break;
+
+ expired = 1;
+ /* continue until next hole after this expired frame */
+ cont = 1;
+ sn = (buf->head_sn + (i + 1)) & 0xfff;
+ }
+
+ if (expired) {
+ /* SN is set to the last expired frame + 1 */
+ iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
+ if_input(&sc->sc_ic.ic_if, &ml);
+ ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
+ } else {
+ /*
+ * If no frame expired and there are stored frames, index is now
+ * pointing to the first unexpired frame - modify reorder timeout
+ * accordingly.
+ */
+ timeout_add_usec(&buf->reorder_timer,
+ RX_REORDER_BUF_TIMEOUT_MQ_USEC);
+ }
+
+ splx(s);
+}
+
#define IWM_MAX_RX_BA_SESSIONS 16
void
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
- uint16_t ssn, uint16_t winsize, int start)
+ uint16_t ssn, uint16_t winsize, int timeout_val, int start)
{
struct ieee80211com *ic = &sc->sc_ic;
struct iwm_add_sta_cmd cmd;
int err, s;
uint32_t status;
size_t cmdsize;
+ struct iwm_rxba_data *rxba = NULL;
+ uint8_t baid = 0;
+
+ s = splnet();
if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
ieee80211_addba_req_refuse(ic, ni, tid);
+ splx(s);
return;
}
err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
&status);
- s = splnet();
- if (!err && (status & IWM_ADD_STA_STATUS_MASK) == IWM_ADD_STA_SUCCESS) {
+ if (err || (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS) {
+ if (start)
+ ieee80211_addba_req_refuse(ic, ni, tid);
+ splx(s);
+ return;
+ }
+
+ if (sc->sc_mqrx_supported) {
+ /* Deaggregation is done in hardware. */
if (start) {
- sc->sc_rx_ba_sessions++;
- ieee80211_addba_req_accept(ic, ni, tid);
- } else if (sc->sc_rx_ba_sessions > 0)
- sc->sc_rx_ba_sessions--;
- } else if (start)
- ieee80211_addba_req_refuse(ic, ni, tid);
+ if (!(status & IWM_ADD_STA_BAID_VALID_MASK)) {
+ ieee80211_addba_req_refuse(ic, ni, tid);
+ splx(s);
+ return;
+ }
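+			/*
+			 * The firmware's ADD_STA response carries the BAID
+			 * assigned to this BA session; use it as the index
+			 * into the sc_rxba_data array below.
+			 */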
+ baid = (status & IWM_ADD_STA_BAID_MASK) >>
+ IWM_ADD_STA_BAID_SHIFT;
+ if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
+ baid >= nitems(sc->sc_rxba_data)) {
+ ieee80211_addba_req_refuse(ic, ni, tid);
+ splx(s);
+ return;
+ }
+ rxba = &sc->sc_rxba_data[baid];
+ if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
+ ieee80211_addba_req_refuse(ic, ni, tid);
+ splx(s);
+ return;
+ }
+ rxba->sta_id = IWM_STATION_ID;
+ rxba->tid = tid;
+ rxba->baid = baid;
+ rxba->timeout = timeout_val;
+ getmicrouptime(&rxba->last_rx);
+ iwm_init_reorder_buffer(&rxba->reorder_buf, ssn,
+ winsize);
+ if (timeout_val != 0) {
+ struct ieee80211_rx_ba *ba;
+ timeout_add_usec(&rxba->session_timer,
+ timeout_val);
+ /* XXX disable net80211's BA timeout handler */
+ ba = &ni->ni_rx_ba[tid];
+ ba->ba_timeout_val = 0;
+ }
+ } else {
+ int i;
+ for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+ rxba = &sc->sc_rxba_data[i];
+ if (rxba->baid ==
+ IWM_RX_REORDER_DATA_INVALID_BAID)
+ continue;
+ if (rxba->tid != tid)
+ continue;
+ iwm_clear_reorder_buffer(sc, rxba);
+ break;
+ }
+ }
+ }
+
+ if (start) {
+ sc->sc_rx_ba_sessions++;
+ ieee80211_addba_req_accept(ic, ni, tid);
+ } else if (sc->sc_rx_ba_sessions > 0)
+ sc->sc_rx_ba_sessions--;
splx(s);
}
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni = ic->ic_bss;
int s = splnet();
+ int tid;
- if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
- refcnt_rele_wake(&sc->task_refs);
- splx(s);
- return;
+ for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
+ if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
+ break;
+ if (sc->ba_start_tidmask & (1 << tid)) {
+ iwm_sta_rx_agg(sc, ni, tid, sc->ba_ssn[tid],
+ sc->ba_winsize[tid], sc->ba_timeout_val[tid], 1);
+ sc->ba_start_tidmask &= ~(1 << tid);
+ } else if (sc->ba_stop_tidmask & (1 << tid)) {
+ iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
+ sc->ba_stop_tidmask &= ~(1 << tid);
+ }
}
-
- if (sc->ba_start)
- iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn,
- sc->ba_winsize, 1);
- else
- iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0, 0);
refcnt_rele_wake(&sc->task_refs);
splx(s);
struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
struct iwm_softc *sc = IC2IFP(ic)->if_softc;
- if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
+ if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS ||
+ tid > IWM_MAX_TID_COUNT || (sc->ba_start_tidmask & (1 << tid)))
return ENOSPC;
- sc->ba_start = 1;
- sc->ba_tid = tid;
- sc->ba_ssn = htole16(ba->ba_winstart);
- sc->ba_winsize = htole16(ba->ba_winsize);
+ sc->ba_start_tidmask |= (1 << tid);
+ sc->ba_ssn[tid] = ba->ba_winstart;
+ sc->ba_winsize[tid] = ba->ba_winsize;
+ sc->ba_timeout_val[tid] = ba->ba_timeout_val;
iwm_add_task(sc, systq, &sc->ba_task);
return EBUSY;
{
struct iwm_softc *sc = IC2IFP(ic)->if_softc;
- sc->ba_start = 0;
- sc->ba_tid = tid;
+ if (tid > IWM_MAX_TID_COUNT || sc->ba_stop_tidmask & (1 << tid))
+ return;
+
+	sc->ba_stop_tidmask |= (1 << tid);
iwm_add_task(sc, systq, &sc->ba_task);
}
}
int
-iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
+iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
+ struct ieee80211_rxinfo *rxi)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_key *k = &ni->ni_pairwise_key;
(uint64_t)ivp[5] << 24 |
(uint64_t)ivp[6] << 32 |
(uint64_t)ivp[7] << 40;
- if (pn <= *prsc) {
+ if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
+ if (pn < *prsc) {
+ ic->ic_stats.is_ccmp_replays++;
+ return 1;
+ }
+ } else if (pn <= *prsc) {
ic->ic_stats.is_ccmp_replays++;
return 1;
}
return 0;
}
+int
+iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
+ struct ieee80211_rxinfo *rxi)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ifnet *ifp = IC2IFP(ic);
+ struct ieee80211_frame *wh;
+ struct ieee80211_node *ni;
+ int ret = 0;
+ uint8_t type, subtype;
+
+ wh = mtod(m, struct ieee80211_frame *);
+
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ if (type == IEEE80211_FC0_TYPE_CTL)
+ return 0;
+
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
+ return 0;
+
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
+ !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
+ return 0;
+
+ ni = ieee80211_find_rxnode(ic, wh);
+ /* Handle hardware decryption. */
+ if ((ni->ni_flags & IEEE80211_NODE_RXPROT) &&
+ ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
+ if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
+ IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
+ ic->ic_stats.is_ccmp_dec_errs++;
+ ret = 1;
+ goto out;
+ }
+ /* Check whether decryption was successful or not. */
+ if ((rx_pkt_status &
+ (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
+ IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
+ (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
+ IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
+ ic->ic_stats.is_ccmp_dec_errs++;
+ ret = 1;
+ goto out;
+ }
+ rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
+ }
+out:
+ if (ret)
+ ifp->if_ierrors++;
+ ieee80211_release_node(ic, ni);
+ return ret;
+}
+
void
iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
struct mbuf_list *ml)
{
struct ieee80211com *ic = &sc->sc_ic;
+ struct ifnet *ifp = IC2IFP(ic);
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct ieee80211_channel *bss_chan;
uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
- struct ifnet *ifp = IC2IFP(ic);
if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
}
ni->ni_chan = &ic->ic_channels[chanidx];
- /* Handle hardware decryption. */
- if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
- && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
- !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
- (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
- ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
- if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
- IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
- ic->ic_stats.is_ccmp_dec_errs++;
- ifp->if_ierrors++;
- m_freem(m);
- ieee80211_release_node(ic, ni);
- return;
- }
- /* Check whether decryption was successful or not. */
- if ((rx_pkt_status &
- (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
- IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
- (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
- IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
- ic->ic_stats.is_ccmp_dec_errs++;
- ifp->if_ierrors++;
- m_freem(m);
- ieee80211_release_node(ic, ni);
- return;
- }
- if (iwm_ccmp_decap(sc, m, ni) != 0) {
- ifp->if_ierrors++;
- m_freem(m);
- ieee80211_release_node(ic, ni);
- return;
- }
- rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
+ if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
+ iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
+ ifp->if_ierrors++;
+ m_freem(m);
+ ieee80211_release_node(ic, ni);
+ return;
}
#if NBPFILTER > 0
uint32_t rx_pkt_status;
int rssi, chanidx, rate_n_flags;
+ memset(&rxi, 0, sizeof(rxi));
+
phy_info = &sc->sc_last_phy_info;
rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
len = le16toh(rx_res->byte_count);
m->m_data = pktdata + sizeof(*rx_res);
m->m_pkthdr.len = m->m_len = len;
+ if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
+ m_freem(m);
+ return;
+ }
+
chanidx = letoh32(phy_info->channel);
device_timestamp = le32toh(phy_info->system_timestamp);
phy_flags = letoh16(phy_info->phy_flags);
rssi = (0 - IWM_MIN_DBM) + rssi; /* normalize */
rssi = MIN(rssi, ic->ic_max_rssi); /* clip to max. 100% */
- memset(&rxi, 0, sizeof(rxi));
rxi.rxi_rssi = rssi;
rxi.rxi_tstamp = device_timestamp;
rate_n_flags, device_timestamp, &rxi, ml);
}
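+/*
+ * Reverse the byte order of a MAC address. The hardware stores addr3
+ * (and addr4, if present) of de-aggregated A-MSDU subframes in reversed
+ * byte order; see iwm_rx_mpdu_mq().
+ */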
+void
+iwm_flip_address(uint8_t *addr)
+{
+ int i;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ mac_addr[i] = addr[ETHER_ADDR_LEN - i - 1];
+ IEEE80211_ADDR_COPY(addr, mac_addr);
+}
+
+/*
+ * Drop duplicate 802.11 retransmissions
+ * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
+ * and handle pseudo-duplicate frames which result from deaggregation
+ * of A-MSDU frames in hardware.
+ */
+int
+iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m,
+ struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwm_node *in = (void *)ic->ic_bss;
+ struct iwm_rxq_dup_data *dup_data = &in->dup_data;
+ uint8_t tid = IWM_MAX_TID_COUNT, subframe_idx;
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ int hasqos = ieee80211_has_qos(wh);
+ uint16_t seq;
+
+ if (type == IEEE80211_FC0_TYPE_CTL ||
+ (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
+ IEEE80211_IS_MULTICAST(wh->i_addr1))
+ return 0;
+
+ if (hasqos) {
+ tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
+ if (tid > IWM_MAX_TID_COUNT)
+ tid = IWM_MAX_TID_COUNT;
+ }
+
+ /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
+ subframe_idx = desc->amsdu_info &
+ IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+ seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
+ if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
+ dup_data->last_seq[tid] == seq &&
+ dup_data->last_sub_frame[tid] >= subframe_idx)
+ return 1;
+
+ /*
+ * Allow the same frame sequence number for all A-MSDU subframes
+ * following the first subframe.
+ * Otherwise these subframes would be discarded as replays.
+ */
+ if (dup_data->last_seq[tid] == seq &&
+ subframe_idx > dup_data->last_sub_frame[tid] &&
+ (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU)) {
+ rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
+ }
+
+ dup_data->last_seq[tid] = seq;
+ dup_data->last_sub_frame[tid] = subframe_idx;
+
+ return 0;
+}
+
+/*
+ * Returns true if sn2 - buffer_size < sn1 < sn2.
+ * To be used only in order to compare reorder buffer head with NSSN.
+ * We fully trust NSSN unless it is behind us due to reorder timeout.
+ * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
+ */
+int
+iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
+{
+ return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
+}
+
+void
+iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
+ struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf,
+ uint16_t nssn, struct mbuf_list *ml)
+{
+ struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
+ uint16_t ssn = reorder_buf->head_sn;
+
+ /* ignore nssn smaller than head sn - this can happen due to timeout */
+ if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
+ goto set_timer;
+
+ while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
+ int index = ssn % reorder_buf->buf_size;
+ struct mbuf *m;
+ int chanidx, is_shortpre;
+ uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
+ struct ieee80211_rxinfo *rxi;
+
+ /* This data is the same for all A-MSDU subframes. */
+ chanidx = entries[index].chanidx;
+ rx_pkt_status = entries[index].rx_pkt_status;
+ is_shortpre = entries[index].is_shortpre;
+ rate_n_flags = entries[index].rate_n_flags;
+ device_timestamp = entries[index].device_timestamp;
+ rxi = &entries[index].rxi;
+
+ /*
+		 * Empty the list. It will hold more than one frame for an
+		 * A-MSDU. An empty list is valid as well since nssn indicates
+		 * that the frames were received.
+ */
+ while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
+ iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
+ rate_n_flags, device_timestamp, rxi, ml);
+ reorder_buf->num_stored--;
+
+ /*
+ * Allow the same frame sequence number and CCMP PN for
+ * all A-MSDU subframes following the first subframe.
+ * Otherwise they would be discarded as replays.
+ */
+ rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
+ rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
+ }
+
+ ssn = (ssn + 1) & 0xfff;
+ }
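+	/* Advance the reorder window to the next expected sequence number. */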
+ reorder_buf->head_sn = nssn;
+
+set_timer:
+ if (reorder_buf->num_stored && !reorder_buf->removed) {
+ timeout_add_usec(&reorder_buf->reorder_timer,
+ RX_REORDER_BUF_TIMEOUT_MQ_USEC);
+ } else
+ timeout_del(&reorder_buf->reorder_timer);
+}
+
+int
+iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
+ struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+
+ if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
+ /* we have a new (A-)MPDU ... */
+
+ /*
+ * reset counter to 0 if we didn't have any oldsn in
+ * the last A-MPDU (as detected by GP2 being identical)
+ */
+ if (!buffer->consec_oldsn_prev_drop)
+ buffer->consec_oldsn_drops = 0;
+
+ /* either way, update our tracking state */
+ buffer->consec_oldsn_ampdu_gp2 = gp2;
+ } else if (buffer->consec_oldsn_prev_drop) {
+ /*
+ * tracking state didn't change, and we had an old SN
+ * indication before - do nothing in this case, we
+ * already noted this one down and are waiting for the
+ * next A-MPDU (by GP2)
+ */
+ return 0;
+ }
+
+ /* return unless this MPDU has old SN */
+ if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN))
+ return 0;
+
+ /* update state */
+ buffer->consec_oldsn_prev_drop = 1;
+ buffer->consec_oldsn_drops++;
+
+ /* if limit is reached, send del BA and reset state */
+ if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA) {
+ ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
+ 0, tid);
+ buffer->consec_oldsn_prev_drop = 0;
+ buffer->consec_oldsn_drops = 0;
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Handle re-ordering of frames which were de-aggregated in hardware.
+ * Returns 1 if the MPDU was consumed (buffered or dropped).
+ * Returns 0 if the MPDU should be passed to upper layer.
+ */
+int
+iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
+ struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
+ uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
+ struct mbuf_list *ml)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_frame *wh;
+ struct ieee80211_node *ni;
+ struct iwm_rxba_data *rxba;
+ struct iwm_reorder_buffer *buffer;
+ uint32_t reorder_data = le32toh(desc->reorder_data);
+ int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU);
+ int last_subframe =
+ (desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME);
+ uint8_t tid;
+ uint8_t subframe_idx = (desc->amsdu_info &
+ IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
+ struct iwm_reorder_buf_entry *entries;
+ int index;
+ uint16_t nssn, sn;
+ uint8_t baid, type, subtype;
+ int hasqos;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ hasqos = ieee80211_has_qos(wh);
+ tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
+
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ ni = ieee80211_find_rxnode(ic, wh);
+
+ /*
+ * We are only interested in Block Ack requests and unicast QoS data.
+ */
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1))
+ return 0;
+ if (hasqos) {
+ if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
+ return 0;
+ } else {
+ if (type != IEEE80211_FC0_TYPE_CTL ||
+ subtype != IEEE80211_FC0_SUBTYPE_BAR)
+ return 0;
+ }
+
+ baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK) >>
+ IWM_RX_MPDU_REORDER_BAID_SHIFT;
+ if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
+ baid >= nitems(sc->sc_rxba_data))
+ return 0;
+
+ rxba = &sc->sc_rxba_data[baid];
+ if (rxba == NULL || tid != rxba->tid || rxba->sta_id != IWM_STATION_ID)
+ return 0;
+
+ /* Bypass A-MPDU re-ordering in net80211. */
+ rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
+
+ nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK;
+ sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK) >>
+ IWM_RX_MPDU_REORDER_SN_SHIFT;
+
+ buffer = &rxba->reorder_buf;
+ entries = &rxba->entries[0];
+
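+	/*
+	 * Frames tagged with an old SN before the reorder buffer has become
+	 * valid are passed through without reordering; the buffer becomes
+	 * valid once a frame without the old-SN flag is seen.
+	 */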
+ if (!buffer->valid) {
+ if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN)
+ return 0;
+ buffer->valid = 1;
+ }
+
+ if (type == IEEE80211_FC0_TYPE_CTL &&
+ subtype == IEEE80211_FC0_SUBTYPE_BAR) {
+ iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
+ goto drop;
+ }
+
+ /*
+ * If there was a significant jump in the nssn - adjust.
+ * If the SN is smaller than the NSSN it might need to first go into
+ * the reorder buffer, in which case we just release up to it and the
+ * rest of the function will take care of storing it and releasing up to
+ * the nssn.
+ */
+ if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
+ buffer->buf_size) ||
+ !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
+ uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
+ ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
+ iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
+ }
+
+ if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
+ device_timestamp)) {
+ /* BA session will be torn down. */
+ ic->ic_stats.is_ht_rx_ba_window_jump++;
+ goto drop;
+	}
+
+ /* drop any outdated packets */
+ if (SEQ_LT(sn, buffer->head_sn)) {
+ ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
+ goto drop;
+ }
+
+ /* release immediately if allowed by nssn and no stored frames */
+ if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
+ if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
+ (!is_amsdu || last_subframe))
+ buffer->head_sn = nssn;
+ return 0;
+ }
+
+ /*
+ * release immediately if there are no stored frames, and the sn is
+ * equal to the head.
+ * This can happen due to reorder timer, where NSSN is behind head_sn.
+ * When we released everything, and we got the next frame in the
+ * sequence, according to the NSSN we can't release immediately,
+ * while technically there is no hole and we can move forward.
+ */
+ if (!buffer->num_stored && sn == buffer->head_sn) {
+ if (!is_amsdu || last_subframe)
+ buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
+ return 0;
+ }
+
+ index = sn % buffer->buf_size;
+
+ /*
+	 * Check whether we have already stored this frame.
+	 * An A-MSDU is either received completely or not at all, so the
+	 * logic is simple: if there are frames at this position in the
+	 * buffer and the last frame originating from an A-MSDU had a
+	 * different SN, this is a retransmission. If the SN is the same
+	 * and the subframe index is increasing, it is the same A-MSDU;
+	 * otherwise it is a retransmission.
+ */
+ if (!ml_empty(&entries[index].frames)) {
+ if (!is_amsdu) {
+ ic->ic_stats.is_ht_rx_ba_no_buf++;
+ goto drop;
+ } else if (sn != buffer->last_amsdu ||
+ buffer->last_sub_index >= subframe_idx) {
+ ic->ic_stats.is_ht_rx_ba_no_buf++;
+ goto drop;
+ }
+ } else {
+ /* This data is the same for all A-MSDU subframes. */
+ entries[index].chanidx = chanidx;
+ entries[index].is_shortpre = is_shortpre;
+ entries[index].rate_n_flags = rate_n_flags;
+ entries[index].device_timestamp = device_timestamp;
+ memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
+ }
+
+ /* put in reorder buffer */
+ ml_enqueue(&entries[index].frames, m);
+ buffer->num_stored++;
+ getmicrouptime(&entries[index].reorder_time);
+
+ if (is_amsdu) {
+ buffer->last_amsdu = sn;
+ buffer->last_sub_index = subframe_idx;
+ }
+
+ /*
+ * We cannot trust NSSN for AMSDU sub-frames that are not the last.
+ * The reason is that NSSN advances on the first sub-frame, and may
+ * cause the reorder buffer to advance before all the sub-frames arrive.
+ * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
+ * SN 1. NSSN for first sub frame will be 3 with the result of driver
+ * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
+ * already ahead and it will be dropped.
+ * If the last sub-frame is not on this queue - we will get frame
+ * release notification with up to date NSSN.
+ */
+ if (!is_amsdu || last_subframe)
+ iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
+
+ return 1;
+
+drop:
+ m_freem(m);
+ return 1;
+}
+
void
iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
size_t maxlen, struct mbuf_list *ml)
uint8_t chanidx;
uint16_t phy_info;
+ memset(&rxi, 0, sizeof(rxi));
+
desc = (struct iwm_rx_mpdu_desc *)pktdata;
if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
m_adj(m, 2);
}
+ /*
+ * Hardware de-aggregates A-MSDUs and copies the same MAC header
+ * in place for each subframe. But it leaves the 'A-MSDU present'
+ * bit set in the frame header. We need to clear this bit ourselves.
+ *
+ * And we must allow the same CCMP PN for subframes following the
+ * first subframe. Otherwise they would be discarded as replays.
+ */
+ if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU) {
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ uint8_t subframe_idx = (desc->amsdu_info &
+ IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
+ if (subframe_idx > 0)
+ rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
+ if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
+ m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
+ struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
+ struct ieee80211_qosframe_addr4 *);
+ qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
+
+ /* HW reverses addr3 and addr4. */
+ iwm_flip_address(qwh4->i_addr3);
+ iwm_flip_address(qwh4->i_addr4);
+ } else if (ieee80211_has_qos(wh) &&
+ m->m_len >= sizeof(struct ieee80211_qosframe)) {
+ struct ieee80211_qosframe *qwh = mtod(m,
+ struct ieee80211_qosframe *);
+ qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
+
+ /* HW reverses addr3. */
+ iwm_flip_address(qwh->i_addr3);
+ }
+ }
+
+ /*
+ * Verify decryption before duplicate detection. The latter uses
+ * the TID supplied in QoS frame headers and this TID is implicitly
+ * verified as part of the CCMP nonce.
+ */
+ if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
+ m_freem(m);
+ return;
+ }
+
+ if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
+ m_freem(m);
+ return;
+ }
+
phy_info = le16toh(desc->phy_info);
rate_n_flags = le32toh(desc->v1.rate_n_flags);
chanidx = desc->v1.channel;
rssi = (0 - IWM_MIN_DBM) + rssi; /* normalize */
rssi = MIN(rssi, ic->ic_max_rssi); /* clip to max. 100% */
- memset(&rxi, 0, sizeof(rxi));
rxi.rxi_rssi = rssi;
rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
+ if (iwm_rx_reorder(sc, m, chanidx, desc,
+ (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
+ rate_n_flags, device_timestamp, &rxi, ml))
+ return;
+
iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
(phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
rate_n_flags, device_timestamp, &rxi, ml);
}
sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
sc->sc_rx_ba_sessions = 0;
+ sc->ba_start_tidmask = 0;
+ sc->ba_stop_tidmask = 0;
}
tfd_queue_msk = 0;
}
sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
sc->sc_rx_ba_sessions = 0;
+ sc->ba_start_tidmask = 0;
+ sc->ba_stop_tidmask = 0;
}
return 0;
{
struct ifnet *ifp = IC2IFP(ic);
struct iwm_softc *sc = ifp->if_softc;
+ int i;
if (ic->ic_state == IEEE80211_S_RUN) {
timeout_del(&sc->sc_calib_to);
iwm_del_task(sc, systq, &sc->ba_task);
iwm_del_task(sc, systq, &sc->htprot_task);
+ for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+ struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
+ iwm_clear_reorder_buffer(sc, rxba);
+ }
}
sc->ns_nstate = nstate;
sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
sc->sc_rx_ba_sessions = 0;
+ sc->ba_start_tidmask = 0;
+ sc->ba_stop_tidmask = 0;
+ memset(sc->ba_ssn, 0, sizeof(sc->ba_ssn));
+ memset(sc->ba_winsize, 0, sizeof(sc->ba_winsize));
+ memset(sc->ba_timeout_val, 0, sizeof(sc->ba_timeout_val));
sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
timeout_del(&sc->sc_calib_to); /* XXX refcount? */
+ for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+ struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
+ iwm_clear_reorder_buffer(sc, rxba);
+ }
iwm_led_blink_stop(sc);
ifp->if_timer = sc->sc_tx_timer = 0;
struct ifnet *ifp = &ic->ic_if;
const char *intrstr;
int err;
- int txq_i, i;
+ int txq_i, i, j;
sc->sc_pct = pa->pa_pc;
sc->sc_pcitag = pa->pa_tag;
#endif
timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
+ for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+ struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
+ rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
+ rxba->sc = sc;
+ timeout_set(&rxba->session_timer, iwm_rx_ba_session_expired,
+ rxba);
+ timeout_set(&rxba->reorder_buf.reorder_timer,
+ iwm_reorder_timer_expired, &rxba->reorder_buf);
+ for (j = 0; j < nitems(rxba->entries); j++)
+ ml_init(&rxba->entries[j].frames);
+ }
task_set(&sc->init_task, iwm_init_task, sc);
task_set(&sc->newstate_task, iwm_newstate_task, sc);
task_set(&sc->ba_task, iwm_ba_task, sc);
-/* $OpenBSD: if_iwmreg.h,v 1.48 2020/05/18 17:56:41 stsp Exp $ */
+/* $OpenBSD: if_iwmreg.h,v 1.49 2021/04/25 15:32:21 stsp Exp $ */
/******************************************************************************
*
#define IWM_RX_MPDU_MFLG2_PAD 0x20
#define IWM_RX_MPDU_MFLG2_AMSDU 0x40
+#define IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK 0x7f
+#define IWM_RX_MPDU_AMSDU_LAST_SUBFRAME 0x80
+
#define IWM_RX_MPDU_PHY_AMPDU (1 << 5)
#define IWM_RX_MPDU_PHY_AMPDU_TOGGLE (1 << 6)
#define IWM_RX_MPDU_PHY_SHORT_PREAMBLE (1 << 7)
};
} __packed;
+#define IWM_RX_REORDER_DATA_INVALID_BAID 0x7f
+
+#define IWM_RX_MPDU_REORDER_NSSN_MASK 0x00000fff
+#define IWM_RX_MPDU_REORDER_SN_MASK 0x00fff000
+#define IWM_RX_MPDU_REORDER_SN_SHIFT 12
+#define IWM_RX_MPDU_REORDER_BAID_MASK 0x7f000000
+#define IWM_RX_MPDU_REORDER_BAID_SHIFT 24
+#define IWM_RX_MPDU_REORDER_BA_OLD_SN 0x80000000
+
struct iwm_rx_mpdu_desc {
uint16_t mpdu_len;
uint8_t mac_flags1;
/*
* TID for non QoS frames - to be written in tid_tspec
*/
+#define IWM_MAX_TID_COUNT 8
#define IWM_TID_NON_QOS IWM_MAX_TID_COUNT
/*
-/* $OpenBSD: if_iwmvar.h,v 1.58 2021/03/12 16:27:10 stsp Exp $ */
+/* $OpenBSD: if_iwmvar.h,v 1.59 2021/04/25 15:32:21 stsp Exp $ */
/*
* Copyright (c) 2014 genua mbh <info@genua.de>
int last_cqm_event;
};
+/**
+ * struct iwm_reorder_buffer - per ra/tid/queue reorder buffer
+ * @head_sn: reorder window head sn
+ * @num_stored: number of mpdus stored in the buffer
+ * @buf_size: the reorder buffer size as set by the last addba request
+ * @last_amsdu: track last A-MSDU SN for duplicate detection
+ * @last_sub_index: track A-MSDU sub-frame index for duplicate detection
+ * @reorder_timer: timer for frames in the reorder buffer. For A-MSDU
+ *	it is based on the time of the last received sub-frame
+ * @removed: prevent timer re-arming
+ * @valid: reordering is valid for this queue
+ * @consec_oldsn_drops: consecutive drops due to old SN
+ * @consec_oldsn_ampdu_gp2: A-MPDU GP2 timestamp to track
+ * when to apply old SN consecutive drop workaround
+ * @consec_oldsn_prev_drop: track whether or not an MPDU
+ * that was single/part of the previous A-MPDU was
+ * dropped due to old SN
+ */
+struct iwm_reorder_buffer {
+ uint16_t head_sn;
+ uint16_t num_stored;
+ uint16_t buf_size;
+ uint16_t last_amsdu;
+ uint8_t last_sub_index;
+ struct timeout reorder_timer;
+ int removed;
+ int valid;
+ unsigned int consec_oldsn_drops;
+ uint32_t consec_oldsn_ampdu_gp2;
+ unsigned int consec_oldsn_prev_drop;
+#define IWM_AMPDU_CONSEC_DROPS_DELBA 10
+};
+
+/**
+ * struct iwm_reorder_buf_entry - reorder buffer entry per frame sequence number
+ * @frames: list of mbufs stored (A-MSDU subframes share a sequence number)
+ * @reorder_time: time the packet was stored in the reorder buffer
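+ * @rx_pkt_status: frame status bits from the Rx descriptor
+ * @chanidx: index of the channel the frame was received on
+ * @is_shortpre: whether the frame was received with a short preamble
+ * @rate_n_flags: Rx rate and flags from the Rx descriptor
+ * @device_timestamp: device timestamp of the frame
+ * @rxi: receive info passed on to net80211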
+ */
+struct iwm_reorder_buf_entry {
+ struct mbuf_list frames;
+ struct timeval reorder_time;
+ uint32_t rx_pkt_status;
+ int chanidx;
+ int is_shortpre;
+ uint32_t rate_n_flags;
+ uint32_t device_timestamp;
+ struct ieee80211_rxinfo rxi;
+};
+
+/**
+ * struct iwm_rxba_data - BA session data
+ * @sta_id: station id
+ * @tid: tid of the session
+ * @baid: baid of the session
+ * @timeout: the timeout set in the addba request
+ * @entries_per_queue: # of buffers per queue
+ * @last_rx: last rx timestamp, updated only if timeout passed from last update
+ * @session_timer: timer to check if BA session expired, runs at 2 * timeout
+ * @sc: softc pointer, needed for timer context
+ * @reorder_buf: reorder buffer
+ * @entries: buffered frames, one entry per sequence number
+ */
+struct iwm_rxba_data {
+ uint8_t sta_id;
+ uint8_t tid;
+ uint8_t baid;
+ uint16_t timeout;
+ uint16_t entries_per_queue;
+ struct timeval last_rx;
+ struct timeout session_timer;
+ struct iwm_softc *sc;
+ struct iwm_reorder_buffer reorder_buf;
+ struct iwm_reorder_buf_entry entries[IEEE80211_BA_MAX_WINSZ];
+};
+
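+/* Map a reorder buffer pointer back to its enclosing BA session data. */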
+static inline struct iwm_rxba_data *
+iwm_rxba_data_from_reorder_buf(struct iwm_reorder_buffer *buf)
+{
+ return (void *)((uint8_t *)buf -
+ offsetof(struct iwm_rxba_data, reorder_buf));
+}
+
+/**
+ * struct iwm_rxq_dup_data - per station per rx queue data
+ * @last_seq: last sequence per tid for duplicate packet detection
+ * @last_sub_frame: last subframe packet
+ */
+struct iwm_rxq_dup_data {
+ uint16_t last_seq[IWM_MAX_TID_COUNT + 1];
+ uint8_t last_sub_frame[IWM_MAX_TID_COUNT + 1];
+};
+
struct iwm_softc {
struct device sc_dev;
struct ieee80211com sc_ic;
/* Task for firmware BlockAck setup/teardown and its arguments. */
struct task ba_task;
- int ba_start;
- int ba_tid;
- uint16_t ba_ssn;
- uint16_t ba_winsize;
+ uint32_t ba_start_tidmask;
+ uint32_t ba_stop_tidmask;
+ uint16_t ba_ssn[IWM_MAX_TID_COUNT];
+ uint16_t ba_winsize[IWM_MAX_TID_COUNT];
+ int ba_timeout_val[IWM_MAX_TID_COUNT];
/* Task for HT protection updates. */
struct task htprot_task;
struct iwm_rx_phy_info sc_last_phy_info;
int sc_ampdu_ref;
+#define IWM_MAX_BAID 32
+ struct iwm_rxba_data sc_rxba_data[IWM_MAX_BAID];
uint32_t sc_time_event_uid;
struct ieee80211_amrr_node in_amn;
struct ieee80211_ra_node in_rn;
int lq_rate_mismatch;
+
+ struct iwm_rxq_dup_data dup_data;
};
#define IWM_STATION_ID 0
#define IWM_AUX_STA_ID 1
-/* $OpenBSD: if_iwx.c,v 1.52 2021/04/19 14:27:25 stsp Exp $ */
+/* $OpenBSD: if_iwx.c,v 1.53 2021/04/25 15:32:21 stsp Exp $ */
/*
* Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
+#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
+#undef DPRINTF /* defined in ieee80211_priv.h */
#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
int iwx_mimo_enabled(struct iwx_softc *);
void iwx_htprot_task(void *);
void iwx_update_htprot(struct ieee80211com *, struct ieee80211_node *);
+void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
+ uint16_t);
+void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
int iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
uint8_t);
void iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
uint8_t);
+void iwx_rx_ba_session_expired(void *);
+void iwx_reorder_timer_expired(void *);
void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
- uint16_t, uint16_t, int);
+ uint16_t, uint16_t, int, int);
#ifdef notyet
int iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
uint8_t);
void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
struct iwx_rx_data *);
int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
+int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
+ struct ieee80211_rxinfo *);
int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
- struct ieee80211_node *);
+ struct ieee80211_node *, struct ieee80211_rxinfo *);
void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
void iwx_rx_tx_cmd_single(struct iwx_softc *, struct iwx_rx_packet *,
const char *iwx_desc_lookup(uint32_t);
void iwx_nic_error(struct iwx_softc *);
void iwx_nic_umac_error(struct iwx_softc *);
+int iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
+ struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
+int iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
+void iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
+ struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
+ struct mbuf_list *);
+int iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
+ int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
+int iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
+ struct iwx_rx_mpdu_desc *, int, int, uint32_t,
+ struct ieee80211_rxinfo *, struct mbuf_list *);
+void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
+ struct mbuf_list *);
int iwx_rx_pkt_valid(struct iwx_rx_packet *);
void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
struct mbuf_list *);
ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
}
+void
+iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
+ uint16_t ssn, uint16_t buf_size)
+{
+ reorder_buf->head_sn = ssn;
+ reorder_buf->num_stored = 0;
+ reorder_buf->buf_size = buf_size;
+ reorder_buf->last_amsdu = 0;
+ reorder_buf->last_sub_index = 0;
+ reorder_buf->removed = 0;
+ reorder_buf->valid = 0;
+ reorder_buf->consec_oldsn_drops = 0;
+ reorder_buf->consec_oldsn_ampdu_gp2 = 0;
+ reorder_buf->consec_oldsn_prev_drop = 0;
+}
+
+void
+iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
+{
+ int i;
+ struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
+ struct iwx_reorder_buf_entry *entry;
+
+ for (i = 0; i < reorder_buf->buf_size; i++) {
+ entry = &rxba->entries[i];
+ ml_purge(&entry->frames);
+ timerclear(&entry->reorder_time);
+ }
+
+ reorder_buf->removed = 1;
+ timeout_del(&reorder_buf->reorder_timer);
+ timerclear(&rxba->last_rx);
+ timeout_del(&rxba->session_timer);
+ rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
+}
+
+#define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
+
+void
+iwx_rx_ba_session_expired(void *arg)
+{
+ struct iwx_rxba_data *rxba = arg;
+ struct iwx_softc *sc = rxba->sc;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = ic->ic_bss;
+ struct timeval now, timeout, expiry;
+ int s;
+
+ s = splnet();
+ if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
+ ic->ic_state == IEEE80211_S_RUN &&
+ rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
+ getmicrouptime(&now);
+ USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
+ timeradd(&rxba->last_rx, &timeout, &expiry);
+ if (timercmp(&now, &expiry, <)) {
+ timeout_add_usec(&rxba->session_timer, rxba->timeout);
+ } else {
+ ic->ic_stats.is_ht_rx_ba_timeout++;
+ ieee80211_delba_request(ic, ni,
+ IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
+ }
+ }
+ splx(s);
+}
+
+void
+iwx_reorder_timer_expired(void *arg)
+{
+ struct mbuf_list ml = MBUF_LIST_INITIALIZER();
+ struct iwx_reorder_buffer *buf = arg;
+ struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
+ struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
+ struct iwx_softc *sc = rxba->sc;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = ic->ic_bss;
+ int i, s;
+ uint16_t sn = 0, index = 0;
+ int expired = 0;
+ int cont = 0;
+ struct timeval now, timeout, expiry;
+
+ if (!buf->num_stored || buf->removed)
+ return;
+
+ s = splnet();
+ getmicrouptime(&now);
+ USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
+
+	for (i = 0; i < buf->buf_size; i++) {
+ index = (buf->head_sn + i) % buf->buf_size;
+
+ if (ml_empty(&entries[index].frames)) {
+ /*
+ * If there is a hole and the next frame didn't expire
+ * we want to break and not advance SN.
+ */
+ cont = 0;
+ continue;
+ }
+ timeradd(&entries[index].reorder_time, &timeout, &expiry);
+ if (!cont && timercmp(&now, &expiry, <))
+ break;
+
+ expired = 1;
+ /* continue until next hole after this expired frame */
+ cont = 1;
+ sn = (buf->head_sn + (i + 1)) & 0xfff;
+ }
+
+ if (expired) {
+ /* SN is set to the last expired frame + 1 */
+ iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
+ if_input(&sc->sc_ic.ic_if, &ml);
+ ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
+ } else {
+ /*
+ * If no frame expired and there are stored frames, index is now
+ * pointing to the first unexpired frame - modify reorder timeout
+ * accordingly.
+ */
+ timeout_add_usec(&buf->reorder_timer,
+ RX_REORDER_BUF_TIMEOUT_MQ_USEC);
+ }
+
+ splx(s);
+}
+
#define IWX_MAX_RX_BA_SESSIONS 16
void
iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
- uint16_t ssn, uint16_t winsize, int start)
+ uint16_t ssn, uint16_t winsize, int timeout_val, int start)
{
struct ieee80211com *ic = &sc->sc_ic;
struct iwx_add_sta_cmd cmd;
struct iwx_node *in = (void *)ni;
int err, s;
uint32_t status;
+ struct iwx_rxba_data *rxba = NULL;
+ uint8_t baid = 0;
+
+ s = splnet();
if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
ieee80211_addba_req_refuse(ic, ni, tid);
+ splx(s);
return;
}
err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
&status);
- s = splnet();
- if (!err && (status & IWX_ADD_STA_STATUS_MASK) == IWX_ADD_STA_SUCCESS) {
- if (start) {
- sc->sc_rx_ba_sessions++;
- ieee80211_addba_req_accept(ic, ni, tid);
- } else if (sc->sc_rx_ba_sessions > 0)
- sc->sc_rx_ba_sessions--;
- } else if (start)
- ieee80211_addba_req_refuse(ic, ni, tid);
+ if (err || (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) {
+ if (start)
+ ieee80211_addba_req_refuse(ic, ni, tid);
+ splx(s);
+ return;
+ }
+
+ /* Deaggregation is done in hardware. */
+ if (start) {
+ if (!(status & IWX_ADD_STA_BAID_VALID_MASK)) {
+ ieee80211_addba_req_refuse(ic, ni, tid);
+ splx(s);
+ return;
+ }
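+		/*
+		 * The firmware's ADD_STA response carries the BAID assigned
+		 * to this BA session; use it as the index into the
+		 * sc_rxba_data array below.
+		 */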
+ baid = (status & IWX_ADD_STA_BAID_MASK) >>
+ IWX_ADD_STA_BAID_SHIFT;
+ if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
+ baid >= nitems(sc->sc_rxba_data)) {
+ ieee80211_addba_req_refuse(ic, ni, tid);
+ splx(s);
+ return;
+ }
+ rxba = &sc->sc_rxba_data[baid];
+ if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
+ ieee80211_addba_req_refuse(ic, ni, tid);
+ splx(s);
+ return;
+ }
+ rxba->sta_id = IWX_STATION_ID;
+ rxba->tid = tid;
+ rxba->baid = baid;
+ rxba->timeout = timeout_val;
+ getmicrouptime(&rxba->last_rx);
+ iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
+ winsize);
+ if (timeout_val != 0) {
+ struct ieee80211_rx_ba *ba;
+ timeout_add_usec(&rxba->session_timer,
+ timeout_val);
+ /* XXX disable net80211's BA timeout handler */
+ ba = &ni->ni_rx_ba[tid];
+ ba->ba_timeout_val = 0;
+ }
+ } else {
+ int i;
+ for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+ rxba = &sc->sc_rxba_data[i];
+ if (rxba->baid ==
+ IWX_RX_REORDER_DATA_INVALID_BAID)
+ continue;
+ if (rxba->tid != tid)
+ continue;
+ iwx_clear_reorder_buffer(sc, rxba);
+ break;
+ }
+ }
+
+ if (start) {
+ sc->sc_rx_ba_sessions++;
+ ieee80211_addba_req_accept(ic, ni, tid);
+ } else if (sc->sc_rx_ba_sessions > 0)
+ sc->sc_rx_ba_sessions--;
splx(s);
}
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni = ic->ic_bss;
int s = splnet();
+ int tid;
- if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
- refcnt_rele_wake(&sc->task_refs);
- splx(s);
- return;
+ for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
+ if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
+ break;
+ if (sc->ba_start_tidmask & (1 << tid)) {
+ iwx_sta_rx_agg(sc, ni, tid, sc->ba_ssn[tid],
+ sc->ba_winsize[tid], sc->ba_timeout_val[tid], 1);
+ sc->ba_start_tidmask &= ~(1 << tid);
+ } else if (sc->ba_stop_tidmask & (1 << tid)) {
+ iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
+ sc->ba_stop_tidmask &= ~(1 << tid);
+ }
}
-
- if (sc->ba_start)
- iwx_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn,
- sc->ba_winsize, 1);
- else
- iwx_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0, 0);
refcnt_rele_wake(&sc->task_refs);
splx(s);
struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
struct iwx_softc *sc = IC2IFP(ic)->if_softc;
- if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS)
+ if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
+ tid > IWX_MAX_TID_COUNT || (sc->ba_start_tidmask & (1 << tid)))
return ENOSPC;
- sc->ba_start = 1;
- sc->ba_tid = tid;
- sc->ba_ssn = htole16(ba->ba_winstart);
- sc->ba_winsize = htole16(ba->ba_winsize);
+ sc->ba_start_tidmask |= (1 << tid);
+ sc->ba_ssn[tid] = ba->ba_winstart;
+ sc->ba_winsize[tid] = ba->ba_winsize;
+ sc->ba_timeout_val[tid] = ba->ba_timeout_val;
iwx_add_task(sc, systq, &sc->ba_task);
return EBUSY;
{
struct iwx_softc *sc = IC2IFP(ic)->if_softc;
- sc->ba_start = 0;
- sc->ba_tid = tid;
+ if (tid > IWX_MAX_TID_COUNT || sc->ba_stop_tidmask & (1 << tid))
+ return;
+
+	sc->ba_stop_tidmask |= (1 << tid);
iwx_add_task(sc, systq, &sc->ba_task);
}
}
int
-iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
+iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
+ struct ieee80211_rxinfo *rxi)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_key *k;
(uint64_t)ivp[5] << 24 |
(uint64_t)ivp[6] << 32 |
(uint64_t)ivp[7] << 40;
- if (pn <= *prsc) {
+ if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
+ if (pn < *prsc) {
+ ic->ic_stats.is_ccmp_replays++;
+ return 1;
+ }
+ } else if (pn <= *prsc) {
ic->ic_stats.is_ccmp_replays++;
return 1;
}
return 0;
}
-void
-iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
- uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
- uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
- struct mbuf_list *ml)
+int
+iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
+ struct ieee80211_rxinfo *rxi)
{
struct ieee80211com *ic = &sc->sc_ic;
+ struct ifnet *ifp = IC2IFP(ic);
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
- struct ieee80211_channel *bss_chan;
- uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
- struct ifnet *ifp = IC2IFP(ic);
-
- if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
- chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
+ int ret = 0;
+ uint8_t type, subtype;
wh = mtod(m, struct ieee80211_frame *);
- ni = ieee80211_find_rxnode(ic, wh);
- if (ni == ic->ic_bss) {
- /*
- * We may switch ic_bss's channel during scans.
- * Record the current channel so we can restore it later.
- */
- bss_chan = ni->ni_chan;
- IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
- }
- ni->ni_chan = &ic->ic_channels[chanidx];
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ if (type == IEEE80211_FC0_TYPE_CTL)
+ return 0;
+
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
+ return 0;
+
+ ni = ieee80211_find_rxnode(ic, wh);
/* Handle hardware decryption. */
if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
&& (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
ic->ic_stats.is_ccmp_dec_errs++;
- ifp->if_ierrors++;
- m_freem(m);
- ieee80211_release_node(ic, ni);
- return;
+ ret = 1;
+ goto out;
}
/* Check whether decryption was successful or not. */
		if ((rx_pkt_status &
		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
ic->ic_stats.is_ccmp_dec_errs++;
- ifp->if_ierrors++;
- m_freem(m);
- ieee80211_release_node(ic, ni);
- return;
- }
- if (iwx_ccmp_decap(sc, m, ni) != 0) {
- ifp->if_ierrors++;
- m_freem(m);
- ieee80211_release_node(ic, ni);
- return;
+ ret = 1;
+ goto out;
}
rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
}
+out:
+ if (ret)
+ ifp->if_ierrors++;
+ ieee80211_release_node(ic, ni);
+ return ret;
+}
+
+void
+iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
+ uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
+ uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
+ struct mbuf_list *ml)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ifnet *ifp = IC2IFP(ic);
+ struct ieee80211_frame *wh;
+ struct ieee80211_node *ni;
+ struct ieee80211_channel *bss_chan;
+ uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
+
+ if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
+ chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
+
+ wh = mtod(m, struct ieee80211_frame *);
+ ni = ieee80211_find_rxnode(ic, wh);
+ if (ni == ic->ic_bss) {
+ /*
+ * We may switch ic_bss's channel during scans.
+ * Record the current channel so we can restore it later.
+ */
+ bss_chan = ni->ni_chan;
+ IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
+ }
+ ni->ni_chan = &ic->ic_channels[chanidx];
+
+ if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
+ iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
+ ifp->if_ierrors++;
+ m_freem(m);
+ ieee80211_release_node(ic, ni);
+ return;
+ }
#if NBPFILTER > 0
if (sc->sc_drvbpf != NULL) {
ieee80211_release_node(ic, ni);
}
+/*
+ * Drop duplicate 802.11 retransmissions
+ * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
+ * and handle pseudo-duplicate frames which result from deaggregation
+ * of A-MSDU frames in hardware.
+ */
+int
+iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
+ struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_node *in = (void *)ic->ic_bss;
+ struct iwx_rxq_dup_data *dup_data = &in->dup_data;
+ uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ int hasqos = ieee80211_has_qos(wh);
+ uint16_t seq;
+
+ if (type == IEEE80211_FC0_TYPE_CTL ||
+ (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
+ IEEE80211_IS_MULTICAST(wh->i_addr1))
+ return 0;
+
+ if (hasqos) {
+ tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
+ if (tid > IWX_MAX_TID_COUNT)
+ tid = IWX_MAX_TID_COUNT;
+ }
+
+ /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
+ subframe_idx = desc->amsdu_info &
+ IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+ seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
+ if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
+ dup_data->last_seq[tid] == seq &&
+ dup_data->last_sub_frame[tid] >= subframe_idx)
+ return 1;
+
+ /*
+ * Allow the same frame sequence number for all A-MSDU subframes
+ * following the first subframe.
+ * Otherwise these subframes would be discarded as replays.
+ */
+ if (dup_data->last_seq[tid] == seq &&
+ subframe_idx > dup_data->last_sub_frame[tid] &&
+ (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
+ rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
+ }
+
+ dup_data->last_seq[tid] = seq;
+ dup_data->last_sub_frame[tid] = subframe_idx;
+
+ return 0;
+}
+
+/*
+ * Returns true if sn2 - buffer_size < sn1 < sn2.
+ * To be used only in order to compare reorder buffer head with NSSN.
+ * We fully trust NSSN unless it is behind us due to reorder timeout.
+ * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
+ */
+int
+iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
+{
+ return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
+}
+
+void
+iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
+ struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
+ uint16_t nssn, struct mbuf_list *ml)
+{
+ struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
+ uint16_t ssn = reorder_buf->head_sn;
+
+ /* ignore nssn smaller than head sn - this can happen due to timeout */
+ if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
+ goto set_timer;
+
+ while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
+ int index = ssn % reorder_buf->buf_size;
+ struct mbuf *m;
+ int chanidx, is_shortpre;
+ uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
+ struct ieee80211_rxinfo *rxi;
+
+ /* This data is the same for all A-MSDU subframes. */
+ chanidx = entries[index].chanidx;
+ rx_pkt_status = entries[index].rx_pkt_status;
+ is_shortpre = entries[index].is_shortpre;
+ rate_n_flags = entries[index].rate_n_flags;
+ device_timestamp = entries[index].device_timestamp;
+ rxi = &entries[index].rxi;
+
+ /*
+		 * Empty the list. It will hold more than one frame for an
+		 * A-MSDU. An empty list is valid as well since nssn indicates
+		 * that the frames were received.
+ */
+ while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
+ iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
+ rate_n_flags, device_timestamp, rxi, ml);
+ reorder_buf->num_stored--;
+
+ /*
+ * Allow the same frame sequence number and CCMP PN for
+ * all A-MSDU subframes following the first subframe.
+ * Otherwise they would be discarded as replays.
+ */
+ rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
+ rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
+ }
+
+ ssn = (ssn + 1) & 0xfff;
+ }
+ reorder_buf->head_sn = nssn;
+
+set_timer:
+ if (reorder_buf->num_stored && !reorder_buf->removed) {
+ timeout_add_usec(&reorder_buf->reorder_timer,
+ RX_REORDER_BUF_TIMEOUT_MQ_USEC);
+ } else
+ timeout_del(&reorder_buf->reorder_timer);
+}
+
+int
+iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
+ struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+
+ if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
+ /* we have a new (A-)MPDU ... */
+
+ /*
+ * reset counter to 0 if we didn't have any oldsn in
+ * the last A-MPDU (as detected by GP2 being identical)
+ */
+ if (!buffer->consec_oldsn_prev_drop)
+ buffer->consec_oldsn_drops = 0;
+
+ /* either way, update our tracking state */
+ buffer->consec_oldsn_ampdu_gp2 = gp2;
+ } else if (buffer->consec_oldsn_prev_drop) {
+ /*
+ * tracking state didn't change, and we had an old SN
+ * indication before - do nothing in this case, we
+ * already noted this one down and are waiting for the
+ * next A-MPDU (by GP2)
+ */
+ return 0;
+ }
+
+ /* return unless this MPDU has old SN */
+ if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN))
+ return 0;
+
+ /* update state */
+ buffer->consec_oldsn_prev_drop = 1;
+ buffer->consec_oldsn_drops++;
+
+ /* if limit is reached, send del BA and reset state */
+ if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA) {
+ ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
+ 0, tid);
+ buffer->consec_oldsn_prev_drop = 0;
+ buffer->consec_oldsn_drops = 0;
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Handle re-ordering of frames which were de-aggregated in hardware.
+ * Returns 1 if the MPDU was consumed (buffered or dropped).
+ * Returns 0 if the MPDU should be passed to upper layer.
+ */
+int
+iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
+ struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
+ uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
+ struct mbuf_list *ml)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_frame *wh;
+ struct ieee80211_node *ni;
+ struct iwx_rxba_data *rxba;
+ struct iwx_reorder_buffer *buffer;
+ uint32_t reorder_data = le32toh(desc->reorder_data);
+ int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU);
+ int last_subframe =
+ (desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME);
+ uint8_t tid;
+ uint8_t subframe_idx = (desc->amsdu_info &
+ IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
+ struct iwx_reorder_buf_entry *entries;
+ int index;
+ uint16_t nssn, sn;
+ uint8_t baid, type, subtype;
+ int hasqos;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ hasqos = ieee80211_has_qos(wh);
+ tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
+
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ ni = ieee80211_find_rxnode(ic, wh);
+
+ /*
+ * We are only interested in Block Ack requests and unicast QoS data.
+ */
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1))
+ return 0;
+ if (hasqos) {
+ if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
+ return 0;
+ } else {
+ if (type != IEEE80211_FC0_TYPE_CTL ||
+ subtype != IEEE80211_FC0_SUBTYPE_BAR)
+ return 0;
+ }
+
+ baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK) >>
+ IWX_RX_MPDU_REORDER_BAID_SHIFT;
+ if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
+ baid >= nitems(sc->sc_rxba_data))
+ return 0;
+
+ rxba = &sc->sc_rxba_data[baid];
+ if (rxba == NULL || tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
+ return 0;
+
+ /* Bypass A-MPDU re-ordering in net80211. */
+ rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
+
+ nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK;
+ sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK) >>
+ IWX_RX_MPDU_REORDER_SN_SHIFT;
+
+ buffer = &rxba->reorder_buf;
+ entries = &rxba->entries[0];
+
+ if (!buffer->valid) {
+ if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN)
+ return 0;
+ buffer->valid = 1;
+ }
+
+ if (type == IEEE80211_FC0_TYPE_CTL &&
+ subtype == IEEE80211_FC0_SUBTYPE_BAR) {
+ iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
+ goto drop;
+ }
+
+ /*
+ * If there was a significant jump in the nssn, adjust.
+ * If the SN is smaller than the NSSN it might need to go into the
+ * reorder buffer first, in which case we just release up to it and
+ * the rest of the function will take care of storing it and
+ * releasing up to the nssn.
+ */
+ if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
+ buffer->buf_size) ||
+ !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
+ uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
+ ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
+ iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
+ }
+
+ if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
+ device_timestamp)) {
+ /* BA session will be torn down. */
+ ic->ic_stats.is_ht_rx_ba_window_jump++;
+ goto drop;
+ }
+
+ /* drop any outdated packets */
+ if (SEQ_LT(sn, buffer->head_sn)) {
+ ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
+ goto drop;
+ }
+
+ /* release immediately if allowed by nssn and no stored frames */
+ if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
+ if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
+ (!is_amsdu || last_subframe))
+ buffer->head_sn = nssn;
+ return 0;
+ }
+
+ /*
+ * Release immediately if there are no stored frames and the sn is
+ * equal to the head. This can happen due to the reorder timer, where
+ * the NSSN is behind head_sn. Once we have released everything and
+ * the next frame in the sequence arrives, the NSSN says we cannot
+ * release it immediately, even though there is no hole and we could
+ * move the window forward.
+ */
+ if (!buffer->num_stored && sn == buffer->head_sn) {
+ if (!is_amsdu || last_subframe)
+ buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
+ return 0;
+ }
+
+ index = sn % buffer->buf_size;
+
+ /*
+ * Check if we already stored this frame.
+ * An A-MSDU is either received as a whole or not at all, so the logic
+ * is simple: if the buffer already holds frames at this position and
+ * the last stored A-MSDU had a different SN, this is a retransmission.
+ * If the SN is the same, it is the same A-MSDU only if the subframe
+ * index is incrementing; otherwise it is a retransmission.
+ */
+ if (!ml_empty(&entries[index].frames)) {
+ if (!is_amsdu) {
+ ic->ic_stats.is_ht_rx_ba_no_buf++;
+ goto drop;
+ } else if (sn != buffer->last_amsdu ||
+ buffer->last_sub_index >= subframe_idx) {
+ ic->ic_stats.is_ht_rx_ba_no_buf++;
+ goto drop;
+ }
+ } else {
+ /* This data is the same for all A-MSDU subframes. */
+ entries[index].chanidx = chanidx;
+ entries[index].rx_pkt_status = le16toh(desc->status);
+ entries[index].is_shortpre = is_shortpre;
+ entries[index].rate_n_flags = rate_n_flags;
+ entries[index].device_timestamp = device_timestamp;
+ memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
+ }
+
+ /* put in reorder buffer */
+ ml_enqueue(&entries[index].frames, m);
+ buffer->num_stored++;
+ getmicrouptime(&entries[index].reorder_time);
+
+ if (is_amsdu) {
+ buffer->last_amsdu = sn;
+ buffer->last_sub_index = subframe_idx;
+ }
+
+ /*
+ * We cannot trust the NSSN for A-MSDU sub-frames that are not the
+ * last. The reason is that the NSSN advances on the first sub-frame
+ * and may cause the reorder buffer to advance before all sub-frames
+ * arrive. Example: the reorder buffer contains SN 0 and 2, and we
+ * receive an A-MSDU with SN 1. The NSSN for the first sub-frame is 3,
+ * so the driver releases SN 0, 1 and 2. When sub-frame 1 (the second
+ * sub-frame of that A-MSDU) arrives, the reorder buffer is already
+ * ahead of it and it will be dropped.
+ * If the last sub-frame is not on this queue, we will get a frame
+ * release notification with an up-to-date NSSN.
+ */
+ if (!is_amsdu || last_subframe)
+ iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
+
+ return 1;
+
+drop:
+ m_freem(m);
+ return 1;
+}
+
void
iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
size_t maxlen, struct mbuf_list *ml)
m_adj(m, 2);
}
+ memset(&rxi, 0, sizeof(rxi));
+
+ /*
+ * Hardware de-aggregates A-MSDUs and copies the same MAC header
+ * in place for each subframe. But it leaves the 'A-MSDU present'
+ * bit set in the frame header. We need to clear this bit ourselves.
+ * (XXX This workaround is not required on AX200/AX201 devices that
+ * have been tested by me, but it's unclear when this problem was
+ * fixed in the hardware. It definitely affects the 9k generation.
+ * Leaving this in place for now since some 9k/AX200 hybrids seem
+ * to exist that we may eventually add support for.)
+ *
+ * And we must allow the same CCMP PN for subframes following the
+ * first subframe. Otherwise they would be discarded as replays.
+ */
+ if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ uint8_t subframe_idx = (desc->amsdu_info &
+ IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
+ if (subframe_idx > 0)
+ rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
+ if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
+ m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
+ struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
+ struct ieee80211_qosframe_addr4 *);
+ qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
+ } else if (ieee80211_has_qos(wh) &&
+ m->m_len >= sizeof(struct ieee80211_qosframe)) {
+ struct ieee80211_qosframe *qwh = mtod(m,
+ struct ieee80211_qosframe *);
+ qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
+ }
+ }
+
+ /*
+ * Verify decryption before duplicate detection. The latter uses
+ * the TID supplied in QoS frame headers and this TID is implicitly
+ * verified as part of the CCMP nonce.
+ */
+ if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
+ m_freem(m);
+ return;
+ }
+
+ if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
+ m_freem(m);
+ return;
+ }
+
phy_info = le16toh(desc->phy_info);
rate_n_flags = le32toh(desc->v1.rate_n_flags);
chanidx = desc->v1.channel;
rssi = (0 - IWX_MIN_DBM) + rssi; /* normalize */
rssi = MIN(rssi, ic->ic_max_rssi); /* clip to max. 100% */
- memset(&rxi, 0, sizeof(rxi));
rxi.rxi_rssi = rssi;
rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
+ if (iwx_rx_reorder(sc, m, chanidx, desc,
+ (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
+ rate_n_flags, device_timestamp, &rxi, ml))
+ return;
+
iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
(phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
rate_n_flags, device_timestamp, &rxi, ml);
}
sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
sc->sc_rx_ba_sessions = 0;
+ sc->ba_start_tidmask = 0;
+ sc->ba_stop_tidmask = 0;
}
return 0;
{
struct ifnet *ifp = IC2IFP(ic);
struct iwx_softc *sc = ifp->if_softc;
+ int i;
if (ic->ic_state == IEEE80211_S_RUN) {
iwx_del_task(sc, systq, &sc->ba_task);
iwx_del_task(sc, systq, &sc->htprot_task);
+ for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+ struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
+ iwx_clear_reorder_buffer(sc, rxba);
+ }
}
sc->ns_nstate = nstate;
sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
sc->sc_rx_ba_sessions = 0;
+ sc->ba_start_tidmask = 0;
+ sc->ba_stop_tidmask = 0;
+ memset(sc->ba_ssn, 0, sizeof(sc->ba_ssn));
+ memset(sc->ba_winsize, 0, sizeof(sc->ba_winsize));
+ memset(sc->ba_timeout_val, 0, sizeof(sc->ba_timeout_val));
sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
+ for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+ struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
+ iwx_clear_reorder_buffer(sc, rxba);
+ }
ifp->if_timer = sc->sc_tx_timer = 0;
splx(s);
struct ifnet *ifp = &ic->ic_if;
const char *intrstr;
int err;
- int txq_i, i;
+ int txq_i, i, j;
sc->sc_pct = pa->pa_pc;
sc->sc_pcitag = pa->pa_tag;
#if NBPFILTER > 0
iwx_radiotap_attach(sc);
#endif
+ for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+ struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
+ rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
+ rxba->sc = sc;
+ timeout_set(&rxba->session_timer, iwx_rx_ba_session_expired,
+ rxba);
+ timeout_set(&rxba->reorder_buf.reorder_timer,
+ iwx_reorder_timer_expired, &rxba->reorder_buf);
+ for (j = 0; j < nitems(rxba->entries); j++)
+ ml_init(&rxba->entries[j].frames);
+ }
task_set(&sc->init_task, iwx_init_task, sc);
task_set(&sc->newstate_task, iwx_newstate_task, sc);
task_set(&sc->ba_task, iwx_ba_task, sc);
-/* $OpenBSD: if_iwxreg.h,v 1.18 2021/01/17 14:24:00 jcs Exp $ */
+/* $OpenBSD: if_iwxreg.h,v 1.19 2021/04/25 15:32:21 stsp Exp $ */
/*-
* Based on BSD-licensed source modules in the Linux iwlwifi driver,
#define IWX_RX_MPDU_MFLG2_PAD 0x20
#define IWX_RX_MPDU_MFLG2_AMSDU 0x40
+#define IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK 0x7f
+#define IWX_RX_MPDU_AMSDU_LAST_SUBFRAME 0x80
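+/* amsdu_info: bits 0-6 hold the A-MSDU subframe index, bit 7 marks the last subframe. */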
+
#define IWX_RX_MPDU_PHY_AMPDU (1 << 5)
#define IWX_RX_MPDU_PHY_AMPDU_TOGGLE (1 << 6)
#define IWX_RX_MPDU_PHY_SHORT_PREAMBLE (1 << 7)
};
} __packed;
+#define IWX_RX_REORDER_DATA_INVALID_BAID 0x7f
+
+#define IWX_RX_MPDU_REORDER_NSSN_MASK 0x00000fff
+#define IWX_RX_MPDU_REORDER_SN_MASK 0x00fff000
+#define IWX_RX_MPDU_REORDER_SN_SHIFT 12
+#define IWX_RX_MPDU_REORDER_BAID_MASK 0x7f000000
+#define IWX_RX_MPDU_REORDER_BAID_SHIFT 24
+#define IWX_RX_MPDU_REORDER_BA_OLD_SN 0x80000000
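+/*
+ * Layout of the 32-bit reorder_data word, per the masks above:
+ * bits 0-11 NSSN, bits 12-23 SN, bits 24-30 BAID, bit 31 old-SN flag.
+ */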
+
struct iwx_rx_mpdu_desc {
uint16_t mpdu_len;
uint8_t mac_flags1;
/*
* TID for non QoS frames - to be written in tid_tspec
*/
+#define IWX_MAX_TID_COUNT 8
#define IWX_TID_NON_QOS 0
/*
-/* $OpenBSD: if_iwxvar.h,v 1.13 2020/10/11 07:05:28 mpi Exp $ */
+/* $OpenBSD: if_iwxvar.h,v 1.14 2021/04/25 15:32:21 stsp Exp $ */
/*
* Copyright (c) 2014 genua mbh <info@genua.de>
int paging_cnt;
};
+/**
+ * struct iwx_reorder_buffer - per ra/tid/queue reorder buffer
+ * @head_sn: reorder window head sn
+ * @num_stored: number of mpdus stored in the buffer
+ * @buf_size: the reorder buffer size as set by the last addba request
+ * @last_amsdu: track last A-MSDU SN for duplicate detection
+ * @last_sub_index: track A-MSDU sub frame index for duplicate detection
+ * @reorder_timer: timer for frames in the reorder buffer. For A-MSDU
+ * it is the time of the last received sub-frame
+ * @removed: prevent timer re-arming
+ * @valid: reordering is valid for this queue
+ * @consec_oldsn_drops: consecutive drops due to old SN
+ * @consec_oldsn_ampdu_gp2: A-MPDU GP2 timestamp to track
+ * when to apply old SN consecutive drop workaround
+ * @consec_oldsn_prev_drop: track whether an MPDU that was a single
+ * MPDU or part of the previous A-MPDU was dropped due to an old SN
+ */
+struct iwx_reorder_buffer {
+ uint16_t head_sn;
+ uint16_t num_stored;
+ uint16_t buf_size;
+ uint16_t last_amsdu;
+ uint8_t last_sub_index;
+ struct timeout reorder_timer;
+ int removed;
+ int valid;
+ unsigned int consec_oldsn_drops;
+ uint32_t consec_oldsn_ampdu_gp2;
+ unsigned int consec_oldsn_prev_drop;
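+/* Tear down the BA session after this many consecutive old-SN drops. */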
+#define IWX_AMPDU_CONSEC_DROPS_DELBA 10
+};
+
+/**
+ * struct iwx_reorder_buf_entry - reorder buffer entry per frame sequence number
+ * @frames: list of mbufs stored (A-MSDU subframes share a sequence number)
+ * @reorder_time: time the packet was stored in the reorder buffer
+ * @rx_pkt_status, @chanidx, @is_shortpre, @rate_n_flags,
+ * @device_timestamp, @rxi: receive metadata recorded when the frame was
+ * buffered; passed to iwx_rx_frame() when the frame is released and
+ * shared by all A-MSDU subframes stored in this entry
+ */
+struct iwx_reorder_buf_entry {
+ struct mbuf_list frames;
+ struct timeval reorder_time;
+ uint32_t rx_pkt_status;
+ int chanidx;
+ int is_shortpre;
+ uint32_t rate_n_flags;
+ uint32_t device_timestamp;
+ struct ieee80211_rxinfo rxi;
+};
+
+/**
+ * struct iwx_rxba_data - BA session data
+ * @sta_id: station id
+ * @tid: tid of the session
+ * @baid: baid of the session
+ * @timeout: the timeout set in the addba request
+ * @entries_per_queue: # of buffers per queue
+ * @last_rx: last rx timestamp, updated only if the timeout has passed
+ * since the last update
+ * @session_timer: timer to check if BA session expired, runs at 2 * timeout
+ * @sc: softc pointer, needed for timer context
+ * @reorder_buf: reorder buffer
+ * @entries: buffered frames, one entry per sequence number
+ */
+struct iwx_rxba_data {
+ uint8_t sta_id;
+ uint8_t tid;
+ uint8_t baid;
+ uint16_t timeout;
+ uint16_t entries_per_queue;
+ struct timeval last_rx;
+ struct timeout session_timer;
+ struct iwx_softc *sc;
+ struct iwx_reorder_buffer reorder_buf;
+ struct iwx_reorder_buf_entry entries[IEEE80211_BA_MAX_WINSZ];
+};
+
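+/*
+ * Map a reorder buffer pointer back to the BA session data containing
+ * it. The reorder timer callback only receives a pointer to the
+ * iwx_reorder_buffer (see the timeout_set() call during attach).
+ */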
+static inline struct iwx_rxba_data *
+iwx_rxba_data_from_reorder_buf(struct iwx_reorder_buffer *buf)
+{
+ return (void *)((uint8_t *)buf -
+ offsetof(struct iwx_rxba_data, reorder_buf));
+}
+
+/**
+ * struct iwx_rxq_dup_data - per station per rx queue data
+ * @last_seq: last sequence per tid for duplicate packet detection
+ * @last_sub_frame: last subframe packet
+ */
+struct iwx_rxq_dup_data {
+ uint16_t last_seq[IWX_MAX_TID_COUNT + 1];
+ uint8_t last_sub_frame[IWX_MAX_TID_COUNT + 1];
+};
+
struct iwx_softc {
struct device sc_dev;
struct ieee80211com sc_ic;
/* Task for firmware BlockAck setup/teardown and its arguments. */
struct task ba_task;
- int ba_start;
- int ba_tid;
- uint16_t ba_ssn;
- uint16_t ba_winsize;
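+ /*
+ * Bitmasks of TIDs for which ba_task should start or stop a BlockAck
+ * session; the arrays below hold the corresponding per-TID parameters.
+ */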
+ uint32_t ba_start_tidmask;
+ uint32_t ba_stop_tidmask;
+ uint16_t ba_ssn[IWX_MAX_TID_COUNT];
+ uint16_t ba_winsize[IWX_MAX_TID_COUNT];
+ int ba_timeout_val[IWX_MAX_TID_COUNT];
/* Task for HT protection updates. */
struct task htprot_task;
struct iwx_rx_phy_info sc_last_phy_info;
int sc_ampdu_ref;
+#define IWX_MAX_BAID 32
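+ /* RX BlockAck session and reorder buffer state, indexed by BAID. */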
+ struct iwx_rxba_data sc_rxba_data[IWX_MAX_BAID];
uint32_t sc_time_event_uid;
uint16_t in_id;
uint16_t in_color;
+
+ struct iwx_rxq_dup_data dup_data;
};
#define IWX_STATION_ID 0
#define IWX_AUX_STA_ID 1
-/* $OpenBSD: ieee80211_input.c,v 1.232 2021/03/26 10:18:55 stsp Exp $ */
+/* $OpenBSD: ieee80211_input.c,v 1.233 2021/04/25 15:32:21 stsp Exp $ */
/*-
* Copyright (c) 2001 Atsushi Onoe
#include <net80211/ieee80211_priv.h>
struct mbuf *ieee80211_input_hwdecrypt(struct ieee80211com *,
- struct ieee80211_node *, struct mbuf *);
+ struct ieee80211_node *, struct mbuf *,
+ struct ieee80211_rxinfo *rxi);
struct mbuf *ieee80211_defrag(struct ieee80211com *, struct mbuf *, int);
void ieee80211_defrag_timeout(void *);
void ieee80211_input_ba(struct ieee80211com *, struct mbuf *,
/* Post-processing for drivers which perform decryption in hardware. */
struct mbuf *
ieee80211_input_hwdecrypt(struct ieee80211com *ic, struct ieee80211_node *ni,
- struct mbuf *m)
+ struct mbuf *m, struct ieee80211_rxinfo *rxi)
{
struct ieee80211_key *k;
struct ieee80211_frame *wh;
}
if (ieee80211_ccmp_get_pn(&pn, &prsc, m, k) != 0)
return NULL;
- if (pn <= *prsc) {
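+ /*
+ * A-MSDU subframes following the first subframe carry the same CCMP
+ * PN (IEEE80211_RXI_HWDEC_SAME_PN, set by the driver); accept an
+ * equal PN in that case but still reject a PN which goes backwards.
+ */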
+ if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
+ if (pn < *prsc) {
+ ic->ic_stats.is_ccmp_replays++;
+ return NULL;
+ }
+ } else if (pn <= *prsc) {
ic->ic_stats.is_ccmp_replays++;
return NULL;
}
}
if (ieee80211_tkip_get_tsc(&pn, &prsc, m, k) != 0)
return NULL;
-
- if (pn <= *prsc) {
+ if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
+ if (pn < *prsc) {
+ ic->ic_stats.is_tkip_replays++;
+ return NULL;
+ }
+ } else if (pn <= *prsc) {
ic->ic_stats.is_tkip_replays++;
return NULL;
}
orxseq = &ni->ni_qos_rxseqs[tid];
else
orxseq = &ni->ni_rxseq;
- if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
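+ /*
+ * With IEEE80211_RXI_SAME_SEQ the driver indicates an A-MSDU subframe
+ * which must carry the same sequence number as the first subframe;
+ * any other sequence number is treated as a duplicate.
+ */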
+ if (rxi->rxi_flags & IEEE80211_RXI_SAME_SEQ) {
+ if (nrxseq != *orxseq) {
+ /* duplicate, silently discarded */
+ ic->ic_stats.is_rx_dup++;
+ goto out;
+ }
+ } else if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
nrxseq == *orxseq) {
/* duplicate, silently discarded */
ic->ic_stats.is_rx_dup++;
goto err;
}
} else {
- m = ieee80211_input_hwdecrypt(ic, ni, m);
+ m = ieee80211_input_hwdecrypt(ic, ni, m, rxi);
if (m == NULL)
goto err;
}
ba->ba_params = (params & IEEE80211_ADDBA_BA_POLICY);
ba->ba_params |= ((ba->ba_winsize << IEEE80211_ADDBA_BUFSZ_SHIFT) |
(tid << IEEE80211_ADDBA_TID_SHIFT));
-#if 0
- /* iwm(4) 9k and iwx(4) need more work before AMSDU can be enabled. */
ba->ba_params |= IEEE80211_ADDBA_AMSDU;
-#endif
ba->ba_winstart = ssn;
ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
/* allocate and setup our reordering buffer */
-/* $OpenBSD: ieee80211_node.h,v 1.87 2020/07/21 08:38:59 stsp Exp $ */
+/* $OpenBSD: ieee80211_node.h,v 1.88 2021/04/25 15:32:21 stsp Exp $ */
/* $NetBSD: ieee80211_node.h,v 1.9 2004/04/30 22:57:32 dyoung Exp $ */
/*-
};
#define IEEE80211_RXI_HWDEC 0x00000001
#define IEEE80211_RXI_AMPDU_DONE 0x00000002
+#define IEEE80211_RXI_HWDEC_SAME_PN 0x00000004
+#define IEEE80211_RXI_SAME_SEQ 0x00000008
/* Block Acknowledgement Record */
struct ieee80211_tx_ba {
-/* $OpenBSD: ieee80211_proto.c,v 1.101 2020/12/09 15:50:58 stsp Exp $ */
+/* $OpenBSD: ieee80211_proto.c,v 1.102 2021/04/25 15:32:21 stsp Exp $ */
/* $NetBSD: ieee80211_proto.c,v 1.8 2004/04/30 23:58:20 dyoung Exp $ */
/*-
ba->ba_params =
(ba->ba_winsize << IEEE80211_ADDBA_BUFSZ_SHIFT) |
(tid << IEEE80211_ADDBA_TID_SHIFT);
-#if 0
- /* iwm(4) 9k and iwx(4) need more work before AMSDU can be enabled. */
ba->ba_params |= IEEE80211_ADDBA_AMSDU;
-#endif
if ((ic->ic_htcaps & IEEE80211_HTCAP_DELAYEDBA) == 0)
/* immediate BA */
ba->ba_params |= IEEE80211_ADDBA_BA_POLICY;