-/* $OpenBSD: qwx.c,v 1.6 2024/01/25 10:07:47 stsp Exp $ */
+/* $OpenBSD: qwx.c,v 1.7 2024/01/25 10:11:04 stsp Exp $ */
/*
* Copyright 2023 Stefan Sperling <stsp@openbsd.org>
#endif
#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+/*
+ * Linux's ffz() returns the 0-based index of the first zero bit, while
+ * BSD ffs(3) is 1-based.  Subtract one so that ported code (e.g. the
+ * REO block-slot allocation in qwx_hal_reo_cmd_flush_cache) sees the
+ * Linux semantics it was written against.
+ */
+#define ffz(x) (ffs(~(x)) - 1)
#define FIELD_GET(_m, _v) ((typeof(_m))(((_v) & (_m)) >> __bf_shf(_m)))
#define FIELD_PREP(_m, _v) (((typeof(_m))(_v) << __bf_shf(_m)) & (_m))
#define BIT(x) (1UL << (x))
struct qwx_htc_svc_conn_resp *);
void qwx_hal_srng_shadow_update_hp_tp(struct qwx_softc *, struct hal_srng *);
void qwx_wmi_free_dbring_caps(struct qwx_softc *);
+int qwx_wmi_set_peer_param(struct qwx_softc *, uint8_t *, uint32_t,
+ uint32_t, uint32_t, uint32_t);
+int qwx_wmi_peer_rx_reorder_queue_setup(struct qwx_softc *, int, int,
+ uint8_t *, uint64_t, uint8_t, uint8_t, uint32_t);
+const void **qwx_wmi_tlv_parse_alloc(struct qwx_softc *, const void *, size_t);
int qwx_core_init(struct qwx_softc *);
int qwx_qmi_event_server_arrive(struct qwx_softc *);
int qwx_mac_register(struct qwx_softc *);
int qwx_mac_start(struct qwx_softc *);
void qwx_mac_scan_finish(struct qwx_softc *);
+int qwx_dp_tx_send_reo_cmd(struct qwx_softc *, struct dp_rx_tid *,
+ enum hal_reo_cmd_type , struct ath11k_hal_reo_cmd *,
+ void (*func)(struct qwx_dp *, void *, enum hal_reo_cmd_status));
int qwx_scan(struct qwx_softc *);
void qwx_scan_abort(struct qwx_softc *);
struct ieee80211_node *
qwx_node_alloc(struct ieee80211com *ic)
{
- return malloc(sizeof(struct qwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
+ struct qwx_node *nq;
+
+ nq = malloc(sizeof(struct qwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
+ nq->peer.peer_id = HAL_INVALID_PEERID;
+ return (struct ieee80211_node *)nq;
}
int
sc->ns_nstate = IEEE80211_S_INIT;
sc->scan.state = ATH11K_SCAN_IDLE;
sc->vdev_id_11d_scan = QWX_11D_INVALID_VDEV_ID;
+ sc->pdevs_active = 0;
/* power off hardware */
qwx_core_deinit(sc);
}
}
+/*
+ * Encode a HAL_REO_GET_QUEUE_STATS command into the given source ring
+ * TLV.  Returns the command number taken from the descriptor's command
+ * header.
+ */
+int
+qwx_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv, struct ath11k_hal_reo_cmd *cmd)
+{
+	struct hal_reo_get_queue_stats *desc;
+
+	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
+	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+	desc = (struct hal_reo_get_queue_stats *)tlv->value;
+
+	/* Request a status ring entry only if the caller asked for one. */
+	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+	/* 64-bit REO queue address is split into lo/hi fields. */
+	desc->queue_addr_lo = cmd->addr_lo;
+	desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
+	    cmd->addr_hi);
+	if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
+		desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
+
+	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+/*
+ * Encode a HAL_REO_FLUSH_CACHE command into the given source ring TLV.
+ * When HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER is set, one of the limited
+ * cache block resources is claimed; returns ENOSPC if none is free.
+ * Otherwise returns the command number from the descriptor's command
+ * header.
+ */
+int
+qwx_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv,
+    struct ath11k_hal_reo_cmd *cmd)
+{
+	struct hal_reo_flush_cache *desc;
+	/*
+	 * NOTE(review): ffz() is defined in this file as ffs(~x), which
+	 * is 1-based, while the Linux ath11k code this was ported from
+	 * uses a 0-based ffz for slot indexing — verify the slot index.
+	 */
+	uint8_t avail_slot = ffz(hal->avail_blk_resource);
+
+	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+		if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
+			return ENOSPC;
+
+		hal->current_blk_index = avail_slot;
+	}
+
+	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
+	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+	desc = (struct hal_reo_flush_cache *)tlv->value;
+
+	/* Request a status ring entry only if the caller asked for one. */
+	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+	desc->cache_addr_lo = cmd->addr_lo;
+	desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
+	    cmd->addr_hi);
+
+	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
+		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;
+
+	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
+		desc->info0 |=
+		    FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX,
+		    avail_slot);
+	}
+
+	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
+		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;
+
+	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
+		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;
+
+	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+/*
+ * Encode a HAL_REO_UPDATE_RX_REO_QUEUE command into the given source
+ * ring TLV.  cmd->upd0 bits select which queue fields to update, and
+ * cmd->upd1/upd2 carry the corresponding new values.  Returns the
+ * command number from the descriptor's command header.
+ */
+int
+qwx_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
+    struct ath11k_hal_reo_cmd *cmd)
+{
+	struct hal_reo_update_rx_queue *desc;
+
+	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
+	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+	desc = (struct hal_reo_update_rx_queue *)tlv->value;
+
+	/* Request a status ring entry only if the caller asked for one. */
+	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+	desc->queue_addr_lo = cmd->addr_lo;
+	/* info0: one "update this field" selector bit per upd0 flag. */
+	desc->info0 =
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
+	    cmd->addr_hi) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
+	    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));
+
+	/* info1: the new values for the fields selected above. */
+	desc->info1 =
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
+	    cmd->rx_queue_num) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
+	    !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
+	    FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
+	    !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
+	    !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
+	    FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
+	    !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
+	    !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
+	    !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
+	    !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
+	    !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
+	    !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
+	    !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
+	    !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
+	    !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));
+
+	/* Translate the PN size in bits to its hardware encoding. */
+	if (cmd->pn_size == 24)
+		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
+	else if (cmd->pn_size == 48)
+		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
+	else if (cmd->pn_size == 128)
+		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;
+
+	/*
+	 * The descriptor stores the BA window size minus one; a window
+	 * of exactly 1 is bumped to 2 before the subtraction.
+	 */
+	if (cmd->ba_window_size < 1)
+		cmd->ba_window_size = 1;
+
+	if (cmd->ba_window_size == 1)
+		cmd->ba_window_size++;
+
+	desc->info2 = FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
+	    cmd->ba_window_size - 1) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
+	    !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
+	    FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
+	    !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
+	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
+	    !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));
+
+	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+/*
+ * Claim the next entry in the REO command source ring, encode the
+ * given command into it, and arm the shadow-register update timer.
+ * On success the return value is the REO command number produced by
+ * the encoder; on failure it is an errno (ENOBUFS, ENOSPC, ENOTSUP,
+ * EINVAL).
+ *
+ * NOTE(review): command numbers and errnos are both positive here, so
+ * a small command number is indistinguishable from an error value.
+ * Linux ath11k avoids this by returning negative errnos; confirm that
+ * callers only test for the specific errno values above.
+ */
+int
+qwx_hal_reo_cmd_send(struct qwx_softc *sc, struct hal_srng *srng,
+    enum hal_reo_cmd_type type, struct ath11k_hal_reo_cmd *cmd)
+{
+	struct hal_tlv_hdr *reo_desc;
+	int ret;
+#ifdef notyet
+	spin_lock_bh(&srng->lock);
+#endif
+	qwx_hal_srng_access_begin(sc, srng);
+	reo_desc = (struct hal_tlv_hdr *)qwx_hal_srng_src_get_next_entry(sc, srng);
+	if (!reo_desc) {
+		/* Command ring is full. */
+		ret = ENOBUFS;
+		goto out;
+	}
+
+	switch (type) {
+	case HAL_REO_CMD_GET_QUEUE_STATS:
+		ret = qwx_hal_reo_cmd_queue_stats(reo_desc, cmd);
+		break;
+	case HAL_REO_CMD_FLUSH_CACHE:
+		ret = qwx_hal_reo_cmd_flush_cache(&sc->hal, reo_desc, cmd);
+		break;
+	case HAL_REO_CMD_UPDATE_RX_QUEUE:
+		ret = qwx_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
+		break;
+	case HAL_REO_CMD_FLUSH_QUEUE:
+	case HAL_REO_CMD_UNBLOCK_CACHE:
+	case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
+		printf("%s: unsupported reo command %d\n",
+		    sc->sc_dev.dv_xname, type);
+		ret = ENOTSUP;
+		break;
+	default:
+		printf("%s: unknown reo command %d\n",
+		    sc->sc_dev.dv_xname, type);
+		ret = EINVAL;
+		break;
+	}
+
+	qwx_dp_shadow_start_timer(sc, srng, &sc->dp.reo_cmd_timer);
+out:
+	qwx_hal_srng_access_end(sc, srng);
+#ifdef notyet
+	spin_unlock_bh(&srng->lock);
+#endif
+	return ret;
+}
int
qwx_dp_srng_common_setup(struct qwx_softc *sc)
{
wakeup(&sc->wmi.unified_ready);
}
+/*
+ * Parse a WMI peer-delete response event from an mbuf into
+ * *peer_del_resp.  Returns 0 on success or an errno.  The TLV pointer
+ * table allocated by qwx_wmi_tlv_parse_alloc() is freed on all paths.
+ */
+int
+qwx_pull_peer_del_resp_ev(struct qwx_softc *sc, struct mbuf *m,
+    struct wmi_peer_delete_resp_event *peer_del_resp)
+{
+	const void **tb;
+	const struct wmi_peer_delete_resp_event *ev;
+	int ret;
+
+	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
+	if (tb == NULL) {
+		ret = ENOMEM;
+		printf("%s: failed to parse tlv: %d\n",
+		    sc->sc_dev.dv_xname, ret);
+		return ret;
+	}
+
+	/* The table is indexed by WMI TLV tag. */
+	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
+	if (!ev) {
+		printf("%s: failed to fetch peer delete resp ev\n",
+		    sc->sc_dev.dv_xname);
+		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+		return EPROTO;
+	}
+
+	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
+
+	peer_del_resp->vdev_id = ev->vdev_id;
+	IEEE80211_ADDR_COPY(peer_del_resp->peer_macaddr.addr,
+	    ev->peer_macaddr.addr);
+
+	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+	return 0;
+}
+
+/*
+ * Handle a WMI peer-delete response event: parse the event payload and
+ * wake up the thread waiting on sc->peer_delete_done.
+ */
+void
+qwx_peer_delete_resp_event(struct qwx_softc *sc, struct mbuf *m)
+{
+	struct wmi_peer_delete_resp_event peer_del_resp;
+
+	if (qwx_pull_peer_del_resp_ev(sc, m, &peer_del_resp) != 0) {
+		printf("%s: failed to extract peer delete resp\n",
+		    sc->sc_dev.dv_xname);
+		return;
+	}
+
+	sc->peer_delete_done = 1;
+	wakeup(&sc->peer_delete_done);
+
+	DNPRINTF(QWX_D_WMI, "%s: peer delete resp for vdev id %d addr %s\n",
+	    __func__, peer_del_resp.vdev_id,
+	    ether_sprintf(peer_del_resp.peer_macaddr.addr));
+}
+
+/*
+ * Return a human-readable name for a WMI vdev start response status
+ * code; unrecognized codes map to "unknown".
+ */
+const char *
+qwx_wmi_vdev_resp_print(uint32_t vdev_resp_status)
+{
+	if (vdev_resp_status == WMI_VDEV_START_RESPONSE_INVALID_VDEVID)
+		return "invalid vdev id";
+	if (vdev_resp_status == WMI_VDEV_START_RESPONSE_NOT_SUPPORTED)
+		return "not supported";
+	if (vdev_resp_status == WMI_VDEV_START_RESPONSE_DFS_VIOLATION)
+		return "dfs violation";
+	if (vdev_resp_status == WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN)
+		return "invalid regdomain";
+	return "unknown";
+}
+
+/*
+ * Parse a WMI vdev-start response event from an mbuf into *vdev_rsp.
+ * Returns 0 on success or an errno.  The TLV pointer table allocated
+ * by qwx_wmi_tlv_parse_alloc() is freed on all paths.
+ */
+int
+qwx_pull_vdev_start_resp_tlv(struct qwx_softc *sc, struct mbuf *m,
+    struct wmi_vdev_start_resp_event *vdev_rsp)
+{
+	const void **tb;
+	const struct wmi_vdev_start_resp_event *ev;
+	int ret;
+
+	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
+	if (tb == NULL) {
+		ret = ENOMEM;
+		printf("%s: failed to parse tlv: %d\n",
+		    sc->sc_dev.dv_xname, ret);
+		return ret;
+	}
+
+	/* The table is indexed by WMI TLV tag. */
+	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
+	if (!ev) {
+		printf("%s: failed to fetch vdev start resp ev\n",
+		    sc->sc_dev.dv_xname);
+		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+		return EPROTO;
+	}
+
+	memset(vdev_rsp, 0, sizeof(*vdev_rsp));
+
+	vdev_rsp->vdev_id = ev->vdev_id;
+	vdev_rsp->requestor_id = ev->requestor_id;
+	vdev_rsp->resp_type = ev->resp_type;
+	vdev_rsp->status = ev->status;
+	vdev_rsp->chain_mask = ev->chain_mask;
+	vdev_rsp->smps_mode = ev->smps_mode;
+	vdev_rsp->mac_id = ev->mac_id;
+	vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
+	vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
+
+	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+	return 0;
+}
+
+/*
+ * Handle a WMI vdev-start response event: log a failed start, then
+ * wake up the thread waiting in qwx_mac_vdev_setup_sync().
+ */
+void
+qwx_vdev_start_resp_event(struct qwx_softc *sc, struct mbuf *m)
+{
+	struct wmi_vdev_start_resp_event vdev_start_resp;
+	uint32_t status;
+
+	if (qwx_pull_vdev_start_resp_tlv(sc, m, &vdev_start_resp) != 0) {
+		printf("%s: failed to extract vdev start resp\n",
+		    sc->sc_dev.dv_xname);
+		return;
+	}
+
+	status = vdev_start_resp.status;
+	if (status) {
+		printf("%s: vdev start resp error status %d (%s)\n",
+		    sc->sc_dev.dv_xname, status,
+		    qwx_wmi_vdev_resp_print(status));
+	}
+
+	sc->vdev_setup_done = 1;
+	wakeup(&sc->vdev_setup_done);
+
+	DNPRINTF(QWX_D_WMI, "%s: vdev start resp for vdev id %d\n", __func__,
+	    vdev_start_resp.vdev_id);
+}
+
int
qwx_wmi_tlv_iter_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
const void *ptr, void *data)
(void *)tb);
}
-
const void **
qwx_wmi_tlv_parse_alloc(struct qwx_softc *sc, const void *ptr, size_t len)
{
case WMI_READY_EVENTID:
qwx_ready_event(sc, m);
break;
-#if 0
case WMI_PEER_DELETE_RESP_EVENTID:
- ath11k_peer_delete_resp_event(ab, skb);
+ qwx_peer_delete_resp_event(sc, m);
break;
case WMI_VDEV_START_RESP_EVENTID:
- ath11k_vdev_start_resp_event(ab, skb);
+ qwx_vdev_start_resp_event(sc, m);
break;
+#if 0
case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
ath11k_bcn_tx_status_event(ab, skb);
break;
m_freem(m);
}
+/*
+ * Reassemble a 6-byte MAC address from the 32-bit low word and 16-bit
+ * high word carried in HTT peer-map events (little-endian on the wire).
+ */
+static inline void
+qwx_dp_get_mac_addr(uint32_t addr_l32, uint16_t addr_h16, uint8_t *addr)
+{
+#if 0 /* Not needed on OpenBSD? We do swapping in software... */
+	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+		addr_l32 = swab32(addr_l32);
+		addr_h16 = swab16(addr_h16);
+	}
+#endif
+	uint32_t val32;
+	uint16_t val16;
+
+	val32 = le32toh(addr_l32);
+	memcpy(addr, &val32, 4);
+	val16 = le16toh(addr_h16);
+	memcpy(addr + 4, &val16, IEEE80211_ADDR_LEN - 4);
+}
+
+/*
+ * Handle an HTT peer-map event: store the firmware-assigned peer ID,
+ * AST hash and hardware peer ID in the qwx_node matching the given MAC
+ * address, then wake up any thread waiting on sc->peer_mapped.
+ *
+ * NOTE(review): the event is silently ignored if no node with this MAC
+ * exists; confirm this cannot leave a waiter stuck on sc->peer_mapped.
+ */
+void
+qwx_peer_map_event(struct qwx_softc *sc, uint8_t vdev_id, uint16_t peer_id,
+    uint8_t *mac_addr, uint16_t ast_hash, uint16_t hw_peer_id)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni;
+	struct qwx_node *nq;
+	struct ath11k_peer *peer;
+#ifdef notyet
+	spin_lock_bh(&ab->base_lock);
+#endif
+	ni = ieee80211_find_node(ic, mac_addr);
+	if (ni == NULL)
+		return;
+	/* The driver embeds its peer state in the net80211 node. */
+	nq = (struct qwx_node *)ni;
+	peer = &nq->peer;
+
+	peer->vdev_id = vdev_id;
+	peer->peer_id = peer_id;
+	peer->ast_hash = ast_hash;
+	peer->hw_peer_id = hw_peer_id;
+#if 0
+	ether_addr_copy(peer->addr, mac_addr);
+	list_add(&peer->list, &ab->peers);
+#endif
+	sc->peer_mapped = 1;
+	wakeup(&sc->peer_mapped);
+
+	DNPRINTF(QWX_D_HTT, "%s: peer map vdev %d peer %s id %d\n",
+	    __func__, vdev_id, ether_sprintf(mac_addr), peer_id);
+#ifdef notyet
+	spin_unlock_bh(&ab->base_lock);
+#endif
+}
+
+
void
qwx_dp_htt_htc_t2h_msg_handler(struct qwx_softc *sc, struct mbuf *m)
{
struct htt_resp_msg *resp = mtod(m, struct htt_resp_msg *);
enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE,
*(uint32_t *)resp);
-#if 0
uint16_t peer_id;
uint8_t vdev_id;
uint8_t mac_addr[IEEE80211_ADDR_LEN];
uint16_t peer_mac_h16;
uint16_t ast_hash;
uint16_t hw_peer_id;
-#endif
+
DPRINTF("%s: dp_htt rx msg type: 0x%0x\n", __func__, type);
switch (type) {
dp->htt_tgt_version_received = 1;
wakeup(&dp->htt_tgt_version_received);
break;
-#if 0
case HTT_T2H_MSG_TYPE_PEER_MAP:
vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
- resp->peer_map_ev.info);
+ resp->peer_map_ev.info);
peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
- resp->peer_map_ev.info);
+ resp->peer_map_ev.info);
peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
- resp->peer_map_ev.info1);
- ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
- peer_mac_h16, mac_addr);
- ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
+ resp->peer_map_ev.info1);
+ qwx_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+ peer_mac_h16, mac_addr);
+ qwx_peer_map_event(sc, vdev_id, peer_id, mac_addr, 0, 0);
break;
case HTT_T2H_MSG_TYPE_PEER_MAP2:
vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
- resp->peer_map_ev.info);
+ resp->peer_map_ev.info);
peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
- resp->peer_map_ev.info);
+ resp->peer_map_ev.info);
peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
- resp->peer_map_ev.info1);
- ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
- peer_mac_h16, mac_addr);
+ resp->peer_map_ev.info1);
+ qwx_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+ peer_mac_h16, mac_addr);
ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
- resp->peer_map_ev.info2);
+ resp->peer_map_ev.info2);
hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
resp->peer_map_ev.info1);
- ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
- hw_peer_id);
+ qwx_peer_map_event(sc, vdev_id, peer_id, mac_addr, ast_hash,
+ hw_peer_id);
break;
+#if 0
case HTT_T2H_MSG_TYPE_PEER_UNMAP:
case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
return ret;
}
+/*
+ * Send a WMI_PEER_CREATE command for the peer described by 'param' on
+ * the given pdev.  Returns 0 on success or an errno; the command mbuf
+ * is freed here only if the send fails.
+ */
+int
+qwx_wmi_send_peer_create_cmd(struct qwx_softc *sc, uint8_t pdev_id,
+    struct peer_create_params *param)
+{
+	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_peer_create_cmd *cmd;
+	struct mbuf *m;
+	int ret;
+
+	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
+	if (!m)
+		return ENOMEM;
+
+	/* The command TLV follows the HTC and WMI headers. */
+	cmd = (struct wmi_peer_create_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, param->peer_addr);
+	cmd->peer_type = param->peer_type;
+	cmd->vdev_id = param->vdev_id;
+
+	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_CREATE_CMDID);
+	if (ret) {
+		printf("%s: failed to submit WMI_PEER_CREATE cmd\n",
+		    sc->sc_dev.dv_xname);
+		m_freem(m);
+		return ret;
+	}
+
+	DNPRINTF(QWX_D_WMI, "%s: cmd peer create vdev_id %d peer_addr %s\n",
+	    __func__, param->vdev_id, ether_sprintf(param->peer_addr));
+
+	return ret;
+}
+
+/*
+ * Send a WMI_PEER_DELETE command for the given peer MAC on a vdev.
+ * Returns 0 on success or an errno; the command mbuf is freed here
+ * only if the send fails.
+ */
+int
+qwx_wmi_send_peer_delete_cmd(struct qwx_softc *sc, const uint8_t *peer_addr,
+    uint8_t vdev_id, uint8_t pdev_id)
+{
+	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_peer_delete_cmd *cmd;
+	struct mbuf *m;
+	int ret;
+
+	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
+	if (!m)
+		return ENOMEM;
+
+	/* The command TLV follows the HTC and WMI headers. */
+	cmd = (struct wmi_peer_delete_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr);
+	cmd->vdev_id = vdev_id;
+
+	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_DELETE_CMDID);
+	if (ret) {
+		printf("%s: failed to send WMI_PEER_DELETE cmd\n",
+		    sc->sc_dev.dv_xname);
+		m_freem(m);
+		return ret;
+	}
+
+	/*
+	 * Use ether_sprintf() like qwx_wmi_send_peer_create_cmd(); "%pM"
+	 * is a Linux printf extension which the OpenBSD kernel printf
+	 * does not implement.
+	 */
+	DNPRINTF(QWX_D_WMI, "%s: cmd peer delete vdev_id %d peer_addr %s\n",
+	    __func__, vdev_id, ether_sprintf(peer_addr));
+
+	return 0;
+}
+
void
qwx_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
struct target_resource_config *tg_cfg)
return 0;
}
+/*
+ * Translate vdev start channel arguments into the wire-format
+ * struct wmi_channel.  The destination is zeroed before being filled.
+ */
-int
-qwx_core_start(struct qwx_softc *sc)
+void
+qwx_wmi_put_wmi_channel(struct wmi_channel *chan,
+    struct wmi_vdev_start_req_arg *arg)
{
-	int ret;
+	uint32_t center_freq1 = arg->channel.band_center_freq1;
-	ret = qwx_wmi_attach(sc);
-	if (ret) {
-		printf("%s: failed to attach wmi: %d\n",
-		    sc->sc_dev.dv_xname, ret);
-		return ret;
-	}
+	memset(chan, 0, sizeof(*chan));
-	ret = qwx_htc_init(sc);
-	if (ret) {
-		printf("%s: failed to init htc: %d\n",
-		    sc->sc_dev.dv_xname, ret);
-		goto err_wmi_detach;
-	}
+	chan->mhz = arg->channel.freq;
+	chan->band_center_freq1 = arg->channel.band_center_freq1;
-	ret = sc->ops.start(sc);
-	if (ret) {
-		printf("%s: failed to start host interface: %d\n",
-		    sc->sc_dev.dv_xname, ret);
-		goto err_wmi_detach;
-	}
+	/* For HE160, freq1 becomes the 80 MHz segment center 40 MHz off
+	 * the 160 MHz center, and freq2 carries the 160 MHz center. */
+	if (arg->channel.mode == MODE_11AX_HE160) {
+		if (arg->channel.freq > arg->channel.band_center_freq1)
+			chan->band_center_freq1 = center_freq1 + 40;
+		else
+			chan->band_center_freq1 = center_freq1 - 40;
-	ret = qwx_htc_wait_target(sc);
-	if (ret) {
-		printf("%s: failed to connect to HTC: %d\n",
-		    sc->sc_dev.dv_xname, ret);
-		goto err_hif_stop;
-	}
+		chan->band_center_freq2 = arg->channel.band_center_freq1;
+	} else if ((arg->channel.mode == MODE_11AC_VHT80_80) ||
+	    (arg->channel.mode == MODE_11AX_HE80_80)) {
+		chan->band_center_freq2 = arg->channel.band_center_freq2;
+	} else
+		chan->band_center_freq2 = 0;
+
+	/* Channel capability/usage flags. */
+	chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
+	if (arg->channel.passive)
+		chan->info |= WMI_CHAN_INFO_PASSIVE;
+	if (arg->channel.allow_ibss)
+		chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
+	if (arg->channel.allow_ht)
+		chan->info |= WMI_CHAN_INFO_ALLOW_HT;
+	if (arg->channel.allow_vht)
+		chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
+	if (arg->channel.allow_he)
+		chan->info |= WMI_CHAN_INFO_ALLOW_HE;
+	if (arg->channel.ht40plus)
+		chan->info |= WMI_CHAN_INFO_HT40_PLUS;
+	if (arg->channel.chan_radar)
+		chan->info |= WMI_CHAN_INFO_DFS;
+	if (arg->channel.freq2_radar)
+		chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
+
+	/* Regulatory power limits and antenna gain. */
+	chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+	    arg->channel.max_power) |
+	    FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+	    arg->channel.max_reg_power);
+
+	chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+	    arg->channel.max_antenna_gain) |
+	    FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
+	    arg->channel.max_power);
+}
- ret = qwx_dp_htt_connect(&sc->dp);
- if (ret) {
- printf("%s: failed to connect to HTT: %d\n",
- sc->sc_dev.dv_xname, ret);
- goto err_hif_stop;
+/*
+ * Send a WMI_VDEV_START_REQUEST (or WMI_VDEV_RESTART_REQUEST when
+ * 'restart' is set) built from 'arg' to the firmware.  Returns 0 on
+ * success or an errno; the command mbuf is freed here only if the send
+ * fails.
+ */
+int
+qwx_wmi_vdev_start(struct qwx_softc *sc, struct wmi_vdev_start_req_arg *arg,
+    int pdev_id, int restart)
+{
+	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_vdev_start_request_cmd *cmd;
+	struct mbuf *m;
+	struct wmi_channel *chan;
+	struct wmi_tlv *tlv;
+	void *ptr;
+	int ret, len;
+
+	if (arg->ssid_len > sizeof(cmd->ssid.ssid))
+		return EINVAL;
+
+	/* Command TLV + channel TLV + empty NoA descriptor array TLV. */
+	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+
+	m = qwx_wmi_alloc_mbuf(len);
+	if (!m)
+		return ENOMEM;
+
+	cmd = (struct wmi_vdev_start_request_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+	    WMI_TAG_VDEV_START_REQUEST_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->vdev_id = arg->vdev_id;
+	cmd->beacon_interval = arg->bcn_intval;
+	cmd->bcn_tx_rate = arg->bcn_tx_rate;
+	cmd->dtim_period = arg->dtim_period;
+	cmd->num_noa_descriptors = arg->num_noa_descriptors;
+	cmd->preferred_rx_streams = arg->pref_rx_streams;
+	cmd->preferred_tx_streams = arg->pref_tx_streams;
+	cmd->cac_duration_ms = arg->cac_duration_ms;
+	cmd->regdomain = arg->regdomain;
+	cmd->he_ops = arg->he_ops;
+	cmd->mbssid_flags = arg->mbssid_flags;
+	cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id;
+
+	/* SSID/PMF settings only apply to an initial start, not restart. */
+	if (!restart) {
+		if (arg->ssid) {
+			cmd->ssid.ssid_len = arg->ssid_len;
+			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+		}
+		if (arg->hidden_ssid)
+			cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
+		if (arg->pmf_enabled)
+			cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
+	}
+
+	cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
+	if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags))
+		cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;
+
+	/* The channel TLV follows the command structure. */
+	ptr = mtod(m, void *) + sizeof(struct ath11k_htc_hdr) +
+	    sizeof(struct wmi_cmd_hdr) + sizeof(*cmd);
+	chan = ptr;
+
+	qwx_wmi_put_wmi_channel(chan, arg);
+
+	chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*chan) - TLV_HDR_SIZE);
+	ptr += sizeof(*chan);
+
+	/* Empty array TLV; no NoA descriptors are sent. */
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+	    FIELD_PREP(WMI_TLV_LEN, 0);
+
+	/* Note: This is a nested TLV containing:
+	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+	 */
+
+	ptr += sizeof(*tlv);
+
+	ret = qwx_wmi_cmd_send(wmi, m, restart ?
+	    WMI_VDEV_RESTART_REQUEST_CMDID : WMI_VDEV_START_REQUEST_CMDID);
+	if (ret) {
+		printf("%s: failed to submit vdev_%s cmd\n",
+		    sc->sc_dev.dv_xname, restart ? "restart" : "start");
+		m_freem(m);
+		return ret;
+	}
+
+	DNPRINTF(QWX_D_WMI, "%s: cmd vdev %s id 0x%x freq %u mode 0x%x\n",
+	    __func__, restart ? "restart" : "start", arg->vdev_id,
+	    arg->channel.freq, arg->channel.mode);
+
+	return ret;
+}
+
+int
+qwx_core_start(struct qwx_softc *sc)
+{
+ int ret;
+
+ ret = qwx_wmi_attach(sc);
+ if (ret) {
+ printf("%s: failed to attach wmi: %d\n",
+ sc->sc_dev.dv_xname, ret);
+ return ret;
+ }
+
+ ret = qwx_htc_init(sc);
+ if (ret) {
+ printf("%s: failed to init htc: %d\n",
+ sc->sc_dev.dv_xname, ret);
+ goto err_wmi_detach;
+ }
+
+ ret = sc->ops.start(sc);
+ if (ret) {
+ printf("%s: failed to start host interface: %d\n",
+ sc->sc_dev.dv_xname, ret);
+ goto err_wmi_detach;
+ }
+
+ ret = qwx_htc_wait_target(sc);
+ if (ret) {
+ printf("%s: failed to connect to HTC: %d\n",
+ sc->sc_dev.dv_xname, ret);
+ goto err_hif_stop;
+ }
+
+ ret = qwx_dp_htt_connect(&sc->dp);
+ if (ret) {
+ printf("%s: failed to connect to HTT: %d\n",
+ sc->sc_dev.dv_xname, ret);
+ goto err_hif_stop;
}
ret = qwx_wmi_connect(sc);
ch->antennamax = 0;
/* TODO: Use appropriate phymodes */
- if (IEEE80211_IS_CHAN_2GHZ(channel))
+ if (IEEE80211_IS_CHAN_A(channel))
+ ch->phy_mode = MODE_11A;
+ else if (IEEE80211_IS_CHAN_G(channel))
ch->phy_mode = MODE_11G;
else
- ch->phy_mode = MODE_11A;
+ ch->phy_mode = MODE_11B;
#ifdef notyet
if (channel->band == NL80211_BAND_6GHZ &&
cfg80211_channel_is_psc(channel))
#ifdef notyet
mutex_unlock(&ar->conf_mutex);
#endif
-#if 0
- rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
- &ab->pdevs[ar->pdev_idx]);
-#endif
-
+ sc->pdevs_active |= (1 << pdev->pdev_id);
return 0;
err:
#ifdef notyet
printf("%s: not implemented\n", __func__);
}
+/*
+ * Sleep until a vdev start/restart response event sets
+ * sc->vdev_setup_done (see qwx_vdev_start_resp_event).  Returns 0 on
+ * success, ESHUTDOWN if the firmware has crashed, or the tsleep_nsec()
+ * error (e.g. EWOULDBLOCK after the 1-second timeout).
+ */
+int
+qwx_mac_vdev_setup_sync(struct qwx_softc *sc)
+{
+	int ret;
+
+#ifdef notyet
+	lockdep_assert_held(&ar->conf_mutex);
+#endif
+	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
+		return ESHUTDOWN;
+
+	while (!sc->vdev_setup_done) {
+		ret = tsleep_nsec(&sc->vdev_setup_done, 0, "qwxvdev",
+		    SEC_TO_NSEC(1));
+		if (ret) {
+			printf("%s: vdev start timeout\n",
+			    sc->sc_dev.dv_xname);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Configure transmit beamforming for a vif.  Currently a no-op that
+ * always reports success.
+ */
+int
+qwx_mac_set_txbf_conf(struct qwx_vif *arvif)
+{
+	/* TX beamforming is not yet supported. */
+	return 0;
+}
+
+/*
+ * Common implementation of vdev start and restart: translate net80211
+ * channel and mode state into WMI vdev start arguments, issue the
+ * start/restart command and wait for the firmware's response.
+ * Returns 0 on success or an errno.
+ */
+int
+qwx_mac_vdev_start_restart(struct qwx_softc *sc, struct qwx_vif *arvif,
+    int pdev_id, int restart)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_channel *chan = ic->ic_bss->ni_chan;
+	struct wmi_vdev_start_req_arg arg = {};
+	int ret = 0;
+#ifdef notyet
+	lockdep_assert_held(&ar->conf_mutex);
+#endif
+#if 0
+	reinit_completion(&ar->vdev_setup_done);
+#endif
+	arg.vdev_id = arvif->vdev_id;
+	arg.dtim_period = ic->ic_dtim_period;
+	arg.bcn_intval = ic->ic_lintval;
+
+	arg.channel.freq = chan->ic_freq;
+	arg.channel.band_center_freq1 = chan->ic_freq;
+	arg.channel.band_center_freq2 = chan->ic_freq;
+
+	/* Only legacy a/b/g phy modes are supported so far. */
+	switch (ic->ic_curmode) {
+	case IEEE80211_MODE_11A:
+		arg.channel.mode = MODE_11A;
+		break;
+	case IEEE80211_MODE_11B:
+		arg.channel.mode = MODE_11B;
+		break;
+	case IEEE80211_MODE_11G:
+		arg.channel.mode = MODE_11G;
+		break;
+	default:
+		printf("%s: unsupported phy mode %d\n",
+		    sc->sc_dev.dv_xname, ic->ic_curmode);
+		return ENOTSUP;
+	}
+
+	arg.channel.min_power = 0;
+	arg.channel.max_power = 20; /* XXX */
+	arg.channel.max_reg_power = 20; /* XXX */
+	arg.channel.max_antenna_gain = 0; /* XXX */
+
+	arg.pref_tx_streams = 1;
+	arg.pref_rx_streams = 1;
+
+	arg.mbssid_flags = 0;
+	arg.mbssid_tx_vdev_id = 0;
+	if (isset(sc->wmi.svc_map,
+	    WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT)) {
+		ret = qwx_mac_setup_vdev_params_mbssid(arvif,
+		    &arg.mbssid_flags, &arg.mbssid_tx_vdev_id);
+		if (ret)
+			return ret;
+	}
+#if 0
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		arg.ssid = arvif->u.ap.ssid;
+		arg.ssid_len = arvif->u.ap.ssid_len;
+		arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+		/* For now allow DFS for AP mode */
+		arg.channel.chan_radar =
+		    !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+
+		arg.channel.freq2_radar = ctx->radar_enabled;
+
+		arg.channel.passive = arg.channel.chan_radar;
+
+		spin_lock_bh(&ab->base_lock);
+		arg.regdomain = ar->ab->dfs_region;
+		spin_unlock_bh(&ab->base_lock);
+	}
+#endif
+	/* XXX Treat channels >= 52 (5 GHz DFS range) as passive. */
+	arg.channel.passive |= !!(ieee80211_chan2ieee(ic, chan) >= 52);
+
+	DNPRINTF(QWX_D_MAC, "%s: vdev %d start center_freq %d phymode %s\n",
+	    __func__, arg.vdev_id, arg.channel.freq,
+	    qwx_wmi_phymode_str(arg.channel.mode));
+
+	/* Cleared here; set again by qwx_vdev_start_resp_event(). */
+	sc->vdev_setup_done = 0;
+	ret = qwx_wmi_vdev_start(sc, &arg, pdev_id, restart);
+	if (ret) {
+		printf("%s: failed to %s WMI vdev %i\n", sc->sc_dev.dv_xname,
+		    restart ? "restart" : "start", arg.vdev_id);
+		return ret;
+	}
+
+	ret = qwx_mac_vdev_setup_sync(sc);
+	if (ret) {
+		printf("%s: failed to synchronize setup for vdev %i %s: %d\n",
+		    sc->sc_dev.dv_xname, arg.vdev_id,
+		    restart ? "restart" : "start", ret);
+		return ret;
+	}
+
+	if (!restart)
+		sc->num_started_vdevs++;
+
+	DNPRINTF(QWX_D_MAC, "%s: vdev %d started\n", __func__, arvif->vdev_id);
+
+	/* Enable CAC Flag in the driver by checking the channel DFS cac time,
+	 * i.e dfs_cac_ms value which will be valid only for radar channels
+	 * and state as NL80211_DFS_USABLE which indicates CAC needs to be
+	 * done before channel usage. This flags is used to drop rx packets.
+	 * during CAC.
+	 */
+	/* TODO Set the flag for other interface types as required */
+#if 0
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
+	    chandef->chan->dfs_cac_ms &&
+	    chandef->chan->dfs_state == NL80211_DFS_USABLE) {
+		set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+		ath11k_dbg(ab, ATH11K_DBG_MAC,
+		    "CAC Started in chan_freq %d for vdev %d\n",
+		    arg.channel.freq, arg.vdev_id);
+	}
+#endif
+	ret = qwx_mac_set_txbf_conf(arvif);
+	if (ret)
+		printf("%s: failed to set txbf conf for vdev %d: %d\n",
+		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+
+	return 0;
+}
+
+/* Restart an already-configured vdev on the given pdev. */
+int
+qwx_mac_vdev_restart(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id)
+{
+	return qwx_mac_vdev_start_restart(sc, arvif, pdev_id, 1);
+}
+
+/* Start a vdev for the first time on the given pdev. */
+int
+qwx_mac_vdev_start(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id)
+{
+	return qwx_mac_vdev_start_restart(sc, arvif, pdev_id, 0);
+}
+
int
qwx_mac_op_add_interface(struct qwx_pdev *pdev)
{
}
#endif
- bit = ffs(sc->free_vdev_map);
+ if (sc->free_vdev_map == 0) {
+ printf("%s: cannot add interface; all vdevs are busy\n",
+ sc->sc_dev.dv_xname);
+ ret = EBUSY;
+ goto err;
+ }
+ bit = ffs(sc->free_vdev_map) - 1;
arvif->vdev_id = bit;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
}
}
-void
-qwx_wmi_start_scan_init(struct qwx_softc *sc, struct scan_req_params *arg)
+/*
+ * Map a net80211 bitrate (in units of 500 kbit/s) to an ath11k hardware
+ * rate code for the current channel. Returns -1 for unsupported rates.
+ */
+int
+qwx_mac_get_rate_hw_value(struct ieee80211com *ic,
+ struct ieee80211_node *ni, int bitrate)
{
-	/* setup commonly used values */
-	arg->scan_req_id = 1;
-	if (sc->state_11d == ATH11K_11D_PREPARING)
-		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+ uint32_t preamble;
+ uint16_t hw_value;
+ int shortpre = 0;
+
+ /* CCK channels use the CCK preamble; everything else is OFDM. */
+ if (IEEE80211_IS_CHAN_CCK(ni->ni_chan))
+ preamble = WMI_RATE_PREAMBLE_CCK;
else
-		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
-	arg->dwell_time_active = 50;
-	arg->dwell_time_active_2g = 0;
-	arg->dwell_time_passive = 150;
-	arg->dwell_time_active_6g = 40;
-	arg->dwell_time_passive_6g = 30;
-	arg->min_rest_time = 50;
-	arg->max_rest_time = 500;
-	arg->repeat_probe_time = 0;
-	arg->probe_spacing_time = 0;
-	arg->idle_time = 0;
-	arg->max_scan_time = 20000;
-	arg->probe_delay = 5;
-	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
-	    WMI_SCAN_EVENT_COMPLETED | WMI_SCAN_EVENT_BSS_CHANNEL |
-	    WMI_SCAN_EVENT_FOREIGN_CHAN | WMI_SCAN_EVENT_DEQUEUED;
-	arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ preamble = WMI_RATE_PREAMBLE_OFDM;
-	if (isset(sc->wmi.svc_map,
-	    WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE))
-		arg->scan_ctrl_flags_ext |=
-		    WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE;
+ /* Short preamble only applies to 2 GHz CCK rates above 1 Mbit/s. */
+ if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+ IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
+ shortpre = 1;
-	arg->num_bssid = 1;
+ switch (bitrate) {
+ case 2:
+ hw_value = ATH11K_HW_RATE_CCK_LP_1M;
+ break;
+ case 4:
+ if (shortpre)
+ hw_value = ATH11K_HW_RATE_CCK_SP_2M;
+ else
+ hw_value = ATH11K_HW_RATE_CCK_LP_2M;
+ break;
+ case 11:
+ if (shortpre)
+ hw_value = ATH11K_HW_RATE_CCK_SP_5_5M;
+ else
+ hw_value = ATH11K_HW_RATE_CCK_LP_5_5M;
+ break;
+ case 22:
+ if (shortpre)
+ hw_value = ATH11K_HW_RATE_CCK_SP_11M;
+ else
+ hw_value = ATH11K_HW_RATE_CCK_LP_11M;
+ break;
+ case 12:
+ hw_value = ATH11K_HW_RATE_OFDM_6M;
+ break;
+ case 18:
+ hw_value = ATH11K_HW_RATE_OFDM_9M;
+ break;
+ case 24:
+ hw_value = ATH11K_HW_RATE_OFDM_12M;
+ break;
+ case 36:
+ hw_value = ATH11K_HW_RATE_OFDM_18M;
+ break;
+ case 48:
+ hw_value = ATH11K_HW_RATE_OFDM_24M;
+ break;
+ case 72:
+ hw_value = ATH11K_HW_RATE_OFDM_36M;
+ break;
+ case 96:
+ hw_value = ATH11K_HW_RATE_OFDM_48M;
+ break;
+ case 108:
+ hw_value = ATH11K_HW_RATE_OFDM_54M;
+ break;
+ default:
+ /* Unsupported bitrate; callers check for a negative result. */
+ return -1;
+ }
-	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
-	 * ZEROs in probe request
-	 */
-	IEEE80211_ADDR_COPY(arg->bssid_list[0].addr, etheranyaddr);
+ return ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
}
-enum ath11k_spectral_mode
-qwx_spectral_get_mode(struct qwx_softc *sc)
+/*
+ * Ask firmware to delete a peer and sleep until the delete-completion
+ * event clears sc->peer_delete_done (3 second timeout per wait).
+ * Decrements sc->num_peers on success.
+ */
+int
+qwx_peer_delete(struct qwx_softc *sc, uint32_t vdev_id, uint8_t pdev_id,
+ uint8_t *addr)
{
-#if 0
-	if (sc->spectral.enabled)
-		return ar->spectral.mode;
-	else
-#endif
-	return ATH11K_SPECTRAL_DISABLED;
+ int ret;
+
+ sc->peer_delete_done = 0;
+ ret = qwx_wmi_send_peer_delete_cmd(sc, addr, vdev_id, pdev_id);
+ if (ret) {
+ printf("%s: failed to delete peer vdev_id %d addr %s ret %d\n",
+ sc->sc_dev.dv_xname, vdev_id, ether_sprintf(addr), ret);
+ return ret;
+ }
+
+ /* Wait for the WMI peer-delete-resp event handler to wake us. */
+ while (!sc->peer_delete_done) {
+ ret = tsleep_nsec(&sc->peer_delete_done, 0, "qwxpeerd",
+ SEC_TO_NSEC(3));
+ if (ret) {
+ printf("%s: peer delete command timeout\n",
+ sc->sc_dev.dv_xname);
+ return ret;
+ }
+ }
+
+ sc->num_peers--;
+ return 0;
}
-void
-qwx_spectral_reset_buffer(struct qwx_softc *sc)
+/*
+ * Create a firmware peer entry for node 'ni' and wait for the
+ * peer-map event (sc->peer_mapped) which carries the peer id.
+ * Returns ENOBUFS if the firmware peer table is full.
+ */
+int
+qwx_peer_create(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id,
+ struct ieee80211_node *ni, struct peer_create_params *param)
+{
+ struct qwx_node *nq = (struct qwx_node *)ni;
+ struct ath11k_peer *peer;
+ int ret;
+#ifdef notyet
+ lockdep_assert_held(&ar->conf_mutex);
+#endif
+ if (sc->num_peers > (TARGET_NUM_PEERS_PDEV(sc) - 1)) {
+ DPRINTF("%s: failed to create peer due to insufficient "
+ "peer entry resource in firmware\n", __func__);
+ return ENOBUFS;
+ }
+#ifdef notyet
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ spin_lock_bh(&ar->ab->base_lock);
+#endif
+ /*
+ * NOTE(review): peer points into nq and can never be NULL; the
+ * 'if (peer)' check is a leftover from Linux's hash-table lookup.
+ */
+ peer = &nq->peer;
+ if (peer) {
+ /* Refuse to create the same peer twice on one vdev. */
+ if (peer->peer_id != HAL_INVALID_PEERID &&
+ peer->vdev_id == param->vdev_id) {
+#ifdef notyet
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+#endif
+ return EINVAL;
+ }
+#if 0
+ /* Assume sta is transitioning to another band.
+ * Remove here the peer from rhash.
+ */
+ ath11k_peer_rhash_delete(ar->ab, peer);
+#endif
+ }
+#ifdef notyet
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+#endif
+ sc->peer_mapped = 0;
+
+ ret = qwx_wmi_send_peer_create_cmd(sc, pdev_id, param);
+ if (ret) {
+ printf("%s: failed to send peer create vdev_id %d ret %d\n",
+ sc->sc_dev.dv_xname, param->vdev_id, ret);
+ return ret;
+ }
+
+ /* Wait for the HTT peer-map event; it fills in nq->peer. */
+ while (!sc->peer_mapped) {
+ ret = tsleep_nsec(&sc->peer_mapped, 0, "qwxpeer",
+ SEC_TO_NSEC(3));
+ if (ret) {
+ printf("%s: peer create command timeout\n",
+ sc->sc_dev.dv_xname);
+ return ret;
+ }
+ }
+
+#ifdef notyet
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ spin_lock_bh(&ar->ab->base_lock);
+#endif
+#if 0
+ peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
+ param->peer_addr, param->vdev_id);
+
+ ret = -ENOENT;
+ goto cleanup;
+ }
+
+ ret = ath11k_peer_rhash_add(ar->ab, peer);
+ if (ret) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ goto cleanup;
+ }
+#endif
+ peer->pdev_id = pdev_id;
+#if 0
+ peer->sta = sta;
+
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ arvif->ast_hash = peer->ast_hash;
+ arvif->ast_idx = peer->hw_peer_id;
+ }
+ peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
+ peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+
+ if (sta) {
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
+ FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
+ peer->peer_id);
+
+ /* set HTT extension valid bit to 0 by default */
+ arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+ }
+#endif
+ sc->num_peers++;
+#ifdef notyet
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+#endif
+ return 0;
+ /*
+ * NOTE(review): the disabled fallback below still uses the old
+ * 3-argument qwx_peer_delete() signature (no pdev_id); it must be
+ * updated before this path is enabled.
+ */
+#if 0
+cleanup:
+ int fbret = qwx_peer_delete(sc, param->vdev_id, param->peer_addr);
+ if (fbret) {
+ printf("%s: failed peer %s delete vdev_id %d fallback ret %d\n",
+ sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
+ param->vdev_id, fbret);
+ }
+
+ return ret;
+#endif
+}
+
+/*
+ * Queue a REO command on the REO command ring. If 'cb' is non-NULL,
+ * a copy of the rx_tid state is kept on dp->reo_cmd_list and 'cb' is
+ * invoked when the command's status arrives. Returns ESHUTDOWN while
+ * a firmware crash flush is in progress.
+ */
+int
+qwx_dp_tx_send_reo_cmd(struct qwx_softc *sc, struct dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type, struct ath11k_hal_reo_cmd *cmd,
+ void (*cb)(struct qwx_dp *, void *, enum hal_reo_cmd_status))
+{
+ struct qwx_dp *dp = &sc->dp;
+ struct dp_reo_cmd *dp_cmd;
+ struct hal_srng *cmd_ring;
+ int cmd_num;
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
+ return ESHUTDOWN;
+
+ cmd_ring = &sc->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ cmd_num = qwx_hal_reo_cmd_send(sc, cmd_ring, type, cmd);
+ /* cmd_num should start from 1, during failure return the error code */
+ if (cmd_num < 0)
+ return cmd_num;
+
+ /* reo cmd ring descriptors has cmd_num starting from 1 */
+ if (cmd_num == 0)
+ return EINVAL;
+
+ /* No callback requested; fire-and-forget. */
+ if (!cb)
+ return 0;
+
+ /* Can this be optimized so that we keep the pending command list only
+ * for tid delete command to free up the resource on the command status
+ * indication?
+ */
+ dp_cmd = malloc(sizeof(*dp_cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
+ if (!dp_cmd)
+ return ENOMEM;
+
+ /* Snapshot the tid state; the status handler gets this copy. */
+ memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
+ dp_cmd->cmd_num = cmd_num;
+ dp_cmd->handler = cb;
+#ifdef notyet
+ spin_lock_bh(&dp->reo_cmd_lock);
+#endif
+ TAILQ_INSERT_TAIL(&dp->reo_cmd_list, dp_cmd, entry);
+#ifdef notyet
+ spin_unlock_bh(&dp->reo_cmd_lock);
+#endif
+ return 0;
+}
+
+/*
+ * Compute the size of a REO hardware queue descriptor for a given
+ * block-ack window size. Windows larger than the base descriptor's
+ * capacity need 1-3 extension descriptors (thresholds 105 and 210).
+ */
+uint32_t
+qwx_hal_reo_qdesc_size(uint32_t ba_window_size, uint8_t tid)
+{
+ uint32_t num_ext_desc;
+
+ if (ba_window_size <= 1) {
+ /* Non-QoS tid gets no extension descriptor. */
+ if (tid != HAL_DESC_REO_NON_QOS_TID)
+ num_ext_desc = 1;
+ else
+ num_ext_desc = 0;
+ } else if (ba_window_size <= 105) {
+ num_ext_desc = 1;
+ } else if (ba_window_size <= 210) {
+ num_ext_desc = 2;
+ } else {
+ num_ext_desc = 3;
+ }
+
+ return sizeof(struct hal_rx_reo_queue) +
+ (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+}
+
+/*
+ * Initialize a HAL descriptor header with owner and buffer type,
+ * plus a magic debug pattern in the reserved bits.
+ */
+void
+qwx_hal_reo_set_desc_hdr(struct hal_desc_header *hdr, uint8_t owner, uint8_t buffer_type, uint32_t magic)
+{
+ hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
+ FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type);
+
+ /* Magic pattern in reserved bits for debugging */
+ hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);
+}
+
+/*
+ * Fill in a REO hardware rx queue descriptor at 'vaddr' for the given
+ * tid: queue number, BA window size, starting sequence number, and
+ * optional PN check configuration. For QoS tids three extension
+ * descriptors are initialized after the base descriptor.
+ */
+void
+qwx_hal_reo_qdesc_setup(void *vaddr, int tid, uint32_t ba_window_size,
+ uint32_t start_seq, enum hal_pn_type type)
+{
+ struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
+ struct hal_rx_reo_queue_ext *ext_desc;
+
+ memset(qdesc, 0, sizeof(*qdesc));
+
+ qwx_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
+
+ qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);
+
+ qdesc->info0 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, qwx_tid_to_ac(tid));
+
+ /* Hardware encodes the window as (size - 1); clamp to at least 1. */
+ if (ba_window_size < 1)
+ ba_window_size = 1;
+
+ if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
+ ba_window_size++;
+
+ if (ba_window_size == 1)
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);
+
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
+ ba_window_size - 1);
+ switch (type) {
+ case HAL_PN_TYPE_NONE:
+ case HAL_PN_TYPE_WAPI_EVEN:
+ case HAL_PN_TYPE_WAPI_UNEVEN:
+ break;
+ case HAL_PN_TYPE_WPA:
+ /* Enable 48-bit packet number replay checking. */
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE,
+ HAL_RX_REO_QUEUE_PN_SIZE_48);
+ break;
+ }
+
+ /* TODO: Set Ignore ampdu flags based on BA window size and/or
+ * AMPDU capabilities
+ */
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);
+
+ qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);
+
+ /*
+ * NOTE(review): plain '=' (not '|=') clobbers info1; harmless here
+ * since only the zero SVLD field was set, and this matches the
+ * Linux ath11k original — confirm before changing.
+ */
+ if (start_seq <= 0xfff)
+ qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
+ start_seq);
+
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ return;
+
+ ext_desc = qdesc->ext_desc;
+
+ /* TODO: HW queue descriptors are currently allocated for max BA
+ * window size for all QOS TIDs so that same descriptor can be used
+ * later when ADDBA request is received. This should be changed to
+ * allocate HW queue descriptors based on BA window size being
+ * negotiated (0 for non BA cases), and reallocate when BA window
+ * size changes and also send WMI message to FW to change the REO
+ * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
+ */
+ memset(ext_desc, 0, sizeof(*ext_desc));
+ qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
+ ext_desc++;
+ memset(ext_desc, 0, sizeof(*ext_desc));
+ qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
+ ext_desc++;
+ memset(ext_desc, 0, sizeof(*ext_desc));
+ qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
+}
+
+/*
+ * REO command completion handler: free the DMA memory backing a
+ * flushed rx tid hardware descriptor. 'ctx' is the dp_rx_tid copy
+ * registered via qwx_dp_tx_send_reo_cmd().
+ */
+void
+qwx_dp_reo_cmd_free(struct qwx_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct qwx_softc *sc = dp->sc;
+ struct dp_rx_tid *rx_tid = ctx;
+
+ if (status != HAL_REO_CMD_SUCCESS)
+ printf("%s: failed to flush rx tid hw desc, tid %d status %d\n",
+ sc->sc_dev.dv_xname, rx_tid->tid, status);
+
+ /* Free the descriptor memory regardless; clear all references. */
+ if (rx_tid->mem) {
+ qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
+ rx_tid->mem = NULL;
+ rx_tid->vaddr = NULL;
+ rx_tid->paddr = 0ULL;
+ rx_tid->size = 0;
+ }
+}
+
+/*
+ * Flush a tid's hardware queue descriptor out of the REO cache,
+ * one base-descriptor-sized chunk at a time, then send a final
+ * status-generating flush that frees the DMA memory on completion
+ * (via qwx_dp_reo_cmd_free).
+ */
+void
+qwx_dp_reo_cache_flush(struct qwx_softc *sc, struct dp_rx_tid *rx_tid)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ unsigned long tot_desc_sz, desc_sz;
+ int ret;
+
+ tot_desc_sz = rx_tid->size;
+ desc_sz = qwx_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+
+ /* Flush from the end of the descriptor towards the start. */
+ while (tot_desc_sz > desc_sz) {
+ tot_desc_sz -= desc_sz;
+ cmd.addr_lo = (rx_tid->paddr + tot_desc_sz) & 0xffffffff;
+ cmd.addr_hi = rx_tid->paddr >> 32;
+ ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE, &cmd, NULL);
+ if (ret) {
+ printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE, "
+ "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid,
+ ret);
+ }
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.addr_lo = rx_tid->paddr & 0xffffffff;
+ cmd.addr_hi = rx_tid->paddr >> 32;
+ cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+ ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_FLUSH_CACHE,
+ &cmd, qwx_dp_reo_cmd_free);
+ if (ret) {
+ printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE cmd, "
+ "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid, ret);
+ /* Callback will not run; free the descriptor memory here. */
+ if (rx_tid->mem) {
+ qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
+ rx_tid->mem = NULL;
+ rx_tid->vaddr = NULL;
+ rx_tid->paddr = 0ULL;
+ rx_tid->size = 0;
+ }
+ }
+}
+
+/*
+ * Completion handler for the HAL_REO_CMD_UPDATE_RX_QUEUE command sent
+ * by qwx_peer_rx_tid_delete(). On success the tid state is parked on
+ * dp->reo_cmd_cache_flush_list so the hardware cache can be flushed
+ * once the descriptor has aged out (or the list grows too long).
+ */
+void
+qwx_dp_rx_tid_del_func(struct qwx_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct qwx_softc *sc = dp->sc;
+ struct dp_rx_tid *rx_tid = ctx;
+ struct dp_reo_cache_flush_elem *elem, *tmp;
+ time_t now;
+
+ if (status == HAL_REO_CMD_DRAIN) {
+ goto free_desc;
+ } else if (status != HAL_REO_CMD_SUCCESS) {
+ /* Shouldn't happen! Cleanup in case of other failure? */
+ printf("%s: failed to delete rx tid %d hw descriptor %d\n",
+ sc->sc_dev.dv_xname, rx_tid->tid, status);
+ return;
+ }
+
+ elem = malloc(sizeof(*elem), M_DEVBUF, M_ZERO | M_NOWAIT);
+ if (!elem)
+ goto free_desc;
+
+ /* gettime() has seconds resolution. */
+ now = gettime();
+ elem->ts = now;
+ memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
+#ifdef notyet
+ spin_lock_bh(&dp->reo_cmd_lock);
+#endif
+ TAILQ_INSERT_TAIL(&dp->reo_cmd_cache_flush_list, elem, entry);
+ dp->reo_cmd_cache_flush_count++;
+
+ /* Flush and invalidate aged REO desc from HW cache */
+ TAILQ_FOREACH_SAFE(elem, &dp->reo_cmd_cache_flush_list, entry, tmp) {
+ /*
+ * Flush once the element has aged past the timeout, or
+ * whenever the list grows beyond the threshold. The age
+ * test converts the millisecond timeout to the seconds
+ * resolution of gettime(); the previous 'now < ts + ms'
+ * form selected fresh elements and mixed up the units.
+ */
+ if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
+ now - elem->ts >= DP_REO_DESC_FREE_TIMEOUT_MS / 1000) {
+ TAILQ_REMOVE(&dp->reo_cmd_cache_flush_list, elem, entry);
+ dp->reo_cmd_cache_flush_count--;
+#ifdef notyet
+ spin_unlock_bh(&dp->reo_cmd_lock);
+#endif
+ qwx_dp_reo_cache_flush(sc, &elem->data);
+ free(elem, M_DEVBUF, sizeof(*elem));
+#ifdef notyet
+ spin_lock_bh(&dp->reo_cmd_lock);
+#endif
+ }
+ }
+#ifdef notyet
+ spin_unlock_bh(&dp->reo_cmd_lock);
+#endif
+ return;
+free_desc:
+ if (rx_tid->mem) {
+ qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
+ rx_tid->mem = NULL;
+ rx_tid->vaddr = NULL;
+ rx_tid->paddr = 0ULL;
+ rx_tid->size = 0;
+ }
+}
+
+/*
+ * Tear down a peer's rx tid: invalidate the REO queue in hardware
+ * and free the backing DMA memory once the command completes (or
+ * immediately if the command could not be queued).
+ */
+void
+qwx_peer_rx_tid_delete(struct qwx_softc *sc, struct ath11k_peer *peer,
+ uint8_t tid)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+ int ret;
+
+ if (!rx_tid->active)
+ return;
+
+ rx_tid->active = 0;
+
+ /* Clear the VLD bit so hardware stops using the queue. */
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.addr_lo = rx_tid->paddr & 0xffffffff;
+ cmd.addr_hi = rx_tid->paddr >> 32;
+ cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
+ ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
+ &cmd, qwx_dp_rx_tid_del_func);
+ if (ret && ret != ESHUTDOWN) {
+ printf("%s: failed to send "
+ "HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+ sc->sc_dev.dv_xname, tid, ret);
+ }
+
+ if (rx_tid->mem) {
+ qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
+ rx_tid->mem = NULL;
+ rx_tid->vaddr = NULL;
+ rx_tid->paddr = 0ULL;
+ rx_tid->size = 0;
+ }
+}
+
+/*
+ * Update an existing rx tid REO queue: set a new block-ack window
+ * size and, if 'update_ssn' is set, a new starting sequence number.
+ */
+int
+qwx_peer_rx_tid_reo_update(struct qwx_softc *sc, struct ath11k_peer *peer,
+ struct dp_rx_tid *rx_tid, uint32_t ba_win_sz, uint16_t ssn,
+ int update_ssn)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ int ret;
+
+ cmd.addr_lo = rx_tid->paddr & 0xffffffff;
+ cmd.addr_hi = rx_tid->paddr >> 32;
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
+ cmd.ba_window_size = ba_win_sz;
+
+ if (update_ssn) {
+ cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
+ cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
+ }
+
+ ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
+ &cmd, NULL);
+ if (ret) {
+ printf("%s: failed to update rx tid queue, tid %d (%d)\n",
+ sc->sc_dev.dv_xname, rx_tid->tid, ret);
+ return ret;
+ }
+
+ /* Only record the new window size once the command was queued. */
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ return 0;
+}
+
+/*
+ * Free the DMA memory of a peer's rx tid queue descriptor and mark
+ * the tid inactive, without sending any REO command to hardware.
+ */
+void
+qwx_dp_rx_tid_mem_free(struct qwx_softc *sc, struct ieee80211_node *ni,
+ int vdev_id, uint8_t tid)
+{
+ struct qwx_node *nq = (struct qwx_node *)ni;
+ struct ath11k_peer *peer = &nq->peer;
+ struct dp_rx_tid *rx_tid;
+#ifdef notyet
+ spin_lock_bh(&ab->base_lock);
+#endif
+ rx_tid = &peer->rx_tid[tid];
+
+ if (rx_tid->mem) {
+ qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
+ rx_tid->mem = NULL;
+ rx_tid->vaddr = NULL;
+ rx_tid->paddr = 0ULL;
+ rx_tid->size = 0;
+ }
+
+ rx_tid->active = 0;
+#ifdef notyet
+ spin_unlock_bh(&ab->base_lock);
+#endif
+}
+
+/*
+ * Set up (or update) the REO rx reorder queue for one tid of a peer:
+ * allocate and initialize the hardware queue descriptor, then tell
+ * firmware about it via WMI. If the tid is already active only the
+ * BA window size and SSN are updated.
+ */
+int
+qwx_peer_rx_tid_setup(struct qwx_softc *sc, struct ieee80211_node *ni,
+ int vdev_id, int pdev_id, uint8_t tid, uint32_t ba_win_sz, uint16_t ssn,
+ enum hal_pn_type pn_type)
+{
+ struct qwx_node *nq = (struct qwx_node *)ni;
+ struct ath11k_peer *peer = &nq->peer;
+ struct dp_rx_tid *rx_tid;
+ uint32_t hw_desc_sz;
+ void *vaddr;
+ uint64_t paddr;
+ int ret;
+#ifdef notyet
+ spin_lock_bh(&ab->base_lock);
+#endif
+ rx_tid = &peer->rx_tid[tid];
+ /* Update the tid queue if it is already setup */
+ if (rx_tid->active) {
+ paddr = rx_tid->paddr;
+ ret = qwx_peer_rx_tid_reo_update(sc, peer, rx_tid,
+ ba_win_sz, ssn, 1);
+#ifdef notyet
+ spin_unlock_bh(&ab->base_lock);
+#endif
+ if (ret) {
+ /* Fixed format string: newline belongs at the end. */
+ printf("%s: failed to update reo for peer %s "
+ "rx tid %d: %d\n", sc->sc_dev.dv_xname,
+ ether_sprintf(ni->ni_macaddr), tid, ret);
+ return ret;
+ }
+
+ ret = qwx_wmi_peer_rx_reorder_queue_setup(sc, vdev_id,
+ pdev_id, ni->ni_macaddr, paddr, tid, 1, ba_win_sz);
+ if (ret)
+ printf("%s: failed to send wmi rx reorder queue "
+ "for peer %s tid %d: %d\n", sc->sc_dev.dv_xname,
+ ether_sprintf(ni->ni_macaddr), tid, ret);
+ return ret;
+ }
+
+ rx_tid->tid = tid;
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ /* TODO: Optimize the memory allocation for qos tid based on
+ * the actual BA window size in REO tid update path.
+ */
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ hw_desc_sz = qwx_hal_reo_qdesc_size(ba_win_sz, tid);
+ else
+ hw_desc_sz = qwx_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
+ rx_tid->mem = qwx_dmamem_alloc(sc->sc_dmat, hw_desc_sz,
+ HAL_LINK_DESC_ALIGN);
+ if (rx_tid->mem == NULL) {
+#ifdef notyet
+ spin_unlock_bh(&ab->base_lock);
+#endif
+ return ENOMEM;
+ }
+
+ vaddr = QWX_DMA_KVA(rx_tid->mem);
+
+ qwx_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
+
+ paddr = QWX_DMA_DVA(rx_tid->mem);
+
+ rx_tid->vaddr = vaddr;
+ rx_tid->paddr = paddr;
+ rx_tid->size = hw_desc_sz;
+ rx_tid->active = 1;
+#ifdef notyet
+ spin_unlock_bh(&ab->base_lock);
+#endif
+ ret = qwx_wmi_peer_rx_reorder_queue_setup(sc, vdev_id, pdev_id,
+ ni->ni_macaddr, paddr, tid, 1, ba_win_sz);
+ if (ret) {
+ printf("%s: failed to setup rx reorder queue for peer %s "
+ "tid %d: %d\n", sc->sc_dev.dv_xname,
+ ether_sprintf(ni->ni_macaddr), tid, ret);
+ /* Firmware never learned about the queue; release it. */
+ qwx_dp_rx_tid_mem_free(sc, ni, vdev_id, tid);
+ }
+
+ return ret;
+}
+
+/*
+ * Initialize per-tid rx fragment reassembly state for a peer.
+ * Most of the Linux ath11k functionality (frag timers) is not
+ * ported yet, so this only walks the rx_tid array.
+ */
+int
+qwx_peer_rx_frag_setup(struct qwx_softc *sc, struct ieee80211_node *ni,
+ int vdev_id)
+{
+ struct qwx_node *nq = (struct qwx_node *)ni;
+ struct ath11k_peer *peer = &nq->peer;
+ struct dp_rx_tid *rx_tid;
+ int i;
+#ifdef notyet
+ spin_lock_bh(&ab->base_lock);
+#endif
+ /*
+ * Iterate over valid indices only; the previous '<=' bound
+ * computed a pointer one element past the end of rx_tid[].
+ */
+ for (i = 0; i < nitems(peer->rx_tid); i++) {
+ rx_tid = &peer->rx_tid[i];
+#if 0
+ rx_tid->ab = ab;
+ timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
+#endif
+ }
+#if 0
+ peer->dp_setup_done = true;
+#endif
+#ifdef notyet
+ spin_unlock_bh(&ab->base_lock);
+#endif
+ return 0;
+}
+
+/*
+ * Set up datapath state for a newly created peer: program default
+ * rx routing, create an rx reorder queue for every tid, and set up
+ * fragment reassembly. On failure all tids created so far are torn
+ * down again.
+ */
+int
+qwx_dp_peer_setup(struct qwx_softc *sc, int vdev_id, int pdev_id,
+ struct ieee80211_node *ni)
+{
+ struct qwx_node *nq = (struct qwx_node *)ni;
+ struct ath11k_peer *peer = &nq->peer;
+ uint32_t reo_dest;
+ int ret = 0, tid;
+
+ /* reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
+ reo_dest = sc->pdev_dp.mac_id + 1;
+ ret = qwx_wmi_set_peer_param(sc, ni->ni_macaddr, vdev_id, pdev_id,
+ WMI_PEER_SET_DEFAULT_ROUTING, DP_RX_HASH_ENABLE | (reo_dest << 1));
+ if (ret) {
+ printf("%s: failed to set default routing %d peer %s "
+ "vdev_id %d\n", sc->sc_dev.dv_xname, ret,
+ ether_sprintf(ni->ni_macaddr), vdev_id);
+ return ret;
+ }
+
+ for (tid = 0; tid < IEEE80211_NUM_TID; tid++) {
+ ret = qwx_peer_rx_tid_setup(sc, ni, vdev_id, pdev_id,
+ tid, 1, 0, HAL_PN_TYPE_NONE);
+ if (ret) {
+ printf("%s: failed to setup rxd tid queue for tid %d: %d\n",
+ sc->sc_dev.dv_xname, tid, ret);
+ goto peer_clean;
+ }
+ }
+
+ ret = qwx_peer_rx_frag_setup(sc, ni, vdev_id);
+ if (ret) {
+ printf("%s: failed to setup rx defrag context\n",
+ sc->sc_dev.dv_xname);
+ /* All tids were set up; step back to the last valid one. */
+ tid--;
+ goto peer_clean;
+ }
+
+ /* TODO: Setup other peer specific resource used in data path */
+
+ return 0;
+
+peer_clean:
+#ifdef notyet
+ spin_lock_bh(&ab->base_lock);
+#endif
+#if 0
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to del rx tid\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+#endif
+ /* Tear down every tid that was successfully set up. */
+ for (; tid >= 0; tid--)
+ qwx_peer_rx_tid_delete(sc, peer, tid);
+#ifdef notyet
+ spin_unlock_bh(&ab->base_lock);
+#endif
+ return ret;
+}
+
+/*
+ * Create a firmware peer for the given node and set up its datapath
+ * state. On datapath failure the peer is deleted again so firmware
+ * and driver state stay in sync.
+ */
+int
+qwx_mac_station_add(struct qwx_softc *sc, struct qwx_vif *arvif,
+ uint8_t pdev_id, struct ieee80211_node *ni)
+{
+ struct peer_create_params peer_param;
+ int ret;
+#ifdef notyet
+ lockdep_assert_held(&ar->conf_mutex);
+#endif
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = ni->ni_macaddr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = qwx_peer_create(sc, arvif, pdev_id, ni, &peer_param);
+ if (ret) {
+ printf("%s: Failed to add peer: %s for VDEV: %d\n",
+ sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
+ arvif->vdev_id);
+ return ret;
+ }
+
+ DNPRINTF(QWX_D_MAC, "%s: Added peer: %s for VDEV: %d\n", __func__,
+ ether_sprintf(ni->ni_macaddr), arvif->vdev_id);
+
+ ret = qwx_dp_peer_setup(sc, arvif->vdev_id, pdev_id, ni);
+ if (ret) {
+ printf("%s: failed to setup dp for peer %s on vdev %d (%d)\n",
+ sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
+ arvif->vdev_id, ret);
+ goto free_peer;
+ }
+
+ return 0;
+
+free_peer:
+ qwx_peer_delete(sc, arvif->vdev_id, pdev_id, ni->ni_macaddr);
+ return ret;
+}
+
+/*
+ * Fill a scan request with the default dwell/rest timings and event
+ * flags used for every scan. Callers override fields as needed
+ * before sending the WMI start-scan command.
+ */
+void
+qwx_wmi_start_scan_init(struct qwx_softc *sc, struct scan_req_params *arg)
+{
+ /* setup commonly used values */
+ arg->scan_req_id = 1;
+ if (sc->state_11d == ATH11K_11D_PREPARING)
+ arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+ else
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ arg->dwell_time_active = 50;
+ arg->dwell_time_active_2g = 0;
+ arg->dwell_time_passive = 150;
+ arg->dwell_time_active_6g = 40;
+ arg->dwell_time_passive_6g = 30;
+ arg->min_rest_time = 50;
+ arg->max_rest_time = 500;
+ arg->repeat_probe_time = 0;
+ arg->probe_spacing_time = 0;
+ arg->idle_time = 0;
+ arg->max_scan_time = 20000;
+ arg->probe_delay = 5;
+ arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
+ WMI_SCAN_EVENT_COMPLETED | WMI_SCAN_EVENT_BSS_CHANNEL |
+ WMI_SCAN_EVENT_FOREIGN_CHAN | WMI_SCAN_EVENT_DEQUEUED;
+ arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+
+ if (isset(sc->wmi.svc_map,
+ WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE))
+ arg->scan_ctrl_flags_ext |=
+ WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE;
+
+ arg->num_bssid = 1;
+
+ /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
+ * ZEROs in probe request
+ */
+ IEEE80211_ADDR_COPY(arg->bssid_list[0].addr, etheranyaddr);
+}
+
+/*
+ * Send a WMI_PEER_SET_PARAM command to set one per-peer firmware
+ * parameter (param_id/param_val) for the given peer and vdev.
+ * The mbuf is freed by qwx_wmi_cmd_send() on success.
+ */
+int
+qwx_wmi_set_peer_param(struct qwx_softc *sc, uint8_t *peer_addr,
+ uint32_t vdev_id, uint32_t pdev_id, uint32_t param_id, uint32_t param_val)
+{
+ struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+ struct wmi_peer_set_param_cmd *cmd;
+ struct mbuf *m;
+ int ret;
+
+ m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
+ if (!m)
+ return ENOMEM;
+
+ /* Command payload follows the HTC and WMI headers. */
+ cmd = (struct wmi_peer_set_param_cmd *)(mtod(m, uint8_t *) +
+ sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = vdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_val;
+
+ ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_SET_PARAM_CMDID);
+ if (ret) {
+ printf("%s: failed to send WMI_PEER_SET_PARAM cmd\n",
+ sc->sc_dev.dv_xname);
+ m_freem(m);
+ return ret;
+ }
+
+ DNPRINTF(QWX_D_WMI, "%s: cmd peer set param vdev %d peer %s "
+ "set param %d value %d\n", __func__, vdev_id,
+ ether_sprintf(peer_addr), param_id, param_val);
+
+ return 0;
+}
+
+/*
+ * Send a WMI command that tells firmware where a peer's rx reorder
+ * queue descriptor for 'tid' lives in DMA memory, along with the
+ * block-ack window size. The mbuf is freed by qwx_wmi_cmd_send()
+ * on success.
+ */
+int
+qwx_wmi_peer_rx_reorder_queue_setup(struct qwx_softc *sc, int vdev_id,
+ int pdev_id, uint8_t *addr, uint64_t paddr, uint8_t tid,
+ uint8_t ba_window_size_valid, uint32_t ba_window_size)
+{
+ struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+ struct wmi_peer_reorder_queue_setup_cmd *cmd;
+ struct mbuf *m;
+ int ret;
+
+ m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
+ if (!m)
+ return ENOMEM;
+
+ /* Command payload follows the HTC and WMI headers. */
+ cmd = (struct wmi_peer_reorder_queue_setup_cmd *)(mtod(m, uint8_t *) +
+ sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, addr);
+ cmd->vdev_id = vdev_id;
+ cmd->tid = tid;
+ cmd->queue_ptr_lo = paddr & 0xffffffff;
+ cmd->queue_ptr_hi = paddr >> 32;
+ cmd->queue_no = tid;
+ cmd->ba_window_size_valid = ba_window_size_valid;
+ cmd->ba_window_size = ba_window_size;
+
+ ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
+ if (ret) {
+ printf("%s: failed to send WMI_PEER_REORDER_QUEUE_SETUP\n",
+ sc->sc_dev.dv_xname);
+ m_freem(m);
+ /*
+ * Return early so the success debug message below is not
+ * printed on failure; matches qwx_wmi_set_peer_param().
+ */
+ return ret;
+ }
+
+ DNPRINTF(QWX_D_WMI, "%s: cmd peer reorder queue setup addr %s "
+ "vdev_id %d tid %d\n", __func__, ether_sprintf(addr), vdev_id, tid);
+
+ return 0;
+}
+
+/*
+ * Return the current spectral scan mode. Spectral scan support is
+ * not ported yet, so this always reports it as disabled.
+ */
+enum ath11k_spectral_mode
+qwx_spectral_get_mode(struct qwx_softc *sc)
+{
+#if 0
+ if (sc->spectral.enabled)
+ return ar->spectral.mode;
+ else
+#endif
+ return ATH11K_SPECTRAL_DISABLED;
+}
+
+/* Stub: spectral scan buffers are not implemented yet. */
+void
+qwx_spectral_reset_buffer(struct qwx_softc *sc)
{
	printf("%s: not implemented\n", __func__);
}
#endif
}
+/*
+ * Find a pdev which corresponds to a given channel.
+ * This doesn't exactly match the semantics of the Linux driver
+ * but because OpenBSD does not (yet) implement multi-bss mode
+ * we can assume that only one PHY will be active in either the
+ * 2 GHz or the 5 GHz band.
+ *
+ * Returns NULL if no active pdev supports the channel's band.
+ */
+struct qwx_pdev *
+qwx_get_pdev_for_chan(struct qwx_softc *sc, struct ieee80211_channel *chan)
+{
+ struct qwx_pdev *pdev;
+ int i;
+
+ for (i = 0; i < sc->num_radios; i++) {
+ /* Only consider pdevs that have been started. */
+ if ((sc->pdevs_active & (1 << i)) == 0)
+ continue;
+
+ pdev = &sc->pdevs[i];
+ if (IEEE80211_IS_CHAN_2GHZ(chan) &&
+ (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP))
+ return pdev;
+ if (IEEE80211_IS_CHAN_5GHZ(chan) &&
+ (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP))
+ return pdev;
+ }
+
+ return NULL;
+}
+
+/*
+ * Program the management and beacon tx rate for a vdev based on the
+ * lowest basic rate of the current BSS. Failures are logged but not
+ * fatal; the firmware default rate is used instead.
+ */
+void
+qwx_recalculate_mgmt_rate(struct qwx_softc *sc, struct ieee80211_node *ni,
+ uint32_t vdev_id, uint32_t pdev_id)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ int hw_rate_code;
+ uint32_t vdev_param;
+ int bitrate;
+ int ret;
+#ifdef notyet
+ lockdep_assert_held(&ar->conf_mutex);
+#endif
+ bitrate = ieee80211_min_basic_rate(ic);
+ hw_rate_code = qwx_mac_get_rate_hw_value(ic, ni, bitrate);
+ if (hw_rate_code < 0) {
+ DPRINTF("%s: bitrate not supported %d\n",
+ sc->sc_dev.dv_xname, bitrate);
+ return;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+ ret = qwx_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id,
+ vdev_param, hw_rate_code);
+ if (ret)
+ printf("%s: failed to set mgmt tx rate\n",
+ sc->sc_dev.dv_xname);
+#if 0
+ /* For WCN6855, firmware will clear this param when vdev starts, hence
+ * cache it here so that we can reconfigure it once vdev starts.
+ */
+ ab->hw_rate_code = hw_rate_code;
+#endif
+ vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
+ ret = qwx_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ printf("%s: failed to set beacon tx rate\n",
+ sc->sc_dev.dv_xname);
+}
+
int
qwx_auth(struct qwx_softc *sc)
{
-	printf("%s: not implemented\n", __func__);
-	return ENOTSUP;
+ /*
+ * AUTH state handler: configure the beacon interval and mgmt
+ * rates on the vdev, create a firmware peer for the AP, and
+ * start the vdev on the AP's channel.
+ */
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = ic->ic_bss;
+ uint32_t param_id;
+ struct qwx_vif *arvif;
+ struct qwx_pdev *pdev;
+ int ret;
+
+ /* Only a single vdev is supported so far. */
+ arvif = TAILQ_FIRST(&sc->vif_list);
+ if (arvif == NULL) {
+ printf("%s: no vdev found\n", sc->sc_dev.dv_xname);
+ return EINVAL;
+ }
+
+ pdev = qwx_get_pdev_for_chan(sc, ni->ni_chan);
+ if (pdev == NULL) {
+ printf("%s: no pdev found for channel %d\n",
+ sc->sc_dev.dv_xname, ieee80211_chan2ieee(ic, ni->ni_chan));
+ return EINVAL;
+ }
+
+ param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
+ ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
+ param_id, ni->ni_intval);
+ if (ret) {
+ printf("%s: failed to set beacon interval for VDEV: %d\n",
+ sc->sc_dev.dv_xname, arvif->vdev_id);
+ return ret;
+ }
+
+ qwx_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id);
+
+ ret = qwx_mac_station_add(sc, arvif, pdev->pdev_id, ni);
+ if (ret)
+ return ret;
+
+ /* Start vdev. */
+ ret = qwx_mac_vdev_start(sc, arvif, pdev->pdev_id);
+ if (ret) {
+ printf("%s: failed to start MAC for VDEV: %d\n",
+ sc->sc_dev.dv_xname, arvif->vdev_id);
+ return ret;
+ }
+
+ /*
+ * WCN6855 firmware clears basic-rate parameters when vdev starts.
+ * Set it once more.
+ */
+ qwx_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id);
+
+ return ret;
}
int