-/* $OpenBSD: if_iwm.c,v 1.326 2021/05/31 08:40:41 stsp Exp $ */
+/* $OpenBSD: if_iwm.c,v 1.327 2021/06/01 13:21:08 stsp Exp $ */
/*
* Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
uint8_t *, size_t);
int iwm_set_default_calib(struct iwm_softc *, const void *);
void iwm_fw_info_free(struct iwm_fw_info *);
+void iwm_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
+uint32_t iwm_read_prph_unlocked(struct iwm_softc *, uint32_t);
uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
+void iwm_write_prph_unlocked(struct iwm_softc *, uint32_t, uint32_t);
void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
int iwm_allow_mcast(struct iwm_softc *);
void iwm_init_msix_hw(struct iwm_softc *);
void iwm_conf_msix_hw(struct iwm_softc *, int);
+int iwm_clear_persistence_bit(struct iwm_softc *);
int iwm_start_hw(struct iwm_softc *);
void iwm_stop_device(struct iwm_softc *);
void iwm_nic_config(struct iwm_softc *);
void iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
struct iwm_rx_data *);
int iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
+int iwm_phy_ctxt_cmd_uhb(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
+ uint8_t, uint32_t, uint32_t);
void iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
struct iwm_phy_context_cmd *, uint32_t, uint32_t);
void iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
int iwm_lmac_scan(struct iwm_softc *, int);
int iwm_config_umac_scan(struct iwm_softc *);
int iwm_umac_scan(struct iwm_softc *, int);
+void iwm_mcc_update(struct iwm_softc *, struct iwm_mcc_chub_notif *);
uint8_t iwm_ridx2rate(struct ieee80211_rateset *, int);
int iwm_rval2ridx(int);
void iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
int iwm_umac_scan_abort(struct iwm_softc *);
int iwm_lmac_scan_abort(struct iwm_softc *);
int iwm_scan_abort(struct iwm_softc *);
+int iwm_phy_ctxt_update(struct iwm_softc *, struct iwm_phy_ctxt *,
+ struct ieee80211_channel *, uint8_t, uint8_t, uint32_t);
int iwm_auth(struct iwm_softc *);
int iwm_deauth(struct iwm_softc *);
int iwm_assoc(struct iwm_softc *);
struct ieee80211_node *);
int iwm_sf_config(struct iwm_softc *, int);
int iwm_send_bt_init_conf(struct iwm_softc *);
+int iwm_send_soc_conf(struct iwm_softc *);
int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
+int iwm_send_temp_report_ths_cmd(struct iwm_softc *);
void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
void iwm_free_fw_paging(struct iwm_softc *);
int iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
void iwm_radiotap_attach(struct iwm_softc *);
#endif
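+
+/*
+ * Look up the firmware's advertised version of a given command, as parsed
+ * from the firmware's CMD_VERSIONS TLV, or return IWM_FW_CMD_VER_UNKNOWN
+ * if the command is not listed there.
+ */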
+uint8_t
+iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd)
+{
+ const struct iwm_fw_cmd_version *entry;
+ int i;
+
+ for (i = 0; i < sc->n_cmd_versions; i++) {
+ entry = &sc->cmd_versions[i];
+ if (entry->group == grp && entry->cmd == cmd)
+ return entry->cmd_ver;
+ }
+
+ return IWM_FW_CMD_VER_UNKNOWN;
+}
+
int
iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
{
/* don't touch fw->fw_status */
memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
+
+void
+iwm_fw_version_str(char *buf, size_t bufsize,
+ uint32_t major, uint32_t minor, uint32_t api)
+{
+ /*
+ * Starting with major version 35 the Linux driver prints the minor
+ * version in hexadecimal.
+ */
+ if (major >= 35)
+ snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
+ else
+ snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
+}
int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
sc->sc_capaflags = 0;
sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
- memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
+ memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
+ sc->n_cmd_versions = 0;
uhdr = (void *)fw->fw_rawdata;
if (*(uint32_t *)fw->fw_rawdata != 0
goto out;
}
- snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
+ iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
IWM_UCODE_MINOR(le32toh(uhdr->ver)),
IWM_UCODE_API(le32toh(uhdr->ver)));
+
data = uhdr->data;
len = fw->fw_rawsize - sizeof(*uhdr);
break;
}
- case 48: /* undocumented TLV */
+ case IWM_UCODE_TLV_CMD_VERSIONS:
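+ /* Round the TLV length down to a whole number of entries. */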
+ if (tlv_len % sizeof(struct iwm_fw_cmd_version)) {
+ tlv_len /= sizeof(struct iwm_fw_cmd_version);
+ tlv_len *= sizeof(struct iwm_fw_cmd_version);
+ }
+ if (sc->n_cmd_versions != 0) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ if (tlv_len > sizeof(sc->cmd_versions)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
+ sc->n_cmd_versions = tlv_len / sizeof(struct iwm_fw_cmd_version);
+ break;
+
case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
case IWM_UCODE_TLV_FW_GSCAN_CAPA:
/* ignore, not used by current driver */
err = EINVAL;
goto parse_out;
}
- snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
- "%u.%u.%u",
+
+ iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
le32toh(((uint32_t *)tlv_data)[0]),
le32toh(((uint32_t *)tlv_data)[1]),
le32toh(((uint32_t *)tlv_data)[2]));
case IWM_UCODE_TLV_FW_DBG_DEST:
case IWM_UCODE_TLV_FW_DBG_CONF:
+ case IWM_UCODE_TLV_UMAC_DEBUG_ADDRS:
+ case IWM_UCODE_TLV_LMAC_DEBUG_ADDRS:
+ case IWM_UCODE_TLV_TYPE_DEBUG_INFO:
+ case IWM_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
+ case IWM_UCODE_TLV_TYPE_HCMD:
+ case IWM_UCODE_TLV_TYPE_REGIONS:
+ case IWM_UCODE_TLV_TYPE_TRIGGERS:
+ break;
+
+ case IWM_UCODE_TLV_HW_TYPE:
break;
case IWM_UCODE_TLV_FW_MEM_SEG:
break;
+ /* undocumented TLVs found in iwm-9000-43 image */
+ case 0x1000003:
+ case 0x1000004:
+ break;
+
default:
err = EINVAL;
goto parse_out;
}
uint32_t
-iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
+iwm_read_prph_unlocked(struct iwm_softc *sc, uint32_t addr)
{
- iwm_nic_assert_locked(sc);
IWM_WRITE(sc,
IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
IWM_BARRIER_READ_WRITE(sc);
return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}
-void
-iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
+uint32_t
+iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
iwm_nic_assert_locked(sc);
+ return iwm_read_prph_unlocked(sc, addr);
+}
+
+void
+iwm_write_prph_unlocked(struct iwm_softc *sc, uint32_t addr, uint32_t val)
+{
IWM_WRITE(sc,
IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
IWM_BARRIER_WRITE(sc);
IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}
+void
+iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
+{
+ iwm_nic_assert_locked(sc);
+ iwm_write_prph_unlocked(sc, addr, val);
+}
+
void
iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
{
IWM_MSIX_HW_INT_CAUSES_REG_HAP);
}
+int
+iwm_clear_persistence_bit(struct iwm_softc *sc)
+{
+ uint32_t hpm, wprot;
+
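+ /*
+ * The persistence bit may still be set from a previous boot stage.
+ * Clear it before resetting the device, unless PRPH register writes
+ * are currently write-protected.
+ */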
+ hpm = iwm_read_prph_unlocked(sc, IWM_HPM_DEBUG);
+ if (hpm != 0xa5a5a5a0 && (hpm & IWM_HPM_PERSISTENCE_BIT)) {
+ wprot = iwm_read_prph_unlocked(sc, IWM_PREG_PRPH_WPROT_9000);
+ if (wprot & IWM_PREG_WFPM_ACCESS) {
+ printf("%s: cannot clear persistence bit\n",
+ DEVNAME(sc));
+ return EPERM;
+ }
+ iwm_write_prph_unlocked(sc, IWM_HPM_DEBUG,
+ hpm & ~IWM_HPM_PERSISTENCE_BIT);
+ }
+
+ return 0;
+}
+
int
iwm_start_hw(struct iwm_softc *sc)
{
if (err)
return err;
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000) {
+ err = iwm_clear_persistence_bit(sc);
+ if (err)
+ return err;
+ }
+
/* Reset the entire device */
IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
DELAY(5000);
uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
uint32_t status;
+ size_t len;
if (action == IWM_FW_CTXT_ACTION_ADD && active)
panic("binding already added");
for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
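+
+ /* Bind 5GHz channels to the second LMAC if the firmware supports CDB. */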
+ if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
+ !isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CDB_SUPPORT))
+ cmd.lmac_id = htole32(IWM_LMAC_24G_INDEX);
+ else
+ cmd.lmac_id = htole32(IWM_LMAC_5G_INDEX);
+
+ if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT))
+ len = sizeof(cmd);
+ else
+ len = sizeof(struct iwm_binding_cmd_v1);
status = 0;
- err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
- sizeof(cmd), &cmd, &status);
+ err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD, len, &cmd,
+ &status);
if (err == 0 && status != 0)
err = EIO;
cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
}
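+
+/*
+ * Variant of the phy context command which uses the larger version of
+ * struct iwm_fw_channel_info; see the comment in iwm_phy_ctxt_cmd() below.
+ */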
+int
+iwm_phy_ctxt_cmd_uhb(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
+ uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
+ uint32_t apply_time)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwm_phy_context_cmd_uhb cmd;
+ uint8_t active_cnt, idle_cnt;
+ struct ieee80211_channel *chan = ctxt->channel;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
+ ctxt->color));
+ cmd.action = htole32(action);
+ cmd.apply_time = htole32(apply_time);
+
+ cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
+ IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
+ cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
+ cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
+ cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
+
+ idle_cnt = chains_static;
+ active_cnt = chains_dynamic;
+ cmd.rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
+ IWM_PHY_RX_CHAIN_VALID_POS);
+ cmd.rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
+ cmd.rxchain_info |= htole32(active_cnt <<
+ IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
+ cmd.txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
+
+ return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
+}
+
int
iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
{
struct iwm_phy_context_cmd cmd;
+ /*
+ * Intel increased the size of the fw_channel_info struct and neglected
+ * to bump the phy_context_cmd struct, which contains an fw_channel_info
+ * member in the middle.
+ * To keep things simple we use a separate function to handle the larger
+ * variant of the phy context command.
+ */
+ if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
+ return iwm_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
+ chains_dynamic, action, apply_time);
+
iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
rinfo = &iwm_rates[ridx];
if (iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
+ else if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
+ rate_flags = IWM_RATE_MCS_ANT_B_MSK;
else
rate_flags = IWM_RATE_MCS_ANT_A_MSK;
if (IWM_RIDX_IS_CCK(ridx))
iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
{
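+ /*
+ * Version 2 of the flush command selects frames by station and TID
+ * rather than by Tx queue.
+ */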
struct iwm_tx_path_flush_cmd flush_cmd = {
- .queues_ctl = htole32(tfd_queue_msk),
- .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
+ .sta_id = htole32(IWM_STATION_ID),
+ .tid_mask = htole16(0xffff),
};
int err;
goto done;
}
+ /*
+ * Flushing Tx rings may fail if the AP has disappeared.
+ * We can rely on iwm_newstate_task() to reset everything and begin
+ * scanning again if we are left with outstanding frames on queues.
+ */
err = iwm_wait_tx_queues_empty(sc);
- if (err) {
- printf("%s: Could not empty Tx queues (error %d)\n",
- DEVNAME(sc), err);
-#if 1
- iwm_dump_driver_status(sc);
-#endif
+ if (err)
goto done;
- }
err = iwm_drain_sta(sc, in, 0);
done:
chan->iter_count = htole16(1);
chan->iter_interval = 0;
chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
- if (n_ssids != 0 && !bgscan)
+ /*
+ * Firmware may become unresponsive when asked to send
+ * a directed probe request on a passive channel.
+ */
+ if (n_ssids != 0 && !bgscan &&
+ (c->ic_flags & IEEE80211_CHAN_PASSIVE) == 0)
chan->flags |= htole32(1 << 1); /* select SSID 0 */
chan++;
nchan++;
chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
chan->iter_count = 1;
chan->iter_interval = htole16(0);
- if (n_ssids != 0 && !bgscan)
+ /*
+ * Firmware may become unresponsive when asked to send
+ * a directed probe request on a passive channel.
+ */
+ if (n_ssids != 0 && !bgscan &&
+ (c->ic_flags & IEEE80211_CHAN_PASSIVE) == 0)
chan->flags = htole32(1 << 0); /* select SSID 0 */
chan++;
nchan++;
memset(preq, 0, sizeof(*preq));
- if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
+ if (remain < sizeof(*wh) + 2)
return ENOBUFS;
/*
*(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
frm = (uint8_t *)(wh + 1);
- frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
- /* Tell the firmware where the MAC header is. */
+ *frm++ = IEEE80211_ELEMID_SSID;
+ *frm++ = 0;
+ /* hardware inserts SSID */
+
+ /* Tell firmware where the MAC header and SSID IE are. */
preq->mac_header.offset = 0;
preq->mac_header.len = htole16(frm - (uint8_t *)wh);
- remain -= frm - (uint8_t *)wh;
+ remain -= (frm - (uint8_t *)wh);
/* Fill in 2GHz IEs and tell firmware where they are. */
rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
frm = ieee80211_add_rates(frm, rs);
if (rs->rs_nrates > IEEE80211_RATE_SIZE)
frm = ieee80211_add_xrates(frm, rs);
- preq->band_data[0].len = htole16(frm - pos);
remain -= frm - pos;
if (isset(sc->sc_enabled_capa,
*frm++ = 0;
remain -= 3;
}
+ preq->band_data[0].len = htole16(frm - pos);
if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
/* Fill in 5GHz IEs. */
req->scan_flags |=
htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
if (isset(sc->sc_enabled_capa,
- IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
+ IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
+ isset(sc->sc_enabled_capa,
+ IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
req->flags = htole32(IWM_PHY_BAND_24);
base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
-#ifdef notyet
- else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
- base_size = IWM_SCAN_REQ_UMAC_SIZE_V6;
-#endif
if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
else
if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
return &req->v7.channel;
-#ifdef notyet
- if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
- return &req->v6.channel;
-#endif
+
return &req->v1.channel;
}
if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
return (void *)&req->v7.data;
-#ifdef notyet
- if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
- return (void *)&req->v6.data;
-#endif
+
return (void *)&req->v1.data;
}
req->v1.passive_dwell = 110;
req->v1.fragmented_dwell = 44;
req->v1.extended_dwell = 90;
+
+ req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
}
if (bgscan) {
}
}
- req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
cmd_data = iwm_get_scan_req_umac_data(sc, req);
req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
+ if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
+ req->v8.general_flags2 =
+ IWM_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
+ }
/* Check if we're doing an active directed scan. */
if (ic->ic_des_esslen != 0) {
req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
if (isset(sc->sc_enabled_capa,
- IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
+ IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
+ isset(sc->sc_enabled_capa,
+ IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
req->general_flags |=
htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
return err;
}
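+
+/*
+ * Handle a regulatory domain (MCC) update notification from the firmware.
+ */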
+void
+iwm_mcc_update(struct iwm_softc *sc, struct iwm_mcc_chub_notif *notif)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ifnet *ifp = IC2IFP(ic);
+ char alpha2[3];
+
+ snprintf(alpha2, sizeof(alpha2), "%c%c",
+ (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
+
+ if (ifp->if_flags & IFF_DEBUG) {
+ printf("%s: firmware has detected regulatory domain '%s' "
+ "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
+ }
+
+ /* TODO: Schedule a task to send MCC_UPDATE_CMD? */
+}
+
uint8_t
iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
{
int
iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
{
- struct iwm_time_quota_cmd cmd;
+ struct iwm_time_quota_cmd_v1 cmd;
int i, idx, num_active_macs, quota, quota_rem;
int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
int n_ifs[IWM_MAX_BINDINGS] = {0, };
/* Give the remainder of the session to the first binding */
cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
- return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
- sizeof(cmd), &cmd);
+ if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_QUOTA_LOW_LATENCY)) {
+ struct iwm_time_quota_cmd cmd_v2;
+
+ memset(&cmd_v2, 0, sizeof(cmd_v2));
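+ /*
+ * Copy over the V1 quota data; the new low_latency field is
+ * left at IWM_QUOTA_LOW_LATENCY_NONE.
+ */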
+ for (i = 0; i < IWM_MAX_BINDINGS; i++) {
+ cmd_v2.quotas[i].id_and_color =
+ cmd.quotas[i].id_and_color;
+ cmd_v2.quotas[i].quota = cmd.quotas[i].quota;
+ cmd_v2.quotas[i].max_duration =
+ cmd.quotas[i].max_duration;
+ }
+ return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
+ sizeof(cmd_v2), &cmd_v2);
+ }
+
+ return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
}
void
return err;
}
+int
+iwm_phy_ctxt_update(struct iwm_softc *sc, struct iwm_phy_ctxt *phyctxt,
+ struct ieee80211_channel *chan, uint8_t chains_static,
+ uint8_t chains_dynamic, uint32_t apply_time)
+{
+ uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
+ int err;
+
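+ /*
+ * In the CDB case a PHY context cannot simply be modified onto a
+ * channel in a different band; remove it and add it back on the
+ * new channel instead.
+ */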
+ if (isset(sc->sc_enabled_capa,
+ IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+ (phyctxt->channel->ic_flags & band_flags) !=
+ (chan->ic_flags & band_flags)) {
+ err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
+ chains_dynamic, IWM_FW_CTXT_ACTION_REMOVE, apply_time);
+ if (err) {
+ printf("%s: could not remove PHY context "
+ "(error %d)\n", DEVNAME(sc), err);
+ return err;
+ }
+ phyctxt->channel = chan;
+ err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
+ chains_dynamic, IWM_FW_CTXT_ACTION_ADD, apply_time);
+ if (err) {
+ printf("%s: could not remove PHY context "
+ "(error %d)\n", DEVNAME(sc), err);
+ return err;
+ }
+ } else {
+ phyctxt->channel = chan;
+ err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
+ chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, apply_time);
+ if (err) {
+ printf("%s: could not update PHY context (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
int
iwm_auth(struct iwm_softc *sc)
{
splassert(IPL_NET);
- if (ic->ic_opmode == IEEE80211_M_MONITOR)
- sc->sc_phyctxt[0].channel = ic->ic_ibss_chan;
- else
- sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
- err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
- IWM_FW_CTXT_ACTION_MODIFY, 0);
- if (err) {
- printf("%s: could not update PHY context (error %d)\n",
- DEVNAME(sc), err);
- return err;
+ if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+ err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
+ ic->ic_ibss_chan, 1, 1, 0);
+ if (err)
+ return err;
+ } else {
+ err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
+ in->in_ni.ni_chan, 1, 1, 0);
+ if (err)
+ return err;
}
in->in_phyctxt = &sc->sc_phyctxt[0];
sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
}
+ /* Move unused PHY context to a default channel. */
+ err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
+ &ic->ic_channels[1], 1, 1, 0);
+ if (err)
+ return err;
+
return 0;
}
if ((ic->ic_opmode == IEEE80211_M_MONITOR ||
(in->in_ni.ni_flags & IEEE80211_NODE_HT)) &&
iwm_mimo_enabled(sc)) {
- err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
- 2, 2, IWM_FW_CTXT_ACTION_MODIFY, 0);
+ err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
+ in->in_ni.ni_chan, 2, 2, 0);
if (err) {
- printf("%s: failed to update PHY\n",
- DEVNAME(sc));
+ printf("%s: failed to update PHY\n", DEVNAME(sc));
return err;
}
}
return err;
}
- err = iwm_update_quotas(sc, in, 1);
- if (err) {
- printf("%s: could not update quotas (error %d)\n",
- DEVNAME(sc), err);
- return err;
+ if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
+ err = iwm_update_quotas(sc, in, 1);
+ if (err) {
+ printf("%s: could not update quotas (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
}
ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
iwm_disable_beacon_filter(sc);
- err = iwm_update_quotas(sc, in, 0);
- if (err) {
- printf("%s: could not update quotas (error %d)\n",
- DEVNAME(sc), err);
- return err;
+ if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
+ err = iwm_update_quotas(sc, in, 0);
+ if (err) {
+ printf("%s: could not update quotas (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
}
err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
/* Reset Tx chains in case MIMO was enabled. */
if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
iwm_mimo_enabled(sc)) {
- err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
- IWM_FW_CTXT_ACTION_MODIFY, 0);
+ err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
+ in->in_ni.ni_chan, 1, 1, 0);
if (err) {
printf("%s: failed to update PHY\n", DEVNAME(sc));
return err;
if (iwm_is_mimo_ht_plcp(ht_plcp))
tab |= IWM_RATE_MCS_ANT_AB_MSK;
+ else if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
+ tab |= IWM_RATE_MCS_ANT_B_MSK;
else
tab |= IWM_RATE_MCS_ANT_A_MSK;
tab = iwm_rates[ridx_min].plcp;
if (IWM_RIDX_IS_CCK(ridx_min))
tab |= IWM_RATE_MCS_CCK_MSK;
- tab |= IWM_RATE_MCS_ANT_A_MSK;
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
+ tab |= IWM_RATE_MCS_ANT_B_MSK;
+ else
+ tab |= IWM_RATE_MCS_ANT_A_MSK;
lqcmd.rs_table[j++] = htole32(tab);
}
- lqcmd.single_stream_ant_msk = IWM_ANT_A;
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
+ lqcmd.single_stream_ant_msk = IWM_ANT_B;
+ else
+ lqcmd.single_stream_ant_msk = IWM_ANT_A;
lqcmd.dual_stream_ant_msk = IWM_ANT_AB;
lqcmd.agg_time_limit = htole16(4000); /* 4ms */
&bt_cmd);
}
+int
+iwm_send_soc_conf(struct iwm_softc *sc)
+{
+ struct iwm_soc_configuration_cmd cmd;
+ int err;
+ uint32_t cmd_id, flags = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ /*
+ * In VER_1 of this command, the discrete value is considered
+ * an integer; in VER_2, it's a bitmask. Since we have only 2
+ * values in VER_1, this is backwards-compatible with VER_2,
+ * as long as we don't set any other flag bits.
+ */
+ if (!sc->sc_integrated) { /* VER_1 */
+ flags = IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE;
+ } else { /* VER_2 */
+ uint8_t scan_cmd_ver;
+ if (sc->sc_ltr_delay != IWM_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
+ flags |= (sc->sc_ltr_delay &
+ IWM_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
+ scan_cmd_ver = iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP,
+ IWM_SCAN_REQ_UMAC);
+ if (scan_cmd_ver != IWM_FW_CMD_VER_UNKNOWN &&
+ scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
+ flags |= IWM_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
+ }
+ cmd.flags = htole32(flags);
+
+ cmd.latency = htole32(sc->sc_xtal_latency);
+
+ cmd_id = iwm_cmd_id(IWM_SOC_CONFIGURATION_CMD, IWM_SYSTEM_GROUP, 0);
+ err = iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
+ if (err)
+ printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
+ return err;
+}
+
int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
.flags = IWM_CMD_WANT_RESP,
.data = { &mcc_cmd },
};
+ struct iwm_rx_packet *pkt;
+ struct iwm_mcc_update_resp_v3 *resp;
+ size_t resp_len;
int err;
- int resp_v2 = isset(sc->sc_enabled_capa,
- IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
!sc->sc_nvm.lar_enabled) {
else
mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
- if (resp_v2) {
- hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
- hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
- sizeof(struct iwm_mcc_update_resp);
- } else {
- hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
- hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
- sizeof(struct iwm_mcc_update_resp_v1);
- }
+ hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
+ hcmd.resp_pkt_len = IWM_CMD_RESP_MAX;
err = iwm_send_cmd(sc, &hcmd);
if (err)
return err;
+ pkt = hcmd.resp_pkt;
+ if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
+ err = EIO;
+ goto out;
+ }
+
+ resp_len = iwm_rx_packet_payload_len(pkt);
+ if (resp_len < sizeof(*resp)) {
+ err = EIO;
+ goto out;
+ }
+
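+ /* The channel list must account for the remainder of the response. */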
+ resp = (void *)pkt->data;
+ if (resp_len != sizeof(*resp) +
+ resp->n_channels * sizeof(resp->channels[0])) {
+ err = EIO;
+ goto out;
+ }
+
+out:
iwm_free_resp(sc, &hcmd);
+ return err;
+}
- return 0;
+int
+iwm_send_temp_report_ths_cmd(struct iwm_softc *sc)
+{
+ struct iwm_temp_report_ths_cmd cmd;
+ int err;
+
+ /*
+ * In order to give responsibility for critical-temperature-kill
+ * and TX backoff to FW we need to send an empty temperature
+ * reporting command at init time.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+
+ err = iwm_send_cmd_pdu(sc,
+ IWM_WIDE_ID(IWM_PHY_OPS_GROUP, IWM_TEMP_REPORTING_THRESHOLDS_CMD),
+ 0, sizeof(cmd), &cmd);
+ if (err)
+ printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
+ DEVNAME(sc), err);
+
+ return err;
}
void
return err;
}
+ if (isset(sc->sc_enabled_capa,
+ IWM_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
+ err = iwm_send_soc_conf(sc);
+ if (err)
+ return err;
+ }
+
if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
err = iwm_send_dqa_cmd(sc);
if (err)
goto err;
}
- for (i = 0; i < 1; i++) {
+ for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
/*
* The channel used here isn't relevant as it's
* going to be overwritten in the other flows.
* For now use the first channel we have.
*/
+ sc->sc_phyctxt[i].id = i;
sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
IWM_FW_CTXT_ACTION_ADD, 0);
DEVNAME(sc), err);
}
+ if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
+ err = iwm_send_temp_report_ths_cmd(sc);
+ if (err)
+ goto err;
+ }
+
err = iwm_power_update_device(sc);
if (err) {
printf("%s: could not send power command (error %d)\n",
case IWM_MCC_CHUB_UPDATE_CMD: {
struct iwm_mcc_chub_notif *notif;
SYNC_RESP_STRUCT(notif, pkt);
-
- sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
- sc->sc_fw_mcc[1] = notif->mcc & 0xff;
- sc->sc_fw_mcc[2] = '\0';
+ iwm_mcc_update(sc, notif);
+ break;
}
case IWM_DTS_MEASUREMENT_NOTIFICATION:
case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
IWM_DTS_MEASUREMENT_NOTIF_WIDE):
+ case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
+ IWM_TEMP_REPORTING_THRESHOLDS_CMD):
break;
+ case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
+ IWM_CT_KILL_NOTIFICATION): {
+ struct iwm_ct_kill_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+ printf("%s: device at critical temperature (%u degC), "
+ "stopping device\n",
+ DEVNAME(sc), le16toh(notif->temperature));
+ sc->sc_flags |= IWM_FLAG_HW_ERR;
+ task_add(systq, &sc->init_task);
+ break;
+ }
+
case IWM_ADD_STA_KEY:
case IWM_PHY_CONFIGURATION_CMD:
case IWM_TX_ANT_CONFIGURATION_CMD:
case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
break;
+ case IWM_WIDE_ID(IWM_SYSTEM_GROUP, IWM_SOC_CONFIGURATION_CMD):
+ break;
+
default:
handled = 0;
printf("%s: unhandled firmware response 0x%x/0x%x "
break;
case PCI_PRODUCT_INTEL_WL_3165_1:
case PCI_PRODUCT_INTEL_WL_3165_2:
- sc->sc_fwname = "iwm-7265-17";
+ sc->sc_fwname = "iwm-7265D-29";
sc->host_interrupt_operation_mode = 0;
sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
break;
case PCI_PRODUCT_INTEL_WL_7265_1:
case PCI_PRODUCT_INTEL_WL_7265_2:
- sc->sc_fwname = "iwm-7265-17";
+ sc->sc_fwname = "iwm-7265D-29";
sc->host_interrupt_operation_mode = 0;
sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
break;
case PCI_PRODUCT_INTEL_WL_8260_1:
case PCI_PRODUCT_INTEL_WL_8260_2:
- sc->sc_fwname = "iwm-8000C-34";
+ sc->sc_fwname = "iwm-8000C-36";
sc->host_interrupt_operation_mode = 0;
sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
sc->nvm_type = IWM_NVM_EXT;
break;
case PCI_PRODUCT_INTEL_WL_8265_1:
- sc->sc_fwname = "iwm-8265-34";
+ sc->sc_fwname = "iwm-8265-36";
sc->host_interrupt_operation_mode = 0;
sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
sc->nvm_type = IWM_NVM_EXT;
break;
case PCI_PRODUCT_INTEL_WL_9260_1:
- sc->sc_fwname = "iwm-9260-34";
+ sc->sc_fwname = "iwm-9260-46";
sc->host_interrupt_operation_mode = 0;
sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
break;
case PCI_PRODUCT_INTEL_WL_9560_1:
case PCI_PRODUCT_INTEL_WL_9560_2:
- sc->sc_fwname = "iwm-9000-34";
+ sc->sc_fwname = "iwm-9000-46";
sc->host_interrupt_operation_mode = 0;
sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
sc->sc_nvm_max_section_size = 32768;
sc->sc_mqrx_supported = 1;
sc->sc_integrated = 1;
+ sc->sc_xtal_latency = 650;
break;
default:
printf("%s: unknown adapter type\n", DEVNAME(sc));
-/* $OpenBSD: if_iwmreg.h,v 1.50 2021/05/03 08:41:25 stsp Exp $ */
+/* $OpenBSD: if_iwmreg.h,v 1.51 2021/06/01 13:21:08 stsp Exp $ */
/******************************************************************************
*
#define IWM_UCODE_TLV_API_STA_TYPE 30
#define IWM_UCODE_TLV_API_NAN2_VER2 31
#define IWM_UCODE_TLV_API_ADAPTIVE_DWELL 32
+#define IWM_UCODE_TLV_API_NEW_BEACON_TEMPLATE 34
#define IWM_UCODE_TLV_API_NEW_RX_STATS 35
+#define IWM_UCODE_TLV_API_WOWLAN_KEY_MATERIAL 36
+#define IWM_UCODE_TLV_API_QUOTA_LOW_LATENCY 38
+#define IWM_UCODE_TLV_API_DEPRECATE_TTAK 41
#define IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2 42
+#define IWM_UCODE_TLV_API_NAN_NOTIF_V2 43
+#define IWM_UCODE_TLV_API_REDUCE_TX_POWER 45
+#define IWM_UCODE_TLV_API_SHORT_BEACON_NOTIF 46
+#define IWM_UCODE_TLV_API_REGULATORY_NVM_INFO 48
+#define IWM_UCODE_TLV_API_FTM_NEW_RANGE_REQ 49
+#define IWM_UCODE_TLV_API_FTM_RTT_ACCURACY 54
+#define IWM_UCODE_TLV_API_SAR_TABLE_VER 55
#define IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER 58
#define IWM_NUM_UCODE_TLV_API 128
#define IWM_UCODE_TLV_CAPA_GSCAN_SUPPORT 31
#define IWM_UCODE_TLV_CAPA_NAN_SUPPORT 34
#define IWM_UCODE_TLV_CAPA_UMAC_UPLOAD 35
+#define IWM_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT 37
+#define IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT 39
+#define IWM_UCODE_TLV_CAPA_CDB_SUPPORT 40
+#define IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA 44
+#define IWM_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS 48
#define IWM_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE 64
#define IWM_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS 65
#define IWM_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT 67
#define IWM_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT 68
+#define IWM_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD 70
#define IWM_UCODE_TLV_CAPA_BEACON_ANT_SELECTION 71
#define IWM_UCODE_TLV_CAPA_BEACON_STORING 72
#define IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2 73
#define IWM_UCODE_TLV_CAPA_LMAC_UPLOAD 79
#define IWM_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG 80
#define IWM_UCODE_TLV_CAPA_LQM_SUPPORT 81
+#define IWM_UCODE_TLV_CAPA_TX_POWER_ACK 84
+#define IWM_UCODE_TLV_CAPA_D3_DEBUG 87
+#define IWM_UCODE_TLV_CAPA_LED_CMD_SUPPORT 88
+#define IWM_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP 92
#define IWM_NUM_UCODE_TLV_CAPA 128
#define IWM_UCODE_TLV_FW_DBG_DEST 38
#define IWM_UCODE_TLV_FW_DBG_CONF 39
#define IWM_UCODE_TLV_FW_DBG_TRIGGER 40
+#define IWM_UCODE_TLV_CMD_VERSIONS 48
#define IWM_UCODE_TLV_FW_GSCAN_CAPA 50
#define IWM_UCODE_TLV_FW_MEM_SEG 51
+#define IWM_UCODE_TLV_UMAC_DEBUG_ADDRS 54
+#define IWM_UCODE_TLV_LMAC_DEBUG_ADDRS 55
+#define IWM_UCODE_TLV_HW_TYPE 58
+
+#define IWM_UCODE_TLV_DEBUG_BASE 0x1000005
+#define IWM_UCODE_TLV_TYPE_DEBUG_INFO (IWM_UCODE_TLV_DEBUG_BASE + 0)
+#define IWM_UCODE_TLV_TYPE_BUFFER_ALLOCATION (IWM_UCODE_TLV_DEBUG_BASE + 1)
+#define IWM_UCODE_TLV_TYPE_HCMD (IWM_UCODE_TLV_DEBUG_BASE + 2)
+#define IWM_UCODE_TLV_TYPE_REGIONS (IWM_UCODE_TLV_DEBUG_BASE + 3)
+#define IWM_UCODE_TLV_TYPE_TRIGGERS (IWM_UCODE_TLV_DEBUG_BASE + 4)
+#define IWM_UCODE_TLV_DEBUG_MAX IWM_UCODE_TLV_TYPE_TRIGGERS
struct iwm_ucode_tlv {
uint32_t type; /* see above */
#define IWM_DATA_PATH_GROUP 0x5
#define IWM_PROT_OFFLOAD_GROUP 0xb
+/* SYSTEM_GROUP group subcommand IDs */
+
+#define IWM_SHARED_MEM_CFG_CMD 0x00
+#define IWM_SOC_CONFIGURATION_CMD 0x01
+#define IWM_INIT_EXTENDED_CFG_CMD 0x03
+#define IWM_FW_ERROR_RECOVERY_CMD 0x07
+
/* DATA_PATH group subcommand IDs */
#define IWM_DQA_ENABLE_CMD 0x00
#define IWM_PHY_CFG_RX_CHAIN_B (1 << 13)
#define IWM_PHY_CFG_RX_CHAIN_C (1 << 14)
+#define IWM_MAX_DTS_TRIPS 8
+
+/**
+ * struct iwm_ct_kill_notif - CT-kill entry notification
+ *
+ * @temperature: the current temperature in celsius
+ * @reserved: reserved
+ */
+struct iwm_ct_kill_notif {
+ uint16_t temperature;
+ uint16_t reserved;
+} __packed; /* GRP_PHY_CT_KILL_NTF */
+
+/**
+ * struct iwm_temp_report_ths_cmd - set temperature thresholds
+ * (IWM_TEMP_REPORTING_THRESHOLDS_CMD)
+ *
+ * @num_temps: number of temperature thresholds passed
+ * @thresholds: array with the thresholds to be configured
+ */
+struct iwm_temp_report_ths_cmd {
+ uint32_t num_temps;
+ uint16_t thresholds[IWM_MAX_DTS_TRIPS];
+} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */
+
/*
* PHY db
*/
uint32_t dbg_print_buff_addr;
} __packed; /* ALIVE_RES_API_S_VER_3 */
+#define IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE (1 << 0)
+#define IWM_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY (1 << 1)
+
+#define IWM_SOC_FLAGS_LTR_APPLY_DELAY_MASK 0xc
+#define IWM_SOC_FLAGS_LTR_APPLY_DELAY_NONE 0
+#define IWM_SOC_FLAGS_LTR_APPLY_DELAY_200 1
+#define IWM_SOC_FLAGS_LTR_APPLY_DELAY_2500 2
+#define IWM_SOC_FLAGS_LTR_APPLY_DELAY_1820 3
+
+/**
+ * struct iwm_soc_configuration_cmd - Set device stabilization latency
+ *
+ * @flags: soc settings flags. In VER_1, we can only set the DISCRETE
+ * flag, because the FW treats the whole value as an integer. In
+ * VER_2, we can set the bits independently.
+ * @latency: time for SOC to ensure stable power & XTAL
+ */
+struct iwm_soc_configuration_cmd {
+ uint32_t flags;
+ uint32_t latency;
+} __packed; /*
+ * SOC_CONFIGURATION_CMD_S_VER_1 (see description above)
+ * SOC_CONFIGURATION_CMD_S_VER_2
+ */
+
+
/* Error response/notification */
#define IWM_FW_ERR_UNKNOWN_CMD 0x0
#define IWM_FW_ERR_INVALID_CMD_PARAM 0x1
uint64_t timestamp;
} __packed;
+#define IWM_FW_CMD_VER_UNKNOWN 99
+
+/**
+ * struct iwm_fw_cmd_version - firmware command version entry
+ * @cmd: command ID
+ * @group: group ID
+ * @cmd_ver: command version
+ * @notif_ver: notification version
+ */
+struct iwm_fw_cmd_version {
+ uint8_t cmd;
+ uint8_t group;
+ uint8_t cmd_ver;
+ uint8_t notif_ver;
+} __packed;
+
/* Common PHY, MAC and Bindings definitions */
/* Bindings and Time Quota */
+/**
+ * struct iwm_binding_cmd_v1 - configuring bindings
+ * ( IWM_BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of IWM_FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding
+ * @phy: PHY id and color which belongs to the binding
+ */
+struct iwm_binding_cmd_v1 {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWM_BINDING_DATA_API_S_VER_1 */
+ uint32_t macs[IWM_MAX_MACS_IN_BINDING];
+ uint32_t phy;
+} __packed; /* IWM_BINDING_CMD_API_S_VER_1 */
+
/**
* struct iwm_binding_cmd - configuring bindings
* ( IWM_BINDING_CONTEXT_CMD = 0x2b )
* @action: action to perform, one of IWM_FW_CTXT_ACTION_*
* @macs: array of MAC id and colors which belong to the binding
* @phy: PHY id and color which belongs to the binding
+ * @lmac_id: the lmac id the binding belongs to
*/
struct iwm_binding_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
/* IWM_BINDING_DATA_API_S_VER_1 */
uint32_t macs[IWM_MAX_MACS_IN_BINDING];
uint32_t phy;
-} __packed; /* IWM_BINDING_CMD_API_S_VER_1 */
+ uint32_t lmac_id;
+} __packed; /* IWM_BINDING_CMD_API_S_VER_2 */
+
+#define IWM_LMAC_24G_INDEX 0
+#define IWM_LMAC_5G_INDEX 1
/* The maximal number of fragments in the FW's schedule session */
#define IWM_MAX_QUOTA 128
* remainig quota (after Time Events) according to this quota.
* @max_duration: max uninterrupted context duration in TU
*/
-struct iwm_time_quota_data {
+struct iwm_time_quota_data_v1 {
uint32_t id_and_color;
uint32_t quota;
uint32_t max_duration;
* ( IWM_TIME_QUOTA_CMD = 0x2c )
* @quotas: allocations per binding
*/
+struct iwm_time_quota_cmd_v1 {
+ struct iwm_time_quota_data_v1 quotas[IWM_MAX_BINDINGS];
+} __packed; /* IWM_TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+
+#define IWM_QUOTA_LOW_LATENCY_NONE 0
+#define IWM_QUOTA_LOW_LATENCY_TX (1 << 0)
+#define IWM_QUOTA_LOW_LATENCY_RX (1 << 1)
+
+/**
+ * struct iwm_time_quota_data - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding.
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ * remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ * @low_latency: low latency status IWM_QUOTA_LOW_LATENCY_*
+ */
+struct iwm_time_quota_data {
+ uint32_t id_and_color;
+ uint32_t quota;
+ uint32_t max_duration;
+ uint32_t low_latency;
+} __packed; /* TIME_QUOTA_DATA_API_S_VER_2 */
+
+/**
+ * struct iwm_time_quota_cmd - configuration of time quota between bindings
+ * ( TIME_QUOTA_CMD = 0x2c )
+ * Note: on non-CDB the fourth one is the auxiliary mac and is essentially zero.
+ * On CDB the fourth one is a regular binding.
+ *
+ * @quotas: allocations per binding
+ */
struct iwm_time_quota_cmd {
struct iwm_time_quota_data quotas[IWM_MAX_BINDINGS];
-} __packed; /* IWM_TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_2 */
/* PHY context */
* @width: PHY_[VHT|LEGACY]_CHANNEL_*
* @ctrl channel: PHY_[VHT|LEGACY]_CTRL_*
*/
-struct iwm_fw_channel_info {
+struct iwm_fw_channel_info_v1 {
uint8_t band;
uint8_t channel;
uint8_t width;
uint8_t ctrl_pos;
-} __packed;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_1 */
+
+/*
+ * struct iwm_fw_channel_info - channel information
+ *
+ * @channel: channel number
+ * @band: PHY_BAND_*
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_*
+ * @reserved: for future use and alignment
+ */
+struct iwm_fw_channel_info {
+ uint32_t channel;
+ uint8_t band;
+ uint8_t width;
+ uint8_t ctrl_pos;
+ uint8_t reserved;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_2 */
+
#define IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
#define IWM_PHY_RX_CHAIN_DRIVER_FORCE_MSK \
* @acquisition_data: ???
* @dsp_cfg_flags: set to 0
*/
-struct iwm_phy_context_cmd {
+/*
+ * XXX Intel forgot to bump the PHY_CONTEXT command API when they increased
+ * the size of fw_channel_info from v1 to v2.
+ * To keep things simple we define two versions of this struct, and both
+ * are labeled as CMD_API_VER_1. (The Linux iwlwifi driver performs dark
+ * magic with pointers to struct members instead.)
+ */
+/* This version must be used if IWM_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS is set: */
+struct iwm_phy_context_cmd_uhb {
/* COMMON_INDEX_HDR_API_S_VER_1 */
uint32_t id_and_color;
uint32_t action;
uint32_t acquisition_data;
uint32_t dsp_cfg_flags;
} __packed; /* IWM_PHY_CONTEXT_CMD_API_VER_1 */
+/* This version must be used otherwise: */
+struct iwm_phy_context_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWM_PHY_CONTEXT_DATA_API_S_VER_1 */
+ uint32_t apply_time;
+ uint32_t tx_param_color;
+ struct iwm_fw_channel_info_v1 ci;
+ uint32_t txchain_info;
+ uint32_t rxchain_info;
+ uint32_t acquisition_data;
+ uint32_t dsp_cfg_flags;
+} __packed; /* IWM_PHY_CONTEXT_CMD_API_VER_1 */
#define IWM_RX_INFO_PHY_CNT 8
#define IWM_RX_INFO_ENERGY_ANT_ABC_IDX 1
* @flush_ctl: control flags
* @reserved: reserved
*/
-struct iwm_tx_path_flush_cmd {
+struct iwm_tx_path_flush_cmd_v1 {
uint32_t queues_ctl;
uint16_t flush_ctl;
uint16_t reserved;
} __packed; /* IWM_TX_PATH_FLUSH_CMD_API_S_VER_1 */
+/**
+ * struct iwm_tx_path_flush_cmd -- queue/FIFO flush command
+ * @sta_id: station ID to flush
+ * @tid_mask: TID mask to flush
+ * @reserved: reserved
+ */
+struct iwm_tx_path_flush_cmd {
+ uint32_t sta_id;
+ uint16_t tid_mask;
+ uint16_t reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
+
/**
* iwm_get_scd_ssn - returns the SSN of the SCD
* @tx_resp: the Tx response from the fw (agg or non-agg)
#define IWM_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME (1 << 14)
#define IWM_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE (1 << 15)
+/**
+ * UMAC scan general flags #2
+ * @IWM_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete
+ * notification per channel or not.
+ * @IWM_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
+ * reorder optimization or not.
+ */
+#define IWM_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL (1 << 0)
+#define IWM_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER (1 << 1)
+
/**
* struct iwm_scan_channel_cfg_umac
* @flags: bitmap - 0-19: directed scan to i'th ssid.
* @channels: channel control data map, DWORD for each channel. Only the first
* 16bits are used.
*/
-struct iwm_mcc_update_resp {
+struct iwm_mcc_update_resp_v2 {
uint32_t status;
uint16_t mcc;
uint8_t cap;
uint32_t channels[0];
} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_2 */
+/**
+ * iwm_mcc_update_resp_v3 - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwm_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which matches the MCC
+ * @source_id: the MCC source, see IWM_MCC_SOURCE_*
+ * @time: time elapsed from the MCC test start (in 30 seconds TU)
+ * @geo_info: geographic specific profile information
+ * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
+ * channels, depending on platform)
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ * 16bits are used.
+ */
+struct iwm_mcc_update_resp_v3 {
+ uint32_t status;
+ uint16_t mcc;
+ uint8_t cap;
+ uint8_t source_id;
+ uint16_t time;
+ uint16_t geo_info;
+ uint32_t n_channels;
+ uint32_t channels[0];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
+
/**
* struct iwm_mcc_chub_notif - chub notifies of mcc change
* (MCC_CHUB_UPDATE_CMD = 0xc9)