Introduce qwz(4), a work-in-progress port of the Linux ath12k driver.
author: patrick <patrick@openbsd.org>
	Wed, 14 Aug 2024 14:40:45 +0000 (14:40 +0000)
committer: patrick <patrick@openbsd.org>
	Wed, 14 Aug 2024 14:40:45 +0000 (14:40 +0000)
This driver is not working yet, it's a copy of qwx(4) which bit-by-bit will be
adjusted to work on the newer generation of chips.  Even though this is only a
minor bump over the previous generation, the changes are large enough that
bringup, debugging and long-term maintenance would suffer trying to squash them
into a single driver.  This can be reconsidered once we have reached a stable
state.

sys/arch/amd64/conf/GENERIC
sys/arch/amd64/conf/RAMDISK_CD
sys/arch/arm64/conf/GENERIC
sys/arch/arm64/conf/RAMDISK
sys/conf/files
sys/dev/ic/qwz.c [new file with mode: 0644]
sys/dev/ic/qwzreg.h [new file with mode: 0644]
sys/dev/ic/qwzvar.h [new file with mode: 0644]
sys/dev/pci/files.pci
sys/dev/pci/if_qwz_pci.c [new file with mode: 0644]

index 1263cc5..80e71a9 100644 (file)
@@ -1,4 +1,4 @@
-#      $OpenBSD: GENERIC,v 1.524 2024/08/04 11:05:18 kettenis Exp $
+#      $OpenBSD: GENERIC,v 1.525 2024/08/14 14:40:45 patrick Exp $
 #
 # For further information on compiling OpenBSD kernels, see the config(8)
 # man page.
@@ -586,6 +586,7 @@ iwn*        at pci?                         # Intel WiFi Link 4965/5000/1000/6000
 iwm*   at pci?                         # Intel WiFi Link 7xxx
 iwx*   at pci?                         # Intel WiFi Link 22xxx
 qwx*   at pci?                         # Qualcomm 802.11ax
+#qwz*  at pci?                         # Qualcomm 802.11be
 ral*   at pci?                         # Ralink RT2500/RT2501/RT2600
 ral*   at cardbus?                     # Ralink RT2500/RT2501/RT2600
 rtw*   at pci?                         # Realtek 8180
index 4c8f4c0..f91780a 100644 (file)
@@ -1,4 +1,4 @@
-#      $OpenBSD: RAMDISK_CD,v 1.206 2024/05/09 17:05:22 mglocker Exp $
+#      $OpenBSD: RAMDISK_CD,v 1.207 2024/08/14 14:40:45 patrick Exp $
 
 machine                amd64
 maxusers       4
@@ -286,6 +286,7 @@ iwn*                at pci?                 # Intel Wireless WiFi Link 4965AGN
 iwm*           at pci?                 # Intel WiFi Link 7xxx
 iwx*           at pci?                 # Intel WiFi Link 22xxx
 qwx*           at pci?                 # Qualcomm 802.11ax
+#qwz*          at pci?                 # Qualcomm 802.11be
 ral*           at pci?                 # Ralink RT2500/RT2501/RT2600
 ral*           at cardbus?             # Ralink RT2500/RT2501/RT2600
 rtw*           at pci?                 # Realtek 8180
index f6bb7ef..8a3e008 100644 (file)
@@ -1,4 +1,4 @@
-# $OpenBSD: GENERIC,v 1.288 2024/07/31 10:07:33 mglocker Exp $
+# $OpenBSD: GENERIC,v 1.289 2024/08/14 14:40:46 patrick Exp $
 #
 # GENERIC machine description file
 #
@@ -404,6 +404,7 @@ iwn*                at pci?                 # Intel WiFi Link 4965/5000/1000/6000
 iwm*           at pci?                 # Intel WiFi Link 7xxx
 iwx*           at pci?                 # Intel WiFi Link 22xxx
 qwx*           at pci?                 # Qualcomm 802.11ax
+#qwz*          at pci?                 # Qualcomm 802.11be
 
 # PCI SCSI
 ahci*          at pci? flags 0x0000    # AHCI SATA controllers
index 344acd0..49ad373 100644 (file)
@@ -1,4 +1,4 @@
-# $OpenBSD: RAMDISK,v 1.218 2024/07/31 10:07:33 mglocker Exp $
+# $OpenBSD: RAMDISK,v 1.219 2024/08/14 14:40:46 patrick Exp $
 
 machine                arm64
 maxusers       4
@@ -319,6 +319,7 @@ athn*               at pci?                 # Atheros AR9k (802.11a/g/n)
 bwfm*          at pci?                 # Broadcom FullMAC
 iwx*           at pci?                 # Intel WiFi Link 22xxx
 qwx*           at pci?                 # Qualcomm 802.11ax
+#qwz*          at pci?                 # Qualcomm 802.11be
 
 # PCI SCSI
 ahci*          at pci? flags 0x0000    # AHCI SATA controllers
index a97e3ef..e26cfb4 100644 (file)
@@ -1,4 +1,4 @@
-#      $OpenBSD: files,v 1.734 2024/07/13 13:20:44 bluhm Exp $
+#      $OpenBSD: files,v 1.735 2024/08/14 14:40:46 patrick Exp $
 #      $NetBSD: files,v 1.87 1996/05/19 17:17:50 jonathan Exp $
 
 #      @(#)files.newconf       7.5 (Berkeley) 5/10/93
@@ -429,6 +429,10 @@ file       dev/ic/bwi.c                    bwi
 device qwx: ether, ifnet, ifmedia, firmload, wlan
 file   dev/ic/qwx.c                    qwx
 
+# Qualcomm 802.11be
+device qwz: ether, ifnet, ifmedia, firmload, wlan
+file   dev/ic/qwz.c                    qwz
+
 # Intel OnChip System Fabric
 device iosf
 file   dev/ic/iosf.c                   iosf    needs-flag
diff --git a/sys/dev/ic/qwz.c b/sys/dev/ic/qwz.c
new file mode 100644 (file)
index 0000000..9d4f5fc
--- /dev/null
@@ -0,0 +1,25568 @@
+/*     $OpenBSD: qwz.c,v 1.1 2024/08/14 14:40:46 patrick Exp $ */
+
+/*
+ * Copyright 2023 Stefan Sperling <stsp@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted (subject to the limitations in the disclaimer
+ * below) provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ *  * Neither the name of [Owner Organization] nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
+ * THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
+ * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Driver for Qualcomm Technologies 802.11be chipset.
+ */
+
+#include "bpfilter.h"
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/device.h>
+#include <sys/rwlock.h>
+#include <sys/systm.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <sys/refcnt.h>
+#include <sys/task.h>
+
+#include <machine/bus.h>
+#include <machine/intr.h>
+
+#ifdef __HAVE_FDT
+#include <dev/ofw/openfirm.h>
+#endif
+
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+#include <net/if.h>
+#include <net/if_media.h>
+
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_radiotap.h>
+
/* XXX linux porting goo */
#ifdef __LP64__
#define BITS_PER_LONG          64
#else
#define BITS_PER_LONG          32
#endif
/* Contiguous bitmask covering bits l through h inclusive (Linux GENMASK). */
#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
/* Shift amount of the lowest set bit of a mask (bitfield shift helper). */
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
/* Find first zero bit, expressed via ffs() on the complement. */
#define ffz(x) ffs(~(x))
/* Extract/insert a field described by mask _m from/into a register word. */
#define FIELD_GET(_m, _v) ((typeof(_m))(((_v) & (_m)) >> __bf_shf(_m)))
#define FIELD_PREP(_m, _v) (((typeof(_m))(_v) << __bf_shf(_m)) & (_m))
#define BIT(x)               (1UL << (x))
/*
 * NOTE(review): unlike their Linux namesakes, these operate on a single
 * integer word (not a bitmap array), so bit indices must stay below the
 * width of the word they are applied to.
 */
#define test_bit(i, a)  ((a) & (1 << (i)))
#define clear_bit(i, a) ((a)) &= ~(1 << (i))
#define set_bit(i, a)   ((a)) |= (1 << (i))
/* Recover the address of a containing structure from a member pointer. */
#define container_of(ptr, type, member) ({                     \
	const __typeof( ((type *)0)->member ) *__mptr = (ptr);  \
	(type *)( (char *)__mptr - offsetof(type,member) );})
+
+/* #define QWZ_DEBUG */
+
+#include <dev/ic/qwzreg.h>
+#include <dev/ic/qwzvar.h>
+
+#ifdef QWZ_DEBUG
+uint32_t       qwz_debug = 0
+                   | QWZ_D_MISC
+/*                 | QWZ_D_MHI */
+/*                 | QWZ_D_QMI */
+/*                 | QWZ_D_WMI */
+/*                 | QWZ_D_HTC */
+/*                 | QWZ_D_HTT */
+/*                 | QWZ_D_MAC */
+/*                 | QWZ_D_MGMT */
+               ;
+#endif
+
+int qwz_ce_init_pipes(struct qwz_softc *);
+int qwz_hal_srng_src_num_free(struct qwz_softc *, struct hal_srng *, int);
+int qwz_ce_per_engine_service(struct qwz_softc *, uint16_t);
+int qwz_hal_srng_setup(struct qwz_softc *, enum hal_ring_type, int, int,
+    struct hal_srng_params *);
+int qwz_ce_send(struct qwz_softc *, struct mbuf *, uint8_t, uint16_t);
+int qwz_htc_connect_service(struct qwz_htc *, struct qwz_htc_svc_conn_req *,
+    struct qwz_htc_svc_conn_resp *);
+void qwz_hal_srng_shadow_update_hp_tp(struct qwz_softc *, struct hal_srng *);
+void qwz_wmi_free_dbring_caps(struct qwz_softc *);
+int qwz_wmi_set_peer_param(struct qwz_softc *, uint8_t *, uint32_t,
+    uint32_t, uint32_t, uint32_t);
+int qwz_wmi_peer_rx_reorder_queue_setup(struct qwz_softc *, int, int,
+    uint8_t *, uint64_t, uint8_t, uint8_t, uint32_t);
+const void **qwz_wmi_tlv_parse_alloc(struct qwz_softc *, const void *, size_t);
+int qwz_core_init(struct qwz_softc *);
+int qwz_qmi_event_server_arrive(struct qwz_softc *);
+int qwz_mac_register(struct qwz_softc *);
+int qwz_mac_start(struct qwz_softc *);
+void qwz_mac_scan_finish(struct qwz_softc *);
+int qwz_mac_mgmt_tx_wmi(struct qwz_softc *, struct qwz_vif *, uint8_t,
+    struct ieee80211_node *, struct mbuf *);
+int qwz_dp_tx(struct qwz_softc *, struct qwz_vif *, uint8_t,
+    struct ieee80211_node *, struct mbuf *);
+int qwz_dp_tx_send_reo_cmd(struct qwz_softc *, struct dp_rx_tid *,
+    enum hal_reo_cmd_type , struct ath12k_hal_reo_cmd *,
+    void (*func)(struct qwz_dp *, void *, enum hal_reo_cmd_status));
+void qwz_dp_rx_deliver_msdu(struct qwz_softc *, struct qwz_rx_msdu *);
+void qwz_dp_service_mon_ring(void *);
+void qwz_peer_frags_flush(struct qwz_softc *, struct ath12k_peer *);
+int qwz_wmi_vdev_install_key(struct qwz_softc *,
+    struct wmi_vdev_install_key_arg *, uint8_t);
+int qwz_dp_peer_rx_pn_replay_config(struct qwz_softc *, struct qwz_vif *,
+    struct ieee80211_node *, struct ieee80211_key *, int);
+void qwz_setkey_clear(struct qwz_softc *);
+
+int qwz_scan(struct qwz_softc *);
+void qwz_scan_abort(struct qwz_softc *);
+int qwz_auth(struct qwz_softc *);
+int qwz_deauth(struct qwz_softc *);
+int qwz_run(struct qwz_softc *);
+int qwz_run_stop(struct qwz_softc *);
+
+struct ieee80211_node *
+qwz_node_alloc(struct ieee80211com *ic)
+{
+       struct qwz_node *nq;
+
+       nq = malloc(sizeof(struct qwz_node), M_DEVBUF, M_NOWAIT | M_ZERO);
+       nq->peer.peer_id = HAL_INVALID_PEERID;
+       return (struct ieee80211_node *)nq;
+}
+
/*
 * ifnet if_init handler: bring the hardware up.
 *
 * Chooses crypto and frame modes, resets driver state, initializes the
 * core, waits for the QRTR transport to announce the QMI server, and on
 * the first successful invocation completes net80211 attachment (channel
 * list, MAC address, media).  If the interface is marked IFF_UP, starts
 * the MAC and kicks off a scan.  Returns 0 or an errno value.
 */
int
qwz_init(struct ifnet *ifp)
{
	int error;
	struct qwz_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	sc->fw_mode = ATH12K_FIRMWARE_MODE_NORMAL;
	/*
	 * There are several known hardware/software crypto issues
	 * on wcn6855 devices, firmware 0x1106196e. It is unclear
	 * if these are driver or firmware bugs.
	 *
	 * 1) Broadcast/Multicast frames will only be received on
	 *    encrypted networks if hardware crypto is used and a
	 *    CCMP group key is used. Otherwise such frames never
	 *    even trigger an interrupt. This breaks ARP and IPv6.
	 *    This issue is known to affect the Linux ath12k vendor
	 *    driver when software crypto mode is selected.
	 *    Workaround: Use hardware crypto on WPA2 networks.
	 *    However, even with hardware crypto broadcast frames
	 *    are never received if TKIP is used as the WPA2 group
	 *    cipher and we have no workaround for this.
	 *
	 * 2) Adding WEP keys for hardware crypto crashes the firmware.
	 *    Presumably, lack of WEP support is deliberate because the
	 *    Linux ath12k vendor driver rejects attempts to install
	 *    WEP keys to hardware.
	 *    Workaround: Use software crypto if WEP is enabled.
	 *    This suffers from the broadcast issues mentioned above.
	 *
	 * 3) A WPA1 group key handshake message from the AP is never
	 *    received if hardware crypto is used.
	 *    Workaround: Use software crypto if WPA1 is enabled.
	 *    This suffers from the broadcast issues mentioned above,
	 *    even on WPA2 networks when WPA1 and WPA2 are both enabled.
	 *    On OpenBSD, WPA1 is disabled by default.
	 *
	 * The only known fully working configurations are unencrypted
	 * networks, and WPA2/CCMP-only networks provided WPA1 remains
	 * disabled.
	 */
	if ((ic->ic_flags & IEEE80211_F_WEPON) ||
	    (ic->ic_rsnprotos & IEEE80211_PROTO_WPA))
		sc->crypto_mode = ATH12K_CRYPT_MODE_SW;
	else
		sc->crypto_mode = ATH12K_CRYPT_MODE_HW;
	sc->frame_mode = ATH12K_HW_TXRX_NATIVE_WIFI;
	ic->ic_state = IEEE80211_S_INIT;
	sc->ns_nstate = IEEE80211_S_INIT;
	sc->scan.state = ATH12K_SCAN_IDLE;
	sc->vdev_id_11d_scan = QWZ_11D_INVALID_VDEV_ID;

	error = qwz_core_init(sc);
	if (error)
		return error;

	/* BCAST acts as a sentinel; the QRTR code overwrites it on arrival. */
	memset(&sc->qrtr_server, 0, sizeof(sc->qrtr_server));
	sc->qrtr_server.node = QRTR_NODE_BCAST;

	/* wait for QRTR init to be done */
	while (sc->qrtr_server.node == QRTR_NODE_BCAST) {
		error = tsleep_nsec(&sc->qrtr_server, 0, "qwzqrtr",
		    SEC_TO_NSEC(5));
		if (error) {
			printf("%s: qrtr init timeout\n", sc->sc_dev.dv_xname);
			return error;
		}
	}

	error = qwz_qmi_event_server_arrive(sc);
	if (error)
		return error;

	if (sc->attached) {
		/* Update MAC in case the upper layers changed it. */
		IEEE80211_ADDR_COPY(ic->ic_myaddr,
		    ((struct arpcom *)ifp)->ac_enaddr);
	} else {
		/* One-time attachment steps; requires firmware to be up. */
		sc->attached = 1;

		/* Configure channel information obtained from firmware. */
		ieee80211_channel_init(ifp);

		/* Configure initial MAC address. */
		error = if_setlladdr(ifp, ic->ic_myaddr);
		if (error)
			printf("%s: could not set MAC address %s: %d\n",
			    sc->sc_dev.dv_xname, ether_sprintf(ic->ic_myaddr),
			    error);

		ieee80211_media_init(ifp, qwz_media_change,
		    ieee80211_media_status);
	}

	if (ifp->if_flags & IFF_UP) {
		refcnt_init(&sc->task_refs);

		ifq_clr_oactive(&ifp->if_snd);
		ifp->if_flags |= IFF_RUNNING;

		error = qwz_mac_start(sc);
		if (error)
			return error;

		ieee80211_begin_scan(ifp);
	}

	return 0;
}
+
+void
+qwz_add_task(struct qwz_softc *sc, struct taskq *taskq, struct task *task)
+{
+       int s = splnet();
+
+       if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
+               splx(s);
+               return;
+       }
+
+       refcnt_take(&sc->task_refs);
+       if (!task_add(taskq, task))
+               refcnt_rele_wake(&sc->task_refs);
+       splx(s);
+}
+
+void
+qwz_del_task(struct qwz_softc *sc, struct taskq *taskq, struct task *task)
+{
+       if (task_del(taskq, task))
+               refcnt_rele(&sc->task_refs);
+}
+
/*
 * Bring the interface down: cancel timers and tasks, wait for in-flight
 * tasks to finish, reset driver state and power off the hardware.
 * Caller must hold sc->ioctl_rwl for writing.
 */
void
qwz_stop(struct ifnet *ifp)
{
	struct qwz_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	int s = splnet();

	rw_assert_wrlock(&sc->ioctl_rwl);

	timeout_del(&sc->mon_reap_timer);

	/* Disallow new tasks. */
	set_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags);

	/* Cancel scheduled tasks and let any stale tasks finish up. */
	task_del(systq, &sc->init_task);
	qwz_del_task(sc, sc->sc_nswq, &sc->newstate_task);
	qwz_del_task(sc, systq, &sc->setkey_task);
	/* Sleep until every reference taken by qwz_add_task() is released. */
	refcnt_finalize(&sc->task_refs, "qwzstop");

	qwz_setkey_clear(sc);

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags);

	ifp->if_timer = sc->sc_tx_timer = 0;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Call net80211's own newstate handler directly; tasks are gone. */
	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
	sc->ns_nstate = IEEE80211_S_INIT;
	sc->scan.state = ATH12K_SCAN_IDLE;
	sc->vdev_id_11d_scan = QWZ_11D_INVALID_VDEV_ID;
	sc->pdevs_active = 0;

	/* power off hardware */
	qwz_core_deinit(sc);

	splx(s);
}
+
+void
+qwz_free_firmware(struct qwz_softc *sc)
+{
+       int i;
+
+       for (i = 0; i < nitems(sc->fw_img); i++) {
+               free(sc->fw_img[i].data, M_DEVBUF, sc->fw_img[i].size);
+               sc->fw_img[i].data = NULL;
+               sc->fw_img[i].size = 0;
+       }
+}
+
/*
 * ifnet ioctl handler.  Serializes all ioctls behind sc->ioctl_rwl since
 * qwz_init()/qwz_stop() may sleep, then handles interface up/down and
 * delegates the rest to net80211.  ENETRESET from lower layers triggers
 * a restart of a running interface.
 */
int
qwz_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct qwz_softc *sc = ifp->if_softc;
	int s, err = 0;

	/*
	 * Prevent processes from entering this function while another
	 * process is tsleep'ing in it.
	 */
	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
	if (err)
		return err;
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				/* Force reload of firmware image from disk. */
				qwz_free_firmware(sc);
				err = qwz_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				qwz_stop(ifp);
		}
		break;

	default:
		err = ieee80211_ioctl(ifp, cmd, data);
	}

	/* Net80211 asks for a restart by returning ENETRESET. */
	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			qwz_stop(ifp);
			err = qwz_init(ifp);
		}
	}

	splx(s);
	rw_exit(&sc->ioctl_rwl);

	return err;
}
+
+int
+qwz_tx(struct qwz_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
+{
+       struct ieee80211_frame *wh;
+       struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
+       uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
+       uint8_t frame_type;
+
+       wh = mtod(m, struct ieee80211_frame *);
+       frame_type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+
+#if NBPFILTER > 0
+       if (sc->sc_drvbpf != NULL) {
+               struct qwz_tx_radiotap_header *tap = &sc->sc_txtap;
+
+               bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
+                   m, BPF_DIRECTION_OUT);
+       }
+#endif
+
+       if (frame_type == IEEE80211_FC0_TYPE_MGT)
+               return qwz_mac_mgmt_tx_wmi(sc, arvif, pdev_id, ni, m);
+
+       return qwz_dp_tx(sc, arvif, pdev_id, ni, m);
+}
+
/*
 * ifnet if_start handler: drain management and data queues and hand
 * frames to qwz_tx().  Management frames are sent even before the state
 * machine reaches RUN; data frames only in RUN state.  Backs off when
 * TX rings are full (sc->qfullmsk).
 */
void
qwz_start(struct ifnet *ifp)
{
	struct qwz_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* need to send management frames even if we're not RUNning */
		m = mq_dequeue(&ic->ic_mgtq);
		if (m) {
			/* net80211 stashes the destination node here. */
			ni = m->m_pkthdr.ph_cookie;
			goto sendit;
		}

		if (ic->ic_state != IEEE80211_S_RUN ||
		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
			break;

		m = ifq_dequeue(&ifp->if_snd);
		if (!m)
			break;
		/* Make sure the Ethernet header is contiguous for encap. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

 sendit:
#if NBPFILTER > 0
		if (ic->ic_rawbpf != NULL)
			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
#endif
		if (qwz_tx(sc, m, ni) != 0) {
			ieee80211_release_node(ic, ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the watchdog while frames are in flight. */
		if (ifp->if_flags & IFF_UP)
			ifp->if_timer = 1;
	}
}
+
+void
+qwz_watchdog(struct ifnet *ifp)
+{
+       struct qwz_softc *sc = ifp->if_softc;
+
+       ifp->if_timer = 0;
+
+       if (sc->sc_tx_timer > 0) {
+               if (--sc->sc_tx_timer == 0) {
+                       printf("%s: device timeout\n", sc->sc_dev.dv_xname);
+                       if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags))
+                               task_add(systq, &sc->init_task);
+                       ifp->if_oerrors++;
+                       return;
+               }
+               ifp->if_timer = 1;
+       }
+
+       ieee80211_watchdog(ifp);
+}
+
+int
+qwz_media_change(struct ifnet *ifp)
+{
+       int err;
+
+       err = ieee80211_media_change(ifp);
+       if (err != ENETRESET)
+               return err;
+
+       if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
+           (IFF_UP | IFF_RUNNING)) {
+               qwz_stop(ifp);
+               err = qwz_init(ifp);
+       }
+
+       return err;
+}
+
+int
+qwz_queue_setkey_cmd(struct ieee80211com *ic, struct ieee80211_node *ni,
+    struct ieee80211_key *k, int cmd)
+{
+       struct qwz_softc *sc = ic->ic_softc;
+       struct qwz_setkey_task_arg *a;
+
+       if (sc->setkey_nkeys >= nitems(sc->setkey_arg) ||
+           k->k_id > WMI_MAX_KEY_INDEX)
+               return ENOSPC;
+
+       a = &sc->setkey_arg[sc->setkey_cur];
+       a->ni = ieee80211_ref_node(ni);
+       a->k = k;
+       a->cmd = cmd;
+       sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
+       sc->setkey_nkeys++;
+       qwz_add_task(sc, systq, &sc->setkey_task);
+       return EBUSY;
+}
+
+int
+qwz_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
+    struct ieee80211_key *k)
+{
+       struct qwz_softc *sc = ic->ic_softc;
+
+       if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags) ||
+           k->k_cipher == IEEE80211_CIPHER_WEP40 ||
+           k->k_cipher == IEEE80211_CIPHER_WEP104)
+               return ieee80211_set_key(ic, ni, k);
+
+       return qwz_queue_setkey_cmd(ic, ni, k, QWZ_ADD_KEY);
+}
+
+void
+qwz_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
+    struct ieee80211_key *k)
+{
+       struct qwz_softc *sc = ic->ic_softc;
+
+       if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags) ||
+           k->k_cipher == IEEE80211_CIPHER_WEP40 ||
+           k->k_cipher == IEEE80211_CIPHER_WEP104) {
+               ieee80211_delete_key(ic, ni, k);
+               return;
+       }
+
+       if (ic->ic_state != IEEE80211_S_RUN) {
+               /* Keys removed implicitly when firmware station is removed. */
+               return;
+       }
+       
+       /*
+        * net80211 calls us with a NULL node when deleting group keys,
+        * but firmware expects a MAC address in the command.
+        */
+       if (ni == NULL)
+               ni = ic->ic_bss;
+
+       qwz_queue_setkey_cmd(ic, ni, k, QWZ_DEL_KEY);
+}
+
/*
 * Install or delete a crypto key in firmware via the WMI "vdev install
 * key" command, then sleep until the firmware's completion event sets
 * sc->install_key_done.  With delete_key set, a WMI_CIPHER_NONE entry is
 * installed to clear the slot.  Returns 0, an errno value, -1 on event
 * timeout, or the firmware's install_key_status.
 */
int
qwz_wmi_install_key_cmd(struct qwz_softc *sc, struct qwz_vif *arvif,
    uint8_t *macaddr, struct ieee80211_key *k, uint32_t flags,
    int delete_key)
{
	int ret;
	struct wmi_vdev_install_key_arg arg = {
		.vdev_id = arvif->vdev_id,
		.key_idx = k->k_id,
		.key_len = k->k_len,
		.key_data = k->k_key,
		.key_flags = flags,
		.macaddr = macaddr,
	};
	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
#ifdef notyet
	lockdep_assert_held(&arvif->ar->conf_mutex);

	reinit_completion(&ar->install_key_done);
#endif
	/* Nothing to do in software-crypto mode. */
	if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags))
		return 0;

	if (delete_key) {
		arg.key_cipher = WMI_CIPHER_NONE;
		arg.key_data = NULL;
	} else {
		/* Map the net80211 cipher onto the WMI cipher encoding. */
		switch (k->k_cipher) {
		case IEEE80211_CIPHER_CCMP:
			arg.key_cipher = WMI_CIPHER_AES_CCM;
#if 0
			/* TODO: Re-check if flag is valid */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
#endif
			break;
		case IEEE80211_CIPHER_TKIP:
			arg.key_cipher = WMI_CIPHER_TKIP;
			arg.key_txmic_len = 8;
			arg.key_rxmic_len = 8;
			break;
#if 0
		case WLAN_CIPHER_SUITE_CCMP_256:
			arg.key_cipher = WMI_CIPHER_AES_CCM;
			break;
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			arg.key_cipher = WMI_CIPHER_AES_GCM;
			break;
#endif
		default:
			printf("%s: cipher %u is not supported\n",
			    sc->sc_dev.dv_xname, k->k_cipher);
			return EOPNOTSUPP;
		}
#if 0
		if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags))
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
				      IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
#endif
	}

	/* Cleared here, set again by the WMI completion event handler. */
	sc->install_key_done = 0;
	ret = qwz_wmi_vdev_install_key(sc, &arg, pdev_id);
	if (ret)
		return ret;

	while (!sc->install_key_done) {
		ret = tsleep_nsec(&sc->install_key_done, 0, "qwzinstkey",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: install key timeout\n",
			    sc->sc_dev.dv_xname);
			return -1;
		}
	}

	return sc->install_key_status;
}
+
/*
 * Install a pairwise or group key for a station: flush its fragment
 * cache, push the key into firmware, enable PN replay offload, and mark
 * the 802.1X port valid once both pairwise and group keys are in place.
 * Returns 0 or an errno value.
 */
int
qwz_add_sta_key(struct qwz_softc *sc, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwz_node *nq = (struct qwz_node *)ni;
	struct ath12k_peer *peer = &nq->peer;
	struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
	int ret = 0;
	uint32_t flags = 0;
	const int want_keymask = (QWZ_NODE_FLAG_HAVE_PAIRWISE_KEY |
	    QWZ_NODE_FLAG_HAVE_GROUP_KEY);

	/*
	 * Flush the fragments cache during key (re)install to
	 * ensure all frags in the new frag list belong to the same key.
	 */
	qwz_peer_frags_flush(sc, peer);

	if (k->k_flags & IEEE80211_KEY_GROUP)
		flags |= WMI_KEY_GROUP;
	else
		flags |= WMI_KEY_PAIRWISE;

	ret = qwz_wmi_install_key_cmd(sc, arvif, ni->ni_macaddr, k, flags, 0);
	if (ret) {
		printf("%s: installing crypto key failed (%d)\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwz_dp_peer_rx_pn_replay_config(sc, arvif, ni, k, 0);
	if (ret) {
		printf("%s: failed to offload PN replay detection %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Track which of the two key types are now installed. */
	if (k->k_flags & IEEE80211_KEY_GROUP)
		nq->flags |= QWZ_NODE_FLAG_HAVE_GROUP_KEY;
	else
		nq->flags |= QWZ_NODE_FLAG_HAVE_PAIRWISE_KEY;

	/* With both keys installed, traffic may flow: open the port. */
	if ((nq->flags & want_keymask) == want_keymask) {
		DPRINTF("marking port %s valid\n",
		    ether_sprintf(ni->ni_macaddr));
		ni->ni_port_valid = 1;
		ieee80211_set_link_state(ic, LINK_STATE_UP);
	}

	return 0;
}
+
/*
 * Remove a station's key from firmware, disable PN replay offload for
 * it, and clear the corresponding have-key flag on the node.
 * Counterpart of qwz_add_sta_key().  Returns 0 or an errno value.
 */
int
qwz_del_sta_key(struct qwz_softc *sc, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct qwz_node *nq = (struct qwz_node *)ni;
	struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
	int ret = 0;

	/* delete_key=1 installs WMI_CIPHER_NONE to clear the slot. */
	ret = qwz_wmi_install_key_cmd(sc, arvif, ni->ni_macaddr, k, 0, 1);
	if (ret) {
		printf("%s: deleting crypto key failed (%d)\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwz_dp_peer_rx_pn_replay_config(sc, arvif, ni, k, 1);
	if (ret) {
		printf("%s: failed to disable PN replay detection %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	if (k->k_flags & IEEE80211_KEY_GROUP)
		nq->flags &= ~QWZ_NODE_FLAG_HAVE_GROUP_KEY;
	else
		nq->flags &= ~QWZ_NODE_FLAG_HAVE_PAIRWISE_KEY;

	return 0;
}
+
/*
 * Task consuming the setkey ring filled by qwz_queue_setkey_cmd().
 * Executes queued add/delete-key commands in order while associated;
 * stops executing (but keeps draining and releasing node references)
 * after the first error or when a firmware crash is being flushed.
 */
void
qwz_setkey_task(void *arg)
{
	struct qwz_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwz_setkey_task_arg *a;
	int err = 0, s = splnet();

	while (sc->setkey_nkeys > 0) {
		if (err || test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		KASSERT(a->cmd == QWZ_ADD_KEY || a->cmd == QWZ_DEL_KEY);
		if (ic->ic_state == IEEE80211_S_RUN) {
			if (a->cmd == QWZ_ADD_KEY)
				err = qwz_add_sta_key(sc, a->ni, a->k);
			else
				err = qwz_del_sta_key(sc, a->ni, a->k);
		}
		/* Drop the reference taken when the slot was enqueued. */
		ieee80211_release_node(ic, a->ni);
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	/* Balance the reference taken by qwz_add_task(). */
	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
+
+void
+qwz_setkey_clear(struct qwz_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct qwz_setkey_task_arg *ta;
+
+	/*
+	 * Discard all pending key operations without talking to the
+	 * firmware, releasing the node references they hold.
+	 */
+	for (; sc->setkey_nkeys > 0; sc->setkey_nkeys--) {
+		ta = &sc->setkey_arg[sc->setkey_tail];
+		ieee80211_release_node(ic, ta->ni);
+		ta->ni = NULL;
+		sc->setkey_tail = (sc->setkey_tail + 1) %
+		    nitems(sc->setkey_arg);
+	}
+	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
+	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
+}
+
+int
+qwz_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
+{
+       struct ifnet *ifp = &ic->ic_if;
+       struct qwz_softc *sc = ifp->if_softc;
+
+       /*
+        * Prevent attempts to transition towards the same state, unless
+        * we are scanning in which case a SCAN -> SCAN transition
+        * triggers another scan iteration. And AUTH -> AUTH is needed
+        * to support band-steering.
+        */
+       if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
+           nstate != IEEE80211_S_AUTH)
+               return 0;
+       if (ic->ic_state == IEEE80211_S_RUN) {
+#if 0
+               qwz_del_task(sc, systq, &sc->ba_task);
+#endif
+               qwz_del_task(sc, systq, &sc->setkey_task);
+               qwz_setkey_clear(sc);
+#if 0
+               qwz_del_task(sc, systq, &sc->bgscan_done_task);
+#endif
+       }
+
+       sc->ns_nstate = nstate;
+       sc->ns_arg = arg;
+
+       qwz_add_task(sc, sc->sc_nswq, &sc->newstate_task);
+
+       return 0;
+}
+
+/*
+ * Task context part of a state transition requested via qwz_newstate().
+ * Walks down through intermediate states first (RUN -> ... -> INIT) and
+ * then brings the interface up to the requested state.  Runs at splnet()
+ * and holds a reference in sc->task_refs for the duration.
+ */
+void
+qwz_newstate_task(void *arg)
+{
+	struct qwz_softc *sc = (struct qwz_softc *)arg;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ifnet *ifp = &ic->ic_if;
+	enum ieee80211_state nstate = sc->ns_nstate;
+	enum ieee80211_state ostate = ic->ic_state;
+	int err = 0, s = splnet();
+
+	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
+		/* qwz_stop() is waiting for us. */
+		refcnt_rele_wake(&sc->task_refs);
+		splx(s);
+		return;
+	}
+
+	if (ostate == IEEE80211_S_SCAN) {
+		if (nstate == ostate) {
+			/* SCAN -> SCAN while firmware is still busy: no-op. */
+			if (sc->scan.state != ATH12K_SCAN_IDLE) {
+				refcnt_rele_wake(&sc->task_refs);
+				splx(s);
+				return;
+			}
+			/* Firmware is no longer scanning. Do another scan. */
+			goto next_scan;
+		}
+	}
+
+	/*
+	 * Moving to a lower (or equal) state: tear down the current
+	 * association step by step.  The fallthroughs are intentional.
+	 */
+	if (nstate <= ostate) {
+		switch (ostate) {
+		case IEEE80211_S_RUN:
+			err = qwz_run_stop(sc);
+			if (err)
+				goto out;
+			/* FALLTHROUGH */
+		case IEEE80211_S_ASSOC:
+		case IEEE80211_S_AUTH:
+			if (nstate <= IEEE80211_S_AUTH) {
+				err = qwz_deauth(sc);
+				if (err)
+					goto out;
+			}
+			/* FALLTHROUGH */
+		case IEEE80211_S_SCAN:
+		case IEEE80211_S_INIT:
+			break;
+		}
+
+		/* Die now if qwz_stop() was called while we were sleeping. */
+		if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
+			refcnt_rele_wake(&sc->task_refs);
+			splx(s);
+			return;
+		}
+	}
+
+	/* Now move up to the requested state. */
+	switch (nstate) {
+	case IEEE80211_S_INIT:
+		break;
+
+	case IEEE80211_S_SCAN:
+next_scan:
+		err = qwz_scan(sc);
+		if (err)
+			break;
+		if (ifp->if_flags & IFF_DEBUG)
+			printf("%s: %s -> %s\n", ifp->if_xname,
+			    ieee80211_state_name[ic->ic_state],
+			    ieee80211_state_name[IEEE80211_S_SCAN]);
+#if 0
+		if ((sc->sc_flags & QWZ_FLAG_BGSCAN) == 0) {
+#endif
+			ieee80211_set_link_state(ic, LINK_STATE_DOWN);
+			ieee80211_node_cleanup(ic, ic->ic_bss);
+#if 0
+		}
+#endif
+		/*
+		 * The SCAN transition completes asynchronously when the
+		 * firmware reports scan completion, so do not call the
+		 * stack's newstate handler here; just record the state.
+		 */
+		ic->ic_state = IEEE80211_S_SCAN;
+		refcnt_rele_wake(&sc->task_refs);
+		splx(s);
+		return;
+
+	case IEEE80211_S_AUTH:
+		err = qwz_auth(sc);
+		break;
+
+	case IEEE80211_S_ASSOC:
+		break;
+
+	case IEEE80211_S_RUN:
+		err = qwz_run(sc);
+		break;
+	}
+out:
+	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
+		/* On error, schedule a full re-init; else complete the
+		 * transition through the net80211 state machine. */
+		if (err)
+			task_add(systq, &sc->init_task);
+		else
+			sc->sc_newstate(ic, nstate, sc->ns_arg);
+	}
+	refcnt_rele_wake(&sc->task_refs);
+	splx(s);
+}
+
+/* Autoconf(9) glue: driver class for qwz(4) network devices. */
+struct cfdriver qwz_cd = {
+	NULL, "qwz", DV_IFNET
+};
+
+/*
+ * Fill in the WMI resource configuration for QCA6390-family targets.
+ * These values bound the firmware's internal allocations (vdevs, peers,
+ * TIDs, offload tables) and are sent during WMI init.
+ */
+void
+qwz_init_wmi_config_qca6390(struct qwz_softc *sc,
+    struct target_resource_config *config)
+{
+	config->num_vdevs = 4;
+	config->num_peers = 16;
+	config->num_tids = 32;
+
+	config->num_offload_peers = 3;
+	config->num_offload_reorder_buffs = 3;
+	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+	/* One bit per available RF chain, for both Tx and Rx. */
+	config->tx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
+	config->rx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
+	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+	config->num_mcast_groups = 0;
+	config->num_mcast_table_elems = 0;
+	config->mcast2ucast_mode = 0;
+	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+	config->num_wds_entries = 0;
+	config->dma_burst_size = 0;
+	config->rx_skip_defrag_timeout_dup_detection_check = 0;
+	config->vow_config = TARGET_VOW_CONFIG;
+	config->gtk_offload_max_vdev = 2;
+	config->num_msdu_desc = 0x400;
+	/*
+	 * NOTE(review): the Linux driver assigns beacon_tx_offload_max_vdev
+	 * twice with the same value (2 and 0x2); the redundant second
+	 * assignment has been dropped here.
+	 */
+	config->beacon_tx_offload_max_vdev = 2;
+	config->rx_batchmode = TARGET_RX_BATCHMODE;
+
+	config->peer_map_unmap_v2_support = 0;
+	config->use_pdev_id = 1;
+	config->max_frag_entries = 0xa;
+	config->num_tdls_vdevs = 0x1;
+	config->num_tdls_conn_table_entries = 8;
+	config->num_multicast_filter_entries = 0x20;
+	config->num_wow_filters = 0x16;
+	config->num_keep_alive_pattern = 0;
+	config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
+}
+
+/*
+ * Program the REO (reorder engine) block for IPQ8074-style hardware:
+ * select the destination ring for fragments, enable the aging list and
+ * flush logic, set reorder timeouts, and spread received frames across
+ * the four REO software rings via the hash routing map.
+ */
+void
+qwz_hw_ipq8074_reo_setup(struct qwz_softc *sc)
+{
+	uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+	uint32_t val;
+	/* Each hash entry uses three bits to map to a particular ring. */
+	uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+	    HAL_HASH_ROUTING_RING_SW2 << 3 |
+	    HAL_HASH_ROUTING_RING_SW3 << 6 |
+	    HAL_HASH_ROUTING_RING_SW4 << 9 |
+	    HAL_HASH_ROUTING_RING_SW1 << 12 |
+	    HAL_HASH_ROUTING_RING_SW2 << 15 |
+	    HAL_HASH_ROUTING_RING_SW3 << 18 |
+	    HAL_HASH_ROUTING_RING_SW4 << 21;
+
+	val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);
+
+	/* Route fragmented frames to REO2SW1 and enable list aging. */
+	val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+	val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+	    HAL_SRNG_RING_ID_REO2SW1) |
+	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+	sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+	/* Same reorder timeout for all four aging threshold registers. */
+	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
+	    HAL_DEFAULT_REO_TIMEOUT_USEC);
+	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
+	    HAL_DEFAULT_REO_TIMEOUT_USEC);
+	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
+	    HAL_DEFAULT_REO_TIMEOUT_USEC);
+	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
+	    HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+	    FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
+	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+	    FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
+	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+	    FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
+	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+	    FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
+}
+
+/*
+ * Fill in the WMI resource configuration for IPQ8074-style targets.
+ * Peer/TID counts scale with the number of radios (DBS, DBS+SBS or
+ * single-radio operation).
+ */
+void
+qwz_init_wmi_config_ipq8074(struct qwz_softc *sc,
+    struct target_resource_config *config)
+{
+	config->num_vdevs = sc->num_radios * TARGET_NUM_VDEVS(sc);
+
+	if (sc->num_radios == 2) {
+		config->num_peers = TARGET_NUM_PEERS(sc, DBS);
+		config->num_tids = TARGET_NUM_TIDS(sc, DBS);
+	} else if (sc->num_radios == 3) {
+		config->num_peers = TARGET_NUM_PEERS(sc, DBS_SBS);
+		config->num_tids = TARGET_NUM_TIDS(sc, DBS_SBS);
+	} else {
+		/* Control should not reach here */
+		config->num_peers = TARGET_NUM_PEERS(sc, SINGLE);
+		config->num_tids = TARGET_NUM_TIDS(sc, SINGLE);
+	}
+	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
+	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
+	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+	/* One bit per available RF chain, for both Tx and Rx. */
+	config->tx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
+	config->rx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
+	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+
+	if (test_bit(ATH12K_FLAG_RAW_MODE, sc->sc_flags))
+		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
+	else
+		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+
+	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
+	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
+	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
+	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
+	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
+	config->rx_skip_defrag_timeout_dup_detection_check =
+		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+	config->vow_config = TARGET_VOW_CONFIG;
+	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
+	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
+	config->beacon_tx_offload_max_vdev = sc->num_radios * TARGET_MAX_BCN_OFFLD;
+	config->rx_batchmode = TARGET_RX_BATCHMODE;
+	config->peer_map_unmap_v2_support = 1;
+	config->twt_ap_pdev_count = sc->num_radios;
+	config->twt_ap_sta_count = 1000;
+	config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
+	config->flag1 |= WMI_RSRC_CFG_FLAG1_ACK_RSSI;
+	/* Reserve additional beacon-offload vdevs for EMA (multiple BSSID). */
+	config->ema_max_vap_cnt = sc->num_radios;
+	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
+	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
+}
+
+/*
+ * Program the REO (reorder engine) block for WCN6855-style hardware.
+ * Unlike the IPQ8074 variant, the fragment destination ring lives in
+ * the MISC_CTL register, and only DEST_RING_CTRL_IX_2/3 are written.
+ */
+void
+qwz_hw_wcn6855_reo_setup(struct qwz_softc *sc)
+{
+	uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+	uint32_t val;
+	/* Each hash entry uses four bits to map to a particular ring. */
+	uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+	    HAL_HASH_ROUTING_RING_SW2 << 4 |
+	    HAL_HASH_ROUTING_RING_SW3 << 8 |
+	    HAL_HASH_ROUTING_RING_SW4 << 12 |
+	    HAL_HASH_ROUTING_RING_SW1 << 16 |
+	    HAL_HASH_ROUTING_RING_SW2 << 20 |
+	    HAL_HASH_ROUTING_RING_SW3 << 24 |
+	    HAL_HASH_ROUTING_RING_SW4 << 28;
+
+	val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);
+	val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+	sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+	/* Route fragmented frames to REO2SW1. */
+	val = sc->ops.read32(sc, reo_base + HAL_REO1_MISC_CTL(sc));
+	val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING;
+	val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING,
+	    HAL_SRNG_RING_ID_REO2SW1);
+	sc->ops.write32(sc, reo_base + HAL_REO1_MISC_CTL(sc), val);
+
+	/* Same reorder timeout for all four aging threshold registers. */
+	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
+	    HAL_DEFAULT_REO_TIMEOUT_USEC);
+	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
+	    HAL_DEFAULT_REO_TIMEOUT_USEC);
+	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
+	    HAL_DEFAULT_REO_TIMEOUT_USEC);
+	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
+	    HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+	    ring_hash_map);
+	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+	    ring_hash_map);
+}
+
+/*
+ * Program the REO (reorder engine) block for IPQ5018-style hardware.
+ * Layout matches IPQ8074 except for the hash routing map width.
+ */
+void
+qwz_hw_ipq5018_reo_setup(struct qwz_softc *sc)
+{
+	uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+	uint32_t val;
+
+	/* Each hash entry uses four bits to map to a particular ring. */
+	uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+	    HAL_HASH_ROUTING_RING_SW2 << 4 |
+	    HAL_HASH_ROUTING_RING_SW3 << 8 |
+	    HAL_HASH_ROUTING_RING_SW4 << 12 |
+	    HAL_HASH_ROUTING_RING_SW1 << 16 |
+	    HAL_HASH_ROUTING_RING_SW2 << 20 |
+	    HAL_HASH_ROUTING_RING_SW3 << 24 |
+	    HAL_HASH_ROUTING_RING_SW4 << 28;
+
+	val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);
+
+	/* Route fragmented frames to REO2SW1 and enable list aging. */
+	val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+	val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+	    HAL_SRNG_RING_ID_REO2SW1) |
+	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+	sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+	/* Same reorder timeout for all four aging threshold registers. */
+	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
+	    HAL_DEFAULT_REO_TIMEOUT_USEC);
+	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
+	    HAL_DEFAULT_REO_TIMEOUT_USEC);
+	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
+	    HAL_DEFAULT_REO_TIMEOUT_USEC);
+	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
+	    HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+	    ring_hash_map);
+	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+	    ring_hash_map);
+	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+	    ring_hash_map);
+	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+	    ring_hash_map);
+}
+
+/*
+ * Mappings between hw mac id and pdev/srng index.  On IPQ8074-style
+ * (multi-radio) chips the pdev id equals the mac id and all macs share
+ * srng index 0; on QCA6390-style (single-pdev) chips it is the other
+ * way around.
+ */
+int
+qwz_hw_mac_id_to_pdev_id_ipq8074(struct ath12k_hw_params *hw, int mac_id)
+{
+	return mac_id;
+}
+
+int
+qwz_hw_mac_id_to_srng_id_ipq8074(struct ath12k_hw_params *hw, int mac_id)
+{
+	return 0;
+}
+
+int
+qwz_hw_mac_id_to_pdev_id_qca6390(struct ath12k_hw_params *hw, int mac_id)
+{
+	return 0;
+}
+
+int
+qwz_hw_mac_id_to_srng_id_qca6390(struct ath12k_hw_params *hw, int mac_id)
+{
+	return mac_id;
+}
+
+/*
+ * Rx descriptor accessors for the IPQ8074 descriptor layout.  Each
+ * helper extracts one field from the little-endian hardware descriptor
+ * (desc->u.ipq8074); these are installed in the per-chip hw_ops table.
+ */
+int
+qwz_hw_ipq8074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
+	    le32toh(desc->u.ipq8074.msdu_end.info2));
+}
+
+uint8_t
+qwz_hw_ipq8074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
+	    le32toh(desc->u.ipq8074.msdu_end.info2));
+}
+
+uint8_t *
+qwz_hw_ipq8074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+	return desc->u.ipq8074.hdr_status;
+}
+
+int
+qwz_hw_ipq8074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+	return le32toh(desc->u.ipq8074.mpdu_start.info1) &
+	       RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
+}
+
+uint32_t
+qwz_hw_ipq8074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
+	    le32toh(desc->u.ipq8074.mpdu_start.info2));
+}
+
+uint8_t
+qwz_hw_ipq8074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+	    le32toh(desc->u.ipq8074.msdu_start.info2));
+}
+
+uint8_t
+qwz_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+	    le32toh(desc->u.ipq8074.msdu_start.info2));
+}
+
+int
+qwz_hw_ipq8074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+	    le32toh(desc->u.ipq8074.msdu_start.info2));
+}
+
+int
+qwz_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
+	      le32toh(desc->u.ipq8074.mpdu_start.info1));
+}
+
+int
+qwz_hw_ipq8074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
+	      le32toh(desc->u.ipq8074.mpdu_start.info1));
+}
+
+uint16_t
+qwz_hw_ipq8074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
+	    le32toh(desc->u.ipq8074.mpdu_start.info1));
+}
+
+uint16_t
+qwz_hw_ipq8074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+	    le32toh(desc->u.ipq8074.msdu_start.info1));
+}
+
+uint8_t
+qwz_hw_ipq8074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+	    le32toh(desc->u.ipq8074.msdu_start.info3));
+}
+
+uint8_t
+qwz_hw_ipq8074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+	    le32toh(desc->u.ipq8074.msdu_start.info3));
+}
+
+uint8_t
+qwz_hw_ipq8074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+	    le32toh(desc->u.ipq8074.msdu_start.info3));
+}
+
+uint32_t
+qwz_hw_ipq8074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+	return le32toh(desc->u.ipq8074.msdu_start.phy_meta_data);
+}
+
+uint8_t
+qwz_hw_ipq8074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+	    le32toh(desc->u.ipq8074.msdu_start.info3));
+}
+
+uint8_t
+qwz_hw_ipq8074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+	    le32toh(desc->u.ipq8074.msdu_start.info3));
+}
+
+uint8_t
+qwz_hw_ipq8074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MPDU_START_INFO2_TID,
+	    le32toh(desc->u.ipq8074.mpdu_start.info2));
+}
+
+uint16_t
+qwz_hw_ipq8074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+	return le16toh(desc->u.ipq8074.mpdu_start.sw_peer_id);
+}
+
+/*
+ * Copy the msdu_end, attention and mpdu_end sections from the last
+ * descriptor of an A-MSDU onto the first descriptor.
+ */
+void
+qwz_hw_ipq8074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+                                      struct hal_rx_desc *ldesc)
+{
+	memcpy((uint8_t *)&fdesc->u.ipq8074.msdu_end, (uint8_t *)&ldesc->u.ipq8074.msdu_end,
+	       sizeof(struct rx_msdu_end_ipq8074));
+	memcpy((uint8_t *)&fdesc->u.ipq8074.attention, (uint8_t *)&ldesc->u.ipq8074.attention,
+	       sizeof(struct rx_attention));
+	memcpy((uint8_t *)&fdesc->u.ipq8074.mpdu_end, (uint8_t *)&ldesc->u.ipq8074.mpdu_end,
+	       sizeof(struct rx_mpdu_end));
+}
+
+uint32_t
+qwz_hw_ipq8074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(HAL_TLV_HDR_TAG,
+	    le32toh(desc->u.ipq8074.mpdu_start_tag));
+}
+
+/* phy_ppdu_id is a 16-bit field, widened to the hw_ops return type. */
+uint32_t
+qwz_hw_ipq8074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+	return le16toh(desc->u.ipq8074.mpdu_start.phy_ppdu_id);
+}
+
+/* Overwrite the MSDU length field in place (used when trimming). */
+void
+qwz_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, uint16_t len)
+{
+	uint32_t info = le32toh(desc->u.ipq8074.msdu_start.info1);
+
+	info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+	info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+	desc->u.ipq8074.msdu_start.info1 = htole32(info);
+}
+
+/* Chip-independent wrapper: dispatch through the hw_ops table. */
+int
+qwz_dp_rx_h_msdu_end_first_msdu(struct qwz_softc *sc, struct hal_rx_desc *desc)
+{
+	return sc->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
+}
+
+int
+qwz_hw_ipq8074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+	return le32toh(desc->u.ipq8074.mpdu_start.info1) &
+	       RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
+}
+
+uint8_t *
+qwz_hw_ipq8074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+	return desc->u.ipq8074.mpdu_start.addr2;
+}
+
+struct rx_attention *
+qwz_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+	return &desc->u.ipq8074.attention;
+}
+
+uint8_t *
+qwz_hw_ipq8074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+	return &desc->u.ipq8074.msdu_payload[0];
+}
+
+/*
+ * Rx descriptor accessors for the QCN9074 descriptor layout
+ * (desc->u.qcn9074).  Field offsets differ from IPQ8074: first/last
+ * MSDU and padding live in the 16-bit info4 word, and the mpdu_start
+ * flags moved to info9/info11.
+ */
+int
+qwz_hw_qcn9074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+	return !!FIELD_GET(RX_MSDU_END_INFO4_FIRST_MSDU,
+	      le16toh(desc->u.qcn9074.msdu_end.info4));
+}
+
+int
+qwz_hw_qcn9074_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+	return !!FIELD_GET(RX_MSDU_END_INFO4_LAST_MSDU,
+	      le16toh(desc->u.qcn9074.msdu_end.info4));
+}
+
+uint8_t
+qwz_hw_qcn9074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_END_INFO4_L3_HDR_PADDING,
+	    le16toh(desc->u.qcn9074.msdu_end.info4));
+}
+
+uint8_t *
+qwz_hw_qcn9074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+	return desc->u.qcn9074.hdr_status;
+}
+
+int
+qwz_hw_qcn9074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+	return le32toh(desc->u.qcn9074.mpdu_start.info11) &
+	       RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID;
+}
+
+uint32_t
+qwz_hw_qcn9074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MPDU_START_INFO9_ENC_TYPE,
+	    le32toh(desc->u.qcn9074.mpdu_start.info9));
+}
+
+uint8_t
+qwz_hw_qcn9074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+	    le32toh(desc->u.qcn9074.msdu_start.info2));
+}
+
+uint8_t
+qwz_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+	    le32toh(desc->u.qcn9074.msdu_start.info2));
+}
+
+int
+qwz_hw_qcn9074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+	    le32toh(desc->u.qcn9074.msdu_start.info2));
+}
+
+int
+qwz_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+	return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID,
+	      le32toh(desc->u.qcn9074.mpdu_start.info11));
+}
+
+int
+qwz_hw_qcn9074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+	return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_FCTRL_VALID,
+	      le32toh(desc->u.qcn9074.mpdu_start.info11));
+}
+
+uint16_t
+qwz_hw_qcn9074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_NUM,
+	    le32toh(desc->u.qcn9074.mpdu_start.info11));
+}
+
+uint16_t
+qwz_hw_qcn9074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+	    le32toh(desc->u.qcn9074.msdu_start.info1));
+}
+
+uint8_t
+qwz_hw_qcn9074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+	    le32toh(desc->u.qcn9074.msdu_start.info3));
+}
+
+uint8_t
+qwz_hw_qcn9074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+	    le32toh(desc->u.qcn9074.msdu_start.info3));
+}
+
+uint8_t
+qwz_hw_qcn9074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+	    le32toh(desc->u.qcn9074.msdu_start.info3));
+}
+
+uint32_t
+qwz_hw_qcn9074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+	return le32toh(desc->u.qcn9074.msdu_start.phy_meta_data);
+}
+
+uint8_t
+qwz_hw_qcn9074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+	    le32toh(desc->u.qcn9074.msdu_start.info3));
+}
+
+uint8_t
+qwz_hw_qcn9074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+	    le32toh(desc->u.qcn9074.msdu_start.info3));
+}
+
+uint8_t
+qwz_hw_qcn9074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(RX_MPDU_START_INFO9_TID,
+	    le32toh(desc->u.qcn9074.mpdu_start.info9));
+}
+
+uint16_t
+qwz_hw_qcn9074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+	return le16toh(desc->u.qcn9074.mpdu_start.sw_peer_id);
+}
+
+/*
+ * Copy the msdu_end, attention and mpdu_end sections from the last
+ * descriptor of an A-MSDU onto the first descriptor.
+ */
+void
+qwz_hw_qcn9074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+                                      struct hal_rx_desc *ldesc)
+{
+	memcpy((uint8_t *)&fdesc->u.qcn9074.msdu_end, (uint8_t *)&ldesc->u.qcn9074.msdu_end,
+	       sizeof(struct rx_msdu_end_qcn9074));
+	memcpy((uint8_t *)&fdesc->u.qcn9074.attention, (uint8_t *)&ldesc->u.qcn9074.attention,
+	       sizeof(struct rx_attention));
+	memcpy((uint8_t *)&fdesc->u.qcn9074.mpdu_end, (uint8_t *)&ldesc->u.qcn9074.mpdu_end,
+	       sizeof(struct rx_mpdu_end));
+}
+
+uint32_t
+qwz_hw_qcn9074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+	return FIELD_GET(HAL_TLV_HDR_TAG,
+	    le32toh(desc->u.qcn9074.mpdu_start_tag));
+}
+
+/* phy_ppdu_id is a 16-bit field, widened to the hw_ops return type. */
+uint32_t
+qwz_hw_qcn9074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+	return le16toh(desc->u.qcn9074.mpdu_start.phy_ppdu_id);
+}
+
+/* Overwrite the MSDU length field in place (used when trimming). */
+void
+qwz_hw_qcn9074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, uint16_t len)
+{
+	uint32_t info = le32toh(desc->u.qcn9074.msdu_start.info1);
+
+	info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+	info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+	desc->u.qcn9074.msdu_start.info1 = htole32(info);
+}
+
+struct rx_attention *
+qwz_hw_qcn9074_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+	return &desc->u.qcn9074.attention;
+}
+
+uint8_t *
+qwz_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+	return &desc->u.qcn9074.msdu_payload[0];
+}
+
+/*
+ * The "ipq9074" spelling in the next two names is inherited from the
+ * Linux ath11k driver; they operate on the qcn9074 descriptor layout.
+ */
+int
+qwz_hw_ipq9074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+	return le32toh(desc->u.qcn9074.mpdu_start.info11) &
+	       RX_MPDU_START_INFO11_MAC_ADDR2_VALID;
+}
+
+uint8_t *
+qwz_hw_ipq9074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+	return desc->u.qcn9074.mpdu_start.addr2;
+}
+
+int
+qwz_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+       return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
+             le32toh(desc->u.wcn6855.msdu_end.info2));
+}
+
+int
+qwz_hw_wcn6855_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+       return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU_WCN6855,
+             le32toh(desc->u.wcn6855.msdu_end.info2));
+}
+
+uint8_t
+qwz_hw_wcn6855_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
+           le32toh(desc->u.wcn6855.msdu_end.info2));
+}
+
+uint8_t *
+qwz_hw_wcn6855_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+       return desc->u.wcn6855.hdr_status;
+}
+
+int
+qwz_hw_wcn6855_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+       return le32toh(desc->u.wcn6855.mpdu_start.info1) &
+              RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
+}
+
+uint32_t
+qwz_hw_wcn6855_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
+           le32toh(desc->u.wcn6855.mpdu_start.info2));
+}
+
+uint8_t
+qwz_hw_wcn6855_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+           le32toh(desc->u.wcn6855.msdu_start.info2));
+}
+
+uint8_t
+qwz_hw_wcn6855_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+           le32toh(desc->u.wcn6855.msdu_start.info2));
+}
+
+int
+qwz_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+       return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
+             le32toh(desc->u.wcn6855.mpdu_start.info1));
+}
+
+int
+qwz_hw_wcn6855_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+       return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
+             le32toh(desc->u.wcn6855.mpdu_start.info1));
+}
+
+uint16_t
+qwz_hw_wcn6855_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
+           le32toh(desc->u.wcn6855.mpdu_start.info1));
+}
+
+uint16_t
+qwz_hw_wcn6855_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+           le32toh(desc->u.wcn6855.msdu_start.info1));
+}
+
+uint8_t
+qwz_hw_wcn6855_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+           le32toh(desc->u.wcn6855.msdu_start.info3));
+}
+
+uint8_t
+qwz_hw_wcn6855_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+           le32toh(desc->u.wcn6855.msdu_start.info3));
+}
+
+uint8_t
+qwz_hw_wcn6855_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+           le32toh(desc->u.wcn6855.msdu_start.info3));
+}
+
+uint32_t
+qwz_hw_wcn6855_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+       return le32toh(desc->u.wcn6855.msdu_start.phy_meta_data);
+}
+
+uint8_t
+qwz_hw_wcn6855_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+           le32toh(desc->u.wcn6855.msdu_start.info3));
+}
+
+uint8_t
+qwz_hw_wcn6855_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+           le32toh(desc->u.wcn6855.msdu_start.info3));
+}
+
+uint8_t
+qwz_hw_wcn6855_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MPDU_START_INFO2_TID_WCN6855,
+           le32toh(desc->u.wcn6855.mpdu_start.info2));
+}
+
+uint16_t
+qwz_hw_wcn6855_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+       return le16toh(desc->u.wcn6855.mpdu_start.sw_peer_id);
+}
+
+/*
+ * Copy the msdu_end, attention and mpdu_end sections of the last rx
+ * descriptor (ldesc) into the first rx descriptor (fdesc).
+ */
+void
+qwz_hw_wcn6855_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+    struct hal_rx_desc *ldesc)
+{
+       /* memcpy(3) takes void *; the uint8_t * casts were redundant. */
+       memcpy(&fdesc->u.wcn6855.msdu_end, &ldesc->u.wcn6855.msdu_end,
+           sizeof(struct rx_msdu_end_wcn6855));
+       memcpy(&fdesc->u.wcn6855.attention, &ldesc->u.wcn6855.attention,
+           sizeof(struct rx_attention));
+       memcpy(&fdesc->u.wcn6855.mpdu_end, &ldesc->u.wcn6855.mpdu_end,
+           sizeof(struct rx_mpdu_end));
+}
+
+/* Extract the TLV tag from the mpdu_start TLV header. */
+uint32_t
+qwz_hw_wcn6855_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(HAL_TLV_HDR_TAG,
+           le32toh(desc->u.wcn6855.mpdu_start_tag));
+}
+
+/*
+ * Return the PHY PPDU ID.  The field is 16 bits wide; the uint32_t
+ * return type presumably matches the ops table signature — verify.
+ */
+uint32_t
+qwz_hw_wcn6855_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+       return le16toh(desc->u.wcn6855.mpdu_start.phy_ppdu_id);
+}
+
+/* Overwrite the MSDU length field in msdu_start.info1 with len. */
+void
+qwz_hw_wcn6855_rx_desc_set_msdu_len(struct hal_rx_desc *desc, uint16_t len)
+{
+       uint32_t info = le32toh(desc->u.wcn6855.msdu_start.info1);
+
+       /* Clear the old length bits, then merge in the new value. */
+       info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+       info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+       desc->u.wcn6855.msdu_start.info1 = htole32(info);
+}
+
+/* Return a pointer to the rx_attention section of the descriptor. */
+struct rx_attention *
+qwz_hw_wcn6855_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+       return &desc->u.wcn6855.attention;
+}
+
+/* Return a pointer to the start of the MSDU payload. */
+uint8_t *
+qwz_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+       return &desc->u.wcn6855.msdu_payload[0];
+}
+
+/* Non-zero if the descriptor's MAC address 2 field is valid. */
+int
+qwz_hw_wcn6855_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+       return le32toh(desc->u.wcn6855.mpdu_start.info1) &
+              RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
+}
+
+/* Return a pointer to MAC address 2 stored in mpdu_start. */
+uint8_t *
+qwz_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+       return desc->u.wcn6855.mpdu_start.addr2;
+}
+
+/*
+ * Map from pdev index to hw mac index.  On IPQ8074 pdev 1 and 2 are
+ * swapped relative to their mac indices; anything else is invalid.
+ */
+uint8_t
+qwz_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
+{
+       if (pdev_idx == 0)
+               return 0;
+       if (pdev_idx == 1)
+               return 2;
+       if (pdev_idx == 2)
+               return 1;
+
+       return ATH12K_INVALID_HW_MAC_ID;
+}
+
+/* On IPQ6018 the hw mac index equals the pdev index. */
+uint8_t
+qwz_hw_ipq6018_mac_from_pdev_id(int pdev_idx)
+{
+       return (uint8_t)pdev_idx;
+}
+
+/*
+ * Map a pdev index to a hw mac index via the chip's ops table;
+ * defaults to mac 0 when the chip provides no mapping op.
+ */
+static inline int
+qwz_hw_get_mac_from_pdev_id(struct qwz_softc *sc, int pdev_idx)
+{
+       if (sc->hw_params.hw_ops->get_hw_mac_from_pdev_id)
+               return sc->hw_params.hw_ops->get_hw_mac_from_pdev_id(pdev_idx);
+
+       return 0;
+}
+
+/*
+ * Hardware ops for IPQ8074 chips.  Ops under "notyet" have not been
+ * ported yet and are compiled out; use #ifdef consistently so that
+ * defining "notyet" (empty) to enable them cannot break "#if notyet".
+ */
+const struct ath12k_hw_ops ipq8074_ops = {
+       .get_hw_mac_from_pdev_id = qwz_hw_ipq8074_mac_from_pdev_id,
+       .wmi_init_config = qwz_init_wmi_config_ipq8074,
+       .mac_id_to_pdev_id = qwz_hw_mac_id_to_pdev_id_ipq8074,
+       .mac_id_to_srng_id = qwz_hw_mac_id_to_srng_id_ipq8074,
+#ifdef notyet
+       .tx_mesh_enable = ath12k_hw_ipq8074_tx_mesh_enable,
+#endif
+       .rx_desc_get_first_msdu = qwz_hw_ipq8074_rx_desc_get_first_msdu,
+#ifdef notyet
+       .rx_desc_get_last_msdu = ath12k_hw_ipq8074_rx_desc_get_last_msdu,
+#endif
+       .rx_desc_get_l3_pad_bytes = qwz_hw_ipq8074_rx_desc_get_l3_pad_bytes,
+       .rx_desc_get_hdr_status = qwz_hw_ipq8074_rx_desc_get_hdr_status,
+       .rx_desc_encrypt_valid = qwz_hw_ipq8074_rx_desc_encrypt_valid,
+       .rx_desc_get_encrypt_type = qwz_hw_ipq8074_rx_desc_get_encrypt_type,
+       .rx_desc_get_decap_type = qwz_hw_ipq8074_rx_desc_get_decap_type,
+#ifdef notyet
+       .rx_desc_get_mesh_ctl = ath12k_hw_ipq8074_rx_desc_get_mesh_ctl,
+       .rx_desc_get_ldpc_support = ath12k_hw_ipq8074_rx_desc_get_ldpc_support,
+       .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
+       .rx_desc_get_mpdu_fc_valid = ath12k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
+       .rx_desc_get_mpdu_start_seq_no = ath12k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
+#endif
+       .rx_desc_get_msdu_len = qwz_hw_ipq8074_rx_desc_get_msdu_len,
+#ifdef notyet
+       .rx_desc_get_msdu_sgi = ath12k_hw_ipq8074_rx_desc_get_msdu_sgi,
+       .rx_desc_get_msdu_rate_mcs = ath12k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
+       .rx_desc_get_msdu_rx_bw = ath12k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
+#endif
+       .rx_desc_get_msdu_freq = qwz_hw_ipq8074_rx_desc_get_msdu_freq,
+#ifdef notyet
+       .rx_desc_get_msdu_pkt_type = ath12k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
+       .rx_desc_get_msdu_nss = ath12k_hw_ipq8074_rx_desc_get_msdu_nss,
+       .rx_desc_get_mpdu_tid = ath12k_hw_ipq8074_rx_desc_get_mpdu_tid,
+       .rx_desc_get_mpdu_peer_id = ath12k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
+       .rx_desc_copy_attn_end_tlv = ath12k_hw_ipq8074_rx_desc_copy_attn_end,
+       .rx_desc_get_mpdu_start_tag = ath12k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
+       .rx_desc_get_mpdu_ppdu_id = ath12k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
+       .rx_desc_set_msdu_len = ath12k_hw_ipq8074_rx_desc_set_msdu_len,
+#endif
+       .rx_desc_get_attention = qwz_hw_ipq8074_rx_desc_get_attention,
+#ifdef notyet
+       .rx_desc_get_msdu_payload = ath12k_hw_ipq8074_rx_desc_get_msdu_payload,
+#endif
+       .reo_setup = qwz_hw_ipq8074_reo_setup,
+#ifdef notyet
+       .mpdu_info_get_peerid = ath12k_hw_ipq8074_mpdu_info_get_peerid,
+       .rx_desc_mac_addr2_valid = ath12k_hw_ipq8074_rx_desc_mac_addr2_valid,
+       .rx_desc_mpdu_start_addr2 = ath12k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+       .get_ring_selector = ath12k_hw_ipq8074_get_tcl_ring_selector,
+#endif
+};
+
+/*
+ * Hardware ops for IPQ6018 chips.  Ops under "notyet" have not been
+ * ported yet and are compiled out; use #ifdef consistently so that
+ * defining "notyet" (empty) to enable them cannot break "#if notyet".
+ */
+const struct ath12k_hw_ops ipq6018_ops = {
+       .get_hw_mac_from_pdev_id = qwz_hw_ipq6018_mac_from_pdev_id,
+       .wmi_init_config = qwz_init_wmi_config_ipq8074,
+       .mac_id_to_pdev_id = qwz_hw_mac_id_to_pdev_id_ipq8074,
+       .mac_id_to_srng_id = qwz_hw_mac_id_to_srng_id_ipq8074,
+#ifdef notyet
+       .tx_mesh_enable = ath12k_hw_ipq8074_tx_mesh_enable,
+#endif
+       .rx_desc_get_first_msdu = qwz_hw_ipq8074_rx_desc_get_first_msdu,
+#ifdef notyet
+       .rx_desc_get_last_msdu = ath12k_hw_ipq8074_rx_desc_get_last_msdu,
+#endif
+       .rx_desc_get_l3_pad_bytes = qwz_hw_ipq8074_rx_desc_get_l3_pad_bytes,
+       .rx_desc_get_hdr_status = qwz_hw_ipq8074_rx_desc_get_hdr_status,
+       .rx_desc_encrypt_valid = qwz_hw_ipq8074_rx_desc_encrypt_valid,
+       .rx_desc_get_encrypt_type = qwz_hw_ipq8074_rx_desc_get_encrypt_type,
+       .rx_desc_get_decap_type = qwz_hw_ipq8074_rx_desc_get_decap_type,
+#ifdef notyet
+       .rx_desc_get_mesh_ctl = ath12k_hw_ipq8074_rx_desc_get_mesh_ctl,
+       .rx_desc_get_ldpc_support = ath12k_hw_ipq8074_rx_desc_get_ldpc_support,
+       .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
+       .rx_desc_get_mpdu_fc_valid = ath12k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
+       .rx_desc_get_mpdu_start_seq_no = ath12k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
+#endif
+       .rx_desc_get_msdu_len = qwz_hw_ipq8074_rx_desc_get_msdu_len,
+#ifdef notyet
+       .rx_desc_get_msdu_sgi = ath12k_hw_ipq8074_rx_desc_get_msdu_sgi,
+       .rx_desc_get_msdu_rate_mcs = ath12k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
+       .rx_desc_get_msdu_rx_bw = ath12k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
+#endif
+       .rx_desc_get_msdu_freq = qwz_hw_ipq8074_rx_desc_get_msdu_freq,
+#ifdef notyet
+       .rx_desc_get_msdu_pkt_type = ath12k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
+       .rx_desc_get_msdu_nss = ath12k_hw_ipq8074_rx_desc_get_msdu_nss,
+       .rx_desc_get_mpdu_tid = ath12k_hw_ipq8074_rx_desc_get_mpdu_tid,
+       .rx_desc_get_mpdu_peer_id = ath12k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
+       .rx_desc_copy_attn_end_tlv = ath12k_hw_ipq8074_rx_desc_copy_attn_end,
+       .rx_desc_get_mpdu_start_tag = ath12k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
+       .rx_desc_get_mpdu_ppdu_id = ath12k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
+       .rx_desc_set_msdu_len = ath12k_hw_ipq8074_rx_desc_set_msdu_len,
+#endif
+       .rx_desc_get_attention = qwz_hw_ipq8074_rx_desc_get_attention,
+#ifdef notyet
+       .rx_desc_get_msdu_payload = ath12k_hw_ipq8074_rx_desc_get_msdu_payload,
+#endif
+       .reo_setup = qwz_hw_ipq8074_reo_setup,
+#ifdef notyet
+       .mpdu_info_get_peerid = ath12k_hw_ipq8074_mpdu_info_get_peerid,
+       .rx_desc_mac_addr2_valid = ath12k_hw_ipq8074_rx_desc_mac_addr2_valid,
+       .rx_desc_mpdu_start_addr2 = ath12k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+       .get_ring_selector = ath12k_hw_ipq8074_get_tcl_ring_selector,
+#endif
+};
+
+/*
+ * Hardware ops for QCA6390 chips.  Ops under "notyet" have not been
+ * ported yet and are compiled out; use #ifdef consistently so that
+ * defining "notyet" (empty) to enable them cannot break "#if notyet".
+ */
+const struct ath12k_hw_ops qca6390_ops = {
+       .get_hw_mac_from_pdev_id = qwz_hw_ipq8074_mac_from_pdev_id,
+       .wmi_init_config = qwz_init_wmi_config_qca6390,
+       .mac_id_to_pdev_id = qwz_hw_mac_id_to_pdev_id_qca6390,
+       .mac_id_to_srng_id = qwz_hw_mac_id_to_srng_id_qca6390,
+#ifdef notyet
+       .tx_mesh_enable = ath12k_hw_ipq8074_tx_mesh_enable,
+#endif
+       .rx_desc_get_first_msdu = qwz_hw_ipq8074_rx_desc_get_first_msdu,
+#ifdef notyet
+       .rx_desc_get_last_msdu = ath12k_hw_ipq8074_rx_desc_get_last_msdu,
+#endif
+       .rx_desc_get_l3_pad_bytes = qwz_hw_ipq8074_rx_desc_get_l3_pad_bytes,
+       .rx_desc_get_hdr_status = qwz_hw_ipq8074_rx_desc_get_hdr_status,
+       .rx_desc_encrypt_valid = qwz_hw_ipq8074_rx_desc_encrypt_valid,
+       .rx_desc_get_encrypt_type = qwz_hw_ipq8074_rx_desc_get_encrypt_type,
+       .rx_desc_get_decap_type = qwz_hw_ipq8074_rx_desc_get_decap_type,
+#ifdef notyet
+       .rx_desc_get_mesh_ctl = ath12k_hw_ipq8074_rx_desc_get_mesh_ctl,
+       .rx_desc_get_ldpc_support = ath12k_hw_ipq8074_rx_desc_get_ldpc_support,
+       .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
+       .rx_desc_get_mpdu_fc_valid = ath12k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
+       .rx_desc_get_mpdu_start_seq_no = ath12k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
+#endif
+       .rx_desc_get_msdu_len = qwz_hw_ipq8074_rx_desc_get_msdu_len,
+#ifdef notyet
+       .rx_desc_get_msdu_sgi = ath12k_hw_ipq8074_rx_desc_get_msdu_sgi,
+       .rx_desc_get_msdu_rate_mcs = ath12k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
+       .rx_desc_get_msdu_rx_bw = ath12k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
+#endif
+       .rx_desc_get_msdu_freq = qwz_hw_ipq8074_rx_desc_get_msdu_freq,
+#ifdef notyet
+       .rx_desc_get_msdu_pkt_type = ath12k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
+       .rx_desc_get_msdu_nss = ath12k_hw_ipq8074_rx_desc_get_msdu_nss,
+       .rx_desc_get_mpdu_tid = ath12k_hw_ipq8074_rx_desc_get_mpdu_tid,
+       .rx_desc_get_mpdu_peer_id = ath12k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
+       .rx_desc_copy_attn_end_tlv = ath12k_hw_ipq8074_rx_desc_copy_attn_end,
+       .rx_desc_get_mpdu_start_tag = ath12k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
+       .rx_desc_get_mpdu_ppdu_id = ath12k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
+       .rx_desc_set_msdu_len = ath12k_hw_ipq8074_rx_desc_set_msdu_len,
+#endif
+       .rx_desc_get_attention = qwz_hw_ipq8074_rx_desc_get_attention,
+#ifdef notyet
+       .rx_desc_get_msdu_payload = ath12k_hw_ipq8074_rx_desc_get_msdu_payload,
+#endif
+       .reo_setup = qwz_hw_ipq8074_reo_setup,
+#ifdef notyet
+       .mpdu_info_get_peerid = ath12k_hw_ipq8074_mpdu_info_get_peerid,
+       .rx_desc_mac_addr2_valid = ath12k_hw_ipq8074_rx_desc_mac_addr2_valid,
+       .rx_desc_mpdu_start_addr2 = ath12k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+       .get_ring_selector = ath12k_hw_ipq8074_get_tcl_ring_selector,
+#endif
+};
+
+/*
+ * Hardware ops for QCN9074 chips.  Ops under "notyet" have not been
+ * ported yet and are compiled out; use #ifdef consistently so that
+ * defining "notyet" (empty) to enable them cannot break "#if notyet".
+ */
+const struct ath12k_hw_ops qcn9074_ops = {
+       .get_hw_mac_from_pdev_id = qwz_hw_ipq6018_mac_from_pdev_id,
+       .wmi_init_config = qwz_init_wmi_config_ipq8074,
+       .mac_id_to_pdev_id = qwz_hw_mac_id_to_pdev_id_ipq8074,
+       .mac_id_to_srng_id = qwz_hw_mac_id_to_srng_id_ipq8074,
+#ifdef notyet
+       .tx_mesh_enable = ath12k_hw_qcn9074_tx_mesh_enable,
+#endif
+       .rx_desc_get_first_msdu = qwz_hw_qcn9074_rx_desc_get_first_msdu,
+#ifdef notyet
+       .rx_desc_get_last_msdu = ath12k_hw_qcn9074_rx_desc_get_last_msdu,
+#endif
+       .rx_desc_get_l3_pad_bytes = qwz_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+       .rx_desc_get_hdr_status = qwz_hw_qcn9074_rx_desc_get_hdr_status,
+       .rx_desc_encrypt_valid = qwz_hw_qcn9074_rx_desc_encrypt_valid,
+       .rx_desc_get_encrypt_type = qwz_hw_qcn9074_rx_desc_get_encrypt_type,
+       .rx_desc_get_decap_type = qwz_hw_qcn9074_rx_desc_get_decap_type,
+#ifdef notyet
+       .rx_desc_get_mesh_ctl = ath12k_hw_qcn9074_rx_desc_get_mesh_ctl,
+       .rx_desc_get_ldpc_support = ath12k_hw_qcn9074_rx_desc_get_ldpc_support,
+       .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+       .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+       .rx_desc_get_mpdu_start_seq_no = ath12k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+#endif
+       .rx_desc_get_msdu_len = qwz_hw_qcn9074_rx_desc_get_msdu_len,
+#ifdef notyet
+       .rx_desc_get_msdu_sgi = ath12k_hw_qcn9074_rx_desc_get_msdu_sgi,
+       .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+       .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+#endif
+       .rx_desc_get_msdu_freq = qwz_hw_qcn9074_rx_desc_get_msdu_freq,
+#ifdef notyet
+       .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+       .rx_desc_get_msdu_nss = ath12k_hw_qcn9074_rx_desc_get_msdu_nss,
+       .rx_desc_get_mpdu_tid = ath12k_hw_qcn9074_rx_desc_get_mpdu_tid,
+       .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+       .rx_desc_copy_attn_end_tlv = ath12k_hw_qcn9074_rx_desc_copy_attn_end,
+       .rx_desc_get_mpdu_start_tag = ath12k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+       .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+       .rx_desc_set_msdu_len = ath12k_hw_qcn9074_rx_desc_set_msdu_len,
+#endif
+       .rx_desc_get_attention = qwz_hw_qcn9074_rx_desc_get_attention,
+#ifdef notyet
+       .rx_desc_get_msdu_payload = ath12k_hw_qcn9074_rx_desc_get_msdu_payload,
+#endif
+       .reo_setup = qwz_hw_ipq8074_reo_setup,
+#ifdef notyet
+       .mpdu_info_get_peerid = ath12k_hw_ipq8074_mpdu_info_get_peerid,
+       .rx_desc_mac_addr2_valid = ath12k_hw_ipq9074_rx_desc_mac_addr2_valid,
+       .rx_desc_mpdu_start_addr2 = ath12k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+       .get_ring_selector = ath12k_hw_ipq8074_get_tcl_ring_selector,
+#endif
+};
+
+/*
+ * Hardware ops for WCN6855 chips.  Ops under "notyet" have not been
+ * ported yet and are compiled out; use #ifdef consistently so that
+ * defining "notyet" (empty) to enable them cannot break "#if notyet".
+ */
+const struct ath12k_hw_ops wcn6855_ops = {
+       .get_hw_mac_from_pdev_id = qwz_hw_ipq8074_mac_from_pdev_id,
+       .wmi_init_config = qwz_init_wmi_config_qca6390,
+       .mac_id_to_pdev_id = qwz_hw_mac_id_to_pdev_id_qca6390,
+       .mac_id_to_srng_id = qwz_hw_mac_id_to_srng_id_qca6390,
+#ifdef notyet
+       .tx_mesh_enable = ath12k_hw_wcn6855_tx_mesh_enable,
+#endif
+       .rx_desc_get_first_msdu = qwz_hw_wcn6855_rx_desc_get_first_msdu,
+#ifdef notyet
+       .rx_desc_get_last_msdu = ath12k_hw_wcn6855_rx_desc_get_last_msdu,
+#endif
+       .rx_desc_get_l3_pad_bytes = qwz_hw_wcn6855_rx_desc_get_l3_pad_bytes,
+       .rx_desc_get_hdr_status = qwz_hw_wcn6855_rx_desc_get_hdr_status,
+       .rx_desc_encrypt_valid = qwz_hw_wcn6855_rx_desc_encrypt_valid,
+       .rx_desc_get_encrypt_type = qwz_hw_wcn6855_rx_desc_get_encrypt_type,
+       .rx_desc_get_decap_type = qwz_hw_wcn6855_rx_desc_get_decap_type,
+#ifdef notyet
+       .rx_desc_get_mesh_ctl = ath12k_hw_wcn6855_rx_desc_get_mesh_ctl,
+       .rx_desc_get_ldpc_support = ath12k_hw_wcn6855_rx_desc_get_ldpc_support,
+       .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
+       .rx_desc_get_mpdu_fc_valid = ath12k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
+       .rx_desc_get_mpdu_start_seq_no = ath12k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
+#endif
+       .rx_desc_get_msdu_len = qwz_hw_wcn6855_rx_desc_get_msdu_len,
+#ifdef notyet
+       .rx_desc_get_msdu_sgi = ath12k_hw_wcn6855_rx_desc_get_msdu_sgi,
+       .rx_desc_get_msdu_rate_mcs = ath12k_hw_wcn6855_rx_desc_get_msdu_rate_mcs,
+       .rx_desc_get_msdu_rx_bw = ath12k_hw_wcn6855_rx_desc_get_msdu_rx_bw,
+#endif
+       .rx_desc_get_msdu_freq = qwz_hw_wcn6855_rx_desc_get_msdu_freq,
+#ifdef notyet
+       .rx_desc_get_msdu_pkt_type = ath12k_hw_wcn6855_rx_desc_get_msdu_pkt_type,
+       .rx_desc_get_msdu_nss = ath12k_hw_wcn6855_rx_desc_get_msdu_nss,
+       .rx_desc_get_mpdu_tid = ath12k_hw_wcn6855_rx_desc_get_mpdu_tid,
+       .rx_desc_get_mpdu_peer_id = ath12k_hw_wcn6855_rx_desc_get_mpdu_peer_id,
+       .rx_desc_copy_attn_end_tlv = ath12k_hw_wcn6855_rx_desc_copy_attn_end,
+       .rx_desc_get_mpdu_start_tag = ath12k_hw_wcn6855_rx_desc_get_mpdu_start_tag,
+       .rx_desc_get_mpdu_ppdu_id = ath12k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id,
+       .rx_desc_set_msdu_len = ath12k_hw_wcn6855_rx_desc_set_msdu_len,
+#endif
+       .rx_desc_get_attention = qwz_hw_wcn6855_rx_desc_get_attention,
+#ifdef notyet
+       .rx_desc_get_msdu_payload = ath12k_hw_wcn6855_rx_desc_get_msdu_payload,
+#endif
+       .reo_setup = qwz_hw_wcn6855_reo_setup,
+#ifdef notyet
+       .mpdu_info_get_peerid = ath12k_hw_wcn6855_mpdu_info_get_peerid,
+       .rx_desc_mac_addr2_valid = ath12k_hw_wcn6855_rx_desc_mac_addr2_valid,
+       .rx_desc_mpdu_start_addr2 = ath12k_hw_wcn6855_rx_desc_mpdu_start_addr2,
+       .get_ring_selector = ath12k_hw_ipq8074_get_tcl_ring_selector,
+#endif
+};
+
+/*
+ * Hardware ops for WCN6750 chips.  Ops under "notyet" have not been
+ * ported yet and are compiled out; use #ifdef consistently so that
+ * defining "notyet" (empty) to enable them cannot break "#if notyet".
+ */
+const struct ath12k_hw_ops wcn6750_ops = {
+       .get_hw_mac_from_pdev_id = qwz_hw_ipq8074_mac_from_pdev_id,
+       .wmi_init_config = qwz_init_wmi_config_qca6390,
+       .mac_id_to_pdev_id = qwz_hw_mac_id_to_pdev_id_qca6390,
+       .mac_id_to_srng_id = qwz_hw_mac_id_to_srng_id_qca6390,
+#ifdef notyet
+       .tx_mesh_enable = ath12k_hw_qcn9074_tx_mesh_enable,
+#endif
+       .rx_desc_get_first_msdu = qwz_hw_qcn9074_rx_desc_get_first_msdu,
+#ifdef notyet
+       .rx_desc_get_last_msdu = ath12k_hw_qcn9074_rx_desc_get_last_msdu,
+#endif
+       .rx_desc_get_l3_pad_bytes = qwz_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+       .rx_desc_get_hdr_status = qwz_hw_qcn9074_rx_desc_get_hdr_status,
+       .rx_desc_encrypt_valid = qwz_hw_qcn9074_rx_desc_encrypt_valid,
+       .rx_desc_get_encrypt_type = qwz_hw_qcn9074_rx_desc_get_encrypt_type,
+       .rx_desc_get_decap_type = qwz_hw_qcn9074_rx_desc_get_decap_type,
+#ifdef notyet
+       .rx_desc_get_mesh_ctl = ath12k_hw_qcn9074_rx_desc_get_mesh_ctl,
+       .rx_desc_get_ldpc_support = ath12k_hw_qcn9074_rx_desc_get_ldpc_support,
+       .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+       .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+       .rx_desc_get_mpdu_start_seq_no = ath12k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+#endif
+       .rx_desc_get_msdu_len = qwz_hw_qcn9074_rx_desc_get_msdu_len,
+#ifdef notyet
+       .rx_desc_get_msdu_sgi = ath12k_hw_qcn9074_rx_desc_get_msdu_sgi,
+       .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+       .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+#endif
+       .rx_desc_get_msdu_freq = qwz_hw_qcn9074_rx_desc_get_msdu_freq,
+#ifdef notyet
+       .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+       .rx_desc_get_msdu_nss = ath12k_hw_qcn9074_rx_desc_get_msdu_nss,
+       .rx_desc_get_mpdu_tid = ath12k_hw_qcn9074_rx_desc_get_mpdu_tid,
+       .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+       .rx_desc_copy_attn_end_tlv = ath12k_hw_qcn9074_rx_desc_copy_attn_end,
+       .rx_desc_get_mpdu_start_tag = ath12k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+       .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+       .rx_desc_set_msdu_len = ath12k_hw_qcn9074_rx_desc_set_msdu_len,
+#endif
+       .rx_desc_get_attention = qwz_hw_qcn9074_rx_desc_get_attention,
+#ifdef notyet
+       .rx_desc_get_msdu_payload = ath12k_hw_qcn9074_rx_desc_get_msdu_payload,
+#endif
+       .reo_setup = qwz_hw_wcn6855_reo_setup,
+#ifdef notyet
+       .mpdu_info_get_peerid = ath12k_hw_ipq8074_mpdu_info_get_peerid,
+       .rx_desc_mac_addr2_valid = ath12k_hw_ipq9074_rx_desc_mac_addr2_valid,
+       .rx_desc_mpdu_start_addr2 = ath12k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+       .get_ring_selector = ath12k_hw_wcn6750_get_tcl_ring_selector,
+#endif
+};
+
+/*
+ * Per-ring-instance mask bits.  They are combined in the
+ * ath12k_hw_ring_mask tables below.
+ */
+#define ATH12K_TX_RING_MASK_0 BIT(0)
+#define ATH12K_TX_RING_MASK_1 BIT(1)
+#define ATH12K_TX_RING_MASK_2 BIT(2)
+#define ATH12K_TX_RING_MASK_3 BIT(3)
+#define ATH12K_TX_RING_MASK_4 BIT(4)
+
+#define ATH12K_RX_RING_MASK_0 0x1
+#define ATH12K_RX_RING_MASK_1 0x2
+#define ATH12K_RX_RING_MASK_2 0x4
+#define ATH12K_RX_RING_MASK_3 0x8
+
+#define ATH12K_RX_ERR_RING_MASK_0 0x1
+
+#define ATH12K_RX_WBM_REL_RING_MASK_0 0x1
+
+#define ATH12K_REO_STATUS_RING_MASK_0 0x1
+
+#define ATH12K_RXDMA2HOST_RING_MASK_0 0x1
+#define ATH12K_RXDMA2HOST_RING_MASK_1 0x2
+#define ATH12K_RXDMA2HOST_RING_MASK_2 0x4
+
+#define ATH12K_HOST2RXDMA_RING_MASK_0 0x1
+#define ATH12K_HOST2RXDMA_RING_MASK_1 0x2
+#define ATH12K_HOST2RXDMA_RING_MASK_2 0x4
+
+#define ATH12K_RX_MON_STATUS_RING_MASK_0 0x1
+#define ATH12K_RX_MON_STATUS_RING_MASK_1 0x2
+#define ATH12K_RX_MON_STATUS_RING_MASK_2 0x4
+
+/*
+ * Ring masks for IPQ8074-family chips.  Leading zero entries shift
+ * which slot of each per-ring-type array carries the mask.
+ */
+const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_ipq8074 = {
+       .tx  = {
+               ATH12K_TX_RING_MASK_0,
+               ATH12K_TX_RING_MASK_1,
+               ATH12K_TX_RING_MASK_2,
+       },
+       .rx_mon_status = {
+               0, 0, 0, 0,
+               ATH12K_RX_MON_STATUS_RING_MASK_0,
+               ATH12K_RX_MON_STATUS_RING_MASK_1,
+               ATH12K_RX_MON_STATUS_RING_MASK_2,
+       },
+       .rx = {
+               0, 0, 0, 0, 0, 0, 0,
+               ATH12K_RX_RING_MASK_0,
+               ATH12K_RX_RING_MASK_1,
+               ATH12K_RX_RING_MASK_2,
+               ATH12K_RX_RING_MASK_3,
+       },
+       .rx_err = {
+               ATH12K_RX_ERR_RING_MASK_0,
+       },
+       .rx_wbm_rel = {
+               ATH12K_RX_WBM_REL_RING_MASK_0,
+       },
+       .reo_status = {
+               ATH12K_REO_STATUS_RING_MASK_0,
+       },
+       .rxdma2host = {
+               ATH12K_RXDMA2HOST_RING_MASK_0,
+               ATH12K_RXDMA2HOST_RING_MASK_1,
+               ATH12K_RXDMA2HOST_RING_MASK_2,
+       },
+       .host2rxdma = {
+               ATH12K_HOST2RXDMA_RING_MASK_0,
+               ATH12K_HOST2RXDMA_RING_MASK_1,
+               ATH12K_HOST2RXDMA_RING_MASK_2,
+       },
+};
+
+/* Ring masks for QCA6390; host2rxdma rings are not used. */
+const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qca6390 = {
+       .tx  = {
+               ATH12K_TX_RING_MASK_0,
+       },
+       .rx_mon_status = {
+               0, 0, 0, 0,
+               ATH12K_RX_MON_STATUS_RING_MASK_0,
+               ATH12K_RX_MON_STATUS_RING_MASK_1,
+               ATH12K_RX_MON_STATUS_RING_MASK_2,
+       },
+       .rx = {
+               0, 0, 0, 0, 0, 0, 0,
+               ATH12K_RX_RING_MASK_0,
+               ATH12K_RX_RING_MASK_1,
+               ATH12K_RX_RING_MASK_2,
+               ATH12K_RX_RING_MASK_3,
+       },
+       .rx_err = {
+               ATH12K_RX_ERR_RING_MASK_0,
+       },
+       .rx_wbm_rel = {
+               ATH12K_RX_WBM_REL_RING_MASK_0,
+       },
+       .reo_status = {
+               ATH12K_REO_STATUS_RING_MASK_0,
+       },
+       .rxdma2host = {
+               ATH12K_RXDMA2HOST_RING_MASK_0,
+               ATH12K_RXDMA2HOST_RING_MASK_1,
+               ATH12K_RXDMA2HOST_RING_MASK_2,
+       },
+       .host2rxdma = {
+       },
+};
+
+/* Ring masks for QCN9074. */
+const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9074 = {
+       .tx  = {
+               ATH12K_TX_RING_MASK_0,
+               ATH12K_TX_RING_MASK_1,
+               ATH12K_TX_RING_MASK_2,
+       },
+       .rx_mon_status = {
+               0, 0, 0,
+               ATH12K_RX_MON_STATUS_RING_MASK_0,
+               ATH12K_RX_MON_STATUS_RING_MASK_1,
+               ATH12K_RX_MON_STATUS_RING_MASK_2,
+       },
+       .rx = {
+               0, 0, 0, 0,
+               ATH12K_RX_RING_MASK_0,
+               ATH12K_RX_RING_MASK_1,
+               ATH12K_RX_RING_MASK_2,
+               ATH12K_RX_RING_MASK_3,
+       },
+       .rx_err = {
+               0, 0, 0,
+               ATH12K_RX_ERR_RING_MASK_0,
+       },
+       .rx_wbm_rel = {
+               0, 0, 0,
+               ATH12K_RX_WBM_REL_RING_MASK_0,
+       },
+       .reo_status = {
+               0, 0, 0,
+               ATH12K_REO_STATUS_RING_MASK_0,
+       },
+       .rxdma2host = {
+               0, 0, 0,
+               ATH12K_RXDMA2HOST_RING_MASK_0,
+       },
+       .host2rxdma = {
+               0, 0, 0,
+               ATH12K_HOST2RXDMA_RING_MASK_0,
+       },
+};
+
+/* Ring masks for WCN6750; host2rxdma rings are not used. */
+const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn6750 = {
+       .tx  = {
+               ATH12K_TX_RING_MASK_0,
+               0,
+               ATH12K_TX_RING_MASK_2,
+               0,
+               ATH12K_TX_RING_MASK_4,
+       },
+       .rx_mon_status = {
+               0, 0, 0, 0, 0, 0,
+               ATH12K_RX_MON_STATUS_RING_MASK_0,
+       },
+       .rx = {
+               0, 0, 0, 0, 0, 0, 0,
+               ATH12K_RX_RING_MASK_0,
+               ATH12K_RX_RING_MASK_1,
+               ATH12K_RX_RING_MASK_2,
+               ATH12K_RX_RING_MASK_3,
+       },
+       .rx_err = {
+               0, ATH12K_RX_ERR_RING_MASK_0,
+       },
+       .rx_wbm_rel = {
+               0, ATH12K_RX_WBM_REL_RING_MASK_0,
+       },
+       .reo_status = {
+               0, ATH12K_REO_STATUS_RING_MASK_0,
+       },
+       .rxdma2host = {
+               ATH12K_RXDMA2HOST_RING_MASK_0,
+               ATH12K_RXDMA2HOST_RING_MASK_1,
+               ATH12K_RXDMA2HOST_RING_MASK_2,
+       },
+       .host2rxdma = {
+       },
+};
+
+/* Target firmware's Copy Engine configuration.
+ * All fields are stored little-endian, as consumed by the firmware.
+ */
+const struct ce_pipe_config ath12k_target_ce_config_wlan_ipq8074[] = {
+       /* CE0: host->target HTC control and raw streams */
+       {
+               .pipenum = htole32(0),
+               .pipedir = htole32(PIPEDIR_OUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE1: target->host HTT + HTC control */
+       {
+               .pipenum = htole32(1),
+               .pipedir = htole32(PIPEDIR_IN),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE2: target->host WMI */
+       {
+               .pipenum = htole32(2),
+               .pipedir = htole32(PIPEDIR_IN),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE3: host->target WMI */
+       {
+               .pipenum = htole32(3),
+               .pipedir = htole32(PIPEDIR_OUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE4: host->target HTT; interrupts disabled on this pipe */
+       {
+               .pipenum = htole32(4),
+               .pipedir = htole32(PIPEDIR_OUT),
+               .nentries = htole32(256),
+               .nbytes_max = htole32(256),
+               .flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+               .reserved = htole32(0),
+       },
+
+       /* CE5: target->host Pktlog */
+       {
+               .pipenum = htole32(5),
+               .pipedir = htole32(PIPEDIR_IN),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(0),
+               .reserved = htole32(0),
+       },
+
+       /* CE6: Reserved for target autonomous hif_memcpy */
+       {
+               .pipenum = htole32(6),
+               .pipedir = htole32(PIPEDIR_INOUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(65535),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE7 used only by Host */
+       {
+               .pipenum = htole32(7),
+               .pipedir = htole32(PIPEDIR_OUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE8 target->host used only by IPA */
+       {
+               .pipenum = htole32(8),
+               .pipedir = htole32(PIPEDIR_INOUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(65535),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE9 host->target HTT */
+       {
+               .pipenum = htole32(9),
+               .pipedir = htole32(PIPEDIR_OUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE10 target->host HTT */
+       {
+               .pipenum = htole32(10),
+               .pipedir = htole32(PIPEDIR_INOUT_H2H),
+               .nentries = htole32(0),
+               .nbytes_max = htole32(0),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE11 Not used */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ * The table must end with a zeroed terminator entry.
+ */
+const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_ipq8074[] = {
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(7),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(9),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(0),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(1),
+       },
+       { /* not used */
+               .service_id = htole32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(0),
+       },
+       { /* not used */
+               .service_id = htole32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(1),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(4),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(1),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_PKT_LOG),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(5),
+       },
+
+       /* (Additions here) */
+
+       { /* terminator entry */ }
+};
+
+/*
+ * IPQ6018: map from HTC service/endpoint ID to Copy Engine pipe number.
+ * This table is passed to the target firmware at startup; all fields are
+ * little-endian.  The empty entry at the end terminates the table.
+ */
+const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_ipq6018[] = {
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(7),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(0),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(1),
+       },
+       { /* not used */
+               .service_id = htole32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(0),
+       },
+       { /* not used */
+               .service_id = htole32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(1),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(4),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(1),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_PKT_LOG),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(5),
+       },
+
+       /* (Additions here) */
+
+       { /* terminator entry */ }
+};
+
+/*
+ * QCA6390: target firmware's Copy Engine configuration, indexed by pipe.
+ * Each entry gives the pipe direction, ring entry count and maximum
+ * transfer size as seen by the target; all fields are little-endian.
+ */
+const struct ce_pipe_config ath12k_target_ce_config_wlan_qca6390[] = {
+       /* CE0: host->target HTC control and raw streams */
+       {
+               .pipenum = htole32(0),
+               .pipedir = htole32(PIPEDIR_OUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE1: target->host HTT + HTC control */
+       {
+               .pipenum = htole32(1),
+               .pipedir = htole32(PIPEDIR_IN),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE2: target->host WMI */
+       {
+               .pipenum = htole32(2),
+               .pipedir = htole32(PIPEDIR_IN),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE3: host->target WMI */
+       {
+               .pipenum = htole32(3),
+               .pipedir = htole32(PIPEDIR_OUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE4: host->target HTT (interrupts disabled; polled by host) */
+       {
+               .pipenum = htole32(4),
+               .pipedir = htole32(PIPEDIR_OUT),
+               .nentries = htole32(256),
+               .nbytes_max = htole32(256),
+               .flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+               .reserved = htole32(0),
+       },
+
+       /* CE5: target->host Pktlog */
+       {
+               .pipenum = htole32(5),
+               .pipedir = htole32(PIPEDIR_IN),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE6: Reserved for target autonomous hif_memcpy */
+       {
+               .pipenum = htole32(6),
+               .pipedir = htole32(PIPEDIR_INOUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(16384),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE7 used only by Host */
+       {
+               .pipenum = htole32(7),
+               .pipedir = htole32(PIPEDIR_INOUT_H2H),
+               .nentries = htole32(0),
+               .nbytes_max = htole32(0),
+               .flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+               .reserved = htole32(0),
+       },
+
+       /* CE8 target->host used only by IPA */
+       {
+               .pipenum = htole32(8),
+               .pipedir = htole32(PIPEDIR_INOUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(16384),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+       /* CE 9, 10, 11 are used by MHI driver */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ *
+ * Designated initializers are used here for consistency with the other
+ * service-to-pipe tables in this file; all fields are little-endian.
+ */
+const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_qca6390[] = {
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(0),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(4),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(1),
+       },
+
+       /* (Additions here) */
+
+       { /* must be last */
+               .service_id = htole32(0),
+               .pipedir = htole32(0),
+               .pipenum = htole32(0),
+       },
+};
+
+/*
+ * QCN9074: target firmware's Copy Engine configuration, indexed by pipe.
+ * Each entry gives the pipe direction, ring entry count and maximum
+ * transfer size as seen by the target; all fields are little-endian.
+ */
+const struct ce_pipe_config ath12k_target_ce_config_wlan_qcn9074[] = {
+       /* CE0: host->target HTC control and raw streams */
+       {
+               .pipenum = htole32(0),
+               .pipedir = htole32(PIPEDIR_OUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE1: target->host HTT + HTC control */
+       {
+               .pipenum = htole32(1),
+               .pipedir = htole32(PIPEDIR_IN),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE2: target->host WMI */
+       {
+               .pipenum = htole32(2),
+               .pipedir = htole32(PIPEDIR_IN),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE3: host->target WMI */
+       {
+               .pipenum = htole32(3),
+               .pipedir = htole32(PIPEDIR_OUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE4: host->target HTT (interrupts disabled; polled by host) */
+       {
+               .pipenum = htole32(4),
+               .pipedir = htole32(PIPEDIR_OUT),
+               .nentries = htole32(256),
+               .nbytes_max = htole32(256),
+               .flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+               .reserved = htole32(0),
+       },
+
+       /* CE5: target->host Pktlog */
+       {
+               .pipenum = htole32(5),
+               .pipedir = htole32(PIPEDIR_IN),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(2048),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE6: Reserved for target autonomous hif_memcpy */
+       {
+               .pipenum = htole32(6),
+               .pipedir = htole32(PIPEDIR_INOUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(16384),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+
+       /* CE7 used only by Host */
+       {
+               .pipenum = htole32(7),
+               .pipedir = htole32(PIPEDIR_INOUT_H2H),
+               .nentries = htole32(0),
+               .nbytes_max = htole32(0),
+               .flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+               .reserved = htole32(0),
+       },
+
+       /* CE8 target->host used only by IPA */
+       {
+               .pipenum = htole32(8),
+               .pipedir = htole32(PIPEDIR_INOUT),
+               .nentries = htole32(32),
+               .nbytes_max = htole32(16384),
+               .flags = htole32(CE_ATTR_FLAGS),
+               .reserved = htole32(0),
+       },
+       /* CE 9, 10, 11 are used by MHI driver */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ *
+ * Designated initializers are used here for consistency with the other
+ * service-to-pipe tables in this file; all fields are little-endian.
+ */
+const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_qcn9074[] = {
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(3),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(2),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(0),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(1),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(0),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(1),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+               .pipedir = htole32(PIPEDIR_OUT),        /* out = UL = host -> target */
+               .pipenum = htole32(4),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(1),
+       },
+       {
+               .service_id = htole32(ATH12K_HTC_SVC_ID_PKT_LOG),
+               .pipedir = htole32(PIPEDIR_IN), /* in = DL = target -> host */
+               .pipenum = htole32(5),
+       },
+
+       /* (Additions here) */
+
+       { /* must be last */
+               .service_id = htole32(0),
+               .pipedir = htole32(0),
+               .pipenum = htole32(0),
+       },
+};
+
+#define QWZ_CE_COUNT_IPQ8074   21
+
+/*
+ * IPQ8074: host-side Copy Engine attributes, indexed by CE pipe number.
+ * src_nentries/src_sz_max describe the host->target send ring,
+ * dest_nentries the target->host receive ring; send_cb/recv_cb are the
+ * host completion handlers for each direction.
+ *
+ * NOTE(review): QWZ_CE_COUNT_IPQ8074 is 21 but only 12 entries (CE0-CE11)
+ * are initialized here; the remaining array slots are zero-filled.
+ * Confirm whether 21 is correct for this chip (qwx(4) uses 12).
+ */
+const struct ce_attr qwz_host_ce_config_ipq8074[QWZ_CE_COUNT_IPQ8074] = {
+       /* CE0: host->target HTC control and raw streams */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 16,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+               .send_cb = qwz_htc_tx_completion_handler,
+       },
+
+       /* CE1: target->host HTT + HTC control */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = qwz_htc_rx_completion_handler,
+       },
+
+       /* CE2: target->host WMI */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = qwz_htc_rx_completion_handler,
+       },
+
+       /* CE3: host->target WMI (mac0) */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 32,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+               .send_cb = qwz_htc_tx_completion_handler,
+       },
+
+       /* CE4: host->target HTT (interrupts disabled; no completion cb) */
+       {
+               .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+               .src_nentries = 2048,
+               .src_sz_max = 256,
+               .dest_nentries = 0,
+       },
+
+       /* CE5: target->host pktlog */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = qwz_dp_htt_htc_t2h_msg_handler,
+       },
+
+       /* CE6: target autonomous hif_memcpy */
+       {
+               .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE7: host->target WMI (mac1) */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 32,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+               .send_cb = qwz_htc_tx_completion_handler,
+       },
+
+       /* CE8: target autonomous hif_memcpy */
+       {
+               .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE9: host->target WMI (mac2) */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 32,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+               .send_cb = qwz_htc_tx_completion_handler,
+       },
+
+       /* CE10: target->host HTT */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = qwz_htc_rx_completion_handler,
+       },
+
+       /* CE11: Not used */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+};
+
+#define QWZ_CE_COUNT_QCA6390   9
+
+/*
+ * QCA6390: host-side Copy Engine attributes, indexed by CE pipe number.
+ * src_nentries/src_sz_max describe the host->target send ring,
+ * dest_nentries the target->host receive ring; send_cb/recv_cb are the
+ * host completion handlers for each direction.
+ */
+const struct ce_attr qwz_host_ce_config_qca6390[QWZ_CE_COUNT_QCA6390] = {
+       /* CE0: host->target HTC control and raw streams (no completion cb) */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 16,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+       },
+
+       /* CE1: target->host HTT + HTC control */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = qwz_htc_rx_completion_handler,
+       },
+
+       /* CE2: target->host WMI */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = qwz_htc_rx_completion_handler,
+       },
+
+       /* CE3: host->target WMI (mac0) */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 32,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+               .send_cb = qwz_htc_tx_completion_handler,
+       },
+
+       /* CE4: host->target HTT (interrupts disabled; no completion cb) */
+       {
+               .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+               .src_nentries = 2048,
+               .src_sz_max = 256,
+               .dest_nentries = 0,
+       },
+
+       /* CE5: target->host pktlog */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = qwz_dp_htt_htc_t2h_msg_handler,
+       },
+
+       /* CE6: target autonomous hif_memcpy */
+       {
+               .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE7: host->target WMI (mac1) */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 32,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+               .send_cb = qwz_htc_tx_completion_handler,
+       },
+
+       /* CE8: target autonomous hif_memcpy */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+};
+
+#define QWZ_CE_COUNT_QCN9074   6
+
+/*
+ * QCN9074: host-side Copy Engine attributes, indexed by CE pipe number.
+ * src_nentries/src_sz_max describe the host->target send ring,
+ * dest_nentries the target->host receive ring; send_cb/recv_cb are the
+ * host completion handlers for each direction.
+ */
+const struct ce_attr qwz_host_ce_config_qcn9074[QWZ_CE_COUNT_QCN9074] = {
+       /* CE0: host->target HTC control and raw streams (no completion cb) */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 16,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+       },
+
+       /* CE1: target->host HTT + HTC control */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = qwz_htc_rx_completion_handler,
+       },
+
+       /* CE2: target->host WMI (smaller RX ring than the other chips) */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 32,
+               .recv_cb = qwz_htc_rx_completion_handler,
+       },
+
+       /* CE3: host->target WMI (mac0) */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 32,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+               .send_cb = qwz_htc_tx_completion_handler,
+       },
+
+       /* CE4: host->target HTT (interrupts disabled; no completion cb) */
+       {
+               .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+               .src_nentries = 2048,
+               .src_sz_max = 256,
+               .dest_nentries = 0,
+       },
+
+       /* CE5: target->host pktlog */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = qwz_dp_htt_htc_t2h_msg_handler,
+       },
+};
+
+/*
+ * IPQ8074: per-TX-ring mapping from TCL ring number to the WBM completion
+ * ring number and the return buffer manager (RBM) ID used with it.
+ * Identity mapping: TCL ring N -> WBM ring N -> SW<N>_BM.
+ */
+static const struct ath12k_hw_tcl2wbm_rbm_map ath12k_hw_tcl2wbm_rbm_map_ipq8074[] = {
+       {
+               .tcl_ring_num = 0,
+               .wbm_ring_num = 0,
+               .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+       },
+       {
+               .tcl_ring_num = 1,
+               .wbm_ring_num = 1,
+               .rbm_id = HAL_RX_BUF_RBM_SW1_BM,
+       },
+       {
+               .tcl_ring_num = 2,
+               .wbm_ring_num = 2,
+               .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+       },
+};
+
+/*
+ * WCN6750: per-TX-ring mapping from TCL ring number to the WBM completion
+ * ring number and the return buffer manager (RBM) ID used with it.
+ * Unlike IPQ8074 this is not an identity map: TCL ring 1 completes on
+ * WBM ring 4 with SW4_BM.
+ */
+static const struct ath12k_hw_tcl2wbm_rbm_map ath12k_hw_tcl2wbm_rbm_map_wcn6750[] = {
+       {
+               .tcl_ring_num = 0,
+               .wbm_ring_num = 0,
+               .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+       },
+       {
+               .tcl_ring_num = 1,
+               .wbm_ring_num = 4,
+               .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+       },
+       {
+               .tcl_ring_num = 2,
+               .wbm_ring_num = 2,
+               .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+       },
+};
+
+
+/*
+ * Per-chip HAL parameters: the RBM used for RX buffer replenishment and
+ * the TCL->WBM ring map (tables above), referenced from ath12k_hw_params.
+ */
+static const struct ath12k_hw_hal_params ath12k_hw_hal_params_ipq8074 = {
+       .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+       .tcl2wbm_rbm_map = ath12k_hw_tcl2wbm_rbm_map_ipq8074,
+};
+
+/* QCA6390 shares the IPQ8074 TCL->WBM map but uses SW1_BM for RX buffers. */
+static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qca6390 = {
+       .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+       .tcl2wbm_rbm_map = ath12k_hw_tcl2wbm_rbm_map_ipq8074,
+};
+
+static const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn6750 = {
+       .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+       .tcl2wbm_rbm_map = ath12k_hw_tcl2wbm_rbm_map_wcn6750,
+};
+
+static const struct ath12k_hw_params ath12k_hw_params[] = {
+       {
+               .hw_rev = ATH12K_HW_IPQ8074,
+               .name = "ipq8074 hw2.0",
+               .fw = {
+                       .dir = "ipq8074-hw2.0",
+                       .board_size = 256 * 1024,
+                       .cal_offset = 128 * 1024,
+               },
+               .max_radios = 3,
+               .bdf_addr = 0x4B0C0000,
+               .hw_ops = &ipq8074_ops,
+               .ring_mask = &ath12k_hw_ring_mask_ipq8074,
+               .internal_sleep_clock = false,
+               .regs = &ipq8074_regs,
+               .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
+               .host_ce_config = qwz_host_ce_config_ipq8074,
+               .ce_count = QWZ_CE_COUNT_IPQ8074,
+               .target_ce_config = ath12k_target_ce_config_wlan_ipq8074,
+               .target_ce_count = 11,
+               .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_ipq8074,
+               .svc_to_ce_map_len = 21,
+               .single_pdev_only = false,
+               .rxdma1_enable = true,
+               .num_rxmda_per_pdev = 1,
+               .rx_mac_buf_ring = false,
+               .vdev_start_delay = false,
+               .htt_peer_map_v2 = true,
+#if notyet
+               .spectral = {
+                       .fft_sz = 2,
+                       /* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes.
+                        * so added pad size as 2 bytes to compensate the BIN size
+                        */
+                       .fft_pad_sz = 2,
+                       .summary_pad_sz = 0,
+                       .fft_hdr_len = 16,
+                       .max_fft_bins = 512,
+                       .fragment_160mhz = true,
+               },
+
+               .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                       BIT(NL80211_IFTYPE_AP) |
+                                       BIT(NL80211_IFTYPE_MESH_POINT),
+               .supports_monitor = true,
+               .full_monitor_mode = false,
+#endif
+               .supports_shadow_regs = false,
+               .idle_ps = false,
+               .supports_sta_ps = false,
+               .cold_boot_calib = true,
+               .cbcal_restart_fw = true,
+               .fw_mem_mode = 0,
+               .num_vdevs = 16 + 1,
+               .num_peers = 512,
+               .supports_suspend = false,
+               .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+               .supports_regdb = false,
+               .fix_l1ss = true,
+               .credit_flow = false,
+               .max_tx_ring = DP_TCL_NUM_RING_MAX,
+               .hal_params = &ath12k_hw_hal_params_ipq8074,
+#if notyet
+               .supports_dynamic_smps_6ghz = false,
+               .alloc_cacheable_memory = true,
+               .supports_rssi_stats = false,
+#endif
+               .fw_wmi_diag_event = false,
+               .current_cc_support = false,
+               .dbr_debug_support = true,
+               .global_reset = false,
+#ifdef notyet
+               .bios_sar_capa = NULL,
+#endif
+               .m3_fw_support = false,
+               .fixed_bdf_addr = true,
+               .fixed_mem_region = true,
+               .static_window_map = false,
+#if notyet
+               .hybrid_bus_type = false,
+               .fixed_fw_mem = false,
+               .support_off_channel_tx = false,
+               .supports_multi_bssid = false,
+
+               .sram_dump = {},
+
+               .tcl_ring_retry = true,
+#endif
+               .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+#ifdef notyet
+               .smp2p_wow_exit = false,
+#endif
+       },
+       {
+               .hw_rev = ATH12K_HW_IPQ6018_HW10,
+               .name = "ipq6018 hw1.0",
+               .fw = {
+                       .dir = "ipq6018-hw1.0",
+                       .board_size = 256 * 1024,
+                       .cal_offset = 128 * 1024,
+               },
+               .max_radios = 2,
+               .bdf_addr = 0x4ABC0000,
+               .hw_ops = &ipq6018_ops,
+               .ring_mask = &ath12k_hw_ring_mask_ipq8074,
+               .internal_sleep_clock = false,
+               .regs = &ipq8074_regs,
+               .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
+               .host_ce_config = qwz_host_ce_config_ipq8074,
+               .ce_count = QWZ_CE_COUNT_IPQ8074,
+               .target_ce_config = ath12k_target_ce_config_wlan_ipq8074,
+               .target_ce_count = 11,
+               .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_ipq6018,
+               .svc_to_ce_map_len = 19,
+               .single_pdev_only = false,
+               .rxdma1_enable = true,
+               .num_rxmda_per_pdev = 1,
+               .rx_mac_buf_ring = false,
+               .vdev_start_delay = false,
+               .htt_peer_map_v2 = true,
+#if notyet
+               .spectral = {
+                       .fft_sz = 4,
+                       .fft_pad_sz = 0,
+                       .summary_pad_sz = 0,
+                       .fft_hdr_len = 16,
+                       .max_fft_bins = 512,
+                       .fragment_160mhz = true,
+               },
+
+               .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                       BIT(NL80211_IFTYPE_AP) |
+                                       BIT(NL80211_IFTYPE_MESH_POINT),
+               .supports_monitor = true,
+               .full_monitor_mode = false,
+#endif
+               .supports_shadow_regs = false,
+               .idle_ps = false,
+               .supports_sta_ps = false,
+               .cold_boot_calib = true,
+               .cbcal_restart_fw = true,
+               .fw_mem_mode = 0,
+               .num_vdevs = 16 + 1,
+               .num_peers = 512,
+               .supports_suspend = false,
+               .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+               .supports_regdb = false,
+               .fix_l1ss = true,
+               .credit_flow = false,
+               .max_tx_ring = DP_TCL_NUM_RING_MAX,
+               .hal_params = &ath12k_hw_hal_params_ipq8074,
+#if notyet
+               .supports_dynamic_smps_6ghz = false,
+               .alloc_cacheable_memory = true,
+               .supports_rssi_stats = false,
+#endif
+               .fw_wmi_diag_event = false,
+               .current_cc_support = false,
+               .dbr_debug_support = true,
+               .global_reset = false,
+#ifdef notyet
+               .bios_sar_capa = NULL,
+#endif
+               .m3_fw_support = false,
+               .fixed_bdf_addr = true,
+               .fixed_mem_region = true,
+               .static_window_map = false,
+               .hybrid_bus_type = false,
+               .fixed_fw_mem = false,
+#if notyet
+               .support_off_channel_tx = false,
+               .supports_multi_bssid = false,
+
+               .sram_dump = {},
+
+               .tcl_ring_retry = true,
+#endif
+               .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+#ifdef notyet
+               .smp2p_wow_exit = false,
+#endif
+       },
+       {
+               .name = "qca6390 hw2.0",
+               .hw_rev = ATH12K_HW_QCA6390_HW20,
+               .fw = {
+                       .dir = "qca6390-hw2.0",
+                       .board_size = 256 * 1024,
+                       .cal_offset = 128 * 1024,
+               },
+               .max_radios = 3,
+               .bdf_addr = 0x4B0C0000,
+               .hw_ops = &qca6390_ops,
+               .ring_mask = &ath12k_hw_ring_mask_qca6390,
+               .internal_sleep_clock = true,
+               .regs = &qca6390_regs,
+               .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+               .host_ce_config = qwz_host_ce_config_qca6390,
+               .ce_count = QWZ_CE_COUNT_QCA6390,
+               .target_ce_config = ath12k_target_ce_config_wlan_qca6390,
+               .target_ce_count = 9,
+               .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qca6390,
+               .svc_to_ce_map_len = 14,
+               .single_pdev_only = true,
+               .rxdma1_enable = false,
+               .num_rxmda_per_pdev = 2,
+               .rx_mac_buf_ring = true,
+               .vdev_start_delay = true,
+               .htt_peer_map_v2 = false,
+#if notyet
+               .spectral = {
+                       .fft_sz = 0,
+                       .fft_pad_sz = 0,
+                       .summary_pad_sz = 0,
+                       .fft_hdr_len = 0,
+                       .max_fft_bins = 0,
+                       .fragment_160mhz = false,
+               },
+
+               .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                       BIT(NL80211_IFTYPE_AP),
+               .supports_monitor = false,
+               .full_monitor_mode = false,
+#endif
+               .supports_shadow_regs = true,
+               .idle_ps = true,
+               .supports_sta_ps = true,
+               .cold_boot_calib = false,
+               .cbcal_restart_fw = false,
+               .fw_mem_mode = 0,
+               .num_vdevs = 16 + 1,
+               .num_peers = 512,
+               .supports_suspend = true,
+               .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+               .supports_regdb = false,
+               .fix_l1ss = true,
+               .credit_flow = true,
+               .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+               .hal_params = &ath12k_hw_hal_params_qca6390,
+#if notyet
+               .supports_dynamic_smps_6ghz = false,
+               .alloc_cacheable_memory = false,
+               .supports_rssi_stats = true,
+#endif
+               .fw_wmi_diag_event = true,
+               .current_cc_support = true,
+               .dbr_debug_support = false,
+               .global_reset = true,
+#ifdef notyet
+               .bios_sar_capa = NULL,
+#endif
+               .m3_fw_support = true,
+               .fixed_bdf_addr = false,
+               .fixed_mem_region = false,
+               .static_window_map = false,
+               .hybrid_bus_type = false,
+               .fixed_fw_mem = false,
+#if notyet
+               .support_off_channel_tx = true,
+               .supports_multi_bssid = true,
+
+               .sram_dump = {
+                       .start = 0x01400000,
+                       .end = 0x0171ffff,
+               },
+
+               .tcl_ring_retry = true,
+#endif
+               .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+#ifdef notyet
+               .smp2p_wow_exit = false,
+#endif
+       },
+       {
+               .name = "qcn9074 hw1.0",
+               .hw_rev = ATH12K_HW_QCN9074_HW10,
+               .fw = {
+                       .dir = "qcn9074-hw1.0",
+                       .board_size = 256 * 1024,
+                       .cal_offset = 128 * 1024,
+               },
+               .max_radios = 1,
+#if notyet
+               .single_pdev_only = false,
+               .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074,
+#endif
+               .hw_ops = &qcn9074_ops,
+               .ring_mask = &ath12k_hw_ring_mask_qcn9074,
+               .internal_sleep_clock = false,
+               .regs = &qcn9074_regs,
+               .host_ce_config = qwz_host_ce_config_qcn9074,
+               .ce_count = QWZ_CE_COUNT_QCN9074,
+               .target_ce_config = ath12k_target_ce_config_wlan_qcn9074,
+               .target_ce_count = 9,
+               .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qcn9074,
+               .svc_to_ce_map_len = 18,
+               .rxdma1_enable = true,
+               .num_rxmda_per_pdev = 1,
+               .rx_mac_buf_ring = false,
+               .vdev_start_delay = false,
+               .htt_peer_map_v2 = true,
+#if notyet
+               .spectral = {
+                       .fft_sz = 2,
+                       .fft_pad_sz = 0,
+                       .summary_pad_sz = 16,
+                       .fft_hdr_len = 24,
+                       .max_fft_bins = 1024,
+                       .fragment_160mhz = false,
+               },
+
+               .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                       BIT(NL80211_IFTYPE_AP) |
+                                       BIT(NL80211_IFTYPE_MESH_POINT),
+               .supports_monitor = true,
+               .full_monitor_mode = true,
+#endif
+               .supports_shadow_regs = false,
+               .idle_ps = false,
+               .supports_sta_ps = false,
+               .cold_boot_calib = false,
+               .cbcal_restart_fw = false,
+               .fw_mem_mode = 2,
+               .num_vdevs = 8,
+               .num_peers = 128,
+               .supports_suspend = false,
+               .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+               .supports_regdb = false,
+               .fix_l1ss = true,
+               .credit_flow = false,
+               .max_tx_ring = DP_TCL_NUM_RING_MAX,
+               .hal_params = &ath12k_hw_hal_params_ipq8074,
+#if notyet
+               .supports_dynamic_smps_6ghz = true,
+               .alloc_cacheable_memory = true,
+               .supports_rssi_stats = false,
+#endif
+               .fw_wmi_diag_event = false,
+               .current_cc_support = false,
+               .dbr_debug_support = true,
+               .global_reset = false,
+#ifdef notyet
+               .bios_sar_capa = NULL,
+#endif
+               .m3_fw_support = true,
+               .fixed_bdf_addr = false,
+               .fixed_mem_region = false,
+               .static_window_map = true,
+               .hybrid_bus_type = false,
+               .fixed_fw_mem = false,
+#if notyet
+               .support_off_channel_tx = false,
+               .supports_multi_bssid = false,
+
+               .sram_dump = {},
+
+               .tcl_ring_retry = true,
+#endif
+               .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+#ifdef notyet
+               .smp2p_wow_exit = false,
+#endif
+       },
+       {
+               .name = "wcn6855 hw2.0",
+               .hw_rev = ATH12K_HW_WCN6855_HW20,
+               .fw = {
+                       .dir = "wcn6855-hw2.0",
+                       .board_size = 256 * 1024,
+                       .cal_offset = 128 * 1024,
+               },
+               .max_radios = 3,
+               .bdf_addr = 0x4B0C0000,
+               .hw_ops = &wcn6855_ops,
+               .ring_mask = &ath12k_hw_ring_mask_qca6390,
+               .internal_sleep_clock = true,
+               .regs = &wcn6855_regs,
+               .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+               .host_ce_config = qwz_host_ce_config_qca6390,
+               .ce_count = QWZ_CE_COUNT_QCA6390,
+               .target_ce_config = ath12k_target_ce_config_wlan_qca6390,
+               .target_ce_count = 9,
+               .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qca6390,
+               .svc_to_ce_map_len = 14,
+               .single_pdev_only = true,
+               .rxdma1_enable = false,
+               .num_rxmda_per_pdev = 2,
+               .rx_mac_buf_ring = true,
+               .vdev_start_delay = true,
+               .htt_peer_map_v2 = false,
+#if notyet
+               .spectral = {
+                       .fft_sz = 0,
+                       .fft_pad_sz = 0,
+                       .summary_pad_sz = 0,
+                       .fft_hdr_len = 0,
+                       .max_fft_bins = 0,
+                       .fragment_160mhz = false,
+               },
+
+               .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                       BIT(NL80211_IFTYPE_AP),
+               .supports_monitor = false,
+               .full_monitor_mode = false,
+#endif
+               .supports_shadow_regs = true,
+               .idle_ps = true,
+               .supports_sta_ps = true,
+               .cold_boot_calib = false,
+               .cbcal_restart_fw = false,
+               .fw_mem_mode = 0,
+               .num_vdevs = 16 + 1,
+               .num_peers = 512,
+               .supports_suspend = true,
+               .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+               .supports_regdb = true,
+               .fix_l1ss = false,
+               .credit_flow = true,
+               .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+               .hal_params = &ath12k_hw_hal_params_qca6390,
+#if notyet
+               .supports_dynamic_smps_6ghz = false,
+               .alloc_cacheable_memory = false,
+               .supports_rssi_stats = true,
+#endif
+               .fw_wmi_diag_event = true,
+               .current_cc_support = true,
+               .dbr_debug_support = false,
+               .global_reset = true,
+#ifdef notyet
+               .bios_sar_capa = &ath12k_hw_sar_capa_wcn6855,
+#endif
+               .m3_fw_support = true,
+               .fixed_bdf_addr = false,
+               .fixed_mem_region = false,
+               .static_window_map = false,
+               .hybrid_bus_type = false,
+               .fixed_fw_mem = false,
+#if notyet
+               .support_off_channel_tx = true,
+               .supports_multi_bssid = true,
+
+               .sram_dump = {
+                       .start = 0x01400000,
+                       .end = 0x0177ffff,
+               },
+
+               .tcl_ring_retry = true,
+#endif
+               .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+#ifdef notyet
+               .smp2p_wow_exit = false,
+#endif
+       },
+       {
+               .name = "wcn6855 hw2.1",
+               .hw_rev = ATH12K_HW_WCN6855_HW21,
+               .fw = {
+                       .dir = "wcn6855-hw2.1",
+                       .board_size = 256 * 1024,
+                       .cal_offset = 128 * 1024,
+               },
+               .max_radios = 3,
+               .bdf_addr = 0x4B0C0000,
+               .hw_ops = &wcn6855_ops,
+               .ring_mask = &ath12k_hw_ring_mask_qca6390,
+               .internal_sleep_clock = true,
+               .regs = &wcn6855_regs,
+               .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+               .host_ce_config = qwz_host_ce_config_qca6390,
+               .ce_count = QWZ_CE_COUNT_QCA6390,
+               .target_ce_config = ath12k_target_ce_config_wlan_qca6390,
+               .target_ce_count = 9,
+               .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qca6390,
+               .svc_to_ce_map_len = 14,
+               .single_pdev_only = true,
+               .rxdma1_enable = false,
+               .num_rxmda_per_pdev = 2,
+               .rx_mac_buf_ring = true,
+               .vdev_start_delay = true,
+               .htt_peer_map_v2 = false,
+#if notyet
+               .spectral = {
+                       .fft_sz = 0,
+                       .fft_pad_sz = 0,
+                       .summary_pad_sz = 0,
+                       .fft_hdr_len = 0,
+                       .max_fft_bins = 0,
+                       .fragment_160mhz = false,
+               },
+
+               .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                       BIT(NL80211_IFTYPE_AP),
+               .supports_monitor = false,
+#endif
+               .supports_shadow_regs = true,
+               .idle_ps = true,
+               .supports_sta_ps = true,
+               .cold_boot_calib = false,
+               .cbcal_restart_fw = false,
+               .fw_mem_mode = 0,
+               .num_vdevs = 16 + 1,
+               .num_peers = 512,
+               .supports_suspend = true,
+               .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+               .supports_regdb = true,
+               .fix_l1ss = false,
+               .credit_flow = true,
+               .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+               .hal_params = &ath12k_hw_hal_params_qca6390,
+#if notyet
+               .supports_dynamic_smps_6ghz = false,
+               .alloc_cacheable_memory = false,
+               .supports_rssi_stats = true,
+#endif
+               .fw_wmi_diag_event = true,
+               .current_cc_support = true,
+               .dbr_debug_support = false,
+               .global_reset = true,
+#ifdef notyet
+               .bios_sar_capa = &ath12k_hw_sar_capa_wcn6855,
+#endif
+               .m3_fw_support = true,
+               .fixed_bdf_addr = false,
+               .fixed_mem_region = false,
+               .static_window_map = false,
+               .hybrid_bus_type = false,
+               .fixed_fw_mem = false,
+#if notyet
+               .support_off_channel_tx = true,
+               .supports_multi_bssid = true,
+
+               .sram_dump = {
+                       .start = 0x01400000,
+                       .end = 0x0177ffff,
+               },
+
+               .tcl_ring_retry = true,
+#endif
+               .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+#ifdef notyet
+               .smp2p_wow_exit = false,
+#endif
+       },
+       {
+               .name = "wcn6750 hw1.0",
+               .hw_rev = ATH12K_HW_WCN6750_HW10,
+               .fw = {
+                       .dir = "wcn6750-hw1.0",
+                       .board_size = 256 * 1024,
+                       .cal_offset = 128 * 1024,
+               },
+               .max_radios = 1,
+               .bdf_addr = 0x4B0C0000,
+               .hw_ops = &wcn6750_ops,
+               .ring_mask = &ath12k_hw_ring_mask_wcn6750,
+               .internal_sleep_clock = false,
+               .regs = &wcn6750_regs,
+               .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750,
+               .host_ce_config = qwz_host_ce_config_qca6390,
+               .ce_count = QWZ_CE_COUNT_QCA6390,
+               .target_ce_config = ath12k_target_ce_config_wlan_qca6390,
+               .target_ce_count = 9,
+               .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qca6390,
+               .svc_to_ce_map_len = 14,
+               .single_pdev_only = true,
+               .rxdma1_enable = false,
+               .num_rxmda_per_pdev = 1,
+               .rx_mac_buf_ring = true,
+               .vdev_start_delay = true,
+               .htt_peer_map_v2 = false,
+#if notyet
+               .spectral = {
+                       .fft_sz = 0,
+                       .fft_pad_sz = 0,
+                       .summary_pad_sz = 0,
+                       .fft_hdr_len = 0,
+                       .max_fft_bins = 0,
+                       .fragment_160mhz = false,
+               },
+
+               .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                       BIT(NL80211_IFTYPE_AP),
+               .supports_monitor = false,
+#endif
+               .supports_shadow_regs = true,
+               .idle_ps = true,
+               .supports_sta_ps = true,
+               .cold_boot_calib = true,
+               .cbcal_restart_fw = false,
+               .fw_mem_mode = 0,
+               .num_vdevs = 16 + 1,
+               .num_peers = 512,
+               .supports_suspend = false,
+               .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+               .supports_regdb = true,
+               .fix_l1ss = false,
+               .credit_flow = true,
+               .max_tx_ring = DP_TCL_NUM_RING_MAX,
+               .hal_params = &ath12k_hw_hal_params_wcn6750,
+#if notyet
+               .supports_dynamic_smps_6ghz = false,
+               .alloc_cacheable_memory = false,
+               .supports_rssi_stats = true,
+#endif
+               .fw_wmi_diag_event = false,
+               .current_cc_support = true,
+               .dbr_debug_support = false,
+               .global_reset = false,
+#ifdef notyet
+               .bios_sar_capa = NULL,
+#endif
+               .m3_fw_support = false,
+               .fixed_bdf_addr = false,
+               .fixed_mem_region = false,
+               .static_window_map = true,
+               .hybrid_bus_type = true,
+               .fixed_fw_mem = true,
+#if notyet
+               .support_off_channel_tx = true,
+               .supports_multi_bssid = true,
+
+               .sram_dump = {},
+
+               .tcl_ring_retry = false,
+#endif
+               .tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
+#ifdef notyet
+               .smp2p_wow_exit = true,
+#endif
+       },
+};
+
+/*
+ * HAL (hardware abstraction layer) register offset table for IPQ8074
+ * family chips.  Each field is a byte offset into the device's register
+ * space as consumed via the hw_params .regs pointer.
+ * NOTE(review): offsets presumably mirror the upstream Linux ath12k/ath11k
+ * driver's table for this chip -- verify against upstream before changing.
+ */
+const struct ath12k_hw_regs ipq8074_regs = {
+       /* SW2TCL(x) R0 ring configuration address */
+       .hal_tcl1_ring_base_lsb = 0x00000510,
+       .hal_tcl1_ring_base_msb = 0x00000514,
+       .hal_tcl1_ring_id = 0x00000518,
+       .hal_tcl1_ring_misc = 0x00000520,
+       .hal_tcl1_ring_tp_addr_lsb = 0x0000052c,
+       .hal_tcl1_ring_tp_addr_msb = 0x00000530,
+       .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000540,
+       .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000544,
+       .hal_tcl1_ring_msi1_base_lsb = 0x00000558,
+       .hal_tcl1_ring_msi1_base_msb = 0x0000055c,
+       .hal_tcl1_ring_msi1_data = 0x00000560,
+       .hal_tcl2_ring_base_lsb = 0x00000568,
+       .hal_tcl_ring_base_lsb = 0x00000618,
+
+       /* TCL STATUS ring address */
+       .hal_tcl_status_ring_base_lsb = 0x00000720,
+
+       /* REO2SW(x) R0 ring configuration address */
+       .hal_reo1_ring_base_lsb = 0x0000029c,
+       .hal_reo1_ring_base_msb = 0x000002a0,
+       .hal_reo1_ring_id = 0x000002a4,
+       .hal_reo1_ring_misc = 0x000002ac,
+       .hal_reo1_ring_hp_addr_lsb = 0x000002b0,
+       .hal_reo1_ring_hp_addr_msb = 0x000002b4,
+       .hal_reo1_ring_producer_int_setup = 0x000002c0,
+       .hal_reo1_ring_msi1_base_lsb = 0x000002e4,
+       .hal_reo1_ring_msi1_base_msb = 0x000002e8,
+       .hal_reo1_ring_msi1_data = 0x000002ec,
+       .hal_reo2_ring_base_lsb = 0x000002f4,
+       .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+       .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+       .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+       .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+       /* REO2SW(x) R2 ring pointers (head/tail) address */
+       .hal_reo1_ring_hp = 0x00003038,
+       .hal_reo1_ring_tp = 0x0000303c,
+       .hal_reo2_ring_hp = 0x00003040,
+
+       /* REO2TCL R0 ring configuration address */
+       .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+       .hal_reo_tcl_ring_hp = 0x00003058,
+
+       /* REO CMD ring address */
+       .hal_reo_cmd_ring_base_lsb = 0x00000194,
+       .hal_reo_cmd_ring_hp = 0x00003020,
+
+       /* REO status address */
+       .hal_reo_status_ring_base_lsb = 0x00000504,
+       .hal_reo_status_hp = 0x00003070,
+
+       /* SW2REO ring address */
+       .hal_sw2reo_ring_base_lsb = 0x000001ec,
+       .hal_sw2reo_ring_hp = 0x00003028,
+
+       /* WCSS relative address */
+       .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
+       .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
+       .hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
+       .hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,
+
+       /* WBM Idle address */
+       .hal_wbm_idle_link_ring_base_lsb = 0x00000860,
+       .hal_wbm_idle_link_ring_misc = 0x00000870,
+
+       /* SW2WBM release address */
+       .hal_wbm_release_ring_base_lsb = 0x000001d8,
+
+       /* WBM2SW release address */
+       .hal_wbm0_release_ring_base_lsb = 0x00000910,
+       .hal_wbm1_release_ring_base_lsb = 0x00000968,
+
+       /* PCIe base address; zero: IPQ8074 is an AHB/SoC part, not PCIe */
+       .pcie_qserdes_sysclk_en_sel = 0x0,
+       .pcie_pcs_osc_dtct_config_base = 0x0,
+
+       /* Shadow register area; zero: shadow registers unsupported here */
+       .hal_shadow_base_addr = 0x0,
+
+       /* REO misc control register, not used in IPQ8074 */
+       .hal_reo1_misc_ctl = 0x0,
+};
+
+/*
+ * HAL register offset table for QCA6390 hardware.  Byte offsets into the
+ * device register space, selected via the hw_params .regs pointer.
+ * NOTE(review): offsets presumably mirror the upstream Linux ath12k/ath11k
+ * driver's table for this chip -- verify against upstream before changing.
+ */
+const struct ath12k_hw_regs qca6390_regs = {
+       /* SW2TCL(x) R0 ring configuration address */
+       .hal_tcl1_ring_base_lsb = 0x00000684,
+       .hal_tcl1_ring_base_msb = 0x00000688,
+       .hal_tcl1_ring_id = 0x0000068c,
+       .hal_tcl1_ring_misc = 0x00000694,
+       .hal_tcl1_ring_tp_addr_lsb = 0x000006a0,
+       .hal_tcl1_ring_tp_addr_msb = 0x000006a4,
+       .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006b4,
+       .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006b8,
+       .hal_tcl1_ring_msi1_base_lsb = 0x000006cc,
+       .hal_tcl1_ring_msi1_base_msb = 0x000006d0,
+       .hal_tcl1_ring_msi1_data = 0x000006d4,
+       .hal_tcl2_ring_base_lsb = 0x000006dc,
+       .hal_tcl_ring_base_lsb = 0x0000078c,
+
+       /* TCL STATUS ring address */
+       .hal_tcl_status_ring_base_lsb = 0x00000894,
+
+       /* REO2SW(x) R0 ring configuration address */
+       .hal_reo1_ring_base_lsb = 0x00000244,
+       .hal_reo1_ring_base_msb = 0x00000248,
+       .hal_reo1_ring_id = 0x0000024c,
+       .hal_reo1_ring_misc = 0x00000254,
+       .hal_reo1_ring_hp_addr_lsb = 0x00000258,
+       .hal_reo1_ring_hp_addr_msb = 0x0000025c,
+       .hal_reo1_ring_producer_int_setup = 0x00000268,
+       .hal_reo1_ring_msi1_base_lsb = 0x0000028c,
+       .hal_reo1_ring_msi1_base_msb = 0x00000290,
+       .hal_reo1_ring_msi1_data = 0x00000294,
+       .hal_reo2_ring_base_lsb = 0x0000029c,
+       .hal_reo1_aging_thresh_ix_0 = 0x0000050c,
+       .hal_reo1_aging_thresh_ix_1 = 0x00000510,
+       .hal_reo1_aging_thresh_ix_2 = 0x00000514,
+       .hal_reo1_aging_thresh_ix_3 = 0x00000518,
+
+       /* REO2SW(x) R2 ring pointers (head/tail) address */
+       .hal_reo1_ring_hp = 0x00003030,
+       .hal_reo1_ring_tp = 0x00003034,
+       .hal_reo2_ring_hp = 0x00003038,
+
+       /* REO2TCL R0 ring configuration address */
+       .hal_reo_tcl_ring_base_lsb = 0x000003a4,
+       .hal_reo_tcl_ring_hp = 0x00003050,
+
+       /* REO CMD ring address */
+       .hal_reo_cmd_ring_base_lsb = 0x00000194,
+       .hal_reo_cmd_ring_hp = 0x00003020,
+
+       /* REO status address */
+       .hal_reo_status_ring_base_lsb = 0x000004ac,
+       .hal_reo_status_hp = 0x00003068,
+
+       /* SW2REO ring address */
+       .hal_sw2reo_ring_base_lsb = 0x000001ec,
+       .hal_sw2reo_ring_hp = 0x00003028,
+
+       /* WCSS relative address */
+       .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
+       .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
+       .hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
+       .hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,
+
+       /* WBM Idle address */
+       .hal_wbm_idle_link_ring_base_lsb = 0x00000860,
+       .hal_wbm_idle_link_ring_misc = 0x00000870,
+
+       /* SW2WBM release address */
+       .hal_wbm_release_ring_base_lsb = 0x000001d8,
+
+       /* WBM2SW release address */
+       .hal_wbm0_release_ring_base_lsb = 0x00000910,
+       .hal_wbm1_release_ring_base_lsb = 0x00000968,
+
+       /* PCIe base address */
+       .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
+       .pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+
+       /* Shadow register area */
+       .hal_shadow_base_addr = 0x000008fc,
+
+       /* REO misc control register, not used in QCA6390 */
+       .hal_reo1_misc_ctl = 0x0,
+};
+
+/*
+ * HAL register offset table for QCN9074 hardware.  Byte offsets into the
+ * device register space, selected via the hw_params .regs pointer.
+ * NOTE(review): offsets presumably mirror the upstream Linux ath12k/ath11k
+ * driver's table for this chip -- verify against upstream before changing.
+ */
+const struct ath12k_hw_regs qcn9074_regs = {
+       /* SW2TCL(x) R0 ring configuration address */
+       .hal_tcl1_ring_base_lsb = 0x000004f0,
+       .hal_tcl1_ring_base_msb = 0x000004f4,
+       .hal_tcl1_ring_id = 0x000004f8,
+       .hal_tcl1_ring_misc = 0x00000500,
+       .hal_tcl1_ring_tp_addr_lsb = 0x0000050c,
+       .hal_tcl1_ring_tp_addr_msb = 0x00000510,
+       .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000520,
+       .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000524,
+       .hal_tcl1_ring_msi1_base_lsb = 0x00000538,
+       .hal_tcl1_ring_msi1_base_msb = 0x0000053c,
+       .hal_tcl1_ring_msi1_data = 0x00000540,
+       .hal_tcl2_ring_base_lsb = 0x00000548,
+       .hal_tcl_ring_base_lsb = 0x000005f8,
+
+       /* TCL STATUS ring address */
+       .hal_tcl_status_ring_base_lsb = 0x00000700,
+
+       /* REO2SW(x) R0 ring configuration address */
+       .hal_reo1_ring_base_lsb = 0x0000029c,
+       .hal_reo1_ring_base_msb = 0x000002a0,
+       .hal_reo1_ring_id = 0x000002a4,
+       .hal_reo1_ring_misc = 0x000002ac,
+       .hal_reo1_ring_hp_addr_lsb = 0x000002b0,
+       .hal_reo1_ring_hp_addr_msb = 0x000002b4,
+       .hal_reo1_ring_producer_int_setup = 0x000002c0,
+       .hal_reo1_ring_msi1_base_lsb = 0x000002e4,
+       .hal_reo1_ring_msi1_base_msb = 0x000002e8,
+       .hal_reo1_ring_msi1_data = 0x000002ec,
+       .hal_reo2_ring_base_lsb = 0x000002f4,
+       .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+       .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+       .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+       .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+       /* REO2SW(x) R2 ring pointers (head/tail) address */
+       .hal_reo1_ring_hp = 0x00003038,
+       .hal_reo1_ring_tp = 0x0000303c,
+       .hal_reo2_ring_hp = 0x00003040,
+
+       /* REO2TCL R0 ring configuration address */
+       .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+       .hal_reo_tcl_ring_hp = 0x00003058,
+
+       /* REO CMD ring address */
+       .hal_reo_cmd_ring_base_lsb = 0x00000194,
+       .hal_reo_cmd_ring_hp = 0x00003020,
+
+       /* REO status address */
+       .hal_reo_status_ring_base_lsb = 0x00000504,
+       .hal_reo_status_hp = 0x00003070,
+
+       /* SW2REO ring address */
+       .hal_sw2reo_ring_base_lsb = 0x000001ec,
+       .hal_sw2reo_ring_hp = 0x00003028,
+
+       /* WCSS relative address */
+       .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
+       .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
+       .hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
+       .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
+
+       /* WBM Idle address */
+       .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+       .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+       /* SW2WBM release address */
+       .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+       /* WBM2SW release address */
+       .hal_wbm0_release_ring_base_lsb = 0x00000924,
+       .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+
+       /* PCIe base address */
+       .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
+       .pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
+
+       /* Shadow register area; zero: shadow registers unsupported here */
+       .hal_shadow_base_addr = 0x0,
+
+       /* REO misc control register, not used in QCN9074 */
+       .hal_reo1_misc_ctl = 0x0,
+};
+
+/*
+ * HAL register offset table for WCN6855 hardware (used by both the hw2.0
+ * and hw2.1 entries in ath12k_hw_params).  Byte offsets into the device
+ * register space, selected via the hw_params .regs pointer.
+ * NOTE(review): offsets presumably mirror the upstream Linux ath12k/ath11k
+ * driver's table for this chip -- verify against upstream before changing.
+ */
+const struct ath12k_hw_regs wcn6855_regs = {
+       /* SW2TCL(x) R0 ring configuration address */
+       .hal_tcl1_ring_base_lsb = 0x00000690,
+       .hal_tcl1_ring_base_msb = 0x00000694,
+       .hal_tcl1_ring_id = 0x00000698,
+       .hal_tcl1_ring_misc = 0x000006a0,
+       .hal_tcl1_ring_tp_addr_lsb = 0x000006ac,
+       .hal_tcl1_ring_tp_addr_msb = 0x000006b0,
+       .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c0,
+       .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c4,
+       .hal_tcl1_ring_msi1_base_lsb = 0x000006d8,
+       .hal_tcl1_ring_msi1_base_msb = 0x000006dc,
+       .hal_tcl1_ring_msi1_data = 0x000006e0,
+       .hal_tcl2_ring_base_lsb = 0x000006e8,
+       .hal_tcl_ring_base_lsb = 0x00000798,
+
+       /* TCL STATUS ring address */
+       .hal_tcl_status_ring_base_lsb = 0x000008a0,
+
+       /* REO2SW(x) R0 ring configuration address */
+       .hal_reo1_ring_base_lsb = 0x00000244,
+       .hal_reo1_ring_base_msb = 0x00000248,
+       .hal_reo1_ring_id = 0x0000024c,
+       .hal_reo1_ring_misc = 0x00000254,
+       .hal_reo1_ring_hp_addr_lsb = 0x00000258,
+       .hal_reo1_ring_hp_addr_msb = 0x0000025c,
+       .hal_reo1_ring_producer_int_setup = 0x00000268,
+       .hal_reo1_ring_msi1_base_lsb = 0x0000028c,
+       .hal_reo1_ring_msi1_base_msb = 0x00000290,
+       .hal_reo1_ring_msi1_data = 0x00000294,
+       .hal_reo2_ring_base_lsb = 0x0000029c,
+       .hal_reo1_aging_thresh_ix_0 = 0x000005bc,
+       .hal_reo1_aging_thresh_ix_1 = 0x000005c0,
+       .hal_reo1_aging_thresh_ix_2 = 0x000005c4,
+       .hal_reo1_aging_thresh_ix_3 = 0x000005c8,
+
+       /* REO2SW(x) R2 ring pointers (head/tail) address */
+       .hal_reo1_ring_hp = 0x00003030,
+       .hal_reo1_ring_tp = 0x00003034,
+       .hal_reo2_ring_hp = 0x00003038,
+
+       /* REO2TCL R0 ring configuration address */
+       .hal_reo_tcl_ring_base_lsb = 0x00000454,
+       .hal_reo_tcl_ring_hp = 0x00003060,
+
+       /* REO CMD ring address */
+       .hal_reo_cmd_ring_base_lsb = 0x00000194,
+       .hal_reo_cmd_ring_hp = 0x00003020,
+
+       /* REO status address */
+       .hal_reo_status_ring_base_lsb = 0x0000055c,
+       .hal_reo_status_hp = 0x00003078,
+
+       /* SW2REO ring address */
+       .hal_sw2reo_ring_base_lsb = 0x000001ec,
+       .hal_sw2reo_ring_hp = 0x00003028,
+
+       /* WCSS relative address */
+       .hal_seq_wcss_umac_ce0_src_reg = 0x1b80000,
+       .hal_seq_wcss_umac_ce0_dst_reg = 0x1b81000,
+       .hal_seq_wcss_umac_ce1_src_reg = 0x1b82000,
+       .hal_seq_wcss_umac_ce1_dst_reg = 0x1b83000,
+
+       /* WBM Idle address */
+       .hal_wbm_idle_link_ring_base_lsb = 0x00000870,
+       .hal_wbm_idle_link_ring_misc = 0x00000880,
+
+       /* SW2WBM release address */
+       .hal_wbm_release_ring_base_lsb = 0x000001e8,
+
+       /* WBM2SW release address */
+       .hal_wbm0_release_ring_base_lsb = 0x00000920,
+       .hal_wbm1_release_ring_base_lsb = 0x00000978,
+
+       /* PCIe base address */
+       .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
+       .pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+
+       /* Shadow register area */
+       .hal_shadow_base_addr = 0x000008fc,
+
+       /* REO misc control register, used for fragment
+        * destination ring config in WCN6855.
+        */
+       .hal_reo1_misc_ctl = 0x00000630,
+};
+
+/*
+ * Register offset map for WCN6750 chips.  Only raw MMIO offsets live
+ * here; NOTE(review): these values look carried over from the Linux
+ * ath11k/ath12k register tables -- confirm against upstream before
+ * trusting them on real ath12k-generation hardware.
+ */
+const struct ath12k_hw_regs wcn6750_regs = {
+       /* SW2TCL(x) R0 ring configuration address */
+       .hal_tcl1_ring_base_lsb = 0x00000694,
+       .hal_tcl1_ring_base_msb = 0x00000698,
+       .hal_tcl1_ring_id = 0x0000069c,
+       .hal_tcl1_ring_misc = 0x000006a4,
+       .hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
+       .hal_tcl1_ring_tp_addr_msb = 0x000006b4,
+       .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
+       .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
+       .hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
+       .hal_tcl1_ring_msi1_base_msb = 0x000006e0,
+       .hal_tcl1_ring_msi1_data = 0x000006e4,
+       .hal_tcl2_ring_base_lsb = 0x000006ec,
+       .hal_tcl_ring_base_lsb = 0x0000079c,
+
+       /* TCL STATUS ring address */
+       .hal_tcl_status_ring_base_lsb = 0x000008a4,
+
+       /* REO2SW(x) R0 ring configuration address */
+       .hal_reo1_ring_base_lsb = 0x000001ec,
+       .hal_reo1_ring_base_msb = 0x000001f0,
+       .hal_reo1_ring_id = 0x000001f4,
+       .hal_reo1_ring_misc = 0x000001fc,
+       .hal_reo1_ring_hp_addr_lsb = 0x00000200,
+       .hal_reo1_ring_hp_addr_msb = 0x00000204,
+       .hal_reo1_ring_producer_int_setup = 0x00000210,
+       .hal_reo1_ring_msi1_base_lsb = 0x00000234,
+       .hal_reo1_ring_msi1_base_msb = 0x00000238,
+       .hal_reo1_ring_msi1_data = 0x0000023c,
+       .hal_reo2_ring_base_lsb = 0x00000244,
+       .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+       .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+       .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+       .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+       /* REO2SW(x) R2 ring pointers (head/tail) address */
+       .hal_reo1_ring_hp = 0x00003028,
+       .hal_reo1_ring_tp = 0x0000302c,
+       .hal_reo2_ring_hp = 0x00003030,
+
+       /* REO2TCL R0 ring configuration address */
+       .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+       .hal_reo_tcl_ring_hp = 0x00003058,
+
+       /* REO CMD ring address */
+       .hal_reo_cmd_ring_base_lsb = 0x000000e4,
+       .hal_reo_cmd_ring_hp = 0x00003010,
+
+       /* REO status address */
+       .hal_reo_status_ring_base_lsb = 0x00000504,
+       .hal_reo_status_hp = 0x00003070,
+
+       /* SW2REO ring address */
+       .hal_sw2reo_ring_base_lsb = 0x0000013c,
+       .hal_sw2reo_ring_hp = 0x00003018,
+
+       /* WCSS relative address */
+       .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
+       .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
+       .hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
+       .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
+
+       /* WBM Idle address */
+       .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+       .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+       /* SW2WBM release address */
+       .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+       /* WBM2SW release address */
+       .hal_wbm0_release_ring_base_lsb = 0x00000924,
+       .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+
+       /* PCIe base address */
+       /* NOTE(review): zero here -- the PCIe QSERDES/PCS workaround
+        * registers are presumably not used on WCN6750; verify in the
+        * code paths that consume these fields. */
+       .pcie_qserdes_sysclk_en_sel = 0x0,
+       .pcie_pcs_osc_dtct_config_base = 0x0,
+
+       /* Shadow register area */
+       .hal_shadow_base_addr = 0x00000504,
+
+       /* REO misc control register, used for fragment
+        * destination ring config in WCN6750.
+        */
+       .hal_reo1_misc_ctl = 0x000005d8,
+};
+
+/*
+ * Host/platform capability bit flags.  NOTE(review): presumably
+ * advertised to the firmware during the QMI host capability exchange
+ * below -- confirm against the code that fills in the host_cap request.
+ */
+#define QWZ_SLEEP_CLOCK_SELECT_INTERNAL_BIT    0x02
+#define QWZ_HOST_CSTATE_BIT                    0x04
+#define QWZ_PLATFORM_CAP_PCIE_GLOBAL_RESET     0x08
+#define QWZ_PLATFORM_CAP_PCIE_PME_D3COLD       0x10
+
+/*
+ * TLV element descriptors for struct qmi_response_type_v01, the common
+ * QMI status pair (result, error) embedded in every response message.
+ * As with all qmi_elem_info arrays in this file, the table is
+ * terminated by a QMI_EOTI sentinel entry.
+ */
+static const struct qmi_elem_info qmi_response_type_v01_ei[] = {
+       {
+               .data_type      = QMI_SIGNED_2_BYTE_ENUM,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint16_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+               .offset         = offsetof(struct qmi_response_type_v01, result),
+               .ei_array       = NULL,
+       },
+       {
+               .data_type      = QMI_SIGNED_2_BYTE_ENUM,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint16_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+               .offset         = offsetof(struct qmi_response_type_v01, error),
+               .ei_array       = NULL,
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .elem_len       = 0,
+               .elem_size      = 0,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+               .offset         = 0,
+               .ei_array       = NULL,
+       },
+};
+
+/*
+ * TLV element descriptors for the firmware "indication register"
+ * request.  Every optional field appears as a pair sharing one
+ * tlv_type: a QMI_OPT_FLAG presence byte (*_valid) followed by the
+ * value itself.  The tlv_type values and field widths are protocol
+ * constants and must not be changed.
+ */
+static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          fw_ready_enable_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          fw_ready_enable),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x11,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          initiate_cal_download_enable_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x11,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          initiate_cal_download_enable),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x12,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          initiate_cal_update_enable_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x12,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          initiate_cal_update_enable),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x13,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          msa_ready_enable_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x13,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          msa_ready_enable),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x14,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          pin_connect_result_enable_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x14,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          pin_connect_result_enable),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          client_id_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          client_id),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x16,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          request_mem_enable_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x16,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          request_mem_enable),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x17,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          fw_mem_ready_enable_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x17,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          fw_mem_ready_enable),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x18,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          fw_init_done_enable_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x18,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          fw_init_done_enable),
+       },
+
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x19,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          rejuvenate_enable_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x19,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          rejuvenate_enable),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x1A,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          xo_cal_enable_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x1A,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          xo_cal_enable),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x1B,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          cal_done_enable_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x1B,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                          cal_done_enable),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * TLV element descriptors for the "indication register" response:
+ * the mandatory status struct (decoded via qmi_response_type_v01_ei)
+ * plus an optional 64-bit fw_status field.
+ */
+static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = 1,
+               .elem_size      = sizeof(struct qmi_response_type_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x02,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+                                          resp),
+               .ei_array       = qmi_response_type_v01_ei,
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+                                          fw_status_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_8_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint64_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+                                          fw_status),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * TLV element descriptors for the host capability request.  All
+ * fields are optional (QMI_OPT_FLAG presence byte followed by the
+ * value under the same tlv_type).  The gpios field is a
+ * variable-length array: gpios_len (QMI_DATA_LEN) carries the element
+ * count for the VAR_LEN_ARRAY entry that follows it.
+ */
+static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          num_clients_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          num_clients),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x11,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          wake_msi_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x11,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          wake_msi),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x12,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          gpios_valid),
+       },
+       {
+               .data_type      = QMI_DATA_LEN,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x12,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          gpios_len),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = QMI_WLFW_MAX_NUM_GPIO_V01,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = VAR_LEN_ARRAY,
+               .tlv_type       = 0x12,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          gpios),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x13,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          nm_modem_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x13,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          nm_modem),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x14,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          bdf_support_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x14,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          bdf_support),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          bdf_cache_support_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          bdf_cache_support),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x16,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          m3_support_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x16,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          m3_support),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x17,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          m3_cache_support_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x17,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          m3_cache_support),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x18,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          cal_filesys_support_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x18,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          cal_filesys_support),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x19,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          cal_cache_support_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x19,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          cal_cache_support),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x1A,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          cal_done_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x1A,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          cal_done),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x1B,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          mem_bucket_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x1B,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          mem_bucket),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x1C,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          mem_cfg_mode_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x1C,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+                                          mem_cfg_mode),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * TLV element descriptors for the host capability response, which
+ * carries only the mandatory status struct.
+ */
+static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = 1,
+               .elem_size      = sizeof(struct qmi_response_type_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x02,
+               .offset         = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
+               .ei_array       = qmi_response_type_v01_ei,
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * TLV element descriptors for one memory configuration entry
+ * (offset/size/secure_flag).  Members use tlv_type 0 because this
+ * struct is only encoded nested inside another element (see
+ * qmi_wlanfw_mem_seg_s_v01_ei below, which references this table via
+ * .ei_array).
+ */
+static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_8_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint64_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * TLV element descriptors for one firmware memory segment request:
+ * size, segment type, and a variable-length array of nested
+ * qmi_wlanfw_mem_cfg_s_v01 entries whose count is carried by
+ * mem_cfg_len (QMI_DATA_LEN).
+ */
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_mem_seg_s_v01,
+                                 size),
+       },
+       {
+               .data_type      = QMI_SIGNED_4_BYTE_ENUM,
+               .elem_len       = 1,
+               .elem_size      = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
+       },
+       {
+               .data_type      = QMI_DATA_LEN,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
+       },
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
+               .elem_size      = sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
+               .array_type     = VAR_LEN_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
+               .ei_array       = qmi_wlanfw_mem_cfg_s_v01_ei,
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * TLV element descriptors for the firmware's "request memory"
+ * indication: a variable-length array of memory segment descriptors,
+ * with mem_seg_len (QMI_DATA_LEN) carrying the segment count.
+ */
+static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_DATA_LEN,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x01,
+               .offset         = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+                                          mem_seg_len),
+       },
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+               .elem_size      = sizeof(struct qmi_wlanfw_mem_seg_s_v01),
+               .array_type     = VAR_LEN_ARRAY,
+               .tlv_type       = 0x01,
+               .offset         = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+                                          mem_seg),
+               .ei_array       = qmi_wlanfw_mem_seg_s_v01_ei,
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * Encoding rules for one memory segment in the host's response:
+ * 64-bit DMA address, 32-bit size, segment type enum and a one-byte
+ * "restore" flag.  Embedded via ei_array from the respond-mem request.
+ */
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_8_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint64_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
+       },
+       {
+               .data_type      = QMI_SIGNED_4_BYTE_ENUM,
+               .elem_len       = 1,
+               .elem_size      = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * Encoding rules for the host's "respond memory" request: a segment
+ * count (tlv 0x01) followed by the variable-length segment array.
+ */
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_DATA_LEN,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x01,
+               .offset         = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+                                          mem_seg_len),
+       },
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+               .elem_size      = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
+               .array_type     = VAR_LEN_ARRAY,
+               .tlv_type       = 0x01,
+               .offset         = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+                                          mem_seg),
+               .ei_array       = qmi_wlanfw_mem_seg_resp_s_v01_ei,
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * Encoding rules for the "respond memory" response: just the standard
+ * QMI result structure (tlv 0x02).
+ */
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = 1,
+               .elem_size      = sizeof(struct qmi_response_type_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x02,
+               .offset         = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
+                                          resp),
+               .ei_array       = qmi_response_type_v01_ei,
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/* The capability request message carries no payload; terminator only. */
+static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/* Encoding rules for the RF chip info struct: chip id and family. */
+static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+                                          chip_id),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+                                          chip_family),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/* Encoding rules for the RF board info struct: a single board id. */
+static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
+                                          board_id),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/* Encoding rules for the SoC info struct: a single soc id. */
+static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * Encoding rules for the firmware version info struct: a 32-bit
+ * version number plus a NUL-terminated build timestamp string.
+ */
+static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+                                          fw_version),
+       },
+       {
+               .data_type      = QMI_STRING,
+               .elem_len       = ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
+               .elem_size      = sizeof(char),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+                                          fw_build_timestamp),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * Encoding rules for the capability response.  After the mandatory QMI
+ * result (tlv 0x02) come optional TLV pairs: each payload element
+ * (chip info, board info, soc info, fw version, build id, num_macs,
+ * voltage, time freq, OTP version, eeprom read timeout) is preceded by
+ * a QMI_OPT_FLAG presence marker with the same tlv type.
+ */
+static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = 1,
+               .elem_size      = sizeof(struct qmi_response_type_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x02,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
+               .ei_array       = qmi_response_type_v01_ei,
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          chip_info_valid),
+       },
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = 1,
+               .elem_size      = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          chip_info),
+               .ei_array       = qmi_wlanfw_rf_chip_info_s_v01_ei,
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x11,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          board_info_valid),
+       },
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = 1,
+               .elem_size      = sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x11,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          board_info),
+               .ei_array       = qmi_wlanfw_rf_board_info_s_v01_ei,
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x12,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          soc_info_valid),
+       },
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = 1,
+               .elem_size      = sizeof(struct qmi_wlanfw_soc_info_s_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x12,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          soc_info),
+               .ei_array       = qmi_wlanfw_soc_info_s_v01_ei,
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x13,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          fw_version_info_valid),
+       },
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = 1,
+               .elem_size      = sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x13,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          fw_version_info),
+               .ei_array       = qmi_wlanfw_fw_version_info_s_v01_ei,
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x14,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          fw_build_id_valid),
+       },
+       {
+               .data_type      = QMI_STRING,
+               .elem_len       = ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
+               .elem_size      = sizeof(char),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x14,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          fw_build_id),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          num_macs_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          num_macs),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x16,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          voltage_mv_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x16,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          voltage_mv),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x17,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          time_freq_hz_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x17,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          time_freq_hz),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x18,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          otp_version_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x18,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          otp_version),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x19,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          eeprom_read_timeout_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x19,
+               .offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+                                          eeprom_read_timeout),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * Encoding rules for the board-data-file download request.  One
+ * mandatory "valid" byte (tlv 0x01), then optional TLV pairs (presence
+ * flag + payload under the same tlv type) for file id, total size,
+ * segment id, the data chunk itself (length-prefixed byte array,
+ * tlv 0x13), an end-of-transfer marker and the BDF type.
+ */
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x01,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          valid),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          file_id_valid),
+       },
+       {
+               .data_type      = QMI_SIGNED_4_BYTE_ENUM,
+               .elem_len       = 1,
+               .elem_size      = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          file_id),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x11,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          total_size_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x11,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          total_size),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x12,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          seg_id_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x12,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          seg_id),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x13,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          data_valid),
+       },
+       {
+               /* 16-bit length prefix for the variable-length data array below. */
+               .data_type      = QMI_DATA_LEN,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint16_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x13,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          data_len),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = QMI_WLANFW_MAX_DATA_SIZE_V01,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = VAR_LEN_ARRAY,
+               .tlv_type       = 0x13,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          data),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x14,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          end_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x14,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          end),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          bdf_type_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+                                          bdf_type),
+       },
+
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/* Encoding rules for the BDF download response: standard QMI result only. */
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = 1,
+               .elem_size      = sizeof(struct qmi_response_type_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x02,
+               .offset         = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
+                                          resp),
+               .ei_array       = qmi_response_type_v01_ei,
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * Encoding rules for the M3 firmware info request: 64-bit address
+ * (tlv 0x01) and 32-bit size (tlv 0x02) of the M3 image.
+ */
+static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_8_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint64_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x01,
+               .offset         = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x02,
+               .offset         = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/* Encoding rules for the M3 info response: standard QMI result only. */
+static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = 1,
+               .elem_size      = sizeof(struct qmi_response_type_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x02,
+               .offset         = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
+               .ei_array       = qmi_response_type_v01_ei,
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * Encoding rules for the WLAN INI request: an optional firmware-log
+ * enable flag (presence marker + value, both tlv 0x10).
+ */
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+                                          enablefwlog_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+                                          enablefwlog),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/* Encoding rules for the WLAN INI response: standard QMI result only. */
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = 1,
+               .elem_size      = sizeof(struct qmi_response_type_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x02,
+               .offset         = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
+                                          resp),
+               .ei_array       = qmi_response_type_v01_ei,
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * Encoding rules for one copy-engine target pipe config entry:
+ * pipe number, direction enum, entry count, max transfer size, flags.
+ */
+static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+                                          pipe_num),
+       },
+       {
+               .data_type      = QMI_SIGNED_4_BYTE_ENUM,
+               .elem_len       = 1,
+               .elem_size      = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+                                          pipe_dir),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+                                          nentries),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+                                          nbytes_max),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+                                          flags),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * Encoding rules for one copy-engine service-to-pipe mapping entry:
+ * service id, direction enum and pipe number.
+ */
+static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+                                          service_id),
+       },
+       {
+               .data_type      = QMI_SIGNED_4_BYTE_ENUM,
+               .elem_len       = 1,
+               .elem_size      = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+                                          pipe_dir),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+                                          pipe_num),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * Encoding rules for one shadow register config entry: 16-bit id and
+ * 16-bit offset.
+ *
+ * Fixed the QMI_EOTI terminator: the original assigned
+ * QMI_COMMON_TLV_TYPE to .array_type and left .tlv_type unset — a
+ * field mix-up inherited from the upstream Linux driver.  Harmless in
+ * practice (decoders stop at QMI_EOTI before consulting array_type),
+ * but now consistent with every other table in this file.
+ */
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_2_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint16_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_2_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint16_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
+                                          offset),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/* Encoding rules for one v2 shadow register entry: a 32-bit address. */
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0,
+               .offset         = offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01,
+                                          addr),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+/*
+ * Encoding rules for the WLAN mode request: a mandatory 32-bit mode
+ * (tlv 0x01) and an optional hw_debug flag (presence + value, tlv 0x10).
+ */
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint32_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x01,
+               .offset         = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+                                          mode),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+                                          hw_debug_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_1_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(uint8_t),
+               .array_type     = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+                                          hw_debug),
+       },
+       {
+               .data_type      = QMI_EOTI,
+               .array_type     = NO_ARRAY,
+               .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
/*
 * QMI element info for the WLAN mode response message:
 * just the standard QMI result struct (TLV 0x02).
 */
static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
+
/*
 * QMI element info for the WLAN config request message.
 * Optional element groups, by TLV type:
 *   0x10 host_version string
 *   0x11 tgt_cfg array (CE target pipe configuration)
 *   0x12 svc_cfg array (CE service pipe configuration)
 *   0x13 shadow_reg array
 *   0x14 shadow_reg_v2 array
 * Each group consists of a QMI_OPT_FLAG "valid" element and, for the
 * variable-length arrays, a QMI_DATA_LEN element preceding the payload.
 */
static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
	/* 0x10: optional host version string. */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   host_version_valid),
	},
	{
		.data_type	= QMI_STRING,
		.elem_len	= QMI_WLANFW_MAX_STR_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   host_version),
	},
	/* 0x11: optional CE target pipe configuration array. */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_CE_V01,
		.elem_size	= sizeof(
				struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg),
		.ei_array	= qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
	},
	/* 0x12: optional CE service pipe configuration array. */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SVC_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg),
		.ei_array	= qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
	},
	/* 0x13: optional shadow register configuration array. */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg),
		.ei_array	= qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
	},
	/* 0x14: optional shadow register v2 configuration array. */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v2_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v2_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v2),
		.ei_array	= qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
+
/*
 * QMI element info for the WLAN config response message:
 * just the standard QMI result struct (TLV 0x02).
 */
static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
+
+int
+qwz_ce_intr(void *arg)
+{
+       struct qwz_ce_pipe *pipe = arg;
+       struct qwz_softc *sc = pipe->sc;
+
+       if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, sc->sc_flags) ||
+           ((sc->msi_ce_irqmask & (1 << pipe->pipe_num)) == 0)) {
+               DPRINTF("%s: unexpected interrupt on pipe %d\n",
+                   __func__, pipe->pipe_num);
+               return 1;
+       }
+
+       return qwz_ce_per_engine_service(sc, pipe->pipe_num);
+}
+
+int
+qwz_ext_intr(void *arg)
+{
+       struct qwz_ext_irq_grp *irq_grp = arg;
+       struct qwz_softc *sc = irq_grp->sc;
+
+       if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags)) {
+               DPRINTF("%s: unexpected interrupt for ext group %d\n",
+                   __func__, irq_grp->grp_id);
+               return 1;
+       }
+
+       return qwz_dp_service_srng(sc, irq_grp->grp_id);
+}
+
/*
 * Human-readable names for QMI element data types, used by debug and
 * error messages below; indexed by the data type value itself.
 */
static const char *qmi_data_type_name[QMI_NUM_DATA_TYPES] = {
	"EOTI",
	"OPT_FLAG",
	"DATA_LEN",
	"UNSIGNED_1_BYTE",
	"UNSIGNED_2_BYTE",
	"UNSIGNED_4_BYTE",
	"UNSIGNED_8_BYTE",
	"SIGNED_2_BYTE_ENUM",
	"SIGNED_4_BYTE_ENUM",
	"STRUCT",
	"STRING"
};
+
+const struct qmi_elem_info *
+qwz_qmi_decode_get_elem(const struct qmi_elem_info *ei, uint8_t elem_type)
+{
+       while (ei->data_type != QMI_EOTI && ei->tlv_type != elem_type)
+               ei++;
+
+       DNPRINTF(QWZ_D_QMI, "%s: found elem 0x%x data type 0x%x\n", __func__,
+           ei->tlv_type, ei->data_type);
+       return ei;
+}
+
/*
 * Compute the minimum number of encoded bytes a QMI element occupies.
 * For struct elements this recurses over the member element info array;
 * 'nested' tracks the recursion depth.  Used to validate element sizes
 * found in received messages before decoding.
 */
size_t
qwz_qmi_decode_min_elem_size(const struct qmi_elem_info *ei, int nested)
{
	size_t min_size = 0;

	switch (ei->data_type) {
	case QMI_EOTI:
	case QMI_OPT_FLAG:
		/* Not encoded in the element payload itself. */
		break;
	case QMI_DATA_LEN:
		/*
		 * In-band length field.
		 * NOTE(review): all DATA_LEN entries in the tables above
		 * set elem_len to 1, so this always yields one byte even
		 * when elem_size is sizeof(uint16_t) -- confirm whether
		 * this should test elem_size instead.
		 */
		if (ei->elem_len == 1)
			min_size += sizeof(uint8_t);
		else
			min_size += sizeof(uint16_t);
		break;
	case QMI_UNSIGNED_1_BYTE:
	case QMI_UNSIGNED_2_BYTE:
	case QMI_UNSIGNED_4_BYTE:
	case QMI_UNSIGNED_8_BYTE:
	case QMI_SIGNED_2_BYTE_ENUM:
	case QMI_SIGNED_4_BYTE_ENUM:
		/* Fixed-width elements: all array members are encoded. */
		min_size += ei->elem_len * ei->elem_size;
		break;
	case QMI_STRUCT:
		if (nested > 2) {
			/*
			 * NOTE(review): this only warns; the recursion
			 * below still proceeds.  The static tables in this
			 * file are shallow, so this is presumably
			 * unreachable -- confirm.
			 */
			printf("%s: QMI struct element 0x%x with "
			    "data type %s (0x%x) is nested too "
			    "deeply\n", __func__,
			    ei->tlv_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type);
		}
		/* Sum the minimum sizes of all struct members. */
		ei = ei->ei_array;
		while (ei->data_type != QMI_EOTI) {
			min_size += qwz_qmi_decode_min_elem_size(ei,
			    nested + 1);
			ei++;
		}
		break;
	case QMI_STRING:
		min_size += 1;
		/* Strings nested in structs use an in-band length field. */
		if (nested) {
			if (ei->elem_len <= 0xff)
				min_size += sizeof(uint8_t);
			else
				min_size += sizeof(uint16_t);
		}
		break;
	default:
		printf("%s: unhandled data type 0x%x\n", __func__,
		    ei->data_type);
		break;
	}

	return min_size;
}
+
/*
 * Parse and validate one QMI TLV header (1-byte element type followed
 * by a length field) from 'input'.
 * On success, *next_ei points at the element info entry matching the
 * element type (the QMI_EOTI terminator for unrecognized elements,
 * which the caller skips) and *actual_size holds the element length
 * from the header.  The element size is checked against the element
 * info and the output buffer bounds.
 * Returns 0 on success, -1 on malformed input.
 */
int
qwz_qmi_decode_tlv_hdr(struct qwz_softc *sc,
    const struct qmi_elem_info **next_ei, uint16_t *actual_size,
    size_t output_len, const struct qmi_elem_info *ei0,
    uint8_t *input, size_t input_len)
{
	uint8_t *p = input;
	size_t remain = input_len;
	uint8_t elem_type;
	uint16_t elem_size = 0;
	const struct qmi_elem_info *ei;

	*next_ei = NULL;
	*actual_size = 0;

	/* Need at least a type byte plus a length field. */
	if (remain < 3) {
		printf("%s: QMI message TLV header too short\n",
		   sc->sc_dev.dv_xname);
		return -1;
	}
	elem_type = *p;
	p++;
	remain--;

	/*
	 * By relying on TLV type information we can skip over EIs which
	 * describe optional elements that have not been encoded.
	 * Such elements will be left at their default value (zero) in
	 * the decoded output struct.
	 * XXX We currently allow elements to appear in any order and
	 * we do not detect duplicates.
	 */
	ei = qwz_qmi_decode_get_elem(ei0, elem_type);

	DNPRINTF(QWZ_D_QMI,
	    "%s: decoding element 0x%x with data type %s (0x%x)\n",
	    __func__, elem_type, qmi_data_type_name[ei->data_type],
	    ei->data_type);

	if (remain < 2) {
		printf("%s: QMI message too short\n", sc->sc_dev.dv_xname);
		return -1;
	}

	if (ei->data_type == QMI_DATA_LEN && ei->elem_len == 1) {
		/*
		 * NOTE(review): this branch consumes only one length byte,
		 * while qwz_qmi_decode_msg() always advances past a 3-byte
		 * TLV header; all DATA_LEN entries in the tables above have
		 * elem_len == 1, so this branch is always taken for them.
		 * Verify the intended condition (elem_size?) and the
		 * accounting in the caller.
		 */
		elem_size = p[0];
		p++;
		remain--;
	} else {
		/* Length field is 16-bit little-endian. */
		elem_size = (p[0] | (p[1] << 8));
		p += 2;
		remain -= 2;
	}

	*next_ei = ei;
	*actual_size = elem_size;

	if (ei->data_type == QMI_EOTI) {
		/* Unknown element type; the caller will skip over it. */
		DNPRINTF(QWZ_D_QMI,
		    "%s: unrecognized QMI element type 0x%x size %u\n",
		    sc->sc_dev.dv_xname, elem_type, elem_size);
		return 0;
	}

	/*
	 * Is this an optional element which has been encoded?
	 * If so, use info about this optional element for verification.
	 */
	if (ei->data_type == QMI_OPT_FLAG)
		ei++;

	DNPRINTF(QWZ_D_QMI, "%s: ei->size %u, actual size %u\n", __func__,
	    ei->elem_size, *actual_size);

	/* Validate the advertised element size against the element info. */
	switch (ei->data_type) {
	case QMI_UNSIGNED_1_BYTE:
	case QMI_UNSIGNED_2_BYTE:
	case QMI_UNSIGNED_4_BYTE:
	case QMI_UNSIGNED_8_BYTE:
	case QMI_SIGNED_2_BYTE_ENUM:
	case QMI_SIGNED_4_BYTE_ENUM:
		/* Fixed-width elements must match exactly. */
		if (elem_size != ei->elem_size) {
			printf("%s: QMI message element 0x%x "
			    "data type %s (0x%x) with bad size: %u\n",
			    sc->sc_dev.dv_xname, elem_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type, elem_size);
			return -1;
		}
		break;
	case QMI_DATA_LEN:
		break;
	case QMI_STRING:
	case QMI_STRUCT:
		/* Variable-size elements must meet their minimum size. */
		if (elem_size < qwz_qmi_decode_min_elem_size(ei, 0)) {
			printf("%s: QMI message element 0x%x "
			    "data type %s (0x%x) with bad size: %u\n",
			    sc->sc_dev.dv_xname, elem_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type, elem_size);
			return -1;
		}
		break;
	default:
		printf("%s: unexpected QMI message element "
		    "data type 0x%x\n", sc->sc_dev.dv_xname,
		    ei->data_type);
		return -1;
	}

	if (remain < elem_size) {
		printf("%s: QMI message too short\n", sc->sc_dev.dv_xname);
		return -1;
	}

	/* The decoded element must fit within the output struct. */
	if (ei->offset + ei->elem_size > output_len) {
		printf("%s: QMI message element type 0x%x too large: %u\n",
		    sc->sc_dev.dv_xname, elem_type, ei->elem_size);
		return -1;
	}

	return 0;
}
+
+int
+qwz_qmi_decode_byte(void *output, const struct qmi_elem_info *ei, void *input)
+{
+       if (ei->elem_size != sizeof(uint8_t)) {
+               printf("%s: bad element size\n", __func__);
+               return -1;
+       }
+
+       DNPRINTF(QWZ_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
+           __func__, ei->tlv_type, ei->data_type, ei->elem_size);
+       memcpy(output, input, ei->elem_size);
+       return 0;
+}
+
+int
+qwz_qmi_decode_word(void *output, const struct qmi_elem_info *ei, void *input)
+{
+       if (ei->elem_size != sizeof(uint16_t)) {
+               printf("%s: bad element size\n", __func__);
+               return -1;
+       }
+
+       DNPRINTF(QWZ_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
+           __func__, ei->tlv_type, ei->data_type, ei->elem_size);
+       memcpy(output, input, ei->elem_size);
+       return 0;
+}
+
+int
+qwz_qmi_decode_dword(void *output, const struct qmi_elem_info *ei, void *input)
+{
+       if (ei->elem_size != sizeof(uint32_t)) {
+               printf("%s: bad element size\n", __func__);
+               return -1;
+       }
+
+       DNPRINTF(QWZ_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
+           __func__, ei->tlv_type, ei->data_type, ei->elem_size);
+       memcpy(output, input, ei->elem_size);
+       return 0;
+}
+
+int
+qwz_qmi_decode_qword(void *output, const struct qmi_elem_info *ei, void *input)
+{
+       if (ei->elem_size != sizeof(uint64_t)) {
+               printf("%s: bad element size\n", __func__);
+               return -1;
+       }
+
+       DNPRINTF(QWZ_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
+           __func__, ei->tlv_type, ei->data_type, ei->elem_size);
+       memcpy(output, input, ei->elem_size);
+       return 0;
+}
+
+int
+qwz_qmi_decode_datalen(struct qwz_softc *sc, size_t *used, uint32_t *datalen,
+    void *output, size_t output_len, const struct qmi_elem_info *ei,
+    uint8_t *input, uint16_t input_len)
+{
+       uint8_t *p = input;
+       size_t remain = input_len;
+
+       *datalen = 0;
+
+       DNPRINTF(QWZ_D_QMI, "%s: input: ", __func__);
+       for (int i = 0; i < input_len; i++) {
+               DNPRINTF(QWZ_D_QMI, " %02x", input[i]);
+       }
+       DNPRINTF(QWZ_D_QMI, "\n");
+
+       if (remain < ei->elem_size) {
+               printf("%s: QMI message too short: remain=%zu elem_size=%u\n", __func__, remain, ei->elem_size);
+               return -1;
+       }
+
+       switch (ei->elem_size) {
+       case sizeof(uint8_t):
+               *datalen = p[0];
+               break;
+       case sizeof(uint16_t):
+               *datalen = p[0] | (p[1] << 8);
+               break;
+       default:
+               printf("%s: bad datalen element size %u\n",
+                   sc->sc_dev.dv_xname, ei->elem_size);
+               return -1;
+               
+       }
+       *used = ei->elem_size;
+
+       if (ei->offset + sizeof(*datalen) > output_len) {
+               printf("%s: QMI message element type 0x%x too large\n",
+                   sc->sc_dev.dv_xname, ei->tlv_type);
+               return -1;
+       }
+       memcpy(output + ei->offset, datalen, sizeof(*datalen));
+       return 0;
+}
+
+int
+qwz_qmi_decode_string(struct qwz_softc *sc, size_t *used_total,
+    void *output, size_t output_len, const struct qmi_elem_info *ei,
+    uint8_t *input, uint16_t input_len, uint16_t elem_size, int nested)
+{
+       uint8_t *p = input;
+       uint16_t len;
+       size_t remain = input_len;
+
+       *used_total = 0;
+
+       DNPRINTF(QWZ_D_QMI, "%s: input: ", __func__);
+       for (int i = 0; i < input_len; i++) {
+               DNPRINTF(QWZ_D_QMI, " %02x", input[i]);
+       }
+       DNPRINTF(QWZ_D_QMI, "\n");
+
+       if (nested) {
+               /* Strings nested in structs use an in-band length field. */
+               if (ei->elem_len <= 0xff) {
+                       if (remain == 0) {
+                               printf("%s: QMI string length header exceeds "
+                                   "input buffer size\n", __func__);
+                               return -1;
+                       }
+                       len = p[0];
+                       p++;
+                       (*used_total)++;
+                       remain--;
+               } else {
+                       if (remain < 2) {
+                               printf("%s: QMI string length header exceeds "
+                                   "input buffer size\n", __func__);
+                               return -1;
+                       }
+                       len = p[0] | (p[1] << 8);
+                       p += 2;
+                       *used_total += 2;
+                       remain -= 2;
+               }
+       } else
+               len = elem_size;
+
+       if (len > ei->elem_len) {
+               printf("%s: QMI string element of length %u exceeds "
+                   "maximum length %u\n", __func__, len, ei->elem_len);
+               return -1;
+       }
+       if (len > remain) {
+               printf("%s: QMI string element of length %u exceeds "
+                   "input buffer size %zu\n", __func__, len, remain);
+               return -1;
+       }
+       if (len > output_len) {
+               printf("%s: QMI string element of length %u exceeds "
+                   "output buffer size %zu\n", __func__, len, output_len);
+               return -1;
+       }
+
+       memcpy(output, p, len);
+
+       p = output;
+       p[len] = '\0';
+       DNPRINTF(QWZ_D_QMI, "%s: string (len %u): %s\n", __func__, len, p);
+
+       *used_total += len;
+       return 0;
+}
+
/*
 * Decode the members of a QMI struct element into 'output'.
 * 'struct_ei' describes the struct; its ei_array lists the members in
 * encoding order.  Unlike message-level elements, struct members are
 * encoded back-to-back without TLV headers.  Decoding stops once the
 * struct's minimum encoded size has been consumed or the member list
 * is exhausted.
 * *used_total is set to the number of input bytes consumed.
 * Returns 0 on success, -1 on malformed input.
 */
int
qwz_qmi_decode_struct(struct qwz_softc *sc, size_t *used_total,
    void *output, size_t output_len,
    const struct qmi_elem_info *struct_ei,
    uint8_t *input, uint16_t input_len,
    int nested)
{
	const struct qmi_elem_info *ei = struct_ei->ei_array;
	uint32_t min_size;
	uint8_t *p = input;
	size_t remain = input_len;
	size_t used = 0;

	*used_total = 0;

	DNPRINTF(QWZ_D_QMI, "%s: input: ", __func__);
	for (int i = 0; i < input_len; i++) {
		DNPRINTF(QWZ_D_QMI, " %02x", input[i]);
	}
	DNPRINTF(QWZ_D_QMI, "\n");

	min_size = qwz_qmi_decode_min_elem_size(struct_ei, 0);
	DNPRINTF(QWZ_D_QMI, "%s: minimum struct size: %u\n", __func__, min_size);
	while (*used_total < min_size && ei->data_type != QMI_EOTI) {
		if (remain == 0) {
			printf("%s: QMI message too short\n", __func__);
			return -1;
		}

		if (ei->data_type == QMI_DATA_LEN) {
			uint32_t datalen;

			/*
			 * An in-band length field precedes the member it
			 * describes (the next ei).
			 */
			used = 0;
			if (qwz_qmi_decode_datalen(sc, &used, &datalen,
			    output, output_len, ei, p, remain))
				return -1;
			DNPRINTF(QWZ_D_QMI, "%s: datalen %u used %zu bytes\n",
			    __func__, datalen, used);
			p += used;
			remain -= used;
			*used_total += used;
			if (remain < datalen) {
				printf("%s: QMI message too short\n", __func__);
				return -1;
			}
			ei++;
			DNPRINTF(QWZ_D_QMI, "%s: datalen is for data_type=0x%x "
			    "tlv_type=0x%x elem_size=%u(0x%x) remain=%zu\n",
			    __func__, ei->data_type, ei->tlv_type,
			    ei->elem_size, ei->elem_size, remain);
			if (datalen == 0) {
				/* Empty member: skip past it entirely. */
				ei++;
				DNPRINTF(QWZ_D_QMI,
				    "%s: skipped to data_type=0x%x "
				    "tlv_type=0x%x elem_size=%u(0x%x) "
				    "remain=%zu\n", __func__,
				    ei->data_type, ei->tlv_type,
				    ei->elem_size, ei->elem_size, remain);
				continue;
			}
		} else {
			if (remain < ei->elem_size) {
				printf("%s: QMI message too short\n",
				    __func__);
				return -1;
			}
		}

		/* The decoded member must fit within the output struct. */
		if (ei->offset + ei->elem_size > output_len) {
			printf("%s: QMI message struct member element "
			    "type 0x%x too large: %u\n", sc->sc_dev.dv_xname,
			    ei->tlv_type, ei->elem_size);
			return -1;
		}

		DNPRINTF(QWZ_D_QMI,
		    "%s: decoding struct member element 0x%x with "
		    "data type %s (0x%x) size=%u(0x%x) remain=%zu\n", __func__,
		    ei->tlv_type, qmi_data_type_name[ei->data_type],
		    ei->data_type, ei->elem_size, ei->elem_size, remain);
		switch (ei->data_type) {
		case QMI_UNSIGNED_1_BYTE:
			if (qwz_qmi_decode_byte(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_2_BYTE:
		case QMI_SIGNED_2_BYTE_ENUM:
			if (qwz_qmi_decode_word(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_4_BYTE:
		case QMI_SIGNED_4_BYTE_ENUM:
			if (qwz_qmi_decode_dword(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_8_BYTE:
			if (qwz_qmi_decode_qword(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_STRUCT:
			/* Nested struct: recurse, bounded by 'nested'. */
			if (nested > 2) {
				printf("%s: QMI struct element data type 0x%x "
				    "is nested too deeply\n",
				    sc->sc_dev.dv_xname, ei->data_type);
				return -1;
			}
			used = 0;
			if (qwz_qmi_decode_struct(sc, &used,
			    output + ei->offset, output_len - ei->offset,
			    ei, p, remain, nested + 1))
				return -1;
			remain -= used;
			p += used;
			*used_total += used;
			break;
		case QMI_STRING:
			/* Nested string: carries an in-band length field. */
			used = 0;
			if (qwz_qmi_decode_string(sc, &used,
			    output + ei->offset, output_len - ei->offset,
			    ei, p, remain, 0, 1))
				return -1;
			remain -= used;
			p += used;
			*used_total += used;
			break;
		default:
			printf("%s: unhandled QMI struct element "
			    "data type 0x%x\n", sc->sc_dev.dv_xname,
			    ei->data_type);
			return -1;
		}

		ei++;
		DNPRINTF(QWZ_D_QMI, "%s: next ei 0x%x ei->data_type=0x%x\n",
		    __func__, ei->tlv_type, ei->data_type);
	}

	DNPRINTF(QWZ_D_QMI, "%s: used_total=%zu ei->data_type=0x%x\n",
	    __func__, *used_total, ei->data_type);

	return 0;
}
+
+int
+qwz_qmi_decode_msg(struct qwz_softc *sc, void *output, size_t output_len,
+    const struct qmi_elem_info *ei0, uint8_t *input, uint16_t input_len)
+{
+       uint8_t *p = input;
+       size_t remain = input_len, used;
+       const struct qmi_elem_info *ei = ei0;
+
+       memset(output, 0, output_len);
+
+       DNPRINTF(QWZ_D_QMI, "%s: input: ", __func__);
+       for (int i = 0; i < input_len; i++) {
+               DNPRINTF(QWZ_D_QMI, " %02x", input[i]);
+       }
+       DNPRINTF(QWZ_D_QMI, "\n");
+
+       while (remain > 0 && ei->data_type != QMI_EOTI) {
+               uint32_t nelem = 1, i;
+               uint16_t datalen;
+
+               if (qwz_qmi_decode_tlv_hdr(sc, &ei, &datalen, output_len,
+                   ei0, p, remain))
+                       return -1;
+
+               /* Skip unrecognized elements. */
+               if (ei->data_type == QMI_EOTI) {
+                       p += 3 + datalen;
+                       remain -= 3 + datalen;
+                       ei = ei0;
+                       continue;
+               }
+
+               /* Set 'valid' flag for optional fields in output struct. */
+               if (ei->data_type == QMI_OPT_FLAG) {
+                       uint8_t *pvalid;
+
+                       if (ei->offset + ei->elem_size > output_len) {
+                               printf("%s: QMI message element type 0x%x "
+                                   "too large: %u\n", sc->sc_dev.dv_xname,
+                                   ei->tlv_type, ei->elem_size);
+                       }
+
+                       pvalid = (uint8_t *)output + ei->offset;
+                       *pvalid = 1;
+
+                       ei++;
+               }
+
+               p += 3;
+               remain -= 3;
+
+               if (ei->data_type == QMI_DATA_LEN) {
+                       const struct qmi_elem_info *datalen_ei = ei;
+                       uint8_t elem_type = ei->tlv_type;
+
+                       /*
+                        * Size info in TLV header indicates the
+                        * total length of element data that follows.
+                        */
+                       if (remain < datalen) {
+                               printf("%s:%d QMI message too short\n",
+                                   __func__, __LINE__);
+                               return -1;
+                       }
+
+                       ei++;
+                       DNPRINTF(QWZ_D_QMI,
+                           "%s: next ei data_type=0x%x tlv_type=0x%x "
+                           "dst elem_size=%u(0x%x) src total size=%u "
+                           "remain=%zu\n", __func__, ei->data_type,
+                           ei->tlv_type, ei->elem_size, ei->elem_size,
+                           datalen, remain);
+
+                       /* Related EIs must have the same type. */
+                       if (ei->tlv_type != elem_type) {
+                               printf("%s: unexepected element type 0x%x; "
+                                   "expected 0x%x\n", __func__,
+                                   ei->tlv_type, elem_type);
+                               return -1;
+                       }
+
+                       if (datalen == 0) {
+                               if (ei->data_type != QMI_EOTI)
+                                       ei++;
+                               continue;
+                       }
+
+                       /*
+                        * For variable length arrays a one- or two-byte
+                        * value follows the header, indicating the number
+                        * of elements in the array.
+                        */
+                       if (ei->array_type == VAR_LEN_ARRAY) {
+                               DNPRINTF(QWZ_D_QMI,
+                                   "%s: variable length array\n", __func__);
+                               used = 0;
+                               if (qwz_qmi_decode_datalen(sc, &used, &nelem,
+                                   output, output_len, datalen_ei, p, remain))
+                                       return -1;
+                               p += used;
+                               remain -= used;
+                               /*
+                                * Previous datalen value included the total
+                                * amount of bytes following the DATALEN TLV
+                                * header.
+                                */
+                               datalen -= used;
+
+                               if (nelem == 0) {
+                                       if (ei->data_type != QMI_EOTI)
+                                               ei++;
+                                       continue;
+                               }
+
+                               DNPRINTF(QWZ_D_QMI,
+                                   "%s: datalen %u used %zu bytes\n",
+                                   __func__, nelem, used);
+
+                               DNPRINTF(QWZ_D_QMI,
+                                   "%s: decoding %u array elements with "
+                                   "src size %u dest size %u\n", __func__,
+                                   nelem, datalen / nelem, ei->elem_size);
+                       }
+               }
+
+               if (remain < datalen) {
+                       printf("%s:%d QMI message too short: remain=%zu, "
+                           "datalen=%u\n", __func__, __LINE__, remain,
+                           datalen);
+                       return -1;
+               }
+               if (output_len < nelem * ei->elem_size) {
+                       printf("%s: QMI output buffer too short: remain=%zu "
+                           "nelem=%u ei->elem_size=%u\n", __func__, remain,
+                           nelem, ei->elem_size);
+                       return -1;
+               }
+
+               for (i = 0; i < nelem && remain > 0; i++) {
+                       size_t outoff;
+
+                       outoff = ei->offset + (ei->elem_size * i);
+                       switch (ei->data_type) {
+                       case QMI_STRUCT:
+                               used = 0;
+                               if (qwz_qmi_decode_struct(sc, &used,
+                                   output + outoff, output_len - outoff,
+                                   ei, p, remain, 0))
+                                       return -1;
+                               remain -= used;
+                               p += used;
+                               if (used != datalen) {
+                                       DNPRINTF(QWZ_D_QMI,
+                                           "%s struct used only %zu bytes "
+                                           "of %u input bytes\n", __func__,
+                                           used, datalen);
+                               } else {
+                                       DNPRINTF(QWZ_D_QMI,
+                                           "%s: struct used %zu bytes "
+                                           "of input\n", __func__, used);
+                               }
+                               break;
+                       case QMI_STRING:
+                               used = 0;
+                               if (qwz_qmi_decode_string(sc, &used,
+                                   output + outoff, output_len - outoff,
+                                   ei, p, remain, datalen, 0))
+                                       return -1;
+                               remain -= used;
+                               p += used;
+                               if (used != datalen) {
+                                       DNPRINTF(QWZ_D_QMI,
+                                           "%s: string used only %zu bytes "
+                                           "of %u input bytes\n", __func__,
+                                           used, datalen);
+                               } else {
+                                       DNPRINTF(QWZ_D_QMI,
+                                           "%s: string used %zu bytes "
+                                           "of input\n", __func__, used);
+                               }
+                               break;
+                       case QMI_UNSIGNED_1_BYTE:
+                               if (remain < ei->elem_size) {
+                                       printf("%s: QMI message too "
+                                           "short\n", __func__);
+                                       return -1;
+                               }
+                               if (qwz_qmi_decode_byte(output + outoff,
+                                   ei, p))
+                                       return -1;
+                               remain -= ei->elem_size;
+                               p += ei->elem_size;
+                               break;
+                       case QMI_UNSIGNED_2_BYTE:
+                       case QMI_SIGNED_2_BYTE_ENUM:
+                               if (remain < ei->elem_size) {
+                                       printf("%s: QMI message too "
+                                           "short\n", __func__);
+                                       return -1;
+                               }
+                               if (qwz_qmi_decode_word(output + outoff,
+                                   ei, p))
+                                       return -1;
+                               remain -= ei->elem_size;
+                               p += ei->elem_size;
+                               break;
+                       case QMI_UNSIGNED_4_BYTE:
+                       case QMI_SIGNED_4_BYTE_ENUM:
+                               if (remain < ei->elem_size) {
+                                       printf("%s: QMI message too "
+                                           "short\n", __func__);
+                                       return -1;
+                               }
+                               if (qwz_qmi_decode_dword(output + outoff,
+                                   ei, p))
+                                       return -1;
+                               remain -= ei->elem_size;
+                               p += ei->elem_size;
+                               break;
+                       case QMI_UNSIGNED_8_BYTE:
+                               if (remain < ei->elem_size) {
+                                       printf("%s: QMI message too "
+                                           "short 4\n", __func__);
+                                       return -1;
+                               }
+                               if (qwz_qmi_decode_qword(output + outoff,
+                                   ei, p))
+                                       return -1;
+                               remain -= ei->elem_size;
+                               p += ei->elem_size;
+                               break;
+                       default:
+                               printf("%s: unhandled QMI message element "
+                                   "data type 0x%x\n",
+                                   sc->sc_dev.dv_xname, ei->data_type);
+                               return -1;
+                       }
+               }
+
+               ei++;
+               DNPRINTF(QWZ_D_QMI,
+                   "%s: next ei 0x%x ei->data_type=0x%x remain=%zu\n",
+                   __func__, ei->tlv_type, ei->data_type, remain);
+
+               DNPRINTF(QWZ_D_QMI, "%s: remaining input: ", __func__);
+               for (int i = 0; i < remain; i++)
+                       DNPRINTF(QWZ_D_QMI, " %02x", p[i]);
+               DNPRINTF(QWZ_D_QMI, "\n");
+       }
+
+       return 0;
+}
+
+void
+qwz_qmi_recv_wlanfw_ind_register_req_v1(struct qwz_softc *sc, struct mbuf *m,
+    uint16_t txn_id, uint16_t msg_len)
+{
+       struct qmi_wlanfw_ind_register_resp_msg_v01 resp;
+       const struct qmi_elem_info *ei;
+       uint8_t *msg = mtod(m, uint8_t *);
+
+       DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
+
+       ei = qmi_wlanfw_ind_register_resp_msg_v01_ei;
+       if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
+               return;
+
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
+           __func__, le16toh(resp.resp.result));
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
+           __func__, le16toh(resp.resp.error));
+       DNPRINTF(QWZ_D_QMI, "%s: resp.fw_status=0x%llx\n",
+          __func__, le64toh(resp.fw_status));
+
+       sc->qmi_resp.result = le16toh(resp.resp.result);
+       sc->qmi_resp.error = le16toh(resp.resp.error);
+       wakeup(&sc->qmi_resp);
+}
+
+void
+qwz_qmi_recv_wlanfw_host_cap_resp_v1(struct qwz_softc *sc, struct mbuf *m,
+    uint16_t txn_id, uint16_t msg_len)
+{
+       struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
+       const struct qmi_elem_info *ei;
+       uint8_t *msg = mtod(m, uint8_t *);
+
+       DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
+
+       ei = qmi_wlanfw_host_cap_resp_msg_v01_ei;
+       if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
+               return;
+
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
+           __func__, le16toh(resp.resp.result));
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
+           __func__, le16toh(resp.resp.error));
+
+       sc->qmi_resp.result = le16toh(resp.resp.result);
+       sc->qmi_resp.error = le16toh(resp.resp.error);
+       wakeup(&sc->qmi_resp);
+}
+
+void
+qwz_qmi_recv_wlanfw_respond_mem_resp_v1(struct qwz_softc *sc, struct mbuf *m,
+    uint16_t txn_id, uint16_t msg_len)
+{
+       struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
+       const struct qmi_elem_info *ei;
+       uint8_t *msg = mtod(m, uint8_t *);
+
+       DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
+
+       ei = qmi_wlanfw_respond_mem_resp_msg_v01_ei;
+       if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
+               return;
+
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
+           __func__, le16toh(resp.resp.result));
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
+           __func__, le16toh(resp.resp.error));
+
+       sc->qmi_resp.result = le16toh(resp.resp.result);
+       sc->qmi_resp.error = le16toh(resp.resp.error);
+       wakeup(&sc->qmi_resp);
+}
+
+void
+qwz_qmi_recv_wlanfw_cap_resp_v1(struct qwz_softc *sc, struct mbuf *m,
+    uint16_t txn_id, uint16_t msg_len)
+{
+       struct qmi_wlanfw_cap_resp_msg_v01 resp;
+       const struct qmi_elem_info *ei;
+       uint8_t *msg = mtod(m, uint8_t *);
+
+       DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
+
+       memset(&resp, 0, sizeof(resp));
+
+       ei = qmi_wlanfw_cap_resp_msg_v01_ei;
+       if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
+               return;
+
+       if (resp.chip_info_valid) {
+               sc->qmi_target.chip_id = resp.chip_info.chip_id;
+               sc->qmi_target.chip_family = resp.chip_info.chip_family;
+       }
+
+       if (resp.board_info_valid)
+               sc->qmi_target.board_id = resp.board_info.board_id;
+       else
+               sc->qmi_target.board_id = 0xFF;
+
+       if (resp.soc_info_valid)
+               sc->qmi_target.soc_id = resp.soc_info.soc_id;
+
+       if (resp.fw_version_info_valid) {
+               sc->qmi_target.fw_version = resp.fw_version_info.fw_version;
+               strlcpy(sc->qmi_target.fw_build_timestamp,
+                       resp.fw_version_info.fw_build_timestamp,
+                       sizeof(sc->qmi_target.fw_build_timestamp));
+       }
+
+       if (resp.fw_build_id_valid)
+               strlcpy(sc->qmi_target.fw_build_id, resp.fw_build_id,
+                       sizeof(sc->qmi_target.fw_build_id));
+
+       if (resp.eeprom_read_timeout_valid) {
+               sc->qmi_target.eeprom_caldata = resp.eeprom_read_timeout;
+               DNPRINTF(QWZ_D_QMI,
+                   "%s: qmi cal data supported from eeprom\n", __func__);
+       }
+
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
+           __func__, le16toh(resp.resp.result));
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
+           __func__, le16toh(resp.resp.error));
+
+       sc->qmi_resp.result = le16toh(resp.resp.result);
+       sc->qmi_resp.error = le16toh(resp.resp.error);
+       wakeup(&sc->qmi_resp);
+}
+
+void
+qwz_qmi_recv_wlanfw_bdf_download_resp_v1(struct qwz_softc *sc, struct mbuf *m,
+    uint16_t txn_id, uint16_t msg_len)
+{
+       struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+       const struct qmi_elem_info *ei;
+       uint8_t *msg = mtod(m, uint8_t *);
+
+       memset(&resp, 0, sizeof(resp));
+
+       DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
+
+       ei = qmi_wlanfw_bdf_download_resp_msg_v01_ei;
+       if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
+               return;
+
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
+           __func__, le16toh(resp.resp.result));
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
+           __func__, le16toh(resp.resp.error));
+
+       sc->qmi_resp.result = le16toh(resp.resp.result);
+       sc->qmi_resp.error = le16toh(resp.resp.error);
+       wakeup(&sc->qmi_resp);
+}
+
+void
+qwz_qmi_recv_wlanfw_m3_info_resp_v1(struct qwz_softc *sc, struct mbuf *m,
+    uint16_t txn_id, uint16_t msg_len)
+{
+       struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
+       const struct qmi_elem_info *ei;
+       uint8_t *msg = mtod(m, uint8_t *);
+
+       memset(&resp, 0, sizeof(resp));
+
+       DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
+
+       ei = qmi_wlanfw_m3_info_resp_msg_v01_ei;
+       if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
+               return;
+
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
+           __func__, le16toh(resp.resp.result));
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
+           __func__, le16toh(resp.resp.error));
+
+       sc->qmi_resp.result = le16toh(resp.resp.result);
+       sc->qmi_resp.error = le16toh(resp.resp.error);
+       wakeup(&sc->qmi_resp);
+}
+
+void
+qwz_qmi_recv_wlanfw_wlan_ini_resp_v1(struct qwz_softc *sc, struct mbuf *m,
+    uint16_t txn_id, uint16_t msg_len)
+{
+       struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp;
+       const struct qmi_elem_info *ei;
+       uint8_t *msg = mtod(m, uint8_t *);
+
+       memset(&resp, 0, sizeof(resp));
+
+       DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
+
+       ei = qmi_wlanfw_wlan_ini_resp_msg_v01_ei;
+       if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
+               return;
+
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
+           __func__, le16toh(resp.resp.result));
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
+           __func__, le16toh(resp.resp.error));
+
+       sc->qmi_resp.result = le16toh(resp.resp.result);
+       sc->qmi_resp.error = le16toh(resp.resp.error);
+       wakeup(&sc->qmi_resp);
+}
+
+void
+qwz_qmi_recv_wlanfw_wlan_cfg_resp_v1(struct qwz_softc *sc, struct mbuf *m,
+    uint16_t txn_id, uint16_t msg_len)
+{
+       struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
+       const struct qmi_elem_info *ei;
+       uint8_t *msg = mtod(m, uint8_t *);
+
+       memset(&resp, 0, sizeof(resp));
+
+       DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
+
+       ei = qmi_wlanfw_wlan_cfg_resp_msg_v01_ei;
+       if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
+               return;
+
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
+           __func__, le16toh(resp.resp.result));
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
+           __func__, le16toh(resp.resp.error));
+
+       sc->qmi_resp.result = le16toh(resp.resp.result);
+       sc->qmi_resp.error = le16toh(resp.resp.error);
+       wakeup(&sc->qmi_resp);
+}
+
+void
+qwz_qmi_recv_wlanfw_wlan_mode_resp_v1(struct qwz_softc *sc, struct mbuf *m,
+    uint16_t txn_id, uint16_t msg_len)
+{
+       struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
+       const struct qmi_elem_info *ei;
+       uint8_t *msg = mtod(m, uint8_t *);
+
+       memset(&resp, 0, sizeof(resp));
+
+       DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
+
+       ei = qmi_wlanfw_wlan_mode_resp_msg_v01_ei;
+       if (qwz_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
+               return;
+
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.result=0x%x\n",
+           __func__, le16toh(resp.resp.result));
+       DNPRINTF(QWZ_D_QMI, "%s: resp.resp.error=0x%x\n",
+           __func__, le16toh(resp.resp.error));
+
+       sc->qmi_resp.result = le16toh(resp.resp.result);
+       sc->qmi_resp.error = le16toh(resp.resp.error);
+       wakeup(&sc->qmi_resp);
+}
+
+void
+qwz_qmi_recv_response(struct qwz_softc *sc, struct mbuf *m,
+    uint16_t txn_id, uint16_t msg_id, uint16_t msg_len)
+{
+       switch (msg_id) {
+       case QMI_WLANFW_IND_REGISTER_REQ_V01:
+               qwz_qmi_recv_wlanfw_ind_register_req_v1(sc, m, txn_id, msg_len);
+               break;
+       case QMI_WLFW_HOST_CAP_RESP_V01:
+               qwz_qmi_recv_wlanfw_host_cap_resp_v1(sc, m, txn_id, msg_len);
+               break;
+       case QMI_WLFW_RESPOND_MEM_RESP_V01:
+               qwz_qmi_recv_wlanfw_respond_mem_resp_v1(sc, m, txn_id, msg_len);
+               break;
+       case QMI_WLANFW_CAP_RESP_V01:
+               qwz_qmi_recv_wlanfw_cap_resp_v1(sc, m, txn_id, msg_len);
+               break;
+       case QMI_WLANFW_BDF_DOWNLOAD_RESP_V01:
+               qwz_qmi_recv_wlanfw_bdf_download_resp_v1(sc, m, txn_id,
+                   msg_len);
+               break;
+       case QMI_WLANFW_M3_INFO_RESP_V01:
+               qwz_qmi_recv_wlanfw_m3_info_resp_v1(sc, m, txn_id, msg_len);
+               break;
+       case QMI_WLANFW_WLAN_INI_RESP_V01:
+               qwz_qmi_recv_wlanfw_wlan_ini_resp_v1(sc, m, txn_id, msg_len);
+               break;
+       case QMI_WLANFW_WLAN_CFG_RESP_V01:
+               qwz_qmi_recv_wlanfw_wlan_cfg_resp_v1(sc, m, txn_id, msg_len);
+               break;
+       case QMI_WLANFW_WLAN_MODE_RESP_V01:
+               qwz_qmi_recv_wlanfw_wlan_mode_resp_v1(sc, m, txn_id, msg_len);
+               break;
+       default:
+               printf("%s: unhandled QMI response 0x%x\n",
+                   sc->sc_dev.dv_xname, msg_id);
+               break;
+       }
+}
+
+void
+qwz_qmi_recv_wlanfw_request_mem_indication(struct qwz_softc *sc, struct mbuf *m,
+    uint16_t txn_id, uint16_t msg_len)
+{
+       struct qmi_wlanfw_request_mem_ind_msg_v01 *ind = NULL;
+       const struct qmi_elem_info *ei;
+       uint8_t *msg = mtod(m, uint8_t *);
+
+       DNPRINTF(QWZ_D_QMI, "%s\n", __func__);
+
+       if (!sc->expect_fwmem_req || sc->sc_req_mem_ind != NULL)
+               return;
+
+       /* This structure is too large for the stack. */
+       ind = malloc(sizeof(*ind), M_DEVBUF, M_NOWAIT | M_ZERO);
+       if (ind == NULL)
+               return;
+
+       ei = qmi_wlanfw_request_mem_ind_msg_v01_ei;
+       if (qwz_qmi_decode_msg(sc, ind, sizeof(*ind), ei, msg, msg_len)) {
+               free(ind, M_DEVBUF, sizeof(*ind));
+               return;
+       }
+
+       /* Handled by qwz_qmi_mem_seg_send() in process context */
+       sc->sc_req_mem_ind = ind;
+       wakeup(&sc->sc_req_mem_ind);
+}
+
+void
+qwz_qmi_recv_indication(struct qwz_softc *sc, struct mbuf *m,
+    uint16_t txn_id, uint16_t msg_id, uint16_t msg_len)
+{
+       switch (msg_id) {
+       case QMI_WLFW_REQUEST_MEM_IND_V01:
+               qwz_qmi_recv_wlanfw_request_mem_indication(sc, m,
+                   txn_id, msg_len);
+               break;
+       case QMI_WLFW_FW_MEM_READY_IND_V01:
+               sc->fwmem_ready = 1;
+               wakeup(&sc->fwmem_ready);
+               break;
+       case QMI_WLFW_FW_INIT_DONE_IND_V01:
+               sc->fw_init_done = 1;
+               wakeup(&sc->fw_init_done);
+               break;
+       default:
+               printf("%s: unhandled QMI indication 0x%x\n",
+                   sc->sc_dev.dv_xname, msg_id);
+               break;
+       }
+}
+
+void
+qwz_qrtr_recv_data(struct qwz_softc *sc, struct mbuf *m, size_t size)
+{
+       struct qmi_header hdr;
+       uint16_t txn_id, msg_id, msg_len;
+
+       if (size < sizeof(hdr)) {
+               printf("%s: QMI message too short: %zu bytes\n",
+                   sc->sc_dev.dv_xname, size);
+               return;
+       }
+
+       memcpy(&hdr, mtod(m, void *), sizeof(hdr));
+
+       DNPRINTF(QWZ_D_QMI,
+           "%s: QMI message type=0x%x txn=0x%x id=0x%x len=%u\n",
+           __func__, hdr.type, le16toh(hdr.txn_id),
+           le16toh(hdr.msg_id), le16toh(hdr.msg_len));
+
+       txn_id = le16toh(hdr.txn_id);
+       msg_id = le16toh(hdr.msg_id);
+       msg_len = le16toh(hdr.msg_len);
+       if (sizeof(hdr) + msg_len != size) {
+               printf("%s: bad length in QMI message header: %u\n",
+                   sc->sc_dev.dv_xname, msg_len);
+               return;
+       }
+
+       switch (hdr.type) {
+       case QMI_RESPONSE:
+               m_adj(m, sizeof(hdr));
+               qwz_qmi_recv_response(sc, m, txn_id, msg_id, msg_len);
+               break;
+       case QMI_INDICATION:
+               m_adj(m, sizeof(hdr));
+               qwz_qmi_recv_indication(sc, m, txn_id, msg_id, msg_len);
+               break;
+       default:
+               printf("%s: unhandled QMI message type %u\n",
+                   sc->sc_dev.dv_xname, hdr.type);
+               break;
+       }
+}
+
/*
 * Send a QRTR HELLO control packet to the firmware, completing the
 * initial QRTR handshake (sent in reply to the firmware's own HELLO).
 * Returns 0 on success or an errno value on failure.
 */
int
qwz_qrtr_say_hello(struct qwz_softc *sc)
{
	struct qrtr_hdr_v1 hdr;
	struct qrtr_ctrl_pkt pkt;
	struct mbuf *m;
	size_t totlen, padlen;
	int err;

	/* QRTR messages are padded to a multiple of 4 bytes. */
	totlen = sizeof(hdr) + sizeof(pkt);
	padlen = roundup(totlen, 4);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		err = ENOBUFS;
		goto done;
	}

	/* Attach cluster storage large enough for the padded message. */
	if (padlen <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, padlen);
	if ((m->m_flags & M_EXT) == 0) {
		err = ENOBUFS;
		goto done;
	}

	m->m_len = m->m_pkthdr.len = padlen;

	/* Build the version-1 QRTR header; IDs are hardcoded for now. */
	memset(&hdr, 0, sizeof(hdr));
	hdr.version = htole32(QRTR_PROTO_VER_1);
	hdr.type = htole32(QRTR_TYPE_HELLO);
	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
	hdr.src_port_id = htole32(0xfffffffeU); /* TODO make human-readable */
	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
	hdr.dst_port_id = htole32(0xfffffffeU); /* TODO make human-readable */
	hdr.size = htole32(sizeof(pkt));

	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
	if (err)
		goto done;

	/* Control payload: a HELLO command. */
	memset(&pkt, 0, sizeof(pkt));
	pkt.cmd = htole32(QRTR_TYPE_HELLO);

	err = m_copyback(m, sizeof(hdr), sizeof(pkt), &pkt, M_NOWAIT);
	if (err)
		goto done;

	/* Zero-pad the mbuf */
	if (padlen != totlen) {
		uint32_t pad = 0;
		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
		if (err)
			goto done;
	}

	/* Bus-specific transmit; the callee consumes the mbuf on success. */
	err = sc->ops.submit_xfer(sc, m);
done:
	if (err)
		m_freem(m);
	return err;
}
+
/*
 * Send a QRTR RESUME_TX control packet, acknowledging receipt of a
 * message which had the confirm-rx flag set so the firmware may keep
 * transmitting to us. Returns 0 on success or an errno value.
 */
int
qwz_qrtr_resume_tx(struct qwz_softc *sc)
{
	struct qrtr_hdr_v1 hdr;
	struct qrtr_ctrl_pkt pkt;
	struct mbuf *m;
	size_t totlen, padlen;
	int err;

	/* QRTR messages are padded to a multiple of 4 bytes. */
	totlen = sizeof(hdr) + sizeof(pkt);
	padlen = roundup(totlen, 4);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		err = ENOBUFS;
		goto done;
	}

	/* Attach cluster storage large enough for the padded message. */
	if (padlen <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, padlen);
	if ((m->m_flags & M_EXT) == 0) {
		err = ENOBUFS;
		goto done;
	}

	m->m_len = m->m_pkthdr.len = padlen;

	/* Build the version-1 QRTR header; IDs are hardcoded for now. */
	memset(&hdr, 0, sizeof(hdr));
	hdr.version = htole32(QRTR_PROTO_VER_1);
	hdr.type = htole32(QRTR_TYPE_RESUME_TX);
	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
	hdr.src_port_id = htole32(0x4000); /* TODO make human-readable */
	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
	hdr.dst_port_id = htole32(0x01); /* TODO make human-readable */
	hdr.size = htole32(sizeof(pkt));

	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
	if (err)
		goto done;

	/* Control payload identifies our node/port as resumable. */
	memset(&pkt, 0, sizeof(pkt));
	pkt.cmd = htole32(QRTR_TYPE_RESUME_TX);
	pkt.client.node = htole32(0x01);
	pkt.client.port = htole32(0x4000);

	err = m_copyback(m, sizeof(hdr), sizeof(pkt), &pkt, M_NOWAIT);
	if (err)
		goto done;

	/* Zero-pad the mbuf */
	if (padlen != totlen) {
		uint32_t pad = 0;
		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
		if (err)
			goto done;
	}

	/* Bus-specific transmit; the callee consumes the mbuf on success. */
	err = sc->ops.submit_xfer(sc, m);
done:
	if (err)
		m_freem(m);
	return err;
}
+
/*
 * Parse and dispatch one received QRTR message.
 * Both v1 and v2 QRTR headers are supported; the first byte of the
 * message selects the version. DATA payloads carry QMI messages;
 * HELLO triggers our side of the handshake; NEW_SERVER announces the
 * firmware's QMI service address which qwz_qmi_init_service waits for.
 * NOTE(review): assumes the full QRTR header is contiguous in the first
 * mbuf (v1/v2 views via mtod below) -- confirm callers guarantee this.
 */
void
qwz_qrtr_recv_msg(struct qwz_softc *sc, struct mbuf *m)
{
	struct qrtr_hdr_v1 *v1 = mtod(m, struct qrtr_hdr_v1 *);
	struct qrtr_hdr_v2 *v2 = mtod(m, struct qrtr_hdr_v2 *);
	struct qrtr_ctrl_pkt *pkt;
	uint32_t type, size, hdrsize;
	uint8_t ver, confirm_rx;

	/* The version field is a single byte at the start of both formats. */
	ver = *mtod(m, uint8_t *);
	switch (ver) {
	case QRTR_PROTO_VER_1:
		DNPRINTF(QWZ_D_QMI,
		    "%s: type %u size %u confirm_rx %u\n", __func__,
		    letoh32(v1->type), letoh32(v1->size),
		    letoh32(v1->confirm_rx));
		type = letoh32(v1->type);
		size = letoh32(v1->size);
		confirm_rx = !!letoh32(v1->confirm_rx);
		hdrsize = sizeof(*v1);
		break;
	case QRTR_PROTO_VER_2:
		DNPRINTF(QWZ_D_QMI,
		    "%s: type %u size %u confirm_rx %u\n", __func__,
		    v2->type, letoh32(v2->size),
		    !!(v2->flags & QRTR_FLAGS_CONFIRM_RX));
		type = v2->type;
		size = letoh32(v2->size);
		confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
		hdrsize = sizeof(*v2);
		break;
	default:
		printf("%s: unsupported qrtr version %u\n",
		    sc->sc_dev.dv_xname, ver);
		return;
	}

	/* Payload must fit in what we actually received. */
	if (size > m->m_pkthdr.len) {
		printf("%s: bad size in qrtr message header: %u\n",
		    sc->sc_dev.dv_xname, size);
		return;
	}

	switch (type) {
	case QRTR_TYPE_DATA:
		/* Strip the QRTR header and parse the QMI payload. */
		m_adj(m, hdrsize);
		qwz_qrtr_recv_data(sc, m, size);
		break;
	case QRTR_TYPE_HELLO:
		/* Firmware greeted us; reply with our own HELLO. */
		qwz_qrtr_say_hello(sc);
		break;
	case QRTR_TYPE_NEW_SERVER:
		/* Record the QMI service address announced by firmware. */
		m_adj(m, hdrsize);
		pkt = mtod(m, struct qrtr_ctrl_pkt *);
		sc->qrtr_server.service = le32toh(pkt->server.service);
		sc->qrtr_server.instance = le32toh(pkt->server.instance);
		sc->qrtr_server.node = le32toh(pkt->server.node);
		sc->qrtr_server.port = le32toh(pkt->server.port);
		DNPRINTF(QWZ_D_QMI,
		    "%s: new server: service=0x%x instance=0x%x node=0x%x "
		    "port=0x%x\n", __func__, sc->qrtr_server.service,
		    sc->qrtr_server.instance,
		    sc->qrtr_server.node, sc->qrtr_server.port);
		wakeup(&sc->qrtr_server);
		break;
	default:
		DPRINTF("%s: unhandled qrtr type %u\n",
		    sc->sc_dev.dv_xname, type);
		return;
	}

	/* The sender asked for an explicit flow-control acknowledgement. */
	if (confirm_rx)
		qwz_qrtr_resume_tx(sc);
}
+
// Not needed because we don't implement QMI as a network service.
+#define qwz_qmi_init_service(sc)       (0)
+#define qwz_qmi_deinit_service(sc)     (0)
+
+int
+qwz_qmi_encode_datalen(uint8_t *p, uint32_t *datalen,
+    const struct qmi_elem_info *ei, void *input)
+{
+       memcpy(datalen, input + ei->offset, sizeof(uint32_t));
+
+       if (ei->elem_size == sizeof(uint8_t)) {
+               p[0] = (*datalen & 0xff);
+       } else if (ei->elem_size == sizeof(uint16_t)) {
+               p[0] = (*datalen & 0xff);
+               p[1] = (*datalen >> 8) & 0xff;
+       } else {
+               printf("%s: bad element size\n", __func__);
+               return -1;
+       }
+
+       return 0;
+}
+
+int
+qwz_qmi_encode_byte(uint8_t *p, const struct qmi_elem_info *ei, void *input,
+    int i)
+{
+       if (ei->elem_size != sizeof(uint8_t)) {
+               printf("%s: bad element size\n", __func__);
+               return -1;
+       }
+
+       if (p == NULL)
+               return 0;
+
+       memcpy(p, input + ei->offset + (i * ei->elem_size), ei->elem_size);
+       return 0;
+}
+
+int
+qwz_qmi_encode_word(uint8_t *p, const struct qmi_elem_info *ei, void *input,
+    int i)
+{
+       uint16_t val;
+
+       if (ei->elem_size != sizeof(val)) {
+               printf("%s: bad element size\n", __func__);
+               return -1;
+       }
+
+       if (p == NULL)
+               return 0;
+
+       memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
+       val = htole16(val);
+       memcpy(p, &val, sizeof(val));
+       return 0;
+}
+
+int
+qwz_qmi_encode_dword(uint8_t *p, const struct qmi_elem_info *ei, void *input,
+    int i)
+{
+       uint32_t val;
+
+       if (ei->elem_size != sizeof(val)) {
+               printf("%s: bad element size\n", __func__);
+               return -1;
+       }
+
+       if (p == NULL)
+               return 0;
+
+       memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
+       val = htole32(val);
+       memcpy(p, &val, sizeof(val));
+       return 0;
+}
+
+int
+qwz_qmi_encode_qword(uint8_t *p, const struct qmi_elem_info *ei, void *input,
+    int i)
+{
+       uint64_t val;
+
+       if (ei->elem_size != sizeof(val)) {
+               printf("%s: bad element size\n", __func__);
+               return -1;
+       }
+
+       if (p == NULL)
+               return 0;
+
+       memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
+       val = htole64(val);
+       memcpy(p, &val, sizeof(val));
+       return 0;
+}
+
+/*
+ * Encode a QMI_STRUCT element by walking the struct's own element list
+ * (struct_ei->ei_array) and encoding each member in declaration order.
+ * When p is NULL only *encoded_len is computed (first pass).
+ * "input" points at the start of the struct in the caller's request
+ * buffer; "input_len" bounds how much of it may be read.
+ * Returns 0 on success, -1 on error.
+ */
+int
+qwz_qmi_encode_struct(uint8_t *p, size_t *encoded_len,
+    const struct qmi_elem_info *struct_ei, void *input, size_t input_len)
+{
+	const struct qmi_elem_info *ei = struct_ei->ei_array;
+	size_t remain = input_len;
+
+	*encoded_len = 0;
+
+	while (ei->data_type != QMI_EOTI) {
+		if (ei->data_type == QMI_OPT_FLAG) {
+			uint8_t do_encode, tlv_type;
+
+			/*
+			 * The flag byte in the input decides whether the
+			 * element following it gets encoded at all.
+			 */
+			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
+			ei++; /* Advance to element we might have to encode. */
+			if (ei->data_type == QMI_OPT_FLAG ||
+			    ei->data_type == QMI_EOTI) {
+				printf("%s: bad optional flag element\n",
+				    __func__);
+				return -1;
+			}
+			if (!do_encode) {
+				/* The element will not be encoded. Skip it. */
+				tlv_type = ei->tlv_type;
+				while (ei->data_type != QMI_EOTI &&
+				    ei->tlv_type == tlv_type)
+					ei++;
+				continue;
+			}
+		}
+
+		if (ei->elem_size > remain) {
+			printf("%s: QMI message buffer too short\n", __func__);
+			return -1;
+		}
+
+		/* Struct members may only be fixed-size integer types. */
+		switch (ei->data_type) {
+		case QMI_UNSIGNED_1_BYTE:
+			if (qwz_qmi_encode_byte(p, ei, input, 0))
+				return -1;
+			break;
+		case QMI_UNSIGNED_2_BYTE:
+			if (qwz_qmi_encode_word(p, ei, input, 0))
+				return -1;
+			break;
+		case QMI_UNSIGNED_4_BYTE:
+		case QMI_SIGNED_4_BYTE_ENUM:
+			if (qwz_qmi_encode_dword(p, ei, input, 0))
+				return -1;
+			break;
+		case QMI_UNSIGNED_8_BYTE:
+			if (qwz_qmi_encode_qword(p, ei, input, 0))
+				return -1;
+			break;
+		default:
+			printf("%s: unhandled QMI struct element type %d\n",
+			    __func__, ei->data_type);
+			return -1;
+		}
+
+		remain -= ei->elem_size;
+		if (p != NULL)
+			p += ei->elem_size;
+		*encoded_len += ei->elem_size;
+		ei++;
+	}
+
+	return 0;
+}
+
+/*
+ * Encode a variable-length string element.  The string is copied
+ * without a terminating NUL; its length is returned in *encoded_len.
+ * When p is NULL only the encoded length is computed (first pass).
+ * Returns 0 on success, -1 if the string exceeds the element size.
+ */
+int
+qwz_qmi_encode_string(uint8_t *p, size_t *encoded_len,
+    const struct qmi_elem_info *string_ei, void *input, size_t input_len)
+{
+	size_t len;
+
+	len = strnlen(input, input_len);
+	*encoded_len = len;
+	if (len > string_ei->elem_len) {
+		printf("%s: QMI message buffer too short\n", __func__);
+		return -1;
+	}
+
+	if (p != NULL)
+		memcpy(p, input, len);
+
+	return 0;
+}
+
+/*
+ * Encode a QMI message of the given type and message id from the
+ * C struct at "input", according to the element-info array "ei".
+ * Works in two passes over the element list: the first pass computes
+ * the encoded length, then a buffer is allocated, and the second pass
+ * writes the QMI header followed by type-length-value encoded elements.
+ *
+ * On success, *encoded_msg/*encoded_len describe a malloc(9)ed buffer
+ * owned by the caller, *txn_id has been post-incremented, and 0 is
+ * returned.  On failure both output parameters are cleared and -1 is
+ * returned (ENOMEM if allocation failed).
+ */
+int
+qwz_qmi_encode_msg(uint8_t **encoded_msg, size_t *encoded_len, int type,
+    uint16_t *txn_id, uint16_t msg_id, size_t msg_len,
+    const struct qmi_elem_info *ei, void *input, size_t input_len)
+{
+	const struct qmi_elem_info *ei0 = ei;
+	struct qmi_header hdr;
+	size_t remain;
+	uint8_t *p, *op;
+
+	*encoded_msg = NULL;
+	*encoded_len = 0;
+
+	/* First pass: Determine length of encoded message. */
+	while (ei->data_type != QMI_EOTI) {
+		int nelem = 1, i;
+
+		if (ei->offset + ei->elem_size > input_len) {
+			printf("%s: bad input buffer offset at element 0x%x "
+			    "data type 0x%x\n",
+			    __func__, ei->tlv_type, ei->data_type);
+			goto err;
+		}
+
+		/*
+		 * OPT_FLAG determines whether the next element
+		 * should be considered for encoding.
+		 */
+		if (ei->data_type == QMI_OPT_FLAG) {
+			uint8_t do_encode, tlv_type;
+
+			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
+			ei++; /* Advance to element we might have to encode. */
+			if (ei->data_type == QMI_OPT_FLAG ||
+			    ei->data_type == QMI_EOTI) {
+				printf("%s: bad optional element\n", __func__);
+				goto err;
+			}
+			if (!do_encode) {
+				/* The element will not be encoded. Skip it. */
+				tlv_type = ei->tlv_type;
+				while (ei->data_type != QMI_EOTI &&
+				    ei->tlv_type == tlv_type)
+					ei++;
+				continue;
+			}
+		}
+
+		*encoded_len += 3; /* type, length */
+		if (ei->data_type == QMI_DATA_LEN) {
+			uint32_t datalen = 0;
+			uint8_t dummy[2];
+
+			/* Data-len element gives the used array length. */
+			if (qwz_qmi_encode_datalen(dummy, &datalen, ei, input))
+				goto err;
+			*encoded_len += ei->elem_size;
+			ei++;
+			if (ei->array_type != VAR_LEN_ARRAY) {
+				printf("%s: data len not for a var array\n",
+				    __func__);
+				goto err;
+			}
+			nelem = datalen;
+			if (ei->data_type == QMI_STRUCT) {
+				/* Structs can vary in size; sum each one. */
+				for (i = 0; i < nelem; i++) {
+					size_t encoded_struct_len = 0;
+					size_t inoff = ei->offset + (i * ei->elem_size);
+
+					if (qwz_qmi_encode_struct(NULL,
+					    &encoded_struct_len, ei,
+					    input + inoff, input_len - inoff))
+						goto err;
+
+					*encoded_len += encoded_struct_len;
+				}
+			} else
+				*encoded_len += nelem * ei->elem_size;
+			ei++;
+		} else if (ei->data_type == QMI_STRING) {
+			size_t encoded_string_len = 0;
+			size_t inoff = ei->offset;
+
+			if (qwz_qmi_encode_string(NULL,
+			    &encoded_string_len, ei,
+			    input + inoff, input_len - inoff))
+				goto err;
+			*encoded_len += encoded_string_len;
+			ei++;
+		} else {
+			*encoded_len += ei->elem_size;
+			ei++;
+		}
+	}
+
+	*encoded_len += sizeof(hdr);
+	*encoded_msg = malloc(*encoded_len, M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (*encoded_msg == NULL)
+		return ENOMEM;
+
+	/* QMI header: message type, transaction id, message id, length. */
+	hdr.type = type;
+	hdr.txn_id = htole16(*txn_id);
+	hdr.msg_id = htole16(msg_id);
+	hdr.msg_len = htole16(*encoded_len - sizeof(hdr));
+	memcpy(*encoded_msg, &hdr, sizeof(hdr));
+
+	/* Second pass: Encode the message. */
+	ei = ei0;
+	p = *encoded_msg + sizeof(hdr);
+	remain = *encoded_len - sizeof(hdr);
+	while (ei->data_type != QMI_EOTI) {
+		uint32_t datalen = 0;
+		int nelem = 1, i;
+
+		if (ei->data_type == QMI_OPT_FLAG) {
+			uint8_t do_encode, tlv_type;
+
+			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
+			ei++; /* Advance to element we might have to encode. */
+			if (ei->data_type == QMI_OPT_FLAG ||
+			    ei->data_type == QMI_EOTI) {
+				printf("%s: bad optional flag element\n",
+				    __func__);
+				goto err;
+			}
+			if (!do_encode) {
+				/* The element will not be encoded. Skip it. */
+				tlv_type = ei->tlv_type;
+				while (ei->data_type != QMI_EOTI &&
+				    ei->tlv_type == tlv_type)
+					ei++;
+				continue;
+			}
+		}
+
+		if (ei->elem_size + 3 > remain) {
+			printf("%s: QMI message buffer too short\n", __func__);
+			goto err;
+		}
+
+		/* 3 bytes of type-length-value header, remember for later */
+		op = p;
+		p += 3;
+
+		if (ei->data_type == QMI_DATA_LEN) {
+			if (qwz_qmi_encode_datalen(p, &datalen, ei, input))
+				goto err;
+			p += ei->elem_size;
+			ei++;
+			if (ei->array_type == VAR_LEN_ARRAY)
+				nelem = datalen;
+		}
+
+		for (i = 0; i < nelem; i++) {
+			size_t encoded_struct_len = 0;
+			size_t encoded_string_len = 0;
+			size_t inoff = ei->offset + (i * ei->elem_size);
+
+			switch (ei->data_type) {
+			case QMI_UNSIGNED_1_BYTE:
+				if (qwz_qmi_encode_byte(p, ei, input, i))
+					goto err;
+				remain -= ei->elem_size;
+				p += ei->elem_size;
+				break;
+			case QMI_UNSIGNED_2_BYTE:
+			case QMI_SIGNED_2_BYTE_ENUM:
+				if (qwz_qmi_encode_word(p, ei, input, i))
+					goto err;
+				remain -= ei->elem_size;
+				p += ei->elem_size;
+				break;
+			case QMI_UNSIGNED_4_BYTE:
+			case QMI_SIGNED_4_BYTE_ENUM:
+				if (qwz_qmi_encode_dword(p, ei, input, i))
+					goto err;
+				remain -= ei->elem_size;
+				p += ei->elem_size;
+				break;
+			case QMI_UNSIGNED_8_BYTE:
+				if (qwz_qmi_encode_qword(p, ei, input, i))
+					goto err;
+				remain -= ei->elem_size;
+				p += ei->elem_size;
+				break;
+			case QMI_STRUCT:
+				if (qwz_qmi_encode_struct(p,
+				    &encoded_struct_len, ei,
+				    input + inoff, input_len - inoff))
+					goto err;
+				remain -= encoded_struct_len;
+				p += encoded_struct_len;
+				break;
+			case QMI_STRING:
+				if (qwz_qmi_encode_string(p,
+				    &encoded_string_len, ei,
+				    input + inoff, input_len - inoff))
+					goto err;
+				remain -= encoded_string_len;
+				p += encoded_string_len;
+				break;
+			default:
+				printf("%s: unhandled QMI message element type %d\n",
+				    __func__, ei->data_type);
+				goto err;
+			}
+		}
+
+		/* Now that the value length is known, fill in the TLV head. */
+		op[0] = ei->tlv_type;
+		op[1] = (p - (op + 3)) & 0xff;
+		op[2] = ((p - (op + 3)) >> 8) & 0xff;
+
+		ei++;
+	}
+
+	/* Disabled hex dump of the encoded message, useful when debugging. */
+	if (0) {
+		int i;
+		DNPRINTF(QWZ_D_QMI,
+		   "%s: message type 0x%x txnid 0x%x msgid 0x%x "
+		    "msglen %zu encoded:", __func__,
+		    type, *txn_id, msg_id, *encoded_len - sizeof(hdr));
+		for (i = 0; i < *encoded_len; i++) {
+			DNPRINTF(QWZ_D_QMI, "%s %.2x", i % 16 == 0 ? "\n" : "",
+			    (*encoded_msg)[i]);
+		}
+		if (i % 16)
+			DNPRINTF(QWZ_D_QMI, "\n");
+	}
+
+	(*txn_id)++; /* wrap-around is fine */
+	return 0;
+err:
+	free(*encoded_msg, M_DEVBUF, *encoded_len);
+	*encoded_msg = NULL;
+	*encoded_len = 0;
+	return -1;
+}
+
+/*
+ * Encode a QMI request message, prepend a QRTR v1 header, pad the
+ * packet to a 4-byte boundary, and hand it to the bus-specific
+ * transfer hook (sc->ops.submit_xfer).  On success the mbuf is owned
+ * by the transport; on failure it is freed here.
+ * Returns 0 on success or an errno-style value on failure.
+ */
+int
+qwz_qmi_send_request(struct qwz_softc *sc, uint16_t msg_id, size_t msg_len,
+    const struct qmi_elem_info *ei, void *req, size_t req_len)
+{
+	struct qrtr_hdr_v1 hdr;
+	struct mbuf *m;
+	uint8_t *encoded_msg;
+	size_t encoded_len;
+	size_t totlen, padlen;
+	int err;
+
+	if (qwz_qmi_encode_msg(&encoded_msg, &encoded_len, QMI_REQUEST,
+	    &sc->qmi_txn_id, msg_id, msg_len, ei, req, req_len))
+		return -1;
+
+	/* QRTR packets are padded to a multiple of 4 bytes. */
+	totlen = sizeof(hdr) + encoded_len;
+	padlen = roundup(totlen, 4);
+
+	m = m_gethdr(M_DONTWAIT, MT_DATA);
+	if (m == NULL) {
+		err = ENOBUFS;
+		goto done;
+	}
+
+	if (padlen <= MCLBYTES)
+		MCLGET(m, M_DONTWAIT);
+	else
+		MCLGETL(m, M_DONTWAIT, padlen);
+	if ((m->m_flags & M_EXT) == 0) {
+		err = ENOBUFS;
+		goto done;
+	}
+
+	m->m_len = m->m_pkthdr.len = padlen;
+
+	memset(&hdr, 0, sizeof(hdr));
+	hdr.version = htole32(QRTR_PROTO_VER_1);
+	hdr.type = htole32(QRTR_TYPE_DATA);
+	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
+	hdr.src_port_id = htole32(0x4000); /* TODO make human-readable */
+	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
+	hdr.dst_port_id = htole32(0x01); /* TODO make human-readable */
+	hdr.size = htole32(encoded_len);
+
+	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
+	if (err)
+		goto done;
+
+	err = m_copyback(m, sizeof(hdr), encoded_len, encoded_msg, M_NOWAIT);
+	if (err)
+		goto done;
+
+	/* Zero-pad the mbuf */
+	if (padlen != totlen) {
+		uint32_t pad = 0;
+		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
+		if (err)
+			goto done;
+	}
+
+	err = sc->ops.submit_xfer(sc, m);
+done:
+	/* m_freem(NULL) is a no-op, covering the m_gethdr failure path. */
+	if (err)
+		m_freem(m);
+	free(encoded_msg, M_DEVBUF, encoded_len);
+	return err;
+}
+
+/*
+ * Tell the firmware which QMI indications we want to receive
+ * (fw-ready, cal-done, fw-init-done and, unless the device uses a
+ * fixed memory region, memory request/ready).  Blocks until the
+ * firmware acknowledges, waking on sc->qmi_resp which is presumably
+ * filled in by the QMI response handler -- TODO confirm against the
+ * rx path.  Returns 0 on success, -1 or an errno on failure/timeout.
+ */
+int
+qwz_qmi_fw_ind_register_send(struct qwz_softc *sc)
+{
+	struct qmi_wlanfw_ind_register_req_msg_v01 req;
+	int ret;
+
+	memset(&req, 0, sizeof(req));
+
+	req.client_id_valid = 1;
+	req.client_id = QMI_WLANFW_CLIENT_ID;
+	req.fw_ready_enable_valid = 1;
+	req.fw_ready_enable = 1;
+	req.cal_done_enable_valid = 1;
+	req.cal_done_enable = 1;
+	req.fw_init_done_enable_valid = 1;
+	req.fw_init_done_enable = 1;
+
+	req.pin_connect_result_enable_valid = 0;
+	req.pin_connect_result_enable = 0;
+
+	/*
+	 * WCN6750 doesn't request for DDR memory via QMI,
+	 * instead it uses a fixed 12MB reserved memory region in DDR.
+	 */
+	if (!sc->hw_params.fixed_fw_mem) {
+		req.request_mem_enable_valid = 1;
+		req.request_mem_enable = 1;
+		req.fw_mem_ready_enable_valid = 1;
+		req.fw_mem_ready_enable = 1;
+	}
+
+	DNPRINTF(QWZ_D_QMI, "%s: qmi indication register request\n", __func__);
+
+	ret = qwz_qmi_send_request(sc, QMI_WLANFW_IND_REGISTER_REQ_V01,
+			       QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
+			       qmi_wlanfw_ind_register_req_msg_v01_ei,
+			       &req, sizeof(req));
+	if (ret) {
+		printf("%s: failed to send indication register request: %d\n",
+		    sc->sc_dev.dv_xname, ret);
+		return -1;
+	}
+
+	/* Wait up to one second per wakeup for a successful response. */
+	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
+	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
+		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzfwind",
+		    SEC_TO_NSEC(1));
+		if (ret) {
+			printf("%s: fw indication register request timeout\n",
+			    sc->sc_dev.dv_xname);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Send the host capability QMI message, describing what this host
+ * supports: number of clients, memory configuration mode, BDF and M3
+ * firmware support, calibration state, sleep clock source and PCIe
+ * power capabilities.  Blocks until the firmware acknowledges.
+ * Returns 0 on success, -1 or an errno on failure/timeout.
+ */
+int
+qwz_qmi_host_cap_send(struct qwz_softc *sc)
+{
+	struct qmi_wlanfw_host_cap_req_msg_v01 req;
+	int ret;
+
+	memset(&req, 0, sizeof(req));
+	req.num_clients_valid = 1;
+	req.num_clients = 1;
+	req.mem_cfg_mode = sc->hw_params.fw_mem_mode;
+	req.mem_cfg_mode_valid = 1;
+	req.bdf_support_valid = 1;
+	req.bdf_support = 1;
+
+	if (sc->hw_params.m3_fw_support) {
+		req.m3_support_valid = 1;
+		req.m3_support = 1;
+		req.m3_cache_support_valid = 1;
+		req.m3_cache_support = 1;
+	} else {
+		req.m3_support_valid = 0;
+		req.m3_support = 0;
+		req.m3_cache_support_valid = 0;
+		req.m3_cache_support = 0;
+	}
+
+	req.cal_done_valid = 1;
+	req.cal_done = sc->qmi_cal_done;
+
+	if (sc->hw_params.internal_sleep_clock) {
+		req.nm_modem_valid = 1;
+
+		/* Notify firmware that this is non-qualcomm platform. */
+		req.nm_modem |= QWZ_HOST_CSTATE_BIT;
+
+		/* Notify firmware about the sleep clock selection,
+		 * nm_modem_bit[1] is used for this purpose. Host driver on
+		 * non-qualcomm platforms should select internal sleep
+		 * clock.
+		 */
+		req.nm_modem |= QWZ_SLEEP_CLOCK_SELECT_INTERNAL_BIT;
+	}
+
+	if (sc->hw_params.global_reset)
+		req.nm_modem |= QWZ_PLATFORM_CAP_PCIE_GLOBAL_RESET;
+
+	req.nm_modem |= QWZ_PLATFORM_CAP_PCIE_PME_D3COLD;
+
+	DNPRINTF(QWZ_D_QMI, "%s: qmi host cap request\n", __func__);
+
+	ret = qwz_qmi_send_request(sc, QMI_WLANFW_HOST_CAP_REQ_V01,
+			       QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
+			       qmi_wlanfw_host_cap_req_msg_v01_ei,
+			       &req, sizeof(req));
+	if (ret) {
+		printf("%s: failed to send host cap request: %d\n",
+		    sc->sc_dev.dv_xname, ret);
+		return -1;
+	}
+
+	/* Wait up to one second per wakeup for a successful response. */
+	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
+	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
+		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzfwhcap",
+		    SEC_TO_NSEC(1));
+		if (ret) {
+			printf("%s: fw host cap request timeout\n",
+			    sc->sc_dev.dv_xname);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Answer the firmware's memory request indication.  Waits for the
+ * indication (sc->sc_req_mem_ind, delivered elsewhere), validates the
+ * requested segments, allocates (or reuses) one contiguous DMA block
+ * large enough for all of them, chunks it into the requested segments
+ * and sends the "respond memory" message.  If allocation failed, an
+ * empty response is sent (which the firmware is expected to reject)
+ * and EBUSY is returned so the caller can retry.  Finally waits for
+ * the fw-mem-ready indication unless the device uses fixed memory.
+ * Returns 0 on success, EBUSY to request a retry, -1 on error.
+ */
+int
+qwz_qmi_mem_seg_send(struct qwz_softc *sc)
+{
+	struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
+	struct qmi_wlanfw_request_mem_ind_msg_v01 *ind;
+	uint32_t mem_seg_len;
+	const uint32_t mem_seg_len_max = 64; /* bump if needed by future fw */
+	uint16_t expected_result;
+	size_t total_size;
+	int i, ret;
+
+	sc->fwmem_ready = 0;
+
+	/* Wait for the firmware's memory request indication to arrive. */
+	while (sc->sc_req_mem_ind == NULL) {
+		ret = tsleep_nsec(&sc->sc_req_mem_ind, 0, "qwzfwmem",
+		    SEC_TO_NSEC(10));
+		if (ret) {
+			printf("%s: fw memory request timeout\n",
+			    sc->sc_dev.dv_xname);
+			return -1;
+		}
+	}
+
+	sc->expect_fwmem_req = 0;
+
+	/* Sanity-check the requested segments before allocating. */
+	ind = sc->sc_req_mem_ind;
+	mem_seg_len = le32toh(ind->mem_seg_len);
+	if (mem_seg_len > mem_seg_len_max) {
+		printf("%s: firmware requested too many memory segments: %u\n",
+		    sc->sc_dev.dv_xname, mem_seg_len);
+		free(sc->sc_req_mem_ind, M_DEVBUF, sizeof(*sc->sc_req_mem_ind));
+		sc->sc_req_mem_ind = NULL;
+		return -1;
+	}
+
+	total_size = 0;
+	for (i = 0; i < mem_seg_len; i++) {
+		if (ind->mem_seg[i].size == 0) {
+			printf("%s: firmware requested zero-sized "
+			    "memory segment %u\n", sc->sc_dev.dv_xname, i);
+			free(sc->sc_req_mem_ind, M_DEVBUF,
+			    sizeof(*sc->sc_req_mem_ind));
+			sc->sc_req_mem_ind = NULL;
+			return -1;
+		}
+		total_size += le32toh(ind->mem_seg[i].size);
+	}
+
+	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (req == NULL) {
+		printf("%s: failed to allocate respond memory request\n",
+		    sc->sc_dev.dv_xname);
+		free(sc->sc_req_mem_ind, M_DEVBUF, sizeof(*sc->sc_req_mem_ind));
+		sc->sc_req_mem_ind = NULL;
+		return -1;
+	}
+
+	if (total_size == 0) {
+		/* Should not happen. Send back an empty allocation. */
+		printf("%s: firmware has requested no memory\n",
+		    sc->sc_dev.dv_xname);
+		mem_seg_len = 0;
+	} else if (sc->fwmem == NULL || QWZ_DMA_LEN(sc->fwmem) < total_size) {
+		/* (Re-)allocate the single DMA block backing all segments. */
+		if (sc->fwmem != NULL)
+			qwz_dmamem_free(sc->sc_dmat, sc->fwmem);
+		sc->fwmem = qwz_dmamem_alloc(sc->sc_dmat, total_size, 65536);
+		if (sc->fwmem == NULL) {
+			printf("%s: failed to allocate %zu bytes of DMA "
+			    "memory for firmware\n", sc->sc_dev.dv_xname,
+			    total_size);
+			/* Send back an empty allocation. */
+			mem_seg_len = 0;
+		} else
+			DPRINTF("%s: allocated %zu bytes of DMA memory for "
+			    "firmware\n", sc->sc_dev.dv_xname, total_size);
+	}
+
+	/* Chunk DMA memory block into segments as requested by firmware. */
+	req->mem_seg_len = htole32(mem_seg_len);
+	if (sc->fwmem) {
+		uint64_t paddr = QWZ_DMA_DVA(sc->fwmem);
+
+		for (i = 0; i < mem_seg_len; i++) {
+			DPRINTF("%s: mem seg[%d] addr=%llx size=%u type=%u\n",
+			    __func__, i, paddr, le32toh(ind->mem_seg[i].size),
+			    le32toh(ind->mem_seg[i].type));
+			req->mem_seg[i].addr = htole64(paddr);
+			paddr += le32toh(ind->mem_seg[i].size);
+
+			/* Values in 'ind' are in little-endian format. */
+			req->mem_seg[i].size = ind->mem_seg[i].size;
+			req->mem_seg[i].type = ind->mem_seg[i].type;
+		}
+	}
+
+	free(ind, M_DEVBUF, sizeof(*ind));
+	sc->sc_req_mem_ind = NULL;
+
+	ret = qwz_qmi_send_request(sc, QMI_WLANFW_RESPOND_MEM_REQ_V01,
+			       QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
+			       qmi_wlanfw_respond_mem_req_msg_v01_ei,
+			       req, sizeof(*req));
+	free(req, M_DEVBUF, sizeof(*req));
+	if (ret) {
+		printf("%s: failed to send respond memory request: %d\n",
+		    sc->sc_dev.dv_xname, ret);
+		return -1;
+	}
+
+	/* An empty response is expected to be rejected by the firmware. */
+	if (mem_seg_len == 0) {
+		expected_result = QMI_RESULT_FAILURE_V01;
+		sc->qmi_resp.result = QMI_RESULT_SUCCESS_V01;
+	} else {
+		expected_result = QMI_RESULT_SUCCESS_V01;
+		sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
+	}
+	while (sc->qmi_resp.result != expected_result) {
+		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzfwrespmem",
+		    SEC_TO_NSEC(1));
+		if (ret) {
+			printf("%s: fw respond memory request timeout\n",
+			    sc->sc_dev.dv_xname);
+			return -1;
+		}
+	}
+
+	if (mem_seg_len == 0) {
+		sc->expect_fwmem_req = 1;
+		return EBUSY; /* retry */
+	}
+
+	if (!sc->hw_params.fixed_fw_mem) {
+		while (!sc->fwmem_ready) {
+			ret = tsleep_nsec(&sc->fwmem_ready, 0, "qwzfwrdy",
+			    SEC_TO_NSEC(10));
+			if (ret) {
+				printf("%s: fw memory ready timeout\n",
+				    sc->sc_dev.dv_xname);
+				return -1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Look up the board data file variant name via SMBIOS.
+ * Not implemented yet; report success so attach can proceed.
+ */
+int
+qwz_core_check_smbios(struct qwz_softc *sc)
+{
+	return 0; /* TODO */
+}
+
+/*
+ * On FDT platforms, read the board data file variant name from the
+ * device tree into sc->qmi_target.bdf_ext, where it later becomes
+ * part of the board name used to look up board-2.bin data.
+ */
+int
+qwz_core_check_dt(struct qwz_softc *sc)
+{
+#ifdef __HAVE_FDT
+	if (sc->sc_node == 0)
+		return 0;
+
+	/* Leave one byte so the variant string stays NUL-terminated. */
+	OF_getprop(sc->sc_node, "qcom,ath12k-calibration-variant",
+	    sc->qmi_target.bdf_ext, sizeof(sc->qmi_target.bdf_ext) - 1);
+#endif
+
+	return 0;
+}
+
+/*
+ * Ask the firmware for its capabilities (chip id/family, board id,
+ * soc id, firmware version and build id).  Blocks until the response
+ * arrives; the target info is stored in sc->qmi_target, presumably by
+ * the QMI response handler -- TODO confirm against the rx path.
+ * Afterwards, probe SMBIOS and the device tree for a BDF variant name.
+ * Returns 0 on success or an errno-style value on failure.
+ */
+int
+qwz_qmi_request_target_cap(struct qwz_softc *sc)
+{
+	struct qmi_wlanfw_cap_req_msg_v01 req;
+	int ret = 0;
+	int r;
+	char *fw_build_id;
+	int fw_build_id_mask_len;
+
+	memset(&req, 0, sizeof(req));
+
+	ret = qwz_qmi_send_request(sc, QMI_WLANFW_CAP_REQ_V01,
+	    QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
+	    qmi_wlanfw_cap_req_msg_v01_ei, &req, sizeof(req));
+	if (ret) {
+		printf("%s: failed to send qmi cap request: %d\n",
+		    sc->sc_dev.dv_xname, ret);
+		goto out;
+	}
+
+	/* Wait up to one second per wakeup for a successful response. */
+	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
+	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
+		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzfwcap",
+		    SEC_TO_NSEC(1));
+		if (ret) {
+			printf("%s: qmi cap request failed\n",
+			    sc->sc_dev.dv_xname);
+			return ret;
+		}
+	}
+
+	/* Strip the common build-id prefix for more readable output. */
+	fw_build_id = sc->qmi_target.fw_build_id;
+	fw_build_id_mask_len = strlen(QWZ_FW_BUILD_ID_MASK);
+	if (!strncmp(fw_build_id, QWZ_FW_BUILD_ID_MASK, fw_build_id_mask_len))
+		fw_build_id = fw_build_id + fw_build_id_mask_len;
+
+	DPRINTF("%s: chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
+	    sc->sc_dev.dv_xname,
+	    sc->qmi_target.chip_id, sc->qmi_target.chip_family,
+	    sc->qmi_target.board_id, sc->qmi_target.soc_id);
+
+	DPRINTF("%s: fw_version 0x%x fw_build_timestamp %s fw_build_id %s\n",
+	    sc->sc_dev.dv_xname, sc->qmi_target.fw_version,
+	    sc->qmi_target.fw_build_timestamp, fw_build_id);
+
+	/* A missing BDF variant name is not fatal. */
+	r = qwz_core_check_smbios(sc);
+	if (r)
+		DPRINTF("%s: SMBIOS bdf variant name not set\n", __func__);
+
+	r = qwz_core_check_dt(sc);
+	if (r)
+		DPRINTF("%s: DT bdf variant name not set\n", __func__);
+
+out:
+	return ret;
+}
+
+/*
+ * Request device info from firmware.  Only hybrid bus devices use
+ * this QMI message; for all other devices there is nothing to do.
+ * Not implemented yet, so hybrid bus hardware fails here.
+ */
+int
+qwz_qmi_request_device_info(struct qwz_softc *sc)
+{
+	/* device info message req is only sent for hybrid bus devices */
+	if (sc->hw_params.hybrid_bus_type) {
+		/* TODO */
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Build the board name string used to look up board data in
+ * board-2.bin.  "with_variant" appends the ",variant=..." suffix from
+ * the BDF extension; "bus_type_mode" produces the short bus-only name.
+ * The format depends on the device's board-data search strategy.
+ * Always returns 0.
+ */
+int
+_qwz_core_create_board_name(struct qwz_softc *sc, char *name,
+    size_t name_len, int with_variant, int bus_type_mode)
+{
+	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
+	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
+
+	if (with_variant && sc->qmi_target.bdf_ext[0] != '\0')
+		snprintf(variant, sizeof(variant), ",variant=%s",
+		    sc->qmi_target.bdf_ext);
+
+	switch (sc->id.bdf_search) {
+	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
+		if (bus_type_mode)
+			snprintf(name, name_len, "bus=%s", sc->sc_bus_str);
+		else
+			snprintf(name, name_len,
+			    "bus=%s,vendor=%04x,device=%04x,"
+			    "subsystem-vendor=%04x,subsystem-device=%04x,"
+			    "qmi-chip-id=%d,qmi-board-id=%d%s",
+			    sc->sc_bus_str, sc->id.vendor, sc->id.device,
+			    sc->id.subsystem_vendor, sc->id.subsystem_device,
+			    sc->qmi_target.chip_id, sc->qmi_target.board_id,
+			    variant);
+		break;
+	default:
+		snprintf(name, name_len,
+		    "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+		    sc->sc_bus_str, sc->qmi_target.chip_id,
+		    sc->qmi_target.board_id, variant);
+		break;
+	}
+
+	DPRINTF("%s: using board name '%s'\n", __func__, name);
+
+	return 0;
+}
+
+/* Create the primary board name, variant suffix included. */
+int
+qwz_core_create_board_name(struct qwz_softc *sc, char *name, size_t name_len)
+{
+	return _qwz_core_create_board_name(sc, name, name_len, 1, 0);
+}
+
+/* Create the fallback board name, without the variant suffix. */
+int
+qwz_core_create_fallback_board_name(struct qwz_softc *sc, char *name,
+    size_t name_len)
+{
+	return _qwz_core_create_board_name(sc, name, name_len, 0, 0);
+}
+
+/* Create the short "bus=..." board name used as a last resort. */
+int
+qwz_core_create_bus_type_board_name(struct qwz_softc *sc, char *name,
+    size_t name_len)
+{
+	return _qwz_core_create_board_name(sc, name, name_len, 0, 1);
+}
+
+/* TLV header preceding each information element in firmware images. */
+struct ath12k_fw_ie {
+	uint32_t id;	/* element type, little-endian */
+	uint32_t len;	/* payload length in bytes, little-endian */
+	uint8_t data[];	/* payload, padded to 4-byte alignment */
+};
+
+/* Sub-IE ids found inside an ATH12K_BD_IE_BOARD element. */
+enum ath12k_bd_ie_board_type {
+	ATH12K_BD_IE_BOARD_NAME = 0,
+	ATH12K_BD_IE_BOARD_DATA = 1,
+};
+
+/* Sub-IE ids found inside an ATH12K_BD_IE_REGDB element. */
+enum ath12k_bd_ie_regdb_type {
+	ATH12K_BD_IE_REGDB_NAME = 0,
+	ATH12K_BD_IE_REGDB_DATA = 1,
+};
+
+enum ath12k_bd_ie_type {
+	/* contains sub IEs of enum ath12k_bd_ie_board_type */
+	ATH12K_BD_IE_BOARD = 0,
+	/* contains sub IEs of enum ath12k_bd_ie_regdb_type */
+	ATH12K_BD_IE_REGDB = 1,
+};
+
+/* Map a board-data IE type to a human-readable string for messages. */
+static inline const char *
+qwz_bd_ie_type_str(enum ath12k_bd_ie_type type)
+{
+	if (type == ATH12K_BD_IE_BOARD)
+		return "board data";
+	if (type == ATH12K_BD_IE_REGDB)
+		return "regdb data";
+
+	return "unknown";
+}
+
+/*
+ * Scan a board-2.bin style sub-IE list for the data blob matching
+ * "boardname".  "buf" holds a sequence of sub-IEs of kind "ie_id"
+ * (board or regdb): a name IE ("name_id") announces which board the
+ * following data IE ("data_id") belongs to.  On success,
+ * *boardfw/*boardfw_len point into "buf" -- no copy is made, so the
+ * caller must keep the underlying firmware image alive.
+ *
+ * Returns 0 when a match was found, EINVAL on a malformed IE list,
+ * ENOENT when no entry matches.
+ */
+int
+qwz_core_parse_bd_ie_board(struct qwz_softc *sc,
+    const u_char **boardfw, size_t *boardfw_len,
+    const void *buf, size_t buf_len,
+    const char *boardname, int ie_id, int name_id, int data_id)
+{
+	const struct ath12k_fw_ie *hdr;
+	int name_match_found = 0;
+	int ret, board_ie_id;
+	size_t board_ie_len;
+	const void *board_ie_data;
+
+	*boardfw = NULL;
+	*boardfw_len = 0;
+
+	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
+	while (buf_len > sizeof(struct ath12k_fw_ie)) {
+		hdr = buf;
+		board_ie_id = le32toh(hdr->id);
+		board_ie_len = le32toh(hdr->len);
+		board_ie_data = hdr->data;
+
+		buf_len -= sizeof(*hdr);
+		buf += sizeof(*hdr);
+
+		/* Payloads are padded to 4-byte alignment. */
+		if (buf_len < roundup(board_ie_len, 4)) {
+			printf("%s: invalid %s length: %zu < %zu\n",
+			    sc->sc_dev.dv_xname, qwz_bd_ie_type_str(ie_id),
+			    buf_len, roundup(board_ie_len, 4));
+			return EINVAL;
+		}
+
+		if (board_ie_id == name_id) {
+			/* Name IE: does it announce our board? */
+			if (board_ie_len != strlen(boardname))
+				goto next;
+
+			ret = memcmp(board_ie_data, boardname, board_ie_len);
+			if (ret)
+				goto next;
+
+			name_match_found = 1;
+			DPRINTF("%s: found match %s for name '%s'\n", __func__,
+			    qwz_bd_ie_type_str(ie_id), boardname);
+		} else if (board_ie_id == data_id) {
+			if (!name_match_found)
+				/* no match found */
+				goto next;
+
+			DPRINTF("%s: found %s for '%s'\n", __func__,
+			    qwz_bd_ie_type_str(ie_id), boardname);
+
+			*boardfw = board_ie_data;
+			*boardfw_len = board_ie_len;
+			return 0;
+		} else {
+			printf("%s: unknown %s id found: %d\n", __func__,
+			    qwz_bd_ie_type_str(ie_id), board_ie_id);
+		}
+next:
+		/* jump over the padding */
+		board_ie_len = roundup(board_ie_len, 4);
+
+		buf_len -= board_ie_len;
+		buf += board_ie_len;
+	}
+
+	/* no match found */
+	return ENOENT;
+}
+
+/*
+ * Search a board-2.bin container image (fwdata/fwdata_len) for the
+ * section matching 'boardname'.  The image starts with the NUL-padded
+ * ATH12K_BOARD_MAGIC string, followed by id/length-prefixed IEs whose
+ * payloads are padded to 4-byte alignment.  IEs with id == ie_id_match
+ * are handed to qwz_core_parse_bd_ie_board() to find the name/data
+ * sub-IE pair.  On success *boardfw/*boardfw_len point into fwdata
+ * (no copy is made).
+ * Returns 0, ENOENT if no section matches, or EINVAL on bad input.
+ */
+int
+qwz_core_fetch_board_data_api_n(struct qwz_softc *sc,
+    const u_char **boardfw, size_t *boardfw_len,
+    u_char *fwdata, size_t fwdata_len,
+    const char *boardname, int ie_id_match, int name_id, int data_id)
+{
+	size_t len, magic_len;
+	const uint8_t *data;
+	char *filename;
+	size_t ie_len;
+	struct ath12k_fw_ie *hdr;
+	int ret, ie_id;
+
+	/* Only used in diagnostic messages. */
+	filename = ATH12K_BOARD_API2_FILE;
+
+	*boardfw = NULL;
+	*boardfw_len = 0;
+
+	data = fwdata;
+	len = fwdata_len;
+
+	/* magic has extra null byte padded */
+	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
+	if (len < magic_len) {
+		printf("%s: failed to find magic value in %s, "
+		    "file too short: %zu\n",
+		    sc->sc_dev.dv_xname, filename, len);
+		return EINVAL;
+	}
+
+	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
+		DPRINTF("%s: found invalid board magic\n", sc->sc_dev.dv_xname);
+		return EINVAL;
+	}
+
+	/* magic is padded to 4 bytes */
+	magic_len = roundup(magic_len, 4);
+	if (len < magic_len) {
+		printf("%s: %s too small to contain board data, len: %zu\n",
+		    sc->sc_dev.dv_xname, filename, len);
+		return EINVAL;
+	}
+
+	data += magic_len;
+	len -= magic_len;
+
+	while (len > sizeof(struct ath12k_fw_ie)) {
+		hdr = (struct ath12k_fw_ie *)data;
+		ie_id = le32toh(hdr->id);
+		ie_len = le32toh(hdr->len);
+
+		/* Step over the header; hdr->data is the IE payload. */
+		len -= sizeof(*hdr);
+		data = hdr->data;
+
+		if (len < roundup(ie_len, 4)) {
+			printf("%s: invalid length for board ie_id %d "
+			    "ie_len %zu len %zu\n",
+			    sc->sc_dev.dv_xname, ie_id, ie_len, len);
+			return EINVAL;
+		}
+
+		if (ie_id == ie_id_match) {
+			ret = qwz_core_parse_bd_ie_board(sc,
+			    boardfw, boardfw_len, data, ie_len,
+			    boardname, ie_id_match, name_id, data_id);
+			if (ret == ENOENT)
+				/* no match found, continue */
+				goto next;
+			else if (ret)
+				/* there was an error, bail out */
+				return ret;
+			/* either found or error, so stop searching */
+			goto out;
+		}
+next:
+		/* jump over the padding */
+		ie_len = roundup(ie_len, 4);
+
+		len -= ie_len;
+		data += ie_len;
+	}
+
+out:
+	/* Reached on a successful parse or after scanning every IE. */
+	if (!*boardfw || !*boardfw_len) {
+		printf("%s: failed to fetch %s for %s from %s\n",
+		    __func__, qwz_bd_ie_type_str(ie_id_match),
+		    boardname, filename);
+		return ENOENT;
+	}
+
+	return 0;
+}
+
+/*
+ * Load the board firmware container file 'filename' via loadfirmware(9)
+ * and locate the board data section matching this device's board name.
+ * On success *data/*len hold the whole file (the caller must free it
+ * with free(*data, M_DEVBUF, *len)) and *boardfw/*boardfw_len point
+ * into that buffer.  On failure nothing needs to be freed.
+ */
+int
+qwz_core_fetch_bdf(struct qwz_softc *sc, u_char **data, size_t *len,
+    const u_char **boardfw, size_t *boardfw_len, const char *filename)
+{
+	char path[PATH_MAX];
+	char boardname[200];
+	int ret;
+
+	ret = snprintf(path, sizeof(path), "%s-%s-%s",
+	    ATH12K_FW_DIR, sc->hw_params.fw.dir, filename);
+	if (ret < 0 || ret >= sizeof(path))
+		return ENOSPC;
+
+	ret = qwz_core_create_board_name(sc, boardname, sizeof(boardname));
+	if (ret) {
+		DPRINTF("%s: failed to create board name: %d\n",
+		    sc->sc_dev.dv_xname, ret);
+		return ret;
+	}
+
+	ret = loadfirmware(path, data, len);
+	if (ret) {
+		printf("%s: could not read %s (error %d)\n",
+		    sc->sc_dev.dv_xname, path, ret);
+		return ret;
+	}
+
+	ret = qwz_core_fetch_board_data_api_n(sc, boardfw, boardfw_len,
+	    *data, *len, boardname, ATH12K_BD_IE_BOARD,
+	    ATH12K_BD_IE_BOARD_NAME, ATH12K_BD_IE_BOARD_DATA);
+	if (ret) {
+		DPRINTF("%s: failed to fetch board data for %s from %s\n",
+		    sc->sc_dev.dv_xname, boardname, path);
+		/* Don't leak the firmware image on the error path. */
+		free(*data, M_DEVBUF, *len);
+		*data = NULL;
+		*len = 0;
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Download an image (board data, regdb or cal data) to target memory
+ * through a sequence of QMI BDF download requests, at most
+ * QMI_WLANFW_MAX_DATA_SIZE_V01 bytes per segment, sleeping for the
+ * firmware's QMI response after each one.  Returns 0 on success or a
+ * (positive) errno; an empty image yields EINVAL.
+ */
+int
+qwz_qmi_load_file_target_mem(struct qwz_softc *sc, const u_char *data,
+    size_t len, int type)
+{
+	struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
+	const uint8_t *p = data;
+#ifdef notyet
+	void *bdf_addr = NULL;
+#endif
+	int ret = EINVAL; /* empty fw image */
+	uint32_t remaining = len;
+
+	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!req) {
+		printf("%s: failed to allocate bdf download request\n",
+		    sc->sc_dev.dv_xname);
+		return ENOMEM;
+	}
+
+	if (sc->hw_params.fixed_bdf_addr) {
+#ifdef notyet
+		bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size);
+		if (!bdf_addr) {
+			ath12k_warn(ab, "qmi ioremap error for bdf_addr\n");
+			ret = -EIO;
+			goto err_free_req;
+		}
+#else
+		printf("%s: fixed bdf address not yet supported\n",
+		    sc->sc_dev.dv_xname);
+		ret = EIO;
+		goto err_free_req;
+#endif
+	}
+
+	while (remaining) {
+		req->valid = 1;
+		req->file_id_valid = 1;
+		req->file_id = sc->qmi_target.board_id;
+		req->total_size_valid = 1;
+		req->total_size = remaining;
+		req->seg_id_valid = 1;
+		req->data_valid = 1;
+		req->bdf_type = type;
+		req->bdf_type_valid = 1;
+		req->end_valid = 1;
+		req->end = 0;
+
+		if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
+			req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
+		} else {
+			req->data_len = remaining;
+			req->end = 1;
+		}
+
+		/* Fixed-address and EEPROM downloads carry no payload. */
+		if (sc->hw_params.fixed_bdf_addr ||
+		    type == ATH12K_QMI_FILE_TYPE_EEPROM) {
+			req->data_valid = 0;
+			req->end = 1;
+			req->data_len = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE;
+		} else {
+			memcpy(req->data, p, req->data_len);
+		}
+#ifdef notyet
+		if (ab->hw_params.fixed_bdf_addr) {
+			if (type == ATH12K_QMI_FILE_TYPE_CALDATA)
+				bdf_addr += ab->hw_params.fw.cal_offset;
+
+			memcpy_toio(bdf_addr, p, len);
+		}
+#endif
+		DPRINTF("%s: bdf download req fixed addr type %d\n",
+		    __func__, type);
+
+		ret = qwz_qmi_send_request(sc,
+		    QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
+		    QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
+		    qmi_wlanfw_bdf_download_req_msg_v01_ei,
+		    req, sizeof(*req));
+		if (ret) {
+			printf("%s: failed to send bdf download request\n",
+			    sc->sc_dev.dv_xname);
+			goto err_iounmap;
+		}
+
+		/* Wait for the QMI response handler to flag success. */
+		sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
+		while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
+			ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzbdf",
+			    SEC_TO_NSEC(1));
+			if (ret) {
+				printf("%s: bdf download request timeout\n",
+				    sc->sc_dev.dv_xname);
+				goto err_iounmap;
+			}
+		}
+
+		if (sc->hw_params.fixed_bdf_addr ||
+		    type == ATH12K_QMI_FILE_TYPE_EEPROM) {
+			remaining = 0;
+		} else {
+			remaining -= req->data_len;
+			p += req->data_len;
+			req->seg_id++;
+			DPRINTF("%s: bdf download request remaining %i\n",
+			    __func__, remaining);
+		}
+	}
+
+err_iounmap:
+#ifdef notyet
+	if (ab->hw_params.fixed_bdf_addr)
+		iounmap(bdf_addr);
+#endif
+err_free_req:
+	free(req, M_DEVBUF, sizeof(*req));
+
+	return ret;
+}
+
+/* ELF magic, used to recognize ELF-format board files. */
+#define QWZ_ELFMAG	"\177ELF"
+#define QWZ_SELFMAG	4
+
+/*
+ * Download a board data file to the target (regdb != 0: regulatory
+ * database, otherwise board data).  The file section is fetched via
+ * qwz_core_fetch_bdf() on first use and cached in sc->fw_img; the BDF
+ * type (ELF/BIN/REGDB) is derived from the image contents.
+ * Returns 0 on success or a (positive) errno.
+ */
+int
+qwz_qmi_load_bdf_qmi(struct qwz_softc *sc, int regdb)
+{
+	u_char *data = NULL;
+	const u_char *boardfw;
+	size_t len = 0, boardfw_len;
+	uint32_t fw_size;
+	int ret = 0, bdf_type;
+#ifdef notyet
+	const uint8_t *tmp;
+	uint32_t file_type;
+#endif
+	int fw_idx = regdb ? QWZ_FW_REGDB : QWZ_FW_BOARD;
+
+	if (sc->fw_img[fw_idx].data) {
+		/* Use the previously cached copy. */
+		boardfw = sc->fw_img[fw_idx].data;
+		boardfw_len = sc->fw_img[fw_idx].size;
+	} else {
+		ret = qwz_core_fetch_bdf(sc, &data, &len,
+		    &boardfw, &boardfw_len,
+		    regdb ? ATH12K_REGDB_FILE : ATH12K_BOARD_API2_FILE);
+		if (ret)
+			return ret;
+
+		/* Cache the section for later reloads; failing to cache
+		 * is harmless, the file is simply re-fetched next time. */
+		sc->fw_img[fw_idx].data = malloc(boardfw_len, M_DEVBUF,
+		    M_NOWAIT);
+		if (sc->fw_img[fw_idx].data) {
+			memcpy(sc->fw_img[fw_idx].data, boardfw, boardfw_len);
+			sc->fw_img[fw_idx].size = boardfw_len;
+		}
+	}
+
+	if (regdb)
+		bdf_type = ATH12K_QMI_BDF_TYPE_REGDB;
+	else if (boardfw_len >= QWZ_SELFMAG &&
+	    memcmp(boardfw, QWZ_ELFMAG, QWZ_SELFMAG) == 0)
+		bdf_type = ATH12K_QMI_BDF_TYPE_ELF;
+	else
+		bdf_type = ATH12K_QMI_BDF_TYPE_BIN;
+
+	DPRINTF("%s: bdf_type %d\n", __func__, bdf_type);
+
+	fw_size = MIN(sc->hw_params.fw.board_size, boardfw_len);
+
+	ret = qwz_qmi_load_file_target_mem(sc, boardfw, fw_size, bdf_type);
+	if (ret) {
+		printf("%s: failed to load bdf file\n", __func__);
+		goto out;
+	}
+
+	/* No cal data for ELF/regdb images; the cal data download path
+	 * below has not been ported yet. */
+	if (bdf_type == ATH12K_QMI_BDF_TYPE_ELF || bdf_type == ATH12K_QMI_BDF_TYPE_REGDB)
+		goto out;
+#ifdef notyet
+	if (ab->qmi.target.eeprom_caldata) {
+		file_type = ATH12K_QMI_FILE_TYPE_EEPROM;
+		tmp = filename;
+		fw_size = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE;
+	} else {
+		file_type = ATH12K_QMI_FILE_TYPE_CALDATA;
+
+		/* cal-<bus>-<id>.bin */
+		snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+			 ath12k_bus_str(ab->hif.bus), dev_name(dev));
+		fw_entry = ath12k_core_firmware_request(ab, filename);
+		if (!IS_ERR(fw_entry))
+			goto success;
+
+		fw_entry = ath12k_core_firmware_request(ab, ATH12K_DEFAULT_CAL_FILE);
+		if (IS_ERR(fw_entry)) {
+			/* Caldata may not be present during first time calibration in
+			 * factory hence allow to boot without loading caldata in ftm mode
+			 */
+			if (ath12k_ftm_mode) {
+				ath12k_info(ab,
+					    "Booting without cal data file in factory test mode\n");
+				return 0;
+			}
+			ret = PTR_ERR(fw_entry);
+			ath12k_warn(ab,
+				    "qmi failed to load CAL data file:%s\n",
+				    filename);
+			goto out;
+		}
+success:
+		fw_size = MIN(ab->hw_params.fw.board_size, fw_entry->size);
+		tmp = fw_entry->data;
+	}
+
+	ret = ath12k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
+	if (ret < 0) {
+		ath12k_warn(ab, "qmi failed to load caldata\n");
+		goto out_qmi_cal;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "caldata type: %u\n", file_type);
+
+out_qmi_cal:
+	if (!ab->qmi.target.eeprom_caldata)
+		release_firmware(fw_entry);
+#endif
+out:
+	/* Frees the loadfirmware(9) buffer; no-op on the cached path
+	 * where data is NULL and len is 0. */
+	free(data, M_DEVBUF, len);
+	if (ret == 0)
+		DPRINTF("%s: BDF download sequence completed\n", __func__);
+
+	return ret;
+}
+
+/*
+ * QMI firmware-event step: query target capabilities and device info,
+ * then download the regulatory database (if supported) and the board
+ * data file to the target.  Returns 0 on success or a (positive) errno.
+ *
+ * Note: the helpers called here return positive errnos (unlike the
+ * Linux driver this is ported from), so errors must be tested with
+ * "ret != 0" rather than "ret < 0".
+ */
+int
+qwz_qmi_event_load_bdf(struct qwz_softc *sc)
+{
+	int ret;
+
+	ret = qwz_qmi_request_target_cap(sc);
+	if (ret) {
+		printf("%s: failed to request qmi target capabilities: %d\n",
+		    sc->sc_dev.dv_xname, ret);
+		return ret;
+	}
+
+	ret = qwz_qmi_request_device_info(sc);
+	if (ret) {
+		printf("%s: failed to request qmi device info: %d\n",
+		    sc->sc_dev.dv_xname, ret);
+		return ret;
+	}
+
+	/* Best-effort: a missing regdb file is not fatal. */
+	if (sc->hw_params.supports_regdb)
+		qwz_qmi_load_bdf_qmi(sc, 1);
+
+	ret = qwz_qmi_load_bdf_qmi(sc, 0);
+	if (ret) {
+		printf("%s: failed to load board data file: %d\n",
+		    sc->sc_dev.dv_xname, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Load the M3 firmware image (via loadfirmware(9) on first use, cached
+ * in sc->fw_img afterwards) and copy it into DMA memory for the target.
+ * Returns 0 on success or a (positive) errno.
+ */
+int
+qwz_qmi_m3_load(struct qwz_softc *sc)
+{
+	u_char *data;
+	size_t len;
+	char path[PATH_MAX];
+	int ret;
+
+	if (sc->fw_img[QWZ_FW_M3].data) {
+		/* Use the previously loaded image. */
+		data = sc->fw_img[QWZ_FW_M3].data;
+		len = sc->fw_img[QWZ_FW_M3].size;
+	} else {
+		ret = snprintf(path, sizeof(path), "%s-%s-%s",
+		    ATH12K_FW_DIR, sc->hw_params.fw.dir, ATH12K_M3_FILE);
+		if (ret < 0 || ret >= sizeof(path))
+			return ENOSPC;
+
+		ret = loadfirmware(path, &data, &len);
+		if (ret) {
+			printf("%s: could not read %s (error %d)\n",
+			    sc->sc_dev.dv_xname, path, ret);
+			return ret;
+		}
+
+		/* Cache the image; sc->fw_img owns the buffer from here. */
+		sc->fw_img[QWZ_FW_M3].data = data;
+		sc->fw_img[QWZ_FW_M3].size = len;
+	}
+
+	/* (Re-)allocate DMA memory if missing or too small. */
+	if (sc->m3_mem == NULL || QWZ_DMA_LEN(sc->m3_mem) < len) {
+		if (sc->m3_mem)
+			qwz_dmamem_free(sc->sc_dmat, sc->m3_mem);
+		sc->m3_mem = qwz_dmamem_alloc(sc->sc_dmat, len, 65536);
+		if (sc->m3_mem == NULL) {
+			printf("%s: failed to allocate %zu bytes of DMA "
+			    "memory for M3 firmware\n", sc->sc_dev.dv_xname,
+			    len);
+			return ENOMEM;
+		}
+	}
+
+	memcpy(QWZ_DMA_KVA(sc->m3_mem), data, len);
+	return 0;
+}
+
+/*
+ * Send the QMI M3 firmware information message.  If the hardware needs
+ * an M3 image, load it into DMA memory first and report its address and
+ * size; otherwise report none.  Sleeps for the firmware's response.
+ * Returns 0 on success or a (positive) errno.
+ */
+int
+qwz_qmi_wlanfw_m3_info_send(struct qwz_softc *sc)
+{
+	struct qmi_wlanfw_m3_info_req_msg_v01 req;
+	int ret = 0;
+	uint64_t paddr;
+	uint32_t size;
+
+	memset(&req, 0, sizeof(req));
+
+	if (sc->hw_params.m3_fw_support) {
+		ret = qwz_qmi_m3_load(sc);
+		if (ret) {
+			printf("%s: failed to load m3 firmware: %d\n",
+			    sc->sc_dev.dv_xname, ret);
+			return ret;
+		}
+
+		paddr = QWZ_DMA_DVA(sc->m3_mem);
+		size = QWZ_DMA_LEN(sc->m3_mem);
+		req.addr = htole64(paddr);
+		req.size = htole32(size);
+	} else {
+		req.addr = 0;
+		req.size = 0;
+	}
+
+	ret = qwz_qmi_send_request(sc, QMI_WLANFW_M3_INFO_REQ_V01,
+	    QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+	    qmi_wlanfw_m3_info_req_msg_v01_ei, &req, sizeof(req));
+	if (ret) {
+		printf("%s: failed to send m3 information request: %d\n",
+		    sc->sc_dev.dv_xname, ret);
+		return ret;
+	}
+
+	/* Wait for the QMI response handler to flag success. */
+	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
+	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
+		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzfwm3",
+		    SEC_TO_NSEC(1));
+		if (ret) {
+			printf("%s: m3 information request timeout\n",
+			    sc->sc_dev.dv_xname);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* Debug helper: dump SRNG statistics; not yet implemented in qwz(4). */
+void
+qwz_hal_dump_srng_stats(struct qwz_softc *sc)
+{
+	DPRINTF("%s not implemented\n", __func__);
+}
+
+/*
+ * Return the size in bytes of one descriptor entry for the given SRNG
+ * ring type; the HAL config stores entry sizes in 32-bit words.
+ */
+uint16_t
+qwz_hal_srng_get_entrysize(struct qwz_softc *sc, uint32_t ring_type)
+{
+	const struct hal_srng_config *cfg;
+
+	KASSERT(ring_type < HAL_MAX_RING_TYPES);
+
+	cfg = &sc->hal.srng_config[ring_type];
+	return (cfg->entry_size << 2);
+}
+
+/*
+ * Return the maximum number of descriptor entries supported by the
+ * given SRNG ring type (maximum ring size divided by entry size).
+ */
+uint32_t
+qwz_hal_srng_get_max_entries(struct qwz_softc *sc, uint32_t ring_type)
+{
+	const struct hal_srng_config *cfg;
+
+	KASSERT(ring_type < HAL_MAX_RING_TYPES);
+
+	cfg = &sc->hal.srng_config[ring_type];
+	return (cfg->max_size / cfg->entry_size);
+}
+
+/*
+ * Pop the next descriptor from a destination ring: return the entry at
+ * the tail pointer and advance the tail, wrapping at the ring end.
+ * Returns NULL when the ring is empty (tail caught up with the cached
+ * head).  Caller must hold the srng lock.
+ */
+uint32_t *
+qwz_hal_srng_dst_get_next_entry(struct qwz_softc *sc, struct hal_srng *srng)
+{
+	uint32_t *desc;
+#ifdef notyet
+	lockdep_assert_held(&srng->lock);
+#endif
+	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
+		return NULL;
+
+	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
+
+	srng->u.dst_ring.tp += srng->entry_size;
+
+	/* wrap around to start of ring*/
+	if (srng->u.dst_ring.tp == srng->ring_size)
+		srng->u.dst_ring.tp = 0;
+#ifdef notyet
+	/* Try to prefetch the next descriptor in the ring */
+	if (srng->flags & HAL_SRNG_FLAGS_CACHED)
+		ath12k_hal_srng_prefetch_desc(ab, srng);
+#endif
+	return desc;
+}
+
+/*
+ * Return the number of entries available to reap on a destination
+ * ring.  With sync_hw_ptr set, re-read the hardware head pointer and
+ * refresh the cached copy first.  Caller must hold the srng lock.
+ */
+int
+qwz_hal_srng_dst_num_free(struct qwz_softc *sc, struct hal_srng *srng,
+    int sync_hw_ptr)
+{
+	uint32_t head, tail;
+#ifdef notyet
+	lockdep_assert_held(&srng->lock);
+#endif
+	tail = srng->u.dst_ring.tp;
+
+	if (sync_hw_ptr) {
+		head = *srng->u.dst_ring.hp_addr;
+		srng->u.dst_ring.cached_hp = head;
+	} else
+		head = srng->u.dst_ring.cached_hp;
+
+	if (head >= tail)
+		return (head - tail) / srng->entry_size;
+
+	/* Head has wrapped past the end of the ring. */
+	return (srng->ring_size - tail + head) / srng->entry_size;
+}
+
+/*
+ * Advance the source ring head over an entry that was already reaped
+ * and return it, or NULL if the head has caught up with the reap head.
+ * Caller must hold the srng lock.
+ */
+uint32_t *
+qwz_hal_srng_src_get_next_reaped(struct qwz_softc *sc, struct hal_srng *srng)
+{
+	uint32_t *desc;
+#ifdef notyet
+	lockdep_assert_held(&srng->lock);
+#endif
+	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
+		return NULL;
+
+	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
+			      srng->ring_size;
+
+	return desc;
+}
+
+/*
+ * Return the entry at the source ring's head without advancing it, or
+ * NULL if the ring is full (advancing would reach the cached tail).
+ * Caller must hold the srng lock.
+ */
+uint32_t *
+qwz_hal_srng_src_peek(struct qwz_softc *sc, struct hal_srng *srng)
+{
+#ifdef notyet
+	lockdep_assert_held(&srng->lock);
+#endif
+	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
+	    srng->u.src_ring.cached_tp)
+		return NULL;
+
+	return srng->ring_base_vaddr + srng->u.src_ring.hp;
+}
+
+/* Report the MSI doorbell address programmed for this device. */
+void
+qwz_get_msi_address(struct qwz_softc *sc, uint32_t *addr_lo,
+    uint32_t *addr_hi)
+{
+	*addr_hi = sc->msi_addr_hi;
+	*addr_lo = sc->msi_addr_lo;
+}
+
+/*
+ * Map a ring number to the external interrupt group whose mask bit
+ * covers it.  Returns the group index, or -1 if no group claims it.
+ */
+int
+qwz_dp_srng_find_ring_in_mask(int ring_num, const uint8_t *grp_mask)
+{
+	const uint8_t bit = 1 << ring_num;
+	int grp;
+
+	for (grp = 0; grp < ATH12K_EXT_IRQ_GRP_NUM_MAX; grp++) {
+		if (grp_mask[grp] & bit)
+			return grp;
+	}
+
+	return -1;
+}
+
+/*
+ * Map a (ring type, ring number) pair to the external interrupt group
+ * servicing it, using the per-ring-type masks from hw_params.
+ * Returns the group index, or -1 for ring types without an ext group.
+ */
+int
+qwz_dp_srng_calculate_msi_group(struct qwz_softc *sc, enum hal_ring_type type,
+    int ring_num)
+{
+	const uint8_t *grp_mask;
+
+	switch (type) {
+	case HAL_WBM2SW_RELEASE:
+		/* The RX release ring uses its own mask under index 0. */
+		if (ring_num == DP_RX_RELEASE_RING_NUM) {
+			grp_mask = &sc->hw_params.ring_mask->rx_wbm_rel[0];
+			ring_num = 0;
+		} else {
+			grp_mask = &sc->hw_params.ring_mask->tx[0];
+		}
+		break;
+	case HAL_REO_EXCEPTION:
+		grp_mask = &sc->hw_params.ring_mask->rx_err[0];
+		break;
+	case HAL_REO_DST:
+		grp_mask = &sc->hw_params.ring_mask->rx[0];
+		break;
+	case HAL_REO_STATUS:
+		grp_mask = &sc->hw_params.ring_mask->reo_status[0];
+		break;
+	case HAL_RXDMA_MONITOR_STATUS:
+	case HAL_RXDMA_MONITOR_DST:
+		grp_mask = &sc->hw_params.ring_mask->rx_mon_status[0];
+		break;
+	case HAL_RXDMA_DST:
+		grp_mask = &sc->hw_params.ring_mask->rxdma2host[0];
+		break;
+	case HAL_RXDMA_BUF:
+		grp_mask = &sc->hw_params.ring_mask->host2rxdma[0];
+		break;
+	case HAL_RXDMA_MONITOR_BUF:
+	case HAL_TCL_DATA:
+	case HAL_TCL_CMD:
+	case HAL_REO_CMD:
+	case HAL_SW2WBM_RELEASE:
+	case HAL_WBM_IDLE_LINK:
+	case HAL_TCL_STATUS:
+	case HAL_REO_REINJECT:
+	case HAL_CE_SRC:
+	case HAL_CE_DST:
+	case HAL_CE_DST_STATUS:
+	default:
+		/* These ring types are not serviced by an ext irq group. */
+		return -1;
+	}
+
+	return qwz_dp_srng_find_ring_in_mask(ring_num, grp_mask);
+}
+
+/*
+ * Fill in the MSI address/data fields of an SRNG's ring parameters so
+ * the hardware signals the right DP interrupt group.  Clears the MSI
+ * fields if the ring is not part of any external interrupt group.
+ */
+void
+qwz_dp_srng_msi_setup(struct qwz_softc *sc, struct hal_srng_params *ring_params,
+    enum hal_ring_type type, int ring_num)
+{
+	int msi_group_number;
+	uint32_t msi_data_start = 0;
+	uint32_t msi_data_count = 1;
+	uint32_t msi_irq_start = 0;
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+	int ret;
+
+	ret = sc->ops.get_user_msi_vector(sc, "DP",
+	    &msi_data_count, &msi_data_start, &msi_irq_start);
+	if (ret)
+		return;
+
+	msi_group_number = qwz_dp_srng_calculate_msi_group(sc, type,
+	    ring_num);
+	if (msi_group_number < 0) {
+		DPRINTF("%s: ring not part of an ext_group; ring_type %d, "
+		    "ring_num %d\n", __func__, type, ring_num);
+		ring_params->msi_addr = 0;
+		ring_params->msi_data = 0;
+		return;
+	}
+
+	qwz_get_msi_address(sc, &addr_lo, &addr_hi);
+
+	ring_params->msi_addr = addr_lo;
+	ring_params->msi_addr |= (((uint64_t)addr_hi) << 32);
+	/* Groups beyond the vector count share MSI data values. */
+	ring_params->msi_data = (msi_group_number % msi_data_count) +
+	    msi_data_start;
+	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
+
+/*
+ * Allocate DMA memory for a DP SRNG ring and register it with the HAL.
+ * num_entries is clamped to the ring type's maximum; the allocation is
+ * oversized by HAL_RING_BASE_ALIGN - 1 so the HAL can align the base.
+ * On success ring->ring_id holds the HAL ring id.  Returns 0 or a
+ * (positive) errno.
+ */
+int
+qwz_dp_srng_setup(struct qwz_softc *sc, struct dp_srng *ring,
+    enum hal_ring_type type, int ring_num, int mac_id, int num_entries)
+{
+	struct hal_srng_params params = { 0 };
+	uint16_t entry_sz = qwz_hal_srng_get_entrysize(sc, type);
+	uint32_t max_entries = qwz_hal_srng_get_max_entries(sc, type);
+	int ret;
+	int cached = 0;
+
+	if (num_entries > max_entries)
+		num_entries = max_entries;
+
+	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
+
+#ifdef notyet
+	if (sc->hw_params.alloc_cacheable_memory) {
+		/* Allocate the reo dst and tx completion rings from cacheable memory */
+		switch (type) {
+		case HAL_REO_DST:
+		case HAL_WBM2SW_RELEASE:
+			cached = true;
+			break;
+		default:
+			cached = false;
+		}
+
+		if (cached) {
+			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
+			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
+		}
+		if (!ring->vaddr_unaligned)
+			return -ENOMEM;
+	}
+#endif
+	if (!cached) {
+		ring->mem = qwz_dmamem_alloc(sc->sc_dmat, ring->size,
+		    PAGE_SIZE);
+		if (ring->mem == NULL) {
+			printf("%s: could not allocate DP SRNG DMA memory\n",
+			    sc->sc_dev.dv_xname);
+			return ENOMEM;
+
+		}
+	}
+
+	/* NOTE(review): cached is currently always 0; if the cacheable
+	 * path above is ever enabled, ring->mem stays NULL in that case
+	 * and these accessors must not be used — confirm before enabling. */
+	ring->vaddr = QWZ_DMA_KVA(ring->mem);
+	ring->paddr = QWZ_DMA_DVA(ring->mem);
+
+	params.ring_base_vaddr = ring->vaddr;
+	params.ring_base_paddr = ring->paddr;
+	params.num_entries = num_entries;
+	qwz_dp_srng_msi_setup(sc, &params, type, ring_num + mac_id);
+
+	/* Per-ring-type interrupt moderation settings. */
+	switch (type) {
+	case HAL_REO_DST:
+		params.intr_batch_cntr_thres_entries =
+		    HAL_SRNG_INT_BATCH_THRESHOLD_RX;
+		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+		break;
+	case HAL_RXDMA_BUF:
+	case HAL_RXDMA_MONITOR_BUF:
+	case HAL_RXDMA_MONITOR_STATUS:
+		params.low_threshold = num_entries >> 3;
+		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+		params.intr_batch_cntr_thres_entries = 0;
+		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+		break;
+	case HAL_WBM2SW_RELEASE:
+		if (ring_num < 3) {
+			params.intr_batch_cntr_thres_entries =
+			    HAL_SRNG_INT_BATCH_THRESHOLD_TX;
+			params.intr_timer_thres_us =
+			    HAL_SRNG_INT_TIMER_THRESHOLD_TX;
+			break;
+		}
+		/* follow through when ring_num >= 3 */
+		/* FALLTHROUGH */
+	case HAL_REO_EXCEPTION:
+	case HAL_REO_REINJECT:
+	case HAL_REO_CMD:
+	case HAL_REO_STATUS:
+	case HAL_TCL_DATA:
+	case HAL_TCL_CMD:
+	case HAL_TCL_STATUS:
+	case HAL_WBM_IDLE_LINK:
+	case HAL_SW2WBM_RELEASE:
+	case HAL_RXDMA_DST:
+	case HAL_RXDMA_MONITOR_DST:
+	case HAL_RXDMA_MONITOR_DESC:
+		params.intr_batch_cntr_thres_entries =
+		    HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
+		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
+		break;
+	case HAL_RXDMA_DIR_BUF:
+		break;
+	default:
+		printf("%s: Not a valid ring type in dp :%d\n",
+		    sc->sc_dev.dv_xname, type);
+		return EINVAL;
+	}
+
+	if (cached) {
+		params.flags |= HAL_SRNG_FLAGS_CACHED;
+		ring->cached = 1;
+	}
+
+	ret = qwz_hal_srng_setup(sc, type, ring_num, mac_id, &params);
+	if (ret < 0) {
+		printf("%s: failed to setup srng: %d ring_id %d\n",
+		    sc->sc_dev.dv_xname, ret, ring_num);
+		return ret;
+	}
+
+	ring->ring_id = ret;
+	return 0;
+}
+
+/*
+ * Begin a sequence of SRNG accesses: snapshot the hardware-owned
+ * pointer (tail for source rings, head for destination rings) into the
+ * cached copy used during this access window.  Caller must hold the
+ * srng lock.
+ */
+void
+qwz_hal_srng_access_begin(struct qwz_softc *sc, struct hal_srng *srng)
+{
+#ifdef notyet
+	lockdep_assert_held(&srng->lock);
+#endif
+	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+		srng->u.src_ring.cached_tp =
+			*(volatile uint32_t *)srng->u.src_ring.tp_addr;
+	} else {
+		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+	}
+}
+
+/*
+ * End a sequence of SRNG accesses: publish the software-owned pointer
+ * update to the consumer.  LMAC rings are updated through shared memory
+ * read by the firmware; other rings via an MMIO register write.
+ * Caller must hold the srng lock.
+ */
+void
+qwz_hal_srng_access_end(struct qwz_softc *sc, struct hal_srng *srng)
+{
+#ifdef notyet
+	lockdep_assert_held(&srng->lock);
+#endif
+	/* TODO: See if we need a write memory barrier here */
+	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
+		/* For LMAC rings, ring pointer updates are done through FW and
+		 * hence written to a shared memory location that is read by FW
+		 */
+		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+			srng->u.src_ring.last_tp =
+			    *(volatile uint32_t *)srng->u.src_ring.tp_addr;
+			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+		} else {
+			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+		}
+	} else {
+		/* Register offset is the pointer address's offset from the
+		 * start of the mapped device registers. */
+		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+			srng->u.src_ring.last_tp =
+			    *(volatile uint32_t *)srng->u.src_ring.tp_addr;
+			sc->ops.write32(sc,
+			    (unsigned long)srng->u.src_ring.hp_addr -
+			    (unsigned long)sc->mem, srng->u.src_ring.hp);
+		} else {
+			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+			sc->ops.write32(sc,
+			    (unsigned long)srng->u.dst_ring.tp_addr -
+			    (unsigned long)sc->mem, srng->u.dst_ring.tp);
+		}
+	}
+#ifdef notyet
+	srng->timestamp = jiffies;
+#endif
+}
+
+/*
+ * Compute the total number of link descriptors needed for the MPDU and
+ * MSDU queues, round it up to a power of two, and set up the WBM idle
+ * link ring with that many entries.  The resulting count is returned
+ * in *n_link_desc.  Returns 0 or a (positive) errno.
+ */
+int
+qwz_wbm_idle_ring_setup(struct qwz_softc *sc, uint32_t *n_link_desc)
+{
+	struct qwz_dp *dp = &sc->dp;
+	uint32_t n_mpdu_link_desc, n_mpdu_queue_desc;
+	uint32_t n_tx_msdu_link_desc, n_rx_msdu_link_desc;
+	int ret = 0;
+
+	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
+			   HAL_NUM_MPDUS_PER_LINK_DESC;
+
+	n_mpdu_queue_desc = n_mpdu_link_desc /
+			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
+
+	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
+			       DP_AVG_MSDUS_PER_FLOW) /
+			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;
+
+	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
+			       DP_AVG_MSDUS_PER_MPDU) /
+			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
+		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;
+
+	/* Round up to the next power of two. */
+	if (*n_link_desc & (*n_link_desc - 1))
+		*n_link_desc = 1 << fls(*n_link_desc);
+
+	ret = qwz_dp_srng_setup(sc, &dp->wbm_idle_ring,
+	    HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
+	if (ret) {
+		printf("%s: failed to setup wbm_idle_ring: %d\n",
+		    sc->sc_dev.dv_xname, ret);
+	}
+
+	return ret;
+}
+
+/*
+ * Release every DMA allocation still held in a link descriptor bank
+ * array.  Banks that were never allocated (mem == NULL) are skipped,
+ * so this is safe to call on partially initialized arrays.
+ */
+void
+qwz_dp_link_desc_bank_free(struct qwz_softc *sc,
+    struct dp_link_desc_bank *link_desc_banks)
+{
+	struct dp_link_desc_bank *bank;
+	int i;
+
+	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
+		bank = &link_desc_banks[i];
+		if (bank->mem == NULL)
+			continue;
+		qwz_dmamem_free(sc->sc_dmat, bank->mem);
+		bank->mem = NULL;
+	}
+}
+
+/*
+ * Allocate 'n_link_desc_bank' DMA banks into 'desc_bank'.  All banks are
+ * DP_LINK_DESC_ALLOC_SIZE_THRESH bytes except possibly the last, which
+ * is 'last_bank_sz' bytes when that is non-zero.  Returns 0 or ENOMEM;
+ * on failure everything allocated here is freed again.
+ */
+int
+qwz_dp_link_desc_bank_alloc(struct qwz_softc *sc,
+    struct dp_link_desc_bank *desc_bank, int n_link_desc_bank,
+    int last_bank_sz)
+{
+	int i;
+	int ret = 0;
+	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
+
+	for (i = 0; i < n_link_desc_bank; i++) {
+		/* The final bank may be smaller than the threshold size. */
+		if (i == (n_link_desc_bank - 1) && last_bank_sz)
+			desc_sz = last_bank_sz;
+
+		desc_bank[i].mem = qwz_dmamem_alloc(sc->sc_dmat, desc_sz,
+		    PAGE_SIZE);
+		if (!desc_bank[i].mem) {
+			ret = ENOMEM;
+			goto err;
+		}
+
+		desc_bank[i].vaddr = QWZ_DMA_KVA(desc_bank[i].mem);
+		desc_bank[i].paddr = QWZ_DMA_DVA(desc_bank[i].mem);
+		desc_bank[i].size = desc_sz;
+	}
+
+	return 0;
+
+err:
+	/*
+	 * Free the banks allocated above.  Freeing sc->dp.link_desc_banks
+	 * here (as the Linux driver does) would leak these allocations and
+	 * free somebody else's banks whenever the caller passed in a
+	 * different bank array.
+	 */
+	qwz_dp_link_desc_bank_free(sc, desc_bank);
+
+	return ret;
+}
+
+/*
+ * Program the WBM idle link descriptor list registers.  The scatter
+ * buffers in 'sbuf' are chained together (the tail of buffer i-1 holds
+ * the DMA address of buffer i) and the hardware is told the buffer size,
+ * list size, base address and initial head/tail pointers.
+ */
+void
+qwz_hal_setup_link_idle_list(struct qwz_softc *sc,
+    struct hal_wbm_idle_scatter_list *sbuf,
+    uint32_t nsbufs, uint32_t tot_link_desc, uint32_t end_offset)
+{
+       struct ath12k_buffer_addr *link_addr;
+       int i;
+       /* The register takes the buffer size in units of 64 bytes. */
+       uint32_t reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
+
+       /* The chain pointer lives just past the usable buffer area. */
+       link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+
+       /* Link each scatter buffer to its successor. */
+       for (i = 1; i < nsbufs; i++) {
+               link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
+               link_addr->info1 = FIELD_PREP(
+                   HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+                   (uint64_t)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+                   FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
+                   BASE_ADDR_MATCH_TAG_VAL);
+
+               link_addr = (void *)sbuf[i].vaddr +
+                   HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+       }
+
+       /* Buffer size and "idle list" (scatter) mode. */
+       sc->ops.write32(sc,
+           HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
+           FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
+           FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
+       /* Total list size across all scatter buffers. */
+       sc->ops.write32(sc,
+           HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
+           FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
+           reg_scatter_buf_sz * nsbufs));
+       /* Base address (low/high words) of the first scatter buffer. */
+       sc->ops.write32(sc,
+           HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_RING_BASE_LSB,
+           FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+           sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
+       sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_WBM_REG +
+           HAL_WBM_SCATTERED_RING_BASE_MSB,
+           FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+           (uint64_t)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+           FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
+           BASE_ADDR_MATCH_TAG_VAL));
+
+       /* Setup head and tail pointers for the idle list */
+       /*
+        * NOTE(review): HEAD_INFO_IX0 is written twice, first with the
+        * last buffer's address and then with the first buffer's address.
+        * This mirrors the Linux ath11k/ath12k programming sequence;
+        * presumably required by hardware -- do not "simplify".
+        */
+       sc->ops.write32(sc,
+           HAL_SEQ_WCSS_UMAC_WBM_REG +
+           HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
+           FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[nsbufs - 1].paddr));
+       sc->ops.write32(sc,
+           HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
+           FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+           ((uint64_t)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+           FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
+           (end_offset >> 2)));
+       sc->ops.write32(sc,
+           HAL_SEQ_WCSS_UMAC_WBM_REG +
+           HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
+           FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr));
+
+       sc->ops.write32(sc,
+           HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
+           FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr));
+       sc->ops.write32(sc,
+           HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
+           FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+           ((uint64_t)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+           FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1, 0));
+       /* Head pointer register counts in 32-bit words (2 per entry). */
+       sc->ops.write32(sc,
+           HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
+           2 * tot_link_desc);
+
+       /* Enable the SRNG */
+       sc->ops.write32(sc,
+           HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_MISC_ADDR(sc),
+           0x40);
+}
+
+/*
+ * Fill a WBM link descriptor with a DMA address split across the two
+ * buffer-address words, tagging it with the WBM return buffer manager
+ * and the caller's software cookie.
+ */
+void
+qwz_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, uint32_t cookie,
+    bus_addr_t paddr)
+{
+	uint32_t lsb = paddr & HAL_ADDR_LSB_REG_MASK;
+	uint32_t msb = (uint64_t)paddr >> HAL_ADDR_MSB_REG_SHIFT;
+
+	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, lsb);
+	desc->buf_addr_info.info1 =
+	    FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, msb) |
+	    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
+	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
+}
+
+/*
+ * Free all scatter buffers backing the WBM idle link descriptor list.
+ * Entries that were never allocated are left alone, so this is safe to
+ * call after a partial setup failure.
+ */
+void
+qwz_dp_scatter_idle_link_desc_cleanup(struct qwz_softc *sc)
+{
+	struct hal_wbm_idle_scatter_list *slist = sc->dp.scatter_list;
+	int i;
+
+	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
+		if (slist[i].mem != NULL) {
+			qwz_dmamem_free(sc->sc_dmat, slist[i].mem);
+			slist[i].mem = NULL;
+			slist[i].vaddr = NULL;
+			slist[i].paddr = 0L;
+		}
+	}
+}
+
+/*
+ * Allocate the scatter buffers for the WBM idle link descriptor list,
+ * fill them with one link descriptor per HAL_LINK_DESC_SIZE chunk of the
+ * previously allocated link descriptor banks, and hand the list to the
+ * hardware via qwz_hal_setup_link_idle_list().
+ *
+ * Returns 0, EINVAL (too many scatter buffers needed) or ENOMEM; on
+ * failure all scatter buffers allocated so far are freed again.
+ */
+int
+qwz_dp_scatter_idle_link_desc_setup(struct qwz_softc *sc, int size,
+    uint32_t n_link_desc_bank, uint32_t n_link_desc, uint32_t last_bank_sz)
+{
+       struct qwz_dp *dp = &sc->dp;
+       struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
+       struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+       uint32_t n_entries_per_buf;
+       int num_scatter_buf, scatter_idx;
+       struct hal_wbm_link_desc *scatter_buf;
+       int n_entries;
+       bus_addr_t paddr;
+       int rem_entries;
+       int i;
+       int ret = 0;
+       uint32_t end_offset;
+
+       n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
+           qwz_hal_srng_get_entrysize(sc, HAL_WBM_IDLE_LINK);
+       num_scatter_buf = howmany(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
+
+       if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
+               return EINVAL;
+
+       for (i = 0; i < num_scatter_buf; i++) {
+               slist[i].mem = qwz_dmamem_alloc(sc->sc_dmat,
+                   HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX, PAGE_SIZE);
+               if (slist[i].mem == NULL) {
+                       ret = ENOMEM;
+                       goto err;
+               }
+
+               slist[i].vaddr = QWZ_DMA_KVA(slist[i].mem);
+               slist[i].paddr = QWZ_DMA_DVA(slist[i].mem);
+       }
+
+       scatter_idx = 0;
+       scatter_buf = slist[scatter_idx].vaddr;
+       rem_entries = n_entries_per_buf;
+
+       /*
+        * Walk all banks and write one link descriptor per
+        * HAL_LINK_DESC_SIZE chunk, moving to the next scatter buffer
+        * whenever the current one has no entries left.
+        */
+       for (i = 0; i < n_link_desc_bank; i++) {
+               n_entries = DP_LINK_DESC_ALLOC_SIZE_THRESH / HAL_LINK_DESC_SIZE;
+               paddr = link_desc_banks[i].paddr;
+               while (n_entries) {
+                       qwz_hal_set_link_desc_addr(scatter_buf, i, paddr);
+                       n_entries--;
+                       paddr += HAL_LINK_DESC_SIZE;
+                       if (rem_entries) {
+                               rem_entries--;
+                               scatter_buf++;
+                               continue;
+                       }
+
+                       /* Current scatter buffer is full; start the next. */
+                       rem_entries = n_entries_per_buf;
+                       scatter_idx++;
+                       scatter_buf = slist[scatter_idx].vaddr;
+               }
+       }
+
+       /* Byte offset of the last descriptor written in the last buffer. */
+       end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
+           sizeof(struct hal_wbm_link_desc);
+       qwz_hal_setup_link_idle_list(sc, slist, num_scatter_buf,
+           n_link_desc, end_offset);
+
+       return 0;
+
+err:
+       qwz_dp_scatter_idle_link_desc_cleanup(sc);
+
+       return ret;
+}
+
+/*
+ * Claim the next free entry of a source ring, advancing both the head
+ * pointer and the reap pointer.  Returns NULL when the ring is full
+ * (head would catch up with the cached tail).
+ */
+uint32_t *
+qwz_hal_srng_src_get_next_entry(struct qwz_softc *sc, struct hal_srng *srng)
+{
+	uint32_t *entry;
+	uint32_t hp_next;
+#ifdef notyet
+	lockdep_assert_held(&srng->lock);
+#endif
+	/* Ring sizes are not always powers of two, hence the modulo. */
+	hp_next = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+	if (hp_next == srng->u.src_ring.cached_tp)
+		return NULL;
+
+	entry = srng->ring_base_vaddr + srng->u.src_ring.hp;
+	srng->u.src_ring.hp = hp_next;
+
+	/* Keep reap_hp in sync; some rings rely on reap bookkeeping. */
+	srng->u.src_ring.reap_hp = hp_next;
+
+	return entry;
+}
+
+/*
+ * Advance only the reap pointer of a source ring and return the entry it
+ * lands on, or NULL when it would reach the cached tail.  The head
+ * pointer is left untouched.
+ */
+uint32_t *
+qwz_hal_srng_src_reap_next(struct qwz_softc *sc, struct hal_srng *srng)
+{
+	uint32_t *entry;
+	uint32_t reap_next;
+#ifdef notyet
+	lockdep_assert_held(&srng->lock);
+#endif
+	reap_next = (srng->u.src_ring.reap_hp + srng->entry_size) %
+	    srng->ring_size;
+
+	if (reap_next == srng->u.src_ring.cached_tp)
+		return NULL;
+
+	entry = srng->ring_base_vaddr + reap_next;
+	srng->u.src_ring.reap_hp = reap_next;
+
+	return entry;
+}
+
+/*
+ * Allocate the link descriptor banks for a ring type and enter them into
+ * the hardware's idle list.  If the idle list needs more memory than one
+ * bank can hold (and the ring is not the monitor desc ring), a scatter
+ * list is used; otherwise the descriptors are pushed directly through
+ * the given SRNG.  Returns 0 or an errno; banks are freed on failure.
+ */
+int
+qwz_dp_link_desc_setup(struct qwz_softc *sc,
+    struct dp_link_desc_bank *link_desc_banks, uint32_t ring_type,
+    struct hal_srng *srng, uint32_t n_link_desc)
+{
+       uint32_t tot_mem_sz;
+       uint32_t n_link_desc_bank, last_bank_sz;
+       uint32_t entry_sz, n_entries;
+       uint64_t paddr;
+       uint32_t *desc;
+       int i, ret;
+
+       /* Extra bytes so each bank's descriptors can be aligned. */
+       tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
+       tot_mem_sz += HAL_LINK_DESC_ALIGN;
+
+       /* Split the total across fixed-size banks; the last may be short. */
+       if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
+               n_link_desc_bank = 1;
+               last_bank_sz = tot_mem_sz;
+       } else {
+               n_link_desc_bank = tot_mem_sz /
+                   (DP_LINK_DESC_ALLOC_SIZE_THRESH - HAL_LINK_DESC_ALIGN);
+               last_bank_sz = tot_mem_sz % (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+                   HAL_LINK_DESC_ALIGN);
+
+               if (last_bank_sz)
+                       n_link_desc_bank += 1;
+       }
+
+       if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
+               return EINVAL;
+
+       ret = qwz_dp_link_desc_bank_alloc(sc, link_desc_banks,
+           n_link_desc_bank, last_bank_sz);
+       if (ret)
+               return ret;
+
+       /* Setup link desc idle list for HW internal usage */
+       entry_sz = qwz_hal_srng_get_entrysize(sc, ring_type);
+       tot_mem_sz = entry_sz * n_link_desc;
+
+       /* Setup scatter desc list when the total memory requirement is more */
+       if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
+           ring_type != HAL_RXDMA_MONITOR_DESC) {
+               ret = qwz_dp_scatter_idle_link_desc_setup(sc, tot_mem_sz,
+                   n_link_desc_bank, n_link_desc, last_bank_sz);
+               if (ret) {
+                       printf("%s: failed to setup scatting idle list "
+                           "descriptor :%d\n",
+                           sc->sc_dev.dv_xname, ret);
+                       goto fail_desc_bank_free;
+               }
+
+               return 0;
+       }
+#if 0
+       spin_lock_bh(&srng->lock);
+#endif
+       qwz_hal_srng_access_begin(sc, srng);
+
+       /* Push one ring entry per link descriptor chunk of each bank. */
+       for (i = 0; i < n_link_desc_bank; i++) {
+               n_entries = (link_desc_banks[i].size) / HAL_LINK_DESC_SIZE;
+               paddr = link_desc_banks[i].paddr;
+               while (n_entries &&
+                   (desc = qwz_hal_srng_src_get_next_entry(sc, srng))) {
+                       qwz_hal_set_link_desc_addr(
+                           (struct hal_wbm_link_desc *) desc, i, paddr);
+                       n_entries--;
+                       paddr += HAL_LINK_DESC_SIZE;
+               }
+       }
+
+       qwz_hal_srng_access_end(sc, srng);
+#if 0
+       spin_unlock_bh(&srng->lock);
+#endif
+
+       return 0;
+
+fail_desc_bank_free:
+       qwz_dp_link_desc_bank_free(sc, link_desc_banks);
+
+       return ret;
+}
+
+/*
+ * Free the DMA memory backing a DP ring.  Safe to call on a ring that
+ * was never set up (ring->mem == NULL).
+ */
+void
+qwz_dp_srng_cleanup(struct qwz_softc *sc, struct dp_srng *ring)
+{
+       if (ring->mem == NULL)
+               return;
+
+#if 0
+       /*
+        * Cached rings would be freed with kfree() instead; not ported.
+        * Note the dangling `else' pairs with this disabled block, so
+        * qwz_dmamem_free() below runs unconditionally when compiled.
+        */
+       if (ring->cached)
+               kfree(ring->vaddr_unaligned);
+       else
+#endif
+               qwz_dmamem_free(sc->sc_dmat, ring->mem);
+
+       ring->mem = NULL;
+       ring->vaddr = NULL;
+       ring->paddr = 0;
+}
+
+/*
+ * Cancel a shadow-register head pointer update timer.  A no-op on
+ * hardware without shadow register support, where these timers are
+ * never armed.
+ */
+void
+qwz_dp_shadow_stop_timer(struct qwz_softc *sc,
+    struct qwz_hp_update_timer *update_timer)
+{
+	if (sc->hw_params.supports_shadow_regs)
+		timeout_del(&update_timer->timer);
+}
+
+/*
+ * Account one TX operation on a shadow-register ring and arm the head
+ * pointer update timer if it is not already running.  A no-op without
+ * shadow register support.
+ */
+void
+qwz_dp_shadow_start_timer(struct qwz_softc *sc, struct hal_srng *srng,
+    struct qwz_hp_update_timer *update_timer)
+{
+#ifdef notyet
+	lockdep_assert_held(&srng->lock);
+#endif
+	if (!sc->hw_params.supports_shadow_regs)
+		return;
+
+	update_timer->tx_num++;
+
+	/* Arm the timer only on the first TX after it went idle. */
+	if (!update_timer->started) {
+		update_timer->started = 1;
+		update_timer->timer_tx_num = update_timer->tx_num;
+		timeout_add_msec(&update_timer->timer,
+		    update_timer->interval);
+	}
+}
+
+/*
+ * Timeout handler for a shadow-register HP update timer.  If no TX
+ * happened during the last interval, push the ring's HP/TP to the
+ * hardware and let the timer stop; otherwise re-arm for another
+ * interval.
+ */
+void
+qwz_dp_shadow_timer_handler(void *arg)
+{
+	struct qwz_hp_update_timer *update_timer = arg;
+	struct qwz_softc *sc = update_timer->sc;
+	struct hal_srng *srng = &sc->hal.srng_list[update_timer->ring_id];
+	int s;
+
+#ifdef notyet
+	spin_lock_bh(&srng->lock);
+#endif
+	s = splnet();
+
+	if (update_timer->timer_tx_num == update_timer->tx_num) {
+		/* Idle interval: sync HP/TP and stop.  The next TX will
+		 * restart the timer. */
+		update_timer->started = 0;
+		qwz_hal_srng_shadow_update_hp_tp(sc, srng);
+	} else {
+		/* More TX happened; check again after another interval. */
+		update_timer->timer_tx_num = update_timer->tx_num;
+		timeout_add_msec(&update_timer->timer,
+		    update_timer->interval);
+	}
+#ifdef notyet
+	spin_unlock_bh(&srng->lock);
+#endif
+	splx(s);
+}
+
+/*
+ * Cancel all shadow-register HP update timers: one per TX ring plus the
+ * REO command ring timer.
+ */
+void
+qwz_dp_stop_shadow_timers(struct qwz_softc *sc)
+{
+	struct qwz_dp *dp = &sc->dp;
+	int ring;
+
+	for (ring = 0; ring < sc->hw_params.max_tx_ring; ring++)
+		qwz_dp_shadow_stop_timer(sc, &dp->tx_ring_timer[ring]);
+	qwz_dp_shadow_stop_timer(sc, &dp->reo_cmd_timer);
+}
+
+/*
+ * Tear down all common DP rings: stop the shadow timers first, then
+ * free the WBM/TCL rings, the per-TX-ring data and completion rings,
+ * and the REO rings.
+ */
+void
+qwz_dp_srng_common_cleanup(struct qwz_softc *sc)
+{
+	struct qwz_dp *dp = &sc->dp;
+	int ring;
+
+	qwz_dp_stop_shadow_timers(sc);
+
+	qwz_dp_srng_cleanup(sc, &dp->wbm_desc_rel_ring);
+	qwz_dp_srng_cleanup(sc, &dp->tcl_cmd_ring);
+	qwz_dp_srng_cleanup(sc, &dp->tcl_status_ring);
+
+	for (ring = 0; ring < sc->hw_params.max_tx_ring; ring++) {
+		qwz_dp_srng_cleanup(sc, &dp->tx_ring[ring].tcl_data_ring);
+		qwz_dp_srng_cleanup(sc, &dp->tx_ring[ring].tcl_comp_ring);
+	}
+
+	qwz_dp_srng_cleanup(sc, &dp->reo_reinject_ring);
+	qwz_dp_srng_cleanup(sc, &dp->rx_rel_ring);
+	qwz_dp_srng_cleanup(sc, &dp->reo_except_ring);
+	qwz_dp_srng_cleanup(sc, &dp->reo_cmd_ring);
+	qwz_dp_srng_cleanup(sc, &dp->reo_status_ring);
+}
+
+/*
+ * Copy a snapshot of an SRNG's configuration into 'params'.
+ */
+void
+qwz_hal_srng_get_params(struct qwz_softc *sc, struct hal_srng *srng,
+    struct hal_srng_params *params)
+{
+	params->ring_base_vaddr = srng->ring_base_vaddr;
+	params->ring_base_paddr = srng->ring_base_paddr;
+	params->num_entries = srng->num_entries;
+	params->flags = srng->flags;
+	params->msi_addr = srng->msi_addr;
+	params->msi_data = srng->msi_data;
+	params->intr_timer_thres_us = srng->intr_timer_thres_us;
+	params->intr_batch_cntr_thres_entries =
+	    srng->intr_batch_cntr_thres_entries;
+	/* NOTE(review): low_threshold is read from the src_ring arm of
+	 * the union even for destination rings — presumably callers only
+	 * consume it for source rings; mirrors the Linux driver. */
+	params->low_threshold = srng->u.src_ring.low_threshold;
+}
+
+/*
+ * Pre-tag every entry of a TCL data ring with a TLV header declaring it
+ * a TCL data command of the appropriate length.
+ */
+void
+qwz_hal_tx_init_data_ring(struct qwz_softc *sc, struct hal_srng *srng)
+{
+	struct hal_srng_params params;
+	struct hal_tlv_hdr *tlv;
+	uint8_t *entry;
+	int i, entry_size;
+
+	memset(&params, 0, sizeof(params));
+
+	entry_size = qwz_hal_srng_get_entrysize(sc, HAL_TCL_DATA);
+	qwz_hal_srng_get_params(sc, srng, &params);
+
+	entry = (uint8_t *)params.ring_base_vaddr;
+	for (i = 0; i < params.num_entries; i++, entry += entry_size) {
+		tlv = (struct hal_tlv_hdr *)entry;
+		tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_TCL_DATA_CMD) |
+		    FIELD_PREP(HAL_TLV_HDR_LEN,
+		    sizeof(struct hal_tcl_data_cmd));
+	}
+}
+
+#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
+
+/* dscp_tid_map - Default DSCP-TID mapping
+ *
+ * Each of the 64 possible DSCP values maps to one of 8 TIDs; the table
+ * below assigns each run of 8 consecutive DSCP values to one TID, so
+ * only the class-selector code points are shown:
+ *
+ * DSCP        TID
+ * 000000      0
+ * 001000      1
+ * 010000      2
+ * 011000      3
+ * 100000      4
+ * 101000      5
+ * 110000      6
+ * 111000      7
+ */
+static const uint8_t dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = {
+       0, 0, 0, 0, 0, 0, 0, 0,
+       1, 1, 1, 1, 1, 1, 1, 1,
+       2, 2, 2, 2, 2, 2, 2, 2,
+       3, 3, 3, 3, 3, 3, 3, 3,
+       4, 4, 4, 4, 4, 4, 4, 4,
+       5, 5, 5, 5, 5, 5, 5, 5,
+       6, 6, 6, 6, 6, 6, 6, 6,
+       7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+/*
+ * Program DSCP-TID mapping table 'id' in the TCL block.  The 64 DSCP
+ * entries are packed 3 bits each (8 entries -> 3 bytes per loop
+ * iteration) into a byte array which is then written to the hardware
+ * one 32-bit word at a time, with map programming access enabled
+ * around the writes.
+ */
+void
+qwz_hal_tx_set_dscp_tid_map(struct qwz_softc *sc, int id)
+{
+       uint32_t ctrl_reg_val;
+       uint32_t addr;
+       uint8_t hw_map_val[HAL_DSCP_TID_TBL_SIZE];
+       int i;
+       uint32_t value;
+       int cnt = 0;
+
+       ctrl_reg_val = sc->ops.read32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
+           HAL_TCL1_RING_CMN_CTRL_REG);
+
+       /* Enable read/write access */
+       ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+       sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
+           HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
+
+       /* Each table occupies HAL_DSCP_TID_TBL_SIZE bytes of register
+        * space; compute the base address of table 'id'. */
+       addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
+              (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
+
+       /* Configure each DSCP-TID mapping in three bits there by configure
+        * three bytes in an iteration.
+        */
+       for (i = 0; i < DSCP_TID_MAP_TBL_ENTRY_SIZE; i += 8) {
+               value = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0,
+                                  dscp_tid_map[i]) |
+                       FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1,
+                                  dscp_tid_map[i + 1]) |
+                       FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP2,
+                                  dscp_tid_map[i + 2]) |
+                       FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP3,
+                                  dscp_tid_map[i + 3]) |
+                       FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP4,
+                                  dscp_tid_map[i + 4]) |
+                       FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP5,
+                                  dscp_tid_map[i + 5]) |
+                       FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP6,
+                                  dscp_tid_map[i + 6]) |
+                       FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP7,
+                                  dscp_tid_map[i + 7]);
+               /* Keep only the low 3 bytes (24 bits = 8 entries x 3 bits).
+                * NOTE(review): byte order of 'value' is host-endian;
+                * mirrors the Linux driver, which assumes little-endian. */
+               memcpy(&hw_map_val[cnt], (uint8_t *)&value, 3);
+               cnt += 3;
+       }
+
+       for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
+               sc->ops.write32(sc, addr, *(uint32_t *)&hw_map_val[i]);
+               addr += 4;
+       }
+
+       /* Disable read/write access */
+       ctrl_reg_val = sc->ops.read32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
+           HAL_TCL1_RING_CMN_CTRL_REG);
+       ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+       sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
+           HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
+}
+
+/*
+ * Prepare (but do not arm) a shadow-register HP update timer for the
+ * given ring.  A no-op without shadow register support.
+ */
+void
+qwz_dp_shadow_init_timer(struct qwz_softc *sc,
+    struct qwz_hp_update_timer *update_timer,
+    uint32_t interval, uint32_t ring_id)
+{
+	if (!sc->hw_params.supports_shadow_regs)
+		return;
+
+	update_timer->sc = sc;
+	update_timer->ring_id = ring_id;
+	update_timer->interval = interval;
+	update_timer->tx_num = 0;
+	update_timer->timer_tx_num = 0;
+	update_timer->init = 1;
+	timeout_set(&update_timer->timer, qwz_dp_shadow_timer_handler,
+	    update_timer);
+}
+
+/*
+ * Pre-assign ascending command numbers (starting at 1) to every entry of
+ * the REO command ring; the numbers are later returned to callers of
+ * qwz_hal_reo_cmd_send() so they can match status reports to commands.
+ */
+void
+qwz_hal_reo_init_cmd_ring(struct qwz_softc *sc, struct hal_srng *srng)
+{
+	struct hal_srng_params params;
+	struct hal_reo_get_queue_stats *stats_desc;
+	struct hal_tlv_hdr *tlv;
+	uint8_t *entry;
+	int i, entry_size, cmd_num = 1;
+
+	memset(&params, 0, sizeof(params));
+
+	entry_size = qwz_hal_srng_get_entrysize(sc, HAL_REO_CMD);
+	qwz_hal_srng_get_params(sc, srng, &params);
+
+	entry = (uint8_t *)params.ring_base_vaddr;
+	for (i = 0; i < params.num_entries; i++, entry += entry_size) {
+		tlv = (struct hal_tlv_hdr *)entry;
+		stats_desc = (struct hal_reo_get_queue_stats *)tlv->value;
+		stats_desc->cmd.info0 =
+		    FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, cmd_num);
+		cmd_num++;
+	}
+}
+
+/*
+ * Encode a REO GET_QUEUE_STATS command into a ring entry and return the
+ * entry's pre-assigned command number.
+ */
+int
+qwz_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv,
+    struct ath12k_hal_reo_cmd *cmd)
+{
+	struct hal_reo_get_queue_stats *desc;
+
+	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
+	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+	desc = (struct hal_reo_get_queue_stats *)tlv->value;
+
+	/* Request a status report only when the caller asked for one. */
+	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+	else
+		desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+	desc->queue_addr_lo = cmd->addr_lo;
+	desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
+	    cmd->addr_hi);
+	if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
+		desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
+
+	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+/*
+ * Encode a REO FLUSH_CACHE command into a ring entry, reserving a block
+ * resource slot first when the cache line is to be blocked after the
+ * flush.  Returns the entry's command number, or ENOSPC when no block
+ * resource slot is available.
+ */
+int
+qwz_hal_reo_cmd_flush_cache(struct ath12k_hal *hal, struct hal_tlv_hdr *tlv,
+    struct ath12k_hal_reo_cmd *cmd)
+{
+	struct hal_reo_flush_cache *desc;
+	uint8_t avail_slot = ffz(hal->avail_blk_resource);
+
+	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+		/* NOTE(review): positive ENOSPC can collide with valid
+		 * command numbers returned below; mirrors qwx(4). */
+		if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
+			return ENOSPC;
+
+		hal->current_blk_index = avail_slot;
+	}
+
+	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
+	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+	desc = (struct hal_reo_flush_cache *)tlv->value;
+
+	/* Request a status report only when the caller asked for one. */
+	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+	else
+		desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+	desc->cache_addr_lo = cmd->addr_lo;
+	desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
+	    cmd->addr_hi);
+
+	/* Translate the remaining caller flags into descriptor bits. */
+	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
+		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;
+	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
+		desc->info0 |= FIELD_PREP(
+		    HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX, avail_slot);
+	}
+	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
+		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;
+	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
+		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;
+
+	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+/*
+ * Encode a REO UPDATE_RX_REO_QUEUE command into a ring entry.  The upd0
+ * bitmask selects which queue fields to update (info0), upd1/upd2 carry
+ * the new field values (info1/info2).  Returns the entry's pre-assigned
+ * command number.  Note: cmd->pn_size and cmd->ba_window_size are
+ * normalized in place below.
+ */
+int
+qwz_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
+    struct ath12k_hal_reo_cmd *cmd)
+{
+       struct hal_reo_update_rx_queue *desc;
+
+       tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
+           FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+       desc = (struct hal_reo_update_rx_queue *)tlv->value;
+
+       /* Request a status report only when the caller asked for one. */
+       desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+       if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+               desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+       desc->queue_addr_lo = cmd->addr_lo;
+       /* info0: one "update this field" enable bit per upd0 flag. */
+       desc->info0 =
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
+                   cmd->addr_hi) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
+                   !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));
+
+       /* info1: the new values for the fields enabled above. */
+       desc->info1 =
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
+                   cmd->rx_queue_num) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
+                   !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
+                   FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
+                   !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
+                   !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
+                   FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
+                   !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
+                   !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
+                   !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
+                   !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
+                   !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
+                   !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
+                   !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
+                   !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
+               FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
+                   !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));
+
+       /* Translate PN size in bits to the hardware encoding. */
+       if (cmd->pn_size == 24)
+               cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
+       else if (cmd->pn_size == 48)
+               cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
+       else if (cmd->pn_size == 128)
+               cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;
+
+       /* The descriptor encodes window size minus one; force a minimum
+        * effective window of 2. */
+       if (cmd->ba_window_size < 1)
+               cmd->ba_window_size = 1;
+
+       if (cmd->ba_window_size == 1)
+               cmd->ba_window_size++;
+
+       desc->info2 = FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
+           cmd->ba_window_size - 1) |
+           FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
+           FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
+               !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
+           FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
+               FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
+           FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
+               !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
+           FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
+               !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));
+
+       return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+/*
+ * Claim the next REO command ring entry, encode 'cmd' into it according
+ * to 'type' and kick the shadow update timer.  Returns the command
+ * number assigned to the entry, or ENOBUFS when the ring is full,
+ * ENOTSUP/EINVAL for unsupported or unknown command types.
+ */
+int
+qwz_hal_reo_cmd_send(struct qwz_softc *sc, struct hal_srng *srng,
+    enum hal_reo_cmd_type type, struct ath12k_hal_reo_cmd *cmd)
+{
+	struct hal_tlv_hdr *entry;
+	int ret;
+#ifdef notyet
+	spin_lock_bh(&srng->lock);
+#endif
+	qwz_hal_srng_access_begin(sc, srng);
+
+	entry = (struct hal_tlv_hdr *)qwz_hal_srng_src_get_next_entry(sc,
+	    srng);
+	if (entry == NULL) {
+		/* Ring is full. */
+		ret = ENOBUFS;
+		goto out;
+	}
+
+	switch (type) {
+	case HAL_REO_CMD_GET_QUEUE_STATS:
+		ret = qwz_hal_reo_cmd_queue_stats(entry, cmd);
+		break;
+	case HAL_REO_CMD_FLUSH_CACHE:
+		ret = qwz_hal_reo_cmd_flush_cache(&sc->hal, entry, cmd);
+		break;
+	case HAL_REO_CMD_UPDATE_RX_QUEUE:
+		ret = qwz_hal_reo_cmd_update_rx_queue(entry, cmd);
+		break;
+	case HAL_REO_CMD_FLUSH_QUEUE:
+	case HAL_REO_CMD_UNBLOCK_CACHE:
+	case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
+		printf("%s: unsupported reo command %d\n",
+		   sc->sc_dev.dv_xname, type);
+		ret = ENOTSUP;
+		break;
+	default:
+		printf("%s: unknown reo command %d\n",
+		    sc->sc_dev.dv_xname, type);
+		ret = EINVAL;
+		break;
+	}
+
+	qwz_dp_shadow_start_timer(sc, srng, &sc->dp.reo_cmd_timer);
+out:
+	qwz_hal_srng_access_end(sc, srng);
+#ifdef notyet
+	spin_unlock_bh(&srng->lock);
+#endif
+	return ret;
+}
+
+/*
+ * Set up the data path SRNGs shared by all pdevs: WBM descriptor
+ * release, TCL command/status, one TCL data + WBM completion ring per
+ * hardware tx ring, and the REO reinject/release/exception/command/
+ * status rings.  All rings set up so far are torn down on failure.
+ * Returns 0 or a positive errno.
+ */
+int
+qwz_dp_srng_common_setup(struct qwz_softc *sc)
+{
+       struct qwz_dp *dp = &sc->dp;
+       struct hal_srng *srng;
+       int i, ret;
+       uint8_t tcl_num, wbm_num;
+
+       ret = qwz_dp_srng_setup(sc, &dp->wbm_desc_rel_ring, HAL_SW2WBM_RELEASE,
+           0, 0, DP_WBM_RELEASE_RING_SIZE);
+       if (ret) {
+               printf("%s: failed to set up wbm2sw_release ring :%d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err;
+       }
+
+       ret = qwz_dp_srng_setup(sc, &dp->tcl_cmd_ring, HAL_TCL_CMD,
+           0, 0, DP_TCL_CMD_RING_SIZE);
+       if (ret) {
+               printf("%s: failed to set up tcl_cmd ring :%d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err;
+       }
+
+       ret = qwz_dp_srng_setup(sc, &dp->tcl_status_ring, HAL_TCL_STATUS,
+           0, 0, DP_TCL_STATUS_RING_SIZE);
+       if (ret) {
+               printf("%s: failed to set up tcl_status ring :%d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err;
+       }
+
+       /* The TCL/WBM ring numbers for each tx ring come from the
+        * per-chip hal params map. */
+       for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
+               const struct ath12k_hw_hal_params *hal_params;
+
+               hal_params = sc->hw_params.hal_params;
+               tcl_num = hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
+               wbm_num = hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
+
+               ret = qwz_dp_srng_setup(sc, &dp->tx_ring[i].tcl_data_ring,
+                   HAL_TCL_DATA, tcl_num, 0, sc->hw_params.tx_ring_size);
+               if (ret) {
+                       printf("%s: failed to set up tcl_data ring (%d) :%d\n",
+                           sc->sc_dev.dv_xname, i, ret);
+                       goto err;
+               }
+
+               ret = qwz_dp_srng_setup(sc, &dp->tx_ring[i].tcl_comp_ring,
+                   HAL_WBM2SW_RELEASE, wbm_num, 0, DP_TX_COMP_RING_SIZE);
+               if (ret) {
+                       printf("%s: failed to set up tcl_comp ring (%d) :%d\n",
+                           sc->sc_dev.dv_xname, i, ret);
+                       goto err;
+               }
+
+               srng = &sc->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
+               qwz_hal_tx_init_data_ring(sc, srng);
+
+               qwz_dp_shadow_init_timer(sc, &dp->tx_ring_timer[i],
+                   ATH12K_SHADOW_DP_TIMER_INTERVAL,
+                   dp->tx_ring[i].tcl_data_ring.ring_id);
+       }
+
+       ret = qwz_dp_srng_setup(sc, &dp->reo_reinject_ring, HAL_REO_REINJECT,
+           0, 0, DP_REO_REINJECT_RING_SIZE);
+       if (ret) {
+               printf("%s: failed to set up reo_reinject ring :%d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err;
+       }
+
+       ret = qwz_dp_srng_setup(sc, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
+           DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
+       if (ret) {
+               printf("%s: failed to set up rx_rel ring :%d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err;
+       }
+
+       ret = qwz_dp_srng_setup(sc, &dp->reo_except_ring, HAL_REO_EXCEPTION,
+           0, 0, DP_REO_EXCEPTION_RING_SIZE);
+       if (ret) {
+               printf("%s: failed to set up reo_exception ring :%d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err;
+       }
+
+       ret = qwz_dp_srng_setup(sc, &dp->reo_cmd_ring, HAL_REO_CMD, 0, 0,
+           DP_REO_CMD_RING_SIZE);
+       if (ret) {
+               printf("%s: failed to set up reo_cmd ring :%d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err;
+       }
+
+       srng = &sc->hal.srng_list[dp->reo_cmd_ring.ring_id];
+       qwz_hal_reo_init_cmd_ring(sc, srng);
+
+       qwz_dp_shadow_init_timer(sc, &dp->reo_cmd_timer,
+            ATH12K_SHADOW_CTRL_TIMER_INTERVAL, dp->reo_cmd_ring.ring_id);
+
+       ret = qwz_dp_srng_setup(sc, &dp->reo_status_ring, HAL_REO_STATUS,
+           0, 0, DP_REO_STATUS_RING_SIZE);
+       if (ret) {
+               printf("%s: failed to set up reo_status ring :%d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err;
+       }
+
+       /* When hash based routing of rx packet is enabled, 32 entries to map
+        * the hash values to the ring will be configured.
+        */
+       sc->hw_params.hw_ops->reo_setup(sc);
+       return 0;
+
+err:
+       qwz_dp_srng_common_cleanup(sc);
+
+       return ret;
+}
+
+/*
+ * Tear down a link descriptor bank.  For every ring type except the
+ * RXDMA monitor descriptor ring, the backing srng and the scattered
+ * idle link descriptors are released as well.
+ */
+void
+qwz_dp_link_desc_cleanup(struct qwz_softc *sc,
+    struct dp_link_desc_bank *desc_bank, uint32_t ring_type,
+    struct dp_srng *ring)
+{
+       qwz_dp_link_desc_bank_free(sc, desc_bank);
+
+       if (ring_type == HAL_RXDMA_MONITOR_DESC)
+               return;
+
+       qwz_dp_srng_cleanup(sc, ring);
+       qwz_dp_scatter_idle_link_desc_cleanup(sc);
+}
+
+/*
+ * Release all per-descriptor tx state of a tx ring: unload and destroy
+ * the DMA maps, free any still-pending mbufs and the data array itself.
+ * Safe to call on a ring whose data array was never allocated.
+ */
+void
+qwz_dp_tx_ring_free_tx_data(struct qwz_softc *sc, struct dp_tx_ring *tx_ring)
+{
+       struct qwz_tx_data *txd;
+       int idx, count;
+
+       if (tx_ring->data == NULL)
+               return;
+
+       count = sc->hw_params.tx_ring_size;
+       for (idx = 0; idx < count; idx++) {
+               txd = &tx_ring->data[idx];
+               if (txd->map != NULL) {
+                       bus_dmamap_unload(sc->sc_dmat, txd->map);
+                       bus_dmamap_destroy(sc->sc_dmat, txd->map);
+               }
+               /* m_freem(9) tolerates NULL. */
+               m_freem(txd->m);
+       }
+
+       free(tx_ring->data, M_DEVBUF, count * sizeof(struct qwz_tx_data));
+       tx_ring->data = NULL;
+}
+
+/*
+ * Allocate the per-descriptor tx bookkeeping array of a tx ring and
+ * create one DMA map per slot.  Returns 0 or a positive errno; on
+ * failure all partial allocations are unwound so nothing leaks.
+ */
+int
+qwz_dp_tx_ring_alloc_tx_data(struct qwz_softc *sc, struct dp_tx_ring *tx_ring)
+{
+       int i, ret;
+
+       /* mallocarray(9) checks the count * size product for overflow. */
+       tx_ring->data = mallocarray(sc->hw_params.tx_ring_size,
+          sizeof(struct qwz_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO);
+       if (tx_ring->data == NULL)
+               return ENOMEM;
+
+       for (i = 0; i < sc->hw_params.tx_ring_size; i++) {
+               struct qwz_tx_data *tx_data = &tx_ring->data[i];
+
+               ret = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
+                   BUS_DMA_NOWAIT, &tx_data->map);
+               if (ret) {
+                       /*
+                        * Destroy the maps created so far and release the
+                        * data array; previously these were leaked when a
+                        * map creation failed part-way through.
+                        */
+                       while (--i >= 0) {
+                               bus_dmamap_destroy(sc->sc_dmat,
+                                   tx_ring->data[i].map);
+                       }
+                       free(tx_ring->data, M_DEVBUF,
+                           sc->hw_params.tx_ring_size *
+                           sizeof(struct qwz_tx_data));
+                       tx_ring->data = NULL;
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Allocate the data path: WBM idle ring, link descriptor banks, the
+ * common SRNGs, per-tx-ring bookkeeping and completion status arrays,
+ * and program the DSCP-to-TID map.  Returns 0 or a positive errno.
+ */
+int
+qwz_dp_alloc(struct qwz_softc *sc)
+{
+       struct qwz_dp *dp = &sc->dp;
+       struct hal_srng *srng = NULL;
+       size_t size = 0;
+       uint32_t n_link_desc = 0;
+       int ret;
+       int i;
+
+       dp->sc = sc;
+
+       TAILQ_INIT(&dp->reo_cmd_list);
+       TAILQ_INIT(&dp->reo_cmd_cache_flush_list);
+#if 0
+       INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
+       spin_lock_init(&dp->reo_cmd_lock);
+#endif
+
+       dp->reo_cmd_cache_flush_count = 0;
+
+       ret = qwz_wbm_idle_ring_setup(sc, &n_link_desc);
+       if (ret) {
+               printf("%s: failed to setup wbm_idle_ring: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       srng = &sc->hal.srng_list[dp->wbm_idle_ring.ring_id];
+
+       ret = qwz_dp_link_desc_setup(sc, dp->link_desc_banks,
+           HAL_WBM_IDLE_LINK, srng, n_link_desc);
+       if (ret) {
+               printf("%s: failed to setup link desc: %d\n",
+                  sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       ret = qwz_dp_srng_common_setup(sc);
+       if (ret)
+               goto fail_link_desc_cleanup;
+
+       /* Completion status array size, one entry per completion slot. */
+       size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
+
+       for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
+#if 0
+               idr_init(&dp->tx_ring[i].txbuf_idr);
+               spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
+#endif
+               ret = qwz_dp_tx_ring_alloc_tx_data(sc, &dp->tx_ring[i]);
+               if (ret)
+                       goto fail_cmn_srng_cleanup;
+
+               dp->tx_ring[i].cur = 0;
+               dp->tx_ring[i].queued = 0;
+               dp->tx_ring[i].tcl_data_ring_id = i;
+               dp->tx_ring[i].tx_status_head = 0;
+               dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
+               dp->tx_ring[i].tx_status = malloc(size, M_DEVBUF,
+                   M_NOWAIT | M_ZERO);
+               if (!dp->tx_ring[i].tx_status) {
+                       ret = ENOMEM;
+                       goto fail_cmn_srng_cleanup;
+               }
+       }
+
+       for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
+               qwz_hal_tx_set_dscp_tid_map(sc, i);
+
+       /* Init any SOC level resource for DP */
+
+       return 0;
+       /*
+        * NOTE(review): tx_ring data/tx_status allocated by earlier loop
+        * iterations are not freed on this error path; presumably
+        * qwz_dp_free() takes care of them on detach -- verify.
+        */
+fail_cmn_srng_cleanup:
+       qwz_dp_srng_common_cleanup(sc);
+fail_link_desc_cleanup:
+       qwz_dp_link_desc_cleanup(sc, dp->link_desc_banks, HAL_WBM_IDLE_LINK,
+           &dp->wbm_idle_ring);
+
+       return ret;
+}
+
+/* Release the DMA memory still attached to a reorder-queue TID. */
+static void
+qwz_dp_reo_cmd_rx_tid_mem_free(struct qwz_softc *sc, struct dp_rx_tid *rx_tid)
+{
+       if (rx_tid->mem) {
+               qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
+               rx_tid->mem = NULL;
+               rx_tid->vaddr = NULL;
+               rx_tid->paddr = 0ULL;
+               rx_tid->size = 0;
+       }
+}
+
+/*
+ * Drain both pending REO command lists, freeing each queued command
+ * and any per-TID DMA memory it still references.  The duplicated
+ * rx_tid teardown is factored into a helper above.
+ */
+void
+qwz_dp_reo_cmd_list_cleanup(struct qwz_softc *sc)
+{
+       struct qwz_dp *dp = &sc->dp;
+       struct dp_reo_cmd *cmd, *tmp;
+       struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+#ifdef notyet
+       spin_lock_bh(&dp->reo_cmd_lock);
+#endif
+       TAILQ_FOREACH_SAFE(cmd, &dp->reo_cmd_list, entry, tmp) {
+               TAILQ_REMOVE(&dp->reo_cmd_list, cmd, entry);
+               qwz_dp_reo_cmd_rx_tid_mem_free(sc, &cmd->data);
+               free(cmd, M_DEVBUF, sizeof(*cmd));
+       }
+
+       TAILQ_FOREACH_SAFE(cmd_cache, &dp->reo_cmd_cache_flush_list,
+           entry, tmp_cache) {
+               TAILQ_REMOVE(&dp->reo_cmd_cache_flush_list, cmd_cache, entry);
+               dp->reo_cmd_cache_flush_count--;
+               qwz_dp_reo_cmd_rx_tid_mem_free(sc, &cmd_cache->data);
+               free(cmd_cache, M_DEVBUF, sizeof(*cmd_cache));
+       }
+#ifdef notyet
+       spin_unlock_bh(&dp->reo_cmd_lock);
+#endif
+}
+
+/*
+ * Release all data path state allocated by qwz_dp_alloc(): link
+ * descriptors, common srngs, queued REO commands and the per-tx-ring
+ * bookkeeping and completion status arrays.
+ */
+void
+qwz_dp_free(struct qwz_softc *sc)
+{
+       struct qwz_dp *dp = &sc->dp;
+       int i;
+
+       qwz_dp_link_desc_cleanup(sc, dp->link_desc_banks,
+           HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+       qwz_dp_srng_common_cleanup(sc);
+       qwz_dp_reo_cmd_list_cleanup(sc);
+       for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
+#if 0
+               spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
+               idr_for_each(&dp->tx_ring[i].txbuf_idr,
+                            ath12k_dp_tx_pending_cleanup, ab);
+               idr_destroy(&dp->tx_ring[i].txbuf_idr);
+               spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
+#endif
+               qwz_dp_tx_ring_free_tx_data(sc, &dp->tx_ring[i]);
+               /* free(9) with explicit size; NULL tx_status is a no-op. */
+               free(dp->tx_ring[i].tx_status, M_DEVBUF,
+                   sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE);
+               dp->tx_ring[i].tx_status = NULL;
+       }
+
+       /* Deinit any SOC level resource */
+}
+
+/* Placeholder: coldboot calibration is not implemented in qwz(4) yet. */
+void
+qwz_qmi_process_coldboot_calibration(struct qwz_softc *sc)
+{
+       printf("%s not implemented\n", __func__);
+}
+
+/*
+ * Tell the firmware whether to enable its ini/log processing.
+ * Sleeps until the QMI response handler posts a success result or a
+ * one second timeout expires.  Returns 0 or a positive errno.
+ */
+int
+qwz_qmi_wlanfw_wlan_ini_send(struct qwz_softc *sc, int enable)
+{
+       int ret;
+       struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
+
+       req.enablefwlog_valid = 1;
+       req.enablefwlog = enable ? 1 : 0;
+
+       ret = qwz_qmi_send_request(sc, QMI_WLANFW_WLAN_INI_REQ_V01,
+           QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
+           qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req, sizeof(req));
+       if (ret) {
+               printf("%s: failed to send wlan ini request, err = %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       /* Wait for the QMI response handler to post the result. */
+       sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
+       while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
+               ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzini",
+                   SEC_TO_NSEC(1));
+               if (ret) {
+                       printf("%s: wlan ini request timeout\n",
+                           sc->sc_dev.dv_xname);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Send the WLAN config request to the firmware: host version string,
+ * target CE pipe configuration, service-to-pipe map and, when the
+ * hardware supports it, the shadow register v2 table.  Blocks until
+ * the firmware responds or a one second timeout expires.  Returns 0
+ * or a positive errno.
+ */
+int
+qwz_qmi_wlanfw_wlan_cfg_send(struct qwz_softc *sc)
+{
+       struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
+       const struct ce_pipe_config *ce_cfg;
+       const struct service_to_pipe *svc_cfg;
+       int ret = 0, pipe_num;
+
+       ce_cfg = sc->hw_params.target_ce_config;
+       svc_cfg = sc->hw_params.svc_to_ce_map;
+
+       /* The request structure is too large for the stack. */
+       req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
+       if (!req)
+               return ENOMEM;
+
+       req->host_version_valid = 1;
+       strlcpy(req->host_version, ATH12K_HOST_VERSION_STRING,
+           sizeof(req->host_version));
+
+       req->tgt_cfg_valid = 1;
+       /* This is number of CE configs */
+       req->tgt_cfg_len = sc->hw_params.target_ce_count;
+       for (pipe_num = 0; pipe_num < req->tgt_cfg_len; pipe_num++) {
+               req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
+               req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
+               req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
+               req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
+               req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
+       }
+
+       req->svc_cfg_valid = 1;
+       /* This is number of Service/CE configs */
+       req->svc_cfg_len = sc->hw_params.svc_to_ce_map_len;
+       for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
+               req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
+               req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
+               req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
+       }
+       req->shadow_reg_valid = 0;
+
+       /* set shadow v2 configuration */
+       if (sc->hw_params.supports_shadow_regs) {
+               req->shadow_reg_v2_valid = 1;
+               req->shadow_reg_v2_len = MIN(sc->qmi_ce_cfg.shadow_reg_v2_len,
+                   QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01);
+               memcpy(&req->shadow_reg_v2, sc->qmi_ce_cfg.shadow_reg_v2,
+                   sizeof(uint32_t) * req->shadow_reg_v2_len);
+       } else {
+               req->shadow_reg_v2_valid = 0;
+       }
+
+       DNPRINTF(QWZ_D_QMI, "%s: wlan cfg req\n", __func__);
+
+       ret = qwz_qmi_send_request(sc, QMI_WLANFW_WLAN_CFG_REQ_V01,
+           QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
+           qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req, sizeof(*req));
+       if (ret) {
+               printf("%s: failed to send wlan config request: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto out;
+       }
+
+       /* Wait for the QMI response handler to post the result. */
+       sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
+       while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
+               ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzwlancfg",
+                   SEC_TO_NSEC(1));
+               if (ret) {
+                       printf("%s: wlan config request failed\n",
+                           sc->sc_dev.dv_xname);
+                       goto out;
+               }
+       }
+out:
+       free(req, M_DEVBUF, sizeof(*req));
+       return ret;
+}
+
+/*
+ * Request a firmware operating mode change.  A timeout while switching
+ * to ATH12K_FIRMWARE_MODE_OFF is treated as success since the firmware
+ * may already be down and unable to respond.  Returns 0 or a positive
+ * errno.
+ */
+int
+qwz_qmi_wlanfw_mode_send(struct qwz_softc *sc, enum ath12k_firmware_mode mode)
+{
+       int ret;
+       struct qmi_wlanfw_wlan_mode_req_msg_v01 req = {};
+
+       req.mode = mode;
+       req.hw_debug_valid = 1;
+       req.hw_debug = 0;
+
+       ret = qwz_qmi_send_request(sc, QMI_WLANFW_WLAN_MODE_REQ_V01,
+           QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
+           qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req, sizeof(req));
+       if (ret) {
+               printf("%s: failed to send wlan mode request, err = %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       /* Wait for the QMI response handler to post the result. */
+       sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
+       while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
+               ret = tsleep_nsec(&sc->qmi_resp, 0, "qwzfwmode",
+                   SEC_TO_NSEC(1));
+               if (ret) {
+                       if (mode == ATH12K_FIRMWARE_MODE_OFF)
+                               return 0;
+                       printf("%s: wlan mode request timeout\n",
+                           sc->sc_dev.dv_xname);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Bring the firmware up in the given mode: optionally enable firmware
+ * ini/log processing, push the wlan configuration and finally request
+ * the mode switch.  Returns 0 or a positive errno.
+ */
+int
+qwz_qmi_firmware_start(struct qwz_softc *sc, enum ath12k_firmware_mode mode)
+{
+       int ret;
+
+       DPRINTF("%s: firmware start\n", sc->sc_dev.dv_xname);
+
+       if (sc->hw_params.fw_wmi_diag_event) {
+               ret = qwz_qmi_wlanfw_wlan_ini_send(sc, 1);
+               /*
+                * The qwz QMI helpers return positive errnos, so the
+                * Linux-style "ret < 0" test used here previously could
+                * never trigger and silently ignored failures.
+                */
+               if (ret) {
+                       printf("%s: qmi failed to send wlan fw ini: %d\n",
+                           sc->sc_dev.dv_xname, ret);
+                       return ret;
+               }
+       }
+
+       ret = qwz_qmi_wlanfw_wlan_cfg_send(sc);
+       if (ret) {
+               printf("%s: qmi failed to send wlan cfg: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       ret = qwz_qmi_wlanfw_mode_send(sc, mode);
+       if (ret) {
+               printf("%s: qmi failed to send wlan fw mode: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Ask the firmware to switch to mode "off".  Failure is only logged;
+ * there is nothing further to unwind at this point.
+ */
+void
+qwz_qmi_firmware_stop(struct qwz_softc *sc)
+{
+       int ret;
+
+       ret = qwz_qmi_wlanfw_mode_send(sc, ATH12K_FIRMWARE_MODE_OFF);
+       if (ret) {
+               printf("%s: qmi failed to send wlan mode off: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+       }
+}
+
+/*
+ * Push the shadow register configuration to the CE layer and bring the
+ * firmware up in the requested mode.  Returns 0 or a positive errno.
+ */
+int
+qwz_core_start_firmware(struct qwz_softc *sc, enum ath12k_firmware_mode mode)
+{
+       int err;
+
+       qwz_ce_get_shadow_config(sc, &sc->qmi_ce_cfg.shadow_reg_v2,
+           &sc->qmi_ce_cfg.shadow_reg_v2_len);
+
+       err = qwz_qmi_firmware_start(sc, mode);
+       if (err)
+               printf("%s: failed to send firmware start: %d\n",
+                   sc->sc_dev.dv_xname, err);
+
+       return err;
+}
+
+/*
+ * Initialize the per-pdev WMI handle.  Only pdev ids below the
+ * hardware radio limit are valid.  Returns 0 or EINVAL.
+ */
+int
+qwz_wmi_pdev_attach(struct qwz_softc *sc, uint8_t pdev_id)
+{
+       struct qwz_pdev_wmi *w;
+
+       if (pdev_id >= sc->hw_params.max_radios)
+               return EINVAL;
+
+       w = &sc->wmi.wmi[pdev_id];
+       w->wmi = &sc->wmi;
+       w->tx_ce_desc = 1;
+
+       return 0;
+}
+
+/* Undo qwz_wmi_attach(): drop the cached dbring capability data. */
+void
+qwz_wmi_detach(struct qwz_softc *sc)
+{
+       qwz_wmi_free_dbring_caps(sc);
+}
+
+/*
+ * Attach the WMI layer: set up the handle for pdev 0 and initialize
+ * global WMI state.  The preferred hw mode chosen here is refined
+ * later when the service_ext_ready event is handled.
+ */
+int
+qwz_wmi_attach(struct qwz_softc *sc)
+{
+       int err;
+
+       err = qwz_wmi_pdev_attach(sc, 0);
+       if (err)
+               return err;
+
+       sc->wmi.sc = sc;
+       sc->wmi.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
+       sc->wmi.tx_credits = 1;
+
+       if (sc->hw_params.single_pdev_only &&
+           sc->hw_params.num_rxmda_per_pdev > 1)
+               sc->wmi.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
+
+       return 0;
+}
+
+/*
+ * HTC transmit-completion hook for WMI control-path mbufs.  The HTC
+ * endpoint id was stashed in the mbuf's ph_cookie when the frame was
+ * queued; wake up any thread waiting on the matching pdev's tx CE
+ * descriptor counter.
+ */
+void
+qwz_wmi_htc_tx_complete(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct qwz_pdev_wmi *wmi = NULL;
+       uint32_t i;
+       uint8_t wmi_ep_count;
+       uint8_t eid;
+
+       /* Recover the endpoint id, then release the transmitted mbuf. */
+       eid = (uintptr_t)m->m_pkthdr.ph_cookie;
+       m_freem(m);
+
+       if (eid >= ATH12K_HTC_EP_COUNT)
+               return;
+
+       /* NOTE(review): assumes at most one WMI endpoint per radio, as
+        * in the Linux driver -- confirm this holds for ath12k chips. */
+       wmi_ep_count = sc->htc.wmi_ep_count;
+       if (wmi_ep_count > sc->hw_params.max_radios)
+               return;
+
+       for (i = 0; i < sc->htc.wmi_ep_count; i++) {
+               if (sc->wmi.wmi[i].eid == eid) {
+                       wmi = &sc->wmi.wmi[i];
+                       break;
+               }
+       }
+
+       if (wmi)
+               wakeup(&wmi->tx_ce_desc);
+}
+
+/*
+ * TLV parser callback for the WMI "service available" event.  The
+ * firmware advertises extended services as packed 32-bit bitmap
+ * segments; fold every set bit into the linear sc->wmi.svc_map.
+ */
+int
+qwz_wmi_tlv_services_parser(struct qwz_softc *sc, uint16_t tag, uint16_t len,
+    const void *ptr, void *data)
+{
+       const struct wmi_service_available_event *ev;
+       uint32_t *wmi_ext2_service_bitmap;
+       int i, j;
+
+       switch (tag) {
+       case WMI_TAG_SERVICE_AVAILABLE_EVENT:
+               ev = (struct wmi_service_available_event *)ptr;
+               /* i walks the bitmap words; j tracks the global service
+                * id, starting after the first-generation services. */
+               for (i = 0, j = WMI_MAX_SERVICE;
+                   i < WMI_SERVICE_SEGMENT_BM_SIZE32 &&
+                   j < WMI_MAX_EXT_SERVICE;
+                   i++) {
+                       do {
+                               if (ev->wmi_service_segment_bitmap[i] &
+                                   BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+                                       setbit(sc->wmi.svc_map, j);
+                       } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+               }
+
+               DNPRINTF(QWZ_D_WMI,
+                   "%s: wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, "
+                   "2:0x%04x, 3:0x%04x\n", __func__,
+                   ev->wmi_service_segment_bitmap[0],
+                   ev->wmi_service_segment_bitmap[1],
+                   ev->wmi_service_segment_bitmap[2],
+                   ev->wmi_service_segment_bitmap[3]);
+               break;
+       case WMI_TAG_ARRAY_UINT32:
+               /* Second-generation extended services arrive as a bare
+                * uint32 array following the event structure. */
+               wmi_ext2_service_bitmap = (uint32_t *)ptr;
+               for (i = 0, j = WMI_MAX_EXT_SERVICE;
+                   i < WMI_SERVICE_SEGMENT_BM_SIZE32 &&
+                   j < WMI_MAX_EXT2_SERVICE;
+                   i++) {
+                       do {
+                               if (wmi_ext2_service_bitmap[i] &
+                                   BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+                                       setbit(sc->wmi.svc_map, j);
+                       } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+               }
+
+               DNPRINTF(QWZ_D_WMI,
+                   "%s: wmi_ext2_service__bitmap  0:0x%04x, 1:0x%04x, "
+                   "2:0x%04x, 3:0x%04x\n", __func__,
+                   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
+                   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
+               break;
+       }
+
+       return 0;
+}
+
+/*
+ * Minimum payload length for each WMI TLV tag the driver parses;
+ * qwz_wmi_tlv_iter() rejects TLVs shorter than the entry for their
+ * tag.  Tags without an entry default to 0 (no minimum enforced).
+ */
+static const struct wmi_tlv_policy wmi_tlv_policies[] = {
+       [WMI_TAG_ARRAY_BYTE]
+               = { .min_len = 0 },
+       [WMI_TAG_ARRAY_UINT32]
+               = { .min_len = 0 },
+       [WMI_TAG_SERVICE_READY_EVENT]
+               = { .min_len = sizeof(struct wmi_service_ready_event) },
+       [WMI_TAG_SERVICE_READY_EXT_EVENT]
+               = { .min_len =  sizeof(struct wmi_service_ready_ext_event) },
+       [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
+               = { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
+       [WMI_TAG_SOC_HAL_REG_CAPABILITIES]
+               = { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
+       [WMI_TAG_VDEV_START_RESPONSE_EVENT]
+               = { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
+       [WMI_TAG_PEER_DELETE_RESP_EVENT]
+               = { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
+       [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
+               = { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
+       [WMI_TAG_VDEV_STOPPED_EVENT]
+               = { .min_len = sizeof(struct wmi_vdev_stopped_event) },
+       [WMI_TAG_REG_CHAN_LIST_CC_EVENT]
+               = { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
+       [WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]
+               = { .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
+       [WMI_TAG_MGMT_RX_HDR]
+               = { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
+       [WMI_TAG_MGMT_TX_COMPL_EVENT]
+               = { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
+       [WMI_TAG_SCAN_EVENT]
+               = { .min_len = sizeof(struct wmi_scan_event) },
+       [WMI_TAG_PEER_STA_KICKOUT_EVENT]
+               = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+       [WMI_TAG_ROAM_EVENT]
+               = { .min_len = sizeof(struct wmi_roam_event) },
+       [WMI_TAG_CHAN_INFO_EVENT]
+               = { .min_len = sizeof(struct wmi_chan_info_event) },
+       [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
+               = { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
+       [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
+               = { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
+       [WMI_TAG_READY_EVENT] = {
+               .min_len = sizeof(struct wmi_ready_event_min) },
+       [WMI_TAG_SERVICE_AVAILABLE_EVENT]
+               = {.min_len = sizeof(struct wmi_service_available_event) },
+       [WMI_TAG_PEER_ASSOC_CONF_EVENT]
+               = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
+       [WMI_TAG_STATS_EVENT]
+               = { .min_len = sizeof(struct wmi_stats_event) },
+       [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
+               = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
+       [WMI_TAG_HOST_SWFDA_EVENT] = {
+               .min_len = sizeof(struct wmi_fils_discovery_event) },
+       [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
+               .min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
+       [WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
+               .min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+       [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
+               .min_len = sizeof(struct wmi_obss_color_collision_event) },
+       [WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
+               .min_len = sizeof(struct wmi_11d_new_cc_ev) },
+       [WMI_TAG_PER_CHAIN_RSSI_STATS] = {
+               .min_len = sizeof(struct wmi_per_chain_rssi_stats) },
+       [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
+               .min_len = sizeof(struct wmi_twt_add_dialog_event) },
+};
+
+/*
+ * Walk a buffer of WMI TLVs, validating each header and the per-tag
+ * minimum payload length from wmi_tlv_policies, and hand every payload
+ * to the iterator callback.  Returns 0, EINVAL on malformed input, or
+ * the first non-zero value returned by the callback.
+ */
+int
+qwz_wmi_tlv_iter(struct qwz_softc *sc, const void *ptr, size_t len,
+    int (*iter)(struct qwz_softc *sc, uint16_t tag, uint16_t len,
+    const void *ptr, void *data), void *data)
+{
+       const void *begin = ptr;
+       const struct wmi_tlv *tlv;
+       uint16_t tlv_tag, tlv_len;
+       int ret;
+
+       while (len > 0) {
+               /* Need at least a full TLV header. */
+               if (len < sizeof(*tlv)) {
+                       printf("%s: wmi tlv parse failure at byte %zd "
+                           "(%zu bytes left, %zu expected)\n", __func__,
+                           ptr - begin, len, sizeof(*tlv));
+                       return EINVAL;
+               }
+
+               tlv = ptr;
+               tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+               tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
+               ptr += sizeof(*tlv);
+               len -= sizeof(*tlv);
+
+               /* The declared payload must fit in the remaining data. */
+               if (tlv_len > len) {
+                       printf("%s: wmi tlv parse failure of tag %u "
+                           "at byte %zd (%zu bytes left, %u expected)\n",
+                           __func__, tlv_tag, ptr - begin, len, tlv_len);
+                       return EINVAL;
+               }
+
+               /* Enforce the per-tag minimum length, if one is set. */
+               if (tlv_tag < nitems(wmi_tlv_policies) &&
+                   wmi_tlv_policies[tlv_tag].min_len &&
+                   wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+                       printf("%s: wmi tlv parse failure of tag %u "
+                           "at byte %zd (%u bytes is less than "
+                           "min length %zu)\n", __func__,
+                           tlv_tag, ptr - begin, tlv_len,
+                           wmi_tlv_policies[tlv_tag].min_len);
+                       return EINVAL;
+               }
+
+               ret = iter(sc, tlv_tag, tlv_len, ptr, data);
+               if (ret)
+                       return ret;
+
+               ptr += tlv_len;
+               len -= tlv_len;
+       }
+
+       return 0;
+}
+
+/*
+ * Copy the target capability fields out of a service ready event into
+ * the host's capability structure.  Returns 0 or EINVAL.
+ */
+int
+qwz_pull_service_ready_tlv(struct qwz_softc *sc, const void *evt_buf,
+    struct ath12k_targ_cap *cap)
+{
+       const struct wmi_service_ready_event *ev = evt_buf;
+
+       if (!ev)
+               return EINVAL;
+
+       cap->phy_capability = ev->phy_capability;
+       cap->max_frag_entry = ev->max_frag_entry;
+       cap->num_rf_chains = ev->num_rf_chains;
+       cap->ht_cap_info = ev->ht_cap_info;
+       cap->vht_cap_info = ev->vht_cap_info;
+       cap->vht_supp_mcs = ev->vht_supp_mcs;
+       cap->hw_min_tx_power = ev->hw_min_tx_power;
+       cap->hw_max_tx_power = ev->hw_max_tx_power;
+       cap->sys_cap_info = ev->sys_cap_info;
+       cap->min_pkt_size_enable = ev->min_pkt_size_enable;
+       cap->max_bcn_ie_size = ev->max_bcn_ie_size;
+       cap->max_num_scan_channels = ev->max_num_scan_channels;
+       cap->max_supported_macs = ev->max_supported_macs;
+       cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
+       cap->txrx_chainmask = ev->txrx_chainmask;
+       cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
+       cap->num_msdu_desc = ev->num_msdu_desc;
+
+       return 0;
+}
+
+/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
+ * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
+ * 4-byte word.
+ */
+void
+qwz_wmi_service_bitmap_copy(struct qwz_pdev_wmi *wmi,
+    const uint32_t *wmi_svc_bm)
+{
+       int i, j = 0;
+
+       /* i walks the bitmap words; j tracks the linear service id.
+        * Only WMI_SERVICE_BITS_IN_SIZE32 bits of each word are used. */
+       for (i = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
+               do {
+                       if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
+                               setbit(wmi->wmi->svc_map, j);
+               } while (++j % WMI_SERVICE_BITS_IN_SIZE32);
+       }
+}
+
+/*
+ * TLV parser callback for the service ready event: records the target
+ * capabilities and copies the first UINT32 array, which carries the
+ * service bitmap, exactly once.
+ */
+int
+qwz_wmi_tlv_svc_rdy_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
+    const void *ptr, void *data)
+{
+       struct wmi_tlv_svc_ready_parse *svc_ready = data;
+       struct qwz_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
+       uint16_t expect_len;
+
+       switch (tag) {
+       case WMI_TAG_SERVICE_READY_EVENT:
+               if (qwz_pull_service_ready_tlv(sc, ptr, &sc->target_caps))
+                       return EINVAL;
+               break;
+
+       case WMI_TAG_ARRAY_UINT32:
+               /* Only the first UINT32 array is the service bitmap. */
+               if (!svc_ready->wmi_svc_bitmap_done) {
+                       expect_len = WMI_SERVICE_BM_SIZE * sizeof(uint32_t);
+                       if (len < expect_len) {
+                               printf("%s: invalid len %d for the tag 0x%x\n",
+                                   __func__, len, tag);
+                               return EINVAL;
+                       }
+
+                       qwz_wmi_service_bitmap_copy(wmi_handle, ptr);
+
+                       svc_ready->wmi_svc_bitmap_done = 1;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/* Handle a WMI service-ready event mbuf. */
+void
+qwz_service_ready_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct wmi_tlv_svc_ready_parse svc_ready = { };
+       int err;
+
+       err = qwz_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
+           qwz_wmi_tlv_svc_rdy_parse, &svc_ready);
+       if (err != 0) {
+               printf("%s: failed to parse tlv %d\n", __func__, err);
+               return;
+       }
+
+       DNPRINTF(QWZ_D_WMI, "%s: event service ready\n", __func__);
+}
+
+/* Copy the fixed parameters of the extended service-ready event into
+ * the host representation. Returns EINVAL if the TLV was absent.
+ */
+int
+qwz_pull_svc_ready_ext(struct qwz_pdev_wmi *wmi_handle, const void *ptr,
+    struct ath12k_service_ext_param *param)
+{
+       const struct wmi_service_ready_ext_event *event = ptr;
+
+       if (event == NULL)
+               return EINVAL;
+
+       /* Move this to host based bitmap */
+       param->default_fw_config_bits = event->default_fw_config_bits;
+       param->default_conc_scan_config_bits =
+           event->default_conc_scan_config_bits;
+       param->he_cap_info = event->he_cap_info;
+       param->mpdu_density = event->mpdu_density;
+       param->max_bssid_rx_filters = event->max_bssid_rx_filters;
+       memcpy(&param->ppet, &event->ppet, sizeof(param->ppet));
+
+       return 0;
+}
+
+/*
+ * Locate the MAC/PHY capabilities matching (hw_mode_id, phy_id) inside
+ * the arrays carried by the extended service-ready event and record
+ * them in the given pdev's capability structure, as well as in the
+ * softc's per-target pdev list.
+ * Returns 0 on success, EINVAL if the hw mode or phy cannot be found
+ * or the advertised capabilities support neither 2GHz nor 5GHz.
+ */
+int
+qwz_pull_mac_phy_cap_svc_ready_ext(struct qwz_pdev_wmi *wmi_handle,
+    struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
+    struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
+    struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
+    struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
+    uint8_t hw_mode_id, uint8_t phy_id, struct qwz_pdev *pdev)
+{
+       struct wmi_mac_phy_capabilities *mac_phy_caps;
+       struct qwz_softc *sc = wmi_handle->wmi->sc;
+       struct ath12k_band_cap *cap_band;
+       struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
+       uint32_t phy_map;
+       uint32_t hw_idx, phy_idx = 0;
+
+       if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
+               return EINVAL;
+
+       /* Find the requested hw mode; phy_idx accumulates the number of
+        * phys consumed by the hw modes that precede it, which is the
+        * base index of this mode's phys in wmi_mac_phy_caps[].
+        */
+       for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
+               if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
+                       break;
+
+               phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
+               while (phy_map) {
+                       phy_map >>= 1;
+                       phy_idx++;
+               }
+       }
+
+       if (hw_idx == hw_caps->num_hw_modes)
+               return EINVAL;
+
+       phy_idx += phy_id;
+       /* NOTE(review): bounds check uses phy_id rather than the
+        * accumulated phy_idx that indexes wmi_mac_phy_caps below; this
+        * matches the upstream Linux driver but is worth confirming.
+        */
+       if (phy_id >= hal_reg_caps->num_phy)
+               return EINVAL;
+
+       mac_phy_caps = wmi_mac_phy_caps + phy_idx;
+
+       pdev->pdev_id = mac_phy_caps->pdev_id;
+       pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
+       pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
+       /* Remember which pdev id serves which bands for later lookups. */
+       sc->target_pdev_ids[sc->target_pdev_count].supported_bands =
+           mac_phy_caps->supported_bands;
+       sc->target_pdev_ids[sc->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
+       sc->target_pdev_count++;
+
+       if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+           !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
+               return EINVAL;
+
+       /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
+        * band to band for a single radio, need to see how this should be
+        * handled.
+        */
+       if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+               pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
+               pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
+       }
+
+       /* 5GHz values win when both bands are supported. */
+       if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+               pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
+               pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
+               pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+               pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
+               pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
+               pdev_cap->nss_ratio_enabled =
+                   WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
+               pdev_cap->nss_ratio_info =
+                   WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
+       }
+
+       /* tx/rx chainmask reported from fw depends on the actual hw chains used,
+        * For example, for 4x4 capable macphys, first 4 chains can be used for first
+        * mac and the remaining 4 chains can be used for the second mac or vice-versa.
+        * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
+        * will be advertised for second mac or vice-versa. Compute the shift value
+        * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
+        * mac80211.
+        */
+       pdev_cap->tx_chain_mask_shift = ffs(pdev_cap->tx_chain_mask);
+       pdev_cap->rx_chain_mask_shift = ffs(pdev_cap->rx_chain_mask);
+
+       /* Per-band capability tables: band[0] is 2GHz, band[1] is 5GHz. */
+       if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+               cap_band = &pdev_cap->band[0];
+               cap_band->phy_id = mac_phy_caps->phy_id;
+               cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
+               cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
+               cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
+               cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
+               cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
+               memcpy(cap_band->he_cap_phy_info,
+                   &mac_phy_caps->he_cap_phy_info_2g,
+                   sizeof(uint32_t) * PSOC_HOST_MAX_PHY_SIZE);
+               memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
+                   sizeof(struct ath12k_ppe_threshold));
+       }
+
+       if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+               cap_band = &pdev_cap->band[1];
+               cap_band->phy_id = mac_phy_caps->phy_id;
+               cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+               cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+               cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+               cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+               cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+               memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+                   sizeof(uint32_t) * PSOC_HOST_MAX_PHY_SIZE);
+               memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+                   sizeof(struct ath12k_ppe_threshold));
+#if 0
+               /* 6GHz band support not wired up yet (Linux fills a third
+                * band table from the 5GHz caps here).
+                */
+               cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
+               cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+               cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+               cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+               cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+               cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+               memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+                      sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+               memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+                      sizeof(struct ath12k_ppe_threshold));
+#endif
+       }
+
+       return 0;
+}
+
+/*
+ * Parse the SoC HAL register capabilities TLV of the extended
+ * service-ready event and, for each phy advertised by the preferred
+ * hw mode, extract the corresponding MAC/PHY capabilities into a pdev.
+ * Sets sc->num_radios and sc->target_pdev_count as a side effect.
+ */
+int
+qwz_wmi_tlv_ext_soc_hal_reg_caps_parse(struct qwz_softc *sc, uint16_t len,
+    const void *ptr, void *data)
+{
+       struct qwz_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
+       struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+       uint8_t hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
+       uint32_t phy_id_map;
+       int pdev_index = 0;
+       int ret;
+
+       svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
+       svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;
+
+       /* Restart radio accounting; one radio per bit in the phy map. */
+       sc->num_radios = 0;
+       sc->target_pdev_count = 0;
+       phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;
+
+       while (phy_id_map && sc->num_radios < MAX_RADIOS) {
+               ret = qwz_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
+                   svc_rdy_ext->hw_caps,
+                   svc_rdy_ext->hw_mode_caps,
+                   svc_rdy_ext->soc_hal_reg_caps,
+                   svc_rdy_ext->mac_phy_caps,
+                   hw_mode_id, sc->num_radios, &sc->pdevs[pdev_index]);
+               if (ret) {
+                       printf("%s: failed to extract mac caps, idx: %d\n",
+                           __func__, sc->num_radios);
+                       return ret;
+               }
+
+               sc->num_radios++;
+
+               /* For QCA6390, save mac_phy capability in the same pdev */
+               if (sc->hw_params.single_pdev_only)
+                       pdev_index = 0;
+               else
+                       pdev_index = sc->num_radios;
+
+               /* TODO: mac_phy_cap prints */
+               phy_id_map >>= 1;
+       }
+
+       /* For QCA6390, set num_radios to 1 because host manages
+        * both 2G and 5G radio in one pdev.
+        * Set pdev_id = 0 and 0 means soc level.
+        */
+       if (sc->hw_params.single_pdev_only) {
+               sc->num_radios = 1;
+               sc->pdevs[0].pdev_id = 0;
+       }
+
+       return 0;
+}
+
+/*
+ * Per-element callback for the hw-mode capabilities array: counts the
+ * elements and totals the number of phys across all hw modes
+ * (svc_rdy_ext->tot_phy_id), which later sizes the mac_phy_caps array.
+ */
+int
+qwz_wmi_tlv_hw_mode_caps_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
+    const void *ptr, void *data)
+{
+       struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+       struct wmi_hw_mode_capabilities *hw_mode_cap;
+       uint32_t phy_map = 0;
+
+       if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
+               return EPROTO;
+
+       if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
+               return ENOBUFS;
+
+       /* ptr points at the hw_mode_id member (first payload field);
+        * recover the enclosing structure from it.
+        */
+       hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
+           hw_mode_id);
+       svc_rdy_ext->n_hw_mode_caps++;
+
+       /* Each set bit in the phy map is one phy of this hw mode. */
+       phy_map = hw_mode_cap->phy_id_map;
+       while (phy_map) {
+               svc_rdy_ext->tot_phy_id++;
+               phy_map = phy_map >> 1;
+       }
+
+       return 0;
+}
+
+/* Priority table for hw mode selection: maps each
+ * WMI_HOST_HW_MODE_* config to its WMI_HOST_HW_MODE_*_PRI priority
+ * value (lower value means more preferred).
+ */
+#define PRIMAP(_hw_mode_) \
+       [_hw_mode_] = _hw_mode_##_PRI
+
+static const int qwz_hw_mode_pri_map[] = {
+       PRIMAP(WMI_HOST_HW_MODE_SINGLE),
+       PRIMAP(WMI_HOST_HW_MODE_DBS),
+       PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
+       PRIMAP(WMI_HOST_HW_MODE_SBS),
+       PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
+       PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
+       /* keep last */
+       PRIMAP(WMI_HOST_HW_MODE_MAX),
+};
+
+/* Parse the hw-mode capabilities array and pick the hw mode with the
+ * best (lowest) priority as sc->wmi.preferred_hw_mode.
+ */
+int
+qwz_wmi_tlv_hw_mode_caps(struct qwz_softc *sc, uint16_t len,
+    const void *ptr, void *data)
+{
+       struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+       struct wmi_hw_mode_capabilities *cap;
+       enum wmi_host_hw_mode_config_type mode, pref;
+       uint32_t i;
+       int err;
+
+       svc_rdy_ext->n_hw_mode_caps = 0;
+       svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
+
+       err = qwz_wmi_tlv_iter(sc, ptr, len,
+           qwz_wmi_tlv_hw_mode_caps_parse, svc_rdy_ext);
+       if (err) {
+               printf("%s: failed to parse tlv %d\n", __func__, err);
+               return err;
+       }
+
+       for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
+               cap = &svc_rdy_ext->hw_mode_caps[i];
+               mode = cap->hw_mode_id;
+               pref = sc->wmi.preferred_hw_mode;
+
+               /* Lower priority value wins. */
+               if (qwz_hw_mode_pri_map[mode] < qwz_hw_mode_pri_map[pref]) {
+                       svc_rdy_ext->pref_hw_mode_caps = *cap;
+                       sc->wmi.preferred_hw_mode = mode;
+               }
+       }
+
+       DNPRINTF(QWZ_D_WMI, "%s: preferred_hw_mode: %d\n", __func__,
+           sc->wmi.preferred_hw_mode);
+       if (sc->wmi.preferred_hw_mode >= WMI_HOST_HW_MODE_MAX)
+               return EINVAL;
+
+       return 0;
+}
+
+/*
+ * Per-element callback for the MAC/PHY capabilities array. On the
+ * first element, allocates room for all tot_phy_id entries; each call
+ * copies one firmware entry into the array.
+ */
+int
+qwz_wmi_tlv_mac_phy_caps_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
+    const void *ptr, void *data)
+{
+       struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+       if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
+               return EPROTO;
+
+       if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
+               return ENOBUFS;
+
+       /* Never copy more than our structure can hold. */
+       len = MIN(len, sizeof(struct wmi_mac_phy_capabilities));
+       if (!svc_rdy_ext->n_mac_phy_caps) {
+               svc_rdy_ext->mac_phy_caps = mallocarray(
+                   svc_rdy_ext->tot_phy_id,
+                   sizeof(struct wmi_mac_phy_capabilities),
+                   M_DEVBUF, M_NOWAIT | M_ZERO);
+               if (!svc_rdy_ext->mac_phy_caps)
+                       return ENOMEM;
+               /* Record the allocated size, not len * tot_phy_id: if the
+                * firmware TLV is shorter than our structure the two would
+                * differ, and free(9) must be passed the allocated size.
+                */
+               svc_rdy_ext->mac_phy_caps_size = svc_rdy_ext->tot_phy_id *
+                   sizeof(struct wmi_mac_phy_capabilities);
+       }
+
+       memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps,
+           ptr, len);
+       svc_rdy_ext->n_mac_phy_caps++;
+       return 0;
+}
+
+/* Per-element callback for the extended HAL register capabilities
+ * array; simply counts elements up to the advertised number of phys.
+ */
+int
+qwz_wmi_tlv_ext_hal_reg_caps_parse(struct qwz_softc *sc,
+    uint16_t tag, uint16_t len, const void *ptr, void *data)
+{
+       struct wmi_tlv_svc_rdy_ext_parse *parse = data;
+
+       if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
+               return EPROTO;
+
+       if (parse->n_ext_hal_reg_caps >= parse->param.num_phy)
+               return ENOBUFS;
+
+       parse->n_ext_hal_reg_caps++;
+       return 0;
+}
+
+/* Copy the extended HAL register capabilities for one phy into the
+ * host representation. Returns EINVAL on a missing array or an
+ * out-of-range phy index.
+ */
+int
+qwz_pull_reg_cap_svc_rdy_ext(struct qwz_pdev_wmi *wmi_handle,
+    struct wmi_soc_hal_reg_capabilities *reg_caps,
+    struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
+    uint8_t phy_idx, struct ath12k_hal_reg_capabilities_ext *param)
+{
+       struct wmi_hal_reg_capabilities_ext *cap;
+
+       if (reg_caps == NULL || wmi_ext_reg_cap == NULL)
+               return EINVAL;
+
+       if (phy_idx >= reg_caps->num_phy)
+               return EINVAL;
+
+       cap = &wmi_ext_reg_cap[phy_idx];
+
+       param->phy_id = cap->phy_id;
+       param->eeprom_reg_domain = cap->eeprom_reg_domain;
+       param->eeprom_reg_domain_ext = cap->eeprom_reg_domain_ext;
+       param->regcap1 = cap->regcap1;
+       param->regcap2 = cap->regcap2;
+       /* check if param->wireless_mode is needed */
+       param->low_2ghz_chan = cap->low_2ghz_chan;
+       param->high_2ghz_chan = cap->high_2ghz_chan;
+       param->low_5ghz_chan = cap->low_5ghz_chan;
+       param->high_5ghz_chan = cap->high_5ghz_chan;
+
+       return 0;
+}
+
+/* Parse the extended HAL register capabilities array and store each
+ * phy's capabilities into sc->hal_reg_cap[], indexed by phy id.
+ */
+int
+qwz_wmi_tlv_ext_hal_reg_caps(struct qwz_softc *sc, uint16_t len,
+    const void *ptr, void *data)
+{
+       struct qwz_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
+       struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+       struct ath12k_hal_reg_capabilities_ext reg_cap;
+       uint32_t phy;
+       int err;
+
+       svc_rdy_ext->n_ext_hal_reg_caps = 0;
+       svc_rdy_ext->ext_hal_reg_caps =
+           (struct wmi_hal_reg_capabilities_ext *)ptr;
+       err = qwz_wmi_tlv_iter(sc, ptr, len,
+           qwz_wmi_tlv_ext_hal_reg_caps_parse, svc_rdy_ext);
+       if (err) {
+               printf("%s: failed to parse tlv %d\n", __func__, err);
+               return err;
+       }
+
+       for (phy = 0; phy < svc_rdy_ext->param.num_phy; phy++) {
+               err = qwz_pull_reg_cap_svc_rdy_ext(wmi_handle,
+                   svc_rdy_ext->soc_hal_reg_caps,
+                   svc_rdy_ext->ext_hal_reg_caps, phy, &reg_cap);
+               if (err) {
+                       printf("%s: failed to extract reg cap %d\n",
+                           __func__, phy);
+                       return err;
+               }
+
+               memcpy(&sc->hal_reg_cap[reg_cap.phy_id], &reg_cap,
+                   sizeof(sc->hal_reg_cap[0]));
+       }
+
+       return 0;
+}
+
+/* Per-element callback for the DMA ring capabilities array; counts
+ * the number of entries.
+ */
+int
+qwz_wmi_tlv_dma_ring_caps_parse(struct qwz_softc *sc, uint16_t tag,
+    uint16_t len, const void *ptr, void *data)
+{
+       struct wmi_tlv_dma_ring_caps_parse *caps = data;
+
+       if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
+               return EPROTO;
+
+       caps->n_dma_ring_caps++;
+       return 0;
+}
+
+/* Allocate sc->db_caps with room for num_cap direct-buffer ring
+ * capability entries. Returns ENOMEM on allocation failure.
+ */
+int
+qwz_wmi_alloc_dbring_caps(struct qwz_softc *sc, uint32_t num_cap)
+{
+       struct qwz_dbring_cap *caps;
+
+       caps = mallocarray(num_cap, sizeof(struct qwz_dbring_cap),
+           M_DEVBUF, M_NOWAIT | M_ZERO);
+       if (caps == NULL)
+               return ENOMEM;
+
+       sc->db_caps = caps;
+       sc->num_db_cap = num_cap;
+
+       return 0;
+}
+
+/* Release sc->db_caps and reset the associated count. */
+void
+qwz_wmi_free_dbring_caps(struct qwz_softc *sc)
+{
+       size_t size = sc->num_db_cap * sizeof(struct qwz_dbring_cap);
+
+       free(sc->db_caps, M_DEVBUF, size);
+       sc->db_caps = NULL;
+       sc->num_db_cap = 0;
+}
+
+/*
+ * Parse the DMA ring capabilities array of a service-ready-ext(2)
+ * event, allocate sc->db_caps accordingly, and fill in one
+ * direct-buffer ring capability per entry. Already-populated caps are
+ * kept as-is (both ext and ext2 events may carry this array).
+ */
+int
+qwz_wmi_tlv_dma_ring_caps(struct qwz_softc *sc, uint16_t len,
+    const void *ptr, void *data)
+{
+       struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
+       struct wmi_dma_ring_capabilities *dma_caps;
+       struct qwz_dbring_cap *dir_buff_caps;
+       int ret;
+       uint32_t i;
+
+       dma_caps_parse->n_dma_ring_caps = 0;
+       dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
+       ret = qwz_wmi_tlv_iter(sc, ptr, len,
+           qwz_wmi_tlv_dma_ring_caps_parse, dma_caps_parse);
+       if (ret) {
+               printf("%s: failed to parse dma ring caps tlv %d\n",
+                   __func__, ret);
+               return ret;
+       }
+
+       if (!dma_caps_parse->n_dma_ring_caps)
+               return 0;
+
+       /* The caps may arrive in both the ext and ext2 events; only the
+        * first occurrence is processed.
+        */
+       if (sc->num_db_cap) {
+               DNPRINTF(QWZ_D_WMI,
+                   "%s: Already processed, so ignoring dma ring caps\n",
+                   __func__);
+               return 0;
+       }
+
+       ret = qwz_wmi_alloc_dbring_caps(sc, dma_caps_parse->n_dma_ring_caps);
+       if (ret)
+               return ret;
+
+       dir_buff_caps = sc->db_caps;
+       for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
+               if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
+                       printf("%s: Invalid module id %d\n", __func__,
+                           dma_caps[i].module_id);
+                       ret = EINVAL;
+                       goto free_dir_buff;
+               }
+
+               dir_buff_caps[i].id = dma_caps[i].module_id;
+               dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
+               dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
+               dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
+               dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
+       }
+
+       return 0;
+
+free_dir_buff:
+       /* Drop everything on a bad entry; partial caps are not kept. */
+       qwz_wmi_free_dbring_caps(sc);
+       return ret;
+}
+
+/*
+ * Top-level TLV callback for the extended service-ready event.
+ * The event carries several anonymous WMI_TAG_ARRAY_STRUCT arrays in a
+ * fixed order (hw mode caps, mac/phy caps, ext hal reg caps, chainmask
+ * combo, chainmask caps, OEM dma ring caps, dma ring caps); the *_done
+ * flags in svc_rdy_ext track which array the current TLV belongs to.
+ */
+int
+qwz_wmi_tlv_svc_rdy_ext_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
+    const void *ptr, void *data)
+{
+       struct qwz_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
+       struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+       int ret;
+
+       switch (tag) {
+       case WMI_TAG_SERVICE_READY_EXT_EVENT:
+               ret = qwz_pull_svc_ready_ext(wmi_handle, ptr,
+                   &svc_rdy_ext->param);
+               if (ret) {
+                       printf("%s: unable to extract ext params\n", __func__);
+                       return ret;
+               }
+               break;
+
+       case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
+               svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
+               svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
+               break;
+
+       case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
+               ret = qwz_wmi_tlv_ext_soc_hal_reg_caps_parse(sc, len, ptr,
+                   svc_rdy_ext);
+               if (ret)
+                       return ret;
+               break;
+
+       case WMI_TAG_ARRAY_STRUCT:
+               /* Positional dispatch: the arrays arrive in this order. */
+               if (!svc_rdy_ext->hw_mode_done) {
+                       ret = qwz_wmi_tlv_hw_mode_caps(sc, len, ptr,
+                           svc_rdy_ext);
+                       if (ret)
+                               return ret;
+
+                       svc_rdy_ext->hw_mode_done = 1;
+               } else if (!svc_rdy_ext->mac_phy_done) {
+                       svc_rdy_ext->n_mac_phy_caps = 0;
+                       ret = qwz_wmi_tlv_iter(sc, ptr, len,
+                           qwz_wmi_tlv_mac_phy_caps_parse, svc_rdy_ext);
+                       if (ret) {
+                               printf("%s: failed to parse tlv %d\n",
+                                   __func__, ret);
+                               return ret;
+                       }
+
+                       svc_rdy_ext->mac_phy_done = 1;
+               } else if (!svc_rdy_ext->ext_hal_reg_done) {
+                       ret = qwz_wmi_tlv_ext_hal_reg_caps(sc, len, ptr,
+                           svc_rdy_ext);
+                       if (ret)
+                               return ret;
+
+                       svc_rdy_ext->ext_hal_reg_done = 1;
+               } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
+                       /* Chainmask combo/cap and OEM dma ring arrays are
+                        * currently skipped; only their position is consumed.
+                        */
+                       svc_rdy_ext->mac_phy_chainmask_combo_done = 1;
+               } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
+                       svc_rdy_ext->mac_phy_chainmask_cap_done = 1;
+               } else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
+                       svc_rdy_ext->oem_dma_ring_cap_done = 1;
+               } else if (!svc_rdy_ext->dma_ring_cap_done) {
+                       ret = qwz_wmi_tlv_dma_ring_caps(sc, len, ptr,
+                           &svc_rdy_ext->dma_caps_parse);
+                       if (ret)
+                               return ret;
+
+                       svc_rdy_ext->dma_ring_cap_done = 1;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/* Handle a WMI extended service-ready event mbuf. If the firmware does
+ * not advertise the EXT2 message, this is the last service-ready event
+ * and waiters are woken here.
+ */
+void
+qwz_service_ready_ext_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
+       int err;
+
+       err = qwz_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
+           qwz_wmi_tlv_svc_rdy_ext_parse, &svc_rdy_ext);
+       if (err != 0) {
+               printf("%s: failed to parse tlv %d\n", __func__, err);
+               qwz_wmi_free_dbring_caps(sc);
+               return;
+       }
+
+       DNPRINTF(QWZ_D_WMI, "%s: event service ready ext\n", __func__);
+
+       if (!isset(sc->wmi.svc_map, WMI_TLV_SERVICE_EXT2_MSG))
+               wakeup(&sc->wmi.service_ready);
+
+       free(svc_rdy_ext.mac_phy_caps, M_DEVBUF,
+           svc_rdy_ext.mac_phy_caps_size);
+}
+
+/* TLV callback for the ext2 service-ready event; only the DMA ring
+ * capabilities array is consumed.
+ */
+int
+qwz_wmi_tlv_svc_rdy_ext2_parse(struct qwz_softc *sc,
+    uint16_t tag, uint16_t len, const void *ptr, void *data)
+{
+       struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
+       int err;
+
+       if (tag != WMI_TAG_ARRAY_STRUCT)
+               return 0;
+
+       if (!parse->dma_ring_cap_done) {
+               err = qwz_wmi_tlv_dma_ring_caps(sc, len, ptr,
+                   &parse->dma_caps_parse);
+               if (err)
+                       return err;
+
+               parse->dma_ring_cap_done = 1;
+       }
+
+       return 0;
+}
+
+/* Handle a WMI ext2 service-ready event mbuf; this is the final
+ * service-ready event, so mark service_ready and wake waiters.
+ */
+void
+qwz_service_ready_ext2_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
+       int err;
+
+       err = qwz_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
+           qwz_wmi_tlv_svc_rdy_ext2_parse, &svc_rdy_ext2);
+       if (err != 0) {
+               printf("%s: failed to parse ext2 event tlv %d\n",
+                   __func__, err);
+               qwz_wmi_free_dbring_caps(sc);
+               return;
+       }
+
+       DNPRINTF(QWZ_D_WMI, "%s: event service ready ext2\n", __func__);
+
+       sc->wmi.service_ready = 1;
+       wakeup(&sc->wmi.service_ready);
+}
+
+/* Handle a WMI service-available event mbuf. */
+void
+qwz_service_available_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       int err;
+
+       err = qwz_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
+           qwz_wmi_tlv_services_parser, NULL);
+       if (err != 0)
+               printf("%s: failed to parse services available tlv %d\n",
+                   sc->sc_dev.dv_xname, err);
+
+       DNPRINTF(QWZ_D_WMI, "%s: event service available\n", __func__);
+}
+
+/* Extract vdev id and peer MAC address from a peer-assoc-confirm
+ * event. Returns ENOMEM or EPROTO on failure.
+ */
+int
+qwz_pull_peer_assoc_conf_ev(struct qwz_softc *sc, struct mbuf *m,
+    struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
+{
+       const struct wmi_peer_assoc_conf_event *ev;
+       const void **tb;
+       int err = 0;
+
+       tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
+       if (tb == NULL) {
+               err = ENOMEM;
+               printf("%s: failed to parse tlv: %d\n",
+                   sc->sc_dev.dv_xname, err);
+               return err;
+       }
+
+       ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
+       if (ev == NULL) {
+               printf("%s: failed to fetch peer assoc conf ev\n",
+                   sc->sc_dev.dv_xname);
+               err = EPROTO;
+       } else {
+               peer_assoc_conf->vdev_id = ev->vdev_id;
+               peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
+       }
+
+       free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+       return err;
+}
+
+/* Handle a WMI peer-assoc-confirm event mbuf and wake the waiter. */
+void
+qwz_peer_assoc_conf_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
+
+       if (qwz_pull_peer_assoc_conf_ev(sc, m, &peer_assoc_conf)) {
+               printf("%s: failed to extract peer assoc conf event\n",
+                  sc->sc_dev.dv_xname);
+               return;
+       }
+
+       DNPRINTF(QWZ_D_WMI, "%s: event peer assoc conf ev vdev id %d "
+           "macaddr %s\n", __func__, peer_assoc_conf.vdev_id,
+           ether_sprintf((u_char *)peer_assoc_conf.macaddr));
+
+       sc->peer_assoc_done = 1;
+       wakeup(&sc->peer_assoc_done);
+}
+
+/*
+ * TLV callback for the WMI ready event: record init status and the
+ * base MAC address, plus the optional per-pdev extra MAC addresses.
+ */
+int
+qwz_wmi_tlv_rdy_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
+    const void *ptr, void *data)
+{
+       struct wmi_tlv_rdy_parse *rdy_parse = data;
+       struct wmi_ready_event fixed_param;
+       struct wmi_mac_addr *addr_list;
+       struct qwz_pdev *pdev;
+       uint32_t num_mac_addr;
+       int i;
+
+       switch (tag) {
+       case WMI_TAG_READY_EVENT:
+               /* Copy at most sizeof(fixed_param); the firmware may send
+                * a shorter (older) or longer (newer) version of the event.
+                */
+               memset(&fixed_param, 0, sizeof(fixed_param));
+               memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
+                      MIN(sizeof(fixed_param), len));
+               sc->wlan_init_status = fixed_param.ready_event_min.status;
+               rdy_parse->num_extra_mac_addr =
+                       fixed_param.ready_event_min.num_extra_mac_addr;
+
+               IEEE80211_ADDR_COPY(sc->mac_addr,
+                   fixed_param.ready_event_min.mac_addr.addr);
+               sc->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
+               sc->wmi_ready = 1;
+               break;
+       case WMI_TAG_ARRAY_FIXED_STRUCT:
+               addr_list = (struct wmi_mac_addr *)ptr;
+               num_mac_addr = rdy_parse->num_extra_mac_addr;
+
+               /* Per-pdev addresses only make sense with multiple radios
+                * and enough addresses for all of them.
+                */
+               if (!(sc->num_radios > 1 && num_mac_addr >= sc->num_radios))
+                       break;
+
+               for (i = 0; i < sc->num_radios; i++) {
+                       pdev = &sc->pdevs[i];
+                       IEEE80211_ADDR_COPY(pdev->mac_addr, addr_list[i].addr);
+               }
+               sc->pdevs_macaddr_valid = 1;
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/* Handle the WMI ready event mbuf and wake up waiters blocked on
+ * sc->wmi.unified_ready.
+ */
+void
+qwz_ready_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct wmi_tlv_rdy_parse rdy_parse = { };
+       int ret;
+
+       ret = qwz_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
+           qwz_wmi_tlv_rdy_parse, &rdy_parse);
+       if (ret) {
+               printf("%s: failed to parse tlv %d\n", __func__, ret);
+               return;
+       }
+
+       /* Add the newline missing from the original debug message so
+        * output does not run into the next printf.
+        */
+       DNPRINTF(QWZ_D_WMI, "%s: event ready\n", __func__);
+
+       sc->wmi.unified_ready = 1;
+       wakeup(&sc->wmi.unified_ready);
+}
+
+/* Extract vdev id and peer MAC address from a peer-delete-response
+ * event. Returns ENOMEM or EPROTO on failure.
+ */
+int
+qwz_pull_peer_del_resp_ev(struct qwz_softc *sc, struct mbuf *m,
+    struct wmi_peer_delete_resp_event *peer_del_resp)
+{
+       const struct wmi_peer_delete_resp_event *ev;
+       const void **tb;
+       int err = 0;
+
+       tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
+       if (tb == NULL) {
+               err = ENOMEM;
+               printf("%s: failed to parse tlv: %d\n",
+                   sc->sc_dev.dv_xname, err);
+               return err;
+       }
+
+       ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
+       if (ev == NULL) {
+               printf("%s: failed to fetch peer delete resp ev\n",
+                   sc->sc_dev.dv_xname);
+               err = EPROTO;
+       } else {
+               memset(peer_del_resp, 0, sizeof(*peer_del_resp));
+               peer_del_resp->vdev_id = ev->vdev_id;
+               IEEE80211_ADDR_COPY(peer_del_resp->peer_macaddr.addr,
+                   ev->peer_macaddr.addr);
+       }
+
+       free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+       return err;
+}
+
+/* Handle a WMI peer-delete-response event mbuf and wake the waiter. */
+void
+qwz_peer_delete_resp_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct wmi_peer_delete_resp_event peer_del_resp;
+
+       if (qwz_pull_peer_del_resp_ev(sc, m, &peer_del_resp) != 0) {
+               /* Newline added; the original message lacked one. */
+               printf("%s: failed to extract peer delete resp\n",
+                   sc->sc_dev.dv_xname);
+               return;
+       }
+
+       sc->peer_delete_done = 1;
+       wakeup(&sc->peer_delete_done);
+
+       DNPRINTF(QWZ_D_WMI, "%s: peer delete resp for vdev id %d addr %s\n",
+           __func__, peer_del_resp.vdev_id,
+           ether_sprintf(peer_del_resp.peer_macaddr.addr));
+}
+
+/* Map a WMI vdev start response status code to a short description
+ * for log messages; unrecognized codes yield "unknown".
+ */
+const char *
+qwz_wmi_vdev_resp_print(uint32_t vdev_resp_status)
+{
+       switch (vdev_resp_status) {
+       case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
+               return "invalid vdev id";
+       case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
+               return "not supported";
+       case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
+               return "dfs violation";
+       case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
+               return "invalid regdomain";
+       default:
+               return "unknown";
+       }
+}
+
+/* Extract the vdev start response parameters from an event mbuf.
+ * Returns ENOMEM or EPROTO on failure.
+ */
+int
+qwz_pull_vdev_start_resp_tlv(struct qwz_softc *sc, struct mbuf *m,
+    struct wmi_vdev_start_resp_event *vdev_rsp)
+{
+       const struct wmi_vdev_start_resp_event *ev;
+       const void **tb;
+       int err = 0;
+
+       tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
+       if (tb == NULL) {
+               err = ENOMEM;
+               printf("%s: failed to parse tlv: %d\n",
+                   sc->sc_dev.dv_xname, err);
+               return err;
+       }
+
+       ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
+       if (ev == NULL) {
+               printf("%s: failed to fetch vdev start resp ev\n",
+                   sc->sc_dev.dv_xname);
+               err = EPROTO;
+       } else {
+               memset(vdev_rsp, 0, sizeof(*vdev_rsp));
+               vdev_rsp->vdev_id = ev->vdev_id;
+               vdev_rsp->requestor_id = ev->requestor_id;
+               vdev_rsp->resp_type = ev->resp_type;
+               vdev_rsp->status = ev->status;
+               vdev_rsp->chain_mask = ev->chain_mask;
+               vdev_rsp->smps_mode = ev->smps_mode;
+               vdev_rsp->mac_id = ev->mac_id;
+               vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
+               vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
+       }
+
+       free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+       return err;
+}
+
+/* Handle a WMI vdev start response event mbuf, log a failed status,
+ * and wake up the vdev setup waiter.
+ */
+void
+qwz_vdev_start_resp_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct wmi_vdev_start_resp_event vdev_start_resp;
+       uint32_t status;
+
+       if (qwz_pull_vdev_start_resp_tlv(sc, m, &vdev_start_resp) != 0) {
+               /* Newline added; the original message lacked one. */
+               printf("%s: failed to extract vdev start resp\n",
+                   sc->sc_dev.dv_xname);
+               return;
+       }
+
+       status = vdev_start_resp.status;
+       if (status) {
+               printf("%s: vdev start resp error status %d (%s)\n",
+                   sc->sc_dev.dv_xname, status,
+                  qwz_wmi_vdev_resp_print(status));
+       }
+
+       sc->vdev_setup_done = 1;
+       wakeup(&sc->vdev_setup_done);
+
+       DNPRINTF(QWZ_D_WMI, "%s: vdev start resp for vdev id %d\n", __func__,
+           vdev_start_resp.vdev_id);
+}
+
+/*
+ * Extract the vdev id from a WMI vdev stopped event.
+ * Returns 0 on success, ENOMEM if the TLV table cannot be built, or
+ * EPROTO if the expected stopped-event TLV is absent.
+ */
+int
+qwz_pull_vdev_stopped_param_tlv(struct qwz_softc *sc, struct mbuf *m,
+    uint32_t *vdev_id)
+{
+	const struct wmi_vdev_stopped_event *ev;
+	const void **tb;
+	int ret = 0;
+
+	/* Build the tag-indexed TLV lookup table from the event payload. */
+	tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
+	if (tb == NULL) {
+		printf("%s: failed to parse tlv: %d\n",
+		    sc->sc_dev.dv_xname, ENOMEM);
+		return ENOMEM;
+	}
+
+	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
+	if (ev != NULL) {
+		*vdev_id = ev->vdev_id;
+	} else {
+		printf("%s: failed to fetch vdev stop ev\n",
+		    sc->sc_dev.dv_xname);
+		ret = EPROTO;
+	}
+
+	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+	return ret;
+}
+
+/*
+ * Handle a WMI vdev stopped event: wake up the thread waiting on
+ * sc->vdev_setup_done for the vdev stop to complete.
+ */
+void
+qwz_vdev_stopped_event(struct qwz_softc *sc, struct mbuf *m)
+{
+	uint32_t vdev_id = 0;
+	int ret;
+
+	ret = qwz_pull_vdev_stopped_param_tlv(sc, m, &vdev_id);
+	if (ret) {
+		printf("%s: failed to extract vdev stopped event\n",
+		    sc->sc_dev.dv_xname);
+		return;
+	}
+
+	sc->vdev_setup_done = 1;
+	wakeup(&sc->vdev_setup_done);
+
+	DNPRINTF(QWZ_D_WMI, "%s: vdev stopped for vdev id %d", __func__,
+	    vdev_id);
+}
+
+/*
+ * TLV iterator callback that fills a tag-indexed lookup table with a
+ * pointer to each TLV payload.  A later TLV with the same tag
+ * overwrites an earlier one; tags beyond WMI_TAG_MAX are ignored.
+ */
+int
+qwz_wmi_tlv_iter_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
+    const void *ptr, void *data)
+{
+	const void **tab = data;
+
+	if (tag >= WMI_TAG_MAX)
+		return 0;
+
+	tab[tag] = ptr;
+	return 0;
+}
+
+/*
+ * Walk all TLVs in the given buffer, filling the tag-indexed table tb
+ * via qwz_wmi_tlv_iter_parse().  Returns the iterator's error code.
+ */
+int
+qwz_wmi_tlv_parse(struct qwz_softc *sc, const void **tb,
+    const void *ptr, size_t len)
+{
+	return qwz_wmi_tlv_iter(sc, ptr, len, qwz_wmi_tlv_iter_parse,
+	    (void *)tb);
+}
+
+/*
+ * Allocate a WMI_TAG_MAX-entry lookup table and fill it by TLV-parsing
+ * the given buffer; tb[tag] points at the payload of the last TLV seen
+ * with that tag.  Returns NULL on allocation or parse failure.  The
+ * caller must free the table with
+ * free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb)).
+ */
+const void **
+qwz_wmi_tlv_parse_alloc(struct qwz_softc *sc, const void *ptr, size_t len)
+{
+	const void **tb;
+	int ret;
+
+	tb = mallocarray(WMI_TAG_MAX, sizeof(*tb), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!tb)
+		return NULL;
+
+	ret = qwz_wmi_tlv_parse(sc, tb, ptr, len);
+	if (ret) {
+		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+		return NULL;
+	}
+
+	return tb;
+}
+
+/*
+ * Dump the given array of regulatory rules for one band to the WMI
+ * debug log.
+ */
+static void
+qwz_print_reg_rule(struct qwz_softc *sc, const char *band,
+    uint32_t num_reg_rules, struct cur_reg_rule *reg_rule_ptr)
+{
+	uint32_t i;
+
+	DNPRINTF(QWZ_D_WMI, "%s: number of reg rules in %s band: %d\n",
+	    __func__, band, num_reg_rules);
+
+	for (i = 0; i < num_reg_rules; i++) {
+		struct cur_reg_rule *r = &reg_rule_ptr[i];
+
+		DNPRINTF(QWZ_D_WMI,
+		    "%s: reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d)\n",
+		    __func__, i + 1, r->start_freq, r->end_freq, r->max_bw,
+		    r->ant_gain, r->reg_power, r->flags);
+	}
+}
+
+/*
+ * Convert an array of firmware regulatory rules into a freshly
+ * allocated array of struct cur_reg_rule.  Returns NULL if allocation
+ * fails; the caller owns (and frees) the returned array.
+ */
+struct cur_reg_rule *
+qwz_create_reg_rules_from_wmi(uint32_t num_reg_rules,
+    struct wmi_regulatory_rule_struct *wmi_reg_rule)
+{
+	struct cur_reg_rule *rules;
+	uint32_t i;
+
+	rules = mallocarray(num_reg_rules, sizeof(*rules),
+	    M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (rules == NULL)
+		return NULL;
+
+	for (i = 0; i < num_reg_rules; i++) {
+		struct cur_reg_rule *dst = &rules[i];
+		const struct wmi_regulatory_rule_struct *src =
+		    &wmi_reg_rule[i];
+
+		/* Unpack the bit-packed frequency, power and flag fields. */
+		dst->start_freq = FIELD_GET(REG_RULE_START_FREQ,
+		    src->freq_info);
+		dst->end_freq = FIELD_GET(REG_RULE_END_FREQ, src->freq_info);
+		dst->max_bw = FIELD_GET(REG_RULE_MAX_BW, src->bw_pwr_info);
+		dst->reg_power = FIELD_GET(REG_RULE_REG_PWR,
+		    src->bw_pwr_info);
+		dst->ant_gain = FIELD_GET(REG_RULE_ANT_GAIN,
+		    src->bw_pwr_info);
+		dst->flags = FIELD_GET(REG_RULE_FLAGS, src->flag_info);
+	}
+
+	return rules;
+}
+
+/*
+ * Parse a WMI regulatory channel list event into reg_info.
+ *
+ * The fixed event header is followed by an array of 2 GHz rules and an
+ * array of 5 GHz rules; both are converted into allocated cur_reg_rule
+ * arrays which the caller must free.
+ *
+ * Returns 0 on success, ENOMEM on allocation/parse failure, EPROTO if
+ * the event TLV is missing, or EINVAL if the event carries no rules.
+ */
+int
+qwz_pull_reg_chan_list_update_ev(struct qwz_softc *sc, struct mbuf *m,
+    struct cur_regulatory_info *reg_info)
+{
+	const void **tb;
+	const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
+	struct wmi_regulatory_rule_struct *wmi_reg_rule;
+	uint32_t num_2ghz_reg_rules, num_5ghz_reg_rules;
+	int ret;
+
+	DNPRINTF(QWZ_D_WMI, "%s: processing regulatory channel list\n",
+	    __func__);
+
+	tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
+	if (tb == NULL) {
+		/*
+		 * qwz_wmi_tlv_parse_alloc() returns NULL for both
+		 * allocation and parse failure; report ENOMEM either way.
+		 */
+		ret = ENOMEM;
+		printf("%s: failed to parse tlv: %d\n", __func__, ret);
+		return ret;
+	}
+
+	chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
+	if (!chan_list_event_hdr) {
+		printf("%s: failed to fetch reg chan list update ev\n",
+		    __func__);
+		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+		return EPROTO;
+	}
+
+	reg_info->num_2ghz_reg_rules = chan_list_event_hdr->num_2ghz_reg_rules;
+	reg_info->num_5ghz_reg_rules = chan_list_event_hdr->num_5ghz_reg_rules;
+
+	if (!(reg_info->num_2ghz_reg_rules + reg_info->num_5ghz_reg_rules)) {
+		printf("%s: No regulatory rules available in the event info\n",
+		    __func__);
+		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+		return EINVAL;
+	}
+
+	memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2, REG_ALPHA2_LEN);
+	reg_info->dfs_region = chan_list_event_hdr->dfs_region;
+	reg_info->phybitmap = chan_list_event_hdr->phybitmap;
+	reg_info->num_phy = chan_list_event_hdr->num_phy;
+	reg_info->phy_id = chan_list_event_hdr->phy_id;
+	reg_info->ctry_code = chan_list_event_hdr->country_id;
+	reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
+
+	/*
+	 * Translate the firmware status code before logging it; the
+	 * previous ordering logged the still-zeroed status_code field.
+	 */
+	reg_info->status_code =
+	    qwz_wmi_cc_setting_code_to_reg(chan_list_event_hdr->status_code);
+
+	DNPRINTF(QWZ_D_WMI, "%s: CC status_code %s\n", __func__,
+	    qwz_cc_status_to_str(reg_info->status_code));
+
+	reg_info->is_ext_reg_event = false;
+
+	reg_info->min_bw_2ghz = chan_list_event_hdr->min_bw_2ghz;
+	reg_info->max_bw_2ghz = chan_list_event_hdr->max_bw_2ghz;
+	reg_info->min_bw_5ghz = chan_list_event_hdr->min_bw_5ghz;
+	reg_info->max_bw_5ghz = chan_list_event_hdr->max_bw_5ghz;
+
+	num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
+	num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;
+
+	DNPRINTF(QWZ_D_WMI,
+	    "%s: cc %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d "
+	    "max_5ghz %d\n", __func__, reg_info->alpha2, reg_info->dfs_region,
+	    reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
+	    reg_info->min_bw_5ghz, reg_info->max_bw_5ghz);
+
+	DNPRINTF(QWZ_D_WMI,
+	    "%s: num_2ghz_reg_rules %d num_5ghz_reg_rules %d\n", __func__,
+	    num_2ghz_reg_rules, num_5ghz_reg_rules);
+
+	/* The rule array follows the fixed header and its TLV header. */
+	wmi_reg_rule = (struct wmi_regulatory_rule_struct *)
+	    ((uint8_t *)chan_list_event_hdr + sizeof(*chan_list_event_hdr)
+	    + sizeof(struct wmi_tlv));
+
+	if (num_2ghz_reg_rules) {
+		reg_info->reg_rules_2ghz_ptr = qwz_create_reg_rules_from_wmi(
+		    num_2ghz_reg_rules, wmi_reg_rule);
+		if (!reg_info->reg_rules_2ghz_ptr) {
+			free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+			printf("%s: Unable to allocate memory for "
+			    "2 GHz rules\n", __func__);
+			return ENOMEM;
+		}
+
+		qwz_print_reg_rule(sc, "2 GHz", num_2ghz_reg_rules,
+		    reg_info->reg_rules_2ghz_ptr);
+	}
+
+	if (num_5ghz_reg_rules) {
+		/* The 5 GHz rules directly follow the 2 GHz rules. */
+		wmi_reg_rule += num_2ghz_reg_rules;
+		reg_info->reg_rules_5ghz_ptr = qwz_create_reg_rules_from_wmi(
+		    num_5ghz_reg_rules, wmi_reg_rule);
+		if (!reg_info->reg_rules_5ghz_ptr) {
+			free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+			printf("%s: Unable to allocate memory for "
+			    "5 GHz rules\n", __func__);
+			return ENOMEM;
+		}
+
+		qwz_print_reg_rule(sc, "5 GHz", num_5ghz_reg_rules,
+		    reg_info->reg_rules_5ghz_ptr);
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: processed regulatory channel list\n",
+	    __func__);
+
+	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+	return 0;
+}
+
+/*
+ * Parsing of extended regulatory channel list events is not
+ * implemented yet; qwz_reg_chan_list_event() treats the error return
+ * as a failed extraction.
+ */
+int
+qwz_pull_reg_chan_list_ext_update_ev(struct qwz_softc *sc, struct mbuf *m,
+    struct cur_regulatory_info *reg_info)
+{
+	printf("%s: not implemented\n", __func__);
+	return ENOTSUP;
+}
+
+/*
+ * Populate ic->ic_channels from the regulatory rules received from
+ * firmware.  Channels covered by a disabled rule are cleared; the rest
+ * get their frequency and capability flags set.
+ */
+void
+qwz_init_channels(struct qwz_softc *sc, struct cur_regulatory_info *reg_info)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_channel *chan;
+	struct cur_reg_rule *rule;
+	int i, chnum;
+	uint16_t freq;
+
+	for (i = 0; i < reg_info->num_2ghz_reg_rules; i++) {
+		rule = &reg_info->reg_rules_2ghz_ptr[i];
+		/* Sanity-check the rule's frequency range. */
+		if (rule->start_freq < 2402 ||
+		    rule->start_freq > 2500 ||
+		    rule->start_freq > rule->end_freq) {
+			DPRINTF("%s: bad regulatory rule: start freq %u, "
+			    "end freq %u\n", __func__, rule->start_freq,
+			    rule->end_freq);
+			continue;
+		}
+
+		/* First channel center is taken as 10 MHz above the
+		 * rule's lower edge (20 MHz wide channels) -- TODO
+		 * confirm against firmware rule semantics. */
+		freq = rule->start_freq + 10;
+		chnum = ieee80211_mhz2ieee(freq, IEEE80211_CHAN_2GHZ);
+		if (chnum < 1 || chnum > 14) {
+			DPRINTF("%s: bad regulatory rule: freq %u, "
+			    "channel %u\n", __func__, freq, chnum);
+			continue;
+		}
+		while (freq <= rule->end_freq && chnum <= 14) {
+			chan = &ic->ic_channels[chnum];
+			if (rule->flags & REGULATORY_CHAN_DISABLED) {
+				chan->ic_freq = 0;
+				chan->ic_flags = 0;
+			} else {
+				chan->ic_freq = freq;
+				chan->ic_flags = IEEE80211_CHAN_CCK |
+				    IEEE80211_CHAN_OFDM |
+				    IEEE80211_CHAN_DYN |
+				    IEEE80211_CHAN_2GHZ;
+			}
+			chnum++;
+			freq = ieee80211_ieee2mhz(chnum, IEEE80211_CHAN_2GHZ);
+		}
+	}
+
+	for (i = 0; i < reg_info->num_5ghz_reg_rules; i++) {
+		rule = &reg_info->reg_rules_5ghz_ptr[i];
+		if (rule->start_freq < 5170 ||
+		    rule->start_freq > 6000 ||
+		    rule->start_freq > rule->end_freq) {
+			DPRINTF("%s: bad regulatory rule: start freq %u, "
+			    "end freq %u\n", __func__, rule->start_freq,
+			    rule->end_freq);
+			continue;
+		}
+
+		freq = rule->start_freq + 10;
+		chnum = ieee80211_mhz2ieee(freq, IEEE80211_CHAN_5GHZ);
+		if (chnum < 36 || chnum > IEEE80211_CHAN_MAX) {
+			DPRINTF("%s: bad regulatory rule: freq %u, "
+			    "channel %u\n", __func__, freq, chnum);
+			continue;
+		}
+		/* 5 GHz channel numbers advance by 4 (20 MHz spacing). */
+		while (freq <= rule->end_freq && freq <= 5885 &&
+		    chnum <= IEEE80211_CHAN_MAX) {
+			chan = &ic->ic_channels[chnum];
+			if (rule->flags & (REGULATORY_CHAN_DISABLED |
+			    REGULATORY_CHAN_NO_OFDM)) {
+				chan->ic_freq = 0;
+				chan->ic_flags = 0;
+			} else {
+				chan->ic_freq = freq;
+				chan->ic_flags = IEEE80211_CHAN_A;
+				/* Radar/no-IR/indoor-only rules force
+				 * passive scanning on these channels. */
+				if (rule->flags & (REGULATORY_CHAN_RADAR |
+				    REGULATORY_CHAN_NO_IR |
+				    REGULATORY_CHAN_INDOOR_ONLY)) {
+					chan->ic_flags |=
+					    IEEE80211_CHAN_PASSIVE;
+				}
+			}
+			chnum += 4;
+			freq = ieee80211_ieee2mhz(chnum, IEEE80211_CHAN_5GHZ);
+		}
+	}
+}
+
+/*
+ * Handle a WMI regulatory channel list event.
+ *
+ * Extracts the regulatory info (legacy or extended format, selected by
+ * id), applies the new rules to the channel list via
+ * qwz_init_channels(), then frees the temporary rule arrays.  The
+ * #if 0 section carries the not-yet-ported Linux regdomain
+ * bookkeeping.
+ */
+int
+qwz_reg_chan_list_event(struct qwz_softc *sc, struct mbuf *m,
+    enum wmi_reg_chan_list_cmd_type id)
+{
+	struct cur_regulatory_info *reg_info = NULL;
+	int ret = 0;
+#if 0
+	struct ieee80211_regdomain *regd = NULL;
+	bool intersect = false;
+	int pdev_idx, i, j;
+	struct ath12k *ar;
+#endif
+
+	reg_info = malloc(sizeof(*reg_info), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!reg_info) {
+		ret = ENOMEM;
+		goto fallback;
+	}
+
+	/* Legacy and extended events use different TLV layouts. */
+	if (id == WMI_REG_CHAN_LIST_CC_ID)
+		ret = qwz_pull_reg_chan_list_update_ev(sc, m, reg_info);
+	else
+		ret = qwz_pull_reg_chan_list_ext_update_ev(sc, m, reg_info);
+
+	if (ret) {
+		printf("%s: failed to extract regulatory info from "
+		    "received event\n", sc->sc_dev.dv_xname);
+		goto fallback;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: event reg chan list id %d\n", __func__, id);
+
+	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+		/* In case of failure to set the requested ctry,
+		 * fw retains the current regd. We print a failure info
+		 * and return from here.
+		 */
+		printf("%s: Failed to set the requested Country "
+		    "regulatory setting\n", __func__);
+		goto mem_free;
+	}
+
+	qwz_init_channels(sc, reg_info);
+#if 0
+	pdev_idx = reg_info->phy_id;
+
+	/* Avoid default reg rule updates sent during FW recovery if
+	 * it is already available
+	 */
+	spin_lock(&ab->base_lock);
+	if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags) &&
+	    ab->default_regd[pdev_idx]) {
+		spin_unlock(&ab->base_lock);
+		goto mem_free;
+	}
+	spin_unlock(&ab->base_lock);
+
+	if (pdev_idx >= ab->num_radios) {
+		/* Process the event for phy0 only if single_pdev_only
+		 * is true. If pdev_idx is valid but not 0, discard the
+		 * event. Otherwise, it goes to fallback.
+		 */
+		if (ab->hw_params.single_pdev_only &&
+		    pdev_idx < ab->hw_params.num_rxmda_per_pdev)
+			goto mem_free;
+		else
+			goto fallback;
+	}
+
+	/* Avoid multiple overwrites to default regd, during core
+	 * stop-start after mac registration.
+	 */
+	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+	    !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
+		    (char *)reg_info->alpha2, 2))
+		goto mem_free;
+
+	/* Intersect new rules with default regd if a new country setting was
+	 * requested, i.e a default regd was already set during initialization
+	 * and the regd coming from this event has a valid country info.
+	 */
+	if (ab->default_regd[pdev_idx] &&
+	    !ath12k_reg_is_world_alpha((char *)
+		ab->default_regd[pdev_idx]->alpha2) &&
+	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
+		intersect = true;
+
+	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
+	if (!regd) {
+		ath12k_warn(ab, "failed to build regd from reg_info\n");
+		goto fallback;
+	}
+
+	spin_lock(&ab->base_lock);
+	if (ab->default_regd[pdev_idx]) {
+		/* The initial rules from FW after WMI Init is to build
+		 * the default regd. From then on, any rules updated for
+		 * the pdev could be due to user reg changes.
+		 * Free previously built regd before assigning the newly
+		 * generated regd to ar. NULL pointer handling will be
+		 * taken care by kfree itself.
+		 */
+		ar = ab->pdevs[pdev_idx].ar;
+		kfree(ab->new_regd[pdev_idx]);
+		ab->new_regd[pdev_idx] = regd;
+		queue_work(ab->workqueue, &ar->regd_update_work);
+	} else {
+		/* This regd would be applied during mac registration and is
+		 * held constant throughout for regd intersection purpose
+		 */
+		ab->default_regd[pdev_idx] = regd;
+	}
+	ab->dfs_region = reg_info->dfs_region;
+	spin_unlock(&ab->base_lock);
+#endif
+	goto mem_free;
+
+fallback:
+	/* Fallback to older reg (by sending previous country setting
+	 * again if fw has succeeded and we failed to process here.
+	 * The Regdomain should be uniform across driver and fw. Since the
+	 * FW has processed the command and sent a success status, we expect
+	 * this function to succeed as well. If it doesn't, CTRY needs to be
+	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
+	 */
+	/* TODO: This is rare, but still should also be handled */
+mem_free:
+	if (reg_info) {
+		/* Rule arrays were allocated by
+		 * qwz_create_reg_rules_from_wmi(); sizes must match. */
+		free(reg_info->reg_rules_2ghz_ptr, M_DEVBUF,
+		    reg_info->num_2ghz_reg_rules *
+		    sizeof(*reg_info->reg_rules_2ghz_ptr));
+		free(reg_info->reg_rules_5ghz_ptr, M_DEVBUF,
+		    reg_info->num_5ghz_reg_rules *
+		    sizeof(*reg_info->reg_rules_5ghz_ptr));
+#if 0
+		if (reg_info->is_ext_reg_event) {
+			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
+				kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
+
+			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
+				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
+					kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]);
+		}
+#endif
+		free(reg_info, M_DEVBUF, sizeof(*reg_info));
+	}
+	return ret;
+}
+
+/*
+ * Map a WMI scan event type (and, for completion events, the
+ * completion reason) to a human-readable string for debug output.
+ */
+const char *
+qwz_wmi_event_scan_type_str(enum wmi_scan_event_type type,
+    enum wmi_scan_completion_reason reason)
+{
+	if (type == WMI_SCAN_EVENT_COMPLETED) {
+		switch (reason) {
+		case WMI_SCAN_REASON_COMPLETED:
+			return "completed";
+		case WMI_SCAN_REASON_CANCELLED:
+			return "completed [cancelled]";
+		case WMI_SCAN_REASON_PREEMPTED:
+			return "completed [preempted]";
+		case WMI_SCAN_REASON_TIMEDOUT:
+			return "completed [timedout]";
+		case WMI_SCAN_REASON_INTERNAL_FAILURE:
+			return "completed [internal err]";
+		default:
+			return "completed [unknown]";
+		}
+	}
+
+	switch (type) {
+	case WMI_SCAN_EVENT_STARTED:
+		return "started";
+	case WMI_SCAN_EVENT_BSS_CHANNEL:
+		return "bss channel";
+	case WMI_SCAN_EVENT_FOREIGN_CHAN:
+		return "foreign channel";
+	case WMI_SCAN_EVENT_DEQUEUED:
+		return "dequeued";
+	case WMI_SCAN_EVENT_PREEMPTED:
+		return "preempted";
+	case WMI_SCAN_EVENT_START_FAILED:
+		return "start failed";
+	case WMI_SCAN_EVENT_RESTARTED:
+		return "restarted";
+	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+		return "foreign channel exit";
+	default:
+		return "unknown";
+	}
+}
+
+/*
+ * Return a printable name for a driver scan state.
+ */
+const char *
+qwz_scan_state_str(enum ath12k_scan_state state)
+{
+	if (state == ATH12K_SCAN_IDLE)
+		return "idle";
+	if (state == ATH12K_SCAN_STARTING)
+		return "starting";
+	if (state == ATH12K_SCAN_RUNNING)
+		return "running";
+	if (state == ATH12K_SCAN_ABORTING)
+		return "aborting";
+
+	return "unknown";
+}
+
+/*
+ * Parse a WMI scan event TLV payload into scan_evt_param.
+ * Returns 0 on success, EINVAL on TLV parse failure, or EPROTO if the
+ * scan event TLV is missing.
+ */
+int
+qwz_pull_scan_ev(struct qwz_softc *sc, struct mbuf *m,
+    struct wmi_scan_event *scan_evt_param)
+{
+	const void **tb;
+	const struct wmi_scan_event *ev;
+
+	tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
+	if (tb == NULL) {
+		DPRINTF("%s: failed to parse tlv\n", __func__);
+		return EINVAL;
+	}
+
+	ev = tb[WMI_TAG_SCAN_EVENT];
+	if (!ev) {
+		DPRINTF("%s: failed to fetch scan ev\n", __func__);
+		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+		return EPROTO;
+	}
+
+	/* Copy fields out of the mbuf before the TLV table is freed. */
+	scan_evt_param->event_type = ev->event_type;
+	scan_evt_param->reason = ev->reason;
+	scan_evt_param->channel_freq = ev->channel_freq;
+	scan_evt_param->scan_req_id = ev->scan_req_id;
+	scan_evt_param->scan_id = ev->scan_id;
+	scan_evt_param->vdev_id = ev->vdev_id;
+	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
+
+	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+	return 0;
+}
+
+/*
+ * Handle a WMI "scan started" event: move the scan state machine from
+ * STARTING to RUNNING and wake up the thread waiting on it.  Any other
+ * state indicates a firmware/driver state mismatch and is logged.
+ */
+void
+qwz_wmi_event_scan_started(struct qwz_softc *sc)
+{
+#ifdef notyet
+	lockdep_assert_held(&ar->data_lock);
+#endif
+	if (sc->scan.state == ATH12K_SCAN_STARTING) {
+		sc->scan.state = ATH12K_SCAN_RUNNING;
+#if 0
+		if (ar->scan.is_roc)
+			ieee80211_ready_on_channel(ar->hw);
+#endif
+		wakeup(&sc->scan.state);
+	} else {
+		printf("%s: received scan started event in an invalid "
+		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
+		    qwz_scan_state_str(sc->scan.state), sc->scan.state);
+	}
+}
+
+/*
+ * Handle a WMI "scan completed" event by finishing the scan when one
+ * is running or aborting; ignore (but log) events received in other
+ * states.
+ */
+void
+qwz_wmi_event_scan_completed(struct qwz_softc *sc)
+{
+#ifdef notyet
+	lockdep_assert_held(&ar->data_lock);
+#endif
+	switch (sc->scan.state) {
+	case ATH12K_SCAN_IDLE:
+	case ATH12K_SCAN_STARTING:
+		/* One suspected reason scan can be completed while starting is
+		 * if firmware fails to deliver all scan events to the host,
+		 * e.g. when transport pipe is full. This has been observed
+		 * with spectral scan phyerr events starving wmi transport
+		 * pipe. In such case the "scan completed" event should be (and
+		 * is) ignored by the host as it may be just firmware's scan
+		 * state machine recovering.
+		 */
+		printf("%s: received scan completed event in an invalid "
+		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
+		    qwz_scan_state_str(sc->scan.state), sc->scan.state);
+		break;
+	case ATH12K_SCAN_RUNNING:
+	case ATH12K_SCAN_ABORTING:
+		qwz_mac_scan_finish(sc);
+		break;
+	}
+}
+
+/*
+ * Handle a WMI "scan bss channel" event: clear the recorded scan
+ * channel while a scan is running or aborting; log the event in other
+ * states.
+ */
+void
+qwz_wmi_event_scan_bss_chan(struct qwz_softc *sc)
+{
+#ifdef notyet
+	lockdep_assert_held(&ar->data_lock);
+#endif
+	if (sc->scan.state == ATH12K_SCAN_RUNNING ||
+	    sc->scan.state == ATH12K_SCAN_ABORTING) {
+		sc->scan_channel = 0;
+	} else {
+		printf("%s: received scan bss chan event in an invalid "
+		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
+		    qwz_scan_state_str(sc->scan.state), sc->scan.state);
+	}
+}
+
+/*
+ * Handle a WMI "foreign channel" event: record the channel (derived
+ * from freq) that the firmware is currently scanning.
+ */
+void
+qwz_wmi_event_scan_foreign_chan(struct qwz_softc *sc, uint32_t freq)
+{
+#ifdef notyet
+	lockdep_assert_held(&ar->data_lock);
+#endif
+	switch (sc->scan.state) {
+	case ATH12K_SCAN_IDLE:
+	case ATH12K_SCAN_STARTING:
+		printf("%s: received scan foreign chan event in an invalid "
+		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
+		    qwz_scan_state_str(sc->scan.state), sc->scan.state);
+		break;
+	case ATH12K_SCAN_RUNNING:
+	case ATH12K_SCAN_ABORTING:
+		sc->scan_channel = ieee80211_mhz2ieee(freq, 0);
+#if 0
+		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+			complete(&ar->scan.on_channel);
+#endif
+		break;
+	}
+}
+
+/*
+ * Handle a WMI "scan start failed" event: wake up the thread waiting
+ * on the scan state and tear down the scan.  Only expected while the
+ * scan is in the STARTING state.
+ */
+void
+qwz_wmi_event_scan_start_failed(struct qwz_softc *sc)
+{
+#ifdef notyet
+	lockdep_assert_held(&ar->data_lock);
+#endif
+	switch (sc->scan.state) {
+	case ATH12K_SCAN_IDLE:
+	case ATH12K_SCAN_RUNNING:
+	case ATH12K_SCAN_ABORTING:
+		printf("%s: received scan start failed event in an invalid "
+		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
+		    qwz_scan_state_str(sc->scan.state), sc->scan.state);
+		break;
+	case ATH12K_SCAN_STARTING:
+		wakeup(&sc->scan.state);
+		qwz_mac_scan_finish(sc);
+		break;
+	}
+}
+
+
+void
+qwz_scan_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct wmi_scan_event scan_ev = { 0 };
+       struct qwz_vif *arvif;
+
+       if (qwz_pull_scan_ev(sc, m, &scan_ev) != 0) {
+               printf("%s: failed to extract scan event",
+                   sc->sc_dev.dv_xname);
+               return;
+       }
+#ifdef notyet
+       rcu_read_lock();
+#endif
+       TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
+               if (arvif->vdev_id == scan_ev.vdev_id)
+                       break;
+       }
+
+       if (!arvif) {
+               printf("%s: received scan event for unknown vdev\n",
+                   sc->sc_dev.dv_xname);
+#if 0
+               rcu_read_unlock();
+#endif
+               return;
+       }
+#if 0
+       spin_lock_bh(&ar->data_lock);
+#endif
+       DNPRINTF(QWZ_D_WMI,
+           "%s: event scan %s type %d reason %d freq %d req_id %d scan_id %d "
+           "vdev_id %d state %s (%d)\n", __func__,
+           qwz_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
+           scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
+           scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
+           qwz_scan_state_str(sc->scan.state), sc->scan.state);
+
+       switch (scan_ev.event_type) {
+       case WMI_SCAN_EVENT_STARTED:
+               qwz_wmi_event_scan_started(sc);
+               break;
+       case WMI_SCAN_EVENT_COMPLETED:
+               qwz_wmi_event_scan_completed(sc);
+               break;
+       case WMI_SCAN_EVENT_BSS_CHANNEL:
+               qwz_wmi_event_scan_bss_chan(sc);
+               break;
+       case WMI_SCAN_EVENT_FOREIGN_CHAN:
+               qwz_wmi_event_scan_foreign_chan(sc, scan_ev.channel_freq);
+               break;
+       case WMI_SCAN_EVENT_START_FAILED:
+               printf("%s: received scan start failure event\n",
+                   sc->sc_dev.dv_xname);
+               qwz_wmi_event_scan_start_failed(sc);
+               break;
+       case WMI_SCAN_EVENT_DEQUEUED:
+               qwz_mac_scan_finish(sc);
+               break;
+       case WMI_SCAN_EVENT_PREEMPTED:
+       case WMI_SCAN_EVENT_RESTARTED:
+       case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+       default:
+               break;
+       }
+#if 0
+       spin_unlock_bh(&ar->data_lock);
+
+       rcu_read_unlock();
+#endif
+}
+
+/*
+ * Parse a WMI channel info event from a raw event buffer (not an
+ * mbuf) into ch_info_ev.  Returns 0 on success, EINVAL on TLV parse
+ * failure, or EPROTO if the channel info TLV is missing.
+ */
+int
+qwz_pull_chan_info_ev(struct qwz_softc *sc, uint8_t *evt_buf, uint32_t len,
+    struct wmi_chan_info_event *ch_info_ev)
+{
+	const void **tb;
+	const struct wmi_chan_info_event *ev;
+
+	tb = qwz_wmi_tlv_parse_alloc(sc, evt_buf, len);
+	if (tb == NULL) {
+		printf("%s: failed to parse tlv\n", sc->sc_dev.dv_xname);
+		return EINVAL;
+	}
+
+	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
+	if (!ev) {
+		printf("%s: failed to fetch chan info ev\n",
+		    sc->sc_dev.dv_xname);
+		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+		return EPROTO;
+	}
+
+	/* Copy fields out of the event buffer before freeing tb. */
+	ch_info_ev->err_code = ev->err_code;
+	ch_info_ev->freq = ev->freq;
+	ch_info_ev->cmd_flags = ev->cmd_flags;
+	ch_info_ev->noise_floor = ev->noise_floor;
+	ch_info_ev->rx_clear_count = ev->rx_clear_count;
+	ch_info_ev->cycle_count = ev->cycle_count;
+	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+	ch_info_ev->rx_frame_count = ev->rx_frame_count;
+	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
+	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
+	ch_info_ev->vdev_id = ev->vdev_id;
+
+	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+	return 0;
+}
+
+/*
+ * Handle a WMI channel info event: record per-channel survey data
+ * (noise floor, busy/total time) gathered by firmware during a scan.
+ */
+void
+qwz_chan_info_event(struct qwz_softc *sc, struct mbuf *m)
+{
+	struct qwz_vif *arvif;
+	struct wmi_chan_info_event ch_info_ev = {0};
+	struct qwz_survey_info *survey;
+	int idx;
+	/* HW channel counters frequency value in hertz */
+	uint32_t cc_freq_hz = sc->cc_freq_hz;
+
+	if (qwz_pull_chan_info_ev(sc, mtod(m, void *), m->m_pkthdr.len,
+	    &ch_info_ev) != 0) {
+		printf("%s: failed to extract chan info event\n",
+		    sc->sc_dev.dv_xname);
+		return;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: event chan info vdev_id %d err_code %d "
+	    "freq %d cmd_flags %d noise_floor %d rx_clear_count %d "
+	    "cycle_count %d mac_clk_mhz %d\n", __func__,
+	    ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
+	    ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
+	    ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
+	    ch_info_ev.mac_clk_mhz);
+
+	/* End-of-report marker; nothing further to record. */
+	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
+		DNPRINTF(QWZ_D_WMI, "chan info report completed\n");
+		return;
+	}
+#ifdef notyet
+	rcu_read_lock();
+#endif
+	/* Match the event to the vif owning the vdev. */
+	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
+		if (arvif->vdev_id == ch_info_ev.vdev_id)
+			break;
+	}
+	if (!arvif) {
+		printf("%s: invalid vdev id in chan info ev %d\n",
+		   sc->sc_dev.dv_xname, ch_info_ev.vdev_id);
+#ifdef notyet
+		rcu_read_unlock();
+#endif
+		return;
+	}
+#ifdef notyet
+	spin_lock_bh(&ar->data_lock);
+#endif
+	switch (sc->scan.state) {
+	case ATH12K_SCAN_IDLE:
+	case ATH12K_SCAN_STARTING:
+		printf("%s: received chan info event without a scan request, "
+		    "ignoring\n", sc->sc_dev.dv_xname);
+		goto exit;
+	case ATH12K_SCAN_RUNNING:
+	case ATH12K_SCAN_ABORTING:
+		break;
+	}
+
+	/* The survey array is indexed by IEEE channel number. */
+	idx = ieee80211_mhz2ieee(ch_info_ev.freq, 0);
+	if (idx >= nitems(sc->survey)) {
+		printf("%s: invalid frequency %d (idx %d out of bounds)\n",
+		    sc->sc_dev.dv_xname, ch_info_ev.freq, idx);
+		goto exit;
+	}
+
+	/* If FW provides MAC clock frequency in Mhz, overriding the initialized
+	 * HW channel counters frequency value
+	 */
+	if (ch_info_ev.mac_clk_mhz)
+		cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);
+
+	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
+		survey = &sc->survey[idx];
+		memset(survey, 0, sizeof(*survey));
+		survey->noise = ch_info_ev.noise_floor;
+		survey->time = ch_info_ev.cycle_count / cc_freq_hz;
+		survey->time_busy = ch_info_ev.rx_clear_count / cc_freq_hz;
+	}
+exit:
+#ifdef notyet
+	spin_unlock_bh(&ar->data_lock);
+	rcu_read_unlock();
+#else
+	return;
+#endif
+}
+
+/*
+ * TLV iterator callback for management rx events. Records the fixed
+ * header and the first byte-array TLV (the frame payload) in the
+ * parse state handed in via 'data'.
+ */
+int
+qwz_wmi_tlv_mgmt_rx_parse(struct qwz_softc *sc, uint16_t tag, uint16_t len,
+    const void *ptr, void *data)
+{
+       struct wmi_tlv_mgmt_rx_parse *parse = data;
+
+       if (tag == WMI_TAG_MGMT_RX_HDR) {
+               parse->fixed = ptr;
+       } else if (tag == WMI_TAG_ARRAY_BYTE && !parse->frame_buf_done) {
+               /* Only the first byte array carries the frame payload. */
+               parse->frame_buf = ptr;
+               parse->frame_buf_done = 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Parse a management rx event: extract the fixed header fields into
+ * 'hdr' and trim the mbuf so it starts at the 802.11 frame payload
+ * and is exactly buf_len bytes long. Returns 0 on success, EPROTO on
+ * malformed input. The caller retains ownership of the mbuf.
+ */
+int
+qwz_pull_mgmt_rx_params_tlv(struct qwz_softc *sc, struct mbuf *m,
+    struct mgmt_rx_event_params *hdr)
+{
+       struct wmi_tlv_mgmt_rx_parse parse = { 0 };
+       const struct wmi_mgmt_rx_hdr *ev;
+       const uint8_t *frame;
+       int ret;
+       size_t totlen, hdrlen;
+
+       ret = qwz_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
+           qwz_wmi_tlv_mgmt_rx_parse, &parse);
+       if (ret) {
+               printf("%s: failed to parse mgmt rx tlv %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       ev = parse.fixed;
+       frame = parse.frame_buf;
+
+       /* Both the fixed header and the frame payload TLV are mandatory. */
+       if (!ev || !frame) {
+               printf("%s: failed to fetch mgmt rx hdr\n",
+                   sc->sc_dev.dv_xname);
+               return EPROTO;
+       }
+
+       hdr->pdev_id =  ev->pdev_id;
+       hdr->chan_freq = le32toh(ev->chan_freq);
+       hdr->channel = le32toh(ev->channel);
+       hdr->snr = le32toh(ev->snr);
+       hdr->rate = le32toh(ev->rate);
+       hdr->phy_mode = le32toh(ev->phy_mode);
+       hdr->buf_len = le32toh(ev->buf_len);
+       hdr->status = le32toh(ev->status);
+       hdr->flags = le32toh(ev->flags);
+       hdr->rssi = le32toh(ev->rssi);
+       hdr->tsf_delta = le32toh(ev->tsf_delta);
+       memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));
+
+       /* The frame pointer must lie within the mbuf's data. */
+       if (frame < mtod(m, uint8_t *) ||
+           frame >= mtod(m, uint8_t *) + m->m_pkthdr.len) {
+               printf("%s: invalid mgmt rx frame pointer\n",
+                   sc->sc_dev.dv_xname);
+               return EPROTO;
+       }
+       hdrlen = frame - mtod(m, uint8_t *);
+
+       /* Reject buf_len values that would wrap hdrlen + buf_len. */
+       if (hdrlen + hdr->buf_len < hdr->buf_len) {
+               printf("%s: length overflow in mgmt rx hdr ev\n",
+                   sc->sc_dev.dv_xname);
+               return EPROTO;
+       }
+       totlen = hdrlen + hdr->buf_len;
+       if (m->m_pkthdr.len < totlen) {
+               printf("%s: invalid length in mgmt rx hdr ev\n",
+                   sc->sc_dev.dv_xname);
+               return EPROTO;
+       }
+
+       /* shift the mbuf to point at `frame` */
+       m->m_len = m->m_pkthdr.len = totlen;
+       m_adj(m, hdrlen);
+
+#if 0 /* Not needed on OpenBSD? */
+       ath12k_ce_byte_swap(skb->data, hdr->buf_len);
+#endif
+       return 0;
+}
+
+/*
+ * Handle a WMI management rx event: validate it, build rx metadata
+ * and hand the frame to net80211. This function consumes the mbuf on
+ * every path (either frees it or passes it to ieee80211_input()).
+ */
+void
+qwz_mgmt_rx_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct ifnet *ifp = &ic->ic_if;
+       struct mgmt_rx_event_params rx_ev = {0};
+       struct ieee80211_rxinfo rxi;
+       struct ieee80211_frame *wh;
+       struct ieee80211_node *ni;
+
+       if (qwz_pull_mgmt_rx_params_tlv(sc, m, &rx_ev) != 0) {
+               printf("%s: failed to extract mgmt rx event\n",
+                   sc->sc_dev.dv_xname);
+               m_freem(m);
+               return;
+       }
+
+       memset(&rxi, 0, sizeof(rxi));
+
+       DNPRINTF(QWZ_D_MGMT, "%s: event mgmt rx status %08x\n", __func__,
+           rx_ev.status);
+#ifdef notyet
+       rcu_read_lock();
+#endif
+       if (rx_ev.pdev_id >= nitems(sc->pdevs)) {
+               printf("%s: invalid pdev_id %d in mgmt_rx_event\n",
+                   sc->sc_dev.dv_xname, rx_ev.pdev_id);
+               m_freem(m);
+               goto exit;
+       }
+
+       /* Drop frames received during CAC or flagged with rx errors. */
+       if ((test_bit(ATH12K_CAC_RUNNING, sc->sc_flags)) ||
+           (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
+           WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
+               m_freem(m);
+               goto exit;
+       }
+
+       /* MIC failures are counted separately as decryption errors. */
+       if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) {
+               ic->ic_stats.is_ccmp_dec_errs++;
+               m_freem(m);
+               goto exit;
+       }
+
+       /* NOTE(review): rssi derived from snr plus a fixed noise floor
+        * constant — assumes firmware snr is relative to that floor. */
+       rxi.rxi_chan = rx_ev.channel;
+       rxi.rxi_rssi = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
+#if 0
+       status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
+#endif
+
+       wh = mtod(m, struct ieee80211_frame *);
+       ni = ieee80211_find_rxnode(ic, wh);
+#if 0
+       /* In case of PMF, FW delivers decrypted frames with Protected Bit set.
+        * Don't clear that. Also, FW delivers broadcast management frames
+        * (ex: group privacy action frames in mesh) as encrypted payload.
+        */
+       if (ieee80211_has_protected(hdr->frame_control) &&
+           !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
+               status->flag |= RX_FLAG_DECRYPTED;
+
+               if (!ieee80211_is_robust_mgmt_frame(skb)) {
+                       status->flag |= RX_FLAG_IV_STRIPPED |
+                                       RX_FLAG_MMIC_STRIPPED;
+                       hdr->frame_control = __cpu_to_le16(fc &
+                                            ~IEEE80211_FCTL_PROTECTED);
+               }
+       }
+
+       if (ieee80211_is_beacon(hdr->frame_control))
+               ath12k_mac_handle_beacon(ar, skb);
+#endif
+
+       DNPRINTF(QWZ_D_MGMT,
+           "%s: event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+           __func__, m, m->m_pkthdr.len,
+           wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK,
+           wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
+
+       DNPRINTF(QWZ_D_MGMT, "%s: event mgmt rx freq %d chan %d snr %d\n",
+           __func__, rx_ev.chan_freq, rx_ev.channel, rx_ev.snr);
+
+#if NBPFILTER > 0
+       if (sc->sc_drvbpf != NULL) {
+               struct qwz_rx_radiotap_header *tap = &sc->sc_rxtap;
+
+               bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
+                   m, BPF_DIRECTION_IN);
+       }
+#endif
+       /* ieee80211_input() takes ownership of the mbuf. */
+       ieee80211_input(ifp, m, ni, &rxi);
+       ieee80211_release_node(ic, ni);
+exit:
+#ifdef notyet
+       rcu_read_unlock();
+#else
+       return;
+#endif
+}
+
+/*
+ * Extract a management tx completion event from the mbuf into
+ * 'param'. Returns 0 on success, ENOMEM if TLV parsing fails, or
+ * EPROTO if the expected TLV is absent.
+ */
+int
+qwz_pull_mgmt_tx_compl_param_tlv(struct qwz_softc *sc, struct mbuf *m,
+    struct wmi_mgmt_tx_compl_event *param)
+{
+       const void **tb;
+       const struct wmi_mgmt_tx_compl_event *ev;
+       int ret = 0;
+
+       tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
+       if (tb == NULL) {
+               /* Return via 'ret', consistent with the other pull helpers. */
+               ret = ENOMEM;
+               printf("%s: failed to parse tlv: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
+       if (!ev) {
+               printf("%s: failed to fetch mgmt tx compl ev\n",
+                   sc->sc_dev.dv_xname);
+               free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+               return EPROTO;
+       }
+
+       param->pdev_id = ev->pdev_id;
+       param->desc_id = ev->desc_id;
+       param->status = ev->status;
+       param->ack_rssi = ev->ack_rssi;
+
+       free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+       return 0;
+}
+
+/*
+ * Complete a management frame transmission: unmap and free the tx
+ * buffer identified by desc_id, release its node reference, and
+ * restart the interface send queue if it was blocked on a full
+ * management tx ring.
+ */
+void
+qwz_wmi_process_mgmt_tx_comp(struct qwz_softc *sc,
+    struct wmi_mgmt_tx_compl_event *tx_compl_param)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
+       struct ifnet *ifp = &ic->ic_if;
+       struct qwz_tx_data *tx_data;
+
+       if (tx_compl_param->desc_id >= nitems(arvif->txmgmt.data)) {
+               printf("%s: received mgmt tx compl for invalid buf_id: %d\n",
+                   sc->sc_dev.dv_xname, tx_compl_param->desc_id);
+               return;
+       }
+
+       tx_data = &arvif->txmgmt.data[tx_compl_param->desc_id];
+       if (tx_data->m == NULL) {
+               /* NOTE(review): same message as the out-of-range case above
+                * even though this means "no tx pending for this buf_id". */
+               printf("%s: received mgmt tx compl for invalid buf_id: %d\n",
+                   sc->sc_dev.dv_xname, tx_compl_param->desc_id);
+               return;
+       }
+
+       bus_dmamap_unload(sc->sc_dmat, tx_data->map);
+       m_freem(tx_data->m);
+       tx_data->m = NULL;
+
+       ieee80211_release_node(ic, tx_data->ni);
+       tx_data->ni = NULL;
+
+       if (arvif->txmgmt.queued > 0)
+               arvif->txmgmt.queued--;
+
+       /* A non-zero firmware status means the frame was not acked. */
+       if (tx_compl_param->status != 0)
+               ifp->if_oerrors++;
+
+       /* Unblock the send queue once the mgmt ring has room again. */
+       if (arvif->txmgmt.queued < nitems(arvif->txmgmt.data) - 1) {
+               sc->qfullmsk &= ~(1U << QWZ_MGMT_QUEUE_ID);
+               if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
+                       ifq_clr_oactive(&ifp->if_snd);
+                       (*ifp->if_start)(ifp);
+               }
+       }
+}
+
+/*
+ * Handle a WMI management tx completion event: parse the completion
+ * parameters and release the corresponding tx buffer.
+ */
+void
+qwz_mgmt_tx_compl_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct wmi_mgmt_tx_compl_event tx_compl_param = { 0 };
+
+       if (qwz_pull_mgmt_tx_compl_param_tlv(sc, m, &tx_compl_param) != 0) {
+               printf("%s: failed to extract mgmt tx compl event\n",
+                   sc->sc_dev.dv_xname);
+               return;
+       }
+
+       qwz_wmi_process_mgmt_tx_comp(sc, &tx_compl_param);
+
+       /* Terminate the debug message with a newline (was missing). */
+       DNPRINTF(QWZ_D_MGMT, "%s: event mgmt tx compl ev pdev_id %d, "
+           "desc_id %d, status %d ack_rssi %d\n", __func__,
+           tx_compl_param.pdev_id, tx_compl_param.desc_id,
+           tx_compl_param.status, tx_compl_param.ack_rssi);
+}
+
+/*
+ * Extract a roam event from the mbuf into 'roam_ev'. Returns 0 on
+ * success, ENOMEM if TLV parsing fails, EPROTO if the roam TLV is
+ * missing.
+ */
+int
+qwz_pull_roam_ev(struct qwz_softc *sc, struct mbuf *m,
+    struct wmi_roam_event *roam_ev)
+{
+       const struct wmi_roam_event *ev;
+       const void **tb;
+       int err = 0;
+
+       tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
+       if (tb == NULL) {
+               err = ENOMEM;
+               printf("%s: failed to parse tlv: %d\n",
+                   sc->sc_dev.dv_xname, err);
+               return err;
+       }
+
+       ev = tb[WMI_TAG_ROAM_EVENT];
+       if (ev != NULL) {
+               roam_ev->vdev_id = ev->vdev_id;
+               roam_ev->reason = ev->reason;
+               roam_ev->rssi = ev->rssi;
+       } else {
+               printf("%s: failed to fetch roam ev\n",
+                   sc->sc_dev.dv_xname);
+               err = EPROTO;
+       }
+
+       free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+       return err;
+}
+
+/*
+ * React to a firmware beacon-miss report by probing the current AP
+ * before giving up on the association.
+ */
+void
+qwz_mac_handle_beacon_miss(struct qwz_softc *sc, uint32_t vdev_id)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+
+       /* Beacon misses only matter for an associated STA. */
+       if (ic->ic_opmode != IEEE80211_M_STA ||
+           ic->ic_state != IEEE80211_S_RUN)
+               return;
+
+       /* A management timer is already pending; let it run its course. */
+       if (ic->ic_mgt_timer != 0)
+               return;
+
+       if (ic->ic_if.if_flags & IFF_DEBUG)
+               printf("%s: receiving no beacons from %s; checking if "
+                   "this AP is still responding to probe requests\n",
+                   sc->sc_dev.dv_xname,
+                   ether_sprintf(ic->ic_bss->ni_macaddr));
+
+       /*
+        * Probe the current AP directly rather than scanning right
+        * away; if no probe response arrives, the state machine's own
+        * timeout will push us into a scan.
+        */
+       IEEE80211_SEND_MGMT(ic, ic->ic_bss,
+           IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
+}
+
+/*
+ * Handle a WMI roam event. Only beacon misses require action; all
+ * other roam reasons are logged and ignored.
+ */
+void
+qwz_roam_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct wmi_roam_event roam_ev = {};
+
+       if (qwz_pull_roam_ev(sc, m, &roam_ev) != 0) {
+               printf("%s: failed to extract roam event\n",
+                   sc->sc_dev.dv_xname);
+               return;
+       }
+
+       DNPRINTF(QWZ_D_WMI, "%s: event roam vdev %u reason 0x%08x rssi %d\n",
+           __func__, roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
+
+       /* Discard out-of-range reason codes from firmware. */
+       if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
+               return;
+
+       /*
+        * BETTER_AP, LOW_RSSI, SUITABLE_AP_FOUND and HO_FAILED are
+        * deliberately ignored for now.
+        */
+       if (roam_ev.reason == WMI_ROAM_REASON_BEACON_MISS)
+               qwz_mac_handle_beacon_miss(sc, roam_ev.vdev_id);
+}
+
+/*
+ * Extract a vdev install-key completion event into 'arg'. Returns 0
+ * on success, ENOMEM if TLV parsing fails, EPROTO if the expected
+ * TLV is missing.
+ */
+int
+qwz_pull_vdev_install_key_compl_ev(struct qwz_softc *sc, struct mbuf *m,
+    struct wmi_vdev_install_key_complete_arg *arg)
+{
+       const struct wmi_vdev_install_key_compl_event *ev;
+       const void **tb;
+       int err = 0;
+
+       tb = qwz_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
+       if (tb == NULL) {
+               err = ENOMEM;
+               printf("%s: failed to parse tlv: %d\n",
+                   sc->sc_dev.dv_xname, err);
+               return err;
+       }
+
+       ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
+       if (ev != NULL) {
+               arg->vdev_id = ev->vdev_id;
+               arg->macaddr = ev->peer_macaddr.addr;
+               arg->key_idx = ev->key_idx;
+               arg->key_flags = ev->key_flags;
+               arg->status = ev->status;
+       } else {
+               printf("%s: failed to fetch vdev install key compl ev\n",
+                   sc->sc_dev.dv_xname);
+               err = EPROTO;
+       }
+
+       free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
+       return err;
+}
+
+/*
+ * Handle a vdev install-key completion event: record the firmware
+ * status in sc->install_key_status and wake the thread sleeping on
+ * sc->install_key_done.
+ */
+void
+qwz_vdev_install_key_compl_event(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct wmi_vdev_install_key_complete_arg install_key_compl = { 0 };
+       struct qwz_vif *arvif;
+
+       if (qwz_pull_vdev_install_key_compl_ev(sc, m,
+           &install_key_compl) != 0) {
+               printf("%s: failed to extract install key compl event\n",
+                   sc->sc_dev.dv_xname);
+               return;
+       }
+
+       DNPRINTF(QWZ_D_WMI, "%s: event vdev install key ev idx %d flags %08x "
+           "macaddr %s status %d\n", __func__, install_key_compl.key_idx,
+           install_key_compl.key_flags,
+           ether_sprintf((u_char *)install_key_compl.macaddr),
+           install_key_compl.status);
+
+       /* The event must refer to a known vif; otherwise no waiter is woken. */
+       TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
+               if (arvif->vdev_id == install_key_compl.vdev_id)
+                       break;
+       }
+       if (!arvif) {
+               printf("%s: invalid vdev id in install key compl ev %d\n",
+                   sc->sc_dev.dv_xname, install_key_compl.vdev_id);
+               return;
+       }
+
+       /* Assume success; overwritten below on a firmware error. */
+       sc->install_key_status = 0;
+
+       if (install_key_compl.status !=
+           WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
+               printf("%s: install key failed for %s status %d\n",
+                   sc->sc_dev.dv_xname,
+                   ether_sprintf((u_char *)install_key_compl.macaddr),
+                   install_key_compl.status);
+               sc->install_key_status = install_key_compl.status;
+       }
+
+       /* Wake the thread waiting in the key-install path. */
+       sc->install_key_done = 1;
+       wakeup(&sc->install_key_done);
+}
+
+/*
+ * WMI receive dispatcher: strip the WMI command header from the mbuf
+ * and route the event to its handler. The mbuf is freed here, except
+ * for WMI_MGMT_RX_EVENTID whose handler takes ownership.
+ */
+void
+qwz_wmi_tlv_op_rx(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct wmi_cmd_hdr *cmd_hdr;
+       enum wmi_tlv_event_id id;
+
+       cmd_hdr = mtod(m, struct wmi_cmd_hdr *);
+       id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));
+
+       m_adj(m, sizeof(struct wmi_cmd_hdr));
+
+       switch (id) {
+               /* Process all the WMI events here */
+       case WMI_SERVICE_READY_EVENTID:
+               qwz_service_ready_event(sc, m);
+               break;
+       case WMI_SERVICE_READY_EXT_EVENTID:
+               qwz_service_ready_ext_event(sc, m);
+               break;
+       case WMI_SERVICE_READY_EXT2_EVENTID:
+               qwz_service_ready_ext2_event(sc, m);
+               break;
+       case WMI_REG_CHAN_LIST_CC_EVENTID:
+               qwz_reg_chan_list_event(sc, m, WMI_REG_CHAN_LIST_CC_ID);
+               break;
+       case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
+               qwz_reg_chan_list_event(sc, m, WMI_REG_CHAN_LIST_CC_EXT_ID);
+               break;
+       case WMI_READY_EVENTID:
+               qwz_ready_event(sc, m);
+               break;
+       case WMI_PEER_DELETE_RESP_EVENTID:
+               qwz_peer_delete_resp_event(sc, m);
+               break;
+       case WMI_VDEV_START_RESP_EVENTID:
+               qwz_vdev_start_resp_event(sc, m);
+               break;
+#if 0
+       case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
+               ath12k_bcn_tx_status_event(ab, skb);
+               break;
+#endif
+       case WMI_VDEV_STOPPED_EVENTID:
+               qwz_vdev_stopped_event(sc, m);
+               break;
+       case WMI_MGMT_RX_EVENTID:
+               qwz_mgmt_rx_event(sc, m);
+               /* mgmt_rx_event() owns the skb now! */
+               return;
+       case WMI_MGMT_TX_COMPLETION_EVENTID:
+               qwz_mgmt_tx_compl_event(sc, m);
+               break;
+       case WMI_SCAN_EVENTID:
+               qwz_scan_event(sc, m);
+               break;
+#if 0
+       case WMI_PEER_STA_KICKOUT_EVENTID:
+               ath12k_peer_sta_kickout_event(ab, skb);
+               break;
+#endif
+       case WMI_ROAM_EVENTID:
+               qwz_roam_event(sc, m);
+               break;
+       case WMI_CHAN_INFO_EVENTID:
+               qwz_chan_info_event(sc, m);
+               break;
+#if 0
+       case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
+               ath12k_pdev_bss_chan_info_event(ab, skb);
+               break;
+#endif
+       case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+               qwz_vdev_install_key_compl_event(sc, m);
+               break;
+       case WMI_SERVICE_AVAILABLE_EVENTID:
+               qwz_service_available_event(sc, m);
+               break;
+       case WMI_PEER_ASSOC_CONF_EVENTID:
+               qwz_peer_assoc_conf_event(sc, m);
+               break;
+       case WMI_UPDATE_STATS_EVENTID:
+               /* ignore */
+               break;
+#if 0
+       case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
+               ath12k_pdev_ctl_failsafe_check_event(ab, skb);
+               break;
+       case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
+               ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
+               break;
+       case WMI_PDEV_UTF_EVENTID:
+               ath12k_tm_wmi_event(ab, id, skb);
+               break;
+       case WMI_PDEV_TEMPERATURE_EVENTID:
+               ath12k_wmi_pdev_temperature_event(ab, skb);
+               break;
+       case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
+               ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
+               break;
+       case WMI_HOST_FILS_DISCOVERY_EVENTID:
+               ath12k_fils_discovery_event(ab, skb);
+               break;
+       case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
+               ath12k_probe_resp_tx_status_event(ab, skb);
+               break;
+       case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
+               ath12k_wmi_obss_color_collision_event(ab, skb);
+               break;
+       case WMI_TWT_ADD_DIALOG_EVENTID:
+               ath12k_wmi_twt_add_dialog_event(ab, skb);
+               break;
+       case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
+               ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
+               break;
+       case WMI_VDEV_DELETE_RESP_EVENTID:
+               ath12k_vdev_delete_resp_event(ab, skb);
+               break;
+       case WMI_WOW_WAKEUP_HOST_EVENTID:
+               ath12k_wmi_event_wow_wakeup_host(ab, skb);
+               break;
+       case WMI_11D_NEW_COUNTRY_EVENTID:
+               ath12k_reg_11d_new_cc_event(ab, skb);
+               break;
+#endif
+       case WMI_DIAG_EVENTID:
+               /* Ignore. These events trigger tracepoints in Linux. */
+               break;
+#if 0
+       case WMI_PEER_STA_PS_STATECHG_EVENTID:
+               ath12k_wmi_event_peer_sta_ps_state_chg(ab, skb);
+               break;
+       case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+               ath12k_wmi_gtk_offload_status_event(ab, skb);
+               break;
+#endif
+       case WMI_UPDATE_FW_MEM_DUMP_EVENTID:
+               DPRINTF("%s: 0x%x: update fw mem dump\n", __func__, id);
+               break;
+       case WMI_PDEV_SET_HW_MODE_RESP_EVENTID:
+               DPRINTF("%s: 0x%x: set HW mode response event\n", __func__, id);
+               break;
+       case WMI_WLAN_FREQ_AVOID_EVENTID:
+               DPRINTF("%s: 0x%x: wlan freq avoid event\n", __func__, id);
+               break;
+       default:
+               DPRINTF("%s: unsupported event id 0x%x\n", __func__, id);
+               break;
+       }
+
+       /* All handlers above (except mgmt rx) expect us to free the mbuf. */
+       m_freem(m);
+}
+
+/*
+ * Credit-return callback from HTC: wake any thread waiting on WMI tx
+ * credits, and per-endpoint credit waiters when credit flow control
+ * is in use.
+ */
+void
+qwz_wmi_op_ep_tx_credits(struct qwz_softc *sc)
+{
+       struct qwz_htc_ep *ep;
+       int i;
+
+       /* Pending beacons take priority; wake their waiters first. */
+       sc->wmi.tx_credits = 1;
+       wakeup(&sc->wmi.tx_credits);
+
+       if (!sc->hw_params.credit_flow)
+               return;
+
+       /* Wake every endpoint that has credits available again. */
+       for (i = ATH12K_HTC_EP_0; i < ATH12K_HTC_EP_COUNT; i++) {
+               ep = &sc->htc.endpoint[i];
+               if (ep->tx_credit_flow_enabled && ep->tx_credits > 0)
+                       wakeup(&ep->tx_credits);
+       }
+}
+
+/*
+ * Connect the HTC WMI control service for the given pdev and record
+ * the negotiated endpoint id and message size. Returns 0 on success
+ * or an error from the HTC layer.
+ */
+int
+qwz_connect_pdev_htc_service(struct qwz_softc *sc, uint32_t pdev_idx)
+{
+       int status;
+       uint32_t svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
+           ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+           ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
+       struct qwz_htc_svc_conn_req conn_req;
+       struct qwz_htc_svc_conn_resp conn_resp;
+
+       /*
+        * Only three WMI control services exist; reject larger pdev
+        * indices rather than reading beyond svc_id[].
+        */
+       if (pdev_idx >= nitems(svc_id)) {
+               printf("%s: invalid pdev index %u\n",
+                   sc->sc_dev.dv_xname, pdev_idx);
+               return EINVAL;
+       }
+
+       memset(&conn_req, 0, sizeof(conn_req));
+       memset(&conn_resp, 0, sizeof(conn_resp));
+
+       /* these fields are the same for all service endpoints */
+       conn_req.ep_ops.ep_tx_complete = qwz_wmi_htc_tx_complete;
+       conn_req.ep_ops.ep_rx_complete = qwz_wmi_tlv_op_rx;
+       conn_req.ep_ops.ep_tx_credits = qwz_wmi_op_ep_tx_credits;
+
+       /* connect to control service */
+       conn_req.service_id = svc_id[pdev_idx];
+
+       status = qwz_htc_connect_service(&sc->htc, &conn_req, &conn_resp);
+       if (status) {
+               printf("%s: failed to connect to WMI CONTROL service "
+                   "status: %d\n", sc->sc_dev.dv_xname, status);
+               return status;
+       }
+
+       sc->wmi.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
+       sc->wmi.wmi[pdev_idx].eid = conn_resp.eid;
+       sc->wmi.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
+       sc->wmi.wmi[pdev_idx].tx_ce_desc = 0;
+
+       return 0;
+}
+
+/*
+ * Connect all WMI HTC service endpoints. Returns 0 on success, -1 if
+ * the endpoint count exceeds the number of radios, or the error from
+ * the first failing service connection (previously ignored).
+ */
+int
+qwz_wmi_connect(struct qwz_softc *sc)
+{
+       uint32_t i;
+       uint8_t wmi_ep_count;
+       int ret;
+
+       wmi_ep_count = sc->htc.wmi_ep_count;
+       if (wmi_ep_count > sc->hw_params.max_radios)
+               return -1;
+
+       for (i = 0; i < wmi_ep_count; i++) {
+               /* Propagate connection failures instead of ignoring them. */
+               ret = qwz_connect_pdev_htc_service(sc, i);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Return every HTC endpoint to its unconnected default state.
+ */
+void
+qwz_htc_reset_endpoint_states(struct qwz_htc *htc)
+{
+       int eid;
+
+       for (eid = ATH12K_HTC_EP_0; eid < ATH12K_HTC_EP_COUNT; eid++) {
+               struct qwz_htc_ep *ep = &htc->endpoint[eid];
+
+               ep->service_id = ATH12K_HTC_SVC_ID_UNUSED;
+               ep->max_ep_message_len = 0;
+               ep->max_tx_queue_depth = 0;
+               ep->eid = eid;
+               ep->htc = htc;
+               ep->tx_credit_flow_enabled = 1;
+       }
+}
+
+/*
+ * HTC control endpoint tx completion. Stub: just discard the mbuf.
+ */
+void
+qwz_htc_control_tx_complete(struct qwz_softc *sc, struct mbuf *m)
+{
+       m_freem(m);
+       printf("%s: not implemented\n", __func__);
+}
+
+/*
+ * HTC control endpoint rx completion. Stub: just discard the mbuf.
+ */
+void
+qwz_htc_control_rx_complete(struct qwz_softc *sc, struct mbuf *m)
+{
+       m_freem(m);
+       printf("%s: not implemented\n", __func__);
+}
+
+/*
+ * Look up the credit allocation for a service id in the HTC service
+ * allocation table. Returns 0 if no entry matches.
+ */
+uint8_t
+qwz_htc_get_credit_allocation(struct qwz_htc *htc, uint16_t service_id)
+{
+       uint8_t idx, allocation = 0;
+
+       /*
+        * Scan the entire table; a later duplicate entry overrides an
+        * earlier one, matching the original last-match behaviour.
+        */
+       for (idx = 0; idx < ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES; idx++) {
+               if (htc->service_alloc_table[idx].service_id != service_id)
+                       continue;
+               allocation = htc->service_alloc_table[idx].credit_allocation;
+       }
+
+       return allocation;
+}
+
+/*
+ * Map an HTC service id to a human-readable name for diagnostics.
+ */
+const char *
+qwz_htc_service_name(enum ath12k_htc_svc_id id)
+{
+       static const struct {
+               enum ath12k_htc_svc_id id;
+               const char *name;
+       } names[] = {
+               { ATH12K_HTC_SVC_ID_RESERVED, "Reserved" },
+               { ATH12K_HTC_SVC_ID_RSVD_CTRL, "Control" },
+               { ATH12K_HTC_SVC_ID_WMI_CONTROL, "WMI" },
+               { ATH12K_HTC_SVC_ID_WMI_DATA_BE, "DATA BE" },
+               { ATH12K_HTC_SVC_ID_WMI_DATA_BK, "DATA BK" },
+               { ATH12K_HTC_SVC_ID_WMI_DATA_VI, "DATA VI" },
+               { ATH12K_HTC_SVC_ID_WMI_DATA_VO, "DATA VO" },
+               { ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1, "WMI MAC1" },
+               { ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2, "WMI MAC2" },
+               { ATH12K_HTC_SVC_ID_NMI_CONTROL, "NMI Control" },
+               { ATH12K_HTC_SVC_ID_NMI_DATA, "NMI Data" },
+               { ATH12K_HTC_SVC_ID_HTT_DATA_MSG, "HTT Data" },
+               { ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS, "RAW" },
+               { ATH12K_HTC_SVC_ID_IPA_TX, "IPA TX" },
+               { ATH12K_HTC_SVC_ID_PKT_LOG, "PKT LOG" },
+       };
+       size_t i;
+
+       for (i = 0; i < nitems(names); i++) {
+               if (names[i].id == id)
+                       return names[i].name;
+       }
+
+       return "Unknown";
+}
+
+/*
+ * Allocate a zeroed mbuf large enough to hold an HTC header plus
+ * 'payload_size' bytes. Returns NULL if allocation fails. The mbuf's
+ * length is set to the full (header + payload) size.
+ */
+struct mbuf *
+qwz_htc_alloc_mbuf(size_t payload_size)
+{
+       struct mbuf *m;
+       size_t size = sizeof(struct ath12k_htc_hdr) + payload_size;
+
+       m = m_gethdr(M_DONTWAIT, MT_DATA);
+       if (m == NULL)
+               return NULL;
+
+       /* Standard cluster if it fits, otherwise a larger external one. */
+       if (size <= MCLBYTES)
+               MCLGET(m, M_DONTWAIT);
+       else
+               MCLGETL(m, M_DONTWAIT, size);
+       if ((m->m_flags & M_EXT) == 0) {
+               m_freem(m);
+               return NULL;
+       }
+
+       m->m_len = m->m_pkthdr.len = size;
+       memset(mtod(m, void *), 0, size);
+
+       return m;
+}
+
+/*
+ * Allocate an mbuf for an HTC control message. Control messages use
+ * a fixed-size buffer, minus the space taken by the HTC header.
+ */
+struct mbuf *
+qwz_htc_build_tx_ctrl_mbuf(void)
+{
+       return qwz_htc_alloc_mbuf(ATH12K_HTC_CONTROL_BUFFER_SIZE -
+           sizeof(struct ath12k_htc_hdr));
+}
+
+/*
+ * Fill in the HTC header at the front of an outgoing mbuf: endpoint
+ * id, payload length, credit-update flag and sequence number.
+ */
+void
+qwz_htc_prepare_tx_mbuf(struct qwz_htc_ep *ep, struct mbuf *m)
+{
+       struct ath12k_htc_hdr *hdr;
+
+       hdr = mtod(m, struct ath12k_htc_hdr *);
+
+       memset(hdr, 0, sizeof(*hdr));
+       hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) |
+           FIELD_PREP(HTC_HDR_PAYLOADLEN, (m->m_pkthdr.len - sizeof(*hdr)));
+
+       /* Ask the target for a credit report when flow control is on. */
+       if (ep->tx_credit_flow_enabled)
+               hdr->htc_info |= FIELD_PREP(HTC_HDR_FLAGS,
+                   ATH12K_HTC_FLAG_NEED_CREDIT_UPDATE);
+#ifdef notyet
+       spin_lock_bh(&ep->htc->tx_lock);
+#endif
+       /* Per-endpoint sequence number; Linux serializes this with tx_lock. */
+       hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++);
+#ifdef notyet
+       spin_unlock_bh(&ep->htc->tx_lock);
+#endif
+}
+
+int
+qwz_htc_send(struct qwz_htc *htc, enum ath12k_htc_ep_id eid, struct mbuf *m)
+{
+       struct qwz_htc_ep *ep = &htc->endpoint[eid];
+       struct qwz_softc *sc = htc->sc;
+       struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[ep->ul_pipe_id];
+       void *ctx;
+       struct qwz_tx_data *tx_data;
+       int credits = 0;
+       int ret;
+       int credit_flow_enabled = (sc->hw_params.credit_flow &&
+           ep->tx_credit_flow_enabled);
+
+       if (eid >= ATH12K_HTC_EP_COUNT) {
+               printf("%s: Invalid endpoint id: %d\n", __func__, eid);
+               return ENOENT;
+       }
+
+       if (credit_flow_enabled) {
+               credits = howmany(m->m_pkthdr.len, htc->target_credit_size);
+#ifdef notyet
+               spin_lock_bh(&htc->tx_lock);
+#endif
+               if (ep->tx_credits < credits) {
+                       DNPRINTF(QWZ_D_HTC,
+                           "%s: ep %d insufficient credits required %d "
+                           "total %d\n", __func__, eid, credits,
+                           ep->tx_credits);
+#ifdef notyet
+                       spin_unlock_bh(&htc->tx_lock);
+#endif
+                       return EAGAIN;
+               }
+               ep->tx_credits -= credits;
+               DNPRINTF(QWZ_D_HTC, "%s: ep %d credits consumed %d total %d\n",
+                   __func__, eid, credits, ep->tx_credits);
+#ifdef notyet
+               spin_unlock_bh(&htc->tx_lock);
+#endif
+       }
+
+       qwz_htc_prepare_tx_mbuf(ep, m);
+
+       ctx = pipe->src_ring->per_transfer_context[pipe->src_ring->write_index];
+       tx_data = (struct qwz_tx_data *)ctx;
+
+       tx_data->eid = eid;
+       ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
+           m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
+       if (ret) {
+               printf("%s: can't map mbuf (error %d)\n",
+                   sc->sc_dev.dv_xname, ret);
+               if (ret != ENOBUFS)
+                       m_freem(m);
+               goto err_credits;
+       }
+
+       DNPRINTF(QWZ_D_HTC, "%s: tx mbuf %p eid %d paddr %lx\n",
+           __func__, m, tx_data->eid, tx_data->map->dm_segs[0].ds_addr);
+#ifdef QWZ_DEBUG
+       {
+               int i;
+               uint8_t *p = mtod(m, uint8_t *);
+               DNPRINTF(QWZ_D_HTC, "%s message buffer:", __func__);
+               for (i = 0; i < m->m_pkthdr.len; i++) {
+                       DNPRINTF(QWZ_D_HTC, "%s %.2x",
+                           i % 16 == 0 ? "\n" : "", p[i]);
+               }
+               if (i % 16)
+                       DNPRINTF(QWZ_D_HTC, "\n");
+       }
+#endif
+       ret = qwz_ce_send(htc->sc, m, ep->ul_pipe_id, ep->eid);
+       if (ret)
+               goto err_unmap;
+
+       return 0;
+
+err_unmap:
+       bus_dmamap_unload(sc->sc_dmat, tx_data->map);
+err_credits:
+       if (credit_flow_enabled) {
+#ifdef notyet
+               spin_lock_bh(&htc->tx_lock);
+#endif
+               ep->tx_credits += credits;
+               DNPRINTF(QWZ_D_HTC, "%s: ep %d credits reverted %d total %d\n",
+                   __func__, eid, credits, ep->tx_credits);
+#ifdef notyet
+               spin_unlock_bh(&htc->tx_lock);
+#endif
+
+               if (ep->ep_ops.ep_tx_credits)
+                       ep->ep_ops.ep_tx_credits(htc->sc);
+       }
+       return ret;
+}
+
+/*
+ * Connect an HTC service to a target endpoint.
+ *
+ * For the pseudo control service (ATH12K_HTC_SVC_ID_RSVD_CTRL) no
+ * message exchange takes place; endpoint 0 is assigned locally.
+ * For all other services a connect request is sent over endpoint 0
+ * and we sleep until the target's control response arrives.  On
+ * success the endpoint state (service id, credits, callbacks, CE
+ * pipe mapping) is initialized and the assigned endpoint id and
+ * maximum message length are returned in conn_resp.
+ *
+ * Returns 0 on success or an errno.
+ */
+int
+qwz_htc_connect_service(struct qwz_htc *htc,
+    struct qwz_htc_svc_conn_req *conn_req,
+    struct qwz_htc_svc_conn_resp *conn_resp)
+{
+	struct qwz_softc *sc = htc->sc;
+	struct ath12k_htc_conn_svc *req_msg;
+	struct ath12k_htc_conn_svc_resp resp_msg_dummy;
+	struct ath12k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
+	enum ath12k_htc_ep_id assigned_eid = ATH12K_HTC_EP_COUNT;
+	struct qwz_htc_ep *ep;
+	struct mbuf *m;
+	unsigned int max_msg_size = 0;
+	int length, status = 0;
+	int disable_credit_flow_ctrl = 0;
+	uint16_t flags = 0;
+	uint16_t message_id, service_id;
+	uint8_t tx_alloc = 0;
+
+	/* special case for HTC pseudo control service */
+	if (conn_req->service_id == ATH12K_HTC_SVC_ID_RSVD_CTRL) {
+		disable_credit_flow_ctrl = 1;
+		assigned_eid = ATH12K_HTC_EP_0;
+		max_msg_size = ATH12K_HTC_MAX_CTRL_MSG_LEN;
+		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+		goto setup;
+	}
+
+	tx_alloc = qwz_htc_get_credit_allocation(htc, conn_req->service_id);
+	if (!tx_alloc)
+		DNPRINTF(QWZ_D_HTC,
+		    "%s: htc service %s does not allocate target credits\n",
+		    sc->sc_dev.dv_xname,
+		    qwz_htc_service_name(conn_req->service_id));
+
+	m = qwz_htc_build_tx_ctrl_mbuf();
+	if (!m) {
+		printf("%s: Failed to allocate HTC packet\n",
+		    sc->sc_dev.dv_xname);
+		return ENOMEM;
+	}
+
+	length = sizeof(*req_msg);
+	m->m_len = m->m_pkthdr.len = sizeof(struct ath12k_htc_hdr) + length;
+
+	/* The connect request body follows the HTC header. */
+	req_msg = (struct ath12k_htc_conn_svc *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr));
+	memset(req_msg, 0, length);
+	req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID,
+	    ATH12K_HTC_MSG_CONNECT_SERVICE_ID);
+
+	flags |= FIELD_PREP(ATH12K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc);
+
+	/* Only enable credit flow control for WMI ctrl service */
+	if (!(conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL ||
+	      conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
+	      conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
+		flags |= ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+		disable_credit_flow_ctrl = 1;
+	}
+
+	if (!sc->hw_params.credit_flow) {
+		flags |= ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+		disable_credit_flow_ctrl = 1;
+	}
+
+	req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
+	req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
+	    conn_req->service_id);
+
+	sc->ctl_resp = 0;
+
+	status = qwz_htc_send(htc, ATH12K_HTC_EP_0, m);
+	if (status) {
+		if (status != ENOBUFS)
+			m_freem(m);
+		return status;
+	}
+
+	/* Wait up to one second for the connect response. */
+	while (!sc->ctl_resp) {
+		int ret = tsleep_nsec(&sc->ctl_resp, 0, "qwzhtcinit",
+		    SEC_TO_NSEC(1));
+		if (ret) {
+			printf("%s: Service connect timeout\n",
+			    sc->sc_dev.dv_xname);
+			return ret;
+		}
+	}
+
+	/* we controlled the buffer creation, it's aligned */
+	resp_msg = (struct ath12k_htc_conn_svc_resp *)htc->control_resp_buffer;
+	message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id);
+	service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID,
+			       resp_msg->msg_svc_id);
+	if ((message_id != ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+	    (htc->control_resp_len < sizeof(*resp_msg))) {
+		printf("%s: Invalid resp message ID 0x%x\n", __func__,
+		    message_id);
+		return EPROTO;
+	}
+
+	DNPRINTF(QWZ_D_HTC, "%s: service %s connect response status 0x%lx "
+	    "assigned ep 0x%lx\n", __func__, qwz_htc_service_name(service_id),
+	    FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
+	    FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));
+
+	conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS,
+	    resp_msg->flags_len);
+
+	/* check response status */
+	if (conn_resp->connect_resp_code !=
+	    ATH12K_HTC_CONN_SVC_STATUS_SUCCESS) {
+		printf("%s: HTC Service %s connect request failed: 0x%x\n",
+		    __func__, qwz_htc_service_name(service_id),
+		    conn_resp->connect_resp_code);
+		return EPROTO;
+	}
+
+	assigned_eid = (enum ath12k_htc_ep_id)FIELD_GET(
+	    HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len);
+
+	max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+	    resp_msg->flags_len);
+setup:
+	if (assigned_eid >= ATH12K_HTC_EP_COUNT)
+		return EPROTO;
+
+	if (max_msg_size == 0)
+		return EPROTO;
+
+	ep = &htc->endpoint[assigned_eid];
+	ep->eid = assigned_eid;
+
+	/* The endpoint must not already be bound to a service. */
+	if (ep->service_id != ATH12K_HTC_SVC_ID_UNUSED)
+		return EPROTO;
+
+	/* return assigned endpoint to caller */
+	conn_resp->eid = assigned_eid;
+	conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+	    resp_msg->flags_len);
+
+	/* setup the endpoint */
+	ep->service_id = conn_req->service_id;
+	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+	ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+	    resp_msg->flags_len);
+	ep->tx_credits = tx_alloc;
+
+	/* copy all the callbacks */
+	ep->ep_ops = conn_req->ep_ops;
+
+	status = sc->ops.map_service_to_pipe(htc->sc, ep->service_id,
+	    &ep->ul_pipe_id, &ep->dl_pipe_id);
+	if (status)
+		return status;
+
+	DNPRINTF(QWZ_D_HTC,
+	    "%s: htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+	    __func__, qwz_htc_service_name(ep->service_id), ep->ul_pipe_id,
+	    ep->dl_pipe_id, ep->eid);
+
+	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+		ep->tx_credit_flow_enabled = 0;
+		DNPRINTF(QWZ_D_HTC,
+		    "%s: htc service '%s' eid %d tx flow control disabled\n",
+		    __func__, qwz_htc_service_name(ep->service_id),
+		    assigned_eid);
+	}
+
+	return status;
+}
+
+/*
+ * Tell the target that HTC setup is complete.  This sends the
+ * "setup complete (extended)" control message on endpoint 0 and,
+ * when the hardware does not use credit flow control, flags that
+ * in the message.  Returns 0 or an errno.
+ */
+int
+qwz_htc_start(struct qwz_htc *htc)
+{
+	struct qwz_softc *sc = htc->sc;
+	struct ath12k_htc_setup_complete_extended *setup;
+	struct mbuf *ctrl;
+	int err;
+
+	ctrl = qwz_htc_build_tx_ctrl_mbuf();
+	if (ctrl == NULL)
+		return ENOMEM;
+
+	ctrl->m_len = ctrl->m_pkthdr.len = sizeof(struct ath12k_htc_hdr) +
+	    sizeof(*setup);
+
+	/* The message body follows the HTC header. */
+	setup = (struct ath12k_htc_setup_complete_extended *)
+	    (mtod(ctrl, uint8_t *) + sizeof(struct ath12k_htc_hdr));
+	setup->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
+	    ATH12K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+	if (!sc->hw_params.credit_flow)
+		setup->flags |= ATH12K_GLOBAL_DISABLE_CREDIT_FLOW;
+	else
+		DNPRINTF(QWZ_D_HTC, "%s: using tx credit flow control\n",
+		    __func__);
+
+	err = qwz_htc_send(htc, ATH12K_HTC_EP_0, ctrl);
+	if (err) {
+		m_freem(ctrl);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Initialize the HTC layer: reset all endpoint state, derive the
+ * number of WMI endpoints from the preferred hardware mode, and
+ * connect the pseudo HTC control service on endpoint 0.
+ * Returns 0 or an errno.
+ */
+int
+qwz_htc_init(struct qwz_softc *sc)
+{
+	struct qwz_htc *htc = &sc->htc;
+	struct qwz_htc_svc_conn_req req;
+	struct qwz_htc_svc_conn_resp resp;
+	int err;
+#ifdef notyet
+	spin_lock_init(&htc->tx_lock);
+#endif
+	qwz_htc_reset_endpoint_states(htc);
+
+	htc->sc = sc;
+
+	/* The number of WMI endpoints depends on the hardware mode. */
+	switch (sc->wmi.preferred_hw_mode) {
+	case WMI_HOST_HW_MODE_SINGLE:
+		htc->wmi_ep_count = 1;
+		break;
+	case WMI_HOST_HW_MODE_DBS:
+	case WMI_HOST_HW_MODE_DBS_OR_SBS:
+		htc->wmi_ep_count = 2;
+		break;
+	case WMI_HOST_HW_MODE_DBS_SBS:
+		htc->wmi_ep_count = 3;
+		break;
+	default:
+		htc->wmi_ep_count = sc->hw_params.max_radios;
+		break;
+	}
+
+	/* Set up and connect our pseudo HTC control endpoint. */
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+	req.ep_ops.ep_tx_complete = qwz_htc_control_tx_complete;
+	req.ep_ops.ep_rx_complete = qwz_htc_control_rx_complete;
+	req.max_send_queue_depth = ATH12K_NUM_CONTROL_TX_BUFFERS;
+	req.service_id = ATH12K_HTC_SVC_ID_RSVD_CTRL;
+
+	err = qwz_htc_connect_service(htc, &req, &resp);
+	if (err) {
+		printf("%s: could not connect to htc service (%d)\n",
+		    sc->sc_dev.dv_xname, err);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Distribute the target's transmit credits evenly across the
+ * active WMI control endpoints and record the per-service shares
+ * in the service allocation table.  Returns 0 or EINVAL when the
+ * WMI endpoint count is out of range.
+ */
+int
+qwz_htc_setup_target_buffer_assignments(struct qwz_htc *htc)
+{
+	const uint32_t svc_ids[] = {
+		ATH12K_HTC_SVC_ID_WMI_CONTROL,
+		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2,
+	};
+	struct qwz_htc_svc_tx_credits *entry = htc->service_alloc_table;
+	int share, i;
+
+	if (htc->wmi_ep_count == 0 || htc->wmi_ep_count > nitems(svc_ids))
+		return EINVAL;
+
+	/* Divide credits among number of endpoints for WMI. */
+	share = htc->total_transmit_credits / htc->wmi_ep_count;
+	for (i = 0; i < htc->wmi_ep_count; i++) {
+		entry[i].service_id = svc_ids[i];
+		entry[i].credit_allocation = share;
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for the target's HTC "ready" message and record the transmit
+ * credit count and credit size it advertises.  If no interrupt-driven
+ * completion arrives within a second, fall back to servicing the copy
+ * engines by polling once more before giving up.
+ * Returns 0 on success or an errno.
+ */
+int
+qwz_htc_wait_target(struct qwz_softc *sc)
+{
+	struct qwz_htc *htc = &sc->htc;
+	int polling = 0, ret;
+	uint16_t i;
+	struct ath12k_htc_ready *ready;
+	uint16_t message_id;
+	uint16_t credit_count;
+	uint16_t credit_size;
+
+	sc->ctl_resp = 0;
+	while (!sc->ctl_resp) {
+		ret = tsleep_nsec(&sc->ctl_resp, 0, "qwzhtcinit",
+		    SEC_TO_NSEC(1));
+		if (ret) {
+			/* Anything but a timeout is a hard error. */
+			if (ret != EWOULDBLOCK)
+				return ret;
+
+			/* A second timeout while polling means give up. */
+			if (polling) {
+				printf("%s: failed to receive control response "
+				    "completion\n", sc->sc_dev.dv_xname);
+				return ret;
+			}
+
+			/*
+			 * Interrupts may not be working yet at this point
+			 * of attach; service the copy engines by hand and
+			 * wait one more second.
+			 */
+			printf("%s: failed to receive control response "
+			    "completion, polling...\n", sc->sc_dev.dv_xname);
+			polling = 1;
+
+			for (i = 0; i < sc->hw_params.ce_count; i++)
+				qwz_ce_per_engine_service(sc, i);
+		}
+	}
+
+	if (htc->control_resp_len < sizeof(*ready)) {
+		printf("%s: Invalid HTC ready msg len:%d\n", __func__,
+		    htc->control_resp_len);
+		return EINVAL;
+	}
+
+	ready = (struct ath12k_htc_ready *)htc->control_resp_buffer;
+	message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count);
+	credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT,
+	    ready->id_credit_count);
+	credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep);
+
+	if (message_id != ATH12K_HTC_MSG_READY_ID) {
+		printf("%s: Invalid HTC ready msg: 0x%x\n", __func__,
+		    message_id);
+		return EINVAL;
+	}
+
+	htc->total_transmit_credits = credit_count;
+	htc->target_credit_size = credit_size;
+
+	DNPRINTF(QWZ_D_HTC, "%s: target ready total_transmit_credits %d "
+	    "target_credit_size %d\n", __func__,
+	    htc->total_transmit_credits, htc->target_credit_size);
+
+	if ((htc->total_transmit_credits == 0) ||
+	    (htc->target_credit_size == 0)) {
+		printf("%s: Invalid credit size received\n", __func__);
+		return EINVAL;
+	}
+
+	/* For QCA6390, wmi endpoint uses 1 credit to avoid
+	 * back-to-back write.
+	 * NOTE(review): inherited from qwx(4)/ath11k; confirm the
+	 * supports_shadow_regs quirk still applies to the newer
+	 * chips targeted by qwz(4).
+	 */
+	if (sc->hw_params.supports_shadow_regs)
+		htc->total_transmit_credits = 1;
+
+	qwz_htc_setup_target_buffer_assignments(htc);
+
+	return 0;
+}
+
+/*
+ * HTC transmit-completion callback for the HTT data service.
+ */
+void
+qwz_dp_htt_htc_tx_complete(struct qwz_softc *sc, struct mbuf *m)
+{
+	/* Just free the mbuf, no further action required. */
+	m_freem(m);
+}
+
+/*
+ * Assemble a 6-byte MAC address from the two little-endian words
+ * the firmware reports in peer-map events: a 32-bit low part and
+ * a 16-bit high part.
+ */
+static inline void
+qwz_dp_get_mac_addr(uint32_t addr_l32, uint16_t addr_h16, uint8_t *addr)
+{
+	uint32_t lo = le32toh(addr_l32);
+	uint16_t hi = le16toh(addr_h16);
+#if 0	/* Linux swaps here on big-endian CPUs; we byte-swap above. */
+	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+		addr_l32 = swab32(addr_l32);
+		addr_h16 = swab16(addr_h16);
+	}
+#endif
+	/* First four bytes, then the remaining two. */
+	memcpy(addr, &lo, 4);
+	memcpy(addr + 4, &hi, IEEE80211_ADDR_LEN - 4);
+}
+
+/*
+ * Handle an HTT peer-map event: the firmware has associated a station
+ * (identified by MAC address) with a peer ID.  Record the firmware IDs
+ * in the driver's per-node state and wake up any thread sleeping on
+ * sc->peer_mapped.
+ */
+void
+qwz_peer_map_event(struct qwz_softc *sc, uint8_t vdev_id, uint16_t peer_id,
+    uint8_t *mac_addr, uint16_t ast_hash, uint16_t hw_peer_id)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni;
+	struct qwz_node *nq;
+	struct ath12k_peer *peer;
+#ifdef notyet
+	spin_lock_bh(&ab->base_lock);
+#endif
+	/*
+	 * NOTE(review): if the MAC is unknown we return without setting
+	 * sc->peer_mapped, so a waiter will run into its timeout --
+	 * confirm this is the intended behaviour.
+	 */
+	ni = ieee80211_find_node(ic, mac_addr);
+	if (ni == NULL)
+		return;
+	nq = (struct qwz_node *)ni;
+	peer = &nq->peer;
+
+	/* Mirror the firmware's identifiers into our node state. */
+	peer->vdev_id = vdev_id;
+	peer->peer_id = peer_id;
+	peer->ast_hash = ast_hash;
+	peer->hw_peer_id = hw_peer_id;
+#if 0
+	ether_addr_copy(peer->addr, mac_addr);
+	list_add(&peer->list, &ab->peers);
+#endif
+	sc->peer_mapped = 1;
+	wakeup(&sc->peer_mapped);
+
+	DNPRINTF(QWZ_D_HTT, "%s: peer map vdev %d peer %s id %d\n",
+	    __func__, vdev_id, ether_sprintf(mac_addr), peer_id);
+#ifdef notyet
+	spin_unlock_bh(&ab->base_lock);
+#endif
+}
+
+/*
+ * Look up the net80211 node whose firmware peer ID matches peer_id
+ * by walking the node tree.  Returns NULL when no node matches.
+ */
+struct ieee80211_node *
+qwz_peer_find_by_id(struct qwz_softc *sc, uint16_t peer_id)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni = NULL;
+	int s;
+
+	s = splnet();
+	RBT_FOREACH(ni, ieee80211_tree, &ic->ic_tree) {
+		if (((struct qwz_node *)ni)->peer.peer_id == peer_id)
+			break;
+	}
+	splx(s);
+
+	return ni;
+}
+
+/*
+ * Handle an HTT peer-unmap event: the firmware has released a peer ID.
+ * Wake up any thread sleeping on sc->peer_mapped; an unknown peer ID
+ * is logged and leaves waiters to time out.
+ */
+void
+qwz_peer_unmap_event(struct qwz_softc *sc, uint16_t peer_id)
+{
+	struct ieee80211_node *ni;
+#ifdef notyet
+	spin_lock_bh(&ab->base_lock);
+#endif
+	ni = qwz_peer_find_by_id(sc, peer_id);
+	if (ni == NULL) {
+		printf("%s: peer-unmap-event: unknown peer id %d\n",
+		    sc->sc_dev.dv_xname, peer_id);
+	} else {
+		DNPRINTF(QWZ_D_HTT, "%s: peer unmap peer %s id %d\n",
+		    __func__, ether_sprintf(ni->ni_macaddr), peer_id);
+#if 0
+		list_del(&peer->list);
+		kfree(peer);
+#endif
+		sc->peer_mapped = 1;
+		wakeup(&sc->peer_mapped);
+	}
+#ifdef notyet
+	spin_unlock_bh(&ab->base_lock);
+#endif
+}
+
+/*
+ * Dispatch HTT target-to-host messages received over the HTT data
+ * service endpoint.  Handles the version handshake and peer map/unmap
+ * events; all other message types are logged and dropped.  Consumes
+ * the mbuf in all cases.
+ */
+void
+qwz_dp_htt_htc_t2h_msg_handler(struct qwz_softc *sc, struct mbuf *m)
+{
+	struct qwz_dp *dp = &sc->dp;
+	struct htt_resp_msg *resp = mtod(m, struct htt_resp_msg *);
+	/* The message type lives in the first 32-bit word. */
+	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE,
+	    *(uint32_t *)resp);
+	uint16_t peer_id;
+	uint8_t vdev_id;
+	uint8_t mac_addr[IEEE80211_ADDR_LEN];
+	uint16_t peer_mac_h16;
+	uint16_t ast_hash;
+	uint16_t hw_peer_id;
+
+	/*
+	 * NOTE(review): this DPRINTF is unconditional while other debug
+	 * output in this file uses DNPRINTF(QWZ_D_HTT, ...) -- confirm
+	 * whether it is meant to stay this chatty after bringup.
+	 */
+	DPRINTF("%s: dp_htt rx msg type: 0x%0x\n", __func__, type);
+
+	switch (type) {
+	case HTT_T2H_MSG_TYPE_VERSION_CONF:
+		/* Record the target's HTT version and wake the waiter. */
+		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
+		    resp->version_msg.version);
+		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
+		    resp->version_msg.version);
+		dp->htt_tgt_version_received = 1;
+		wakeup(&dp->htt_tgt_version_received);
+		break;
+	case HTT_T2H_MSG_TYPE_PEER_MAP:
+		/* v1 peer map: no AST hash or hardware peer ID. */
+		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
+		    resp->peer_map_ev.info);
+		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
+		    resp->peer_map_ev.info);
+		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+		    resp->peer_map_ev.info1);
+		qwz_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+		    peer_mac_h16, mac_addr);
+		qwz_peer_map_event(sc, vdev_id, peer_id, mac_addr, 0, 0);
+		break;
+	case HTT_T2H_MSG_TYPE_PEER_MAP2:
+		/* v2 peer map additionally carries ast_hash/hw_peer_id. */
+		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
+		    resp->peer_map_ev.info);
+		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
+		    resp->peer_map_ev.info);
+		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+		    resp->peer_map_ev.info1);
+		qwz_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+		    peer_mac_h16, mac_addr);
+		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
+		    resp->peer_map_ev.info2);
+		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
+				       resp->peer_map_ev.info1);
+		qwz_peer_map_event(sc, vdev_id, peer_id, mac_addr, ast_hash,
+		    hw_peer_id);
+		break;
+	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
+		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
+		    resp->peer_unmap_ev.info);
+		qwz_peer_unmap_event(sc, peer_id);
+		break;
+#if 0
+	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
+		ath12k_htt_pull_ppdu_stats(ab, skb);
+		break;
+	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+		ath12k_debugfs_htt_ext_stats_handler(ab, skb);
+		break;
+	case HTT_T2H_MSG_TYPE_PKTLOG:
+		ath12k_htt_pktlog(ab, skb);
+		break;
+	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
+		ath12k_htt_backpressure_event_handler(ab, skb);
+		break;
+#endif
+	default:
+		printf("%s: htt event %d not handled\n", __func__, type);
+		break;
+	}
+
+	m_freem(m);
+}
+
+/*
+ * Connect the HTT data service over HTC and remember which endpoint
+ * the target assigned to it.  Returns 0 or an errno.
+ */
+int
+qwz_dp_htt_connect(struct qwz_dp *dp)
+{
+	struct qwz_htc_svc_conn_req req;
+	struct qwz_htc_svc_conn_resp resp;
+	int ret;
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.ep_ops.ep_tx_complete = qwz_dp_htt_htc_tx_complete;
+	req.ep_ops.ep_rx_complete = qwz_dp_htt_htc_t2h_msg_handler;
+
+	/* connect to control service */
+	req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;
+
+	ret = qwz_htc_connect_service(&dp->sc->htc, &req, &resp);
+	if (ret)
+		return ret;
+
+	/* Remember the endpoint assigned to HTT data messages. */
+	dp->eid = resp.eid;
+
+	return 0;
+}
+
+/*
+ * Tear down all REO destination rings.
+ */
+void
+qwz_dp_pdev_reo_cleanup(struct qwz_softc *sc)
+{
+	struct qwz_dp *dp = &sc->dp;
+	int ring;
+
+	for (ring = 0; ring < DP_REO_DST_RING_MAX; ring++)
+		qwz_dp_srng_cleanup(sc, &dp->reo_dst_ring[ring]);
+}
+
+/*
+ * Set up all REO destination rings.  On any failure every ring set
+ * up so far is cleaned up again.  Returns 0 or an errno.
+ */
+int
+qwz_dp_pdev_reo_setup(struct qwz_softc *sc)
+{
+	struct qwz_dp *dp = &sc->dp;
+	int ring, err;
+
+	for (ring = 0; ring < DP_REO_DST_RING_MAX; ring++) {
+		err = qwz_dp_srng_setup(sc, &dp->reo_dst_ring[ring],
+		    HAL_REO_DST, ring, 0, DP_REO_DST_RING_SIZE);
+		if (err == 0)
+			continue;
+		printf("%s: failed to setup reo_dst_ring\n", __func__);
+		/* Undo the rings that were already set up. */
+		qwz_dp_pdev_reo_cleanup(sc);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down all per-pdev RX srngs: the refill ring, the per-rxdma
+ * rings and the monitor buffer ring.
+ */
+void
+qwz_dp_rx_pdev_srng_free(struct qwz_softc *sc, int mac_id)
+{
+	struct qwz_pdev_dp *dp = &sc->pdev_dp;
+	int idx;
+
+	qwz_dp_srng_cleanup(sc, &dp->rx_refill_buf_ring.refill_buf_ring);
+
+	for (idx = 0; idx < sc->hw_params.num_rxmda_per_pdev; idx++) {
+		if (sc->hw_params.rx_mac_buf_ring)
+			qwz_dp_srng_cleanup(sc, &dp->rx_mac_buf_ring[idx]);
+		qwz_dp_srng_cleanup(sc, &dp->rxdma_err_dst_ring[idx]);
+		qwz_dp_srng_cleanup(sc,
+		    &dp->rx_mon_status_refill_ring[idx].refill_buf_ring);
+	}
+
+	qwz_dp_srng_cleanup(sc, &dp->rxdma_mon_buf_ring.refill_buf_ring);
+}
+
+/*
+ * Set up the per-pdev RX srngs: the refill buffer ring, optional
+ * per-rxdma MAC buffer rings, and the rxdma error destination rings.
+ * The monitor rings are only set up when rxdma1 is enabled (currently
+ * stubbed out); otherwise only the monitor reap timer is initialized.
+ * Returns 0 or an errno.
+ */
+int
+qwz_dp_rx_pdev_srng_alloc(struct qwz_softc *sc)
+{
+	struct qwz_pdev_dp *dp = &sc->pdev_dp;
+#if 0
+	struct dp_srng *srng = NULL;
+#endif
+	int i;
+	int ret;
+
+	ret = qwz_dp_srng_setup(sc, &dp->rx_refill_buf_ring.refill_buf_ring,
+	    HAL_RXDMA_BUF, 0, dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
+	if (ret) {
+		printf("%s: failed to setup rx_refill_buf_ring\n",
+		    sc->sc_dev.dv_xname);
+		return ret;
+	}
+
+	/* One MAC buffer ring per rxdma, if the chip uses them. */
+	if (sc->hw_params.rx_mac_buf_ring) {
+		for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
+			ret = qwz_dp_srng_setup(sc, &dp->rx_mac_buf_ring[i],
+			    HAL_RXDMA_BUF, 1, dp->mac_id + i, 1024);
+			if (ret) {
+				printf("%s: failed to setup "
+				    "rx_mac_buf_ring %d\n",
+				    sc->sc_dev.dv_xname, i);
+				return ret;
+			}
+		}
+	}
+
+	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
+		ret = qwz_dp_srng_setup(sc, &dp->rxdma_err_dst_ring[i],
+		    HAL_RXDMA_DST, 0, dp->mac_id + i,
+		    DP_RXDMA_ERR_DST_RING_SIZE);
+		if (ret) {
+			printf("%s: failed to setup rxdma_err_dst_ring %d\n",
+			   sc->sc_dev.dv_xname, i);
+			return ret;
+		}
+	}
+#if 0
+	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
+		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+		ret = qwz_dp_srng_setup(sc, srng, HAL_RXDMA_MONITOR_STATUS, 0,
+		    dp->mac_id + i, DP_RXDMA_MON_STATUS_RING_SIZE);
+		if (ret) {
+			printf("%s: failed to setup "
+			    "rx_mon_status_refill_ring %d\n",
+			    sc->sc_dev.dv_xname, i);
+			return ret;
+		}
+	}
+#endif
+	/* if rxdma1_enable is false, then it doesn't need
+	 * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring
+	 * and rxdma_mon_desc_ring.
+	 * init reap timer for QCA6390.
+	 * NOTE(review): comment inherited from qwx(4)/ath11k; confirm
+	 * it matches the chips driven by qwz(4).
+	 */
+	if (!sc->hw_params.rxdma1_enable) {
+		timeout_set(&sc->mon_reap_timer, qwz_dp_service_mon_ring, sc);
+		return 0;
+	}
+#if 0
+	ret = ath12k_dp_srng_setup(ar->ab,
+				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
+				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
+				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
+		return ret;
+	}
+
+	ret = ath12k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
+				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
+				   DP_RXDMA_MONITOR_DST_RING_SIZE);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
+		return ret;
+	}
+
+	ret = ath12k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
+				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
+				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
+		return ret;
+	}
+#endif
+	return 0;
+}
+
+/*
+ * Release every RX buffer loaded into an rxdma ring: unload and free
+ * the mbufs, destroy the DMA maps, then free the rx_data array and
+ * mark all slots free again.
+ */
+void
+qwz_dp_rxdma_buf_ring_free(struct qwz_softc *sc, struct dp_rxdma_ring *rx_ring)
+{
+	int idx;
+
+	for (idx = 0; idx < rx_ring->bufs_max; idx++) {
+		struct qwz_rx_data *rxd = &rx_ring->rx_data[idx];
+
+		if (rxd->map == NULL)
+			continue;
+
+		if (rxd->m != NULL) {
+			bus_dmamap_unload(sc->sc_dmat, rxd->map);
+			m_free(rxd->m);
+			rxd->m = NULL;
+		}
+
+		bus_dmamap_destroy(sc->sc_dmat, rxd->map);
+		rxd->map = NULL;
+	}
+
+	free(rx_ring->rx_data, M_DEVBUF,
+	    sizeof(rx_ring->rx_data[0]) * rx_ring->bufs_max);
+	rx_ring->rx_data = NULL;
+	rx_ring->bufs_max = 0;
+	/* All slots are free again. */
+	memset(rx_ring->freemap, 0xff, sizeof(rx_ring->freemap));
+}
+
+/*
+ * Free the RX buffers of every per-pdev rxdma ring: the refill ring,
+ * the monitor buffer ring and the monitor status refill rings.
+ */
+void
+qwz_dp_rxdma_pdev_buf_free(struct qwz_softc *sc, int mac_id)
+{
+	struct qwz_pdev_dp *dp = &sc->pdev_dp;
+	int idx;
+
+	qwz_dp_rxdma_buf_ring_free(sc, &dp->rx_refill_buf_ring);
+	qwz_dp_rxdma_buf_ring_free(sc, &dp->rxdma_mon_buf_ring);
+
+	for (idx = 0; idx < sc->hw_params.num_rxmda_per_pdev; idx++)
+		qwz_dp_rxdma_buf_ring_free(sc,
+		    &dp->rx_mon_status_refill_ring[idx]);
+}
+
+/*
+ * Fill in a hardware buffer-address descriptor: the 64-bit DMA
+ * address is split across the two info words, together with the
+ * software cookie and the return-buffer manager ID.
+ */
+void
+qwz_hal_rx_buf_addr_info_set(void *desc, uint64_t paddr, uint32_t cookie,
+    uint8_t manager)
+{
+	struct ath12k_buffer_addr *binfo = desc;
+	uint32_t lo, hi;
+
+	lo = paddr & 0xffffffff;
+	hi = paddr >> 32;
+	binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, lo);
+	binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, hi) |
+	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie) |
+	    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager);
+}
+
+/*
+ * Decode a hardware buffer-address descriptor into its 64-bit DMA
+ * address, software cookie and return-buffer manager ID.
+ */
+void
+qwz_hal_rx_buf_addr_info_get(void *desc, uint64_t *paddr, uint32_t *cookie,
+    uint8_t *rbm)
+{
+	struct ath12k_buffer_addr *binfo = desc;
+	uint64_t lo, hi;
+
+	lo = FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
+	hi = FIELD_GET(BUFFER_ADDR_INFO1_ADDR, binfo->info1);
+	*paddr = (hi << 32) | lo;
+	*cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
+	*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);
+}
+
+/*
+ * Find the index of the first free RX buffer slot, or -1 if the
+ * ring is full.  freemap is a bitmask of free slots with 8 bits
+ * per array element; ffs() finds the lowest set bit.
+ */
+int
+qwz_next_free_rxbuf_idx(struct dp_rxdma_ring *rx_ring)
+{
+	int word, bit;
+
+	for (word = 0; word < nitems(rx_ring->freemap); word++) {
+		bit = ffs(rx_ring->freemap[word]);
+		if (bit > 0)
+			return (word * 8) + (bit - 1);
+	}
+
+	return -1;
+}
+
+/*
+ * Replenish an rxdma ring with freshly allocated, DMA-mapped mbufs.
+ * Up to req_entries buffers are added (clamped by the ring capacity
+ * and the free descriptors in the underlying srng); a req_entries of
+ * 0 triggers a top-up when the ring has drained below a quarter.
+ * Each descriptor receives the buffer's DMA address plus a cookie
+ * encoding (mac_id, slot index) so completions can find the mbuf.
+ * Returns 0 or ENOBUFS when allocation/mapping/descriptor space
+ * runs out.
+ */
+int
+qwz_dp_rxbufs_replenish(struct qwz_softc *sc, int mac_id,
+    struct dp_rxdma_ring *rx_ring, int req_entries,
+    enum hal_rx_buf_return_buf_manager mgr)
+{
+	struct hal_srng *srng;
+	uint32_t *desc;
+	struct mbuf *m;
+	int num_free;
+	int num_remain;
+	int ret, idx;
+	uint32_t cookie;
+	uint64_t paddr;
+	struct qwz_rx_data *rx_data;
+
+	req_entries = MIN(req_entries, rx_ring->bufs_max);
+
+	srng = &sc->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+#ifdef notyet
+	spin_lock_bh(&srng->lock);
+#endif
+	qwz_hal_srng_access_begin(sc, srng);
+
+	/* With no explicit request, top up once 3/4 of the ring is free. */
+	num_free = qwz_hal_srng_src_num_free(sc, srng, 1);
+	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+		req_entries = num_free;
+
+	req_entries = MIN(num_free, req_entries);
+	num_remain = req_entries;
+
+	while (num_remain > 0) {
+		const size_t size = DP_RX_BUFFER_SIZE;
+
+		m = m_gethdr(M_DONTWAIT, MT_DATA);
+		if (m == NULL)
+			goto fail_free_mbuf;
+
+		if (size <= MCLBYTES)
+			MCLGET(m, M_DONTWAIT);
+		else
+			MCLGETL(m, M_DONTWAIT, size);
+		if ((m->m_flags & M_EXT) == 0)
+			goto fail_free_mbuf;
+
+		m->m_len = m->m_pkthdr.len = size;
+
+		idx = qwz_next_free_rxbuf_idx(rx_ring);
+		if (idx == -1)
+			goto fail_free_mbuf;
+
+		/* DMA maps are created lazily and reused across refills. */
+		rx_data = &rx_ring->rx_data[idx];
+		if (rx_data->map == NULL) {
+			ret = bus_dmamap_create(sc->sc_dmat, size, 1,
+			    size, 0, BUS_DMA_NOWAIT, &rx_data->map);
+			if (ret)
+				goto fail_free_mbuf;
+		}
+
+		ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, m,
+		    BUS_DMA_READ | BUS_DMA_NOWAIT);
+		if (ret) {
+			printf("%s: can't map mbuf (error %d)\n",
+			    sc->sc_dev.dv_xname, ret);
+			goto fail_free_mbuf;
+		}
+
+		desc = qwz_hal_srng_src_get_next_entry(sc, srng);
+		if (!desc)
+			goto fail_dma_unmap;
+
+		/* Ownership of the mbuf moves to the ring slot. */
+		rx_data->m = m;
+		m = NULL;
+
+		/* The cookie lets the completion path find this slot. */
+		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+		    FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, idx);
+
+		clrbit(rx_ring->freemap, idx);
+		num_remain--;
+
+		paddr = rx_data->map->dm_segs[0].ds_addr;
+		qwz_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+	}
+
+	qwz_hal_srng_access_end(sc, srng);
+#ifdef notyet
+	spin_unlock_bh(&srng->lock);
+#endif
+	return 0;
+
+fail_dma_unmap:
+	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
+fail_free_mbuf:
+	m_free(m);
+
+	qwz_hal_srng_access_end(sc, srng);
+#ifdef notyet
+	spin_unlock_bh(&srng->lock);
+#endif
+	return ENOBUFS;
+}
+
+/*
+ * Allocate the rx_data slot array for an rxdma ring, sized to the
+ * number of descriptors that fit into the underlying srng, and fill
+ * the ring with DMA-mapped buffers.  Returns 0 or an errno.
+ */
+int
+qwz_dp_rxdma_ring_buf_setup(struct qwz_softc *sc,
+    struct dp_rxdma_ring *rx_ring, uint32_t ringtype)
+{
+	struct qwz_pdev_dp *dp = &sc->pdev_dp;
+	int nentries;
+
+	nentries = rx_ring->refill_buf_ring.size /
+	    qwz_hal_srng_get_entrysize(sc, ringtype);
+
+	KASSERT(rx_ring->rx_data == NULL);
+	rx_ring->rx_data = mallocarray(nentries, sizeof(rx_ring->rx_data[0]),
+	    M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (rx_ring->rx_data == NULL)
+		return ENOMEM;
+
+	rx_ring->bufs_max = nentries;
+	/* All slots start out free. */
+	memset(rx_ring->freemap, 0xff, sizeof(rx_ring->freemap));
+
+	return qwz_dp_rxbufs_replenish(sc, dp->mac_id, rx_ring, nentries,
+	    sc->hw_params.hal_params->rx_buf_rbm);
+}
+
+/*
+ * Populate the per-pdev rxdma rings with buffers: the refill ring
+ * always, the monitor buffer ring only when rxdma1 is enabled.
+ * Returns 0 or an errno.
+ */
+int
+qwz_dp_rxdma_pdev_buf_setup(struct qwz_softc *sc)
+{
+	struct qwz_pdev_dp *dp = &sc->pdev_dp;
+	int err;
+#if 0
+	int i;
+#endif
+
+	err = qwz_dp_rxdma_ring_buf_setup(sc, &dp->rx_refill_buf_ring,
+	    HAL_RXDMA_BUF);
+	if (err)
+		return err;
+
+	if (sc->hw_params.rxdma1_enable) {
+		err = qwz_dp_rxdma_ring_buf_setup(sc, &dp->rxdma_mon_buf_ring,
+		    HAL_RXDMA_MONITOR_BUF);
+		if (err)
+			return err;
+	}
+#if 0
+	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
+		err = qwz_dp_rxdma_ring_buf_setup(sc,
+		    &dp->rx_mon_status_refill_ring[i],
+		    HAL_RXDMA_MONITOR_STATUS);
+		if (err)
+			return err;
+	}
+#endif
+	return 0;
+}
+
+/*
+ * Release all per-pdev RX state: first the srngs, then the DMA
+ * buffers that were loaded into them.
+ */
+void
+qwz_dp_rx_pdev_free(struct qwz_softc *sc, int mac_id)
+{
+	qwz_dp_rx_pdev_srng_free(sc, mac_id);
+	qwz_dp_rxdma_pdev_buf_free(sc, mac_id);
+}
+
+/*
+ * Return the physical address of an LMAC ring's head pointer, or 0
+ * for rings whose head pointer is not kept in host memory.
+ */
+bus_addr_t
+qwz_hal_srng_get_hp_addr(struct qwz_softc *sc, struct hal_srng *srng)
+{
+       unsigned long off;
+
+       if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+               return 0;
+
+       if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+               /* Source ring head pointers live in the write pointer area. */
+               off = (unsigned long)srng->u.src_ring.hp_addr -
+                   (unsigned long)sc->hal.wrp.vaddr;
+               return sc->hal.wrp.paddr + off;
+       }
+
+       /* Destination ring head pointers live in the read pointer area. */
+       off = (unsigned long)srng->u.dst_ring.hp_addr -
+           (unsigned long)sc->hal.rdp.vaddr;
+       return sc->hal.rdp.paddr + off;
+}
+
+/*
+ * Return the physical address of an LMAC ring's tail pointer, or 0
+ * for rings whose tail pointer is not kept in host memory.
+ */
+bus_addr_t
+qwz_hal_srng_get_tp_addr(struct qwz_softc *sc, struct hal_srng *srng)
+{
+       unsigned long off;
+
+       if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+               return 0;
+
+       if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+               /* Source ring tail pointers live in the read pointer area. */
+               off = (unsigned long)srng->u.src_ring.tp_addr -
+                   (unsigned long)sc->hal.rdp.vaddr;
+               return sc->hal.rdp.paddr + off;
+       }
+
+       /* Destination ring tail pointers live in the write pointer area. */
+       off = (unsigned long)srng->u.dst_ring.tp_addr -
+           (unsigned long)sc->hal.wrp.vaddr;
+       return sc->hal.wrp.paddr + off;
+}
+
+/*
+ * Translate a HAL ring type and ring id into the HTT ring type and
+ * ring id used in the firmware's SRING setup command.
+ * Returns 0 on success or EINVAL for unsupported/unexpected rings.
+ */
+int
+qwz_dp_tx_get_ring_id_type(struct qwz_softc *sc, int mac_id, uint32_t ring_id,
+    enum hal_ring_type ring_type, enum htt_srng_ring_type *htt_ring_type,
+    enum htt_srng_ring_id *htt_ring_id)
+{
+       int lmac_ring_id_offset = 0;
+
+       switch (ring_type) {
+       case HAL_RXDMA_BUF:
+               /* Each LMAC owns a contiguous range of ring ids. */
+               lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
+
+               /* for QCA6390, host fills rx buffer to fw and fw fills to
+                * rxbuf ring for each rxdma
+                */
+               if (!sc->hw_params.rx_mac_buf_ring) {
+                       if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
+                           lmac_ring_id_offset) ||
+                           ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
+                           lmac_ring_id_offset)))
+                               return EINVAL;
+                       *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+                       *htt_ring_type = HTT_SW_TO_HW_RING;
+               } else {
+                       if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
+                               *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
+                               *htt_ring_type = HTT_SW_TO_SW_RING;
+                       } else {
+                               *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+                               *htt_ring_type = HTT_SW_TO_HW_RING;
+                       }
+               }
+               break;
+       case HAL_RXDMA_DST:
+               *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
+               *htt_ring_type = HTT_HW_TO_SW_RING;
+               break;
+       case HAL_RXDMA_MONITOR_BUF:
+               *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+               *htt_ring_type = HTT_SW_TO_HW_RING;
+               break;
+       case HAL_RXDMA_MONITOR_STATUS:
+               *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
+               *htt_ring_type = HTT_SW_TO_HW_RING;
+               break;
+       case HAL_RXDMA_MONITOR_DST:
+               *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+               *htt_ring_type = HTT_HW_TO_SW_RING;
+               break;
+       case HAL_RXDMA_MONITOR_DESC:
+               *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
+               *htt_ring_type = HTT_SW_TO_HW_RING;
+               break;
+       default:
+               printf("%s: Unsupported ring type in DP :%d\n",
+                   sc->sc_dev.dv_xname, ring_type);
+               return EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * Send an HTT SRING setup command describing a host ring (base
+ * address, entry size, head/tail pointer addresses, MSI target and
+ * interrupt thresholds) to the firmware.
+ * Returns 0 on success or an errno.
+ */
+int
+qwz_dp_tx_htt_srng_setup(struct qwz_softc *sc, uint32_t ring_id, int mac_id,
+    enum hal_ring_type ring_type)
+{
+       struct htt_srng_setup_cmd *cmd;
+       struct hal_srng *srng = &sc->hal.srng_list[ring_id];
+       struct hal_srng_params params;
+       struct mbuf *m;
+       uint32_t ring_entry_sz;
+       uint64_t hp_addr, tp_addr;
+       enum htt_srng_ring_type htt_ring_type;
+       enum htt_srng_ring_id htt_ring_id;
+       int ret;
+
+       m = qwz_htc_alloc_mbuf(sizeof(*cmd));
+       if (!m)
+               return ENOMEM;
+
+       memset(&params, 0, sizeof(params));
+       qwz_hal_srng_get_params(sc, srng, &params);
+
+       hp_addr = qwz_hal_srng_get_hp_addr(sc, srng);
+       tp_addr = qwz_hal_srng_get_tp_addr(sc, srng);
+
+       ret = qwz_dp_tx_get_ring_id_type(sc, mac_id, ring_id,
+           ring_type, &htt_ring_type, &htt_ring_id);
+       if (ret)
+               goto err_free;
+
+       /* The command follows the HTC header in the mbuf. */
+       cmd = (struct htt_srng_setup_cmd *)(mtod(m, uint8_t *) +
+           sizeof(struct ath12k_htc_hdr));
+       cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
+           HTT_H2T_MSG_TYPE_SRING_SETUP);
+       if (htt_ring_type == HTT_SW_TO_HW_RING ||
+           htt_ring_type == HTT_HW_TO_SW_RING)
+               cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
+                   DP_SW2HW_MACID(mac_id));
+       else
+               cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
+                   mac_id);
+       cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
+           htt_ring_type);
+       cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);
+
+       cmd->ring_base_addr_lo = params.ring_base_paddr & HAL_ADDR_LSB_REG_MASK;
+
+       cmd->ring_base_addr_hi = (uint64_t)params.ring_base_paddr >>
+           HAL_ADDR_MSB_REG_SHIFT;
+
+       ring_entry_sz = qwz_hal_srng_get_entrysize(sc, ring_type);
+
+       /* The firmware expects entry and ring sizes in 32-bit words. */
+       ring_entry_sz >>= 2;
+       cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
+           ring_entry_sz);
+       cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
+           params.num_entries * ring_entry_sz);
+       cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
+           !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
+       cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
+           !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
+       cmd->info1 |= FIELD_PREP(
+           HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
+           !!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
+       if (htt_ring_type == HTT_SW_TO_HW_RING)
+               cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;
+
+       cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
+       cmd->ring_head_off32_remote_addr_hi = hp_addr >> HAL_ADDR_MSB_REG_SHIFT;
+
+       cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
+       cmd->ring_tail_off32_remote_addr_hi = tp_addr >> HAL_ADDR_MSB_REG_SHIFT;
+
+       /* NOTE(review): only the low 32 bits of the MSI address are
+        * programmed; assumes MSI addresses below 4GB -- TODO confirm. */
+       cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff;
+       cmd->ring_msi_addr_hi = 0;
+       cmd->msi_data = params.msi_data;
+
+       cmd->intr_info = FIELD_PREP(
+           HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
+           params.intr_batch_cntr_thres_entries * ring_entry_sz);
+       cmd->intr_info |= FIELD_PREP(
+           HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
+           params.intr_timer_thres_us >> 3);
+
+       cmd->info2 = 0;
+       if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+               cmd->info2 = FIELD_PREP(
+                   HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
+                   params.low_threshold);
+       }
+
+       DNPRINTF(QWZ_D_HTT, "%s: htt srng setup msi_addr_lo 0x%x "
+           "msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d "
+           "intr_info 0x%x flags 0x%x\n", __func__, cmd->ring_msi_addr_lo,
+           cmd->ring_msi_addr_hi, cmd->msi_data, ring_id, ring_type,
+           cmd->intr_info, cmd->info2);
+
+       ret = qwz_htc_send(&sc->htc, sc->dp.eid, m);
+       if (ret)
+               goto err_free;
+
+       return 0;
+
+err_free:
+       m_freem(m);
+
+       return ret;
+}
+
+/*
+ * Request PPDU statistics from the firmware by sending one HTT PPDU
+ * stats config command per rxdma unit of the given pdev.  The mask
+ * selects which stats TLV types the firmware should report.
+ * Returns 0 on success or an errno.
+ */
+int
+qwz_dp_tx_htt_h2t_ppdu_stats_req(struct qwz_softc *sc, uint32_t mask,
+    uint8_t pdev_id)
+{
+       struct qwz_dp *dp = &sc->dp;
+       struct mbuf *m;
+       struct htt_ppdu_stats_cfg_cmd *cmd;
+       int len = sizeof(*cmd);
+       uint8_t pdev_mask;
+       int ret;
+       int i;
+
+       for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
+               m = qwz_htc_alloc_mbuf(len);
+               if (!m)
+                       return ENOMEM;
+
+               /* The command follows the HTC header in the mbuf. */
+               cmd = (struct htt_ppdu_stats_cfg_cmd *)(mtod(m, uint8_t *) +
+                   sizeof(struct ath12k_htc_hdr));
+               cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+                                     HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
+
+               /* Address one rxdma unit of this pdev per iteration. */
+               pdev_mask = 1 << (pdev_id + i);
+               cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
+               cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK,
+                   mask);
+
+               ret = qwz_htc_send(&sc->htc, dp->eid, m);
+               if (ret) {
+                       m_freem(m);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Program the firmware's RX packet and TLV filters for a given RX
+ * ring by sending an HTT RX ring selection config command.
+ * Returns 0 on success or an errno.
+ */
+int
+qwz_dp_tx_htt_rx_filter_setup(struct qwz_softc *sc, uint32_t ring_id,
+    int mac_id, enum hal_ring_type ring_type, size_t rx_buf_size,
+    struct htt_rx_ring_tlv_filter *tlv_filter)
+{
+       struct htt_rx_ring_selection_cfg_cmd *cmd;
+       struct hal_srng *srng = &sc->hal.srng_list[ring_id];
+       struct hal_srng_params params;
+       struct mbuf *m;
+       int len = sizeof(*cmd);
+       enum htt_srng_ring_type htt_ring_type;
+       enum htt_srng_ring_id htt_ring_id;
+       int ret;
+
+       m = qwz_htc_alloc_mbuf(len);
+       if (!m)
+               return ENOMEM;
+
+       memset(&params, 0, sizeof(params));
+       qwz_hal_srng_get_params(sc, srng, &params);
+
+       ret = qwz_dp_tx_get_ring_id_type(sc, mac_id, ring_id,
+           ring_type, &htt_ring_type, &htt_ring_id);
+       if (ret)
+               goto err_free;
+
+       /* The command follows the HTC header in the mbuf. */
+       cmd = (struct htt_rx_ring_selection_cfg_cmd *)(mtod(m, uint8_t *) +
+           sizeof(struct ath12k_htc_hdr));
+       cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
+           HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
+       if (htt_ring_type == HTT_SW_TO_HW_RING ||
+           htt_ring_type == HTT_HW_TO_SW_RING) {
+               cmd->info0 |=
+                   FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
+                   DP_SW2HW_MACID(mac_id));
+       } else {
+               cmd->info0 |=
+                   FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
+                   mac_id);
+       }
+       cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
+           htt_ring_id);
+       cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
+           !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
+       cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
+           !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
+
+       cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
+           rx_buf_size);
+       /* Copy the caller-provided packet and TLV filter settings. */
+       cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
+       cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
+       cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
+       cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
+       cmd->rx_filter_tlv = tlv_filter->rx_filter;
+
+       ret = qwz_htc_send(&sc->htc, sc->dp.eid, m);
+       if (ret)
+               goto err_free;
+
+       return 0;
+
+err_free:
+       m_freem(m);
+
+       return ret;
+}
+
+/*
+ * Allocate and configure all RX rings of a pdev: allocate the SRNGs
+ * and their DMA buffers, then announce each ring to the firmware via
+ * HTT SRING setup commands.  Returns 0 on success or an errno.
+ */
+int
+qwz_dp_rx_pdev_alloc(struct qwz_softc *sc, int mac_id)
+{
+       struct qwz_pdev_dp *dp = &sc->pdev_dp;
+       uint32_t ring_id;
+       int i;
+       int ret;
+
+       ret = qwz_dp_rx_pdev_srng_alloc(sc);
+       if (ret) {
+               printf("%s: failed to setup rx srngs: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       ret = qwz_dp_rxdma_pdev_buf_setup(sc);
+       if (ret) {
+               printf("%s: failed to setup rxdma ring: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       /* Register the refill buffer ring with the firmware. */
+       ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+       ret = qwz_dp_tx_htt_srng_setup(sc, ring_id, mac_id, HAL_RXDMA_BUF);
+       if (ret) {
+               printf("%s: failed to configure rx_refill_buf_ring: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       /* On chips with per-MAC buffer rings, register one per rxdma. */
+       if (sc->hw_params.rx_mac_buf_ring) {
+               for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
+                       ring_id = dp->rx_mac_buf_ring[i].ring_id;
+                       ret = qwz_dp_tx_htt_srng_setup(sc, ring_id,
+                           mac_id + i, HAL_RXDMA_BUF);
+                       if (ret) {
+                               printf("%s: failed to configure "
+                                   "rx_mac_buf_ring%d: %d\n",
+                                   sc->sc_dev.dv_xname, i, ret);
+                               return ret;
+                       }
+               }
+       }
+
+       /* Register the error destination rings. */
+       for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
+               ring_id = dp->rxdma_err_dst_ring[i].ring_id;
+               ret = qwz_dp_tx_htt_srng_setup(sc, ring_id, mac_id + i,
+                   HAL_RXDMA_DST);
+               if (ret) {
+                       printf("%s: failed to configure "
+                           "rxdma_err_dest_ring%d %d\n",
+                           sc->sc_dev.dv_xname, i, ret);
+                       return ret;
+               }
+       }
+
+       /* Monitor rings are only present with rxdma1; not used yet. */
+       if (!sc->hw_params.rxdma1_enable)
+               goto config_refill_ring;
+#if 0
+       ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+       ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+                                         mac_id, HAL_RXDMA_MONITOR_BUF);
+       if (ret) {
+               ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
+                           ret);
+               return ret;
+       }
+       ret = ath12k_dp_tx_htt_srng_setup(ab,
+                                         dp->rxdma_mon_dst_ring.ring_id,
+                                         mac_id, HAL_RXDMA_MONITOR_DST);
+       if (ret) {
+               ath12k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
+                           ret);
+               return ret;
+       }
+       ret = ath12k_dp_tx_htt_srng_setup(ab,
+                                         dp->rxdma_mon_desc_ring.ring_id,
+                                         mac_id, HAL_RXDMA_MONITOR_DESC);
+       if (ret) {
+               ath12k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
+                           ret);
+               return ret;
+       }
+#endif
+config_refill_ring:
+       /* Monitor status refill rings are not used yet. */
+#if 0
+       for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
+               ret = qwz_dp_tx_htt_srng_setup(sc,
+                   dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id,
+                   mac_id + i, HAL_RXDMA_MONITOR_STATUS);
+               if (ret) {
+                       printf("%s: failed to configure "
+                           "mon_status_refill_ring%d %d\n",
+                           sc->sc_dev.dv_xname, i, ret);
+                       return ret;
+               }
+       }
+#endif
+       return 0;
+}
+
+/*
+ * Free per-pdev datapath state for all radios.  The monitor reap
+ * timer is cancelled first so it cannot fire during teardown.
+ */
+void
+qwz_dp_pdev_free(struct qwz_softc *sc)
+{
+       int pdev_id;
+
+       timeout_del(&sc->mon_reap_timer);
+
+       for (pdev_id = 0; pdev_id < sc->num_radios; pdev_id++)
+               qwz_dp_rx_pdev_free(sc, pdev_id);
+}
+
+/*
+ * Allocate per-pdev RX datapath state for all radios.  On failure,
+ * everything allocated so far is torn down again.
+ * Returns 0 on success or an errno.
+ */
+int
+qwz_dp_pdev_alloc(struct qwz_softc *sc)
+{
+       int pdev_id, ret = 0;
+
+       for (pdev_id = 0; pdev_id < sc->num_radios; pdev_id++) {
+               ret = qwz_dp_rx_pdev_alloc(sc, pdev_id);
+               if (ret == 0)
+                       continue;
+               printf("%s: failed to allocate pdev rx "
+                   "for pdev_id %d\n", sc->sc_dev.dv_xname, pdev_id);
+               qwz_dp_pdev_free(sc);
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * Perform the HTT version exchange with the firmware: send a version
+ * request, sleep until the response arrives (3 second timeout), and
+ * verify the target's major version is one we support.
+ * Returns 0, ENOMEM, ETIMEDOUT, ENOTSUP, or an errno from the send.
+ */
+int
+qwz_dp_tx_htt_h2t_ver_req_msg(struct qwz_softc *sc)
+{
+       struct qwz_dp *dp = &sc->dp;
+       struct mbuf *m;
+       struct htt_ver_req_cmd *cmd;
+       int len = sizeof(*cmd);
+       int ret;
+
+       dp->htt_tgt_version_received = 0;
+
+       m = qwz_htc_alloc_mbuf(len);
+       if (!m)
+               return ENOMEM;
+
+       /* The command follows the HTC header in the mbuf. */
+       cmd = (struct htt_ver_req_cmd *)(mtod(m, uint8_t *) +
+           sizeof(struct ath12k_htc_hdr));
+       cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
+           HTT_H2T_MSG_TYPE_VERSION_REQ);
+
+       ret = qwz_htc_send(&sc->htc, dp->eid, m);
+       if (ret) {
+               m_freem(m);
+               return ret;
+       }
+
+       /* The version response handler wakes us up. */
+       while (!dp->htt_tgt_version_received) {
+               ret = tsleep_nsec(&dp->htt_tgt_version_received, 0,
+                   "qwztgtver", SEC_TO_NSEC(3));
+               if (ret)
+                       return ETIMEDOUT;
+       }
+
+       if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
+               printf("%s: unsupported htt major version %d "
+                   "supported version is %d\n", __func__,
+                   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
+               return ENOTSUP;
+       }
+
+       return 0;
+}
+
+/*
+ * Choose the TX descriptor address-search configuration for a vif
+ * based on its operating mode.
+ */
+void
+qwz_dp_update_vdev_search(struct qwz_softc *sc, struct qwz_vif *arvif)
+{
+       switch (arvif->vdev_type) {
+       case WMI_VDEV_TYPE_STA:
+               /*
+                * With HTT peer map v2, index-based address search can
+                * be used (tcl uses the ast_hash value in the descriptor);
+                * otherwise fall back to the default search without an
+                * address search index.
+                */
+               if (sc->hw_params.htt_peer_map_v2) {
+                       arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+                       arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
+                       break;
+               }
+               arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
+               arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+               break;
+       case WMI_VDEV_TYPE_AP:
+       case WMI_VDEV_TYPE_IBSS:
+               arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+               arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+               break;
+       case WMI_VDEV_TYPE_MONITOR:
+       default:
+               /* Monitor and unknown vdev types are left untouched. */
+               break;
+       }
+}
+
+/*
+ * Initialize a vif's TX datapath state: encode the TCL metadata and
+ * select the address search configuration.
+ */
+void
+qwz_dp_vdev_tx_attach(struct qwz_softc *sc, struct qwz_pdev *pdev,
+    struct qwz_vif *arvif)
+{
+       /* Encode type, vdev id and pdev id into the TCL metadata. */
+       arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1);
+       arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
+           arvif->vdev_id);
+       arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
+           pdev->pdev_id);
+
+       /* The HTT extension valid bit defaults to 0. */
+       arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+
+       qwz_dp_update_vdev_search(sc, arvif);
+}
+
+/*
+ * Extract the TX status fields from a WBM release descriptor.
+ * Only descriptors released by the TQM carry a parsable status;
+ * for all other release sources only buf_rel_source is filled in.
+ */
+void
+qwz_dp_tx_status_parse(struct qwz_softc *sc, struct hal_wbm_release_ring *desc,
+    struct hal_tx_status *ts)
+{
+       ts->buf_rel_source = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+           desc->info0);
+       if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
+               return;
+
+       ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
+           desc->info0);
+       ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
+           desc->info1);
+       ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
+           desc->info1);
+       ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
+           desc->info2);
+       if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
+               ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
+       ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
+       ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
+       /* Rate stats are only meaningful when flagged valid. */
+       ts->rate_stats = (desc->rate_stats.info0 &
+           HAL_TX_RATE_STATS_INFO0_VALID) ? desc->rate_stats.info0 : 0;
+}
+
+/*
+ * Release the DMA map and mbuf of a transmitted frame and account
+ * for the freed slot.  Out-of-range MSDU ids are ignored.
+ */
+void
+qwz_dp_tx_free_txbuf(struct qwz_softc *sc, int msdu_id,
+    struct dp_tx_ring *tx_ring)
+{
+       struct qwz_tx_data *txd;
+
+       if (msdu_id >= sc->hw_params.tx_ring_size)
+               return;
+
+       txd = &tx_ring->data[msdu_id];
+
+       bus_dmamap_unload(sc->sc_dmat, txd->map);
+       m_freem(txd->m);
+       txd->m = NULL;
+
+       if (tx_ring->queued > 0)
+               tx_ring->queued--;
+}
+
+/*
+ * Finish an HTT TX completion.  The status information is currently
+ * unused; simply release the buffer of the completed MSDU.
+ */
+void
+qwz_dp_tx_htt_tx_complete_buf(struct qwz_softc *sc, struct dp_tx_ring *tx_ring,
+    struct qwz_dp_htt_wbm_tx_status *ts)
+{
+       qwz_dp_tx_free_txbuf(sc, ts->msdu_id, tx_ring);
+}
+
+/*
+ * Handle a TX completion whose buffer was released by the firmware
+ * (HTT completion path) rather than by the TQM.
+ */
+void
+qwz_dp_tx_process_htt_tx_complete(struct qwz_softc *sc, void *desc,
+    uint8_t mac_id, uint32_t msdu_id, struct dp_tx_ring *tx_ring)
+{
+       struct htt_tx_wbm_completion *status_desc;
+       struct qwz_dp_htt_wbm_tx_status ts = {0};
+       enum hal_wbm_htt_tx_comp_status wbm_status;
+
+       /* The HTT status words live at a fixed offset in the descriptor. */
+       status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
+
+       wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
+           status_desc->info0);
+
+       switch (wbm_status) {
+       case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
+       case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+       case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
+               /* Transmitted (OK) or given up by the firmware (DROP/TTL). */
+               ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
+               ts.msdu_id = msdu_id;
+               ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
+                   status_desc->info1);
+
+               if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
+                       ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
+                           status_desc->info2);
+               else
+                       ts.peer_id = HTT_INVALID_PEER_ID;
+
+               qwz_dp_tx_htt_tx_complete_buf(sc, tx_ring, &ts);
+               break;
+       case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
+       case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
+               qwz_dp_tx_free_txbuf(sc, msdu_id, tx_ring);
+               break;
+       case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
+               /* This event is to be handled only when the driver decides to
+                * use WDS offload functionality.
+                */
+               break;
+       default:
+               printf("%s: Unknown htt tx status %d\n",
+                   sc->sc_dev.dv_xname, wbm_status);
+               break;
+       }
+}
+
+/*
+ * Map a hardware rate code and preamble type to a net80211 legacy
+ * rate (in units of 500 Kbit/s) and to the matching index into the
+ * node's rate set.  Returns 0 on success, or EINVAL if the rate code
+ * is unknown or the rate is absent from the node's rate set.
+ */
+int
+qwz_mac_hw_ratecode_to_legacy_rate(struct ieee80211_node *ni, uint8_t hw_rc,
+    uint8_t preamble, uint8_t *rateidx, uint16_t *rate)
+{
+       struct ieee80211_rateset *rs = &ni->ni_rates;
+       int i;
+
+       if (preamble == WMI_RATE_PREAMBLE_CCK) {
+               /* Short and long preamble variants map to the same rate. */
+               hw_rc &= ~ATH12k_HW_RATECODE_CCK_SHORT_PREAM_MASK;
+               switch (hw_rc) {
+               case ATH12K_HW_RATE_CCK_LP_1M:
+                       *rate = 2;
+                       break;
+               case ATH12K_HW_RATE_CCK_LP_2M:
+               case ATH12K_HW_RATE_CCK_SP_2M:
+                       *rate = 4;
+                       break;
+               case ATH12K_HW_RATE_CCK_LP_5_5M:
+               case ATH12K_HW_RATE_CCK_SP_5_5M:
+                       *rate = 11;
+                       break;
+               case ATH12K_HW_RATE_CCK_LP_11M:
+               case ATH12K_HW_RATE_CCK_SP_11M:
+                       *rate = 22;
+                       break;
+               default:
+                       return EINVAL;
+               }
+       } else {
+               switch (hw_rc) {
+               case ATH12K_HW_RATE_OFDM_6M:
+                       *rate = 12;
+                       break;
+               case ATH12K_HW_RATE_OFDM_9M:
+                       *rate = 18;
+                       break;
+               case ATH12K_HW_RATE_OFDM_12M:
+                       *rate = 24;
+                       break;
+               case ATH12K_HW_RATE_OFDM_18M:
+                       *rate = 36;
+                       break;
+               case ATH12K_HW_RATE_OFDM_24M:
+                       *rate = 48;
+                       break;
+               case ATH12K_HW_RATE_OFDM_36M:
+                       *rate = 72;
+                       break;
+               case ATH12K_HW_RATE_OFDM_48M:
+                       *rate = 96;
+                       break;
+               case ATH12K_HW_RATE_OFDM_54M:
+                       /* 54 Mbit/s is 108 * 500 Kbit/s (was wrongly 104). */
+                       *rate = 108;
+                       break;
+               default:
+                       return EINVAL;
+               }
+       }
+
+       /* Find the index of this rate in the node's rate set. */
+       for (i = 0; i < rs->rs_nrates; i++) {
+               uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+               if (rval == *rate) {
+                       *rateidx = i;
+                       return 0;
+               }
+       }
+
+       return EINVAL;
+}
+
+/*
+ * Complete an MSDU released by the TQM: unmap and free the mbuf,
+ * derive the legacy TX rate from the descriptor rate stats to update
+ * the node's current rate, and drop the node reference held for the
+ * in-flight frame.
+ */
+void
+qwz_dp_tx_complete_msdu(struct qwz_softc *sc, struct dp_tx_ring *tx_ring,
+    uint32_t msdu_id, struct hal_tx_status *ts)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct qwz_tx_data *tx_data = &tx_ring->data[msdu_id];
+       uint8_t pkt_type, mcs, rateidx;
+       uint16_t rate;
+
+       if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM) {
+               /* Must not happen */
+               return;
+       }
+
+       bus_dmamap_unload(sc->sc_dmat, tx_data->map);
+       m_freem(tx_data->m);
+       tx_data->m = NULL;
+
+       /* Feed the rate the frame was actually sent at back to net80211. */
+       pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE, ts->rate_stats);
+       mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS, ts->rate_stats);
+       if (qwz_mac_hw_ratecode_to_legacy_rate(tx_data->ni, mcs, pkt_type,
+           &rateidx, &rate) == 0)
+               tx_data->ni->ni_txrate = rateidx;
+
+       ieee80211_release_node(ic, tx_data->ni);
+       tx_data->ni = NULL;
+
+       if (tx_ring->queued > 0)
+               tx_ring->queued--;
+}
+
+/* Advance a tx status FIFO index, wrapping at the FIFO size. */
+#define QWZ_TX_COMPL_NEXT(x)   (((x) + 1) % DP_TX_COMP_RING_SIZE)
+
+/*
+ * Reap the WBM TX completion ring backing the given TX ring: first
+ * copy status descriptors from the hardware ring into a local FIFO,
+ * then process each completion, and finally restart the interface
+ * output queue if ring space became available.
+ */
+int
+qwz_dp_tx_completion_handler(struct qwz_softc *sc, int ring_id)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct ifnet *ifp = &ic->ic_if;
+       struct qwz_dp *dp = &sc->dp;
+       int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
+       struct hal_srng *status_ring = &sc->hal.srng_list[hal_ring_id];
+       struct hal_tx_status ts = { 0 };
+       struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
+       uint32_t *desc;
+       uint32_t msdu_id;
+       uint8_t mac_id;
+#ifdef notyet
+       spin_lock_bh(&status_ring->lock);
+#endif
+       qwz_hal_srng_access_begin(sc, status_ring);
+
+       /* Phase 1: drain hardware ring entries into the tx_status FIFO. */
+       while ((QWZ_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
+               tx_ring->tx_status_tail) &&
+              (desc = qwz_hal_srng_dst_get_next_entry(sc, status_ring))) {
+               memcpy(&tx_ring->tx_status[tx_ring->tx_status_head], desc,
+                   sizeof(struct hal_wbm_release_ring));
+               tx_ring->tx_status_head =
+                   QWZ_TX_COMPL_NEXT(tx_ring->tx_status_head);
+       }
+#if 0
+       if (unlikely((ath12k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
+                    (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
+                     tx_ring->tx_status_tail))) {
+               /* TODO: Process pending tx_status messages when kfifo_is_full() */
+               ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
+       }
+#endif
+       qwz_hal_srng_access_end(sc, status_ring);
+#ifdef notyet
+       spin_unlock_bh(&status_ring->lock);
+#endif
+       /* Phase 2: process the statuses collected above. */
+       while (QWZ_TX_COMPL_NEXT(tx_ring->tx_status_tail) !=
+           tx_ring->tx_status_head) {
+               struct hal_wbm_release_ring *tx_status;
+               uint32_t desc_id;
+
+               tx_ring->tx_status_tail =
+                  QWZ_TX_COMPL_NEXT(tx_ring->tx_status_tail);
+               tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
+               qwz_dp_tx_status_parse(sc, tx_status, &ts);
+
+               /* The SW cookie encodes both the mac id and the msdu id. */
+               desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+                   tx_status->buf_addr_info.info1);
+               mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
+               if (mac_id >= MAX_RADIOS)
+                       continue;
+               msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
+               if (msdu_id >= sc->hw_params.tx_ring_size)
+                       continue;
+
+               /* Firmware-released frames take the HTT completion path. */
+               if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
+                       qwz_dp_tx_process_htt_tx_complete(sc,
+                           (void *)tx_status, mac_id, msdu_id, tx_ring);
+                       continue;
+               }
+#if 0
+               spin_lock(&tx_ring->tx_idr_lock);
+               msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
+               if (unlikely(!msdu)) {
+                       ath12k_warn(ab, "tx completion for unknown msdu_id %d\n",
+                                   msdu_id);
+                       spin_unlock(&tx_ring->tx_idr_lock);
+                       continue;
+               }
+
+               spin_unlock(&tx_ring->tx_idr_lock);
+               ar = ab->pdevs[mac_id].ar;
+
+               if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+                       wake_up(&ar->dp.tx_empty_waitq);
+#endif
+               qwz_dp_tx_complete_msdu(sc, tx_ring, msdu_id, &ts);
+       }
+
+       /* Restart transmit if this ring has room again. */
+       if (tx_ring->queued < sc->hw_params.tx_ring_size - 1) {
+               sc->qfullmsk &= ~(1 << ring_id);
+               if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
+                       ifq_clr_oactive(&ifp->if_snd);
+                       (*ifp->if_start)(ifp);
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Extract the 64-bit physical buffer address and the descriptor bank
+ * index (SW cookie) from a REO entrance buffer address descriptor.
+ */
+void
+qwz_hal_rx_reo_ent_paddr_get(struct qwz_softc *sc, void *desc, uint64_t *paddr,
+    uint32_t *desc_bank)
+{
+       struct ath12k_buffer_addr *ba = desc;
+       uint64_t lo, hi;
+
+       /* Reassemble the address from its low and high fields. */
+       lo = FIELD_GET(BUFFER_ADDR_INFO0_ADDR, ba->info0);
+       hi = FIELD_GET(BUFFER_ADDR_INFO1_ADDR, ba->info1);
+       *paddr = (hi << 32) | lo;
+
+       *desc_bank = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, ba->info1);
+}
+
+int
+qwz_hal_desc_reo_parse_err(struct qwz_softc *sc, uint32_t *rx_desc,
+    uint64_t *paddr, uint32_t *desc_bank)
+{
+       struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
+       enum hal_reo_dest_ring_push_reason push_reason;
+       enum hal_reo_dest_ring_error_code err_code;
+
+       push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+           desc->info0);
+       err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE,
+           desc->info0);
+#if 0
+       ab->soc_stats.reo_error[err_code]++;
+#endif
+       if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
+           push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+               printf("%s: expected error push reason code, received %d\n",
+                   sc->sc_dev.dv_xname, push_reason);
+               return EINVAL;
+       }
+
+       if (FIELD_GET(HAL_REO_DEST_RING_INFO0_BUFFER_TYPE, desc->info0) !=
+           HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
+               printf("%s: expected buffer type link_desc",
+                   sc->sc_dev.dv_xname);
+               return EINVAL;
+       }
+
+       qwz_hal_rx_reo_ent_paddr_get(sc, rx_desc, paddr, desc_bank);
+
+       return 0;
+}
+
+/*
+ * Walk an MSDU link descriptor: report the return buffer manager of the
+ * first entry, copy the SW cookie of each valid MSDU into msdu_cookies,
+ * and set *num_msdus to the number of valid entries (at most
+ * HAL_NUM_RX_MSDUS_PER_LINK_DESC).
+ */
+void
+qwz_hal_rx_msdu_link_info_get(void *link_desc, uint32_t *num_msdus,
+    uint32_t *msdu_cookies, enum hal_rx_buf_return_buf_manager *rbm)
+{
+	struct hal_rx_msdu_link *link = (struct hal_rx_msdu_link *)link_desc;
+	struct hal_rx_msdu_details *entry;
+	uint32_t n;
+
+	*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+	    link->msdu_link[0].buf_addr_info.info1);
+
+	for (n = 0; n < HAL_NUM_RX_MSDUS_PER_LINK_DESC; n++) {
+		entry = &link->msdu_link[n];
+
+		/* An all-zero buffer address marks the end of the list. */
+		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+		    entry->buf_addr_info.info0) == 0)
+			break;
+
+		msdu_cookies[n] = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+		    entry->buf_addr_info.info1);
+	}
+
+	*num_msdus = n;
+}
+
+/*
+ * Fill a WBM release ring entry that hands the given MSDU link
+ * descriptor back to hardware with the requested buffer manager action.
+ */
+void
+qwz_hal_rx_msdu_link_desc_set(struct qwz_softc *sc, void *desc,
+    void *link_desc, enum hal_wbm_rel_bm_act action)
+{
+	struct hal_wbm_release_ring *dst = desc;
+	struct hal_wbm_release_ring *src = link_desc;
+	uint32_t info0;
+
+	info0 = FIELD_PREP(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+	    HAL_WBM_REL_SRC_MODULE_SW);
+	info0 |= FIELD_PREP(HAL_WBM_RELEASE_INFO0_BM_ACTION, action);
+	info0 |= FIELD_PREP(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
+	    HAL_WBM_REL_DESC_TYPE_MSDU_LINK);
+
+	dst->buf_addr_info = src->buf_addr_info;
+	dst->info0 |= info0;
+}
+
+/*
+ * Return an RX link descriptor to hardware by queuing a release entry
+ * on the WBM descriptor release ring.  Returns 0 on success or ENOBUFS
+ * if no ring entry is available.
+ */
+int
+qwz_dp_rx_link_desc_return(struct qwz_softc *sc, uint32_t *link_desc,
+    enum hal_wbm_rel_bm_act action)
+{
+	struct qwz_dp *dp = &sc->dp;
+	struct hal_srng *srng;
+	uint32_t *entry;
+	int ret;
+
+	srng = &sc->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+#ifdef notyet
+	spin_lock_bh(&srng->lock);
+#endif
+	qwz_hal_srng_access_begin(sc, srng);
+
+	entry = qwz_hal_srng_src_get_next_entry(sc, srng);
+	if (entry != NULL) {
+		qwz_hal_rx_msdu_link_desc_set(sc, (void *)entry,
+		    (void *)link_desc, action);
+		ret = 0;
+	} else
+		ret = ENOBUFS;
+
+	qwz_hal_srng_access_end(sc, srng);
+#ifdef notyet
+	spin_unlock_bh(&srng->lock);
+#endif
+	return ret;
+}
+
+/*
+ * Reassembly of fragmented MPDUs is not implemented yet.  Returning
+ * ENOTSUP makes the caller recycle the link descriptor and drop the
+ * fragment.
+ */
+int
+qwz_dp_rx_frag_h_mpdu(struct qwz_softc *sc, struct mbuf *m,
+    uint32_t *ring_desc)
+{
+	printf("%s: not implemented\n", __func__);
+	return ENOTSUP;
+}
+
+/* Read the MSDU length field from the RX descriptor via hw-specific ops. */
+static inline uint16_t
+qwz_dp_rx_h_msdu_start_msdu_len(struct qwz_softc *sc, struct hal_rx_desc *desc)
+{
+	return sc->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
+}
+
+/*
+ * Reclaim the rx buffer identified by buf_id from the refill ring and
+ * either drop it or hand it to fragment reassembly.  The mbuf is always
+ * consumed here; the caller replenishes the ring afterwards.
+ */
+void
+qwz_dp_process_rx_err_buf(struct qwz_softc *sc, uint32_t *ring_desc,
+    int buf_id, int drop)
+{
+	struct qwz_pdev_dp *dp = &sc->pdev_dp;
+	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+	struct mbuf *m;
+	struct qwz_rx_data *rx_data;
+	struct hal_rx_desc *rx_desc;
+	uint16_t msdu_len;
+	uint32_t hal_rx_desc_sz = sc->hw_params.hal_desc_sz;
+
+	/* Ignore cookies that are out of range or already reclaimed. */
+	if (buf_id >= rx_ring->bufs_max || isset(rx_ring->freemap, buf_id))
+		return;
+
+	/* Take the mbuf out of the ring slot and mark the slot free. */
+	rx_data = &rx_ring->rx_data[buf_id];
+	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
+	m = rx_data->m;
+	rx_data->m = NULL;
+	setbit(rx_ring->freemap, buf_id);
+
+	if (drop) {
+		m_freem(m);
+		return;
+	}
+
+	/* Sanity-check the MSDU length reported by the rx descriptor. */
+	rx_desc = mtod(m, struct hal_rx_desc *);
+	msdu_len = qwz_dp_rx_h_msdu_start_msdu_len(sc, rx_desc);
+	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+#if 0
+		uint8_t *hdr_status = ath12k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
+		ath12k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
+		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", hdr_status,
+				sizeof(struct ieee80211_hdr));
+		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
+				sizeof(struct hal_rx_desc));
+#endif
+		m_freem(m);
+		return;
+	}
+
+	/*
+	 * If fragment handling fails (it is currently unimplemented),
+	 * hand the link descriptor back to the WBM idle list.
+	 */
+	if (qwz_dp_rx_frag_h_mpdu(sc, m, ring_desc)) {
+		qwz_dp_rx_link_desc_return(sc, ring_desc,
+		    HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+	}
+
+	m_freem(m);
+}
+
+int
+qwz_dp_process_rx_err(struct qwz_softc *sc)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct ifnet *ifp = &ic->ic_if;
+       uint32_t msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+       struct dp_link_desc_bank *link_desc_banks;
+       enum hal_rx_buf_return_buf_manager rbm;
+       int tot_n_bufs_reaped, ret, i;
+       int n_bufs_reaped[MAX_RADIOS] = {0};
+       struct dp_rxdma_ring *rx_ring;
+       struct dp_srng *reo_except;
+       uint32_t desc_bank, num_msdus;
+       struct hal_srng *srng;
+       struct qwz_dp *dp;
+       void *link_desc_va;
+       int buf_id, mac_id;
+       uint64_t paddr;
+       uint32_t *desc;
+       int is_frag;
+       uint8_t drop = 0;
+
+       tot_n_bufs_reaped = 0;
+
+       dp = &sc->dp;
+       reo_except = &dp->reo_except_ring;
+       link_desc_banks = dp->link_desc_banks;
+
+       srng = &sc->hal.srng_list[reo_except->ring_id];
+#ifdef notyet
+       spin_lock_bh(&srng->lock);
+#endif
+       qwz_hal_srng_access_begin(sc, srng);
+
+       while ((desc = qwz_hal_srng_dst_get_next_entry(sc, srng))) {
+               struct hal_reo_dest_ring *reo_desc =
+                   (struct hal_reo_dest_ring *)desc;
+#if 0
+               ab->soc_stats.err_ring_pkts++;
+#endif
+               ret = qwz_hal_desc_reo_parse_err(sc, desc, &paddr, &desc_bank);
+               if (ret) {
+                       printf("%s: failed to parse error reo desc %d\n",
+                           sc->sc_dev.dv_xname, ret);
+                       continue;
+               }
+               link_desc_va = link_desc_banks[desc_bank].vaddr +
+                   (paddr - link_desc_banks[desc_bank].paddr);
+               qwz_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
+                   msdu_cookies, &rbm);
+               if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
+                   rbm != HAL_RX_BUF_RBM_SW3_BM) {
+#if 0
+                       ab->soc_stats.invalid_rbm++;
+#endif
+                       printf("%s: invalid return buffer manager %d\n",
+                           sc->sc_dev.dv_xname, rbm);
+                       qwz_dp_rx_link_desc_return(sc, desc,
+                           HAL_WBM_REL_BM_ACT_REL_MSDU);
+                       continue;
+               }
+
+               is_frag = !!(reo_desc->rx_mpdu_info.info0 &
+                   RX_MPDU_DESC_INFO0_FRAG_FLAG);
+
+               /* Process only rx fragments with one msdu per link desc below,
+                * and drop msdu's indicated due to error reasons.
+                */
+               if (!is_frag || num_msdus > 1) {
+                       drop = 1;
+                       /* Return the link desc back to wbm idle list */
+                       qwz_dp_rx_link_desc_return(sc, desc,
+                          HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+               }
+
+               for (i = 0; i < num_msdus; i++) {
+                       buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+                           msdu_cookies[i]);
+
+                       mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
+                           msdu_cookies[i]);
+
+                       qwz_dp_process_rx_err_buf(sc, desc, buf_id, drop);
+                       n_bufs_reaped[mac_id]++;
+                       tot_n_bufs_reaped++;
+               }
+       }
+
+       qwz_hal_srng_access_end(sc, srng);
+#ifdef notyet
+       spin_unlock_bh(&srng->lock);
+#endif
+       for (i = 0; i < sc->num_radios; i++) {
+               if (!n_bufs_reaped[i])
+                       continue;
+
+               rx_ring = &sc->pdev_dp.rx_refill_buf_ring;
+
+               qwz_dp_rxbufs_replenish(sc, i, rx_ring, n_bufs_reaped[i],
+                   sc->hw_params.hal_params->rx_buf_rbm);
+       }
+
+       ifp->if_ierrors += tot_n_bufs_reaped;
+
+       return tot_n_bufs_reaped;
+}
+
+int
+qwz_hal_wbm_desc_parse_err(void *desc, struct hal_rx_wbm_rel_info *rel_info)
+{
+       struct hal_wbm_release_ring *wbm_desc = desc;
+       enum hal_wbm_rel_desc_type type;
+       enum hal_wbm_rel_src_module rel_src;
+       enum hal_rx_buf_return_buf_manager ret_buf_mgr;
+
+       type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE, wbm_desc->info0);
+
+       /* We expect only WBM_REL buffer type */
+       if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU)
+               return -EINVAL;
+
+       rel_src = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+           wbm_desc->info0);
+       if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
+           rel_src != HAL_WBM_REL_SRC_MODULE_REO)
+               return EINVAL;
+
+       ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+           wbm_desc->buf_addr_info.info1);
+       if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
+#if 0
+               ab->soc_stats.invalid_rbm++;
+#endif
+               return EINVAL;
+       }
+
+       rel_info->cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+           wbm_desc->buf_addr_info.info1);
+       rel_info->err_rel_src = rel_src;
+       if (rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
+               rel_info->push_reason = FIELD_GET(
+                   HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON, wbm_desc->info0);
+               rel_info->err_code = FIELD_GET(
+                   HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE, wbm_desc->info0);
+       } else {
+               rel_info->push_reason = FIELD_GET(
+                   HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON, wbm_desc->info0);
+               rel_info->err_code = FIELD_GET(
+                   HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE, wbm_desc->info0);
+       }
+
+       rel_info->first_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_FIRST_MSDU,
+           wbm_desc->info2);
+       rel_info->last_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_LAST_MSDU,
+           wbm_desc->info2);
+
+       return 0;
+}
+
+/*
+ * Handling of DESC_ADDR_ZERO ("null queue") errors is not implemented
+ * yet; ENOTSUP makes the caller drop the frame.
+ */
+int
+qwz_dp_rx_h_null_q_desc(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
+    struct qwz_rx_msdu_list *msdu_list)
+{
+	printf("%s: not implemented\n", __func__);
+	return ENOTSUP;
+}
+
+/*
+ * Handle an MSDU released by the REO with an error code.  Returns
+ * non-zero if the frame should be dropped, 0 if it may be delivered.
+ */
+int
+qwz_dp_rx_h_reo_err(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
+    struct qwz_rx_msdu_list *msdu_list)
+{
+	int drop;
+#if 0
+	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+#endif
+	if (msdu->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
+		/* Possibly still deliverable; let the handler decide. */
+		drop = qwz_dp_rx_h_null_q_desc(sc, msdu, msdu_list) ? 1 : 0;
+	} else {
+		/* TODO: Do not drop PN check failures in the driver;
+		 * it would be better to drop such packets in net80211
+		 * after incrementing the replay counters.  Review the
+		 * other error codes as well and report them to net80211
+		 * as appropriate.  For now everything else is dropped.
+		 */
+		drop = 1;
+	}
+
+	return drop;
+}
+
+/*
+ * Handle an MSDU released by RXDMA with an error code.  Counts TKIP MIC
+ * failures; every RXDMA error is currently dropped, so this always
+ * returns non-zero.
+ */
+int
+qwz_dp_rx_h_rxdma_err(struct qwz_softc *sc, struct qwz_rx_msdu *msdu)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+#if 0
+	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+#endif
+	if (msdu->err_code == HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR)
+		ic->ic_stats.is_rx_locmicfail++;
+
+	/* TODO: Review other rxdma error codes to check if anything is
+	 * worth reporting to net80211; until then, drop everything.
+	 */
+	return 1;
+}
+
+/*
+ * Dispatch an errored MSDU from the WBM release ring to the REO or
+ * RXDMA error handler.  The msdu's mbuf is either delivered to
+ * net80211 or freed; msdu->m is NULL afterwards in the drop case.
+ */
+void
+qwz_dp_rx_wbm_err(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
+    struct qwz_rx_msdu_list *msdu_list)
+{
+	int drop;
+
+	if (msdu->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO)
+		drop = qwz_dp_rx_h_reo_err(sc, msdu, msdu_list);
+	else if (msdu->err_rel_src == HAL_WBM_REL_SRC_MODULE_RXDMA)
+		drop = qwz_dp_rx_h_rxdma_err(sc, msdu);
+	else
+		drop = 1;	/* unknown release source; free the msdu */
+
+	if (drop) {
+		m_freem(msdu->m);
+		msdu->m = NULL;
+		return;
+	}
+
+	qwz_dp_rx_deliver_msdu(sc, msdu);
+}
+
+int
+qwz_dp_rx_process_wbm_err(struct qwz_softc *sc)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct ifnet *ifp = &ic->ic_if;
+       struct qwz_dp *dp = &sc->dp;
+       struct dp_rxdma_ring *rx_ring;
+       struct hal_rx_wbm_rel_info err_info;
+       struct hal_srng *srng;
+       struct qwz_rx_msdu_list msdu_list[MAX_RADIOS];
+       struct qwz_rx_msdu *msdu;
+       struct mbuf *m;
+       struct qwz_rx_data *rx_data;
+       uint32_t *rx_desc;
+       int idx, mac_id;
+       int num_buffs_reaped[MAX_RADIOS] = {0};
+       int total_num_buffs_reaped = 0;
+       int ret, i;
+
+       for (i = 0; i < sc->num_radios; i++)
+               TAILQ_INIT(&msdu_list[i]);
+
+       srng = &sc->hal.srng_list[dp->rx_rel_ring.ring_id];
+#ifdef notyet
+       spin_lock_bh(&srng->lock);
+#endif
+       qwz_hal_srng_access_begin(sc, srng);
+
+       while ((rx_desc = qwz_hal_srng_dst_get_next_entry(sc, srng))) {
+               ret = qwz_hal_wbm_desc_parse_err(rx_desc, &err_info);
+               if (ret) {
+                       printf("%s: failed to parse rx error in wbm_rel "
+                           "ring desc %d\n", sc->sc_dev.dv_xname, ret);
+                       continue;
+               }
+
+               idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
+               mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
+
+               if (mac_id >= MAX_RADIOS)
+                       continue;
+       
+               rx_ring = &sc->pdev_dp.rx_refill_buf_ring;
+               if (idx >= rx_ring->bufs_max || isset(rx_ring->freemap, idx))
+                       continue;
+
+               rx_data = &rx_ring->rx_data[idx];
+               bus_dmamap_unload(sc->sc_dmat, rx_data->map);
+               m = rx_data->m;
+               rx_data->m = NULL;
+               setbit(rx_ring->freemap, idx);
+
+               num_buffs_reaped[mac_id]++;
+               total_num_buffs_reaped++;
+
+               if (err_info.push_reason !=
+                   HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+                       m_freem(m);
+                       continue;
+               }
+
+               msdu = &rx_data->rx_msdu;
+               memset(&msdu->rxi, 0, sizeof(msdu->rxi));
+               msdu->m = m;
+               msdu->err_rel_src = err_info.err_rel_src;
+               msdu->err_code = err_info.err_code;
+               msdu->rx_desc = mtod(m, struct hal_rx_desc *);
+               TAILQ_INSERT_TAIL(&msdu_list[mac_id], msdu, entry);
+       }
+
+       qwz_hal_srng_access_end(sc, srng);
+#ifdef notyet
+       spin_unlock_bh(&srng->lock);
+#endif
+       if (!total_num_buffs_reaped)
+               goto done;
+
+       for (i = 0; i < sc->num_radios; i++) {
+               if (!num_buffs_reaped[i])
+                       continue;
+
+               rx_ring = &sc->pdev_dp.rx_refill_buf_ring;
+               qwz_dp_rxbufs_replenish(sc, i, rx_ring, num_buffs_reaped[i],
+                   sc->hw_params.hal_params->rx_buf_rbm);
+       }
+
+       for (i = 0; i < sc->num_radios; i++) {
+               while ((msdu = TAILQ_FIRST(msdu_list))) {
+                       TAILQ_REMOVE(msdu_list, msdu, entry);
+                       if (test_bit(ATH12K_CAC_RUNNING, sc->sc_flags)) {
+                               m_freem(msdu->m);
+                               msdu->m = NULL;
+                               continue;
+                       }
+                       qwz_dp_rx_wbm_err(sc, msdu, &msdu_list[i]);
+                       msdu->m = NULL;
+               }
+       }
+done:
+       ifp->if_ierrors += total_num_buffs_reaped;
+
+       return total_num_buffs_reaped;
+}
+
+/*
+ * Find the rx buffer carrying the end of the MSDU started by "first".
+ * For MSDUs that fit a single buffer this is "first" itself; otherwise
+ * scan the list for the first entry without the continuation flag.
+ * Returns NULL if the final buffer has not been received yet.
+ */
+struct qwz_rx_msdu *
+qwz_dp_rx_get_msdu_last_buf(struct qwz_rx_msdu_list *msdu_list,
+    struct qwz_rx_msdu *first)
+{
+	struct qwz_rx_msdu *cur;
+
+	if (first->is_continuation) {
+		TAILQ_FOREACH(cur, msdu_list, entry) {
+			if (!cur->is_continuation)
+				return cur;
+		}
+		return NULL;
+	}
+
+	return first;
+}
+
+/* Locate the rx_attention TLV inside the RX descriptor via hw-specific ops. */
+static inline void *
+qwz_dp_rx_get_attention(struct qwz_softc *sc, struct hal_rx_desc *desc)
+{
+	return sc->hw_params.hw_ops->rx_desc_get_attention(desc);
+}
+
+/*
+ * Return non-zero if the frame is multicast/broadcast according to the
+ * rx attention flags.  Only meaningful on the first MSDU of an MPDU,
+ * hence the first_msdu check.
+ */
+int
+qwz_dp_rx_h_attn_is_mcbc(struct qwz_softc *sc, struct hal_rx_desc *desc)
+{
+	struct rx_attention *attn = qwz_dp_rx_get_attention(sc, desc);
+
+	return qwz_dp_rx_h_msdu_end_first_msdu(sc, desc) &&
+		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
+		 le32toh(attn->info1)));
+}
+
+/* Read the L3 header padding byte count from the RX descriptor. */
+static inline uint8_t
+qwz_dp_rx_h_msdu_end_l3pad(struct qwz_softc *sc, struct hal_rx_desc *desc)
+{
+	return sc->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
+}
+
+/* Non-zero if hardware has finished DMA of this MSDU (msdu_done bit). */
+static inline int
+qwz_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
+{
+	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, le32toh(attn->info2));
+}
+
+/* Read the frequency/channel meta data word from the RX descriptor. */
+static inline uint32_t
+qwz_dp_rx_h_msdu_start_freq(struct qwz_softc *sc, struct hal_rx_desc *desc)
+{
+	return sc->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
+}
+
+/*
+ * Translate the error flags of an rx attention info1 word into the
+ * driver's DP_RX_MPDU_ERR_* bitmap.
+ */
+uint32_t
+qwz_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
+{
+	static const struct {
+		uint32_t attn_bit;
+		uint32_t err_bit;
+	} map[] = {
+		{ RX_ATTENTION_INFO1_FCS_ERR, DP_RX_MPDU_ERR_FCS },
+		{ RX_ATTENTION_INFO1_DECRYPT_ERR, DP_RX_MPDU_ERR_DECRYPT },
+		{ RX_ATTENTION_INFO1_TKIP_MIC_ERR, DP_RX_MPDU_ERR_TKIP_MIC },
+		{ RX_ATTENTION_INFO1_A_MSDU_ERROR, DP_RX_MPDU_ERR_AMSDU_ERR },
+		{ RX_ATTENTION_INFO1_OVERFLOW_ERR, DP_RX_MPDU_ERR_OVERFLOW },
+		{ RX_ATTENTION_INFO1_MSDU_LEN_ERR, DP_RX_MPDU_ERR_MSDU_LEN },
+		{ RX_ATTENTION_INFO1_MPDU_LEN_ERR, DP_RX_MPDU_ERR_MPDU_LEN },
+	};
+	uint32_t info = le32toh(attn->info1);
+	uint32_t errmap = 0;
+	size_t i;
+
+	for (i = 0; i < nitems(map); i++) {
+		if (info & map[i].attn_bit)
+			errmap |= map[i].err_bit;
+	}
+
+	return errmap;
+}
+
+/*
+ * Return the MSDU-length-error bit of the RX descriptor's error bitmap
+ * (non-zero if the reported MSDU length is invalid).
+ */
+int
+qwz_dp_rx_h_attn_msdu_len_err(struct qwz_softc *sc, struct hal_rx_desc *desc)
+{
+	struct rx_attention *attn = qwz_dp_rx_get_attention(sc, desc);
+
+	return qwz_dp_rx_h_attn_mpdu_err(attn) & DP_RX_MPDU_ERR_MSDU_LEN;
+}
+
+/* Non-zero if hardware reports the frame was successfully decrypted. */
+int
+qwz_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
+{
+	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
+	    le32toh(attn->info2)) == RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+/*
+ * Coalescing of MSDUs spanning multiple rx buffers is not implemented
+ * yet; ENOTSUP makes the caller drop such frames.
+ */
+int
+qwz_dp_rx_msdu_coalesce(struct qwz_softc *sc, struct qwz_rx_msdu_list *msdu_list,
+    struct qwz_rx_msdu *first, struct qwz_rx_msdu *last, uint8_t l3pad_bytes,
+    int msdu_len)
+{
+	printf("%s: not implemented\n", __func__);
+	return ENOTSUP;
+}
+
+/*
+ * Extract rate information from the RX descriptor into rxi.
+ * Not implemented yet; rxi is left untouched.
+ */
+void
+qwz_dp_rx_h_rate(struct qwz_softc *sc, struct hal_rx_desc *rx_desc,
+    struct ieee80211_rxinfo *rxi)
+{
+	/* TODO */
+}
+
+/*
+ * Fill in per-PPDU receive info (channel, and eventually rate) from the
+ * RX descriptor.
+ */
+void
+qwz_dp_rx_h_ppdu(struct qwz_softc *sc, struct hal_rx_desc *rx_desc,
+    struct ieee80211_rxinfo *rxi)
+{
+	uint32_t freq_info;
+
+	/* The low byte of the meta data word holds the channel number. */
+	freq_info = qwz_dp_rx_h_msdu_start_freq(sc, rx_desc);
+	rxi->rxi_chan = (uint8_t)(freq_info & 0xff);
+
+	qwz_dp_rx_h_rate(sc, rx_desc, rxi);
+}
+
+/*
+ * Undo "native wifi" decapsulation.
+ *
+ * This function will need to do some work once we are receiving
+ * aggregated frames.  For now, it needs to do nothing.
+ */
+void
+qwz_dp_rx_h_undecap_nwifi(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
+    uint8_t *first_hdr, enum hal_encrypt_type enctype)
+{
+	if (!msdu->is_first_msdu)
+		printf("%s: not implemented\n", __func__);
+}
+
+/*
+ * Undo "raw" decapsulation: strip the trailing FCS from unfragmented
+ * single-buffer MSDUs.  The disabled code below (from the Linux driver)
+ * additionally strips MIC/ICV/IV for decrypted frames.
+ */
+void
+qwz_dp_rx_h_undecap_raw(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
+    enum hal_encrypt_type enctype, int decrypted)
+{
+#if 0
+	struct ieee80211_hdr *hdr;
+	size_t hdr_len;
+	size_t crypto_len;
+#endif
+
+	/* NOTE(review): the first clause is redundant (implied by the
+	 * second); kept as-is to match the Linux driver. */
+	if (!msdu->is_first_msdu ||
+	    !(msdu->is_first_msdu && msdu->is_last_msdu))
+		return;
+
+	/* Strip the hardware-appended FCS. */
+	m_adj(msdu->m, -IEEE80211_CRC_LEN);
+#if 0
+	if (!decrypted)
+		return;
+
+	hdr = (void *)msdu->data;
+
+	/* Tail */
+	if (status->flag & RX_FLAG_IV_STRIPPED) {
+		skb_trim(msdu, msdu->len -
+			 ath12k_dp_rx_crypto_mic_len(ar, enctype));
+
+		skb_trim(msdu, msdu->len -
+			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
+	} else {
+		/* MIC */
+		if (status->flag & RX_FLAG_MIC_STRIPPED)
+			skb_trim(msdu, msdu->len -
+				 ath12k_dp_rx_crypto_mic_len(ar, enctype));
+
+		/* ICV */
+		if (status->flag & RX_FLAG_ICV_STRIPPED)
+			skb_trim(msdu, msdu->len -
+				 ath12k_dp_rx_crypto_icv_len(ar, enctype));
+	}
+
+	/* MMIC */
+	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+	    !ieee80211_has_morefrags(hdr->frame_control) &&
+	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
+		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
+
+	/* Head */
+	if (status->flag & RX_FLAG_IV_STRIPPED) {
+		hdr_len = ieee80211_hdrlen(hdr->frame_control);
+		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
+
+		memmove((void *)msdu->data + crypto_len,
+			(void *)msdu->data, hdr_len);
+		skb_pull(msdu, crypto_len);
+	}
+#endif
+}
+
+/* Return a pointer to the 802.11 header status area of the RX descriptor. */
+static inline uint8_t *
+qwz_dp_rx_h_80211_hdr(struct qwz_softc *sc, struct hal_rx_desc *desc)
+{
+	return sc->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
+}
+
+/*
+ * Return the encryption type of the MPDU, or HAL_ENCRYPT_TYPE_OPEN if
+ * the descriptor carries no valid encryption info.
+ */
+static inline enum hal_encrypt_type
+qwz_dp_rx_h_mpdu_start_enctype(struct qwz_softc *sc, struct hal_rx_desc *desc)
+{
+	if (!sc->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
+		return HAL_ENCRYPT_TYPE_OPEN;
+
+	return sc->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
+}
+
+/* Read the decapsulation type (DP_RX_DECAP_TYPE_*) from the RX descriptor. */
+static inline uint8_t
+qwz_dp_rx_h_msdu_start_decap_type(struct qwz_softc *sc, struct hal_rx_desc *desc)
+{
+	return sc->hw_params.hw_ops->rx_desc_get_decap_type(desc);
+}
+
+/*
+ * Undo the decapsulation hardware applied to the received MSDU.
+ * Only native-wifi and raw decap are handled; ethernet/802.3 decap
+ * (disabled below, from the Linux driver) falls through untouched.
+ */
+void
+qwz_dp_rx_h_undecap(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
+    struct hal_rx_desc *rx_desc, enum hal_encrypt_type enctype,
+    int decrypted)
+{
+	uint8_t *first_hdr;
+	uint8_t decap;
+
+	first_hdr = qwz_dp_rx_h_80211_hdr(sc, rx_desc);
+	decap = qwz_dp_rx_h_msdu_start_decap_type(sc, rx_desc);
+
+	switch (decap) {
+	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
+		qwz_dp_rx_h_undecap_nwifi(sc, msdu, first_hdr, enctype);
+		break;
+	case DP_RX_DECAP_TYPE_RAW:
+		qwz_dp_rx_h_undecap_raw(sc, msdu, enctype, decrypted);
+		break;
+#if 0
+	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
+		ehdr = (struct ethhdr *)msdu->data;
+
+		/* mac80211 allows fast path only for authorized STA */
+		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
+			ATH12K_SKB_RXCB(msdu)->is_eapol = true;
+			ath12k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+						   enctype, status);
+			break;
+		}
+
+		/* PN for mcast packets will be validated in mac80211;
+		 * remove eth header and add 802.11 header.
+		 */
+		if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
+			ath12k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+						   enctype, status);
+		break;
+	case DP_RX_DECAP_TYPE_8023:
+		/* TODO: Handle undecap for these formats */
+		break;
+#endif
+	}
+}
+
+/*
+ * Per-MPDU receive processing: determine encryption state, account
+ * error statistics, undo decapsulation and clear the PROTECTED bit on
+ * frames whose IV the hardware stripped.  Returns 0 if the frame may be
+ * delivered or EIO if the descriptor flagged any MPDU error.
+ */
+int
+qwz_dp_rx_h_mpdu(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
+    struct hal_rx_desc *rx_desc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	int fill_crypto_hdr = 0;
+	enum hal_encrypt_type enctype;
+	int is_decrypted = 0;
+#if 0
+	struct ath12k_skb_rxcb *rxcb;
+#endif
+	struct ieee80211_frame *wh;
+#if 0
+	struct ath12k_peer *peer;
+#endif
+	struct rx_attention *rx_attention;
+	uint32_t err_bitmap;
+
+	/* PN for multicast packets will be checked in net80211 */
+	fill_crypto_hdr = qwz_dp_rx_h_attn_is_mcbc(sc, rx_desc);
+	msdu->is_mcbc = fill_crypto_hdr;
+#if 0
+	if (rxcb->is_mcbc) {
+		rxcb->peer_id = ath12k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
+		rxcb->seq_no = ath12k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
+	}
+
+	spin_lock_bh(&ar->ab->base_lock);
+	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+	if (peer) {
+		if (rxcb->is_mcbc)
+			enctype = peer->sec_type_grp;
+		else
+			enctype = peer->sec_type;
+	} else {
+#endif
+		enctype = qwz_dp_rx_h_mpdu_start_enctype(sc, rx_desc);
+#if 0
+	}
+	spin_unlock_bh(&ar->ab->base_lock);
+#endif
+	/* Only trust the decrypt status when no error was flagged. */
+	rx_attention = qwz_dp_rx_get_attention(sc, rx_desc);
+	err_bitmap = qwz_dp_rx_h_attn_mpdu_err(rx_attention);
+	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
+		is_decrypted = qwz_dp_rx_h_attn_is_decrypted(rx_attention);
+#if 0
+	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
+	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+			     RX_FLAG_MMIC_ERROR |
+			     RX_FLAG_DECRYPTED |
+			     RX_FLAG_IV_STRIPPED |
+			     RX_FLAG_MMIC_STRIPPED);
+
+#endif
+	if (err_bitmap & DP_RX_MPDU_ERR_FCS) {
+		if (ic->ic_flags & IEEE80211_F_RSNON)
+			ic->ic_stats.is_rx_decryptcrc++;
+		else
+			ic->ic_stats.is_rx_decap++;
+	}
+
+	/* XXX Trusting firmware to handle Michael MIC counter-measures... */
+	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
+		ic->ic_stats.is_rx_locmicfail++;
+
+	if (err_bitmap & DP_RX_MPDU_ERR_DECRYPT)
+		ic->ic_stats.is_rx_wepfail++;
+
+	if (is_decrypted) {
+#if 0
+		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
+
+		if (fill_crypto_hdr)
+			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
+					RX_FLAG_ICV_STRIPPED;
+		else
+			rx_status->flag |= RX_FLAG_IV_STRIPPED |
+					   RX_FLAG_PN_VALIDATED;
+#endif
+		/* Tell net80211 the hardware already decrypted the frame. */
+		msdu->rxi.rxi_flags |= IEEE80211_RXI_HWDEC;
+	}
+#if 0
+	ath12k_dp_rx_h_csum_offload(ar, msdu);
+#endif
+	qwz_dp_rx_h_undecap(sc, msdu, rx_desc, enctype, is_decrypted);
+
+	if (is_decrypted && !fill_crypto_hdr &&
+	    qwz_dp_rx_h_msdu_start_decap_type(sc, rx_desc) !=
+	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+		/* Hardware has stripped the IV. */
+		wh = mtod(msdu->m, struct ieee80211_frame *);
+		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
+	}
+
+	return err_bitmap ? EIO : 0;
+}
+
+/*
+ * Validate and prepare a single received MSDU for delivery: verify the
+ * descriptor state, trim the hardware RX descriptor and padding off the
+ * mbuf, set the frame length, fill in receive info and run per-MPDU
+ * processing.  Returns 0 on success or an errno; on error the caller
+ * frees the mbuf.
+ */
+int
+qwz_dp_rx_process_msdu(struct qwz_softc *sc, struct qwz_rx_msdu *msdu,
+    struct qwz_rx_msdu_list *msdu_list)
+{
+	struct hal_rx_desc *rx_desc, *lrx_desc;
+	struct rx_attention *rx_attention;
+	struct qwz_rx_msdu *last_buf;
+	uint8_t l3_pad_bytes;
+	uint16_t msdu_len;
+	int ret;
+	uint32_t hal_rx_desc_sz = sc->hw_params.hal_desc_sz;
+
+	/* The end-of-MPDU TLVs live in the buffer holding the last MSDU. */
+	last_buf = qwz_dp_rx_get_msdu_last_buf(msdu_list, msdu);
+	if (!last_buf) {
+		DPRINTF("%s: No valid Rx buffer to access "
+		    "Atten/MSDU_END/MPDU_END tlvs\n", __func__);
+		return EIO;
+	}
+
+	rx_desc = mtod(msdu->m, struct hal_rx_desc *);
+	if (qwz_dp_rx_h_attn_msdu_len_err(sc, rx_desc)) {
+		DPRINTF("%s: msdu len not valid\n", __func__);
+		return EIO;
+	}
+
+	lrx_desc = mtod(last_buf->m, struct hal_rx_desc *);
+	rx_attention = qwz_dp_rx_get_attention(sc, lrx_desc);
+	if (!qwz_dp_rx_h_attn_msdu_done(rx_attention)) {
+		DPRINTF("%s: msdu_done bit in attention is not set\n",
+		    __func__);
+		return EIO;
+	}
+
+	msdu->rx_desc = rx_desc;
+	msdu_len = qwz_dp_rx_h_msdu_start_msdu_len(sc, rx_desc);
+	l3_pad_bytes = qwz_dp_rx_h_msdu_end_l3pad(sc, lrx_desc);
+
+	if (msdu->is_frag) {
+		/* Fragments carry no L3 padding; strip the descriptor only. */
+		m_adj(msdu->m, hal_rx_desc_sz);
+		msdu->m->m_len = msdu->m->m_pkthdr.len = msdu_len;
+	} else if (!msdu->is_continuation) {
+		/* Single-buffer MSDU: bound-check and strip desc + padding. */
+		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+#if 0
+			uint8_t *hdr_status;
+
+			hdr_status = ath12k_dp_rx_h_80211_hdr(ab, rx_desc);
+#endif
+			DPRINTF("%s: invalid msdu len %u\n",
+			    __func__, msdu_len);
+#if 0
+			ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", hdr_status,
+					sizeof(struct ieee80211_hdr));
+			ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
+					sizeof(struct hal_rx_desc));
+#endif
+			return EINVAL;
+		}
+		m_adj(msdu->m, hal_rx_desc_sz + l3_pad_bytes);
+		msdu->m->m_len = msdu->m->m_pkthdr.len = msdu_len;
+	} else {
+		/* MSDU spans multiple buffers; coalescing not supported yet. */
+		ret = qwz_dp_rx_msdu_coalesce(sc, msdu_list, msdu, last_buf,
+		    l3_pad_bytes, msdu_len);
+		if (ret) {
+			DPRINTF("%s: failed to coalesce msdu rx buffer%d\n",
+			    __func__, ret);
+			return ret;
+		}
+	}
+
+	memset(&msdu->rxi, 0, sizeof(msdu->rxi));
+	qwz_dp_rx_h_ppdu(sc, rx_desc, &msdu->rxi);
+
+	return qwz_dp_rx_h_mpdu(sc, msdu, rx_desc);
+}
+
+/*
+ * Hand a fully processed MSDU to net80211.  The mbuf is consumed by
+ * ieee80211_input(); callers must not touch msdu->m afterwards.
+ */
+void
+qwz_dp_rx_deliver_msdu(struct qwz_softc *sc, struct qwz_rx_msdu *msdu)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ifnet *ifp = &ic->ic_if;
+	struct ieee80211_frame *wh;
+	struct ieee80211_node *ni;
+
+	wh = mtod(msdu->m, struct ieee80211_frame *);
+	ni = ieee80211_find_rxnode(ic, wh);
+
+#if NBPFILTER > 0
+	/* Feed radiotap-capable packet sniffers first. */
+	if (sc->sc_drvbpf != NULL) {
+		struct qwz_rx_radiotap_header *tap = &sc->sc_rxtap;
+
+		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
+		    msdu->m, BPF_DIRECTION_IN);
+	}
+#endif
+	ieee80211_input(ifp, msdu->m, ni, &msdu->rxi);
+	ieee80211_release_node(ic, ni);
+}
+
+/*
+ * Drain msdu_list, delivering each MSDU to net80211 and freeing any
+ * MSDU that fails processing.  Every list entry's mbuf is consumed.
+ */
+void
+qwz_dp_rx_process_received_packets(struct qwz_softc *sc,
+    struct qwz_rx_msdu_list *msdu_list, int mac_id)
+{
+	struct qwz_rx_msdu *msdu;
+	int err;
+
+	while (!TAILQ_EMPTY(msdu_list)) {
+		msdu = TAILQ_FIRST(msdu_list);
+		TAILQ_REMOVE(msdu_list, msdu, entry);
+
+		err = qwz_dp_rx_process_msdu(sc, msdu, msdu_list);
+		if (err) {
+			DNPRINTF(QWZ_D_MAC, "Unable to process msdu: %d", err);
+			m_freem(msdu->m);
+		} else
+			qwz_dp_rx_deliver_msdu(sc, msdu);
+
+		msdu->m = NULL;
+	}
+}
+
+/*
+ * Reap completed RX descriptors from one REO destination ring.
+ * Per-MSDU metadata is copied out of each descriptor, the MSDUs are
+ * queued per radio and then processed and delivered, and finally the
+ * RX buffer ring is replenished with fresh mbufs.
+ * Returns the number of complete MSDUs reaped (0 if none).
+ */
+int
+qwz_dp_process_rx(struct qwz_softc *sc, int ring_id)
+{
+       struct qwz_dp *dp = &sc->dp;
+       struct qwz_pdev_dp *pdev_dp = &sc->pdev_dp;
+       struct dp_rxdma_ring *rx_ring;
+       int num_buffs_reaped[MAX_RADIOS] = {0};
+       struct qwz_rx_msdu_list msdu_list[MAX_RADIOS];
+       struct qwz_rx_msdu *msdu;
+       struct mbuf *m;
+       struct qwz_rx_data *rx_data;
+       int total_msdu_reaped = 0;
+       struct hal_srng *srng;
+       int done = 0;
+       int idx;
+       unsigned int mac_id;
+       struct hal_reo_dest_ring *desc;
+       enum hal_reo_dest_ring_push_reason push_reason;
+       uint32_t cookie;
+       int i;
+
+       for (i = 0; i < MAX_RADIOS; i++)
+               TAILQ_INIT(&msdu_list[i]);
+
+       srng = &sc->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
+#ifdef notyet
+       spin_lock_bh(&srng->lock);
+#endif
+try_again:
+       qwz_hal_srng_access_begin(sc, srng);
+
+       while ((desc = (struct hal_reo_dest_ring *)
+           qwz_hal_srng_dst_get_next_entry(sc, srng))) {
+               /* The SW cookie encodes the radio and the rx buffer slot. */
+               cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+                   desc->buf_addr_info.info1);
+               idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+               mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
+
+               if (mac_id >= MAX_RADIOS)
+                       continue;
+
+               /* Ignore cookies pointing at out-of-range or free slots. */
+               rx_ring = &pdev_dp->rx_refill_buf_ring;
+               if (idx >= rx_ring->bufs_max || isset(rx_ring->freemap, idx))
+                       continue;
+
+               /* Take the mbuf out of the slot and mark the slot free. */
+               rx_data = &rx_ring->rx_data[idx];
+               bus_dmamap_unload(sc->sc_dmat, rx_data->map);
+               m = rx_data->m;
+               rx_data->m = NULL;
+               setbit(rx_ring->freemap, idx);
+
+               num_buffs_reaped[mac_id]++;
+
+               /* Drop frames the hardware did not push for normal routing. */
+               push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+                   desc->info0);
+               if (push_reason !=
+                   HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+                       m_freem(m);
+#if 0
+                       sc->soc_stats.hal_reo_error[
+                           dp->reo_dst_ring[ring_id].ring_id]++;
+#endif
+                       continue;
+               }
+
+               /* Copy per-MSDU metadata out of the ring descriptor. */
+               msdu = &rx_data->rx_msdu;
+               msdu->m = m;
+               msdu->is_first_msdu = !!(desc->rx_msdu_info.info0 &
+                   RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+               msdu->is_last_msdu = !!(desc->rx_msdu_info.info0 &
+                   RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+               msdu->is_continuation = !!(desc->rx_msdu_info.info0 &
+                   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
+               msdu->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
+                   desc->rx_mpdu_info.meta_data);
+               msdu->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
+                   desc->rx_mpdu_info.info0);
+               msdu->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
+                   desc->info0);
+
+               msdu->mac_id = mac_id;
+               TAILQ_INSERT_TAIL(&msdu_list[mac_id], msdu, entry);
+
+               /* Only count an MSDU once its final buffer has arrived. */
+               if (msdu->is_continuation) {
+                       done = 0;
+               } else {
+                       total_msdu_reaped++;
+                       done = 1;
+               }
+       }
+
+       /* Hw might have updated the head pointer after we cached it.
+        * In this case, even though there are entries in the ring we'll
+        * get rx_desc NULL. Give the read another try with updated cached
+        * head pointer so that we can reap complete MPDU in the current
+        * rx processing.
+        */
+       if (!done && qwz_hal_srng_dst_num_free(sc, srng, 1)) {
+               qwz_hal_srng_access_end(sc, srng);
+               goto try_again;
+       }
+
+       qwz_hal_srng_access_end(sc, srng);
+#ifdef notyet
+       spin_unlock_bh(&srng->lock);
+#endif
+       if (!total_msdu_reaped)
+               goto exit;
+
+       /* Deliver what was reaped and refill the rx buffer ring per radio. */
+       for (i = 0; i < sc->num_radios; i++) {
+               if (!num_buffs_reaped[i])
+                       continue;
+
+               qwz_dp_rx_process_received_packets(sc, &msdu_list[i], i);
+
+               rx_ring = &sc->pdev_dp.rx_refill_buf_ring;
+
+               qwz_dp_rxbufs_replenish(sc, i, rx_ring, num_buffs_reaped[i],
+                   sc->hw_params.hal_params->rx_buf_rbm);
+       }
+exit:
+       return total_msdu_reaped;
+}
+
+/*
+ * Allocate and DMA-map a fresh monitor status buffer and install it in
+ * a free slot of the given rxdma ring.
+ * On success the slot index is returned via 'buf_idx', the slot is
+ * marked in-use in the ring's freemap, and the mapped mbuf is returned.
+ * Returns NULL on any failure (no mbuf, no free slot, or DMA error).
+ */
+struct mbuf *
+qwz_dp_rx_alloc_mon_status_buf(struct qwz_softc *sc,
+    struct dp_rxdma_ring *rx_ring, int *buf_idx)
+{
+       struct mbuf *m;
+       struct qwz_rx_data *rx_data;
+       const size_t size = DP_RX_BUFFER_SIZE;
+       int ret, idx;
+
+       m = m_gethdr(M_DONTWAIT, MT_DATA);
+       if (m == NULL)
+               return NULL;
+
+       /* Attach cluster storage large enough for a status buffer. */
+       if (size <= MCLBYTES)
+               MCLGET(m, M_DONTWAIT);
+       else
+               MCLGETL(m, M_DONTWAIT, size);
+       if ((m->m_flags & M_EXT) == 0)
+               goto fail_free_mbuf;
+
+       m->m_len = m->m_pkthdr.len = size;
+       idx = qwz_next_free_rxbuf_idx(rx_ring);
+       if (idx == -1)
+               goto fail_free_mbuf;
+
+       rx_data = &rx_ring->rx_data[idx];
+       /* A free slot should never still hold an mbuf; bail out if so. */
+       if (rx_data->m != NULL)
+               goto fail_free_mbuf;
+
+       /* DMA maps are created lazily and reused for the ring's lifetime. */
+       if (rx_data->map == NULL) {
+               ret = bus_dmamap_create(sc->sc_dmat, size, 1,
+                   size, 0, BUS_DMA_NOWAIT, &rx_data->map);
+               if (ret)
+                       goto fail_free_mbuf;
+       }
+       
+       ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, m,
+           BUS_DMA_READ | BUS_DMA_NOWAIT);
+       if (ret) {
+               printf("%s: can't map mbuf (error %d)\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto fail_free_mbuf;
+       }
+
+       *buf_idx = idx;
+       rx_data->m = m;
+       /* Mark the slot as occupied. */
+       clrbit(rx_ring->freemap, idx);
+       return m;
+
+fail_free_mbuf:
+       m_freem(m);
+       return NULL;
+}
+
+/*
+ * Reap entries from the monitor status refill ring of one mac.
+ * Status buffers whose TLV header is tagged HAL_RX_STATUS_BUFFER_DONE
+ * are unloaded and queued on 'ml' for the caller to parse; every ring
+ * entry consumed is immediately refilled with a fresh buffer.
+ * Returns the number of ring entries processed.
+ */
+int
+qwz_dp_rx_reap_mon_status_ring(struct qwz_softc *sc, int mac_id,
+    struct mbuf_list *ml)
+{
+       const struct ath12k_hw_hal_params *hal_params;
+       struct qwz_pdev_dp *dp;
+       struct dp_rxdma_ring *rx_ring;
+       struct qwz_mon_data *pmon;
+       struct hal_srng *srng;
+       void *rx_mon_status_desc;
+       struct mbuf *m;
+       struct qwz_rx_data *rx_data;
+       struct hal_tlv_hdr *tlv;
+       uint32_t cookie;
+       int buf_idx, srng_id;
+       uint64_t paddr;
+       uint8_t rbm;
+       int num_buffs_reaped = 0;
+
+       dp = &sc->pdev_dp;
+       pmon = &dp->mon_data;
+
+       srng_id = sc->hw_params.hw_ops->mac_id_to_srng_id(&sc->hw_params,
+           mac_id);
+       rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
+
+       srng = &sc->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+#ifdef notyet
+       spin_lock_bh(&srng->lock);
+#endif
+       qwz_hal_srng_access_begin(sc, srng);
+       while (1) {
+               rx_mon_status_desc = qwz_hal_srng_src_peek(sc, srng);
+               if (!rx_mon_status_desc) {
+                       pmon->buf_state = DP_MON_STATUS_REPLINISH;
+                       break;
+               }
+
+               qwz_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
+                   &cookie, &rbm);
+               if (paddr) {
+                       buf_idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+                       /* Ignore cookies pointing at bogus or free slots. */
+                       if (buf_idx >= rx_ring->bufs_max ||
+                           isset(rx_ring->freemap, buf_idx)) {
+                               pmon->buf_state = DP_MON_STATUS_REPLINISH;
+                               goto move_next;
+                       }
+
+                       rx_data = &rx_ring->rx_data[buf_idx];
+
+                       bus_dmamap_sync(sc->sc_dmat, rx_data->map, 0,
+                           rx_data->m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
+
+                       tlv = mtod(rx_data->m, struct hal_tlv_hdr *);
+                       if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
+                           HAL_RX_STATUS_BUFFER_DONE) {
+                               /* If done status is missing, hold onto status
+                                * ring until status is done for this status
+                                * ring buffer.
+                                * Keep HP in mon_status_ring unchanged,
+                                * and break from here.
+                                * Check status for same buffer for next time
+                                */
+                               pmon->buf_state = DP_MON_STATUS_NO_DMA;
+                               break;
+                       }
+
+                       /* Detach the completed buffer and free its slot. */
+                       bus_dmamap_unload(sc->sc_dmat, rx_data->map);
+                       m = rx_data->m;
+                       rx_data->m = NULL;
+                       setbit(rx_ring->freemap, buf_idx);
+#if 0
+                       if (ab->hw_params.full_monitor_mode) {
+                               ath12k_dp_rx_mon_update_status_buf_state(pmon, tlv);
+                               if (paddr == pmon->mon_status_paddr)
+                                       pmon->buf_state = DP_MON_STATUS_MATCH;
+                       }
+#endif
+                       ml_enqueue(ml, m);
+               } else {
+                       pmon->buf_state = DP_MON_STATUS_REPLINISH;
+               }
+move_next:
+               /* Refill the entry we just consumed with a fresh buffer. */
+               m = qwz_dp_rx_alloc_mon_status_buf(sc, rx_ring, &buf_idx);
+               if (!m) {
+                       /* Out of buffers: park a null address in the entry. */
+                       hal_params = sc->hw_params.hal_params;
+                       qwz_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
+                           hal_params->rx_buf_rbm);
+                       num_buffs_reaped++;
+                       break;
+               }
+               rx_data = &rx_ring->rx_data[buf_idx];
+
+               cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+                   FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_idx);
+
+               paddr = rx_data->map->dm_segs[0].ds_addr;
+               qwz_hal_rx_buf_addr_info_set(rx_mon_status_desc, paddr,
+                   cookie, sc->hw_params.hal_params->rx_buf_rbm);
+               qwz_hal_srng_src_get_next_entry(sc, srng);
+               num_buffs_reaped++;
+       }
+       qwz_hal_srng_access_end(sc, srng);
+#ifdef notyet
+       spin_unlock_bh(&srng->lock);
+#endif
+       return num_buffs_reaped;
+}
+
+/*
+ * Parse one monitor status buffer into 'ppdu_info'.
+ * Not implemented yet in this port: it always reports the PPDU as not
+ * done, so qwz_dp_rx_process_mon_status() currently frees every status
+ * buffer without producing statistics.
+ */
+enum hal_rx_mon_status
+qwz_hal_rx_parse_mon_status(struct qwz_softc *sc,
+    struct hal_rx_mon_ppdu_info *ppdu_info, struct mbuf *m)
+{
+       /* TODO */
+       return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+}
+
+/*
+ * Reap and parse monitor status buffers for one mac.
+ * Buffers are pulled off the status ring and fed to the (still stubbed)
+ * status parser; the disabled sections preserve the Linux ath12k logic
+ * for pktlog and per-peer statistics for later porting.
+ * Returns the number of status buffers reaped.
+ */
+int
+qwz_dp_rx_process_mon_status(struct qwz_softc *sc, int mac_id)
+{
+       enum hal_rx_mon_status hal_status;
+       struct mbuf *m;
+       struct mbuf_list ml = MBUF_LIST_INITIALIZER();
+#if 0
+       struct ath12k_peer *peer;
+       struct ath12k_sta *arsta;
+#endif
+       int num_buffs_reaped = 0;
+#if 0
+       uint32_t rx_buf_sz;
+       uint16_t log_type;
+#endif
+       struct qwz_mon_data *pmon = (struct qwz_mon_data *)&sc->pdev_dp.mon_data;
+#if  0
+       struct qwz_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+#endif
+       struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+
+       num_buffs_reaped = qwz_dp_rx_reap_mon_status_ring(sc, mac_id, &ml);
+       if (!num_buffs_reaped)
+               goto exit;
+
+       memset(ppdu_info, 0, sizeof(*ppdu_info));
+       ppdu_info->peer_id = HAL_INVALID_PEERID;
+
+       while ((m = ml_dequeue(&ml))) {
+#if 0
+               if (ath12k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+                       log_type = ATH12K_PKTLOG_TYPE_LITE_RX;
+                       rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+               } else if (ath12k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
+                       log_type = ATH12K_PKTLOG_TYPE_RX_STATBUF;
+                       rx_buf_sz = DP_RX_BUFFER_SIZE;
+               } else {
+                       log_type = ATH12K_PKTLOG_TYPE_INVALID;
+                       rx_buf_sz = 0;
+               }
+
+               if (log_type != ATH12K_PKTLOG_TYPE_INVALID)
+                       trace_ath12k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+#endif
+
+               /* Start each buffer with a clean PPDU info state. */
+               memset(ppdu_info, 0, sizeof(*ppdu_info));
+               ppdu_info->peer_id = HAL_INVALID_PEERID;
+               hal_status = qwz_hal_rx_parse_mon_status(sc, ppdu_info, m);
+#if 0
+               if (test_bit(ATH12K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+                   pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+                   hal_status == HAL_TLV_STATUS_PPDU_DONE) {
+                       rx_mon_stats->status_ppdu_done++;
+                       pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
+                       ath12k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
+                       pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+               }
+#endif
+               /* With the parser stubbed this currently frees every buffer. */
+               if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
+                   hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+                       m_freem(m);
+                       continue;
+               }
+#if 0
+               rcu_read_lock();
+               spin_lock_bh(&ab->base_lock);
+               peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+
+               if (!peer || !peer->sta) {
+                       ath12k_dbg(ab, ATH12K_DBG_DATA,
+                                  "failed to find the peer with peer_id %d\n",
+                                  ppdu_info->peer_id);
+                       goto next_skb;
+               }
+
+               arsta = (struct ath12k_sta *)peer->sta->drv_priv;
+               ath12k_dp_rx_update_peer_stats(arsta, ppdu_info);
+
+               if (ath12k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
+                       trace_ath12k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+next_skb:
+               spin_unlock_bh(&ab->base_lock);
+               rcu_read_unlock();
+
+               dev_kfree_skb_any(skb);
+               memset(ppdu_info, 0, sizeof(*ppdu_info));
+               ppdu_info->peer_id = HAL_INVALID_PEERID;
+#endif
+       }
+exit:
+       return num_buffs_reaped;
+}
+
+/*
+ * Process the monitor rings belonging to one mac.  Full-monitor mode
+ * is not ported yet (see the disabled Linux code), so this currently
+ * only services the status ring.
+ * Returns the number of status buffers processed.
+ */
+int
+qwz_dp_rx_process_mon_rings(struct qwz_softc *sc, int mac_id)
+{
+#if 0
+       if (test_bit(ATH12K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+           ab->hw_params.full_monitor_mode)
+               return ath12k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
+#endif
+       return qwz_dp_rx_process_mon_status(sc, mac_id);
+}
+
+/*
+ * timeout(9) handler: poll every monitor ring of this device, then
+ * re-arm the reap timer.
+ */
+void
+qwz_dp_service_mon_ring(void *arg)
+{
+       struct qwz_softc *sc = arg;
+       int ring, nrings;
+
+       nrings = sc->hw_params.num_rxmda_per_pdev;
+       for (ring = 0; ring < nrings; ring++)
+               qwz_dp_rx_process_mon_rings(sc, ring);
+
+       /* Schedule the next reap pass. */
+       timeout_add(&sc->mon_reap_timer, ATH12K_MON_TIMER_INTERVAL);
+}
+
+/*
+ * Drain the RXDMA error destination ring of one mac.
+ * Each ring entry references a link descriptor listing MSDU buffers
+ * that hit an RXDMA error; those buffers are freed, their rx ring
+ * slots reclaimed and replenished, and the link descriptor is returned
+ * to the idle list.
+ * Returns the number of buffers freed (also counted as input errors).
+ */
+int
+qwz_dp_process_rxdma_err(struct qwz_softc *sc, int mac_id)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct ifnet *ifp = &ic->ic_if;
+       struct dp_srng *err_ring;
+       struct dp_rxdma_ring *rx_ring;
+       struct dp_link_desc_bank *link_desc_banks = sc->dp.link_desc_banks;
+       struct hal_srng *srng;
+       uint32_t msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+       enum hal_rx_buf_return_buf_manager rbm;
+       enum hal_reo_entr_rxdma_ecode rxdma_err_code;
+       struct qwz_rx_data *rx_data;
+       struct hal_reo_entrance_ring *entr_ring;
+       void *desc;
+       int num_buf_freed = 0;
+       uint64_t paddr;
+       uint32_t desc_bank;
+       void *link_desc_va;
+       int num_msdus;
+       int i, idx, srng_id;
+
+       srng_id = sc->hw_params.hw_ops->mac_id_to_srng_id(&sc->hw_params,
+           mac_id);
+       err_ring = &sc->pdev_dp.rxdma_err_dst_ring[srng_id];
+       rx_ring = &sc->pdev_dp.rx_refill_buf_ring;
+
+       srng = &sc->hal.srng_list[err_ring->ring_id];
+#ifdef notyet
+       spin_lock_bh(&srng->lock);
+#endif
+       qwz_hal_srng_access_begin(sc, srng);
+
+       while ((desc = qwz_hal_srng_dst_get_next_entry(sc, srng))) {
+               qwz_hal_rx_reo_ent_paddr_get(sc, desc, &paddr, &desc_bank);
+
+               entr_ring = (struct hal_reo_entrance_ring *)desc;
+               rxdma_err_code = FIELD_GET(
+                   HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
+                   entr_ring->info1);
+#if 0
+               ab->soc_stats.rxdma_error[rxdma_err_code]++;
+#endif
+               /* Translate the descriptor's physical address into our VA. */
+               link_desc_va = link_desc_banks[desc_bank].vaddr +
+                    (paddr - link_desc_banks[desc_bank].paddr);
+               qwz_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
+                   msdu_cookies, &rbm);
+
+               for (i = 0; i < num_msdus; i++) {
+                       idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+                           msdu_cookies[i]);
+                       /* Skip cookies pointing at bogus or free slots. */
+                       if (idx >= rx_ring->bufs_max ||
+                           isset(rx_ring->freemap, idx))
+                               continue;
+
+                       rx_data = &rx_ring->rx_data[idx];
+
+                       /* Drop the errored buffer and free its slot. */
+                       bus_dmamap_unload(sc->sc_dmat, rx_data->map);
+                       m_freem(rx_data->m);
+                       rx_data->m = NULL;
+                       setbit(rx_ring->freemap, idx);
+
+                       num_buf_freed++;
+               }
+
+               qwz_dp_rx_link_desc_return(sc, desc,
+                   HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+       }
+
+       qwz_hal_srng_access_end(sc, srng);
+#ifdef notyet
+       spin_unlock_bh(&srng->lock);
+#endif
+       if (num_buf_freed)
+               qwz_dp_rxbufs_replenish(sc, mac_id, rx_ring, num_buf_freed,
+                   sc->hw_params.hal_params->rx_buf_rbm);
+
+       ifp->if_ierrors += num_buf_freed;
+
+       return num_buf_freed;
+}
+
+/*
+ * Decode a REO "get queue stats" status TLV into the generic status
+ * header (command number and execution status).  The detailed per-queue
+ * statistics dump from the Linux ath12k driver is kept below, disabled,
+ * for reference during the port.
+ */
+void
+qwz_hal_reo_status_queue_stats(struct qwz_softc *sc, uint32_t *reo_desc,
+    struct hal_reo_status *status)
+{
+       struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+       struct hal_reo_get_queue_stats_status *desc =
+           (struct hal_reo_get_queue_stats_status *)tlv->value;
+
+       status->uniform_hdr.cmd_num =
+           FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
+       status->uniform_hdr.cmd_status =
+           FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
+#if 0
+       ath12k_dbg(ab, ATH12K_DBG_HAL, "Queue stats status:\n");
+       ath12k_dbg(ab, ATH12K_DBG_HAL, "header: cmd_num %d status %d\n",
+                  status->uniform_hdr.cmd_num,
+                  status->uniform_hdr.cmd_status);
+       ath12k_dbg(ab, ATH12K_DBG_HAL, "ssn %ld cur_idx %ld\n",
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN,
+                            desc->info0),
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX,
+                            desc->info0));
+       ath12k_dbg(ab, ATH12K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
+                  desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
+       ath12k_dbg(ab, ATH12K_DBG_HAL,
+                  "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
+                  desc->last_rx_enqueue_timestamp,
+                  desc->last_rx_dequeue_timestamp);
+       ath12k_dbg(ab, ATH12K_DBG_HAL,
+                  "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
+                  desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
+                  desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
+                  desc->rx_bitmap[6], desc->rx_bitmap[7]);
+       ath12k_dbg(ab, ATH12K_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT,
+                            desc->info1),
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT,
+                            desc->info1));
+       ath12k_dbg(ab, ATH12K_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT,
+                            desc->info2),
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT,
+                            desc->info2),
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT,
+                            desc->info2));
+       ath12k_dbg(ab, ATH12K_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT,
+                            desc->info3),
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT,
+                            desc->info3));
+       ath12k_dbg(ab, ATH12K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
+                  desc->num_mpdu_frames, desc->num_msdu_frames,
+                  desc->total_bytes);
+       ath12k_dbg(ab, ATH12K_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU,
+                            desc->info4),
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K,
+                            desc->info4),
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT,
+                            desc->info4));
+       ath12k_dbg(ab, ATH12K_DBG_HAL, "looping count %ld\n",
+                  FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT,
+                            desc->info5));
+#endif
+}
+
+/*
+ * Decode a REO "flush queue" status TLV into the generic status struct.
+ */
+void
+qwz_hal_reo_flush_queue_status(struct qwz_softc *sc, uint32_t *reo_desc,
+    struct hal_reo_status *status)
+{
+       struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+       struct hal_reo_flush_queue_status *desc =
+           (struct hal_reo_flush_queue_status *)tlv->value;
+       uint32_t hdr_info0 = desc->hdr.info0;
+
+       status->uniform_hdr.cmd_num =
+           FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr_info0);
+       status->uniform_hdr.cmd_status =
+           FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr_info0);
+       status->u.flush_queue.err_detected =
+           FIELD_GET(HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED, desc->info0);
+}
+
+/*
+ * Decode a REO "flush cache" status TLV into the generic status struct
+ * and, when the flush succeeded, mark the current blocking resource as
+ * available again in the HAL state.
+ */
+void
+qwz_hal_reo_flush_cache_status(struct qwz_softc *sc, uint32_t *reo_desc,
+    struct hal_reo_status *status)
+{
+       struct ath12k_hal *hal = &sc->hal;
+       struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+       struct hal_reo_flush_cache_status *desc =
+           (struct hal_reo_flush_cache_status *)tlv->value;
+       uint32_t info0 = desc->info0;
+
+       status->uniform_hdr.cmd_num =
+           FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
+       status->uniform_hdr.cmd_status =
+           FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
+
+       status->u.flush_cache.err_detected =
+           FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR, info0);
+       status->u.flush_cache.err_code =
+           FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE, info0);
+       if (status->u.flush_cache.err_code == 0)
+               hal->avail_blk_resource |= BIT(hal->current_blk_index);
+
+       status->u.flush_cache.cache_controller_flush_status_hit =
+           FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT, info0);
+       status->u.flush_cache.cache_controller_flush_status_desc_type =
+           FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE, info0);
+       status->u.flush_cache.cache_controller_flush_status_client_id =
+           FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID, info0);
+       status->u.flush_cache.cache_controller_flush_status_err =
+           FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR, info0);
+       status->u.flush_cache.cache_controller_flush_status_cnt =
+           FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT, info0);
+}
+
+/*
+ * Decode a REO "unblock cache" status TLV.  On an error-free unblock of
+ * a blocking resource, clear that resource's bit in
+ * hal->avail_blk_resource.
+ */
+void
+qwz_hal_reo_unblk_cache_status(struct qwz_softc *sc, uint32_t *reo_desc,
+    struct hal_reo_status *status)
+{
+       struct ath12k_hal *hal = &sc->hal;
+       struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+       struct hal_reo_unblock_cache_status *desc =
+           (struct hal_reo_unblock_cache_status *)tlv->value;
+       uint32_t info0 = desc->info0;
+
+       status->uniform_hdr.cmd_num =
+           FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
+       status->uniform_hdr.cmd_status =
+           FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
+
+       status->u.unblock_cache.err_detected =
+           FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR, info0);
+       status->u.unblock_cache.unblock_type =
+           FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE, info0);
+
+       if (status->u.unblock_cache.err_detected)
+               return;
+       if (status->u.unblock_cache.unblock_type ==
+           HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
+               hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
+}
+
+/*
+ * Decode a REO "flush timeout list" status TLV into the generic status
+ * struct.
+ *
+ * The softc parameter was still named 'ab' (a leftover from the Linux
+ * ath12k port); it is renamed to 'sc' for consistency with the rest of
+ * this file.  The parameter is currently unused but kept so all REO
+ * status decoders share the same signature.
+ */
+void
+qwz_hal_reo_flush_timeout_list_status(struct qwz_softc *sc, uint32_t *reo_desc,
+    struct hal_reo_status *status)
+{
+       struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+       struct hal_reo_flush_timeout_list_status *desc =
+           (struct hal_reo_flush_timeout_list_status *)tlv->value;
+
+       status->uniform_hdr.cmd_num = FIELD_GET(
+           HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
+       status->uniform_hdr.cmd_status = FIELD_GET(
+           HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
+
+       status->u.timeout_list.err_detected = FIELD_GET(
+           HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR, desc->info0);
+       status->u.timeout_list.list_empty = FIELD_GET(
+           HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY, desc->info0);
+
+       status->u.timeout_list.release_desc_cnt = FIELD_GET(
+           HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT, desc->info1);
+       status->u.timeout_list.fwd_buf_cnt = FIELD_GET(
+           HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT, desc->info1);
+}
+
+/*
+ * Decode a REO "descriptor threshold reached" status TLV into the
+ * generic status struct.
+ */
+void
+qwz_hal_reo_desc_thresh_reached_status(struct qwz_softc *sc, uint32_t *reo_desc,
+    struct hal_reo_status *status)
+{
+       struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+       struct hal_reo_desc_thresh_reached_status *desc =
+           (struct hal_reo_desc_thresh_reached_status *)tlv->value;
+
+       status->uniform_hdr.cmd_num =
+           FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
+       status->uniform_hdr.cmd_status =
+           FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
+
+       status->u.desc_thresh_reached.threshold_idx =
+           FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX,
+           desc->info0);
+       status->u.desc_thresh_reached.link_desc_counter0 =
+           FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0,
+           desc->info1);
+       status->u.desc_thresh_reached.link_desc_counter1 =
+           FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1,
+           desc->info2);
+       status->u.desc_thresh_reached.link_desc_counter2 =
+           FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2,
+           desc->info3);
+       status->u.desc_thresh_reached.link_desc_counter_sum =
+           FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM,
+           desc->info4);
+}
+
+/*
+ * Decode a REO "update rx queue" status TLV.  Only the generic status
+ * header (command number and execution status) is carried.
+ *
+ * The softc parameter was still named 'ab' (a leftover from the Linux
+ * ath12k port); it is renamed to 'sc' for consistency with the rest of
+ * this file.  The parameter is currently unused but kept so all REO
+ * status decoders share the same signature.
+ */
+void
+qwz_hal_reo_update_rx_reo_queue_status(struct qwz_softc *sc, uint32_t *reo_desc,
+    struct hal_reo_status *status)
+{
+       struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+       struct hal_reo_status_hdr *desc =
+           (struct hal_reo_status_hdr *)tlv->value;
+
+       status->uniform_hdr.cmd_num = FIELD_GET(
+           HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->info0);
+       status->uniform_hdr.cmd_status = FIELD_GET(
+           HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->info0);
+}
+
+/*
+ * Drain the REO status ring and dispatch every status descriptor to the
+ * decoder for its TLV tag.  If a pending command on dp->reo_cmd_list
+ * matches the decoded command number, its completion handler is invoked
+ * and the command is released.
+ * Returns non-zero if at least one status descriptor was processed.
+ */
+int
+qwz_dp_process_reo_status(struct qwz_softc *sc)
+{
+       struct qwz_dp *dp = &sc->dp;
+       struct hal_srng *srng;
+       struct dp_reo_cmd *cmd, *tmp;
+       int found = 0, ret = 0;
+       uint32_t *reo_desc;
+       uint16_t tag;
+       struct hal_reo_status reo_status;
+
+       srng = &sc->hal.srng_list[dp->reo_status_ring.ring_id];
+       memset(&reo_status, 0, sizeof(reo_status));
+#ifdef notyet
+       spin_lock_bh(&srng->lock);
+#endif
+       qwz_hal_srng_access_begin(sc, srng);
+
+       while ((reo_desc = qwz_hal_srng_dst_get_next_entry(sc, srng))) {
+               ret = 1;
+
+               tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
+               switch (tag) {
+               case HAL_REO_GET_QUEUE_STATS_STATUS:
+                       qwz_hal_reo_status_queue_stats(sc, reo_desc,
+                           &reo_status);
+                       break;
+               case HAL_REO_FLUSH_QUEUE_STATUS:
+                       qwz_hal_reo_flush_queue_status(sc, reo_desc,
+                           &reo_status);
+                       break;
+               case HAL_REO_FLUSH_CACHE_STATUS:
+                       qwz_hal_reo_flush_cache_status(sc, reo_desc,
+                           &reo_status);
+                       break;
+               case HAL_REO_UNBLOCK_CACHE_STATUS:
+                       qwz_hal_reo_unblk_cache_status(sc, reo_desc,
+                           &reo_status);
+                       break;
+               case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
+                       qwz_hal_reo_flush_timeout_list_status(sc, reo_desc,
+                           &reo_status);
+                       break;
+               case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
+                       qwz_hal_reo_desc_thresh_reached_status(sc, reo_desc,
+                           &reo_status);
+                       break;
+               case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
+                       qwz_hal_reo_update_rx_reo_queue_status(sc, reo_desc,
+                           &reo_status);
+                       break;
+               default:
+                       /* Skip unknown TLVs; no command can match them. */
+                       printf("%s: Unknown reo status type %d\n",
+                           sc->sc_dev.dv_xname, tag);
+                       continue;
+               }
+#ifdef notyet
+               spin_lock_bh(&dp->reo_cmd_lock);
+#endif
+               /* Look for the pending command this status belongs to. */
+               TAILQ_FOREACH_SAFE(cmd, &dp->reo_cmd_list, entry, tmp) {
+                       if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
+                               found = 1;
+                               TAILQ_REMOVE(&dp->reo_cmd_list, cmd, entry);
+                               break;
+                       }
+               }
+#ifdef notyet
+               spin_unlock_bh(&dp->reo_cmd_lock);
+#endif
+               if (found) {
+                       cmd->handler(dp, (void *)&cmd->data,
+                           reo_status.uniform_hdr.cmd_status);
+                       free(cmd, M_DEVBUF, sizeof(*cmd));
+               }
+               found = 0;
+       }
+
+       qwz_hal_srng_access_end(sc, srng);
+#ifdef notyet
+       spin_unlock_bh(&srng->lock);
+#endif
+       return ret;
+}
+
+/*
+ * Service all datapath SRNG rings assigned to interrupt group 'grp_id'.
+ * Each ring class is gated by the per-group bit in hw_params.ring_mask.
+ * Returns 1 if any ring handler reported work done, 0 otherwise.
+ */
+int
+qwz_dp_service_srng(struct qwz_softc *sc, int grp_id)
+{
+	struct qwz_pdev_dp *dp = &sc->pdev_dp;
+	int i, j, ret = 0;
+
+	/* TX completions: one WBM release ring per hardware TX ring. */
+	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
+		const struct ath12k_hw_tcl2wbm_rbm_map *map;
+
+		map = &sc->hw_params.hal_params->tcl2wbm_rbm_map[i];
+		if ((sc->hw_params.ring_mask->tx[grp_id]) &
+		    (1 << (map->wbm_ring_num)) &&
+		    qwz_dp_tx_completion_handler(sc, i))
+			ret = 1;
+	}
+
+	if (sc->hw_params.ring_mask->rx_err[grp_id] &&
+	    qwz_dp_process_rx_err(sc))
+		ret = 1;
+
+	if (sc->hw_params.ring_mask->rx_wbm_rel[grp_id] &&
+	    qwz_dp_rx_process_wbm_err(sc))
+		ret = 1;
+
+	/* RX: only the highest ring id set in the group's mask is serviced. */
+	if (sc->hw_params.ring_mask->rx[grp_id]) {
+		i = fls(sc->hw_params.ring_mask->rx[grp_id]) - 1;
+		if (qwz_dp_process_rx(sc, i))
+			ret = 1;
+	}
+
+	/* Monitor status rings, one per rxdma instance per radio. */
+	for (i = 0; i < sc->num_radios; i++) {
+		for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) {
+			int id = i * sc->hw_params.num_rxmda_per_pdev + j;
+
+			if ((sc->hw_params.ring_mask->rx_mon_status[grp_id] &
+			   (1 << id)) == 0)
+				continue;
+
+			if (qwz_dp_rx_process_mon_rings(sc, id))
+				ret = 1;
+		}
+	}
+
+	if (sc->hw_params.ring_mask->reo_status[grp_id] &&
+	    qwz_dp_process_reo_status(sc))
+		ret = 1;
+
+	/* rxdma error processing and RX buffer refill per rxdma instance. */
+	for (i = 0; i < sc->num_radios; i++) {
+		for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) {
+			int id = i * sc->hw_params.num_rxmda_per_pdev + j;
+
+			if (sc->hw_params.ring_mask->rxdma2host[grp_id] &
+			   (1 << (id))) {
+				if (qwz_dp_process_rxdma_err(sc, id))
+					ret = 1;
+			}
+
+			if (sc->hw_params.ring_mask->host2rxdma[grp_id] &
+			    (1 << id)) {
+				qwz_dp_rxbufs_replenish(sc, id,
+				    &dp->rx_refill_buf_ring, 0,
+				    sc->hw_params.hal_params->rx_buf_rbm);
+			}
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Block until the firmware has delivered the WMI "service ready" event.
+ * Returns 0 once ready, or -1 if a 5-second sleep times out first.
+ */
+int
+qwz_wmi_wait_for_service_ready(struct qwz_softc *sc)
+{
+	int err;
+
+	for (;;) {
+		if (sc->wmi.service_ready)
+			return 0;
+		err = tsleep_nsec(&sc->wmi.service_ready, 0, "qwzwmirdy",
+		    SEC_TO_NSEC(5));
+		if (err)
+			return -1;
+	}
+}
+
+/*
+ * Populate one band-to-mac mapping entry per radio, deriving the
+ * frequency range from each pdev's supported bands and the HAL
+ * regulatory capabilities.
+ */
+void
+qwz_fill_band_to_mac_param(struct qwz_softc *sc,
+    struct wmi_host_pdev_band_to_mac *band_to_mac)
+{
+	struct ath12k_hal_reg_capabilities_ext *cap;
+	struct wmi_host_pdev_band_to_mac *b2m;
+	struct qwz_pdev *pdev;
+	uint8_t idx;
+
+	for (idx = 0; idx < sc->num_radios; idx++) {
+		pdev = &sc->pdevs[idx];
+		cap = &sc->hal_reg_cap[idx];
+		b2m = &band_to_mac[idx];
+		b2m->pdev_id = pdev->pdev_id;
+
+		switch (pdev->cap.supported_bands) {
+		case WMI_HOST_WLAN_2G_5G_CAP:
+			b2m->start_freq = cap->low_2ghz_chan;
+			b2m->end_freq = cap->high_5ghz_chan;
+			break;
+		case WMI_HOST_WLAN_2G_CAP:
+			b2m->start_freq = cap->low_2ghz_chan;
+			b2m->end_freq = cap->high_2ghz_chan;
+			break;
+		case WMI_HOST_WLAN_5G_CAP:
+			b2m->start_freq = cap->low_5ghz_chan;
+			b2m->end_freq = cap->high_5ghz_chan;
+			break;
+		default:
+			/* Unknown band combination: leave range untouched. */
+			break;
+		}
+	}
+}
+
+/*
+ * Allocate an mbuf sized for a WMI command header plus 'len' bytes of
+ * payload, padded to 32-bit alignment.  Returns NULL on failure.
+ */
+struct mbuf *
+qwz_wmi_alloc_mbuf(size_t len)
+{
+	uint32_t padded_len = roundup(len, 4);
+
+	return qwz_htc_alloc_mbuf(sizeof(struct wmi_cmd_hdr) + padded_len);
+}
+
+/*
+ * Fill in the WMI command header of mbuf 'm' and hand it to HTC for
+ * transmission on the given WMI endpoint.  Does not sleep; returns the
+ * error from qwz_htc_send().
+ */
+int
+qwz_wmi_cmd_send_nowait(struct qwz_pdev_wmi *wmi, struct mbuf *m,
+    uint32_t cmd_id)
+{
+	struct qwz_softc *sc = wmi->wmi->sc;
+	struct wmi_cmd_hdr *cmd_hdr;
+	uint32_t cmd = 0;
+
+	cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);
+
+	/* The WMI command header sits right after the HTC header. */
+	cmd_hdr = (struct wmi_cmd_hdr *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr));
+	cmd_hdr->cmd_id = htole32(cmd);
+
+	/* Fix: use %x with the 0x prefix; %u would print decimal. */
+	DNPRINTF(QWZ_D_WMI, "%s: sending WMI command 0x%x\n", __func__, cmd);
+	return qwz_htc_send(&sc->htc, wmi->eid, m);
+}
+
+/*
+ * Send a WMI command, sleeping until transmit resources are available.
+ * With credit flow enabled we wait for HTC TX credits on the WMI
+ * endpoint; otherwise we wait for a free copy-engine descriptor.
+ * Returns 0 on success or an errno-style value (ESHUTDOWN after a
+ * firmware crash, EAGAIN on timeout).
+ */
+int
+qwz_wmi_cmd_send(struct qwz_pdev_wmi *wmi, struct mbuf *m, uint32_t cmd_id)
+{
+	struct qwz_wmi_base *wmi_sc = wmi->wmi;
+	int ret = EOPNOTSUPP;
+	struct qwz_softc *sc = wmi_sc->sc;
+#ifdef notyet
+	might_sleep();
+#endif
+	if (sc->hw_params.credit_flow) {
+		struct qwz_htc *htc = &sc->htc;
+		struct qwz_htc_ep *ep = &htc->endpoint[wmi->eid];
+
+		/* Wait (up to 3s per wakeup) for HTC TX credits. */
+		while (!ep->tx_credits) {
+			ret = tsleep_nsec(&ep->tx_credits, 0, "qwztxcrd",
+			    SEC_TO_NSEC(3));
+			if (ret) {
+				printf("%s: tx credits timeout\n",
+				    sc->sc_dev.dv_xname);
+				if (test_bit(ATH12K_FLAG_CRASH_FLUSH,
+				    sc->sc_flags))
+					return ESHUTDOWN;
+				else
+					return EAGAIN;
+			}
+		}
+	} else {
+		/* No credit flow: wait for a free TX copy-engine descriptor. */
+		while (!wmi->tx_ce_desc) {
+			ret = tsleep_nsec(&wmi->tx_ce_desc, 0, "qwztxce",
+			    SEC_TO_NSEC(3));
+			if (ret) {
+				printf("%s: tx ce desc timeout\n",
+				    sc->sc_dev.dv_xname);
+				if (test_bit(ATH12K_FLAG_CRASH_FLUSH,
+				    sc->sc_flags))
+					return ESHUTDOWN;
+				else
+					return EAGAIN;
+			}
+		}
+	}
+
+	ret = qwz_wmi_cmd_send_nowait(wmi, m, cmd_id);
+
+	if (ret == EAGAIN)
+		printf("%s: wmi command %d timeout\n",
+		    sc->sc_dev.dv_xname, cmd_id);
+
+	if (ret == ENOBUFS)
+		printf("%s: ce desc not available for wmi command %d\n",
+		    sc->sc_dev.dv_xname, cmd_id);
+
+	return ret;
+}
+
+/*
+ * Send a WMI_PDEV_SET_PARAM command setting 'param_id' to 'param_value'
+ * on the given pdev.  Returns 0 on success or an errno-style value.
+ */
+int
+qwz_wmi_pdev_set_param(struct qwz_softc *sc, uint32_t param_id,
+    uint32_t param_value, uint8_t pdev_id)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_pdev_set_param_cmd *cmd;
+	struct mbuf *m;
+	uint8_t *frm;
+	int err;
+
+	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+	if (m == NULL)
+		return ENOMEM;
+
+	/* Command TLV follows the HTC and WMI headers. */
+	frm = mtod(m, uint8_t *) + sizeof(struct ath12k_htc_hdr) +
+	    sizeof(struct wmi_cmd_hdr);
+	cmd = (struct wmi_pdev_set_param_cmd *)frm;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->pdev_id = pdev_id;
+	cmd->param_id = param_id;
+	cmd->param_value = param_value;
+
+	err = qwz_wmi_cmd_send(wmi, m, WMI_PDEV_SET_PARAM_CMDID);
+	if (err) {
+		if (err != ESHUTDOWN) {
+			printf("%s: failed to send WMI_PDEV_SET_PARAM cmd\n",
+			    sc->sc_dev.dv_xname);
+		}
+		m_freem(m);
+		return err;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: cmd pdev set param %d pdev id %d value %d\n",
+	    __func__, param_id, pdev_id, param_value);
+
+	return 0;
+}
+
+/*
+ * Configure large-receive-offload (LRO) on a pdev, seeding the IPv4 and
+ * IPv6 Toeplitz hash keys with random data.
+ */
+int
+qwz_wmi_pdev_lro_cfg(struct qwz_softc *sc, uint8_t pdev_id)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
+	struct mbuf *m;
+	uint8_t *frm;
+	int err;
+
+	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+	if (m == NULL)
+		return ENOMEM;
+
+	frm = mtod(m, uint8_t *) + sizeof(struct ath12k_htc_hdr) +
+	    sizeof(struct wmi_cmd_hdr);
+	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)frm;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	/* Randomize the Toeplitz hash seeds. */
+	arc4random_buf(cmd->th_4, sizeof(uint32_t) * ATH12K_IPV4_TH_SEED_SIZE);
+	arc4random_buf(cmd->th_6, sizeof(uint32_t) * ATH12K_IPV6_TH_SEED_SIZE);
+
+	cmd->pdev_id = pdev_id;
+
+	err = qwz_wmi_cmd_send(wmi, m, WMI_LRO_CONFIG_CMDID);
+	if (err) {
+		if (err != ESHUTDOWN) {
+			printf("%s: failed to send lro cfg req wmi cmd\n",
+			    sc->sc_dev.dv_xname);
+		}
+		m_freem(m);
+		return err;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: cmd lro config pdev_id 0x%x\n",
+	    __func__, pdev_id);
+
+	return 0;
+}
+
+/*
+ * Set the station powersave mode for a vdev via
+ * WMI_STA_POWERSAVE_MODE_CMDID.  Returns 0 on success or an
+ * errno-style value.
+ */
+int
+qwz_wmi_pdev_set_ps_mode(struct qwz_softc *sc, int vdev_id, uint8_t pdev_id,
+    enum wmi_sta_ps_mode psmode)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_pdev_set_ps_mode_cmd *cmd;
+	struct mbuf *m;
+	int ret;
+
+	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+	if (!m)
+		return ENOMEM;
+
+	cmd = (struct wmi_pdev_set_ps_mode_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+	    WMI_TAG_STA_POWERSAVE_MODE_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->vdev_id = vdev_id;
+	cmd->sta_ps_mode = psmode;
+
+	ret = qwz_wmi_cmd_send(wmi, m, WMI_STA_POWERSAVE_MODE_CMDID);
+	if (ret) {
+		if (ret != ESHUTDOWN) {
+			/*
+			 * Fix copy-pasted error message: this function sends
+			 * WMI_STA_POWERSAVE_MODE, not WMI_PDEV_SET_PARAM.
+			 */
+			printf("%s: failed to send "
+			    "WMI_STA_POWERSAVE_MODE cmd\n",
+			    sc->sc_dev.dv_xname);
+		}
+		m_freem(m);
+		return ret;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: cmd sta powersave mode psmode %d vdev id %d\n",
+	    __func__, psmode, vdev_id);
+
+	return 0;
+}
+
+/*
+ * Program the OUI (first three bytes of 'mac_addr') that firmware will
+ * use in randomized probe request source addresses.
+ */
+int
+qwz_wmi_scan_prob_req_oui(struct qwz_softc *sc, const uint8_t *mac_addr,
+    uint8_t pdev_id)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_scan_prob_req_oui_cmd *cmd;
+	struct mbuf *m;
+	uint32_t oui;
+	int err;
+
+	/* Pack the three OUI octets into one 24-bit value. */
+	oui = ((uint32_t)mac_addr[0] << 16) | ((uint32_t)mac_addr[1] << 8) |
+	    mac_addr[2];
+
+	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+	if (m == NULL)
+		return ENOMEM;
+
+	cmd = (struct wmi_scan_prob_req_oui_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+	    WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->prob_req_oui = oui;
+
+	DNPRINTF(QWZ_D_WMI, "%s: scan prob req oui %d\n", __func__,
+	    oui);
+
+	err = qwz_wmi_cmd_send(wmi, m, WMI_SCAN_PROB_REQ_OUI_CMDID);
+	if (err) {
+		if (err != ESHUTDOWN) {
+			printf("%s: failed to send WMI_SCAN_PROB_REQ_OUI cmd\n",
+			    sc->sc_dev.dv_xname);
+		}
+		m_freem(m);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Enable DFS phy-error offload on a pdev so firmware reports radar
+ * pulses.  Returns 0 on success or an errno-style value.
+ */
+int
+qwz_wmi_send_dfs_phyerr_offload_enable_cmd(struct qwz_softc *sc, uint32_t pdev_id)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_dfs_phyerr_offload_cmd *cmd;
+	struct mbuf *m;
+	int ret;
+
+	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+	if (!m)
+		return ENOMEM;
+
+	cmd = (struct wmi_dfs_phyerr_offload_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+	    WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	cmd->pdev_id = pdev_id;
+
+	ret = qwz_wmi_cmd_send(wmi, m,
+	    WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
+	if (ret) {
+		if (ret != ESHUTDOWN) {
+			printf("%s: failed to send "
+			    "WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n",
+			    sc->sc_dev.dv_xname);
+		}
+		/*
+		 * Fix: use m_freem() to release the whole mbuf chain, like
+		 * every other error path here; m_free() only frees the
+		 * first mbuf of the chain.
+		 */
+		m_freem(m);
+		return ret;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: cmd pdev dfs phyerr offload enable "
+	    "pdev id %d\n", __func__, pdev_id);
+
+	return 0;
+}
+
+/*
+ * Upload the scan channel list to firmware.  The list is sent in chunks
+ * sized to the maximum WMI message length; chunks after the first carry
+ * the "append" flag so firmware extends, rather than replaces, its list.
+ * Returns 0 on success or an errno-style value.
+ */
+int
+qwz_wmi_send_scan_chan_list_cmd(struct qwz_softc *sc, uint8_t pdev_id,
+    struct scan_chan_list_params *chan_list)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_scan_chan_list_cmd *cmd;
+	struct mbuf *m;
+	struct wmi_channel *chan_info;
+	struct channel_param *tchan_info;
+	struct wmi_tlv *tlv;
+	void *ptr;
+	int i, ret, len;
+	uint16_t num_send_chans, num_sends = 0, max_chan_limit = 0;
+	uint32_t *reg1, *reg2;
+
+	tchan_info = chan_list->ch_param;
+	while (chan_list->nallchans) {
+		/* How many channels fit into one WMI message? */
+		len = sizeof(*cmd) + TLV_HDR_SIZE;
+		max_chan_limit = (wmi->wmi->max_msg_len[pdev_id] - len) /
+		    sizeof(*chan_info);
+
+		if (chan_list->nallchans > max_chan_limit)
+			num_send_chans = max_chan_limit;
+		else
+			num_send_chans = chan_list->nallchans;
+
+		chan_list->nallchans -= num_send_chans;
+		len += sizeof(*chan_info) * num_send_chans;
+
+		m = qwz_wmi_alloc_mbuf(len);
+		if (!m)
+			return ENOMEM;
+
+		cmd = (struct wmi_scan_chan_list_cmd *)(mtod(m, uint8_t *) +
+		    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+		cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+		    WMI_TAG_SCAN_CHAN_LIST_CMD) |
+		    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+		cmd->pdev_id = chan_list->pdev_id;
+		cmd->num_scan_chans = num_send_chans;
+		/* Follow-up chunks extend the previously sent list. */
+		if (num_sends)
+			cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;
+
+		DNPRINTF(QWZ_D_WMI, "%s: no.of chan = %d len = %d "
+		    "pdev_id = %d num_sends = %d\n", __func__, num_send_chans,
+		    len, cmd->pdev_id, num_sends);
+
+		ptr = (void *)(mtod(m, uint8_t *) +
+		    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
+		    sizeof(*cmd));
+
+		/* Array-of-struct TLV holding the channel entries. */
+		len = sizeof(*chan_info) * num_send_chans;
+		tlv = ptr;
+		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+		    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+		ptr += TLV_HDR_SIZE;
+
+		for (i = 0; i < num_send_chans; ++i) {
+			chan_info = ptr;
+			memset(chan_info, 0, sizeof(*chan_info));
+			len = sizeof(*chan_info);
+			chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+			    WMI_TAG_CHANNEL) |
+			    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+			reg1 = &chan_info->reg_info_1;
+			reg2 = &chan_info->reg_info_2;
+			chan_info->mhz = tchan_info->mhz;
+			chan_info->band_center_freq1 = tchan_info->cfreq1;
+			chan_info->band_center_freq2 = tchan_info->cfreq2;
+
+			/* Translate channel attributes into WMI info flags. */
+			if (tchan_info->is_chan_passive)
+				chan_info->info |= WMI_CHAN_INFO_PASSIVE;
+			if (tchan_info->allow_he)
+				chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
+			else if (tchan_info->allow_vht)
+				chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
+			else if (tchan_info->allow_ht)
+				chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
+			if (tchan_info->half_rate)
+				chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
+			if (tchan_info->quarter_rate)
+				chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
+			if (tchan_info->psc_channel)
+				chan_info->info |= WMI_CHAN_INFO_PSC;
+			if (tchan_info->dfs_set)
+				chan_info->info |= WMI_CHAN_INFO_DFS;
+
+			chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
+			    tchan_info->phy_mode);
+			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
+			    tchan_info->minpower);
+			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+			    tchan_info->maxpower);
+			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+			    tchan_info->maxregpower);
+			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
+			    tchan_info->reg_class_id);
+			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+			    tchan_info->antennamax);
+			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
+			    tchan_info->maxregpower);
+
+			DNPRINTF(QWZ_D_WMI, "%s: chan scan list "
+			    "chan[%d] = %u, chan_info->info %8x\n",
+			    __func__, i, chan_info->mhz, chan_info->info);
+
+			ptr += sizeof(*chan_info);
+
+			tchan_info++;
+		}
+
+		ret = qwz_wmi_cmd_send(wmi, m, WMI_SCAN_CHAN_LIST_CMDID);
+		if (ret) {
+			if (ret != ESHUTDOWN) {
+				printf("%s: failed to send WMI_SCAN_CHAN_LIST "
+				    "cmd\n", sc->sc_dev.dv_xname);
+			}
+			m_freem(m);
+			return ret;
+		}
+
+		DNPRINTF(QWZ_D_WMI, "%s: cmd scan chan list channels %d\n",
+		    __func__, num_send_chans);
+
+		num_sends++;
+	}
+
+	return 0;
+}
+
+/*
+ * Start the firmware's periodic 11d (country IE) scan on a vdev.
+ * Returns 0 on success or an errno-style value.
+ */
+int
+qwz_wmi_send_11d_scan_start_cmd(struct qwz_softc *sc,
+    struct wmi_11d_scan_start_params *param, uint8_t pdev_id)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_11d_scan_start_cmd *cmd;
+	struct mbuf *m;
+	uint8_t *frm;
+	int err;
+
+	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+	if (m == NULL)
+		return ENOMEM;
+
+	frm = mtod(m, uint8_t *) + sizeof(struct ath12k_htc_hdr) +
+	    sizeof(struct wmi_cmd_hdr);
+	cmd = (struct wmi_11d_scan_start_cmd *)frm;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->vdev_id = param->vdev_id;
+	cmd->scan_period_msec = param->scan_period_msec;
+	cmd->start_interval_msec = param->start_interval_msec;
+
+	err = qwz_wmi_cmd_send(wmi, m, WMI_11D_SCAN_START_CMDID);
+	if (err) {
+		if (err != ESHUTDOWN) {
+			printf("%s: failed to send WMI_11D_SCAN_START_CMDID: "
+			    "%d\n", sc->sc_dev.dv_xname, err);
+		}
+		m_freem(m);
+		return err;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: cmd 11d scan start vdev id %d period %d "
+	    "ms internal %d ms\n", __func__, cmd->vdev_id,
+	    cmd->scan_period_msec, cmd->start_interval_msec);
+
+	return 0;
+}
+
+/*
+ * Translate the generic scan_req_params event subscriptions and control
+ * flags into the WMI bit encodings expected by the start-scan command.
+ */
+static inline void
+qwz_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
+    struct scan_req_params *param)
+{
+	/* Scan events subscription */
+	if (param->scan_ev_started)
+		cmd->notify_scan_events |=  WMI_SCAN_EVENT_STARTED;
+	if (param->scan_ev_completed)
+		cmd->notify_scan_events |=  WMI_SCAN_EVENT_COMPLETED;
+	if (param->scan_ev_bss_chan)
+		cmd->notify_scan_events |=  WMI_SCAN_EVENT_BSS_CHANNEL;
+	if (param->scan_ev_foreign_chan)
+		cmd->notify_scan_events |=  WMI_SCAN_EVENT_FOREIGN_CHAN;
+	if (param->scan_ev_dequeued)
+		cmd->notify_scan_events |=  WMI_SCAN_EVENT_DEQUEUED;
+	if (param->scan_ev_preempted)
+		cmd->notify_scan_events |=  WMI_SCAN_EVENT_PREEMPTED;
+	if (param->scan_ev_start_failed)
+		cmd->notify_scan_events |=  WMI_SCAN_EVENT_START_FAILED;
+	if (param->scan_ev_restarted)
+		cmd->notify_scan_events |=  WMI_SCAN_EVENT_RESTARTED;
+	if (param->scan_ev_foreign_chn_exit)
+		cmd->notify_scan_events |=  WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
+	if (param->scan_ev_suspended)
+		cmd->notify_scan_events |=  WMI_SCAN_EVENT_SUSPENDED;
+	if (param->scan_ev_resumed)
+		cmd->notify_scan_events |=  WMI_SCAN_EVENT_RESUMED;
+
+	/** Set scan control flags */
+	cmd->scan_ctrl_flags = 0;
+	if (param->scan_f_passive)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_PASSIVE;
+	if (param->scan_f_strict_passive_pch)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
+	if (param->scan_f_promisc_mode)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_FILTER_PROMISCUOS;
+	if (param->scan_f_capture_phy_err)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_CAPTURE_PHY_ERROR;
+	if (param->scan_f_half_rate)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
+	if (param->scan_f_quarter_rate)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
+	if (param->scan_f_cck_rates)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_CCK_RATES;
+	if (param->scan_f_ofdm_rates)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_OFDM_RATES;
+	if (param->scan_f_chan_stat_evnt)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_CHAN_STAT_EVENT;
+	if (param->scan_f_filter_prb_req)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_FILTER_PROBE_REQ;
+	if (param->scan_f_bcast_probe)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_BCAST_PROBE_REQ;
+	if (param->scan_f_offchan_mgmt_tx)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_OFFCHAN_MGMT_TX;
+	if (param->scan_f_offchan_data_tx)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_OFFCHAN_DATA_TX;
+	if (param->scan_f_force_active_dfs_chn)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
+	if (param->scan_f_add_tpc_ie_in_probe)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
+	if (param->scan_f_add_ds_ie_in_probe)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
+	if (param->scan_f_add_spoofed_mac_in_probe)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
+	if (param->scan_f_add_rand_seq_in_probe)
+		cmd->scan_ctrl_flags |=  WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
+	if (param->scan_f_en_ie_whitelist_in_probe)
+		cmd->scan_ctrl_flags |=
+			 WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;
+
+	/* for adaptive scan mode using 3 bits (21 - 23 bits) */
+	WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
+	    param->adaptive_dwell_time_mode);
+
+	cmd->scan_ctrl_flags_ext = param->scan_ctrl_flags_ext;
+}
+
+/*
+ * Build and send a WMI start-scan command.  The message is a fixed
+ * command struct followed by TLV arrays: channel list, SSIDs, BSSIDs,
+ * extra IEs, and optional short-SSID/BSSID hints.  Returns 0 on success
+ * or an errno-style value.
+ */
+int
+qwz_wmi_send_scan_start_cmd(struct qwz_softc *sc,
+    struct scan_req_params *params)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[params->pdev_id];
+	struct wmi_start_scan_cmd *cmd;
+	struct wmi_ssid *ssid = NULL;
+	struct wmi_mac_addr *bssid;
+	struct mbuf *m;
+	struct wmi_tlv *tlv;
+	void *ptr;
+	int i, ret, len;
+	uint32_t *tmp_ptr;
+	uint16_t extraie_len_with_pad = 0;
+	struct hint_short_ssid *s_ssid = NULL;
+	struct hint_bssid *hint_bssid = NULL;
+
+	/* Compute the total message length: command plus all TLV arrays. */
+	len = sizeof(*cmd);
+
+	len += TLV_HDR_SIZE;
+	if (params->num_chan)
+		len += params->num_chan * sizeof(uint32_t);
+
+	len += TLV_HDR_SIZE;
+	if (params->num_ssids)
+		len += params->num_ssids * sizeof(*ssid);
+
+	len += TLV_HDR_SIZE;
+	if (params->num_bssid)
+		len += sizeof(*bssid) * params->num_bssid;
+
+	len += TLV_HDR_SIZE;
+	/* Extra IEs are padded to 32-bit alignment; cap at 16 bits. */
+	if (params->extraie.len && params->extraie.len <= 0xFFFF) {
+		extraie_len_with_pad = roundup(params->extraie.len,
+		    sizeof(uint32_t));
+	}
+	len += extraie_len_with_pad;
+
+	if (params->num_hint_bssid) {
+		len += TLV_HDR_SIZE +
+		    params->num_hint_bssid * sizeof(struct hint_bssid);
+	}
+
+	if (params->num_hint_s_ssid) {
+		len += TLV_HDR_SIZE +
+		    params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
+	}
+
+	m = qwz_wmi_alloc_mbuf(len);
+	if (!m)
+		return ENOMEM;
+
+	ptr = (void *)(mtod(m, uint8_t *) + sizeof(struct ath12k_htc_hdr) +
+	    sizeof(struct wmi_cmd_hdr));
+
+	/* Fixed part of the command. */
+	cmd = ptr;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	cmd->scan_id = params->scan_id;
+	cmd->scan_req_id = params->scan_req_id;
+	cmd->vdev_id = params->vdev_id;
+	cmd->scan_priority = params->scan_priority;
+	cmd->notify_scan_events = params->notify_scan_events;
+
+	qwz_wmi_copy_scan_event_cntrl_flags(cmd, params);
+
+	cmd->dwell_time_active = params->dwell_time_active;
+	cmd->dwell_time_active_2g = params->dwell_time_active_2g;
+	cmd->dwell_time_passive = params->dwell_time_passive;
+	cmd->dwell_time_active_6g = params->dwell_time_active_6g;
+	cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
+	cmd->min_rest_time = params->min_rest_time;
+	cmd->max_rest_time = params->max_rest_time;
+	cmd->repeat_probe_time = params->repeat_probe_time;
+	cmd->probe_spacing_time = params->probe_spacing_time;
+	cmd->idle_time = params->idle_time;
+	cmd->max_scan_time = params->max_scan_time;
+	cmd->probe_delay = params->probe_delay;
+	cmd->burst_duration = params->burst_duration;
+	cmd->num_chan = params->num_chan;
+	cmd->num_bssid = params->num_bssid;
+	cmd->num_ssids = params->num_ssids;
+	cmd->ie_len = params->extraie.len;
+	cmd->n_probes = params->n_probes;
+	IEEE80211_ADDR_COPY(cmd->mac_addr.addr, params->mac_addr.addr);
+	IEEE80211_ADDR_COPY(cmd->mac_mask.addr, params->mac_mask.addr);
+
+	ptr += sizeof(*cmd);
+
+	/* Channel list (array of uint32 frequencies). */
+	len = params->num_chan * sizeof(uint32_t);
+
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+	    FIELD_PREP(WMI_TLV_LEN, len);
+	ptr += TLV_HDR_SIZE;
+	tmp_ptr = (uint32_t *)ptr;
+
+	for (i = 0; i < params->num_chan; ++i)
+		tmp_ptr[i] = params->chan_list[i];
+
+	ptr += len;
+
+	/* SSID list. */
+	len = params->num_ssids * sizeof(*ssid);
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+	    FIELD_PREP(WMI_TLV_LEN, len);
+
+	ptr += TLV_HDR_SIZE;
+
+	if (params->num_ssids) {
+		ssid = ptr;
+		for (i = 0; i < params->num_ssids; ++i) {
+			ssid->ssid_len = params->ssid[i].length;
+			memcpy(ssid->ssid, params->ssid[i].ssid,
+			       params->ssid[i].length);
+			ssid++;
+		}
+	}
+
+	ptr += (params->num_ssids * sizeof(*ssid));
+
+	/* BSSID list. */
+	len = params->num_bssid * sizeof(*bssid);
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+	    FIELD_PREP(WMI_TLV_LEN, len);
+
+	ptr += TLV_HDR_SIZE;
+	bssid = ptr;
+
+	if (params->num_bssid) {
+		for (i = 0; i < params->num_bssid; ++i) {
+			IEEE80211_ADDR_COPY(bssid->addr,
+			    params->bssid_list[i].addr);
+			bssid++;
+		}
+	}
+
+	ptr += params->num_bssid * sizeof(*bssid);
+
+	/* Extra IEs. */
+	len = extraie_len_with_pad;
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+	    FIELD_PREP(WMI_TLV_LEN, len);
+	ptr += TLV_HDR_SIZE;
+
+	if (extraie_len_with_pad)
+		memcpy(ptr, params->extraie.ptr, params->extraie.len);
+
+	ptr += extraie_len_with_pad;
+
+	/* Optional 6 GHz short-SSID hints. */
+	if (params->num_hint_s_ssid) {
+		len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
+		tlv = ptr;
+		tlv->header = FIELD_PREP(WMI_TLV_TAG,
+		    WMI_TAG_ARRAY_FIXED_STRUCT) |
+		    FIELD_PREP(WMI_TLV_LEN, len);
+		ptr += TLV_HDR_SIZE;
+		s_ssid = ptr;
+		for (i = 0; i < params->num_hint_s_ssid; ++i) {
+			s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
+			s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
+			s_ssid++;
+		}
+		ptr += len;
+	}
+
+	/* Optional 6 GHz BSSID hints. */
+	if (params->num_hint_bssid) {
+		len = params->num_hint_bssid * sizeof(struct hint_bssid);
+		tlv = ptr;
+		tlv->header = FIELD_PREP(WMI_TLV_TAG,
+		    WMI_TAG_ARRAY_FIXED_STRUCT) |
+		    FIELD_PREP(WMI_TLV_LEN, len);
+		ptr += TLV_HDR_SIZE;
+		hint_bssid = ptr;
+		for (i = 0; i < params->num_hint_bssid; ++i) {
+			hint_bssid->freq_flags =
+				params->hint_bssid[i].freq_flags;
+			/*
+			 * Fix: IEEE80211_ADDR_COPY(dst, src) arguments were
+			 * swapped; the command buffer is the destination,
+			 * the caller's params are the source.
+			 */
+			IEEE80211_ADDR_COPY(
+			    &hint_bssid->bssid.addr[0],
+			    &params->hint_bssid[i].bssid.addr[0]);
+			hint_bssid++;
+		}
+	}
+
+	ret = qwz_wmi_cmd_send(wmi, m, WMI_START_SCAN_CMDID);
+	if (ret) {
+		if (ret != ESHUTDOWN) {
+			printf("%s: failed to send WMI_START_SCAN_CMDID\n",
+			    sc->sc_dev.dv_xname);
+		}
+		m_freem(m);
+		return ret;
+	}
+
+	/* Fix: terminate the debug message with a newline. */
+	DNPRINTF(QWZ_D_WMI, "%s: cmd start scan\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Cancel an ongoing scan: all scans on a pdev, all scans on a vdev, or
+ * one specific scan id, depending on param->req_type.
+ */
+int
+qwz_wmi_send_scan_stop_cmd(struct qwz_softc *sc,
+    struct scan_cancel_param *param)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[param->pdev_id];
+	struct wmi_stop_scan_cmd *cmd;
+	struct mbuf *m;
+	int err;
+
+	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+	if (m == NULL)
+		return ENOMEM;
+
+	cmd = (struct wmi_stop_scan_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->vdev_id = param->vdev_id;
+	cmd->requestor = param->requester;
+	cmd->scan_id = param->scan_id;
+	cmd->pdev_id = param->pdev_id;
+
+	/* Map the generic cancel request onto the WMI stop-scan type. */
+	switch (param->req_type) {
+	case WLAN_SCAN_CANCEL_PDEV_ALL:
+		cmd->req_type =  WMI_SCAN_STOP_ALL;
+		break;
+	case WLAN_SCAN_CANCEL_VDEV_ALL:
+		cmd->req_type =  WMI_SCN_STOP_VAP_ALL;
+		break;
+	case WLAN_SCAN_CANCEL_SINGLE:
+		cmd->req_type =  WMI_SCAN_STOP_ONE;
+		break;
+	default:
+		printf("%s: invalid scan cancel param %d\n",
+		    sc->sc_dev.dv_xname, param->req_type);
+		m_freem(m);
+		return EINVAL;
+	}
+
+	err = qwz_wmi_cmd_send(wmi, m, WMI_STOP_SCAN_CMDID);
+	if (err) {
+		if (err != ESHUTDOWN) {
+			printf("%s: failed to send WMI_STOP_SCAN_CMDID\n",
+			    sc->sc_dev.dv_xname);
+		}
+		m_freem(m);
+		return err;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: cmd stop scan\n", __func__);
+	return 0;
+}
+
+/*
+ * Create a firmware peer entry for the given MAC address on a vdev
+ * via WMI_PEER_CREATE_CMDID.  Returns 0 on success or an errno.
+ */
+int
+qwz_wmi_send_peer_create_cmd(struct qwz_softc *sc, uint8_t pdev_id,
+    struct peer_create_params *param)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_peer_create_cmd *cmd;
+	struct mbuf *m;
+	int err;
+
+	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+	if (m == NULL)
+		return ENOMEM;
+
+	/* The command TLV sits behind the HTC and WMI headers. */
+	cmd = (struct wmi_peer_create_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->vdev_id = param->vdev_id;
+	cmd->peer_type = param->peer_type;
+	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, param->peer_addr);
+
+	err = qwz_wmi_cmd_send(wmi, m, WMI_PEER_CREATE_CMDID);
+	if (err) {
+		if (err != ESHUTDOWN) {
+			printf("%s: failed to submit WMI_PEER_CREATE cmd\n",
+			    sc->sc_dev.dv_xname);
+		}
+		m_freem(m);
+		return err;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: cmd peer create vdev_id %d peer_addr %s\n",
+	    __func__, param->vdev_id, ether_sprintf(param->peer_addr));
+
+	return 0;
+}
+
+/*
+ * Remove a firmware peer entry via WMI_PEER_DELETE_CMDID.
+ * Returns 0 on success or an errno; the mbuf is freed here on failure.
+ */
+int
+qwz_wmi_send_peer_delete_cmd(struct qwz_softc *sc, const uint8_t *peer_addr,
+    uint8_t vdev_id, uint8_t pdev_id)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_peer_delete_cmd *cmd;
+	struct mbuf *m;
+	int ret;
+
+	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+	if (!m)
+		return ENOMEM;
+
+	/* The command TLV sits behind the HTC and WMI headers. */
+	cmd = (struct wmi_peer_delete_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr);
+	cmd->vdev_id = vdev_id;
+
+	ret = qwz_wmi_cmd_send(wmi, m, WMI_PEER_DELETE_CMDID);
+	if (ret) {
+		if (ret != ESHUTDOWN) {
+			printf("%s: failed to send WMI_PEER_DELETE cmd\n",
+			    sc->sc_dev.dv_xname);
+		}
+		m_freem(m);
+		return ret;
+	}
+
+	/*
+	 * Use ether_sprintf() rather than the Linux-only %pM printf
+	 * extension, which the OpenBSD kernel printf does not support.
+	 */
+	DNPRINTF(QWZ_D_WMI, "%s: cmd peer delete vdev_id %d peer_addr %s\n",
+	    __func__, vdev_id, ether_sprintf(peer_addr));
+
+	return 0;
+}
+
+/*
+ * Install a cipher key for a peer via WMI_VDEV_INSTALL_KEY_CMDID.
+ * The key bytes follow the command in a 32-bit aligned byte-array TLV.
+ * Returns 0 on success or a (positive) errno, matching the rest of
+ * this file; the original returned Linux-style -ENOMEM here.
+ */
+int
+qwz_wmi_vdev_install_key(struct qwz_softc *sc,
+    struct wmi_vdev_install_key_arg *arg, uint8_t pdev_id)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_vdev_install_key_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct mbuf *m;
+	int ret, len;
+	int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t));
+
+	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
+
+	m = qwz_wmi_alloc_mbuf(len);
+	if (m == NULL)
+		return ENOMEM;
+
+	cmd = (struct wmi_vdev_install_key_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+	    WMI_TAG_VDEV_INSTALL_KEY_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->vdev_id = arg->vdev_id;
+	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, arg->macaddr);
+	cmd->key_idx = arg->key_idx;
+	cmd->key_flags = arg->key_flags;
+	cmd->key_cipher = arg->key_cipher;
+	cmd->key_len = arg->key_len;
+	cmd->key_txmic_len = arg->key_txmic_len;
+	cmd->key_rxmic_len = arg->key_rxmic_len;
+
+	/*
+	 * NOTE(review): copies sizeof(struct wmi_key_seq_counter) bytes
+	 * starting at &arg->key_rsc_counter, mirroring Linux ath11k/ath12k;
+	 * confirm the arg field layout really provides that many bytes.
+	 */
+	if (arg->key_rsc_counter)
+		memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
+		       sizeof(struct wmi_key_seq_counter));
+
+	/* Byte-array TLV carrying the (padded) key material. */
+	tlv = (struct wmi_tlv *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
+	    sizeof(*cmd));
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+	    FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
+	if (arg->key_data)
+		memcpy(tlv->value, (uint8_t *)arg->key_data,
+		    key_len_aligned);
+
+	ret = qwz_wmi_cmd_send(wmi, m, WMI_VDEV_INSTALL_KEY_CMDID);
+	if (ret) {
+		/* Stay quiet during shutdown, like the sibling commands. */
+		if (ret != ESHUTDOWN) {
+			printf("%s: failed to send WMI_VDEV_INSTALL_KEY cmd\n",
+			    sc->sc_dev.dv_xname);
+		}
+		m_freem(m);
+		return ret;
+	}
+
+	DNPRINTF(QWZ_D_WMI,
+	    "%s: cmd vdev install key idx %d cipher %d len %d\n",
+	    __func__, arg->key_idx, arg->key_cipher, arg->key_len);
+
+	return ret;
+}
+
+/*
+ * Translate host-side peer association parameters into the firmware's
+ * WMI_PEER_* flag word in the peer-assoc command.
+ */
+void
+qwz_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
+    struct peer_assoc_params *param, int hw_crypto_disabled)
+{
+	uint32_t flags = 0;
+	size_t i;
+
+	if (param->is_wme_set) {
+		/*
+		 * Capability bits that are only meaningful once WME
+		 * parameters are known.  Typically if STBC/LDPC is
+		 * enabled for VHT it should be enabled for HT as well.
+		 */
+		const struct {
+			int		enabled;
+			uint32_t	flag;
+		} feat[] = {
+			{ param->qos_flag != 0, WMI_PEER_QOS },
+			{ param->apsd_flag != 0, WMI_PEER_APSD },
+			{ param->ht_flag != 0, WMI_PEER_HT },
+			{ param->bw_40 != 0, WMI_PEER_40MHZ },
+			{ param->bw_80 != 0, WMI_PEER_80MHZ },
+			{ param->bw_160 != 0, WMI_PEER_160MHZ },
+			{ param->stbc_flag != 0, WMI_PEER_STBC },
+			{ param->ldpc_flag != 0, WMI_PEER_LDPC },
+			{ param->static_mimops_flag != 0,
+			    WMI_PEER_STATIC_MIMOPS },
+			{ param->dynamic_mimops_flag != 0,
+			    WMI_PEER_DYN_MIMOPS },
+			{ param->spatial_mux_flag != 0,
+			    WMI_PEER_SPATIAL_MUX },
+			{ param->vht_flag != 0, WMI_PEER_VHT },
+			{ param->he_flag != 0, WMI_PEER_HE },
+			{ param->twt_requester != 0, WMI_PEER_TWT_REQ },
+			{ param->twt_responder != 0, WMI_PEER_TWT_RESP },
+		};
+
+		for (i = 0; i < sizeof(feat) / sizeof(feat[0]); i++) {
+			if (feat[i].enabled)
+				flags |= feat[i].flag;
+		}
+	}
+
+	/*
+	 * Suppress authorization for all AUTH modes that need 4-way handshake
+	 * (during re-association).
+	 * Authorization will be done for these modes on key installation.
+	 */
+	if (param->auth_flag)
+		flags |= WMI_PEER_AUTH;
+	if (param->need_ptk_4_way) {
+		flags |= WMI_PEER_NEED_PTK_4_WAY;
+		if (!hw_crypto_disabled && param->is_assoc)
+			flags &= ~WMI_PEER_AUTH;
+	}
+	if (param->need_gtk_2_way)
+		flags |= WMI_PEER_NEED_GTK_2_WAY;
+	/* safe mode bypass the 4-way handshake */
+	if (param->safe_mode_enabled)
+		flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
+		    WMI_PEER_NEED_GTK_2_WAY);
+
+	if (param->is_pmf_enabled)
+		flags |= WMI_PEER_PMF;
+
+	/*
+	 * AMSDU disable for station/AP transmit would be handled here
+	 * once firmware support exists (param->amsdu_disable).
+	 */
+
+	/*
+	 * Target asserts if node is marked HT and all MCS is set to 0.
+	 * Mark the node as non-HT if all the mcs rates are disabled.
+	 */
+	if (param->peer_ht_rates.num_rates == 0)
+		flags &= ~WMI_PEER_HT;
+
+	cmd->peer_flags = flags;
+}
+
+/*
+ * Send WMI_PEER_ASSOC_CMDID: hand a peer's association state and rate
+ * capabilities (legacy, HT, VHT, HE) to firmware.  The command is a
+ * fixed struct followed, in order, by a byte-array TLV of legacy rates,
+ * a byte-array TLV of HT rates, a VHT rate-set struct, and an array of
+ * HE rate-set structs; 'ptr' walks this layout sequentially, so the
+ * pointer advances below must stay in step with the 'len' computation.
+ * Returns 0 on success or an errno; the mbuf is freed here on failure.
+ */
+int
+qwz_wmi_send_peer_assoc_cmd(struct qwz_softc *sc, uint8_t pdev_id,
+    struct peer_assoc_params *param)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_peer_assoc_complete_cmd *cmd;
+	struct wmi_vht_rate_set *mcs;
+	struct wmi_he_rate_set *he_mcs;
+	struct mbuf *m;
+	struct wmi_tlv *tlv;
+	void *ptr;
+	uint32_t peer_legacy_rates_align;
+	uint32_t peer_ht_rates_align;
+	int i, ret, len;
+
+	/* Rate arrays are padded to 32-bit alignment inside their TLVs. */
+	peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
+	    sizeof(uint32_t));
+	peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,
+	    sizeof(uint32_t));
+
+	len = sizeof(*cmd) +
+	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(uint8_t)) +
+	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(uint8_t)) +
+	      sizeof(*mcs) + TLV_HDR_SIZE +
+	      (sizeof(*he_mcs) * param->peer_he_mcs_count);
+
+	m = qwz_wmi_alloc_mbuf(len);
+	if (!m)
+		return ENOMEM;
+
+	/* The command TLV sits behind the HTC and WMI headers. */
+	ptr = (void *)(mtod(m, uint8_t *) + sizeof(struct ath12k_htc_hdr) +
+	    sizeof(struct wmi_cmd_hdr));
+
+	cmd = ptr;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+	    WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	cmd->vdev_id = param->vdev_id;
+
+	cmd->peer_new_assoc = param->peer_new_assoc;
+	cmd->peer_associd = param->peer_associd;
+
+	qwz_wmi_copy_peer_flags(cmd, param,
+	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags));
+
+	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, param->peer_mac);
+
+	cmd->peer_rate_caps = param->peer_rate_caps;
+	cmd->peer_caps = param->peer_caps;
+	cmd->peer_listen_intval = param->peer_listen_intval;
+	cmd->peer_ht_caps = param->peer_ht_caps;
+	cmd->peer_max_mpdu = param->peer_max_mpdu;
+	cmd->peer_mpdu_density = param->peer_mpdu_density;
+	cmd->peer_vht_caps = param->peer_vht_caps;
+	cmd->peer_phymode = param->peer_phymode;
+
+	/* Update 11ax capabilities */
+	cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
+	cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
+	cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
+	cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz;
+	cmd->peer_he_ops = param->peer_he_ops;
+	memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
+	       sizeof(param->peer_he_cap_phyinfo));
+	memcpy(&cmd->peer_ppet, &param->peer_ppet,
+	       sizeof(param->peer_ppet));
+
+	/* Update peer legacy rate information */
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+	    FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);
+
+	ptr += TLV_HDR_SIZE;
+
+	cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
+	memcpy(ptr, param->peer_legacy_rates.rates,
+	    param->peer_legacy_rates.num_rates);
+
+	/* Update peer HT rate information */
+	ptr += peer_legacy_rates_align;
+
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+	    FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
+	ptr += TLV_HDR_SIZE;
+	cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
+	memcpy(ptr, param->peer_ht_rates.rates,
+	    param->peer_ht_rates.num_rates);
+
+	/* VHT Rates */
+	ptr += peer_ht_rates_align;
+
+	mcs = ptr;
+
+	mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);
+
+	cmd->peer_nss = param->peer_nss;
+
+	/* Update bandwidth-NSS mapping */
+	cmd->peer_bw_rxnss_override = 0;
+	cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;
+
+	if (param->vht_capable) {
+		mcs->rx_max_rate = param->rx_max_rate;
+		mcs->rx_mcs_set = param->rx_mcs_set;
+		mcs->tx_max_rate = param->tx_max_rate;
+		mcs->tx_mcs_set = param->tx_mcs_set;
+	}
+
+	/* HE Rates */
+	cmd->peer_he_mcs = param->peer_he_mcs_count;
+	cmd->min_data_rate = param->min_data_rate;
+
+	ptr += sizeof(*mcs);
+
+	len = param->peer_he_mcs_count * sizeof(*he_mcs);
+
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+	    FIELD_PREP(WMI_TLV_LEN, len);
+	ptr += TLV_HDR_SIZE;
+
+	/* Loop through the HE rate set */
+	for (i = 0; i < param->peer_he_mcs_count; i++) {
+		he_mcs = ptr;
+		he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+		    WMI_TAG_HE_RATE_SET) |
+		    FIELD_PREP(WMI_TLV_LEN, sizeof(*he_mcs) - TLV_HDR_SIZE);
+
+		/*
+		 * NOTE(review): rx comes from the peer's tx set and vice
+		 * versa; this cross-assignment matches Linux ath11k/ath12k
+		 * (the peer's TX capability is what we receive) — confirm
+		 * it is intentional here too.
+		 */
+		he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i];
+		he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i];
+		ptr += sizeof(*he_mcs);
+	}
+
+	ret = qwz_wmi_cmd_send(wmi, m, WMI_PEER_ASSOC_CMDID);
+	if (ret) {
+		if (ret != ESHUTDOWN) {
+			printf("%s: failed to send WMI_PEER_ASSOC_CMDID\n",
+			    sc->sc_dev.dv_xname);
+		}
+		m_freem(m);
+		return ret;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: cmd peer assoc vdev id %d assoc id %d "
+	    "peer mac %s peer_flags %x rate_caps %x peer_caps %x "
+	    "listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d "
+	    "peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x "
+	    "he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
+	    __func__, cmd->vdev_id, cmd->peer_associd,
+	    ether_sprintf(param->peer_mac),
+	    cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
+	    cmd->peer_listen_intval, cmd->peer_ht_caps,
+	    cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
+	    cmd->peer_mpdu_density, cmd->peer_vht_caps, cmd->peer_he_cap_info,
+	    cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
+	    cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
+	    cmd->peer_he_cap_phy[2], cmd->peer_bw_rxnss_override);
+
+	return 0;
+}
+
+/*
+ * Copy the host-side target resource configuration into the wire-format
+ * WMI resource-config structure sent with WMI_INIT_CMDID.  This is a
+ * field-for-field translation; keep it in sync with both struct
+ * definitions when either changes.
+ */
+void
+qwz_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
+    struct target_resource_config *tg_cfg)
+{
+	wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
+	wmi_cfg->num_peers = tg_cfg->num_peers;
+	wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
+	wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
+	wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
+	wmi_cfg->num_tids = tg_cfg->num_tids;
+	wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
+	wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
+	wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
+	wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
+	wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
+	wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
+	wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
+	wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
+	wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
+	wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
+	wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
+	wmi_cfg->roam_offload_max_ap_profiles =
+	    tg_cfg->roam_offload_max_ap_profiles;
+	wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
+	wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
+	wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
+	wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
+	wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
+	wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
+	wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
+	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
+	    tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
+	wmi_cfg->vow_config = tg_cfg->vow_config;
+	wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
+	wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
+	wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
+	wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
+	wmi_cfg->num_tdls_conn_table_entries =
+	    tg_cfg->num_tdls_conn_table_entries;
+	wmi_cfg->beacon_tx_offload_max_vdev =
+	    tg_cfg->beacon_tx_offload_max_vdev;
+	wmi_cfg->num_multicast_filter_entries =
+	    tg_cfg->num_multicast_filter_entries;
+	wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
+	wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
+	wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
+	wmi_cfg->max_tdls_concurrent_sleep_sta =
+	    tg_cfg->max_tdls_concurrent_sleep_sta;
+	wmi_cfg->max_tdls_concurrent_buffer_sta =
+	    tg_cfg->max_tdls_concurrent_buffer_sta;
+	wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
+	wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
+	wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
+	wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
+	wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
+	wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
+	wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
+	wmi_cfg->flag1 = tg_cfg->flag1;
+	wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
+	wmi_cfg->sched_params = tg_cfg->sched_params;
+	wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
+	wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
+#ifdef notyet /* 6 GHz support */
+	wmi_cfg->host_service_flags &=
+	    ~(1 << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
+	wmi_cfg->host_service_flags |= (tg_cfg->is_reg_cc_ext_event_supported <<
+	    WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
+	wmi_cfg->flags2 = WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET;
+	wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt;
+	wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period;
+#endif
+}
+
+/*
+ * Send WMI_INIT_CMDID: the resource configuration, the host memory
+ * chunk list, and (unless the device is single-pdev) the HW mode with
+ * its band-to-mac mapping.  Returns 0 on success or an errno; the mbuf
+ * is freed here on failure.
+ */
+int
+qwz_init_cmd_send(struct qwz_pdev_wmi *wmi, struct wmi_init_cmd_param *param)
+{
+	struct mbuf *m;
+	struct wmi_init_cmd *cmd;
+	struct wmi_resource_config *cfg;
+	struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
+	struct wmi_pdev_band_to_mac *band_to_mac;
+	struct wlan_host_mem_chunk *host_mem_chunks;
+	struct wmi_tlv *tlv;
+	/* qwz_wmi_cmd_send() returns an int errno; do not widen it. */
+	int ret;
+	size_t len;
+	void *ptr;
+	uint32_t hw_mode_len = 0;
+	uint16_t idx;
+
+	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
+		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
+		    (param->num_band_to_mac * sizeof(*band_to_mac));
+
+	/* Reserve room for the maximum number of memory chunks up front. */
+	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
+	    (param->num_mem_chunks ?
+	    (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
+
+	m = qwz_wmi_alloc_mbuf(len);
+	if (!m)
+		return ENOMEM;
+
+	cmd = (struct wmi_init_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	ptr = mtod(m, uint8_t *) + sizeof(struct ath12k_htc_hdr) +
+	   sizeof(struct wmi_cmd_hdr) + sizeof(*cmd);
+	cfg = ptr;
+
+	qwz_wmi_copy_resource_config(cfg, param->res_cfg);
+
+	cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
+
+	ptr += sizeof(*cfg);
+	host_mem_chunks = ptr + TLV_HDR_SIZE;
+	len = sizeof(struct wlan_host_mem_chunk);
+
+	for (idx = 0; idx < param->num_mem_chunks; ++idx) {
+		host_mem_chunks[idx].tlv_header =
+		    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
+		    FIELD_PREP(WMI_TLV_LEN, len);
+
+		host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
+		host_mem_chunks[idx].size = param->mem_chunks[idx].len;
+		host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
+
+		DNPRINTF(QWZ_D_WMI,
+		    "%s: host mem chunk req_id %d paddr 0x%llx len %d\n",
+		    __func__, param->mem_chunks[idx].req_id,
+		    (uint64_t)param->mem_chunks[idx].paddr,
+		    param->mem_chunks[idx].len);
+	}
+	cmd->num_host_mem_chunks = param->num_mem_chunks;
+	len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
+
+	/* Array-struct TLV header; len is 0 when there are no chunks. */
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+	    FIELD_PREP(WMI_TLV_LEN, len);
+	ptr += TLV_HDR_SIZE + len;
+
+	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
+		hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
+		hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+		    WMI_TAG_PDEV_SET_HW_MODE_CMD) |
+		    FIELD_PREP(WMI_TLV_LEN, sizeof(*hw_mode) - TLV_HDR_SIZE);
+
+		hw_mode->hw_mode_index = param->hw_mode_id;
+		hw_mode->num_band_to_mac = param->num_band_to_mac;
+
+		ptr += sizeof(*hw_mode);
+
+		len = param->num_band_to_mac * sizeof(*band_to_mac);
+		tlv = ptr;
+		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+		    FIELD_PREP(WMI_TLV_LEN, len);
+
+		ptr += TLV_HDR_SIZE;
+		len = sizeof(*band_to_mac);
+
+		for (idx = 0; idx < param->num_band_to_mac; idx++) {
+			band_to_mac = (void *)ptr;
+
+			band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+			    WMI_TAG_PDEV_BAND_TO_MAC) |
+			    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+			band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
+			band_to_mac->start_freq =
+			    param->band_to_mac[idx].start_freq;
+			band_to_mac->end_freq =
+			    param->band_to_mac[idx].end_freq;
+			ptr += sizeof(*band_to_mac);
+		}
+	}
+
+	ret = qwz_wmi_cmd_send(wmi, m, WMI_INIT_CMDID);
+	if (ret) {
+		if (ret != ESHUTDOWN)
+			printf("%s: failed to send WMI_INIT_CMDID\n", __func__);
+		m_freem(m);
+		return ret;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: cmd wmi init\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Build the WMI init parameters from the hardware defaults and fire
+ * off WMI_INIT_CMDID through the first pdev's WMI endpoint.
+ */
+int
+qwz_wmi_cmd_init(struct qwz_softc *sc)
+{
+	struct qwz_wmi_base *wmi = &sc->wmi;
+	struct wmi_init_cmd_param init_param;
+	struct target_resource_config config;
+
+	memset(&config, 0, sizeof(config));
+	memset(&init_param, 0, sizeof(init_param));
+
+	/* Let the chip-specific hook fill in the resource defaults. */
+	sc->hw_params.hw_ops->wmi_init_config(sc, &config);
+
+	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT))
+		config.is_reg_cc_ext_event_supported = 1;
+
+	memcpy(&wmi->wlan_resource_config, &config, sizeof(config));
+
+	init_param.res_cfg = &wmi->wlan_resource_config;
+	init_param.num_mem_chunks = wmi->num_mem_chunks;
+	init_param.mem_chunks = wmi->mem_chunks;
+
+	/* Single-pdev devices do not select an explicit HW mode. */
+	if (sc->hw_params.single_pdev_only)
+		init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
+	else
+		init_param.hw_mode_id = wmi->preferred_hw_mode;
+
+	init_param.num_band_to_mac = sc->num_radios;
+	qwz_fill_band_to_mac_param(sc, init_param.band_to_mac);
+
+	return qwz_init_cmd_send(&wmi->wmi[0], &init_param);
+}
+
+/*
+ * Block until the firmware's unified-ready event has been seen, waking
+ * on the wakeup(9) issued by the event handler.  Each individual sleep
+ * times out after 5 seconds; returns 0 when ready, -1 on timeout or
+ * interrupted sleep.
+ */
+int
+qwz_wmi_wait_for_unified_ready(struct qwz_softc *sc)
+{
+	int err;
+
+	for (;;) {
+		if (sc->wmi.unified_ready)
+			return 0;
+		err = tsleep_nsec(&sc->wmi.unified_ready, 0, "qwzunfrdy",
+		    SEC_TO_NSEC(5));
+		if (err)
+			return -1;
+	}
+}
+
+/*
+ * Select a hardware mode (e.g. single/dual band operation) via
+ * WMI_PDEV_SET_HW_MODE_CMDID.  Returns 0 on success or an errno.
+ */
+int
+qwz_wmi_set_hw_mode(struct qwz_softc *sc,
+    enum wmi_host_hw_mode_config_type mode)
+{
+	struct qwz_wmi_base *wmi = &sc->wmi;
+	struct wmi_pdev_set_hw_mode_cmd_param *cmd;
+	struct mbuf *m;
+	int err;
+
+	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+	if (m == NULL)
+		return ENOMEM;
+
+	/* The command TLV sits behind the HTC and WMI headers. */
+	cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+	    WMI_TAG_PDEV_SET_HW_MODE_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->pdev_id = WMI_PDEV_ID_SOC;
+	cmd->hw_mode_index = mode;
+
+	err = qwz_wmi_cmd_send(&wmi->wmi[0], m, WMI_PDEV_SET_HW_MODE_CMDID);
+	if (err) {
+		if (err != ESHUTDOWN) {
+			printf("%s: failed to send "
+			    "WMI_PDEV_SET_HW_MODE_CMDID\n", __func__);
+		}
+		m_freem(m);
+		return err;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: cmd pdev set hw mode %d\n", __func__,
+	    cmd->hw_mode_index);
+
+	return 0;
+}
+
+/*
+ * Set a station powersave parameter on a vdev via
+ * WMI_STA_POWERSAVE_PARAM_CMDID.  Returns 0 on success or an errno.
+ */
+int
+qwz_wmi_set_sta_ps_param(struct qwz_softc *sc, uint32_t vdev_id,
+     uint8_t pdev_id, uint32_t param, uint32_t param_value)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+	struct wmi_sta_powersave_param_cmd *cmd;
+	struct mbuf *m;
+	int ret;
+
+	m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+	if (!m)
+		return ENOMEM;
+
+	/* The command TLV sits behind the HTC and WMI headers. */
+	cmd = (struct wmi_sta_powersave_param_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+	    WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	cmd->vdev_id = vdev_id;
+	cmd->param = param;
+	cmd->value = param_value;
+
+	ret = qwz_wmi_cmd_send(wmi, m, WMI_STA_POWERSAVE_PARAM_CMDID);
+	if (ret) {
+		if (ret != ESHUTDOWN) {
+			/* Message was missing its trailing newline. */
+			printf("%s: failed to send "
+			    "WMI_STA_POWERSAVE_PARAM_CMDID\n",
+			    sc->sc_dev.dv_xname);
+		}
+		m_freem(m);
+		return ret;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: cmd set powersave param vdev_id %d param %d "
+	    "value %d\n", __func__, vdev_id, param, param_value);
+
+	return 0;
+}
+
+int
+qwz_wmi_mgmt_send(struct qwz_softc *sc, struct qwz_vif *arvif, uint8_t pdev_id,
+    uint32_t buf_id, struct mbuf *frame, struct qwz_tx_data *tx_data)
+{
+       struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+       struct wmi_mgmt_send_cmd *cmd;
+       struct wmi_tlv *frame_tlv;
+       struct mbuf *m;
+       uint32_t buf_len;
+       int ret, len;
+       uint64_t paddr;
+
+       paddr = tx_data->map->dm_segs[0].ds_addr;
+
+       buf_len = frame->m_pkthdr.len < WMI_MGMT_SEND_DOWNLD_LEN ?
+           frame->m_pkthdr.len : WMI_MGMT_SEND_DOWNLD_LEN;
+
+       len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
+
+       m = qwz_wmi_alloc_mbuf(len);
+       if (!m)
+               return ENOMEM;
+
+       cmd = (struct wmi_mgmt_send_cmd *)(mtod(m, uint8_t *) +
+           sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
+           FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+       cmd->vdev_id = arvif->vdev_id;
+       cmd->desc_id = buf_id;
+       cmd->chanfreq = 0;
+       cmd->paddr_lo = paddr & 0xffffffff;
+       cmd->paddr_hi = paddr >> 32;
+       cmd->frame_len = frame->m_pkthdr.len;
+       cmd->buf_len = buf_len;
+       cmd->tx_params_valid = 0;
+
+       frame_tlv = (struct wmi_tlv *)(mtod(m, uint8_t *) +
+           sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
+           sizeof(*cmd));
+       frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+           FIELD_PREP(WMI_TLV_LEN, buf_len);
+
+       memcpy(frame_tlv->value, mtod(frame, void *), buf_len);
+#if 0 /* Not needed on OpenBSD? */
+       ath12k_ce_byte_swap(frame_tlv->value, buf_len);
+#endif
+       ret = qwz_wmi_cmd_send(wmi, m, WMI_MGMT_TX_SEND_CMDID);
+       if (ret) {
+               if (ret != ESHUTDOWN) {
+                       printf("%s: failed to submit "
+                           "WMI_MGMT_TX_SEND_CMDID cmd\n",
+                           sc->sc_dev.dv_xname);
+               }
+               m_freem(m);
+               return ret;
+       }
+
+       DNPRINTF(QWZ_D_WMI, "%s: cmd mgmt tx send", __func__);
+
+       tx_data->m = frame;
+       return 0;
+}
+
+/*
+ * Create a firmware vdev via WMI_VDEV_CREATE_CMDID.  The command is
+ * followed by an array-struct TLV containing one tx/rx stream
+ * configuration per supported band (2 GHz then 5 GHz).  Returns 0 on
+ * success or an errno; the mbuf is freed here on failure.
+ */
+int
+qwz_wmi_vdev_create(struct qwz_softc *sc, uint8_t *macaddr,
+    struct vdev_create_params *param)
+{
+	struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[param->pdev_id];
+	struct wmi_vdev_create_cmd *cmd;
+	struct mbuf *m;
+	struct wmi_vdev_txrx_streams *txrx_streams;
+	struct wmi_tlv *tlv;
+	int ret, len;
+	void *ptr;
+
+	/* It can be optimized my sending tx/rx chain configuration
+	 * only for supported bands instead of always sending it for
+	 * both the bands.
+	 */
+	len = sizeof(*cmd) + TLV_HDR_SIZE +
+		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+
+	m = qwz_wmi_alloc_mbuf(len);
+	if (!m)
+		return ENOMEM;
+
+	/* The command TLV sits behind the HTC and WMI headers. */
+	cmd = (struct wmi_vdev_create_cmd *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
+	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	cmd->vdev_id = param->if_id;
+	cmd->vdev_type = param->type;
+	cmd->vdev_subtype = param->subtype;
+	cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
+	cmd->pdev_id = param->pdev_id;
+	cmd->mbssid_flags = param->mbssid_flags;
+	cmd->mbssid_tx_vdev_id = param->mbssid_tx_vdev_id;
+
+	IEEE80211_ADDR_COPY(cmd->vdev_macaddr.addr, macaddr);
+
+	/* Array-struct TLV header for the per-band stream configs. */
+	ptr = (void *)(mtod(m, uint8_t *) +
+	    sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
+	    sizeof(*cmd));
+	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+	    FIELD_PREP(WMI_TLV_LEN, len);
+
+	/* First entry: 2 GHz band chains (param->chains[0]). */
+	ptr += TLV_HDR_SIZE;
+	txrx_streams = ptr;
+	len = sizeof(*txrx_streams);
+	txrx_streams->tlv_header =
+	    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
+	    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
+	txrx_streams->supported_tx_streams = param->chains[0].tx;
+	txrx_streams->supported_rx_streams = param->chains[0].rx;
+
+	/* Second entry: 5 GHz band chains (param->chains[1]). */
+	txrx_streams++;
+	txrx_streams->tlv_header =
+	    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
+	    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
+	txrx_streams->supported_tx_streams = param->chains[1].tx;
+	txrx_streams->supported_rx_streams = param->chains[1].rx;
+
+	ret = qwz_wmi_cmd_send(wmi, m, WMI_VDEV_CREATE_CMDID);
+	if (ret) {
+		if (ret != ESHUTDOWN) {
+			printf("%s: failed to submit WMI_VDEV_CREATE_CMDID\n",
+			    sc->sc_dev.dv_xname);
+		}
+		m_freem(m);
+		return ret;
+	}
+
+	DNPRINTF(QWZ_D_WMI, "%s: cmd vdev create id %d type %d subtype %d "
+	    "macaddr %s pdevid %d\n", __func__, param->if_id, param->type,
+	    param->subtype, ether_sprintf(macaddr), param->pdev_id);
+
+	return ret;
+}
+
+/*
+ * Set a single firmware parameter on the given vdev.
+ * Returns 0 on success or a positive errno; the mbuf is freed here on
+ * failure and handed off to qwz_wmi_cmd_send() otherwise.
+ */
+int
+qwz_wmi_vdev_set_param_cmd(struct qwz_softc *sc, uint32_t vdev_id,
+    uint8_t pdev_id, uint32_t param_id, uint32_t param_value)
+{
+       struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+       struct wmi_vdev_set_param_cmd *cmd;
+       struct mbuf *m;
+       int err;
+
+       m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+       if (m == NULL)
+               return ENOMEM;
+
+       /* The command TLV sits behind the HTC and WMI command headers. */
+       cmd = (struct wmi_vdev_set_param_cmd *)(mtod(m, uint8_t *) +
+           sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
+           FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+       cmd->vdev_id = vdev_id;
+       cmd->param_id = param_id;
+       cmd->param_value = param_value;
+
+       err = qwz_wmi_cmd_send(wmi, m, WMI_VDEV_SET_PARAM_CMDID);
+       if (err == 0) {
+               DNPRINTF(QWZ_D_WMI, "%s: cmd vdev set param vdev 0x%x param %d "
+                   "value %d\n", __func__, vdev_id, param_id, param_value);
+               return 0;
+       }
+
+       if (err != ESHUTDOWN) {
+               printf("%s: failed to send WMI_VDEV_SET_PARAM_CMDID\n",
+                   sc->sc_dev.dv_xname);
+       }
+       m_freem(m);
+       return err;
+}
+
+/*
+ * Bring a vdev up, binding it to the given BSSID and association ID.
+ * tx_bssid may be NULL; the transmitter BSSID and non-transmitting
+ * profile index/count are presumably for MBSSID operation (see the
+ * disabled Linux reference code below) -- confirm when MBSSID is wired up.
+ * Returns 0 on success or a positive errno.
+ */
+int
+qwz_wmi_vdev_up(struct qwz_softc *sc, uint32_t vdev_id, uint32_t pdev_id,
+    uint32_t aid, const uint8_t *bssid, uint8_t *tx_bssid,
+    uint32_t nontx_profile_idx, uint32_t nontx_profile_cnt)
+{
+       struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+       struct wmi_vdev_up_cmd *cmd;
+       struct mbuf *m;
+       int ret;
+
+       m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+       if (!m)
+               return ENOMEM;
+
+       /* The command TLV sits behind the HTC and WMI command headers. */
+       cmd = (struct wmi_vdev_up_cmd *)(mtod(m, uint8_t *) +
+           sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
+           FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+       cmd->vdev_id = vdev_id;
+       cmd->vdev_assoc_id = aid;
+
+       IEEE80211_ADDR_COPY(cmd->vdev_bssid.addr, bssid);
+
+       cmd->nontx_profile_idx = nontx_profile_idx;
+       cmd->nontx_profile_cnt = nontx_profile_cnt;
+       if (tx_bssid)
+               IEEE80211_ADDR_COPY(cmd->tx_vdev_bssid.addr, tx_bssid);
+#if 0
+       if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) {
+               bss_conf = &arvif->vif->bss_conf;
+
+               if (bss_conf->nontransmitted) {
+                       ether_addr_copy(cmd->tx_vdev_bssid.addr,
+                                       bss_conf->transmitter_bssid);
+                       cmd->nontx_profile_idx = bss_conf->bssid_index;
+                       cmd->nontx_profile_cnt = bss_conf->bssid_indicator;
+               }
+       }
+#endif
+       ret = qwz_wmi_cmd_send(wmi, m, WMI_VDEV_UP_CMDID);
+       if (ret) {
+               if (ret != ESHUTDOWN) {
+                       printf("%s: failed to submit WMI_VDEV_UP cmd\n",
+                           sc->sc_dev.dv_xname);
+               }
+               m_freem(m);
+               return ret;
+       }
+
+       DNPRINTF(QWZ_D_WMI, "%s: cmd vdev up id 0x%x assoc id %d bssid %s\n",
+           __func__, vdev_id, aid, ether_sprintf((u_char *)bssid));
+
+       return 0;
+}
+
+/*
+ * Take a vdev down again (inverse of qwz_wmi_vdev_up()).
+ * Returns 0 on success or a positive errno; the mbuf is freed here on
+ * failure and handed off to qwz_wmi_cmd_send() otherwise.
+ */
+int
+qwz_wmi_vdev_down(struct qwz_softc *sc, uint32_t vdev_id, uint8_t pdev_id)
+{
+       struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+       struct wmi_vdev_down_cmd *cmd;
+       struct mbuf *m;
+       int err;
+
+       m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+       if (m == NULL)
+               return ENOMEM;
+
+       /* The command TLV sits behind the HTC and WMI command headers. */
+       cmd = (struct wmi_vdev_down_cmd *)(mtod(m, uint8_t *) +
+           sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
+           FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+       cmd->vdev_id = vdev_id;
+
+       err = qwz_wmi_cmd_send(wmi, m, WMI_VDEV_DOWN_CMDID);
+       if (err == 0) {
+               DNPRINTF(QWZ_D_WMI, "%s: cmd vdev down id 0x%x\n", __func__,
+                   vdev_id);
+               return 0;
+       }
+
+       if (err != ESHUTDOWN) {
+               printf("%s: failed to submit WMI_VDEV_DOWN cmd\n",
+                   sc->sc_dev.dv_xname);
+       }
+       m_freem(m);
+       return err;
+}
+
+/*
+ * Translate a vdev start request's channel description into the
+ * firmware's wmi_channel representation.
+ */
+void
+qwz_wmi_put_wmi_channel(struct wmi_channel *chan,
+    struct wmi_vdev_start_req_arg *arg)
+{
+       uint32_t cf1 = arg->channel.band_center_freq1;
+
+       memset(chan, 0, sizeof(*chan));
+
+       chan->mhz = arg->channel.freq;
+       chan->band_center_freq1 = cf1;
+
+       switch (arg->channel.mode) {
+       case MODE_11AX_HE160:
+               /*
+                * For HE160 firmware wants the primary 80 MHz segment
+                * center in freq1 and the 160 MHz center in freq2.
+                */
+               if (arg->channel.freq > cf1)
+                       chan->band_center_freq1 = cf1 + 40;
+               else
+                       chan->band_center_freq1 = cf1 - 40;
+               chan->band_center_freq2 = cf1;
+               break;
+       case MODE_11AC_VHT80_80:
+       case MODE_11AX_HE80_80:
+               chan->band_center_freq2 = arg->channel.band_center_freq2;
+               break;
+       default:
+               chan->band_center_freq2 = 0;
+               break;
+       }
+
+       /* Translate channel properties into firmware info flags. */
+       chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
+       if (arg->channel.passive)
+               chan->info |= WMI_CHAN_INFO_PASSIVE;
+       if (arg->channel.allow_ibss)
+               chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
+       if (arg->channel.allow_ht)
+               chan->info |= WMI_CHAN_INFO_ALLOW_HT;
+       if (arg->channel.allow_vht)
+               chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
+       if (arg->channel.allow_he)
+               chan->info |= WMI_CHAN_INFO_ALLOW_HE;
+       if (arg->channel.ht40plus)
+               chan->info |= WMI_CHAN_INFO_HT40_PLUS;
+       if (arg->channel.chan_radar)
+               chan->info |= WMI_CHAN_INFO_DFS;
+       if (arg->channel.freq2_radar)
+               chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
+
+       /* Regulatory power limits and antenna gain. */
+       chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+           arg->channel.max_power) |
+           FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+           arg->channel.max_reg_power);
+
+       chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+           arg->channel.max_antenna_gain) |
+           FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
+           arg->channel.max_power);
+}
+
+/*
+ * Ask the firmware to stop the given vdev.
+ * Returns 0 on success or a positive errno; the mbuf is freed here on
+ * failure and handed off to qwz_wmi_cmd_send() otherwise.
+ */
+int
+qwz_wmi_vdev_stop(struct qwz_softc *sc, uint8_t vdev_id, uint8_t pdev_id)
+{
+       struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+       struct wmi_vdev_stop_cmd *cmd;
+       struct mbuf *m;
+       int err;
+
+       m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+       if (m == NULL)
+               return ENOMEM;
+
+       /* The command TLV sits behind the HTC and WMI command headers. */
+       cmd = (struct wmi_vdev_stop_cmd *)(mtod(m, uint8_t *) +
+           sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
+           FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+       cmd->vdev_id = vdev_id;
+
+       err = qwz_wmi_cmd_send(wmi, m, WMI_VDEV_STOP_CMDID);
+       if (err == 0) {
+               DNPRINTF(QWZ_D_WMI, "%s: cmd vdev stop id 0x%x\n", __func__,
+                   vdev_id);
+               return 0;
+       }
+
+       if (err != ESHUTDOWN) {
+               printf("%s: failed to submit WMI_VDEV_STOP cmd\n",
+                   sc->sc_dev.dv_xname);
+       }
+       m_freem(m);
+       return err;
+}
+
+/*
+ * Start (or, with restart != 0, restart) a vdev on a given channel.
+ *
+ * Message layout:
+ *   [htc hdr][wmi cmd hdr][wmi_vdev_start_request_cmd]
+ *   [wmi_channel][empty array-struct TLV for NoA descriptors]
+ *
+ * SSID and hidden/PMF flags are only set on a fresh start, not on
+ * restart.  Returns 0 on success or a positive errno.
+ */
+int
+qwz_wmi_vdev_start(struct qwz_softc *sc, struct wmi_vdev_start_req_arg *arg,
+    int pdev_id, int restart)
+{
+       struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+       struct wmi_vdev_start_request_cmd *cmd;
+       struct mbuf *m;
+       struct wmi_channel *chan;
+       struct wmi_tlv *tlv;
+       void *ptr;
+       int ret, len;
+
+       if (arg->ssid_len > sizeof(cmd->ssid.ssid))
+               return EINVAL;
+
+       len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+
+       m = qwz_wmi_alloc_mbuf(len);
+       if (!m)
+               return ENOMEM;
+
+       /* The command structure follows the HTC and WMI command headers. */
+       cmd = (struct wmi_vdev_start_request_cmd *)(mtod(m, uint8_t *) +
+           sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+           WMI_TAG_VDEV_START_REQUEST_CMD) |
+           FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+       cmd->vdev_id = arg->vdev_id;
+       cmd->beacon_interval = arg->bcn_intval;
+       cmd->bcn_tx_rate = arg->bcn_tx_rate;
+       cmd->dtim_period = arg->dtim_period;
+       cmd->num_noa_descriptors = arg->num_noa_descriptors;
+       cmd->preferred_rx_streams = arg->pref_rx_streams;
+       cmd->preferred_tx_streams = arg->pref_tx_streams;
+       cmd->cac_duration_ms = arg->cac_duration_ms;
+       cmd->regdomain = arg->regdomain;
+       cmd->he_ops = arg->he_ops;
+       cmd->mbssid_flags = arg->mbssid_flags;
+       cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id;
+
+       if (!restart) {
+               if (arg->ssid) {
+                       cmd->ssid.ssid_len = arg->ssid_len;
+                       memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+               }
+               if (arg->hidden_ssid)
+                       cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
+               if (arg->pmf_enabled)
+                       cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
+       }
+
+       /* NOTE(review): flags is only ever OR'ed; assumes the mbuf from
+        * qwz_wmi_alloc_mbuf() is zero-filled -- confirm. */
+       cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
+       if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags))
+               cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;
+
+       ptr = mtod(m, void *) + sizeof(struct ath12k_htc_hdr) +
+           sizeof(struct wmi_cmd_hdr) + sizeof(*cmd);
+       chan = ptr;
+
+       qwz_wmi_put_wmi_channel(chan, arg);
+
+       chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
+           FIELD_PREP(WMI_TLV_LEN, sizeof(*chan) - TLV_HDR_SIZE);
+       ptr += sizeof(*chan);
+
+       /* Empty NoA descriptor array; firmware still expects the header. */
+       tlv = ptr;
+       tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+           FIELD_PREP(WMI_TLV_LEN, 0);
+
+       /* Note: This is a nested TLV containing:
+        * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+        */
+
+       ptr += sizeof(*tlv);
+
+       ret = qwz_wmi_cmd_send(wmi, m, restart ?
+           WMI_VDEV_RESTART_REQUEST_CMDID : WMI_VDEV_START_REQUEST_CMDID);
+       if (ret) {
+               if (ret != ESHUTDOWN) {
+                       printf("%s: failed to submit vdev_%s cmd\n",
+                           sc->sc_dev.dv_xname, restart ? "restart" : "start");
+               }
+               m_freem(m);
+               return ret;
+       }
+
+       DNPRINTF(QWZ_D_WMI, "%s: cmd vdev %s id 0x%x freq %u mode 0x%x\n",
+          __func__, restart ? "restart" : "start", arg->vdev_id,
+          arg->channel.freq, arg->channel.mode);
+
+       return ret;
+}
+
+int
+qwz_core_start(struct qwz_softc *sc)
+{
+       int ret;
+
+       ret = qwz_wmi_attach(sc);
+       if (ret) {
+               printf("%s: failed to attach wmi: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       ret = qwz_htc_init(sc);
+       if (ret) {
+               printf("%s: failed to init htc: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_wmi_detach;
+       }
+
+       ret = sc->ops.start(sc);
+       if (ret) {
+               printf("%s: failed to start host interface: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_wmi_detach;
+       }
+
+       ret = qwz_htc_wait_target(sc);
+       if (ret) {
+               printf("%s: failed to connect to HTC: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_hif_stop;
+       }
+
+       ret = qwz_dp_htt_connect(&sc->dp);
+       if (ret) {
+               printf("%s: failed to connect to HTT: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_hif_stop;
+       }
+
+       ret = qwz_wmi_connect(sc);
+       if (ret) {
+               printf("%s: failed to connect wmi: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_hif_stop;
+       }
+
+       sc->wmi.service_ready = 0;
+
+       ret = qwz_htc_start(&sc->htc);
+       if (ret) {
+               printf("%s: failed to start HTC: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_hif_stop;
+       }
+
+       ret = qwz_wmi_wait_for_service_ready(sc);
+       if (ret) {
+               printf("%s: failed to receive wmi service ready event: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_hif_stop;
+       }
+#if 0
+       ret = ath12k_mac_allocate(ab);
+       if (ret) {
+               ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
+                          ret);
+               goto err_hif_stop;
+       }
+       ath12k_dp_pdev_pre_alloc(sc);
+#endif
+       ret = qwz_dp_pdev_reo_setup(sc);
+       if (ret) {
+               printf("%s: failed to initialize reo destination rings: %d\n",
+                   __func__, ret);
+               goto err_mac_destroy;
+       }
+
+       ret = qwz_wmi_cmd_init(sc);
+       if (ret) {
+               printf("%s: failed to send wmi init cmd: %d\n", __func__, ret);
+               goto err_reo_cleanup;
+       }
+
+       ret = qwz_wmi_wait_for_unified_ready(sc);
+       if (ret) {
+               printf("%s: failed to receive wmi unified ready event: %d\n",
+                   __func__, ret);
+               goto err_reo_cleanup;
+       }
+
+       /* put hardware to DBS mode */
+       if (sc->hw_params.single_pdev_only &&
+           sc->hw_params.num_rxmda_per_pdev > 1) {
+               ret = qwz_wmi_set_hw_mode(sc, WMI_HOST_HW_MODE_DBS);
+               if (ret) {
+                       printf("%s: failed to send dbs mode: %d\n",
+                           __func__, ret);
+                       goto err_hif_stop;
+               }
+       }
+
+       ret = qwz_dp_tx_htt_h2t_ver_req_msg(sc);
+       if (ret) {
+               if (ret != ENOTSUP) {
+                       printf("%s: failed to send htt version "
+                           "request message: %d\n", __func__, ret);
+               }
+               goto err_reo_cleanup;
+       }
+
+       return 0;
+err_reo_cleanup:
+       qwz_dp_pdev_reo_cleanup(sc);
+err_mac_destroy:
+#if 0
+       ath12k_mac_destroy(ab);
+#endif
+err_hif_stop:
+       sc->ops.stop(sc);
+err_wmi_detach:
+       qwz_wmi_detach(sc);
+       return ret;
+}
+
+/*
+ * Tear down the layers brought up by qwz_core_start().
+ */
+void
+qwz_core_stop(struct qwz_softc *sc)
+{
+       /* After a firmware crash there is nothing left to talk to. */
+       if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags))
+               qwz_qmi_firmware_stop(sc);
+
+       sc->ops.stop(sc);
+       qwz_wmi_detach(sc);
+       qwz_dp_pdev_reo_cleanup(sc);
+}
+
+/*
+ * Undo qwz_core_pdev_create(): free the per-pdev datapath state.
+ */
+void
+qwz_core_pdev_destroy(struct qwz_softc *sc)
+{
+       qwz_dp_pdev_free(sc);
+}
+
+int
+qwz_core_pdev_create(struct qwz_softc *sc)
+{
+       int ret;
+
+       ret = qwz_dp_pdev_alloc(sc);
+       if (ret) {
+               printf("%s: failed to attach DP pdev: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       ret = qwz_mac_register(sc);
+       if (ret) {
+               printf("%s: failed register the radio with mac80211: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_dp_pdev_free;
+       }
+#if 0
+
+       ret = ath12k_thermal_register(ab);
+       if (ret) {
+               ath12k_err(ab, "could not register thermal device: %d\n",
+                          ret);
+               goto err_mac_unregister;
+       }
+
+       ret = ath12k_spectral_init(ab);
+       if (ret) {
+               ath12k_err(ab, "failed to init spectral %d\n", ret);
+               goto err_thermal_unregister;
+       }
+#endif
+       return 0;
+#if 0
+err_thermal_unregister:
+       ath12k_thermal_unregister(ab);
+err_mac_unregister:
+       ath12k_mac_unregister(ab);
+#endif
+err_dp_pdev_free:
+       qwz_dp_pdev_free(sc);
+#if 0
+err_pdev_debug:
+       ath12k_debugfs_pdev_destroy(ab);
+#endif
+       return ret;
+}
+
+/*
+ * Full teardown of the driver core: disable interrupts, stop the core
+ * layers, destroy the pdev, power the device down and release DP and
+ * QMI resources.  Runs at splnet() to block network interrupts while
+ * state is being torn down.
+ */
+void
+qwz_core_deinit(struct qwz_softc *sc)
+{
+       struct ath12k_hal *hal = &sc->hal;
+       int s = splnet();
+
+#ifdef notyet
+       mutex_lock(&ab->core_lock);
+#endif
+       sc->ops.irq_disable(sc);
+
+       qwz_core_stop(sc);
+       qwz_core_pdev_destroy(sc);
+#ifdef notyet
+       mutex_unlock(&ab->core_lock);
+#endif
+       sc->ops.power_down(sc);
+#if 0
+       ath12k_mac_destroy(ab);
+       ath12k_debugfs_soc_destroy(ab);
+#endif
+       qwz_dp_free(sc);
+#if 0
+       ath12k_reg_free(ab);
+#endif
+       qwz_qmi_deinit_service(sc);
+
+       /* Force shadow registers to be set up again on the next init. */
+       hal->num_shadow_reg_configured = 0;
+
+       splx(s);
+}
+
+int
+qwz_core_qmi_firmware_ready(struct qwz_softc *sc)
+{
+       int ret;
+
+       ret = qwz_core_start_firmware(sc, sc->fw_mode);
+       if (ret) {
+               printf("%s: failed to start firmware: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       ret = qwz_ce_init_pipes(sc);
+       if (ret) {
+               printf("%s: failed to initialize CE: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_firmware_stop;
+       }
+
+       ret = qwz_dp_alloc(sc);
+       if (ret) {
+               printf("%s: failed to init DP: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_firmware_stop;
+       }
+
+       switch (sc->crypto_mode) {
+       case ATH12K_CRYPT_MODE_SW:
+               set_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags);
+               set_bit(ATH12K_FLAG_RAW_MODE, sc->sc_flags);
+               break;
+       case ATH12K_CRYPT_MODE_HW:
+               clear_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags);
+               clear_bit(ATH12K_FLAG_RAW_MODE, sc->sc_flags);
+               break;
+       default:
+               printf("%s: invalid crypto_mode: %d\n",
+                   sc->sc_dev.dv_xname, sc->crypto_mode);
+               return EINVAL;
+       }
+
+       if (sc->frame_mode == ATH12K_HW_TXRX_RAW)
+               set_bit(ATH12K_FLAG_RAW_MODE, sc->sc_flags);
+#if 0
+       mutex_lock(&ab->core_lock);
+#endif
+       ret = qwz_core_start(sc);
+       if (ret) {
+               printf("%s: failed to start core: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_dp_free;
+       }
+
+       if (!sc->attached) {
+               printf("%s: %s fw 0x%x address %s\n", sc->sc_dev.dv_xname,
+                   sc->hw_params.name, sc->qmi_target.fw_version,
+                   ether_sprintf(sc->mac_addr));
+       }
+
+       ret = qwz_core_pdev_create(sc);
+       if (ret) {
+               printf("%s: failed to create pdev core: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_core_stop;
+       }
+
+#if 0 /* TODO: Is this in the right spot for OpenBSD? */
+       sc->ops.irq_enable(sc);
+#endif
+
+#if 0
+       mutex_unlock(&ab->core_lock);
+#endif
+
+       return 0;
+err_core_stop:
+       qwz_core_stop(sc);
+#if 0
+       ath12k_mac_destroy(ab);
+#endif
+err_dp_free:
+       qwz_dp_free(sc);
+#if 0
+       mutex_unlock(&ab->core_lock);
+#endif
+err_firmware_stop:
+       qwz_qmi_firmware_stop(sc);
+
+       return ret;
+}
+
+/*
+ * Handle the firmware's QMI "init done" indication: either run
+ * cold-boot calibration first, or proceed with full bring-up.
+ * Sets ATH12K_FLAG_QMI_FAIL if bring-up fails.
+ */
+void
+qwz_qmi_fw_init_done(struct qwz_softc *sc)
+{
+       clear_bit(ATH12K_FLAG_QMI_FAIL, sc->sc_flags);
+
+       if (sc->qmi_cal_done == 0 && sc->hw_params.cold_boot_calib) {
+               qwz_qmi_process_coldboot_calibration(sc);
+               return;
+       }
+
+       clear_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags);
+       clear_bit(ATH12K_FLAG_RECOVERY, sc->sc_flags);
+
+       if (qwz_core_qmi_firmware_ready(sc) != 0)
+               set_bit(ATH12K_FLAG_QMI_FAIL, sc->sc_flags);
+}
+
+/*
+ * Run the QMI handshake once the firmware's QMI server shows up:
+ * register for indications, send host capabilities and memory
+ * segments, load the board data file and M3 firmware, then wait for
+ * the firmware init-done indication.
+ * Returns 0 on success or a positive errno.
+ */
+int
+qwz_qmi_event_server_arrive(struct qwz_softc *sc)
+{
+       int ret;
+
+       sc->fw_init_done = 0;
+       sc->expect_fwmem_req = 1;
+
+       /* NOTE(review): the < 0 checks below mirror qwx; confirm these
+        * callees indeed return negative values on error, unlike the
+        * positive-errno convention used elsewhere in this file. */
+       ret = qwz_qmi_fw_ind_register_send(sc);
+       if (ret < 0) {
+               printf("%s: failed to send qmi firmware indication: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               sc->expect_fwmem_req = 0;
+               return ret;
+       }
+
+       ret = qwz_qmi_host_cap_send(sc);
+       if (ret < 0) {
+               printf("%s: failed to send qmi host cap: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               sc->expect_fwmem_req = 0;
+               return ret;
+       }
+
+       /* Firmware may ask us to retry the memory segment transfer once. */
+       ret = qwz_qmi_mem_seg_send(sc);
+       if (ret == EBUSY)
+               ret = qwz_qmi_mem_seg_send(sc);
+       sc->expect_fwmem_req = 0;
+       if (ret) {
+               printf("%s: failed to send qmi memory segments: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       ret = qwz_qmi_event_load_bdf(sc);
+       if (ret < 0) {
+               printf("%s: qmi failed to download BDF:%d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       ret = qwz_qmi_wlanfw_m3_info_send(sc);
+       if (ret) {
+               printf("%s: qmi m3 info send failed:%d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       while (!sc->fw_init_done) {
+               ret = tsleep_nsec(&sc->fw_init_done, 0, "qwzfwinit",
+                   SEC_TO_NSEC(10));
+               if (ret) {
+                       printf("%s: fw init timeout\n", sc->sc_dev.dv_xname);
+                       /*
+                        * Return the tsleep error (EWOULDBLOCK on timeout)
+                        * instead of -1, keeping the positive-errno
+                        * convention used by the other error paths.
+                        */
+                       return ret;
+               }
+       }
+
+       qwz_qmi_fw_init_done(sc);
+       return 0;
+}
+
+/*
+ * First-stage core initialization: bring up the QMI service and
+ * power on the device.  Returns 0 on success or a positive errno.
+ */
+int
+qwz_core_init(struct qwz_softc *sc)
+{
+       int error;
+
+       error = qwz_qmi_init_service(sc);
+       if (error) {
+               /* Include the device name like every other diagnostic. */
+               printf("%s: failed to initialize qmi: %d\n",
+                   sc->sc_dev.dv_xname, error);
+               return error;
+       }
+
+       error = sc->ops.power_up(sc);
+       if (error)
+               qwz_qmi_deinit_service(sc);
+
+       return error;
+}
+
+/*
+ * Find the hardware parameter table entry matching the detected
+ * hardware revision and copy it into the softc.
+ * Returns 0 on success or EINVAL for unknown hardware.
+ */
+int
+qwz_init_hw_params(struct qwz_softc *sc)
+{
+       const struct ath12k_hw_params *match = NULL;
+       int i;
+
+       for (i = 0; i < nitems(ath12k_hw_params); i++) {
+               if (ath12k_hw_params[i].hw_rev == sc->sc_hw_rev) {
+                       match = &ath12k_hw_params[i];
+                       break;
+               }
+       }
+
+       if (match == NULL) {
+               printf("%s: Unsupported hardware version: 0x%x\n",
+                   sc->sc_dev.dv_xname, sc->sc_hw_rev);
+               return EINVAL;
+       }
+
+       sc->hw_params = *match;
+
+       DPRINTF("%s: %s\n", sc->sc_dev.dv_xname, sc->hw_params.name);
+
+       return 0;
+}
+
+/*
+ * Template for the SRNG (shared ring) configuration table.  A mutable
+ * copy is made into hal->srng_config and patched with per-chip register
+ * offsets by qwz_hal_srng_create_config().
+ */
+static const struct hal_srng_config hw_srng_config_templ[QWZ_NUM_SRNG_CFG] = {
+       /* TODO: max_rings can be populated by querying HW capabilities */
+       { /* REO_DST */
+               .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
+               .max_rings = 4,
+               .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+               .lmac_ring = false,
+               .ring_dir = HAL_SRNG_DIR_DST,
+               .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
+       },
+
+       { /* REO_EXCEPTION */
+               /* Designating REO2TCL ring as exception ring. This ring is
+                * similar to other REO2SW rings though it is named as REO2TCL.
+                * Any of the REO2SW rings can be used as exception ring.
+                */
+               .start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
+               .max_rings = 1,
+               .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+               .lmac_ring = false,
+               .ring_dir = HAL_SRNG_DIR_DST,
+               .max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* REO_REINJECT */
+               .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
+               .max_rings = 1,
+               .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+               .lmac_ring = false,
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* REO_CMD */
+               .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
+               .max_rings = 1,
+               .entry_size = (sizeof(struct hal_tlv_hdr) +
+                       sizeof(struct hal_reo_get_queue_stats)) >> 2,
+               .lmac_ring = false,
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* REO_STATUS */
+               .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
+               .max_rings = 1,
+               .entry_size = (sizeof(struct hal_tlv_hdr) +
+                       sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
+               .lmac_ring = false,
+               .ring_dir = HAL_SRNG_DIR_DST,
+               .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* TCL_DATA */
+               .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
+               .max_rings = 3,
+               .entry_size = (sizeof(struct hal_tlv_hdr) +
+                            sizeof(struct hal_tcl_data_cmd)) >> 2,
+               .lmac_ring = false,
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* TCL_CMD */
+               .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
+               .max_rings = 1,
+               .entry_size = (sizeof(struct hal_tlv_hdr) +
+                            sizeof(struct hal_tcl_gse_cmd)) >> 2,
+               .lmac_ring =  false,
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* TCL_STATUS */
+               .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
+               .max_rings = 1,
+               .entry_size = (sizeof(struct hal_tlv_hdr) +
+                            sizeof(struct hal_tcl_status_ring)) >> 2,
+               .lmac_ring = false,
+               .ring_dir = HAL_SRNG_DIR_DST,
+               .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* CE_SRC */
+               .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
+               .max_rings = 12,
+               .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
+               .lmac_ring = false,
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* CE_DST */
+               .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
+               .max_rings = 12,
+               .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
+               .lmac_ring = false,
+               /* NOTE(review): DIR_SRC for a DST ring matches upstream
+                * ath11k/ath12k; the host fills this ring -- confirm. */
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* CE_DST_STATUS */
+               .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
+               .max_rings = 12,
+               .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
+               .lmac_ring = false,
+               .ring_dir = HAL_SRNG_DIR_DST,
+               .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* WBM_IDLE_LINK */
+               .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
+               .max_rings = 1,
+               .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
+               .lmac_ring = false,
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* SW2WBM_RELEASE */
+               .start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
+               .max_rings = 1,
+               .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+               .lmac_ring = false,
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* WBM2SW_RELEASE */
+               .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+               .max_rings = 5,
+               .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+               .lmac_ring = false,
+               .ring_dir = HAL_SRNG_DIR_DST,
+               .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
+       },
+       { /* RXDMA_BUF */
+               .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
+               .max_rings = 2,
+               .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+               .lmac_ring = true,
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_RXDMA_RING_MAX_SIZE,
+       },
+       { /* RXDMA_DST */
+               .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+               .max_rings = 1,
+               .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+               .lmac_ring = true,
+               .ring_dir = HAL_SRNG_DIR_DST,
+               .max_size = HAL_RXDMA_RING_MAX_SIZE,
+       },
+       { /* RXDMA_MONITOR_BUF */
+               .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
+               .max_rings = 1,
+               .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+               .lmac_ring = true,
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_RXDMA_RING_MAX_SIZE,
+       },
+       { /* RXDMA_MONITOR_STATUS */
+               .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+               .max_rings = 1,
+               .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+               .lmac_ring = true,
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_RXDMA_RING_MAX_SIZE,
+       },
+       { /* RXDMA_MONITOR_DST */
+               .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+               .max_rings = 1,
+               .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+               .lmac_ring = true,
+               .ring_dir = HAL_SRNG_DIR_DST,
+               .max_size = HAL_RXDMA_RING_MAX_SIZE,
+       },
+       { /* RXDMA_MONITOR_DESC */
+               .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+               .max_rings = 1,
+               .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+               .lmac_ring = true,
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_RXDMA_RING_MAX_SIZE,
+       },
+       { /* RXDMA DIR BUF */
+               .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+               .max_rings = 1,
+               .entry_size = 8 >> 2, /* TODO: Define the struct */
+               .lmac_ring = true,
+               .ring_dir = HAL_SRNG_DIR_SRC,
+               .max_size = HAL_RXDMA_RING_MAX_SIZE,
+       },
+};
+
+/*
+ * Build the runtime SRNG register configuration: start from the static
+ * template and fill in the chip-specific register offsets (reg_start[])
+ * and, for ring types with several instances, the register stride
+ * between consecutive rings (reg_size[]).  Always returns 0.
+ */
+int
+qwz_hal_srng_create_config(struct qwz_softc *sc)
+{
+       struct ath12k_hal *hal = &sc->hal;
+       struct hal_srng_config *s;
+
+       memcpy(hal->srng_config, hw_srng_config_templ,
+           sizeof(hal->srng_config));
+
+       /* reg_start[0] is the R0 (BASE_LSB) group, reg_start[1] the HP. */
+       s = &hal->srng_config[HAL_REO_DST];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(sc);
+       s->reg_size[0] = HAL_REO2_RING_BASE_LSB(sc) - HAL_REO1_RING_BASE_LSB(sc);
+       s->reg_size[1] = HAL_REO2_RING_HP(sc) - HAL_REO1_RING_HP(sc);
+
+       s = &hal->srng_config[HAL_REO_EXCEPTION];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(sc);
+
+       s = &hal->srng_config[HAL_REO_REINJECT];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(sc);
+
+       s = &hal->srng_config[HAL_REO_CMD];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(sc);
+
+       s = &hal->srng_config[HAL_REO_STATUS];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(sc);
+
+       s = &hal->srng_config[HAL_TCL_DATA];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+       s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(sc) - HAL_TCL1_RING_BASE_LSB(sc);
+       s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+
+       s = &hal->srng_config[HAL_TCL_CMD];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
+
+       s = &hal->srng_config[HAL_TCL_STATUS];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+
+       /* CE rings: the stride is the distance between CE0 and CE1 blocks. */
+       s = &hal->srng_config[HAL_CE_SRC];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc) + HAL_CE_DST_RING_BASE_LSB +
+               ATH12K_CE_OFFSET(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc) + HAL_CE_DST_RING_HP +
+               ATH12K_CE_OFFSET(sc);
+       s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(sc) -
+               HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc);
+       s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(sc) -
+               HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc);
+
+       s = &hal->srng_config[HAL_CE_DST];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) + HAL_CE_DST_RING_BASE_LSB +
+               ATH12K_CE_OFFSET(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) + HAL_CE_DST_RING_HP +
+               ATH12K_CE_OFFSET(sc);
+       s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
+               HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);
+       s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
+               HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);
+
+       s = &hal->srng_config[HAL_CE_DST_STATUS];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) +
+               HAL_CE_DST_STATUS_RING_BASE_LSB + ATH12K_CE_OFFSET(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) + HAL_CE_DST_STATUS_RING_HP +
+               ATH12K_CE_OFFSET(sc);
+       s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
+               HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);
+       s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
+               HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);
+
+       s = &hal->srng_config[HAL_WBM_IDLE_LINK];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
+
+       s = &hal->srng_config[HAL_SW2WBM_RELEASE];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;
+
+       s = &hal->srng_config[HAL_WBM2SW_RELEASE];
+       s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(sc);
+       s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
+       s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(sc) -
+               HAL_WBM0_RELEASE_RING_BASE_LSB(sc);
+       s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
+
+       return 0;
+}
+
+/*
+ * Map a (ring type, ring number, MAC index) tuple to a global SRNG ring
+ * ID.  Returns -1 if the ring number is out of range for the type or if
+ * the resulting ID exceeds the ring ID space.
+ */
+int
+qwz_hal_srng_get_ring_id(struct qwz_softc *sc,
+    enum hal_ring_type type, int ring_num, int mac_id)
+{
+       struct hal_srng_config *cfg = &sc->hal.srng_config[type];
+       int id;
+
+       if (ring_num >= cfg->max_rings) {
+               printf("%s: invalid ring number :%d\n", __func__, ring_num);
+               return -1;
+       }
+
+       id = cfg->start_ring_id + ring_num;
+       /* LMAC rings are grouped per MAC within the ring ID space. */
+       if (cfg->lmac_ring)
+               id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
+
+       if (id >= HAL_SRNG_RING_ID_MAX) {
+               printf("%s: invalid ring ID :%d\n", __func__, id);
+               return -1;
+       }
+
+       return id;
+}
+
+/*
+ * Point a ring's host-updated pointer (TP for destination rings, HP for
+ * source rings) at the shadow register with index 'shadow_cfg_idx', so
+ * head/tail pointer updates go through the shadow register region.
+ */
+void
+qwz_hal_srng_update_hp_tp_addr(struct qwz_softc *sc, int shadow_cfg_idx,
+    enum hal_ring_type ring_type, int ring_num)
+{
+       struct hal_srng *srng;
+       struct ath12k_hal *hal = &sc->hal;
+       int ring_id;
+       struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+       ring_id = qwz_hal_srng_get_ring_id(sc, ring_type, ring_num, 0);
+       if (ring_id < 0)
+               return;
+
+       srng = &hal->srng_list[ring_id];
+
+       /* HAL_SHADOW_REG() is an offset relative to the mapped BAR (sc->mem). */
+       if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+               srng->u.dst_ring.tp_addr = (uint32_t *)(
+                   HAL_SHADOW_REG(sc, shadow_cfg_idx) +
+                   (unsigned long)sc->mem);
+       else
+               srng->u.src_ring.hp_addr = (uint32_t *)(
+                   HAL_SHADOW_REG(sc, shadow_cfg_idx) +
+                   (unsigned long)sc->mem);
+}
+
+/*
+ * Flush a pending shadow HP update for a source ring: if the hardware
+ * tail pointer shows the ring is not empty, run the access-end path,
+ * which writes the cached HP out through the shadow register.
+ */
+void
+qwz_hal_srng_shadow_update_hp_tp(struct qwz_softc *sc, struct hal_srng *srng)
+{
+#ifdef notyet
+       lockdep_assert_held(&srng->lock);
+#endif
+       /* Update the shadow HP if the ring isn't empty. */
+       if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
+           *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
+               qwz_hal_srng_access_end(sc, srng);
+}
+
+/*
+ * Claim the next free shadow register slot for one ring: record the
+ * target register address (HP for source rings, TP for destination
+ * rings) and redirect the ring's pointer accesses to the shadow slot.
+ * Returns 0 on success or EINVAL when all shadow slots are in use.
+ */
+int
+qwz_hal_srng_update_shadow_config(struct qwz_softc *sc,
+    enum hal_ring_type ring_type, int ring_num)
+{
+       struct ath12k_hal *hal = &sc->hal;
+       struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+       int shadow_cfg_idx = hal->num_shadow_reg_configured;
+       uint32_t target_reg;
+
+       if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
+               return EINVAL;
+
+       hal->num_shadow_reg_configured++;
+
+       /* Per-ring register address = base + ring stride * ring number. */
+       target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
+       target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
+               ring_num;
+
+       /* For destination ring, shadow the TP */
+       if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+               target_reg += HAL_OFFSET_FROM_HP_TO_TP;
+
+       hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
+
+       /* update hp/tp addr to hal structure*/
+       qwz_hal_srng_update_hp_tp_addr(sc, shadow_cfg_idx, ring_type, ring_num);
+
+       DPRINTF("%s: target_reg %x, shadow reg 0x%x shadow_idx 0x%x, "
+           "ring_type %d, ring num %d\n", __func__, target_reg,
+            HAL_SHADOW_REG(sc, shadow_cfg_idx), shadow_cfg_idx,
+            ring_type, ring_num);
+
+       return 0;
+}
+
+/*
+ * Set up shadow register slots for all host-managed rings.  CE rings
+ * and LMAC rings are skipped: CE rings keep direct register access and
+ * LMAC ring pointers are exchanged with the firmware via shared memory.
+ */
+void
+qwz_hal_srng_shadow_config(struct qwz_softc *sc)
+{
+       struct ath12k_hal *hal = &sc->hal;
+       struct hal_srng_config *cfg;
+       int type, ring;
+
+       /* Update all the non-CE srngs. */
+       for (type = 0; type < HAL_MAX_RING_TYPES; type++) {
+               if (type == HAL_CE_SRC || type == HAL_CE_DST ||
+                   type == HAL_CE_DST_STATUS)
+                       continue;
+
+               cfg = &hal->srng_config[type];
+               if (cfg->lmac_ring)
+                       continue;
+
+               for (ring = 0; ring < cfg->max_rings; ring++)
+                       qwz_hal_srng_update_shadow_config(sc, type, ring);
+       }
+}
+
+/*
+ * Export the table of configured shadow register addresses and the
+ * number of entries in use, for handing over to the target.
+ */
+void
+qwz_hal_srng_get_shadow_config(struct qwz_softc *sc, uint32_t **cfg,
+    uint32_t *len)
+{
+       *cfg = sc->hal.shadow_reg_addr;
+       *len = sc->hal.num_shadow_reg_configured;
+}
+
+/*
+ * Allocate (at most once) the DMA area holding one read-pointer word
+ * per SRNG ring and cache its kernel and device addresses.
+ * Returns 0 on success or ENOMEM if the DMA allocation fails.
+ */
+int
+qwz_hal_alloc_cont_rdp(struct qwz_softc *sc)
+{
+       struct ath12k_hal *hal = &sc->hal;
+       size_t size = sizeof(uint32_t) * HAL_SRNG_RING_ID_MAX;
+
+       if (hal->rdpmem == NULL) {
+               hal->rdpmem = qwz_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE);
+               if (hal->rdpmem == NULL) {
+                       printf("%s: could not allocate RDP DMA memory\n",
+                           sc->sc_dev.dv_xname);
+                       return ENOMEM;
+               }
+       }
+
+       hal->rdp.vaddr = QWZ_DMA_KVA(hal->rdpmem);
+       hal->rdp.paddr = QWZ_DMA_DVA(hal->rdpmem);
+       return 0;
+}
+
+/*
+ * Release the read-pointer DMA area, if allocated, and clear the
+ * cached addresses so a later re-init starts from a clean state.
+ */
+void
+qwz_hal_free_cont_rdp(struct qwz_softc *sc)
+{
+       struct ath12k_hal *hal = &sc->hal;
+
+       if (hal->rdpmem != NULL) {
+               hal->rdp.vaddr = NULL;
+               hal->rdp.paddr = 0L;
+               qwz_dmamem_free(sc->sc_dmat, hal->rdpmem);
+               hal->rdpmem = NULL;
+       }
+}
+
+/*
+ * Allocate (at most once) the DMA area holding one write-pointer word
+ * per LMAC ring and cache its kernel and device addresses.
+ * Returns 0 on success or ENOMEM if the DMA allocation fails.
+ */
+int
+qwz_hal_alloc_cont_wrp(struct qwz_softc *sc)
+{
+       struct ath12k_hal *hal = &sc->hal;
+       size_t size = sizeof(uint32_t) * HAL_SRNG_NUM_LMAC_RINGS;
+
+       if (hal->wrpmem == NULL) {
+               hal->wrpmem = qwz_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE);
+               if (hal->wrpmem == NULL) {
+                       /* Message fixed: this is the WRP area, not "WDP". */
+                       printf("%s: could not allocate WRP DMA memory\n",
+                           sc->sc_dev.dv_xname);
+                       return ENOMEM;
+               }
+       }
+
+       hal->wrp.vaddr = QWZ_DMA_KVA(hal->wrpmem);
+       hal->wrp.paddr = QWZ_DMA_DVA(hal->wrpmem);
+       return 0;
+}
+
+/*
+ * Release the write-pointer DMA area, if allocated, and clear the
+ * cached addresses so a later re-init starts from a clean state.
+ */
+void
+qwz_hal_free_cont_wrp(struct qwz_softc *sc)
+{
+       struct ath12k_hal *hal = &sc->hal;
+
+       if (hal->wrpmem != NULL) {
+               hal->wrp.vaddr = NULL;
+               hal->wrp.paddr = 0L;
+               qwz_dmamem_free(sc->sc_dmat, hal->wrpmem);
+               hal->wrpmem = NULL;
+       }
+}
+
+/*
+ * One-time HAL bring-up: zero the HAL state, derive the per-chip SRNG
+ * register configuration, and allocate the shared read- and write-
+ * pointer DMA areas.  Returns 0 on success or an errno-style error,
+ * releasing the RDP area again if the WRP allocation fails.
+ */
+int
+qwz_hal_srng_init(struct qwz_softc *sc)
+{
+       struct ath12k_hal *hal = &sc->hal;
+       int ret;
+
+       memset(hal, 0, sizeof(*hal));
+
+       ret = qwz_hal_srng_create_config(sc);
+       if (ret)
+               goto err_hal;
+
+       ret = qwz_hal_alloc_cont_rdp(sc);
+       if (ret)
+               goto err_hal;
+
+       ret = qwz_hal_alloc_cont_wrp(sc);
+       if (ret)
+               goto err_free_cont_rdp;
+
+#ifdef notyet
+       qwz_hal_register_srng_key(sc);
+#endif
+
+       return 0;
+err_free_cont_rdp:
+       qwz_hal_free_cont_rdp(sc);
+
+err_hal:
+       return ret;
+}
+
+/*
+ * Program a destination (hardware-producer) ring into the chip:
+ * optional MSI target, ring base address and size, ring ID, interrupt
+ * moderation thresholds, the DMA address the hardware writes its HP to,
+ * and finally the MISC register which enables the ring.
+ */
+void
+qwz_hal_srng_dst_hw_init(struct qwz_softc *sc, struct hal_srng *srng)
+{
+       struct ath12k_hal *hal = &sc->hal;
+       uint32_t val;
+       uint64_t hp_addr;
+       uint32_t reg_base;
+
+       reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+       if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+               sc->ops.write32(sc,
+                   reg_base + HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(sc),
+                   srng->msi_addr);
+
+               val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
+                   ((uint64_t)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) |
+                   HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+               sc->ops.write32(sc,
+                   reg_base + HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(sc), val);
+
+               sc->ops.write32(sc,
+                   reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(sc),
+                   srng->msi_data);
+       }
+
+       /* Ring base: low 32 bits in R0, high bits plus size in BASE_MSB. */
+       sc->ops.write32(sc, reg_base, srng->ring_base_paddr);
+
+       val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+           ((uint64_t)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+           FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
+           (srng->entry_size * srng->num_entries));
+       sc->ops.write32(sc,
+           reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(sc), val);
+
+       val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
+           FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
+       sc->ops.write32(sc, reg_base + HAL_REO1_RING_ID_OFFSET(sc), val);
+
+       /* interrupt setup */
+       val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
+           (srng->intr_timer_thres_us >> 3));
+
+       val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
+           (srng->intr_batch_cntr_thres_entries * srng->entry_size));
+
+       sc->ops.write32(sc,
+           reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(sc), val);
+
+       /* Tell the hardware where in the RDP area to mirror its HP. */
+       hp_addr = hal->rdp.paddr + ((unsigned long)srng->u.dst_ring.hp_addr -
+           (unsigned long)hal->rdp.vaddr);
+       sc->ops.write32(sc, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(sc),
+           hp_addr & HAL_ADDR_LSB_REG_MASK);
+       sc->ops.write32(sc, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(sc),
+           hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+
+       /* Initialize head and tail pointers to indicate ring is empty */
+       reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+       sc->ops.write32(sc, reg_base, 0);
+       sc->ops.write32(sc, reg_base + HAL_REO1_RING_TP_OFFSET(sc), 0);
+       *srng->u.dst_ring.hp_addr = 0;
+
+       /* Enable the ring last, after everything else is programmed. */
+       reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+       val = 0;
+       if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+               val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
+       if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+               val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
+       if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+               val |= HAL_REO1_RING_MISC_MSI_SWAP;
+       val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
+
+       sc->ops.write32(sc, reg_base + HAL_REO1_RING_MISC_OFFSET(sc), val);
+}
+
+/*
+ * Program a source (host-producer) ring into the chip: optional MSI
+ * target, ring base address and size, interrupt moderation thresholds,
+ * low-threshold interrupt, the DMA address the hardware writes its TP
+ * to, and finally the MISC register which enables the ring.
+ */
+void
+qwz_hal_srng_src_hw_init(struct qwz_softc *sc, struct hal_srng *srng)
+{
+       struct ath12k_hal *hal = &sc->hal;
+       uint32_t val;
+       uint64_t tp_addr;
+       uint32_t reg_base;
+
+       reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+       if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+               sc->ops.write32(sc,
+                   reg_base + HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(sc),
+                   srng->msi_addr);
+
+               val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
+                   ((uint64_t)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) |
+                     HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+               sc->ops.write32(sc,
+                   reg_base + HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(sc),
+                   val);
+
+               sc->ops.write32(sc,
+                   reg_base + HAL_TCL1_RING_MSI1_DATA_OFFSET(sc),
+                   srng->msi_data);
+       }
+
+       sc->ops.write32(sc, reg_base, srng->ring_base_paddr);
+
+       val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+           ((uint64_t)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+           FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
+           (srng->entry_size * srng->num_entries));
+       sc->ops.write32(sc, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(sc), val);
+
+       val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
+       sc->ops.write32(sc, reg_base + HAL_TCL1_RING_ID_OFFSET(sc), val);
+
+       /* The WBM idle link ring re-writes base/size a second time here. */
+       if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+               sc->ops.write32(sc, reg_base, (uint32_t)srng->ring_base_paddr);
+               val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+                   ((uint64_t)srng->ring_base_paddr >>
+                   HAL_ADDR_MSB_REG_SHIFT)) |
+                   FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
+                   (srng->entry_size * srng->num_entries));
+               sc->ops.write32(sc,
+                   reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(sc), val);
+       }
+
+       /* interrupt setup */
+       /* NOTE(review): comment inherited from ath11k — IPQ8074 v2 required
+        * the interrupt timer threshold in units of 8 usecs instead of
+        * 1 usec (as required by v1).  Confirm the unit for this chip
+        * generation; unlike the dst path, no ">> 3" is applied here.
+        */
+       val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
+           srng->intr_timer_thres_us);
+
+       val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
+           (srng->intr_batch_cntr_thres_entries * srng->entry_size));
+
+       sc->ops.write32(sc,
+           reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(sc), val);
+
+       val = 0;
+       if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+               val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
+                   srng->u.src_ring.low_threshold);
+       }
+       sc->ops.write32(sc,
+           reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(sc), val);
+
+       /* Tell the hardware where in the RDP area to mirror its TP. */
+       if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+               tp_addr = hal->rdp.paddr +
+                   ((unsigned long)srng->u.src_ring.tp_addr -
+                   (unsigned long)hal->rdp.vaddr);
+               sc->ops.write32(sc,
+                   reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(sc),
+                   tp_addr & HAL_ADDR_LSB_REG_MASK);
+               sc->ops.write32(sc,
+                   reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(sc),
+                   tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+       }
+
+       /* Initialize head and tail pointers to indicate ring is empty */
+       reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+       sc->ops.write32(sc, reg_base, 0);
+       sc->ops.write32(sc, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
+       *srng->u.src_ring.tp_addr = 0;
+
+       /* Enable the ring last, after everything else is programmed. */
+       reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+       val = 0;
+       if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+               val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
+       if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+               val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
+       if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+               val |= HAL_TCL1_RING_MISC_MSI_SWAP;
+
+       /* Loop count is not used for SRC rings */
+       val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
+
+       val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
+
+       sc->ops.write32(sc, reg_base + HAL_TCL1_RING_MISC_OFFSET(sc), val);
+}
+
+/*
+ * Program a ring's hardware registers according to its direction.
+ */
+void
+qwz_hal_srng_hw_init(struct qwz_softc *sc, struct hal_srng *srng)
+{
+       if (srng->ring_dir != HAL_SRNG_DIR_SRC) {
+               qwz_hal_srng_dst_hw_init(sc, srng);
+               return;
+       }
+
+       qwz_hal_srng_src_hw_init(sc, srng);
+}
+
+/*
+ * Program the maximum receive buffer length into a CE destination
+ * ring's R0 destination control register.
+ */
+void
+qwz_hal_ce_dst_setup(struct qwz_softc *sc, struct hal_srng *srng, int ring_num)
+{
+       struct hal_srng_config *srng_config = &sc->hal.srng_config[HAL_CE_DST];
+       uint32_t addr;
+       uint32_t val;
+
+       addr = HAL_CE_DST_RING_CTRL +
+           srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
+           ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
+
+       /* Read-modify-write: only the MAX_LEN field is replaced. */
+       val = sc->ops.read32(sc, addr);
+       val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
+       val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
+           srng->u.dst_ring.max_buffer_length);
+       sc->ops.write32(sc, addr, val);
+}
+
+/*
+ * Fill a CE source ring descriptor: 64-bit buffer DMA address split
+ * across two words, transfer length, per-buffer byte-swap flag and a
+ * caller-chosen ID stored in the meta info field.
+ * NOTE(review): unlike qwz_hal_ce_dst_set_desc(), these stores are not
+ * wrapped in htole32() — confirm this is intentional.
+ */
+void
+qwz_hal_ce_src_set_desc(void *buf, uint64_t paddr, uint32_t len, uint32_t id,
+    uint8_t byte_swap_data)
+{
+       struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;
+
+       desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
+       desc->buffer_addr_info = FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
+           (paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+           FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
+           byte_swap_data) |
+           FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
+           FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
+       desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
+}
+
+/*
+ * Fill a CE destination ring descriptor with the DMA address of an
+ * empty receive buffer, as little-endian low/high address words.
+ */
+void
+qwz_hal_ce_dst_set_desc(void *buf, uint64_t paddr)
+{
+       struct hal_ce_srng_dest_desc *desc = buf;
+       uint32_t lo, hi;
+
+       lo = paddr & HAL_ADDR_LSB_REG_MASK;
+       hi = FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
+           (paddr >> HAL_ADDR_MSB_REG_SHIFT));
+       desc->buffer_addr_low = htole32(lo);
+       desc->buffer_addr_info = htole32(hi);
+}
+
+/*
+ * Extract the completed transfer length from a CE destination status
+ * descriptor, clearing the length field afterwards so a stale
+ * descriptor cannot be consumed twice.
+ * NOTE(review): desc->flags is accessed without le32 conversion here,
+ * unlike the htole32() used in qwz_hal_ce_dst_set_desc() — verify.
+ */
+uint32_t
+qwz_hal_ce_dst_status_get_length(void *buf)
+{
+       struct hal_ce_srng_dst_status_desc *desc =
+               (struct hal_ce_srng_dst_status_desc *)buf;
+       uint32_t len;
+
+       len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
+       desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
+
+       return len;
+}
+
+
+/*
+ * Create and initialize one SRNG ring: fill in the software ring state
+ * from the caller's parameters and the per-type configuration, wire the
+ * ring's HP/TP pointers to the shared RDP/WRP DMA areas (or directly to
+ * the register/shadow space), zero the ring memory, and — except for
+ * firmware-managed LMAC rings — program the hardware.  Returns the
+ * global ring ID on success, -1 on invalid arguments.
+ */
+int
+qwz_hal_srng_setup(struct qwz_softc *sc, enum hal_ring_type type,
+    int ring_num, int mac_id, struct hal_srng_params *params)
+{
+       struct ath12k_hal *hal = &sc->hal;
+       struct hal_srng_config *srng_config = &sc->hal.srng_config[type];
+       struct hal_srng *srng;
+       int ring_id;
+       uint32_t lmac_idx;
+       int i;
+       uint32_t reg_base;
+
+       ring_id = qwz_hal_srng_get_ring_id(sc, type, ring_num, mac_id);
+       if (ring_id < 0)
+               return ring_id;
+
+       srng = &hal->srng_list[ring_id];
+
+       srng->ring_id = ring_id;
+       srng->ring_dir = srng_config->ring_dir;
+       srng->ring_base_paddr = params->ring_base_paddr;
+       srng->ring_base_vaddr = params->ring_base_vaddr;
+       srng->entry_size = srng_config->entry_size;
+       srng->num_entries = params->num_entries;
+       srng->ring_size = srng->entry_size * srng->num_entries;
+       srng->intr_batch_cntr_thres_entries =
+           params->intr_batch_cntr_thres_entries;
+       srng->intr_timer_thres_us = params->intr_timer_thres_us;
+       srng->flags = params->flags;
+       srng->msi_addr = params->msi_addr;
+       srng->msi_data = params->msi_data;
+       srng->initialized = 1;
+#if 0
+       spin_lock_init(&srng->lock);
+       lockdep_set_class(&srng->lock, hal->srng_key + ring_id);
+#endif
+
+       for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
+               srng->hwreg_base[i] = srng_config->reg_start[i] +
+                   (ring_num * srng_config->reg_size[i]);
+       }
+
+       /* Sizes are in 32-bit words, hence the << 2 to get bytes. */
+       memset(srng->ring_base_vaddr, 0,
+           (srng->entry_size * srng->num_entries) << 2);
+
+#if 0 /* Not needed on OpenBSD? We do swapping in sofware... */
+       /* TODO: Add comments on these swap configurations */
+       if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+               srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
+                              HAL_SRNG_FLAGS_RING_PTR_SWAP;
+#endif
+       reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+
+       if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+               /* Host produces: TP is mirrored by HW into the RDP area,
+                * HP is either firmware-shared (LMAC) or a register. */
+               srng->u.src_ring.hp = 0;
+               srng->u.src_ring.cached_tp = 0;
+               srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
+               srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
+               srng->u.src_ring.low_threshold = params->low_threshold *
+                   srng->entry_size;
+               if (srng_config->lmac_ring) {
+                       lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
+                       srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
+                           lmac_idx);
+                       srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+               } else {
+                       if (!sc->hw_params.supports_shadow_regs)
+                               srng->u.src_ring.hp_addr =
+                                   (uint32_t *)((unsigned long)sc->mem +
+                                   reg_base);
+                       else
+                               DPRINTF("%s: type %d ring_num %d reg_base "
+                                   "0x%x shadow 0x%lx\n",
+                                   sc->sc_dev.dv_xname, type, ring_num, reg_base,
+                                  (unsigned long)srng->u.src_ring.hp_addr -
+                                  (unsigned long)sc->mem);
+               }
+       } else {
+               /* During initialization loop count in all the descriptors
+                * will be set to zero, and HW will set it to 1 on completing
+                * descriptor update in first loop, and increments it by 1 on
+                * subsequent loops (loop count wraps around after reaching
+                * 0xffff). The 'loop_cnt' in SW ring state is the expected
+                * loop count in descriptors updated by HW (to be processed
+                * by SW).
+                */
+               srng->u.dst_ring.loop_cnt = 1;
+               srng->u.dst_ring.tp = 0;
+               srng->u.dst_ring.cached_hp = 0;
+               srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
+               if (srng_config->lmac_ring) {
+                       /* For LMAC rings, tail pointer updates will be done
+                        * through FW by writing to a shared memory location
+                        */
+                       lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
+                       srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
+                           lmac_idx);
+                       srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+               } else {
+                       if (!sc->hw_params.supports_shadow_regs)
+                               srng->u.dst_ring.tp_addr =
+                                   (uint32_t *)((unsigned long)sc->mem +
+                                   reg_base + (HAL_REO1_RING_TP(sc) -
+                                   HAL_REO1_RING_HP(sc)));
+                       else
+                               DPRINTF("%s: type %d ring_num %d target_reg "
+                                   "0x%x shadow 0x%lx\n", sc->sc_dev.dv_xname,
+                                   type, ring_num,
+                                   reg_base + (HAL_REO1_RING_TP(sc) -
+                                   HAL_REO1_RING_HP(sc)),
+                                   (unsigned long)srng->u.dst_ring.tp_addr -
+                                   (unsigned long)sc->mem);
+               }
+       }
+
+       /* LMAC rings are programmed by the firmware, not by the host. */
+       if (srng_config->lmac_ring)
+               return ring_id;
+
+       qwz_hal_srng_hw_init(sc, srng);
+
+       if (type == HAL_CE_DST) {
+               srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
+               qwz_hal_ce_dst_setup(sc, srng, ring_num);
+       }
+
+       return ring_id;
+}
+
+/*
+ * Return the size in bytes of a CE descriptor of the given kind,
+ * or 0 for an unrecognized descriptor type.
+ */
+size_t
+qwz_hal_ce_get_desc_size(enum hal_ce_desc type)
+{
+       size_t size = 0;
+
+       switch (type) {
+       case HAL_CE_DESC_SRC:
+               size = sizeof(struct hal_ce_srng_src_desc);
+               break;
+       case HAL_CE_DESC_DST:
+               size = sizeof(struct hal_ce_srng_dest_desc);
+               break;
+       case HAL_CE_DESC_DST_STATUS:
+               size = sizeof(struct hal_ce_srng_dst_status_desc);
+               break;
+       }
+
+       return size;
+}
+
+/*
+ * HTC TX completion entry point; still a stub in this work-in-progress
+ * port — it only logs and does not consume or free the mbuf.
+ */
+void
+qwz_htc_tx_completion_handler(struct qwz_softc *sc, struct mbuf *m)
+{
+       printf("%s: not implemented\n", __func__);
+}
+
+/*
+ * Reap the next completed entry from a CE source ring: if the hardware
+ * has consumed a descriptor, return the associated per-transfer context
+ * and advance the software index.  Returns NULL when nothing has
+ * completed.
+ */
+struct qwz_tx_data *
+qwz_ce_completed_send_next(struct qwz_ce_pipe *pipe)
+{
+       struct qwz_softc *sc = pipe->sc;
+       struct hal_srng *srng;
+       unsigned int sw_index;
+       unsigned int nentries_mask;
+       void *ctx;
+       struct qwz_tx_data *tx_data = NULL;
+       uint32_t *desc;
+#ifdef notyet
+       spin_lock_bh(&ab->ce.ce_lock);
+#endif
+       sw_index = pipe->src_ring->sw_index;
+       nentries_mask = pipe->src_ring->nentries_mask;
+
+       srng = &sc->hal.srng_list[pipe->src_ring->hal_ring_id];
+#ifdef notyet
+       spin_lock_bh(&srng->lock);
+#endif
+       qwz_hal_srng_access_begin(sc, srng);
+
+       /* No reaped descriptor means no completion to hand out. */
+       desc = qwz_hal_srng_src_reap_next(sc, srng);
+       if (!desc)
+               goto err_unlock;
+
+       ctx = pipe->src_ring->per_transfer_context[sw_index];
+       tx_data = (struct qwz_tx_data *)ctx;
+
+       sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+       pipe->src_ring->sw_index = sw_index;
+
+err_unlock:
+#ifdef notyet
+       spin_unlock_bh(&srng->lock);
+
+       spin_unlock_bh(&ab->ce.ce_lock);
+#endif
+       return tx_data;
+}
+
+/*
+ * Drain all completed transmissions on a CE pipe: unload each DMA map,
+ * free the mbuf when no completion callback applies (no send_cb, or
+ * credit flow is active), otherwise queue it and invoke the pipe's
+ * send callback afterwards.  Returns 1 if any callback was run, 0
+ * otherwise.
+ */
+int
+qwz_ce_tx_process_cb(struct qwz_ce_pipe *pipe)
+{
+       struct qwz_softc *sc = pipe->sc;
+       struct qwz_tx_data *tx_data;
+       struct mbuf *m;
+       struct mbuf_list ml = MBUF_LIST_INITIALIZER();
+       int ret = 0;
+
+       while ((tx_data = qwz_ce_completed_send_next(pipe)) != NULL) {
+               bus_dmamap_unload(sc->sc_dmat, tx_data->map);
+               m = tx_data->m;
+               tx_data->m = NULL;
+
+               if ((!pipe->send_cb) || sc->hw_params.credit_flow) {
+                       m_freem(m);
+                       continue;
+               }
+
+               ml_enqueue(&ml, m);
+               ret = 1;
+       }
+
+       /* Run the callbacks after the reap loop has finished. */
+       while ((m = ml_dequeue(&ml))) {
+               DNPRINTF(QWZ_D_CE, "%s: tx ce pipe %d len %d\n", __func__,
+                   pipe->pipe_num, m->m_len);
+               pipe->send_cb(sc, m);
+       }
+
+       return ret;
+}
+
+/*
+ * Poll a CE pipe for completed transmissions.  Only pipes that have a
+ * source ring and run with interrupts disabled are reaped here.
+ */
+void
+qwz_ce_poll_send_completed(struct qwz_softc *sc, uint8_t pipe_id)
+{
+       const struct ce_attr *attr = &sc->hw_params.host_ce_config[pipe_id];
+       struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
+
+       if (attr->src_nentries && (pipe->attr_flags & CE_ATTR_DIS_INTR))
+               qwz_ce_tx_process_cb(pipe);
+}
+
+void
+qwz_htc_process_credit_report(struct qwz_htc *htc,
+    const struct ath12k_htc_credit_report *report, int len,
+    enum ath12k_htc_ep_id eid)
+{
+       struct qwz_softc *sc = htc->sc;
+       struct qwz_htc_ep *ep;
+       int i, n_reports;
+
+       if (len % sizeof(*report))
+               printf("%s: Uneven credit report len %d", __func__, len);
+
+       n_reports = len / sizeof(*report);
+#ifdef notyet
+       spin_lock_bh(&htc->tx_lock);
+#endif
+       for (i = 0; i < n_reports; i++, report++) {
+               if (report->eid >= ATH12K_HTC_EP_COUNT)
+                       break;
+
+               ep = &htc->endpoint[report->eid];
+               ep->tx_credits += report->credits;
+
+               DNPRINTF(QWZ_D_HTC, "%s: ep %d credits got %d total %d\n",
+                   __func__, report->eid, report->credits, ep->tx_credits);
+
+               if (ep->ep_ops.ep_tx_credits) {
+#ifdef notyet
+                       spin_unlock_bh(&htc->tx_lock);
+#endif
+                       ep->ep_ops.ep_tx_credits(sc);
+#ifdef notyet
+                       spin_lock_bh(&htc->tx_lock);
+#endif
+               }
+       }
+#ifdef notyet
+       spin_unlock_bh(&htc->tx_lock);
+#endif
+}
+
+int
+qwz_htc_process_trailer(struct qwz_htc *htc, uint8_t *buffer, int length,
+    enum ath12k_htc_ep_id src_eid)
+{
+       struct qwz_softc *sc = htc->sc;
+       int status = 0;
+       struct ath12k_htc_record *record;
+       size_t len;
+
+       while (length > 0) {
+               record = (struct ath12k_htc_record *)buffer;
+
+               if (length < sizeof(record->hdr)) {
+                       status = EINVAL;
+                       break;
+               }
+
+               if (record->hdr.len > length) {
+                       /* no room left in buffer for record */
+                       printf("%s: Invalid record length: %d\n",
+                           __func__, record->hdr.len);
+                       status = EINVAL;
+                       break;
+               }
+
+               if (sc->hw_params.credit_flow) {
+                       switch (record->hdr.id) {
+                       case ATH12K_HTC_RECORD_CREDITS:
+                               len = sizeof(struct ath12k_htc_credit_report);
+                               if (record->hdr.len < len) {
+                                       printf("%s: Credit report too long\n",
+                                           __func__);
+                                       status = EINVAL;
+                                       break;
+                               }
+                               qwz_htc_process_credit_report(htc,
+                                   record->credit_report,
+                                   record->hdr.len, src_eid);
+                               break;
+                       default:
+                               printf("%s: unhandled record: id:%d length:%d\n",
+                                   __func__, record->hdr.id, record->hdr.len);
+                               break;
+                       }
+               }
+
+               if (status)
+                       break;
+
+               /* multiple records may be present in a trailer */
+               buffer += sizeof(record->hdr) + record->hdr.len;
+               length -= sizeof(record->hdr) + record->hdr.len;
+       }
+
+       return status;
+}
+
/*
 * Target acknowledged (or rejected) a suspend request.
 * Suspend support has not been ported yet; just log the event.
 */
void
qwz_htc_suspend_complete(struct qwz_softc *sc, int ack)
{
	printf("%s: not implemented\n", __func__);
}
+
/*
 * Target signalled wakeup from suspend. The Linux driver only logs
 * this event as well, hence the TODO about silencing it.
 */
void
qwz_htc_wakeup_from_suspend(struct qwz_softc *sc)
{
	/* TODO This is really all the Linux driver does here... silence it? */
	printf("%s: wakeup from suspend received\n", __func__);
}
+
/*
 * HTC rx completion handler: validate the HTC header on an mbuf that
 * arrived over a copy engine, strip and process any trailer records
 * (e.g. credit reports), then either handle the frame as an HTC
 * control message (endpoint 0) or hand the payload to the destination
 * endpoint's rx-complete callback. The mbuf is consumed in all cases:
 * either ownership passes to the endpoint callback or it is freed at
 * the "out" label.
 */
void
qwz_htc_rx_completion_handler(struct qwz_softc *sc, struct mbuf *m)
{
	struct qwz_htc *htc = &sc->htc;
	struct ath12k_htc_hdr *hdr;
	struct qwz_htc_ep *ep;
	uint16_t payload_len;
	uint32_t message_id, trailer_len = 0;
	uint8_t eid;
	int trailer_present;

	/* Make the HTC header contiguous so it can be read via mtod(). */
	m = m_pullup(m, sizeof(struct ath12k_htc_hdr));
	if (m == NULL) {
		printf("%s: m_pullup failed\n", __func__);
		/* m_pullup() already freed the chain; this store is a no-op
		 * kept for clarity before jumping to the common free path. */
		m = NULL; /* already freed */
		goto out;
	}

	hdr = mtod(m, struct ath12k_htc_hdr *);

	eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);

	if (eid >= ATH12K_HTC_EP_COUNT) {
		printf("%s: HTC Rx: invalid eid %d\n", __func__, eid);
		printf("%s: HTC info: 0x%x\n", __func__, hdr->htc_info);
		printf("%s: CTRL info: 0x%x\n", __func__, hdr->ctrl_info);
		goto out;
	}

	ep = &htc->endpoint[eid];

	payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);

	/* Reject frames larger than the maximum HTC frame size. */
	if (payload_len + sizeof(*hdr) > ATH12K_HTC_MAX_LEN) {
		printf("%s: HTC rx frame too long, len: %zu\n", __func__,
		    payload_len + sizeof(*hdr));
		goto out;
	}

	/* The mbuf must hold at least as much data as the header claims. */
	if (m->m_pkthdr.len < payload_len) {
		printf("%s: HTC Rx: insufficient length, got %d, "
		    "expected %d\n", __func__, m->m_pkthdr.len, payload_len);
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
	    ATH12K_HTC_FLAG_TRAILER_PRESENT;

	DNPRINTF(QWZ_D_HTC, "%s: rx ep %d mbuf %p trailer_present %d\n",
	    __func__, eid, m, trailer_present);

	if (trailer_present) {
		int status = 0;
		uint8_t *trailer;
		int trim;
		size_t min_len;

		/* The trailer occupies the last trailer_len bytes of the
		 * payload; it must hold at least one record header. */
		trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
		min_len = sizeof(struct ath12k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			printf("%s: Invalid trailer length: %d\n", __func__,
			    trailer_len);
			goto out;
		}

		trailer = (uint8_t *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = qwz_htc_process_trailer(htc, trailer,
		    trailer_len, eid);
		if (status)
			goto out;

		/* Chop the trailer off the end of the mbuf. */
		trim = trailer_len;
		m_adj(m, -trim);
	}

	if (trailer_len >= payload_len)
		/* zero length packet with trailer data, just drop these */
		goto out;

	/* Strip the HTC header; only the payload remains. */
	m_adj(m, sizeof(*hdr));

	if (eid == ATH12K_HTC_EP_0) {
		struct ath12k_htc_msg *msg;

		msg = mtod(m, struct ath12k_htc_msg *);
		message_id = FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id);

		DNPRINTF(QWZ_D_HTC, "%s: rx ep %d mbuf %p message_id %d\n",
		    __func__, eid, m, message_id);

		switch (message_id) {
		case ATH12K_HTC_MSG_READY_ID:
		case ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
			/* handle HTC control message */
			if (sc->ctl_resp) {
				/* this is a fatal error, target should not be
				 * sending unsolicited messages on the ep 0
				 */
				printf("%s: HTC rx ctrl still processing\n",
				    __func__);
				goto out;
			}

			/* Stash the response and wake the thread waiting
			 * on sc->ctl_resp (see the HTC connect path). */
			htc->control_resp_len =
			    MIN(m->m_pkthdr.len, ATH12K_HTC_MAX_CTRL_MSG_LEN);

			m_copydata(m, 0, htc->control_resp_len,
			    htc->control_resp_buffer);

			sc->ctl_resp = 1;
			wakeup(&sc->ctl_resp);
			break;
		case ATH12K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			qwz_htc_suspend_complete(sc, 1);
			break;
		case ATH12K_HTC_MSG_NACK_SUSPEND:
			qwz_htc_suspend_complete(sc, 0);
			break;
		case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
			qwz_htc_wakeup_from_suspend(sc);
			break;
		default:
			printf("%s: ignoring unsolicited htc ep0 event %ld\n",
			    __func__,
			    FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id));
			break;
		}
		goto out;
	}

	DNPRINTF(QWZ_D_HTC, "%s: rx ep %d mbuf %p\n", __func__, eid, m);

	ep->ep_ops.ep_rx_complete(sc, m);

	/* poll tx completion for interrupt disabled CE's */
	qwz_ce_poll_send_completed(sc, ep->ul_pipe_id);

	/* mbuf is now owned by the rx completion handler */
	m = NULL;
out:
	m_freem(m);
}
+
/*
 * Tear down a CE descriptor ring: unmap and free its DMA memory,
 * destroy the DMA map, then free the ring structure itself.
 * Each step is guarded so this is safe to call on a partially
 * constructed ring from the qwz_ce_alloc_ring() error paths.
 */
void
qwz_ce_free_ring(struct qwz_softc *sc, struct qwz_ce_ring *ring)
{
	bus_size_t dsize;
	size_t size;

	if (ring == NULL)
		return;

	if (ring->base_addr) {
		dsize = ring->nentries * ring->desc_sz;
		bus_dmamem_unmap(sc->sc_dmat, ring->base_addr, dsize);
	}
	if (ring->nsegs)
		bus_dmamem_free(sc->sc_dmat, &ring->dsegs, ring->nsegs);
	/* NOTE(review): there is no bus_dmamap_unload() before destroy,
	 * even though the map may have been loaded in qwz_ce_alloc_ring();
	 * this mirrors qwx(4) — confirm it is intentional. */
	if (ring->dmap)
		bus_dmamap_destroy(sc->sc_dmat, ring->dmap);

	/* Size must match the allocation in qwz_ce_alloc_ring(). */
	size = sizeof(*ring) + (ring->nentries *
	    sizeof(ring->per_transfer_context[0]));
	free(ring, M_DEVBUF, size);
}
+
/*
 * Report whether a given copy engine needs the shadow-register
 * workaround. The fix applies to CE4 alone.
 */
static inline int
qwz_ce_need_shadow_fix(int ce_id)
{
	if (ce_id == 4)
		return 1;
	return 0;
}
+
+void
+qwz_ce_stop_shadow_timers(struct qwz_softc *sc)
+{
+       int i;
+
+       if (!sc->hw_params.supports_shadow_regs)
+               return;
+
+       for (i = 0; i < sc->hw_params.ce_count; i++)
+               if (qwz_ce_need_shadow_fix(i))
+                       qwz_dp_shadow_stop_timer(sc, &sc->ce.hp_timer[i]);
+}
+
+void
+qwz_ce_free_pipes(struct qwz_softc *sc)
+{
+       struct qwz_ce_pipe *pipe;
+       int i;
+
+       for (i = 0; i < sc->hw_params.ce_count; i++) {
+               pipe = &sc->ce.ce_pipe[i];
+               if (qwz_ce_need_shadow_fix(i))
+                       qwz_dp_shadow_stop_timer(sc, &sc->ce.hp_timer[i]);
+               if (pipe->src_ring) {
+                       qwz_ce_free_ring(sc, pipe->src_ring);
+                       pipe->src_ring = NULL;
+               }
+
+               if (pipe->dest_ring) {
+                       qwz_ce_free_ring(sc, pipe->dest_ring);
+                       pipe->dest_ring = NULL;
+               }
+
+               if (pipe->status_ring) {
+                       qwz_ce_free_ring(sc, pipe->status_ring);
+                       pipe->status_ring = NULL;
+               }
+       }
+}
+
+int
+qwz_ce_alloc_src_ring_transfer_contexts(struct qwz_ce_pipe *pipe,
+    const struct ce_attr *attr)
+{
+       struct qwz_softc *sc = pipe->sc;
+       struct qwz_tx_data *txdata;
+       size_t size;
+       int ret, i;
+
+       /* Allocate an array of qwz_tx_data structures. */
+       txdata = mallocarray(pipe->src_ring->nentries, sizeof(*txdata),
+           M_DEVBUF, M_NOWAIT | M_ZERO);
+       if (txdata == NULL)
+               return ENOMEM;
+
+       size = sizeof(*txdata) * pipe->src_ring->nentries;
+
+       /* Create per-transfer DMA maps. */
+       for (i = 0; i < pipe->src_ring->nentries; i++) {
+               struct qwz_tx_data *ctx = &txdata[i];
+               ret = bus_dmamap_create(sc->sc_dmat, attr->src_sz_max, 1,
+                   attr->src_sz_max, 0, BUS_DMA_NOWAIT, &ctx->map);
+               if (ret) {
+                       int j;
+                       for (j = 0; j < i; j++) {
+                               struct qwz_tx_data *ctx = &txdata[j];
+                               bus_dmamap_destroy(sc->sc_dmat, ctx->map);
+                       }
+                       free(txdata, M_DEVBUF, size);
+                       return ret;
+               }
+               pipe->src_ring->per_transfer_context[i] = ctx;
+       }
+
+       return 0;
+}
+
+int
+qwz_ce_alloc_dest_ring_transfer_contexts(struct qwz_ce_pipe *pipe,
+    const struct ce_attr *attr)
+{
+       struct qwz_softc *sc = pipe->sc;
+       struct qwz_rx_data *rxdata;
+       size_t size;
+       int ret, i;
+
+       /* Allocate an array of qwz_rx_data structures. */
+       rxdata = mallocarray(pipe->dest_ring->nentries, sizeof(*rxdata),
+           M_DEVBUF, M_NOWAIT | M_ZERO);
+       if (rxdata == NULL)
+               return ENOMEM;
+
+       size = sizeof(*rxdata) * pipe->dest_ring->nentries;
+
+       /* Create per-transfer DMA maps. */
+       for (i = 0; i < pipe->dest_ring->nentries; i++) {
+               struct qwz_rx_data *ctx = &rxdata[i];
+               ret = bus_dmamap_create(sc->sc_dmat, attr->src_sz_max, 1,
+                   attr->src_sz_max, 0, BUS_DMA_NOWAIT, &ctx->map);
+               if (ret) {
+                       int j;
+                       for (j = 0; j < i; j++) {
+                               struct qwz_rx_data *ctx = &rxdata[j];
+                               bus_dmamap_destroy(sc->sc_dmat, ctx->map);
+                       }
+                       free(rxdata, M_DEVBUF, size);
+                       return ret;
+               }
+               pipe->dest_ring->per_transfer_context[i] = ctx;
+       }
+
+       return 0;
+}
+
/*
 * Allocate a CE descriptor ring holding 'nentries' descriptors of
 * 'desc_sz' bytes each, backed by a single contiguous DMA segment.
 * nentries must be a power of two (nentries_mask relies on it).
 * Returns the ring, or NULL on failure. Error paths hand the
 * partially constructed ring to qwz_ce_free_ring(), which checks
 * each field before releasing it.
 */
struct qwz_ce_ring *
qwz_ce_alloc_ring(struct qwz_softc *sc, int nentries, size_t desc_sz)
{
	struct qwz_ce_ring *ce_ring;
	/* Ring header plus one per-transfer context slot per entry. */
	size_t size = sizeof(*ce_ring) +
	    (nentries * sizeof(ce_ring->per_transfer_context[0]));
	bus_size_t dsize;

	ce_ring = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ce_ring == NULL)
		return NULL;

	/* Set sizes first: qwz_ce_free_ring() needs them for cleanup. */
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;
	ce_ring->desc_sz = desc_sz;

	dsize = nentries * desc_sz;
	if (bus_dmamap_create(sc->sc_dmat, dsize, 1, dsize, 0, BUS_DMA_NOWAIT,
	    &ce_ring->dmap)) {
		free(ce_ring, M_DEVBUF, size);
		return NULL;
	}

	/* Single segment, zeroed, aligned for CE descriptor access. */
	if (bus_dmamem_alloc(sc->sc_dmat, dsize, CE_DESC_RING_ALIGN, 0,
	    &ce_ring->dsegs, 1, &ce_ring->nsegs,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		qwz_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	if (bus_dmamem_map(sc->sc_dmat, &ce_ring->dsegs, 1, dsize,
	    &ce_ring->base_addr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
		qwz_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	if (bus_dmamap_load(sc->sc_dmat, ce_ring->dmap, ce_ring->base_addr,
	    dsize, NULL, BUS_DMA_NOWAIT)) {
		qwz_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	return ce_ring;
}
+
+int
+qwz_ce_alloc_pipe(struct qwz_softc *sc, int ce_id)
+{
+       struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
+       const struct ce_attr *attr = &sc->hw_params.host_ce_config[ce_id];
+       struct qwz_ce_ring *ring;
+       int nentries;
+       size_t desc_sz;
+
+       pipe->attr_flags = attr->flags;
+
+       if (attr->src_nentries) {
+               pipe->send_cb = attr->send_cb;
+               nentries = qwz_roundup_pow_of_two(attr->src_nentries);
+               desc_sz = qwz_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+               ring = qwz_ce_alloc_ring(sc, nentries, desc_sz);
+               if (ring == NULL)
+                       return ENOMEM;
+               pipe->src_ring = ring;
+               if (qwz_ce_alloc_src_ring_transfer_contexts(pipe, attr))
+                       return ENOMEM;
+       }
+
+       if (attr->dest_nentries) {
+               pipe->recv_cb = attr->recv_cb;
+               nentries = qwz_roundup_pow_of_two(attr->dest_nentries);
+               desc_sz = qwz_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+               ring = qwz_ce_alloc_ring(sc, nentries, desc_sz);
+               if (ring == NULL)
+                       return ENOMEM;
+               pipe->dest_ring = ring;
+               if (qwz_ce_alloc_dest_ring_transfer_contexts(pipe, attr))
+                       return ENOMEM;
+
+               desc_sz = qwz_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+               ring = qwz_ce_alloc_ring(sc, nentries, desc_sz);
+               if (ring == NULL)
+                       return ENOMEM;
+               pipe->status_ring = ring;
+       }
+
+       return 0;
+}
+
+void
+qwz_ce_rx_pipe_cleanup(struct qwz_ce_pipe *pipe)
+{
+       struct qwz_softc *sc = pipe->sc;
+       struct qwz_ce_ring *ring = pipe->dest_ring;
+       void *ctx;
+       struct qwz_rx_data *rx_data;
+       int i;
+
+       if (!(ring && pipe->buf_sz))
+               return;
+
+       for (i = 0; i < ring->nentries; i++) {
+               ctx = ring->per_transfer_context[i];
+               if (!ctx)
+                       continue;
+
+               rx_data = (struct qwz_rx_data *)ctx;
+               if (rx_data->m) {
+                       bus_dmamap_unload(sc->sc_dmat, rx_data->map);
+                       m_freem(rx_data->m);
+                       rx_data->m = NULL;
+               }
+       }
+}
+
+void
+qwz_ce_shadow_config(struct qwz_softc *sc)
+{
+       int i;
+
+       for (i = 0; i < sc->hw_params.ce_count; i++) {
+               if (sc->hw_params.host_ce_config[i].src_nentries)
+                       qwz_hal_srng_update_shadow_config(sc, HAL_CE_SRC, i);
+
+               if (sc->hw_params.host_ce_config[i].dest_nentries) {
+                       qwz_hal_srng_update_shadow_config(sc, HAL_CE_DST, i);
+
+                       qwz_hal_srng_update_shadow_config(sc,
+                           HAL_CE_DST_STATUS, i);
+               }
+       }
+}
+
+void
+qwz_ce_get_shadow_config(struct qwz_softc *sc, uint32_t **shadow_cfg,
+    uint32_t *shadow_cfg_len)
+{
+       if (!sc->hw_params.supports_shadow_regs)
+               return;
+
+       qwz_hal_srng_get_shadow_config(sc, shadow_cfg, shadow_cfg_len);
+
+       /* shadow is already configured */
+       if (*shadow_cfg_len)
+               return;
+
+       /* shadow isn't configured yet, configure now.
+        * non-CE srngs are configured firstly, then
+        * all CE srngs.
+        */
+       qwz_hal_srng_shadow_config(sc);
+       qwz_ce_shadow_config(sc);
+
+       /* get the shadow configuration */
+       qwz_hal_srng_get_shadow_config(sc, shadow_cfg, shadow_cfg_len);
+}
+
+void
+qwz_ce_cleanup_pipes(struct qwz_softc *sc)
+{
+       struct qwz_ce_pipe *pipe;
+       int pipe_num;
+
+       qwz_ce_stop_shadow_timers(sc);
+
+       for (pipe_num = 0; pipe_num < sc->hw_params.ce_count; pipe_num++) {
+               pipe = &sc->ce.ce_pipe[pipe_num];
+               qwz_ce_rx_pipe_cleanup(pipe);
+
+               /* Cleanup any src CE's which have interrupts disabled */
+               qwz_ce_poll_send_completed(sc, pipe_num);
+       }
+}
+
+int
+qwz_ce_alloc_pipes(struct qwz_softc *sc)
+{
+       struct qwz_ce_pipe *pipe;
+       int i;
+       int ret;
+       const struct ce_attr *attr;
+
+       for (i = 0; i < sc->hw_params.ce_count; i++) {
+               attr = &sc->hw_params.host_ce_config[i];
+               pipe = &sc->ce.ce_pipe[i];
+               pipe->pipe_num = i;
+               pipe->sc = sc;
+               pipe->buf_sz = attr->src_sz_max;
+
+               ret = qwz_ce_alloc_pipe(sc, i);
+               if (ret) {
+                       /* Free any partial successful allocation */
+                       qwz_ce_free_pipes(sc);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+void
+qwz_get_ce_msi_idx(struct qwz_softc *sc, uint32_t ce_id,
+    uint32_t *msi_data_idx)
+{
+       *msi_data_idx = ce_id;
+}
+
+void
+qwz_ce_srng_msi_ring_params_setup(struct qwz_softc *sc, uint32_t ce_id,
+    struct hal_srng_params *ring_params)
+{
+       uint32_t msi_data_start = 0;
+       uint32_t msi_data_count = 1, msi_data_idx;
+       uint32_t msi_irq_start = 0;
+       uint32_t addr_lo;
+       uint32_t addr_hi;
+       int ret;
+
+       ret = sc->ops.get_user_msi_vector(sc, "CE",
+           &msi_data_count, &msi_data_start, &msi_irq_start);
+       if (ret)
+               return;
+
+       qwz_get_msi_address(sc, &addr_lo, &addr_hi);
+       qwz_get_ce_msi_idx(sc, ce_id, &msi_data_idx);
+
+       ring_params->msi_addr = addr_lo;
+       ring_params->msi_addr |= (((uint64_t)addr_hi) << 32);
+       ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
+       ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
+
+int
+qwz_ce_init_ring(struct qwz_softc *sc, struct qwz_ce_ring *ce_ring,
+    int ce_id, enum hal_ring_type type)
+{
+       struct hal_srng_params params = { 0 };
+       int ret;
+
+       params.ring_base_paddr = ce_ring->dmap->dm_segs[0].ds_addr;
+       params.ring_base_vaddr = (uint32_t *)ce_ring->base_addr;
+       params.num_entries = ce_ring->nentries;
+
+       if (!(CE_ATTR_DIS_INTR & sc->hw_params.host_ce_config[ce_id].flags))
+               qwz_ce_srng_msi_ring_params_setup(sc, ce_id, &params);
+
+       switch (type) {
+       case HAL_CE_SRC:
+               if (!(CE_ATTR_DIS_INTR &
+                   sc->hw_params.host_ce_config[ce_id].flags))
+                       params.intr_batch_cntr_thres_entries = 1;
+               break;
+       case HAL_CE_DST:
+               params.max_buffer_len =
+                   sc->hw_params.host_ce_config[ce_id].src_sz_max;
+               if (!(sc->hw_params.host_ce_config[ce_id].flags &
+                   CE_ATTR_DIS_INTR)) {
+                       params.intr_timer_thres_us = 1024;
+                       params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+                       params.low_threshold = ce_ring->nentries - 3;
+               }
+               break;
+       case HAL_CE_DST_STATUS:
+               if (!(sc->hw_params.host_ce_config[ce_id].flags &
+                   CE_ATTR_DIS_INTR)) {
+                       params.intr_batch_cntr_thres_entries = 1;
+                       params.intr_timer_thres_us = 0x1000;
+               }
+               break;
+       default:
+               printf("%s: Invalid CE ring type %d\n",
+                   sc->sc_dev.dv_xname, type);
+               return EINVAL;
+       }
+
+       /* TODO: Init other params needed by HAL to init the ring */
+
+       ret = qwz_hal_srng_setup(sc, type, ce_id, 0, &params);
+       if (ret < 0) {
+               printf("%s: failed to setup srng: ring_id %d ce_id %d\n",
+                   sc->sc_dev.dv_xname, ret, ce_id);
+               return ret;
+       }
+
+       ce_ring->hal_ring_id = ret;
+
+       if (sc->hw_params.supports_shadow_regs &&
+           qwz_ce_need_shadow_fix(ce_id))
+               qwz_dp_shadow_init_timer(sc, &sc->ce.hp_timer[ce_id],
+                   ATH12K_SHADOW_CTRL_TIMER_INTERVAL, ce_ring->hal_ring_id);
+
+       return 0;
+}
+
/*
 * Initialize the HAL srngs of every allocated CE ring (source,
 * destination and status) and reset their software ring indices.
 * Returns 0 on success or the first error encountered; note that
 * rings initialized before a failure are left as-is (see the
 * "Should we clear any partial init" remarks).
 */
int
qwz_ce_init_pipes(struct qwz_softc *sc)
{
	struct qwz_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < sc->hw_params.ce_count; i++) {
		pipe = &sc->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = qwz_ce_init_ring(sc, pipe->src_ring, i,
			    HAL_CE_SRC);
			if (ret) {
				printf("%s: failed to init src ring: %d\n",
				    sc->sc_dev.dv_xname, ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = qwz_ce_init_ring(sc, pipe->dest_ring, i,
			    HAL_CE_DST);
			if (ret) {
				printf("%s: failed to init dest ring: %d\n",
				    sc->sc_dev.dv_xname, ret);
				/* Should we clear any partial init */
				return ret;
			}

			/* Keep two entries spare so head never meets tail
			 * while replenishing rx buffers. */
			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
			    pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = qwz_ce_init_ring(sc, pipe->status_ring, i,
			    HAL_CE_DST_STATUS);
			if (ret) {
				printf("%s: failed to init status ring: %d\n",
				    sc->sc_dev.dv_xname, ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}
+
+int
+qwz_hal_srng_src_num_free(struct qwz_softc *sc, struct hal_srng *srng,
+    int sync_hw_ptr)
+{
+       uint32_t tp, hp;
+#ifdef notyet
+       lockdep_assert_held(&srng->lock);
+#endif
+       hp = srng->u.src_ring.hp;
+
+       if (sync_hw_ptr) {
+               tp = *srng->u.src_ring.tp_addr;
+               srng->u.src_ring.cached_tp = tp;
+       } else {
+               tp = srng->u.src_ring.cached_tp;
+       }
+
+       if (tp > hp)
+               return ((tp - hp) / srng->entry_size) - 1;
+       else
+               return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
+}
+
/*
 * Post one rx buffer (already loaded into the given DMA map) to a
 * pipe's destination ring: claim the next source-view entry of the
 * underlying srng and write the buffer's physical address into the
 * descriptor. Returns 0 on success or ENOSPC if the ring is full.
 * Decrements pipe->rx_buf_needed on success.
 */
int
qwz_ce_rx_buf_enqueue_pipe(struct qwz_ce_pipe *pipe, bus_dmamap_t map)
{
	struct qwz_softc *sc = pipe->sc;
	struct qwz_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	uint32_t *desc;
	uint64_t paddr;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ab->ce.ce_lock);
#endif
	write_index = ring->write_index;

	srng = &sc->hal.srng_list[ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwz_hal_srng_access_begin(sc, srng);
	/* NOTE(review): the sync length is srng->entry_size words, i.e.
	 * one ring entry, applied to the buffer map rather than the ring
	 * map — mirrors qwx(4); confirm this is the intended sync. */
	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    srng->entry_size * sizeof(uint32_t), BUS_DMASYNC_POSTREAD);

	if (qwz_hal_srng_src_num_free(sc, srng, 0) < 1) {
		ret = ENOSPC;
		goto exit;
	}

	desc = qwz_hal_srng_src_get_next_entry(sc, srng);
	if (!desc) {
		ret = ENOSPC;
		goto exit;
	}

	/* Point the descriptor at the rx buffer's DMA address. */
	paddr = map->dm_segs[0].ds_addr;
	qwz_hal_ce_dst_set_desc(desc, paddr);

	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	/* Commit the head pointer update to hardware. */
	qwz_hal_srng_access_end(sc, srng);
	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    srng->entry_size * sizeof(uint32_t), BUS_DMASYNC_PREREAD);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ret;
}
+
+int
+qwz_ce_rx_post_pipe(struct qwz_ce_pipe *pipe)
+{
+       struct qwz_softc *sc = pipe->sc;
+       int ret = 0;
+       unsigned int idx;
+       void *ctx;
+       struct qwz_rx_data *rx_data;
+       struct mbuf *m;
+
+       if (!pipe->dest_ring)
+               return 0;
+
+#ifdef notyet
+       spin_lock_bh(&ab->ce.ce_lock);
+#endif
+       while (pipe->rx_buf_needed) {
+               m = m_gethdr(M_DONTWAIT, MT_DATA);
+               if (m == NULL) {
+                       ret = ENOBUFS;
+                       goto done;
+               }
+
+               if (pipe->buf_sz <= MCLBYTES)
+                       MCLGET(m, M_DONTWAIT);
+               else
+                       MCLGETL(m, M_DONTWAIT, pipe->buf_sz);
+               if ((m->m_flags & M_EXT) == 0) {
+                       ret = ENOBUFS;
+                       goto done;
+               }
+
+               idx = pipe->dest_ring->write_index;
+               ctx = pipe->dest_ring->per_transfer_context[idx];
+               rx_data = (struct qwz_rx_data *)ctx;
+
+               m->m_len = m->m_pkthdr.len = pipe->buf_sz;
+               ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
+                   m, BUS_DMA_READ | BUS_DMA_NOWAIT);
+               if (ret) {
+                       printf("%s: can't map mbuf (error %d)\n",
+                           sc->sc_dev.dv_xname, ret);
+                       m_freem(m);
+                       goto done;
+               }
+
+               ret = qwz_ce_rx_buf_enqueue_pipe(pipe, rx_data->map);
+               if (ret) {
+                       printf("%s: failed to enqueue rx buf: %d\n",
+                           sc->sc_dev.dv_xname, ret);
+                       bus_dmamap_unload(sc->sc_dmat, rx_data->map);
+                       m_freem(m);
+                       break;
+               } else
+                       rx_data->m = m;
+       }
+
+done:
+#ifdef notyet
+       spin_unlock_bh(&ab->ce.ce_lock);
+#endif
+       return ret;
+}
+
+void
+qwz_ce_rx_post_buf(struct qwz_softc *sc)
+{
+       struct qwz_ce_pipe *pipe;
+       int i;
+       int ret;
+
+       for (i = 0; i < sc->hw_params.ce_count; i++) {
+               pipe = &sc->ce.ce_pipe[i];
+               ret = qwz_ce_rx_post_pipe(pipe);
+               if (ret) {
+                       if (ret == ENOSPC)
+                               continue;
+
+                       printf("%s: failed to post rx buf to pipe: %d err: %d\n",
+                           sc->sc_dev.dv_xname, i, ret);
+#ifdef notyet
+                       mod_timer(&ab->rx_replenish_retry,
+                                 jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
+#endif
+
+                       return;
+               }
+       }
+}
+
/*
 * Pop the next completed rx transfer from a pipe: read the next
 * status-ring descriptor, report the received byte count via nbytes
 * and (optionally) the per-transfer context of the completed
 * destination-ring slot. Advances the destination ring's software
 * index and bumps rx_buf_needed so the slot gets replenished.
 * Returns 0 on success or EIO when nothing has completed.
 */
int
qwz_ce_completed_recv_next(struct qwz_ce_pipe *pipe,
    void **per_transfer_contextp, int *nbytes)
{
	struct qwz_softc *sc = pipe->sc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	uint32_t *desc;
	int ret = 0;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	/* Completions are reported through the status ring's srng. */
	srng = &sc->hal.srng_list[pipe->status_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwz_hal_srng_access_begin(sc, srng);

	desc = qwz_hal_srng_dst_get_next_entry(sc, srng);
	if (!desc) {
		ret = EIO;
		goto err;
	}

	/* A zero length means the descriptor is not (yet) valid. */
	*nbytes = qwz_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = EIO;
		goto err;
	}

	if (per_transfer_contextp) {
		*per_transfer_contextp =
		    pipe->dest_ring->per_transfer_context[sw_index];
	}

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	/* One slot consumed; schedule one buffer for replenishment. */
	pipe->rx_buf_needed++;
err:
	qwz_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return ret;
}
+
+int
+qwz_ce_recv_process_cb(struct qwz_ce_pipe *pipe)
+{
+       struct qwz_softc *sc = pipe->sc;
+       struct mbuf *m;
+       struct mbuf_list ml = MBUF_LIST_INITIALIZER();
+       void *transfer_context;
+       unsigned int nbytes, max_nbytes;
+       int ret = 0, err;
+
+       while (qwz_ce_completed_recv_next(pipe, &transfer_context,
+           &nbytes) == 0) {
+               struct qwz_rx_data *rx_data = transfer_context;
+
+               bus_dmamap_unload(sc->sc_dmat, rx_data->map);
+               m = rx_data->m;
+               rx_data->m = NULL;
+
+               max_nbytes = m->m_pkthdr.len;
+               if (max_nbytes < nbytes) {
+                       printf("%s: received more than expected (nbytes %d, "
+                           "max %d)", __func__, nbytes, max_nbytes);
+                       m_freem(m);
+                       continue;
+               }
+               m->m_len = m->m_pkthdr.len = nbytes;
+               ml_enqueue(&ml, m);
+               ret = 1;
+       }
+
+       while ((m = ml_dequeue(&ml))) {
+               DNPRINTF(QWZ_D_CE, "%s: rx ce pipe %d len %d\n", __func__,
+                   pipe->pipe_num, m->m_len);
+               pipe->recv_cb(sc, m);
+       }
+
+       err = qwz_ce_rx_post_pipe(pipe);
+       if (err && err != ENOSPC) {
+               printf("%s: failed to post rx buf to pipe: %d err: %d\n",
+                   __func__, pipe->pipe_num, err);
+#ifdef notyet
+               mod_timer(&ab->rx_replenish_retry,
+                         jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
+#endif
+       }
+
+       return ret;
+}
+
+/*
+ * Service a single copy engine: reap TX completions when the pipe has
+ * a source ring, and process RX completions when a receive callback
+ * is installed.
+ * Returns 1 if any work was done, 0 otherwise.
+ */
+int
+qwz_ce_per_engine_service(struct qwz_softc *sc, uint16_t ce_id)
+{
+	struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
+	const struct ce_attr *attr = &sc->hw_params.host_ce_config[ce_id];
+	int handled = 0;
+
+	if (attr->src_nentries && qwz_ce_tx_process_cb(pipe))
+		handled = 1;
+
+	if (pipe->recv_cb && qwz_ce_recv_process_cb(pipe))
+		handled = 1;
+
+	return handled;
+}
+
+/*
+ * Enqueue a frame for transmission on a copy engine pipe.
+ * The DMA address programmed into the ring descriptor is taken from
+ * the qwz_tx_data slot at the source ring's current write index; the
+ * mbuf itself only supplies the transfer length.
+ * NOTE(review): this assumes the caller has already loaded m into
+ * that slot's DMA map -- confirm against the callers.
+ * Returns 0 on success, ESHUTDOWN after a firmware crash, or ENOBUFS
+ * if no source ring descriptor is available.
+ */
+int
+qwz_ce_send(struct qwz_softc *sc, struct mbuf *m, uint8_t pipe_id,
+    uint16_t transfer_id)
+{
+	struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
+	struct hal_srng *srng;
+	uint32_t *desc;
+	unsigned int write_index, sw_index;
+	unsigned int nentries_mask;
+	int ret = 0;
+	uint8_t byte_swap_data = 0;
+	int num_used;
+	uint64_t paddr;
+	void *ctx;
+	struct qwz_tx_data *tx_data;
+
+	/* Check if some entries could be regained by handling tx completion if
+	 * the CE has interrupts disabled and the used entries is more than the
+	 * defined usage threshold.
+	 */
+	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
+#ifdef notyet
+		spin_lock_bh(&ab->ce.ce_lock);
+#endif
+		write_index = pipe->src_ring->write_index;
+
+		sw_index = pipe->src_ring->sw_index;
+
+		/* Entries in use, accounting for ring wrap-around. */
+		if (write_index >= sw_index)
+			num_used = write_index - sw_index;
+		else
+			num_used = pipe->src_ring->nentries - sw_index +
+			    write_index;
+#ifdef notyet
+		spin_unlock_bh(&ab->ce.ce_lock);
+#endif
+		if (num_used > ATH12K_CE_USAGE_THRESHOLD)
+			qwz_ce_poll_send_completed(sc, pipe->pipe_num);
+	}
+
+	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags))
+		return ESHUTDOWN;
+#ifdef notyet
+	spin_lock_bh(&ab->ce.ce_lock);
+#endif
+	write_index = pipe->src_ring->write_index;
+	nentries_mask = pipe->src_ring->nentries_mask;
+
+	srng = &sc->hal.srng_list[pipe->src_ring->hal_ring_id];
+#ifdef notyet
+	spin_lock_bh(&srng->lock);
+#endif
+	qwz_hal_srng_access_begin(sc, srng);
+
+	if (qwz_hal_srng_src_num_free(sc, srng, 0) < 1) {
+		qwz_hal_srng_access_end(sc, srng);
+		ret = ENOBUFS;
+		goto err_unlock;
+	}
+
+	desc = qwz_hal_srng_src_get_next_reaped(sc, srng);
+	if (!desc) {
+		qwz_hal_srng_access_end(sc, srng);
+		ret = ENOBUFS;
+		goto err_unlock;
+	}
+
+	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
+		byte_swap_data = 1;
+
+	/* Fill the descriptor with the frame's DMA address and length. */
+	ctx = pipe->src_ring->per_transfer_context[write_index];
+	tx_data = (struct qwz_tx_data *)ctx;
+
+	paddr = tx_data->map->dm_segs[0].ds_addr;
+	qwz_hal_ce_src_set_desc(desc, paddr, m->m_pkthdr.len,
+	    transfer_id, byte_swap_data);
+
+	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
+	    write_index);
+
+	qwz_hal_srng_access_end(sc, srng);
+
+	if (qwz_ce_need_shadow_fix(pipe_id))
+		qwz_dp_shadow_start_timer(sc, srng, &sc->ce.hp_timer[pipe_id]);
+
+err_unlock:
+#ifdef notyet
+	spin_unlock_bh(&srng->lock);
+
+	spin_unlock_bh(&ab->ce.ce_lock);
+#endif
+	return ret;
+}
+
+/*
+ * Count the number of bits set in a chainmask, i.e. the number of
+ * antenna chains the mask enables.
+ */
+int
+qwz_get_num_chains(uint32_t mask)
+{
+	int count;
+
+	/* Each iteration clears the lowest set bit (Kernighan's method). */
+	for (count = 0; mask != 0; count++)
+		mask &= mask - 1;
+
+	return count;
+}
+
+/*
+ * Program the TX and RX antenna chainmasks for a pdev via WMI and
+ * cache the requested masks and resulting chain counts in the softc.
+ * Returns 0 on success or the error from the failing WMI command.
+ */
+int
+qwz_set_antenna(struct qwz_pdev *pdev, uint32_t tx_ant, uint32_t rx_ant)
+{
+	struct qwz_softc *sc = pdev->sc;
+	int ret;
+#ifdef notyet
+	lockdep_assert_held(&ar->conf_mutex);
+#endif
+	/* Remember the requested masks before programming firmware. */
+	sc->cfg_tx_chainmask = tx_ant;
+	sc->cfg_rx_chainmask = rx_ant;
+#if 0
+	if (ar->state != ATH12K_STATE_ON &&
+	    ar->state != ATH12K_STATE_RESTARTED)
+		return 0;
+#endif
+	ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_TX_CHAIN_MASK,
+	    tx_ant, pdev->pdev_id);
+	if (ret) {
+		printf("%s: failed to set tx-chainmask: %d, req 0x%x\n",
+		    sc->sc_dev.dv_xname, ret, tx_ant);
+		return ret;
+	}
+
+	sc->num_tx_chains = qwz_get_num_chains(tx_ant);
+
+	ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_RX_CHAIN_MASK,
+	    rx_ant, pdev->pdev_id);
+	if (ret) {
+		printf("%s: failed to set rx-chainmask: %d, req 0x%x\n",
+		    sc->sc_dev.dv_xname, ret, rx_ant);
+		return ret;
+	}
+
+	sc->num_rx_chains = qwz_get_num_chains(rx_ant);
+#if 0
+	/* Reload HT/VHT/HE capability */
+	ath12k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
+	ath12k_mac_setup_he_cap(ar, &ar->pdev->cap);
+#endif
+	return 0;
+}
+
+/*
+ * Build a WMI scan channel list from the channels known to net80211
+ * and send it to firmware for the given pdev.
+ * Returns EINVAL if no channels are configured, ENOMEM on allocation
+ * failure, or the result of the WMI scan-channel-list command.
+ */
+int
+qwz_reg_update_chan_list(struct qwz_softc *sc, uint8_t pdev_id)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct scan_chan_list_params *params;
+	struct ieee80211_channel *channel, *lastc;
+	struct channel_param *ch;
+	int num_channels = 0;
+	size_t params_size;
+	int ret;
+#if 0
+	if (ar->state == ATH12K_STATE_RESTARTING)
+		return 0;
+#endif
+	/* First pass: count configured channels to size the allocation. */
+	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
+	for (channel = &ic->ic_channels[1]; channel <= lastc; channel++) {
+		if (channel->ic_flags == 0)
+			continue;
+		num_channels++;
+	}
+
+	if (!num_channels)
+		return EINVAL;
+
+	params_size = sizeof(*params) +
+	    num_channels * sizeof(*params->ch_param);
+
+	/*
+	 * TODO: This is a temporary list for qwz_wmi_send_scan_chan_list_cmd
+	 * to loop over. Could that function loop over ic_channels directly?
+	 */
+	params = malloc(params_size, M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!params)
+		return ENOMEM;
+
+	params->pdev_id = pdev_id;
+	params->nallchans = num_channels;
+
+	/* Second pass: fill in one channel_param per configured channel. */
+	ch = params->ch_param;
+	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
+	for (channel = &ic->ic_channels[1]; channel <= lastc; channel++) {
+		if (channel->ic_flags == 0)
+			continue;
+#ifdef notyet
+		/* TODO: Set to true/false based on some condition? */
+		ch->allow_ht = true;
+		ch->allow_vht = true;
+		ch->allow_he = true;
+#endif
+		/* Passive 5 GHz channels are treated as DFS channels. */
+		ch->dfs_set = !!(IEEE80211_IS_CHAN_5GHZ(channel) &&
+		    (channel->ic_flags & IEEE80211_CHAN_PASSIVE));
+		ch->is_chan_passive = !!(channel->ic_flags &
+		    IEEE80211_CHAN_PASSIVE);
+		ch->is_chan_passive |= ch->dfs_set;
+		ch->mhz = ieee80211_ieee2mhz(ieee80211_chan2ieee(ic, channel),
+		    channel->ic_flags);
+		ch->cfreq1 = ch->mhz;
+		ch->minpower = 0;
+		ch->maxpower = 40; /* XXX from Linux debug trace */
+		ch->maxregpower = ch->maxpower; 
+		ch->antennamax = 0;
+
+		/* TODO: Use appropriate phymodes */
+		if (IEEE80211_IS_CHAN_A(channel))
+			ch->phy_mode = MODE_11A;
+		else if (IEEE80211_IS_CHAN_G(channel))
+			ch->phy_mode = MODE_11G;
+		else
+			ch->phy_mode = MODE_11B;
+#ifdef notyet
+		if (channel->band == NL80211_BAND_6GHZ &&
+		    cfg80211_channel_is_psc(channel))
+			ch->psc_channel = true;
+#endif
+		DNPRINTF(QWZ_D_WMI, "%s: mac channel freq %d maxpower %d "
+		    "regpower %d antenna %d mode %d\n", __func__,
+		    ch->mhz, ch->maxpower, ch->maxregpower,
+		    ch->antennamax, ch->phy_mode);
+
+		ch++;
+		/* TODO: use quarter/half rate, cfreq12, dfs_cfreq2
+		 * set_agile, reg_class_idx
+		 */
+	}
+
+	ret = qwz_wmi_send_scan_chan_list_cmd(sc, pdev_id, params);
+	free(params, M_DEVBUF, params_size);
+
+	return ret;
+}
+
+/*
+ * Default RX monitor-status ring filter: request MPDU start and
+ * PPDU end / status-done TLVs plus the management, control and data
+ * packet filter flags.  Applied by qwz_mac_config_mon_status_default()
+ * when monitor status reporting is enabled.
+ */
+static const struct htt_rx_ring_tlv_filter qwz_mac_mon_status_filter_default = {
+	.rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
+	    HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
+	    HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
+	.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
+	.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
+	.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
+	.pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
+	    HTT_RX_FP_CTRL_FILTER_FLASG3
+};
+
+/*
+ * One-time MAC layer registration: seed the vdev allocation map and
+ * channel-counter frequency, and pick up the device MAC address if
+ * none has been configured yet.
+ */
+int
+qwz_mac_register(struct qwz_softc *sc)
+{
+	/* Mark all vdev slots across all radios as available. */
+	sc->free_vdev_map = (1U << (sc->num_radios * TARGET_NUM_VDEVS(sc))) - 1;
+
+	/* Channel counters tick at this frequency (in Hz). */
+	sc->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
+
+	/* Fall back to the address from the device if none is set. */
+	if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
+		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr, sc->mac_addr);
+
+	return 0;
+}
+
+/*
+ * Select the RX filter for the monitor status rings: the default
+ * filter when enabling, an all-zero filter when disabling.
+ * The actual ring setup is currently disabled (see #if 0 below), so
+ * this only picks the filter and returns 0.
+ */
+int
+qwz_mac_config_mon_status_default(struct qwz_softc *sc, int enable)
+{
+	struct htt_rx_ring_tlv_filter tlv_filter = { 0 };
+	int ret = 0;
+#if 0
+	int i;
+	struct dp_rxdma_ring *ring;
+#endif
+
+	if (enable)
+		tlv_filter = qwz_mac_mon_status_filter_default;
+#if 0 /* mon status info is not useful and the code triggers mbuf corruption */
+	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
+		ring = &sc->pdev_dp.rx_mon_status_refill_ring[i];
+		ret = qwz_dp_tx_htt_rx_filter_setup(sc,
+		    ring->refill_buf_ring.ring_id, sc->pdev_dp.mac_id + i,
+		    HAL_RXDMA_MONITOR_STATUS, DP_RX_BUFFER_SIZE, &tlv_filter);
+		if (ret)
+			return ret;
+	}
+
+	if (enable && !sc->hw_params.rxdma1_enable) {
+		timeout_add_msec(&sc->mon_reap_timer,
+		    ATH12K_MON_TIMER_INTERVAL);
+	}
+#endif
+	return ret;
+}
+
+/*
+ * Recompute the pdev TX power limit as the minimum of all configured
+ * vif TX power levels, clamp it to the hardware min/max, and program
+ * it into firmware for each supported band.  Firmware expects the
+ * limit in units of half dBm ("2 units per dBm").
+ * Returns 0 if nothing needed to be set or on success, otherwise the
+ * WMI error.
+ */
+int
+qwz_mac_txpower_recalc(struct qwz_softc *sc, struct qwz_pdev *pdev)
+{
+	struct qwz_vif *arvif;
+	int ret, txpower = -1;
+	uint32_t param;
+	uint32_t min_tx_power = sc->target_caps.hw_min_tx_power;
+	uint32_t max_tx_power = sc->target_caps.hw_max_tx_power;
+#ifdef notyet
+	lockdep_assert_held(&ar->conf_mutex);
+#endif
+	/* Use the smallest positive txpower configured on any vif. */
+	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
+		if (arvif->txpower <= 0)
+			continue;
+
+		if (txpower == -1)
+			txpower = arvif->txpower;
+		else
+			txpower = MIN(txpower, arvif->txpower);
+	}
+
+	/* No vif has a txpower configured; nothing to do. */
+	if (txpower == -1)
+		return 0;
+
+	/* txpwr is set as 2 units per dBm in FW*/
+	txpower = MIN(MAX(min_tx_power, txpower), max_tx_power) * 2;
+	DNPRINTF(QWZ_D_MAC, "txpower to set in hw %d\n", txpower / 2);
+
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+		param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
+		ret = qwz_wmi_pdev_set_param(sc, param, txpower,
+		    pdev->pdev_id);
+		if (ret)
+			goto fail;
+	}
+
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+		param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
+		ret = qwz_wmi_pdev_set_param(sc, param, txpower,
+		    pdev->pdev_id);
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	DNPRINTF(QWZ_D_MAC, "%s: failed to recalc txpower limit %d "
+	    "using pdev param %d: %d\n", sc->sc_dev.dv_xname, txpower / 2,
+	    param, ret);
+
+	return ret;
+}
+
+/*
+ * Bring up a pdev: program the initial WMI pdev parameters, enable
+ * radar-detection offload and ppdu stats, configure the antennas,
+ * push the channel list and set up the default monitor status
+ * filter.  On success the pdev is marked active in sc->pdevs_active.
+ * Returns 0 on success or the error of the first failing step.
+ */
+int
+qwz_mac_op_start(struct qwz_pdev *pdev)
+{
+	struct qwz_softc *sc = pdev->sc;
+	struct ieee80211com *ic = &sc->sc_ic;
+	int ret;
+
+	ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_PMF_QOS, 1,
+	    pdev->pdev_id);
+	if (ret) {
+		printf("%s: failed to enable PMF QOS for pdev %d: %d\n",
+		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
+		goto err;
+	}
+
+	ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
+	    pdev->pdev_id);
+	if (ret) {
+		printf("%s: failed to enable dynamic bw for pdev %d: %d\n",
+		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
+		goto err;
+	}
+
+	/* Only set a probe request OUI if firmware advertises support. */
+	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT)) {
+		ret = qwz_wmi_scan_prob_req_oui(sc, ic->ic_myaddr,
+		    pdev->pdev_id);
+		if (ret) {
+			printf("%s: failed to set prob req oui for "
+			    "pdev %d: %i\n", sc->sc_dev.dv_xname,
+			    pdev->pdev_id, ret);
+			goto err;
+		}
+	}
+
+	ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_ARP_AC_OVERRIDE, 0,
+	    pdev->pdev_id);
+	if (ret) {
+		printf("%s: failed to set ac override for ARP for "
+		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
+		goto err;
+	}
+
+	ret = qwz_wmi_send_dfs_phyerr_offload_enable_cmd(sc, pdev->pdev_id);
+	if (ret) {
+		printf("%s: failed to offload radar detection for "
+		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
+		goto err;
+	}
+
+	ret = qwz_dp_tx_htt_h2t_ppdu_stats_req(sc, HTT_PPDU_STATS_TAG_DEFAULT,
+	    pdev->pdev_id);
+	if (ret) {
+		printf("%s: failed to req ppdu stats for pdev %d: %d\n",
+		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
+		goto err;
+	}
+
+	ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_MESH_MCAST_ENABLE, 1,
+	    pdev->pdev_id);
+	if (ret) {
+		printf("%s: failed to enable MESH MCAST ENABLE for "
+		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
+		goto err;
+	}
+
+	/* NOTE(review): return value deliberately ignored? confirm. */
+	qwz_set_antenna(pdev, pdev->cap.tx_chain_mask, pdev->cap.rx_chain_mask);
+
+	/* TODO: Do we need to enable ANI? */
+
+	ret = qwz_reg_update_chan_list(sc, pdev->pdev_id);
+	if (ret) {
+		printf("%s: failed to update channel list for pdev %d: %d\n",
+		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
+		goto err;
+	}
+
+	/* Reset vdev/peer accounting for this bring-up. */
+	sc->num_started_vdevs = 0;
+	sc->num_created_vdevs = 0;
+	sc->num_peers = 0;
+	sc->allocated_vdev_map = 0;
+
+	/* Configure monitor status ring with default rx_filter to get rx status
+	 * such as rssi, rx_duration.
+	 */
+	ret = qwz_mac_config_mon_status_default(sc, 1);
+	if (ret) {
+		printf("%s: failed to configure monitor status ring "
+		    "with default rx_filter: (%d)\n",
+		    sc->sc_dev.dv_xname, ret);
+		goto err;
+	}
+
+	/* Configure the hash seed for hash based reo dest ring selection */
+	qwz_wmi_pdev_lro_cfg(sc, pdev->pdev_id);
+
+	/* allow device to enter IMPS */
+	if (sc->hw_params.idle_ps) {
+		ret = qwz_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+		    1, pdev->pdev_id);
+		if (ret) {
+			printf("%s: failed to enable idle ps: %d\n",
+			    sc->sc_dev.dv_xname, ret);
+			goto err;
+		}
+	}
+#ifdef notyet
+	mutex_unlock(&ar->conf_mutex);
+#endif
+	sc->pdevs_active |= (1 << pdev->pdev_id);
+	return 0;
+err:
+#ifdef notyet
+	ar->state = ATH12K_STATE_OFF;
+	mutex_unlock(&ar->conf_mutex);
+#endif
+	return ret;
+}
+
+/*
+ * Fill in the MBSSID-related vdev parameters.  MBSSID operation is
+ * not supported, so report a non-MBSSID AP with no transmitting vdev.
+ * Always returns 0.
+ */
+int
+qwz_mac_setup_vdev_params_mbssid(struct qwz_vif *arvif,
+    uint32_t *flags, uint32_t *tx_vdev_id)
+{
+	*flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
+	*tx_vdev_id = 0;
+
+	return 0;
+}
+
+int
+qwz_mac_setup_vdev_create_params(struct qwz_vif *arvif, struct qwz_pdev *pdev,
+    struct vdev_create_params *params)
+{
+       struct qwz_softc *sc = arvif->sc;
+       int ret;
+
+       params->if_id = arvif->vdev_id;
+       params->type = arvif->vdev_type;
+       params->subtype = arvif->vdev_subtype;
+       params->pdev_id = pdev->pdev_id;
+       params->mbssid_flags = 0;
+       params->mbssid_tx_vdev_id = 0;
+
+       if (!isset(sc->wmi.svc_map,
+           WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT)) {
+               ret = qwz_mac_setup_vdev_params_mbssid(arvif,
+                   &params->mbssid_flags, &params->mbssid_tx_vdev_id);
+               if (ret)
+                       return ret;
+       }
+
+       if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+               params->chains[0].tx = sc->num_tx_chains;
+               params->chains[0].rx = sc->num_rx_chains;
+       }
+       if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+               params->chains[1].tx = sc->num_tx_chains;
+               params->chains[1].rx = sc->num_rx_chains;
+       }
+#if 0
+       if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
+           ar->supports_6ghz) {
+               params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
+               params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
+       }
+#endif
+       return 0;
+}
+
+/*
+ * Configure the TX encapsulation and RX decapsulation modes of a
+ * vdev.  In raw mode frames pass through unmodified; otherwise the
+ * firmware's "native wifi" frame format is used.
+ * Returns 0 on success or the error of the failing WMI command.
+ */
+int
+qwz_mac_op_update_vif_offload(struct qwz_softc *sc, struct qwz_pdev *pdev,
+    struct qwz_vif *arvif)
+{
+	uint32_t param_id, param_value;
+	int ret;
+
+	/* The same frame format applies in both directions. */
+	if (test_bit(ATH12K_FLAG_RAW_MODE, sc->sc_flags))
+		param_value = ATH12K_HW_TXRX_RAW;
+	else
+		param_value = ATH12K_HW_TXRX_NATIVE_WIFI;
+
+	param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
+	ret = qwz_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
+	    param_id, param_value);
+	if (ret) {
+		printf("%s: failed to set vdev %d tx encap mode: %d\n",
+		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+		return ret;
+	}
+
+	param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
+	ret = qwz_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
+	    param_id, param_value);
+	if (ret) {
+		printf("%s: failed to set vdev %d rx decap mode: %d\n",
+		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down a vdev in firmware.
+ * TODO: not implemented yet; currently only logs a message.
+ */
+void
+qwz_mac_vdev_delete(struct qwz_softc *sc, struct qwz_vif *arvif)
+{
+	printf("%s: not implemented\n", __func__);
+}
+
+/*
+ * Block until a vdev start/stop operation has completed, i.e. until
+ * sc->vdev_setup_done becomes nonzero, sleeping up to one second per
+ * attempt on that flag's wait channel.
+ * Returns 0 on success, ESHUTDOWN after a firmware crash, or the
+ * tsleep_nsec() error on timeout.
+ */
+int
+qwz_mac_vdev_setup_sync(struct qwz_softc *sc)
+{
+	int err;
+
+#ifdef notyet
+	lockdep_assert_held(&ar->conf_mutex);
+#endif
+	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags))
+		return ESHUTDOWN;
+
+	for (;;) {
+		if (sc->vdev_setup_done)
+			break;
+
+		err = tsleep_nsec(&sc->vdev_setup_done, 0, "qwzvdev",
+		    SEC_TO_NSEC(1));
+		if (err) {
+			printf("%s: vdev start timeout\n",
+			    sc->sc_dev.dv_xname);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Configure TX beamforming for a vif.
+ * Currently a no-op that always succeeds.
+ */
+int
+qwz_mac_set_txbf_conf(struct qwz_vif *arvif)
+{
+	/* TX beamforming is not yet supported. */
+	return 0;
+}
+
+/*
+ * Stop a running vdev: issue a WMI vdev stop command and wait for
+ * firmware to confirm.  On success the started-vdev count is
+ * decremented and any running CAC is cleared.
+ * Returns 0 on success or the error from WMI or the sync wait.
+ */
+int
+qwz_mac_vdev_stop(struct qwz_softc *sc, struct qwz_vif *arvif, int pdev_id)
+{
+	int ret;
+#ifdef notyet
+	lockdep_assert_held(&ar->conf_mutex);
+#endif
+#if 0
+	reinit_completion(&ar->vdev_setup_done);
+#endif
+	/* Cleared here; set again by the stop-event handler. */
+	sc->vdev_setup_done = 0;
+	ret = qwz_wmi_vdev_stop(sc, arvif->vdev_id, pdev_id);
+	if (ret) {
+		printf("%s: failed to stop WMI vdev %i: %d\n",
+		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+		return ret;
+	}
+
+	ret = qwz_mac_vdev_setup_sync(sc);
+	if (ret) {
+		printf("%s: failed to synchronize setup for vdev %i: %d\n",
+		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+		return ret;
+	}
+
+	if (sc->num_started_vdevs > 0)
+		sc->num_started_vdevs--;
+
+	DNPRINTF(QWZ_D_MAC, "%s: vdev vdev_id %d stopped\n", __func__,
+	    arvif->vdev_id);
+
+	if (test_bit(ATH12K_CAC_RUNNING, sc->sc_flags)) {
+		clear_bit(ATH12K_CAC_RUNNING, sc->sc_flags);
+		DNPRINTF(QWZ_D_MAC, "%s: CAC Stopped for vdev %d\n", __func__,
+		    arvif->vdev_id);
+	}
+
+	return 0;
+}
+
+/*
+ * Start (restart != 0: restart) a vdev on the current BSS channel.
+ * Builds a WMI vdev start request from net80211 state, issues it and
+ * waits for firmware confirmation.  Only 11a/b/g phy modes are
+ * supported so far.
+ * Returns 0 on success, ENOTSUP for an unsupported phy mode, or an
+ * error from WMI or the sync wait.
+ */
+int
+qwz_mac_vdev_start_restart(struct qwz_softc *sc, struct qwz_vif *arvif,
+    int pdev_id, int restart)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_channel *chan = ic->ic_bss->ni_chan;
+	struct wmi_vdev_start_req_arg arg = {};
+	int ret = 0;
+#ifdef notyet
+	lockdep_assert_held(&ar->conf_mutex);
+#endif
+#if 0
+	reinit_completion(&ar->vdev_setup_done);
+#endif
+	arg.vdev_id = arvif->vdev_id;
+	arg.dtim_period = ic->ic_dtim_period;
+	arg.bcn_intval = ic->ic_lintval;
+
+	arg.channel.freq = chan->ic_freq;
+	arg.channel.band_center_freq1 = chan->ic_freq;
+	arg.channel.band_center_freq2 = chan->ic_freq;
+
+	switch (ic->ic_curmode) {
+	case IEEE80211_MODE_11A:
+		arg.channel.mode = MODE_11A;
+		break;
+	case IEEE80211_MODE_11B:
+		arg.channel.mode = MODE_11B;
+		break;
+	case IEEE80211_MODE_11G:
+		arg.channel.mode = MODE_11G;
+		break;
+	default:
+		printf("%s: unsupported phy mode %d\n",
+		    sc->sc_dev.dv_xname, ic->ic_curmode);
+		return ENOTSUP;
+	}
+
+	arg.channel.min_power = 0;
+	arg.channel.max_power = 20; /* XXX */
+	arg.channel.max_reg_power = 20; /* XXX */
+	arg.channel.max_antenna_gain = 0; /* XXX */
+
+	arg.pref_tx_streams = 1;
+	arg.pref_rx_streams = 1;
+
+	arg.mbssid_flags = 0;
+	arg.mbssid_tx_vdev_id = 0;
+	/* Firmware may want MBSSID parameters in the start command. */
+	if (isset(sc->wmi.svc_map,
+	    WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT)) {
+		ret = qwz_mac_setup_vdev_params_mbssid(arvif,
+		    &arg.mbssid_flags, &arg.mbssid_tx_vdev_id);
+		if (ret)
+			return ret;
+	}
+#if 0
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		arg.ssid = arvif->u.ap.ssid;
+		arg.ssid_len = arvif->u.ap.ssid_len;
+		arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+		/* For now allow DFS for AP mode */
+		arg.channel.chan_radar =
+			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+
+		arg.channel.freq2_radar = ctx->radar_enabled;
+
+		arg.channel.passive = arg.channel.chan_radar;
+
+		spin_lock_bh(&ab->base_lock);
+		arg.regdomain = ar->ab->dfs_region;
+		spin_unlock_bh(&ab->base_lock);
+	}
+#endif
+	/* XXX: mark channels >= 52 passive; presumably to cover the
+	 * 5 GHz DFS range -- revisit once regulatory data is used. */
+	arg.channel.passive |= !!(ieee80211_chan2ieee(ic, chan) >= 52);
+
+	DNPRINTF(QWZ_D_MAC, "%s: vdev %d start center_freq %d phymode %s\n",
+	    __func__, arg.vdev_id, arg.channel.freq,
+	    qwz_wmi_phymode_str(arg.channel.mode));
+
+	sc->vdev_setup_done = 0;
+	ret = qwz_wmi_vdev_start(sc, &arg, pdev_id, restart);
+	if (ret) {
+		printf("%s: failed to %s WMI vdev %i\n", sc->sc_dev.dv_xname,
+		    restart ? "restart" : "start", arg.vdev_id);
+		return ret;
+	}
+
+	ret = qwz_mac_vdev_setup_sync(sc);
+	if (ret) {
+		printf("%s: failed to synchronize setup for vdev %i %s: %d\n",
+		    sc->sc_dev.dv_xname, arg.vdev_id,
+		    restart ? "restart" : "start", ret);
+		return ret;
+	}
+
+	if (!restart)
+		sc->num_started_vdevs++;
+
+	DNPRINTF(QWZ_D_MAC, "%s: vdev %d started\n", __func__, arvif->vdev_id);
+
+	/* Enable the CAC flag in the driver by checking the channel DFS CAC
+	 * time, i.e. the dfs_cac_ms value, which is only valid for radar
+	 * channels in state NL80211_DFS_USABLE, indicating that CAC must be
+	 * performed before the channel can be used.  This flag is used to
+	 * drop rx packets during CAC.
+	 */
+	/* TODO Set the flag for other interface types as required */
+#if 0
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
+	    chandef->chan->dfs_cac_ms &&
+	    chandef->chan->dfs_state == NL80211_DFS_USABLE) {
+		set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+		ath12k_dbg(ab, ATH12K_DBG_MAC,
+			   "CAC Started in chan_freq %d for vdev %d\n",
+			   arg.channel.freq, arg.vdev_id);
+	}
+#endif
+	ret = qwz_mac_set_txbf_conf(arvif);
+	if (ret)
+		printf("%s: failed to set txbf conf for vdev %d: %d\n",
+		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+
+	return 0;
+}
+
+/*
+ * Restart a vdev on the current BSS channel (restart variant: does
+ * not increment the started-vdev count).
+ */
+int
+qwz_mac_vdev_restart(struct qwz_softc *sc, struct qwz_vif *arvif, int pdev_id)
+{
+	return qwz_mac_vdev_start_restart(sc, arvif, pdev_id, 1);
+}
+
+/*
+ * Start a vdev on the current BSS channel; thin wrapper around
+ * qwz_mac_vdev_start_restart().
+ */
+int
+qwz_mac_vdev_start(struct qwz_softc *sc, struct qwz_vif *arvif, int pdev_id)
+{
+	return qwz_mac_vdev_start_restart(sc, arvif, pdev_id, 0);
+}
+
+/*
+ * Release a vif together with the DMA maps and any pending frames of
+ * its management TX queue.  A NULL vif is tolerated.
+ */
+void
+qwz_vif_free(struct qwz_softc *sc, struct qwz_vif *arvif)
+{
+	struct qwz_txmgmt_queue *txmgmt;
+	int i;
+
+	if (arvif == NULL)
+		return;
+
+	txmgmt = &arvif->txmgmt;
+	for (i = 0; i < nitems(txmgmt->data); i++) {
+		struct qwz_tx_data *tx_data = &txmgmt->data[i];
+
+		/* m_freem(9) accepts NULL. */
+		m_freem(tx_data->m);
+		tx_data->m = NULL;
+
+		if (tx_data->map != NULL) {
+			bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
+			tx_data->map = NULL;
+		}
+	}
+
+	free(arvif, M_DEVBUF, sizeof(*arvif));
+}
+
+/*
+ * Allocate and initialize a new vif, creating a DMA map for each
+ * slot of its management TX queue.  Returns NULL if memory or DMA
+ * map allocation fails.
+ */
+struct qwz_vif *
+qwz_vif_alloc(struct qwz_softc *sc)
+{
+	const bus_size_t maxlen = IEEE80211_MAX_LEN;
+	struct qwz_vif *arvif;
+	struct qwz_txmgmt_queue *txmgmt;
+	int i;
+
+	arvif = malloc(sizeof(*arvif), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (arvif == NULL)
+		return NULL;
+
+	txmgmt = &arvif->txmgmt;
+	for (i = 0; i < nitems(txmgmt->data); i++) {
+		struct qwz_tx_data *tx_data = &txmgmt->data[i];
+
+		if (bus_dmamap_create(sc->sc_dmat, maxlen, 1, maxlen, 0,
+		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &tx_data->map) != 0) {
+			/* Tears down any maps created so far. */
+			qwz_vif_free(sc, arvif);
+			return NULL;
+		}
+	}
+
+	arvif->sc = sc;
+
+	return arvif;
+}
+
+int
+qwz_mac_op_add_interface(struct qwz_pdev *pdev)
+{
+       struct qwz_softc *sc = pdev->sc;
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct qwz_vif *arvif = NULL;
+       struct vdev_create_params vdev_param = { 0 };
+#if 0
+       struct peer_create_params peer_param;
+#endif
+       uint32_t param_id, param_value;
+       uint16_t nss;
+#if 0
+       int i;
+       int fbret;
+#endif
+       int ret, bit;
+#ifdef notyet
+       mutex_lock(&ar->conf_mutex);
+#endif
+#if 0
+       if (vif->type == NL80211_IFTYPE_AP &&
+           ar->num_peers > (ar->max_num_peers - 1)) {
+               ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
+               ret = -ENOBUFS;
+               goto err;
+       }
+#endif
+       if (sc->num_created_vdevs > (TARGET_NUM_VDEVS(sc) - 1)) {
+               printf("%s: failed to create vdev %u, reached vdev limit %d\n",
+                   sc->sc_dev.dv_xname, sc->num_created_vdevs,
+                   TARGET_NUM_VDEVS(sc));
+               ret = EBUSY;
+               goto err;
+       }
+
+       arvif = qwz_vif_alloc(sc);
+       if (arvif == NULL) {
+               ret = ENOMEM;
+               goto err;
+       }
+#if 0
+       INIT_DELAYED_WORK(&arvif->connection_loss_work,
+                         ath12k_mac_vif_sta_connection_loss_work);
+       for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+               arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+               arvif->bitrate_mask.control[i].gi = 0;
+               memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+                      sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+               memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+                      sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+               memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
+                      sizeof(arvif->bitrate_mask.control[i].he_mcs));
+       }
+#endif
+
+       if (sc->free_vdev_map == 0) {
+               printf("%s: cannot add interface; all vdevs are busy\n",
+                   sc->sc_dev.dv_xname);
+               ret = EBUSY;
+               goto err;
+       }
+       bit = ffs(sc->free_vdev_map) - 1;
+
+       arvif->vdev_id = bit;
+       arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+
+       switch (ic->ic_opmode) {
+       case IEEE80211_M_STA:
+               arvif->vdev_type = WMI_VDEV_TYPE_STA;
+               break;
+#if 0
+       case NL80211_IFTYPE_MESH_POINT:
+               arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
+               fallthrough;
+       case NL80211_IFTYPE_AP:
+               arvif->vdev_type = WMI_VDEV_TYPE_AP;
+               break;
+       case NL80211_IFTYPE_MONITOR:
+               arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+               ar->monitor_vdev_id = bit;
+               break;
+#endif
+       default:
+               printf("%s: invalid operating mode %d\n",
+                   sc->sc_dev.dv_xname, ic->ic_opmode);
+               ret = EINVAL;
+               goto err;
+       }
+
+       DNPRINTF(QWZ_D_MAC,
+           "%s: add interface id %d type %d subtype %d map 0x%x\n",
+           __func__, arvif->vdev_id, arvif->vdev_type,
+           arvif->vdev_subtype, sc->free_vdev_map);
+
+       ret = qwz_mac_setup_vdev_create_params(arvif, pdev, &vdev_param);
+       if (ret) {
+               printf("%s: failed to create vdev parameters %d: %d\n",
+                   sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+               goto err;
+       }
+
+       ret = qwz_wmi_vdev_create(sc, ic->ic_myaddr, &vdev_param);
+       if (ret) {
+               printf("%s: failed to create WMI vdev %d %s: %d\n",
+                   sc->sc_dev.dv_xname, arvif->vdev_id,
+                   ether_sprintf(ic->ic_myaddr), ret);
+               goto err;
+       }
+
+       sc->num_created_vdevs++;
+       DNPRINTF(QWZ_D_MAC, "%s: vdev %s created, vdev_id %d\n", __func__,
+           ether_sprintf(ic->ic_myaddr), arvif->vdev_id);
+       sc->allocated_vdev_map |= 1U << arvif->vdev_id;
+       sc->free_vdev_map &= ~(1U << arvif->vdev_id);
+#ifdef notyet
+       spin_lock_bh(&ar->data_lock);
+#endif
+       TAILQ_INSERT_TAIL(&sc->vif_list, arvif, entry);
+#ifdef notyet
+       spin_unlock_bh(&ar->data_lock);
+#endif
+       ret = qwz_mac_op_update_vif_offload(sc, pdev, arvif);
+       if (ret)
+               goto err_vdev_del;
+
+       nss = qwz_get_num_chains(sc->cfg_tx_chainmask) ? : 1;
+       ret = qwz_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
+           WMI_VDEV_PARAM_NSS, nss);
+       if (ret) {
+               printf("%s: failed to set vdev %d chainmask 0x%x, nss %d: %d\n",
+                   sc->sc_dev.dv_xname, arvif->vdev_id, sc->cfg_tx_chainmask,
+                   nss, ret);
+               goto err_vdev_del;
+       }
+
+       switch (arvif->vdev_type) {
+#if 0
+       case WMI_VDEV_TYPE_AP:
+               peer_param.vdev_id = arvif->vdev_id;
+               peer_param.peer_addr = vif->addr;
+               peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+               ret = ath12k_peer_create(ar, arvif, NULL, &peer_param);
+               if (ret) {
+                       ath12k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
+                                   arvif->vdev_id, ret);
+                       goto err_vdev_del;
+               }
+
+               ret = ath12k_mac_set_kickout(arvif);
+               if (ret) {
+                       ath12k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
+                                   arvif->vdev_id, ret);
+                       goto err_peer_del;
+               }
+
+               ath12k_mac_11d_scan_stop_all(ar->ab);
+               break;
+#endif
+       case WMI_VDEV_TYPE_STA:
+               param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+               param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+               ret = qwz_wmi_set_sta_ps_param(sc, arvif->vdev_id,
+                   pdev->pdev_id, param_id, param_value);
+               if (ret) {
+                       printf("%s: failed to set vdev %d RX wake policy: %d\n",
+                           sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+                       goto err_peer_del;
+               }
+
+               param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+               param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+               ret = qwz_wmi_set_sta_ps_param(sc, arvif->vdev_id,
+                   pdev->pdev_id, param_id, param_value);
+               if (ret) {
+                       printf("%s: failed to set vdev %d "
+                           "TX wake threshold: %d\n",
+                           sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+                       goto err_peer_del;
+               }
+
+               param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+               param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+               ret = qwz_wmi_set_sta_ps_param(sc, arvif->vdev_id,
+                   pdev->pdev_id, param_id, param_value);
+               if (ret) {
+                       printf("%s: failed to set vdev %d pspoll count: %d\n",
+                           sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+                       goto err_peer_del;
+               }
+
+               ret = qwz_wmi_pdev_set_ps_mode(sc, arvif->vdev_id,
+                   pdev->pdev_id, WMI_STA_PS_MODE_DISABLED);
+               if (ret) {
+                       printf("%s: failed to disable vdev %d ps mode: %d\n",
+                           sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+                       goto err_peer_del;
+               }
+
+               if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD)) {
+                       sc->completed_11d_scan = 0;
+                       sc->state_11d = ATH12K_11D_PREPARING;
+               }
+               break;
+#if 0
+       case WMI_VDEV_TYPE_MONITOR:
+               set_bit(ATH12K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+               break;
+#endif
+       default:
+               printf("%s: invalid vdev type %d\n",
+                   sc->sc_dev.dv_xname, arvif->vdev_type);
+               ret = EINVAL;
+               goto err;
+       }
+
+       arvif->txpower = 40;
+       ret = qwz_mac_txpower_recalc(sc, pdev);
+       if (ret)
+               goto err_peer_del;
+
+       param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+       param_value = ic->ic_rtsthreshold;
+       ret = qwz_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
+           param_id, param_value);
+       if (ret) {
+               printf("%s: failed to set rts threshold for vdev %d: %d\n",
+                   sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+               goto err_peer_del;
+       }
+
+       qwz_dp_vdev_tx_attach(sc, pdev, arvif);
+#if 0
+       if (vif->type != NL80211_IFTYPE_MONITOR &&
+           test_bit(ATH12K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
+               ret = ath12k_mac_monitor_vdev_create(ar);
+               if (ret)
+                       ath12k_warn(ar->ab, "failed to create monitor vdev during add interface: %d",
+                                   ret);
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+#endif
+       return 0;
+
+err_peer_del:
+#if 0
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+               fbret = qwz_peer_delete(sc, arvif->vdev_id, vif->addr);
+               if (fbret) {
+                       printf("%s: fallback fail to delete peer addr %pM "
+                           "vdev_id %d ret %d\n", sc->sc_dev.dv_xname,
+                           vif->addr, arvif->vdev_id, fbret);
+                       goto err;
+               }
+       }
+#endif
+err_vdev_del:
+       qwz_mac_vdev_delete(sc, arvif);
+#ifdef notyet
+       spin_lock_bh(&ar->data_lock);
+#endif
+       TAILQ_REMOVE(&sc->vif_list, arvif, entry);
+#ifdef notyet
+       spin_unlock_bh(&ar->data_lock);
+#endif
+
+err:
+#ifdef notyet
+       mutex_unlock(&ar->conf_mutex);
+#endif
+       qwz_vif_free(sc, arvif);
+       return ret;
+}
+
+/*
+ * Bring up the MAC layer: start every radio (pdev) and create its
+ * vdev interface.  Returns 0 on success or the first error from
+ * qwz_mac_op_start()/qwz_mac_op_add_interface().
+ */
+int
+qwz_mac_start(struct qwz_softc *sc)
+{
+       struct qwz_pdev *pdev;
+       int i, error;
+
+       for (i = 0; i < sc->num_radios; i++) {
+               pdev = &sc->pdevs[i];
+               error = qwz_mac_op_start(pdev);
+               if (error)
+                       return error;
+
+               error = qwz_mac_op_add_interface(pdev);
+               if (error)
+                       return error;
+       }
+
+       return 0;
+}
+
+/*
+ * Deferred (re)initialization task: stop the interface if it is
+ * running, then re-initialize it if it is still administratively up.
+ * Serializes against the ioctl path via ioctl_rwl, at splnet.
+ */
+void
+qwz_init_task(void *arg)
+{
+       struct qwz_softc *sc = arg;
+       struct ifnet *ifp = &sc->sc_ic.ic_if;
+       int s = splnet();
+       rw_enter_write(&sc->ioctl_rwl);
+
+       if (ifp->if_flags & IFF_RUNNING)
+               qwz_stop(ifp);
+
+       /* Only re-init if the interface is UP but no longer RUNNING. */
+       if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
+               qwz_init(ifp);
+
+       rw_exit(&sc->ioctl_rwl);
+       splx(s);
+}
+
+/*
+ * Start a firmware-offloaded 802.11d (country code detection) scan on
+ * the given vdev.  The scan is only started if no 11d scan is already
+ * active, the firmware advertises 11D offload, and we are in STA mode;
+ * otherwise the 11d state machine is reset to idle.
+ */
+void
+qwz_mac_11d_scan_start(struct qwz_softc *sc, struct qwz_vif *arvif)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct wmi_11d_scan_start_params param;
+       int ret;
+#ifdef notyet
+       mutex_lock(&ar->ab->vdev_id_11d_lock);
+#endif
+       DNPRINTF(QWZ_D_MAC, "%s: vdev id for 11d scan %d\n", __func__,
+           sc->vdev_id_11d_scan);
+#if 0
+       if (ar->regdom_set_by_user)
+               goto fin;
+#endif
+       /* Only one 11d scan vdev at a time. */
+       if (sc->vdev_id_11d_scan != QWZ_11D_INVALID_VDEV_ID)
+               goto fin;
+
+       if (!isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD))
+               goto fin;
+
+       if (ic->ic_opmode != IEEE80211_M_STA)
+               goto fin;
+
+       param.vdev_id = arvif->vdev_id;
+       param.start_interval_msec = 0;
+       param.scan_period_msec = QWZ_SCAN_11D_INTERVAL;
+
+       DNPRINTF(QWZ_D_MAC, "%s: start 11d scan\n", __func__);
+
+       ret = qwz_wmi_send_11d_scan_start_cmd(sc, &param,
+          0 /* TODO: derive pdev ID from arvif somehow? */);
+       if (ret) {
+               /* ESHUTDOWN means the device is going away; stay quiet. */
+               if (ret != ESHUTDOWN) {
+                       printf("%s: failed to start 11d scan; vdev: %d "
+                           "ret: %d\n", sc->sc_dev.dv_xname,
+                           arvif->vdev_id, ret);
+               }
+       } else {
+               sc->vdev_id_11d_scan = arvif->vdev_id;
+               if (sc->state_11d == ATH12K_11D_PREPARING)
+                       sc->state_11d = ATH12K_11D_RUNNING;
+       }
+fin:
+       /* If the scan did not transition to RUNNING, give up on 11d. */
+       if (sc->state_11d == ATH12K_11D_PREPARING) {
+               sc->state_11d = ATH12K_11D_IDLE;
+               sc->completed_11d_scan = 0;
+       }
+#ifdef notyet
+       mutex_unlock(&ar->ab->vdev_id_11d_lock);
+#endif
+}
+
+/*
+ * Finish an in-progress hardware scan: reset scan state to idle,
+ * cancel the scan timeout and notify net80211 that the scan ended
+ * (unless this was a remain-on-channel request).
+ */
+void
+qwz_mac_scan_finish(struct qwz_softc *sc)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct ifnet *ifp = &ic->ic_if;
+       enum ath12k_scan_state ostate;
+
+#ifdef notyet
+       lockdep_assert_held(&ar->data_lock);
+#endif
+       ostate = sc->scan.state;
+       switch (ostate) {
+       case ATH12K_SCAN_IDLE:
+               break;
+       case ATH12K_SCAN_RUNNING:
+       case ATH12K_SCAN_ABORTING:
+#if 0
+               if (ar->scan.is_roc && ar->scan.roc_notify)
+                       ieee80211_remain_on_channel_expired(ar->hw);
+               fallthrough;
+#endif
+       case ATH12K_SCAN_STARTING:
+               sc->scan.state = ATH12K_SCAN_IDLE;
+               sc->scan_channel = 0;
+               sc->scan.roc_freq = 0;
+
+               timeout_del(&sc->scan.timeout);
+               if (!sc->scan.is_roc)
+                       ieee80211_end_scan(ifp);
+#if 0
+               complete_all(&ar->scan.completed);
+#endif
+               break;
+       }
+}
+
+/*
+ * Translate a net80211 bitrate (in units of 500 kbit/s, e.g. 2 == 1
+ * Mbit/s, 108 == 54 Mbit/s) into a firmware hardware rate code with the
+ * appropriate CCK/OFDM preamble.  Short-preamble CCK variants are used
+ * on 2 GHz channels when short preambles are enabled.  Returns -1 for
+ * unsupported bitrates.
+ */
+int
+qwz_mac_get_rate_hw_value(struct ieee80211com *ic,
+    struct ieee80211_node *ni, int bitrate)
+{
+       uint32_t preamble;
+       uint16_t hw_value;
+       int shortpre = 0;
+
+       if (IEEE80211_IS_CHAN_CCK(ni->ni_chan))
+               preamble = WMI_RATE_PREAMBLE_CCK;
+       else
+               preamble = WMI_RATE_PREAMBLE_OFDM;
+
+       if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+           IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
+               shortpre = 1;
+
+       switch (bitrate) {
+       case 2:         /* 1 Mbit/s CCK; no short-preamble variant */
+               hw_value = ATH12K_HW_RATE_CCK_LP_1M;
+               break;
+       case 4:         /* 2 Mbit/s CCK */
+               if (shortpre)
+                       hw_value = ATH12K_HW_RATE_CCK_SP_2M;
+               else
+                       hw_value = ATH12K_HW_RATE_CCK_LP_2M;
+               break;
+       case 11:        /* 5.5 Mbit/s CCK */
+               if (shortpre)
+                       hw_value = ATH12K_HW_RATE_CCK_SP_5_5M;
+               else
+                       hw_value = ATH12K_HW_RATE_CCK_LP_5_5M;
+               break;
+       case 22:        /* 11 Mbit/s CCK */
+               if (shortpre)
+                       hw_value = ATH12K_HW_RATE_CCK_SP_11M;
+               else
+                       hw_value = ATH12K_HW_RATE_CCK_LP_11M;
+               break;
+       case 12:        /* 6 Mbit/s OFDM */
+               hw_value = ATH12K_HW_RATE_OFDM_6M;
+               break;
+       case 18:        /* 9 Mbit/s OFDM */
+               hw_value = ATH12K_HW_RATE_OFDM_9M;
+               break;
+       case 24:        /* 12 Mbit/s OFDM */
+               hw_value = ATH12K_HW_RATE_OFDM_12M;
+               break;
+       case 36:        /* 18 Mbit/s OFDM */
+               hw_value = ATH12K_HW_RATE_OFDM_18M;
+               break;
+       case 48:        /* 24 Mbit/s OFDM */
+               hw_value = ATH12K_HW_RATE_OFDM_24M;
+               break;
+       case 72:        /* 36 Mbit/s OFDM */
+               hw_value = ATH12K_HW_RATE_OFDM_36M;
+               break;
+       case 96:        /* 48 Mbit/s OFDM */
+               hw_value = ATH12K_HW_RATE_OFDM_48M;
+               break;
+       case 108:       /* 54 Mbit/s OFDM */
+               hw_value = ATH12K_HW_RATE_OFDM_54M;
+               break;
+       default:
+               return -1;
+       }
+
+       return ATH12K_HW_RATE_CODE(hw_value, 0, preamble);
+}
+
+/*
+ * Delete a firmware peer entry.  Sends the WMI peer-delete command and
+ * then waits (up to 3 seconds each) for the HTT peer-unmap event and the
+ * WMI peer-delete-done event, signalled via sc->peer_mapped and
+ * sc->peer_delete_done by the event handlers.  Decrements the peer
+ * count on success; returns 0 or an errno (EWOULDBLOCK on timeout).
+ */
+int
+qwz_peer_delete(struct qwz_softc *sc, uint32_t vdev_id, uint8_t pdev_id,
+    uint8_t *addr)
+{
+       int ret;
+
+       sc->peer_mapped = 0;
+       sc->peer_delete_done = 0;
+
+       ret = qwz_wmi_send_peer_delete_cmd(sc, addr, vdev_id, pdev_id);
+       if (ret) {
+               printf("%s: failed to delete peer vdev_id %d addr %s ret %d\n",
+                   sc->sc_dev.dv_xname, vdev_id, ether_sprintf(addr), ret);
+               return ret;
+       }
+
+       while (!sc->peer_mapped) {
+               ret = tsleep_nsec(&sc->peer_mapped, 0, "qwzpeer",
+                   SEC_TO_NSEC(3));
+               if (ret) {
+                       printf("%s: peer delete unmap timeout\n",
+                           sc->sc_dev.dv_xname);
+                       return ret;
+               }
+       }
+
+       while (!sc->peer_delete_done) {
+               ret = tsleep_nsec(&sc->peer_delete_done, 0, "qwzpeerd",
+                   SEC_TO_NSEC(3));
+               if (ret) {
+                       printf("%s: peer delete command timeout\n",
+                           sc->sc_dev.dv_xname);
+                       return ret;
+               }
+       }
+
+       sc->num_peers--;
+       return 0;
+}
+
+/*
+ * Create a firmware peer entry for the given node.  Checks firmware
+ * peer capacity, sends the WMI peer-create command and waits (up to 3
+ * seconds) for the HTT peer-map event (signalled via sc->peer_mapped).
+ * In STA mode the AST hash/index from the new peer is cached in the
+ * vif for later TX descriptor setup.  Returns 0 or an errno.
+ */
+int
+qwz_peer_create(struct qwz_softc *sc, struct qwz_vif *arvif, uint8_t pdev_id,
+    struct ieee80211_node *ni, struct peer_create_params *param)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct qwz_node *nq = (struct qwz_node *)ni;
+       struct ath12k_peer *peer;
+       int ret;
+#ifdef notyet
+       lockdep_assert_held(&ar->conf_mutex);
+#endif
+       if (sc->num_peers > (TARGET_NUM_PEERS_PDEV(sc) - 1)) {
+               DPRINTF("%s: failed to create peer due to insufficient "
+                   "peer entry resource in firmware\n", __func__);
+               return ENOBUFS;
+       }
+#ifdef notyet
+       mutex_lock(&ar->ab->tbl_mtx_lock);
+       spin_lock_bh(&ar->ab->base_lock);
+#endif
+       peer = &nq->peer;
+       /*
+        * NOTE(review): peer points at a struct embedded in the node and
+        * can never be NULL; this check is always true.  It mirrors the
+        * Linux code where the peer is looked up in a table instead.
+        */
+       if (peer) {
+               /* Refuse to create a duplicate peer on the same vdev. */
+               if (peer->peer_id != HAL_INVALID_PEERID &&
+                   peer->vdev_id == param->vdev_id) {
+#ifdef notyet
+                       spin_unlock_bh(&ar->ab->base_lock);
+                       mutex_unlock(&ar->ab->tbl_mtx_lock);
+#endif
+                       return EINVAL;
+               }
+#if 0
+               /* Assume sta is transitioning to another band.
+                * Remove here the peer from rhash.
+                */
+               ath12k_peer_rhash_delete(ar->ab, peer);
+#endif
+       }
+#ifdef notyet
+       spin_unlock_bh(&ar->ab->base_lock);
+       mutex_unlock(&ar->ab->tbl_mtx_lock);
+#endif
+       sc->peer_mapped = 0;
+
+       ret = qwz_wmi_send_peer_create_cmd(sc, pdev_id, param);
+       if (ret) {
+               printf("%s: failed to send peer create vdev_id %d ret %d\n",
+                   sc->sc_dev.dv_xname, param->vdev_id, ret);
+               return ret;
+       }
+
+       /* Wait for the HTT peer-map event handler to wake us up. */
+       while (!sc->peer_mapped) {
+               ret = tsleep_nsec(&sc->peer_mapped, 0, "qwzpeer",
+                   SEC_TO_NSEC(3));
+               if (ret) {
+                       printf("%s: peer create command timeout\n",
+                           sc->sc_dev.dv_xname);
+                       return ret;
+               }
+       }
+
+#ifdef notyet
+       mutex_lock(&ar->ab->tbl_mtx_lock);
+       spin_lock_bh(&ar->ab->base_lock);
+#endif
+#if 0
+       peer = ath12k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
+       if (!peer) {
+               spin_unlock_bh(&ar->ab->base_lock);
+               mutex_unlock(&ar->ab->tbl_mtx_lock);
+               ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
+                           param->peer_addr, param->vdev_id);
+
+               ret = -ENOENT;
+               goto cleanup;
+       }
+
+       ret = ath12k_peer_rhash_add(ar->ab, peer);
+       if (ret) {
+               spin_unlock_bh(&ar->ab->base_lock);
+               mutex_unlock(&ar->ab->tbl_mtx_lock);
+               goto cleanup;
+       }
+#endif
+       peer->pdev_id = pdev_id;
+#if 0
+       peer->sta = sta;
+#endif
+       if (ic->ic_opmode == IEEE80211_M_STA) {
+               arvif->ast_hash = peer->ast_hash;
+               arvif->ast_idx = peer->hw_peer_id;
+       }
+#if 0
+       peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
+       peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+
+       if (sta) {
+               struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+               arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
+                                      FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
+                                                 peer->peer_id);
+
+               /* set HTT extension valid bit to 0 by default */
+               arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+       }
+#endif
+       sc->num_peers++;
+#ifdef notyet
+       spin_unlock_bh(&ar->ab->base_lock);
+       mutex_unlock(&ar->ab->tbl_mtx_lock);
+#endif
+       return 0;
+#if 0
+cleanup:
+       int fbret = qwz_peer_delete(sc, param->vdev_id, param->peer_addr);
+       if (fbret) {
+               printf("%s: failed peer %s delete vdev_id %d fallback ret %d\n",
+                   sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
+                   param->vdev_id, fbret);
+       }
+
+       return ret;
+#endif
+}
+
+/*
+ * Enqueue a REO (rx reorder engine) command on the REO command ring.
+ * If a completion callback is given, the command and a snapshot of the
+ * rx_tid are queued on dp->reo_cmd_list so the status-ring handler can
+ * invoke the callback later.  Returns 0 or an errno.
+ */
+int
+qwz_dp_tx_send_reo_cmd(struct qwz_softc *sc, struct dp_rx_tid *rx_tid,
+    enum hal_reo_cmd_type type, struct ath12k_hal_reo_cmd *cmd,
+    void (*cb)(struct qwz_dp *, void *, enum hal_reo_cmd_status))
+{
+       struct qwz_dp *dp = &sc->dp;
+       struct dp_reo_cmd *dp_cmd;
+       struct hal_srng *cmd_ring;
+       int cmd_num;
+
+       if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags))
+               return ESHUTDOWN;
+
+       cmd_ring = &sc->hal.srng_list[dp->reo_cmd_ring.ring_id];
+       cmd_num = qwz_hal_reo_cmd_send(sc, cmd_ring, type, cmd);
+       /* cmd_num should start from 1, during failure return the error code */
+       /*
+        * NOTE(review): a negative cmd_num is passed through as-is while
+        * other error paths here return positive errnos — confirm callers
+        * cope with both sign conventions.
+        */
+       if (cmd_num < 0)
+               return cmd_num;
+
+       /* reo cmd ring descriptors has cmd_num starting from 1 */
+       if (cmd_num == 0)
+               return EINVAL;
+
+       if (!cb)
+               return 0;
+
+       /* Can this be optimized so that we keep the pending command list only
+        * for tid delete command to free up the resource on the command status
+        * indication?
+        */
+       dp_cmd = malloc(sizeof(*dp_cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
+       if (!dp_cmd)
+               return ENOMEM;
+
+       /* Snapshot the rx_tid; the callback receives this copy as ctx. */
+       memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
+       dp_cmd->cmd_num = cmd_num;
+       dp_cmd->handler = cb;
+#ifdef notyet
+       spin_lock_bh(&dp->reo_cmd_lock);
+#endif
+       TAILQ_INSERT_TAIL(&dp->reo_cmd_list, dp_cmd, entry);
+#ifdef notyet
+       spin_unlock_bh(&dp->reo_cmd_lock);
+#endif
+       return 0;
+}
+
+/*
+ * Compute the size in bytes of a REO hardware queue descriptor for the
+ * given block-ack window size: the base descriptor plus 0-3 extension
+ * descriptors depending on how many reorder slots are needed.
+ */
+uint32_t
+qwz_hal_reo_qdesc_size(uint32_t ba_window_size, uint8_t tid)
+{
+       uint32_t num_ext_desc;
+
+       if (ba_window_size <= 1) {
+               if (tid != HAL_DESC_REO_NON_QOS_TID)
+                       num_ext_desc = 1;
+               else
+                       num_ext_desc = 0;
+       } else if (ba_window_size <= 105) {
+               num_ext_desc = 1;
+       } else if (ba_window_size <= 210) {
+               num_ext_desc = 2;
+       } else {
+               num_ext_desc = 3;
+       }
+
+       return sizeof(struct hal_rx_reo_queue) +
+               (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+}
+
+/*
+ * Initialize a HAL descriptor header with owner and buffer type, and
+ * stamp a magic pattern into the reserved bits for debugging.
+ */
+void
+qwz_hal_reo_set_desc_hdr(struct hal_desc_header *hdr, uint8_t owner, uint8_t buffer_type, uint32_t magic)
+{
+       hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
+                    FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type);
+
+       /* Magic pattern in reserved bits for debugging */
+       hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);
+}
+
+/*
+ * Set up a REO hardware rx reorder queue descriptor in DMA memory:
+ * header, queue number (tid), BA window size, optional PN check and
+ * starting sequence number, plus three extension descriptors for QoS
+ * TIDs (allocated for the maximum BA window size up front).
+ */
+void
+qwz_hal_reo_qdesc_setup(void *vaddr, int tid, uint32_t ba_window_size,
+    uint32_t start_seq, enum hal_pn_type type)
+{
+       struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
+       struct hal_rx_reo_queue_ext *ext_desc;
+
+       memset(qdesc, 0, sizeof(*qdesc));
+
+       qwz_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
+           HAL_DESC_REO_QUEUE_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
+
+       qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);
+
+       qdesc->info0 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
+           FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
+           FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, qwz_tid_to_ac(tid));
+
+       if (ba_window_size < 1)
+               ba_window_size = 1;
+
+       if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
+               ba_window_size++;
+
+       if (ba_window_size == 1)
+               qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);
+
+       /* Hardware expects the window size minus one. */
+       qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
+                                  ba_window_size - 1);
+       switch (type) {
+       case HAL_PN_TYPE_NONE:
+       case HAL_PN_TYPE_WAPI_EVEN:
+       case HAL_PN_TYPE_WAPI_UNEVEN:
+               break;
+       case HAL_PN_TYPE_WPA:
+               qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) |
+                   FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE,
+                   HAL_RX_REO_QUEUE_PN_SIZE_48);
+               break;
+       }
+
+       /* TODO: Set Ignore ampdu flags based on BA window size and/or
+        * AMPDU capabilities
+        */
+       qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);
+
+       qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);
+
+       /*
+        * NOTE(review): plain assignment (not |=) overwrites info1 here;
+        * harmless since only the zero-valued SVLD field was set above,
+        * but confirm this matches the upstream ath12k intent.
+        */
+       if (start_seq <= 0xfff)
+               qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
+                   start_seq);
+
+       if (tid == HAL_DESC_REO_NON_QOS_TID)
+               return;
+
+       ext_desc = qdesc->ext_desc;
+
+       /* TODO: HW queue descriptors are currently allocated for max BA
+        * window size for all QOS TIDs so that same descriptor can be used
+        * later when ADDBA request is received. This should be changed to
+        * allocate HW queue descriptors based on BA window size being
+        * negotiated (0 for non BA cases), and reallocate when BA window
+        * size changes and also send WMI message to FW to change the REO
+        * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
+        */
+       memset(ext_desc, 0, sizeof(*ext_desc));
+       qwz_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+           HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
+       ext_desc++;
+       memset(ext_desc, 0, sizeof(*ext_desc));
+       qwz_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+           HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
+       ext_desc++;
+       memset(ext_desc, 0, sizeof(*ext_desc));
+       qwz_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+           HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
+}
+
+/*
+ * REO command completion callback: free the rx tid's DMA-backed queue
+ * descriptor once the flush-cache command has completed (warn if the
+ * flush itself failed, but release the memory regardless).
+ */
+void
+qwz_dp_reo_cmd_free(struct qwz_dp *dp, void *ctx,
+    enum hal_reo_cmd_status status)
+{
+       struct qwz_softc *sc = dp->sc;
+       struct dp_rx_tid *rx_tid = ctx;
+
+       if (status != HAL_REO_CMD_SUCCESS)
+               printf("%s: failed to flush rx tid hw desc, tid %d status %d\n",
+                   sc->sc_dev.dv_xname, rx_tid->tid, status);
+
+       if (rx_tid->mem) {
+               qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
+               rx_tid->mem = NULL;
+               rx_tid->vaddr = NULL;
+               rx_tid->paddr = 0ULL;
+               rx_tid->size = 0;
+       }
+}
+
+/*
+ * Flush the rx tid's hardware queue descriptor out of the REO cache,
+ * one descriptor-sized chunk at a time from the end of the buffer, then
+ * send a final flush with NEED_STATUS so qwz_dp_reo_cmd_free() releases
+ * the DMA memory on completion.  If the final command cannot be sent,
+ * the memory is freed immediately.
+ */
+void
+qwz_dp_reo_cache_flush(struct qwz_softc *sc, struct dp_rx_tid *rx_tid)
+{
+       struct ath12k_hal_reo_cmd cmd = {0};
+       unsigned long tot_desc_sz, desc_sz;
+       int ret;
+
+       tot_desc_sz = rx_tid->size;
+       desc_sz = qwz_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+
+       while (tot_desc_sz > desc_sz) {
+               tot_desc_sz -= desc_sz;
+               cmd.addr_lo = (rx_tid->paddr + tot_desc_sz) & 0xffffffff;
+               cmd.addr_hi = rx_tid->paddr >> 32;
+               ret = qwz_dp_tx_send_reo_cmd(sc, rx_tid,
+                   HAL_REO_CMD_FLUSH_CACHE, &cmd, NULL);
+               if (ret) {
+                       printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE, "
+                           "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid,
+                           ret);
+               }
+       }
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.addr_lo = rx_tid->paddr & 0xffffffff;
+       cmd.addr_hi = rx_tid->paddr >> 32;
+       cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+       ret = qwz_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_FLUSH_CACHE,
+           &cmd, qwz_dp_reo_cmd_free);
+       if (ret) {
+               printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE cmd, "
+                   "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid, ret);
+               if (rx_tid->mem) {
+                       qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
+                       rx_tid->mem = NULL;
+                       rx_tid->vaddr = NULL;
+                       rx_tid->paddr = 0ULL;
+                       rx_tid->size = 0;
+               }
+       }
+}
+
+/*
+ * Completion callback for the UPDATE_RX_QUEUE (tid delete) REO command.
+ * On success, the tid's descriptor is queued on the cache-flush list and
+ * ownership of the DMA memory moves into the list element; aged entries
+ * are then flushed out of the REO cache and freed.  On HAL_REO_CMD_DRAIN
+ * (or when the list element cannot be allocated) the descriptor memory
+ * is freed immediately instead.
+ */
+void
+qwz_dp_rx_tid_del_func(struct qwz_dp *dp, void *ctx,
+    enum hal_reo_cmd_status status)
+{
+       struct qwz_softc *sc = dp->sc;
+       struct dp_rx_tid *rx_tid = ctx;
+       struct dp_reo_cache_flush_elem *elem, *tmp;
+       uint64_t now;
+
+       if (status == HAL_REO_CMD_DRAIN) {
+               goto free_desc;
+       } else if (status != HAL_REO_CMD_SUCCESS) {
+               /* Shouldn't happen! Cleanup in case of other failure? */
+               printf("%s: failed to delete rx tid %d hw descriptor %d\n",
+                   sc->sc_dev.dv_xname, rx_tid->tid, status);
+               return;
+       }
+
+       elem = malloc(sizeof(*elem), M_DEVBUF, M_ZERO | M_NOWAIT);
+       if (!elem)
+               goto free_desc;
+
+       now = getnsecuptime();
+       elem->ts = now;
+       /* The element takes over the rx_tid's DMA memory references. */
+       memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
+
+       rx_tid->mem = NULL;
+       rx_tid->vaddr = NULL;
+       rx_tid->paddr = 0ULL;
+       rx_tid->size = 0;
+
+#ifdef notyet
+       spin_lock_bh(&dp->reo_cmd_lock);
+#endif
+       TAILQ_INSERT_TAIL(&dp->reo_cmd_cache_flush_list, elem, entry);
+       dp->reo_cmd_cache_flush_count++;
+
+       /* Flush and invalidate aged REO desc from HW cache */
+       TAILQ_FOREACH_SAFE(elem, &dp->reo_cmd_cache_flush_list, entry, tmp) {
+               if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
+                   now >= elem->ts + MSEC_TO_NSEC(DP_REO_DESC_FREE_TIMEOUT_MS)) {
+                       TAILQ_REMOVE(&dp->reo_cmd_cache_flush_list, elem, entry);
+                       dp->reo_cmd_cache_flush_count--;
+#ifdef notyet
+                       spin_unlock_bh(&dp->reo_cmd_lock);
+#endif
+                       qwz_dp_reo_cache_flush(sc, &elem->data);
+                       free(elem, M_DEVBUF, sizeof(*elem));
+#ifdef notyet
+                       spin_lock_bh(&dp->reo_cmd_lock);
+#endif
+               }
+       }
+#ifdef notyet
+       spin_unlock_bh(&dp->reo_cmd_lock);
+#endif
+       return;
+free_desc:
+       if (rx_tid->mem) {
+               qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
+               rx_tid->mem = NULL;
+               rx_tid->vaddr = NULL;
+               rx_tid->paddr = 0ULL;
+               rx_tid->size = 0;
+       }
+}
+
+/*
+ * Tear down a peer's rx reorder queue for one tid: mark it inactive
+ * and send an UPDATE_RX_QUEUE command clearing the valid bit, with
+ * qwz_dp_rx_tid_del_func() as completion handler to free the queue
+ * descriptor.  If the command cannot be sent, free the DMA memory here.
+ */
+void
+qwz_peer_rx_tid_delete(struct qwz_softc *sc, struct ath12k_peer *peer,
+    uint8_t tid)
+{
+       struct ath12k_hal_reo_cmd cmd = {0};
+       struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+       int ret;
+
+       if (!rx_tid->active)
+               return;
+
+       rx_tid->active = 0;
+
+       cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+       cmd.addr_lo = rx_tid->paddr & 0xffffffff;
+       cmd.addr_hi = rx_tid->paddr >> 32;
+       cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
+       ret = qwz_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
+           &cmd, qwz_dp_rx_tid_del_func);
+       if (ret) {
+               if (ret != ESHUTDOWN) {
+                       printf("%s: failed to send "
+                           "HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+                           sc->sc_dev.dv_xname, tid, ret);
+               }
+
+               if (rx_tid->mem) {
+                       qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
+                       rx_tid->mem = NULL;
+                       rx_tid->vaddr = NULL;
+                       rx_tid->paddr = 0ULL;
+                       rx_tid->size = 0;
+               }
+       }
+}
+
+/*
+ * Reset an rx tid's fragment reassembly state (sequence number, last
+ * fragment number, fragment bitmap).  The Linux driver additionally
+ * returns the destination ring descriptor and purges queued fragments;
+ * those parts are not wired up yet (see #if 0 blocks).
+ */
+void
+qwz_dp_rx_frags_cleanup(struct qwz_softc *sc, struct dp_rx_tid *rx_tid,
+    int rel_link_desc)
+{
+#ifdef notyet
+       lockdep_assert_held(&ab->base_lock);
+#endif
+#if 0
+       if (rx_tid->dst_ring_desc) {
+               if (rel_link_desc)
+                       ath12k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
+                                                     HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+               kfree(rx_tid->dst_ring_desc);
+               rx_tid->dst_ring_desc = NULL;
+       }
+#endif
+       rx_tid->cur_sn = 0;
+       rx_tid->last_frag_no = 0;
+       rx_tid->rx_frag_bitmap = 0;
+#if 0
+       __skb_queue_purge(&rx_tid->rx_frags);
+#endif
+}
+
+/*
+ * Flush fragment reassembly state for every tid of a peer.
+ */
+void
+qwz_peer_frags_flush(struct qwz_softc *sc, struct ath12k_peer *peer)
+{
+       struct dp_rx_tid *rx_tid;
+       int i;
+#ifdef notyet
+       lockdep_assert_held(&ar->ab->base_lock);
+#endif
+       for (i = 0; i < IEEE80211_NUM_TID; i++) {
+               rx_tid = &peer->rx_tid[i];
+
+               qwz_dp_rx_frags_cleanup(sc, rx_tid, 1);
+#if 0
+               spin_unlock_bh(&ar->ab->base_lock);
+               del_timer_sync(&rx_tid->frag_timer);
+               spin_lock_bh(&ar->ab->base_lock);
+#endif
+       }
+}
+
+/*
+ * Tear down all rx reorder state for a peer: delete each tid's REO
+ * queue and flush its fragment reassembly state.
+ */
+void
+qwz_peer_rx_tid_cleanup(struct qwz_softc *sc, struct ath12k_peer *peer)
+{
+       struct dp_rx_tid *rx_tid;
+       int i;
+#ifdef notyet
+       lockdep_assert_held(&ar->ab->base_lock);
+#endif
+       for (i = 0; i < IEEE80211_NUM_TID; i++) {
+               rx_tid = &peer->rx_tid[i];
+
+               qwz_peer_rx_tid_delete(sc, peer, i);
+               qwz_dp_rx_frags_cleanup(sc, rx_tid, 1);
+#if 0
+               spin_unlock_bh(&ar->ab->base_lock);
+               del_timer_sync(&rx_tid->frag_timer);
+               spin_lock_bh(&ar->ab->base_lock);
+#endif
+       }
+}
+
+/*
+ * Update an existing rx tid REO queue: change the BA window size and,
+ * optionally, the starting sequence number via an UPDATE_RX_QUEUE
+ * command.  Records the new window size in the rx_tid on success.
+ * Returns 0 or an errno.
+ */
+int
+qwz_peer_rx_tid_reo_update(struct qwz_softc *sc, struct ath12k_peer *peer,
+    struct dp_rx_tid *rx_tid, uint32_t ba_win_sz, uint16_t ssn,
+    int update_ssn)
+{
+       struct ath12k_hal_reo_cmd cmd = {0};
+       int ret;
+
+       cmd.addr_lo = rx_tid->paddr & 0xffffffff;
+       cmd.addr_hi = rx_tid->paddr >> 32;
+       cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+       cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
+       cmd.ba_window_size = ba_win_sz;
+
+       if (update_ssn) {
+               cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
+               cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
+       }
+
+       ret = qwz_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
+           &cmd, NULL);
+       if (ret) {
+               printf("%s: failed to update rx tid queue, tid %d (%d)\n",
+                   sc->sc_dev.dv_xname, rx_tid->tid, ret);
+               return ret;
+       }
+
+       rx_tid->ba_win_sz = ba_win_sz;
+
+       return 0;
+}
+
+/*
+ * Free the DMA memory backing a node's rx tid queue descriptor and
+ * mark the tid inactive.  The vdev_id parameter is currently unused.
+ */
+void
+qwz_dp_rx_tid_mem_free(struct qwz_softc *sc, struct ieee80211_node *ni,
+    int vdev_id, uint8_t tid)
+{
+       struct qwz_node *nq = (struct qwz_node *)ni;
+       struct ath12k_peer *peer = &nq->peer;
+       struct dp_rx_tid *rx_tid;
+#ifdef notyet
+       spin_lock_bh(&ab->base_lock);
+#endif
+       rx_tid = &peer->rx_tid[tid];
+
+       if (rx_tid->mem) {
+               qwz_dmamem_free(sc->sc_dmat, rx_tid->mem);
+               rx_tid->mem = NULL;
+               rx_tid->vaddr = NULL;
+               rx_tid->paddr = 0ULL;
+               rx_tid->size = 0;
+       }
+
+       rx_tid->active = 0;
+#ifdef notyet
+       spin_unlock_bh(&ab->base_lock);
+#endif
+}
+
+int
+qwz_peer_rx_tid_setup(struct qwz_softc *sc, struct ieee80211_node *ni,
+    int vdev_id, int pdev_id, uint8_t tid, uint32_t ba_win_sz, uint16_t ssn,
+    enum hal_pn_type pn_type)
+{
+       struct qwz_node *nq = (struct qwz_node *)ni;
+       struct ath12k_peer *peer = &nq->peer;
+       struct dp_rx_tid *rx_tid;
+       uint32_t hw_desc_sz;
+       void *vaddr;
+       uint64_t paddr;
+       int ret;
+#ifdef notyet
+       spin_lock_bh(&ab->base_lock);
+#endif
+       rx_tid = &peer->rx_tid[tid];
+       /* Update the tid queue if it is already setup */
+       if (rx_tid->active) {
+               paddr = rx_tid->paddr;
+               ret = qwz_peer_rx_tid_reo_update(sc, peer, rx_tid,
+                   ba_win_sz, ssn, 1);
+#ifdef notyet
+               spin_unlock_bh(&ab->base_lock);
+#endif
+               if (ret) {
+                       printf("%s: failed to update reo for peer %s "
+                           "rx tid %d\n: %d", sc->sc_dev.dv_xname,
+                           ether_sprintf(ni->ni_macaddr), tid, ret);
+                       return ret;
+               }
+
+               ret = qwz_wmi_peer_rx_reorder_queue_setup(sc, vdev_id,
+                   pdev_id, ni->ni_macaddr, paddr, tid, 1, ba_win_sz);
+               if (ret)
+                       printf("%s: failed to send wmi rx reorder queue "
+                           "for peer %s tid %d: %d\n", sc->sc_dev.dv_xname,
+                           ether_sprintf(ni->ni_macaddr), tid, ret);
+               return ret;
+       }
+
+       rx_tid->tid = tid;
+
+       rx_tid->ba_win_sz = ba_win_sz;
+
+       /* TODO: Optimize the memory allocation for qos tid based on
+        * the actual BA window size in REO tid update path.
+        */
+       if (tid == HAL_DESC_REO_NON_QOS_TID)
+               hw_desc_sz = qwz_hal_reo_qdesc_size(ba_win_sz, tid);
+       else
+               hw_desc_sz = qwz_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
+       rx_tid->mem = qwz_dmamem_alloc(sc->sc_dmat, hw_desc_sz,
+           HAL_LINK_DESC_ALIGN);
+       if (rx_tid->mem == NULL) {
+#ifdef notyet
+               spin_unlock_bh(&ab->base_lock);
+#endif
+               return ENOMEM;
+       }
+
+       vaddr = QWZ_DMA_KVA(rx_tid->mem);
+
+       qwz_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
+
+       paddr = QWZ_DMA_DVA(rx_tid->mem);
+
+       rx_tid->vaddr = vaddr;
+       rx_tid->paddr = paddr;
+       rx_tid->size = hw_desc_sz;
+       rx_tid->active = 1;
+#ifdef notyet
+       spin_unlock_bh(&ab->base_lock);
+#endif
+       ret = qwz_wmi_peer_rx_reorder_queue_setup(sc, vdev_id, pdev_id,
+           ni->ni_macaddr, paddr, tid, 1, ba_win_sz);
+       if (ret) {
+               printf("%s: failed to setup rx reorder queue for peer %s "
+                   "tid %d: %d\n", sc->sc_dev.dv_xname,
+                   ether_sprintf(ni->ni_macaddr), tid, ret);
+               qwz_dp_rx_tid_mem_free(sc, ni, vdev_id, tid);
+       }
+
+       return ret;
+}
+
+/*
+ * Prepare per-TID rx fragment reassembly state for a peer.  Most of
+ * this is still stubbed out in the port; only the iteration skeleton
+ * is in place.  Always returns 0.
+ */
+int
+qwz_peer_rx_frag_setup(struct qwz_softc *sc, struct ieee80211_node *ni,
+    int vdev_id)
+{
+       struct qwz_node *nq = (struct qwz_node *)ni;
+       struct ath12k_peer *peer = &nq->peer;
+       struct dp_rx_tid *rx_tid;
+       int i;
+#ifdef notyet
+       spin_lock_bh(&ab->base_lock);
+#endif
+       /*
+        * Iterate all TIDs of this peer.  This loop used '<=' before,
+        * which walked one element past the end of the rx_tid array;
+        * harmless while the body is stubbed out, but an out-of-bounds
+        * access as soon as the frag timer code below is enabled.
+        */
+       for (i = 0; i < nitems(peer->rx_tid); i++) {
+               rx_tid = &peer->rx_tid[i];
+#if 0
+               rx_tid->ab = ab;
+               timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
+#endif
+       }
+#if 0
+       peer->dp_setup_done = true;
+#endif
+#ifdef notyet
+       spin_unlock_bh(&ab->base_lock);
+#endif
+       return 0;
+}
+
+/*
+ * Data-path setup for a newly created peer: program the default REO
+ * routing for its pdev and initialize a REO reorder queue for every
+ * TID.  On failure, any queues set up so far are torn down again.
+ * Returns 0 on success or an errno.
+ */
+int
+qwz_dp_peer_setup(struct qwz_softc *sc, int vdev_id, int pdev_id,
+    struct ieee80211_node *ni)
+{
+       struct qwz_node *nq = (struct qwz_node *)ni;
+       struct ath12k_peer *peer = &nq->peer;
+       uint32_t reo_dest;
+       int ret = 0, tid;
+
+       /* reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
+       reo_dest = sc->pdev_dp.mac_id + 1;
+       ret = qwz_wmi_set_peer_param(sc, ni->ni_macaddr, vdev_id, pdev_id,
+           WMI_PEER_SET_DEFAULT_ROUTING, DP_RX_HASH_ENABLE | (reo_dest << 1));
+       if (ret) {
+               printf("%s: failed to set default routing %d peer %s "
+                   "vdev_id %d\n", sc->sc_dev.dv_xname, ret,
+                   ether_sprintf(ni->ni_macaddr), vdev_id);
+               return ret;
+       }
+
+       /* One reorder queue per TID; BA window of 1 until ADDBA. */
+       for (tid = 0; tid < IEEE80211_NUM_TID; tid++) {
+               ret = qwz_peer_rx_tid_setup(sc, ni, vdev_id, pdev_id,
+                   tid, 1, 0, HAL_PN_TYPE_NONE);
+               if (ret) {
+                       printf("%s: failed to setup rxd tid queue for tid %d: %d\n",
+                           sc->sc_dev.dv_xname, tid, ret);
+                       goto peer_clean;
+               }
+       }
+
+       ret = qwz_peer_rx_frag_setup(sc, ni, vdev_id);
+       if (ret) {
+               printf("%s: failed to setup rx defrag context\n",
+                   sc->sc_dev.dv_xname);
+               /* All TIDs were set up; start the teardown at the last one. */
+               tid--;
+               goto peer_clean;
+       }
+
+       /* TODO: Setup other peer specific resource used in data path */
+
+       return 0;
+
+peer_clean:
+#ifdef notyet
+       spin_lock_bh(&ab->base_lock);
+#endif
+#if 0
+       peer = ath12k_peer_find(ab, vdev_id, addr);
+       if (!peer) {
+               ath12k_warn(ab, "failed to find the peer to del rx tid\n");
+               spin_unlock_bh(&ab->base_lock);
+               return -ENOENT;
+       }
+#endif
+       /*
+        * NOTE(review): when qwz_peer_rx_tid_setup() failed above, the
+        * failing tid itself is also passed to qwz_peer_rx_tid_delete();
+        * presumably that is a no-op for a tid that never became active —
+        * confirm against qwz_peer_rx_tid_delete().
+        */
+       for (; tid >= 0; tid--)
+               qwz_peer_rx_tid_delete(sc, peer, tid);
+#ifdef notyet
+       spin_unlock_bh(&ab->base_lock);
+#endif
+       return ret;
+}
+
+/*
+ * Enable (or, when delete_key is set, disable) hardware PN/TSC replay
+ * checking on all active REO reorder queues of a peer.  Only pairwise
+ * keys are offloaded; group keys keep using net80211's software replay
+ * check.  Returns 0 on success, EOPNOTSUPP for unsupported ciphers, or
+ * the error of the first failing REO command.
+ */
+int
+qwz_dp_peer_rx_pn_replay_config(struct qwz_softc *sc, struct qwz_vif *arvif,
+    struct ieee80211_node *ni, struct ieee80211_key *k, int delete_key)
+{
+       struct ath12k_hal_reo_cmd cmd = {0};
+       struct qwz_node *nq = (struct qwz_node *)ni;
+       struct ath12k_peer *peer = &nq->peer;
+       struct dp_rx_tid *rx_tid;
+       uint8_t tid;
+       int ret = 0;
+
+       /*
+        * NOTE: Enable PN/TSC replay check offload only for unicast frames.
+        * We use net80211 PN/TSC replay check functionality for bcast/mcast
+        * for now.
+        */
+       if (k->k_flags & IEEE80211_KEY_GROUP)
+               return 0;
+
+       cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+       cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
+                   HAL_REO_CMD_UPD0_PN_SIZE |
+                   HAL_REO_CMD_UPD0_PN_VALID |
+                   HAL_REO_CMD_UPD0_PN_CHECK |
+                   HAL_REO_CMD_UPD0_SVLD;
+
+       switch (k->k_cipher) {
+       case IEEE80211_CIPHER_TKIP:
+       case IEEE80211_CIPHER_CCMP:
+#if 0
+       case WLAN_CIPHER_SUITE_CCMP_256:
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+#endif
+               /* PN size in bits; left unset when the key is deleted so
+                * the PN check fields above get cleared. */
+               if (!delete_key) {
+                       cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
+                       cmd.pn_size = 48;
+               }
+               break;
+       default:
+               printf("%s: cipher %u is not supported\n",
+                   sc->sc_dev.dv_xname, k->k_cipher);
+               return EOPNOTSUPP;
+       }
+
+       /* Apply the update to every TID queue that has been set up. */
+       for (tid = 0; tid < IEEE80211_NUM_TID; tid++) {
+               rx_tid = &peer->rx_tid[tid];
+               if (!rx_tid->active)
+                       continue;
+               cmd.addr_lo = rx_tid->paddr & 0xffffffff;
+               cmd.addr_hi = (rx_tid->paddr >> 32);
+               ret = qwz_dp_tx_send_reo_cmd(sc, rx_tid,
+                   HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, NULL);
+               if (ret) {
+                       printf("%s: failed to configure rx tid %d queue "
+                           "for pn replay detection %d\n",
+                           sc->sc_dev.dv_xname, tid, ret);
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Choose the TCL encapsulation type for transmit descriptors: raw when
+ * the driver runs in raw mode, native-wifi otherwise.  (Ethernet encap
+ * is not wired up yet, see the disabled code.)
+ */
+enum hal_tcl_encap_type
+qwz_dp_tx_get_encap_type(struct qwz_softc *sc)
+{
+       if (test_bit(ATH12K_FLAG_RAW_MODE, sc->sc_flags))
+               return HAL_TCL_ENCAP_TYPE_RAW;
+#if 0
+       if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+               return HAL_TCL_ENCAP_TYPE_ETHERNET;
+#endif
+       return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
+}
+
+/*
+ * Extract the traffic identifier (TID) from the QoS control field of
+ * the 802.11 header at the front of the mbuf.
+ */
+uint8_t
+qwz_dp_tx_get_tid(struct mbuf *m)
+{
+       struct ieee80211_frame *frm;
+
+       frm = mtod(m, struct ieee80211_frame *);
+       return ieee80211_get_qos(frm) & IEEE80211_QOS_TID;
+}
+
+/*
+ * Serialize a software hal_tx_info into a TCL data command descriptor
+ * in the layout expected by the hardware's transmit classifier ring.
+ */
+void
+qwz_hal_tx_cmd_desc_setup(struct qwz_softc *sc, void *cmd,
+    struct hal_tx_info *ti)
+{
+       struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd;
+
+       /* The 64-bit buffer DMA address is split across info0/info1. */
+       tcl_cmd->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+           ti->paddr);
+       tcl_cmd->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+           ((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
+       tcl_cmd->buf_addr_info.info1 |= FIELD_PREP(
+           BUFFER_ADDR_INFO1_RET_BUF_MGR, ti->rbm_id) |
+           FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
+
+       tcl_cmd->info0 =
+           FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE, ti->type) |
+           FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE, ti->encap_type) |
+           FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE, ti->encrypt_type) |
+           FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE, ti->search_type) |
+           FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ADDR_EN, ti->addr_search_flags) |
+           FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_CMD_NUM, ti->meta_data_flags);
+
+       tcl_cmd->info1 = ti->flags0 |
+           FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, ti->data_len) |
+           FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET, ti->pkt_offset);
+
+       tcl_cmd->info2 = ti->flags1 |
+           FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, ti->tid) |
+           FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_LMAC_ID, ti->lmac_id);
+
+       tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
+           ti->dscp_tid_tbl_idx) |
+           FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX, ti->bss_ast_idx) |
+           FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM, ti->bss_ast_hash);
+       /* info4 carries no fields we use yet; keep it zeroed. */
+       tcl_cmd->info4 = 0;
+#ifdef notyet
+       if (ti->enable_mesh)
+               ab->hw_params.hw_ops->tx_mesh_enable(ab, tcl_cmd);
+#endif
+}
+
+/*
+ * Enqueue one data frame on a TCL transmit ring.
+ * The frame is optionally prepared for hardware crypto (MIC space
+ * reserved) or passed through software encryption, DMA-mapped, and
+ * described by a TCL command descriptor written directly into the ring.
+ * The mbuf is freed on every error path taken here.
+ * NOTE(review): the ENOBUFS path after ieee80211_encrypt() assumes that
+ * function consumes the mbuf on failure — confirm against net80211.
+ * Returns 0 on success or an errno.
+ */
+int
+qwz_dp_tx(struct qwz_softc *sc, struct qwz_vif *arvif, uint8_t pdev_id,
+    struct ieee80211_node *ni, struct mbuf *m)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct qwz_dp *dp = &sc->dp;
+       struct hal_tx_info ti = {0};
+       struct qwz_tx_data *tx_data;
+       struct hal_srng *tcl_ring;
+       struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+       struct ieee80211_key *k = NULL;
+       struct dp_tx_ring *tx_ring;
+       void *hal_tcl_desc;
+       uint8_t pool_id;
+       uint8_t hal_ring_id;
+       int ret, msdu_id, off;
+       uint32_t ring_selector = 0;
+       uint8_t ring_map = 0;
+
+       /* Refuse new frames while the device is being torn down. */
+       if (test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
+               m_freem(m);
+               return ESHUTDOWN;
+       }
+#if 0
+       if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+                    !ieee80211_is_data(hdr->frame_control)))
+               return -ENOTSUPP;
+#endif
+       /* Only ring 0 is used for now; ring_selector is a placeholder. */
+       pool_id = 0;
+       ring_selector = 0;
+
+       ti.ring_id = ring_selector % sc->hw_params.max_tx_ring;
+       ti.rbm_id = sc->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
+
+       ring_map |= (1 << ti.ring_id);
+
+       tx_ring = &dp->tx_ring[ti.ring_id];
+
+       /* Reject the frame if the software tx ring is full. */
+       if (tx_ring->queued >= sc->hw_params.tx_ring_size) {
+               m_freem(m);
+               return ENOSPC;
+       }
+
+       msdu_id = tx_ring->cur;
+       tx_data = &tx_ring->data[msdu_id];
+       if (tx_data->m != NULL) {
+               m_freem(m);
+               return ENOSPC;
+       }
+
+       /* The cookie encodes pdev, msdu slot and pool for tx completion. */
+       ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, pdev_id) |
+           FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, msdu_id) |
+           FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
+       ti.encap_type = qwz_dp_tx_get_encap_type(sc);
+
+       ti.meta_data_flags = arvif->tcl_metadata;
+
+       /*
+        * For protected frames in raw mode, pick the encryption type and
+        * reserve tail room for the MIC when the hardware encrypts, or
+        * fall back to software crypto otherwise.
+        */
+       if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
+           ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
+               k = ieee80211_get_txkey(ic, wh, ni);
+               if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags)) {
+                       ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+               } else {
+                       switch (k->k_cipher) {
+                       case IEEE80211_CIPHER_CCMP:
+                               ti.encrypt_type = HAL_ENCRYPT_TYPE_CCMP_128;
+                               if (m_makespace(m, m->m_pkthdr.len,
+                                   IEEE80211_CCMP_MICLEN, &off) == NULL) {
+                                       m_freem(m);
+                                       return ENOSPC;
+                               }
+                               break;
+                       case IEEE80211_CIPHER_TKIP:
+                               ti.encrypt_type = HAL_ENCRYPT_TYPE_TKIP_MIC;
+                               if (m_makespace(m, m->m_pkthdr.len,
+                                   IEEE80211_TKIP_MICLEN, &off) == NULL) {
+                                       m_freem(m);
+                                       return ENOSPC;
+                               }
+                               break;
+                       default:
+                               ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+                               break;
+                       }
+               }
+
+               if (ti.encrypt_type == HAL_ENCRYPT_TYPE_OPEN) {
+                       /* Using software crypto. */
+                       if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
+                               return ENOBUFS;
+                       /* 802.11 header may have moved. */
+                       wh = mtod(m, struct ieee80211_frame *);
+               }
+       }
+
+       ti.addr_search_flags = arvif->hal_addr_search_flags;
+       ti.search_type = arvif->search_type;
+       ti.type = HAL_TCL_DESC_TYPE_BUFFER;
+       ti.pkt_offset = 0;
+       ti.lmac_id = qwz_hw_get_mac_from_pdev_id(sc, pdev_id);
+       ti.bss_ast_hash = arvif->ast_hash;
+       ti.bss_ast_idx = arvif->ast_idx;
+       ti.dscp_tid_tbl_idx = 0;
+#if 0
+       if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
+                  ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
+               ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
+                            FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
+                            FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
+                            FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
+                            FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
+       }
+
+       if (ieee80211_vif_is_mesh(arvif->vif))
+               ti.enable_mesh = true;
+#endif
+       ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
+
+       ti.tid = qwz_dp_tx_get_tid(m);
+#if 0
+       switch (ti.encap_type) {
+       case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
+               ath12k_dp_tx_encap_nwifi(skb);
+               break;
+       case HAL_TCL_ENCAP_TYPE_RAW:
+               if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
+                       ret = -EINVAL;
+                       goto fail_remove_idr;
+               }
+               break;
+       case HAL_TCL_ENCAP_TYPE_ETHERNET:
+               /* no need to encap */
+               break;
+       case HAL_TCL_ENCAP_TYPE_802_3:
+       default:
+               /* TODO: Take care of other encap modes as well */
+               ret = -EINVAL;
+               atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+               goto fail_remove_idr;
+       }
+#endif
+       /* DMA-map the mbuf; on EFBIG, linearize once and retry. */
+       ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
+           m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
+       if (ret && ret != EFBIG) {
+               printf("%s: failed to map Tx buffer: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               m_freem(m);
+               return ret;
+       }
+       if (ret) {
+               /* Too many DMA segments, linearize mbuf. */
+               if (m_defrag(m, M_DONTWAIT)) {
+                       m_freem(m);
+                       return ENOBUFS;
+               }
+               ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map, m,
+                   BUS_DMA_NOWAIT | BUS_DMA_WRITE);
+               if (ret) {
+                       printf("%s: failed to map Tx buffer: %d\n",
+                           sc->sc_dev.dv_xname, ret);
+                       m_freem(m);
+                       return ret;
+               }
+       }
+       ti.paddr = tx_data->map->dm_segs[0].ds_addr;
+
+       ti.data_len = m->m_pkthdr.len;
+
+       /* Claim a descriptor slot on the hardware TCL ring. */
+       hal_ring_id = tx_ring->tcl_data_ring.ring_id;
+       tcl_ring = &sc->hal.srng_list[hal_ring_id];
+#ifdef notyet
+       spin_lock_bh(&tcl_ring->lock);
+#endif
+       qwz_hal_srng_access_begin(sc, tcl_ring);
+
+       hal_tcl_desc = (void *)qwz_hal_srng_src_get_next_entry(sc, tcl_ring);
+       if (!hal_tcl_desc) {
+               /* NOTE: It is highly unlikely we'll be running out of tcl_ring
+                * desc because the desc is directly enqueued onto hw queue.
+                */
+               qwz_hal_srng_access_end(sc, tcl_ring);
+#if 0
+               ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
+#endif
+#ifdef notyet
+               spin_unlock_bh(&tcl_ring->lock);
+#endif
+               bus_dmamap_unload(sc->sc_dmat, tx_data->map);
+               m_freem(m);
+               return ENOMEM;
+       }
+
+       /* Ownership of the mbuf passes to the tx slot from here on. */
+       tx_data->m = m;
+       tx_data->ni = ni;
+
+       /* The descriptor payload follows the TLV header in the ring entry. */
+       qwz_hal_tx_cmd_desc_setup(sc,
+           hal_tcl_desc + sizeof(struct hal_tlv_hdr), &ti);
+
+       qwz_hal_srng_access_end(sc, tcl_ring);
+
+       qwz_dp_shadow_start_timer(sc, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
+#ifdef notyet
+       spin_unlock_bh(&tcl_ring->lock);
+#endif
+       tx_ring->queued++;
+       tx_ring->cur = (tx_ring->cur + 1) % sc->hw_params.tx_ring_size;
+
+       /* Flag the ring as (nearly) full so upper layers stop sending. */
+       if (tx_ring->queued >= sc->hw_params.tx_ring_size - 1)
+               sc->qfullmsk |= (1 << ti.ring_id);
+
+       return 0;
+}
+
+/*
+ * Undo qwz_mac_station_add(): tear down the peer's REO reorder queues
+ * and delete the firmware peer entry.  Returns 0 or an errno.
+ */
+int
+qwz_mac_station_remove(struct qwz_softc *sc, struct qwz_vif *arvif,
+    uint8_t pdev_id, struct ieee80211_node *ni)
+{
+       struct qwz_node *nq = (struct qwz_node *)ni;
+       int err;
+
+       /* Data-path state first, then the firmware peer itself. */
+       qwz_peer_rx_tid_cleanup(sc, &nq->peer);
+
+       err = qwz_peer_delete(sc, arvif->vdev_id, pdev_id, ni->ni_macaddr);
+       if (err) {
+               printf("%s: unable to delete BSS peer: %d\n",
+                  sc->sc_dev.dv_xname, err);
+               return err;
+       }
+
+       return 0;
+}
+
+/*
+ * Create a firmware peer entry for a station and set up its data path
+ * (REO routing and per-TID reorder queues).  The peer is deleted again
+ * if the data-path setup fails.  Returns 0 or an errno.
+ */
+int
+qwz_mac_station_add(struct qwz_softc *sc, struct qwz_vif *arvif,
+    uint8_t pdev_id, struct ieee80211_node *ni)
+{
+       struct peer_create_params peer_param;
+       int ret;
+#ifdef notyet
+       lockdep_assert_held(&ar->conf_mutex);
+#endif
+       peer_param.vdev_id = arvif->vdev_id;
+       peer_param.peer_addr = ni->ni_macaddr;
+       peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+       ret = qwz_peer_create(sc, arvif, pdev_id, ni, &peer_param);
+       if (ret) {
+               printf("%s: Failed to add peer: %s for VDEV: %d\n",
+                   sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
+                   arvif->vdev_id);
+               return ret;
+       }
+
+       DNPRINTF(QWZ_D_MAC, "%s: Added peer: %s for VDEV: %d\n", __func__,
+           ether_sprintf(ni->ni_macaddr), arvif->vdev_id);
+
+       ret = qwz_dp_peer_setup(sc, arvif->vdev_id, pdev_id, ni);
+       if (ret) {
+               printf("%s: failed to setup dp for peer %s on vdev %d (%d)\n",
+                   sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
+                   arvif->vdev_id, ret);
+               goto free_peer;
+       }
+
+       return 0;
+
+free_peer:
+       qwz_peer_delete(sc, arvif->vdev_id, pdev_id, ni->ni_macaddr);
+       return ret;
+}
+
+/*
+ * Transmit a management frame through the WMI management-tx path
+ * (rather than the TCL data rings): DMA-map the mbuf into the next
+ * slot of the per-vif management queue and hand it to firmware.
+ * NOTE(review): on the first mapping failure (ret != EFBIG) the mbuf is
+ * returned to the caller unfreed, while the defrag fallback frees it —
+ * confirm the caller's mbuf-ownership expectations before changing.
+ * Returns 0 or an errno.
+ */
+int
+qwz_mac_mgmt_tx_wmi(struct qwz_softc *sc, struct qwz_vif *arvif,
+    uint8_t pdev_id, struct ieee80211_node *ni, struct mbuf *m)
+{
+       struct qwz_txmgmt_queue *txmgmt = &arvif->txmgmt;
+       struct qwz_tx_data *tx_data;
+       int buf_id;
+       int ret;
+
+       buf_id = txmgmt->cur;
+
+       DNPRINTF(QWZ_D_MAC, "%s: tx mgmt frame, buf id %d\n", __func__, buf_id);
+
+       /* The management queue is a small fixed-size ring. */
+       if (txmgmt->queued >= nitems(txmgmt->data))
+               return ENOSPC;
+
+       tx_data = &txmgmt->data[buf_id];
+#if 0
+       if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+               if ((ieee80211_is_action(hdr->frame_control) ||
+                    ieee80211_is_deauth(hdr->frame_control) ||
+                    ieee80211_is_disassoc(hdr->frame_control)) &&
+                    ieee80211_has_protected(hdr->frame_control)) {
+                       skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+               }
+       }
+#endif
+       /* DMA-map the mbuf; on EFBIG, linearize once and retry. */
+       ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
+           m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
+       if (ret && ret != EFBIG) {
+               printf("%s: failed to map mgmt Tx buffer: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+       if (ret) {
+               /* Too many DMA segments, linearize mbuf. */
+               if (m_defrag(m, M_DONTWAIT)) {
+                       m_freem(m);
+                       return ENOBUFS;
+               }
+               ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map, m,
+                   BUS_DMA_NOWAIT | BUS_DMA_WRITE);
+               if (ret) {
+                       printf("%s: failed to map mgmt Tx buffer: %d\n",
+                           sc->sc_dev.dv_xname, ret);
+                       m_freem(m);
+                       return ret;
+               }
+       }
+
+       ret = qwz_wmi_mgmt_send(sc, arvif, pdev_id, buf_id, m, tx_data);
+       if (ret) {
+               printf("%s: failed to send mgmt frame: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto err_unmap_buf;
+       }
+       tx_data->ni = ni;
+
+       /* Advance the ring and account for the queued frame. */
+       txmgmt->cur = (txmgmt->cur + 1) % nitems(txmgmt->data);
+       txmgmt->queued++;
+
+       /* Flag the queue as (nearly) full so upper layers stop sending. */
+       if (txmgmt->queued >= nitems(txmgmt->data) - 1)
+               sc->qfullmsk |= (1U << QWZ_MGMT_QUEUE_ID);
+
+       return 0;
+
+err_unmap_buf:
+       bus_dmamap_unload(sc->sc_dmat, tx_data->map);
+       return ret;
+}
+
+/*
+ * Fill a scan request with commonly used defaults: dwell and rest
+ * times (milliseconds), the scan event mask, and a broadcast BSSID.
+ * Callers override individual fields afterwards as needed.
+ */
+void
+qwz_wmi_start_scan_init(struct qwz_softc *sc, struct scan_req_params *arg)
+{
+       /* setup commonly used values */
+       arg->scan_req_id = 1;
+       if (sc->state_11d == ATH12K_11D_PREPARING)
+               arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+       else
+               arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+       arg->dwell_time_active = 50;
+       arg->dwell_time_active_2g = 0;
+       arg->dwell_time_passive = 150;
+       arg->dwell_time_active_6g = 40;
+       arg->dwell_time_passive_6g = 30;
+       arg->min_rest_time = 50;
+       arg->max_rest_time = 500;
+       arg->repeat_probe_time = 0;
+       arg->probe_spacing_time = 0;
+       arg->idle_time = 0;
+       arg->max_scan_time = 20000;
+       arg->probe_delay = 5;
+       arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
+           WMI_SCAN_EVENT_COMPLETED | WMI_SCAN_EVENT_BSS_CHANNEL |
+           WMI_SCAN_EVENT_FOREIGN_CHAN | WMI_SCAN_EVENT_DEQUEUED;
+       arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+
+       if (isset(sc->wmi.svc_map,
+           WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE))
+               arg->scan_ctrl_flags_ext |=
+                   WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE;
+
+       arg->num_bssid = 1;
+
+       /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
+        * ZEROs in probe request
+        */
+       IEEE80211_ADDR_COPY(arg->bssid_list[0].addr, etheranyaddr);
+}
+
+/*
+ * Send WMI_PEER_SET_PARAM_CMDID to set a single firmware parameter on
+ * the peer identified by MAC address and vdev.  Returns 0 or an errno;
+ * the mbuf is freed here when sending fails.
+ */
+int
+qwz_wmi_set_peer_param(struct qwz_softc *sc, uint8_t *peer_addr,
+    uint32_t vdev_id, uint32_t pdev_id, uint32_t param_id, uint32_t param_val)
+{
+       struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+       struct wmi_peer_set_param_cmd *cmd;
+       struct mbuf *m;
+       int ret;
+
+       m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+       if (!m)
+               return ENOMEM;
+
+       /* The command TLV follows the HTC and WMI headers in the mbuf. */
+       cmd = (struct wmi_peer_set_param_cmd *)(mtod(m, uint8_t *) +
+           sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
+           FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+       IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr);
+       cmd->vdev_id = vdev_id;
+       cmd->param_id = param_id;
+       cmd->param_value = param_val;
+
+       ret = qwz_wmi_cmd_send(wmi, m, WMI_PEER_SET_PARAM_CMDID);
+       if (ret) {
+               /* Stay quiet during shutdown. */
+               if (ret != ESHUTDOWN) {
+                       printf("%s: failed to send WMI_PEER_SET_PARAM cmd\n",
+                           sc->sc_dev.dv_xname);
+               }
+               m_freem(m);
+               return ret;
+       }
+
+       DNPRINTF(QWZ_D_WMI, "%s: cmd peer set param vdev %d peer %s "
+           "set param %d value %d\n", __func__, vdev_id,
+           ether_sprintf(peer_addr), param_id, param_val);
+
+       return 0;
+}
+
+/*
+ * Announce a peer's REO reorder queue for one TID to the firmware
+ * (WMI_PEER_REORDER_QUEUE_SETUP_CMDID).  'paddr' is the DMA address of
+ * the queue descriptor; the BA window size is only applied when
+ * ba_window_size_valid is non-zero.  Returns 0 or an errno; the mbuf
+ * is freed here when sending fails.
+ */
+int
+qwz_wmi_peer_rx_reorder_queue_setup(struct qwz_softc *sc, int vdev_id,
+    int pdev_id, uint8_t *addr, uint64_t paddr, uint8_t tid,
+    uint8_t ba_window_size_valid, uint32_t ba_window_size)
+{
+       struct qwz_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
+       struct wmi_peer_reorder_queue_setup_cmd *cmd;
+       struct mbuf *m;
+       int ret;
+
+       m = qwz_wmi_alloc_mbuf(sizeof(*cmd));
+       if (!m)
+               return ENOMEM;
+
+       /* The command TLV follows the HTC and WMI headers in the mbuf. */
+       cmd = (struct wmi_peer_reorder_queue_setup_cmd *)(mtod(m, uint8_t *) +
+           sizeof(struct ath12k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+           WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
+           FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+       IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, addr);
+       cmd->vdev_id = vdev_id;
+       cmd->tid = tid;
+       cmd->queue_ptr_lo = paddr & 0xffffffff;
+       cmd->queue_ptr_hi = paddr >> 32;
+       cmd->queue_no = tid;
+       cmd->ba_window_size_valid = ba_window_size_valid;
+       cmd->ba_window_size = ba_window_size;
+
+       ret = qwz_wmi_cmd_send(wmi, m, WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
+       if (ret) {
+               if (ret != ESHUTDOWN) {
+                       printf("%s: failed to send "
+                           "WMI_PEER_REORDER_QUEUE_SETUP\n",
+                           sc->sc_dev.dv_xname);
+               }
+               m_freem(m);
+               /* Return early so the debug printout below only runs on
+                * success, matching qwz_wmi_set_peer_param(). */
+               return ret;
+       }
+
+       DNPRINTF(QWZ_D_WMI, "%s: cmd peer reorder queue setup addr %s "
+           "vdev_id %d tid %d\n", __func__, ether_sprintf(addr), vdev_id, tid);
+
+       return 0;
+}
+
+/*
+ * Report the current spectral scan mode.  Spectral scan is not
+ * implemented in this port yet, so this always reports disabled.
+ */
+enum ath12k_spectral_mode
+qwz_spectral_get_mode(struct qwz_softc *sc)
+{
+#if 0
+       if (sc->spectral.enabled)
+               return ar->spectral.mode;
+       else
+#endif
+               return ATH12K_SPECTRAL_DISABLED;
+}
+
+/* Stub: spectral scan buffers are not implemented in this port yet. */
+void
+qwz_spectral_reset_buffer(struct qwz_softc *sc)
+{
+       printf("%s: not implemented\n", __func__);
+}
+
+/*
+ * Ask the firmware to cancel the current scan and wait (up to 3
+ * seconds) for the scan state to return to idle.  The scan state is
+ * force-finished afterwards in case the completion event was lost.
+ * Returns 0 or an errno (also when only the wait timed out).
+ */
+int
+qwz_scan_stop(struct qwz_softc *sc)
+{
+       struct scan_cancel_param arg = {
+               .req_type = WLAN_SCAN_CANCEL_SINGLE,
+               .scan_id = ATH12K_SCAN_ID,
+       };
+       int ret;
+#ifdef notyet
+       lockdep_assert_held(&ar->conf_mutex);
+#endif
+       /* TODO: Fill other STOP Params */
+       arg.pdev_id = 0; /* TODO: derive pdev ID somehow? */
+       arg.vdev_id = sc->scan.vdev_id;
+
+       ret = qwz_wmi_send_scan_stop_cmd(sc, &arg);
+       if (ret) {
+               printf("%s: failed to stop wmi scan: %d\n",
+                   sc->sc_dev.dv_xname, ret);
+               goto out;
+       }
+
+       /* Wait for the scan-completed wakeup from the event handler. */
+       while (sc->scan.state != ATH12K_SCAN_IDLE) {
+               ret = tsleep_nsec(&sc->scan.state, 0, "qwzscstop",
+                   SEC_TO_NSEC(3));
+               if (ret) {
+                       printf("%s: scan stop timeout\n", sc->sc_dev.dv_xname);
+                       break;
+               }
+       }
+out:
+       /* Scan state should be updated upon scan completion but in case
+        * firmware fails to deliver the event (for whatever reason) it is
+        * desired to clean up scan state anyway. Firmware may have just
+        * dropped the scan completion event delivery due to transport pipe
+        * being overflown with data and/or it can recover on its own before
+        * next scan request is submitted.
+        */
+#ifdef notyet
+       spin_lock_bh(&ar->data_lock);
+#endif
+       if (sc->scan.state != ATH12K_SCAN_IDLE)
+               qwz_mac_scan_finish(sc);
+#ifdef notyet
+       spin_unlock_bh(&ar->data_lock);
+#endif
+       return ret;
+}
+
+/*
+ * Timeout handler for a scan that never completed: abort it.
+ * Runs at softclock; network interrupts are blocked via splnet().
+ */
+void
+qwz_scan_timeout(void *arg)
+{
+       struct qwz_softc *sc = arg;
+       int s = splnet();
+
+#ifdef notyet
+       mutex_lock(&ar->conf_mutex);
+#endif
+       printf("%s\n", __func__);
+       qwz_scan_abort(sc);
+#ifdef notyet
+       mutex_unlock(&ar->conf_mutex);
+#endif
+       splx(s);
+}
+
+/*
+ * Send the WMI scan start command and wait for the firmware's
+ * "scan started" event to move the scan state past STARTING.
+ * The wait timeout is in seconds; it is raised when 11D offload is
+ * active because that prepends an extra firmware scan phase.
+ * A start timeout triggers a scan stop to clean up.  Returns 0 or an
+ * errno.
+ */
+int
+qwz_start_scan(struct qwz_softc *sc, struct scan_req_params *arg)
+{
+       int ret;
+       unsigned long timeout = 1;
+#ifdef notyet
+       lockdep_assert_held(&ar->conf_mutex);
+#endif
+       if (qwz_spectral_get_mode(sc) == ATH12K_SPECTRAL_BACKGROUND)
+               qwz_spectral_reset_buffer(sc);
+
+       ret = qwz_wmi_send_scan_start_cmd(sc, arg);
+       if (ret)
+               return ret;
+
+       if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD)) {
+               timeout = 5;
+#if 0
+               if (ar->supports_6ghz)
+                       timeout += 5 * HZ;
+#endif
+       }
+
+       /* Wait for the scan-started wakeup from the event handler. */
+       while (sc->scan.state == ATH12K_SCAN_STARTING) {
+               ret = tsleep_nsec(&sc->scan.state, 0, "qwzscan",
+                   SEC_TO_NSEC(timeout));
+               if (ret) {
+                       printf("%s: scan start timeout\n", sc->sc_dev.dv_xname);
+                       qwz_scan_stop(sc);
+                       break;
+               }
+       }
+
+#ifdef notyet
+       spin_lock_bh(&ar->data_lock);
+       spin_unlock_bh(&ar->data_lock);
+#endif
+       return ret;
+}
+
+#define ATH12K_MAC_SCAN_CMD_EVT_OVERHEAD               200 /* in msecs */
+
+/*
+ * Kick off a hardware scan on the first vdev.
+ * A WMI start-scan request is built from net80211 state: when a
+ * desired ESSID is configured, an active scan for that SSID is
+ * requested, otherwise a passive scan.  All configured channels are
+ * included in the request.
+ * Returns 0 on success or an errno value (EINVAL if no vdev exists,
+ * EBUSY while another scan is in flight, ENOMEM on allocation failure).
+ */
+int
+qwz_scan(struct qwz_softc *sc)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list);
+       struct scan_req_params *arg = NULL;
+       struct ieee80211_channel *chan, *lastc;
+       int ret = 0, num_channels, i;
+       uint32_t scan_timeout;
+
+       if (arvif == NULL) {
+               printf("%s: no vdev found\n", sc->sc_dev.dv_xname);
+               return EINVAL;
+       }
+
+       /*
+        * TODO Will we need separate scan iterations on devices with
+        * multiple radios?
+        */
+       if (sc->num_radios > 1)
+               printf("%s: TODO: only scanning with first vdev\n", __func__);
+
+       /* Firmwares advertising the support of triggering 11D algorithm
+        * on the scan results of a regular scan expects driver to send
+        * WMI_11D_SCAN_START_CMDID before sending WMI_START_SCAN_CMDID.
+        * With this feature, separate 11D scan can be avoided since
+        * regdomain can be determined with the scan results of the
+        * regular scan.
+        */
+       if (sc->state_11d == ATH12K_11D_PREPARING &&
+           isset(sc->wmi.svc_map, WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN))
+               qwz_mac_11d_scan_start(sc, arvif);
+#ifdef notyet
+       mutex_lock(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+#endif
+       /* Claim the scan state machine; any non-idle state means busy. */
+       switch (sc->scan.state) {
+       case ATH12K_SCAN_IDLE:
+               sc->scan.started = 0;
+               sc->scan.completed = 0;
+               sc->scan.state = ATH12K_SCAN_STARTING;
+               sc->scan.is_roc = 0;
+               sc->scan.vdev_id = arvif->vdev_id;
+               ret = 0;
+               break;
+       case ATH12K_SCAN_STARTING:
+       case ATH12K_SCAN_RUNNING:
+       case ATH12K_SCAN_ABORTING:
+               ret = EBUSY;
+               break;
+       }
+#ifdef notyet
+       spin_unlock_bh(&ar->data_lock);
+#endif
+       if (ret)
+               goto exit;
+
+       arg = malloc(sizeof(*arg), M_DEVBUF, M_ZERO | M_NOWAIT);
+       if (!arg) {
+               ret = ENOMEM;
+               goto exit;
+       }
+
+       qwz_wmi_start_scan_init(sc, arg);
+       arg->vdev_id = arvif->vdev_id;
+       arg->scan_id = ATH12K_SCAN_ID;
+
+       if (ic->ic_des_esslen != 0) {
+               arg->num_ssids = 1;
+               arg->ssid[0].length  = ic->ic_des_esslen;
+               memcpy(&arg->ssid[0].ssid, ic->ic_des_essid,
+                   ic->ic_des_esslen);
+       } else
+               arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+
+       /* Count usable channels; ic_channels[0] is never used. */
+       lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
+       num_channels = 0;
+       for (chan = &ic->ic_channels[1]; chan <= lastc; chan++) {
+               if (chan->ic_flags == 0)
+                       continue;
+               num_channels++;
+       }
+       if (num_channels) {
+               arg->num_chan = num_channels;
+               arg->chan_list = mallocarray(arg->num_chan,
+                   sizeof(*arg->chan_list), M_DEVBUF, M_NOWAIT | M_ZERO);
+
+               if (!arg->chan_list) {
+                       ret = ENOMEM;
+                       goto exit;
+               }
+
+               i = 0;
+               for (chan = &ic->ic_channels[1]; chan <= lastc; chan++) {
+                       if (chan->ic_flags == 0)
+                               continue;
+                       if (isset(sc->wmi.svc_map,
+                           WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL)) {
+                               arg->chan_list[i++] = chan->ic_freq &
+                                   WMI_SCAN_CONFIG_PER_CHANNEL_MASK;
+#if 0
+                               /* If NL80211_SCAN_FLAG_COLOCATED_6GHZ is set in scan
+                                * flags, then scan all PSC channels in 6 GHz band and
+                                * those non-PSC channels where RNR IE is found during
+                                * the legacy 2.4/5 GHz scan.
+                                * If NL80211_SCAN_FLAG_COLOCATED_6GHZ is not set,
+                                * then all channels in 6 GHz will be scanned.
+                                */
+                               if (req->channels[i]->band == NL80211_BAND_6GHZ &&
+                                   req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ &&
+                                   !cfg80211_channel_is_psc(req->channels[i]))
+                                       arg->chan_list[i] |=
+                                               WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND;
+#endif
+                       } else {
+                               arg->chan_list[i++] = chan->ic_freq;
+                       }
+               }
+       }
+#if 0
+       if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+               arg->scan_f_add_spoofed_mac_in_probe = 1;
+               ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
+               ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
+       }
+#endif
+       scan_timeout = 5000;
+
+       /* Add a margin to account for event/command processing */
+       scan_timeout += ATH12K_MAC_SCAN_CMD_EVT_OVERHEAD;
+
+       ret = qwz_start_scan(sc, arg);
+       if (ret) {
+               if (ret != ESHUTDOWN) {
+                       printf("%s: failed to start hw scan: %d\n",
+                           sc->sc_dev.dv_xname, ret);
+               }
+#ifdef notyet
+               spin_lock_bh(&ar->data_lock);
+#endif
+               sc->scan.state = ATH12K_SCAN_IDLE;
+#ifdef notyet
+               spin_unlock_bh(&ar->data_lock);
+#endif
+       } else {
+               /*
+                * The current mode might have been fixed during association.
+                * Ensure all channels get scanned.
+                */
+               if (IFM_SUBTYPE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
+                       ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
+       }
+#if 0
+       timeout_add_msec(&sc->scan.timeout, scan_timeout);
+#endif
+exit:
+       /* The request was copied by firmware; free it in all cases. */
+       if (arg) {
+               free(arg->chan_list, M_DEVBUF,
+                   arg->num_chan * sizeof(*arg->chan_list));
+#if 0
+               kfree(arg->extraie.ptr);
+#endif
+               free(arg, M_DEVBUF, sizeof(*arg));
+       }
+#ifdef notyet
+       mutex_unlock(&ar->conf_mutex);
+#endif
+       if (sc->state_11d == ATH12K_11D_PREPARING)
+               qwz_mac_11d_scan_start(sc, arvif);
+
+       return ret;
+}
+
+/*
+ * Abort a scan that is currently running.
+ * Only the RUNNING state can be aborted; STARTING and ABORTING are
+ * refused with a diagnostic, and IDLE is silently ignored since the
+ * scan has already completed.
+ */
+void
+qwz_scan_abort(struct qwz_softc *sc)
+{
+       int ret;
+#ifdef notyet
+       lockdep_assert_held(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+#endif
+       switch (sc->scan.state) {
+       case ATH12K_SCAN_IDLE:
+               /* This can happen if the timeout worker kicked in and
+                * requested an abort while scan completion was being
+                * processed.
+                */
+               break;
+       case ATH12K_SCAN_STARTING:
+       case ATH12K_SCAN_ABORTING:
+               printf("%s: refusing scan abortion due to invalid "
+                   "scan state: %d\n", sc->sc_dev.dv_xname, sc->scan.state);
+               break;
+       case ATH12K_SCAN_RUNNING:
+               sc->scan.state = ATH12K_SCAN_ABORTING;
+#ifdef notyet
+               spin_unlock_bh(&ar->data_lock);
+#endif
+               ret = qwz_scan_stop(sc);
+               if (ret)
+                       printf("%s: failed to abort scan: %d\n",
+                           sc->sc_dev.dv_xname, ret);
+#ifdef notyet
+               spin_lock_bh(&ar->data_lock);
+#endif
+               break;
+       }
+#ifdef notyet
+       spin_unlock_bh(&ar->data_lock);
+#endif
+}
+
+/*
+ * Find a pdev which corresponds to a given channel.
+ * This doesn't exactly match the semantics of the Linux driver
+ * but because OpenBSD does not (yet) implement multi-bss mode
+ * we can assume that only one PHY will be active in either the
+ * 2 GHz or the 5 GHz band.
+ * Only pdevs marked active in sc->pdevs_active are considered.
+ * Returns NULL if no active pdev supports the channel's band.
+ */
+struct qwz_pdev *
+qwz_get_pdev_for_chan(struct qwz_softc *sc, struct ieee80211_channel *chan)
+{
+       struct qwz_pdev *pdev;
+       int i;
+
+       for (i = 0; i < sc->num_radios; i++) {
+               if ((sc->pdevs_active & (1 << i)) == 0)
+                       continue;
+
+               pdev = &sc->pdevs[i];
+               if (IEEE80211_IS_CHAN_2GHZ(chan) &&
+                   (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP))
+                       return pdev;
+               if (IEEE80211_IS_CHAN_5GHZ(chan) &&
+                   (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP))
+                       return pdev;
+       }
+
+       return NULL;
+}
+
+/*
+ * Program the management and beacon tx rate vdev parameters to the
+ * minimum basic rate of the current BSS.  Failures are logged but
+ * otherwise ignored.
+ */
+void
+qwz_recalculate_mgmt_rate(struct qwz_softc *sc, struct ieee80211_node *ni,
+    uint32_t vdev_id, uint32_t pdev_id)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       int hw_rate_code;
+       uint32_t vdev_param;
+       int bitrate;
+       int ret;
+#ifdef notyet
+       lockdep_assert_held(&ar->conf_mutex);
+#endif
+       bitrate = ieee80211_min_basic_rate(ic);
+       hw_rate_code = qwz_mac_get_rate_hw_value(ic, ni, bitrate);
+       if (hw_rate_code < 0) {
+               DPRINTF("%s: bitrate not supported %d\n",
+                   sc->sc_dev.dv_xname, bitrate);
+               return;
+       }
+
+       vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+       ret = qwz_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id,
+           vdev_param, hw_rate_code);
+       if (ret)
+               printf("%s: failed to set mgmt tx rate\n",
+                   sc->sc_dev.dv_xname);
+#if 0
+       /* For WCN6855, firmware will clear this param when vdev starts, hence
+        * cache it here so that we can reconfigure it once vdev starts.
+        */
+       ab->hw_rate_code = hw_rate_code;
+#endif
+       vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
+       ret = qwz_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id, vdev_param,
+           hw_rate_code);
+       if (ret)
+               printf("%s: failed to set beacon tx rate\n",
+                   sc->sc_dev.dv_xname);
+}
+
+/*
+ * Prepare for authentication with the BSS: program the beacon
+ * interval, set up management tx rates, add a firmware station entry
+ * for the AP and start the vdev on the AP's channel.
+ * Returns 0 on success or an errno value.
+ */
+int
+qwz_auth(struct qwz_softc *sc)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct ieee80211_node *ni = ic->ic_bss;
+       uint32_t param_id;
+       struct qwz_vif *arvif;
+       struct qwz_pdev *pdev;
+       int ret;
+
+       arvif = TAILQ_FIRST(&sc->vif_list);
+       if (arvif == NULL) {
+               printf("%s: no vdev found\n", sc->sc_dev.dv_xname);
+               return EINVAL;
+       }
+
+       pdev = qwz_get_pdev_for_chan(sc, ni->ni_chan);
+       if (pdev == NULL) {
+               printf("%s: no pdev found for channel %d\n",
+                   sc->sc_dev.dv_xname, ieee80211_chan2ieee(ic, ni->ni_chan));
+               return EINVAL;
+       }
+
+       param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
+       ret = qwz_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
+           param_id, ni->ni_intval);
+       if (ret) {
+               printf("%s: failed to set beacon interval for VDEV: %d\n",
+                   sc->sc_dev.dv_xname, arvif->vdev_id);
+               return ret;
+       }
+
+       /* Start from the lowest tx rate; rate control adapts later. */
+       qwz_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id);
+       ni->ni_txrate = 0;
+
+       ret = qwz_mac_station_add(sc, arvif, pdev->pdev_id, ni);
+       if (ret)
+               return ret;
+
+       /* Start vdev. */
+       ret = qwz_mac_vdev_start(sc, arvif, pdev->pdev_id);
+       if (ret) {
+               printf("%s: failed to start MAC for VDEV: %d\n",
+                   sc->sc_dev.dv_xname, arvif->vdev_id);
+               return ret;
+       }
+
+       /*
+        * WCN6855 firmware clears basic-rate parameters when vdev starts.
+        * Set it once more.
+        */
+       qwz_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id);
+
+       return ret;
+}
+
+/*
+ * Tear down association state: stop the vdev, revoke the BSS peer's
+ * authorization in firmware, and remove the firmware station entry.
+ * Returns 0 on success or an errno value.
+ */
+int
+qwz_deauth(struct qwz_softc *sc)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct ieee80211_node *ni = ic->ic_bss;
+       struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
+       uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
+       int ret;
+
+       ret = qwz_mac_vdev_stop(sc, arvif, pdev_id);
+       if (ret) {
+               printf("%s: unable to stop vdev vdev_id %d: %d\n",
+                  sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+               return ret;
+       }
+
+       ret = qwz_wmi_set_peer_param(sc, ni->ni_macaddr, arvif->vdev_id,
+           pdev_id, WMI_PEER_AUTHORIZE, 0);
+       if (ret) {
+               printf("%s: unable to deauthorize BSS peer: %d\n",
+                  sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       ret = qwz_mac_station_remove(sc, arvif, pdev_id, ni);
+       if (ret)
+               return ret;
+
+       DNPRINTF(QWZ_D_MAC, "%s: disassociated from bssid %s aid %d\n",
+           __func__, ether_sprintf(ni->ni_bssid), arvif->aid);
+
+       return 0;
+}
+
+/*
+ * Fill in the basic fields of a peer association command: peer MAC,
+ * vdev, association ID, listen interval and capability bits.
+ */
+void
+qwz_peer_assoc_h_basic(struct qwz_softc *sc, struct qwz_vif *arvif,
+    struct ieee80211_node *ni, struct peer_assoc_params *arg)
+{
+#ifdef notyet
+       lockdep_assert_held(&ar->conf_mutex);
+#endif
+
+       IEEE80211_ADDR_COPY(arg->peer_mac, ni->ni_macaddr);
+       arg->vdev_id = arvif->vdev_id;
+       arg->peer_associd = ni->ni_associd;
+       arg->auth_flag = 1;
+       arg->peer_listen_intval = ni->ni_intval;
+       arg->peer_nss = 1; /* single spatial stream for now */
+       arg->peer_caps = ni->ni_capinfo;
+}
+
+/*
+ * Fill in the crypto-related fields of a peer association command.
+ * With WPA/RSN enabled a 4-way PTK handshake is required; legacy WPA1
+ * additionally needs the 2-way GTK handshake.
+ */
+void
+qwz_peer_assoc_h_crypto(struct qwz_softc *sc, struct qwz_vif *arvif,
+    struct ieee80211_node *ni, struct peer_assoc_params *arg)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+
+       if (ic->ic_flags & IEEE80211_F_RSNON) {
+               arg->need_ptk_4_way = 1;
+               if (ni->ni_rsnprotos == IEEE80211_PROTO_WPA)
+                       arg->need_gtk_2_way = 1;
+       }
+#if 0
+       if (sta->mfp) {
+               /* TODO: Need to check if FW supports PMF? */
+               arg->is_pmf_enabled = true;
+       }
+#endif
+}
+
+/*
+ * Return non-zero if the given rate (in units of 500 kbit/s) is a
+ * CCK rate: 1, 2, 5.5 or 11 Mbit/s.
+ */
+int
+qwz_mac_rate_is_cck(uint8_t rate)
+{
+       return (rate == 2 || rate == 4 || rate == 11 || rate == 22);
+}
+
+/*
+ * Copy the node's negotiated legacy rate set into the peer
+ * association command, truncated to the firmware's rate array size.
+ * CCK rates are tagged with the 0x80 flag bit; presumably this is
+ * the basic/CCK marker the firmware expects -- TODO confirm against
+ * the WMI definition.
+ */
+void
+qwz_peer_assoc_h_rates(struct ieee80211_node *ni, struct peer_assoc_params *arg)
+{
+       struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+       struct ieee80211_rateset *rs = &ni->ni_rates;
+       int i;
+
+       for (i = 0, rateset->num_rates = 0;
+           i < rs->rs_nrates && rateset->num_rates < nitems(rateset->rates);
+           i++, rateset->num_rates++) {
+               uint8_t rate = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+               if (qwz_mac_rate_is_cck(rate))
+                       rate |= 0x80;
+               rateset->rates[rateset->num_rates] = rate;
+       }
+}
+
+/*
+ * Map the current net80211 PHY mode to a WMI phymode for the peer
+ * association command.  Only legacy 11a/b/g modes are handled so far;
+ * anything else maps to MODE_UNKNOWN.
+ */
+void
+qwz_peer_assoc_h_phymode(struct qwz_softc *sc, struct ieee80211_node *ni,
+    struct peer_assoc_params *arg)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       enum wmi_phy_mode phymode;
+
+       switch (ic->ic_curmode) {
+       case IEEE80211_MODE_11A:
+               phymode = MODE_11A;
+               break;
+       case IEEE80211_MODE_11B:
+               phymode = MODE_11B;
+               break;
+       case IEEE80211_MODE_11G:
+               phymode = MODE_11G;
+               break;
+       default:
+               phymode = MODE_UNKNOWN;
+               break;
+       }
+
+       DNPRINTF(QWZ_D_MAC, "%s: peer %s phymode %s\n", __func__,
+           ether_sprintf(ni->ni_macaddr), qwz_wmi_phymode_str(phymode));
+
+       arg->peer_phymode = phymode;
+}
+
+/*
+ * Populate a peer association command for the given node.
+ * Zeroes the argument struct, then fills in the basic, crypto,
+ * legacy-rate and phymode sections.  HT/VHT/HE/QoS/SMPS handling is
+ * not implemented yet (see the disabled calls below).
+ */
+void
+qwz_peer_assoc_prepare(struct qwz_softc *sc, struct qwz_vif *arvif,
+    struct ieee80211_node *ni, struct peer_assoc_params *arg, int reassoc)
+{
+       memset(arg, 0, sizeof(*arg));
+
+       arg->peer_new_assoc = !reassoc;
+       qwz_peer_assoc_h_basic(sc, arvif, ni, arg);
+       qwz_peer_assoc_h_crypto(sc, arvif, ni, arg);
+       qwz_peer_assoc_h_rates(ni, arg);
+       qwz_peer_assoc_h_phymode(sc, ni, arg);
+#if 0
+       qwz_peer_assoc_h_ht(sc, arvif, ni, arg);
+       qwz_peer_assoc_h_vht(sc, arvif, ni, arg);
+       qwz_peer_assoc_h_he(sc, arvif, ni, arg);
+       qwz_peer_assoc_h_he_6ghz(sc, arvif, ni, arg);
+       qwz_peer_assoc_h_qos(sc, arvif, ni, arg);
+       qwz_peer_assoc_h_smps(ni, arg);
+#endif
+#if 0
+       arsta->peer_nss = arg->peer_nss;
+#endif
+       /* TODO: amsdu_disable req? */
+}
+
+/*
+ * Complete association (RUN state): send the WMI peer-assoc command,
+ * wait for the firmware's confirmation event, bring the vdev up,
+ * authorize the peer and finally enable the datapath interrupts.
+ * Returns 0 on success or an errno value.
+ */
+int
+qwz_run(struct qwz_softc *sc)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct ieee80211_node *ni = ic->ic_bss;
+       struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
+       uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
+       struct peer_assoc_params peer_arg;
+       int ret;
+#ifdef notyet
+       lockdep_assert_held(&ar->conf_mutex);
+#endif
+
+       /* NOTE(review): %pM is a Linux printf extension; OpenBSD's
+        * kernel printf does not support it.  Harmless while hidden
+        * behind DNPRINTF, but should use ether_sprintf() -- confirm.
+        */
+       DNPRINTF(QWZ_D_MAC, "%s: vdev %i assoc bssid %pM aid %d\n",
+           __func__, arvif->vdev_id, arvif->bssid, arvif->aid);
+
+       qwz_peer_assoc_prepare(sc, arvif, ni, &peer_arg, 0);
+
+       peer_arg.is_assoc = 1;
+
+       sc->peer_assoc_done = 0;
+       ret = qwz_wmi_send_peer_assoc_cmd(sc, pdev_id, &peer_arg);
+       if (ret) {
+               printf("%s: failed to run peer assoc for %s vdev %i: %d\n",
+                   sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
+                   arvif->vdev_id, ret);
+               return ret;
+       }
+
+       /* Wait (up to 1 second per wakeup) for the assoc-confirm event. */
+       while (!sc->peer_assoc_done) {
+               ret = tsleep_nsec(&sc->peer_assoc_done, 0, "qwzassoc",
+                   SEC_TO_NSEC(1));
+               if (ret) {
+                       printf("%s: failed to get peer assoc conf event "
+                           "for %s vdev %i\n", sc->sc_dev.dv_xname,
+                           ether_sprintf(ni->ni_macaddr), arvif->vdev_id);
+                       return ret;
+               }
+       }
+#if 0
+       ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
+                                    &sta->deflink.ht_cap,
+                                    le16_to_cpu(sta->deflink.he_6ghz_capa.capa));
+       if (ret) {
+               ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       if (!ath12k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
+               ath12k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
+                           arvif->vdev_id, bss_conf->bssid);
+               return;
+       }
+
+       WARN_ON(arvif->is_up);
+#endif
+
+       arvif->aid = ni->ni_associd;
+       IEEE80211_ADDR_COPY(arvif->bssid, ni->ni_bssid);
+
+       ret = qwz_wmi_vdev_up(sc, arvif->vdev_id, pdev_id, arvif->aid,
+           arvif->bssid, NULL, 0, 0);
+       if (ret) {
+               printf("%s: failed to set vdev %d up: %d\n",
+                   sc->sc_dev.dv_xname, arvif->vdev_id, ret);
+               return ret;
+       }
+
+       arvif->is_up = 1;
+#if 0
+       arvif->rekey_data.enable_offload = 0;
+#endif
+
+       DNPRINTF(QWZ_D_MAC, "%s: vdev %d up (associated) bssid %s aid %d\n",
+           __func__, arvif->vdev_id, ether_sprintf(ni->ni_bssid), arvif->aid);
+
+       ret = qwz_wmi_set_peer_param(sc, ni->ni_macaddr, arvif->vdev_id,
+           pdev_id, WMI_PEER_AUTHORIZE, 1);
+       if (ret) {
+               printf("%s: unable to authorize BSS peer: %d\n",
+                  sc->sc_dev.dv_xname, ret);
+               return ret;
+       }
+
+       /* Enable "ext" IRQs for datapath. */
+       sc->ops.irq_enable(sc);
+
+       return 0;
+}
+
+/*
+ * Leave RUN state: disable datapath interrupts, reset the node's tx
+ * rate state and bring the vdev down.
+ * Returns 0 on success or an errno value.
+ */
+int
+qwz_run_stop(struct qwz_softc *sc)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct qwz_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
+       uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
+       struct qwz_node *nq = (void *)ic->ic_bss;
+       int ret;
+
+       sc->ops.irq_disable(sc);
+
+       if (ic->ic_opmode == IEEE80211_M_STA) {
+               ic->ic_bss->ni_txrate = 0;
+               nq->flags = 0;
+       }
+
+       ret = qwz_wmi_vdev_down(sc, arvif->vdev_id, pdev_id);
+       if (ret)
+               return ret;
+
+       arvif->is_up = 0;
+
+       DNPRINTF(QWZ_D_MAC, "%s: vdev %d down\n", __func__, arvif->vdev_id);
+
+       return 0;
+}
+
+#if NBPFILTER > 0
+/*
+ * Attach the bpf radiotap tap and initialize the rx/tx radiotap
+ * header templates.
+ * NOTE(review): the present-field bitmaps use IWX_* definitions
+ * inherited from qwx(4)/iwx(4) -- confirm they match the fields the
+ * qwz rx/tx paths actually fill in.
+ */
+void
+qwz_radiotap_attach(struct qwz_softc *sc)
+{
+       bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
+           sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
+
+       sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
+       sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
+       sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
+
+       sc->sc_txtap_len = sizeof(sc->sc_txtapu);
+       sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
+       sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
+}
+#endif
+
+/*
+ * Late attach: set up deferred tasks and the scan timeout, attach
+ * radiotap, initialize pdev back-pointers and the vif list, then
+ * bring the device up once (loading firmware and querying
+ * capabilities) and power it back down until the interface is
+ * brought up.  Returns 0 on success or an errno value.
+ */
+int
+qwz_attach(struct qwz_softc *sc)
+{
+       struct ieee80211com *ic = &sc->sc_ic;
+       struct ifnet *ifp = &ic->ic_if;
+       int error, i;
+
+       task_set(&sc->init_task, qwz_init_task, sc);
+       task_set(&sc->newstate_task, qwz_newstate_task, sc);
+       task_set(&sc->setkey_task, qwz_setkey_task, sc);
+       timeout_set_proc(&sc->scan.timeout, qwz_scan_timeout, sc);
+#if NBPFILTER > 0
+       qwz_radiotap_attach(sc);
+#endif
+       for (i = 0; i < nitems(sc->pdevs); i++)
+               sc->pdevs[i].sc = sc;
+
+       TAILQ_INIT(&sc->vif_list);
+
+       error = qwz_init(ifp);
+       if (error)
+               return error;
+
+       /* Turn device off until interface comes up. */
+       qwz_core_deinit(sc);
+
+       return 0;
+}
+
+/*
+ * Release resources held by the driver: firmware target memory, M3
+ * firmware memory and any loaded firmware images.
+ */
+void
+qwz_detach(struct qwz_softc *sc)
+{
+       if (sc->fwmem) {
+               qwz_dmamem_free(sc->sc_dmat, sc->fwmem);
+               sc->fwmem = NULL;
+       }
+
+       if (sc->m3_mem) {
+               qwz_dmamem_free(sc->sc_dmat, sc->m3_mem);
+               sc->m3_mem = NULL;
+       }
+
+       qwz_free_firmware(sc);
+}
+
+/*
+ * Allocate a single physically contiguous DMA buffer of the given
+ * size and alignment, restricted to the low 32-bit address range,
+ * map it into kernel virtual memory (coherent) and load it into a
+ * DMA map.  The memory is zeroed.  Returns NULL on failure; partial
+ * allocations are unwound via the goto labels below.
+ */
+struct qwz_dmamem *
+qwz_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t align)
+{
+       struct qwz_dmamem *adm;
+       int nsegs;
+
+       adm = malloc(sizeof(*adm), M_DEVBUF, M_NOWAIT | M_ZERO);
+       if (adm == NULL)
+               return NULL;
+       adm->size = size;
+
+       if (bus_dmamap_create(dmat, size, 1, size, 0,
+           BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &adm->map) != 0)
+               goto admfree;
+
+       /* Restrict to 32-bit DMA addresses. */
+       if (bus_dmamem_alloc_range(dmat, size, align, 0, &adm->seg, 1,
+           &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 0, 0xffffffff) != 0)
+               goto destroy;
+
+       if (bus_dmamem_map(dmat, &adm->seg, nsegs, size,
+           &adm->kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0)
+               goto free;
+
+       if (bus_dmamap_load_raw(dmat, adm->map, &adm->seg, nsegs, size,
+           BUS_DMA_NOWAIT) != 0)
+               goto unmap;
+
+       bzero(adm->kva, size);
+
+       return adm;
+
+unmap:
+       bus_dmamem_unmap(dmat, adm->kva, size);
+free:
+       bus_dmamem_free(dmat, &adm->seg, 1);
+destroy:
+       bus_dmamap_destroy(dmat, adm->map);
+admfree:
+       free(adm, M_DEVBUF, sizeof(*adm));
+
+       return NULL;
+}
+
+/*
+ * Free a DMA buffer allocated with qwz_dmamem_alloc().
+ * NOTE(review): the map is destroyed without a prior
+ * bus_dmamap_unload(); qwz_dmamem_alloc() loads it with
+ * bus_dmamap_load_raw() -- confirm an explicit unload is not needed.
+ */
+void
+qwz_dmamem_free(bus_dma_tag_t dmat, struct qwz_dmamem *adm)
+{
+       bus_dmamem_unmap(dmat, adm->kva, adm->size);
+       bus_dmamem_free(dmat, &adm->seg, 1);
+       bus_dmamap_destroy(dmat, adm->map);
+       free(adm, M_DEVBUF, sizeof(*adm));
+}
+
+/*
+ * autoconf power-state hook: stop the interface on quiesce (suspend)
+ * and re-initialize it on wakeup if it was up.
+ * NOTE(review): err from qwz_init() is only reported via printf and
+ * the function always returns 0 -- presumably intentional since
+ * wakeup cannot be refused; confirm against other activate handlers.
+ */
+int
+qwz_activate(struct device *self, int act)
+{
+       struct qwz_softc *sc = (struct qwz_softc *)self;
+       struct ifnet *ifp = &sc->sc_ic.ic_if;
+       int err = 0;
+
+       switch (act) {
+       case DVACT_QUIESCE:
+               if (ifp->if_flags & IFF_RUNNING) {
+                       rw_enter_write(&sc->ioctl_rwl);
+                       qwz_stop(ifp);
+                       rw_exit(&sc->ioctl_rwl);
+               }
+               break;
+       case DVACT_RESUME:
+               break;
+       case DVACT_WAKEUP:
+               if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
+                       err = qwz_init(ifp);
+                       if (err)
+                               printf("%s: could not initialize hardware\n",
+                                   sc->sc_dev.dv_xname);
+               }
+               break;
+       }
+
+       return 0;
+}
diff --git a/sys/dev/ic/qwzreg.h b/sys/dev/ic/qwzreg.h
new file mode 100644 (file)
index 0000000..e0cf264
--- /dev/null
@@ -0,0 +1,13253 @@
+/*     $OpenBSD: qwzreg.h,v 1.1 2024/08/14 14:40:46 patrick Exp $      */
+
+/*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc.
+ * Copyright (c) 2018-2021 The Linux Foundation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted (subject to the limitations in the disclaimer
+ * below) provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ *  * Neither the name of [Owner Organization] nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
+ * THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
+ * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * core.h
+ */
+
+#define ATH12K_TX_MGMT_NUM_PENDING_MAX 512
+
+#define ATH12K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64
+
+/* Pending management packets threshold for dropping probe responses */
+#define ATH12K_PRB_RSP_DROP_THRESHOLD ((ATH12K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4)
+
+#define ATH12K_INVALID_HW_MAC_ID       0xFF
+#define ATH12K_CONNECTION_LOSS_HZ      (3 * HZ)
+
+/*
+ * Known hardware revisions.
+ * NOTE(review): this list still mirrors the ath11k-generation chips
+ * inherited from qwx(4); ath12k-generation entries to be added as the
+ * port progresses.
+ */
+enum ath12k_hw_rev {
+       ATH12K_HW_IPQ8074,
+       ATH12K_HW_QCA6390_HW20,
+       ATH12K_HW_IPQ6018_HW10,
+       ATH12K_HW_QCN9074_HW10,
+       ATH12K_HW_WCN6855_HW20,
+       ATH12K_HW_WCN6855_HW21,
+       ATH12K_HW_WCN6750_HW10,
+};
+
+/* Firmware operating mode requested at boot. */
+enum ath12k_firmware_mode {
+       /* the default mode, standard 802.11 functionality */
+       ATH12K_FIRMWARE_MODE_NORMAL,
+
+       /* factory tests etc */
+       ATH12K_FIRMWARE_MODE_FTM,
+
+       /* Cold boot calibration */
+       ATH12K_FIRMWARE_MODE_COLD_BOOT = 7,
+};
+
+enum ath12k_crypt_mode {
+       /* Only use hardware crypto engine */
+       ATH12K_CRYPT_MODE_HW,
+       /* Only use software crypto */
+       ATH12K_CRYPT_MODE_SW,
+};
+
+/* IPQ8074 HW channel counters frequency value in hertz */
+#define IPQ8074_CC_FREQ_HERTZ 320000
+
+/* Band/channel bounds used for capability parsing (frequencies in MHz). */
+#define ATH12K_MIN_5G_FREQ 4150
+#define ATH12K_MIN_6G_FREQ 5925
+#define ATH12K_MAX_6G_FREQ 7115
+#define ATH12K_NUM_CHANS 102
+#define ATH12K_MAX_5G_CHAN 177
+
+/* Antenna noise floor */
+#define ATH12K_DEFAULT_NOISE_FLOOR -95
+
+/*
+ * wmi.h
+ */
+
+#define PSOC_HOST_MAX_NUM_SS (8)
+
+/* defines to set Packet extension values which can be 0 us, 8 usec or 16 usec */
+#define MAX_HE_NSS               8
+#define MAX_HE_MODULATION        8
+#define MAX_HE_RU                4
+#define HE_MODULATION_NONE       7
+#define HE_PET_0_USEC            0
+#define HE_PET_8_USEC            1
+#define HE_PET_16_USEC           2
+
+#define WMI_MAX_CHAINS          8
+
+#define WMI_MAX_NUM_SS                    MAX_HE_NSS
+#define WMI_MAX_NUM_RU                    MAX_HE_RU
+
+/* Build a command/event id from a WMI group id (see enum wmi_cmd_group). */
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+
+/* Header prepended to every WMI command and event. */
+struct wmi_cmd_hdr {
+       uint32_t cmd_id;
+} __packed;
+
+/* TLV header followed by the value; tag and length are packed into
+ * the header word per WMI_TLV_TAG and WMI_TLV_LEN below.
+ */
+struct wmi_tlv {
+       uint32_t header;
+       uint8_t value[];
+} __packed;
+
+#define WMI_TLV_LEN    GENMASK(15, 0)
+#define WMI_TLV_TAG    GENMASK(31, 16)
+#define TLV_HDR_SIZE   sizeof(uint32_t) /* wmi_tlv.header */
+
+#define WMI_CMD_HDR_CMD_ID      GENMASK(23, 0)
+#define WMI_MAX_MEM_REQS        32
+#define ATH12K_MAX_HW_LISTEN_INTERVAL 5
+
+#define WLAN_SCAN_MAX_HINT_S_SSID        10
+#define WLAN_SCAN_MAX_HINT_BSSID         10
+#define MAX_RNR_BSS                    5
+
+/* XXX duplicates of the three defines above (identical redefinition,
+ * hence harmless; carried over from the Linux driver).
+ */
+#define WLAN_SCAN_MAX_HINT_S_SSID        10
+#define WLAN_SCAN_MAX_HINT_BSSID         10
+#define MAX_RNR_BSS                    5
+
+#define WLAN_SCAN_PARAMS_MAX_SSID    16
+#define WLAN_SCAN_PARAMS_MAX_BSSID   4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN  256
+
+#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
+
+#define MAX_WMI_UTF_LEN 252
+#define WMI_BA_MODE_BUFFER_SIZE_256  3
+
+/*
+ * HW mode config type replicated from FW header
+ * @WMI_HOST_HW_MODE_SINGLE: Only one PHY is active.
+ * @WMI_HOST_HW_MODE_DBS: Both PHYs are active in different bands,
+ *                        one in 2G and another in 5G.
+ * @WMI_HOST_HW_MODE_SBS_PASSIVE: Both PHYs are in passive mode (only rx) in
+ *                        same band; no tx allowed.
+ * @WMI_HOST_HW_MODE_SBS: Both PHYs are active in the same band.
+ *                        Support for both PHYs within one band is planned
+ *                        for 5G only(as indicated in WMI_MAC_PHY_CAPABILITIES),
+ *                        but could be extended to other bands in the future.
+ *                        The separation of the band between the two PHYs needs
+ *                        to be communicated separately.
+ * @WMI_HOST_HW_MODE_DBS_SBS: 3 PHYs, with 2 on the same band doing SBS
+ *                           as in WMI_HW_MODE_SBS, and 3rd on the other band
+ * @WMI_HOST_HW_MODE_DBS_OR_SBS: Two PHY with one PHY capable of both 2G and
+ *                        5G. It can support SBS (5G + 5G) OR DBS (5G + 2G).
+ * @WMI_HOST_HW_MODE_MAX: Max hw_mode_id. Used to indicate invalid mode.
+ */
+enum wmi_host_hw_mode_config_type {
+       WMI_HOST_HW_MODE_SINGLE       = 0,
+       WMI_HOST_HW_MODE_DBS          = 1,
+       WMI_HOST_HW_MODE_SBS_PASSIVE  = 2,
+       WMI_HOST_HW_MODE_SBS          = 3,
+       WMI_HOST_HW_MODE_DBS_SBS      = 4,
+       WMI_HOST_HW_MODE_DBS_OR_SBS   = 5,
+
+       /* keep last */
+       WMI_HOST_HW_MODE_MAX
+};
+
+/* HW mode priority values used to detect the preferred HW mode
+ * on the available modes.  Lower value means higher priority.
+ */
+enum wmi_host_hw_mode_priority {
+       WMI_HOST_HW_MODE_DBS_SBS_PRI,
+       WMI_HOST_HW_MODE_DBS_PRI,
+       WMI_HOST_HW_MODE_DBS_OR_SBS_PRI,
+       WMI_HOST_HW_MODE_SBS_PRI,
+       WMI_HOST_HW_MODE_SBS_PASSIVE_PRI,
+       WMI_HOST_HW_MODE_SINGLE_PRI,
+
+       /* keep last the lowest priority */
+       WMI_HOST_HW_MODE_MAX_PRI
+};
+
+/* Band capability bitmask (see qwz_pdev cap.supported_bands users). */
+enum WMI_HOST_WLAN_BAND {
+       WMI_HOST_WLAN_2G_CAP    = 0x1,
+       WMI_HOST_WLAN_5G_CAP    = 0x2,
+       WMI_HOST_WLAN_2G_5G_CAP = WMI_HOST_WLAN_2G_CAP | WMI_HOST_WLAN_5G_CAP,
+};
+
+/* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command.
+ * Used only for HE auto rate mode.
+ */
+enum {
+       /* HE LTF related configuration */
+       WMI_HE_AUTORATE_LTF_1X = BIT(0),
+       WMI_HE_AUTORATE_LTF_2X = BIT(1),
+       WMI_HE_AUTORATE_LTF_4X = BIT(2),
+
+       /* HE GI related configuration */
+       WMI_AUTORATE_400NS_GI = BIT(8),
+       WMI_AUTORATE_800NS_GI = BIT(9),
+       WMI_AUTORATE_1600NS_GI = BIT(10),
+       WMI_AUTORATE_3200NS_GI = BIT(11),
+};
+
+/* Host vdev flag bits (WMI_HOST_VDEV_FLAGS_*). */
+enum {
+       WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP       = 0x00000001,
+       WMI_HOST_VDEV_FLAGS_TRANSMIT_AP         = 0x00000002,
+       WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP     = 0x00000004,
+       WMI_HOST_VDEV_FLAGS_EMA_MODE            = 0x00000008,
+       WMI_HOST_VDEV_FLAGS_SCAN_MODE_VAP       = 0x00000010,
+};
+
+/*
+ * wmi command groups.  Each group id is shifted into bits 12 and up by
+ * WMI_CMD_GRP()/WMI_EVT_GRP_START_ID() below, giving every group its
+ * own 4096-wide command/event id range.
+ */
+enum wmi_cmd_group {
+       /* 0 to 2 are reserved */
+       WMI_GRP_START = 0x3,
+       WMI_GRP_SCAN = WMI_GRP_START,
+       WMI_GRP_PDEV            = 0x4,
+       WMI_GRP_VDEV           = 0x5,
+       WMI_GRP_PEER           = 0x6,
+       WMI_GRP_MGMT           = 0x7,
+       WMI_GRP_BA_NEG         = 0x8,
+       WMI_GRP_STA_PS         = 0x9,
+       WMI_GRP_DFS            = 0xa,
+       WMI_GRP_ROAM           = 0xb,
+       WMI_GRP_OFL_SCAN       = 0xc,
+       WMI_GRP_P2P            = 0xd,
+       WMI_GRP_AP_PS          = 0xe,
+       WMI_GRP_RATE_CTRL      = 0xf,
+       WMI_GRP_PROFILE        = 0x10,
+       WMI_GRP_SUSPEND        = 0x11,
+       WMI_GRP_BCN_FILTER     = 0x12,
+       WMI_GRP_WOW            = 0x13,
+       WMI_GRP_RTT            = 0x14,
+       WMI_GRP_SPECTRAL       = 0x15,
+       WMI_GRP_STATS          = 0x16,
+       WMI_GRP_ARP_NS_OFL     = 0x17,
+       WMI_GRP_NLO_OFL        = 0x18,
+       WMI_GRP_GTK_OFL        = 0x19,
+       WMI_GRP_CSA_OFL        = 0x1a,
+       WMI_GRP_CHATTER        = 0x1b,
+       WMI_GRP_TID_ADDBA      = 0x1c,
+       WMI_GRP_MISC           = 0x1d,
+       WMI_GRP_GPIO           = 0x1e,
+       WMI_GRP_FWTEST         = 0x1f,
+       WMI_GRP_TDLS           = 0x20,
+       WMI_GRP_RESMGR         = 0x21,
+       WMI_GRP_STA_SMPS       = 0x22,
+       WMI_GRP_WLAN_HB        = 0x23,
+       WMI_GRP_RMC            = 0x24,
+       WMI_GRP_MHF_OFL        = 0x25,
+       WMI_GRP_LOCATION_SCAN  = 0x26,
+       WMI_GRP_OEM            = 0x27,
+       WMI_GRP_NAN            = 0x28,
+       WMI_GRP_COEX           = 0x29,
+       WMI_GRP_OBSS_OFL       = 0x2a,
+       WMI_GRP_LPI            = 0x2b,
+       WMI_GRP_EXTSCAN        = 0x2c,
+       WMI_GRP_DHCP_OFL       = 0x2d,
+       WMI_GRP_IPA            = 0x2e,
+       WMI_GRP_MDNS_OFL       = 0x2f,
+       WMI_GRP_SAP_OFL        = 0x30,
+       WMI_GRP_OCB            = 0x31,
+       WMI_GRP_SOC            = 0x32,
+       WMI_GRP_PKT_FILTER     = 0x33,
+       WMI_GRP_MAWC           = 0x34,
+       WMI_GRP_PMF_OFFLOAD    = 0x35,
+       WMI_GRP_BPF_OFFLOAD    = 0x36,
+       WMI_GRP_NAN_DATA       = 0x37,
+       WMI_GRP_PROTOTYPE      = 0x38,
+       WMI_GRP_MONITOR        = 0x39,
+       WMI_GRP_REGULATORY     = 0x3a,
+       WMI_GRP_HW_DATA_FILTER = 0x3b,
+       WMI_GRP_WLM            = 0x3c,
+       WMI_GRP_11K_OFFLOAD    = 0x3d,
+       WMI_GRP_TWT            = 0x3e,
+       WMI_GRP_MOTION_DET     = 0x3f,
+       WMI_GRP_SPATIAL_REUSE  = 0x40,
+};
+
+
+/*
+ * Form the first id of a command/event group: the group id occupies
+ * bits 12 and up, and ids within a group start at 1.  Both macros
+ * expand to the same value; one names the command side, the other the
+ * event side.
+ */
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
+
+/* Placeholder id for commands with no valid encoding. */
+#define WMI_CMD_UNSUPPORTED 0
+
+/*
+ * WMI TLV command IDs.  The first command of each group is anchored at
+ * WMI_TLV_CMD(group); subsequent commands take consecutive values via
+ * implicit enum increment, so the relative order of entries within a
+ * group is significant — do not reorder or insert entries mid-group.
+ */
+enum wmi_tlv_cmd_id {
+       WMI_INIT_CMDID = 0x1,
+       WMI_START_SCAN_CMDID = WMI_TLV_CMD(WMI_GRP_SCAN),
+       WMI_STOP_SCAN_CMDID,
+       WMI_SCAN_CHAN_LIST_CMDID,
+       WMI_SCAN_SCH_PRIO_TBL_CMDID,
+       WMI_SCAN_UPDATE_REQUEST_CMDID,
+       WMI_SCAN_PROB_REQ_OUI_CMDID,
+       WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID,
+       WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_GRP_PDEV),
+       WMI_PDEV_SET_CHANNEL_CMDID,
+       WMI_PDEV_SET_PARAM_CMDID,
+       WMI_PDEV_PKTLOG_ENABLE_CMDID,
+       WMI_PDEV_PKTLOG_DISABLE_CMDID,
+       WMI_PDEV_SET_WMM_PARAMS_CMDID,
+       WMI_PDEV_SET_HT_CAP_IE_CMDID,
+       WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+       WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+       WMI_PDEV_SET_QUIET_MODE_CMDID,
+       WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+       WMI_PDEV_GET_TPC_CONFIG_CMDID,
+       WMI_PDEV_SET_BASE_MACADDR_CMDID,
+       WMI_PDEV_DUMP_CMDID,
+       WMI_PDEV_SET_LED_CONFIG_CMDID,
+       WMI_PDEV_GET_TEMPERATURE_CMDID,
+       WMI_PDEV_SET_LED_FLASHING_CMDID,
+       WMI_PDEV_SMART_ANT_ENABLE_CMDID,
+       WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+       WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+       WMI_PDEV_SET_CTL_TABLE_CMDID,
+       WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+       WMI_PDEV_FIPS_CMDID,
+       WMI_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+       WMI_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+       WMI_PDEV_GET_NFCAL_POWER_CMDID,
+       WMI_PDEV_GET_TPC_CMDID,
+       WMI_MIB_STATS_ENABLE_CMDID,
+       WMI_PDEV_SET_PCL_CMDID,
+       WMI_PDEV_SET_HW_MODE_CMDID,
+       WMI_PDEV_SET_MAC_CONFIG_CMDID,
+       WMI_PDEV_SET_ANTENNA_MODE_CMDID,
+       WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID,
+       WMI_PDEV_WAL_POWER_DEBUG_CMDID,
+       WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID,
+       WMI_PDEV_SET_WAKEUP_CONFIG_CMDID,
+       WMI_PDEV_GET_ANTDIV_STATUS_CMDID,
+       WMI_PDEV_GET_CHIP_POWER_STATS_CMDID,
+       WMI_PDEV_SET_STATS_THRESHOLD_CMDID,
+       WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID,
+       WMI_PDEV_UPDATE_PKT_ROUTING_CMDID,
+       WMI_PDEV_CHECK_CAL_VERSION_CMDID,
+       WMI_PDEV_SET_DIVERSITY_GAIN_CMDID,
+       WMI_PDEV_DIV_GET_RSSI_ANTID_CMDID,
+       WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+       WMI_PDEV_UPDATE_PMK_CACHE_CMDID,
+       WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID,
+       WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID,
+       WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID,
+       WMI_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMDID,
+       WMI_PDEV_SET_RX_FILTER_PROMISCUOUS_CMDID,
+       WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
+       WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
+       WMI_PDEV_PKTLOG_FILTER_CMDID,
+       WMI_PDEV_SET_RAP_CONFIG_CMDID,
+       WMI_PDEV_DSM_FILTER_CMDID,
+       WMI_PDEV_FRAME_INJECT_CMDID,
+       WMI_PDEV_TBTT_OFFSET_SYNC_CMDID,
+       WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID,
+       WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID,
+       WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+       WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+       WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+       WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+       WMI_PDEV_GET_TPC_STATS_CMDID,
+       WMI_PDEV_ENABLE_DURATION_BASED_TX_MODE_SELECTION_CMDID,
+       WMI_PDEV_GET_DPD_STATUS_CMDID,
+       WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID,
+       WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID,
+       WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
+       WMI_VDEV_DELETE_CMDID,
+       WMI_VDEV_START_REQUEST_CMDID,
+       WMI_VDEV_RESTART_REQUEST_CMDID,
+       WMI_VDEV_UP_CMDID,
+       WMI_VDEV_STOP_CMDID,
+       WMI_VDEV_DOWN_CMDID,
+       WMI_VDEV_SET_PARAM_CMDID,
+       WMI_VDEV_INSTALL_KEY_CMDID,
+       WMI_VDEV_WNM_SLEEPMODE_CMDID,
+       WMI_VDEV_WMM_ADDTS_CMDID,
+       WMI_VDEV_WMM_DELTS_CMDID,
+       WMI_VDEV_SET_WMM_PARAMS_CMDID,
+       WMI_VDEV_SET_GTX_PARAMS_CMDID,
+       WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+       WMI_VDEV_PLMREQ_START_CMDID,
+       WMI_VDEV_PLMREQ_STOP_CMDID,
+       WMI_VDEV_TSF_TSTAMP_ACTION_CMDID,
+       WMI_VDEV_SET_IE_CMDID,
+       WMI_VDEV_RATEMASK_CMDID,
+       WMI_VDEV_ATF_REQUEST_CMDID,
+       WMI_VDEV_SET_DSCP_TID_MAP_CMDID,
+       WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+       WMI_VDEV_SET_QUIET_MODE_CMDID,
+       WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
+       WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
+       WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+       WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
+       WMI_PEER_DELETE_CMDID,
+       WMI_PEER_FLUSH_TIDS_CMDID,
+       WMI_PEER_SET_PARAM_CMDID,
+       WMI_PEER_ASSOC_CMDID,
+       WMI_PEER_ADD_WDS_ENTRY_CMDID,
+       WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+       WMI_PEER_MCAST_GROUP_CMDID,
+       WMI_PEER_INFO_REQ_CMDID,
+       WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+       WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID,
+       WMI_PEER_UPDATE_WDS_ENTRY_CMDID,
+       WMI_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+       WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+       WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+       WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+       WMI_PEER_ATF_REQUEST_CMDID,
+       WMI_PEER_BWF_REQUEST_CMDID,
+       WMI_PEER_REORDER_QUEUE_SETUP_CMDID,
+       WMI_PEER_REORDER_QUEUE_REMOVE_CMDID,
+       WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
+       WMI_PEER_ANTDIV_INFO_REQ_CMDID,
+       WMI_BCN_TX_CMDID = WMI_TLV_CMD(WMI_GRP_MGMT),
+       WMI_PDEV_SEND_BCN_CMDID,
+       WMI_BCN_TMPL_CMDID,
+       WMI_BCN_FILTER_RX_CMDID,
+       WMI_PRB_REQ_FILTER_RX_CMDID,
+       WMI_MGMT_TX_CMDID,
+       WMI_PRB_TMPL_CMDID,
+       WMI_MGMT_TX_SEND_CMDID,
+       WMI_OFFCHAN_DATA_TX_SEND_CMDID,
+       WMI_PDEV_SEND_FD_CMDID,
+       WMI_BCN_OFFLOAD_CTRL_CMDID,
+       WMI_BSS_COLOR_CHANGE_ENABLE_CMDID,
+       WMI_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMDID,
+       WMI_FILS_DISCOVERY_TMPL_CMDID,
+       WMI_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+       WMI_ADDBA_SEND_CMDID,
+       WMI_ADDBA_STATUS_CMDID,
+       WMI_DELBA_SEND_CMDID,
+       WMI_ADDBA_SET_RESP_CMDID,
+       WMI_SEND_SINGLEAMSDU_CMDID,
+       WMI_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_PS),
+       WMI_STA_POWERSAVE_PARAM_CMDID,
+       WMI_STA_MIMO_PS_MODE_CMDID,
+       WMI_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_DFS),
+       WMI_PDEV_DFS_DISABLE_CMDID,
+       WMI_DFS_PHYERR_FILTER_ENA_CMDID,
+       WMI_DFS_PHYERR_FILTER_DIS_CMDID,
+       WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID,
+       WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID,
+       WMI_VDEV_ADFS_CH_CFG_CMDID,
+       WMI_VDEV_ADFS_OCAC_ABORT_CMDID,
+       WMI_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_GRP_ROAM),
+       WMI_ROAM_SCAN_RSSI_THRESHOLD,
+       WMI_ROAM_SCAN_PERIOD,
+       WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+       WMI_ROAM_AP_PROFILE,
+       WMI_ROAM_CHAN_LIST,
+       WMI_ROAM_SCAN_CMD,
+       WMI_ROAM_SYNCH_COMPLETE,
+       WMI_ROAM_SET_RIC_REQUEST_CMDID,
+       WMI_ROAM_INVOKE_CMDID,
+       WMI_ROAM_FILTER_CMDID,
+       WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID,
+       WMI_ROAM_CONFIGURE_MAWC_CMDID,
+       WMI_ROAM_SET_MBO_PARAM_CMDID,
+       WMI_ROAM_PER_CONFIG_CMDID,
+       WMI_ROAM_BTM_CONFIG_CMDID,
+       WMI_ENABLE_FILS_CMDID,
+       WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_GRP_OFL_SCAN),
+       WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+       WMI_OFL_SCAN_PERIOD,
+       WMI_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_GRP_P2P),
+       WMI_P2P_DEV_SET_DISCOVERABILITY,
+       WMI_P2P_GO_SET_BEACON_IE,
+       WMI_P2P_GO_SET_PROBE_RESP_IE,
+       WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+       WMI_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+       WMI_P2P_DISC_OFFLOAD_APPIE_CMDID,
+       WMI_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+       WMI_P2P_SET_OPPPS_PARAM_CMDID,
+       WMI_P2P_LISTEN_OFFLOAD_START_CMDID,
+       WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID,
+       WMI_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+       WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+       WMI_AP_PS_EGAP_PARAM_CMDID,
+       WMI_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_GRP_RATE_CTRL),
+       WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_GRP_PROFILE),
+       WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+       WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+       WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+       WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+       WMI_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+       WMI_PDEV_RESUME_CMDID,
+       WMI_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_BCN_FILTER),
+       WMI_RMV_BCN_FILTER_CMDID,
+       WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_GRP_WOW),
+       WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+       WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+       WMI_WOW_ENABLE_CMDID,
+       WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+       WMI_WOW_IOAC_ADD_KEEPALIVE_CMDID,
+       WMI_WOW_IOAC_DEL_KEEPALIVE_CMDID,
+       WMI_WOW_IOAC_ADD_WAKE_PATTERN_CMDID,
+       WMI_WOW_IOAC_DEL_WAKE_PATTERN_CMDID,
+       WMI_D0_WOW_ENABLE_DISABLE_CMDID,
+       WMI_EXTWOW_ENABLE_CMDID,
+       WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+       WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+       WMI_WOW_ENABLE_ICMPV6_NA_FLT_CMDID,
+       WMI_WOW_UDP_SVC_OFLD_CMDID,
+       WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMDID,
+       WMI_WOW_SET_ACTION_WAKE_UP_CMDID,
+       WMI_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_GRP_RTT),
+       WMI_RTT_TSF_CMDID,
+       WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_TLV_CMD(WMI_GRP_SPECTRAL),
+       WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+       WMI_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_GRP_STATS),
+       WMI_MCC_SCHED_TRAFFIC_STATS_CMDID,
+       WMI_REQUEST_STATS_EXT_CMDID,
+       WMI_REQUEST_LINK_STATS_CMDID,
+       WMI_START_LINK_STATS_CMDID,
+       WMI_CLEAR_LINK_STATS_CMDID,
+       WMI_GET_FW_MEM_DUMP_CMDID,
+       WMI_DEBUG_MESG_FLUSH_CMDID,
+       WMI_DIAG_EVENT_LOG_CONFIG_CMDID,
+       WMI_REQUEST_WLAN_STATS_CMDID,
+       WMI_REQUEST_RCPI_CMDID,
+       WMI_REQUEST_PEER_STATS_INFO_CMDID,
+       WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+       WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
+       WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+       WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+       WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+       WMI_APFIND_CMDID,
+       WMI_PASSPOINT_LIST_CONFIG_CMDID,
+       WMI_NLO_CONFIGURE_MAWC_CMDID,
+       WMI_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+       WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+       WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+       WMI_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+       WMI_CHATTER_ADD_COALESCING_FILTER_CMDID,
+       WMI_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+       WMI_CHATTER_COALESCING_QUERY_CMDID,
+       WMI_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_GRP_TID_ADDBA),
+       WMI_PEER_TID_DELBA_CMDID,
+       WMI_STA_DTIM_PS_METHOD_CMDID,
+       WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+       WMI_STA_KEEPALIVE_CMDID,
+       WMI_BA_REQ_SSN_CMDID,
+       WMI_ECHO_CMDID = WMI_TLV_CMD(WMI_GRP_MISC),
+       WMI_PDEV_UTF_CMDID,
+       WMI_DBGLOG_CFG_CMDID,
+       WMI_PDEV_QVIT_CMDID,
+       WMI_PDEV_FTM_INTG_CMDID,
+       WMI_VDEV_SET_KEEPALIVE_CMDID,
+       WMI_VDEV_GET_KEEPALIVE_CMDID,
+       WMI_FORCE_FW_HANG_CMDID,
+       WMI_SET_MCASTBCAST_FILTER_CMDID,
+       WMI_THERMAL_MGMT_CMDID,
+       WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+       WMI_TPC_CHAINMASK_CONFIG_CMDID,
+       WMI_SET_ANTENNA_DIVERSITY_CMDID,
+       WMI_OCB_SET_SCHED_CMDID,
+       WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID,
+       WMI_LRO_CONFIG_CMDID,
+       WMI_TRANSFER_DATA_TO_FLASH_CMDID,
+       WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID,
+       WMI_VDEV_WISA_CMDID,
+       WMI_DBGLOG_TIME_STAMP_SYNC_CMDID,
+       WMI_SET_MULTIPLE_MCAST_FILTER_CMDID,
+       WMI_READ_DATA_FROM_FLASH_CMDID,
+       WMI_THERM_THROT_SET_CONF_CMDID,
+       WMI_RUNTIME_DPD_RECAL_CMDID,
+       WMI_GET_TPC_POWER_CMDID,
+       WMI_IDLE_TRIGGER_MONITOR_CMDID,
+       WMI_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_GPIO),
+       WMI_GPIO_OUTPUT_CMDID,
+       WMI_TXBF_CMDID,
+       WMI_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_FWTEST),
+       WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+       WMI_UNIT_TEST_CMDID,
+       WMI_FWTEST_CMDID,
+       WMI_QBOOST_CFG_CMDID,
+       WMI_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_TDLS),
+       WMI_TDLS_PEER_UPDATE_CMDID,
+       WMI_TDLS_SET_OFFCHAN_MODE_CMDID,
+       WMI_RESMGR_ADAPTIVE_OCS_EN_DIS_CMDID = WMI_TLV_CMD(WMI_GRP_RESMGR),
+       WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+       WMI_RESMGR_SET_CHAN_LATENCY_CMDID,
+       WMI_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+       WMI_STA_SMPS_PARAM_CMDID,
+       WMI_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_WLAN_HB),
+       WMI_HB_SET_TCP_PARAMS_CMDID,
+       WMI_HB_SET_TCP_PKT_FILTER_CMDID,
+       WMI_HB_SET_UDP_PARAMS_CMDID,
+       WMI_HB_SET_UDP_PKT_FILTER_CMDID,
+       WMI_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_RMC),
+       WMI_RMC_SET_ACTION_PERIOD_CMDID,
+       WMI_RMC_CONFIG_CMDID,
+       WMI_RMC_SET_MANUAL_LEADER_CMDID,
+       WMI_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_MHF_OFL),
+       WMI_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+       WMI_BATCH_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+       WMI_BATCH_SCAN_DISABLE_CMDID,
+       WMI_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+       WMI_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_OEM),
+       WMI_OEM_REQUEST_CMDID,
+       WMI_LPI_OEM_REQ_CMDID,
+       WMI_NAN_CMDID = WMI_TLV_CMD(WMI_GRP_NAN),
+       WMI_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_COEX),
+       WMI_CHAN_AVOID_UPDATE_CMDID,
+       WMI_COEX_CONFIG_CMDID,
+       WMI_CHAN_AVOID_RPT_ALLOW_CMDID,
+       WMI_COEX_GET_ANTENNA_ISOLATION_CMDID,
+       WMI_SAR_LIMITS_CMDID,
+       WMI_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_OBSS_OFL),
+       WMI_OBSS_SCAN_DISABLE_CMDID,
+       WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID,
+       WMI_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_LPI),
+       WMI_LPI_START_SCAN_CMDID,
+       WMI_LPI_STOP_SCAN_CMDID,
+       WMI_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+       WMI_EXTSCAN_STOP_CMDID,
+       WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+       WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+       WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+       WMI_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+       WMI_EXTSCAN_SET_CAPABILITIES_CMDID,
+       WMI_EXTSCAN_GET_CAPABILITIES_CMDID,
+       WMI_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMDID,
+       WMI_EXTSCAN_CONFIGURE_MAWC_CMDID,
+       WMI_SET_DHCP_SERVER_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_DHCP_OFL),
+       WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_GRP_IPA),
+       WMI_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+       WMI_MDNS_SET_FQDN_CMDID,
+       WMI_MDNS_SET_RESPONSE_CMDID,
+       WMI_MDNS_GET_STATS_CMDID,
+       WMI_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+       WMI_SAP_SET_BLACKLIST_PARAM_CMDID,
+       WMI_OCB_SET_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_OCB),
+       WMI_OCB_SET_UTC_TIME_CMDID,
+       WMI_OCB_START_TIMING_ADVERT_CMDID,
+       WMI_OCB_STOP_TIMING_ADVERT_CMDID,
+       WMI_OCB_GET_TSF_TIMER_CMDID,
+       WMI_DCC_GET_STATS_CMDID,
+       WMI_DCC_CLEAR_STATS_CMDID,
+       WMI_DCC_UPDATE_NDL_CMDID,
+       WMI_SOC_SET_PCL_CMDID = WMI_TLV_CMD(WMI_GRP_SOC),
+       WMI_SOC_SET_HW_MODE_CMDID,
+       WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID,
+       WMI_SOC_SET_ANTENNA_MODE_CMDID,
+       WMI_PACKET_FILTER_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_PKT_FILTER),
+       WMI_PACKET_FILTER_ENABLE_CMDID,
+       WMI_MAWC_SENSOR_REPORT_IND_CMDID = WMI_TLV_CMD(WMI_GRP_MAWC),
+       WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID = WMI_TLV_CMD(WMI_GRP_PMF_OFFLOAD),
+       WMI_BPF_GET_CAPABILITY_CMDID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+       WMI_BPF_GET_VDEV_STATS_CMDID,
+       WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID,
+       WMI_BPF_DEL_VDEV_INSTRUCTIONS_CMDID,
+       WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID,
+       WMI_MNT_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_MONITOR),
+       WMI_SET_CURRENT_COUNTRY_CMDID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+       WMI_11D_SCAN_START_CMDID,
+       WMI_11D_SCAN_STOP_CMDID,
+       WMI_SET_INIT_COUNTRY_CMDID,
+       WMI_NDI_GET_CAP_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+       WMI_NDP_INITIATOR_REQ_CMDID,
+       WMI_NDP_RESPONDER_REQ_CMDID,
+       WMI_NDP_END_REQ_CMDID,
+       WMI_HW_DATA_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_HW_DATA_FILTER),
+       WMI_TWT_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_TWT),
+       WMI_TWT_DISABLE_CMDID,
+       WMI_TWT_ADD_DIALOG_CMDID,
+       WMI_TWT_DEL_DIALOG_CMDID,
+       WMI_TWT_PAUSE_DIALOG_CMDID,
+       WMI_TWT_RESUME_DIALOG_CMDID,
+       WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
+                               WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
+       WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+};
+
+/*
+ * WMI TLV event IDs.  Same scheme as the command IDs: the first event of
+ * each group is anchored at the group base and the rest follow by
+ * implicit enum increment, so entry order within a group is significant.
+ * WMI_TLV_CMD() and WMI_EVT_GRP_START_ID() expand to the same value, so
+ * both spellings of the anchor are equivalent.
+ */
+enum wmi_tlv_event_id {
+       WMI_SERVICE_READY_EVENTID = 0x1,
+       WMI_READY_EVENTID,
+       WMI_SERVICE_AVAILABLE_EVENTID,
+       WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+       WMI_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_CMD(WMI_GRP_PDEV),
+       WMI_CHAN_INFO_EVENTID,
+       WMI_PHYERR_EVENTID,
+       WMI_PDEV_DUMP_EVENTID,
+       WMI_TX_PAUSE_EVENTID,
+       WMI_DFS_RADAR_EVENTID,
+       WMI_PDEV_L1SS_TRACK_EVENTID,
+       WMI_PDEV_TEMPERATURE_EVENTID,
+       WMI_SERVICE_READY_EXT_EVENTID,
+       WMI_PDEV_FIPS_EVENTID,
+       WMI_PDEV_CHANNEL_HOPPING_EVENTID,
+       WMI_PDEV_ANI_CCK_LEVEL_EVENTID,
+       WMI_PDEV_ANI_OFDM_LEVEL_EVENTID,
+       WMI_PDEV_TPC_EVENTID,
+       WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+       WMI_PDEV_SET_HW_MODE_RESP_EVENTID,
+       WMI_PDEV_HW_MODE_TRANSITION_EVENTID,
+       WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID,
+       WMI_PDEV_ANTDIV_STATUS_EVENTID,
+       WMI_PDEV_CHIP_POWER_STATS_EVENTID,
+       WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID,
+       WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID,
+       WMI_PDEV_CHECK_CAL_VERSION_EVENTID,
+       WMI_PDEV_DIV_RSSI_ANTID_EVENTID,
+       WMI_PDEV_BSS_CHAN_INFO_EVENTID,
+       WMI_PDEV_UPDATE_CTLTABLE_EVENTID,
+       WMI_PDEV_DMA_RING_CFG_RSP_EVENTID,
+       WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID,
+       WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID,
+       WMI_PDEV_CSC_SWITCH_COUNT_STATUS_EVENTID,
+       WMI_PDEV_COLD_BOOT_CAL_DATA_EVENTID,
+       WMI_PDEV_RAP_INFO_EVENTID,
+       WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
+       WMI_SERVICE_READY_EXT2_EVENTID,
+       WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
+       WMI_VDEV_STOPPED_EVENTID,
+       WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+       WMI_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+       WMI_VDEV_TSF_REPORT_EVENTID,
+       WMI_VDEV_DELETE_RESP_EVENTID,
+       WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID,
+       WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID,
+       WMI_PEER_STA_KICKOUT_EVENTID = WMI_TLV_CMD(WMI_GRP_PEER),
+       WMI_PEER_INFO_EVENTID,
+       WMI_PEER_TX_FAIL_CNT_THR_EVENTID,
+       WMI_PEER_ESTIMATED_LINKSPEED_EVENTID,
+       WMI_PEER_STATE_EVENTID,
+       WMI_PEER_ASSOC_CONF_EVENTID,
+       WMI_PEER_DELETE_RESP_EVENTID,
+       WMI_PEER_RATECODE_LIST_EVENTID,
+       WMI_WDS_PEER_EVENTID,
+       WMI_PEER_STA_PS_STATECHG_EVENTID,
+       WMI_PEER_ANTDIV_INFO_EVENTID,
+       WMI_PEER_RESERVED0_EVENTID,
+       WMI_PEER_RESERVED1_EVENTID,
+       WMI_PEER_RESERVED2_EVENTID,
+       WMI_PEER_RESERVED3_EVENTID,
+       WMI_PEER_RESERVED4_EVENTID,
+       WMI_PEER_RESERVED5_EVENTID,
+       WMI_PEER_RESERVED6_EVENTID,
+       WMI_PEER_RESERVED7_EVENTID,
+       WMI_PEER_RESERVED8_EVENTID,
+       WMI_PEER_RESERVED9_EVENTID,
+       WMI_PEER_RESERVED10_EVENTID,
+       WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+       WMI_PEER_TX_PN_RESPONSE_EVENTID,
+       WMI_PEER_CFR_CAPTURE_EVENTID,
+       WMI_PEER_CREATE_CONF_EVENTID,
+       WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT),
+       WMI_HOST_SWBA_EVENTID,
+       WMI_TBTTOFFSET_UPDATE_EVENTID,
+       WMI_OFFLOAD_BCN_TX_STATUS_EVENTID,
+       WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+       WMI_MGMT_TX_COMPLETION_EVENTID,
+       WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
+       WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
+       WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID,
+       WMI_HOST_FILS_DISCOVERY_EVENTID,
+       WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+       WMI_TX_ADDBA_COMPLETE_EVENTID,
+       WMI_BA_RSP_SSN_EVENTID,
+       WMI_AGGR_STATE_TRIG_EVENTID,
+       WMI_ROAM_EVENTID = WMI_TLV_CMD(WMI_GRP_ROAM),
+       WMI_PROFILE_MATCH,
+       WMI_ROAM_SYNCH_EVENTID,
+       WMI_P2P_DISC_EVENTID = WMI_TLV_CMD(WMI_GRP_P2P),
+       WMI_P2P_NOA_EVENTID,
+       WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID,
+       WMI_AP_PS_EGAP_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+       WMI_PDEV_RESUME_EVENTID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+       WMI_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_CMD(WMI_GRP_WOW),
+       WMI_D0_WOW_DISABLE_ACK_EVENTID,
+       WMI_WOW_INITIAL_WAKEUP_EVENTID,
+       WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_CMD(WMI_GRP_RTT),
+       WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+       WMI_RTT_ERROR_REPORT_EVENTID,
+       WMI_STATS_EXT_EVENTID = WMI_TLV_CMD(WMI_GRP_STATS),
+       WMI_IFACE_LINK_STATS_EVENTID,
+       WMI_PEER_LINK_STATS_EVENTID,
+       WMI_RADIO_LINK_STATS_EVENTID,
+       WMI_UPDATE_FW_MEM_DUMP_EVENTID,
+       WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
+       WMI_INST_RSSI_STATS_EVENTID,
+       WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+       WMI_REPORT_STATS_EVENTID,
+       WMI_UPDATE_RCPI_EVENTID,
+       WMI_PEER_STATS_INFO_EVENTID,
+       WMI_RADIO_CHAN_STATS_EVENTID,
+       WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+       WMI_NLO_SCAN_COMPLETE_EVENTID,
+       WMI_APFIND_EVENTID,
+       WMI_PASSPOINT_MATCH_EVENTID,
+       WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+       WMI_GTK_REKEY_FAIL_EVENTID,
+       WMI_CSA_HANDLING_EVENTID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+       WMI_CHATTER_PC_QUERY_EVENTID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+       WMI_PDEV_DFS_RADAR_DETECTION_EVENTID = WMI_TLV_CMD(WMI_GRP_DFS),
+       WMI_VDEV_DFS_CAC_COMPLETE_EVENTID,
+       WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID,
+       WMI_ECHO_EVENTID = WMI_TLV_CMD(WMI_GRP_MISC),
+       WMI_PDEV_UTF_EVENTID,
+       WMI_DEBUG_MESG_EVENTID,
+       WMI_UPDATE_STATS_EVENTID,
+       WMI_DEBUG_PRINT_EVENTID,
+       WMI_DCS_INTERFERENCE_EVENTID,
+       WMI_PDEV_QVIT_EVENTID,
+       WMI_WLAN_PROFILE_DATA_EVENTID,
+       WMI_PDEV_FTM_INTG_EVENTID,
+       WMI_WLAN_FREQ_AVOID_EVENTID,
+       WMI_VDEV_GET_KEEPALIVE_EVENTID,
+       WMI_THERMAL_MGMT_EVENTID,
+       WMI_DIAG_DATA_CONTAINER_EVENTID,
+       WMI_HOST_AUTO_SHUTDOWN_EVENTID,
+       WMI_UPDATE_WHAL_MIB_STATS_EVENTID,
+       WMI_UPDATE_VDEV_RATE_STATS_EVENTID,
+       WMI_DIAG_EVENTID,
+       WMI_OCB_SET_SCHED_EVENTID,
+       WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID,
+       WMI_RSSI_BREACH_EVENTID,
+       WMI_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENTID,
+       WMI_PDEV_UTF_SCPC_EVENTID,
+       WMI_READ_DATA_FROM_FLASH_EVENTID,
+       WMI_REPORT_RX_AGGR_FAILURE_EVENTID,
+       WMI_PKGID_EVENTID,
+       WMI_GPIO_INPUT_EVENTID = WMI_TLV_CMD(WMI_GRP_GPIO),
+       WMI_UPLOADH_EVENTID,
+       WMI_CAPTUREH_EVENTID,
+       WMI_RFKILL_STATE_CHANGE_EVENTID,
+       WMI_TDLS_PEER_EVENTID = WMI_TLV_CMD(WMI_GRP_TDLS),
+       WMI_STA_SMPS_FORCE_MODE_COMPL_EVENTID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+       WMI_BATCH_SCAN_ENABLED_EVENTID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+       WMI_BATCH_SCAN_RESULT_EVENTID,
+       WMI_OEM_CAPABILITY_EVENTID = WMI_TLV_CMD(WMI_GRP_OEM),
+       WMI_OEM_MEASUREMENT_REPORT_EVENTID,
+       WMI_OEM_ERROR_REPORT_EVENTID,
+       WMI_OEM_RESPONSE_EVENTID,
+       WMI_NAN_EVENTID = WMI_TLV_CMD(WMI_GRP_NAN),
+       WMI_NAN_DISC_IFACE_CREATED_EVENTID,
+       WMI_NAN_DISC_IFACE_DELETED_EVENTID,
+       WMI_NAN_STARTED_CLUSTER_EVENTID,
+       WMI_NAN_JOINED_CLUSTER_EVENTID,
+       WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID = WMI_TLV_CMD(WMI_GRP_COEX),
+       WMI_LPI_RESULT_EVENTID = WMI_TLV_CMD(WMI_GRP_LPI),
+       WMI_LPI_STATUS_EVENTID,
+       WMI_LPI_HANDOFF_EVENTID,
+       WMI_EXTSCAN_START_STOP_EVENTID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+       WMI_EXTSCAN_OPERATION_EVENTID,
+       WMI_EXTSCAN_TABLE_USAGE_EVENTID,
+       WMI_EXTSCAN_CACHED_RESULTS_EVENTID,
+       WMI_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+       WMI_EXTSCAN_HOTLIST_MATCH_EVENTID,
+       WMI_EXTSCAN_CAPABILITIES_EVENTID,
+       WMI_EXTSCAN_HOTLIST_SSID_MATCH_EVENTID,
+       WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+       WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+       WMI_SAP_OFL_DEL_STA_EVENTID,
+       WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID =
+               WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL),
+       WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
+       WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
+       WMI_DCC_GET_STATS_RESP_EVENTID,
+       WMI_DCC_UPDATE_NDL_RESP_EVENTID,
+       WMI_DCC_STATS_EVENTID,
+       WMI_SOC_SET_HW_MODE_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_SOC),
+       WMI_SOC_HW_MODE_TRANSITION_EVENTID,
+       WMI_SOC_SET_DUAL_MAC_CONFIG_RESP_EVENTID,
+       WMI_MAWC_ENABLE_SENSOR_EVENTID = WMI_TLV_CMD(WMI_GRP_MAWC),
+       /* NOTE: "CAPABILIY" spelling kept as-is; it matches the upstream name. */
+       WMI_BPF_CAPABILIY_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+       WMI_BPF_VDEV_STATS_INFO_EVENTID,
+       WMI_RMC_NEW_LEADER_EVENTID = WMI_TLV_CMD(WMI_GRP_RMC),
+       WMI_REG_CHAN_LIST_CC_EVENTID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+       WMI_11D_NEW_COUNTRY_EVENTID,
+       WMI_REG_CHAN_LIST_CC_EXT_EVENTID,
+       WMI_NDI_CAP_RSP_EVENTID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+       WMI_NDP_INITIATOR_RSP_EVENTID,
+       WMI_NDP_RESPONDER_RSP_EVENTID,
+       WMI_NDP_END_RSP_EVENTID,
+       WMI_NDP_INDICATION_EVENTID,
+       WMI_NDP_CONFIRM_EVENTID,
+       WMI_NDP_END_INDICATION_EVENTID,
+
+       WMI_TWT_ENABLE_EVENTID = WMI_TLV_CMD(WMI_GRP_TWT),
+       WMI_TWT_DISABLE_EVENTID,
+       WMI_TWT_ADD_DIALOG_EVENTID,
+       WMI_TWT_DEL_DIALOG_EVENTID,
+       WMI_TWT_PAUSE_DIALOG_EVENTID,
+       WMI_TWT_RESUME_DIALOG_EVENTID,
+};
+
+/*
+ * Per-pdev parameter IDs for WMI_PDEV_SET_PARAM_CMDID.  Values start at
+ * 0x1 and follow by implicit enum increment, so entry order matters.
+ * The explicitly-valued entries at the end jump ahead; the intervening
+ * ids are left unassigned here.
+ */
+enum wmi_tlv_pdev_param {
+       WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+       WMI_PDEV_PARAM_RX_CHAIN_MASK,
+       WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+       WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+       WMI_PDEV_PARAM_TXPOWER_SCALE,
+       WMI_PDEV_PARAM_BEACON_GEN_MODE,
+       WMI_PDEV_PARAM_BEACON_TX_MODE,
+       WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+       WMI_PDEV_PARAM_PROTECTION_MODE,
+       WMI_PDEV_PARAM_DYNAMIC_BW,
+       WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+       WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+       WMI_PDEV_PARAM_STA_KICKOUT_TH,
+       WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+       WMI_PDEV_PARAM_LTR_ENABLE,
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+       WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+       WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+       WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+       WMI_PDEV_PARAM_L1SS_ENABLE,
+       WMI_PDEV_PARAM_DSLEEP_ENABLE,
+       WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+       WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+       WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+       WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+       WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+       WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+       WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+       WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+       WMI_PDEV_PARAM_PMF_QOS,
+       WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+       WMI_PDEV_PARAM_DCS,
+       WMI_PDEV_PARAM_ANI_ENABLE,
+       WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+       WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+       WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+       WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+       WMI_PDEV_PARAM_DYNTXCHAIN,
+       WMI_PDEV_PARAM_PROXY_STA,
+       WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+       WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+       WMI_PDEV_PARAM_RFKILL_ENABLE,
+       WMI_PDEV_PARAM_BURST_DUR,
+       WMI_PDEV_PARAM_BURST_ENABLE,
+       WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
+       WMI_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+       WMI_PDEV_PARAM_L1SS_TRACK,
+       WMI_PDEV_PARAM_HYST_EN,
+       WMI_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+       WMI_PDEV_PARAM_LED_SYS_STATE,
+       WMI_PDEV_PARAM_LED_ENABLE,
+       WMI_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+       WMI_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+       WMI_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+       WMI_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+       WMI_PDEV_PARAM_CTS_CBW,
+       WMI_PDEV_PARAM_WNTS_CONFIG,
+       WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_ENABLE,
+       WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_MIN_SLEEP_SLOP,
+       WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_INC_DEC_STEP,
+       WMI_PDEV_PARAM_EARLY_RX_FIX_SLEEP_SLOP,
+       WMI_PDEV_PARAM_BMISS_BASED_ADAPTIVE_BTO_ENABLE,
+       WMI_PDEV_PARAM_BMISS_BTO_MIN_BCN_TIMEOUT,
+       WMI_PDEV_PARAM_BMISS_BTO_INC_DEC_STEP,
+       WMI_PDEV_PARAM_BTO_FIX_BCN_TIMEOUT,
+       WMI_PDEV_PARAM_CE_BASED_ADAPTIVE_BTO_ENABLE,
+       WMI_PDEV_PARAM_CE_BTO_COMBO_CE_VALUE,
+       WMI_PDEV_PARAM_TX_CHAIN_MASK_2G,
+       WMI_PDEV_PARAM_RX_CHAIN_MASK_2G,
+       WMI_PDEV_PARAM_TX_CHAIN_MASK_5G,
+       WMI_PDEV_PARAM_RX_CHAIN_MASK_5G,
+       WMI_PDEV_PARAM_TX_CHAIN_MASK_CCK,
+       WMI_PDEV_PARAM_TX_CHAIN_MASK_1SS,
+       WMI_PDEV_PARAM_CTS2SELF_FOR_P2P_GO_CONFIG,
+       WMI_PDEV_PARAM_TXPOWER_DECR_DB,
+       WMI_PDEV_PARAM_AGGR_BURST,
+       WMI_PDEV_PARAM_RX_DECAP_MODE,
+       WMI_PDEV_PARAM_FAST_CHANNEL_RESET,
+       WMI_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+       WMI_PDEV_PARAM_ANTENNA_GAIN,
+       WMI_PDEV_PARAM_RX_FILTER,
+       WMI_PDEV_SET_MCAST_TO_UCAST_TID,
+       WMI_PDEV_PARAM_PROXY_STA_MODE,
+       WMI_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+       WMI_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+       WMI_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+       WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+       WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+       WMI_PDEV_PARAM_BLOCK_INTERBSS,
+       WMI_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+       WMI_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+       WMI_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+       WMI_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+       WMI_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+       WMI_PDEV_PARAM_SET_BURST_MODE_CMDID,
+       WMI_PDEV_PARAM_EN_STATS,
+       WMI_PDEV_PARAM_MU_GROUP_POLICY,
+       WMI_PDEV_PARAM_NOISE_DETECTION,
+       WMI_PDEV_PARAM_NOISE_THRESHOLD,
+       WMI_PDEV_PARAM_DPD_ENABLE,
+       WMI_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+       WMI_PDEV_PARAM_ATF_STRICT_SCH,
+       WMI_PDEV_PARAM_ATF_SCHED_DURATION,
+       WMI_PDEV_PARAM_ANT_PLZN,
+       WMI_PDEV_PARAM_MGMT_RETRY_LIMIT,
+       WMI_PDEV_PARAM_SENSITIVITY_LEVEL,
+       WMI_PDEV_PARAM_SIGNED_TXPOWER_2G,
+       WMI_PDEV_PARAM_SIGNED_TXPOWER_5G,
+       WMI_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+       WMI_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+       WMI_PDEV_PARAM_CCA_THRESHOLD,
+       WMI_PDEV_PARAM_RTS_FIXED_RATE,
+       WMI_PDEV_PARAM_PDEV_RESET,
+       WMI_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+       WMI_PDEV_PARAM_ARP_DBG_SRCADDR,
+       WMI_PDEV_PARAM_ARP_DBG_DSTADDR,
+       WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+       WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+       WMI_PDEV_PARAM_CUST_TXPOWER_SCALE,
+       WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+       WMI_PDEV_PARAM_CTRL_RETRY_LIMIT,
+       WMI_PDEV_PARAM_PROPAGATION_DELAY,
+       WMI_PDEV_PARAM_ENA_ANT_DIV,
+       WMI_PDEV_PARAM_FORCE_CHAIN_ANT,
+       WMI_PDEV_PARAM_ANT_DIV_SELFTEST,
+       WMI_PDEV_PARAM_ANT_DIV_SELFTEST_INTVL,
+       WMI_PDEV_PARAM_STATS_OBSERVATION_PERIOD,
+       WMI_PDEV_PARAM_TX_PPDU_DELAY_BIN_SIZE_MS,
+       WMI_PDEV_PARAM_TX_PPDU_DELAY_ARRAY_LEN,
+       WMI_PDEV_PARAM_TX_MPDU_AGGR_ARRAY_LEN,
+       WMI_PDEV_PARAM_RX_MPDU_AGGR_ARRAY_LEN,
+       WMI_PDEV_PARAM_TX_SCH_DELAY,
+       WMI_PDEV_PARAM_ENABLE_RTS_SIFS_BURSTING,
+       WMI_PDEV_PARAM_MAX_MPDUS_IN_AMPDU,
+       WMI_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
+       WMI_PDEV_PARAM_FAST_PWR_TRANSITION,
+       WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
+       WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
+       WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+       /* Explicit values below skip over ids not listed here. */
+       WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD = 0xbc,
+       WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC = 0xbe,
+       WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT = 0xc6,
+};
+
+/*
+ * Per-vdev (virtual device) WMI parameter IDs.
+ *
+ * Values are implicit and sequential starting at 0x1, so the order of
+ * the entries below is part of the firmware interface: do not reorder,
+ * insert, or remove enumerators.  The explicit initializers further
+ * down mark jumps in the ID numbering (unlisted IDs are simply not
+ * defined here).
+ */
+enum wmi_tlv_vdev_param {
+	WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+	WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	WMI_VDEV_PARAM_BEACON_INTERVAL,
+	WMI_VDEV_PARAM_LISTEN_INTERVAL,
+	WMI_VDEV_PARAM_MULTICAST_RATE,
+	WMI_VDEV_PARAM_MGMT_TX_RATE,
+	WMI_VDEV_PARAM_SLOT_TIME,
+	WMI_VDEV_PARAM_PREAMBLE,
+	WMI_VDEV_PARAM_SWBA_TIME,
+	WMI_VDEV_STATS_UPDATE_PERIOD,
+	WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+	WMI_VDEV_HOST_SWBA_INTERVAL,
+	WMI_VDEV_PARAM_DTIM_PERIOD,
+	WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	WMI_VDEV_PARAM_WDS,
+	WMI_VDEV_PARAM_ATIM_WINDOW,
+	WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+	WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+	WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+	WMI_VDEV_PARAM_FEATURE_WMM,
+	WMI_VDEV_PARAM_CHWIDTH,
+	WMI_VDEV_PARAM_CHEXTOFFSET,
+	WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+	WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+	WMI_VDEV_PARAM_MGMT_RATE,
+	WMI_VDEV_PARAM_PROTECTION_MODE,
+	WMI_VDEV_PARAM_FIXED_RATE,
+	WMI_VDEV_PARAM_SGI,
+	WMI_VDEV_PARAM_LDPC,
+	WMI_VDEV_PARAM_TX_STBC,
+	WMI_VDEV_PARAM_RX_STBC,
+	WMI_VDEV_PARAM_INTRA_BSS_FWD,
+	WMI_VDEV_PARAM_DEF_KEYID,
+	WMI_VDEV_PARAM_NSS,
+	WMI_VDEV_PARAM_BCAST_DATA_RATE,
+	WMI_VDEV_PARAM_MCAST_DATA_RATE,
+	WMI_VDEV_PARAM_MCAST_INDICATE,
+	WMI_VDEV_PARAM_DHCP_INDICATE,
+	WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+	WMI_VDEV_PARAM_ENABLE_RTSCTS,
+	WMI_VDEV_PARAM_TXBF,
+	WMI_VDEV_PARAM_PACKET_POWERSAVE,
+	WMI_VDEV_PARAM_DROP_UNENCRY,
+	WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+	WMI_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+	WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+	WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+	WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+	WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+	WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+	WMI_VDEV_PARAM_TX_PWRLIMIT,
+	WMI_VDEV_PARAM_SNR_NUM_FOR_CAL,
+	WMI_VDEV_PARAM_ROAM_FW_OFFLOAD,
+	WMI_VDEV_PARAM_ENABLE_RMC,
+	WMI_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+	WMI_VDEV_PARAM_MAX_RATE,
+	WMI_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+	WMI_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+	WMI_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+	WMI_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+	WMI_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+	WMI_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+	WMI_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+	WMI_VDEV_PARAM_INACTIVITY_CNT,
+	WMI_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+	WMI_VDEV_PARAM_DTIM_POLICY,
+	WMI_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+	WMI_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+	WMI_VDEV_PARAM_RX_LEAK_WINDOW,
+	WMI_VDEV_PARAM_STATS_AVG_FACTOR,
+	WMI_VDEV_PARAM_DISCONNECT_TH,
+	WMI_VDEV_PARAM_RTSCTS_RATE,
+	WMI_VDEV_PARAM_MCC_RTSCTS_PROTECTION_ENABLE,
+	WMI_VDEV_PARAM_MCC_BROADCAST_PROBE_ENABLE,
+	WMI_VDEV_PARAM_TXPOWER_SCALE,
+	WMI_VDEV_PARAM_TXPOWER_SCALE_DECR_DB,
+	WMI_VDEV_PARAM_MCAST2UCAST_SET,
+	WMI_VDEV_PARAM_RC_NUM_RETRIES,
+	WMI_VDEV_PARAM_CABQ_MAXDUR,
+	WMI_VDEV_PARAM_MFPTEST_SET,
+	WMI_VDEV_PARAM_RTS_FIXED_RATE,
+	WMI_VDEV_PARAM_VHT_SGIMASK,
+	WMI_VDEV_PARAM_VHT80_RATEMASK,
+	WMI_VDEV_PARAM_PROXY_STA,
+	WMI_VDEV_PARAM_VIRTUAL_CELL_MODE,
+	WMI_VDEV_PARAM_RX_DECAP_TYPE,
+	WMI_VDEV_PARAM_BW_NSS_RATEMASK,
+	WMI_VDEV_PARAM_SENSOR_AP,
+	WMI_VDEV_PARAM_BEACON_RATE,
+	WMI_VDEV_PARAM_DTIM_ENABLE_CTS,
+	WMI_VDEV_PARAM_STA_KICKOUT,
+	WMI_VDEV_PARAM_CAPABILITIES,
+	WMI_VDEV_PARAM_TSF_INCREMENT,
+	WMI_VDEV_PARAM_AMPDU_PER_AC,
+	WMI_VDEV_PARAM_RX_FILTER,
+	WMI_VDEV_PARAM_MGMT_TX_POWER,
+	WMI_VDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	WMI_VDEV_PARAM_AGG_SW_RETRY_TH,
+	WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+	WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+	WMI_VDEV_PARAM_HE_DCM,
+	WMI_VDEV_PARAM_HE_RANGE_EXT,
+	WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
+	WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+	/* Numbering jumps below; the intervening IDs are not defined here. */
+	WMI_VDEV_PARAM_HE_LTF = 0x74,
+	WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE = 0x7d,
+	WMI_VDEV_PARAM_BA_MODE = 0x7e,
+	WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80,
+	WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
+	WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
+	/* Separate high ID range starting at 0x8000. */
+	WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
+	WMI_VDEV_PARAM_BSS_COLOR,
+	WMI_VDEV_PARAM_SET_HEMU_MODE,
+	WMI_VDEV_PARAM_HEOPS_0_31 = 0x8003,
+};
+
+/*
+ * WMI peer capability flags.  Unlike most WMI parameter enums these
+ * are individual bit values intended to be OR'd together into a flag
+ * word; the bit assignments are part of the WMI interface and must
+ * not be renumbered.
+ *
+ * NOTE(review): 0x80000000 does not fit in a signed int, so the last
+ * enumerator relies on a compiler extension (standard C requires
+ * enumeration constants to be representable as int).
+ */
+enum wmi_tlv_peer_flags {
+	WMI_TLV_PEER_AUTH = 0x00000001,
+	WMI_TLV_PEER_QOS = 0x00000002,
+	WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
+	WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
+	WMI_TLV_PEER_APSD = 0x00000800,
+	WMI_TLV_PEER_HT = 0x00001000,
+	WMI_TLV_PEER_40MHZ = 0x00002000,
+	WMI_TLV_PEER_STBC = 0x00008000,
+	WMI_TLV_PEER_LDPC = 0x00010000,
+	WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
+	WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
+	WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
+	WMI_TLV_PEER_VHT = 0x02000000,
+	WMI_TLV_PEER_80MHZ = 0x04000000,
+	WMI_TLV_PEER_PMF = 0x08000000,
+	WMI_PEER_IS_P2P_CAPABLE = 0x20000000,
+	WMI_PEER_160MHZ		= 0x40000000,
+	WMI_PEER_SAFEMODE_EN	= 0x80000000,
+};
+
+/** Enum list of TLV Tags for each parameter structure type. */
+enum wmi_tlv_tag {
+       WMI_TAG_LAST_RESERVED = 15,
+       WMI_TAG_FIRST_ARRAY_ENUM,
+       WMI_TAG_ARRAY_UINT32 = WMI_TAG_FIRST_ARRAY_ENUM,
+       WMI_TAG_ARRAY_BYTE,
+       WMI_TAG_ARRAY_STRUCT,
+       WMI_TAG_ARRAY_FIXED_STRUCT,
+       WMI_TAG_LAST_ARRAY_ENUM = 31,
+       WMI_TAG_SERVICE_READY_EVENT,
+       WMI_TAG_HAL_REG_CAPABILITIES,
+       WMI_TAG_WLAN_HOST_MEM_REQ,
+       WMI_TAG_READY_EVENT,
+       WMI_TAG_SCAN_EVENT,
+       WMI_TAG_PDEV_TPC_CONFIG_EVENT,
+       WMI_TAG_CHAN_INFO_EVENT,
+       WMI_TAG_COMB_PHYERR_RX_HDR,
+       WMI_TAG_VDEV_START_RESPONSE_EVENT,
+       WMI_TAG_VDEV_STOPPED_EVENT,
+       WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+       WMI_TAG_PEER_STA_KICKOUT_EVENT,
+       WMI_TAG_MGMT_RX_HDR,
+       WMI_TAG_TBTT_OFFSET_EVENT,
+       WMI_TAG_TX_DELBA_COMPLETE_EVENT,
+       WMI_TAG_TX_ADDBA_COMPLETE_EVENT,
+       WMI_TAG_ROAM_EVENT,
+       WMI_TAG_WOW_EVENT_INFO,
+       WMI_TAG_WOW_EVENT_INFO_SECTION_BITMAP,
+       WMI_TAG_RTT_EVENT_HEADER,
+       WMI_TAG_RTT_ERROR_REPORT_EVENT,
+       WMI_TAG_RTT_MEAS_EVENT,
+       WMI_TAG_ECHO_EVENT,
+       WMI_TAG_FTM_INTG_EVENT,
+       WMI_TAG_VDEV_GET_KEEPALIVE_EVENT,
+       WMI_TAG_GPIO_INPUT_EVENT,
+       WMI_TAG_CSA_EVENT,
+       WMI_TAG_GTK_OFFLOAD_STATUS_EVENT,
+       WMI_TAG_IGTK_INFO,
+       WMI_TAG_DCS_INTERFERENCE_EVENT,
+       WMI_TAG_ATH_DCS_CW_INT,
+       WMI_TAG_WLAN_DCS_CW_INT = /* ALIAS */
+               WMI_TAG_ATH_DCS_CW_INT,
+       WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+       WMI_TAG_WLAN_DCS_IM_TGT_STATS_T = /* ALIAS */
+               WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+       WMI_TAG_WLAN_PROFILE_CTX_T,
+       WMI_TAG_WLAN_PROFILE_T,
+       WMI_TAG_PDEV_QVIT_EVENT,
+       WMI_TAG_HOST_SWBA_EVENT,
+       WMI_TAG_TIM_INFO,
+       WMI_TAG_P2P_NOA_INFO,
+       WMI_TAG_STATS_EVENT,
+       WMI_TAG_AVOID_FREQ_RANGES_EVENT,
+       WMI_TAG_AVOID_FREQ_RANGE_DESC,
+       WMI_TAG_GTK_REKEY_FAIL_EVENT,
+       WMI_TAG_INIT_CMD,
+       WMI_TAG_RESOURCE_CONFIG,
+       WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
+       WMI_TAG_START_SCAN_CMD,
+       WMI_TAG_STOP_SCAN_CMD,
+       WMI_TAG_SCAN_CHAN_LIST_CMD,
+       WMI_TAG_CHANNEL,
+       WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
+       WMI_TAG_PDEV_SET_PARAM_CMD,
+       WMI_TAG_PDEV_SET_WMM_PARAMS_CMD,
+       WMI_TAG_WMM_PARAMS,
+       WMI_TAG_PDEV_SET_QUIET_CMD,
+       WMI_TAG_VDEV_CREATE_CMD,
+       WMI_TAG_VDEV_DELETE_CMD,
+       WMI_TAG_VDEV_START_REQUEST_CMD,
+       WMI_TAG_P2P_NOA_DESCRIPTOR,
+       WMI_TAG_P2P_GO_SET_BEACON_IE,
+       WMI_TAG_GTK_OFFLOAD_CMD,
+       WMI_TAG_VDEV_UP_CMD,
+       WMI_TAG_VDEV_STOP_CMD,
+       WMI_TAG_VDEV_DOWN_CMD,
+       WMI_TAG_VDEV_SET_PARAM_CMD,
+       WMI_TAG_VDEV_INSTALL_KEY_CMD,
+       WMI_TAG_PEER_CREATE_CMD,
+       WMI_TAG_PEER_DELETE_CMD,
+       WMI_TAG_PEER_FLUSH_TIDS_CMD,
+       WMI_TAG_PEER_SET_PARAM_CMD,
+       WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
+       WMI_TAG_VHT_RATE_SET,
+       WMI_TAG_BCN_TMPL_CMD,
+       WMI_TAG_PRB_TMPL_CMD,
+       WMI_TAG_BCN_PRB_INFO,
+       WMI_TAG_PEER_TID_ADDBA_CMD,
+       WMI_TAG_PEER_TID_DELBA_CMD,
+       WMI_TAG_STA_POWERSAVE_MODE_CMD,
+       WMI_TAG_STA_POWERSAVE_PARAM_CMD,
+       WMI_TAG_STA_DTIM_PS_METHOD_CMD,
+       WMI_TAG_ROAM_SCAN_MODE,
+       WMI_TAG_ROAM_SCAN_RSSI_THRESHOLD,
+       WMI_TAG_ROAM_SCAN_PERIOD,
+       WMI_TAG_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+       WMI_TAG_PDEV_SUSPEND_CMD,
+       WMI_TAG_PDEV_RESUME_CMD,
+       WMI_TAG_ADD_BCN_FILTER_CMD,
+       WMI_TAG_RMV_BCN_FILTER_CMD,
+       WMI_TAG_WOW_ENABLE_CMD,
+       WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+       WMI_TAG_STA_UAPSD_AUTO_TRIG_CMD,
+       WMI_TAG_STA_UAPSD_AUTO_TRIG_PARAM,
+       WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+       WMI_TAG_ARP_OFFLOAD_TUPLE,
+       WMI_TAG_NS_OFFLOAD_TUPLE,
+       WMI_TAG_FTM_INTG_CMD,
+       WMI_TAG_STA_KEEPALIVE_CMD,
+       WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE,
+       WMI_TAG_P2P_SET_VENDOR_IE_DATA_CMD,
+       WMI_TAG_AP_PS_PEER_CMD,
+       WMI_TAG_PEER_RATE_RETRY_SCHED_CMD,
+       WMI_TAG_WLAN_PROFILE_TRIGGER_CMD,
+       WMI_TAG_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+       WMI_TAG_WLAN_PROFILE_GET_PROF_DATA_CMD,
+       WMI_TAG_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+       WMI_TAG_WOW_DEL_PATTERN_CMD,
+       WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+       WMI_TAG_RTT_MEASREQ_HEAD,
+       WMI_TAG_RTT_MEASREQ_BODY,
+       WMI_TAG_RTT_TSF_CMD,
+       WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
+       WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
+       WMI_TAG_REQUEST_STATS_CMD,
+       WMI_TAG_NLO_CONFIG_CMD,
+       WMI_TAG_NLO_CONFIGURED_PARAMETERS,
+       WMI_TAG_CSA_OFFLOAD_ENABLE_CMD,
+       WMI_TAG_CSA_OFFLOAD_CHANSWITCH_CMD,
+       WMI_TAG_CHATTER_SET_MODE_CMD,
+       WMI_TAG_ECHO_CMD,
+       WMI_TAG_VDEV_SET_KEEPALIVE_CMD,
+       WMI_TAG_VDEV_GET_KEEPALIVE_CMD,
+       WMI_TAG_FORCE_FW_HANG_CMD,
+       WMI_TAG_GPIO_CONFIG_CMD,
+       WMI_TAG_GPIO_OUTPUT_CMD,
+       WMI_TAG_PEER_ADD_WDS_ENTRY_CMD,
+       WMI_TAG_PEER_REMOVE_WDS_ENTRY_CMD,
+       WMI_TAG_BCN_TX_HDR,
+       WMI_TAG_BCN_SEND_FROM_HOST_CMD,
+       WMI_TAG_MGMT_TX_HDR,
+       WMI_TAG_ADDBA_CLEAR_RESP_CMD,
+       WMI_TAG_ADDBA_SEND_CMD,
+       WMI_TAG_DELBA_SEND_CMD,
+       WMI_TAG_ADDBA_SETRESPONSE_CMD,
+       WMI_TAG_SEND_SINGLEAMSDU_CMD,
+       WMI_TAG_PDEV_PKTLOG_ENABLE_CMD,
+       WMI_TAG_PDEV_PKTLOG_DISABLE_CMD,
+       WMI_TAG_PDEV_SET_HT_IE_CMD,
+       WMI_TAG_PDEV_SET_VHT_IE_CMD,
+       WMI_TAG_PDEV_SET_DSCP_TID_MAP_CMD,
+       WMI_TAG_PDEV_GREEN_AP_PS_ENABLE_CMD,
+       WMI_TAG_PDEV_GET_TPC_CONFIG_CMD,
+       WMI_TAG_PDEV_SET_BASE_MACADDR_CMD,
+       WMI_TAG_PEER_MCAST_GROUP_CMD,
+       WMI_TAG_ROAM_AP_PROFILE,
+       WMI_TAG_AP_PROFILE,
+       WMI_TAG_SCAN_SCH_PRIORITY_TABLE_CMD,
+       WMI_TAG_PDEV_DFS_ENABLE_CMD,
+       WMI_TAG_PDEV_DFS_DISABLE_CMD,
+       WMI_TAG_WOW_ADD_PATTERN_CMD,
+       WMI_TAG_WOW_BITMAP_PATTERN_T,
+       WMI_TAG_WOW_IPV4_SYNC_PATTERN_T,
+       WMI_TAG_WOW_IPV6_SYNC_PATTERN_T,
+       WMI_TAG_WOW_MAGIC_PATTERN_CMD,
+       WMI_TAG_SCAN_UPDATE_REQUEST_CMD,
+       WMI_TAG_CHATTER_PKT_COALESCING_FILTER,
+       WMI_TAG_CHATTER_COALESCING_ADD_FILTER_CMD,
+       WMI_TAG_CHATTER_COALESCING_DELETE_FILTER_CMD,
+       WMI_TAG_CHATTER_COALESCING_QUERY_CMD,
+       WMI_TAG_TXBF_CMD,
+       WMI_TAG_DEBUG_LOG_CONFIG_CMD,
+       WMI_TAG_NLO_EVENT,
+       WMI_TAG_CHATTER_QUERY_REPLY_EVENT,
+       WMI_TAG_UPLOAD_H_HDR,
+       WMI_TAG_CAPTURE_H_EVENT_HDR,
+       WMI_TAG_VDEV_WNM_SLEEPMODE_CMD,
+       WMI_TAG_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+       WMI_TAG_VDEV_WMM_ADDTS_CMD,
+       WMI_TAG_VDEV_WMM_DELTS_CMD,
+       WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+       WMI_TAG_TDLS_SET_STATE_CMD,
+       WMI_TAG_TDLS_PEER_UPDATE_CMD,
+       WMI_TAG_TDLS_PEER_EVENT,
+       WMI_TAG_TDLS_PEER_CAPABILITIES,
+       WMI_TAG_VDEV_MCC_SET_TBTT_MODE_CMD,
+       WMI_TAG_ROAM_CHAN_LIST,
+       WMI_TAG_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+       WMI_TAG_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMD,
+       WMI_TAG_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+       WMI_TAG_RESMGR_SET_CHAN_LATENCY_CMD,
+       WMI_TAG_BA_REQ_SSN_CMD,
+       WMI_TAG_BA_RSP_SSN_EVENT,
+       WMI_TAG_STA_SMPS_FORCE_MODE_CMD,
+       WMI_TAG_SET_MCASTBCAST_FILTER_CMD,
+       WMI_TAG_P2P_SET_OPPPS_CMD,
+       WMI_TAG_P2P_SET_NOA_CMD,
+       WMI_TAG_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+       WMI_TAG_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+       WMI_TAG_STA_SMPS_PARAM_CMD,
+       WMI_TAG_VDEV_SET_GTX_PARAMS_CMD,
+       WMI_TAG_MCC_SCHED_TRAFFIC_STATS_CMD,
+       WMI_TAG_MCC_SCHED_STA_TRAFFIC_STATS,
+       WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT,
+       WMI_TAG_P2P_NOA_EVENT,
+       WMI_TAG_HB_SET_ENABLE_CMD,
+       WMI_TAG_HB_SET_TCP_PARAMS_CMD,
+       WMI_TAG_HB_SET_TCP_PKT_FILTER_CMD,
+       WMI_TAG_HB_SET_UDP_PARAMS_CMD,
+       WMI_TAG_HB_SET_UDP_PKT_FILTER_CMD,
+       WMI_TAG_HB_IND_EVENT,
+       WMI_TAG_TX_PAUSE_EVENT,
+       WMI_TAG_RFKILL_EVENT,
+       WMI_TAG_DFS_RADAR_EVENT,
+       WMI_TAG_DFS_PHYERR_FILTER_ENA_CMD,
+       WMI_TAG_DFS_PHYERR_FILTER_DIS_CMD,
+       WMI_TAG_BATCH_SCAN_RESULT_SCAN_LIST,
+       WMI_TAG_BATCH_SCAN_RESULT_NETWORK_INFO,
+       WMI_TAG_BATCH_SCAN_ENABLE_CMD,
+       WMI_TAG_BATCH_SCAN_DISABLE_CMD,
+       WMI_TAG_BATCH_SCAN_TRIGGER_RESULT_CMD,
+       WMI_TAG_BATCH_SCAN_ENABLED_EVENT,
+       WMI_TAG_BATCH_SCAN_RESULT_EVENT,
+       WMI_TAG_VDEV_PLMREQ_START_CMD,
+       WMI_TAG_VDEV_PLMREQ_STOP_CMD,
+       WMI_TAG_THERMAL_MGMT_CMD,
+       WMI_TAG_THERMAL_MGMT_EVENT,
+       WMI_TAG_PEER_INFO_REQ_CMD,
+       WMI_TAG_PEER_INFO_EVENT,
+       WMI_TAG_PEER_INFO,
+       WMI_TAG_PEER_TX_FAIL_CNT_THR_EVENT,
+       WMI_TAG_RMC_SET_MODE_CMD,
+       WMI_TAG_RMC_SET_ACTION_PERIOD_CMD,
+       WMI_TAG_RMC_CONFIG_CMD,
+       WMI_TAG_MHF_OFFLOAD_SET_MODE_CMD,
+       WMI_TAG_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+       WMI_TAG_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+       WMI_TAG_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+       WMI_TAG_NAN_CMD_PARAM,
+       WMI_TAG_NAN_EVENT_HDR,
+       WMI_TAG_PDEV_L1SS_TRACK_EVENT,
+       WMI_TAG_DIAG_DATA_CONTAINER_EVENT,
+       WMI_TAG_MODEM_POWER_STATE_CMD_PARAM,
+       WMI_TAG_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+       WMI_TAG_PEER_ESTIMATED_LINKSPEED_EVENT,
+       WMI_TAG_AGGR_STATE_TRIG_EVENT,
+       WMI_TAG_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+       WMI_TAG_ROAM_SCAN_CMD,
+       WMI_TAG_REQ_STATS_EXT_CMD,
+       WMI_TAG_STATS_EXT_EVENT,
+       WMI_TAG_OBSS_SCAN_ENABLE_CMD,
+       WMI_TAG_OBSS_SCAN_DISABLE_CMD,
+       WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+       WMI_TAG_PDEV_SET_LED_CONFIG_CMD,
+       WMI_TAG_HOST_AUTO_SHUTDOWN_CFG_CMD,
+       WMI_TAG_HOST_AUTO_SHUTDOWN_EVENT,
+       WMI_TAG_UPDATE_WHAL_MIB_STATS_EVENT,
+       WMI_TAG_CHAN_AVOID_UPDATE_CMD_PARAM,
+       WMI_TAG_WOW_IOAC_PKT_PATTERN_T,
+       WMI_TAG_WOW_IOAC_TMR_PATTERN_T,
+       WMI_TAG_WOW_IOAC_ADD_KEEPALIVE_CMD,
+       WMI_TAG_WOW_IOAC_DEL_KEEPALIVE_CMD,
+       WMI_TAG_WOW_IOAC_KEEPALIVE_T,
+       WMI_TAG_WOW_IOAC_ADD_PATTERN_CMD,
+       WMI_TAG_WOW_IOAC_DEL_PATTERN_CMD,
+       WMI_TAG_START_LINK_STATS_CMD,
+       WMI_TAG_CLEAR_LINK_STATS_CMD,
+       WMI_TAG_REQUEST_LINK_STATS_CMD,
+       WMI_TAG_IFACE_LINK_STATS_EVENT,
+       WMI_TAG_RADIO_LINK_STATS_EVENT,
+       WMI_TAG_PEER_STATS_EVENT,
+       WMI_TAG_CHANNEL_STATS,
+       WMI_TAG_RADIO_LINK_STATS,
+       WMI_TAG_RATE_STATS,
+       WMI_TAG_PEER_LINK_STATS,
+       WMI_TAG_WMM_AC_STATS,
+       WMI_TAG_IFACE_LINK_STATS,
+       WMI_TAG_LPI_MGMT_SNOOPING_CONFIG_CMD,
+       WMI_TAG_LPI_START_SCAN_CMD,
+       WMI_TAG_LPI_STOP_SCAN_CMD,
+       WMI_TAG_LPI_RESULT_EVENT,
+       WMI_TAG_PEER_STATE_EVENT,
+       WMI_TAG_EXTSCAN_BUCKET_CMD,
+       WMI_TAG_EXTSCAN_BUCKET_CHANNEL_EVENT,
+       WMI_TAG_EXTSCAN_START_CMD,
+       WMI_TAG_EXTSCAN_STOP_CMD,
+       WMI_TAG_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+       WMI_TAG_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+       WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+       WMI_TAG_EXTSCAN_GET_CACHED_RESULTS_CMD,
+       WMI_TAG_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+       WMI_TAG_EXTSCAN_SET_CAPABILITIES_CMD,
+       WMI_TAG_EXTSCAN_GET_CAPABILITIES_CMD,
+       WMI_TAG_EXTSCAN_OPERATION_EVENT,
+       WMI_TAG_EXTSCAN_START_STOP_EVENT,
+       WMI_TAG_EXTSCAN_TABLE_USAGE_EVENT,
+       WMI_TAG_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+       WMI_TAG_EXTSCAN_RSSI_INFO_EVENT,
+       WMI_TAG_EXTSCAN_CACHED_RESULTS_EVENT,
+       WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+       WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+       WMI_TAG_EXTSCAN_HOTLIST_MATCH_EVENT,
+       WMI_TAG_EXTSCAN_CAPABILITIES_EVENT,
+       WMI_TAG_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+       WMI_TAG_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+       WMI_TAG_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+       WMI_TAG_D0_WOW_ENABLE_DISABLE_CMD,
+       WMI_TAG_D0_WOW_DISABLE_ACK_EVENT,
+       WMI_TAG_UNIT_TEST_CMD,
+       WMI_TAG_ROAM_OFFLOAD_TLV_PARAM,
+       WMI_TAG_ROAM_11I_OFFLOAD_TLV_PARAM,
+       WMI_TAG_ROAM_11R_OFFLOAD_TLV_PARAM,
+       WMI_TAG_ROAM_ESE_OFFLOAD_TLV_PARAM,
+       WMI_TAG_ROAM_SYNCH_EVENT,
+       WMI_TAG_ROAM_SYNCH_COMPLETE,
+       WMI_TAG_EXTWOW_ENABLE_CMD,
+       WMI_TAG_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+       WMI_TAG_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+       WMI_TAG_LPI_STATUS_EVENT,
+       WMI_TAG_LPI_HANDOFF_EVENT,
+       WMI_TAG_VDEV_RATE_STATS_EVENT,
+       WMI_TAG_VDEV_RATE_HT_INFO,
+       WMI_TAG_RIC_REQUEST,
+       WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
+       WMI_TAG_PDEV_TEMPERATURE_EVENT,
+       WMI_TAG_SET_DHCP_SERVER_OFFLOAD_CMD,
+       WMI_TAG_TPC_CHAINMASK_CONFIG_CMD,
+       WMI_TAG_RIC_TSPEC,
+       WMI_TAG_TPC_CHAINMASK_CONFIG,
+       WMI_TAG_IPA_OFFLOAD_ENABLE_DISABLE_CMD,
+       WMI_TAG_SCAN_PROB_REQ_OUI_CMD,
+       WMI_TAG_KEY_MATERIAL,
+       WMI_TAG_TDLS_SET_OFFCHAN_MODE_CMD,
+       WMI_TAG_SET_LED_FLASHING_CMD,
+       WMI_TAG_MDNS_OFFLOAD_CMD,
+       WMI_TAG_MDNS_SET_FQDN_CMD,
+       WMI_TAG_MDNS_SET_RESP_CMD,
+       WMI_TAG_MDNS_GET_STATS_CMD,
+       WMI_TAG_MDNS_STATS_EVENT,
+       WMI_TAG_ROAM_INVOKE_CMD,
+       WMI_TAG_PDEV_RESUME_EVENT,
+       WMI_TAG_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+       WMI_TAG_SAP_OFL_ENABLE_CMD,
+       WMI_TAG_SAP_OFL_ADD_STA_EVENT,
+       WMI_TAG_SAP_OFL_DEL_STA_EVENT,
+       WMI_TAG_APFIND_CMD_PARAM,
+       WMI_TAG_APFIND_EVENT_HDR,
+       WMI_TAG_OCB_SET_SCHED_CMD,
+       WMI_TAG_OCB_SET_SCHED_EVENT,
+       WMI_TAG_OCB_SET_CONFIG_CMD,
+       WMI_TAG_OCB_SET_CONFIG_RESP_EVENT,
+       WMI_TAG_OCB_SET_UTC_TIME_CMD,
+       WMI_TAG_OCB_START_TIMING_ADVERT_CMD,
+       WMI_TAG_OCB_STOP_TIMING_ADVERT_CMD,
+       WMI_TAG_OCB_GET_TSF_TIMER_CMD,
+       WMI_TAG_OCB_GET_TSF_TIMER_RESP_EVENT,
+       WMI_TAG_DCC_GET_STATS_CMD,
+       WMI_TAG_DCC_CHANNEL_STATS_REQUEST,
+       WMI_TAG_DCC_GET_STATS_RESP_EVENT,
+       WMI_TAG_DCC_CLEAR_STATS_CMD,
+       WMI_TAG_DCC_UPDATE_NDL_CMD,
+       WMI_TAG_DCC_UPDATE_NDL_RESP_EVENT,
+       WMI_TAG_DCC_STATS_EVENT,
+       WMI_TAG_OCB_CHANNEL,
+       WMI_TAG_OCB_SCHEDULE_ELEMENT,
+       WMI_TAG_DCC_NDL_STATS_PER_CHANNEL,
+       WMI_TAG_DCC_NDL_CHAN,
+       WMI_TAG_QOS_PARAMETER,
+       WMI_TAG_DCC_NDL_ACTIVE_STATE_CONFIG,
+       WMI_TAG_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+       WMI_TAG_ROAM_FILTER,
+       WMI_TAG_PASSPOINT_CONFIG_CMD,
+       WMI_TAG_PASSPOINT_EVENT_HDR,
+       WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+       WMI_TAG_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+       WMI_TAG_VDEV_TSF_TSTAMP_ACTION_CMD,
+       WMI_TAG_VDEV_TSF_REPORT_EVENT,
+       WMI_TAG_GET_FW_MEM_DUMP,
+       WMI_TAG_UPDATE_FW_MEM_DUMP,
+       WMI_TAG_FW_MEM_DUMP_PARAMS,
+       WMI_TAG_DEBUG_MESG_FLUSH,
+       WMI_TAG_DEBUG_MESG_FLUSH_COMPLETE,
+       WMI_TAG_PEER_SET_RATE_REPORT_CONDITION,
+       WMI_TAG_ROAM_SUBNET_CHANGE_CONFIG,
+       WMI_TAG_VDEV_SET_IE_CMD,
+       WMI_TAG_RSSI_BREACH_MONITOR_CONFIG,
+       WMI_TAG_RSSI_BREACH_EVENT,
+       WMI_TAG_WOW_EVENT_INITIAL_WAKEUP,
+       WMI_TAG_SOC_SET_PCL_CMD,
+       WMI_TAG_SOC_SET_HW_MODE_CMD,
+       WMI_TAG_SOC_SET_HW_MODE_RESPONSE_EVENT,
+       WMI_TAG_SOC_HW_MODE_TRANSITION_EVENT,
+       WMI_TAG_VDEV_TXRX_STREAMS,
+       WMI_TAG_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+       WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_CMD,
+       WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+       WMI_TAG_WOW_IOAC_SOCK_PATTERN_T,
+       WMI_TAG_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+       WMI_TAG_DIAG_EVENT_LOG_CONFIG,
+       WMI_TAG_DIAG_EVENT_LOG_SUPPORTED_EVENT_FIXED_PARAMS,
+       WMI_TAG_PACKET_FILTER_CONFIG,
+       WMI_TAG_PACKET_FILTER_ENABLE,
+       WMI_TAG_SAP_SET_BLACKLIST_PARAM_CMD,
+       WMI_TAG_MGMT_TX_SEND_CMD,
+       WMI_TAG_MGMT_TX_COMPL_EVENT,
+       WMI_TAG_SOC_SET_ANTENNA_MODE_CMD,
+       WMI_TAG_WOW_UDP_SVC_OFLD_CMD,
+       WMI_TAG_LRO_INFO_CMD,
+       WMI_TAG_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+       WMI_TAG_SERVICE_READY_EXT_EVENT,
+       WMI_TAG_MAWC_SENSOR_REPORT_IND_CMD,
+       WMI_TAG_MAWC_ENABLE_SENSOR_EVENT,
+       WMI_TAG_ROAM_CONFIGURE_MAWC_CMD,
+       WMI_TAG_NLO_CONFIGURE_MAWC_CMD,
+       WMI_TAG_EXTSCAN_CONFIGURE_MAWC_CMD,
+       WMI_TAG_PEER_ASSOC_CONF_EVENT,
+       WMI_TAG_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+       WMI_TAG_AP_PS_EGAP_PARAM_CMD,
+       WMI_TAG_AP_PS_EGAP_INFO_EVENT,
+       WMI_TAG_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+       WMI_TAG_TRANSFER_DATA_TO_FLASH_CMD,
+       WMI_TAG_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+       WMI_TAG_SCPC_EVENT,
+       WMI_TAG_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+       WMI_TAG_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+       WMI_TAG_BPF_GET_CAPABILITY_CMD,
+       WMI_TAG_BPF_CAPABILITY_INFO_EVT,
+       WMI_TAG_BPF_GET_VDEV_STATS_CMD,
+       WMI_TAG_BPF_VDEV_STATS_INFO_EVT,
+       WMI_TAG_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+       WMI_TAG_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+       WMI_TAG_VDEV_DELETE_RESP_EVENT,
+       WMI_TAG_PEER_DELETE_RESP_EVENT,
+       WMI_TAG_ROAM_DENSE_THRES_PARAM,
+       WMI_TAG_ENLO_CANDIDATE_SCORE_PARAM,
+       WMI_TAG_PEER_UPDATE_WDS_ENTRY_CMD,
+       WMI_TAG_VDEV_CONFIG_RATEMASK,
+       WMI_TAG_PDEV_FIPS_CMD,
+       WMI_TAG_PDEV_SMART_ANT_ENABLE_CMD,
+       WMI_TAG_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+       WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+       WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+       WMI_TAG_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+       WMI_TAG_PDEV_SET_ANT_SWITCH_TBL_CMD,
+       WMI_TAG_PDEV_SET_CTL_TABLE_CMD,
+       WMI_TAG_PDEV_SET_MIMOGAIN_TABLE_CMD,
+       WMI_TAG_FWTEST_SET_PARAM_CMD,
+       WMI_TAG_PEER_ATF_REQUEST,
+       WMI_TAG_VDEV_ATF_REQUEST,
+       WMI_TAG_PDEV_GET_ANI_CCK_CONFIG_CMD,
+       WMI_TAG_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+       WMI_TAG_INST_RSSI_STATS_RESP,
+       WMI_TAG_MED_UTIL_REPORT_EVENT,
+       WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT,
+       WMI_TAG_WDS_ADDR_EVENT,
+       WMI_TAG_PEER_RATECODE_LIST_EVENT,
+       WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+       WMI_TAG_PDEV_TPC_EVENT,
+       WMI_TAG_ANI_OFDM_EVENT,
+       WMI_TAG_ANI_CCK_EVENT,
+       WMI_TAG_PDEV_CHANNEL_HOPPING_EVENT,
+       WMI_TAG_PDEV_FIPS_EVENT,
+       WMI_TAG_ATF_PEER_INFO,
+       WMI_TAG_PDEV_GET_TPC_CMD,
+       WMI_TAG_VDEV_FILTER_NRP_CONFIG_CMD,
+       WMI_TAG_QBOOST_CFG_CMD,
+       WMI_TAG_PDEV_SMART_ANT_GPIO_HANDLE,
+       WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+       WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+       WMI_TAG_PDEV_SET_ANT_CTRL_CHAIN,
+       WMI_TAG_PEER_CCK_OFDM_RATE_INFO,
+       WMI_TAG_PEER_MCS_RATE_INFO,
+       WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+       WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+       WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+       WMI_TAG_MU_REPORT_TOTAL_MU,
+       WMI_TAG_VDEV_SET_DSCP_TID_MAP_CMD,
+       WMI_TAG_ROAM_SET_MBO,
+       WMI_TAG_MIB_STATS_ENABLE_CMD,
+       WMI_TAG_NAN_DISC_IFACE_CREATED_EVENT,
+       WMI_TAG_NAN_DISC_IFACE_DELETED_EVENT,
+       WMI_TAG_NAN_STARTED_CLUSTER_EVENT,
+       WMI_TAG_NAN_JOINED_CLUSTER_EVENT,
+       WMI_TAG_NDI_GET_CAP_REQ,
+       WMI_TAG_NDP_INITIATOR_REQ,
+       WMI_TAG_NDP_RESPONDER_REQ,
+       WMI_TAG_NDP_END_REQ,
+       WMI_TAG_NDI_CAP_RSP_EVENT,
+       WMI_TAG_NDP_INITIATOR_RSP_EVENT,
+       WMI_TAG_NDP_RESPONDER_RSP_EVENT,
+       WMI_TAG_NDP_END_RSP_EVENT,
+       WMI_TAG_NDP_INDICATION_EVENT,
+       WMI_TAG_NDP_CONFIRM_EVENT,
+       WMI_TAG_NDP_END_INDICATION_EVENT,
+       WMI_TAG_VDEV_SET_QUIET_CMD,
+       WMI_TAG_PDEV_SET_PCL_CMD,
+       WMI_TAG_PDEV_SET_HW_MODE_CMD,
+       WMI_TAG_PDEV_SET_MAC_CONFIG_CMD,
+       WMI_TAG_PDEV_SET_ANTENNA_MODE_CMD,
+       WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+       WMI_TAG_PDEV_HW_MODE_TRANSITION_EVENT,
+       WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+       WMI_TAG_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+       WMI_TAG_COEX_CONFIG_CMD,
+       WMI_TAG_CONFIG_ENHANCED_MCAST_FILTER,
+       WMI_TAG_CHAN_AVOID_RPT_ALLOW_CMD,
+       WMI_TAG_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+       WMI_TAG_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+       WMI_TAG_PDEV_WAL_POWER_DEBUG_CMD,
+       WMI_TAG_MAC_PHY_CAPABILITIES,
+       WMI_TAG_HW_MODE_CAPABILITIES,
+       WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS,
+       WMI_TAG_HAL_REG_CAPABILITIES_EXT,
+       WMI_TAG_SOC_HAL_REG_CAPABILITIES,
+       WMI_TAG_VDEV_WISA_CMD,
+       WMI_TAG_TX_POWER_LEVEL_STATS_EVT,
+       WMI_TAG_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+       WMI_TAG_SCAN_ADAPTIVE_DWELL_CONFIG,
+       WMI_TAG_WOW_SET_ACTION_WAKE_UP_CMD,
+       WMI_TAG_NDP_END_RSP_PER_NDI,
+       WMI_TAG_PEER_BWF_REQUEST,
+       WMI_TAG_BWF_PEER_INFO,
+       WMI_TAG_DBGLOG_TIME_STAMP_SYNC_CMD,
+       WMI_TAG_RMC_SET_LEADER_CMD,
+       WMI_TAG_RMC_MANUAL_LEADER_EVENT,
+       WMI_TAG_PER_CHAIN_RSSI_STATS,
+       WMI_TAG_RSSI_STATS,
+       WMI_TAG_P2P_LO_START_CMD,
+       WMI_TAG_P2P_LO_STOP_CMD,
+       WMI_TAG_P2P_LO_STOPPED_EVENT,
+       WMI_TAG_REORDER_QUEUE_SETUP_CMD,
+       WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
+       WMI_TAG_SET_MULTIPLE_MCAST_FILTER_CMD,
+       WMI_TAG_MGMT_TX_COMPL_BUNDLE_EVENT,
+       WMI_TAG_READ_DATA_FROM_FLASH_CMD,
+       WMI_TAG_READ_DATA_FROM_FLASH_EVENT,
+       WMI_TAG_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+       WMI_TAG_PEER_SET_RX_BLOCKSIZE_CMD,
+       WMI_TAG_PDEV_SET_WAKEUP_CONFIG_CMDID,
+       WMI_TAG_TLV_BUF_LEN_PARAM,
+       WMI_TAG_SERVICE_AVAILABLE_EVENT,
+       WMI_TAG_PEER_ANTDIV_INFO_REQ_CMD,
+       WMI_TAG_PEER_ANTDIV_INFO_EVENT,
+       WMI_TAG_PEER_ANTDIV_INFO,
+       WMI_TAG_PDEV_GET_ANTDIV_STATUS_CMD,
+       WMI_TAG_PDEV_ANTDIV_STATUS_EVENT,
+       WMI_TAG_MNT_FILTER_CMD,
+       WMI_TAG_GET_CHIP_POWER_STATS_CMD,
+       WMI_TAG_PDEV_CHIP_POWER_STATS_EVENT,
+       WMI_TAG_COEX_GET_ANTENNA_ISOLATION_CMD,
+       WMI_TAG_COEX_REPORT_ISOLATION_EVENT,
+       WMI_TAG_CHAN_CCA_STATS,
+       WMI_TAG_PEER_SIGNAL_STATS,
+       WMI_TAG_TX_STATS,
+       WMI_TAG_PEER_AC_TX_STATS,
+       WMI_TAG_RX_STATS,
+       WMI_TAG_PEER_AC_RX_STATS,
+       WMI_TAG_REPORT_STATS_EVENT,
+       WMI_TAG_CHAN_CCA_STATS_THRESH,
+       WMI_TAG_PEER_SIGNAL_STATS_THRESH,
+       WMI_TAG_TX_STATS_THRESH,
+       WMI_TAG_RX_STATS_THRESH,
+       WMI_TAG_PDEV_SET_STATS_THRESHOLD_CMD,
+       WMI_TAG_REQUEST_WLAN_STATS_CMD,
+       WMI_TAG_RX_AGGR_FAILURE_EVENT,
+       WMI_TAG_RX_AGGR_FAILURE_INFO,
+       WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+       WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+       WMI_TAG_PDEV_BAND_TO_MAC,
+       WMI_TAG_TBTT_OFFSET_INFO,
+       WMI_TAG_TBTT_OFFSET_EXT_EVENT,
+       WMI_TAG_SAR_LIMITS_CMD,
+       WMI_TAG_SAR_LIMIT_CMD_ROW,
+       WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+       WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+       WMI_TAG_VDEV_ADFS_CH_CFG_CMD,
+       WMI_TAG_VDEV_ADFS_OCAC_ABORT_CMD,
+       WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT,
+       WMI_TAG_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+       WMI_TAG_VDEV_DFS_CAC_COMPLETE_EVENT,
+       WMI_TAG_VENDOR_OUI,
+       WMI_TAG_REQUEST_RCPI_CMD,
+       WMI_TAG_UPDATE_RCPI_EVENT,
+       WMI_TAG_REQUEST_PEER_STATS_INFO_CMD,
+       WMI_TAG_PEER_STATS_INFO,
+       WMI_TAG_PEER_STATS_INFO_EVENT,
+       WMI_TAG_PKGID_EVENT,
+       WMI_TAG_CONNECTED_NLO_RSSI_PARAMS,
+       WMI_TAG_SET_CURRENT_COUNTRY_CMD,
+       WMI_TAG_REGULATORY_RULE_STRUCT,
+       WMI_TAG_REG_CHAN_LIST_CC_EVENT,
+       WMI_TAG_11D_SCAN_START_CMD,
+       WMI_TAG_11D_SCAN_STOP_CMD,
+       WMI_TAG_11D_NEW_COUNTRY_EVENT,
+       WMI_TAG_REQUEST_RADIO_CHAN_STATS_CMD,
+       WMI_TAG_RADIO_CHAN_STATS,
+       WMI_TAG_RADIO_CHAN_STATS_EVENT,
+       WMI_TAG_ROAM_PER_CONFIG,
+       WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+       WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+       WMI_TAG_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+       WMI_TAG_HW_DATA_FILTER_CMD,
+       WMI_TAG_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+       WMI_TAG_PEER_OPER_MODE_CHANGE_EVENT,
+       WMI_TAG_CHIP_POWER_SAVE_FAILURE_DETECTED,
+       WMI_TAG_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+       WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+       WMI_TAG_PDEV_UPDATE_PKT_ROUTING_CMD,
+       WMI_TAG_PDEV_CHECK_CAL_VERSION_CMD,
+       WMI_TAG_PDEV_CHECK_CAL_VERSION_EVENT,
+       WMI_TAG_PDEV_SET_DIVERSITY_GAIN_CMD,
+       WMI_TAG_MAC_PHY_CHAINMASK_COMBO,
+       WMI_TAG_MAC_PHY_CHAINMASK_CAPABILITY,
+       WMI_TAG_VDEV_SET_ARP_STATS_CMD,
+       WMI_TAG_VDEV_GET_ARP_STATS_CMD,
+       WMI_TAG_VDEV_GET_ARP_STATS_EVENT,
+       WMI_TAG_IFACE_OFFLOAD_STATS,
+       WMI_TAG_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+       WMI_TAG_RSSI_CTL_EXT,
+       WMI_TAG_SINGLE_PHYERR_EXT_RX_HDR,
+       WMI_TAG_COEX_BT_ACTIVITY_EVENT,
+       WMI_TAG_VDEV_GET_TX_POWER_CMD,
+       WMI_TAG_VDEV_TX_POWER_EVENT,
+       WMI_TAG_OFFCHAN_DATA_TX_COMPL_EVENT,
+       WMI_TAG_OFFCHAN_DATA_TX_SEND_CMD,
+       WMI_TAG_TX_SEND_PARAMS,
+       WMI_TAG_HE_RATE_SET,
+       WMI_TAG_CONGESTION_STATS,
+       WMI_TAG_SET_INIT_COUNTRY_CMD,
+       WMI_TAG_SCAN_DBS_DUTY_CYCLE,
+       WMI_TAG_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+       WMI_TAG_PDEV_DIV_GET_RSSI_ANTID,
+       WMI_TAG_THERM_THROT_CONFIG_REQUEST,
+       WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO,
+       WMI_TAG_THERM_THROT_STATS_EVENT,
+       WMI_TAG_THERM_THROT_LEVEL_STATS_INFO,
+       WMI_TAG_PDEV_DIV_RSSI_ANTID_EVENT,
+       WMI_TAG_OEM_DMA_RING_CAPABILITIES,
+       WMI_TAG_OEM_DMA_RING_CFG_REQ,
+       WMI_TAG_OEM_DMA_RING_CFG_RSP,
+       WMI_TAG_OEM_INDIRECT_DATA,
+       WMI_TAG_OEM_DMA_BUF_RELEASE,
+       WMI_TAG_OEM_DMA_BUF_RELEASE_ENTRY,
+       WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+       WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT,
+       WMI_TAG_ROAM_LCA_DISALLOW_CONFIG,
+       WMI_TAG_VDEV_LIMIT_OFFCHAN_CMD,
+       WMI_TAG_ROAM_RSSI_REJECTION_OCE_CONFIG,
+       WMI_TAG_UNIT_TEST_EVENT,
+       WMI_TAG_ROAM_FILS_OFFLOAD,
+       WMI_TAG_PDEV_UPDATE_PMK_CACHE_CMD,
+       WMI_TAG_PMK_CACHE,
+       WMI_TAG_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+       WMI_TAG_ROAM_FILS_SYNCH,
+       WMI_TAG_GTK_OFFLOAD_EXTENDED,
+       WMI_TAG_ROAM_BG_SCAN_ROAMING,
+       WMI_TAG_OIC_PING_OFFLOAD_PARAMS_CMD,
+       WMI_TAG_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+       WMI_TAG_OIC_PING_HANDOFF_EVENT,
+       WMI_TAG_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+       WMI_TAG_DHCP_LEASE_RENEW_EVENT,
+       WMI_TAG_BTM_CONFIG,
+       WMI_TAG_DEBUG_MESG_FW_DATA_STALL,
+       WMI_TAG_WLM_CONFIG_CMD,
+       WMI_TAG_PDEV_UPDATE_CTLTABLE_REQUEST,
+       WMI_TAG_PDEV_UPDATE_CTLTABLE_EVENT,
+       WMI_TAG_ROAM_CND_SCORING_PARAM,
+       WMI_TAG_PDEV_CONFIG_VENDOR_OUI_ACTION,
+       WMI_TAG_VENDOR_OUI_EXT,
+       WMI_TAG_ROAM_SYNCH_FRAME_EVENT,
+       WMI_TAG_FD_SEND_FROM_HOST_CMD,
+       WMI_TAG_ENABLE_FILS_CMD,
+       WMI_TAG_HOST_SWFDA_EVENT,
+       WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
+       WMI_TAG_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMD,
+       WMI_TAG_STATS_PERIOD,
+       WMI_TAG_NDL_SCHEDULE_UPDATE,
+       WMI_TAG_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMD,
+       WMI_TAG_MSDUQ_QDEPTH_THRESH_UPDATE,
+       WMI_TAG_PDEV_SET_RX_FILTER_PROMISCUOUS_CMD,
+       WMI_TAG_SAR2_RESULT_EVENT,
+       WMI_TAG_SAR_CAPABILITIES,
+       WMI_TAG_SAP_OBSS_DETECTION_CFG_CMD,
+       WMI_TAG_SAP_OBSS_DETECTION_INFO_EVT,
+       WMI_TAG_DMA_RING_CAPABILITIES,
+       WMI_TAG_DMA_RING_CFG_REQ,
+       WMI_TAG_DMA_RING_CFG_RSP,
+       WMI_TAG_DMA_BUF_RELEASE,
+       WMI_TAG_DMA_BUF_RELEASE_ENTRY,
+       WMI_TAG_SAR_GET_LIMITS_CMD,
+       WMI_TAG_SAR_GET_LIMITS_EVENT,
+       WMI_TAG_SAR_GET_LIMITS_EVENT_ROW,
+       WMI_TAG_OFFLOAD_11K_REPORT,
+       WMI_TAG_INVOKE_NEIGHBOR_REPORT,
+       WMI_TAG_NEIGHBOR_REPORT_OFFLOAD,
+       WMI_TAG_VDEV_SET_CONNECTIVITY_CHECK_STATS,
+       WMI_TAG_VDEV_GET_CONNECTIVITY_CHECK_STATS,
+       WMI_TAG_BPF_SET_VDEV_ENABLE_CMD,
+       WMI_TAG_BPF_SET_VDEV_WORK_MEMORY_CMD,
+       WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_CMD,
+       WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_RESP_EVT,
+       WMI_TAG_PDEV_GET_NFCAL_POWER,
+       WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
+       WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
+       WMI_TAG_OBSS_COLOR_COLLISION_EVT,
+       WMI_TAG_RUNTIME_DPD_RECAL_CMD,
+       WMI_TAG_TWT_ENABLE_CMD,
+       WMI_TAG_TWT_DISABLE_CMD,
+       WMI_TAG_TWT_ADD_DIALOG_CMD,
+       WMI_TAG_TWT_DEL_DIALOG_CMD,
+       WMI_TAG_TWT_PAUSE_DIALOG_CMD,
+       WMI_TAG_TWT_RESUME_DIALOG_CMD,
+       WMI_TAG_TWT_ENABLE_COMPLETE_EVENT,
+       WMI_TAG_TWT_DISABLE_COMPLETE_EVENT,
+       WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT,
+       WMI_TAG_TWT_DEL_DIALOG_COMPLETE_EVENT,
+       WMI_TAG_TWT_PAUSE_DIALOG_COMPLETE_EVENT,
+       WMI_TAG_TWT_RESUME_DIALOG_COMPLETE_EVENT,
+       WMI_TAG_REQUEST_ROAM_SCAN_STATS_CMD,
+       WMI_TAG_ROAM_SCAN_STATS_EVENT,
+       WMI_TAG_PEER_TID_CONFIGURATIONS_CMD,
+       WMI_TAG_VDEV_SET_CUSTOM_SW_RETRY_TH_CMD,
+       WMI_TAG_GET_TPC_POWER_CMD,
+       WMI_TAG_GET_TPC_POWER_EVENT,
+       WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA,
+       WMI_TAG_MOTION_DET_CONFIG_PARAMS_CMD,
+       WMI_TAG_MOTION_DET_BASE_LINE_CONFIG_PARAMS_CMD,
+       WMI_TAG_MOTION_DET_START_STOP_CMD,
+       WMI_TAG_MOTION_DET_BASE_LINE_START_STOP_CMD,
+       WMI_TAG_MOTION_DET_EVENT,
+       WMI_TAG_MOTION_DET_BASE_LINE_EVENT,
+       WMI_TAG_NDP_TRANSPORT_IP,
+       WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
+       WMI_TAG_ESP_ESTIMATE_EVENT,
+       WMI_TAG_NAN_HOST_CONFIG,
+       WMI_TAG_SPECTRAL_BIN_SCALING_PARAMS,
+       WMI_TAG_PEER_CFR_CAPTURE_CMD,
+       WMI_TAG_PEER_CHAN_WIDTH_SWITCH_CMD,
+       WMI_TAG_CHAN_WIDTH_PEER_LIST,
+       WMI_TAG_OBSS_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMD,
+       WMI_TAG_PDEV_HE_TB_ACTION_FRM_CMD,
+       WMI_TAG_PEER_EXTD2_STATS,
+       WMI_TAG_HPCS_PULSE_START_CMD,
+       WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT,
+       WMI_TAG_VDEV_CHAINMASK_CONFIG_CMD,
+       WMI_TAG_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMD,
+       WMI_TAG_NAN_EVENT_INFO,
+       WMI_TAG_NDP_CHANNEL_INFO,
+       WMI_TAG_NDP_CMD,
+       WMI_TAG_NDP_EVENT,
+       WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
+       WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
+       WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344,
+       WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD = 0x37b,
+       WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD,
+       WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD = 0x381,
+       WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+       WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
+       WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+       WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
+       WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+       WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
+       WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
+       WMI_TAG_MAX
+};
+
+/*
+ * Firmware service capability bits.  These index the service bitmaps
+ * delivered by the firmware in the WMI service-ready and
+ * service-available events (see struct wmi_service_available_event),
+ * so each value is firmware ABI and must not be renumbered.
+ */
+enum wmi_tlv_service {
+       WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+       WMI_TLV_SERVICE_SCAN_OFFLOAD = 1,
+       WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD = 2,
+       WMI_TLV_SERVICE_BCN_MISS_OFFLOAD = 3,
+       WMI_TLV_SERVICE_STA_PWRSAVE = 4,
+       WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE = 5,
+       WMI_TLV_SERVICE_AP_UAPSD = 6,
+       WMI_TLV_SERVICE_AP_DFS = 7,
+       WMI_TLV_SERVICE_11AC = 8,
+       WMI_TLV_SERVICE_BLOCKACK = 9,
+       WMI_TLV_SERVICE_PHYERR = 10,
+       WMI_TLV_SERVICE_BCN_FILTER = 11,
+       WMI_TLV_SERVICE_RTT = 12,
+       WMI_TLV_SERVICE_WOW = 13,
+       WMI_TLV_SERVICE_RATECTRL_CACHE = 14,
+       WMI_TLV_SERVICE_IRAM_TIDS = 15,
+       WMI_TLV_SERVICE_ARPNS_OFFLOAD = 16,
+       WMI_TLV_SERVICE_NLO = 17,
+       WMI_TLV_SERVICE_GTK_OFFLOAD = 18,
+       WMI_TLV_SERVICE_SCAN_SCH = 19,
+       WMI_TLV_SERVICE_CSA_OFFLOAD = 20,
+       WMI_TLV_SERVICE_CHATTER = 21,
+       WMI_TLV_SERVICE_COEX_FREQAVOID = 22,
+       WMI_TLV_SERVICE_PACKET_POWER_SAVE = 23,
+       WMI_TLV_SERVICE_FORCE_FW_HANG = 24,
+       WMI_TLV_SERVICE_GPIO = 25,
+       WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM = 26,
+       /* NB: service bits 27 and 28 despite the differing name prefix. */
+       WMI_STA_UAPSD_BASIC_AUTO_TRIG = 27,
+       WMI_STA_UAPSD_VAR_AUTO_TRIG = 28,
+       WMI_TLV_SERVICE_STA_KEEP_ALIVE = 29,
+       WMI_TLV_SERVICE_TX_ENCAP = 30,
+       WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC = 31,
+       WMI_TLV_SERVICE_EARLY_RX = 32,
+       WMI_TLV_SERVICE_STA_SMPS = 33,
+       WMI_TLV_SERVICE_FWTEST = 34,
+       WMI_TLV_SERVICE_STA_WMMAC = 35,
+       WMI_TLV_SERVICE_TDLS = 36,
+       WMI_TLV_SERVICE_BURST = 37,
+       WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE = 38,
+       WMI_TLV_SERVICE_ADAPTIVE_OCS = 39,
+       WMI_TLV_SERVICE_BA_SSN_SUPPORT = 40,
+       WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE = 41,
+       WMI_TLV_SERVICE_WLAN_HB = 42,
+       WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT = 43,
+       WMI_TLV_SERVICE_BATCH_SCAN = 44,
+       WMI_TLV_SERVICE_QPOWER = 45,
+       WMI_TLV_SERVICE_PLMREQ = 46,
+       WMI_TLV_SERVICE_THERMAL_MGMT = 47,
+       WMI_TLV_SERVICE_RMC = 48,
+       WMI_TLV_SERVICE_MHF_OFFLOAD = 49,
+       WMI_TLV_SERVICE_COEX_SAR = 50,
+       WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE = 51,
+       WMI_TLV_SERVICE_NAN = 52,
+       WMI_TLV_SERVICE_L1SS_STAT = 53,
+       WMI_TLV_SERVICE_ESTIMATE_LINKSPEED = 54,
+       WMI_TLV_SERVICE_OBSS_SCAN = 55,
+       WMI_TLV_SERVICE_TDLS_OFFCHAN = 56,
+       WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA = 57,
+       WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA = 58,
+       WMI_TLV_SERVICE_IBSS_PWRSAVE = 59,
+       WMI_TLV_SERVICE_LPASS = 60,
+       WMI_TLV_SERVICE_EXTSCAN = 61,
+       WMI_TLV_SERVICE_D0WOW = 62,
+       WMI_TLV_SERVICE_HSOFFLOAD = 63,
+       WMI_TLV_SERVICE_ROAM_HO_OFFLOAD = 64,
+       WMI_TLV_SERVICE_RX_FULL_REORDER = 65,
+       WMI_TLV_SERVICE_DHCP_OFFLOAD = 66,
+       WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT = 67,
+       WMI_TLV_SERVICE_MDNS_OFFLOAD = 68,
+       WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD = 69,
+       WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT = 70,
+       WMI_TLV_SERVICE_OCB = 71,
+       WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD = 72,
+       WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT = 73,
+       WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD = 74,
+       WMI_TLV_SERVICE_MGMT_TX_HTT = 75,
+       WMI_TLV_SERVICE_MGMT_TX_WMI = 76,
+       WMI_TLV_SERVICE_EXT_MSG = 77,
+       WMI_TLV_SERVICE_MAWC = 78,
+       WMI_TLV_SERVICE_PEER_ASSOC_CONF = 79,
+       WMI_TLV_SERVICE_EGAP = 80,
+       WMI_TLV_SERVICE_STA_PMF_OFFLOAD = 81,
+       WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY = 82,
+       WMI_TLV_SERVICE_ENHANCED_PROXY_STA = 83,
+       WMI_TLV_SERVICE_ATF = 84,
+       WMI_TLV_SERVICE_COEX_GPIO = 85,
+       WMI_TLV_SERVICE_AUX_SPECTRAL_INTF = 86,
+       WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF = 87,
+       WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64 = 88,
+       WMI_TLV_SERVICE_ENTERPRISE_MESH = 89,
+       WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT = 90,
+       WMI_TLV_SERVICE_BPF_OFFLOAD = 91,
+       WMI_TLV_SERVICE_SYNC_DELETE_CMDS = 92,
+       WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT = 93,
+       WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT = 94,
+       WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES = 95,
+       WMI_TLV_SERVICE_NAN_DATA = 96,
+       WMI_TLV_SERVICE_NAN_RTT = 97,
+       WMI_TLV_SERVICE_11AX = 98,
+       WMI_TLV_SERVICE_DEPRECATED_REPLACE = 99,
+       WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE = 100,
+       WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER = 101,
+       WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT = 102,
+       WMI_TLV_SERVICE_MESH_11S = 103,
+       WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT = 104,
+       WMI_TLV_SERVICE_VDEV_RX_FILTER = 105,
+       WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT = 106,
+       WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET = 107,
+       WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET = 108,
+       WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER = 109,
+       WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT = 110,
+       WMI_TLV_SERVICE_WLAN_STATS_REPORT = 111,
+       WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT = 112,
+       WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD = 113,
+       WMI_TLV_SERVICE_RCPI_SUPPORT = 114,
+       WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT = 115,
+       WMI_TLV_SERVICE_PEER_STATS_INFO = 116,
+       WMI_TLV_SERVICE_REGULATORY_DB = 117,
+       WMI_TLV_SERVICE_11D_OFFLOAD = 118,
+       WMI_TLV_SERVICE_HW_DATA_FILTERING = 119,
+       WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART = 120,
+       WMI_TLV_SERVICE_PKT_ROUTING = 121,
+       WMI_TLV_SERVICE_CHECK_CAL_VERSION = 122,
+       WMI_TLV_SERVICE_OFFCHAN_TX_WMI = 123,
+       WMI_TLV_SERVICE_8SS_TX_BFEE  =  124,
+       WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT = 125,
+       WMI_TLV_SERVICE_ACK_TIMEOUT = 126,
+       WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64 = 127,
+
+       /* The first 128 bits */
+       WMI_MAX_SERVICE = 128,
+
+       WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+       WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT = 129,
+       WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT = 130,
+       WMI_TLV_SERVICE_FILS_SUPPORT = 131,
+       WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD = 132,
+       WMI_TLV_SERVICE_WLAN_DHCP_RENEW = 133,
+       WMI_TLV_SERVICE_MAWC_SUPPORT = 134,
+       WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG = 135,
+       WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT = 136,
+       WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT = 137,
+       WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT = 138,
+       WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT = 139,
+       WMI_TLV_SERVICE_THERM_THROT = 140,
+       WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT = 141,
+       WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN = 142,
+       WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+       WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+       WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+       WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+       WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+       WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+       WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+       WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+       WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+       WMI_TLV_SERVICE_STA_TWT = 152,
+       WMI_TLV_SERVICE_AP_TWT = 153,
+       WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+       WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+       WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156,
+       WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157,
+       WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158,
+       WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159,
+       WMI_TLV_SERVICE_MOTION_DET = 160,
+       WMI_TLV_SERVICE_INFRA_MBSSID = 161,
+       WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162,
+       WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163,
+       WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164,
+       WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165,
+       WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166,
+       WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167,
+       WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168,
+       WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169,
+       WMI_TLV_SERVICE_ESP_SUPPORT = 170,
+       WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171,
+       WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
+       WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
+       WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+       WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+       WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+       WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+       WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+       WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
+       WMI_TLV_SERVICE_BEACON_RECEPTION_STATS = 180,
+       WMI_TLV_SERVICE_FETCH_TX_PN = 181,
+       WMI_TLV_SERVICE_PEER_UNMAP_RESPONSE_SUPPORT = 182,
+       WMI_TLV_SERVICE_TX_PER_PEER_AMPDU_SIZE = 183,
+       WMI_TLV_SERVICE_BSS_COLOR_SWITCH_COUNT = 184,
+       WMI_TLV_SERVICE_HTT_PEER_STATS_SUPPORT = 185,
+       WMI_TLV_SERVICE_UL_RU26_ALLOWED = 186,
+       WMI_TLV_SERVICE_GET_MWS_COEX_STATE = 187,
+       WMI_TLV_SERVICE_GET_MWS_DPWB_STATE = 188,
+       WMI_TLV_SERVICE_GET_MWS_TDM_STATE = 189,
+       WMI_TLV_SERVICE_GET_MWS_IDRX_STATE = 190,
+       WMI_TLV_SERVICE_GET_MWS_ANTENNA_SHARING_STATE = 191,
+       WMI_TLV_SERVICE_ENHANCED_TPC_CONFIG_EVENT = 192,
+       WMI_TLV_SERVICE_WLM_STATS_REQUEST = 193,
+       WMI_TLV_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT = 194,
+       WMI_TLV_SERVICE_WPA3_FT_SAE_SUPPORT = 195,
+       WMI_TLV_SERVICE_WPA3_FT_SUITE_B_SUPPORT = 196,
+       WMI_TLV_SERVICE_VOW_ENABLE = 197,
+       WMI_TLV_SERVICE_CFR_CAPTURE_IND_EVT_TYPE_1 = 198,
+       WMI_TLV_SERVICE_BROADCAST_TWT = 199,
+       WMI_TLV_SERVICE_RAP_DETECTION_SUPPORT = 200,
+       WMI_TLV_SERVICE_PS_TDCC = 201,
+       WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_LEGACY   = 202,
+       WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_OVERRIDE = 203,
+       WMI_TLV_SERVICE_TX_PWR_PER_PEER = 204,
+       WMI_TLV_SERVICE_STA_PLUS_STA_SUPPORT = 205,
+       WMI_TLV_SERVICE_WPA3_FT_FILS = 206,
+       WMI_TLV_SERVICE_ADAPTIVE_11R_ROAM = 207,
+       WMI_TLV_SERVICE_CHAN_RF_CHARACTERIZATION_INFO = 208,
+       WMI_TLV_SERVICE_FW_IFACE_COMBINATION_SUPPORT = 209,
+       WMI_TLV_SERVICE_TX_COMPL_TSF64 = 210,
+       WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211,
+       WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212,
+       WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
+       /* NB: sparse from here on; only bits this driver cares about. */
+       WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
+       WMI_TLV_SERVICE_EXT2_MSG = 220,
+       WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT = 246,
+       WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
+       WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253,
+       WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE = 263,
+
+       /* The second 128 bits */
+       WMI_MAX_EXT_SERVICE = 256,
+       WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL = 265,
+       WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281,
+       WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326,
+       WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN = 357,
+
+       /* The third 128 bits */
+       WMI_MAX_EXT2_SERVICE = 384
+};
+
+/* SMPS (spatial multiplexing power save) forced-mode values. */
+enum {
+       WMI_SMPS_FORCED_MODE_NONE = 0,
+       WMI_SMPS_FORCED_MODE_DISABLED,
+       WMI_SMPS_FORCED_MODE_STATIC,
+       WMI_SMPS_FORCED_MODE_DYNAMIC
+};
+
+/* Band indices for TPC chainmask configuration. */
+#define WMI_TPC_CHAINMASK_CONFIG_BAND_2G      0
+#define WMI_TPC_CHAINMASK_CONFIG_BAND_5G      1
+#define WMI_NUM_SUPPORTED_BAND_MAX 2
+
+/*
+ * Per-peer parameter IDs; presumably the param ids carried in the WMI
+ * peer set-param command -- confirm against the command builders in
+ * qwz.c.
+ */
+#define WMI_PEER_MIMO_PS_STATE                          0x1
+#define WMI_PEER_AMPDU                                  0x2
+#define WMI_PEER_AUTHORIZE                              0x3
+#define WMI_PEER_CHWIDTH                                0x4
+#define WMI_PEER_NSS                                    0x5
+#define WMI_PEER_USE_4ADDR                              0x6
+#define WMI_PEER_MEMBERSHIP                             0x7
+#define WMI_PEER_USERPOS                                0x8
+#define WMI_PEER_CRIT_PROTO_HINT_ENABLED                0x9
+#define WMI_PEER_TX_FAIL_CNT_THR                        0xA
+#define WMI_PEER_SET_HW_RETRY_CTS2S                     0xB
+#define WMI_PEER_IBSS_ATIM_WINDOW_LENGTH                0xC
+#define WMI_PEER_PHYMODE                                0xD
+#define WMI_PEER_USE_FIXED_PWR                          0xE
+#define WMI_PEER_PARAM_FIXED_RATE                       0xF
+#define WMI_PEER_SET_MU_WHITELIST                       0x10
+#define WMI_PEER_SET_MAX_TX_RATE                        0x11
+#define WMI_PEER_SET_MIN_TX_RATE                        0x12
+#define WMI_PEER_SET_DEFAULT_ROUTING                    0x13
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG         0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT        0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG          0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT         0x2
+
+/* Peer SMPS power-save state. */
+enum wmi_peer_smps_state {
+       WMI_PEER_SMPS_PS_NONE = 0x0,
+       WMI_PEER_SMPS_STATIC  = 0x1,
+       WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+/* Peer operating channel width. */
+enum wmi_peer_chwidth {
+       WMI_PEER_CHWIDTH_20MHZ = 0,
+       WMI_PEER_CHWIDTH_40MHZ = 1,
+       WMI_PEER_CHWIDTH_80MHZ = 2,
+       WMI_PEER_CHWIDTH_160MHZ = 3,
+};
+
+/* Beacon generation mode. */
+enum wmi_beacon_gen_mode {
+       WMI_BEACON_STAGGERED_MODE = 0,
+       WMI_BEACON_BURST_MODE = 1
+};
+
+/*
+ * Module ids for direct DMA buffer rings; cf. the module_id field of
+ * struct wmi_dma_ring_capabilities.
+ */
+enum wmi_direct_buffer_module {
+       WMI_DIRECT_BUF_SPECTRAL = 0,
+       WMI_DIRECT_BUF_CFR = 1,
+
+       /* keep it last */
+       WMI_DIRECT_BUF_MAX
+};
+
+/* enum wmi_nss_ratio - NSS ratio received from FW during service ready ext
+ *                     event
+ * WMI_NSS_RATIO_1BY2_NSS - Max nss of 160MHz equals half of the max nss
+ *                         of 80MHz
+ * WMI_NSS_RATIO_3BY4_NSS - Max nss of 160MHz equals 3/4 of the max nss
+ *                         of 80MHz
+ * WMI_NSS_RATIO_1_NSS - Max nss of 160MHz equals the max nss of 80MHz
+ * WMI_NSS_RATIO_2_NSS - Max nss of 160MHz equals two times the max
+ *                      nss of 80MHz
+ */
+
+enum wmi_nss_ratio {
+       WMI_NSS_RATIO_1BY2_NSS = 0x0,
+       WMI_NSS_RATIO_3BY4_NSS = 0x1,
+       WMI_NSS_RATIO_1_NSS = 0x2,
+       WMI_NSS_RATIO_2_NSS = 0x3,
+};
+
+/* DTIM handling policy.  NB: firmware values start at 1, not 0. */
+enum wmi_dtim_policy {
+       WMI_DTIM_POLICY_IGNORE = 1,
+       WMI_DTIM_POLICY_NORMAL = 2,
+       WMI_DTIM_POLICY_STICK  = 3,
+       WMI_DTIM_POLICY_AUTO   = 4,
+};
+
+/*
+ * Host-side (not wire-format, hence no __packed) structures used to
+ * hold values parsed from WMI events.
+ */
+
+/* One pdev-to-frequency-range mapping; cf. struct wmi_pdev_band_to_mac. */
+struct wmi_host_pdev_band_to_mac {
+       uint32_t pdev_id;
+       uint32_t start_freq;
+       uint32_t end_freq;
+};
+
+/* PPE threshold data, one ru mask/threshold set per spatial stream.
+ * PSOC_HOST_MAX_NUM_SS is defined elsewhere in this header. */
+struct ath12k_ppe_threshold {
+       uint32_t numss_m1;
+       uint32_t ru_bit_mask;
+       uint32_t ppet16_ppet8_ru3_ru0[PSOC_HOST_MAX_NUM_SS];
+};
+
+/* Parsed copy of the service-ready-ext event
+ * (cf. struct wmi_service_ready_ext_event). */
+struct ath12k_service_ext_param {
+       uint32_t default_conc_scan_config_bits;
+       uint32_t default_fw_config_bits;
+       struct ath12k_ppe_threshold ppet;
+       uint32_t he_cap_info;
+       uint32_t mpdu_density;
+       uint32_t max_bssid_rx_filters;
+       uint32_t num_hw_modes;
+       uint32_t num_phy;
+};
+
+/* Parsed copy of one hw-mode capability entry
+ * (cf. struct wmi_hw_mode_capabilities). */
+struct ath12k_hw_mode_caps {
+       uint32_t hw_mode_id;
+       uint32_t phy_id_map;
+       uint32_t hw_mode_config_type;
+};
+
+#define PSOC_HOST_MAX_PHY_SIZE (3)
+
+/* Wireless-mode support flags. */
+#define ATH12K_11B_SUPPORT                 BIT(0)
+#define ATH12K_11G_SUPPORT                 BIT(1)
+#define ATH12K_11A_SUPPORT                 BIT(2)
+#define ATH12K_11N_SUPPORT                 BIT(3)
+#define ATH12K_11AC_SUPPORT                BIT(4)
+#define ATH12K_11AX_SUPPORT                BIT(5)
+
+/* Host-side copy of per-PHY regulatory capabilities
+ * (wire format: struct wmi_hal_reg_capabilities_ext). */
+struct ath12k_hal_reg_capabilities_ext {
+       uint32_t phy_id;
+       uint32_t eeprom_reg_domain;
+       uint32_t eeprom_reg_domain_ext;
+       uint32_t regcap1;
+       uint32_t regcap2;
+       uint32_t wireless_modes;
+       uint32_t low_2ghz_chan;
+       uint32_t high_2ghz_chan;
+       uint32_t low_5ghz_chan;
+       uint32_t high_5ghz_chan;
+};
+
+#define WMI_HOST_MAX_PDEV 3
+
+/* Wire-format (TLV) descriptor of one host memory chunk given to fw. */
+struct wlan_host_mem_chunk {
+       uint32_t tlv_header;
+       uint32_t req_id;
+       uint32_t ptr;
+       uint32_t size;
+} __packed;
+
+/* Host-side bookkeeping for one firmware-requested memory chunk:
+ * vaddr is the host mapping, paddr the DMA address handed to fw. */
+struct wmi_host_mem_chunk {
+       void *vaddr;
+       bus_addr_t paddr;
+       uint32_t len;
+       uint32_t req_id;
+};
+
+/* Host-side aggregate of everything needed to build the WMI init
+ * command (resource config, memory chunks, band-to-MAC mappings). */
+struct wmi_init_cmd_param {
+       uint32_t tlv_header;
+       struct target_resource_config *res_cfg;
+       uint8_t num_mem_chunks;
+       struct wmi_host_mem_chunk *mem_chunks;
+       uint32_t hw_mode_id;
+       uint32_t num_band_to_mac;
+       struct wmi_host_pdev_band_to_mac band_to_mac[WMI_HOST_MAX_PDEV];
+};
+
+/* Wire-format pdev band-to-MAC mapping TLV. */
+struct wmi_pdev_band_to_mac {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t start_freq;
+       uint32_t end_freq;
+} __packed;
+
+/* WMI set-hw-mode command body; followed by num_band_to_mac
+ * wmi_pdev_band_to_mac TLVs. */
+struct wmi_pdev_set_hw_mode_cmd_param {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t hw_mode_index;
+       uint32_t num_band_to_mac;
+} __packed;
+
+/* Wire-format PPE threshold info. */
+struct wmi_ppe_threshold {
+       uint32_t numss_m1; /* NSS - 1 */
+       union {
+               uint32_t ru_count;
+               uint32_t ru_mask;
+       } __packed;
+       uint32_t ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
+} __packed;
+
+/* Number of uint32_t board-info words in the service-ready event. */
+#define HW_BD_INFO_SIZE       5
+
+/* WMI ABI version tuple, exchanged in the init command (host_abi_vers)
+ * and the service-ready event (fw_abi_vers). */
+struct wmi_abi_version {
+       uint32_t abi_version_0;
+       uint32_t abi_version_1;
+       uint32_t abi_version_ns_0;
+       uint32_t abi_version_ns_1;
+       uint32_t abi_version_ns_2;
+       uint32_t abi_version_ns_3;
+} __packed;
+
+/* Fixed header of the WMI init command; followed by the resource
+ * config and num_host_mem_chunks wlan_host_mem_chunk TLVs. */
+struct wmi_init_cmd {
+       uint32_t tlv_header;
+       struct wmi_abi_version host_abi_vers;
+       uint32_t num_host_mem_chunks;
+} __packed;
+
+/* Bits for the flag words of struct wmi_resource_config. */
+#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
+#define WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET BIT(9)
+#define WMI_RSRC_CFG_FLAG1_ACK_RSSI BIT(18)
+
+/* NB: a bit index (no BIT() wrapper), unlike the masks above. */
+#define WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT 4
+
+/*
+ * Target resource configuration TLV, presumably sent as part of the
+ * WMI init command (cf. wmi_init_cmd_param.res_cfg) -- confirm against
+ * the init path in qwz.c.  Field order is firmware ABI.
+ */
+struct wmi_resource_config {
+       uint32_t tlv_header;
+       uint32_t num_vdevs;
+       uint32_t num_peers;
+       uint32_t num_offload_peers;
+       uint32_t num_offload_reorder_buffs;
+       uint32_t num_peer_keys;
+       uint32_t num_tids;
+       uint32_t ast_skid_limit;
+       uint32_t tx_chain_mask;
+       uint32_t rx_chain_mask;
+       uint32_t rx_timeout_pri[4];
+       uint32_t rx_decap_mode;
+       uint32_t scan_max_pending_req;
+       uint32_t bmiss_offload_max_vdev;
+       uint32_t roam_offload_max_vdev;
+       uint32_t roam_offload_max_ap_profiles;
+       uint32_t num_mcast_groups;
+       uint32_t num_mcast_table_elems;
+       uint32_t mcast2ucast_mode;
+       uint32_t tx_dbg_log_size;
+       uint32_t num_wds_entries;
+       uint32_t dma_burst_size;
+       uint32_t mac_aggr_delim;
+       uint32_t rx_skip_defrag_timeout_dup_detection_check;
+       uint32_t vow_config;
+       uint32_t gtk_offload_max_vdev;
+       uint32_t num_msdu_desc;
+       uint32_t max_frag_entries;
+       uint32_t num_tdls_vdevs;
+       uint32_t num_tdls_conn_table_entries;
+       uint32_t beacon_tx_offload_max_vdev;
+       uint32_t num_multicast_filter_entries;
+       uint32_t num_wow_filters;
+       uint32_t num_keep_alive_pattern;
+       uint32_t keep_alive_pattern_size;
+       uint32_t max_tdls_concurrent_sleep_sta;
+       uint32_t max_tdls_concurrent_buffer_sta;
+       uint32_t wmi_send_separate;
+       uint32_t num_ocb_vdevs;
+       uint32_t num_ocb_channels;
+       uint32_t num_ocb_schedules;
+       uint32_t flag1;
+       uint32_t smart_ant_cap;
+       uint32_t bk_minfree;
+       uint32_t be_minfree;
+       uint32_t vi_minfree;
+       uint32_t vo_minfree;
+       uint32_t alloc_frag_desc_for_data_pkt;
+       uint32_t num_ns_ext_tuples_cfg;
+       uint32_t bpf_instruction_size;
+       uint32_t max_bssid_rx_filters;
+       uint32_t use_pdev_id;
+       uint32_t max_num_dbs_scan_duty_cycle;
+       uint32_t max_num_group_keys;
+       uint32_t peer_map_unmap_v2_support;
+       uint32_t sched_params;
+       uint32_t twt_ap_pdev_count;
+       uint32_t twt_ap_sta_count;
+#ifdef notyet /* 6 GHz support */
+       uint32_t max_nlo_ssids;
+       uint32_t num_pkt_filters;
+       uint32_t num_max_sta_vdevs;
+       uint32_t max_bssid_indicator;
+       uint32_t ul_resp_config;
+       uint32_t msdu_flow_override_config0;
+       uint32_t msdu_flow_override_config1;
+       uint32_t flags2;
+       uint32_t host_service_flags;
+       uint32_t max_rnr_neighbours;
+       uint32_t ema_max_vap_cnt;
+       uint32_t ema_max_profile_period;
+#endif
+} __packed;
+
+/* Fixed part of the WMI service-ready event sent by the firmware. */
+struct wmi_service_ready_event {
+       uint32_t fw_build_vers;
+       struct wmi_abi_version fw_abi_vers;
+       uint32_t phy_capability;
+       uint32_t max_frag_entry;
+       uint32_t num_rf_chains;
+       uint32_t ht_cap_info;
+       uint32_t vht_cap_info;
+       uint32_t vht_supp_mcs;
+       uint32_t hw_min_tx_power;
+       uint32_t hw_max_tx_power;
+       uint32_t sys_cap_info;
+       uint32_t min_pkt_size_enable;
+       uint32_t max_bcn_ie_size;
+       uint32_t num_mem_reqs;
+       uint32_t max_num_scan_channels;
+       uint32_t hw_bd_id;
+       uint32_t hw_bd_info[HW_BD_INFO_SIZE];
+       uint32_t max_supported_macs;
+       uint32_t wmi_fw_sub_feat_caps;
+       uint32_t num_dbs_hw_modes;
+       /* txrx_chainmask
+        *    [7:0]   - 2G band tx chain mask
+        *    [15:8]  - 2G band rx chain mask
+        *    [23:16] - 5G band tx chain mask
+        *    [31:24] - 5G band rx chain mask
+        */
+       uint32_t txrx_chainmask;
+       uint32_t default_dbs_hw_mode_index;
+       uint32_t num_msdu_desc;
+} __packed;
+
+/* Size in uint32_t words of the first service bitmap.  NB: mirrors
+ * Linux ath12k; dividing bits by sizeof(uint32_t) over-allocates
+ * (32 words for 128 bits) but is harmless. */
+#define WMI_SERVICE_BM_SIZE    ((WMI_MAX_SERVICE + sizeof(uint32_t) - 1) / sizeof(uint32_t))
+
+#define WMI_SERVICE_SEGMENT_BM_SIZE32 4 /* 4x uint32_t = 128 bits */
+#define WMI_SERVICE_EXT_BM_SIZE (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(uint32_t))
+#define WMI_AVAIL_SERVICE_BITS_IN_SIZE32 32
+#define WMI_SERVICE_BITS_IN_SIZE32 4
+
+/* Fixed part of the WMI service-ready-ext event; parsed into
+ * struct ath12k_service_ext_param. */
+struct wmi_service_ready_ext_event {
+       uint32_t default_conc_scan_config_bits;
+       uint32_t default_fw_config_bits;
+       struct wmi_ppe_threshold ppet;
+       uint32_t he_cap_info;
+       uint32_t mpdu_density;
+       uint32_t max_bssid_rx_filters;
+       uint32_t fw_build_vers_ext;
+       uint32_t max_nlo_ssids;
+       uint32_t max_bssid_indicator;
+       uint32_t he_cap_info_ext;
+} __packed;
+
+/* Count header preceding the per-hw-mode capability TLVs. */
+struct wmi_soc_mac_phy_hw_mode_caps {
+       uint32_t num_hw_modes;
+       uint32_t num_chainmask_tables;
+} __packed;
+
+/* One hw-mode capability entry; parsed into struct ath12k_hw_mode_caps. */
+struct wmi_hw_mode_capabilities {
+       uint32_t tlv_header;
+       uint32_t hw_mode_id;
+       uint32_t phy_id_map;
+       uint32_t hw_mode_config_type;
+} __packed;
+
+#define WMI_MAX_HECAP_PHY_SIZE                 (3)
+/* Accessors for the nss_ratio field of wmi_mac_phy_capabilities. */
+#define WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS    BIT(0)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_GET(_val) \
+       FIELD_GET(WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS, _val)
+#define WMI_NSS_RATIO_INFO_BITPOS              GENMASK(4, 1)
+#define WMI_NSS_RATIO_INFO_GET(_val) \
+       FIELD_GET(WMI_NSS_RATIO_INFO_BITPOS, _val)
+
+/* Per-(hw mode, pdev, phy) capability entry from service-ready-ext:
+ * supported bands plus HT/VHT/HE caps and chain masks per band.
+ * The nss_ratio field is decoded with the WMI_NSS_RATIO_* macros. */
+struct wmi_mac_phy_capabilities {
+       uint32_t hw_mode_id;
+       uint32_t pdev_id;
+       uint32_t phy_id;
+       uint32_t supported_flags;
+       uint32_t supported_bands;
+       uint32_t ampdu_density;
+       uint32_t max_bw_supported_2g;
+       uint32_t ht_cap_info_2g;
+       uint32_t vht_cap_info_2g;
+       uint32_t vht_supp_mcs_2g;
+       uint32_t he_cap_info_2g;
+       uint32_t he_supp_mcs_2g;
+       uint32_t tx_chain_mask_2g;
+       uint32_t rx_chain_mask_2g;
+       uint32_t max_bw_supported_5g;
+       uint32_t ht_cap_info_5g;
+       uint32_t vht_cap_info_5g;
+       uint32_t vht_supp_mcs_5g;
+       uint32_t he_cap_info_5g;
+       uint32_t he_supp_mcs_5g;
+       uint32_t tx_chain_mask_5g;
+       uint32_t rx_chain_mask_5g;
+       uint32_t he_cap_phy_info_2g[WMI_MAX_HECAP_PHY_SIZE];
+       uint32_t he_cap_phy_info_5g[WMI_MAX_HECAP_PHY_SIZE];
+       struct wmi_ppe_threshold he_ppet2g;
+       struct wmi_ppe_threshold he_ppet5g;
+       uint32_t chainmask_table_id;
+       uint32_t lmac_id;
+       uint32_t he_cap_info_2g_ext;
+       uint32_t he_cap_info_5g_ext;
+       uint32_t he_cap_info_internal;
+       uint32_t wireless_modes;
+       uint32_t low_2ghz_chan_freq;
+       uint32_t high_2ghz_chan_freq;
+       uint32_t low_5ghz_chan_freq;
+       uint32_t high_5ghz_chan_freq;
+       uint32_t nss_ratio;
+} __packed;
+
+/* Wire-format per-PHY regulatory capabilities TLV; parsed into
+ * struct ath12k_hal_reg_capabilities_ext. */
+struct wmi_hal_reg_capabilities_ext {
+       uint32_t tlv_header;
+       uint32_t phy_id;
+       uint32_t eeprom_reg_domain;
+       uint32_t eeprom_reg_domain_ext;
+       uint32_t regcap1;
+       uint32_t regcap2;
+       uint32_t wireless_modes;
+       uint32_t low_2ghz_chan;
+       uint32_t high_2ghz_chan;
+       uint32_t low_5ghz_chan;
+       uint32_t high_5ghz_chan;
+} __packed;
+
+/* Count header preceding the per-PHY regulatory capability TLVs. */
+struct wmi_soc_hal_reg_capabilities {
+       uint32_t num_phy;
+} __packed;
+
+/* 2 word representation of MAC addr */
+struct wmi_mac_addr {
+       union {
+               uint8_t addr[6];
+               struct {
+                       uint32_t word0;
+                       uint32_t word1;
+               } __packed;
+       } __packed;
+} __packed;
+
+/* Capabilities of one direct DMA buffer ring; module_id is a value
+ * from enum wmi_direct_buffer_module. */
+struct wmi_dma_ring_capabilities {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t module_id;
+       uint32_t min_elem;
+       uint32_t min_buf_sz;
+       uint32_t min_buf_align;
+} __packed;
+
+/* Minimal (common) layout of the WMI ready event. */
+struct wmi_ready_event_min {
+       struct wmi_abi_version fw_abi_vers;
+       struct wmi_mac_addr mac_addr;
+       uint32_t status;
+       uint32_t num_dscp_table;
+       uint32_t num_extra_mac_addr;
+       uint32_t num_total_peers;
+       uint32_t num_extra_peers;
+} __packed;
+
+/* Full WMI ready event: minimal layout plus trailing fields. */
+struct wmi_ready_event {
+       struct wmi_ready_event_min ready_event_min;
+       uint32_t max_ast_index;
+       uint32_t pktlog_defs_checksum;
+} __packed;
+
+/* One 128-bit segment of the service-available bitmap
+ * (cf. enum wmi_tlv_service). */
+struct wmi_service_available_event {
+       uint32_t wmi_service_segment_offset;
+       uint32_t wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32];
+} __packed;
+
+/* Host-side arguments for creating a vdev (virtual device/interface);
+ * marshalled into struct wmi_vdev_create_cmd below. */
+struct vdev_create_params {
+       uint8_t if_id;
+       uint32_t type;
+       uint32_t subtype;
+       struct {
+               uint8_t tx;
+               uint8_t rx;
+       } chains[2];            /* tx/rx chain counts, one entry per band */
+       uint32_t pdev_id;
+       uint32_t mbssid_flags;
+       uint32_t mbssid_tx_vdev_id;
+};
+
+/* WMI command: create a vdev. */
+struct wmi_vdev_create_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t vdev_type;
+       uint32_t vdev_subtype;
+       struct wmi_mac_addr vdev_macaddr;
+       uint32_t num_cfg_txrx_streams;
+       uint32_t pdev_id;
+       uint32_t mbssid_flags;
+       uint32_t mbssid_tx_vdev_id;
+} __packed;
+
+/* Per-band supported tx/rx stream counts TLV (follows vdev create). */
+struct wmi_vdev_txrx_streams {
+       uint32_t tlv_header;
+       uint32_t band;
+       uint32_t supported_tx_streams;
+       uint32_t supported_rx_streams;
+} __packed;
+
+/* WMI command: delete a vdev. */
+struct wmi_vdev_delete_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+} __packed;
+
+/* WMI command: bring a vdev up with the given BSSID/association id. */
+struct wmi_vdev_up_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t vdev_assoc_id;
+       struct wmi_mac_addr vdev_bssid;
+       struct wmi_mac_addr tx_vdev_bssid;
+       uint32_t nontx_profile_idx;
+       uint32_t nontx_profile_cnt;
+} __packed;
+
+/* WMI command: stop a vdev. */
+struct wmi_vdev_stop_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+} __packed;
+
+/* WMI command: bring a vdev down. */
+struct wmi_vdev_down_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+} __packed;
+
+/* Flag bits for wmi_vdev_start_request_cmd.flags. */
+#define WMI_VDEV_START_HIDDEN_SSID  BIT(0)
+#define WMI_VDEV_START_PMF_ENABLED  BIT(1)
+#define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3)
+#define WMI_VDEV_START_HW_ENCRYPTION_DISABLED BIT(4)
+
+/* SSID in 32-bit-word wire format (up to 32 bytes of SSID data). */
+struct wmi_ssid {
+       uint32_t ssid_len;
+       uint32_t ssid[8];
+} __packed;
+
+#define ATH12K_VDEV_SETUP_TIMEOUT_HZ (1 * HZ)
+
+/* WMI command: start a vdev on a channel (channel TLV follows). */
+struct wmi_vdev_start_request_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t requestor_id;
+       uint32_t beacon_interval;
+       uint32_t dtim_period;
+       uint32_t flags;                 /* WMI_VDEV_START_* */
+       struct wmi_ssid ssid;
+       uint32_t bcn_tx_rate;
+       uint32_t bcn_txpower;
+       uint32_t num_noa_descriptors;
+       uint32_t disable_hw_ack;
+       uint32_t preferred_tx_streams;
+       uint32_t preferred_rx_streams;
+       uint32_t he_ops;
+       uint32_t cac_duration_ms;
+       uint32_t regdomain;
+       uint32_t min_data_rate;
+       uint32_t mbssid_flags;
+       uint32_t mbssid_tx_vdev_id;
+} __packed;
+
+#define MGMT_TX_DL_FRM_LEN                  64
+#define WMI_MAC_MAX_SSID_LENGTH              32
+/* Byte-oriented SSID representation (length-prefixed). */
+struct mac_ssid {
+       uint8_t length;
+       uint8_t mac_ssid[WMI_MAC_MAX_SSID_LENGTH];
+} __packed;
+
+/* P2P notice-of-absence descriptor. */
+struct wmi_p2p_noa_descriptor {
+       uint32_t type_count;
+       uint32_t duration;
+       uint32_t interval;
+       uint32_t start_time;
+};
+
+/* Host-side channel description; bitfields are host order and are
+ * translated into struct wmi_channel for the wire. */
+struct channel_param {
+       uint8_t chan_id;
+       uint8_t pwr;
+       uint32_t mhz;
+       uint32_t half_rate:1,
+           quarter_rate:1,
+           dfs_set:1,
+           dfs_set_cfreq2:1,
+           is_chan_passive:1,
+           allow_ht:1,
+           allow_vht:1,
+           allow_he:1,
+           set_agile:1,
+           psc_channel:1;
+       uint32_t phy_mode;
+       uint32_t cfreq1;
+       uint32_t cfreq2;
+       char   maxpower;
+       char   minpower;
+       char   maxregpower;
+       uint8_t  antennamax;
+       uint8_t  reg_class_id;
+} __packed;
+
+/* PHY operating modes as encoded by firmware.  MODE_MAX intentionally
+ * aliases MODE_UNKNOWN (both 24). */
+enum wmi_phy_mode {
+       MODE_11A        = 0,
+       MODE_11G        = 1,   /* 11b/g Mode */
+       MODE_11B        = 2,   /* 11b Mode */
+       MODE_11GONLY    = 3,   /* 11g only Mode */
+       MODE_11NA_HT20   = 4,
+       MODE_11NG_HT20   = 5,
+       MODE_11NA_HT40   = 6,
+       MODE_11NG_HT40   = 7,
+       MODE_11AC_VHT20 = 8,
+       MODE_11AC_VHT40 = 9,
+       MODE_11AC_VHT80 = 10,
+       MODE_11AC_VHT20_2G = 11,
+       MODE_11AC_VHT40_2G = 12,
+       MODE_11AC_VHT80_2G = 13,
+       MODE_11AC_VHT80_80 = 14,
+       MODE_11AC_VHT160 = 15,
+       MODE_11AX_HE20 = 16,
+       MODE_11AX_HE40 = 17,
+       MODE_11AX_HE80 = 18,
+       MODE_11AX_HE80_80 = 19,
+       MODE_11AX_HE160 = 20,
+       MODE_11AX_HE20_2G = 21,
+       MODE_11AX_HE40_2G = 22,
+       MODE_11AX_HE80_2G = 23,
+       MODE_UNKNOWN = 24,
+       MODE_MAX = 24
+};
+
+/*
+ * Return a human-readable name for a PHY mode, for debug/diagnostic
+ * output.  Returns "<unknown>" for MODE_UNKNOWN or any value outside
+ * the enum.
+ */
+static inline const char *qwz_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+       switch (mode) {
+       case MODE_11A:
+               return "11a";
+       case MODE_11G:
+               return "11g";
+       case MODE_11B:
+               return "11b";
+       case MODE_11GONLY:
+               return "11gonly";
+       case MODE_11NA_HT20:
+               return "11na-ht20";
+       case MODE_11NG_HT20:
+               return "11ng-ht20";
+       case MODE_11NA_HT40:
+               return "11na-ht40";
+       case MODE_11NG_HT40:
+               return "11ng-ht40";
+       case MODE_11AC_VHT20:
+               return "11ac-vht20";
+       case MODE_11AC_VHT40:
+               return "11ac-vht40";
+       case MODE_11AC_VHT80:
+               return "11ac-vht80";
+       case MODE_11AC_VHT160:
+               return "11ac-vht160";
+       case MODE_11AC_VHT80_80:
+               return "11ac-vht80+80";
+       case MODE_11AC_VHT20_2G:
+               return "11ac-vht20-2g";
+       case MODE_11AC_VHT40_2G:
+               return "11ac-vht40-2g";
+       case MODE_11AC_VHT80_2G:
+               return "11ac-vht80-2g";
+       case MODE_11AX_HE20:
+               return "11ax-he20";
+       case MODE_11AX_HE40:
+               return "11ax-he40";
+       case MODE_11AX_HE80:
+               return "11ax-he80";
+       case MODE_11AX_HE80_80:
+               return "11ax-he80+80";
+       case MODE_11AX_HE160:
+               return "11ax-he160";
+       case MODE_11AX_HE20_2G:
+               return "11ax-he20-2g";
+       case MODE_11AX_HE40_2G:
+               return "11ax-he40-2g";
+       case MODE_11AX_HE80_2G:
+               return "11ax-he80-2g";
+       case MODE_UNKNOWN:
+               /* skip */
+               break;
+
+               /* no default handler to allow compiler to check that the
+                * enum is fully handled
+                */
+       }
+
+       return "<unknown>";
+}
+
+/* Host-side channel description used when building vdev start
+ * requests; translated into the wire-format channel TLV. */
+struct wmi_channel_arg {
+       uint32_t freq;
+       uint32_t band_center_freq1;
+       uint32_t band_center_freq2;
+       bool passive;
+       bool allow_ibss;
+       bool allow_ht;
+       bool allow_vht;
+       bool ht40plus;
+       bool chan_radar;
+       bool freq2_radar;
+       bool allow_he;
+       uint32_t min_power;
+       uint32_t max_power;
+       uint32_t max_reg_power;
+       uint32_t max_antenna_gain;
+       enum wmi_phy_mode mode;
+};
+
+/* Host-side arguments for a vdev start request; marshalled into
+ * struct wmi_vdev_start_request_cmd. */
+struct wmi_vdev_start_req_arg {
+       uint32_t vdev_id;
+       struct wmi_channel_arg channel;
+       uint32_t bcn_intval;
+       uint32_t dtim_period;
+       uint8_t *ssid;
+       uint32_t ssid_len;
+       uint32_t bcn_tx_rate;
+       uint32_t bcn_tx_power;
+       bool disable_hw_ack;
+       bool hidden_ssid;
+       bool pmf_enabled;
+       uint32_t he_ops;
+       uint32_t cac_duration_ms;
+       uint32_t regdomain;
+       uint32_t pref_rx_streams;
+       uint32_t pref_tx_streams;
+       uint32_t num_noa_descriptors;
+       uint32_t min_data_rate;
+       uint32_t mbssid_flags;
+       uint32_t mbssid_tx_vdev_id;
+};
+
+/* Host-side arguments for peer creation. */
+struct peer_create_params {
+       uint8_t *peer_addr;
+       uint32_t peer_type;             /* enum wmi_peer_type */
+       uint32_t vdev_id;
+};
+
+/* Host-side arguments for peer deletion. */
+struct peer_delete_params {
+       uint8_t vdev_id;
+};
+
+/* Host-side arguments for flushing a peer's TID queues. */
+struct peer_flush_params {
+       uint32_t peer_tid_bitmap;
+       uint8_t vdev_id;
+};
+
+/* Host-side arguments for setting the pdev regulatory domain. */
+struct pdev_set_regdomain_params {
+       uint16_t current_rd_in_use;
+       uint16_t current_rd_2g;
+       uint16_t current_rd_5g;
+       uint32_t ctl_2g;
+       uint32_t ctl_5g;
+       uint8_t dfs_domain;
+       uint32_t pdev_id;
+};
+
+/* Host-side arguments for tearing down rx reorder queues. */
+struct rx_reorder_queue_remove_params {
+       uint8_t *peer_macaddr;
+       uint16_t vdev_id;
+       uint32_t peer_tid_bitmap;
+};
+
+/* Host-facing pdev identifiers (0xFF addresses the whole SoC). */
+#define WMI_HOST_PDEV_ID_SOC 0xFF
+#define WMI_HOST_PDEV_ID_0   0
+#define WMI_HOST_PDEV_ID_1   1
+#define WMI_HOST_PDEV_ID_2   2
+
+/* Firmware-facing pdev identifiers (0 addresses the SoC). */
+#define WMI_PDEV_ID_SOC         0
+#define WMI_PDEV_ID_1ST         1
+#define WMI_PDEV_ID_2ND         2
+#define WMI_PDEV_ID_3RD         3
+
+/* Freq units in MHz */
+/* Field masks for packed regulatory-rule words. */
+#define REG_RULE_START_FREQ                    0x0000ffff
+#define REG_RULE_END_FREQ                      0xffff0000
+#define REG_RULE_FLAGS                         0x0000ffff
+#define REG_RULE_MAX_BW                                0x0000ffff
+#define REG_RULE_REG_PWR                       0x00ff0000
+#define REG_RULE_ANT_GAIN                      0xff000000
+#define REG_RULE_PSD_INFO                      BIT(0)
+#define REG_RULE_PSD_EIRP                      0xff0000
+
+/* Beamformee/beamformer bits for the vdev TXBF parameter. */
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
+/* Byte indices into the HE PHY capability array used below. */
+#define HE_PHYCAP_BYTE_0       0
+#define HE_PHYCAP_BYTE_1       1
+#define HE_PHYCAP_BYTE_2       2
+#define HE_PHYCAP_BYTE_3       3
+#define HE_PHYCAP_BYTE_4       4
+
+#define HECAP_PHY_SU_BFER              BIT(7)
+#define HECAP_PHY_SU_BFEE              BIT(0)
+#define HECAP_PHY_MU_BFER              BIT(1)
+#define HECAP_PHY_UL_MUMIMO            BIT(6)
+#define HECAP_PHY_UL_MUOFDMA           BIT(7)
+
+/* Accessors extracting individual HE PHY capability bits from the
+ * per-byte capability array. */
+#define HECAP_PHY_SUBFMR_GET(hecap_phy) \
+       FIELD_GET(HECAP_PHY_SU_BFER, hecap_phy[HE_PHYCAP_BYTE_3])
+
+#define HECAP_PHY_SUBFME_GET(hecap_phy) \
+       FIELD_GET(HECAP_PHY_SU_BFEE, hecap_phy[HE_PHYCAP_BYTE_4])
+
+#define HECAP_PHY_MUBFMR_GET(hecap_phy) \
+       FIELD_GET(HECAP_PHY_MU_BFER, hecap_phy[HE_PHYCAP_BYTE_4])
+
+#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \
+       FIELD_GET(HECAP_PHY_UL_MUMIMO, hecap_phy[HE_PHYCAP_BYTE_2])
+
+#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \
+       FIELD_GET(HECAP_PHY_UL_MUOFDMA, hecap_phy[HE_PHYCAP_BYTE_2])
+
+/* HE mode bitmap bits. */
+#define HE_MODE_SU_TX_BFEE     BIT(0)
+#define HE_MODE_SU_TX_BFER     BIT(1)
+#define HE_MODE_MU_TX_BFEE     BIT(2)
+#define HE_MODE_MU_TX_BFER     BIT(3)
+#define HE_MODE_DL_OFDMA       BIT(4)
+#define HE_MODE_UL_OFDMA       BIT(5)
+#define HE_MODE_UL_MUMIMO      BIT(6)
+
+#define HE_DL_MUOFDMA_ENABLE   1
+#define HE_UL_MUOFDMA_ENABLE   1
+#define HE_DL_MUMIMO_ENABLE    1
+#define HE_UL_MUMIMO_ENABLE    1
+#define HE_MU_BFEE_ENABLE      1
+#define HE_SU_BFEE_ENABLE      1
+#define HE_MU_BFER_ENABLE      1
+#define HE_SU_BFER_ENABLE      1
+
+#define HE_VHT_SOUNDING_MODE_ENABLE            1
+#define HE_SU_MU_SOUNDING_MODE_ENABLE          1
+#define HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE   1
+
+/* HE or VHT Sounding */
+#define HE_VHT_SOUNDING_MODE           BIT(0)
+/* SU or MU Sounding */
+#define HE_SU_MU_SOUNDING_MODE         BIT(2)
+/* Trig or Non-Trig Sounding */
+#define HE_TRIG_NONTRIG_SOUNDING_MODE  BIT(3)
+
+#define WMI_TXBF_STS_CAP_OFFSET_LSB    4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK   0x70
+#define WMI_BF_SOUND_DIM_OFFSET_LSB    8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK   0x700
+
+/* Generic pdev parameter id/value pair. */
+struct pdev_params {
+       uint32_t param_id;
+       uint32_t param_value;
+};
+
+/* Peer type carried in peer-create commands. */
+enum wmi_peer_type {
+       WMI_PEER_TYPE_DEFAULT = 0,
+       WMI_PEER_TYPE_BSS = 1,
+       WMI_PEER_TYPE_TDLS = 2,
+};
+
+/* WMI command: create a peer on a vdev. */
+struct wmi_peer_create_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t peer_type;             /* enum wmi_peer_type */
+} __packed;
+
+/* WMI command: delete a peer. */
+struct wmi_peer_delete_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+/* WMI command: set up an rx reorder queue for a peer TID; the queue
+ * address is passed as a lo/hi 32-bit pair. */
+struct wmi_peer_reorder_queue_setup_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t tid;
+       uint32_t queue_ptr_lo;
+       uint32_t queue_ptr_hi;
+       uint32_t queue_no;
+       uint32_t ba_window_size_valid;
+       uint32_t ba_window_size;
+} __packed;
+
+/* WMI command: tear down rx reorder queues for the TIDs in tid_mask. */
+struct wmi_peer_reorder_queue_remove_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t tid_mask;
+} __packed;
+
+/* Host-side GPIO configuration arguments. */
+struct gpio_config_params {
+       uint32_t gpio_num;
+       uint32_t input;
+       uint32_t pull_type;
+       uint32_t intr_mode;
+};
+
+enum wmi_gpio_type {
+       WMI_GPIO_PULL_NONE,
+       WMI_GPIO_PULL_UP,
+       WMI_GPIO_PULL_DOWN
+};
+
+enum wmi_gpio_intr_type {
+       WMI_GPIO_INTTYPE_DISABLE,
+       WMI_GPIO_INTTYPE_RISING_EDGE,
+       WMI_GPIO_INTTYPE_FALLING_EDGE,
+       WMI_GPIO_INTTYPE_BOTH_EDGE,
+       WMI_GPIO_INTTYPE_LEVEL_LOW,
+       WMI_GPIO_INTTYPE_LEVEL_HIGH
+};
+
+/* Request type for BSS channel-info surveys; READ_CLEAR also resets
+ * the counters after the read. */
+enum wmi_bss_chan_info_req_type {
+       WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+       WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+/* WMI command: configure a GPIO pin. */
+struct wmi_gpio_config_cmd_param {
+       uint32_t tlv_header;
+       uint32_t gpio_num;
+       uint32_t input;
+       uint32_t pull_type;             /* enum wmi_gpio_type */
+       uint32_t intr_mode;             /* enum wmi_gpio_intr_type */
+};
+
+/* Host-side GPIO output arguments. */
+struct gpio_output_params {
+       uint32_t gpio_num;
+       uint32_t set;
+};
+
+/* WMI command: drive a GPIO output. */
+struct wmi_gpio_output_cmd_param {
+       uint32_t tlv_header;
+       uint32_t gpio_num;
+       uint32_t set;
+};
+
+/* Host-side firmware-test arguments. */
+struct set_fwtest_params {
+       uint32_t arg;
+       uint32_t value;
+};
+
+/* WMI command: set a firmware-test parameter. */
+struct wmi_fwtest_set_param_cmd_param {
+       uint32_t tlv_header;
+       uint32_t param_id;
+       uint32_t param_value;
+};
+
+/* WMI command: set a pdev parameter. */
+struct wmi_pdev_set_param_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t param_id;
+       uint32_t param_value;
+} __packed;
+
+/* WMI command: set station powersave mode on a vdev. */
+struct wmi_pdev_set_ps_mode_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t sta_ps_mode;           /* enum wmi_sta_ps_mode */
+} __packed;
+
+/* WMI command: suspend a pdev. */
+struct wmi_pdev_suspend_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t suspend_opt;
+} __packed;
+
+/* WMI command: resume a suspended pdev. */
+struct wmi_pdev_resume_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+} __packed;
+
+/* WMI command: request BSS channel-info survey counters. */
+struct wmi_pdev_bss_chan_info_req_cmd {
+       uint32_t tlv_header;
+       /* ref wmi_bss_chan_info_req_type */
+       uint32_t req_type;
+       uint32_t pdev_id;
+} __packed;
+
+/* WMI command: set an AP-side powersave parameter for a peer. */
+struct wmi_ap_ps_peer_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t param;
+       uint32_t value;
+} __packed;
+
+/* WMI command: set a station powersave parameter on a vdev. */
+struct wmi_sta_powersave_param_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t param;
+       uint32_t value;
+} __packed;
+
+/* WMI command: program the regulatory domain on a pdev. */
+struct wmi_pdev_set_regdomain_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t reg_domain;
+       uint32_t reg_domain_2g;
+       uint32_t reg_domain_5g;
+       uint32_t conformance_test_limit_2g;
+       uint32_t conformance_test_limit_5g;
+       uint32_t dfs_domain;
+} __packed;
+
+/* WMI command: set a per-peer parameter. */
+struct wmi_peer_set_param_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t param_id;
+       uint32_t param_value;
+} __packed;
+
+/* WMI command: flush the given TID queues of a peer. */
+struct wmi_peer_flush_tids_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t peer_tid_bitmap;
+} __packed;
+
+/* WMI command: enable DFS phyerr offload on a pdev. */
+struct wmi_dfs_phyerr_offload_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+} __packed;
+
+/* WMI command: beacon offload control for a vdev. */
+struct wmi_bcn_offload_ctrl_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t bcn_ctrl_op;
+} __packed;
+
+/* Adaptive dwell-time modes used in scan requests. */
+enum scan_dwelltime_adaptive_mode {
+       SCAN_DWELL_MODE_DEFAULT = 0,
+       SCAN_DWELL_MODE_CONSERVATIVE = 1,
+       SCAN_DWELL_MODE_MODERATE = 2,
+       SCAN_DWELL_MODE_AGGRESSIVE = 3,
+       SCAN_DWELL_MODE_STATIC = 4
+};
+
+#define WLAN_SSID_MAX_LEN 32
+
+/* Length-tagged pointer to an information-element blob. */
+struct element_info {
+       uint32_t len;
+       uint8_t *ptr;
+};
+
+/* Length-prefixed SSID buffer. */
+struct wlan_ssid {
+       uint8_t length;
+       uint8_t ssid[WLAN_SSID_MAX_LEN];
+};
+
+#define WMI_IE_BITMAP_SIZE             8
+
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+/* Values lower than this may be refused by some firmware revisions with a scan
+ * completion with a timedout reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+       WMI_SCAN_PRIORITY_VERY_LOW = 0,
+       WMI_SCAN_PRIORITY_LOW,
+       WMI_SCAN_PRIORITY_MEDIUM,
+       WMI_SCAN_PRIORITY_HIGH,
+       WMI_SCAN_PRIORITY_VERY_HIGH,
+       WMI_SCAN_PRIORITY_COUNT   /* number of priorities supported */
+};
+
+/* Scan event bits; also used as the notify mask in scan requests. */
+enum wmi_scan_event_type {
+       WMI_SCAN_EVENT_STARTED              = BIT(0),
+       WMI_SCAN_EVENT_COMPLETED            = BIT(1),
+       WMI_SCAN_EVENT_BSS_CHANNEL          = BIT(2),
+       WMI_SCAN_EVENT_FOREIGN_CHAN         = BIT(3),
+       WMI_SCAN_EVENT_DEQUEUED             = BIT(4),
+       /* possibly by high-prio scan */
+       WMI_SCAN_EVENT_PREEMPTED            = BIT(5),
+       WMI_SCAN_EVENT_START_FAILED         = BIT(6),
+       WMI_SCAN_EVENT_RESTARTED            = BIT(7),
+       WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT    = BIT(8),
+       WMI_SCAN_EVENT_SUSPENDED            = BIT(9),
+       WMI_SCAN_EVENT_RESUMED              = BIT(10),
+       WMI_SCAN_EVENT_MAX                  = BIT(15),
+}; 
+
+/* Reason codes delivered with WMI_SCAN_EVENT_COMPLETED. */
+enum wmi_scan_completion_reason {
+       WMI_SCAN_REASON_COMPLETED,
+       WMI_SCAN_REASON_CANCELLED,
+       WMI_SCAN_REASON_PREEMPTED,
+       WMI_SCAN_REASON_TIMEDOUT,
+       WMI_SCAN_REASON_INTERNAL_FAILURE,
+       WMI_SCAN_REASON_MAX,
+};
+
+/* WMI command: start a scan.  Channel list, SSIDs, BSSIDs and extra
+ * IEs are carried in TLVs following this fixed part. */
+struct  wmi_start_scan_cmd {
+       uint32_t tlv_header;
+       uint32_t scan_id;
+       uint32_t scan_req_id;
+       uint32_t vdev_id;
+       uint32_t scan_priority;
+       uint32_t notify_scan_events;
+       uint32_t dwell_time_active;
+       uint32_t dwell_time_passive;
+       uint32_t min_rest_time;
+       uint32_t max_rest_time;
+       uint32_t repeat_probe_time;
+       uint32_t probe_spacing_time;
+       uint32_t idle_time;
+       uint32_t max_scan_time;
+       uint32_t probe_delay;
+       uint32_t scan_ctrl_flags;       /* WMI_SCAN_* flags below */
+       uint32_t burst_duration;
+       uint32_t num_chan;
+       uint32_t num_bssid;
+       uint32_t num_ssids;
+       uint32_t ie_len;
+       uint32_t n_probes;
+       struct wmi_mac_addr mac_addr;
+       struct wmi_mac_addr mac_mask;
+       uint32_t ie_bitmap[WMI_IE_BITMAP_SIZE];
+       uint32_t num_vendor_oui;
+       uint32_t scan_ctrl_flags_ext;
+       uint32_t dwell_time_active_2g;
+       uint32_t dwell_time_active_6g;
+       uint32_t dwell_time_passive_6g;
+       uint32_t scan_start_offset;
+} __packed;
+
+/* Bits for wmi_start_scan_cmd.scan_ctrl_flags. */
+#define WMI_SCAN_FLAG_PASSIVE        0x1
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+#define WMI_SCAN_ADD_CCK_RATES       0x4
+#define WMI_SCAN_ADD_OFDM_RATES      0x8
+#define WMI_SCAN_CHAN_STAT_EVENT     0x10
+#define WMI_SCAN_FILTER_PROBE_REQ    0x20
+#define WMI_SCAN_BYPASS_DFS_CHN      0x40
+#define WMI_SCAN_CONTINUE_ON_ERROR   0x80
+#define WMI_SCAN_FILTER_PROMISCUOS   0x100
+#define WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS 0x200
+#define WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ  0x400
+#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ   0x800
+#define WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ   0x1000
+#define WMI_SCAN_OFFCHAN_MGMT_TX    0x2000
+#define WMI_SCAN_OFFCHAN_DATA_TX    0x4000
+#define WMI_SCAN_CAPTURE_PHY_ERROR  0x8000
+#define WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN 0x10000
+#define WMI_SCAN_FLAG_HALF_RATE_SUPPORT      0x20000
+#define WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT   0x40000
+#define WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ 0x80000
+#define WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ 0x100000
+
+/* Adaptive dwell mode is packed into bits 23:21 of scan_ctrl_flags. */
+#define WMI_SCAN_DWELL_MODE_MASK 0x00E00000
+#define WMI_SCAN_DWELL_MODE_SHIFT        21
+#define WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE   0x00000800
+
+#define WMI_SCAN_CONFIG_PER_CHANNEL_MASK       GENMASK(19, 0)
+#define WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND        BIT(20)
+
+enum {
+       WMI_SCAN_DWELL_MODE_DEFAULT      = 0,
+       WMI_SCAN_DWELL_MODE_CONSERVATIVE = 1,
+       WMI_SCAN_DWELL_MODE_MODERATE     = 2,
+       WMI_SCAN_DWELL_MODE_AGGRESSIVE   = 3,
+       WMI_SCAN_DWELL_MODE_STATIC       = 4,
+};
+
+/* OR the dwell mode into an existing scan_ctrl_flags value. */
+#define WMI_SCAN_SET_DWELL_MODE(flag, mode) \
+       ((flag) |= (((mode) << WMI_SCAN_DWELL_MODE_SHIFT) & \
+                   WMI_SCAN_DWELL_MODE_MASK))
+
+/* 6 GHz scan hint: short SSID on a given frequency. */
+struct hint_short_ssid {
+       uint32_t freq_flags;
+       uint32_t short_ssid;
+};
+
+/* 6 GHz scan hint: BSSID on a given frequency. */
+struct hint_bssid {
+       uint32_t freq_flags;
+       struct wmi_mac_addr bssid;
+};
+
+/* Host-side scan request; marshalled into wmi_start_scan_cmd and its
+ * trailing TLVs.  The anonymous unions let callers address the event
+ * and flag bitfields either individually or as whole words. */
+struct scan_req_params {
+       uint32_t scan_id;
+       uint32_t scan_req_id;
+       uint32_t vdev_id;
+       uint32_t pdev_id;
+       enum wmi_scan_priority scan_priority;
+       union {
+               struct {
+                       uint32_t scan_ev_started:1,
+                           scan_ev_completed:1,
+                           scan_ev_bss_chan:1,
+                           scan_ev_foreign_chan:1,
+                           scan_ev_dequeued:1,
+                           scan_ev_preempted:1,
+                           scan_ev_start_failed:1,
+                           scan_ev_restarted:1,
+                           scan_ev_foreign_chn_exit:1,
+                           scan_ev_invalid:1,
+                           scan_ev_gpio_timeout:1,
+                           scan_ev_suspended:1,
+                           scan_ev_resumed:1;
+               };
+               uint32_t scan_events;
+       };
+       uint32_t scan_ctrl_flags_ext;
+       uint32_t dwell_time_active;
+       uint32_t dwell_time_active_2g;
+       uint32_t dwell_time_passive;
+       uint32_t dwell_time_active_6g;
+       uint32_t dwell_time_passive_6g;
+       uint32_t min_rest_time;
+       uint32_t max_rest_time;
+       uint32_t repeat_probe_time;
+       uint32_t probe_spacing_time;
+       uint32_t idle_time;
+       uint32_t max_scan_time;
+       uint32_t probe_delay;
+       union {
+               struct {
+                       uint32_t scan_f_passive:1,
+                           scan_f_bcast_probe:1,
+                           scan_f_cck_rates:1,
+                           scan_f_ofdm_rates:1,
+                           scan_f_chan_stat_evnt:1,
+                           scan_f_filter_prb_req:1,
+                           scan_f_bypass_dfs_chn:1,
+                           scan_f_continue_on_err:1,
+                           scan_f_offchan_mgmt_tx:1,
+                           scan_f_offchan_data_tx:1,
+                           scan_f_promisc_mode:1,
+                           scan_f_capture_phy_err:1,
+                           scan_f_strict_passive_pch:1,
+                           scan_f_half_rate:1,
+                           scan_f_quarter_rate:1,
+                           scan_f_force_active_dfs_chn:1,
+                           scan_f_add_tpc_ie_in_probe:1,
+                           scan_f_add_ds_ie_in_probe:1,
+                           scan_f_add_spoofed_mac_in_probe:1,
+                           scan_f_add_rand_seq_in_probe:1,
+                           scan_f_en_ie_whitelist_in_probe:1,
+                           scan_f_forced:1,
+                           scan_f_2ghz:1,
+                           scan_f_5ghz:1,
+                           scan_f_80mhz:1;
+               };
+               uint32_t scan_flags;
+       };
+       enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
+       uint32_t burst_duration;
+       uint32_t num_chan;
+       uint32_t num_bssid;
+       uint32_t num_ssids;
+       uint32_t n_probes;
+       uint32_t *chan_list;
+       uint32_t notify_scan_events;
+       struct wlan_ssid ssid[WLAN_SCAN_PARAMS_MAX_SSID];
+       struct wmi_mac_addr bssid_list[WLAN_SCAN_PARAMS_MAX_BSSID];
+       struct element_info extraie;
+       struct element_info htcap;
+       struct element_info vhtcap;
+       uint32_t num_hint_s_ssid;
+       uint32_t num_hint_bssid;
+       struct hint_short_ssid hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID];
+       struct hint_bssid hint_bssid[WLAN_SCAN_MAX_HINT_BSSID];
+       struct wmi_mac_addr mac_addr;
+       struct wmi_mac_addr mac_mask;
+};
+
+/* SSID argument (pointer plus length). */
+struct wmi_ssid_arg {
+       int len;
+       const uint8_t *ssid;
+};
+
+/* BSSID argument. */
+struct wmi_bssid_arg {
+       const uint8_t *bssid;
+};
+
+/* Alternative host-side scan request with inline channel/SSID/BSSID
+ * storage. */
+struct wmi_start_scan_arg {
+       uint32_t scan_id;
+       uint32_t scan_req_id;
+       uint32_t vdev_id;
+       uint32_t scan_priority;
+       uint32_t notify_scan_events;
+       uint32_t dwell_time_active;
+       uint32_t dwell_time_passive;
+       uint32_t min_rest_time;
+       uint32_t max_rest_time;
+       uint32_t repeat_probe_time;
+       uint32_t probe_spacing_time;
+       uint32_t idle_time;
+       uint32_t max_scan_time;
+       uint32_t probe_delay;
+       uint32_t scan_ctrl_flags;
+
+       uint32_t ie_len;
+       uint32_t n_channels;
+       uint32_t n_ssids;
+       uint32_t n_bssids;
+
+       uint8_t ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+       uint32_t channels[64];
+       struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+       struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
+
+/* Scan-stop request scope selectors. */
+#define WMI_SCAN_STOP_ONE       0x00000000
+#define WMI_SCN_STOP_VAP_ALL    0x01000000
+#define WMI_SCAN_STOP_ALL       0x04000000
+
+/* Prefix 0xA000 indicates that the scan request
+ * is trigger by HOST
+ */
+#define ATH12K_SCAN_ID          0xA000
+
+/* Cancel a single scan, all scans of a vdev, or all scans of a pdev. */
+enum scan_cancel_req_type {
+       WLAN_SCAN_CANCEL_SINGLE = 1,
+       WLAN_SCAN_CANCEL_VDEV_ALL,
+       WLAN_SCAN_CANCEL_PDEV_ALL,
+};
+
+/* Host-side arguments for cancelling a scan. */
+struct scan_cancel_param {
+       uint32_t requester;
+       uint32_t scan_id;
+       enum scan_cancel_req_type req_type;
+       uint32_t vdev_id;
+       uint32_t pdev_id;
+};
+
+/* WMI command: transmit a host-built beacon; the beacon buffer is
+ * referenced by a DMA address split into lo/hi words. */
+struct  wmi_bcn_send_from_host_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t data_len;
+       union {
+               uint32_t frag_ptr;
+               uint32_t frag_ptr_lo;
+       };
+       uint32_t frame_ctrl;
+       uint32_t dtim_flag;
+       uint32_t bcn_antenna;
+       uint32_t frag_ptr_hi;
+};
+
+/* Field masks for wmi_channel.info. */
+#define WMI_CHAN_INFO_MODE             GENMASK(5, 0)
+#define WMI_CHAN_INFO_HT40_PLUS                BIT(6)
+#define WMI_CHAN_INFO_PASSIVE          BIT(7)
+#define WMI_CHAN_INFO_ADHOC_ALLOWED    BIT(8)
+#define WMI_CHAN_INFO_AP_DISABLED      BIT(9)
+#define WMI_CHAN_INFO_DFS              BIT(10)
+#define WMI_CHAN_INFO_ALLOW_HT         BIT(11)
+#define WMI_CHAN_INFO_ALLOW_VHT                BIT(12)
+#define WMI_CHAN_INFO_CHAN_CHANGE_CAUSE_CSA    BIT(13)
+#define WMI_CHAN_INFO_HALF_RATE                BIT(14)
+#define WMI_CHAN_INFO_QUARTER_RATE     BIT(15)
+#define WMI_CHAN_INFO_DFS_FREQ2                BIT(16)
+#define WMI_CHAN_INFO_ALLOW_HE         BIT(17)
+#define WMI_CHAN_INFO_PSC              BIT(18)
+
+/* Field masks for wmi_channel.reg_info_1. */
+#define WMI_CHAN_REG_INFO1_MIN_PWR     GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO1_MAX_PWR     GENMASK(15, 8)
+#define WMI_CHAN_REG_INFO1_MAX_REG_PWR GENMASK(23, 16)
+#define WMI_CHAN_REG_INFO1_REG_CLS     GENMASK(31, 24)
+
+/* Field masks for wmi_channel.reg_info_2. */
+#define WMI_CHAN_REG_INFO2_ANT_MAX     GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO2_MAX_TX_PWR  GENMASK(15, 8)
+
+/* Wire-format channel TLV; info/reg_info words are packed using the
+ * masks above. */
+struct wmi_channel {
+       uint32_t tlv_header;
+       uint32_t mhz;
+       uint32_t band_center_freq1;
+       uint32_t band_center_freq2;
+       uint32_t info;
+       uint32_t reg_info_1;
+       uint32_t reg_info_2;
+} __packed;
+
+/* Host-side arguments for transmitting a management frame. */
+struct wmi_mgmt_params {
+       void *tx_frame;
+       uint16_t frm_len;
+       uint8_t vdev_id;
+       uint16_t chanfreq;
+       void *pdata;
+       uint16_t desc_id;
+       uint8_t *macaddr;
+};
+
+enum wmi_sta_ps_mode {
+       WMI_STA_PS_MODE_DISABLED = 0,
+       WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+#define WMI_SMPS_MASK_LOWER_16BITS 0xFF
+#define WMI_SMPS_MASK_UPPER_3BITS 0x7
+#define WMI_SMPS_PARAM_VALUE_SHIFT 29
+
+#define ATH12K_WMI_FW_HANG_ASSERT_TYPE 1
+#define ATH12K_WMI_FW_HANG_DELAY 0
+
+/* type, 0:unused 1: ASSERT 2: not respond detect command
+ * delay_time_ms, the simulate will delay time
+ */
+
+/* WMI command: force a simulated firmware hang (debug aid). */
+struct wmi_force_fw_hang_cmd {
+       uint32_t tlv_header;
+       uint32_t type;
+       uint32_t delay_time_ms;
+};
+
+/* WMI command: set a vdev parameter. */
+struct wmi_vdev_set_param_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t param_id;
+       uint32_t param_value;
+} __packed;
+
+/* Bitmap of statistics classes that can be requested. */
+enum wmi_stats_id {
+       WMI_REQUEST_PEER_STAT                   = BIT(0),
+       WMI_REQUEST_AP_STAT                     = BIT(1),
+       WMI_REQUEST_PDEV_STAT                   = BIT(2),
+       WMI_REQUEST_VDEV_STAT                   = BIT(3),
+       WMI_REQUEST_BCNFLT_STAT                 = BIT(4),
+       WMI_REQUEST_VDEV_RATE_STAT              = BIT(5),
+       WMI_REQUEST_INST_STAT                   = BIT(6),
+       WMI_REQUEST_MIB_STAT                    = BIT(7),
+       WMI_REQUEST_RSSI_PER_CHAIN_STAT         = BIT(8),
+       WMI_REQUEST_CONGESTION_STAT             = BIT(9),
+       WMI_REQUEST_PEER_EXTD_STAT              = BIT(10),
+       WMI_REQUEST_BCN_STAT                    = BIT(11),
+       WMI_REQUEST_BCN_STAT_RESET              = BIT(12),
+       WMI_REQUEST_PEER_EXTD2_STAT             = BIT(13),
+};
+
+/* WMI command: request statistics from firmware. */
+struct wmi_request_stats_cmd {
+       uint32_t tlv_header;
+       enum wmi_stats_id stats_id;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t pdev_id;
+} __packed;
+
+/* WMI command: read the pdev temperature sensor. */
+struct wmi_get_pdev_temperature_cmd {
+       uint32_t tlv_header;
+       uint32_t param;
+       uint32_t pdev_id;
+} __packed;
+
+/* Segment header for factory-test-mode (FTM) messages. */
+struct wmi_ftm_seg_hdr {
+       uint32_t len;
+       uint32_t msgref;
+       uint32_t segmentinfo;
+       uint32_t pdev_id;
+} __packed;
+
+/* FTM command: segment header followed by variable-length payload. */
+struct wmi_ftm_cmd {
+       uint32_t tlv_header;
+       struct wmi_ftm_seg_hdr seg_hdr;
+       uint8_t data[];
+} __packed;
+
+/* FTM event: segment header followed by variable-length payload. */
+struct wmi_ftm_event_msg {
+       struct wmi_ftm_seg_hdr seg_hdr;
+       uint8_t data[];
+} __packed;
+
+#define WMI_BEACON_TX_BUFFER_SIZE      512
+
+/* Shifts for packing EMA template indices into ema_params. */
+#define WMI_EMA_TMPL_IDX_SHIFT            8
+#define WMI_EMA_FIRST_TMPL_SHIFT          16
+#define WMI_EMA_LAST_TMPL_SHIFT           24
+
+/* WMI command: install a beacon template; offsets locate IEs
+ * (TIM, CSA, MBSSID, ...) inside the template buffer. */
+struct wmi_bcn_tmpl_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t tim_ie_offset;
+       uint32_t buf_len;
+       uint32_t csa_switch_count_offset;
+       uint32_t ext_csa_switch_count_offset;
+       uint32_t csa_event_bitmap;
+       uint32_t mbssid_ie_offset;
+       uint32_t esp_ie_offset;
+       uint32_t csc_switch_count_offset;
+       uint32_t csc_event_bitmap;
+       uint32_t mu_edca_ie_offset;
+       uint32_t feature_enable_bitmap;
+       uint32_t ema_params;
+} __packed;
+
+/* 64-bit key sequence counter as a lo/hi word pair. */
+struct wmi_key_seq_counter {
+       uint32_t key_seq_counter_l;
+       uint32_t key_seq_counter_h;
+} __packed;
+
+/* WMI command: install a cipher key for a peer. */
+struct wmi_vdev_install_key_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t key_idx;
+       uint32_t key_flags;
+       uint32_t key_cipher;
+       struct wmi_key_seq_counter key_rsc_counter;
+       struct wmi_key_seq_counter key_global_rsc_counter;
+       struct wmi_key_seq_counter key_tsc_counter;
+       uint8_t wpi_key_rsc_counter[16];
+       uint8_t wpi_key_tsc_counter[16];
+       uint32_t key_len;
+       uint32_t key_txmic_len;
+       uint32_t key_rxmic_len;
+       uint32_t is_group_key_id_valid;
+       uint32_t group_key_id;
+
+       /* Followed by key_data containing key followed by
+        * tx mic and then rx mic
+        */
+} __packed;
+
+/* Host-side arguments for key installation. */
+struct wmi_vdev_install_key_arg {
+       uint32_t vdev_id;
+       const uint8_t *macaddr;
+       uint32_t key_idx;
+       uint32_t key_flags;
+       uint32_t key_cipher;
+       uint32_t key_len;
+       uint32_t key_txmic_len;
+       uint32_t key_rxmic_len;
+       uint64_t key_rsc_counter;
+       const void *key_data;
+};
+
+#define WMI_MAX_SUPPORTED_RATES                        128
+#define WMI_HOST_MAX_HECAP_PHY_SIZE            3
+#define WMI_HOST_MAX_HE_RATE_SET               3
+/* Indices into the per-bandwidth HE MCS/NSS rate sets. */
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80          0
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_160         1
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80       2
+
+/* Counted list of legacy/HT rates. */
+struct wmi_rate_set_arg {
+       uint32_t num_rates;
+       uint8_t rates[WMI_MAX_SUPPORTED_RATES];
+};
+struct peer_assoc_params {
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t vdev_id;
+       uint32_t peer_new_assoc;
+       uint32_t peer_associd;
+       uint32_t peer_flags;
+       uint32_t peer_caps;
+       uint32_t peer_listen_intval;
+       uint32_t peer_ht_caps;
+       uint32_t peer_max_mpdu;
+       uint32_t peer_mpdu_density;
+       uint32_t peer_rate_caps;
+       uint32_t peer_nss;
+       uint32_t peer_vht_caps;
+       uint32_t peer_phymode;
+       uint32_t peer_ht_info[2];
+       struct wmi_rate_set_arg peer_legacy_rates;
+       struct wmi_rate_set_arg peer_ht_rates;
+       uint32_t rx_max_rate;
+       uint32_t rx_mcs_set;
+       uint32_t tx_max_rate;
+       uint32_t tx_mcs_set;
+       uint8_t vht_capable;
+       uint8_t min_data_rate;
+       uint32_t tx_max_mcs_nss;
+       uint32_t peer_bw_rxnss_override;
+       bool is_pmf_enabled;
+       bool is_wme_set;
+       bool qos_flag;
+       bool apsd_flag;
+       bool ht_flag;
+       bool bw_40;
+       bool bw_80;
+       bool bw_160;
+       bool stbc_flag;
+       bool ldpc_flag;
+       bool static_mimops_flag;
+       bool dynamic_mimops_flag;
+       bool spatial_mux_flag;
+       bool vht_flag;
+       bool vht_ng_flag;
+       bool need_ptk_4_way;
+       bool need_gtk_2_way;
+       bool auth_flag;
+       bool safe_mode_enabled;
+       bool amsdu_disable;
+       /* Use common structure */
+       uint8_t peer_mac[IEEE80211_ADDR_LEN];
+
+       bool he_flag;
+       uint32_t peer_he_cap_macinfo[2];
+       uint32_t peer_he_cap_macinfo_internal;
+       uint32_t peer_he_caps_6ghz;
+       uint32_t peer_he_ops;
+       uint32_t peer_he_cap_phyinfo[WMI_HOST_MAX_HECAP_PHY_SIZE];
+       uint32_t peer_he_mcs_count;
+       uint32_t peer_he_rx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+       uint32_t peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+       bool twt_responder;
+       bool twt_requester;
+       bool is_assoc;
+       struct ath12k_ppe_threshold peer_ppet;
+};
+
+struct  wmi_peer_assoc_complete_cmd {
+       uint32_t tlv_header;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t vdev_id;
+       uint32_t peer_new_assoc;
+       uint32_t peer_associd;
+       uint32_t peer_flags;
+       uint32_t peer_caps;
+       uint32_t peer_listen_intval;
+       uint32_t peer_ht_caps;
+       uint32_t peer_max_mpdu;
+       uint32_t peer_mpdu_density;
+       uint32_t peer_rate_caps;
+       uint32_t peer_nss;
+       uint32_t peer_vht_caps;
+       uint32_t peer_phymode;
+       uint32_t peer_ht_info[2];
+       uint32_t num_peer_legacy_rates;
+       uint32_t num_peer_ht_rates;
+       uint32_t peer_bw_rxnss_override;
+       struct  wmi_ppe_threshold peer_ppet;
+       uint32_t peer_he_cap_info;
+       uint32_t peer_he_ops;
+       uint32_t peer_he_cap_phy[WMI_MAX_HECAP_PHY_SIZE];
+       uint32_t peer_he_mcs;
+       uint32_t peer_he_cap_info_ext;
+       uint32_t peer_he_cap_info_internal;
+       uint32_t min_data_rate;
+       uint32_t peer_he_caps_6ghz;
+} __packed;
+
+/*
+ * WMI_STOP_SCAN_CMDID parameter structure (TLV payload).
+ * Marked __packed for consistency with the other WMI command structs
+ * in this file (layout is unchanged since all members are uint32_t).
+ */
+struct wmi_stop_scan_cmd {
+	uint32_t tlv_header;
+	uint32_t requestor;
+	uint32_t scan_id;
+	uint32_t req_type;	/* WMI_SCAN_STOP_ONE/VAP_ALL/ALL semantics */
+	uint32_t vdev_id;
+	uint32_t pdev_id;
+} __packed;
+
+/*
+ * Host-side argument block for sending a scan channel list.
+ * ch_param is a C99 flexible array member; allocate
+ * sizeof(struct scan_chan_list_params) + nallchans * sizeof(ch_param[0]).
+ */
+struct scan_chan_list_params {
+	uint32_t pdev_id;
+	uint16_t nallchans;	/* number of entries in ch_param[] */
+	struct channel_param ch_param[];
+};
+
+struct wmi_scan_chan_list_cmd {
+       uint32_t tlv_header;
+       uint32_t num_scan_chans;
+       uint32_t flags;
+       uint32_t pdev_id;
+} __packed;
+
+struct wmi_scan_prob_req_oui_cmd {
+       uint32_t tlv_header;
+       uint32_t prob_req_oui;
+}  __packed;
+
+#define WMI_MGMT_SEND_DOWNLD_LEN       64
+
+#define WMI_TX_PARAMS_DWORD0_POWER             GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD0_MCS_MASK          GENMASK(19, 8)
+#define WMI_TX_PARAMS_DWORD0_NSS_MASK          GENMASK(27, 20)
+#define WMI_TX_PARAMS_DWORD0_RETRY_LIMIT       GENMASK(31, 28)
+
+#define WMI_TX_PARAMS_DWORD1_CHAIN_MASK                GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD1_BW_MASK           GENMASK(14, 8)
+#define WMI_TX_PARAMS_DWORD1_PREAMBLE_TYPE     GENMASK(19, 15)
+#define WMI_TX_PARAMS_DWORD1_FRAME_TYPE                BIT(20)
+#define WMI_TX_PARAMS_DWORD1_RSVD              GENMASK(31, 21)
+
+struct wmi_mgmt_send_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t desc_id;
+       uint32_t chanfreq;
+       uint32_t paddr_lo;
+       uint32_t paddr_hi;
+       uint32_t frame_len;
+       uint32_t buf_len;
+       uint32_t tx_params_valid;
+
+       /*
+        * Followed by struct wmi_tlv and buf_len bytes of frame data with
+        * buf_len <= WMI_MGMT_SEND_DOWNLD_LEN, which may be exceeded by
+        * frame_len. The full frame is mapped at paddr_lo/hi.
+        * Presumably the idea is that small frames can skip the extra DMA
+        * transfer of frame data after the command has been transferred.
+        */
+} __packed;
+
+/*
+ * WMI_STA_POWERSAVE_MODE_CMDID: enable/disable STA power save on a vdev.
+ * Marked __packed for consistency with the other WMI command structs
+ * (layout unchanged; all members are uint32_t).
+ */
+struct wmi_sta_powersave_mode_cmd {
+	uint32_t tlv_header;
+	uint32_t vdev_id;
+	uint32_t sta_ps_mode;
+} __packed;
+
+/*
+ * WMI_STA_SMPS_FORCE_MODE_CMDID: force an SM power save mode on a vdev.
+ * Marked __packed for consistency with the other WMI command structs
+ * (layout unchanged; all members are uint32_t).
+ */
+struct wmi_sta_smps_force_mode_cmd {
+	uint32_t tlv_header;
+	uint32_t vdev_id;
+	uint32_t forced_mode;
+} __packed;
+
+/*
+ * WMI_STA_SMPS_PARAM_CMDID: set an SMPS parameter/value pair on a vdev.
+ * Marked __packed for consistency with the other WMI command structs
+ * (layout unchanged; all members are uint32_t).
+ */
+struct wmi_sta_smps_param_cmd {
+	uint32_t tlv_header;
+	uint32_t vdev_id;
+	uint32_t param;
+	uint32_t value;
+} __packed;
+
+struct wmi_bcn_prb_info {
+       uint32_t tlv_header;
+       uint32_t caps;
+       uint32_t erp;
+} __packed;
+
+enum {
+       WMI_PDEV_SUSPEND,
+       WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct green_ap_ps_params {
+       uint32_t value;
+};
+
+struct wmi_pdev_green_ap_ps_enable_cmd_param {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t enable;
+};
+
+struct ap_ps_params {
+       uint32_t vdev_id;
+       uint32_t param;
+       uint32_t value;
+};
+
+struct vdev_set_params {
+       uint32_t if_id;
+       uint32_t param_id;
+       uint32_t param_value;
+};
+
+struct stats_request_params {
+       uint32_t stats_id;
+       uint32_t vdev_id;
+       uint32_t pdev_id;
+};
+
+struct wmi_set_current_country_params {
+       uint8_t alpha2[3];
+};
+
+struct wmi_set_current_country_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t new_alpha2;
+} __packed;
+
+enum set_init_cc_type {
+       WMI_COUNTRY_INFO_TYPE_ALPHA,
+       WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE,
+       WMI_COUNTRY_INFO_TYPE_REGDOMAIN,
+};
+
+enum set_init_cc_flags {
+       INVALID_CC,
+       CC_IS_SET,
+       REGDMN_IS_SET,
+       ALPHA_IS_SET,
+};
+
+struct wmi_init_country_params {
+       union {
+               uint16_t country_code;
+               uint16_t regdom_id;
+               uint8_t alpha2[3];
+       } cc_info;
+       enum set_init_cc_flags flags;
+};
+
+struct wmi_init_country_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t init_cc_type;
+       union {
+               uint32_t country_code;
+               uint32_t regdom_id;
+               uint32_t alpha2;
+       } cc_info;
+} __packed;
+
+struct wmi_11d_scan_start_params {
+       uint32_t vdev_id;
+       uint32_t scan_period_msec;
+       uint32_t start_interval_msec;
+};
+
+struct wmi_11d_scan_start_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t scan_period_msec;
+       uint32_t start_interval_msec;
+} __packed;
+
+struct wmi_11d_scan_stop_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+} __packed;
+
+struct wmi_11d_new_cc_ev {
+       uint32_t new_alpha2;
+} __packed;
+
+#define THERMAL_LEVELS  1
+struct tt_level_config {
+       uint32_t tmplwm;
+       uint32_t tmphwm;
+       uint32_t dcoffpercent;
+       uint32_t priority;
+};
+
+struct thermal_mitigation_params {
+       uint32_t pdev_id;
+       uint32_t enable;
+       uint32_t dc;
+       uint32_t dc_per_event;
+       struct tt_level_config levelconf[THERMAL_LEVELS];
+};
+
+struct wmi_therm_throt_config_request_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t enable;
+       uint32_t dc;
+       uint32_t dc_per_event;
+       uint32_t therm_throt_levels;
+} __packed;
+
+struct wmi_therm_throt_level_config_info {
+       uint32_t tlv_header;
+       uint32_t temp_lwm;
+       uint32_t temp_hwm;
+       uint32_t dc_off_percent;
+       uint32_t prio;
+} __packed;
+
+struct wmi_delba_send_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t tid;
+       uint32_t initiator;
+       uint32_t reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t tid;
+       uint32_t statuscode;
+} __packed;
+
+struct wmi_addba_send_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t tid;
+       uint32_t buffersize;
+} __packed;
+
+struct wmi_addba_clear_resp_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_info {
+       uint32_t tlv_header;
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t enable;
+       uint32_t filter_type;
+       uint32_t num_mac;
+} __packed;
+
+enum ath12k_wmi_pktlog_enable {
+       ATH12K_WMI_PKTLOG_ENABLE_AUTO  = 0,
+       ATH12K_WMI_PKTLOG_ENABLE_FORCE = 1,
+};
+
+struct wmi_pktlog_enable_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t evlist; /* WMI_PKTLOG_EVENT */
+       uint32_t enable;
+} __packed;
+
+struct wmi_pktlog_disable_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+} __packed;
+
+#define DFS_PHYERR_UNIT_TEST_CMD 0
+#define DFS_UNIT_TEST_MODULE   0x2b
+#define DFS_UNIT_TEST_TOKEN    0xAA
+
+enum dfs_test_args_idx {
+       DFS_TEST_CMDID = 0,
+       DFS_TEST_PDEV_ID,
+       DFS_TEST_RADAR_PARAM,
+       DFS_MAX_TEST_ARGS,
+};
+
+struct wmi_dfs_unit_test_arg {
+       uint32_t cmd_id;
+       uint32_t pdev_id;
+       uint32_t radar_param;
+};
+
+struct wmi_unit_test_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t module_id;
+       uint32_t num_args;
+       uint32_t diag_token;
+       /* Followed by test args*/
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+#define WMI_PEER_AUTH          0x00000001
+#define WMI_PEER_QOS           0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY        0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY        0x00000010
+#define WMI_PEER_HE            0x00000400
+#define WMI_PEER_APSD          0x00000800
+#define WMI_PEER_HT            0x00001000
+#define WMI_PEER_40MHZ         0x00002000
+#define WMI_PEER_STBC          0x00008000
+#define WMI_PEER_LDPC          0x00010000
+#define WMI_PEER_DYN_MIMOPS    0x00020000
+#define WMI_PEER_STATIC_MIMOPS 0x00040000
+#define WMI_PEER_SPATIAL_MUX   0x00200000
+#define WMI_PEER_TWT_REQ       0x00400000
+#define WMI_PEER_TWT_RESP      0x00800000
+#define WMI_PEER_VHT           0x02000000
+#define WMI_PEER_80MHZ         0x04000000
+#define WMI_PEER_PMF           0x08000000
+/* TODO: Place holder for WLAN_PEER_F_PS_PRESEND_REQUIRED = 0x10000000.
+ * Need to be cleaned up
+ */
+#define WMI_PEER_IS_P2P_CAPABLE        0x20000000
+#define WMI_PEER_160MHZ                0x40000000
+#define WMI_PEER_SAFEMODE_EN   0x80000000
+
+struct beacon_tmpl_params {
+       uint8_t vdev_id;
+       uint32_t tim_ie_offset;
+       uint32_t tmpl_len;
+       uint32_t tmpl_len_aligned;
+       uint32_t csa_switch_count_offset;
+       uint32_t ext_csa_switch_count_offset;
+       uint8_t *frm;
+};
+
+struct wmi_rate_set {
+       uint32_t num_rates;
+       uint32_t rates[(MAX_SUPPORTED_RATES / 4) + 1];
+};
+
+struct wmi_vht_rate_set {
+       uint32_t tlv_header;
+       uint32_t rx_max_rate;
+       uint32_t rx_mcs_set;
+       uint32_t tx_max_rate;
+       uint32_t tx_mcs_set;
+       uint32_t tx_max_mcs_nss;
+} __packed;
+
+struct wmi_he_rate_set {
+       uint32_t tlv_header;
+
+       /* MCS at which the peer can receive */
+       uint32_t rx_mcs_set;
+
+       /* MCS at which the peer can transmit */
+       uint32_t tx_mcs_set;
+} __packed;
+
+#define MAX_REG_RULES 10
+#define REG_ALPHA2_LEN 2
+#define MAX_6GHZ_REG_RULES 5
+
+enum wmi_start_event_param {
+       WMI_VDEV_START_RESP_EVENT = 0,
+       WMI_VDEV_RESTART_RESP_EVENT,
+};
+
+struct wmi_vdev_start_resp_event {
+       uint32_t vdev_id;
+       uint32_t requestor_id;
+       enum wmi_start_event_param resp_type;
+       uint32_t status;
+       uint32_t chain_mask;
+       uint32_t smps_mode;
+       union {
+               uint32_t mac_id;
+               uint32_t pdev_id;
+       };
+       uint32_t cfgd_tx_streams;
+       uint32_t cfgd_rx_streams;
+} __packed;
+
+/* VDEV start response status codes */
+enum wmi_vdev_start_resp_status_code {
+       WMI_VDEV_START_RESPONSE_STATUS_SUCCESS = 0,
+       WMI_VDEV_START_RESPONSE_INVALID_VDEVID = 1,
+       WMI_VDEV_START_RESPONSE_NOT_SUPPORTED = 2,
+       WMI_VDEV_START_RESPONSE_DFS_VIOLATION = 3,
+       WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN = 4,
+};
+
+/* Regulatory rule flags passed by firmware */
+#define REGULATORY_CHAN_DISABLED     BIT(0)
+#define REGULATORY_CHAN_NO_IR        BIT(1)
+#define REGULATORY_CHAN_RADAR        BIT(3)
+#define REGULATORY_CHAN_NO_OFDM      BIT(6)
+#define REGULATORY_CHAN_INDOOR_ONLY  BIT(9)
+
+#define REGULATORY_CHAN_NO_HT40      BIT(4)
+#define REGULATORY_CHAN_NO_80MHZ     BIT(7)
+#define REGULATORY_CHAN_NO_160MHZ    BIT(8)
+#define REGULATORY_CHAN_NO_20MHZ     BIT(11)
+#define REGULATORY_CHAN_NO_10MHZ     BIT(12)
+
+enum wmi_reg_chan_list_cmd_type {
+       WMI_REG_CHAN_LIST_CC_ID = 0,
+       WMI_REG_CHAN_LIST_CC_EXT_ID = 1,
+};
+
+enum wmi_reg_cc_setting_code {
+       WMI_REG_SET_CC_STATUS_PASS = 0,
+       WMI_REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+       WMI_REG_INIT_ALPHA2_NOT_FOUND = 2,
+       WMI_REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+       WMI_REG_SET_CC_STATUS_NO_MEMORY = 4,
+       WMI_REG_SET_CC_STATUS_FAIL = 5,
+
+       /* add new setting code above, update in
+        * @enum cc_setting_code as well.
+        * Also handle it in ath12k_wmi_cc_setting_code_to_reg()
+        */
+};
+
+enum cc_setting_code {
+       REG_SET_CC_STATUS_PASS = 0,
+       REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+       REG_INIT_ALPHA2_NOT_FOUND = 2,
+       REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+       REG_SET_CC_STATUS_NO_MEMORY = 4,
+       REG_SET_CC_STATUS_FAIL = 5,
+
+       /* add new setting code above, update in
+        * @enum wmi_reg_cc_setting_code as well.
+        * Also handle it in ath12k_cc_status_to_str()
+        */
+};
+
+static inline enum cc_setting_code
+qwz_wmi_cc_setting_code_to_reg(enum wmi_reg_cc_setting_code status_code)
+{
+	/*
+	 * Translate a firmware WMI country-code status into the host
+	 * representation.  Any code the host does not know about is
+	 * reported as a plain failure.
+	 */
+	static const enum cc_setting_code xlat[] = {
+		[WMI_REG_SET_CC_STATUS_PASS] = REG_SET_CC_STATUS_PASS,
+		[WMI_REG_CURRENT_ALPHA2_NOT_FOUND] = REG_CURRENT_ALPHA2_NOT_FOUND,
+		[WMI_REG_INIT_ALPHA2_NOT_FOUND] = REG_INIT_ALPHA2_NOT_FOUND,
+		[WMI_REG_SET_CC_CHANGE_NOT_ALLOWED] = REG_SET_CC_CHANGE_NOT_ALLOWED,
+		[WMI_REG_SET_CC_STATUS_NO_MEMORY] = REG_SET_CC_STATUS_NO_MEMORY,
+		[WMI_REG_SET_CC_STATUS_FAIL] = REG_SET_CC_STATUS_FAIL,
+	};
+
+	if ((unsigned int)status_code < sizeof(xlat) / sizeof(xlat[0]))
+		return xlat[status_code];
+	return REG_SET_CC_STATUS_FAIL;
+}
+
+static inline const char *
+qwz_cc_status_to_str(enum cc_setting_code code)
+{
+	/* Human-readable name of a host country-code setting status. */
+	static const char * const names[] = {
+		[REG_SET_CC_STATUS_PASS] = "REG_SET_CC_STATUS_PASS",
+		[REG_CURRENT_ALPHA2_NOT_FOUND] = "REG_CURRENT_ALPHA2_NOT_FOUND",
+		[REG_INIT_ALPHA2_NOT_FOUND] = "REG_INIT_ALPHA2_NOT_FOUND",
+		[REG_SET_CC_CHANGE_NOT_ALLOWED] = "REG_SET_CC_CHANGE_NOT_ALLOWED",
+		[REG_SET_CC_STATUS_NO_MEMORY] = "REG_SET_CC_STATUS_NO_MEMORY",
+		[REG_SET_CC_STATUS_FAIL] = "REG_SET_CC_STATUS_FAIL",
+	};
+
+	if ((unsigned int)code < sizeof(names) / sizeof(names[0]) &&
+	    names[code] != NULL)
+		return names[code];
+	return "Unknown CC status";
+}
+
+enum wmi_reg_6ghz_ap_type {
+       WMI_REG_INDOOR_AP = 0,
+       WMI_REG_STANDARD_POWER_AP = 1,
+       WMI_REG_VERY_LOW_POWER_AP = 2,
+
+       /* add AP type above, handle in ath12k_6ghz_ap_type_to_str()
+        */
+       WMI_REG_CURRENT_MAX_AP_TYPE,
+       WMI_REG_MAX_AP_TYPE = 7,
+};
+
+static inline const char *
+qwz_6ghz_ap_type_to_str(enum wmi_reg_6ghz_ap_type type)
+{
+	/*
+	 * Human-readable name of a 6 GHz AP regulatory power type.
+	 * Gaps in the table (types 3..6 other than CURRENT_MAX) fall
+	 * through to the unknown string.
+	 */
+	static const char * const names[] = {
+		[WMI_REG_INDOOR_AP] = "INDOOR AP",
+		[WMI_REG_STANDARD_POWER_AP] = "STANDARD POWER AP",
+		[WMI_REG_VERY_LOW_POWER_AP] = "VERY LOW POWER AP",
+		[WMI_REG_CURRENT_MAX_AP_TYPE] = "CURRENT_MAX_AP_TYPE",
+		[WMI_REG_MAX_AP_TYPE] = "MAX_AP_TYPE",
+	};
+
+	if ((unsigned int)type < sizeof(names) / sizeof(names[0]) &&
+	    names[type] != NULL)
+		return names[type];
+	return "unknown 6 GHz AP type";
+}
+
+enum wmi_reg_6ghz_client_type {
+       WMI_REG_DEFAULT_CLIENT = 0,
+       WMI_REG_SUBORDINATE_CLIENT = 1,
+       WMI_REG_MAX_CLIENT_TYPE = 2,
+
+       /* add client type above, handle it in
+        * ath12k_6ghz_client_type_to_str()
+        */
+};
+
+static inline const char *
+qwz_6ghz_client_type_to_str(enum wmi_reg_6ghz_client_type type)
+{
+	/* Human-readable name of a 6 GHz client type. */
+	static const char * const names[] = {
+		[WMI_REG_DEFAULT_CLIENT] = "DEFAULT CLIENT",
+		[WMI_REG_SUBORDINATE_CLIENT] = "SUBORDINATE CLIENT",
+		[WMI_REG_MAX_CLIENT_TYPE] = "MAX_CLIENT_TYPE",
+	};
+
+	if ((unsigned int)type < sizeof(names) / sizeof(names[0]) &&
+	    names[type] != NULL)
+		return names[type];
+	return "unknown 6 GHz client type";
+}
+
+enum reg_subdomains_6ghz {
+       EMPTY_6GHZ = 0x0,
+       FCC1_CLIENT_LPI_REGULAR_6GHZ = 0x01,
+       FCC1_CLIENT_SP_6GHZ = 0x02,
+       FCC1_AP_LPI_6GHZ = 0x03,
+       FCC1_CLIENT_LPI_SUBORDINATE = FCC1_AP_LPI_6GHZ,
+       FCC1_AP_SP_6GHZ = 0x04,
+       ETSI1_LPI_6GHZ = 0x10,
+       ETSI1_VLP_6GHZ = 0x11,
+       ETSI2_LPI_6GHZ = 0x12,
+       ETSI2_VLP_6GHZ = 0x13,
+       APL1_LPI_6GHZ = 0x20,
+       APL1_VLP_6GHZ = 0x21,
+
+       /* add sub-domain above, handle it in
+        * ath12k_sub_reg_6ghz_to_str()
+        */
+};
+
+static inline const char *
+qwz_sub_reg_6ghz_to_str(enum reg_subdomains_6ghz sub_id)
+{
+	/*
+	 * Human-readable name of a 6 GHz regulatory sub-domain.
+	 * FCC1_CLIENT_LPI_SUBORDINATE aliases FCC1_AP_LPI_6GHZ (0x03),
+	 * so both names are reported for that value.  Values not listed
+	 * in the (sparse) table map to the unknown string.
+	 */
+	static const char * const names[] = {
+		[EMPTY_6GHZ] = "N/A",
+		[FCC1_CLIENT_LPI_REGULAR_6GHZ] = "FCC1_CLIENT_LPI_REGULAR_6GHZ",
+		[FCC1_CLIENT_SP_6GHZ] = "FCC1_CLIENT_SP_6GHZ",
+		[FCC1_AP_LPI_6GHZ] = "FCC1_AP_LPI_6GHZ/FCC1_CLIENT_LPI_SUBORDINATE",
+		[FCC1_AP_SP_6GHZ] = "FCC1_AP_SP_6GHZ",
+		[ETSI1_LPI_6GHZ] = "ETSI1_LPI_6GHZ",
+		[ETSI1_VLP_6GHZ] = "ETSI1_VLP_6GHZ",
+		[ETSI2_LPI_6GHZ] = "ETSI2_LPI_6GHZ",
+		[ETSI2_VLP_6GHZ] = "ETSI2_VLP_6GHZ",
+		[APL1_LPI_6GHZ] = "APL1_LPI_6GHZ",
+		[APL1_VLP_6GHZ] = "APL1_VLP_6GHZ",
+	};
+
+	if ((unsigned int)sub_id < sizeof(names) / sizeof(names[0]) &&
+	    names[sub_id] != NULL)
+		return names[sub_id];
+	return "unknown sub reg id";
+}
+
+enum reg_super_domain_6ghz {
+       FCC1_6GHZ = 0x01,
+       ETSI1_6GHZ = 0x02,
+       ETSI2_6GHZ = 0x03,
+       APL1_6GHZ = 0x04,
+       FCC1_6GHZ_CL = 0x05,
+
+       /* add super domain above, handle it in
+        * ath12k_super_reg_6ghz_to_str()
+        */
+};
+
+static inline const char *
+qwz_super_reg_6ghz_to_str(enum reg_super_domain_6ghz domain_id)
+{
+	/* Human-readable name of a 6 GHz regulatory super-domain. */
+	static const char * const names[] = {
+		[FCC1_6GHZ] = "FCC1_6GHZ",
+		[ETSI1_6GHZ] = "ETSI1_6GHZ",
+		[ETSI2_6GHZ] = "ETSI2_6GHZ",
+		[APL1_6GHZ] = "APL1_6GHZ",
+		[FCC1_6GHZ_CL] = "FCC1_6GHZ_CL",
+	};
+
+	if ((unsigned int)domain_id < sizeof(names) / sizeof(names[0]) &&
+	    names[domain_id] != NULL)
+		return names[domain_id];
+	return "unknown domain id";
+}
+
+struct cur_reg_rule {
+       uint16_t start_freq;
+       uint16_t end_freq;
+       uint16_t max_bw;
+       uint8_t reg_power;
+       uint8_t ant_gain;
+       uint16_t flags;
+       bool psd_flag;
+       int8_t psd_eirp;
+};
+
+struct cur_regulatory_info {
+       enum cc_setting_code status_code;
+       uint8_t num_phy;
+       uint8_t phy_id;
+       uint16_t reg_dmn_pair;
+       uint16_t ctry_code;
+       uint8_t alpha2[REG_ALPHA2_LEN + 1];
+       uint32_t dfs_region;
+       uint32_t phybitmap;
+       uint32_t min_bw_2ghz;
+       uint32_t max_bw_2ghz;
+       uint32_t min_bw_5ghz;
+       uint32_t max_bw_5ghz;
+       uint32_t num_2ghz_reg_rules;
+       uint32_t num_5ghz_reg_rules;
+       struct cur_reg_rule *reg_rules_2ghz_ptr;
+       struct cur_reg_rule *reg_rules_5ghz_ptr;
+       bool is_ext_reg_event;
+       enum wmi_reg_6ghz_client_type client_type;
+       bool rnr_tpe_usable;
+       bool unspecified_ap_usable;
+       uint8_t domain_code_6ghz_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+       uint8_t domain_code_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+       uint32_t domain_code_6ghz_super_id;
+       uint32_t min_bw_6ghz_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+       uint32_t max_bw_6ghz_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+       uint32_t min_bw_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+       uint32_t max_bw_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+       uint32_t num_6ghz_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+       uint32_t num_6ghz_rules_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+       struct cur_reg_rule *reg_rules_6ghz_ap_ptr[WMI_REG_CURRENT_MAX_AP_TYPE];
+       struct cur_reg_rule *reg_rules_6ghz_client_ptr
+               [WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+};
+
+struct wmi_reg_chan_list_cc_event {
+       uint32_t status_code;
+       uint32_t phy_id;
+       uint32_t alpha2;
+       uint32_t num_phy;
+       uint32_t country_id;
+       uint32_t domain_code;
+       uint32_t dfs_region;
+       uint32_t phybitmap;
+       uint32_t min_bw_2ghz;
+       uint32_t max_bw_2ghz;
+       uint32_t min_bw_5ghz;
+       uint32_t max_bw_5ghz;
+       uint32_t num_2ghz_reg_rules;
+       uint32_t num_5ghz_reg_rules;
+} __packed;
+
+struct wmi_regulatory_rule_struct {
+       uint32_t  tlv_header;
+       uint32_t  freq_info;
+       uint32_t  bw_pwr_info;
+       uint32_t  flag_info;
+};
+
+#define WMI_REG_CLIENT_MAX 4
+
+struct wmi_reg_chan_list_cc_ext_event {
+       uint32_t status_code;
+       uint32_t phy_id;
+       uint32_t alpha2;
+       uint32_t num_phy;
+       uint32_t country_id;
+       uint32_t domain_code;
+       uint32_t dfs_region;
+       uint32_t phybitmap;
+       uint32_t min_bw_2ghz;
+       uint32_t max_bw_2ghz;
+       uint32_t min_bw_5ghz;
+       uint32_t max_bw_5ghz;
+       uint32_t num_2ghz_reg_rules;
+       uint32_t num_5ghz_reg_rules;
+       uint32_t client_type;
+       uint32_t rnr_tpe_usable;
+       uint32_t unspecified_ap_usable;
+       uint32_t domain_code_6ghz_ap_lpi;
+       uint32_t domain_code_6ghz_ap_sp;
+       uint32_t domain_code_6ghz_ap_vlp;
+       uint32_t domain_code_6ghz_client_lpi[WMI_REG_CLIENT_MAX];
+       uint32_t domain_code_6ghz_client_sp[WMI_REG_CLIENT_MAX];
+       uint32_t domain_code_6ghz_client_vlp[WMI_REG_CLIENT_MAX];
+       uint32_t domain_code_6ghz_super_id;
+       uint32_t min_bw_6ghz_ap_sp;
+       uint32_t max_bw_6ghz_ap_sp;
+       uint32_t min_bw_6ghz_ap_lpi;
+       uint32_t max_bw_6ghz_ap_lpi;
+       uint32_t min_bw_6ghz_ap_vlp;
+       uint32_t max_bw_6ghz_ap_vlp;
+       uint32_t min_bw_6ghz_client_sp[WMI_REG_CLIENT_MAX];
+       uint32_t max_bw_6ghz_client_sp[WMI_REG_CLIENT_MAX];
+       uint32_t min_bw_6ghz_client_lpi[WMI_REG_CLIENT_MAX];
+       uint32_t max_bw_6ghz_client_lpi[WMI_REG_CLIENT_MAX];
+       uint32_t min_bw_6ghz_client_vlp[WMI_REG_CLIENT_MAX];
+       uint32_t max_bw_6ghz_client_vlp[WMI_REG_CLIENT_MAX];
+       uint32_t num_6ghz_reg_rules_ap_sp;
+       uint32_t num_6ghz_reg_rules_ap_lpi;
+       uint32_t num_6ghz_reg_rules_ap_vlp;
+       uint32_t num_6ghz_reg_rules_client_sp[WMI_REG_CLIENT_MAX];
+       uint32_t num_6ghz_reg_rules_client_lpi[WMI_REG_CLIENT_MAX];
+       uint32_t num_6ghz_reg_rules_client_vlp[WMI_REG_CLIENT_MAX];
+} __packed;
+
+struct wmi_regulatory_ext_rule {
+       uint32_t tlv_header;
+       uint32_t freq_info;
+       uint32_t bw_pwr_info;
+       uint32_t flag_info;
+       uint32_t psd_power_info;
+} __packed;
+
+struct wmi_vdev_delete_resp_event {
+       uint32_t vdev_id;
+} __packed;
+
+struct wmi_peer_delete_resp_event {
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_bcn_tx_status_event {
+       uint32_t vdev_id;
+       uint32_t tx_status;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+       uint32_t vdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_event {
+       uint32_t freq;  /* Units in MHz */
+       uint32_t noise_floor;   /* units are dBm */
+       /* rx clear - how often the channel was unused */
+       uint32_t rx_clear_count_low;
+       uint32_t rx_clear_count_high;
+       /* cycle count - elapsed time during measured period, in clock ticks */
+       uint32_t cycle_count_low;
+       uint32_t cycle_count_high;
+       /* tx cycle count - elapsed time spent in tx, in clock ticks */
+       uint32_t tx_cycle_count_low;
+       uint32_t tx_cycle_count_high;
+       /* rx cycle count - elapsed time spent in rx, in clock ticks */
+       uint32_t rx_cycle_count_low;
+       uint32_t rx_cycle_count_high;
+	/* rx cycle count for my BSS, in 64-bit format */
+       uint32_t rx_bss_cycle_count_low;
+       uint32_t rx_bss_cycle_count_high;
+       uint32_t pdev_id;
+} __packed;
+
+#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
+
+struct wmi_vdev_install_key_compl_event {
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t key_idx;
+       uint32_t key_flags;
+       uint32_t status;
+} __packed;
+
+struct wmi_vdev_install_key_complete_arg {
+       uint32_t vdev_id;
+       const uint8_t *macaddr;
+       uint32_t key_idx;
+       uint32_t key_flags;
+       uint32_t status;
+};
+
+struct wmi_peer_assoc_conf_event {
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_assoc_conf_arg {
+       uint32_t vdev_id;
+       const uint8_t *macaddr;
+};
+
+struct wmi_fils_discovery_event {
+       uint32_t vdev_id;
+       uint32_t fils_tt;
+       uint32_t tbtt;
+} __packed;
+
+struct wmi_probe_resp_tx_status_event {
+       uint32_t vdev_id;
+       uint32_t tx_status;
+} __packed;
+
+/*
+ * PDEV statistics
+ */
+struct wmi_pdev_stats_base {
+       int32_t chan_nf;
+       uint32_t tx_frame_count; /* Cycles spent transmitting frames */
+       uint32_t rx_frame_count; /* Cycles spent receiving frames */
+       uint32_t rx_clear_count; /* Total channel busy time, evidently */
+       uint32_t cycle_count; /* Total on-channel time */
+       uint32_t phy_err_count;
+       uint32_t chan_tx_pwr;
+} __packed;
+
+struct wmi_pdev_stats_extra {
+       uint32_t ack_rx_bad;
+       uint32_t rts_bad;
+       uint32_t rts_good;
+       uint32_t fcs_bad;
+       uint32_t no_beacons;
+       uint32_t mib_int_count;
+} __packed;
+
+struct wmi_pdev_stats_tx {
+       /* Num HTT cookies queued to dispatch list */
+       int32_t comp_queued;
+
+       /* Num HTT cookies dispatched */
+       int32_t comp_delivered;
+
+       /* Num MSDU queued to WAL */
+       int32_t msdu_enqued;
+
+       /* Num MPDU queue to WAL */
+       int32_t mpdu_enqued;
+
+       /* Num MSDUs dropped by WMM limit */
+       int32_t wmm_drop;
+
+       /* Num Local frames queued */
+       int32_t local_enqued;
+
+       /* Num Local frames done */
+       int32_t local_freed;
+
+       /* Num queued to HW */
+       int32_t hw_queued;
+
+       /* Num PPDU reaped from HW */
+       int32_t hw_reaped;
+
+       /* Num underruns */
+       int32_t underrun;
+
+       /* Num hw paused */
+       uint32_t hw_paused;
+
+       /* Num PPDUs cleaned up in TX abort */
+       int32_t tx_abort;
+
+       /* Num MPDUs requeued by SW */
+       int32_t mpdus_requeued;
+
+       /* excessive retries */
+       uint32_t tx_ko;
+
+       uint32_t tx_xretry;
+
+       /* data hw rate code */
+       uint32_t data_rc;
+
+       /* Scheduler self triggers */
+       uint32_t self_triggers;
+
+       /* frames dropped due to excessive sw retries */
+       uint32_t sw_retry_failure;
+
+       /* illegal rate phy errors  */
+       uint32_t illgl_rate_phy_err;
+
+       /* wal pdev continuous xretry */
+       uint32_t pdev_cont_xretry;
+
+       /* wal pdev tx timeouts */
+       uint32_t pdev_tx_timeout;
+
+       /* wal pdev resets  */
+       uint32_t pdev_resets;
+
+       /* frames dropped due to non-availability of stateless TIDs */
+       uint32_t stateless_tid_alloc_failure;
+
+       /* PhY/BB underrun */
+       uint32_t phy_underrun;
+
+       /* MPDU is more than txop limit */
+       uint32_t txop_ovf;
+
+       /* Num sequences posted */
+       uint32_t seq_posted;
+
+       /* Num sequences failed in queueing */
+       uint32_t seq_failed_queueing;
+
+       /* Num sequences completed */
+       uint32_t seq_completed;
+
+       /* Num sequences restarted */
+       uint32_t seq_restarted;
+
+       /* Num of MU sequences posted */
+       uint32_t mu_seq_posted;
+
+       /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT
+        * (Reset,channel change)
+        */
+       int32_t mpdus_sw_flush;
+
+       /* Num MPDUs filtered by HW, all filter condition (TTL expired) */
+       int32_t mpdus_hw_filter;
+
+       /* Num MPDUs truncated by PDG (TXOP, TBTT,
+        * PPDU_duration based on rate, dyn_bw)
+        */
+       int32_t mpdus_truncated;
+
+	/* Num MPDUs that were tried but didn't receive an ACK or BA */
+       int32_t mpdus_ack_failed;
+
+	/* Num MPDUs that were dropped due to expiry. */
+       int32_t mpdus_expired;
+} __packed;
+
+struct wmi_pdev_stats_rx {
+       /* Cnts any change in ring routing mid-ppdu */
+       int32_t mid_ppdu_route_change;
+
+       /* Total number of statuses processed */
+       int32_t status_rcvd;
+
+       /* Extra frags on rings 0-3 */
+       int32_t r0_frags;
+       int32_t r1_frags;
+       int32_t r2_frags;
+       int32_t r3_frags;
+
+       /* MSDUs / MPDUs delivered to HTT */
+       int32_t htt_msdus;
+       int32_t htt_mpdus;
+
+       /* MSDUs / MPDUs delivered to local stack */
+       int32_t loc_msdus;
+       int32_t loc_mpdus;
+
+       /* AMSDUs that have more MSDUs than the status ring size */
+       int32_t oversize_amsdu;
+
+       /* Number of PHY errors */
+       int32_t phy_errs;
+
+       /* Number of PHY errors drops */
+       int32_t phy_err_drop;
+
+       /* Number of mpdu errors - FCS, MIC, ENC etc. */
+       int32_t mpdu_errs;
+
+       /* Num overflow errors */
+       int32_t rx_ovfl_errs;
+} __packed;
+
+struct wmi_pdev_stats {
+       struct wmi_pdev_stats_base base;
+       struct wmi_pdev_stats_tx tx;
+       struct wmi_pdev_stats_rx rx;
+} __packed;
+
+#define WLAN_MAX_AC 4		/* number of WMM access categories */
+/* Depth of the tx rate / beacon RSSI history in struct wmi_vdev_stats.
+ * (A duplicate, identical definition was removed here.)
+ */
+#define MAX_TX_RATE_VALUES 10
+
+/* Per-vdev statistics reported by firmware in the stats event. */
+struct wmi_vdev_stats {
+	uint32_t vdev_id;
+	uint32_t beacon_snr;
+	uint32_t data_snr;
+	uint32_t num_tx_frames[WLAN_MAX_AC];	/* per WMM access category */
+	uint32_t num_rx_frames;
+	uint32_t num_tx_frames_retries[WLAN_MAX_AC];
+	uint32_t num_tx_frames_failures[WLAN_MAX_AC];
+	uint32_t num_rts_fail;
+	uint32_t num_rts_success;
+	uint32_t num_rx_err;
+	uint32_t num_rx_discard;
+	uint32_t num_tx_not_acked;
+	uint32_t tx_rate_history[MAX_TX_RATE_VALUES];
+	uint32_t beacon_rssi_history[MAX_TX_RATE_VALUES];
+} __packed;
+
/* Per-vdev beacon transmission counters (successes and outages). */
struct wmi_bcn_stats {
	uint32_t vdev_id;
	uint32_t tx_bcn_succ_cnt;
	uint32_t tx_bcn_outage_cnt;
} __packed;
+
/*
 * Fixed header of the WMI stats event.  The num_* fields give the count
 * of each per-pdev/vdev/peer/beacon stats structure that follows in the
 * event's variable-length payload.
 */
struct wmi_stats_event {
	uint32_t stats_id;
	uint32_t num_pdev_stats;
	uint32_t num_vdev_stats;
	uint32_t num_peer_stats;
	uint32_t num_bcnflt_stats;
	uint32_t num_chan_stats;
	uint32_t num_mib_stats;
	uint32_t pdev_id;
	uint32_t num_bcn_stats;
	uint32_t num_peer_extd_stats;
	uint32_t num_peer_extd2_stats;
} __packed;
+
+struct wmi_rssi_stats {
+       uint32_t vdev_id;
+       uint32_t rssi_avg_beacon[WMI_MAX_CHAINS];
+       uint32_t rssi_avg_data[WMI_MAX_CHAINS];
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_per_chain_rssi_stats {
+       uint32_t num_per_chain_rssi_stats;
+} __packed;
+
+struct wmi_pdev_ctl_failsafe_chk_event {
+       uint32_t pdev_id;
+       uint32_t ctl_failsafe_status;
+} __packed;
+
+struct wmi_pdev_csa_switch_ev {
+       uint32_t pdev_id;
+       uint32_t current_switch_count;
+       uint32_t num_vdevs;
+} __packed;
+
+struct wmi_pdev_radar_ev {
+       uint32_t pdev_id;
+       uint32_t detection_mode;
+       uint32_t chan_freq;
+       uint32_t chan_width;
+       uint32_t detector_id;
+       uint32_t segment_id;
+       uint32_t timestamp;
+       uint32_t is_chirp;
+       int32_t freq_offset;
+       int32_t sidx;
+} __packed;
+
/* Firmware-reported chip temperature reading for one pdev. */
struct wmi_pdev_temperature_event {
	/* temperature value in Celsius degree */
	int32_t temp;
	uint32_t pdev_id;
} __packed;
+
+#define WMI_RX_STATUS_OK                       0x00
+#define WMI_RX_STATUS_ERR_CRC                  0x01
+#define WMI_RX_STATUS_ERR_DECRYPT              0x08
+#define WMI_RX_STATUS_ERR_MIC                  0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS       0x20
+
+#define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4
+
/*
 * Host-side (not wire-format) parameters describing a received
 * management frame; fields mirror struct wmi_mgmt_rx_hdr, presumably
 * filled in when the management RX event is parsed — confirm against
 * the event handler.
 */
struct mgmt_rx_event_params {
	uint32_t chan_freq;
	uint32_t channel;
	uint32_t snr;
	uint8_t rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA];
	uint32_t rate;
	enum wmi_phy_mode phy_mode;
	uint32_t buf_len;
	int status;
	uint32_t flags;
	int rssi;
	uint32_t tsf_delta;
	uint8_t pdev_id;
};
+
+#define ATH_MAX_ANTENNA 4
+
/*
 * Fixed header of the WMI management RX event; the received frame of
 * buf_len bytes follows this header in the event payload.  The 64-bit
 * RX TSF is split across rx_tsf_l32/rx_tsf_u32.
 */
struct wmi_mgmt_rx_hdr {
	uint32_t channel;
	uint32_t snr;
	uint32_t rate;
	uint32_t phy_mode;
	uint32_t buf_len;
	uint32_t status;	/* see WMI_RX_STATUS_* */
	uint32_t rssi_ctl[ATH_MAX_ANTENNA];
	uint32_t flags;
	int rssi;
	uint32_t tsf_delta;
	uint32_t rx_tsf_l32;
	uint32_t rx_tsf_u32;
	uint32_t pdev_id;
	uint32_t chan_freq;
} __packed;
+
+#define MAX_ANTENNA_EIGHT 8
+
+struct wmi_rssi_ctl_ext {
+       uint32_t tlv_header;
+       uint32_t rssi_ctl_ext[MAX_ANTENNA_EIGHT - ATH_MAX_ANTENNA];
+};
+
+struct wmi_mgmt_tx_compl_event {
+       uint32_t desc_id;
+       uint32_t status;
+       uint32_t pdev_id;
+       uint32_t ppdu_id;
+       uint32_t ack_rssi;
+} __packed;
+
/* Scan progress notification from firmware (start/complete/foreign
 * channel etc.), identified by scan_req_id/scan_id/vdev_id. */
struct wmi_scan_event {
	uint32_t event_type; /* %WMI_SCAN_EVENT_ */
	uint32_t reason; /* %WMI_SCAN_REASON_ */
	uint32_t channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
	uint32_t scan_req_id;
	uint32_t scan_id;
	uint32_t vdev_id;
	/* TSF Timestamp when the scan event (%WMI_SCAN_EVENT_) is completed
	 * In case of AP it is TSF of the AP vdev
	 * In case of STA connected state, this is the TSF of the AP
	 * In case of STA not connected, it will be the free running HW timer
	 */
	uint32_t tsf_timestamp;
} __packed;
+
+struct wmi_peer_sta_kickout_arg {
+       const uint8_t *mac_addr;
+};
+
+struct wmi_peer_sta_kickout_event {
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+enum wmi_roam_reason {
+       WMI_ROAM_REASON_BETTER_AP = 1,
+       WMI_ROAM_REASON_BEACON_MISS = 2,
+       WMI_ROAM_REASON_LOW_RSSI = 3,
+       WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+       WMI_ROAM_REASON_HO_FAILED = 5,
+
+       /* keep last */
+       WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_event {
+       uint32_t vdev_id;
+       uint32_t reason;
+       uint32_t rssi;
+} __packed;
+
+#define WMI_CHAN_INFO_START_RESP 0
+#define WMI_CHAN_INFO_END_RESP 1
+
+struct wmi_chan_info_event {
+       uint32_t err_code;
+       uint32_t freq;
+       uint32_t cmd_flags;
+       uint32_t noise_floor;
+       uint32_t rx_clear_count;
+       uint32_t cycle_count;
+       uint32_t chan_tx_pwr_range;
+       uint32_t chan_tx_pwr_tp;
+       uint32_t rx_frame_count;
+       uint32_t my_bss_rx_cycle_count;
+       uint32_t rx_11b_mode_data_duration;
+       uint32_t tx_frame_cnt;
+       uint32_t mac_clk_mhz;
+       uint32_t vdev_id;
+} __packed;
+
+struct ath12k_targ_cap {
+       uint32_t phy_capability;
+       uint32_t max_frag_entry;
+       uint32_t num_rf_chains;
+       uint32_t ht_cap_info;
+       uint32_t vht_cap_info;
+       uint32_t vht_supp_mcs;
+       uint32_t hw_min_tx_power;
+       uint32_t hw_max_tx_power;
+       uint32_t sys_cap_info;
+       uint32_t min_pkt_size_enable;
+       uint32_t max_bcn_ie_size;
+       uint32_t max_num_scan_channels;
+       uint32_t max_supported_macs;
+       uint32_t wmi_fw_sub_feat_caps;
+       uint32_t txrx_chainmask;
+       uint32_t default_dbs_hw_mode_index;
+       uint32_t num_msdu_desc;
+};
+
/* Operating mode of a firmware virtual device (vdev).  Values are part
 * of the firmware ABI and start at 1. */
enum wmi_vdev_type {
	WMI_VDEV_TYPE_AP      = 1,
	WMI_VDEV_TYPE_STA     = 2,
	WMI_VDEV_TYPE_IBSS    = 3,
	WMI_VDEV_TYPE_MONITOR = 4,
};
+
+enum wmi_vdev_subtype {
+       WMI_VDEV_SUBTYPE_NONE,
+       WMI_VDEV_SUBTYPE_P2P_DEVICE,
+       WMI_VDEV_SUBTYPE_P2P_CLIENT,
+       WMI_VDEV_SUBTYPE_P2P_GO,
+       WMI_VDEV_SUBTYPE_PROXY_STA,
+       WMI_VDEV_SUBTYPE_MESH_NON_11S,
+       WMI_VDEV_SUBTYPE_MESH_11S,
+};
+
+enum wmi_sta_powersave_param {
+       WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+       WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+       WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+       WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+       WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
/* U-APSD AC service types: delivery-enabled vs. trigger-enabled. */
#define WMI_UAPSD_AC_TYPE_DELI 0
#define WMI_UAPSD_AC_TYPE_TRIG 1

/*
 * Map an access category and service type to its bit in the U-APSD
 * bitmask: bit (2 * ac) for delivery, bit (2 * ac + 1) for trigger.
 * Arguments are parenthesized so that expression arguments such as
 * WMI_UAPSD_AC_BIT_MASK(ac + 1, type) expand correctly.
 */
#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
	(((type) == WMI_UAPSD_AC_TYPE_DELI) ? \
	 (1 << ((ac) << 1)) : (1 << (((ac) << 1) + 1)))
+
+enum wmi_sta_ps_param_uapsd {
+       WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+       WMI_STA_PS_UAPSD_AC0_TRIGGER_EN  = (1 << 1),
+       WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+       WMI_STA_PS_UAPSD_AC1_TRIGGER_EN  = (1 << 3),
+       WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+       WMI_STA_PS_UAPSD_AC2_TRIGGER_EN  = (1 << 5),
+       WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+       WMI_STA_PS_UAPSD_AC3_TRIGGER_EN  = (1 << 7),
+};
+
+#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
+
+struct wmi_sta_uapsd_auto_trig_param {
+       uint32_t wmm_ac;
+       uint32_t user_priority;
+       uint32_t service_interval;
+       uint32_t suspend_interval;
+       uint32_t delay_interval;
+};
+
+struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t num_ac;
+};
+
+struct wmi_sta_uapsd_auto_trig_arg {
+       uint32_t wmm_ac;
+       uint32_t user_priority;
+       uint32_t service_interval;
+       uint32_t suspend_interval;
+       uint32_t delay_interval;
+};
+
+enum wmi_sta_ps_param_tx_wake_threshold {
+       WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+       WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+       /* Values greater than one indicate that many TX attempts per beacon
+        * interval before the STA will wake up
+        */
+};
+
+/* The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Poll as are necessary to retrieve buffered BU. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+       WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+       /* Values greater than 0 indicate the maximum number of PS-Poll frames
+        * FW will send before waking up.
+        */
+};
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+       WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+       WMI_AP_PS_UAPSD_AC0_TRIGGER_EN  = (1 << 1),
+       WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+       WMI_AP_PS_UAPSD_AC1_TRIGGER_EN  = (1 << 3),
+       WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+       WMI_AP_PS_UAPSD_AC2_TRIGGER_EN  = (1 << 5),
+       WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+       WMI_AP_PS_UAPSD_AC3_TRIGGER_EN  = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+       WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+       WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+       WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+       WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+       MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+enum wmi_ap_ps_peer_param {
+       /** Set uapsd configuration for a given peer.
+        *
+        * This include the delivery and trigger enabled state for each AC.
+        * The host MLME needs to set this based on AP capability and stations
+        * request Set in the association request  received from the station.
+        *
+        * Lower 8 bits of the value specify the UAPSD configuration.
+        *
+        * (see enum wmi_ap_ps_param_uapsd)
+        * The default value is 0.
+        */
+       WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+       /**
+        * Set the service period for a UAPSD capable station
+        *
+        * The service period from wme ie in the (re)assoc request frame.
+        *
+        * (see enum wmi_ap_ps_peer_param_max_sp)
+        */
+       WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+       /** Time in seconds for aging out buffered frames
+        * for STA in power save
+        */
+       WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+
+       /** Specify frame types that are considered SIFS
+        * RESP trigger frame
+        */
+       WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE = 3,
+
+       /** Specifies the trigger state of TID.
+        * Valid only for UAPSD frame type
+        */
+       WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD = 4,
+
+       /* Specifies the WNM sleep state of a STA */
+       WMI_AP_PS_PEER_PARAM_WNM_SLEEP = 5,
+};
+
+#define DISABLE_SIFS_RESPONSE_TRIGGER 0
+
+#define WMI_MAX_KEY_INDEX   3
+#define WMI_MAX_KEY_LEN     32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP    0x01
+
+#define WMI_CIPHER_NONE     0x0 /* clear key */
+#define WMI_CIPHER_WEP      0x1
+#define WMI_CIPHER_TKIP     0x2
+#define WMI_CIPHER_AES_OCB  0x3
+#define WMI_CIPHER_AES_CCM  0x4
+#define WMI_CIPHER_WAPI     0x5
+#define WMI_CIPHER_CKIP     0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+#define WMI_CIPHER_ANY      0x8
+#define WMI_CIPHER_AES_GCM  0x9
+#define WMI_CIPHER_AES_GMAC 0xa
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE    (0xffff)
+
+#define ATH12K_RC_VERSION_OFFSET       28
+#define ATH12K_RC_PREAMBLE_OFFSET      8
+#define ATH12K_RC_NSS_OFFSET           5
+
+#define ATH12K_HW_RATE_CODE(rate, nss, preamble)       \
+       ((1 << ATH12K_RC_VERSION_OFFSET) |              \
+        ((nss) << ATH12K_RC_NSS_OFFSET) |              \
+        ((preamble) << ATH12K_RC_PREAMBLE_OFFSET) |    \
+        (rate))
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+       WMI_RATE_PREAMBLE_OFDM,
+       WMI_RATE_PREAMBLE_CCK,
+       WMI_RATE_PREAMBLE_HT,
+       WMI_RATE_PREAMBLE_VHT,
+       WMI_RATE_PREAMBLE_HE,
+};
+
+/**
+ * enum wmi_rtscts_prot_mode - Enable/Disable RTS/CTS and CTS2Self Protection.
+ * @WMI_RTS_CTS_DISABLED: RTS/CTS protection is disabled.
+ * @WMI_USE_RTS_CTS: RTS/CTS Enabled.
+ * @WMI_USE_CTS2SELF: CTS to self protection Enabled.
+ */
+enum wmi_rtscts_prot_mode {
+       WMI_RTS_CTS_DISABLED = 0,
+       WMI_USE_RTS_CTS = 1,
+       WMI_USE_CTS2SELF = 2,
+};
+
+/**
+ * enum wmi_rtscts_profile - Selection of RTS CTS profile along with enabling
+ *                           protection mode.
+ * @WMI_RTSCTS_FOR_NO_RATESERIES: Neither of rate-series should use RTS-CTS
+ * @WMI_RTSCTS_FOR_SECOND_RATESERIES: Only second rate-series will use RTS-CTS
+ * @WMI_RTSCTS_ACROSS_SW_RETRIES: Only the second rate-series will use RTS-CTS,
+ *                                but if there's a sw retry, both the rate
+ *                                series will use RTS-CTS.
+ * @WMI_RTSCTS_ERP: RTS/CTS used for ERP protection for every PPDU.
+ * @WMI_RTSCTS_FOR_ALL_RATESERIES: Enable RTS-CTS for all rate series.
+ */
+enum wmi_rtscts_profile {
+       WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+       WMI_RTSCTS_FOR_SECOND_RATESERIES = 1,
+       WMI_RTSCTS_ACROSS_SW_RETRIES = 2,
+       WMI_RTSCTS_ERP = 3,
+       WMI_RTSCTS_FOR_ALL_RATESERIES = 4,
+};
+
+struct ath12k_hal_reg_cap {
+       uint32_t eeprom_rd;
+       uint32_t eeprom_rd_ext;
+       uint32_t regcap1;
+       uint32_t regcap2;
+       uint32_t wireless_modes;
+       uint32_t low_2ghz_chan;
+       uint32_t high_2ghz_chan;
+       uint32_t low_5ghz_chan;
+       uint32_t high_5ghz_chan;
+};
+
+struct ath12k_mem_chunk {
+       void *vaddr;
+       bus_addr_t paddr;
+       uint32_t len;
+       uint32_t req_id;
+};
+
+enum wmi_sta_ps_param_rx_wake_policy {
+       WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+       WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/* Do not change existing values! Used by ath12k_frame_mode parameter
+ * module parameter.
+ */
+enum ath12k_hw_txrx_mode {
+       ATH12K_HW_TXRX_RAW = 0,
+       ATH12K_HW_TXRX_NATIVE_WIFI = 1,
+       ATH12K_HW_TXRX_ETHERNET = 2,
+};
+
+struct wmi_wmm_params {
+       uint32_t tlv_header;
+       uint32_t cwmin;
+       uint32_t cwmax;
+       uint32_t aifs;
+       uint32_t txoplimit;
+       uint32_t acm;
+       uint32_t no_ack;
+} __packed;
+
+struct wmi_wmm_params_arg {
+       uint8_t acm;
+       uint8_t aifs;
+       uint16_t cwmin;
+       uint16_t cwmax;
+       uint16_t txop;
+       uint8_t no_ack;
+};
+
+struct wmi_vdev_set_wmm_params_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_wmm_params wmm_params[4];
+       uint32_t wmm_param_type;
+} __packed;
+
+struct wmi_wmm_params_all_arg {
+       struct wmi_wmm_params_arg ac_be;
+       struct wmi_wmm_params_arg ac_bk;
+       struct wmi_wmm_params_arg ac_vi;
+       struct wmi_wmm_params_arg ac_vo;
+};
+
+#define ATH12K_TWT_DEF_STA_CONG_TIMER_MS               5000
+#define ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE               10
+#define ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP         50
+#define ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN      20
+#define ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL      100
+#define ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN    80
+#define ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP       50
+#define ATH12K_TWT_DEF_MIN_NO_STA_SETUP                        10
+#define ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN             2
+#define ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS         2
+#define ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS                        2
+#define ATH12K_TWT_DEF_MAX_NO_STA_TWT                  500
+#define ATH12K_TWT_DEF_MODE_CHECK_INTERVAL             10000
+#define ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL           1000
+#define ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL                5000
+
+struct wmi_twt_enable_params {
+       uint32_t sta_cong_timer_ms;
+       uint32_t mbss_support;
+       uint32_t default_slot_size;
+       uint32_t congestion_thresh_setup;
+       uint32_t congestion_thresh_teardown;
+       uint32_t congestion_thresh_critical;
+       uint32_t interference_thresh_teardown;
+       uint32_t interference_thresh_setup;
+       uint32_t min_no_sta_setup;
+       uint32_t min_no_sta_teardown;
+       uint32_t no_of_bcast_mcast_slots;
+       uint32_t min_no_twt_slots;
+       uint32_t max_no_sta_twt;
+       uint32_t mode_check_interval;
+       uint32_t add_sta_slot_interval;
+       uint32_t remove_sta_slot_interval;
+};
+
+struct wmi_twt_enable_params_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t sta_cong_timer_ms;
+       uint32_t mbss_support;
+       uint32_t default_slot_size;
+       uint32_t congestion_thresh_setup;
+       uint32_t congestion_thresh_teardown;
+       uint32_t congestion_thresh_critical;
+       uint32_t interference_thresh_teardown;
+       uint32_t interference_thresh_setup;
+       uint32_t min_no_sta_setup;
+       uint32_t min_no_sta_teardown;
+       uint32_t no_of_bcast_mcast_slots;
+       uint32_t min_no_twt_slots;
+       uint32_t max_no_sta_twt;
+       uint32_t mode_check_interval;
+       uint32_t add_sta_slot_interval;
+       uint32_t remove_sta_slot_interval;
+} __packed;
+
+struct wmi_twt_disable_params_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+} __packed;
+
+enum WMI_HOST_TWT_COMMAND {
+       WMI_HOST_TWT_COMMAND_REQUEST_TWT = 0,
+       WMI_HOST_TWT_COMMAND_SUGGEST_TWT,
+       WMI_HOST_TWT_COMMAND_DEMAND_TWT,
+       WMI_HOST_TWT_COMMAND_TWT_GROUPING,
+       WMI_HOST_TWT_COMMAND_ACCEPT_TWT,
+       WMI_HOST_TWT_COMMAND_ALTERNATE_TWT,
+       WMI_HOST_TWT_COMMAND_DICTATE_TWT,
+       WMI_HOST_TWT_COMMAND_REJECT_TWT,
+};
+
+#define WMI_TWT_ADD_DIALOG_FLAG_BCAST           BIT(8)
+#define WMI_TWT_ADD_DIALOG_FLAG_TRIGGER         BIT(9)
+#define WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE       BIT(10)
+#define WMI_TWT_ADD_DIALOG_FLAG_PROTECTION      BIT(11)
+
+struct wmi_twt_add_dialog_params_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t dialog_id;
+       uint32_t wake_intvl_us;
+       uint32_t wake_intvl_mantis;
+       uint32_t wake_dura_us;
+       uint32_t sp_offset_us;
+       uint32_t flags;
+} __packed;
+
+struct wmi_twt_add_dialog_params {
+       uint32_t vdev_id;
+       uint8_t peer_macaddr[IEEE80211_ADDR_LEN];
+       uint32_t dialog_id;
+       uint32_t wake_intvl_us;
+       uint32_t wake_intvl_mantis;
+       uint32_t wake_dura_us;
+       uint32_t sp_offset_us;
+       uint8_t twt_cmd;
+       uint8_t flag_bcast;
+       uint8_t flag_trigger;
+       uint8_t flag_flow_type;
+       uint8_t flag_protection;
+} __packed;
+
+enum  wmi_twt_add_dialog_status {
+       WMI_ADD_TWT_STATUS_OK,
+       WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED,
+       WMI_ADD_TWT_STATUS_USED_DIALOG_ID,
+       WMI_ADD_TWT_STATUS_INVALID_PARAM,
+       WMI_ADD_TWT_STATUS_NOT_READY,
+       WMI_ADD_TWT_STATUS_NO_RESOURCE,
+       WMI_ADD_TWT_STATUS_NO_ACK,
+       WMI_ADD_TWT_STATUS_NO_RESPONSE,
+       WMI_ADD_TWT_STATUS_DENIED,
+       WMI_ADD_TWT_STATUS_UNKNOWN_ERROR,
+};
+
+struct wmi_twt_add_dialog_event {
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t dialog_id;
+       uint32_t status;
+} __packed;
+
+struct wmi_twt_del_dialog_params {
+       uint32_t vdev_id;
+       uint8_t peer_macaddr[IEEE80211_ADDR_LEN];
+       uint32_t dialog_id;
+} __packed;
+
+struct wmi_twt_del_dialog_params_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params {
+       uint32_t vdev_id;
+       uint8_t peer_macaddr[IEEE80211_ADDR_LEN];
+       uint32_t dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t dialog_id;
+} __packed;
+
+struct wmi_twt_resume_dialog_params {
+       uint32_t vdev_id;
+       uint8_t peer_macaddr[IEEE80211_ADDR_LEN];
+       uint32_t dialog_id;
+       uint32_t sp_offset_us;
+       uint32_t next_twt_size;
+} __packed;
+
+struct wmi_twt_resume_dialog_params_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t dialog_id;
+       uint32_t sp_offset_us;
+       uint32_t next_twt_size;
+} __packed;
+
+struct wmi_obss_spatial_reuse_params_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t enable;
+       int32_t obss_min;
+       int32_t obss_max;
+       uint32_t vdev_id;
+} __packed;
+
+struct wmi_pdev_obss_pd_bitmap_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t bitmap[2];
+} __packed;
+
+#define ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS              200
+#define ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE          0
+#define ATH12K_OBSS_COLOR_COLLISION_DETECTION                  1
+
+#define ATH12K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS     10000
+#define ATH12K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS      5000
+
+enum wmi_bss_color_collision {
+       WMI_BSS_COLOR_COLLISION_DISABLE = 0,
+       WMI_BSS_COLOR_COLLISION_DETECTION,
+       WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY,
+       WMI_BSS_COLOR_FREE_SLOT_AVAILABLE,
+};
+
+struct wmi_obss_color_collision_cfg_params_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t flags;
+       uint32_t evt_type;
+       uint32_t current_bss_color;
+       uint32_t detection_period_ms;
+       uint32_t scan_period_ms;
+       uint32_t free_slot_expiry_time_ms;
+} __packed;
+
+struct wmi_bss_color_change_enable_params_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t enable;
+} __packed;
+
+struct wmi_obss_color_collision_event {
+       uint32_t vdev_id;
+       uint32_t evt_type;
+       uint64_t obss_color_bitmap;
+} __packed;
+
+#define ATH12K_IPV4_TH_SEED_SIZE 5
+#define ATH12K_IPV6_TH_SEED_SIZE 11
+
+struct ath12k_wmi_pdev_lro_config_cmd {
+       uint32_t tlv_header;
+       uint32_t lro_enable;
+       uint32_t res;
+       uint32_t th_4[ATH12K_IPV4_TH_SEED_SIZE];
+       uint32_t th_6[ATH12K_IPV6_TH_SEED_SIZE];
+       uint32_t pdev_id;
+} __packed;
+
+#define ATH12K_WMI_SPECTRAL_COUNT_DEFAULT                 0
+#define ATH12K_WMI_SPECTRAL_PERIOD_DEFAULT              224
+#define ATH12K_WMI_SPECTRAL_PRIORITY_DEFAULT              1
+#define ATH12K_WMI_SPECTRAL_FFT_SIZE_DEFAULT              7
+#define ATH12K_WMI_SPECTRAL_GC_ENA_DEFAULT                1
+#define ATH12K_WMI_SPECTRAL_RESTART_ENA_DEFAULT           0
+#define ATH12K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT     -96
+#define ATH12K_WMI_SPECTRAL_INIT_DELAY_DEFAULT           80
+#define ATH12K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT          12
+#define ATH12K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT           8
+#define ATH12K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT           0
+#define ATH12K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT         0
+#define ATH12K_WMI_SPECTRAL_RSSI_THR_DEFAULT           0xf0
+#define ATH12K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT            0
+#define ATH12K_WMI_SPECTRAL_RPT_MODE_DEFAULT              2
+#define ATH12K_WMI_SPECTRAL_BIN_SCALE_DEFAULT             1
+#define ATH12K_WMI_SPECTRAL_DBM_ADJ_DEFAULT               1
+#define ATH12K_WMI_SPECTRAL_CHN_MASK_DEFAULT              1
+
+struct ath12k_wmi_vdev_spectral_conf_param {
+       uint32_t vdev_id;
+       uint32_t scan_count;
+       uint32_t scan_period;
+       uint32_t scan_priority;
+       uint32_t scan_fft_size;
+       uint32_t scan_gc_ena;
+       uint32_t scan_restart_ena;
+       uint32_t scan_noise_floor_ref;
+       uint32_t scan_init_delay;
+       uint32_t scan_nb_tone_thr;
+       uint32_t scan_str_bin_thr;
+       uint32_t scan_wb_rpt_mode;
+       uint32_t scan_rssi_rpt_mode;
+       uint32_t scan_rssi_thr;
+       uint32_t scan_pwr_format;
+       uint32_t scan_rpt_mode;
+       uint32_t scan_bin_scale;
+       uint32_t scan_dbm_adj;
+       uint32_t scan_chn_mask;
+} __packed;
+
+struct ath12k_wmi_vdev_spectral_conf_cmd {
+       uint32_t tlv_header;
+       struct ath12k_wmi_vdev_spectral_conf_param param;
+} __packed;
+
+#define ATH12K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER  1
+#define ATH12K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR    2
+#define ATH12K_WMI_SPECTRAL_ENABLE_CMD_ENABLE    1
+#define ATH12K_WMI_SPECTRAL_ENABLE_CMD_DISABLE   2
+
+struct ath12k_wmi_vdev_spectral_enable_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t trigger_cmd;
+       uint32_t enable_cmd;
+} __packed;
+
+struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t module_id;             /* see enum wmi_direct_buffer_module */
+       uint32_t base_paddr_lo;
+       uint32_t base_paddr_hi;
+       uint32_t head_idx_paddr_lo;
+       uint32_t head_idx_paddr_hi;
+       uint32_t tail_idx_paddr_lo;
+       uint32_t tail_idx_paddr_hi;
+       uint32_t num_elems;             /* Number of elems in the ring */
+       uint32_t buf_size;              /* size of allocated buffer in bytes */
+
+       /* Number of wmi_dma_buf_release_entry packed together */
+       uint32_t num_resp_per_event;
+
+       /* Target should timeout and send whatever resp
+        * it has if this time expires, units in milliseconds
+        */
+       uint32_t event_timeout_ms;
+} __packed;
+
+struct ath12k_wmi_dma_buf_release_fixed_param {
+       uint32_t pdev_id;
+       uint32_t module_id;
+       uint32_t num_buf_release_entry;
+       uint32_t num_meta_data_entry;
+} __packed;
+
+struct wmi_dma_buf_release_entry {
+       uint32_t tlv_header;
+       uint32_t paddr_lo;
+
+       /* Bits 11:0:   address of data
+        * Bits 31:12:  host context data
+        */
+       uint32_t paddr_hi;
+} __packed;
+
+#define WMI_SPECTRAL_META_INFO1_FREQ1          GENMASK(15, 0)
+#define WMI_SPECTRAL_META_INFO1_FREQ2          GENMASK(31, 16)
+
+#define WMI_SPECTRAL_META_INFO2_CHN_WIDTH      GENMASK(7, 0)
+
+struct wmi_dma_buf_release_meta_data {
+       uint32_t tlv_header;
+       int32_t noise_floor[WMI_MAX_CHAINS];
+       uint32_t reset_delay;
+       uint32_t freq1;
+       uint32_t freq2;
+       uint32_t ch_width;
+} __packed;
+
+enum wmi_fils_discovery_cmd_type {
+       WMI_FILS_DISCOVERY_CMD,
+       WMI_UNSOL_BCAST_PROBE_RESP,
+};
+
+struct wmi_fils_discovery_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t interval;
+       uint32_t config; /* enum wmi_fils_discovery_cmd_type */
+} __packed;
+
+struct wmi_fils_discovery_tmpl_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t buf_len;
+} __packed;
+
+struct wmi_probe_tmpl_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t buf_len;
+} __packed;
+
+struct target_resource_config {
+       uint32_t num_vdevs;
+       uint32_t num_peers;
+       uint32_t num_active_peers;
+       uint32_t num_offload_peers;
+       uint32_t num_offload_reorder_buffs;
+       uint32_t num_peer_keys;
+       uint32_t num_tids;
+       uint32_t ast_skid_limit;
+       uint32_t tx_chain_mask;
+       uint32_t rx_chain_mask;
+       uint32_t rx_timeout_pri[4];
+       uint32_t rx_decap_mode;
+       uint32_t scan_max_pending_req;
+       uint32_t bmiss_offload_max_vdev;
+       uint32_t roam_offload_max_vdev;
+       uint32_t roam_offload_max_ap_profiles;
+       uint32_t num_mcast_groups;
+       uint32_t num_mcast_table_elems;
+       uint32_t mcast2ucast_mode;
+       uint32_t tx_dbg_log_size;
+       uint32_t num_wds_entries;
+       uint32_t dma_burst_size;
+       uint32_t mac_aggr_delim;
+       uint32_t rx_skip_defrag_timeout_dup_detection_check;
+       uint32_t vow_config;
+       uint32_t gtk_offload_max_vdev;
+       uint32_t num_msdu_desc;
+       uint32_t max_frag_entries;
+       uint32_t max_peer_ext_stats;
+       uint32_t smart_ant_cap;
+       uint32_t bk_minfree;
+       uint32_t be_minfree;
+       uint32_t vi_minfree;
+       uint32_t vo_minfree;
+       uint32_t rx_batchmode;
+       uint32_t tt_support;
+       uint32_t flag1;
+       uint32_t iphdr_pad_config;
+       uint32_t qwrap_config:16,
+           alloc_frag_desc_for_data_pkt:16;
+       uint32_t num_tdls_vdevs;
+       uint32_t num_tdls_conn_table_entries;
+       uint32_t beacon_tx_offload_max_vdev;
+       uint32_t num_multicast_filter_entries;
+       uint32_t num_wow_filters;
+       uint32_t num_keep_alive_pattern;
+       uint32_t keep_alive_pattern_size;
+       uint32_t max_tdls_concurrent_sleep_sta;
+       uint32_t max_tdls_concurrent_buffer_sta;
+       uint32_t wmi_send_separate;
+       uint32_t num_ocb_vdevs;
+       uint32_t num_ocb_channels;
+       uint32_t num_ocb_schedules;
+       uint32_t num_ns_ext_tuples_cfg;
+       uint32_t bpf_instruction_size;
+       uint32_t max_bssid_rx_filters;
+       uint32_t use_pdev_id;
+       uint32_t peer_map_unmap_v2_support;
+       uint32_t sched_params;
+       uint32_t twt_ap_pdev_count;
+       uint32_t twt_ap_sta_count;
+       uint8_t is_reg_cc_ext_event_supported;
+       uint32_t ema_max_vap_cnt;
+       uint32_t ema_max_profile_period;
+};
+
+enum wmi_debug_log_param {
+       WMI_DEBUG_LOG_PARAM_LOG_LEVEL = 0x1,
+       WMI_DEBUG_LOG_PARAM_VDEV_ENABLE,
+       WMI_DEBUG_LOG_PARAM_VDEV_DISABLE,
+       WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP,
+       WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP,
+       WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP,
+};
+
+struct wmi_debug_log_config_cmd_fixed_param {
+       uint32_t tlv_header;
+       uint32_t dbg_log_param;
+       uint32_t value;
+} __packed;
+
+#define WMI_MAX_MEM_REQS 32
+
+#define MAX_RADIOS 3
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
+
+enum ath12k_wmi_peer_ps_state {
+       WMI_PEER_PS_STATE_OFF,
+       WMI_PEER_PS_STATE_ON,
+       WMI_PEER_PS_STATE_DISABLED,
+};
+
+enum wmi_peer_ps_supported_bitmap {
+       /* Used to indicate that power save state change is valid */
+       WMI_PEER_PS_VALID = 0x1,
+       WMI_PEER_PS_STATE_TIMESTAMP = 0x2,
+};
+
+struct wmi_peer_sta_ps_state_chg_event {
+       struct wmi_mac_addr peer_macaddr;
+       uint32_t peer_ps_state;
+       uint32_t ps_supported_bitmap;
+       uint32_t peer_ps_valid;
+       uint32_t peer_ps_timestamp;
+} __packed;
+
+/* Definition of HW data filtering */
+enum hw_data_filter_type {
+       WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0),
+       WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1),
+};
+
+struct wmi_hw_data_filter_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t enable;
+       uint32_t hw_filter_bitmap;
+} __packed;
+
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+       WOW_BMISS_EVENT = 0,
+       WOW_BETTER_AP_EVENT,
+       WOW_DEAUTH_RECVD_EVENT,
+       WOW_MAGIC_PKT_RECVD_EVENT,
+       WOW_GTK_ERR_EVENT,
+       WOW_FOURWAY_HSHAKE_EVENT,
+       WOW_EAPOL_RECVD_EVENT,
+       WOW_NLO_DETECTED_EVENT,
+       WOW_DISASSOC_RECVD_EVENT,
+       WOW_PATTERN_MATCH_EVENT,
+       WOW_CSA_IE_EVENT,
+       WOW_PROBE_REQ_WPS_IE_EVENT,
+       WOW_AUTH_REQ_EVENT,
+       WOW_ASSOC_REQ_EVENT,
+       WOW_HTT_EVENT,
+       WOW_RA_MATCH_EVENT,
+       WOW_HOST_AUTO_SHUTDOWN_EVENT,
+       WOW_IOAC_MAGIC_EVENT,
+       WOW_IOAC_SHORT_EVENT,
+       WOW_IOAC_EXTEND_EVENT,
+       WOW_IOAC_TIMER_EVENT,
+       WOW_DFS_PHYERR_RADAR_EVENT,
+       WOW_BEACON_EVENT,
+       WOW_CLIENT_KICKOUT_EVENT,
+       WOW_EVENT_MAX,
+};
+
+enum wmi_wow_interface_cfg {
+       WOW_IFACE_PAUSE_ENABLED,
+       WOW_IFACE_PAUSE_DISABLED
+};
+
/* Expand to a switch case returning the enumerator's name as a string. */
#define C2S(x) case x: return #x

/*
 * Return the symbolic name of a WoW wakeup event for debug output, or
 * NULL if ev is not a known wmi_wow_wakeup_event value.
 */
static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
{
	switch (ev) {
	C2S(WOW_BMISS_EVENT);
	C2S(WOW_BETTER_AP_EVENT);
	C2S(WOW_DEAUTH_RECVD_EVENT);
	C2S(WOW_MAGIC_PKT_RECVD_EVENT);
	C2S(WOW_GTK_ERR_EVENT);
	C2S(WOW_FOURWAY_HSHAKE_EVENT);
	C2S(WOW_EAPOL_RECVD_EVENT);
	C2S(WOW_NLO_DETECTED_EVENT);
	C2S(WOW_DISASSOC_RECVD_EVENT);
	C2S(WOW_PATTERN_MATCH_EVENT);
	C2S(WOW_CSA_IE_EVENT);
	C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
	C2S(WOW_AUTH_REQ_EVENT);
	C2S(WOW_ASSOC_REQ_EVENT);
	C2S(WOW_HTT_EVENT);
	C2S(WOW_RA_MATCH_EVENT);
	C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
	C2S(WOW_IOAC_MAGIC_EVENT);
	C2S(WOW_IOAC_SHORT_EVENT);
	C2S(WOW_IOAC_EXTEND_EVENT);
	C2S(WOW_IOAC_TIMER_EVENT);
	C2S(WOW_DFS_PHYERR_RADAR_EVENT);
	C2S(WOW_BEACON_EVENT);
	C2S(WOW_CLIENT_KICKOUT_EVENT);
	C2S(WOW_EVENT_MAX);
	default:
		return NULL;
	}
}
+
+/*
+ * Reason codes reported by firmware for why the host was woken.
+ * "RADADR" in WOW_REASON_DFS_PHYERR_RADADR_EVENT is spelled that way
+ * in the firmware interface (sic).
+ */
+enum wmi_wow_wake_reason {
+       WOW_REASON_UNSPECIFIED = -1,
+       WOW_REASON_NLOD = 0,
+       WOW_REASON_AP_ASSOC_LOST,
+       WOW_REASON_LOW_RSSI,
+       WOW_REASON_DEAUTH_RECVD,
+       WOW_REASON_DISASSOC_RECVD,
+       WOW_REASON_GTK_HS_ERR,
+       WOW_REASON_EAP_REQ,
+       WOW_REASON_FOURWAY_HS_RECV,
+       WOW_REASON_TIMER_INTR_RECV,
+       WOW_REASON_PATTERN_MATCH_FOUND,
+       WOW_REASON_RECV_MAGIC_PATTERN,
+       WOW_REASON_P2P_DISC,
+       WOW_REASON_WLAN_HB,
+       WOW_REASON_CSA_EVENT,
+       WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+       WOW_REASON_AUTH_REQ_RECV,
+       WOW_REASON_ASSOC_REQ_RECV,
+       WOW_REASON_HTT_EVENT,
+       WOW_REASON_RA_MATCH,
+       WOW_REASON_HOST_AUTO_SHUTDOWN,
+       WOW_REASON_IOAC_MAGIC_EVENT,
+       WOW_REASON_IOAC_SHORT_EVENT,
+       WOW_REASON_IOAC_EXTEND_EVENT,
+       WOW_REASON_IOAC_TIMER_EVENT,
+       WOW_REASON_ROAM_HO,
+       WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+       WOW_REASON_BEACON_RECV,
+       WOW_REASON_CLIENT_KICKOUT_EVENT,
+       WOW_REASON_PAGE_FAULT = 0x3a,
+       WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+/*
+ * Map a WoW wake reason code to its symbolic name for debug output.
+ * Returns NULL for values outside enum wmi_wow_wake_reason.
+ */
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+       switch (reason) {
+       case WOW_REASON_UNSPECIFIED: return "WOW_REASON_UNSPECIFIED";
+       case WOW_REASON_NLOD: return "WOW_REASON_NLOD";
+       case WOW_REASON_AP_ASSOC_LOST: return "WOW_REASON_AP_ASSOC_LOST";
+       case WOW_REASON_LOW_RSSI: return "WOW_REASON_LOW_RSSI";
+       case WOW_REASON_DEAUTH_RECVD: return "WOW_REASON_DEAUTH_RECVD";
+       case WOW_REASON_DISASSOC_RECVD: return "WOW_REASON_DISASSOC_RECVD";
+       case WOW_REASON_GTK_HS_ERR: return "WOW_REASON_GTK_HS_ERR";
+       case WOW_REASON_EAP_REQ: return "WOW_REASON_EAP_REQ";
+       case WOW_REASON_FOURWAY_HS_RECV: return "WOW_REASON_FOURWAY_HS_RECV";
+       case WOW_REASON_TIMER_INTR_RECV: return "WOW_REASON_TIMER_INTR_RECV";
+       case WOW_REASON_PATTERN_MATCH_FOUND: return "WOW_REASON_PATTERN_MATCH_FOUND";
+       case WOW_REASON_RECV_MAGIC_PATTERN: return "WOW_REASON_RECV_MAGIC_PATTERN";
+       case WOW_REASON_P2P_DISC: return "WOW_REASON_P2P_DISC";
+       case WOW_REASON_WLAN_HB: return "WOW_REASON_WLAN_HB";
+       case WOW_REASON_CSA_EVENT: return "WOW_REASON_CSA_EVENT";
+       case WOW_REASON_PROBE_REQ_WPS_IE_RECV: return "WOW_REASON_PROBE_REQ_WPS_IE_RECV";
+       case WOW_REASON_AUTH_REQ_RECV: return "WOW_REASON_AUTH_REQ_RECV";
+       case WOW_REASON_ASSOC_REQ_RECV: return "WOW_REASON_ASSOC_REQ_RECV";
+       case WOW_REASON_HTT_EVENT: return "WOW_REASON_HTT_EVENT";
+       case WOW_REASON_RA_MATCH: return "WOW_REASON_RA_MATCH";
+       case WOW_REASON_HOST_AUTO_SHUTDOWN: return "WOW_REASON_HOST_AUTO_SHUTDOWN";
+       case WOW_REASON_IOAC_MAGIC_EVENT: return "WOW_REASON_IOAC_MAGIC_EVENT";
+       case WOW_REASON_IOAC_SHORT_EVENT: return "WOW_REASON_IOAC_SHORT_EVENT";
+       case WOW_REASON_IOAC_EXTEND_EVENT: return "WOW_REASON_IOAC_EXTEND_EVENT";
+       case WOW_REASON_IOAC_TIMER_EVENT: return "WOW_REASON_IOAC_TIMER_EVENT";
+       case WOW_REASON_ROAM_HO: return "WOW_REASON_ROAM_HO";
+       case WOW_REASON_DFS_PHYERR_RADADR_EVENT: return "WOW_REASON_DFS_PHYERR_RADADR_EVENT";
+       case WOW_REASON_BEACON_RECV: return "WOW_REASON_BEACON_RECV";
+       case WOW_REASON_CLIENT_KICKOUT_EVENT: return "WOW_REASON_CLIENT_KICKOUT_EVENT";
+       case WOW_REASON_PAGE_FAULT: return "WOW_REASON_PAGE_FAULT";
+       case WOW_REASON_DEBUG_TEST: return "WOW_REASON_DEBUG_TEST";
+       default:
+               return NULL;
+       }
+}
+
+#undef C2S
+
+/* Host-side parsed representation of a WoW wakeup event. */
+struct wmi_wow_ev_arg {
+       uint32_t vdev_id;
+       uint32_t flag;
+       enum wmi_wow_wake_reason wake_reason;
+       uint32_t data_len;
+};
+
+/* Pattern types usable with WoW pattern-match wakeups. */
+enum wmi_tlv_pattern_type {
+       WOW_PATTERN_MIN = 0,
+       WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+       WOW_IPV4_SYNC_PATTERN,
+       WOW_IPV6_SYNC_PATTERN,
+       WOW_WILD_CARD_PATTERN,
+       WOW_TIMER_PATTERN,
+       WOW_MAGIC_PATTERN,
+       WOW_IPV6_RA_PATTERN,
+       WOW_IOAC_PKT_PATTERN,
+       WOW_IOAC_TMR_PATTERN,
+       WOW_PATTERN_MAX
+};
+
+/* Pattern/bitmask buffer sizes and offset limits for WoW patterns. */
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE                148
+#define WOW_DEFAULT_BITMASK_SIZE               148
+
+#define WOW_MIN_PATTERN_SIZE   1
+#define WOW_MAX_PATTERN_SIZE   148
+#define WOW_MAX_PKT_OFFSET     128
+#define WOW_HDR_LEN    (sizeof(struct ieee80211_hdr_3addr) + \
+       sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+       offsetof(struct ieee80211_hdr_3addr, addr1))
+
+/* WMI command TLV to arm (is_add) or disarm a set of wakeup events. */
+struct wmi_wow_add_del_event_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t is_add;
+       uint32_t event_bitmap;
+} __packed;
+
+/* WMI command TLV to enable/disable WoW mode. */
+struct wmi_wow_enable_cmd {
+       uint32_t tlv_header;
+       uint32_t enable;
+       uint32_t pause_iface_config;
+       uint32_t flags;
+}  __packed;
+
+/* WMI command TLV telling firmware the host has woken up. */
+struct wmi_wow_host_wakeup_ind {
+       uint32_t tlv_header;
+       uint32_t reserved;
+} __packed;
+
+/* Wakeup event info TLV as delivered by firmware. */
+struct wmi_tlv_wow_event_info {
+       uint32_t vdev_id;
+       uint32_t flag;
+       uint32_t wake_reason;
+       uint32_t data_len;
+} __packed;
+
+/* One bitmap pattern: pattern bytes, mask bytes, and match offset. */
+struct wmi_wow_bitmap_pattern {
+       uint32_t tlv_header;
+       uint8_t patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+       uint8_t bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+       uint32_t pattern_offset;
+       uint32_t pattern_len;
+       uint32_t bitmask_len;
+       uint32_t pattern_id;
+} __packed;
+
+/* WMI command TLV to install a wakeup pattern on a vdev. */
+struct wmi_wow_add_pattern_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t pattern_id;
+       uint32_t pattern_type;
+} __packed;
+
+/* WMI command TLV to remove a previously installed wakeup pattern. */
+struct wmi_wow_del_pattern_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t pattern_id;
+       uint32_t pattern_type;
+} __packed;
+
+/* Limits for PNO (preferred network offload) scheduled scans. */
+#define WMI_PNO_MAX_SCHED_SCAN_PLANS      2
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT   7200
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
+#define WMI_PNO_MAX_NETW_CHANNELS         26
+#define WMI_PNO_MAX_NETW_CHANNELS_EX      60
+#define WMI_PNO_MAX_SUPP_NETWORKS         WLAN_SCAN_PARAMS_MAX_SSID
+#define WMI_PNO_MAX_IE_LENGTH             WLAN_SCAN_PARAMS_MAX_IE_LEN
+
+/* size based of dot11 declaration without extra IEs as we will not carry those for PNO */
+#define WMI_PNO_MAX_PB_REQ_SIZE    450
+
+#define WMI_PNO_24G_DEFAULT_CH     1
+#define WMI_PNO_5G_DEFAULT_CH      36
+
+/* Dwell times in milliseconds — TODO confirm units against firmware docs. */
+#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
+#define WMI_PASSIVE_MAX_CHANNEL_TIME   110
+
+/* SSID broadcast type */
+enum wmi_ssid_bcast_type {
+       BCAST_UNKNOWN      = 0,
+       BCAST_NORMAL       = 1,
+       BCAST_HIDDEN       = 2,
+};
+
+/* Limits and control flags for NLO (network list offload). */
+#define WMI_NLO_MAX_SSIDS    16
+#define WMI_NLO_MAX_CHAN     48
+
+#define WMI_NLO_CONFIG_STOP                             BIT(0)
+#define WMI_NLO_CONFIG_START                            BIT(1)
+#define WMI_NLO_CONFIG_RESET                            BIT(2)
+#define WMI_NLO_CONFIG_SLOW_SCAN                        BIT(4)
+#define WMI_NLO_CONFIG_FAST_SCAN                        BIT(5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN                     BIT(6)
+
+/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
+ * Only one of them can be enabled at a given time
+ */
+#define WMI_NLO_CONFIG_ENLO                             BIT(7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE                     BIT(8)
+#define WMI_NLO_CONFIG_ENLO_RESET                       BIT(9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ         BIT(10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ       BIT(11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG          BIT(13)
+
+/* NLO per-network parameters; each carries a 'valid' flag. */
+struct wmi_nlo_ssid_param {
+       uint32_t valid;
+       struct wmi_ssid ssid;
+} __packed;
+
+struct wmi_nlo_enc_param {
+       uint32_t valid;
+       uint32_t enc_type;
+} __packed;
+
+struct wmi_nlo_auth_param {
+       uint32_t valid;
+       uint32_t auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_param {
+       uint32_t valid;
+       uint32_t bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_param {
+       uint32_t valid;
+       int32_t rssi;
+} __packed;
+
+/* One configured NLO network entry, carried as a TLV. */
+struct nlo_configured_parameters {
+       /* TLV tag and len;*/
+       uint32_t tlv_header;
+       struct wmi_nlo_ssid_param ssid;
+       struct wmi_nlo_enc_param enc_type;
+       struct wmi_nlo_auth_param auth_type;
+       struct wmi_nlo_rssi_param rssi_cond;
+
+       /* indicates if the SSID is hidden or not */
+       struct wmi_nlo_bcast_nw_param bcast_nw_type;
+} __packed;
+
+/* Host-side description of one PNO network (not a wire struct). */
+struct wmi_network_type {
+       struct wmi_ssid ssid;
+       uint32_t authentication;
+       uint32_t encryption;
+       uint32_t bcast_nw_type;
+       uint8_t channel_count;
+       uint16_t channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
+       int32_t rssi_threshold;
+};
+
+/* Host-side PNO scan request, later encoded into WMI TLVs. */
+struct wmi_pno_scan_req {
+       uint8_t enable;
+       uint8_t vdev_id;
+       uint8_t uc_networks_count;
+       struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
+       uint32_t fast_scan_period;
+       uint32_t slow_scan_period;
+       uint8_t fast_scan_max_cycles;
+
+       bool do_passive_scan;
+
+       uint32_t delay_start_time;
+       uint32_t active_min_time;
+       uint32_t active_max_time;
+       uint32_t passive_min_time;
+       uint32_t passive_max_time;
+
+       /* mac address randomization attributes */
+       uint32_t enable_pno_scan_randomization;
+       uint8_t mac_addr[IEEE80211_ADDR_LEN];
+       uint8_t mac_addr_mask[IEEE80211_ADDR_LEN];
+};
+
+/* WMI command TLV configuring NLO scanning while in WoW mode. */
+struct wmi_wow_nlo_config_cmd {
+       uint32_t tlv_header;
+       uint32_t flags;                 /* WMI_NLO_CONFIG_* bits */
+       uint32_t vdev_id;
+       uint32_t fast_scan_max_cycles;
+       uint32_t active_dwell_time;
+       uint32_t passive_dwell_time;
+       uint32_t probe_bundle_size;
+
+       /* ART = IRT */
+       uint32_t rest_time;
+
+       /* Max value that can be reached after SBM */
+       uint32_t max_rest_time;
+
+       /* SBM */
+       uint32_t scan_backoff_multiplier;
+
+       /* SCBM */
+       uint32_t fast_scan_period;
+
+       /* specific to windows */
+       uint32_t slow_scan_period;
+
+       uint32_t no_of_ssids;
+
+       uint32_t num_of_channels;
+
+       /* NLO scan start delay time in milliseconds */
+       uint32_t delay_start_time;
+
+       /* MAC Address to use in Probe Req as SA */
+       struct wmi_mac_addr mac_addr;
+
+       /* Mask on which MAC has to be randomized */
+       struct wmi_mac_addr mac_mask;
+
+       /* IE bitmap to use in Probe Req */
+       uint32_t ie_bitmap[8];
+
+       /* Number of vendor OUIs. In the TLV vendor_oui[] */
+       uint32_t num_vendor_oui;
+
+       /* Number of connected NLO band preferences */
+       uint32_t num_cnlo_band_pref;
+
+       /* The TLVs will follow.
+        * nlo_configured_parameters nlo_list[];
+        * uint32_t channel_list[num_of_channels];
+        */
+} __packed;
+
+/* ARP/NS offload: firmware answers ARP and IPv6 NS on the host's behalf. */
+#define WMI_MAX_NS_OFFLOADS           2
+#define WMI_MAX_ARP_OFFLOADS          2
+
+#define WMI_ARPOL_FLAGS_VALID              BIT(0)
+#define WMI_ARPOL_FLAGS_MAC_VALID          BIT(1)
+#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID    BIT(2)
+
+/* One ARP offload entry (IPv4 address + MAC to answer for). */
+struct wmi_arp_offload_tuple {
+       uint32_t tlv_header;
+       uint32_t flags;                 /* WMI_ARPOL_FLAGS_* */
+       uint8_t target_ipaddr[4];
+       uint8_t remote_ipaddr[4];
+       struct wmi_mac_addr target_mac;
+} __packed;
+
+#define WMI_NSOL_FLAGS_VALID               BIT(0)
+#define WMI_NSOL_FLAGS_MAC_VALID           BIT(1)
+#define WMI_NSOL_FLAGS_REMOTE_IP_VALID     BIT(2)
+#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST     BIT(3)
+
+#define WMI_NSOL_MAX_TARGET_IPS    2
+
+/* One IPv6 neighbor-solicitation offload entry. */
+struct wmi_ns_offload_tuple {
+       uint32_t tlv_header;
+       uint32_t flags;                 /* WMI_NSOL_FLAGS_* */
+       uint8_t target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16];
+       uint8_t solicitation_ipaddr[16];
+       uint8_t remote_ipaddr[16];
+       struct wmi_mac_addr target_mac;
+} __packed;
+
+/* WMI command TLV installing the ARP/NS offload tuples on a vdev. */
+struct wmi_set_arp_ns_offload_cmd {
+       uint32_t tlv_header;
+       uint32_t flags;
+       uint32_t vdev_id;
+       uint32_t num_ns_ext_tuples;
+       /* The TLVs follow:
+        * wmi_ns_offload_tuple  ns_tuples[WMI_MAX_NS_OFFLOADS];
+        * wmi_arp_offload_tuple arp_tuples[WMI_MAX_ARP_OFFLOADS];
+        * wmi_ns_offload_tuple  ns_ext_tuples[num_ns_ext_tuples];
+        */
+} __packed;
+
+/* GTK rekey offload: firmware performs group-key rekeying while asleep. */
+#define GTK_OFFLOAD_OPCODE_MASK             0xFF000000
+#define GTK_OFFLOAD_ENABLE_OPCODE           0x01000000
+#define GTK_OFFLOAD_DISABLE_OPCODE          0x02000000
+#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE   0x04000000
+
+#define GTK_OFFLOAD_KEK_BYTES       16
+#define GTK_OFFLOAD_KCK_BYTES       16
+#define GTK_REPLAY_COUNTER_BYTES    8
+#define WMI_MAX_KEY_LEN             32
+#define IGTK_PN_SIZE                6
+
+/* 64-bit replay counter, accessible as bytes or two 32-bit words. */
+struct wmi_replayc_cnt {
+       union {
+               uint8_t counter[GTK_REPLAY_COUNTER_BYTES];
+               struct {
+                       uint32_t word0;
+                       uint32_t word1;
+               } __packed;
+       } __packed;
+} __packed;
+
+/* WMI event reporting GTK offload status (current keys and counters). */
+struct wmi_gtk_offload_status_event {
+       uint32_t vdev_id;
+       uint32_t flags;
+       uint32_t refresh_cnt;
+       struct wmi_replayc_cnt replay_ctr;
+       uint8_t igtk_key_index;
+       uint8_t igtk_key_length;
+       uint8_t igtk_key_rsc[IGTK_PN_SIZE];
+       uint8_t igtk_key[WMI_MAX_KEY_LEN];
+       uint8_t gtk_key_index;
+       uint8_t gtk_key_length;
+       uint8_t gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES];
+       uint8_t gtk_key[WMI_MAX_KEY_LEN];
+} __packed;
+
+/* WMI command TLV providing KEK/KCK material for GTK rekey offload. */
+struct wmi_gtk_rekey_offload_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t flags;
+       uint8_t kek[GTK_OFFLOAD_KEK_BYTES];
+       uint8_t kck[GTK_OFFLOAD_KCK_BYTES];
+       uint8_t replay_ctr[GTK_REPLAY_COUNTER_BYTES];
+} __packed;
+
+/* BIOS-provided SAR (transmit power limit) table sizes. */
+#define BIOS_SAR_TABLE_LEN     (22)
+#define BIOS_SAR_RSVD1_LEN     (6)
+#define BIOS_SAR_RSVD2_LEN     (18)
+
+/* WMI command TLV uploading the BIOS SAR table. */
+struct wmi_pdev_set_sar_table_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t sar_len;
+       uint32_t rsvd_len;
+} __packed;
+
+/* WMI command TLV uploading the geo-based power offset table. */
+struct wmi_pdev_set_geo_table_cmd {
+       uint32_t tlv_header;
+       uint32_t pdev_id;
+       uint32_t rsvd_len;
+} __packed;
+
+/* WMI command TLV configuring firmware-generated keepalive frames. */
+struct wmi_sta_keepalive_cmd {
+       uint32_t tlv_header;
+       uint32_t vdev_id;
+       uint32_t enabled;
+
+       /* WMI_STA_KEEPALIVE_METHOD_ */
+       uint32_t method;
+
+       /* in seconds */
+       uint32_t interval;
+
+       /* following this structure is the TLV for struct
+        * wmi_sta_keepalive_arp_resp
+        */
+} __packed;
+
+/* ARP response template used by the unsolicited-ARP keepalive method. */
+struct wmi_sta_keepalive_arp_resp {
+       uint32_t tlv_header;
+       uint32_t src_ip4_addr;
+       uint32_t dest_ip4_addr;
+       struct wmi_mac_addr dest_mac_addr;
+} __packed;
+
+/* Host-side keepalive parameters used to build the command above. */
+struct wmi_sta_keepalive_arg {
+       uint32_t vdev_id;
+       uint32_t enabled;
+       uint32_t method;
+       uint32_t interval;
+       uint32_t src_ip4_addr;
+       uint32_t dest_ip4_addr;
+       const uint8_t dest_mac_addr[IEEE80211_ADDR_LEN];
+};
+
+enum wmi_sta_keepalive_method {
+       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+       WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2,
+       WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3,
+       WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4,
+       WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT     30
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE     0
+
+
+/*
+ * qrtr.h
+ */
+
+#define QRTR_PROTO_VER_1       1
+#define QRTR_PROTO_VER_2       3 /* (sic!) */
+
+/* QRTR version-1 message header as it appears on the wire. */
+struct qrtr_hdr_v1 {
+       uint32_t version;
+       uint32_t type;                  /* QRTR_TYPE_* */
+       uint32_t src_node_id;
+       uint32_t src_port_id;
+       uint32_t confirm_rx;
+       uint32_t size;
+       uint32_t dst_node_id;
+       uint32_t dst_port_id;
+} __packed;
+
+/*
+ * QRTR version-2 message header as it appears on the wire.
+ * Marked __packed for consistency with struct qrtr_hdr_v1 above and
+ * with the Linux qrtr definition; this is a wire-format structure
+ * whose layout must not depend on compiler padding.  (The fields
+ * happen to be naturally aligned, so the attribute does not change
+ * the layout on common ABIs.)
+ */
+struct qrtr_hdr_v2 {
+       uint8_t version;
+       uint8_t type;                   /* QRTR_TYPE_* */
+       uint8_t flags;                  /* QRTR_FLAGS_* */
+       uint8_t optlen;
+       uint32_t size;
+       uint16_t src_node_id;
+       uint16_t src_port_id;
+       uint16_t dst_node_id;
+       uint16_t dst_port_id;
+} __packed;
+
+/* QRTR control packet payload (service advertisement / client info). */
+struct qrtr_ctrl_pkt {
+       uint32_t cmd;                   /* QRTR_TYPE_* */
+
+       union {
+               struct {
+                       uint32_t service;
+                       uint32_t instance;
+                       uint32_t node;
+                       uint32_t port;
+               } server;
+               struct {
+                       uint32_t node;
+                       uint32_t port;
+               } client;
+       };
+} __packed;
+
+/* QRTR message types. */
+#define QRTR_TYPE_DATA         1
+#define QRTR_TYPE_HELLO                2
+#define QRTR_TYPE_BYE          3
+#define QRTR_TYPE_NEW_SERVER   4
+#define QRTR_TYPE_DEL_SERVER   5
+#define QRTR_TYPE_DEL_CLIENT   6
+#define QRTR_TYPE_RESUME_TX    7
+#define QRTR_TYPE_EXIT         8
+#define QRTR_TYPE_PING         9
+#define QRTR_TYPE_NEW_LOOKUP   10
+#define QRTR_TYPE_DEL_LOOKUP   11
+
+#define QRTR_FLAGS_CONFIRM_RX  (1 << 0)
+
+/* Special node/port addresses. */
+#define QRTR_NODE_BCAST                0xffffffffU
+#define QRTR_PORT_CTRL         0xfffffffeU
+
+/*
+ * qmi.h
+ */
+
+/* QMI message direction/type codes. */
+#define QMI_REQUEST    0
+#define QMI_RESPONSE   2
+#define QMI_INDICATION 4
+
+/* QMI message header preceding every encoded message. */
+struct qmi_header {
+       uint8_t type;                   /* QMI_REQUEST/RESPONSE/INDICATION */
+       uint16_t txn_id;
+       uint16_t msg_id;
+       uint16_t msg_len;
+} __packed;
+
+#define QMI_COMMON_TLV_TYPE    0
+
+/* Element data types used by the QMI TLV encoder/decoder tables. */
+enum qmi_elem_type {
+       QMI_EOTI,
+       QMI_OPT_FLAG,
+       QMI_DATA_LEN,
+       QMI_UNSIGNED_1_BYTE,
+       QMI_UNSIGNED_2_BYTE,
+       QMI_UNSIGNED_4_BYTE,
+       QMI_UNSIGNED_8_BYTE,
+       QMI_SIGNED_2_BYTE_ENUM,
+       QMI_SIGNED_4_BYTE_ENUM,
+       QMI_STRUCT,
+       QMI_STRING,
+       QMI_NUM_DATA_TYPES
+};
+
+enum qmi_array_type {
+       NO_ARRAY,
+       STATIC_ARRAY,
+       VAR_LEN_ARRAY,
+};
+
+/* One row of a QMI encode/decode table describing a message element. */
+struct qmi_elem_info {
+       enum qmi_elem_type data_type;
+       uint32_t elem_len;
+       uint32_t elem_size;
+       enum qmi_array_type array_type;
+       uint8_t tlv_type;
+       uint32_t offset;                /* offset of the field in the C struct */
+       const struct qmi_elem_info *ei_array;   /* nested table for QMI_STRUCT */
+};
+
+/* Standard QMI result and error codes. */
+#define QMI_RESULT_SUCCESS_V01                 0
+#define QMI_RESULT_FAILURE_V01                 1
+
+#define QMI_ERR_NONE_V01                       0
+#define QMI_ERR_MALFORMED_MSG_V01              1
+#define QMI_ERR_NO_MEMORY_V01                  2
+#define QMI_ERR_INTERNAL_V01                   3
+#define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01       5
+#define QMI_ERR_INVALID_ID_V01                 41
+#define QMI_ERR_ENCODING_V01                   58
+#define QMI_ERR_DISABLED_V01                    69
+#define QMI_ERR_INCOMPATIBLE_STATE_V01         90
+#define QMI_ERR_NOT_SUPPORTED_V01              94
+
+/* Result/error pair present in every QMI response. */
+struct qmi_response_type_v01 {
+       uint16_t result;
+       uint16_t error;
+};
+
+/* QMI indication-register exchange: tell firmware which indications we want. */
+#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN            54
+#define QMI_WLANFW_IND_REGISTER_REQ_V01                                0x0020
+#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN           18
+#define QMI_WLANFW_IND_REGISTER_RESP_V01                       0x0020
+#define QMI_WLANFW_CLIENT_ID                                   0x4b4e454c
+
+/* Request body: each *_valid flag gates the optional field after it. */
+struct qmi_wlanfw_ind_register_req_msg_v01 {
+       uint8_t fw_ready_enable_valid;
+       uint8_t fw_ready_enable;
+       uint8_t initiate_cal_download_enable_valid;
+       uint8_t initiate_cal_download_enable;
+       uint8_t initiate_cal_update_enable_valid;
+       uint8_t initiate_cal_update_enable;
+       uint8_t msa_ready_enable_valid;
+       uint8_t msa_ready_enable;
+       uint8_t pin_connect_result_enable_valid;
+       uint8_t pin_connect_result_enable;
+       uint8_t client_id_valid;
+       uint32_t client_id;
+       uint8_t request_mem_enable_valid;
+       uint8_t request_mem_enable;
+       uint8_t fw_mem_ready_enable_valid;
+       uint8_t fw_mem_ready_enable;
+       uint8_t fw_init_done_enable_valid;
+       uint8_t fw_init_done_enable;
+       uint8_t rejuvenate_enable_valid;
+       uint32_t rejuvenate_enable;
+       uint8_t xo_cal_enable_valid;
+       uint8_t xo_cal_enable;
+       uint8_t cal_done_enable_valid;
+       uint8_t cal_done_enable;
+};
+
+struct qmi_wlanfw_ind_register_resp_msg_v01 {
+       struct qmi_response_type_v01 resp;
+       uint8_t fw_status_valid;
+       uint64_t fw_status;
+};
+
+/* QMI host-capability exchange: describe host features to firmware. */
+#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN                261
+#define QMI_WLANFW_HOST_CAP_REQ_V01                    0x0034
+#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN       7
+#define QMI_WLFW_HOST_CAP_RESP_V01                     0x0034
+#define QMI_WLFW_MAX_NUM_GPIO_V01                      32
+#define QMI_IPQ8074_FW_MEM_MODE                                0xFF
+#define HOST_DDR_REGION_TYPE                           0x1
+#define BDF_MEM_REGION_TYPE                            0x2
+#define M3_DUMP_REGION_TYPE                            0x3
+#define CALDB_MEM_REGION_TYPE                          0x4
+
+struct qmi_wlanfw_host_cap_req_msg_v01 {
+       uint8_t num_clients_valid;
+       uint32_t num_clients;
+       uint8_t wake_msi_valid;
+       uint32_t wake_msi;
+       uint8_t gpios_valid;
+       uint32_t gpios_len;
+       uint32_t gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+       uint8_t nm_modem_valid;
+       uint8_t nm_modem;
+       uint8_t bdf_support_valid;
+       uint8_t bdf_support;
+       uint8_t bdf_cache_support_valid;
+       uint8_t bdf_cache_support;
+       uint8_t m3_support_valid;
+       uint8_t m3_support;
+       uint8_t m3_cache_support_valid;
+       uint8_t m3_cache_support;
+       uint8_t cal_filesys_support_valid;
+       uint8_t cal_filesys_support;
+       uint8_t cal_cache_support_valid;
+       uint8_t cal_cache_support;
+       uint8_t cal_done_valid;
+       uint8_t cal_done;
+       uint8_t mem_bucket_valid;
+       uint32_t mem_bucket;
+       uint8_t mem_cfg_mode_valid;
+       uint8_t mem_cfg_mode;
+};
+
+struct qmi_wlanfw_host_cap_resp_msg_v01 {
+       struct qmi_response_type_v01 resp;
+};
+
+/* Misc QMI/firmware constants carried over from the ath12k driver. */
+#define ATH12K_HOST_VERSION_STRING             "WIN"
+#define ATH12K_QMI_WLANFW_TIMEOUT_MS           10000
+#define ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE      64
+#define ATH12K_QMI_CALDB_ADDRESS               0x4BA00000
+#define ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
+#define ATH12K_QMI_WLFW_SERVICE_ID_V01         0x45
+#define ATH12K_QMI_WLFW_SERVICE_VERS_V01       0x01
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01     0x02
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390     0x01
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074     0x02
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074     0x07
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750     0x03
+#define ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01        32
+
+#define ATH12K_QMI_RESP_LEN_MAX                        8192
+#define ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01  52
+#define ATH12K_QMI_CALDB_SIZE                  0x480000
+#define ATH12K_QMI_BDF_EXT_STR_LENGTH          0x20
+#define ATH12K_QMI_FW_MEM_REQ_SEGMENT_CNT      5
+
+/* QMI indication message IDs sent by firmware. */
+#define QMI_WLFW_REQUEST_MEM_IND_V01           0x0035
+#define QMI_WLFW_RESPOND_MEM_RESP_V01          0x0036
+#define QMI_WLFW_FW_MEM_READY_IND_V01          0x0037
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01    0x003E
+#define QMI_WLFW_FW_READY_IND_V01              0x0021
+#define QMI_WLFW_FW_INIT_DONE_IND_V01          0x0038
+
+#define QMI_WLANFW_MAX_DATA_SIZE_V01           6144
+#define ATH12K_FIRMWARE_MODE_OFF               4
+#define ATH12K_COLD_BOOT_FW_RESET_DELAY                (40 * HZ)
+
+/* Firmware memory request/response exchange. */
+#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN     1824
+#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN     888
+#define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN    7
+#define QMI_WLANFW_REQUEST_MEM_IND_V01                 0x0035
+#define QMI_WLANFW_RESPOND_MEM_REQ_V01                 0x0036
+#define QMI_WLANFW_RESPOND_MEM_RESP_V01                        0x0036
+#define QMI_WLANFW_MAX_NUM_MEM_CFG_V01                 2
+
+struct qmi_wlanfw_mem_cfg_s_v01 {
+       uint64_t offset;
+       uint32_t size;
+       uint8_t secure_flag;
+};
+
+/* Memory segment types firmware may request. */
+enum qmi_wlanfw_mem_type_enum_v01 {
+       WLANFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+       QMI_WLANFW_MEM_TYPE_MSA_V01 = 0,
+       QMI_WLANFW_MEM_TYPE_DDR_V01 = 1,
+       QMI_WLANFW_MEM_BDF_V01 = 2,
+       QMI_WLANFW_MEM_M3_V01 = 3,
+       QMI_WLANFW_MEM_CAL_V01 = 4,
+       QMI_WLANFW_MEM_DPD_V01 = 5,
+       WLANFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_wlanfw_mem_seg_s_v01 {
+       uint32_t size;
+       enum qmi_wlanfw_mem_type_enum_v01 type;
+       uint32_t mem_cfg_len;
+       struct qmi_wlanfw_mem_cfg_s_v01 mem_cfg[QMI_WLANFW_MAX_NUM_MEM_CFG_V01];
+};
+
+/* Indication from firmware requesting host memory segments. */
+struct qmi_wlanfw_request_mem_ind_msg_v01 {
+       uint32_t mem_seg_len;
+       struct qmi_wlanfw_mem_seg_s_v01 mem_seg[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+/* One allocated segment reported back to firmware (DMA address + size). */
+struct qmi_wlanfw_mem_seg_resp_s_v01 {
+       uint64_t addr;
+       uint32_t size;
+       enum qmi_wlanfw_mem_type_enum_v01 type;
+       uint8_t restore;
+};
+
+/* Host response to the memory request indication. */
+struct qmi_wlanfw_respond_mem_req_msg_v01 {
+       uint32_t mem_seg_len;
+       struct qmi_wlanfw_mem_seg_resp_s_v01 mem_seg[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_respond_mem_resp_msg_v01 {
+       struct qmi_response_type_v01 resp;
+};
+
+/* The following indications carry no payload; 'placeholder' keeps the
+ * structs non-empty for the QMI encode/decode tables. */
+struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 {
+       char placeholder;
+};
+
+struct qmi_wlanfw_fw_ready_ind_msg_v01 {
+       char placeholder;
+};
+
+struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
+       char placeholder;
+};
+
+struct qmi_wlfw_fw_init_done_ind_msg_v01 {
+       char placeholder;
+};
+
+/* QMI capability and device-info message IDs and length limits. */
+#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN             0
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN            235
+#define QMI_WLANFW_CAP_REQ_V01                         0x0024
+#define QMI_WLANFW_CAP_RESP_V01                                0x0024
+#define QMI_WLANFW_DEVICE_INFO_REQ_V01                 0x004C
+#define QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN     0
+
+/* Copy-engine pipe directions as seen by the firmware. */
+enum qmi_wlanfw_pipedir_enum_v01 {
+       QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+       QMI_WLFW_PIPEDIR_IN_V01 = 1,
+       QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+       QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+};
+
+/* Copy-engine target pipe configuration entry. */
+struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 {
+       uint32_t pipe_num;
+       uint32_t pipe_dir;              /* enum qmi_wlanfw_pipedir_enum_v01 */
+       uint32_t nentries;
+       uint32_t nbytes_max;
+       uint32_t flags;
+};
+
+/* Service-to-pipe mapping entry. */
+struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 {
+       uint32_t service_id;
+       uint32_t pipe_dir;
+       uint32_t pipe_num;
+};
+
+struct qmi_wlanfw_shadow_reg_cfg_s_v01 {
+       uint16_t id;
+       uint16_t offset;
+};
+
+struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01 {
+       uint32_t addr;
+};
+
+struct qmi_wlanfw_memory_region_info_s_v01 {
+       uint64_t region_addr;
+       uint32_t size;
+       uint8_t secure_flag;
+};
+
+/* Hardware identification reported in the capability response. */
+struct qmi_wlanfw_rf_chip_info_s_v01 {
+       uint32_t chip_id;
+       uint32_t chip_family;
+};
+
+struct qmi_wlanfw_rf_board_info_s_v01 {
+       uint32_t board_id;
+};
+
+struct qmi_wlanfw_soc_info_s_v01 {
+       uint32_t soc_id;
+};
+
+struct qmi_wlanfw_fw_version_info_s_v01 {
+       uint32_t fw_version;
+       char fw_build_timestamp[ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+/* Calibration temperature indices for BDF/caldata file selection. */
+enum qmi_wlanfw_cal_temp_id_enum_v01 {
+       QMI_WLANFW_CAL_TEMP_IDX_0_V01 = 0,
+       QMI_WLANFW_CAL_TEMP_IDX_1_V01 = 1,
+       QMI_WLANFW_CAL_TEMP_IDX_2_V01 = 2,
+       QMI_WLANFW_CAL_TEMP_IDX_3_V01 = 3,
+       QMI_WLANFW_CAL_TEMP_IDX_4_V01 = 4,
+       QMI_WLANFW_CAL_TEMP_ID_MAX_V01 = 0xFF,
+};
+
+/* Firmware capability response; *_valid flags gate each optional field. */
+struct qmi_wlanfw_cap_resp_msg_v01 {
+       struct qmi_response_type_v01 resp;
+       uint8_t chip_info_valid;
+       struct qmi_wlanfw_rf_chip_info_s_v01 chip_info;
+       uint8_t board_info_valid;
+       struct qmi_wlanfw_rf_board_info_s_v01 board_info;
+       uint8_t soc_info_valid;
+       struct qmi_wlanfw_soc_info_s_v01 soc_info;
+       uint8_t fw_version_info_valid;
+       struct qmi_wlanfw_fw_version_info_s_v01 fw_version_info;
+       uint8_t fw_build_id_valid;
+       char fw_build_id[ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+       uint8_t num_macs_valid;
+       uint8_t num_macs;
+       uint8_t voltage_mv_valid;
+       uint32_t voltage_mv;
+       uint8_t time_freq_hz_valid;
+       uint32_t time_freq_hz;
+       uint8_t otp_version_valid;
+       uint32_t otp_version;
+       uint8_t eeprom_read_timeout_valid;
+       uint32_t eeprom_read_timeout;
+};
+
+struct qmi_wlanfw_cap_req_msg_v01 {
+       char placeholder;
+};
+
+/* Board-data-file (BDF) download exchange; data is sent in segments. */
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN    6182
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN   7
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01               0x0025
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_V01                        0x0025
+/* TODO: Need to check with MCL and FW team that data can be pointer and
+ * can be last element in structure
+ */
+struct qmi_wlanfw_bdf_download_req_msg_v01 {
+       uint8_t valid;
+       uint8_t file_id_valid;
+       enum qmi_wlanfw_cal_temp_id_enum_v01 file_id;
+       uint8_t total_size_valid;
+       uint32_t total_size;
+       uint8_t seg_id_valid;
+       uint32_t seg_id;
+       uint8_t data_valid;
+       uint32_t data_len;
+       uint8_t data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+       uint8_t end_valid;
+       uint8_t end;                    /* set on the final segment */
+       uint8_t bdf_type_valid;
+       uint8_t bdf_type;               /* enum ath12k_qmi_bdf_type */
+};
+
+struct qmi_wlanfw_bdf_download_resp_msg_v01 {
+       struct qmi_response_type_v01 resp;
+};
+
+/* M3 firmware memory info exchange (address and size of the M3 region). */
+#define QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN     18
+#define QMI_WLANFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN    7
+#define QMI_WLANFW_M3_INFO_RESP_V01            0x003c
+#define QMI_WLANFW_M3_INFO_REQ_V01             0x003c
+
+struct qmi_wlanfw_m3_info_req_msg_v01 {
+       uint64_t addr;
+       uint32_t size;
+};
+
+struct qmi_wlanfw_m3_info_resp_msg_v01 {
+       struct qmi_response_type_v01 resp;
+};
+
+/* WLAN mode/config/ini QMI exchanges and their limits. */
+#define QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN       11
+#define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN      7
+#define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN                803
+#define QMI_WLANFW_WLAN_CFG_RESP_MSG_V01_MAX_LEN       7
+#define QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN                4
+#define QMI_WLANFW_WLAN_MODE_REQ_V01                   0x0022
+#define QMI_WLANFW_WLAN_MODE_RESP_V01                  0x0022
+#define QMI_WLANFW_WLAN_CFG_REQ_V01                    0x0023
+#define QMI_WLANFW_WLAN_CFG_RESP_V01                   0x0023
+#define QMI_WLANFW_WLAN_INI_REQ_V01                    0x002f
+#define QMI_WLANFW_WLAN_INI_RESP_V01                   0x002f
+#define QMI_WLANFW_MAX_STR_LEN_V01                     16
+#define QMI_WLANFW_MAX_NUM_CE_V01                      12
+#define QMI_WLANFW_MAX_NUM_SVC_V01                     24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V01              24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01           36
+
+/* Request to switch the firmware's WLAN mode. */
+struct qmi_wlanfw_wlan_mode_req_msg_v01 {
+       uint32_t mode;
+       uint8_t hw_debug_valid;
+       uint8_t hw_debug;
+};
+
+struct qmi_wlanfw_wlan_mode_resp_msg_v01 {
+       struct qmi_response_type_v01 resp;
+};
+
+/* Full WLAN configuration: CE pipes, service map, shadow registers. */
+struct qmi_wlanfw_wlan_cfg_req_msg_v01 {
+       uint8_t host_version_valid;
+       char host_version[QMI_WLANFW_MAX_STR_LEN_V01 + 1];
+       uint8_t tgt_cfg_valid;
+       uint32_t tgt_cfg_len;
+       struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01
+           tgt_cfg[QMI_WLANFW_MAX_NUM_CE_V01];
+       uint8_t svc_cfg_valid;
+       uint32_t svc_cfg_len;
+       struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01
+           svc_cfg[QMI_WLANFW_MAX_NUM_SVC_V01];
+       uint8_t shadow_reg_valid;
+       uint32_t shadow_reg_len;
+       struct qmi_wlanfw_shadow_reg_cfg_s_v01
+           shadow_reg[QMI_WLANFW_MAX_NUM_SHADOW_REG_V01];
+       uint8_t shadow_reg_v2_valid;
+       uint32_t shadow_reg_v2_len;
+       struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01
+               shadow_reg_v2[QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01];
+};
+
+struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
+       struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_wlan_ini_req_msg_v01 {
+       /* Must be set to true if enablefwlog is being passed */
+       uint8_t enablefwlog_valid;
+       uint8_t enablefwlog;
+};
+
+struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
+       struct qmi_response_type_v01 resp;
+};
+
+/* Firmware file kinds downloadable via the BDF mechanism. */
+enum ath12k_qmi_file_type {
+       ATH12K_QMI_FILE_TYPE_BDF_GOLDEN,
+       ATH12K_QMI_FILE_TYPE_CALDATA = 2,
+       ATH12K_QMI_FILE_TYPE_EEPROM,
+       ATH12K_QMI_MAX_FILE_TYPE,
+};
+
+enum ath12k_qmi_bdf_type {
+       ATH12K_QMI_BDF_TYPE_BIN                 = 0,
+       ATH12K_QMI_BDF_TYPE_ELF                 = 1,
+       ATH12K_QMI_BDF_TYPE_REGDB               = 4,
+};
+
+/* HAL (hardware abstraction layer) descriptor and ring constants. */
+#define HAL_LINK_DESC_SIZE                     (32 << 2)
+#define HAL_LINK_DESC_ALIGN                    128
+#define HAL_NUM_MPDUS_PER_LINK_DESC            6
+#define HAL_NUM_TX_MSDUS_PER_LINK_DESC         7
+#define HAL_NUM_RX_MSDUS_PER_LINK_DESC         6
+#define HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC      12
+#define HAL_MAX_AVAIL_BLK_RES                  3
+
+#define HAL_RING_BASE_ALIGN    8
+
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX      32704
+/* TODO: Check with hw team on the supported scatter buf size */
+#define HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE     8
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE (HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX - \
+                                      HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE)
+
+#define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX   48
+#define HAL_DSCP_TID_TBL_SIZE                  24
+
+/* calculate the register address from bar0 of shadow register x */
+#define HAL_SHADOW_BASE_ADDR(sc)               \
+       (sc->hw_params.regs->hal_shadow_base_addr)
+#define HAL_SHADOW_NUM_REGS                    36
+#define HAL_HP_OFFSET_IN_REG_START             1
+#define HAL_OFFSET_FROM_HP_TO_TP               4
+
+#define HAL_SHADOW_REG(sc, x) (HAL_SHADOW_BASE_ADDR(sc) + (4 * (x)))
+
+/*
+ * Global SRNG ring IDs.  IDs 0 through HAL_SRNG_RING_ID_UMAC_ID_END (127)
+ * are UMAC rings; per-LMAC rings start at 128.  Gaps between the numbered
+ * groups correspond to unused ID values.
+ */
+enum hal_srng_ring_id {
+       HAL_SRNG_RING_ID_REO2SW1 = 0,
+       HAL_SRNG_RING_ID_REO2SW2,
+       HAL_SRNG_RING_ID_REO2SW3,
+       HAL_SRNG_RING_ID_REO2SW4,
+       HAL_SRNG_RING_ID_REO2TCL,
+       HAL_SRNG_RING_ID_SW2REO,
+
+       HAL_SRNG_RING_ID_REO_CMD = 8,
+       HAL_SRNG_RING_ID_REO_STATUS,
+
+       HAL_SRNG_RING_ID_SW2TCL1 = 16,
+       HAL_SRNG_RING_ID_SW2TCL2,
+       HAL_SRNG_RING_ID_SW2TCL3,
+       HAL_SRNG_RING_ID_SW2TCL4,
+
+       HAL_SRNG_RING_ID_SW2TCL_CMD = 24,
+       HAL_SRNG_RING_ID_TCL_STATUS,
+
+       HAL_SRNG_RING_ID_CE0_SRC = 32,
+       HAL_SRNG_RING_ID_CE1_SRC,
+       HAL_SRNG_RING_ID_CE2_SRC,
+       HAL_SRNG_RING_ID_CE3_SRC,
+       HAL_SRNG_RING_ID_CE4_SRC,
+       HAL_SRNG_RING_ID_CE5_SRC,
+       HAL_SRNG_RING_ID_CE6_SRC,
+       HAL_SRNG_RING_ID_CE7_SRC,
+       HAL_SRNG_RING_ID_CE8_SRC,
+       HAL_SRNG_RING_ID_CE9_SRC,
+       HAL_SRNG_RING_ID_CE10_SRC,
+       HAL_SRNG_RING_ID_CE11_SRC,
+
+       HAL_SRNG_RING_ID_CE0_DST = 56,
+       HAL_SRNG_RING_ID_CE1_DST,
+       HAL_SRNG_RING_ID_CE2_DST,
+       HAL_SRNG_RING_ID_CE3_DST,
+       HAL_SRNG_RING_ID_CE4_DST,
+       HAL_SRNG_RING_ID_CE5_DST,
+       HAL_SRNG_RING_ID_CE6_DST,
+       HAL_SRNG_RING_ID_CE7_DST,
+       HAL_SRNG_RING_ID_CE8_DST,
+       HAL_SRNG_RING_ID_CE9_DST,
+       HAL_SRNG_RING_ID_CE10_DST,
+       HAL_SRNG_RING_ID_CE11_DST,
+
+       HAL_SRNG_RING_ID_CE0_DST_STATUS = 80,
+       HAL_SRNG_RING_ID_CE1_DST_STATUS,
+       HAL_SRNG_RING_ID_CE2_DST_STATUS,
+       HAL_SRNG_RING_ID_CE3_DST_STATUS,
+       HAL_SRNG_RING_ID_CE4_DST_STATUS,
+       HAL_SRNG_RING_ID_CE5_DST_STATUS,
+       HAL_SRNG_RING_ID_CE6_DST_STATUS,
+       HAL_SRNG_RING_ID_CE7_DST_STATUS,
+       HAL_SRNG_RING_ID_CE8_DST_STATUS,
+       HAL_SRNG_RING_ID_CE9_DST_STATUS,
+       HAL_SRNG_RING_ID_CE10_DST_STATUS,
+       HAL_SRNG_RING_ID_CE11_DST_STATUS,
+
+       HAL_SRNG_RING_ID_WBM_IDLE_LINK = 104,
+       HAL_SRNG_RING_ID_WBM_SW_RELEASE,
+       HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+       HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
+       HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
+       HAL_SRNG_RING_ID_WBM2SW3_RELEASE,
+       HAL_SRNG_RING_ID_WBM2SW4_RELEASE,
+
+       HAL_SRNG_RING_ID_UMAC_ID_END = 127,
+       HAL_SRNG_RING_ID_LMAC1_ID_START,
+
+       HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF = HAL_SRNG_RING_ID_LMAC1_ID_START,
+       HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF,
+       HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
+       HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_STATBUF,
+       HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+       HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+       HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+       HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+       HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+
+       HAL_SRNG_RING_ID_LMAC1_ID_END = 143
+};
+
+/* SRNG registers are split into two groups R0 and R2 */
+/* R0 holds ring configuration; R2 holds head/tail pointers (see the
+ * per-ring "R0 ... address" / "R2 ... address" sections below). */
+#define HAL_SRNG_REG_GRP_R0    0
+#define HAL_SRNG_REG_GRP_R2    1
+#define HAL_SRNG_NUM_REG_GRP    2
+
+#define HAL_SRNG_NUM_LMACS      3
+#define HAL_SRNG_REO_EXCEPTION  HAL_SRNG_RING_ID_REO2SW1
+#define HAL_SRNG_RINGS_PER_LMAC (HAL_SRNG_RING_ID_LMAC1_ID_END - \
+                                HAL_SRNG_RING_ID_LMAC1_ID_START)
+#define HAL_SRNG_NUM_LMAC_RINGS (HAL_SRNG_NUM_LMACS * HAL_SRNG_RINGS_PER_LMAC)
+/* Total ring IDs: all UMAC IDs plus one ring set per LMAC. */
+#define HAL_SRNG_RING_ID_MAX    (HAL_SRNG_RING_ID_UMAC_ID_END + \
+                                HAL_SRNG_NUM_LMAC_RINGS)
+
+/* Maximum receive block-ack window. */
+#define HAL_RX_MAX_BA_WINDOW   256
+
+/* Default REO frame aging timeout, in microseconds (40 ms). */
+#define HAL_DEFAULT_REO_TIMEOUT_USEC           (40 * 1000)
+
+/**
+ * enum hal_reo_cmd_type: Enum for REO command type
+ * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
+ * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue
+ * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache
+ * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
+ *      earlier with a 'REO_FLUSH_CACHE' command
+ * @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
+ * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings
+ *
+ * These values are written into REO command ring descriptors; keep the
+ * numeric assignments fixed.
+ */
+enum hal_reo_cmd_type {
+       HAL_REO_CMD_GET_QUEUE_STATS     = 0,
+       HAL_REO_CMD_FLUSH_QUEUE         = 1,
+       HAL_REO_CMD_FLUSH_CACHE         = 2,
+       HAL_REO_CMD_UNBLOCK_CACHE       = 3,
+       HAL_REO_CMD_FLUSH_TIMEOUT_LIST  = 4,
+       HAL_REO_CMD_UPDATE_RX_QUEUE     = 5,
+};
+
+/**
+ * enum hal_reo_cmd_status: Enum for execution status of REO command
+ * @HAL_REO_CMD_SUCCESS: Command has successfully executed
+ * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue
+ *                      or cache was blocked
+ * @HAL_REO_CMD_FAILED: Command execution failed, could be due to
+ *                     invalid queue desc
+ * @HAL_REO_CMD_RESOURCE_BLOCKED: presumably a required REO resource was
+ *                     blocked — exact semantics to be confirmed against
+ *                     the hardware documentation
+ * @HAL_REO_CMD_DRAIN: presumably the command was drained without
+ *                     execution — exact semantics to be confirmed
+ */
+enum hal_reo_cmd_status {
+       HAL_REO_CMD_SUCCESS             = 0,
+       HAL_REO_CMD_BLOCKED             = 1,
+       HAL_REO_CMD_FAILED              = 2,
+       HAL_REO_CMD_RESOURCE_BLOCKED    = 3,
+       HAL_REO_CMD_DRAIN               = 0xff,
+};
+
+/* Interrupt mitigation - Batch threshold in terms of number of frames */
+#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
+#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
+#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
+
+/* Interrupt mitigation - timer threshold in us */
+#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
+#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
+#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 256
+
+/* WCSS Relative address */
+/* Chip-specific CE register bases are supplied via hw_params.regs. */
+#define HAL_SEQ_WCSS_UMAC_OFFSET               0x00a00000
+#define HAL_SEQ_WCSS_UMAC_REO_REG              0x00a38000
+#define HAL_SEQ_WCSS_UMAC_TCL_REG              0x00a44000
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc) \
+       (sc->hw_params.regs->hal_seq_wcss_umac_ce0_src_reg)
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) \
+       (sc->hw_params.regs->hal_seq_wcss_umac_ce0_dst_reg)
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(sc) \
+       (sc->hw_params.regs->hal_seq_wcss_umac_ce1_src_reg)
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) \
+       (sc->hw_params.regs->hal_seq_wcss_umac_ce1_dst_reg)
+#define HAL_SEQ_WCSS_UMAC_WBM_REG              0x00a34000
+
+#define HAL_CE_WFSS_CE_REG_BASE                        0x01b80000
+#define HAL_WLAON_REG_BASE                     0x01f80000
+
+/* SW2TCL(x) R0 ring configuration address */
+#define HAL_TCL1_RING_CMN_CTRL_REG             0x00000014
+#define HAL_TCL1_RING_DSCP_TID_MAP             0x0000002c
+#define HAL_TCL1_RING_BASE_LSB(sc)             \
+       (sc->hw_params.regs->hal_tcl1_ring_base_lsb)
+#define HAL_TCL1_RING_BASE_MSB(sc)             \
+       (sc->hw_params.regs->hal_tcl1_ring_base_msb)
+#define HAL_TCL1_RING_ID(sc)                   \
+       (sc->hw_params.regs->hal_tcl1_ring_id)
+#define HAL_TCL1_RING_MISC(sc)                 \
+       (sc->hw_params.regs->hal_tcl1_ring_misc)
+#define HAL_TCL1_RING_TP_ADDR_LSB(sc) \
+       (sc->hw_params.regs->hal_tcl1_ring_tp_addr_lsb)
+#define HAL_TCL1_RING_TP_ADDR_MSB(sc) \
+       (sc->hw_params.regs->hal_tcl1_ring_tp_addr_msb)
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(sc) \
+       (sc->hw_params.regs->hal_tcl1_ring_consumer_int_setup_ix0)
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(sc) \
+       (sc->hw_params.regs->hal_tcl1_ring_consumer_int_setup_ix1)
+#define HAL_TCL1_RING_MSI1_BASE_LSB(sc) \
+       (sc->hw_params.regs->hal_tcl1_ring_msi1_base_lsb)
+#define HAL_TCL1_RING_MSI1_BASE_MSB(sc) \
+       (sc->hw_params.regs->hal_tcl1_ring_msi1_base_msb)
+#define HAL_TCL1_RING_MSI1_DATA(sc) \
+       (sc->hw_params.regs->hal_tcl1_ring_msi1_data)
+#define HAL_TCL2_RING_BASE_LSB(sc)             \
+       (sc->hw_params.regs->hal_tcl2_ring_base_lsb)
+#define HAL_TCL_RING_BASE_LSB(sc)              \
+       (sc->hw_params.regs->hal_tcl_ring_base_lsb)
+
+/* Offsets of TCL1 R0 registers relative to the ring's BASE_LSB register. */
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(sc)                         \
+       (HAL_TCL1_RING_MSI1_BASE_LSB(sc) - HAL_TCL1_RING_BASE_LSB(sc))
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(sc)                         \
+       (HAL_TCL1_RING_MSI1_BASE_MSB(sc) - HAL_TCL1_RING_BASE_LSB(sc))
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET(sc)                             \
+       (HAL_TCL1_RING_MSI1_DATA(sc) - HAL_TCL1_RING_BASE_LSB(sc))
+#define HAL_TCL1_RING_BASE_MSB_OFFSET(sc)                              \
+       (HAL_TCL1_RING_BASE_MSB(sc) - HAL_TCL1_RING_BASE_LSB(sc))
+#define HAL_TCL1_RING_ID_OFFSET(sc)                            \
+       (HAL_TCL1_RING_ID(sc) - HAL_TCL1_RING_BASE_LSB(sc))
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(sc)                   \
+       (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(sc) - HAL_TCL1_RING_BASE_LSB(sc))
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(sc) \
+               (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(sc) - \
+               HAL_TCL1_RING_BASE_LSB(sc))
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(sc) \
+               (HAL_TCL1_RING_TP_ADDR_LSB(sc) - HAL_TCL1_RING_BASE_LSB(sc))
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(sc) \
+               (HAL_TCL1_RING_TP_ADDR_MSB(sc) - HAL_TCL1_RING_BASE_LSB(sc))
+#define HAL_TCL1_RING_MISC_OFFSET(sc) \
+               (HAL_TCL1_RING_MISC(sc) - HAL_TCL1_RING_BASE_LSB(sc))
+
+/* SW2TCL(x) R2 ring pointers (head/tail) address */
+#define HAL_TCL1_RING_HP                       0x00002000
+#define HAL_TCL1_RING_TP                       0x00002004
+#define HAL_TCL2_RING_HP                       0x00002008
+#define HAL_TCL_RING_HP                                0x00002018
+
+/* Offset of the tail pointer relative to the head pointer within R2. */
+#define HAL_TCL1_RING_TP_OFFSET \
+               (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
+
+/* TCL STATUS ring address */
+#define HAL_TCL_STATUS_RING_BASE_LSB(sc) \
+       (sc->hw_params.regs->hal_tcl_status_ring_base_lsb)
+#define HAL_TCL_STATUS_RING_HP                 0x00002030
+
+/* REO2SW(x) R0 ring configuration address */
+#define HAL_REO1_GEN_ENABLE                    0x00000000
+#define HAL_REO1_DEST_RING_CTRL_IX_0           0x00000004
+#define HAL_REO1_DEST_RING_CTRL_IX_1           0x00000008
+#define HAL_REO1_DEST_RING_CTRL_IX_2           0x0000000c
+#define HAL_REO1_DEST_RING_CTRL_IX_3           0x00000010
+#define HAL_REO1_MISC_CTL(sc)                  \
+       (sc->hw_params.regs->hal_reo1_misc_ctl)
+#define HAL_REO1_RING_BASE_LSB(sc)             \
+       (sc->hw_params.regs->hal_reo1_ring_base_lsb)
+#define HAL_REO1_RING_BASE_MSB(sc)             \
+       (sc->hw_params.regs->hal_reo1_ring_base_msb)
+#define HAL_REO1_RING_ID(sc)                   \
+       (sc->hw_params.regs->hal_reo1_ring_id)
+#define HAL_REO1_RING_MISC(sc)                 \
+       (sc->hw_params.regs->hal_reo1_ring_misc)
+#define HAL_REO1_RING_HP_ADDR_LSB(sc) \
+       (sc->hw_params.regs->hal_reo1_ring_hp_addr_lsb)
+#define HAL_REO1_RING_HP_ADDR_MSB(sc) \
+       (sc->hw_params.regs->hal_reo1_ring_hp_addr_msb)
+#define HAL_REO1_RING_PRODUCER_INT_SETUP(sc) \
+       (sc->hw_params.regs->hal_reo1_ring_producer_int_setup)
+#define HAL_REO1_RING_MSI1_BASE_LSB(sc) \
+       (sc->hw_params.regs->hal_reo1_ring_msi1_base_lsb)
+#define HAL_REO1_RING_MSI1_BASE_MSB(sc) \
+       (sc->hw_params.regs->hal_reo1_ring_msi1_base_msb)
+#define HAL_REO1_RING_MSI1_DATA(sc) \
+       (sc->hw_params.regs->hal_reo1_ring_msi1_data)
+#define HAL_REO2_RING_BASE_LSB(sc)             \
+       (sc->hw_params.regs->hal_reo2_ring_base_lsb)
+#define HAL_REO1_AGING_THRESH_IX_0(sc) \
+       (sc->hw_params.regs->hal_reo1_aging_thresh_ix_0)
+#define HAL_REO1_AGING_THRESH_IX_1(sc) \
+       (sc->hw_params.regs->hal_reo1_aging_thresh_ix_1)
+#define HAL_REO1_AGING_THRESH_IX_2(sc) \
+       (sc->hw_params.regs->hal_reo1_aging_thresh_ix_2)
+#define HAL_REO1_AGING_THRESH_IX_3(sc) \
+       (sc->hw_params.regs->hal_reo1_aging_thresh_ix_3)
+
+/* Offsets of REO1 R0 registers relative to the ring's BASE_LSB register. */
+#define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(sc) \
+               (HAL_REO1_RING_MSI1_BASE_LSB(sc) - HAL_REO1_RING_BASE_LSB(sc))
+#define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(sc) \
+               (HAL_REO1_RING_MSI1_BASE_MSB(sc) - HAL_REO1_RING_BASE_LSB(sc))
+#define HAL_REO1_RING_MSI1_DATA_OFFSET(sc) \
+               (HAL_REO1_RING_MSI1_DATA(sc) - HAL_REO1_RING_BASE_LSB(sc))
+#define HAL_REO1_RING_BASE_MSB_OFFSET(sc) \
+               (HAL_REO1_RING_BASE_MSB(sc) - HAL_REO1_RING_BASE_LSB(sc))
+#define HAL_REO1_RING_ID_OFFSET(sc) (HAL_REO1_RING_ID(sc) - \
+                                       HAL_REO1_RING_BASE_LSB(sc))
+#define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(sc) \
+               (HAL_REO1_RING_PRODUCER_INT_SETUP(sc) - \
+               HAL_REO1_RING_BASE_LSB(sc))
+#define HAL_REO1_RING_HP_ADDR_LSB_OFFSET(sc) \
+               (HAL_REO1_RING_HP_ADDR_LSB(sc) - HAL_REO1_RING_BASE_LSB(sc))
+#define HAL_REO1_RING_HP_ADDR_MSB_OFFSET(sc) \
+               (HAL_REO1_RING_HP_ADDR_MSB(sc) - HAL_REO1_RING_BASE_LSB(sc))
+#define HAL_REO1_RING_MISC_OFFSET(sc) \
+       (HAL_REO1_RING_MISC(sc) - HAL_REO1_RING_BASE_LSB(sc))
+
+/* REO2SW(x) R2 ring pointers (head/tail) address */
+#define HAL_REO1_RING_HP(sc)                   \
+       (sc->hw_params.regs->hal_reo1_ring_hp)
+#define HAL_REO1_RING_TP(sc)                   \
+       (sc->hw_params.regs->hal_reo1_ring_tp)
+#define HAL_REO2_RING_HP(sc)                   \
+       (sc->hw_params.regs->hal_reo2_ring_hp)
+
+/* Offset of the tail pointer relative to the head pointer within R2. */
+#define HAL_REO1_RING_TP_OFFSET(sc)    \
+       (HAL_REO1_RING_TP(sc) - HAL_REO1_RING_HP(sc))
+
+/* REO2TCL R0 ring configuration address */
+#define HAL_REO_TCL_RING_BASE_LSB(sc) \
+       (sc->hw_params.regs->hal_reo_tcl_ring_base_lsb)
+
+/* REO2TCL R2 ring pointer (head/tail) address */
+#define HAL_REO_TCL_RING_HP(sc)                        \
+       (sc->hw_params.regs->hal_reo_tcl_ring_hp)
+
+/* REO CMD R0 address */
+#define HAL_REO_CMD_RING_BASE_LSB(sc) \
+       (sc->hw_params.regs->hal_reo_cmd_ring_base_lsb)
+
+/* REO CMD R2 address */
+#define HAL_REO_CMD_HP(sc)                     \
+       (sc->hw_params.regs->hal_reo_cmd_ring_hp)
+
+/* SW2REO R0 address */
+#define HAL_SW2REO_RING_BASE_LSB(sc) \
+       (sc->hw_params.regs->hal_sw2reo_ring_base_lsb)
+
+/* SW2REO R2 address */
+#define HAL_SW2REO_RING_HP(sc)                 \
+       (sc->hw_params.regs->hal_sw2reo_ring_hp)
+
+/* CE ring R0 address */
+/* NOTE(review): these look like offsets relative to a per-CE register
+ * base (cf. HAL_SEQ_WCSS_UMAC_CEx_*_REG above) — confirm against users. */
+#define HAL_CE_DST_RING_BASE_LSB               0x00000000
+#define HAL_CE_DST_STATUS_RING_BASE_LSB                0x00000058
+#define HAL_CE_DST_RING_CTRL                   0x000000b0
+
+/* CE ring R2 address */
+#define HAL_CE_DST_RING_HP                     0x00000400
+#define HAL_CE_DST_STATUS_RING_HP              0x00000408
+
+/* REO status address */
+#define HAL_REO_STATUS_RING_BASE_LSB(sc) \
+       (sc->hw_params.regs->hal_reo_status_ring_base_lsb)
+#define HAL_REO_STATUS_HP(sc)                  \
+       (sc->hw_params.regs->hal_reo_status_hp)
+
+/* WBM Idle R0 address */
+/*
+ * The parameter was previously named "x" while the expansion referenced
+ * "sc", so the macro ignored its argument and silently captured a
+ * caller-local variable named "sc".  Name the parameter "sc" like every
+ * other per-chip register macro in this file.
+ */
+#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(sc) \
+               (sc->hw_params.regs->hal_wbm_idle_link_ring_base_lsb)
+#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(sc) \
+               (sc->hw_params.regs->hal_wbm_idle_link_ring_misc)
+#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR      0x00000048
+#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR         0x0000004c
+#define HAL_WBM_SCATTERED_RING_BASE_LSB                0x00000058
+#define HAL_WBM_SCATTERED_RING_BASE_MSB                0x0000005c
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0 0x00000068
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1 0x0000006c
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0 0x00000078
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1 0x0000007c
+#define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR      0x00000084
+
+/* WBM Idle R2 address */
+#define HAL_WBM_IDLE_LINK_RING_HP              0x000030b0
+
+/* SW2WBM R0 release address */
+/*
+ * As with the WBM idle macros, the parameter was named "x" while the
+ * expansion used "sc"; rename the parameter so the macros actually use
+ * their argument instead of capturing a caller-local "sc".
+ */
+#define HAL_WBM_RELEASE_RING_BASE_LSB(sc) \
+               (sc->hw_params.regs->hal_wbm_release_ring_base_lsb)
+
+/* SW2WBM R2 release address */
+#define HAL_WBM_RELEASE_RING_HP                        0x00003018
+
+/* WBM2SW R0 release address */
+#define HAL_WBM0_RELEASE_RING_BASE_LSB(sc) \
+               (sc->hw_params.regs->hal_wbm0_release_ring_base_lsb)
+#define HAL_WBM1_RELEASE_RING_BASE_LSB(sc) \
+               (sc->hw_params.regs->hal_wbm1_release_ring_base_lsb)
+
+/* WBM2SW R2 release address */
+#define HAL_WBM0_RELEASE_RING_HP               0x000030c0
+#define HAL_WBM1_RELEASE_RING_HP               0x000030c8
+
+/* TCL ring field mask and offset */
+/* Field masks are expressed with GENMASK()/BIT() as in the Linux driver. */
+#define HAL_TCL1_RING_BASE_MSB_RING_SIZE               GENMASK(27, 8)
+#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB      GENMASK(7, 0)
+#define HAL_TCL1_RING_ID_ENTRY_SIZE                    GENMASK(7, 0)
+#define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE         BIT(1)
+#define HAL_TCL1_RING_MISC_MSI_SWAP                    BIT(3)
+#define HAL_TCL1_RING_MISC_HOST_FW_SWAP                        BIT(4)
+#define HAL_TCL1_RING_MISC_DATA_TLV_SWAP               BIT(5)
+#define HAL_TCL1_RING_MISC_SRNG_ENABLE                 BIT(6)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD   GENMASK(31, 16)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD    GENMASK(15, 0)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE                BIT(8)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR               GENMASK(7, 0)
+#define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN    BIT(17)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP               GENMASK(31, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0              GENMASK(2, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1              GENMASK(5, 3)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2              GENMASK(8, 6)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3              GENMASK(11, 9)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4              GENMASK(14, 12)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5              GENMASK(17, 15)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6              GENMASK(20, 18)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7              GENMASK(23, 21)
+
+/* REO ring field mask and offset */
+#define HAL_REO1_RING_BASE_MSB_RING_SIZE               GENMASK(27, 8)
+#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB      GENMASK(7, 0)
+#define HAL_REO1_RING_ID_RING_ID                       GENMASK(15, 8)
+#define HAL_REO1_RING_ID_ENTRY_SIZE                    GENMASK(7, 0)
+#define HAL_REO1_RING_MISC_MSI_SWAP                    BIT(3)
+#define HAL_REO1_RING_MISC_HOST_FW_SWAP                        BIT(4)
+#define HAL_REO1_RING_MISC_DATA_TLV_SWAP               BIT(5)
+#define HAL_REO1_RING_MISC_SRNG_ENABLE                 BIT(6)
+#define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD    GENMASK(31, 16)
+#define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE                BIT(8)
+#define HAL_REO1_RING_MSI1_BASE_MSB_ADDR               GENMASK(7, 0)
+#define HAL_REO1_GEN_ENABLE_FRAG_DST_RING              GENMASK(25, 23)
+#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE          BIT(2)
+#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE         BIT(3)
+#define HAL_REO1_MISC_CTL_FRAGMENT_DST_RING            GENMASK(20, 17)
+
+/* CE ring bit field mask and shift */
+#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN                        GENMASK(15, 0)
+
+#define HAL_ADDR_LSB_REG_MASK                          0xffffffff
+
+#define HAL_ADDR_MSB_REG_SHIFT                         32
+
+/* WBM ring bit field mask and shift */
+#define HAL_WBM_LINK_DESC_IDLE_LIST_MODE               BIT(1)
+#define HAL_WBM_SCATTER_BUFFER_SIZE                    GENMASK(10, 2)
+#define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32     GENMASK(7, 0)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8)
+
+#define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1       GENMASK(20, 8)
+#define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1       GENMASK(20, 8)
+
+#define BASE_ADDR_MATCH_TAG_VAL 0x5
+
+/* Maximum ring sizes encodable in each ring's BASE_MSB RING_SIZE field. */
+#define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE                0x000fffff
+#define HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE                0x000fffff
+#define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE         0x0000ffff
+#define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE            0x0000ffff
+#define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE         0x0000ffff
+#define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE            0x000fffff
+#define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE                0x000fffff
+#define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE         0x0000ffff
+#define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE             0x0000ffff
+#define HAL_CE_DST_RING_BASE_MSB_RING_SIZE             0x0000ffff
+#define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE      0x0000ffff
+#define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE      0x0000ffff
+#define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE     0x0000ffff
+#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE     0x000fffff
+#define HAL_RXDMA_RING_MAX_SIZE                                0x0000ffff
+
+/* IPQ5018 ce registers */
+#define HAL_IPQ5018_CE_WFSS_REG_BASE           0x08400000
+#define HAL_IPQ5018_CE_SIZE                    0x200000
+
+/* Field layout of struct ath12k_buffer_addr below. */
+#define BUFFER_ADDR_INFO0_ADDR         GENMASK(31, 0)
+
+#define BUFFER_ADDR_INFO1_ADDR         GENMASK(7, 0)
+#define BUFFER_ADDR_INFO1_RET_BUF_MGR  GENMASK(10, 8)
+#define BUFFER_ADDR_INFO1_SW_COOKIE    GENMASK(31, 11)
+
+/*
+ * Hardware buffer address descriptor: info0 holds the low 32 address bits,
+ * info1 packs the upper address bits, return-buffer manager and SW cookie
+ * per the BUFFER_ADDR_INFO1_* masks above.  See the comment block below.
+ */
+struct ath12k_buffer_addr {
+       uint32_t info0;
+       uint32_t info1;
+} __packed;
+
+/* ath12k_buffer_addr
+ *
+ * info0
+ *             Address (lower 32 bits) of the msdu buffer or msdu extension
+ *             descriptor or Link descriptor
+ *
+ * addr
+ *             Address (upper 8 bits) of the msdu buffer or msdu extension
+ *             descriptor or Link descriptor
+ *
+ * return_buffer_manager (RBM)
+ *             Consumer: WBM
+ *             Producer: SW/FW
+ *             Indicates to which buffer manager the buffer or MSDU_EXTENSION
+ *             descriptor or link descriptor that is being pointed to shall be
+ *             returned after the frame has been processed. It is used by WBM
+ *             for routing purposes.
+ *
+ *             Values are defined in enum %HAL_RX_BUF_RBM_
+ *
+ * sw_buffer_cookie
+ *             Cookie field exclusively used by SW. HW ignores the contents,
+ *             except that it passes the programmed value on to other
+ *             descriptors together with the physical address.
+ *
+ *             Field can be used by SW to, for example, associate the buffer's
+ *             physical address with the virtual address.
+ */
+
+enum hal_tlv_tag {
+       HAL_MACTX_CBF_START                    =   0 /* 0x0 */,
+       HAL_PHYRX_DATA                         =   1 /* 0x1 */,
+       HAL_PHYRX_CBF_DATA_RESP                =   2 /* 0x2 */,
+       HAL_PHYRX_ABORT_REQUEST                =   3 /* 0x3 */,
+       HAL_PHYRX_USER_ABORT_NOTIFICATION      =   4 /* 0x4 */,
+       HAL_MACTX_DATA_RESP                    =   5 /* 0x5 */,
+       HAL_MACTX_CBF_DATA                     =   6 /* 0x6 */,
+       HAL_MACTX_CBF_DONE                     =   7 /* 0x7 */,
+       HAL_MACRX_CBF_READ_REQUEST             =   8 /* 0x8 */,
+       HAL_MACRX_CBF_DATA_REQUEST             =   9 /* 0x9 */,
+       HAL_MACRX_EXPECT_NDP_RECEPTION         =  10 /* 0xa */,
+       HAL_MACRX_FREEZE_CAPTURE_CHANNEL       =  11 /* 0xb */,
+       HAL_MACRX_NDP_TIMEOUT                  =  12 /* 0xc */,
+       HAL_MACRX_ABORT_ACK                    =  13 /* 0xd */,
+       HAL_MACRX_REQ_IMPLICIT_FB              =  14 /* 0xe */,
+       HAL_MACRX_CHAIN_MASK                   =  15 /* 0xf */,
+       HAL_MACRX_NAP_USER                     =  16 /* 0x10 */,
+       HAL_MACRX_ABORT_REQUEST                =  17 /* 0x11 */,
+       HAL_PHYTX_OTHER_TRANSMIT_INFO16        =  18 /* 0x12 */,
+       HAL_PHYTX_ABORT_ACK                    =  19 /* 0x13 */,
+       HAL_PHYTX_ABORT_REQUEST                =  20 /* 0x14 */,
+       HAL_PHYTX_PKT_END                      =  21 /* 0x15 */,
+       HAL_PHYTX_PPDU_HEADER_INFO_REQUEST     =  22 /* 0x16 */,
+       HAL_PHYTX_REQUEST_CTRL_INFO            =  23 /* 0x17 */,
+       HAL_PHYTX_DATA_REQUEST                 =  24 /* 0x18 */,
+       HAL_PHYTX_BF_CV_LOADING_DONE           =  25 /* 0x19 */,
+       HAL_PHYTX_NAP_ACK                      =  26 /* 0x1a */,
+       HAL_PHYTX_NAP_DONE                     =  27 /* 0x1b */,
+       HAL_PHYTX_OFF_ACK                      =  28 /* 0x1c */,
+       HAL_PHYTX_ON_ACK                       =  29 /* 0x1d */,
+       HAL_PHYTX_SYNTH_OFF_ACK                =  30 /* 0x1e */,
+       HAL_PHYTX_DEBUG16                      =  31 /* 0x1f */,
+       HAL_MACTX_ABORT_REQUEST                =  32 /* 0x20 */,
+       HAL_MACTX_ABORT_ACK                    =  33 /* 0x21 */,
+       HAL_MACTX_PKT_END                      =  34 /* 0x22 */,
+       HAL_MACTX_PRE_PHY_DESC                 =  35 /* 0x23 */,
+       HAL_MACTX_BF_PARAMS_COMMON             =  36 /* 0x24 */,
+       HAL_MACTX_BF_PARAMS_PER_USER           =  37 /* 0x25 */,
+       HAL_MACTX_PREFETCH_CV                  =  38 /* 0x26 */,
+       HAL_MACTX_USER_DESC_COMMON             =  39 /* 0x27 */,
+       HAL_MACTX_USER_DESC_PER_USER           =  40 /* 0x28 */,
+       HAL_EXAMPLE_USER_TLV_16                =  41 /* 0x29 */,
+       HAL_EXAMPLE_TLV_16                     =  42 /* 0x2a */,
+       HAL_MACTX_PHY_OFF                      =  43 /* 0x2b */,
+       HAL_MACTX_PHY_ON                       =  44 /* 0x2c */,
+       HAL_MACTX_SYNTH_OFF                    =  45 /* 0x2d */,
+       HAL_MACTX_EXPECT_CBF_COMMON            =  46 /* 0x2e */,
+       HAL_MACTX_EXPECT_CBF_PER_USER          =  47 /* 0x2f */,
+       HAL_MACTX_PHY_DESC                     =  48 /* 0x30 */,
+       HAL_MACTX_L_SIG_A                      =  49 /* 0x31 */,
+       HAL_MACTX_L_SIG_B                      =  50 /* 0x32 */,
+       HAL_MACTX_HT_SIG                       =  51 /* 0x33 */,
+       HAL_MACTX_VHT_SIG_A                    =  52 /* 0x34 */,
+       HAL_MACTX_VHT_SIG_B_SU20               =  53 /* 0x35 */,
+       HAL_MACTX_VHT_SIG_B_SU40               =  54 /* 0x36 */,
+       HAL_MACTX_VHT_SIG_B_SU80               =  55 /* 0x37 */,
+       HAL_MACTX_VHT_SIG_B_SU160              =  56 /* 0x38 */,
+       HAL_MACTX_VHT_SIG_B_MU20               =  57 /* 0x39 */,
+       HAL_MACTX_VHT_SIG_B_MU40               =  58 /* 0x3a */,
+       HAL_MACTX_VHT_SIG_B_MU80               =  59 /* 0x3b */,
+       HAL_MACTX_VHT_SIG_B_MU160              =  60 /* 0x3c */,
+       HAL_MACTX_SERVICE                      =  61 /* 0x3d */,
+       HAL_MACTX_HE_SIG_A_SU                  =  62 /* 0x3e */,
+       HAL_MACTX_HE_SIG_A_MU_DL               =  63 /* 0x3f */,
+       HAL_MACTX_HE_SIG_A_MU_UL               =  64 /* 0x40 */,
+       HAL_MACTX_HE_SIG_B1_MU                 =  65 /* 0x41 */,
+       HAL_MACTX_HE_SIG_B2_MU                 =  66 /* 0x42 */,
+       HAL_MACTX_HE_SIG_B2_OFDMA              =  67 /* 0x43 */,
+       HAL_MACTX_DELETE_CV                    =  68 /* 0x44 */,
+       HAL_MACTX_MU_UPLINK_COMMON             =  69 /* 0x45 */,
+       HAL_MACTX_MU_UPLINK_USER_SETUP         =  70 /* 0x46 */,
+       HAL_MACTX_OTHER_TRANSMIT_INFO          =  71 /* 0x47 */,
+       HAL_MACTX_PHY_NAP                      =  72 /* 0x48 */,
+       HAL_MACTX_DEBUG                        =  73 /* 0x49 */,
+       HAL_PHYRX_ABORT_ACK                    =  74 /* 0x4a */,
+       HAL_PHYRX_GENERATED_CBF_DETAILS        =  75 /* 0x4b */,
+       HAL_PHYRX_RSSI_LEGACY                  =  76 /* 0x4c */,
+       HAL_PHYRX_RSSI_HT                      =  77 /* 0x4d */,
+       HAL_PHYRX_USER_INFO                    =  78 /* 0x4e */,
+       HAL_PHYRX_PKT_END                      =  79 /* 0x4f */,
+       HAL_PHYRX_DEBUG                        =  80 /* 0x50 */,
+       HAL_PHYRX_CBF_TRANSFER_DONE            =  81 /* 0x51 */,
+       HAL_PHYRX_CBF_TRANSFER_ABORT           =  82 /* 0x52 */,
+       HAL_PHYRX_L_SIG_A                      =  83 /* 0x53 */,
+       HAL_PHYRX_L_SIG_B                      =  84 /* 0x54 */,
+       HAL_PHYRX_HT_SIG                       =  85 /* 0x55 */,
+       HAL_PHYRX_VHT_SIG_A                    =  86 /* 0x56 */,
+       HAL_PHYRX_VHT_SIG_B_SU20               =  87 /* 0x57 */,
+       HAL_PHYRX_VHT_SIG_B_SU40               =  88 /* 0x58 */,
+       HAL_PHYRX_VHT_SIG_B_SU80               =  89 /* 0x59 */,
+       HAL_PHYRX_VHT_SIG_B_SU160              =  90 /* 0x5a */,
+       HAL_PHYRX_VHT_SIG_B_MU20               =  91 /* 0x5b */,
+       HAL_PHYRX_VHT_SIG_B_MU40               =  92 /* 0x5c */,
+       HAL_PHYRX_VHT_SIG_B_MU80               =  93 /* 0x5d */,
+       HAL_PHYRX_VHT_SIG_B_MU160              =  94 /* 0x5e */,
+       HAL_PHYRX_HE_SIG_A_SU                  =  95 /* 0x5f */,
+       HAL_PHYRX_HE_SIG_A_MU_DL               =  96 /* 0x60 */,
+       HAL_PHYRX_HE_SIG_A_MU_UL               =  97 /* 0x61 */,
+       HAL_PHYRX_HE_SIG_B1_MU                 =  98 /* 0x62 */,
+       HAL_PHYRX_HE_SIG_B2_MU                 =  99 /* 0x63 */,
+       HAL_PHYRX_HE_SIG_B2_OFDMA              = 100 /* 0x64 */,
+       HAL_PHYRX_OTHER_RECEIVE_INFO           = 101 /* 0x65 */,
+       HAL_PHYRX_COMMON_USER_INFO             = 102 /* 0x66 */,
+       HAL_PHYRX_DATA_DONE                    = 103 /* 0x67 */,
+       HAL_RECEIVE_RSSI_INFO                  = 104 /* 0x68 */,
+       HAL_RECEIVE_USER_INFO                  = 105 /* 0x69 */,
+       HAL_MIMO_CONTROL_INFO                  = 106 /* 0x6a */,
+       HAL_RX_LOCATION_INFO                   = 107 /* 0x6b */,
+       HAL_COEX_TX_REQ                        = 108 /* 0x6c */,
+       HAL_DUMMY                              = 109 /* 0x6d */,
+       HAL_RX_TIMING_OFFSET_INFO              = 110 /* 0x6e */,
+       HAL_EXAMPLE_TLV_32_NAME                = 111 /* 0x6f */,
+       HAL_MPDU_LIMIT                         = 112 /* 0x70 */,
+       HAL_NA_LENGTH_END                      = 113 /* 0x71 */,
+       HAL_OLE_BUF_STATUS                     = 114 /* 0x72 */,
+       HAL_PCU_PPDU_SETUP_DONE                = 115 /* 0x73 */,
+       HAL_PCU_PPDU_SETUP_END                 = 116 /* 0x74 */,
+       HAL_PCU_PPDU_SETUP_INIT                = 117 /* 0x75 */,
+       HAL_PCU_PPDU_SETUP_START               = 118 /* 0x76 */,
+       HAL_PDG_FES_SETUP                      = 119 /* 0x77 */,
+       HAL_PDG_RESPONSE                       = 120 /* 0x78 */,
+       HAL_PDG_TX_REQ                         = 121 /* 0x79 */,
+       HAL_SCH_WAIT_INSTR                     = 122 /* 0x7a */,
+       HAL_SCHEDULER_TLV                      = 123 /* 0x7b */,
+       HAL_TQM_FLOW_EMPTY_STATUS              = 124 /* 0x7c */,
+       HAL_TQM_FLOW_NOT_EMPTY_STATUS          = 125 /* 0x7d */,
+       HAL_TQM_GEN_MPDU_LENGTH_LIST           = 126 /* 0x7e */,
+       HAL_TQM_GEN_MPDU_LENGTH_LIST_STATUS    = 127 /* 0x7f */,
+       HAL_TQM_GEN_MPDUS                      = 128 /* 0x80 */,
+       HAL_TQM_GEN_MPDUS_STATUS               = 129 /* 0x81 */,
+       HAL_TQM_REMOVE_MPDU                    = 130 /* 0x82 */,
+       HAL_TQM_REMOVE_MPDU_STATUS             = 131 /* 0x83 */,
+       HAL_TQM_REMOVE_MSDU                    = 132 /* 0x84 */,
+       HAL_TQM_REMOVE_MSDU_STATUS             = 133 /* 0x85 */,
+       HAL_TQM_UPDATE_TX_MPDU_COUNT           = 134 /* 0x86 */,
+       HAL_TQM_WRITE_CMD                      = 135 /* 0x87 */,
+       HAL_OFDMA_TRIGGER_DETAILS              = 136 /* 0x88 */,
+       HAL_TX_DATA                            = 137 /* 0x89 */,
+       HAL_TX_FES_SETUP                       = 138 /* 0x8a */,
+       HAL_RX_PACKET                          = 139 /* 0x8b */,
+       HAL_EXPECTED_RESPONSE                  = 140 /* 0x8c */,
+       HAL_TX_MPDU_END                        = 141 /* 0x8d */,
+       HAL_TX_MPDU_START                      = 142 /* 0x8e */,
+       HAL_TX_MSDU_END                        = 143 /* 0x8f */,
+       HAL_TX_MSDU_START                      = 144 /* 0x90 */,
+       HAL_TX_SW_MODE_SETUP                   = 145 /* 0x91 */,
+       HAL_TXPCU_BUFFER_STATUS                = 146 /* 0x92 */,
+       HAL_TXPCU_USER_BUFFER_STATUS           = 147 /* 0x93 */,
+       HAL_DATA_TO_TIME_CONFIG                = 148 /* 0x94 */,
+       HAL_EXAMPLE_USER_TLV_32                = 149 /* 0x95 */,
+       HAL_MPDU_INFO                          = 150 /* 0x96 */,
+       HAL_PDG_USER_SETUP                     = 151 /* 0x97 */,
+       HAL_TX_11AH_SETUP                      = 152 /* 0x98 */,
+       HAL_REO_UPDATE_RX_REO_QUEUE_STATUS     = 153 /* 0x99 */,
+       HAL_TX_PEER_ENTRY                      = 154 /* 0x9a */,
+       HAL_TX_RAW_OR_NATIVE_FRAME_SETUP       = 155 /* 0x9b */,
+       HAL_EXAMPLE_STRUCT_NAME                = 156 /* 0x9c */,
+       HAL_PCU_PPDU_SETUP_END_INFO            = 157 /* 0x9d */,
+       HAL_PPDU_RATE_SETTING                  = 158 /* 0x9e */,
+       HAL_PROT_RATE_SETTING                  = 159 /* 0x9f */,
+       HAL_RX_MPDU_DETAILS                    = 160 /* 0xa0 */,
+       HAL_EXAMPLE_USER_TLV_42                = 161 /* 0xa1 */,
+       HAL_RX_MSDU_LINK                       = 162 /* 0xa2 */,
+       HAL_RX_REO_QUEUE                       = 163 /* 0xa3 */,
+       HAL_ADDR_SEARCH_ENTRY                  = 164 /* 0xa4 */,
+       HAL_SCHEDULER_CMD                      = 165 /* 0xa5 */,
+       HAL_TX_FLUSH                           = 166 /* 0xa6 */,
+       HAL_TQM_ENTRANCE_RING                  = 167 /* 0xa7 */,
+       HAL_TX_DATA_WORD                       = 168 /* 0xa8 */,
+       HAL_TX_MPDU_DETAILS                    = 169 /* 0xa9 */,
+       HAL_TX_MPDU_LINK                       = 170 /* 0xaa */,
+       HAL_TX_MPDU_LINK_PTR                   = 171 /* 0xab */,
+       HAL_TX_MPDU_QUEUE_HEAD                 = 172 /* 0xac */,
+       HAL_TX_MPDU_QUEUE_EXT                  = 173 /* 0xad */,
+       HAL_TX_MPDU_QUEUE_EXT_PTR              = 174 /* 0xae */,
+       HAL_TX_MSDU_DETAILS                    = 175 /* 0xaf */,
+       HAL_TX_MSDU_EXTENSION                  = 176 /* 0xb0 */,
+       HAL_TX_MSDU_FLOW                       = 177 /* 0xb1 */,
+       HAL_TX_MSDU_LINK                       = 178 /* 0xb2 */,
+       HAL_TX_MSDU_LINK_ENTRY_PTR             = 179 /* 0xb3 */,
+       HAL_RESPONSE_RATE_SETTING              = 180 /* 0xb4 */,
+       HAL_TXPCU_BUFFER_BASICS                = 181 /* 0xb5 */,
+       HAL_UNIFORM_DESCRIPTOR_HEADER          = 182 /* 0xb6 */,
+       HAL_UNIFORM_TQM_CMD_HEADER             = 183 /* 0xb7 */,
+       HAL_UNIFORM_TQM_STATUS_HEADER          = 184 /* 0xb8 */,
+       HAL_USER_RATE_SETTING                  = 185 /* 0xb9 */,
+       HAL_WBM_BUFFER_RING                    = 186 /* 0xba */,
+       HAL_WBM_LINK_DESCRIPTOR_RING           = 187 /* 0xbb */,
+       HAL_WBM_RELEASE_RING                   = 188 /* 0xbc */,
+       HAL_TX_FLUSH_REQ                       = 189 /* 0xbd */,
+       HAL_RX_MSDU_DETAILS                    = 190 /* 0xbe */,
+       HAL_TQM_WRITE_CMD_STATUS               = 191 /* 0xbf */,
+       HAL_TQM_GET_MPDU_QUEUE_STATS           = 192 /* 0xc0 */,
+       HAL_TQM_GET_MSDU_FLOW_STATS            = 193 /* 0xc1 */,
+       HAL_EXAMPLE_USER_CTLV_32               = 194 /* 0xc2 */,
+       HAL_TX_FES_STATUS_START                = 195 /* 0xc3 */,
+       HAL_TX_FES_STATUS_USER_PPDU            = 196 /* 0xc4 */,
+       HAL_TX_FES_STATUS_USER_RESPONSE        = 197 /* 0xc5 */,
+       HAL_TX_FES_STATUS_END                  = 198 /* 0xc6 */,
+       HAL_RX_TRIG_INFO                       = 199 /* 0xc7 */,
+       HAL_RXPCU_TX_SETUP_CLEAR               = 200 /* 0xc8 */,
+       HAL_RX_FRAME_BITMAP_REQ                = 201 /* 0xc9 */,
+       HAL_RX_FRAME_BITMAP_ACK                = 202 /* 0xca */,
+       HAL_COEX_RX_STATUS                     = 203 /* 0xcb */,
+       HAL_RX_START_PARAM                     = 204 /* 0xcc */,
+       HAL_RX_PPDU_START                      = 205 /* 0xcd */,
+       HAL_RX_PPDU_END                        = 206 /* 0xce */,
+       HAL_RX_MPDU_START                      = 207 /* 0xcf */,
+       HAL_RX_MPDU_END                        = 208 /* 0xd0 */,
+       HAL_RX_MSDU_START                      = 209 /* 0xd1 */,
+       HAL_RX_MSDU_END                        = 210 /* 0xd2 */,
+       HAL_RX_ATTENTION                       = 211 /* 0xd3 */,
+       HAL_RECEIVED_RESPONSE_INFO             = 212 /* 0xd4 */,
+       HAL_RX_PHY_SLEEP                       = 213 /* 0xd5 */,
+       HAL_RX_HEADER                          = 214 /* 0xd6 */,
+       HAL_RX_PEER_ENTRY                      = 215 /* 0xd7 */,
+       HAL_RX_FLUSH                           = 216 /* 0xd8 */,
+       HAL_RX_RESPONSE_REQUIRED_INFO          = 217 /* 0xd9 */,
+       HAL_RX_FRAMELESS_BAR_DETAILS           = 218 /* 0xda */,
+       HAL_TQM_GET_MPDU_QUEUE_STATS_STATUS    = 219 /* 0xdb */,
+       HAL_TQM_GET_MSDU_FLOW_STATS_STATUS     = 220 /* 0xdc */,
+       HAL_TX_CBF_INFO                        = 221 /* 0xdd */,
+       HAL_PCU_PPDU_SETUP_USER                = 222 /* 0xde */,
+       HAL_RX_MPDU_PCU_START                  = 223 /* 0xdf */,
+       HAL_RX_PM_INFO                         = 224 /* 0xe0 */,
+       HAL_RX_USER_PPDU_END                   = 225 /* 0xe1 */,
+       HAL_RX_PRE_PPDU_START                  = 226 /* 0xe2 */,
+       HAL_RX_PREAMBLE                        = 227 /* 0xe3 */,
+       HAL_TX_FES_SETUP_COMPLETE              = 228 /* 0xe4 */,
+       HAL_TX_LAST_MPDU_FETCHED               = 229 /* 0xe5 */,
+       HAL_TXDMA_STOP_REQUEST                 = 230 /* 0xe6 */,
+       HAL_RXPCU_SETUP                        = 231 /* 0xe7 */,
+       HAL_RXPCU_USER_SETUP                   = 232 /* 0xe8 */,
+       HAL_TX_FES_STATUS_ACK_OR_BA            = 233 /* 0xe9 */,
+       HAL_TQM_ACKED_MPDU                     = 234 /* 0xea */,
+       HAL_COEX_TX_RESP                       = 235 /* 0xeb */,
+       HAL_COEX_TX_STATUS                     = 236 /* 0xec */,
+       HAL_MACTX_COEX_PHY_CTRL                = 237 /* 0xed */,
+       HAL_COEX_STATUS_BROADCAST              = 238 /* 0xee */,
+       HAL_RESPONSE_START_STATUS              = 239 /* 0xef */,
+       HAL_RESPONSE_END_STATUS                = 240 /* 0xf0 */,
+       HAL_CRYPTO_STATUS                      = 241 /* 0xf1 */,
+       HAL_RECEIVED_TRIGGER_INFO              = 242 /* 0xf2 */,
+       HAL_REO_ENTRANCE_RING                  = 243 /* 0xf3 */,
+       HAL_RX_MPDU_LINK                       = 244 /* 0xf4 */,
+       HAL_COEX_TX_STOP_CTRL                  = 245 /* 0xf5 */,
+       HAL_RX_PPDU_ACK_REPORT                 = 246 /* 0xf6 */,
+       HAL_RX_PPDU_NO_ACK_REPORT              = 247 /* 0xf7 */,
+       HAL_SCH_COEX_STATUS                    = 248 /* 0xf8 */,
+       HAL_SCHEDULER_COMMAND_STATUS           = 249 /* 0xf9 */,
+       HAL_SCHEDULER_RX_PPDU_NO_RESPONSE_STATUS = 250 /* 0xfa */,
+       HAL_TX_FES_STATUS_PROT                 = 251 /* 0xfb */,
+       HAL_TX_FES_STATUS_START_PPDU           = 252 /* 0xfc */,
+       HAL_TX_FES_STATUS_START_PROT           = 253 /* 0xfd */,
+       HAL_TXPCU_PHYTX_DEBUG32                = 254 /* 0xfe */,
+       HAL_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32  = 255 /* 0xff */,
+       HAL_TX_MPDU_COUNT_TRANSFER_END         = 256 /* 0x100 */,
+       HAL_WHO_ANCHOR_OFFSET                  = 257 /* 0x101 */,
+       HAL_WHO_ANCHOR_VALUE                   = 258 /* 0x102 */,
+       HAL_WHO_CCE_INFO                       = 259 /* 0x103 */,
+       HAL_WHO_COMMIT                         = 260 /* 0x104 */,
+       HAL_WHO_COMMIT_DONE                    = 261 /* 0x105 */,
+       HAL_WHO_FLUSH                          = 262 /* 0x106 */,
+       HAL_WHO_L2_LLC                         = 263 /* 0x107 */,
+       HAL_WHO_L2_PAYLOAD                     = 264 /* 0x108 */,
+       HAL_WHO_L3_CHECKSUM                    = 265 /* 0x109 */,
+       HAL_WHO_L3_INFO                        = 266 /* 0x10a */,
+       HAL_WHO_L4_CHECKSUM                    = 267 /* 0x10b */,
+       HAL_WHO_L4_INFO                        = 268 /* 0x10c */,
+       HAL_WHO_MSDU                           = 269 /* 0x10d */,
+       HAL_WHO_MSDU_MISC                      = 270 /* 0x10e */,
+       HAL_WHO_PACKET_DATA                    = 271 /* 0x10f */,
+       HAL_WHO_PACKET_HDR                     = 272 /* 0x110 */,
+       HAL_WHO_PPDU_END                       = 273 /* 0x111 */,
+       HAL_WHO_PPDU_START                     = 274 /* 0x112 */,
+       HAL_WHO_TSO                            = 275 /* 0x113 */,
+       HAL_WHO_WMAC_HEADER_PV0                = 276 /* 0x114 */,
+       HAL_WHO_WMAC_HEADER_PV1                = 277 /* 0x115 */,
+       HAL_WHO_WMAC_IV                        = 278 /* 0x116 */,
+       HAL_MPDU_INFO_END                      = 279 /* 0x117 */,
+       HAL_MPDU_INFO_BITMAP                   = 280 /* 0x118 */,
+       HAL_TX_QUEUE_EXTENSION                 = 281 /* 0x119 */,
+       HAL_RX_PEER_ENTRY_DETAILS              = 282 /* 0x11a */,
+       HAL_RX_REO_QUEUE_REFERENCE             = 283 /* 0x11b */,
+       HAL_RX_REO_QUEUE_EXT                   = 284 /* 0x11c */,
+       HAL_SCHEDULER_SELFGEN_RESPONSE_STATUS  = 285 /* 0x11d */,
+       HAL_TQM_UPDATE_TX_MPDU_COUNT_STATUS    = 286 /* 0x11e */,
+       HAL_TQM_ACKED_MPDU_STATUS              = 287 /* 0x11f */,
+       HAL_TQM_ADD_MSDU_STATUS                = 288 /* 0x120 */,
+       HAL_RX_MPDU_LINK_PTR                   = 289 /* 0x121 */,
+       HAL_REO_DESTINATION_RING               = 290 /* 0x122 */,
+       HAL_TQM_LIST_GEN_DONE                  = 291 /* 0x123 */,
+       HAL_WHO_TERMINATE                      = 292 /* 0x124 */,
+       HAL_TX_LAST_MPDU_END                   = 293 /* 0x125 */,
+       HAL_TX_CV_DATA                         = 294 /* 0x126 */,
+       HAL_TCL_ENTRANCE_FROM_PPE_RING         = 295 /* 0x127 */,
+       HAL_PPDU_TX_END                        = 296 /* 0x128 */,
+       HAL_PROT_TX_END                        = 297 /* 0x129 */,
+       HAL_PDG_RESPONSE_RATE_SETTING          = 298 /* 0x12a */,
+       HAL_MPDU_INFO_GLOBAL_END               = 299 /* 0x12b */,
+       HAL_TQM_SCH_INSTR_GLOBAL_END           = 300 /* 0x12c */,
+       HAL_RX_PPDU_END_USER_STATS             = 301 /* 0x12d */,
+       HAL_RX_PPDU_END_USER_STATS_EXT         = 302 /* 0x12e */,
+       HAL_NO_ACK_REPORT                      = 303 /* 0x12f */,
+       HAL_ACK_REPORT                         = 304 /* 0x130 */,
+       HAL_UNIFORM_REO_CMD_HEADER             = 305 /* 0x131 */,
+       HAL_REO_GET_QUEUE_STATS                = 306 /* 0x132 */,
+       HAL_REO_FLUSH_QUEUE                    = 307 /* 0x133 */,
+       HAL_REO_FLUSH_CACHE                    = 308 /* 0x134 */,
+       HAL_REO_UNBLOCK_CACHE                  = 309 /* 0x135 */,
+       HAL_UNIFORM_REO_STATUS_HEADER          = 310 /* 0x136 */,
+       HAL_REO_GET_QUEUE_STATS_STATUS         = 311 /* 0x137 */,
+       HAL_REO_FLUSH_QUEUE_STATUS             = 312 /* 0x138 */,
+       HAL_REO_FLUSH_CACHE_STATUS             = 313 /* 0x139 */,
+       HAL_REO_UNBLOCK_CACHE_STATUS           = 314 /* 0x13a */,
+       HAL_TQM_FLUSH_CACHE                    = 315 /* 0x13b */,
+       HAL_TQM_UNBLOCK_CACHE                  = 316 /* 0x13c */,
+       HAL_TQM_FLUSH_CACHE_STATUS             = 317 /* 0x13d */,
+       HAL_TQM_UNBLOCK_CACHE_STATUS           = 318 /* 0x13e */,
+       HAL_RX_PPDU_END_STATUS_DONE            = 319 /* 0x13f */,
+       HAL_RX_STATUS_BUFFER_DONE              = 320 /* 0x140 */,
+       HAL_BUFFER_ADDR_INFO                   = 321 /* 0x141 */,
+       HAL_RX_MSDU_DESC_INFO                  = 322 /* 0x142 */,
+       HAL_RX_MPDU_DESC_INFO                  = 323 /* 0x143 */,
+       HAL_TCL_DATA_CMD                       = 324 /* 0x144 */,
+       HAL_TCL_GSE_CMD                        = 325 /* 0x145 */,
+       HAL_TCL_EXIT_BASE                      = 326 /* 0x146 */,
+       HAL_TCL_COMPACT_EXIT_RING              = 327 /* 0x147 */,
+       HAL_TCL_REGULAR_EXIT_RING              = 328 /* 0x148 */,
+       HAL_TCL_EXTENDED_EXIT_RING             = 329 /* 0x149 */,
+       HAL_UPLINK_COMMON_INFO                 = 330 /* 0x14a */,
+       HAL_UPLINK_USER_SETUP_INFO             = 331 /* 0x14b */,
+       HAL_TX_DATA_SYNC                       = 332 /* 0x14c */,
+       HAL_PHYRX_CBF_READ_REQUEST_ACK         = 333 /* 0x14d */,
+       HAL_TCL_STATUS_RING                    = 334 /* 0x14e */,
+       HAL_TQM_GET_MPDU_HEAD_INFO             = 335 /* 0x14f */,
+       HAL_TQM_SYNC_CMD                       = 336 /* 0x150 */,
+       HAL_TQM_GET_MPDU_HEAD_INFO_STATUS      = 337 /* 0x151 */,
+       HAL_TQM_SYNC_CMD_STATUS                = 338 /* 0x152 */,
+       HAL_TQM_THRESHOLD_DROP_NOTIFICATION_STATUS = 339 /* 0x153 */,
+       HAL_TQM_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 340 /* 0x154 */,
+       HAL_REO_FLUSH_TIMEOUT_LIST             = 341 /* 0x155 */,
+       HAL_REO_FLUSH_TIMEOUT_LIST_STATUS      = 342 /* 0x156 */,
+       HAL_REO_TO_PPE_RING                    = 343 /* 0x157 */,
+       HAL_RX_MPDU_INFO                       = 344 /* 0x158 */,
+       HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 345 /* 0x159 */,
+       HAL_SCHEDULER_RX_SIFS_RESPONSE_TRIGGER_STATUS = 346 /* 0x15a */,
+       HAL_EXAMPLE_USER_TLV_32_NAME           = 347 /* 0x15b */,
+       HAL_RX_PPDU_START_USER_INFO            = 348 /* 0x15c */,
+       HAL_RX_RXPCU_CLASSIFICATION_OVERVIEW   = 349 /* 0x15d */,
+       HAL_RX_RING_MASK                       = 350 /* 0x15e */,
+       HAL_WHO_CLASSIFY_INFO                  = 351 /* 0x15f */,
+       HAL_TXPT_CLASSIFY_INFO                 = 352 /* 0x160 */,
+       HAL_RXPT_CLASSIFY_INFO                 = 353 /* 0x161 */,
+       HAL_TX_FLOW_SEARCH_ENTRY               = 354 /* 0x162 */,
+       HAL_RX_FLOW_SEARCH_ENTRY               = 355 /* 0x163 */,
+       HAL_RECEIVED_TRIGGER_INFO_DETAILS      = 356 /* 0x164 */,
+       HAL_COEX_MAC_NAP                       = 357 /* 0x165 */,
+       HAL_MACRX_ABORT_REQUEST_INFO           = 358 /* 0x166 */,
+       HAL_MACTX_ABORT_REQUEST_INFO           = 359 /* 0x167 */,
+       HAL_PHYRX_ABORT_REQUEST_INFO           = 360 /* 0x168 */,
+       HAL_PHYTX_ABORT_REQUEST_INFO           = 361 /* 0x169 */,
+       HAL_RXPCU_PPDU_END_INFO                = 362 /* 0x16a */,
+       HAL_WHO_MESH_CONTROL                   = 363 /* 0x16b */,
+       HAL_L_SIG_A_INFO                       = 364 /* 0x16c */,
+       HAL_L_SIG_B_INFO                       = 365 /* 0x16d */,
+       HAL_HT_SIG_INFO                        = 366 /* 0x16e */,
+       HAL_VHT_SIG_A_INFO                     = 367 /* 0x16f */,
+       HAL_VHT_SIG_B_SU20_INFO                = 368 /* 0x170 */,
+       HAL_VHT_SIG_B_SU40_INFO                = 369 /* 0x171 */,
+       HAL_VHT_SIG_B_SU80_INFO                = 370 /* 0x172 */,
+       HAL_VHT_SIG_B_SU160_INFO               = 371 /* 0x173 */,
+       HAL_VHT_SIG_B_MU20_INFO                = 372 /* 0x174 */,
+       HAL_VHT_SIG_B_MU40_INFO                = 373 /* 0x175 */,
+       HAL_VHT_SIG_B_MU80_INFO                = 374 /* 0x176 */,
+       HAL_VHT_SIG_B_MU160_INFO               = 375 /* 0x177 */,
+       HAL_SERVICE_INFO                       = 376 /* 0x178 */,
+       HAL_HE_SIG_A_SU_INFO                   = 377 /* 0x179 */,
+       HAL_HE_SIG_A_MU_DL_INFO                = 378 /* 0x17a */,
+       HAL_HE_SIG_A_MU_UL_INFO                = 379 /* 0x17b */,
+       HAL_HE_SIG_B1_MU_INFO                  = 380 /* 0x17c */,
+       HAL_HE_SIG_B2_MU_INFO                  = 381 /* 0x17d */,
+       HAL_HE_SIG_B2_OFDMA_INFO               = 382 /* 0x17e */,
+       HAL_PDG_SW_MODE_BW_START               = 383 /* 0x17f */,
+       HAL_PDG_SW_MODE_BW_END                 = 384 /* 0x180 */,
+       HAL_PDG_WAIT_FOR_MAC_REQUEST           = 385 /* 0x181 */,
+       HAL_PDG_WAIT_FOR_PHY_REQUEST           = 386 /* 0x182 */,
+       HAL_SCHEDULER_END                      = 387 /* 0x183 */,
+       HAL_PEER_TABLE_ENTRY                   = 388 /* 0x184 */,
+       HAL_SW_PEER_INFO                       = 389 /* 0x185 */,
+       HAL_RXOLE_CCE_CLASSIFY_INFO            = 390 /* 0x186 */,
+       HAL_TCL_CCE_CLASSIFY_INFO              = 391 /* 0x187 */,
+       HAL_RXOLE_CCE_INFO                     = 392 /* 0x188 */,
+       HAL_TCL_CCE_INFO                       = 393 /* 0x189 */,
+       HAL_TCL_CCE_SUPERRULE                  = 394 /* 0x18a */,
+       HAL_CCE_RULE                           = 395 /* 0x18b */,
+       HAL_RX_PPDU_START_DROPPED              = 396 /* 0x18c */,
+       HAL_RX_PPDU_END_DROPPED                = 397 /* 0x18d */,
+       HAL_RX_PPDU_END_STATUS_DONE_DROPPED    = 398 /* 0x18e */,
+       HAL_RX_MPDU_START_DROPPED              = 399 /* 0x18f */,
+       HAL_RX_MSDU_START_DROPPED              = 400 /* 0x190 */,
+       HAL_RX_MSDU_END_DROPPED                = 401 /* 0x191 */,
+       HAL_RX_MPDU_END_DROPPED                = 402 /* 0x192 */,
+       HAL_RX_ATTENTION_DROPPED               = 403 /* 0x193 */,
+       HAL_TXPCU_USER_SETUP                   = 404 /* 0x194 */,
+       HAL_RXPCU_USER_SETUP_EXT               = 405 /* 0x195 */,
+       HAL_CE_SRC_DESC                        = 406 /* 0x196 */,
+       HAL_CE_STAT_DESC                       = 407 /* 0x197 */,
+       HAL_RXOLE_CCE_SUPERRULE                = 408 /* 0x198 */,
+       HAL_TX_RATE_STATS_INFO                 = 409 /* 0x199 */,
+       HAL_CMD_PART_0_END                     = 410 /* 0x19a */,
+       HAL_MACTX_SYNTH_ON                     = 411 /* 0x19b */,
+       HAL_SCH_CRITICAL_TLV_REFERENCE         = 412 /* 0x19c */,
+       HAL_TQM_MPDU_GLOBAL_START              = 413 /* 0x19d */,
+       HAL_EXAMPLE_TLV_32                     = 414 /* 0x19e */,
+       HAL_TQM_UPDATE_TX_MSDU_FLOW            = 415 /* 0x19f */,
+       HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD      = 416 /* 0x1a0 */,
+       HAL_TQM_UPDATE_TX_MSDU_FLOW_STATUS     = 417 /* 0x1a1 */,
+       HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD_STATUS = 418 /* 0x1a2 */,
+       HAL_REO_UPDATE_RX_REO_QUEUE            = 419 /* 0x1a3 */,
+       HAL_CE_DST_DESC                        = 420 /* 0x1a4 */,
+       HAL_TLV_BASE                           = 511 /* 0x1ff */,
+};
+
+/* Bit layout of the 32-bit 'tl' word of struct hal_tlv_hdr (bit 0 unused). */
+#define HAL_TLV_HDR_TAG                GENMASK(9, 1)
+#define HAL_TLV_HDR_LEN                GENMASK(25, 10)
+#define HAL_TLV_USR_ID         GENMASK(31, 26)
+
+/* TLV alignment unit, in bytes. */
+#define HAL_TLV_ALIGN  4
+
+/*
+ * Generic header preceding each HAL TLV.  The tag (one of the
+ * HAL_* values of enum hal_tlv_tag), payload length and user ID are
+ * packed into 'tl'; decode with the HAL_TLV_HDR_TAG/HAL_TLV_HDR_LEN/
+ * HAL_TLV_USR_ID masks.  'value' is the variable-length TLV payload.
+ */
+struct hal_tlv_hdr {
+       uint32_t tl;    /* %HAL_TLV_HDR_TAG/LEN, %HAL_TLV_USR_ID */
+       uint8_t value[];
+} __packed;
+
+/*
+ * Fields of rx_mpdu_desc.info0 and rx_mpdu_desc.meta_data; see the
+ * rx_mpdu_desc description below for the meaning of each field.
+ */
+#define RX_MPDU_DESC_INFO0_MSDU_COUNT          0xff
+#define RX_MPDU_DESC_INFO0_SEQ_NUM             0xfff00
+#define RX_MPDU_DESC_INFO0_FRAG_FLAG           (1 << 20)
+#define RX_MPDU_DESC_INFO0_MPDU_RETRY          (1 << 21)
+#define RX_MPDU_DESC_INFO0_AMPDU_FLAG          (1 << 22)
+#define RX_MPDU_DESC_INFO0_BAR_FRAME           (1 << 23)
+#define RX_MPDU_DESC_INFO0_VALID_PN            (1 << 24)
+#define RX_MPDU_DESC_INFO0_VALID_SA            (1 << 25)
+#define RX_MPDU_DESC_INFO0_SA_IDX_TIMEOUT      (1 << 26)
+#define RX_MPDU_DESC_INFO0_VALID_DA            (1 << 27)
+#define RX_MPDU_DESC_INFO0_DA_MCBC             (1 << 28)
+#define RX_MPDU_DESC_INFO0_DA_IDX_TIMEOUT      (1 << 29)
+#define RX_MPDU_DESC_INFO0_RAW_MPDU            (1 << 30)
+
+#define RX_MPDU_DESC_META_DATA_PEER_ID         0xffff
+
+/* Per-MPDU Rx descriptor; see the field-by-field description below. */
+struct rx_mpdu_desc {
+       uint32_t info0; /* %RX_MPDU_DESC_INFO0_ */
+       uint32_t meta_data; /* %RX_MPDU_DESC_META_DATA_ */
+} __packed;
+
+/* rx_mpdu_desc
+ *             Producer: RXDMA
+ *             Consumer: REO/SW/FW
+ *
+ * msdu_count
+ *             The number of MSDUs within the MPDU
+ *
+ * mpdu_sequence_number
+ *             The field can have two different meanings based on the setting
+ *             of field 'bar_frame'. If 'bar_frame' is set, it means the MPDU
+ *             start sequence number from the BAR frame otherwise it means
+ *             the MPDU sequence number of the received frame.
+ *
+ * fragment_flag
+ *             When set, this MPDU is a fragment and REO should forward this
+ *             fragment MPDU to the REO destination ring without any reorder
+ *             checks, pn checks or bitmap update. This implies that REO is
+ *             forwarding the pointer to the MSDU link descriptor.
+ *
+ * mpdu_retry_bit
+ *             The retry bit setting from the MPDU header of the received frame
+ *
+ * ampdu_flag
+ *             Indicates the MPDU was received as part of an A-MPDU.
+ *
+ * bar_frame
+ *             Indicates the received frame is a BAR frame. After processing,
+ *             this frame shall be pushed to SW or deleted.
+ *
+ * valid_pn
+ *             When not set, REO will not perform a PN sequence number check.
+ *
+ * valid_sa
+ *             Indicates OLE found a valid SA entry for all MSDUs in this MPDU.
+ *
+ * sa_idx_timeout
+ *             Indicates, at least 1 MSDU within the MPDU has an unsuccessful
+ *             MAC source address search due to the expiration of search timer.
+ *
+ * valid_da
+ *             When set, OLE found a valid DA entry for all MSDUs in this MPDU.
+ *
+ * da_mcbc
+ *             Field only valid if valid_da is set. Indicates at least one of
+ *             the DA addresses is a Multicast or Broadcast address.
+ *
+ * da_idx_timeout
+ *             Indicates, at least 1 MSDU within the MPDU has an unsuccessful
+ *             MAC destination address search due to the expiration of search
+ *             timer.
+ *
+ * raw_mpdu
+ *             Field only valid when first_msdu_in_mpdu_flag is set. Indicates
+ *             the contents in the MSDU buffer contains a 'RAW' MPDU.
+ */
+
+/*
+ * REO destination indication: the exit ring an MSDU is pushed to after
+ * reordering (TCL, one of the SW rings, the release ring, or FW).
+ * Carried in %RX_MSDU_DESC_INFO0_REO_DEST_IND.
+ */
+enum hal_rx_msdu_desc_reo_dest_ind {
+       HAL_RX_MSDU_DESC_REO_DEST_IND_TCL,
+       HAL_RX_MSDU_DESC_REO_DEST_IND_SW1,
+       HAL_RX_MSDU_DESC_REO_DEST_IND_SW2,
+       HAL_RX_MSDU_DESC_REO_DEST_IND_SW3,
+       HAL_RX_MSDU_DESC_REO_DEST_IND_SW4,
+       HAL_RX_MSDU_DESC_REO_DEST_IND_RELEASE,
+       HAL_RX_MSDU_DESC_REO_DEST_IND_FW,
+};
+
+/*
+ * Fields of rx_msdu_desc.info0; see the rx_msdu_desc description below
+ * for the meaning of each field.
+ */
+#define RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU  (1 << 0)
+#define RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU   (1 << 1)
+#define RX_MSDU_DESC_INFO0_MSDU_CONTINUATION   (1 << 2)
+#define RX_MSDU_DESC_INFO0_MSDU_LENGTH         GENMASK(16, 3)
+#define RX_MSDU_DESC_INFO0_REO_DEST_IND                GENMASK(21, 17)
+#define RX_MSDU_DESC_INFO0_MSDU_DROP           (1 << 22)
+#define RX_MSDU_DESC_INFO0_VALID_SA            (1 << 23)
+#define RX_MSDU_DESC_INFO0_SA_IDX_TIMEOUT      (1 << 24)
+#define RX_MSDU_DESC_INFO0_VALID_DA            (1 << 25)
+#define RX_MSDU_DESC_INFO0_DA_MCBC             (1 << 26)
+#define RX_MSDU_DESC_INFO0_DA_IDX_TIMEOUT      (1 << 27)
+
+/* Extract the MSDU length; only valid when first_msdu_in_mpdu is set. */
+#define HAL_RX_MSDU_PKT_LENGTH_GET(val)                \
+       (FIELD_GET(RX_MSDU_DESC_INFO0_MSDU_LENGTH, (val)))
+
+/* Per-MSDU Rx descriptor; see the field-by-field description below. */
+struct rx_msdu_desc {
+       uint32_t info0; /* %RX_MSDU_DESC_INFO0_ */
+       uint32_t rsvd0;
+} __packed;
+
+/* rx_msdu_desc
+ *
+ * first_msdu_in_mpdu
+ *             Indicates first msdu in mpdu.
+ *
+ * last_msdu_in_mpdu
+ *             Indicates last msdu in mpdu. This flag can be true only when
+ *             'Msdu_continuation' set to 0. This implies that when an msdu
+ *             is spread out over multiple buffers and thus msdu_continuation
+ *             is set, only for the very last buffer of the msdu, can the
+ *             'last_msdu_in_mpdu' be set.
+ *
+ *             When both first_msdu_in_mpdu and last_msdu_in_mpdu are set,
+ *             the MPDU that this MSDU belongs to only contains a single MSDU.
+ *
+ * msdu_continuation
+ *             When set, this MSDU buffer was not able to hold the entire MSDU.
+ *             The next buffer will therefore contain additional information
+ *             related to this MSDU.
+ *
+ * msdu_length
+ *             Field is only valid in combination with the 'first_msdu_in_mpdu'
+ *             being set. Full MSDU length in bytes after decapsulation. This
+ *             field is still valid for MPDU frames without A-MSDU. It still
+ *             represents MSDU length after decapsulation Or in case of RAW
+ *             MPDUs, it indicates the length of the entire MPDU (without FCS
+ *             field).
+ *
+ * reo_destination_indication
+ *             The id of the reo exit ring where the msdu frame shall push
+ *             after (MPDU level) reordering has finished. Values are defined
+ *             in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * msdu_drop
+ *             Indicates that REO shall drop this MSDU and not forward it to
+ *             any other ring.
+ *
+ * valid_sa
+ *             Indicates OLE found a valid SA entry for this MSDU.
+ *
+ * sa_idx_timeout
+ *             Indicates, an unsuccessful MAC source address search due to
+ *             the expiration of search timer for this MSDU.
+ *
+ * valid_da
+ *             When set, OLE found a valid DA entry for this MSDU.
+ *
+ * da_mcbc
+ *             Field only valid if valid_da is set. Indicates the DA address
+ *             is a Multicast or Broadcast address for this MSDU.
+ *
+ * da_idx_timeout
+ *             Indicates, an unsuccessful MAC destination address search due
+ *             to the expiration of search timer for this MSDU.
+ */
+
+/*
+ * Type of address carried in buf_addr_info of a REO destination ring
+ * entry (%HAL_REO_DEST_RING_INFO0_BUFFER_TYPE): a plain MSDU buffer or
+ * an MSDU link descriptor.
+ */
+enum hal_reo_dest_ring_buffer_type {
+       HAL_REO_DEST_RING_BUFFER_TYPE_MSDU,
+       HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC,
+};
+
+/*
+ * Reason a frame was pushed to the exit ring, carried in
+ * %HAL_REO_DEST_RING_INFO0_PUSH_REASON.
+ */
+enum hal_reo_dest_ring_push_reason {
+       HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED,
+       HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION,
+};
+
+/*
+ * REO error codes, carried in %HAL_REO_DEST_RING_INFO0_ERROR_CODE and
+ * valid only when the push reason indicates an error was detected.
+ */
+enum hal_reo_dest_ring_error_code {
+       HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO,
+       HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID,
+       HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA,
+       HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE,
+       HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE,
+       HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP,
+       HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP,
+       HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR,
+       HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR,
+       HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION,
+       HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN,
+       HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED,
+       HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET,
+       HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET,
+       HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED,
+       HAL_REO_DEST_RING_ERROR_CODE_MAX,
+};
+
+/*
+ * Fields of the hal_reo_dest_ring info0/info1/info2 words; see the
+ * hal_reo_dest_ring description below for the meaning of each field.
+ */
+#define HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI          GENMASK(7, 0)
+#define HAL_REO_DEST_RING_INFO0_BUFFER_TYPE            (1 << 8)
+#define HAL_REO_DEST_RING_INFO0_PUSH_REASON            GENMASK(10, 9)
+#define HAL_REO_DEST_RING_INFO0_ERROR_CODE             GENMASK(15, 11)
+#define HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM           GENMASK(31, 16)
+
+#define HAL_REO_DEST_RING_INFO1_REORDER_INFO_VALID     (1 << 0)
+#define HAL_REO_DEST_RING_INFO1_REORDER_OPCODE         GENMASK(4, 1)
+#define HAL_REO_DEST_RING_INFO1_REORDER_SLOT_IDX       GENMASK(12, 5)
+
+#define HAL_REO_DEST_RING_INFO2_RING_ID                        GENMASK(27, 20)
+#define HAL_REO_DEST_RING_INFO2_LOOPING_COUNT          GENMASK(31, 28)
+
+/*
+ * REO destination ring entry (produced by RXDMA, consumed by
+ * REO/SW/FW); see the field-by-field description below.
+ */
+struct hal_reo_dest_ring {
+       struct ath12k_buffer_addr buf_addr_info;
+       struct rx_mpdu_desc rx_mpdu_info;
+       struct rx_msdu_desc rx_msdu_info;
+       uint32_t queue_addr_lo;
+       uint32_t info0; /* %HAL_REO_DEST_RING_INFO0_ */
+       uint32_t info1; /* %HAL_REO_DEST_RING_INFO1_ */
+       uint32_t rsvd0;
+       uint32_t rsvd1;
+       uint32_t rsvd2;
+       uint32_t rsvd3;
+       uint32_t rsvd4;
+       uint32_t rsvd5;
+       uint32_t info2; /* %HAL_REO_DEST_RING_INFO2_ */
+} __packed;
+
+/* hal_reo_dest_ring
+ *
+ *             Producer: RXDMA
+ *             Consumer: REO/SW/FW
+ *
+ * buf_addr_info
+ *             Details of the physical address of a buffer or MSDU
+ *             link descriptor.
+ *
+ * rx_mpdu_info
+ *             General information related to the MPDU that is passed
+ *             on from REO entrance ring to the REO destination ring.
+ *
+ * rx_msdu_info
+ *             General information related to the MSDU that is passed
+ *             on from RXDMA all the way to the REO destination ring.
+ *
+ * queue_addr_lo
+ *             Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ *             Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * buffer_type
+ *             Indicates the type of address provided in the buf_addr_info.
+ *             Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * push_reason
+ *             Reason for pushing this frame to this exit ring. Values are
+ *             defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * error_code
+ *             Valid only when 'push_reason' is set. All error codes are
+ *             defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * rx_queue_num
+ *             Indicates the REO MPDU reorder queue id from which this frame
+ *             originated.
+ *
+ * reorder_info_valid
+ *             When set, REO has been instructed to not perform the actual
+ *             re-ordering of frames for this queue, but just to insert
+ *             the reorder opcodes.
+ *
+ * reorder_opcode
+ *             Field is valid when 'reorder_info_valid' is set. This field is
+ *             always valid for debug purposes as well.
+ *
+ * reorder_slot_idx
+ *             Valid only when 'reorder_info_valid' is set.
+ *
+ * ring_id
+ *             The buffer pointer ring id.
+ *             0 - Idle ring
+ *             1 - N refers to other rings.
+ *
+ * looping_count
+ *             Indicates the number of times the producer of entries into
+ *             this ring has looped around the ring.
+ */
+
+/*
+ * RXDMA error codes for frames pushed into the REO entrance ring,
+ * carried in %HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE.
+ */
+enum hal_reo_entr_rxdma_ecode {
+       HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR, /* sic: spelling kept from the driver this is ported from */
+       HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
+       HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
+};
+
+/*
+ * Fields of the hal_reo_entrance_ring info0/info1 words; see the
+ * hal_reo_entrance_ring description below for the meaning of each field.
+ */
+#define HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI          GENMASK(7, 0)
+#define HAL_REO_ENTR_RING_INFO0_MPDU_BYTE_COUNT                GENMASK(21, 8)
+#define HAL_REO_ENTR_RING_INFO0_DEST_IND               GENMASK(26, 22)
+#define HAL_REO_ENTR_RING_INFO0_FRAMELESS_BAR          BIT(27)
+
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON      GENMASK(1, 0)
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE       GENMASK(6, 2)
+
+/*
+ * REO entrance ring entry (produced by RXDMA, consumed by REO); see
+ * the field-by-field description below.
+ */
+struct hal_reo_entrance_ring {
+       struct ath12k_buffer_addr buf_addr_info;
+       struct rx_mpdu_desc rx_mpdu_info;
+       uint32_t queue_addr_lo;
+       uint32_t info0; /* %HAL_REO_ENTR_RING_INFO0_ */
+       uint32_t info1; /* %HAL_REO_ENTR_RING_INFO1_ */
+       uint32_t info2; /* %HAL_REO_DEST_RING_INFO2_ */
+
+} __packed;
+
+/* hal_reo_entrance_ring
+ *
+ *             Producer: RXDMA
+ *             Consumer: REO
+ *
+ * buf_addr_info
+ *             Details of the physical address of a buffer or MSDU
+ *             link descriptor.
+ *
+ * rx_mpdu_info
+ *             General information related to the MPDU that is passed
+ *             on from REO entrance ring to the REO destination ring.
+ *
+ * queue_addr_lo
+ *             Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ *             Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * mpdu_byte_count
+ *             An approximation of the number of bytes received in this MPDU.
+ *             Used to keep stats on the amount of data flowing
+ *             through a queue.
+ *
+ * reo_destination_indication
+ *             The id of the reo exit ring where the msdu frame shall push
+ *             after (MPDU level) reordering has finished. Values are defined
+ *             in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * frameless_bar
+ *             Indicates that this REO entrance ring struct contains BAR info
+ *             from a multi TID BAR frame. The original multi TID BAR frame
+ *             itself contained all the REO info for the first TID, but all
+ *             the subsequent TID info and their linkage to the REO descriptors
+ *             is passed down as 'frameless' BAR info.
+ *
+ *             The only fields valid in this descriptor when this bit is set
+ *             are queue_addr_lo, queue_addr_hi, mpdu_sequence_number,
+ *             bar_frame and peer_meta_data.
+ *
+ * rxdma_push_reason
+ *             Reason for pushing this frame to this exit ring. Values are
+ *             defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ *             Valid only when 'push_reason' is set. All error codes are
+ *             defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * ring_id
+ *             The buffer pointer ring id.
+ *             0 - Idle ring
+ *             1 - N refers to other rings.
+ *
+ * looping_count
+ *             Indicates the number of times the producer of entries into
+ *             this ring has looped around the ring.
+ */
+
+#define HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON        GENMASK(1, 0)
+#define HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE GENMASK(6, 2)
+#define HAL_SW_MON_RING_INFO0_MPDU_FRAG_NUMBER GENMASK(10, 7)
+#define HAL_SW_MON_RING_INFO0_FRAMELESS_BAR    BIT(11)
+#define HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT   GENMASK(15, 12)
+#define HAL_SW_MON_RING_INFO0_END_OF_PPDU      BIT(16)
+
+#define HAL_SW_MON_RING_INFO1_PHY_PPDU_ID      GENMASK(15, 0)
+#define HAL_SW_MON_RING_INFO1_RING_ID          GENMASK(27, 20)
+#define HAL_SW_MON_RING_INFO1_LOOPING_COUNT    GENMASK(31, 28)
+
+/* SW monitor ring descriptor; info0/info1 fields are defined above. */
+struct hal_sw_monitor_ring {
+       struct ath12k_buffer_addr buf_addr_info;
+       struct rx_mpdu_desc rx_mpdu_info;
+       struct ath12k_buffer_addr status_buf_addr_info;
+       uint32_t info0;
+       uint32_t info1;
+} __packed;
+
+#define HAL_REO_CMD_HDR_INFO0_CMD_NUMBER       GENMASK(15, 0)
+#define HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED  BIT(16)
+
+/* Header common to all REO commands; info0 carries the command number
+ * and the status-required flag defined above. */
+struct hal_reo_cmd_hdr {
+       uint32_t info0;
+} __packed;
+
+
+#define HAL_SRNG_DESC_LOOP_CNT         0xf0000000
+
+#define HAL_REO_CMD_FLG_NEED_STATUS            BIT(0)
+#define HAL_REO_CMD_FLG_STATS_CLEAR            BIT(1)
+#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER      BIT(2)
+#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
+#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL         BIT(4)
+#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS    BIT(5)
+#define HAL_REO_CMD_FLG_FLUSH_ALL              BIT(6)
+#define HAL_REO_CMD_FLG_UNBLK_RESOURCE         BIT(7)
+#define HAL_REO_CMD_FLG_UNBLK_CACHE            BIT(8)
+
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
+#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM          BIT(8)
+#define HAL_REO_CMD_UPD0_VLD                   BIT(9)
+#define HAL_REO_CMD_UPD0_ALDC                  BIT(10)
+#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION     BIT(11)
+#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN       BIT(12)
+#define HAL_REO_CMD_UPD0_AC                    BIT(13)
+#define HAL_REO_CMD_UPD0_BAR                   BIT(14)
+#define HAL_REO_CMD_UPD0_RETRY                 BIT(15)
+#define HAL_REO_CMD_UPD0_CHECK_2K_MODE         BIT(16)
+#define HAL_REO_CMD_UPD0_OOR_MODE              BIT(17)
+#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE                BIT(18)
+#define HAL_REO_CMD_UPD0_PN_CHECK              BIT(19)
+#define HAL_REO_CMD_UPD0_EVEN_PN               BIT(20)
+#define HAL_REO_CMD_UPD0_UNEVEN_PN             BIT(21)
+#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE      BIT(22)
+#define HAL_REO_CMD_UPD0_PN_SIZE               BIT(23)
+#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG      BIT(24)
+#define HAL_REO_CMD_UPD0_SVLD                  BIT(25)
+#define HAL_REO_CMD_UPD0_SSN                   BIT(26)
+#define HAL_REO_CMD_UPD0_SEQ_2K_ERR            BIT(27)
+#define HAL_REO_CMD_UPD0_PN_ERR                        BIT(28)
+#define HAL_REO_CMD_UPD0_PN_VALID              BIT(29)
+#define HAL_REO_CMD_UPD0_PN                    BIT(30)
+
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
+#define HAL_REO_CMD_UPD1_VLD                   BIT(16)
+#define HAL_REO_CMD_UPD1_ALDC                  GENMASK(18, 17)
+#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION     BIT(19)
+#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN       BIT(20)
+#define HAL_REO_CMD_UPD1_AC                    GENMASK(22, 21)
+#define HAL_REO_CMD_UPD1_BAR                   BIT(23)
+#define HAL_REO_CMD_UPD1_RETRY                 BIT(24)
+#define HAL_REO_CMD_UPD1_CHECK_2K_MODE         BIT(25)
+#define HAL_REO_CMD_UPD1_OOR_MODE              BIT(26)
+#define HAL_REO_CMD_UPD1_PN_CHECK              BIT(27)
+#define HAL_REO_CMD_UPD1_EVEN_PN               BIT(28)
+#define HAL_REO_CMD_UPD1_UNEVEN_PN             BIT(29)
+#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE      BIT(30)
+#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG      BIT(31)
+
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
+#define HAL_REO_CMD_UPD2_SVLD                  BIT(10)
+#define HAL_REO_CMD_UPD2_SSN                   GENMASK(22, 11)
+#define HAL_REO_CMD_UPD2_SEQ_2K_ERR            BIT(23)
+#define HAL_REO_CMD_UPD2_PN_ERR                        BIT(24)
+
+#define HAL_REO_DEST_RING_CTRL_HASH_RING_MAP   GENMASK(31, 8)
+
+/*
+ * Software-side parameter block for issuing REO commands; values from
+ * here are encoded into the hal_reo_* command descriptors.  This is
+ * not a hardware descriptor layout (note: not __packed).
+ */
+struct ath12k_hal_reo_cmd {
+       uint32_t addr_lo;
+       uint32_t flag;          /* HAL_REO_CMD_FLG_* */
+       uint32_t upd0;          /* HAL_REO_CMD_UPD0_* */
+       uint32_t upd1;          /* HAL_REO_CMD_UPD1_* */
+       uint32_t upd2;          /* HAL_REO_CMD_UPD2_* */
+       uint32_t pn[4];
+       uint16_t rx_queue_num;
+       uint16_t min_rel;
+       uint16_t min_fwd;
+       uint8_t addr_hi;
+       uint8_t ac_list;
+       uint8_t blocking_idx;
+       uint16_t ba_window_size;
+       uint8_t pn_size;
+};
+
+#define HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI    GENMASK(7, 0)
+#define HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS      BIT(8)
+
+/* REO GET_QUEUE_STATS command descriptor; see description below. */
+struct hal_reo_get_queue_stats {
+       struct hal_reo_cmd_hdr cmd;
+       uint32_t queue_addr_lo;
+       uint32_t info0;
+       uint32_t rsvd0[6];
+} __packed;
+
+/* hal_reo_get_queue_stats
+ *             Producer: SW
+ *             Consumer: REO
+ *
+ * cmd
+ *             Details for command execution tracking purposes.
+ *
+ * queue_addr_lo
+ *             Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ *             Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * clear_stats
+ *             Clear stats settings. When set, Clear the stats after
+ *             generating the status.
+ *
+ *             Following stats will be cleared.
+ *             Timeout_count
+ *             Forward_due_to_bar_count
+ *             Duplicate_count
+ *             Frames_in_order_count
+ *             BAR_received_count
+ *             MPDU_Frames_processed_count
+ *             MSDU_Frames_processed_count
+ *             Total_processed_byte_count
+ *             Late_receive_MPDU_count
+ *             window_jump_2k
+ *             Hole_count
+ */
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_DESC_ADDR_HI         GENMASK(7, 0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_DESC_ADDR      BIT(8)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_RESRC_IDX      GENMASK(10, 9)
+
+/* REO FLUSH_QUEUE command descriptor. */
+struct hal_reo_flush_queue {
+       struct hal_reo_cmd_hdr cmd;
+       uint32_t desc_addr_lo;
+       uint32_t info0;
+       uint32_t rsvd0[6];
+} __packed;
+
+#define HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI                GENMASK(7, 0)
+#define HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS                BIT(8)
+#define HAL_REO_FLUSH_CACHE_INFO0_RELEASE_BLOCK_IDX    BIT(9)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX      GENMASK(11, 10)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE  BIT(12)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE    BIT(13)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL            BIT(14)
+
+/* REO FLUSH_CACHE command descriptor. */
+struct hal_reo_flush_cache {
+       struct hal_reo_cmd_hdr cmd;
+       uint32_t cache_addr_lo;
+       uint32_t info0;
+       uint32_t rsvd0[6];
+} __packed;
+
+/* Field masks for the info words of struct hal_tcl_data_cmd. */
+#define HAL_TCL_DATA_CMD_INFO0_DESC_TYPE       BIT(0)
+#define HAL_TCL_DATA_CMD_INFO0_EPD             BIT(1)
+#define HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE      GENMASK(3, 2)
+#define HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE    GENMASK(7, 4)
+#define HAL_TCL_DATA_CMD_INFO0_SRC_BUF_SWAP    BIT(8)
+#define HAL_TCL_DATA_CMD_INFO0_LNK_META_SWAP   BIT(9)
+#define HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE     GENMASK(13, 12)
+#define HAL_TCL_DATA_CMD_INFO0_ADDR_EN         GENMASK(15, 14)
+#define HAL_TCL_DATA_CMD_INFO0_CMD_NUM         GENMASK(31, 16)
+
+#define HAL_TCL_DATA_CMD_INFO1_DATA_LEN                GENMASK(15, 0)
+#define HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN    BIT(16)
+#define HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN   BIT(17)
+#define HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN   BIT(18)
+#define HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN   BIT(19)
+#define HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN   BIT(20)
+#define HAL_TCL_DATA_CMD_INFO1_TO_FW           BIT(21)
+#define HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET      GENMASK(31, 23)
+
+#define HAL_TCL_DATA_CMD_INFO2_BUF_TIMESTAMP           GENMASK(18, 0)
+#define HAL_TCL_DATA_CMD_INFO2_BUF_T_VALID             BIT(19)
+#define HAL_IPQ8074_TCL_DATA_CMD_INFO2_MESH_ENABLE     BIT(20)
+#define HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE           BIT(21)
+#define HAL_TCL_DATA_CMD_INFO2_TID                     GENMASK(25, 22)
+#define HAL_TCL_DATA_CMD_INFO2_LMAC_ID                 GENMASK(27, 26)
+
+#define HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX      GENMASK(5, 0)
+#define HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX            GENMASK(25, 6)
+#define HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM           GENMASK(29, 26)
+#define HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE     GENMASK(31, 30)
+
+#define HAL_TCL_DATA_CMD_INFO4_RING_ID                 GENMASK(27, 20)
+#define HAL_TCL_DATA_CMD_INFO4_LOOPING_COUNT           GENMASK(31, 28)
+
+/* Cipher selection for HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE
+ * (only valid for raw encapsulation; see hal_tcl_data_cmd notes). */
+enum hal_encrypt_type {
+       HAL_ENCRYPT_TYPE_WEP_40,
+       HAL_ENCRYPT_TYPE_WEP_104,
+       HAL_ENCRYPT_TYPE_TKIP_NO_MIC,
+       HAL_ENCRYPT_TYPE_WEP_128,
+       HAL_ENCRYPT_TYPE_TKIP_MIC,
+       HAL_ENCRYPT_TYPE_WAPI,
+       HAL_ENCRYPT_TYPE_CCMP_128,
+       HAL_ENCRYPT_TYPE_OPEN,
+       HAL_ENCRYPT_TYPE_CCMP_256,
+       HAL_ENCRYPT_TYPE_GCMP_128,
+       HAL_ENCRYPT_TYPE_AES_GCMP_256,
+       HAL_ENCRYPT_TYPE_WAPI_GCM_SM4,
+};
+
+/* Values for HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE. */
+enum hal_tcl_encap_type {
+       HAL_TCL_ENCAP_TYPE_RAW,
+       HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
+       HAL_TCL_ENCAP_TYPE_ETHERNET,
+       HAL_TCL_ENCAP_TYPE_802_3 = 3,
+};
+
+/* Values for HAL_TCL_DATA_CMD_INFO0_DESC_TYPE: what buf_addr_info points at. */
+enum hal_tcl_desc_type {
+       HAL_TCL_DESC_TYPE_BUFFER,
+       HAL_TCL_DESC_TYPE_EXT_DESC,
+};
+
+/* HTT tx completion status codes carried in WBM release descriptors. */
+enum hal_wbm_htt_tx_comp_status {
+       HAL_WBM_REL_HTT_TX_COMP_STATUS_OK,
+       HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP,
+       HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL,
+       HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
+       HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
+       HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+};
+
+/* TCL data command descriptor; see the field-by-field description below. */
+struct hal_tcl_data_cmd {
+       struct ath12k_buffer_addr buf_addr_info;
+       uint32_t info0;
+       uint32_t info1;
+       uint32_t info2;
+       uint32_t info3;
+       uint32_t info4;
+} __packed;
+
+/* hal_tcl_data_cmd
+ *
+ * buf_addr_info
+ *             Details of the physical address of a buffer or MSDU
+ *             link descriptor.
+ *
+ * desc_type
+ *             Indicates the type of address provided in the buf_addr_info.
+ *             Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * epd
+ *             When this bit is set then input packet is an EPD type.
+ *
+ * encap_type
+ *             Indicates the encapsulation that HW will perform. Values are
+ *             defined in enum %HAL_TCL_ENCAP_TYPE_.
+ *
+ * encrypt_type
+ *             Field only valid for encap_type: RAW
+ *             Values are defined in enum %HAL_ENCRYPT_TYPE_.
+ *
+ * src_buffer_swap
+ *             Treats source memory (packet buffer) organization as big-endian.
+ *             1'b0: Source memory is little endian
+ *             1'b1: Source memory is big endian
+ *
+ * link_meta_swap
+ *             Treats link descriptor and Metadata as big-endian.
+ *             1'b0: memory is little endian
+ *             1'b1: memory is big endian
+ *
+ * search_type
+ *             Search type select
+ *             0 - Normal search, 1 - Index based address search,
+ *             2 - Index based flow search
+ *
+ * addrx_en
+ * addry_en
+ *             Address X/Y search enable in ASE correspondingly.
+ *             1'b0: Search disable
+ *             1'b1: Search Enable
+ *
+ * cmd_num
+ *             This number can be used to match against status.
+ *
+ * data_length
+ *             MSDU length in case of direct descriptor. Length of link
+ *             extension descriptor in case of Link extension descriptor.
+ *
+ * *_checksum_en
+ *             Enable checksum replacement for ipv4, udp_over_ipv4, ipv6,
+ *             udp_over_ipv6, tcp_over_ipv4 and tcp_over_ipv6.
+ *
+ * to_fw
+ *             Forward packet to FW along with classification result. The
+ *             packet will not be forward to TQM when this bit is set.
+ *             1'b0: Use classification result to forward the packet.
+ *             1'b1: Override classification result & forward packet only to fw
+ *
+ * packet_offset
+ *             Packet offset from Metadata in case of direct buffer descriptor.
+ *
+ * buffer_timestamp
+ * buffer_timestamp_valid
+ *             Frame system entrance timestamp. It shall be filled by first
+ *             module (SW, TCL or TQM) that sees the frames first.
+ *
+ * mesh_enable
+ *             For raw WiFi frames, this indicates transmission to a mesh STA,
+ *             enabling the interpretation of the 'Mesh Control Present' bit
+ *             (bit 8) of QoS Control.
+ *             For native WiFi frames, this indicates that a 'Mesh Control'
+ *             field is present between the header and the LLC.
+ *
+ * hlos_tid_overwrite
+ *
+ *             When set, TCL shall ignore the IP DSCP and VLAN PCP
+ *             fields and use HLOS_TID as the final TID. Otherwise TCL
+ *             shall consider the DSCP and PCP fields as well as HLOS_TID
+ *             and choose a final TID based on the configured priority
+ *
+ * hlos_tid
+ *             HLOS MSDU priority
+ *             Field is used when HLOS_TID_overwrite is set.
+ *
+ * lmac_id
+ *             TCL uses this LMAC_ID in address search, i.e, while
+ *             finding matching entry for the packet in AST corresponding
+ *             to given LMAC_ID
+ *
+ *             If LMAC ID is all 1s (=> value 3), it indicates wildcard
+ *             match for any MAC
+ *
+ * dscp_tid_table_num
+ *             DSCP to TID mapping table number that need to be used
+ *             for the MSDU.
+ *
+ * search_index
+ *             The index that will be used for index based address or
+ *             flow search. The field is valid when 'search_type' is  1 or 2.
+ *
+ * cache_set_num
+ *
+ *             Cache set number that should be used to cache the index
+ *             based search results, for address and flow search. This
+ *             value should be equal to LSB four bits of the hash value of
+ *             match data, in case of search index points to an entry which
+ *             may be used in content based search also. The value can be
+ *             anything when the entry pointed by search index will not be
+ *             used for content based search.
+ *
+ * ring_id
+ *             The buffer pointer ring ID.
+ *             0 refers to the IDLE ring
+ *             1 - N refers to other rings
+ *
+ * looping_count
+ *
+ *             A count value that indicates the number of times the
+ *             producer of entries into the Ring has looped around the
+ *             ring.
+ *
+ *             At initialization time, this value is set to 0. On the
+ *             first loop, this value is set to 1. After the max value is
+ *             reached allowed by the number of bits for this field, the
+ *             count value continues with 0 again.
+ *
+ *             In case SW is the consumer of the ring entries, it can
+ *             use this field to figure out up to where the producer of
+ *             entries has created new entries. This eliminates the need to
+ *             check where the head pointer of the ring is located once
+ *             the SW starts processing an interrupt indicating that new
+ *             entries have been put into this ring...
+ *
+ *             Also note that SW if it wants only needs to look at the
+ *             LSB bit of this count value.
+ */
+
+#define HAL_TCL_DESC_LEN sizeof(struct hal_tcl_data_cmd)
+
+/* GSE cache/statistics control operations; see description below. */
+enum hal_tcl_gse_ctrl {
+       HAL_TCL_GSE_CTRL_RD_STAT,
+       HAL_TCL_GSE_CTRL_SRCH_DIS,
+       HAL_TCL_GSE_CTRL_WR_BK_SINGLE,
+       HAL_TCL_GSE_CTRL_WR_BK_ALL,
+       HAL_TCL_GSE_CTRL_INVAL_SINGLE,
+       HAL_TCL_GSE_CTRL_INVAL_ALL,
+       HAL_TCL_GSE_CTRL_WR_BK_INVAL_SINGLE,
+       HAL_TCL_GSE_CTRL_WR_BK_INVAL_ALL,
+       HAL_TCL_GSE_CTRL_CLR_STAT_SINGLE,
+};
+
+/* hal_tcl_gse_ctrl
+ *
+ * rd_stat
+ *             Report or Read statistics
+ * srch_dis
+ *             Search disable. Report only Hash.
+ * wr_bk_single
+ *             Write Back single entry
+ * wr_bk_all
+ *             Write Back entire cache entry
+ * inval_single
+ *             Invalidate single cache entry
+ * inval_all
+ *             Invalidate entire cache
+ * wr_bk_inval_single
+ *             Write back and invalidate single entry in cache
+ * wr_bk_inval_all
+ *             Write back and invalidate entire cache
+ * clr_stat_single
+ *             Clear statistics for single entry
+ */
+
+#define HAL_TCL_GSE_CMD_INFO0_CTRL_BUF_ADDR_HI         GENMASK(7, 0)
+#define HAL_TCL_GSE_CMD_INFO0_GSE_CTRL                 GENMASK(11, 8)
+#define HAL_TCL_GSE_CMD_INFO0_GSE_SEL                  BIT(12)
+#define HAL_TCL_GSE_CMD_INFO0_STATUS_DEST_RING_ID      BIT(13)
+#define HAL_TCL_GSE_CMD_INFO0_SWAP                     BIT(14)
+
+#define HAL_TCL_GSE_CMD_INFO1_RING_ID                  GENMASK(27, 20)
+#define HAL_TCL_GSE_CMD_INFO1_LOOPING_COUNT            GENMASK(31, 28)
+
+/* TCL GSE command descriptor; see the field description below. */
+struct hal_tcl_gse_cmd {
+       uint32_t ctrl_buf_addr_lo;
+       uint32_t info0;
+       uint32_t meta_data[2];
+       uint32_t rsvd0[2];
+       uint32_t info1;
+} __packed;
+
+/* hal_tcl_gse_cmd
+ *
+ * ctrl_buf_addr_lo, ctrl_buf_addr_hi
+ *             Address of a control buffer containing additional info needed
+ *             for this command execution.
+ *
+ * gse_ctrl
+ *             GSE control operations. This includes cache operations and table
+ *             entry statistics read/clear operation. Values are defined in
+ *             enum %HAL_TCL_GSE_CTRL.
+ *
+ * gse_sel
+ *             To select the ASE/FSE to do the operation mentioned by GSE_ctrl.
+ *             0: FSE select 1: ASE select
+ *
+ * status_destination_ring_id
+ *             TCL status ring to which the GSE status needs to be sent.
+ *
+ * swap
+ *             Bit to enable byte swapping of contents of buffer.
+ *
+ * meta_data
+ *             Meta data to be returned in the status descriptor
+ */
+
+/* GSE cache operation results, reported via
+ * HAL_TCL_STATUS_RING_INFO0_CACHE_OP_RES. */
+enum hal_tcl_cache_op_res {
+       HAL_TCL_CACHE_OP_RES_DONE,
+       HAL_TCL_CACHE_OP_RES_NOT_FOUND,
+       HAL_TCL_CACHE_OP_RES_TIMEOUT,
+};
+
+#define HAL_TCL_STATUS_RING_INFO0_GSE_CTRL             GENMASK(3, 0)
+#define HAL_TCL_STATUS_RING_INFO0_GSE_SEL              BIT(4)
+#define HAL_TCL_STATUS_RING_INFO0_CACHE_OP_RES         GENMASK(6, 5)
+#define HAL_TCL_STATUS_RING_INFO0_MSDU_CNT             GENMASK(31, 8)
+
+#define HAL_TCL_STATUS_RING_INFO1_HASH_IDX             GENMASK(19, 0)
+
+#define HAL_TCL_STATUS_RING_INFO2_RING_ID              GENMASK(27, 20)
+#define HAL_TCL_STATUS_RING_INFO2_LOOPING_COUNT                GENMASK(31, 28)
+
+/* TCL status ring descriptor; see the field description below. */
+struct hal_tcl_status_ring {
+       uint32_t info0;
+       uint32_t msdu_byte_count;
+       uint32_t msdu_timestamp;
+       uint32_t meta_data[2];
+       uint32_t info1;
+       uint32_t rsvd0;
+       uint32_t info2;
+} __packed;
+
+/* hal_tcl_status_ring
+ *
+ * gse_ctrl
+ *             GSE control operations. This includes cache operations and table
+ *             entry statistics read/clear operation. Values are defined in
+ *             enum %HAL_TCL_GSE_CTRL.
+ *
+ * gse_sel
+ *             To select the ASE/FSE to do the operation mentioned by GSE_ctrl.
+ *             0: FSE select 1: ASE select
+ *
+ * cache_op_res
+ *             Cache operation result. Values are defined in enum
+ *             %HAL_TCL_CACHE_OP_RES_.
+ *
+ * msdu_cnt
+ * msdu_byte_count
+ *             MSDU count of Entry and MSDU byte count for entry 1.
+ *
+ * hash_indx
+ *             Hash value of the entry in case of search failed or disabled.
+ */
+
+#define HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI      GENMASK(7, 0)
+#define HAL_CE_SRC_DESC_ADDR_INFO_HASH_EN      BIT(8)
+#define HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP    BIT(9)
+#define HAL_CE_SRC_DESC_ADDR_INFO_DEST_SWAP    BIT(10)
+#define HAL_CE_SRC_DESC_ADDR_INFO_GATHER       BIT(11)
+#define HAL_CE_SRC_DESC_ADDR_INFO_LEN          GENMASK(31, 16)
+
+#define HAL_CE_SRC_DESC_META_INFO_DATA         GENMASK(15, 0)
+
+#define HAL_CE_SRC_DESC_FLAGS_RING_ID          GENMASK(27, 20)
+#define HAL_CE_SRC_DESC_FLAGS_LOOP_CNT         HAL_SRNG_DESC_LOOP_CNT
+
+/* Copy engine source ring descriptor; see the field description below. */
+struct hal_ce_srng_src_desc {
+       uint32_t buffer_addr_low;
+       uint32_t buffer_addr_info; /* %HAL_CE_SRC_DESC_ADDR_INFO_ */
+       uint32_t meta_info; /* %HAL_CE_SRC_DESC_META_INFO_ */
+       uint32_t flags; /* %HAL_CE_SRC_DESC_FLAGS_ */
+} __packed;
+
+/*
+ * hal_ce_srng_src_desc
+ *
+ * buffer_addr_lo
+ *             LSB 32 bits of the 40 Bit Pointer to the source buffer
+ *
+ * buffer_addr_hi
+ *             MSB 8 bits of the 40 Bit Pointer to the source buffer
+ *
+ * toeplitz_en
+ *             Enable generation of 32-bit Toeplitz-LFSR hash for
+ *             data transfer. In case of gather field in first source
+ *             ring entry of the gather copy cycle is taken into account.
+ *
+ * src_swap
+ *             Treats source memory organization as big-endian. For
+ *             each dword read (4 bytes), the byte 0 is swapped with byte 3
+ *             and byte 1 is swapped with byte 2.
+ *             In case of gather field in first source ring entry of
+ *             the gather copy cycle is taken into account.
+ *
+ * dest_swap
+ *             Treats destination memory organization as big-endian.
+ *             For each dword write (4 bytes), the byte 0 is swapped with
+ *             byte 3 and byte 1 is swapped with byte 2.
+ *             In case of gather field in first source ring entry of
+ *             the gather copy cycle is taken into account.
+ *
+ * gather
+ *             Enables gather of multiple copy engine source
+ *             descriptors to one destination.
+ *
+ * ce_res_0
+ *             Reserved
+ *
+ *
+ * length
+ *             Length of the buffer in units of octets of the current
+ *             descriptor
+ *
+ * fw_metadata
+ *             Meta data used by FW.
+ *             In case of gather field in first source ring entry of
+ *             the gather copy cycle is taken into account.
+ *
+ * ce_res_1
+ *             Reserved
+ *
+ * ce_res_2
+ *             Reserved
+ *
+ * ring_id
+ *             The buffer pointer ring ID.
+ *             0 refers to the IDLE ring
+ *             1 - N refers to other rings
+ *             Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ *             A count value that indicates the number of times the
+ *             producer of entries into the Ring has looped around the
+ *             ring.
+ *
+ *             At initialization time, this value is set to 0. On the
+ *             first loop, this value is set to 1. After the max value is
+ *             reached allowed by the number of bits for this field, the
+ *             count value continues with 0 again.
+ *
+ *             In case SW is the consumer of the ring entries, it can
+ *             use this field to figure out up to where the producer of
+ *             entries has created new entries. This eliminates the need to
+ *             check where the head pointer of the ring is located once
+ *             the SW starts processing an interrupt indicating that new
+ *             entries have been put into this ring...
+ *
+ *             Also note that SW if it wants only needs to look at the
+ *             LSB bit of this count value.
+ */
+
+#define HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI             GENMASK(7, 0)
+#define HAL_CE_DEST_DESC_ADDR_INFO_RING_ID             GENMASK(27, 20)
+#define HAL_CE_DEST_DESC_ADDR_INFO_LOOP_CNT            HAL_SRNG_DESC_LOOP_CNT
+
+/* Copy engine destination ring descriptor; see description below. */
+struct hal_ce_srng_dest_desc {
+       uint32_t buffer_addr_low;
+       uint32_t buffer_addr_info; /* %HAL_CE_DEST_DESC_ADDR_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dest_desc
+ *
+ * dst_buffer_low
+ *             LSB 32 bits of the 40 Bit Pointer to the Destination
+ *             buffer
+ *
+ * dst_buffer_high
+ *             MSB 8 bits of the 40 Bit Pointer to the Destination
+ *             buffer
+ *
+ * ce_res_4
+ *             Reserved
+ *
+ * ring_id
+ *             The buffer pointer ring ID.
+ *             0 refers to the IDLE ring
+ *             1 - N refers to other rings
+ *             Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ *             A count value that indicates the number of times the
+ *             producer of entries into the Ring has looped around the
+ *             ring.
+ *
+ *             At initialization time, this value is set to 0. On the
+ *             first loop, this value is set to 1. After the max value is
+ *             reached allowed by the number of bits for this field, the
+ *             count value continues with 0 again.
+ *
+ *             In case SW is the consumer of the ring entries, it can
+ *             use this field to figure out up to where the producer of
+ *             entries has created new entries. This eliminates the need to
+ *             check where the head pointer of the ring is located once
+ *             the SW starts processing an interrupt indicating that new
+ *             entries have been put into this ring...
+ *
+ *             Also note that SW if it wants only needs to look at the
+ *             LSB bit of this count value.
+ */
+
+#define HAL_CE_DST_STATUS_DESC_FLAGS_HASH_EN           BIT(8)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_BYTE_SWAP         BIT(9)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_DEST_SWAP         BIT(10)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_GATHER            BIT(11)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_LEN               GENMASK(31, 16)
+
+#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA          GENMASK(15, 0)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_RING_ID       GENMASK(27, 20)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_LOOP_CNT      HAL_SRNG_DESC_LOOP_CNT
+
+/* Copy engine destination status ring descriptor; see description below. */
+struct hal_ce_srng_dst_status_desc {
+       uint32_t flags; /* %HAL_CE_DST_STATUS_DESC_FLAGS_ */
+       uint32_t toeplitz_hash0;
+       uint32_t toeplitz_hash1;
+       uint32_t meta_info; /* HAL_CE_DST_STATUS_DESC_META_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dst_status_desc
+ *
+ * ce_res_5
+ *             Reserved
+ *
+ * toeplitz_en
+ *
+ * src_swap
+ *             Source memory buffer swapped
+ *
+ * dest_swap
+ *             Destination  memory buffer swapped
+ *
+ * gather
+ *             Gather of multiple copy engine source descriptors to one
+ *             destination enabled
+ *
+ * ce_res_6
+ *             Reserved
+ *
+ * length
+ *             Sum of all the Lengths of the source descriptor in the
+ *             gather chain
+ *
+ * toeplitz_hash_0
+ *             32 LS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * toeplitz_hash_1
+ *             32 MS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * fw_metadata
+ *             Meta data used by FW
+ *             In case of gather field in first source ring entry of
+ *             the gather copy cycle is taken into account.
+ *
+ * ce_res_7
+ *             Reserved
+ *
+ * ring_id
+ *             The buffer pointer ring ID.
+ *             0 refers to the IDLE ring
+ *             1 - N refers to other rings
+ *             Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ *             A count value that indicates the number of times the
+ *             producer of entries into the Ring has looped around the
+ *             ring.
+ *
+ *             At initialization time, this value is set to 0. On the
+ *             first loop, this value is set to 1. After the max value is
+ *             reached allowed by the number of bits for this field, the
+ *             count value continues with 0 again.
+ *
+ *             In case SW is the consumer of the ring entries, it can
+ *             use this field to figure out up to where the producer of
+ *             entries has created new entries. This eliminates the need to
+ *             check where the head pointer of the ring is located once
+ *             the SW starts processing an interrupt indicating that new
+ *             entries have been put into this ring...
+ *
+ *             Also note that SW if it wants only needs to look at the
+ *                     LSB bit of this count value.
+ */
+
+#define HAL_TX_RATE_STATS_INFO0_VALID          BIT(0)
+#define HAL_TX_RATE_STATS_INFO0_BW             GENMASK(2, 1)
+#define HAL_TX_RATE_STATS_INFO0_PKT_TYPE       GENMASK(6, 3)
+#define HAL_TX_RATE_STATS_INFO0_STBC           BIT(7)
+#define HAL_TX_RATE_STATS_INFO0_LDPC           BIT(8)
+#define HAL_TX_RATE_STATS_INFO0_SGI            GENMASK(10, 9)
+#define HAL_TX_RATE_STATS_INFO0_MCS            GENMASK(14, 11)
+#define HAL_TX_RATE_STATS_INFO0_OFDMA_TX       BIT(15)
+#define HAL_TX_RATE_STATS_INFO0_TONES_IN_RU    GENMASK(27, 16)
+
+/* Values for HAL_TX_RATE_STATS_INFO0_BW. */
+enum hal_tx_rate_stats_bw {
+       HAL_TX_RATE_STATS_BW_20,
+       HAL_TX_RATE_STATS_BW_40,
+       HAL_TX_RATE_STATS_BW_80,
+       HAL_TX_RATE_STATS_BW_160,
+};
+
+/* Values for HAL_TX_RATE_STATS_INFO0_PKT_TYPE. */
+enum hal_tx_rate_stats_pkt_type {
+       HAL_TX_RATE_STATS_PKT_TYPE_11A,
+       HAL_TX_RATE_STATS_PKT_TYPE_11B,
+       HAL_TX_RATE_STATS_PKT_TYPE_11N,
+       HAL_TX_RATE_STATS_PKT_TYPE_11AC,
+       HAL_TX_RATE_STATS_PKT_TYPE_11AX,
+};
+
+/* Values for HAL_TX_RATE_STATS_INFO0_SGI. */
+enum hal_tx_rate_stats_sgi {
+       HAL_TX_RATE_STATS_SGI_08US,
+       HAL_TX_RATE_STATS_SGI_04US,
+       HAL_TX_RATE_STATS_SGI_16US,
+       HAL_TX_RATE_STATS_SGI_32US,
+};
+
+/* Tx rate statistics; rate fields live in info0 (masks above). */
+struct hal_tx_rate_stats {
+       uint32_t info0;
+       uint32_t tsf;
+} __packed;
+
+/* WBM link descriptor; see description below. */
+struct hal_wbm_link_desc {
+       struct ath12k_buffer_addr buf_addr_info;
+} __packed;
+
+/* hal_wbm_link_desc
+ *
+ *     Producer: WBM
+ *     Consumer: WBM
+ *
+ * buf_addr_info
+ *             Details of the physical address of a buffer or MSDU
+ *             link descriptor.
+ */
+
+/* Module that released a buffer or descriptor to WBM. */
+enum hal_wbm_rel_src_module {
+       HAL_WBM_REL_SRC_MODULE_TQM,
+       HAL_WBM_REL_SRC_MODULE_RXDMA,
+       HAL_WBM_REL_SRC_MODULE_REO,
+       HAL_WBM_REL_SRC_MODULE_FW,
+       HAL_WBM_REL_SRC_MODULE_SW,
+};
+
+enum hal_wbm_rel_desc_type {
+       HAL_WBM_REL_DESC_TYPE_REL_MSDU,
+       HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+       HAL_WBM_REL_DESC_TYPE_MPDU_LINK,
+       HAL_WBM_REL_DESC_TYPE_MSDU_EXT,
+       HAL_WBM_REL_DESC_TYPE_QUEUE_EXT,
+};
+
+/* hal_wbm_rel_desc_type
+ *
+ * msdu_buffer
+ *     The address points to an MSDU buffer
+ *
+ * msdu_link_descriptor
+ *     The address points to an Tx MSDU link descriptor
+ *
+ * mpdu_link_descriptor
+ *     The address points to an MPDU link descriptor
+ *
+ * msdu_ext_descriptor
+ *     The address points to an MSDU extension descriptor
+ *
+ * queue_ext_descriptor
+ *     The address points to a TQM queue extension descriptor. WBM should
+ *     treat this in the same way as a link descriptor.
+ */
+
+/* Values for HAL_WBM_RELEASE_INFO0_BM_ACTION; see the hal_wbm_rel_bm_act notes below. */
+enum hal_wbm_rel_bm_act {
+       HAL_WBM_REL_BM_ACT_PUT_IN_IDLE,
+       HAL_WBM_REL_BM_ACT_REL_MSDU,
+};
+
+/* hal_wbm_rel_bm_act
+ *
+ * put_in_idle_list
+ *     Put the buffer or descriptor back in the idle list. In case of MSDU or
+ *     MDPU link descriptor, BM does not need to check to release any
+ *     individual MSDU buffers.
+ *
+ * release_msdu_list
+ *     This BM action can only be used in combination with desc_type being
+ *     msdu_link_descriptor. Field first_msdu_index points out which MSDU
+ *     pointer in the MSDU link descriptor is the first of an MPDU that is
+ *     released. BM shall release all the MSDU buffers linked to this first
+ *     MSDU buffer pointer. All related MSDU buffer pointer entries shall be
+ *     set to value 0, which represents the 'NULL' pointer. When all MSDU
+ *     buffer pointers in the MSDU link descriptor are 'NULL', the MSDU link
+ *     descriptor itself shall also be released.
+ */
+
+/* Bit-field layouts of the hal_wbm_release_ring info words. */
+#define HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE           GENMASK(2, 0)
+#define HAL_WBM_RELEASE_INFO0_BM_ACTION                        GENMASK(5, 3)
+#define HAL_WBM_RELEASE_INFO0_DESC_TYPE                        GENMASK(8, 6)
+#define HAL_WBM_RELEASE_INFO0_FIRST_MSDU_IDX           GENMASK(12, 9)
+#define HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON       GENMASK(16, 13)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON                GENMASK(18, 17)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE         GENMASK(23, 19)
+#define HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON          GENMASK(25, 24)
+#define HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE           GENMASK(30, 26)
+#define HAL_WBM_RELEASE_INFO0_WBM_INTERNAL_ERROR       BIT(31)
+
+#define HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER                GENMASK(23, 0)
+#define HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT           GENMASK(30, 24)
+
+#define HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI           GENMASK(7, 0)
+#define HAL_WBM_RELEASE_INFO2_SW_REL_DETAILS_VALID     BIT(8)
+#define HAL_WBM_RELEASE_INFO2_FIRST_MSDU               BIT(9)
+#define HAL_WBM_RELEASE_INFO2_LAST_MSDU                        BIT(10)
+#define HAL_WBM_RELEASE_INFO2_MSDU_IN_AMSDU            BIT(11)
+#define HAL_WBM_RELEASE_INFO2_FW_TX_NOTIF_FRAME                BIT(12)
+#define HAL_WBM_RELEASE_INFO2_BUFFER_TIMESTAMP         GENMASK(31, 13)
+
+#define HAL_WBM_RELEASE_INFO3_PEER_ID                  GENMASK(15, 0)
+#define HAL_WBM_RELEASE_INFO3_TID                      GENMASK(19, 16)
+#define HAL_WBM_RELEASE_INFO3_RING_ID                  GENMASK(27, 20)
+#define HAL_WBM_RELEASE_INFO3_LOOPING_COUNT            GENMASK(31, 28)
+
+/* HTT TX completion fields overlaid on info0 for software-based completions. */
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_STATUS           GENMASK(12, 9)
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_REINJ_REASON     GENMASK(16, 13)
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_EXP_FRAME                BIT(17)
+
+/* WBM release ring entry; see the hal_wbm_release_ring notes below. */
+struct hal_wbm_release_ring {
+       struct ath12k_buffer_addr buf_addr_info;
+       uint32_t info0;
+       uint32_t info1;
+       uint32_t info2;
+       struct hal_tx_rate_stats rate_stats;
+       uint32_t info3;
+} __packed;
+
+/* hal_wbm_release_ring
+ *
+ *     Producer: SW/TQM/RXDMA/REO/SWITCH
+ *     Consumer: WBM/SW/FW
+ *
+ * HTT tx status is overlaid on wbm_release ring on 4-byte words 2, 3, 4 and 5
+ * for software based completions.
+ *
+ * buf_addr_info
+ *     Details of the physical address of the buffer or link descriptor.
+ *
+ * release_source_module
+ *     Indicates which module initiated the release of this buffer/descriptor.
+ *     Values are defined in enum %HAL_WBM_REL_SRC_MODULE_.
+ *
+ * bm_action
+ *     Field only valid when the field return_buffer_manager in
+ *     Released_buff_or_desc_addr_info indicates:
+ *             WBM_IDLE_BUF_LIST / WBM_IDLE_DESC_LIST
+ *     Values are defined in enum %HAL_WBM_REL_BM_ACT_.
+ *
+ * buffer_or_desc_type
+ *     Field only valid when WBM is marked as the return_buffer_manager in
+ *     the Released_Buffer_address_info. Indicates that type of buffer or
+ *     descriptor is being released. Values are in enum %HAL_WBM_REL_DESC_TYPE.
+ *
+ * first_msdu_index
+ *     Field only valid for the bm_action release_msdu_list. The index of the
+ *     first MSDU in an MSDU link descriptor all belonging to the same MPDU.
+ *
+ * tqm_release_reason
+ *     Field only valid when Release_source_module is set to release_source_TQM
+ *     Release reasons are defined in enum %HAL_WBM_TQM_REL_REASON_.
+ *
+ * rxdma_push_reason
+ * reo_push_reason
+ *     Indicates why rxdma/reo pushed the frame to this ring and values are
+ *     defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ *     Field only valid when 'rxdma_push_reason' set to 'error_detected'.
+ *     Values are defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * reo_error_code
+ *     Field only valid when 'reo_push_reason' set to 'error_detected'. Values
+ *     are defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * wbm_internal_error
+ *     Is set when WBM got a buffer pointer but the action was to push it to
+ *     the idle link descriptor ring or do link related activity OR
+ *     Is set when WBM got a link buffer pointer but the action was to push it
+ *     to the buffer descriptor ring.
+ *
+ * tqm_status_number
+ *     The value in this field is equal to tqm_cmd_number in TQM command. It is
+ *     used to correlate the status with TQM commands. Only valid when
+ *     release_source_module is TQM.
+ *
+ * transmit_count
+ *     The number of times the frame has been transmitted, valid only when
+ *     release source in TQM.
+ *
+ * ack_frame_rssi
+ *     This field is only valid when the source is TQM. If this frame is
+ *     removed as the result of the reception of an ACK or BA, this field
+ *     indicates the RSSI of the received ACK or BA frame.
+ *
+ * sw_release_details_valid
+ *     This is set when WBM got a 'release_msdu_list' command from TQM and
+ *     return buffer manager is not WBM. WBM will then de-aggregate all MSDUs
+ *     and pass them one at a time on to the 'buffer owner'.
+ *
+ * first_msdu
+ *     Field only valid when SW_release_details_valid is set.
+ *     When set, this MSDU is the first MSDU pointed to in the
+ *     'release_msdu_list' command.
+ *
+ * last_msdu
+ *     Field only valid when SW_release_details_valid is set.
+ *     When set, this MSDU is the last MSDU pointed to in the
+ *     'release_msdu_list' command.
+ *
+ * msdu_part_of_amsdu
+ *     Field only valid when SW_release_details_valid is set.
+ *     When set, this MSDU was part of an A-MSDU in MPDU
+ *
+ * fw_tx_notify_frame
+ *     Field only valid when SW_release_details_valid is set.
+ *
+ * buffer_timestamp
+ *     Field only valid when SW_release_details_valid is set.
+ *     This is the Buffer_timestamp field from the
+ *     Timestamp in units of 1024 us
+ *
+ * struct hal_tx_rate_stats rate_stats
+ *     Details for command execution tracking purposes.
+ *
+ * sw_peer_id
+ * tid
+ *     Field only valid when Release_source_module is set to
+ *     release_source_TQM
+ *
+ *     1) Release of msdu buffer due to drop_frame = 1. Flow is
+ *     not fetched and hence sw_peer_id and tid = 0
+ *
+ *     buffer_or_desc_type = e_num 0
+ *     MSDU_rel_buffertqm_release_reason = e_num 1
+ *     tqm_rr_rem_cmd_rem
+ *
+ *     2) Release of msdu buffer due to Flow is not fetched and
+ *     hence sw_peer_id and tid = 0
+ *
+ *     buffer_or_desc_type = e_num 0
+ *     MSDU_rel_buffertqm_release_reason = e_num 1
+ *     tqm_rr_rem_cmd_rem
+ *
+ *     3) Release of msdu link due to remove_mpdu or acked_mpdu
+ *     command.
+ *
+ *     buffer_or_desc_type = e_num1
+ *     msdu_link_descriptortqm_release_reason can be:e_num 1
+ *     tqm_rr_rem_cmd_reme_num 2 tqm_rr_rem_cmd_tx
+ *     e_num 3 tqm_rr_rem_cmd_notxe_num 4 tqm_rr_rem_cmd_aged
+ *
+ *     This field represents the TID from the TX_MSDU_FLOW
+ *     descriptor or TX_MPDU_QUEUE descriptor
+ *
+ * ring_id
+ *     For debugging.
+ *     This field is filled in by the SRNG module.
+ *     It helps to identify the ring that is being looked at.
+ *
+ * looping_count
+ *     A count value that indicates the number of times the
+ *     producer of entries into the Buffer Manager Ring has looped
+ *     around the ring.
+ *
+ *     At initialization time, this value is set to 0. On the
+ *     first loop, this value is set to 1. After the max value is
+ *     reached allowed by the number of bits for this field, the
+ *     count value continues with 0 again.
+ *
+ *     In case SW is the consumer of the ring entries, it can
+ *     use this field to figure out up to where the producer of
+ *     entries has created new entries. This eliminates the need to
+ *     check where the head pointer' of the ring is located once
+ *     the SW starts processing an interrupt indicating that new
+ *     entries have been put into this ring...
+ *
+ *     Also note that SW if it wants only needs to look at the
+ *     LSB bit of this count value.
+ */
+
+/**
+ * enum hal_wbm_tqm_rel_reason - TQM release reason code
+ * @HAL_WBM_TQM_REL_REASON_FRAME_ACKED: ACK or BACK received for the frame
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU: Command remove_mpdus initiated by SW
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX: Command remove transmitted_mpdus
+ *     initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX: Command remove untransmitted_mpdus
+ *     initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES: Command remove aged msdus or
+ *     mpdus.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1: Remove command initiated by
+ *     fw with fw_reason1.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2: Remove command initiated by
+ *     fw with fw_reason2.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
+ *     fw with fw_reason3.
+ *
+ * Note: the "RESEAON" spelling is preserved from the original enumerator names.
+ */
+enum hal_wbm_tqm_rel_reason {
+       HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
+       HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU,
+       HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX,
+       HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX,
+       HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES,
+       HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
+       HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
+       HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+};
+
+/* A single buffer-address entry on a WBM buffer ring. */
+struct hal_wbm_buffer_ring {
+       struct ath12k_buffer_addr buf_addr_info;
+};
+
+/* Values for HAL_DESC_HDR_INFO0_OWNER. */
+enum hal_desc_owner {
+       HAL_DESC_OWNER_WBM,
+       HAL_DESC_OWNER_SW,
+       HAL_DESC_OWNER_TQM,
+       HAL_DESC_OWNER_RXDMA,
+       HAL_DESC_OWNER_REO,
+       HAL_DESC_OWNER_SWITCH,
+};
+
+/* Values for HAL_DESC_HDR_INFO0_BUF_TYPE. */
+enum hal_desc_buf_type {
+       HAL_DESC_BUF_TYPE_TX_MSDU_LINK,
+       HAL_DESC_BUF_TYPE_TX_MPDU_LINK,
+       HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_HEAD,
+       HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_EXT,
+       HAL_DESC_BUF_TYPE_TX_FLOW,
+       HAL_DESC_BUF_TYPE_TX_BUFFER,
+       HAL_DESC_BUF_TYPE_RX_MSDU_LINK,
+       HAL_DESC_BUF_TYPE_RX_MPDU_LINK,
+       HAL_DESC_BUF_TYPE_RX_REO_QUEUE,
+       HAL_DESC_BUF_TYPE_RX_REO_QUEUE_EXT,
+       HAL_DESC_BUF_TYPE_RX_BUFFER,
+       HAL_DESC_BUF_TYPE_IDLE_LINK,
+};
+
+#define HAL_DESC_REO_OWNED             4
+#define HAL_DESC_REO_QUEUE_DESC                8
+#define HAL_DESC_REO_QUEUE_EXT_DESC    9
+#define HAL_DESC_REO_NON_QOS_TID       16
+
+/* Bit-field layout of hal_desc_header.info0. */
+#define HAL_DESC_HDR_INFO0_OWNER       GENMASK(3, 0)
+#define HAL_DESC_HDR_INFO0_BUF_TYPE    GENMASK(7, 4)
+#define HAL_DESC_HDR_INFO0_DBG_RESERVED        GENMASK(31, 8)
+
+/* Common header identifying the owner and type of a HAL descriptor. */
+struct hal_desc_header {
+       uint32_t info0;
+} __packed;
+
+/* Pointer to the next MPDU link descriptor in an RX MPDU queue. */
+struct hal_rx_mpdu_link_ptr {
+       struct ath12k_buffer_addr addr_info;
+} __packed;
+
+/* One MSDU entry of an RX MSDU link descriptor: buffer address plus MSDU info. */
+struct hal_rx_msdu_details {
+       struct ath12k_buffer_addr buf_addr_info;
+       struct rx_msdu_desc rx_msdu_info;
+} __packed;
+
+/* Bit-field layout of hal_rx_msdu_link.info0. */
+#define HAL_RX_MSDU_LNK_INFO0_RX_QUEUE_NUMBER          GENMASK(15, 0)
+#define HAL_RX_MSDU_LNK_INFO0_FIRST_MSDU_LNK           BIT(16)
+
+/* RX MSDU link descriptor carrying up to six MSDU entries. */
+struct hal_rx_msdu_link {
+       struct hal_desc_header desc_hdr;
+       struct ath12k_buffer_addr buf_addr_info;
+       uint32_t info0;
+       uint32_t pn[4];
+       struct hal_rx_msdu_details msdu_link[6];
+} __packed;
+
+/* Extension of hal_rx_reo_queue; see the hal_rx_reo_queue_ext notes below. */
+struct hal_rx_reo_queue_ext {
+       struct hal_desc_header desc_hdr;
+       uint32_t rsvd;
+       struct hal_rx_mpdu_link_ptr mpdu_link[15];
+} __packed;
+
+/* hal_rx_reo_queue_ext
+ *     Consumer: REO
+ *     Producer: REO
+ *
+ * descriptor_header
+ *     Details about which module owns this struct.
+ *
+ * mpdu_link
+ *     Pointer to the next MPDU_link descriptor in the MPDU queue.
+ */
+
+/* Values for HAL_RX_REO_QUEUE_INFO0_PN_SIZE: PN field width in bits. */
+enum hal_rx_reo_queue_pn_size {
+       HAL_RX_REO_QUEUE_PN_SIZE_24,
+       HAL_RX_REO_QUEUE_PN_SIZE_48,
+       HAL_RX_REO_QUEUE_PN_SIZE_128,
+};
+
+#define HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER               GENMASK(15, 0)
+
+#define HAL_RX_REO_QUEUE_INFO0_VLD                     BIT(0)
+#define HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER  GENMASK(2, 1)
+#define HAL_RX_REO_QUEUE_INFO0_DIS_DUP_DETECTION       BIT(3)
+#define HAL_RX_REO_QUEUE_INFO0_SOFT_REORDER_EN         BIT(4)
+#define HAL_RX_REO_QUEUE_INFO0_AC                      GENMASK(6, 5)
+#define HAL_RX_REO_QUEUE_INFO0_BAR                     BIT(7)
+#define HAL_RX_REO_QUEUE_INFO0_RETRY                   BIT(8)
+#define HAL_RX_REO_QUEUE_INFO0_CHECK_2K_MODE           BIT(9)
+#define HAL_RX_REO_QUEUE_INFO0_OOR_MODE                        BIT(10)
+#define HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE          GENMASK(18, 11)
+#define HAL_RX_REO_QUEUE_INFO0_PN_CHECK                        BIT(19)
+#define HAL_RX_REO_QUEUE_INFO0_EVEN_PN                 BIT(20)
+#define HAL_RX_REO_QUEUE_INFO0_UNEVEN_PN               BIT(21)
+#define HAL_RX_REO_QUEUE_INFO0_PN_HANDLE_ENABLE                BIT(22)
+#define HAL_RX_REO_QUEUE_INFO0_PN_SIZE                 GENMASK(24, 23)
+#define HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG                BIT(25)
+
+#define HAL_RX_REO_QUEUE_INFO1_SVLD                    BIT(0)
+#define HAL_RX_REO_QUEUE_INFO1_SSN                     GENMASK(12, 1)
+#define HAL_RX_REO_QUEUE_INFO1_CURRENT_IDX             GENMASK(20, 13)
+#define HAL_RX_REO_QUEUE_INFO1_SEQ_2K_ERR              BIT(21)
+#define HAL_RX_REO_QUEUE_INFO1_PN_ERR                  BIT(22)
+#define HAL_RX_REO_QUEUE_INFO1_PN_VALID                        BIT(31)
+
+/* Bit-field layout of hal_rx_reo_queue.info2. */
+#define HAL_RX_REO_QUEUE_INFO2_MPDU_COUNT              GENMASK(6, 0)
+/* Was bare "(31, 7)": the missing GENMASK() made any use evaluate as a
+ * comma expression rather than a bit mask. Matches the equivalent field
+ * mask HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT.
+ */
+#define HAL_RX_REO_QUEUE_INFO2_MSDU_COUNT              GENMASK(31, 7)
+
+#define HAL_RX_REO_QUEUE_INFO3_TIMEOUT_COUNT           GENMASK(9, 4)
+#define HAL_RX_REO_QUEUE_INFO3_FWD_DUE_TO_BAR_CNT      GENMASK(15, 10)
+#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT         GENMASK(31, 16)
+
+#define HAL_RX_REO_QUEUE_INFO4_FRAME_IN_ORD_COUNT      GENMASK(23, 0)
+#define HAL_RX_REO_QUEUE_INFO4_BAR_RECVD_COUNT         GENMASK(31, 24)
+
+#define HAL_RX_REO_QUEUE_INFO5_LATE_RX_MPDU_COUNT      GENMASK(11, 0)
+#define HAL_RX_REO_QUEUE_INFO5_WINDOW_JUMP_2K          GENMASK(15, 12)
+#define HAL_RX_REO_QUEUE_INFO5_HOLE_COUNT              GENMASK(31, 16)
+
+/* REO RX reorder queue descriptor; see the hal_rx_reo_queue notes below. */
+struct hal_rx_reo_queue {
+       struct hal_desc_header desc_hdr;
+       uint32_t rx_queue_num;
+       uint32_t info0;
+       uint32_t info1;
+       uint32_t pn[4];
+       uint32_t last_rx_enqueue_timestamp;
+       uint32_t last_rx_dequeue_timestamp;
+       uint32_t next_aging_queue[2];
+       uint32_t prev_aging_queue[2];
+       uint32_t rx_bitmap[8];
+       uint32_t info2;
+       uint32_t info3;
+       uint32_t info4;
+       uint32_t processed_mpdus;
+       uint32_t processed_msdus;
+       uint32_t processed_total_bytes;
+       uint32_t info5;
+       uint32_t rsvd[3];
+       /* 0-3 extension descriptors follow, depending on ba_window_size. */
+       struct hal_rx_reo_queue_ext ext_desc[];
+} __packed;
+
+/* hal_rx_reo_queue
+ *
+ * descriptor_header
+ *     Details about which module owns this struct. Note that sub field
+ *     Buffer_type shall be set to receive_reo_queue_descriptor.
+ *
+ * receive_queue_number
+ *     Indicates the MPDU queue ID to which this MPDU link descriptor belongs.
+ *
+ * vld
+ *     Valid bit indicating a session is established and the queue descriptor
+ *     is valid.
+ * associated_link_descriptor_counter
+ *     Indicates which of the 3 link descriptor counters shall be incremented
+ *     or decremented when link descriptors are added or removed from this
+ *     flow queue.
+ * disable_duplicate_detection
+ *     When set, do not perform any duplicate detection.
+ * soft_reorder_enable
+ *     When set, REO has been instructed to not perform the actual re-ordering
+ *     of frames for this queue, but just to insert the reorder opcodes.
+ * ac
+ *     Indicates the access category of the queue descriptor.
+ * bar
+ *     Indicates if BAR has been received.
+ * retry
+ *     Retry bit is checked if this bit is set.
+ * chk_2k_mode
+ *     Indicates what type of operation is expected from Reo when the received
+ *     frame SN falls within the 2K window.
+ * oor_mode
+ *     Indicates what type of operation is expected when the received frame
+ *     falls within the OOR window.
+ * ba_window_size
+ *     Indicates the negotiated (window size + 1). Max of 256 bits.
+ *
+ *     A value 255 means 256 bitmap, 63 means 64 bitmap, 0 (means non-BA
+ *     session, with window size of 0). The 3 values here are the main values
+ *     validated, but other values should work as well.
+ *
+ *     A BA window size of 0 (=> one frame entry bitmap), means that there is
+ *     no additional rx_reo_queue_ext desc. following rx_reo_queue in memory.
+ *     A BA window size of 1 - 105, means that there is 1 rx_reo_queue_ext.
+ *     A BA window size of 106 - 210, means that there are 2 rx_reo_queue_ext.
+ *     A BA window size of 211 - 256, means that there are 3 rx_reo_queue_ext.
+ * pn_check_needed, pn_shall_be_even, pn_shall_be_uneven, pn_handling_enable,
+ * pn_size
+ *     REO shall perform the PN increment check, even number check, uneven
+ *     number check, PN error check and size of the PN field check.
+ * ignore_ampdu_flag
+ *     REO shall ignore the ampdu_flag on entrance descriptor for this queue.
+ *
+ * svld
+ *     Sequence number in next field is valid one.
+ * ssn
+ *      Starting Sequence number of the session.
+ * current_index
+ *     Points to last forwarded packet
+ * seq_2k_error_detected_flag
+ *     REO has detected a 2k error jump in the sequence number and from that
+ *     moment forward, all new frames are forwarded directly to FW, without
+ *     duplicate detect, reordering, etc.
+ * pn_error_detected_flag
+ *     REO has detected a PN error.
+ */
+
+#define HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI               GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM            BIT(8)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD                     BIT(9)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT      BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION       BIT(11)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN         BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC                      BIT(13)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR                     BIT(14)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY                   BIT(15)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE           BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE                        BIT(17)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE          BIT(18)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK                        BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN                 BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN               BIT(21)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE                BIT(22)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE                 BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG                BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD                    BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN                     BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR              BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_ERR                  BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID                        BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN                      BIT(30)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER             GENMASK(15, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_VLD                         BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER      GENMASK(18, 17)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION           BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN             BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_AC                          GENMASK(22, 21)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_BAR                         BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RETRY                       BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE               BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE                    BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK                    BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN                     BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN                   BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE            BIT(30)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG            BIT(31)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE              GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE                     GENMASK(9, 8)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD                                BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN                         GENMASK(22, 11)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR                  BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR                      BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID                    BIT(25)
+
+/* REO UPDATE_RX_QUEUE command descriptor; field masks in the
+ * HAL_REO_UPD_RX_QUEUE_INFO0/1/2 defines above.
+ */
+struct hal_reo_update_rx_queue {
+       struct hal_reo_cmd_hdr cmd;
+       uint32_t queue_addr_lo;
+       uint32_t info0;
+       uint32_t info1;
+       uint32_t info2;
+       uint32_t pn[4];
+} __packed;
+
+/* Bit-field layout of hal_reo_unblock_cache.info0. */
+#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE                BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX       GENMASK(2, 1)
+
+/* REO UNBLOCK_CACHE command descriptor. */
+struct hal_reo_unblock_cache {
+       struct hal_reo_cmd_hdr cmd;
+       uint32_t info0;
+       uint32_t rsvd[7];
+} __packed;
+
+/* Values for HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS. */
+enum hal_reo_exec_status {
+       HAL_REO_EXEC_STATUS_SUCCESS,
+       HAL_REO_EXEC_STATUS_BLOCKED,
+       HAL_REO_EXEC_STATUS_FAILED,
+       HAL_REO_EXEC_STATUS_RESOURCE_BLOCKED,
+};
+
+/* Bit-field layout of hal_reo_status_hdr.info0. */
+#define HAL_REO_STATUS_HDR_INFO0_STATUS_NUM    GENMASK(15, 0)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_TIME     GENMASK(25, 16)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS   GENMASK(27, 26)
+
+#define HAL_HASH_ROUTING_RING_TCL 0
+#define HAL_HASH_ROUTING_RING_SW1 1
+#define HAL_HASH_ROUTING_RING_SW2 2
+#define HAL_HASH_ROUTING_RING_SW3 3
+#define HAL_HASH_ROUTING_RING_SW4 4
+#define HAL_HASH_ROUTING_RING_REL 5
+#define HAL_HASH_ROUTING_RING_FW  6
+
+/* Header common to all REO status descriptors; see the notes below. */
+struct hal_reo_status_hdr {
+       uint32_t info0;
+       uint32_t timestamp;
+} __packed;
+
+/* hal_reo_status_hdr
+ *             Producer: REO
+ *             Consumer: SW
+ *
+ * status_num
+ *             The value in this field is equal to value of the reo command
+ *             number. This field helps to correlate the statuses with the REO
+ *             commands.
+ *
+ * execution_time (in us)
+ *             The amount of time REO took to execute the command. Note that
+ *             this time does not include the duration of the command waiting
+ *             in the command ring, before the execution started.
+ *
+ * execution_status
+ *             Execution status of the command. Values are defined in
+ *             enum %HAL_REO_EXEC_STATUS_.
+ */
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN               GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX           GENMASK(19, 12)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT                GENMASK(6, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT                GENMASK(31, 7)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT     GENMASK(9, 4)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT                GENMASK(15, 10)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT   GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT         GENMASK(23, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT      GENMASK(31, 24)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU      GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K      GENMASK(15, 12)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT                GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT       GENMASK(31, 28)
+
+/* Status returned for a REO GET_QUEUE_STATS command; see the notes below. */
+struct hal_reo_get_queue_stats_status {
+       struct hal_reo_status_hdr hdr;
+       uint32_t info0;
+       uint32_t pn[4];
+       uint32_t last_rx_enqueue_timestamp;
+       uint32_t last_rx_dequeue_timestamp;
+       uint32_t rx_bitmap[8];
+       uint32_t info1;
+       uint32_t info2;
+       uint32_t info3;
+       uint32_t num_mpdu_frames;
+       uint32_t num_msdu_frames;
+       uint32_t total_bytes;
+       uint32_t info4;
+       uint32_t info5;
+} __packed;
+
+/* hal_reo_get_queue_stats_status
+ *             Producer: REO
+ *             Consumer: SW
+ *
+ * status_hdr
+ *             Details that can link this status with the original command. It
+ *             also contains info on how long REO took to execute this command.
+ *
+ * ssn
+ *             Starting Sequence number of the session, this changes whenever
+ *             window moves (can be filled by SW then maintained by REO).
+ *
+ * current_index
+ *             Points to last forwarded packet.
+ *
+ * pn
+ *             Bits of the PN number.
+ *
+ * last_rx_enqueue_timestamp
+ * last_rx_dequeue_timestamp
+ *             Timestamp of arrival of the last MPDU for this queue and
+ *             Timestamp of forwarding an MPDU accordingly.
+ *
+ * rx_bitmap
+ *             When a bit is set, the corresponding frame is currently held
+ *             in the re-order queue. The bitmap is fully managed by HW.
+ *
+ * current_mpdu_count
+ * current_msdu_count
+ *             The number of MPDUs and MSDUs in the queue.
+ *
+ * timeout_count
+ *             The number of times REO started forwarding frames even though
+ *             there is a hole in the bitmap. Forwarding reason is timeout.
+ *
+ * forward_due_to_bar_count
+ *             The number of times REO started forwarding frames even though
+ *             there is a hole in the bitmap. Fwd reason is reception of BAR.
+ *
+ * duplicate_count
+ *             The number of duplicate frames that have been detected.
+ *
+ * frames_in_order_count
+ *             The number of frames that have been received in order (without
+ *             a hole that prevented them from being forwarded immediately).
+ *
+ * bar_received_count
+ *             The number of times a BAR frame is received.
+ *
+ * mpdu_frames_processed_count
+ * msdu_frames_processed_count
+ *             The total number of MPDU/MSDU frames that have been processed.
+ *
+ * total_bytes
+ *             An approximation of the number of bytes received for this queue.
+ *
+ * late_receive_mpdu_count
+ *             The number of MPDUs received after the window had already moved
+ *             on. The 'late' sequence window is defined as
+ *             (Window SSN - 256) - (Window SSN - 1).
+ *
+ * window_jump_2k
+ *             The number of times the window moved more than 2K
+ *
+ * hole_count
+ *             The number of times a hole was created in the receive bitmap.
+ *
+ * looping_count
+ *             A count value that indicates the number of times the producer of
+ *             entries into this Ring has looped around the ring.
+ */
+
+/* Looping count, in the last info word of each REO status descriptor. */
+#define HAL_REO_STATUS_LOOP_CNT                        GENMASK(31, 28)
+
+/* Bit-field layout of hal_reo_flush_queue_status.info0/info1. */
+#define HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED BIT(0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_RSVD         GENMASK(31, 1)
+#define HAL_REO_FLUSH_QUEUE_INFO1_RSVD         GENMASK(27, 0)
+
+/* Status returned for a REO FLUSH_QUEUE command; see the notes below. */
+struct hal_reo_flush_queue_status {
+       struct hal_reo_status_hdr hdr;
+       uint32_t info0;
+       uint32_t rsvd0[21];
+       uint32_t info1;
+} __packed;
+
+/* hal_reo_flush_queue_status
+ *             Producer: REO
+ *             Consumer: SW
+ *
+ * status_hdr
+ *             Details that can link this status with the original command. It
+ *             also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ *             Status of blocking resource
+ *
+ *             0 - No error has been detected while executing this command
+ *             1 - Error detected. The resource to be used for blocking was
+ *                 already in use.
+ *
+ * looping_count
+ *             A count value that indicates the number of times the producer of
+ *             entries into this Ring has looped around the ring.
+ */
+
+/* Bit-field layout of hal_reo_flush_cache_status.info0. */
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR                        BIT(0)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE                GENMASK(2, 1)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT      BIT(8)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE       GENMASK(11, 9)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID       GENMASK(15, 12)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR             GENMASK(17, 16)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT           GENMASK(25, 18)
+
+/* Status returned for a REO FLUSH_CACHE command; see the notes below. */
+struct hal_reo_flush_cache_status {
+       struct hal_reo_status_hdr hdr;
+       uint32_t info0;
+       uint32_t rsvd0[21];
+       uint32_t info1;
+} __packed;
+
+/* hal_reo_flush_cache_status
+ *             Producer: REO
+ *             Consumer: SW
+ *
+ * status_hdr
+ *             Details that can link this status with the original command. It
+ *             also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ *             Status for blocking resource handling
+ *
+ *             0 - No error has been detected while executing this command
+ *             1 - An error in the blocking resource management was detected
+ *
+ * block_error_details
+ *             only valid when error_detected is set
+ *
+ *             0 - No blocking related errors found
+ *             1 - Blocking resource is already in use
+ *             2 - Resource requested to be unblocked, was not blocked
+ *
+ * cache_controller_flush_status_hit
+ *             The status that the cache controller returned on executing the
+ *             flush command.
+ *
+ *             0 - miss; 1 - hit
+ *
+ * cache_controller_flush_status_desc_type
+ *             Flush descriptor type
+ *
+ * cache_controller_flush_status_client_id
+ *             Module who made the flush request
+ *
+ *             In REO, this is always 0
+ *
+ * cache_controller_flush_status_error
+ *             Error condition
+ *
+ *             0 - No error found
+ *             1 - HW interface is still busy
+ *             2 - Line currently locked. Used for one line flush command
+ *             3 - At least one line is still locked.
+ *                 Used for cache flush command.
+ *
+ * cache_controller_flush_count
+ *             The number of lines that were actually flushed out
+ *
+ * looping_count
+ *             A count value that indicates the number of times the producer of
+ *             entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR      BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE                BIT(1)
+
+/* REO "unblock cache" command status entry, written by REO to the status ring. */
+struct hal_reo_unblock_cache_status {
+       struct hal_reo_status_hdr hdr;  /* links status to the original command */
+       uint32_t info0;                 /* HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_* bits */
+       uint32_t rsvd0[21];
+       uint32_t info1;                 /* presumably holds looping_count (see below) — confirm against ring parser */
+} __packed;
+
+/* hal_reo_unblock_cache_status
+ *             Producer: REO
+ *             Consumer: SW
+ *
+ * status_hdr
+ *             Details that can link this status with the original command. It
+ *             also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ *             0 - No error has been detected while executing this command
+ *             1 - The blocking resource was not in use, and therefore it could
+ *                 not be unblocked.
+ *
+ * unblock_type
+ *             Reference to the type of unblock command
+ *             0 - Unblock a blocking resource
+ *             1 - The entire cache usage is unblock
+ *
+ * looping_count
+ *             A count value that indicates the number of times the producer of
+ *             entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR              BIT(0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY          BIT(1)
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT      GENMASK(15, 0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT       GENMASK(31, 16)
+
+/* REO "flush timeout list" command status entry (REO status ring). */
+struct hal_reo_flush_timeout_list_status {
+       struct hal_reo_status_hdr hdr;  /* links status to the original command */
+       uint32_t info0;                 /* HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_* bits */
+       uint32_t info1;                 /* HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_* fields */
+       uint32_t rsvd0[20];
+       uint32_t info2;                 /* presumably holds looping_count (see below) — confirm against ring parser */
+} __packed;
+
+/* hal_reo_flush_timeout_list_status
+ *             Producer: REO
+ *             Consumer: SW
+ *
+ * status_hdr
+ *             Details that can link this status with the original command. It
+ *             also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ *             0 - No error has been detected while executing this command
+ *             1 - Command not properly executed and returned with error
+ *
+ * timeout_list_empty
+ *             When set, REO has depleted the timeout list and all entries are
+ *             gone.
+ *
+ * release_desc_count
+ *             Producer: SW; Consumer: REO
+ *             The number of link descriptor released
+ *
+ * forward_buf_count
+ *             Producer: SW; Consumer: REO
+ *             The number of buffers forwarded to the REO destination rings
+ *
+ * looping_count
+ *             A count value that indicates the number of times the producer of
+ *             entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX          GENMASK(1, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0    GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1    GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2    GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(25, 0)
+
+/* REO "descriptor threshold reached" status entry (REO status ring). */
+struct hal_reo_desc_thresh_reached_status {
+       struct hal_reo_status_hdr hdr;  /* links status to the original command */
+       uint32_t info0;                 /* HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX */
+       uint32_t info1;                 /* HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0 */
+       uint32_t info2;                 /* HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1 */
+       uint32_t info3;                 /* HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2 */
+       uint32_t info4;                 /* HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM */
+       uint32_t rsvd0[17];
+       uint32_t info5;                 /* presumably holds looping_count (see below) — confirm against ring parser */
+} __packed;
+
+/* hal_reo_desc_thresh_reached_status
+ *             Producer: REO
+ *             Consumer: SW
+ *
+ * status_hdr
+ *             Details that can link this status with the original command. It
+ *             also contains info on how long REO took to execute this command.
+ *
+ * threshold_index
+ *             The index of the threshold register whose value got reached
+ *
+ * link_descriptor_counter0
+ * link_descriptor_counter1
+ * link_descriptor_counter2
+ * link_descriptor_counter_sum
+ *             Value of the respective counters at generation of this message
+ *
+ * looping_count
+ *             A count value that indicates the number of times the producer of
+ *             entries into this Ring has looped around the ring.
+ */
+
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF
+
+#define HAL_TX_ADDRX_EN                        1
+#define HAL_TX_ADDRY_EN                        2
+
+#define HAL_TX_ADDR_SEARCH_DEFAULT     0
+#define HAL_TX_ADDR_SEARCH_INDEX       1
+
+/* 
+ * Copy Engine
+ */
+
+#define CE_COUNT_MAX 12
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA 2
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR               8
+
+/* Host software's Copy Engine configuration. */
+#ifdef __BIG_ENDIAN
+#define CE_ATTR_FLAGS CE_ATTR_BYTE_SWAP_DATA
+#else
+#define CE_ATTR_FLAGS 0
+#endif
+
+/* Threshold to poll for tx completion in case of Interrupt disabled CE's */
+#define ATH12K_CE_USAGE_THRESHOLD 32
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE           0
+#define PIPEDIR_IN             1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT            2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT          3 /* bidirectional */
+#define PIPEDIR_INOUT_H2H      4 /* bidirectional, host to host */
+
+/* CE address/mask */
+#define CE_HOST_IE_ADDRESS     0x00A1803C
+#define CE_HOST_IE_2_ADDRESS   0x00A18040
+#define CE_HOST_IE_3_ADDRESS   CE_HOST_IE_ADDRESS
+
+/* CE IE registers are different for IPQ5018 */
+#define CE_HOST_IPQ5018_IE_ADDRESS             0x0841804C
+#define CE_HOST_IPQ5018_IE_2_ADDRESS           0x08418050
+#define CE_HOST_IPQ5018_IE_3_ADDRESS           CE_HOST_IPQ5018_IE_ADDRESS
+
+#define CE_HOST_IE_3_SHIFT     0xC
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+
+/*
+ * Establish a mapping between a service/direction and a pipe.
+ * Configuration information for a Copy Engine pipe and services.
+ * Passed from Host to Target through QMI message and must be in
+ * little endian format.
+ */
+struct service_to_pipe {
+       uint32_t service_id;    /* HTC service (ATH12K_HTC_SVC_ID_*) */
+       uint32_t pipedir;       /* PIPEDIR_* direction, relative to host */
+       uint32_t pipenum;       /* copy engine pipe number */
+};
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target through QMI message during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+       uint32_t pipenum;       /* copy engine pipe number */
+       uint32_t pipedir;       /* PIPEDIR_* direction, relative to host */
+       uint32_t nentries;      /* number of ring entries */
+       uint32_t nbytes_max;    /* maximum transfer size per entry */
+       uint32_t flags;         /* CE_ATTR_* attribute flags */
+       uint32_t reserved;
+};
+
+/*
+ * HTC
+ */
+
+#define HTC_HDR_ENDPOINTID                       GENMASK(7, 0)
+#define HTC_HDR_FLAGS                            GENMASK(15, 8)
+#define HTC_HDR_PAYLOADLEN                       GENMASK(31, 16)
+#define HTC_HDR_CONTROLBYTES0                    GENMASK(7, 0)
+#define HTC_HDR_CONTROLBYTES1                    GENMASK(15, 8)
+#define HTC_HDR_RESERVED                         GENMASK(31, 16)
+
+#define HTC_SVC_MSG_SERVICE_ID                   GENMASK(31, 16)
+#define HTC_SVC_MSG_CONNECTIONFLAGS              GENMASK(15, 0)
+#define HTC_SVC_MSG_SERVICEMETALENGTH            GENMASK(23, 16)
+#define HTC_READY_MSG_CREDITCOUNT                GENMASK(31, 16)
+#define HTC_READY_MSG_CREDITSIZE                 GENMASK(15, 0)
+#define HTC_READY_MSG_MAXENDPOINTS               GENMASK(23, 16)
+
+#define HTC_READY_EX_MSG_HTCVERSION              GENMASK(7, 0)
+#define HTC_READY_EX_MSG_MAXMSGSPERHTCBUNDLE     GENMASK(15, 8)
+
+#define HTC_SVC_RESP_MSG_SERVICEID           GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_STATUS              GENMASK(7, 0)
+#define HTC_SVC_RESP_MSG_ENDPOINTID          GENMASK(15, 8)
+#define HTC_SVC_RESP_MSG_MAXMSGSIZE          GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_SERVICEMETALENGTH   GENMASK(7, 0)
+
+#define HTC_MSG_MESSAGEID                        GENMASK(15, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_SETUPFLAGS     GENMASK(31, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_MAXMSGSPERBUNDLEDRECV      GENMASK(7, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD0          GENMASK(15, 8)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD1          GENMASK(23, 16)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD2          GENMASK(31, 24)
+
+enum ath12k_htc_tx_flags {
+       ATH12K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+       ATH12K_HTC_FLAG_SEND_BUNDLE        = 0x02
+};
+
+enum ath12k_htc_rx_flags {
+       ATH12K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+       ATH12K_HTC_FLAG_BUNDLE_MASK     = 0xF0
+};
+
+
+/* HTC frame header; little-endian on the wire. */
+struct ath12k_htc_hdr {
+       uint32_t htc_info;      /* HTC_HDR_ENDPOINTID/FLAGS/PAYLOADLEN fields */
+       uint32_t ctrl_info;     /* HTC_HDR_CONTROLBYTES0/1 and reserved bits */
+} __packed __aligned(4);
+
+/* Message IDs carried in HTC control messages (HTC_MSG_MESSAGEID field). */
+enum ath12k_htc_msg_id {
+       ATH12K_HTC_MSG_READY_ID                = 1,
+       ATH12K_HTC_MSG_CONNECT_SERVICE_ID      = 2,
+       ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+       ATH12K_HTC_MSG_SETUP_COMPLETE_ID       = 4,
+       ATH12K_HTC_MSG_SETUP_COMPLETE_EX_ID    = 5,
+       ATH12K_HTC_MSG_SEND_SUSPEND_COMPLETE   = 6,
+       ATH12K_HTC_MSG_NACK_SUSPEND            = 7,
+       ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID  = 8,
+};
+
+enum ath12k_htc_version {
+       ATH12K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+       ATH12K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+#define ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK GENMASK(1, 0)
+#define ATH12K_HTC_CONN_FLAGS_RECV_ALLOC GENMASK(15, 8)
+
+enum ath12k_htc_conn_flags {
+       ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH    = 0x0,
+       ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF      = 0x1,
+       ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+       ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY         = 0x3,
+       ATH12K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE         = 0x4,
+       ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL      = 0x8,
+};
+
+enum ath12k_htc_conn_svc_status {
+       ATH12K_HTC_CONN_SVC_STATUS_SUCCESS      = 0,
+       ATH12K_HTC_CONN_SVC_STATUS_NOT_FOUND    = 1,
+       ATH12K_HTC_CONN_SVC_STATUS_FAILED       = 2,
+       ATH12K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+       ATH12K_HTC_CONN_SVC_STATUS_NO_MORE_EP   = 4
+};
+
+struct ath12k_htc_ready {
+       uint32_t id_credit_count;
+       uint32_t size_ep;
+} __packed;
+
+struct ath12k_htc_ready_extended {
+       struct ath12k_htc_ready base;
+       uint32_t ver_bundle;
+} __packed;
+
+struct ath12k_htc_conn_svc {
+       uint32_t msg_svc_id;
+       uint32_t flags_len;
+} __packed;
+
+/* Response to an HTC connect-service request. */
+struct ath12k_htc_conn_svc_resp {
+       uint32_t msg_svc_id;    /* HTC_SVC_RESP_MSG_SERVICEID/STATUS fields */
+       uint32_t flags_len;     /* HTC_SVC_RESP_MSG_ENDPOINTID/MAXMSGSIZE fields */
+       uint32_t svc_meta_pad;  /* HTC_SVC_RESP_MSG_SERVICEMETALENGTH field */
+} __packed;
+
+#define ATH12K_GLOBAL_DISABLE_CREDIT_FLOW BIT(1)
+
+/* Extended HTC setup-complete message sent by the host. */
+struct ath12k_htc_setup_complete_extended {
+       uint32_t msg_id;        /* HTC_MSG_MESSAGEID field */
+       uint32_t flags;         /* HTC_SETUP_COMPLETE_EX_MSG_SETUPFLAGS */
+       uint32_t max_msgs_per_bundled_recv;
+} __packed;
+
+struct ath12k_htc_msg {
+       uint32_t msg_svc_id;
+       uint32_t flags_len;
+} __packed __aligned(4);
+
+enum ath12k_htc_record_id {
+       ATH12K_HTC_RECORD_NULL    = 0,
+       ATH12K_HTC_RECORD_CREDITS = 1
+};
+
+struct ath12k_htc_record_hdr {
+       uint8_t id; /* @enum ath12k_htc_record_id */
+       uint8_t len;
+       uint8_t pad0;
+       uint8_t pad1;
+} __packed;
+
+struct ath12k_htc_credit_report {
+       uint8_t eid; /* @enum ath12k_htc_ep_id */
+       uint8_t credits;
+       uint8_t pad0;
+       uint8_t pad1;
+} __packed;
+
+/* HTC trailer record: header followed by record-specific payload. */
+struct ath12k_htc_record {
+       struct ath12k_htc_record_hdr hdr;
+       union {
+               struct ath12k_htc_credit_report credit_report[0]; /* when hdr.id == ATH12K_HTC_RECORD_CREDITS */
+               uint8_t payload[0];
+       };
+} __packed __aligned(4);
+
+/* note: the trailer offset is dynamic depending
+ * on payload length. this is only a struct layout draft
+ */
+struct ath12k_htc_frame {
+       struct ath12k_htc_hdr hdr;
+       union {
+               struct ath12k_htc_msg msg;      /* control-endpoint messages */
+               uint8_t payload[0];             /* data-endpoint payload */
+       };
+       struct ath12k_htc_record trailer[0];    /* actual offset depends on payload length */
+} __packed __aligned(4);
+
+enum ath12k_htc_svc_gid {
+       ATH12K_HTC_SVC_GRP_RSVD = 0,
+       ATH12K_HTC_SVC_GRP_WMI = 1,
+       ATH12K_HTC_SVC_GRP_NMI = 2,
+       ATH12K_HTC_SVC_GRP_HTT = 3,
+       ATH12K_HTC_SVC_GRP_CFG = 4,
+       ATH12K_HTC_SVC_GRP_IPA = 5,
+       ATH12K_HTC_SVC_GRP_PKTLOG = 6,
+
+       ATH12K_HTC_SVC_GRP_TEST = 254,
+       ATH12K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+       (int)(((int)(group) << 8) | (int)(idx))
+
+enum ath12k_htc_svc_id {
+       /* NOTE: service ID of 0x0000 is reserved and should never be used */
+       ATH12K_HTC_SVC_ID_RESERVED      = 0x0000,
+       ATH12K_HTC_SVC_ID_UNUSED        = ATH12K_HTC_SVC_ID_RESERVED,
+
+       ATH12K_HTC_SVC_ID_RSVD_CTRL     = SVC(ATH12K_HTC_SVC_GRP_RSVD, 1),
+       ATH12K_HTC_SVC_ID_WMI_CONTROL   = SVC(ATH12K_HTC_SVC_GRP_WMI, 0),
+       ATH12K_HTC_SVC_ID_WMI_DATA_BE   = SVC(ATH12K_HTC_SVC_GRP_WMI, 1),
+       ATH12K_HTC_SVC_ID_WMI_DATA_BK   = SVC(ATH12K_HTC_SVC_GRP_WMI, 2),
+       ATH12K_HTC_SVC_ID_WMI_DATA_VI   = SVC(ATH12K_HTC_SVC_GRP_WMI, 3),
+       ATH12K_HTC_SVC_ID_WMI_DATA_VO   = SVC(ATH12K_HTC_SVC_GRP_WMI, 4),
+       ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1 = SVC(ATH12K_HTC_SVC_GRP_WMI, 5),
+       ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 = SVC(ATH12K_HTC_SVC_GRP_WMI, 6),
+
+       ATH12K_HTC_SVC_ID_NMI_CONTROL   = SVC(ATH12K_HTC_SVC_GRP_NMI, 0),
+       ATH12K_HTC_SVC_ID_NMI_DATA      = SVC(ATH12K_HTC_SVC_GRP_NMI, 1),
+
+       ATH12K_HTC_SVC_ID_HTT_DATA_MSG  = SVC(ATH12K_HTC_SVC_GRP_HTT, 0),
+
+       /* raw stream service (i.e. flash, tcmd, calibration apps) */
+       ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH12K_HTC_SVC_GRP_TEST, 0),
+       ATH12K_HTC_SVC_ID_IPA_TX = SVC(ATH12K_HTC_SVC_GRP_IPA, 0),
+       ATH12K_HTC_SVC_ID_PKT_LOG = SVC(ATH12K_HTC_SVC_GRP_PKTLOG, 0),
+};
+
+#undef SVC
+
+/* HTC endpoint numbers; endpoint 0 is the control endpoint per the
+ * ATH12K_HTC_SVC_ID_RSVD_CTRL convention above. */
+enum ath12k_htc_ep_id {
+       ATH12K_HTC_EP_UNUSED = -1,      /* sentinel: endpoint not assigned */
+       ATH12K_HTC_EP_0 = 0,
+       ATH12K_HTC_EP_1 = 1,
+       ATH12K_HTC_EP_2,
+       ATH12K_HTC_EP_3,
+       ATH12K_HTC_EP_4,
+       ATH12K_HTC_EP_5,
+       ATH12K_HTC_EP_6,
+       ATH12K_HTC_EP_7,
+       ATH12K_HTC_EP_8,
+       ATH12K_HTC_EP_COUNT,
+};
+
+/*
+ * hw.h
+ */
+
+/* Target configuration defines */
+
+/* Num VDEVS per radio */
+#define TARGET_NUM_VDEVS(sc)   (sc->hw_params.num_vdevs)
+
+#define TARGET_NUM_PEERS_PDEV(sc) (sc->hw_params.num_peers + TARGET_NUM_VDEVS(sc))
+
+/* Num of peers for Single Radio mode */
+#define TARGET_NUM_PEERS_SINGLE(sc) (TARGET_NUM_PEERS_PDEV(sc))
+
+/* Num of peers for DBS */
+#define TARGET_NUM_PEERS_DBS(sc) (2 * TARGET_NUM_PEERS_PDEV(sc))
+
+/* Num of peers for DBS_SBS */
+#define TARGET_NUM_PEERS_DBS_SBS(sc)   (3 * TARGET_NUM_PEERS_PDEV(sc))
+
+/* Max num of stations (per radio) */
+#define TARGET_NUM_STATIONS(sc)        (sc->hw_params.num_peers)
+
+#define TARGET_NUM_PEERS(sc, x)        TARGET_NUM_PEERS_##x(sc)
+#define TARGET_NUM_PEER_KEYS   2
+#define TARGET_NUM_TIDS(sc, x) (2 * TARGET_NUM_PEERS(sc, x) +  \
+                                4 * TARGET_NUM_VDEVS(sc) + 8)
+
+#define TARGET_AST_SKID_LIMIT  16
+#define TARGET_NUM_OFFLD_PEERS 4
+#define TARGET_NUM_OFFLD_REORDER_BUFFS 4
+
+#define TARGET_TX_CHAIN_MASK   (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_CHAIN_MASK   (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_TIMEOUT_LO_PRI       100
+#define TARGET_RX_TIMEOUT_HI_PRI       40
+
+#define TARGET_DECAP_MODE_RAW          0
+#define TARGET_DECAP_MODE_NATIVE_WIFI  1
+#define TARGET_DECAP_MODE_ETH          2
+
+#define TARGET_SCAN_MAX_PENDING_REQS   4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV  3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV   3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES    8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV    3
+#define TARGET_NUM_MCAST_GROUPS                12
+#define TARGET_NUM_MCAST_TABLE_ELEMS   64
+#define TARGET_MCAST2UCAST_MODE                2
+#define TARGET_TX_DBG_LOG_SIZE         1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_VOW_CONFIG              0
+#define TARGET_NUM_MSDU_DESC           (2500)
+#define TARGET_MAX_FRAG_ENTRIES                6
+#define TARGET_MAX_BCN_OFFLD           16
+#define TARGET_NUM_WDS_ENTRIES         32
+#define TARGET_DMA_BURST_SIZE          1
+#define TARGET_RX_BATCHMODE            1
+#define TARGET_EMA_MAX_PROFILE_PERIOD  8
+
+#define ATH12K_HW_MAX_QUEUES           4
+#define ATH12K_QUEUE_LEN               4096
+
+#define ATH12k_HW_RATECODE_CCK_SHORT_PREAM_MASK  0x4
+
+enum ath12k_hw_rate_cck {
+       ATH12K_HW_RATE_CCK_LP_11M = 0,
+       ATH12K_HW_RATE_CCK_LP_5_5M,
+       ATH12K_HW_RATE_CCK_LP_2M,
+       ATH12K_HW_RATE_CCK_LP_1M,
+       ATH12K_HW_RATE_CCK_SP_11M,
+       ATH12K_HW_RATE_CCK_SP_5_5M,
+       ATH12K_HW_RATE_CCK_SP_2M,
+};
+
+enum ath12k_hw_rate_ofdm {
+       ATH12K_HW_RATE_OFDM_48M = 0,
+       ATH12K_HW_RATE_OFDM_24M,
+       ATH12K_HW_RATE_OFDM_12M,
+       ATH12K_HW_RATE_OFDM_6M,
+       ATH12K_HW_RATE_OFDM_54M,
+       ATH12K_HW_RATE_OFDM_36M,
+       ATH12K_HW_RATE_OFDM_18M,
+       ATH12K_HW_RATE_OFDM_9M,
+};
+
+enum ath12k_bus {
+       ATH12K_BUS_AHB,
+       ATH12K_BUS_PCI,
+};
+
+#define ATH12K_EXT_IRQ_GRP_NUM_MAX 11
+
+/*
+ * rx_desc.h
+ */
+
+enum rx_desc_rxpcu_filter {
+       RX_DESC_RXPCU_FILTER_PASS,
+       RX_DESC_RXPCU_FILTER_MONITOR_CLIENT,
+       RX_DESC_RXPCU_FILTER_MONITOR_OTHER,
+};
+
+/* rxpcu_filter_pass
+ *             This MPDU passed the normal frame filter programming of rxpcu.
+ *
+ * rxpcu_filter_monitor_client
+ *              This MPDU did not pass the regular frame filter and would
+ *              have been dropped, were it not for the frame fitting into the
+ *              'monitor_client' category.
+ *
+ * rxpcu_filter_monitor_other
+ *             This MPDU did not pass the regular frame filter and also did
+ *             not pass the rxpcu_monitor_client filter. It would have been
+ *             dropped except that it did pass the 'monitor_other' category.
+ */
+
+#define RX_DESC_INFO0_RXPCU_MPDU_FITLER        GENMASK(1, 0)
+#define RX_DESC_INFO0_SW_FRAME_GRP_ID  GENMASK(8, 2)
+
+enum rx_desc_sw_frame_grp_id {
+       RX_DESC_SW_FRAME_GRP_ID_NDP_FRAME,
+       RX_DESC_SW_FRAME_GRP_ID_MCAST_DATA,
+       RX_DESC_SW_FRAME_GRP_ID_UCAST_DATA,
+       RX_DESC_SW_FRAME_GRP_ID_NULL_DATA,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_0000,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_0001,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_0010,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_0011,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_0100,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_0101,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_0110,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_0111,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_1000,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_1001,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_1010,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_1011,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_1100,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_1101,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_1110,
+       RX_DESC_SW_FRAME_GRP_ID_MGMT_1111,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_0000,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_0001,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_0010,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_0011,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_0100,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_0101,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_0110,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_0111,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_1000,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_1001,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_1010,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_1011,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_1100,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_1101,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_1110,
+       RX_DESC_SW_FRAME_GRP_ID_CTRL_1111,
+       RX_DESC_SW_FRAME_GRP_ID_UNSUPPORTED,
+       RX_DESC_SW_FRAME_GRP_ID_PHY_ERR,
+};
+
+#define DP_MAX_NWIFI_HDR_LEN   30
+
+#define DP_RX_MPDU_ERR_FCS                     BIT(0)
+#define DP_RX_MPDU_ERR_DECRYPT                 BIT(1)
+#define DP_RX_MPDU_ERR_TKIP_MIC                        BIT(2)
+#define DP_RX_MPDU_ERR_AMSDU_ERR               BIT(3)
+#define DP_RX_MPDU_ERR_OVERFLOW                        BIT(4)
+#define DP_RX_MPDU_ERR_MSDU_LEN                        BIT(5)
+#define DP_RX_MPDU_ERR_MPDU_LEN                        BIT(6)
+#define DP_RX_MPDU_ERR_UNENCRYPTED_FRAME       BIT(7)
+
+enum dp_rx_decap_type {
+       DP_RX_DECAP_TYPE_RAW,
+       DP_RX_DECAP_TYPE_NATIVE_WIFI,
+       DP_RX_DECAP_TYPE_ETHERNET2_DIX,
+       DP_RX_DECAP_TYPE_8023,
+};
+
+enum rx_desc_decap_type {
+       RX_DESC_DECAP_TYPE_RAW,
+       RX_DESC_DECAP_TYPE_NATIVE_WIFI,
+       RX_DESC_DECAP_TYPE_ETHERNET2_DIX,
+       RX_DESC_DECAP_TYPE_8023,
+};
+
+enum rx_desc_decrypt_status_code {
+       RX_DESC_DECRYPT_STATUS_CODE_OK,
+       RX_DESC_DECRYPT_STATUS_CODE_UNPROTECTED_FRAME,
+       RX_DESC_DECRYPT_STATUS_CODE_DATA_ERR,
+       RX_DESC_DECRYPT_STATUS_CODE_KEY_INVALID,
+       RX_DESC_DECRYPT_STATUS_CODE_PEER_ENTRY_INVALID,
+       RX_DESC_DECRYPT_STATUS_CODE_OTHER,
+};
+
+#define RX_ATTENTION_INFO1_FIRST_MPDU          BIT(0)
+#define RX_ATTENTION_INFO1_RSVD_1A             BIT(1)
+#define RX_ATTENTION_INFO1_MCAST_BCAST         BIT(2)
+#define RX_ATTENTION_INFO1_AST_IDX_NOT_FOUND   BIT(3)
+#define RX_ATTENTION_INFO1_AST_IDX_TIMEDOUT    BIT(4)
+#define RX_ATTENTION_INFO1_POWER_MGMT          BIT(5)
+#define RX_ATTENTION_INFO1_NON_QOS             BIT(6)
+#define RX_ATTENTION_INFO1_NULL_DATA           BIT(7)
+#define RX_ATTENTION_INFO1_MGMT_TYPE           BIT(8)
+#define RX_ATTENTION_INFO1_CTRL_TYPE           BIT(9)
+#define RX_ATTENTION_INFO1_MORE_DATA           BIT(10)
+#define RX_ATTENTION_INFO1_EOSP                        BIT(11)
+#define RX_ATTENTION_INFO1_A_MSDU_ERROR                BIT(12)
+#define RX_ATTENTION_INFO1_FRAGMENT            BIT(13)
+#define RX_ATTENTION_INFO1_ORDER               BIT(14)
+#define RX_ATTENTION_INFO1_CCE_MATCH           BIT(15)
+#define RX_ATTENTION_INFO1_OVERFLOW_ERR                BIT(16)
+#define RX_ATTENTION_INFO1_MSDU_LEN_ERR                BIT(17)
+#define RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL  BIT(18)
+#define RX_ATTENTION_INFO1_IP_CKSUM_FAIL       BIT(19)
+#define RX_ATTENTION_INFO1_SA_IDX_INVALID      BIT(20)
+#define RX_ATTENTION_INFO1_DA_IDX_INVALID      BIT(21)
+#define RX_ATTENTION_INFO1_RSVD_1B             BIT(22)
+#define RX_ATTENTION_INFO1_RX_IN_TX_DECRYPT_BYP        BIT(23)
+#define RX_ATTENTION_INFO1_ENCRYPT_REQUIRED    BIT(24)
+#define RX_ATTENTION_INFO1_DIRECTED            BIT(25)
+#define RX_ATTENTION_INFO1_BUFFER_FRAGMENT     BIT(26)
+#define RX_ATTENTION_INFO1_MPDU_LEN_ERR                BIT(27)
+#define RX_ATTENTION_INFO1_TKIP_MIC_ERR                BIT(28)
+#define RX_ATTENTION_INFO1_DECRYPT_ERR         BIT(29)
+#define RX_ATTENTION_INFO1_UNDECRYPT_FRAME_ERR BIT(30)
+#define RX_ATTENTION_INFO1_FCS_ERR             BIT(31)
+
+#define RX_ATTENTION_INFO2_FLOW_IDX_TIMEOUT    BIT(0)
+#define RX_ATTENTION_INFO2_FLOW_IDX_INVALID    BIT(1)
+#define RX_ATTENTION_INFO2_WIFI_PARSER_ERR     BIT(2)
+#define RX_ATTENTION_INFO2_AMSDU_PARSER_ERR    BIT(3)
+#define RX_ATTENTION_INFO2_SA_IDX_TIMEOUT      BIT(4)
+#define RX_ATTENTION_INFO2_DA_IDX_TIMEOUT      BIT(5)
+#define RX_ATTENTION_INFO2_MSDU_LIMIT_ERR      BIT(6)
+#define RX_ATTENTION_INFO2_DA_IS_VALID         BIT(7)
+#define RX_ATTENTION_INFO2_DA_IS_MCBC          BIT(8)
+#define RX_ATTENTION_INFO2_SA_IS_VALID         BIT(9)
+#define RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE  GENMASK(12, 10)
+#define RX_ATTENTION_INFO2_RX_BITMAP_NOT_UPDED BIT(13)
+#define RX_ATTENTION_INFO2_MSDU_DONE           BIT(31)
+
+/* RX attention descriptor; detailed per-bit semantics in the comment below. */
+struct rx_attention {
+       uint16_t info0;         /* RX_DESC_INFO0_* (rxpcu filter, sw frame group id) */
+       uint16_t phy_ppdu_id;   /* PPDU counter from PHY; wraps around */
+       uint32_t info1;         /* RX_ATTENTION_INFO1_* bits */
+       uint32_t info2;         /* RX_ATTENTION_INFO2_* bits; MSDU_DONE must be last */
+} __packed;
+
+/* rx_attention
+ *
+ * rxpcu_mpdu_filter_in_category
+ *             Field indicates what the reason was that this mpdu frame
+ *             was allowed to come into the receive path by rxpcu. Values
+ *             are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ *             SW processes frames based on certain classifications. Values
+ *             are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ *             A ppdu counter value that PHY increments for every PPDU
+ *             received. The counter value wraps around.
+ *
+ * first_mpdu
+ *             Indicates the first MSDU of the PPDU.  If both first_mpdu
+ *             and last_mpdu are set in the MSDU then this is a not an
+ *             A-MPDU frame but a stand alone MPDU.  Interior MPDU in an
+ *             A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ *             0.  The PPDU start status will only be valid when this bit
+ *             is set.
+ *
+ * mcast_bcast
+ *             Multicast / broadcast indicator.  Only set when the MAC
+ *             address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ *             matches one of the 4 BSSID registers. Only set when
+ *             first_msdu is set.
+ *
+ * ast_index_not_found
+ *             Only valid when first_msdu is set. Indicates no AST matching
+ *             entries within the max search count.
+ *
+ * ast_index_timeout
+ *             Only valid when first_msdu is set. Indicates an unsuccessful
+ *             search in the address search table due to timeout.
+ *
+ * power_mgmt
+ *             Power management bit set in the 802.11 header.  Only set
+ *             when first_msdu is set.
+ *
+ * non_qos
+ *             Set if packet is not a non-QoS data frame.  Only set when
+ *             first_msdu is set.
+ *
+ * null_data
+ *             Set if frame type indicates either null data or QoS null
+ *             data format.  Only set when first_msdu is set.
+ *
+ * mgmt_type
+ *             Set if packet is a management packet.  Only set when
+ *             first_msdu is set.
+ *
+ * ctrl_type
+ *             Set if packet is a control packet.  Only set when first_msdu
+ *             is set.
+ *
+ * more_data
+ *             Set if more bit in frame control is set.  Only set when
+ *             first_msdu is set.
+ *
+ * eosp
+ *             Set if the EOSP (end of service period) bit in the QoS
+ *             control field is set.  Only set when first_msdu is set.
+ *
+ * a_msdu_error
+ *             Set if number of MSDUs in A-MSDU is above a threshold or if the
+ *             size of the MSDU is invalid. This receive buffer will contain
+ *             all of the remainder of MSDUs in this MPDU w/o decapsulation.
+ *
+ * fragment
+ *             Indicates that this is an 802.11 fragment frame.  This is
+ *             set when either the more_frag bit is set in the frame
+ *             control or the fragment number is not zero.  Only set when
+ *             first_msdu is set.
+ *
+ * order
+ *             Set if the order bit in the frame control is set.  Only set
+ *             when first_msdu is set.
+ *
+ * cce_match
+ *             Indicates that this status has a corresponding MSDU that
+ *             requires FW processing. The OLE will have classification
+ *             ring mask registers which will indicate the ring(s) for
+ *             packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ *             PCU Receive FIFO does not have enough space to store the
+ *             full receive packet.  Enough space is reserved in the
+ *             receive FIFO for the status is written.  This MPDU remaining
+ *             packets in the PPDU will be filtered and no Ack response
+ *             will be transmitted.
+ *
+ * msdu_length_err
+ *             Indicates that the MSDU length from the 802.3 encapsulated
+ *             length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ *             Indicates that the computed checksum (tcp_udp_chksum) did
+ *             not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ *             Indicates that the computed checksum did not match the
+ *             checksum in the IP header.
+ *
+ * sa_idx_invalid
+ *             Indicates no matching entry was found in the address search
+ *             table for the source MAC address.
+ *
+ * da_idx_invalid
+ *             Indicates no matching entry was found in the address search
+ *             table for the destination MAC address.
+ *
+ * rx_in_tx_decrypt_byp
+ *             Indicates that RX packet is not decrypted as Crypto is busy
+ *             with TX packet processing.
+ *
+ * encrypt_required
+ *             Indicates that this data type frame is not encrypted even if
+ *             the policy for this MPDU requires encryption as indicated in
+ *             the peer table key type.
+ *
+ * directed
+ *             MPDU is a directed packet which means that the RA matched
+ *             our STA addresses.  In proxySTA it means that the TA matched
+ *             an entry in our address search table with the corresponding
+ *             'no_ack' bit is the address search entry cleared.
+ *
+ * buffer_fragment
+ *             Indicates that at least one of the rx buffers has been
+ *             fragmented.  If set the FW should look at the rx_frag_info
+ *             descriptor described below.
+ *
+ * mpdu_length_err
+ *             Indicates that the MPDU was pre-maturely terminated
+ *             resulting in a truncated MPDU.  Don't trust the MPDU length
+ *             field.
+ *
+ * tkip_mic_err
+ *             Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ *             Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ *             Indicates that the MPDU FCS check failed
+ *
+ * flow_idx_timeout
+ *             Indicates an unsuccessful flow search due to the expiring of
+ *             the search timer.
+ *
+ * flow_idx_invalid
+ *             flow id is not valid.
+ *
+ * amsdu_parser_error
+ *             A-MSDU could not be properly de-aggregated.
+ *
+ * sa_idx_timeout
+ *             Indicates an unsuccessful search for the source MAC address
+ *             due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ *             Indicates an unsuccessful search for the destination MAC
+ *             address due to the expiring of the search timer.
+ *
+ * msdu_limit_error
+ *             Indicates that the MSDU threshold was exceeded and thus
+ *             all the rest of the MSDUs will not be scattered and will not
+ *             be decapsulated but will be DMA'ed in RAW format as a single
+ *             MSDU buffer.
+ *
+ * da_is_valid
+ *             Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ *             Field Only valid if da_is_valid is set. Indicates the DA address
+ *             was a Multicast or Broadcast address.
+ *
+ * sa_is_valid
+ *             Indicates that OLE found a valid SA entry.
+ *
+ * decrypt_status_code
+ *             Field provides insight into the decryption performed. Values are
+ *             defined in enum %RX_DESC_DECRYPT_STATUS_CODE*.
+ *
+ * rx_bitmap_not_updated
+ *             Frame is received, but RXPCU could not update the receive bitmap
+ *             due to (temporary) fifo constraints.
+ *
+ * msdu_done
+ *             If set indicates that the RX packet data, RX header data, RX
+ *             PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ *             start/end descriptors and RX Attention descriptor are all
+ *             valid.  This bit must be in the last octet of the
+ *             descriptor.
+ */
+
+/*
+ * Bit masks for the infoN words of struct rx_mpdu_start_ipq8074
+ * (defined below).  The meaning of each field is described in the
+ * large "rx_mpdu_start" comment block further down in this file.
+ */
+#define RX_MPDU_START_INFO0_NDP_FRAME          BIT(9)
+#define RX_MPDU_START_INFO0_PHY_ERR            BIT(10)
+#define RX_MPDU_START_INFO0_PHY_ERR_MPDU_HDR   BIT(11)
+#define RX_MPDU_START_INFO0_PROTO_VER_ERR      BIT(12)
+#define RX_MPDU_START_INFO0_AST_LOOKUP_VALID   BIT(13)
+
+/* info1: 802.11 MPDU header summary (validity flags, frag/seq numbers). */
+#define RX_MPDU_START_INFO1_MPDU_FCTRL_VALID   BIT(0)
+#define RX_MPDU_START_INFO1_MPDU_DUR_VALID     BIT(1)
+#define RX_MPDU_START_INFO1_MAC_ADDR1_VALID    BIT(2)
+#define RX_MPDU_START_INFO1_MAC_ADDR2_VALID    BIT(3)
+#define RX_MPDU_START_INFO1_MAC_ADDR3_VALID    BIT(4)
+#define RX_MPDU_START_INFO1_MAC_ADDR4_VALID    BIT(5)
+#define RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID        BIT(6)
+#define RX_MPDU_START_INFO1_MPDU_QOS_CTRL_VALID        BIT(7)
+#define RX_MPDU_START_INFO1_MPDU_HT_CTRL_VALID BIT(8)
+#define RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID BIT(9)
+#define RX_MPDU_START_INFO1_MPDU_FRAG_NUMBER   GENMASK(13, 10)
+#define RX_MPDU_START_INFO1_MORE_FRAG_FLAG     BIT(14)
+#define RX_MPDU_START_INFO1_FROM_DS            BIT(16)
+#define RX_MPDU_START_INFO1_TO_DS              BIT(17)
+#define RX_MPDU_START_INFO1_ENCRYPTED          BIT(18)
+#define RX_MPDU_START_INFO1_MPDU_RETRY         BIT(19)
+#define RX_MPDU_START_INFO1_MPDU_SEQ_NUM       GENMASK(31, 20)
+
+/*
+ * info2: encryption type, BSSID match and TID.  WCN6855 packs the TID
+ * field at a shifted position in the same word.
+ */
+#define RX_MPDU_START_INFO2_EPD_EN             BIT(0)
+#define RX_MPDU_START_INFO2_ALL_FRAME_ENCPD    BIT(1)
+#define RX_MPDU_START_INFO2_ENC_TYPE           GENMASK(5, 2)
+#define RX_MPDU_START_INFO2_VAR_WEP_KEY_WIDTH  GENMASK(7, 6)
+#define RX_MPDU_START_INFO2_MESH_STA           BIT(8)
+#define RX_MPDU_START_INFO2_BSSID_HIT          BIT(9)
+#define RX_MPDU_START_INFO2_BSSID_NUM          GENMASK(13, 10)
+#define RX_MPDU_START_INFO2_TID                        GENMASK(17, 14)
+#define RX_MPDU_START_INFO2_TID_WCN6855                GENMASK(18, 15)
+
+/* info3: REO destination and RXDMA packet/ring selection. */
+#define RX_MPDU_START_INFO3_REO_DEST_IND               GENMASK(4, 0)
+#define RX_MPDU_START_INFO3_FLOW_ID_TOEPLITZ           BIT(7)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_UCAST_DATA      BIT(8)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_MCAST_DATA      BIT(9)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_CTRL_BAR                BIT(10)
+#define RX_MPDU_START_INFO3_RXDMA0_SRC_RING_SEL                GENMASK(12, 11)
+#define RX_MPDU_START_INFO3_RXDMA0_DST_RING_SEL                GENMASK(14, 13)
+
+/* info4: REO queue descriptor address (high bits) and delimiter errors. */
+#define RX_MPDU_START_INFO4_REO_QUEUE_DESC_HI  GENMASK(7, 0)
+#define RX_MPDU_START_INFO4_RECV_QUEUE_NUM     GENMASK(23, 8)
+#define RX_MPDU_START_INFO4_PRE_DELIM_ERR_WARN BIT(24)
+#define RX_MPDU_START_INFO4_FIRST_DELIM_ERR    BIT(25)
+
+/* info5: decryption/decapsulation and VLAN-tag handling controls. */
+#define RX_MPDU_START_INFO5_KEY_ID             GENMASK(7, 0)
+#define RX_MPDU_START_INFO5_NEW_PEER_ENTRY     BIT(8)
+#define RX_MPDU_START_INFO5_DECRYPT_NEEDED     BIT(9)
+#define RX_MPDU_START_INFO5_DECAP_TYPE         GENMASK(11, 10)
+#define RX_MPDU_START_INFO5_VLAN_TAG_C_PADDING BIT(12)
+#define RX_MPDU_START_INFO5_VLAN_TAG_S_PADDING BIT(13)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_C   BIT(14)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_S   BIT(15)
+#define RX_MPDU_START_INFO5_PRE_DELIM_COUNT    GENMASK(27, 16)
+#define RX_MPDU_START_INFO5_AMPDU_FLAG         BIT(28)
+#define RX_MPDU_START_INFO5_BAR_FRAME          BIT(29)
+
+/* info6: MPDU length plus frame classification flags. */
+#define RX_MPDU_START_INFO6_MPDU_LEN           GENMASK(13, 0)
+#define RX_MPDU_START_INFO6_FIRST_MPDU         BIT(14)
+#define RX_MPDU_START_INFO6_MCAST_BCAST                BIT(15)
+#define RX_MPDU_START_INFO6_AST_IDX_NOT_FOUND  BIT(16)
+#define RX_MPDU_START_INFO6_AST_IDX_TIMEOUT    BIT(17)
+#define RX_MPDU_START_INFO6_POWER_MGMT         BIT(18)
+#define RX_MPDU_START_INFO6_NON_QOS            BIT(19)
+#define RX_MPDU_START_INFO6_NULL_DATA          BIT(20)
+#define RX_MPDU_START_INFO6_MGMT_TYPE          BIT(21)
+#define RX_MPDU_START_INFO6_CTRL_TYPE          BIT(22)
+#define RX_MPDU_START_INFO6_MORE_DATA          BIT(23)
+#define RX_MPDU_START_INFO6_EOSP               BIT(24)
+#define RX_MPDU_START_INFO6_FRAGMENT           BIT(25)
+#define RX_MPDU_START_INFO6_ORDER              BIT(26)
+#define RX_MPDU_START_INFO6_UAPSD_TRIGGER      BIT(27)
+#define RX_MPDU_START_INFO6_ENCRYPT_REQUIRED   BIT(28)
+#define RX_MPDU_START_INFO6_DIRECTED           BIT(29)
+
+/* Flag in the trailing 'raw' word of struct rx_mpdu_start_ipq8074. */
+#define RX_MPDU_START_RAW_MPDU                 BIT(0)
+
+/*
+ * "RX MPDU start" hardware descriptor, IPQ8074-style layout.
+ * One per received MPDU; the infoN words are decoded with the
+ * RX_MPDU_START_INFO0..INFO6 masks above.  Field semantics are
+ * documented in the "rx_mpdu_start" comment block below.
+ */
+struct rx_mpdu_start_ipq8074 {
+       uint16_t info0;                 /* RX_MPDU_START_INFO0_* */
+       uint16_t phy_ppdu_id;           /* PPDU counter from PHY; wraps around */
+       uint16_t ast_index;             /* AST entry index; 0xFFFF means invalid */
+       uint16_t sw_peer_id;            /* unique peer id from the AST entry */
+       uint32_t info1;                 /* RX_MPDU_START_INFO1_* */
+       uint32_t info2;                 /* RX_MPDU_START_INFO2_* */
+       uint32_t pn[4];                 /* PN number */
+       uint32_t peer_meta_data;        /* SW-programmed peer table meta data */
+       uint32_t info3;                 /* RX_MPDU_START_INFO3_* */
+       uint32_t reo_queue_desc_lo;     /* REO queue desc address, low 32 bits */
+       uint32_t info4;                 /* RX_MPDU_START_INFO4_* (desc addr hi) */
+       uint32_t info5;                 /* RX_MPDU_START_INFO5_* */
+       uint32_t info6;                 /* RX_MPDU_START_INFO6_* */
+       uint16_t frame_ctrl;            /* 802.11 Frame Control field */
+       uint16_t duration;              /* 802.11 Duration/ID field */
+       uint8_t addr1[IEEE80211_ADDR_LEN];
+       uint8_t addr2[IEEE80211_ADDR_LEN];
+       uint8_t addr3[IEEE80211_ADDR_LEN];
+       uint16_t seq_ctrl;              /* 802.11 Sequence Control field */
+       uint8_t addr4[IEEE80211_ADDR_LEN];
+       uint16_t qos_ctrl;              /* 802.11 QoS Control field */
+       uint32_t ht_ctrl;               /* 802.11 HT Control field */
+       uint32_t raw;                   /* RX_MPDU_START_RAW_MPDU */
+} __packed;
+
+/*
+ * Bit masks for the infoN words of struct rx_mpdu_start_qcn9074
+ * (defined below).  These carry the same fields as the IPQ8074 masks
+ * above, but the QCN9074 layout numbers its info words 7..13 and
+ * packs some fields differently (e.g. mesh_sta is two bits wide in
+ * info9, and the raw-MPDU flag moves into info12).
+ */
+#define RX_MPDU_START_INFO7_REO_DEST_IND               GENMASK(4, 0)
+#define RX_MPDU_START_INFO7_LMAC_PEER_ID_MSB           GENMASK(6, 5)
+#define RX_MPDU_START_INFO7_FLOW_ID_TOEPLITZ           BIT(7)
+#define RX_MPDU_START_INFO7_PKT_SEL_FP_UCAST_DATA      BIT(8)
+#define RX_MPDU_START_INFO7_PKT_SEL_FP_MCAST_DATA      BIT(9)
+#define RX_MPDU_START_INFO7_PKT_SEL_FP_CTRL_BAR                BIT(10)
+#define RX_MPDU_START_INFO7_RXDMA0_SRC_RING_SEL                GENMASK(12, 11)
+#define RX_MPDU_START_INFO7_RXDMA0_DST_RING_SEL                GENMASK(14, 13)
+
+#define RX_MPDU_START_INFO8_REO_QUEUE_DESC_HI          GENMASK(7, 0)
+#define RX_MPDU_START_INFO8_RECV_QUEUE_NUM             GENMASK(23, 8)
+#define RX_MPDU_START_INFO8_PRE_DELIM_ERR_WARN         BIT(24)
+#define RX_MPDU_START_INFO8_FIRST_DELIM_ERR            BIT(25)
+
+#define RX_MPDU_START_INFO9_EPD_EN                     BIT(0)
+#define RX_MPDU_START_INFO9_ALL_FRAME_ENCPD            BIT(1)
+#define RX_MPDU_START_INFO9_ENC_TYPE                   GENMASK(5, 2)
+#define RX_MPDU_START_INFO9_VAR_WEP_KEY_WIDTH          GENMASK(7, 6)
+#define RX_MPDU_START_INFO9_MESH_STA                   GENMASK(9, 8)
+#define RX_MPDU_START_INFO9_BSSID_HIT                  BIT(10)
+#define RX_MPDU_START_INFO9_BSSID_NUM                  GENMASK(14, 11)
+#define RX_MPDU_START_INFO9_TID                                GENMASK(18, 15)
+
+#define RX_MPDU_START_INFO10_RXPCU_MPDU_FLTR           GENMASK(1, 0)
+#define RX_MPDU_START_INFO10_SW_FRAME_GRP_ID           GENMASK(8, 2)
+#define RX_MPDU_START_INFO10_NDP_FRAME                 BIT(9)
+#define RX_MPDU_START_INFO10_PHY_ERR                   BIT(10)
+#define RX_MPDU_START_INFO10_PHY_ERR_MPDU_HDR          BIT(11)
+#define RX_MPDU_START_INFO10_PROTO_VER_ERR             BIT(12)
+#define RX_MPDU_START_INFO10_AST_LOOKUP_VALID          BIT(13)
+
+#define RX_MPDU_START_INFO11_MPDU_FCTRL_VALID          BIT(0)
+#define RX_MPDU_START_INFO11_MPDU_DUR_VALID            BIT(1)
+#define RX_MPDU_START_INFO11_MAC_ADDR1_VALID           BIT(2)
+#define RX_MPDU_START_INFO11_MAC_ADDR2_VALID           BIT(3)
+#define RX_MPDU_START_INFO11_MAC_ADDR3_VALID           BIT(4)
+#define RX_MPDU_START_INFO11_MAC_ADDR4_VALID           BIT(5)
+#define RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID       BIT(6)
+#define RX_MPDU_START_INFO11_MPDU_QOS_CTRL_VALID       BIT(7)
+#define RX_MPDU_START_INFO11_MPDU_HT_CTRL_VALID                BIT(8)
+#define RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID                BIT(9)
+#define RX_MPDU_START_INFO11_MPDU_FRAG_NUMBER          GENMASK(13, 10)
+#define RX_MPDU_START_INFO11_MORE_FRAG_FLAG            BIT(14)
+#define RX_MPDU_START_INFO11_FROM_DS                   BIT(16)
+#define RX_MPDU_START_INFO11_TO_DS                     BIT(17)
+#define RX_MPDU_START_INFO11_ENCRYPTED                 BIT(18)
+#define RX_MPDU_START_INFO11_MPDU_RETRY                        BIT(19)
+#define RX_MPDU_START_INFO11_MPDU_SEQ_NUM              GENMASK(31, 20)
+
+#define RX_MPDU_START_INFO12_KEY_ID                    GENMASK(7, 0)
+#define RX_MPDU_START_INFO12_NEW_PEER_ENTRY            BIT(8)
+#define RX_MPDU_START_INFO12_DECRYPT_NEEDED            BIT(9)
+#define RX_MPDU_START_INFO12_DECAP_TYPE                        GENMASK(11, 10)
+#define RX_MPDU_START_INFO12_VLAN_TAG_C_PADDING                BIT(12)
+#define RX_MPDU_START_INFO12_VLAN_TAG_S_PADDING                BIT(13)
+#define RX_MPDU_START_INFO12_STRIP_VLAN_TAG_C          BIT(14)
+#define RX_MPDU_START_INFO12_STRIP_VLAN_TAG_S          BIT(15)
+#define RX_MPDU_START_INFO12_PRE_DELIM_COUNT           GENMASK(27, 16)
+#define RX_MPDU_START_INFO12_AMPDU_FLAG                        BIT(28)
+#define RX_MPDU_START_INFO12_BAR_FRAME                 BIT(29)
+#define RX_MPDU_START_INFO12_RAW_MPDU                  BIT(30)
+
+#define RX_MPDU_START_INFO13_MPDU_LEN                  GENMASK(13, 0)
+#define RX_MPDU_START_INFO13_FIRST_MPDU                        BIT(14)
+#define RX_MPDU_START_INFO13_MCAST_BCAST               BIT(15)
+#define RX_MPDU_START_INFO13_AST_IDX_NOT_FOUND         BIT(16)
+#define RX_MPDU_START_INFO13_AST_IDX_TIMEOUT           BIT(17)
+#define RX_MPDU_START_INFO13_POWER_MGMT                        BIT(18)
+#define RX_MPDU_START_INFO13_NON_QOS                   BIT(19)
+#define RX_MPDU_START_INFO13_NULL_DATA                 BIT(20)
+#define RX_MPDU_START_INFO13_MGMT_TYPE                 BIT(21)
+#define RX_MPDU_START_INFO13_CTRL_TYPE                 BIT(22)
+#define RX_MPDU_START_INFO13_MORE_DATA                 BIT(23)
+#define RX_MPDU_START_INFO13_EOSP                      BIT(24)
+#define RX_MPDU_START_INFO13_FRAGMENT                  BIT(25)
+#define RX_MPDU_START_INFO13_ORDER                     BIT(26)
+#define RX_MPDU_START_INFO13_UAPSD_TRIGGER             BIT(27)
+#define RX_MPDU_START_INFO13_ENCRYPT_REQUIRED          BIT(28)
+#define RX_MPDU_START_INFO13_DIRECTED                  BIT(29)
+#define RX_MPDU_START_INFO13_AMSDU_PRESENT             BIT(30)
+
+/*
+ * "RX MPDU start" descriptor, QCN9074 layout.  Carries the same
+ * fields as the IPQ8074 layout but in a different word order, decoded
+ * with the RX_MPDU_START_INFO7..INFO13 masks above.  There is no
+ * trailing 'raw' word; the raw-MPDU flag lives in info12 instead.
+ */
+struct rx_mpdu_start_qcn9074 {
+       uint32_t info7;                 /* RX_MPDU_START_INFO7_* */
+       uint32_t reo_queue_desc_lo;     /* REO queue desc address, low 32 bits */
+       uint32_t info8;                 /* RX_MPDU_START_INFO8_* (desc addr hi) */
+       uint32_t pn[4];                 /* PN number */
+       uint32_t info9;                 /* RX_MPDU_START_INFO9_* */
+       uint32_t peer_meta_data;        /* SW-programmed peer table meta data */
+       uint16_t info10;                /* RX_MPDU_START_INFO10_* */
+       uint16_t phy_ppdu_id;           /* PPDU counter from PHY; wraps around */
+       uint16_t ast_index;             /* AST entry index; 0xFFFF means invalid */
+       uint16_t sw_peer_id;            /* unique peer id from the AST entry */
+       uint32_t info11;                /* RX_MPDU_START_INFO11_* */
+       uint32_t info12;                /* RX_MPDU_START_INFO12_* */
+       uint32_t info13;                /* RX_MPDU_START_INFO13_* */
+       uint16_t frame_ctrl;            /* 802.11 Frame Control field */
+       uint16_t duration;              /* 802.11 Duration/ID field */
+       uint8_t addr1[IEEE80211_ADDR_LEN];
+       uint8_t addr2[IEEE80211_ADDR_LEN];
+       uint8_t addr3[IEEE80211_ADDR_LEN];
+       uint16_t seq_ctrl;              /* 802.11 Sequence Control field */
+       uint8_t addr4[IEEE80211_ADDR_LEN];
+       uint16_t qos_ctrl;              /* 802.11 QoS Control field */
+       uint32_t ht_ctrl;               /* 802.11 HT Control field */
+} __packed;
+
+/*
+ * "RX MPDU start" descriptor, WCN6855 layout.  The info words reuse
+ * the RX_MPDU_START_INFO0..INFO6 masks of the IPQ8074 layout (with
+ * the wider RX_MPDU_START_INFO2_TID_WCN6855 TID mask) but are stored
+ * in a different order within the descriptor.
+ */
+struct rx_mpdu_start_wcn6855 {
+       uint32_t info3;                 /* RX_MPDU_START_INFO3_* */
+       uint32_t reo_queue_desc_lo;     /* REO queue desc address, low 32 bits */
+       uint32_t info4;                 /* RX_MPDU_START_INFO4_* (desc addr hi) */
+       uint32_t pn[4];                 /* PN number */
+       uint32_t info2;                 /* RX_MPDU_START_INFO2_* */
+       uint32_t peer_meta_data;        /* SW-programmed peer table meta data */
+       uint16_t info0;                 /* RX_MPDU_START_INFO0_* */
+       uint16_t phy_ppdu_id;           /* PPDU counter from PHY; wraps around */
+       uint16_t ast_index;             /* AST entry index; 0xFFFF means invalid */
+       uint16_t sw_peer_id;            /* unique peer id from the AST entry */
+       uint32_t info1;                 /* RX_MPDU_START_INFO1_* */
+       uint32_t info5;                 /* RX_MPDU_START_INFO5_* */
+       uint32_t info6;                 /* RX_MPDU_START_INFO6_* */
+       uint16_t frame_ctrl;            /* 802.11 Frame Control field */
+       uint16_t duration;              /* 802.11 Duration/ID field */
+       uint8_t addr1[IEEE80211_ADDR_LEN];
+       uint8_t addr2[IEEE80211_ADDR_LEN];
+       uint8_t addr3[IEEE80211_ADDR_LEN];
+       uint16_t seq_ctrl;              /* 802.11 Sequence Control field */
+       uint8_t addr4[IEEE80211_ADDR_LEN];
+       uint16_t qos_ctrl;              /* 802.11 QoS Control field */
+       uint32_t ht_ctrl;               /* 802.11 HT Control field */
+} __packed;
+
+/* rx_mpdu_start
+ *
+ * rxpcu_mpdu_filter_in_category
+ *             Field indicates what the reason was that this mpdu frame
+ *             was allowed to come into the receive path by rxpcu. Values
+ *             are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *             Note: for ndp frame, if it was expected because the preceding
+ *             NDPA was filter_pass, the setting rxpcu_filter_pass will be
+ *             used. This setting will also be used for every ndp frame in
+ *             case Promiscuous mode is enabled.
+ *
+ * sw_frame_group_id
+ *             SW processes frames based on certain classifications. Values
+ *             are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * ndp_frame
+ *             Indicates that the received frame was an NDP frame.
+ *
+ * phy_err
+ *             Indicates that PHY error was received before MAC received data.
+ *
+ * phy_err_during_mpdu_header
+ *             PHY error was received before MAC received the complete MPDU
+ *             header which was needed for proper decoding.
+ *
+ * protocol_version_err
+ *             RXPCU detected a version error in the frame control field.
+ *
+ * ast_based_lookup_valid
+ *             AST based lookup for this frame has found a valid result.
+ *
+ * phy_ppdu_id
+ *             A ppdu counter value that PHY increments for every PPDU
+ *             received. The counter value wraps around.
+ *
+ * ast_index
+ *             This field indicates the index of the AST entry corresponding
+ *             to this MPDU. It is provided by the GSE module instantiated in
+ *             RXPCU. A value of 0xFFFF indicates an invalid AST index.
+ *
+ * sw_peer_id
+ *             This field indicates a unique peer identifier. It is set equal
+ *             to field 'sw_peer_id' from the AST entry.
+ *
+ * mpdu_frame_control_valid, mpdu_duration_valid, mpdu_qos_control_valid,
+ * mpdu_ht_control_valid, frame_encryption_info_valid
+ *             Indicates that each of the corresponding fields has a
+ *             valid entry.
+ *
+ * mac_addr_adx_valid
+ *             Corresponding mac_addr_adx_{lo/hi} has valid entries.
+ *
+ * from_ds, to_ds
+ *             Valid only when mpdu_frame_control_valid is set. Indicates that
+ *             frame is received from DS and sent to DS.
+ *
+ * encrypted
+ *             Protected bit from the frame control.
+ *
+ * mpdu_retry
+ *             Retry bit from frame control. Only valid when first_msdu is set.
+ *
+ * mpdu_sequence_number
+ *             The sequence number from the 802.11 header.
+ *
+ * epd_en
+ *             If set, use EPD instead of LPD.
+ *
+ * all_frames_shall_be_encrypted
+ *             If set, all frames (data only?) shall be encrypted. If not,
+ *             RX CRYPTO shall set an error flag.
+ *
+ * encrypt_type
+ *             Values are defined in enum %HAL_ENCRYPT_TYPE_.
+ *
+ * mesh_sta
+ *             Indicates a Mesh (11s) STA.
+ *
+ * bssid_hit
+ *              BSSID of the incoming frame matched one of the 8 BSSID
+ *              register values.
+ *
+ * bssid_number
+ *             This number indicates which one out of the 8 BSSID register
+ *             values matched the incoming frame.
+ *
+ * tid
+ *             TID field in the QoS control field
+ *
+ * pn
+ *             The PN number.
+ *
+ * peer_meta_data
+ *             Meta data that SW has programmed in the Peer table entry
+ *             of the transmitting STA.
+ *
+ * rx_reo_queue_desc_addr_lo
+ *             Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * rx_reo_queue_desc_addr_hi
+ *             Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * receive_queue_number
+ *             Indicates the MPDU queue ID to which this MPDU link
+ *             descriptor belongs.
+ *
+ * pre_delim_err_warning
+ *             Indicates that a delimiter FCS error was found in between the
+ *             previous MPDU and this MPDU. Note that this is just a warning,
+ *             and does not mean that this MPDU is corrupted in any way. If
+ *             it is, there will be other errors indicated such as FCS or
+ *             decrypt errors.
+ *
+ * first_delim_err
+ *             Indicates that the first delimiter had a FCS failure.
+ *
+ * key_id
+ *             The key ID octet from the IV.
+ *
+ * new_peer_entry
+ *             Set if new RX_PEER_ENTRY TLV follows. If clear, RX_PEER_ENTRY
+ *             doesn't follow so RX DECRYPTION module either uses old peer
+ *             entry or not decrypt.
+ *
+ * decrypt_needed
+ *             When RXPCU sets bit 'ast_index_not_found or ast_index_timeout',
+ *             RXPCU will also ensure that this bit is NOT set. CRYPTO for that
+ *             reason only needs to evaluate this bit and none of the
+ *             other ones.
+ *
+ * decap_type
+ *             Used by the OLE during decapsulation. Values are defined in
+ *             enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * rx_insert_vlan_c_tag_padding
+ * rx_insert_vlan_s_tag_padding
+ *             Insert 4 byte of all zeros as VLAN tag or double VLAN tag if
+ *             the rx payload does not have VLAN.
+ *
+ * strip_vlan_c_tag_decap
+ * strip_vlan_s_tag_decap
+ *             Strip VLAN or double VLAN during decapsulation.
+ *
+ * pre_delim_count
+ *             The number of delimiters before this MPDU. Note that this
+ *             number is cleared at PPDU start. If this MPDU is the first
+ *             received MPDU in the PPDU and this MPDU gets filtered-in,
+ *             this field will indicate the number of delimiters located
+ *             after the last MPDU in the previous PPDU.
+ *
+ *             If this MPDU is located after the first received MPDU in
+ *             a PPDU, this field will indicate the number of delimiters
+ *             located between the previous MPDU and this MPDU.
+ *
+ * ampdu_flag
+ *             Received frame was part of an A-MPDU.
+ *
+ * bar_frame
+ *             Received frame is a BAR frame
+ *
+ * mpdu_length
+ *             MPDU length before decapsulation.
+ *
+ * first_mpdu..directed
+ *             See definition in RX attention descriptor
+ *
+ */
+
+/* PHY preamble/rate type of the received PPDU (RX_MSDU_START_INFO3_PKT_TYPE). */
+enum rx_msdu_start_pkt_type {
+       RX_MSDU_START_PKT_TYPE_11A,
+       RX_MSDU_START_PKT_TYPE_11B,
+       RX_MSDU_START_PKT_TYPE_11N,
+       RX_MSDU_START_PKT_TYPE_11AC,
+       RX_MSDU_START_PKT_TYPE_11AX,
+};
+
+/* Guard interval of the received PPDU (RX_MSDU_START_INFO3_SGI). */
+enum rx_msdu_start_sgi {
+       RX_MSDU_START_SGI_0_8_US,       /* 0.8 us */
+       RX_MSDU_START_SGI_0_4_US,       /* 0.4 us */
+       RX_MSDU_START_SGI_1_6_US,       /* 1.6 us */
+       RX_MSDU_START_SGI_3_2_US,       /* 3.2 us */
+};
+
+/* Receive bandwidth of the PPDU (RX_MSDU_START_INFO3_RECV_BW). */
+enum rx_msdu_start_recv_bw {
+       RX_MSDU_START_RECV_BW_20MHZ,
+       RX_MSDU_START_RECV_BW_40MHZ,
+       RX_MSDU_START_RECV_BW_80MHZ,
+       RX_MSDU_START_RECV_BW_160MHZ,
+};
+
+/*
+ * Single-user vs. multi-user (MIMO/OFDMA) reception type
+ * (RX_MSDU_START_INFO3_RECEPTION_TYPE).
+ */
+enum rx_msdu_start_reception_type {
+       RX_MSDU_START_RECEPTION_TYPE_SU,
+       RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO,
+       RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA,
+       RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+       RX_MSDU_START_RECEPTION_TYPE_UL_MU_MIMO,
+       RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA,
+       RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+
+/*
+ * Bit masks for the infoN words of the rx_msdu_start descriptors
+ * below.  Field semantics are described in the "rx_msdu_start"
+ * comment block further down.  Note that INFO2_DECAP_FORMAT is an
+ * alias covering the same bits as INFO2_DECAP_TYPE.
+ */
+#define RX_MSDU_START_INFO1_MSDU_LENGTH                GENMASK(13, 0)
+#define RX_MSDU_START_INFO1_RSVD_1A            BIT(14)
+#define RX_MSDU_START_INFO1_IPSEC_ESP          BIT(15)
+#define RX_MSDU_START_INFO1_L3_OFFSET          GENMASK(22, 16)
+#define RX_MSDU_START_INFO1_IPSEC_AH           BIT(23)
+#define RX_MSDU_START_INFO1_L4_OFFSET          GENMASK(31, 24)
+
+#define RX_MSDU_START_INFO2_MSDU_NUMBER                GENMASK(7, 0)
+#define RX_MSDU_START_INFO2_DECAP_TYPE         GENMASK(9, 8)
+#define RX_MSDU_START_INFO2_IPV4               BIT(10)
+#define RX_MSDU_START_INFO2_IPV6               BIT(11)
+#define RX_MSDU_START_INFO2_TCP                        BIT(12)
+#define RX_MSDU_START_INFO2_UDP                        BIT(13)
+#define RX_MSDU_START_INFO2_IP_FRAG            BIT(14)
+#define RX_MSDU_START_INFO2_TCP_ONLY_ACK       BIT(15)
+#define RX_MSDU_START_INFO2_DA_IS_BCAST_MCAST  BIT(16)
+#define RX_MSDU_START_INFO2_SELECTED_TOEPLITZ_HASH     GENMASK(18, 17)
+#define RX_MSDU_START_INFO2_IP_FIXED_HDR_VALID         BIT(19)
+#define RX_MSDU_START_INFO2_IP_EXTN_HDR_VALID          BIT(20)
+#define RX_MSDU_START_INFO2_IP_TCP_UDP_HDR_VALID       BIT(21)
+#define RX_MSDU_START_INFO2_MESH_CTRL_PRESENT          BIT(22)
+#define RX_MSDU_START_INFO2_LDPC                       BIT(23)
+#define RX_MSDU_START_INFO2_IP4_IP6_NXT_HDR            GENMASK(31, 24)
+#define RX_MSDU_START_INFO2_DECAP_FORMAT               GENMASK(9, 8)
+
+/* info3: PHY receive parameters (RSSI, rate, bandwidth, MU/SU type). */
+#define RX_MSDU_START_INFO3_USER_RSSI          GENMASK(7, 0)
+#define RX_MSDU_START_INFO3_PKT_TYPE           GENMASK(11, 8)
+#define RX_MSDU_START_INFO3_STBC               BIT(12)
+#define RX_MSDU_START_INFO3_SGI                        GENMASK(14, 13)
+#define RX_MSDU_START_INFO3_RATE_MCS           GENMASK(18, 15)
+#define RX_MSDU_START_INFO3_RECV_BW            GENMASK(20, 19)
+#define RX_MSDU_START_INFO3_RECEPTION_TYPE     GENMASK(23, 21)
+#define RX_MSDU_START_INFO3_MIMO_SS_BITMAP     GENMASK(31, 24)
+
+/*
+ * "RX MSDU start" descriptor, IPQ8074 layout.  One per MSDU; carries
+ * parse results and PHY receive parameters.  See the "rx_msdu_start"
+ * comment block below for field semantics.
+ */
+struct rx_msdu_start_ipq8074 {
+       uint16_t info0;                 /* RX_MSDU_START_INFO0-style filter/group id */
+       uint16_t phy_ppdu_id;           /* PPDU counter from PHY; wraps around */
+       uint32_t info1;                 /* RX_MSDU_START_INFO1_* */
+       uint32_t info2;                 /* RX_MSDU_START_INFO2_* */
+       uint32_t toeplitz_hash;         /* chosen Toeplitz hash value */
+       uint32_t flow_id_toeplitz;      /* Toeplitz hash of the 5-tuple */
+       uint32_t info3;                 /* RX_MSDU_START_INFO3_* */
+       uint32_t ppdu_start_timestamp;  /* when the PPDU started on the medium */
+       uint32_t phy_meta_data;         /* SW-programmed PHY meta data (e.g. channel) */
+} __packed;
+
+/*
+ * "RX MSDU start" descriptor, QCN9074 layout.  Identical to the
+ * IPQ8074 layout with two extra VLAN tag words appended.
+ */
+struct rx_msdu_start_qcn9074 {
+       uint16_t info0;                 /* filter/group id word */
+       uint16_t phy_ppdu_id;           /* PPDU counter from PHY; wraps around */
+       uint32_t info1;                 /* RX_MSDU_START_INFO1_* */
+       uint32_t info2;                 /* RX_MSDU_START_INFO2_* */
+       uint32_t toeplitz_hash;         /* chosen Toeplitz hash value */
+       uint32_t flow_id_toeplitz;      /* Toeplitz hash of the 5-tuple */
+       uint32_t info3;                 /* RX_MSDU_START_INFO3_* */
+       uint32_t ppdu_start_timestamp;  /* when the PPDU started on the medium */
+       uint32_t phy_meta_data;         /* SW-programmed PHY meta data (e.g. channel) */
+       uint16_t vlan_ctag_c1;          /* VLAN C-tag */
+       uint16_t vlan_stag_c1;          /* VLAN S-tag */
+} __packed;
+
+/*
+ * "RX MSDU start" descriptor, WCN6855 layout.  Same word order as the
+ * QCN9074 layout; only the trailing VLAN tag field names differ.
+ */
+struct rx_msdu_start_wcn6855 {
+       uint16_t info0;                 /* filter/group id word */
+       uint16_t phy_ppdu_id;           /* PPDU counter from PHY; wraps around */
+       uint32_t info1;                 /* RX_MSDU_START_INFO1_* */
+       uint32_t info2;                 /* RX_MSDU_START_INFO2_* */
+       uint32_t toeplitz_hash;         /* chosen Toeplitz hash value */
+       uint32_t flow_id_toeplitz;      /* Toeplitz hash of the 5-tuple */
+       uint32_t info3;                 /* RX_MSDU_START_INFO3_* */
+       uint32_t ppdu_start_timestamp;  /* when the PPDU started on the medium */
+       uint32_t phy_meta_data;         /* SW-programmed PHY meta data (e.g. channel) */
+       uint16_t vlan_ctag_ci;          /* VLAN C-tag */
+       uint16_t vlan_stag_ci;          /* VLAN S-tag */
+} __packed;
+
+/* rx_msdu_start
+ *
+ * rxpcu_mpdu_filter_in_category
+ *             Field indicates what the reason was that this mpdu frame
+ *             was allowed to come into the receive path by rxpcu. Values
+ *             are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ *             SW processes frames based on certain classifications. Values
+ *             are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ *             A ppdu counter value that PHY increments for every PPDU
+ *             received. The counter value wraps around.
+ *
+ * msdu_length
+ *             MSDU length in bytes after decapsulation.
+ *
+ * ipsec_esp
+ *             Set if IPv4/v6 packet is using IPsec ESP.
+ *
+ * l3_offset
+ *             Depending upon mode bit, this field either indicates the
+ *             L3 offset in bytes from the start of the RX_HEADER or the IP
+ *             offset in bytes from the start of the packet after
+ *             decapsulation. The latter is only valid if ipv4_proto or
+ *             ipv6_proto is set.
+ *
+ * ipsec_ah
+ *             Set if IPv4/v6 packet is using IPsec AH
+ *
+ * l4_offset
+ *             Depending upon mode bit, this field either indicates the
+ *             L4 offset in bytes from the start of RX_HEADER (only valid
+ *             if either ipv4_proto or ipv6_proto is set to 1) or indicates
+ *             the offset in bytes to the start of TCP or UDP header from
+ *             the start of the IP header after decapsulation (Only valid if
+ *             tcp_proto or udp_proto is set). The value 0 indicates that
+ *             the offset is longer than 127 bytes.
+ *
+ * msdu_number
+ *             Indicates the MSDU number within a MPDU.  This value is
+ *             reset to zero at the start of each MPDU.  If the number of
+ *             MSDU exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_type
+ *             Indicates the format after decapsulation. Values are defined in
+ *             enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * ipv4_proto
+ *             Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ *             Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ *             Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ *             indicates TCP.
+ *
+ * udp_proto
+ *             Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ *             indicates UDP.
+ *
+ * ip_frag
+ *             Indicates that either the IP More frag bit is set or IP frag
+ *             number is non-zero.  If set indicates that this is a fragmented
+ *             IP packet.
+ *
+ * tcp_only_ack
+ *             Set if only the TCP Ack bit is set in the TCP flags and if
+ *             the TCP payload is 0.
+ *
+ * da_is_bcast_mcast
+ *             The destination address is broadcast or multicast.
+ *
+ * toeplitz_hash
+ *             Actual chosen Hash.
+ *             0 - Toeplitz hash of 2-tuple (IP source address, IP
+ *                 destination address)
+ *             1 - Toeplitz hash of 4-tuple (IP source address,
+ *                 IP destination address, L4 (TCP/UDP) source port,
+ *                 L4 (TCP/UDP) destination port)
+ *             2 - Toeplitz of flow_id
+ *             3 - Zero is used
+ *
+ * ip_fixed_header_valid
+ *             Fixed 20-byte IPv4 header or 40-byte IPv6 header parsed
+ *             fully within first 256 bytes of the packet
+ *
+ * ip_extn_header_valid
+ *             IPv4/IPv6 header, including IPv4 options and
+ *             recognizable extension headers parsed fully within first 256
+ *             bytes of the packet
+ *
+ * tcp_udp_header_valid
+ *             Fixed 20-byte TCP (excluding TCP options) or 8-byte UDP
+ *             header parsed fully within first 256 bytes of the packet
+ *
+ * mesh_control_present
+ *             When set, this MSDU includes the 'Mesh Control' field
+ *
+ * ldpc
+ *
+ * ip4_protocol_ip6_next_header
+ *             For IPv4, this is the 8 bit protocol field. For IPv6 this
+ *             is the 8 bit next_header field.
+ *
+ * toeplitz_hash_2_or_4
+ *             Controlled by RxOLE register - If register bit set to 0,
+ *             Toeplitz hash is computed over 2-tuple IPv4 or IPv6 src/dest
+ *             addresses; otherwise, toeplitz hash is computed over 4-tuple
+ *             IPv4 or IPv6 src/dest addresses and src/dest ports.
+ *
+ * flow_id_toeplitz
+ *             Toeplitz hash of 5-tuple
+ *             {IP source address, IP destination address, IP source port, IP
+ *             destination port, L4 protocol}  in case of non-IPSec.
+ *
+ *             In case of IPSec - Toeplitz hash of 4-tuple
+ *             {IP source address, IP destination address, SPI, L4 protocol}
+ *
+ *             The relevant Toeplitz key registers are provided in RxOLE's
+ *             instance of common parser module. These registers are separate
+ *             from the Toeplitz keys used by ASE/FSE modules inside RxOLE.
+ *             The actual value will be passed on from common parser module
+ *             to RxOLE in one of the WHO_* TLVs.
+ *
+ * user_rssi
+ *             RSSI for this user
+ *
+ * pkt_type
+ *             Values are defined in enum %RX_MSDU_START_PKT_TYPE_*.
+ *
+ * stbc
+ *             When set, use STBC transmission rates.
+ *
+ * sgi
+ *             Field only valid when pkt type is HT, VHT or HE. Values are
+ *             defined in enum %RX_MSDU_START_SGI_*.
+ *
+ * rate_mcs
+ *             MCS Rate used.
+ *
+ * receive_bandwidth
+ *             Full receive Bandwidth. Values are defined in enum
+ *             %RX_MSDU_START_RECV_*.
+ *
+ * reception_type
+ *             Indicates what type of reception this is and defined in enum
+ *             %RX_MSDU_START_RECEPTION_TYPE_*.
+ *
+ * mimo_ss_bitmap
+ *             Field only valid when
+ *             Reception_type is RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO or
+ *             RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO.
+ *
+ *             Bitmap, with each bit indicating if the related spatial
+ *             stream is used for this STA
+ *
+ *             LSB related to SS 0
+ *
+ *             0 - spatial stream not used for this reception
+ *             1 - spatial stream used for this reception
+ *
+ * ppdu_start_timestamp
+ *             Timestamp that indicates when the PPDU that contained this MPDU
+ *             started on the medium.
+ *
+ * phy_meta_data
+ *             SW programmed Meta data provided by the PHY. Can be used for SW
+ *             to indicate the channel the device is on.
+ */
+
+/*
+ * Bit masks for the infoN words of struct rx_msdu_end_ipq8074
+ * (defined below).  The _WCN6855 variants mark bit positions that
+ * moved in the WCN6855 layout.  Note: the misspellings in
+ * RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER ("filter") and
+ * RX_MSDU_END_INFO2_AMSDU_PARSET_ERR ("parser") are kept as-is,
+ * presumably matching the upstream Linux driver naming.
+ */
+#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER    GENMASK(1, 0)
+#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID      GENMASK(8, 2)
+
+#define RX_MSDU_END_INFO1_KEY_ID               GENMASK(7, 0)
+#define RX_MSDU_END_INFO1_CCE_SUPER_RULE       GENMASK(13, 8)
+#define RX_MSDU_END_INFO1_CCND_TRUNCATE                BIT(14)
+#define RX_MSDU_END_INFO1_CCND_CCE_DIS         BIT(15)
+#define RX_MSDU_END_INFO1_EXT_WAPI_PN          GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO2_REPORTED_MPDU_LEN    GENMASK(13, 0)
+#define RX_MSDU_END_INFO2_FIRST_MSDU           BIT(14)
+#define RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855   BIT(28)
+#define RX_MSDU_END_INFO2_LAST_MSDU            BIT(15)
+#define RX_MSDU_END_INFO2_LAST_MSDU_WCN6855    BIT(29)
+#define RX_MSDU_END_INFO2_SA_IDX_TIMEOUT       BIT(16)
+#define RX_MSDU_END_INFO2_DA_IDX_TIMEOUT       BIT(17)
+#define RX_MSDU_END_INFO2_MSDU_LIMIT_ERR       BIT(18)
+#define RX_MSDU_END_INFO2_FLOW_IDX_TIMEOUT     BIT(19)
+#define RX_MSDU_END_INFO2_FLOW_IDX_INVALID     BIT(20)
+#define RX_MSDU_END_INFO2_WIFI_PARSER_ERR      BIT(21)
+#define RX_MSDU_END_INFO2_AMSDU_PARSET_ERR     BIT(22)
+#define RX_MSDU_END_INFO2_SA_IS_VALID          BIT(23)
+#define RX_MSDU_END_INFO2_DA_IS_VALID          BIT(24)
+#define RX_MSDU_END_INFO2_DA_IS_MCBC           BIT(25)
+#define RX_MSDU_END_INFO2_L3_HDR_PADDING       GENMASK(27, 26)
+
+#define RX_MSDU_END_INFO3_TCP_FLAG             GENMASK(8, 0)
+#define RX_MSDU_END_INFO3_LRO_ELIGIBLE         BIT(9)
+
+#define RX_MSDU_END_INFO4_DA_OFFSET            GENMASK(5, 0)
+#define RX_MSDU_END_INFO4_SA_OFFSET            GENMASK(11, 6)
+#define RX_MSDU_END_INFO4_DA_OFFSET_VALID      BIT(12)
+#define RX_MSDU_END_INFO4_SA_OFFSET_VALID      BIT(13)
+#define RX_MSDU_END_INFO4_L3_TYPE              GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO5_MSDU_DROP            BIT(0)
+#define RX_MSDU_END_INFO5_REO_DEST_IND         GENMASK(5, 1)
+#define RX_MSDU_END_INFO5_FLOW_IDX             GENMASK(25, 6)
+
+/*
+ * "RX MSDU end" descriptor, IPQ8074 layout.  Terminates each MSDU and
+ * carries checksum values, TCP state and SA/DA/flow search results.
+ * See the "rx_msdu_end" comment block below for field semantics.
+ */
+struct rx_msdu_end_ipq8074 {
+       uint16_t info0;                 /* RX_MSDU_END_INFO0_* */
+       uint16_t phy_ppdu_id;           /* PPDU counter from PHY; wraps around */
+       uint16_t ip_hdr_cksum;          /* IP header checksum */
+       uint16_t tcp_udp_cksum;         /* TCP/UDP checksum */
+       uint32_t info1;                 /* RX_MSDU_END_INFO1_* */
+       uint32_t ext_wapi_pn[2];        /* extended WAPI packet number */
+       uint32_t info2;                 /* RX_MSDU_END_INFO2_* */
+       uint32_t ipv6_options_crc;
+       uint32_t tcp_seq_num;           /* TCP sequence number */
+       uint32_t tcp_ack_num;           /* TCP acknowledgement number */
+       uint16_t info3;                 /* RX_MSDU_END_INFO3_* */
+       uint16_t window_size;           /* TCP window size */
+       uint32_t info4;                 /* RX_MSDU_END_INFO4_* */
+       uint32_t rule_indication[2];
+       uint16_t sa_idx;                /* source MAC address search index */
+       uint16_t da_idx;                /* destination MAC address search index */
+       uint32_t info5;                 /* RX_MSDU_END_INFO5_* */
+       uint32_t fse_metadata;
+       uint16_t cce_metadata;
+       uint16_t sa_sw_peer_id;
+} __packed;
+
+/*
+ * "RX MSDU end" descriptor, WCN6855 layout.  Reorders the info words
+ * relative to IPQ8074 (note info4 vs. info1/info2 positions and the
+ * RX_MSDU_END_INFO2_*_WCN6855 bit variants) and appends two more
+ * info words (info6, info7).
+ */
+struct rx_msdu_end_wcn6855 {
+       uint16_t info0;                 /* RX_MSDU_END_INFO0_* */
+       uint16_t phy_ppdu_id;           /* PPDU counter from PHY; wraps around */
+       uint16_t ip_hdr_cksum;          /* IP header checksum */
+       uint16_t reported_mpdu_len;     /* MPDU length reported by RXPCU */
+       uint32_t info1;                 /* RX_MSDU_END_INFO1_* */
+       uint32_t ext_wapi_pn[2];        /* extended WAPI packet number */
+       uint32_t info4;                 /* RX_MSDU_END_INFO4_* */
+       uint32_t ipv6_options_crc;
+       uint32_t tcp_seq_num;           /* TCP sequence number */
+       uint32_t tcp_ack_num;           /* TCP acknowledgement number */
+       uint16_t info3;                 /* RX_MSDU_END_INFO3_* */
+       uint16_t window_size;           /* TCP window size */
+       uint32_t info2;                 /* RX_MSDU_END_INFO2_* */
+       uint16_t sa_idx;                /* source MAC address search index */
+       uint16_t da_idx;                /* destination MAC address search index */
+       uint32_t info5;                 /* RX_MSDU_END_INFO5_* */
+       uint32_t fse_metadata;
+       uint16_t cce_metadata;
+       uint16_t sa_sw_peer_id;
+       uint32_t rule_indication[2];
+       uint32_t info6;
+       uint32_t info7;
+} __packed;
+
+/*
+ * Masks for the rx_msdu_end layout used by struct rx_msdu_end_qcn9074
+ * below.  Note these redefine RX_MSDU_END_INFO2_*/INFO4_* with
+ * different meanings than the IPQ8074 masks above: the offset fields
+ * and the status/error flags swap info words in this layout.
+ */
+#define RX_MSDU_END_MPDU_LENGTH_INFO           GENMASK(13, 0)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET            GENMASK(5, 0)
+#define RX_MSDU_END_INFO2_SA_OFFSET            GENMASK(11, 6)
+#define RX_MSDU_END_INFO2_DA_OFFSET_VALID      BIT(12)
+#define RX_MSDU_END_INFO2_SA_OFFSET_VALID      BIT(13)
+#define RX_MSDU_END_INFO2_L3_TYPE              GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO4_SA_IDX_TIMEOUT       BIT(0)
+#define RX_MSDU_END_INFO4_DA_IDX_TIMEOUT       BIT(1)
+#define RX_MSDU_END_INFO4_MSDU_LIMIT_ERR       BIT(2)
+#define RX_MSDU_END_INFO4_FLOW_IDX_TIMEOUT     BIT(3)
+#define RX_MSDU_END_INFO4_FLOW_IDX_INVALID     BIT(4)
+#define RX_MSDU_END_INFO4_WIFI_PARSER_ERR      BIT(5)
+#define RX_MSDU_END_INFO4_AMSDU_PARSER_ERR     BIT(6)
+#define RX_MSDU_END_INFO4_SA_IS_VALID          BIT(7)
+#define RX_MSDU_END_INFO4_DA_IS_VALID          BIT(8)
+#define RX_MSDU_END_INFO4_DA_IS_MCBC           BIT(9)
+#define RX_MSDU_END_INFO4_L3_HDR_PADDING       GENMASK(11, 10)
+#define RX_MSDU_END_INFO4_FIRST_MSDU           BIT(12)
+#define RX_MSDU_END_INFO4_LAST_MSDU            BIT(13)
+
+#define RX_MSDU_END_INFO6_AGGR_COUNT           GENMASK(7, 0)
+#define RX_MSDU_END_INFO6_FLOW_AGGR_CONTN      BIT(8)
+#define RX_MSDU_END_INFO6_FISA_TIMEOUT         BIT(9)
+
+/*
+ * QCN9074 variant of the "msdu_end" RX descriptor.  Differs from the
+ * ipq8074 layout in having a 16-bit info4, an mpdu_length_info word
+ * (RX_MSDU_END_MPDU_LENGTH_INFO), and trailing cumulative L4 checksum /
+ * IP length fields.
+ */
+struct rx_msdu_end_qcn9074 {
+       uint16_t info0;
+       uint16_t phy_ppdu_id;
+       uint16_t ip_hdr_cksum;
+       uint16_t mpdu_length_info;
+       uint32_t info1;
+       uint32_t rule_indication[2];
+       uint32_t info2;
+       uint32_t ipv6_options_crc;
+       uint32_t tcp_seq_num;
+       uint32_t tcp_ack_num;
+       uint16_t info3;
+       uint16_t window_size;
+       uint16_t tcp_udp_cksum;
+       uint16_t info4;
+       uint16_t sa_idx;
+       uint16_t da_idx;
+       uint32_t info5;
+       uint32_t fse_metadata;
+       uint16_t cce_metadata;
+       uint16_t sa_sw_peer_id;
+       uint32_t info6;
+       uint16_t cum_l4_cksum;
+       uint16_t cum_ip_length;
+} __packed;
+
+/* rx_msdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ *             Field indicates what the reason was that this mpdu frame
+ *             was allowed to come into the receive path by rxpcu. Values
+ *             are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ *             SW processes frames based on certain classifications. Values
+ *             are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ *             A ppdu counter value that PHY increments for every PPDU
+ *             received. The counter value wraps around.
+ *
+ * ip_hdr_cksum
+ *             This can include the IP header checksum or the pseudo
+ *             header checksum used by TCP/UDP checksum.
+ *
+ * tcp_udp_chksum
+ *             The value of the computed TCP/UDP checksum.  A mode bit
+ *             selects whether this checksum is the full checksum or the
+ *             partial checksum which does not include the pseudo header.
+ *
+ * key_id
+ *             The key ID octet from the IV. Only valid when first_msdu is set.
+ *
+ * cce_super_rule
+ *             Indicates the super filter rule.
+ *
+ * cce_classify_not_done_truncate
+ *             Classification failed due to truncated frame.
+ *
+ * cce_classify_not_done_cce_dis
+ *             Classification failed due to CCE global disable
+ *
+ * ext_wapi_pn*
+ *             Extension PN (packet number) which is only used by WAPI.
+ *
+ * reported_mpdu_length
+ *             MPDU length before decapsulation. Only valid when first_msdu is
+ *             set. This field is taken directly from the length field of the
+ *             A-MPDU delimiter or the preamble length field for non-A-MPDU
+ *             frames.
+ *
+ * first_msdu
+ *             Indicates the first MSDU of A-MSDU. If both first_msdu and
+ *             last_msdu are set in the MSDU then this is a non-aggregated MSDU
+ *             frame: normal MPDU. Interior MSDU in an A-MSDU shall have both
+ *             first_mpdu and last_mpdu bits set to 0.
+ *
+ * last_msdu
+ *             Indicates the last MSDU of the A-MSDU. MPDU end status is only
+ *             valid when last_msdu is set.
+ *
+ * sa_idx_timeout
+ *             Indicates an unsuccessful MAC source address search due to the
+ *             expiring of the search timer.
+ *
+ * da_idx_timeout
+ *             Indicates an unsuccessful MAC destination address search due to
+ *             the expiring of the search timer.
+ *
+ * msdu_limit_error
+ *             Indicates that the MSDU threshold was exceeded and thus all the
+ *             rest of the MSDUs will not be scattered and will not be
+ *             decapsulated but will be DMA'ed in RAW format as a single MSDU.
+ *
+ * flow_idx_timeout
+ *             Indicates an unsuccessful flow search due to the expiring of
+ *             the search timer.
+ *
+ * flow_idx_invalid
+ *             flow id is not valid.
+ *
+ * amsdu_parser_error
+ *             A-MSDU could not be properly de-aggregated.
+ *
+ * sa_is_valid
+ *             Indicates that OLE found a valid SA entry.
+ *
+ * da_is_valid
+ *             Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ *             Field only valid if da_is_valid is set. Indicates the DA address
+ *             was a Multicast or Broadcast address.
+ *
+ * l3_header_padding
+ *             Number of bytes padded to make sure that the L3 header will
+ *             always start on a Dword boundary.
+ *
+ * ipv6_options_crc
+ *             32 bit CRC computed out of the IPv6 extension headers.
+ *
+ * tcp_seq_number
+ *             TCP sequence number.
+ *
+ * tcp_ack_number
+ *             TCP acknowledge number.
+ *
+ * tcp_flag
+ *             TCP flags {NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN}.
+ *
+ * lro_eligible
+ *             Computed out of TCP and IP fields to indicate that this
+ *             MSDU is eligible for LRO.
+ *
+ * window_size
+ *             TCP receive window size.
+ *
+ * da_offset
+ *             Offset into MSDU buffer for DA.
+ *
+ * sa_offset
+ *             Offset into MSDU buffer for SA.
+ *
+ * da_offset_valid
+ *             da_offset field is valid. This will be set to 0 in case
+ *             of a dynamic A-MSDU when DA is compressed.
+ *
+ * sa_offset_valid
+ *             sa_offset field is valid. This will be set to 0 in case
+ *             of a dynamic A-MSDU when SA is compressed.
+ *
+ * l3_type
+ *             The 16-bit type value indicating the type of L3 layer
+ *             extracted from LLC/SNAP, set to zero if SNAP is not
+ *             available.
+ *
+ * rule_indication
+ *             Bitmap indicating which of rules have matched.
+ *
+ * sa_idx
+ *             The offset in the address table which matches MAC source address
+ *
+ * da_idx
+ *             The offset in the address table which matches MAC destination
+ *             address.
+ *
+ * msdu_drop
+ *             REO shall drop this MSDU and not forward it to any other ring.
+ *
+ * reo_destination_indication
+ *             The id of the reo exit ring where the msdu frame shall push
+ *             after (MPDU level) reordering has finished. Values are defined
+ *             in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * flow_idx
+ *             Flow table index.
+ *
+ * fse_metadata
+ *             FSE related meta data.
+ *
+ * cce_metadata
+ *             CCE related meta data.
+ *
+ * sa_sw_peer_id
+ *             sw_peer_id from the address search entry corresponding to the
+ *             source address of the MSDU.
+ */
+
+/*
+ * Destination ring values carried in the
+ * RX_MPDU_END_INFO1_RXDMA{0,1}_DEST_RING fields: the ring RXDMA0/1 shall
+ * push the frame to (see the "rx_mpdu_end" comment below).
+ */
+enum rx_mpdu_end_rxdma_dest_ring {
+       RX_MPDU_END_RXDMA_DEST_RING_RELEASE,
+       RX_MPDU_END_RXDMA_DEST_RING_FW,
+       RX_MPDU_END_RXDMA_DEST_RING_SW,
+       RX_MPDU_END_RXDMA_DEST_RING_REO,
+};
+
+#define RX_MPDU_END_INFO1_UNSUP_KTYPE_SHORT_FRAME      BIT(11)
+#define RX_MPDU_END_INFO1_RX_IN_TX_DECRYPT_BYT         BIT(12)
+#define RX_MPDU_END_INFO1_OVERFLOW_ERR                 BIT(13)
+#define RX_MPDU_END_INFO1_MPDU_LEN_ERR                 BIT(14)
+#define RX_MPDU_END_INFO1_TKIP_MIC_ERR                 BIT(15)
+#define RX_MPDU_END_INFO1_DECRYPT_ERR                  BIT(16)
+#define RX_MPDU_END_INFO1_UNENCRYPTED_FRAME_ERR                BIT(17)
+#define RX_MPDU_END_INFO1_PN_FIELDS_VALID              BIT(18)
+#define RX_MPDU_END_INFO1_FCS_ERR                      BIT(19)
+#define RX_MPDU_END_INFO1_MSDU_LEN_ERR                 BIT(20)
+#define RX_MPDU_END_INFO1_RXDMA0_DEST_RING             GENMASK(22, 21)
+#define RX_MPDU_END_INFO1_RXDMA1_DEST_RING             GENMASK(24, 23)
+#define RX_MPDU_END_INFO1_DECRYPT_STATUS_CODE          GENMASK(27, 25)
+#define RX_MPDU_END_INFO1_RX_BITMAP_NOT_UPD            BIT(28)
+
+/*
+ * HW "mpdu_end" RX descriptor; info1 carries the RX_MPDU_END_INFO1_*
+ * per-MPDU error/status bits documented in the comment below.
+ */
+struct rx_mpdu_end {
+       uint16_t info0;
+       uint16_t phy_ppdu_id;
+       uint32_t info1;
+} __packed;
+
+/* rx_mpdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ *             Field indicates what the reason was that this mpdu frame
+ *             was allowed to come into the receive path by rxpcu. Values
+ *             are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ *             SW processes frames based on certain classifications. Values
+ *             are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ *             A ppdu counter value that PHY increments for every PPDU
+ *             received. The counter value wraps around.
+ *
+ * unsup_ktype_short_frame
+ *             This bit will be '1' when WEP or TKIP or WAPI key type is
+ *             received for 11ah short frame. Crypto will bypass the received
+ *             packet without decryption to RxOLE after setting this bit.
+ *
+ * rx_in_tx_decrypt_byp
+ *             Indicates that RX packet is not decrypted as Crypto is
+ *             busy with TX packet processing.
+ *
+ * overflow_err
+ *             RXPCU Receive FIFO ran out of space to receive the full MPDU.
+ *             Therefore this MPDU is terminated early and is thus corrupted.
+ *
+ *             This MPDU will not be ACKed.
+ *
+ *             RXPCU might still be able to correctly receive the following
+ *             MPDUs in the PPDU if enough fifo space became available in time.
+ *
+ * mpdu_length_err
+ *             Set by RXPCU if the expected MPDU length does not correspond
+ *             with the actually received number of bytes in the MPDU.
+ *
+ * tkip_mic_err
+ *             Set by Rx crypto when crypto detected a TKIP MIC error for
+ *             this MPDU.
+ *
+ * decrypt_err
+ *             Set by RX CRYPTO when CRYPTO detected a decrypt error for this
+ *             MPDU or CRYPTO received an encrypted frame, but did not get a
+ *             valid corresponding key id in the peer entry.
+ *
+ * unencrypted_frame_err
+ *             Set by RX CRYPTO when CRYPTO detected an unencrypted frame while
+ *             in the peer entry field 'All_frames_shall_be_encrypted' is set.
+ *
+ * pn_fields_contain_valid_info
+ *             Set by RX CRYPTO to indicate that there is a valid PN field
+ *             present in this MPDU.
+ *
+ * fcs_err
+ *             Set by RXPCU when there is an FCS error detected for this MPDU.
+ *
+ * msdu_length_err
+ *             Set by RXOLE when there is an msdu length error detected
+ *             in at least 1 of the MSDUs embedded within the MPDU.
+ *
+ * rxdma0_destination_ring
+ * rxdma1_destination_ring
+ *             The ring to which RXDMA0/1 shall push the frame, assuming
+ *             no MPDU level errors are detected. In case of MPDU level
+ *             errors, RXDMA0/1 might change the RXDMA0/1 destination. Values
+ *             are defined in %enum RX_MPDU_END_RXDMA_DEST_RING_*.
+ *
+ * decrypt_status_code
+ *             Field provides insight into the decryption performed. Values
+ *             are defined in enum %RX_DESC_DECRYPT_STATUS_CODE_*.
+ *
+ * rx_bitmap_not_updated
+ *             Frame is received, but RXPCU could not update the receive bitmap
+ *             due to (temporary) fifo constraints.
+ */
+
+/* Padding bytes to avoid TLV's spanning across 128 byte boundary */
+#define HAL_RX_DESC_PADDING0_BYTES     4
+#define HAL_RX_DESC_PADDING1_BYTES     16
+
+#define HAL_RX_DESC_HDR_STATUS_LEN     120
+
+/*
+ * Complete HW RX descriptor, IPQ8074 layout: TLV-tagged msdu_end,
+ * attention, msdu_start, mpdu_start and mpdu_end sub-descriptors with
+ * padding (HAL_RX_DESC_PADDING*_BYTES keep TLVs from spanning a 128 byte
+ * boundary), followed by the raw header status area and the MSDU payload.
+ */
+struct hal_rx_desc_ipq8074 {
+       uint32_t msdu_end_tag;
+       struct rx_msdu_end_ipq8074 msdu_end;
+       uint32_t rx_attn_tag;
+       struct rx_attention attention;
+       uint32_t msdu_start_tag;
+       struct rx_msdu_start_ipq8074 msdu_start;
+       uint8_t rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+       uint32_t mpdu_start_tag;
+       struct rx_mpdu_start_ipq8074 mpdu_start;
+       uint32_t mpdu_end_tag;
+       struct rx_mpdu_end mpdu_end;
+       uint8_t rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+       uint32_t hdr_status_tag;
+       uint32_t phy_ppdu_id;
+       uint8_t hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+       uint8_t msdu_payload[];
+} __packed;
+
+/* Complete HW RX descriptor, QCN9074 layout; same TLV structure as the
+ * ipq8074 variant but with the qcn9074 sub-descriptor layouts. */
+struct hal_rx_desc_qcn9074 {
+       uint32_t msdu_end_tag;
+       struct rx_msdu_end_qcn9074 msdu_end;
+       uint32_t rx_attn_tag;
+       struct rx_attention attention;
+       uint32_t msdu_start_tag;
+       struct rx_msdu_start_qcn9074 msdu_start;
+       uint8_t rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+       uint32_t mpdu_start_tag;
+       struct rx_mpdu_start_qcn9074 mpdu_start;
+       uint32_t mpdu_end_tag;
+       struct rx_mpdu_end mpdu_end;
+       uint8_t rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+       uint32_t hdr_status_tag;
+       uint32_t phy_ppdu_id;
+       uint8_t hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+       uint8_t msdu_payload[];
+} __packed;
+
+/* Complete HW RX descriptor, WCN6855 layout; same TLV structure as the
+ * ipq8074 variant but with the wcn6855 sub-descriptor layouts. */
+struct hal_rx_desc_wcn6855 {
+       uint32_t msdu_end_tag;
+       struct rx_msdu_end_wcn6855 msdu_end;
+       uint32_t rx_attn_tag;
+       struct rx_attention attention;
+       uint32_t msdu_start_tag;
+       struct rx_msdu_start_wcn6855 msdu_start;
+       uint8_t rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+       uint32_t mpdu_start_tag;
+       struct rx_mpdu_start_wcn6855 mpdu_start;
+       uint32_t mpdu_end_tag;
+       struct rx_mpdu_end mpdu_end;
+       uint8_t rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+       uint32_t hdr_status_tag;
+       uint32_t phy_ppdu_id;
+       uint8_t hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+       uint8_t msdu_payload[];
+} __packed;
+
+/*
+ * Chip-independent view of the RX descriptor: a union of the per-chip
+ * layouts above.  Code accesses the member matching the chip generation
+ * in use.
+ */
+struct hal_rx_desc {
+       union {
+               struct hal_rx_desc_ipq8074 ipq8074;
+               struct hal_rx_desc_qcn9074 qcn9074;
+               struct hal_rx_desc_wcn6855 wcn6855;
+       } u;
+} __packed;
+
+#define HAL_RX_RU_ALLOC_TYPE_MAX 6
+#define RU_26  1
+#define RU_52  2
+#define RU_106 4
+#define RU_242 9
+#define RU_484 18
+#define RU_996 37
+
+/*
+ * dp.h
+ */
+
+/* HTT definitions */
+
+#define HTT_TCL_META_DATA_TYPE                 BIT(0)
+#define HTT_TCL_META_DATA_VALID_HTT            BIT(1)
+
+/* vdev meta data */
+#define HTT_TCL_META_DATA_VDEV_ID              GENMASK(9, 2)
+#define HTT_TCL_META_DATA_PDEV_ID              GENMASK(11, 10)
+#define HTT_TCL_META_DATA_HOST_INSPECTED       BIT(12)
+
+/* peer meta data */
+#define HTT_TCL_META_DATA_PEER_ID              GENMASK(15, 2)
+
+#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
+
+#define HTT_INVALID_PEER_ID    0xffff
+
+/* HTT tx completion is overlaid in wbm_release_ring */
+#define HTT_TX_WBM_COMP_INFO0_STATUS           GENMASK(12, 9)
+#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON  GENMASK(16, 13)
+
+#define HTT_TX_WBM_COMP_INFO1_ACK_RSSI         GENMASK(31, 24)
+#define HTT_TX_WBM_COMP_INFO2_SW_PEER_ID       GENMASK(15, 0)
+#define HTT_TX_WBM_COMP_INFO2_VALID            BIT(21)
+
+/*
+ * HTT TX completion descriptor, overlaid in wbm_release_ring (see the
+ * comment above).  Bit layout of the info words is given by the
+ * HTT_TX_WBM_COMP_INFO* masks.
+ */
+struct htt_tx_wbm_completion {
+       uint32_t info0;
+       uint32_t info1;
+       uint32_t info2;
+       uint32_t info3;
+} __packed;
+
+/*
+ * Host-to-target HTT message type IDs, carried in the low byte of each
+ * command (cf. the GENMASK(7, 0) MSG_TYPE fields below).  Values are
+ * non-contiguous and fixed by the HTT interface.
+ */
+enum htt_h2t_msg_type {
+       HTT_H2T_MSG_TYPE_VERSION_REQ            = 0,
+       HTT_H2T_MSG_TYPE_SRING_SETUP            = 0xb,
+       HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG  = 0xc,
+       HTT_H2T_MSG_TYPE_EXT_STATS_CFG          = 0x10,
+       HTT_H2T_MSG_TYPE_PPDU_STATS_CFG         = 0x11,
+       HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE   = 0x17,
+};
+
+#define HTT_VER_REQ_INFO_MSG_ID                GENMASK(7, 0)
+
+/*
+ * HTT version request (HTT_H2T_MSG_TYPE_VERSION_REQ); the message ID
+ * goes in bits 7:0 (HTT_VER_REQ_INFO_MSG_ID).
+ * NOTE(review): field is spelled "ver_reg_info" while the mask says
+ * VER_REQ -- spelling inherited from the upstream driver.
+ */
+struct htt_ver_req_cmd {
+       uint32_t ver_reg_info;
+} __packed;
+
+/* SRNG ring direction, used in the ring_type byte of HTT_SRING_SETUP. */
+enum htt_srng_ring_type {
+       HTT_HW_TO_SW_RING,
+       HTT_SW_TO_HW_RING,
+       HTT_SW_TO_SW_RING,
+};
+
+/* Ring identifiers used in the ring_id byte of HTT ring setup/config
+ * messages (HTT_SRING_SETUP, RX_RING_SELECTION_CFG). */
+enum htt_srng_ring_id {
+       HTT_RXDMA_HOST_BUF_RING,
+       HTT_RXDMA_MONITOR_STATUS_RING,
+       HTT_RXDMA_MONITOR_BUF_RING,
+       HTT_RXDMA_MONITOR_DESC_RING,
+       HTT_RXDMA_MONITOR_DEST_RING,
+       HTT_HOST1_TO_FW_RXBUF_RING,
+       HTT_HOST2_TO_FW_RXBUF_RING,
+       HTT_RXDMA_NON_MONITOR_DEST_RING,
+};
+
+/* host -> target  HTT_SRING_SETUP message
+ *
+ * After target is booted up, Host can send SRING setup message for
+ * each host facing LMAC SRING. Target setups up HW registers based
+ * on setup message and confirms back to Host if response_required is set.
+ * Host should wait for confirmation message before sending new SRING
+ * setup message
+ *
+ * The message would appear as follows:
+ *
+ * |31            24|23    20|19|18 16|15|14          8|7                0|
+ * |--------------- +-----------------+----------------+------------------|
+ * |    ring_type   |      ring_id    |    pdev_id     |     msg_type     |
+ * |----------------------------------------------------------------------|
+ * |                          ring_base_addr_lo                           |
+ * |----------------------------------------------------------------------|
+ * |                         ring_base_addr_hi                            |
+ * |----------------------------------------------------------------------|
+ * |ring_misc_cfg_flag|ring_entry_size|            ring_size              |
+ * |----------------------------------------------------------------------|
+ * |                         ring_head_offset32_remote_addr_lo            |
+ * |----------------------------------------------------------------------|
+ * |                         ring_head_offset32_remote_addr_hi            |
+ * |----------------------------------------------------------------------|
+ * |                         ring_tail_offset32_remote_addr_lo            |
+ * |----------------------------------------------------------------------|
+ * |                         ring_tail_offset32_remote_addr_hi            |
+ * |----------------------------------------------------------------------|
+ * |                          ring_msi_addr_lo                            |
+ * |----------------------------------------------------------------------|
+ * |                          ring_msi_addr_hi                            |
+ * |----------------------------------------------------------------------|
+ * |                          ring_msi_data                               |
+ * |----------------------------------------------------------------------|
+ * |         intr_timer_th            |IM|      intr_batch_counter_th     |
+ * |----------------------------------------------------------------------|
+ * |          reserved        |RR|PTCF|        intr_low_threshold         |
+ * |----------------------------------------------------------------------|
+ * Where
+ *     IM = sw_intr_mode
+ *     RR = response_required
+ *     PTCF = prefetch_timer_cfg
+ *
+ * The message is interpreted as follows:
+ * dword0  - b'0:7   - msg_type: This will be set to
+ *                     HTT_H2T_MSG_TYPE_SRING_SETUP
+ *           b'8:15  - pdev_id:
+ *                     0 (for rings at SOC/UMAC level),
+ *                     1/2/3 mac id (for rings at LMAC level)
+ *           b'16:23 - ring_id: identify which ring is to setup,
+ *                     more details can be got from enum htt_srng_ring_id
+ *           b'24:31 - ring_type: identify type of host rings,
+ *                     more details can be got from enum htt_srng_ring_type
+ * dword1  - b'0:31  - ring_base_addr_lo: Lower 32bits of ring base address
+ * dword2  - b'0:31  - ring_base_addr_hi: Upper 32bits of ring base address
+ * dword3  - b'0:15  - ring_size: size of the ring in unit of 4-bytes words
+ *           b'16:23 - ring_entry_size: Size of each entry in 4-byte word units
+ *           b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and
+ *                     SW_TO_HW_RING.
+ *                     Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs.
+ * dword4  - b'0:31  - ring_head_off32_remote_addr_lo:
+ *                     Lower 32 bits of memory address of the remote variable
+ *                     storing the 4-byte word offset that identifies the head
+ *                     element within the ring.
+ *                     (The head offset variable has type uint32_t.)
+ *                     Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword5  - b'0:31  - ring_head_off32_remote_addr_hi:
+ *                     Upper 32 bits of memory address of the remote variable
+ *                     storing the 4-byte word offset that identifies the head
+ *                     element within the ring.
+ *                     (The head offset variable has type uint32_t.)
+ *                     Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword6  - b'0:31  - ring_tail_off32_remote_addr_lo:
+ *                     Lower 32 bits of memory address of the remote variable
+ *                     storing the 4-byte word offset that identifies the tail
+ *                     element within the ring.
+ *                     (The tail offset variable has type uint32_t.)
+ *                     Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword7  - b'0:31  - ring_tail_off32_remote_addr_hi:
+ *                     Upper 32 bits of memory address of the remote variable
+ *                     storing the 4-byte word offset that identifies the tail
+ *                     element within the ring.
+ *                     (The tail offset variable has type uint32_t.)
+ *                     Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword8  - b'0:31  - ring_msi_addr_lo: Lower 32bits of MSI cfg address
+ *                     valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword9  - b'0:31  - ring_msi_addr_hi: Upper 32bits of MSI cfg address
+ *                     valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword10 - b'0:31  - ring_msi_data: MSI data
+ *                     Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs
+ *                     valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword11 - b'0:14  - intr_batch_counter_th:
+ *                     batch counter threshold is in units of 4-byte words.
+ *                     HW internally maintains and increments batch count.
+ *                     (see SRING spec for detail description).
+ *                     When batch count reaches threshold value, an interrupt
+ *                     is generated by HW.
+ *           b'15    - sw_intr_mode:
+ *                     This configuration shall be static.
+ *                     Only programmed at power up.
+ *                     0: generate pulse style sw interrupts
+ *                     1: generate level style sw interrupts
+ *           b'16:31 - intr_timer_th:
+ *                     The timer init value when timer is idle or is
+ *                     initialized to start downcounting.
+ *                     In 8us units (to cover a range of 0 to 524 ms)
+ * dword12 - b'0:15  - intr_low_threshold:
+ *                     Used only by Consumer ring to generate ring_sw_int_p.
+ *                     Ring entries low threshold water mark, that is used
+ *                     in combination with the interrupt timer as well as
+ *                     the clearing of the level interrupt.
+ *           b'16:18 - prefetch_timer_cfg:
+ *                     Used only by Consumer ring to set timer mode to
+ *                     support Application prefetch handling.
+ *                     The external tail offset/pointer will be updated
+ *                     at following intervals:
+ *                     3'b000: (Prefetch feature disabled; used only for debug)
+ *                     3'b001: 1 usec
+ *                     3'b010: 4 usec
+ *                     3'b011: 8 usec (default)
+ *                     3'b100: 16 usec
+ *                     Others: Reserved
+ *           b'19    - response_required:
+ *                     Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
+ *           b'20:31 - reserved:  reserved for future use
+ */
+
+#define HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE      GENMASK(7, 0)
+#define HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID       GENMASK(15, 8)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_ID       GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE     GENMASK(31, 24)
+
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE                     GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE               GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS             BIT(25)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP           BIT(27)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP       BIT(28)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP           BIT(29)
+
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH      GENMASK(14, 0)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_SW_INTR_MODE              BIT(15)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH         GENMASK(31, 16)
+
+#define HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH       GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO2_PRE_FETCH_TIMER_CFG   BIT(16)
+#define HTT_SRNG_SETUP_CMD_INFO2_RESPONSE_REQUIRED     BIT(19)
+
+/*
+ * HTT_H2T_MSG_TYPE_SRING_SETUP command payload.  The dword-by-dword
+ * layout is documented in the large comment above; info*/intr_info are
+ * decoded with the HTT_SRNG_SETUP_CMD_* masks.
+ */
+struct htt_srng_setup_cmd {
+       uint32_t info0;
+       uint32_t ring_base_addr_lo;
+       uint32_t ring_base_addr_hi;
+       uint32_t info1;
+       uint32_t ring_head_off32_remote_addr_lo;
+       uint32_t ring_head_off32_remote_addr_hi;
+       uint32_t ring_tail_off32_remote_addr_lo;
+       uint32_t ring_tail_off32_remote_addr_hi;
+       uint32_t ring_msi_addr_lo;
+       uint32_t ring_msi_addr_hi;
+       uint32_t msi_data;
+       uint32_t intr_info;
+       uint32_t info2;
+} __packed;
+
+/* host -> target FW  PPDU_STATS config message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW for PPDU_STATS_CFG msg.
+ * The message allows the host to configure the PPDU_STATS_IND messages
+ * produced by the target.
+ *
+ * |31          24|23          16|15           8|7            0|
+ * |-----------------------------------------------------------|
+ * |    REQ bit mask             |   pdev_mask  |   msg type   |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Purpose: identifies this is a req to configure ppdu_stats_ind from target
+ *    Value: 0x11
+ *  - PDEV_MASK
+ *    Bits 8:15
+ *    Purpose: identifies which pdevs this PPDU stats configuration applies to
+ *    Value: This is a overloaded field, refer to usage and interpretation of
+ *           PDEV in interface document.
+ *           Bit   8    :  Reserved for SOC stats
+ *           Bit 9 - 15 :  Indicates PDEV_MASK in DBDC
+ *                         Indicates MACID_MASK in DBS
+ *  - REQ_TLV_BIT_MASK
+ *    Bits 16:31
+ *    Purpose: each set bit indicates the corresponding PPDU stats TLV type
+ *        needs to be included in the target's PPDU_STATS_IND messages.
+ *    Value: refer htt_ppdu_stats_tlv_tag_t <<<???
+ *
+ */
+
+/*
+ * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG command payload: a single dword whose
+ * bit layout is given by the HTT_PPDU_STATS_CFG_* masks below (message
+ * format documented in the comment above).
+ */
+struct htt_ppdu_stats_cfg_cmd {
+       uint32_t msg;
+} __packed;
+
+#define HTT_PPDU_STATS_CFG_MSG_TYPE            GENMASK(7, 0)
+#define HTT_PPDU_STATS_CFG_SOC_STATS           BIT(8)
+#define HTT_PPDU_STATS_CFG_PDEV_ID             GENMASK(15, 9)
+#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK    GENMASK(31, 16)
+
+/*
+ * PPDU stats TLV tag types.  The values double as bit positions in the
+ * HTT_PPDU_STATS_TAG_DEFAULT/PKTLOG request masks below.
+ */
+enum htt_ppdu_stats_tag_type {
+       HTT_PPDU_STATS_TAG_COMMON,
+       HTT_PPDU_STATS_TAG_USR_COMMON,
+       HTT_PPDU_STATS_TAG_USR_RATE,
+       HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64,
+       HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256,
+       HTT_PPDU_STATS_TAG_SCH_CMD_STATUS,
+       HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON,
+       HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64,
+       HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256,
+       HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS,
+       HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH,
+       HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY,
+       HTT_PPDU_STATS_TAG_INFO,
+       HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD,
+
+       /* New TLV's are added above to this line */
+       HTT_PPDU_STATS_TAG_MAX,
+};
+
+#define HTT_PPDU_STATS_TAG_DEFAULT (BIT(HTT_PPDU_STATS_TAG_COMMON) \
+                                  | BIT(HTT_PPDU_STATS_TAG_USR_COMMON) \
+                                  | BIT(HTT_PPDU_STATS_TAG_USR_RATE) \
+                                  | BIT(HTT_PPDU_STATS_TAG_SCH_CMD_STATUS) \
+                                  | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON) \
+                                  | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS) \
+                                  | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH) \
+                                  | BIT(HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY))
+
+#define HTT_PPDU_STATS_TAG_PKTLOG  (BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64) | \
+                                   BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256) | \
+                                   BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64) | \
+                                   BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256) | \
+                                   BIT(HTT_PPDU_STATS_TAG_INFO) | \
+                                   BIT(HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD) | \
+                                   HTT_PPDU_STATS_TAG_DEFAULT)
+
+/* HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message
+ *
+ * details:
+ *    HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by host to
+ *    configure RXDMA rings.
+ *    The configuration is per ring based and includes both packet subtypes
+ *    and PPDU/MPDU TLVs.
+ *
+ *    The message would appear as follows:
+ *
+ *    |31       26|25|24|23            16|15             8|7             0|
+ *    |-----------------+----------------+----------------+---------------|
+ *    |   rsvd1   |PS|SS|     ring_id    |     pdev_id    |    msg_type   |
+ *    |-------------------------------------------------------------------|
+ *    |              rsvd2               |           ring_buffer_size     |
+ *    |-------------------------------------------------------------------|
+ *    |                        packet_type_enable_flags_0                 |
+ *    |-------------------------------------------------------------------|
+ *    |                        packet_type_enable_flags_1                 |
+ *    |-------------------------------------------------------------------|
+ *    |                        packet_type_enable_flags_2                 |
+ *    |-------------------------------------------------------------------|
+ *    |                        packet_type_enable_flags_3                 |
+ *    |-------------------------------------------------------------------|
+ *    |                         tlv_filter_in_flags                       |
+ *    |-------------------------------------------------------------------|
+ * Where:
+ *     PS = pkt_swap
+ *     SS = status_swap
+ * The message is interpreted as follows:
+ * dword0 - b'0:7   - msg_type: This will be set to
+ *                    HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG
+ *          b'8:15  - pdev_id:
+ *                    0 (for rings at SOC/UMAC level),
+ *                    1/2/3 mac id (for rings at LMAC level)
+ *          b'16:23 - ring_id : Identify the ring to configure.
+ *                    More details can be got from enum htt_srng_ring_id
+ *          b'24    - status_swap: 1 is to swap status TLV
+ *          b'25    - pkt_swap:  1 is to swap packet TLV
+ *          b'26:31 - rsvd1:  reserved for future use
+ * dword1 - b'0:15  - ring_buffer_size: size of buffers referenced by rx ring,
+ *                    in byte units.
+ *                    Valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ *        - b'16:31 - rsvd2: Reserved for future use
+ * dword2 - b'0:31  - packet_type_enable_flags_0:
+ *                    Enable MGMT packet from 0b0000 to 0b1001
+ *                    bits from low to high: FP, MD, MO - 3 bits
+ *                        FP: Filter_Pass
+ *                        MD: Monitor_Direct
+ *                        MO: Monitor_Other
+ *                    10 mgmt subtypes * 3 bits -> 30 bits
+ *                    Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs
+ * dword3 - b'0:31  - packet_type_enable_flags_1:
+ *                    Enable MGMT packet from 0b1010 to 0b1111
+ *                    bits from low to high: FP, MD, MO - 3 bits
+ *                    Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs
+ * dword4 - b'0:31 -  packet_type_enable_flags_2:
+ *                    Enable CTRL packet from 0b0000 to 0b1001
+ *                    bits from low to high: FP, MD, MO - 3 bits
+ *                    Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs
+ * dword5 - b'0:31  - packet_type_enable_flags_3:
+ *                    Enable CTRL packet from 0b1010 to 0b1111,
+ *                    MCAST_DATA, UCAST_DATA, NULL_DATA
+ *                    bits from low to high: FP, MD, MO - 3 bits
+ *                    Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs
+ * dword6 - b'0:31 -  tlv_filter_in_flags:
+ *                    Filter in Attention/MPDU/PPDU/Header/User tlvs
+ *                    Refer to CFG_TLV_FILTER_IN_FLAG defs
+ */
+
+/* Field masks for dword0 (info0) / dword1 (info1) of the rx ring selection
+ * cfg command whose layout is documented above. */
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE   GENMASK(7, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID    GENMASK(15, 8)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID    GENMASK(23, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS         BIT(24)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS         BIT(25)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE   GENMASK(15, 0)
+
+/* TLV types which can be filtered in on the rx/monitor rings; used in the
+ * tlv_filter_in_flags word (dword6) of the rx ring selection cfg command. */
+enum htt_rx_filter_tlv_flags {
+       HTT_RX_FILTER_TLV_FLAGS_MPDU_START              = BIT(0),
+       HTT_RX_FILTER_TLV_FLAGS_MSDU_START              = BIT(1),
+       HTT_RX_FILTER_TLV_FLAGS_RX_PACKET               = BIT(2),
+       HTT_RX_FILTER_TLV_FLAGS_MSDU_END                = BIT(3),
+       HTT_RX_FILTER_TLV_FLAGS_MPDU_END                = BIT(4),
+       HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER           = BIT(5),
+       HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER         = BIT(6),
+       HTT_RX_FILTER_TLV_FLAGS_ATTENTION               = BIT(7),
+       HTT_RX_FILTER_TLV_FLAGS_PPDU_START              = BIT(8),
+       HTT_RX_FILTER_TLV_FLAGS_PPDU_END                = BIT(9),
+       HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS     = BIT(10),
+       HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
+       HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE    = BIT(12),
+};
+
+/* packet_type_enable_flags_0: management subtypes 0b0000-0b1001, three
+ * consecutive bits per subtype: FP (filter pass), MD (monitor direct),
+ * MO (monitor other).  See the dword2 description above. */
+enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ          = BIT(0),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ          = BIT(1),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ          = BIT(2),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP         = BIT(3),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP         = BIT(4),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP         = BIT(5),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ        = BIT(6),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ        = BIT(7),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ        = BIT(8),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP       = BIT(9),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP       = BIT(10),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP       = BIT(11),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ          = BIT(12),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ          = BIT(13),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ          = BIT(14),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP         = BIT(15),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP         = BIT(16),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP         = BIT(17),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV   = BIT(18),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV   = BIT(19),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV   = BIT(20),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7         = BIT(21),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7         = BIT(22),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7         = BIT(23),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON             = BIT(24),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON             = BIT(25),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON             = BIT(26),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM               = BIT(27),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM               = BIT(28),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM               = BIT(29),
+};
+
+/* packet_type_enable_flags_1: FP/MD/MO filter bits for management
+ * subtypes 0b1010-0b1111 (dword3 above). */
+enum htt_rx_mgmt_pkt_filter_tlv_flags1 {
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC           = BIT(0),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC           = BIT(1),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC           = BIT(2),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH               = BIT(3),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH               = BIT(4),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH               = BIT(5),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH             = BIT(6),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH             = BIT(7),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH             = BIT(8),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION             = BIT(9),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION             = BIT(10),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION             = BIT(11),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK       = BIT(12),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK       = BIT(13),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK       = BIT(14),
+       HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15        = BIT(15),
+       HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15        = BIT(16),
+       HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15        = BIT(17),
+};
+
+/* packet_type_enable_flags_2: FP/MD/MO filter bits for control
+ * subtypes 0b0000-0b1001 (dword4 above). */
+enum htt_rx_ctrl_pkt_filter_tlv_flags2 {
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1    = BIT(0),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1    = BIT(1),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1    = BIT(2),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2    = BIT(3),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2    = BIT(4),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2    = BIT(5),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER       = BIT(6),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER       = BIT(7),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER       = BIT(8),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4    = BIT(9),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4    = BIT(10),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4    = BIT(11),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL   = BIT(12),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL   = BIT(13),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL   = BIT(14),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP       = BIT(15),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP       = BIT(16),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP       = BIT(17),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT     = BIT(18),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT     = BIT(19),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT     = BIT(20),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER       = BIT(21),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER       = BIT(22),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER       = BIT(23),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR                = BIT(24),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR                = BIT(25),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR                = BIT(26),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA                 = BIT(27),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA                 = BIT(28),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA                 = BIT(29),
+};
+
+/* packet_type_enable_flags_3, low half: FP/MD/MO filter bits for control
+ * subtypes 0b1010-0b1111 (dword5 above); data bits follow at BIT(18). */
+enum htt_rx_ctrl_pkt_filter_tlv_flags3 {
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL             = BIT(0),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL             = BIT(1),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL             = BIT(2),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS                = BIT(3),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS                = BIT(4),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS                = BIT(5),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS                = BIT(6),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS                = BIT(7),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS                = BIT(8),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK                = BIT(9),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK                = BIT(10),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK                = BIT(11),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND              = BIT(12),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND              = BIT(13),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND              = BIT(14),
+       HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK          = BIT(15),
+       HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK          = BIT(16),
+       HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK          = BIT(17),
+};
+
+/* packet_type_enable_flags_3, high half: FP/MD/MO filter bits for data
+ * frames (MCAST/UCAST/NULL), sharing the word with the control bits above.
+ * The "FLASG" spelling is inherited from the Linux ath11k/ath12k headers. */
+enum htt_rx_data_pkt_filter_tlv_flasg3 {
+       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST      = BIT(18),
+       HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST      = BIT(19),
+       HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST      = BIT(20),
+       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST      = BIT(21),
+       HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST      = BIT(22),
+       HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST      = BIT(23),
+       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA  = BIT(24),
+       HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA  = BIT(25),
+       HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA  = BIT(26),
+};
+
+/* Convenience sets combining all subtype bits of one class (FP, MD or MO)
+ * within a given packet_type_enable_flags word. */
+#define HTT_RX_FP_MGMT_FILTER_FLAGS0 \
+       (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+       | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+       | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+       | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+       | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+       | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+       | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+       | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+       | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS0 \
+       (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+       | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+       | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+       | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+       | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+       | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+       | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+       | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+       | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS0 \
+       (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+       | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+       | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+       | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+       | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+       | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+       | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+       | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+       | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS1 (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+                                    | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+                                    | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+                                    | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+                                    | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS1 (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+                                    | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+                                    | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+                                    | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+                                    | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS1 (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+                                    | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+                                    | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+                                    | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+                                    | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG2 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+                                    | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+                                    | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG2 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+                                    | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+                                    | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG2 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+                                    | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+                                    | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG3 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+                                    | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+                                    | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+                                    | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+                                    | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+                                    | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG3 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+                                    | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+                                    | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+                                    | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+                                    | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+                                    | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG3 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+                                    | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+                                    | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+                                    | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+                                    | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+                                    | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_FP_DATA_FILTER_FLASG3 (HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+                                    | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+                                    | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MD_DATA_FILTER_FLASG3 (HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+                                    | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+                                    | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MO_DATA_FILTER_FLASG3 (HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+                                    | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+                                    | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+/* Filter sets used when programming rings for monitor mode: the basic
+ * FP/MO sets above plus the reserved/extension subtype bits. */
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 \
+               (HTT_RX_FP_MGMT_FILTER_FLAGS0 | \
+               HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS0 \
+               (HTT_RX_MO_MGMT_FILTER_FLAGS0 | \
+               HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 \
+               (HTT_RX_FP_MGMT_FILTER_FLAGS1 | \
+               HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS1 \
+               (HTT_RX_MO_MGMT_FILTER_FLAGS1 | \
+               HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG2 \
+               (HTT_RX_FP_CTRL_FILTER_FLASG2 | \
+               HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+               HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+               HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+               HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+               HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+               HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+               HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG2 \
+               (HTT_RX_MO_CTRL_FILTER_FLASG2 | \
+               HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+               HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+               HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+               HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+               HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+               HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+               HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG3 HTT_RX_FP_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG3 HTT_RX_MO_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_FP_DATA_FILTER_FLASG3 HTT_RX_FP_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_DATA_FILTER_FLASG3 HTT_RX_MO_DATA_FILTER_FLASG3
+
+/* TLV sets for the monitor destination, status and buffer rings. */
+#define HTT_RX_MON_FILTER_TLV_FLAGS \
+               (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+               HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+               HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+               HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+               HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+               HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING \
+               (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+               HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+               HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+               HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+               HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+               HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING \
+               (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+               HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+               HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+               HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+               HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+               HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+               HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+               HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
+
+/* Wire format of the H2T rx ring selection cfg command; field order matches
+ * the dword0..dword6 layout documented above. */
+struct htt_rx_ring_selection_cfg_cmd {
+       uint32_t info0;
+       uint32_t info1;
+       uint32_t pkt_type_en_flags0;
+       uint32_t pkt_type_en_flags1;
+       uint32_t pkt_type_en_flags2;
+       uint32_t pkt_type_en_flags3;
+       uint32_t rx_filter_tlv;
+} __packed;
+
+/* Driver-internal bundle of the filter words fed into the command above;
+ * not a wire structure, hence no __packed. */
+struct htt_rx_ring_tlv_filter {
+       uint32_t rx_filter; /* see htt_rx_filter_tlv_flags */
+       uint32_t pkt_filter_flags0; /* MGMT */
+       uint32_t pkt_filter_flags1; /* MGMT */
+       uint32_t pkt_filter_flags2; /* CTRL */
+       uint32_t pkt_filter_flags3; /* DATA */
+};
+
+/* H2T full monitor mode configuration message: info0 carries msg type and
+ * pdev id, cfg carries the enable bits and release-ring selection. */
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE    GENMASK(7, 0)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID     GENMASK(15, 8)
+
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE                        BIT(0)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END                BIT(1)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END    BIT(2)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING          GENMASK(10, 3)
+
+/* Enumeration for full monitor mode destination ring select
+ * 0 - REO destination ring select
+ * 1 - FW destination ring select
+ * 2 - SW destination ring select
+ * 3 - Release destination ring select
+ */
+enum htt_rx_full_mon_release_ring {
+       HTT_RX_MON_RING_REO,
+       HTT_RX_MON_RING_FW,
+       HTT_RX_MON_RING_SW,
+       HTT_RX_MON_RING_RELEASE,
+};
+
+struct htt_rx_full_monitor_mode_cfg_cmd {
+       uint32_t info0;
+       uint32_t cfg;
+} __packed;
+
+/* HTT message target->host */
+
+/* Target-to-host HTT message types; values are explicit and sparse to
+ * match the firmware interface. */
+enum htt_t2h_msg_type {
+       HTT_T2H_MSG_TYPE_VERSION_CONF,
+       HTT_T2H_MSG_TYPE_PEER_MAP       = 0x3,
+       HTT_T2H_MSG_TYPE_PEER_UNMAP     = 0x4,
+       HTT_T2H_MSG_TYPE_RX_ADDBA       = 0x5,
+       HTT_T2H_MSG_TYPE_PKTLOG         = 0x8,
+       HTT_T2H_MSG_TYPE_SEC_IND        = 0xb,
+       HTT_T2H_MSG_TYPE_PEER_MAP2      = 0x1e,
+       HTT_T2H_MSG_TYPE_PEER_UNMAP2    = 0x1f,
+       HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
+       HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
+       HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24,
+};
+
+#define HTT_TARGET_VERSION_MAJOR 3
+
+/* Fields of the version confirmation message's 'version' word. */
+#define HTT_T2H_MSG_TYPE               GENMASK(7, 0)
+#define HTT_T2H_VERSION_CONF_MINOR     GENMASK(15, 8)
+#define HTT_T2H_VERSION_CONF_MAJOR     GENMASK(23, 16)
+
+struct htt_t2h_version_conf_msg {
+       uint32_t version;
+} __packed;
+
+/* T2H peer map/unmap events: associate (or drop) a peer ID for a vdev;
+ * the peer MAC address is split into mac_addr_l32 plus the MAC_ADDR_H16
+ * field of info1. */
+#define HTT_T2H_PEER_MAP_INFO_VDEV_ID  GENMASK(15, 8)
+#define HTT_T2H_PEER_MAP_INFO_PEER_ID  GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16    GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID      GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL    GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M      BIT(16)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S      16
+
+struct htt_t2h_peer_map_event {
+       uint32_t info;
+       uint32_t mac_addr_l32;
+       uint32_t info1;
+       uint32_t info2;
+} __packed;
+
+/* Unmap event reuses the peer map field layout. */
+#define HTT_T2H_PEER_UNMAP_INFO_VDEV_ID        HTT_T2H_PEER_MAP_INFO_VDEV_ID
+#define HTT_T2H_PEER_UNMAP_INFO_PEER_ID        HTT_T2H_PEER_MAP_INFO_PEER_ID
+#define HTT_T2H_PEER_UNMAP_INFO1_MAC_ADDR_H16 \
+                                       HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_M HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_S HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S
+
+struct htt_t2h_peer_unmap_event {
+       uint32_t info;
+       uint32_t mac_addr_l32;
+       uint32_t info1;
+} __packed;
+
+/* Overlay for decoding a received T2H message once its type is known. */
+struct htt_resp_msg {
+       union {
+               struct htt_t2h_version_conf_msg version_msg;
+               struct htt_t2h_peer_map_event peer_map_ev;
+               struct htt_t2h_peer_unmap_event peer_unmap_ev;
+       };
+} __packed;
+
+/* Backpressure event: first word identifies pdev, ring type (UMAC/LMAC)
+ * and ring id; second word carries the ring's head (HP) and tail (TP)
+ * pointers at the time of the event. */
+#define HTT_BACKPRESSURE_EVENT_PDEV_ID_M GENMASK(15, 8)
+#define HTT_BACKPRESSURE_EVENT_RING_TYPE_M GENMASK(23, 16)
+#define HTT_BACKPRESSURE_EVENT_RING_ID_M GENMASK(31, 24)
+
+#define HTT_BACKPRESSURE_EVENT_HP_M GENMASK(15, 0)
+#define HTT_BACKPRESSURE_EVENT_TP_M GENMASK(31, 16)
+
+#define HTT_BACKPRESSURE_UMAC_RING_TYPE        0
+#define HTT_BACKPRESSURE_LMAC_RING_TYPE        1
+
+/* Ring ids reported with HTT_BACKPRESSURE_UMAC_RING_TYPE. */
+enum htt_backpressure_umac_ringid {
+       HTT_SW_RING_IDX_REO_REO2SW1_RING,
+       HTT_SW_RING_IDX_REO_REO2SW2_RING,
+       HTT_SW_RING_IDX_REO_REO2SW3_RING,
+       HTT_SW_RING_IDX_REO_REO2SW4_RING,
+       HTT_SW_RING_IDX_REO_WBM2REO_LINK_RING,
+       HTT_SW_RING_IDX_REO_REO2TCL_RING,
+       HTT_SW_RING_IDX_REO_REO2FW_RING,
+       HTT_SW_RING_IDX_REO_REO_RELEASE_RING,
+       HTT_SW_RING_IDX_WBM_PPE_RELEASE_RING,
+       HTT_SW_RING_IDX_TCL_TCL2TQM_RING,
+       HTT_SW_RING_IDX_WBM_TQM_RELEASE_RING,
+       HTT_SW_RING_IDX_WBM_REO_RELEASE_RING,
+       HTT_SW_RING_IDX_WBM_WBM2SW0_RELEASE_RING,
+       HTT_SW_RING_IDX_WBM_WBM2SW1_RELEASE_RING,
+       HTT_SW_RING_IDX_WBM_WBM2SW2_RELEASE_RING,
+       HTT_SW_RING_IDX_WBM_WBM2SW3_RELEASE_RING,
+       HTT_SW_RING_IDX_REO_REO_CMD_RING,
+       HTT_SW_RING_IDX_REO_REO_STATUS_RING,
+       HTT_SW_UMAC_RING_IDX_MAX,
+};
+
+/* Ring ids reported with HTT_BACKPRESSURE_LMAC_RING_TYPE. */
+enum htt_backpressure_lmac_ringid {
+       HTT_SW_RING_IDX_FW2RXDMA_BUF_RING,
+       HTT_SW_RING_IDX_FW2RXDMA_STATUS_RING,
+       HTT_SW_RING_IDX_FW2RXDMA_LINK_RING,
+       HTT_SW_RING_IDX_SW2RXDMA_BUF_RING,
+       HTT_SW_RING_IDX_WBM2RXDMA_LINK_RING,
+       HTT_SW_RING_IDX_RXDMA2FW_RING,
+       HTT_SW_RING_IDX_RXDMA2SW_RING,
+       HTT_SW_RING_IDX_RXDMA2RELEASE_RING,
+       HTT_SW_RING_IDX_RXDMA2REO_RING,
+       HTT_SW_RING_IDX_MONITOR_STATUS_RING,
+       HTT_SW_RING_IDX_MONITOR_BUF_RING,
+       HTT_SW_RING_IDX_MONITOR_DESC_RING,
+       HTT_SW_RING_IDX_MONITOR_DEST_RING,
+       HTT_SW_LMAC_RING_IDX_MAX,
+};
+
+/* ppdu stats
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host ppdu stats indication message.
+ *
+ *
+ * |31                         16|15   12|11   10|9      8|7            0 |
+ * |----------------------------------------------------------------------|
+ * |    payload_size             | rsvd  |pdev_id|mac_id  |    msg type   |
+ * |----------------------------------------------------------------------|
+ * |                          ppdu_id                                     |
+ * |----------------------------------------------------------------------|
+ * |                        Timestamp in us                               |
+ * |----------------------------------------------------------------------|
+ * |                          reserved                                    |
+ * |----------------------------------------------------------------------|
+ * |                    type-specific stats info                          |
+ * |                     (see htt_ppdu_stats.h)                           |
+ * |----------------------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Purpose: Identifies this is a PPDU STATS indication
+ *             message.
+ *    Value: 0x1d
+ *  - mac_id
+ *    Bits 9:8
+ *    Purpose: mac_id of this ppdu_id
+ *    Value: 0-3
+ *  - pdev_id
+ *    Bits 11:10
+ *    Purpose: pdev_id of this ppdu_id
+ *    Value: 0-3
+ *     0 (for rings at SOC level),
+ *     1/2/3 PDEV -> 0/1/2
+ *  - payload_size
+ *    Bits 31:16
+ *    Purpose: total tlv size
+ *    Value: payload_size in bytes
+ */
+
+/* T2H PPDU stats indication; header layout is documented in the comment
+ * above, the trailing data[] holds the type-specific stats TLVs. */
+#define HTT_T2H_PPDU_STATS_INFO_PDEV_ID GENMASK(11, 10)
+#define HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE GENMASK(31, 16)
+
+struct ath12k_htt_ppdu_stats_msg {
+       uint32_t info;
+       uint32_t ppdu_id;
+       uint32_t timestamp;
+       uint32_t rsvd;
+       uint8_t data[];
+} __packed;
+
+/*
+ * Generic HTT TLV header; 'header' packs the tag and length fields below.
+ * NOTE(review): Linux ath11k/ath12k declare 'value' as a flexible array
+ * member (u8 value[]), making sizeof(struct htt_tlv) equal to the 4-byte
+ * on-wire header.  With a pointer member the struct is larger than the
+ * wire header, which matters wherever this struct is embedded (e.g.
+ * struct htt_tx_ppdu_stats_info below) -- confirm intended layout.
+ */
+struct htt_tlv {
+       uint32_t header;
+       uint8_t *value;
+} __packed;
+
+#define HTT_TLV_TAG                    GENMASK(11, 0)
+#define HTT_TLV_LEN                    GENMASK(23, 12)
+
+/* PPDU bandwidth values as reported in the common stats flags (BW_M). */
+enum HTT_PPDU_STATS_BW {
+       HTT_PPDU_STATS_BANDWIDTH_5MHZ   = 0,
+       HTT_PPDU_STATS_BANDWIDTH_10MHZ  = 1,
+       HTT_PPDU_STATS_BANDWIDTH_20MHZ  = 2,
+       HTT_PPDU_STATS_BANDWIDTH_40MHZ  = 3,
+       HTT_PPDU_STATS_BANDWIDTH_80MHZ  = 4,
+       HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */
+       HTT_PPDU_STATS_BANDWIDTH_DYN    = 6,
+};
+
+#define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M  GENMASK(7, 0)
+#define HTT_PPDU_STATS_CMN_FLAGS_QUEUE_TYPE_M  GENMASK(15, 8)
+/* bw - HTT_PPDU_STATS_BW */
+#define HTT_PPDU_STATS_CMN_FLAGS_BW_M          GENMASK(19, 16)
+
+/* Common (per-PPDU) TLV of the PPDU stats indication. */
+struct htt_ppdu_stats_common {
+       uint32_t ppdu_id;
+       uint16_t sched_cmdid;
+       uint8_t ring_id;
+       uint8_t num_users;
+       uint32_t flags; /* %HTT_PPDU_STATS_COMMON_FLAGS_*/
+       uint32_t chain_mask;
+       uint32_t fes_duration_us; /* frame exchange sequence */
+       uint32_t ppdu_sch_eval_start_tstmp_us;
+       uint32_t ppdu_sch_end_tstmp_us;
+       uint32_t ppdu_start_tstmp_us;
+       /* BIT [15 :  0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted
+        * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted
+        */
+       uint16_t phy_mode;
+       uint16_t bw_mhz;
+} __packed;
+
+/* Guard interval encoding used by the GI field of the user-rate flags. */
+enum htt_ppdu_stats_gi {
+       HTT_PPDU_STATS_SGI_0_8_US,
+       HTT_PPDU_STATS_SGI_0_4_US,
+       HTT_PPDU_STATS_SGI_1_6_US,
+       HTT_PPDU_STATS_SGI_3_2_US,
+};
+
+/* Field masks for the user-rate TLV words (info0/info1/rate_flags). */
+#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M      GENMASK(3, 0)
+#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M   GENMASK(11, 4)
+
+#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M        BIT(0)
+#define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M     GENMASK(5, 1)
+
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M      GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_STBC_M          BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_HE_RE_M         BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_TXBF_M          GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M            GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M           GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M           GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M      GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M            GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M           BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M          BIT(29)
+
+/* Accessors extracting the individual rate fields from rate_flags. */
+#define HTT_USR_RATE_PREAMBLE(_val) \
+               FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M, _val)
+#define HTT_USR_RATE_BW(_val) \
+               FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M, _val)
+#define HTT_USR_RATE_NSS(_val) \
+               FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M, _val)
+#define HTT_USR_RATE_MCS(_val) \
+               FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M, _val)
+#define HTT_USR_RATE_GI(_val) \
+               FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M, _val)
+#define HTT_USR_RATE_DCM(_val) \
+               FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M, _val)
+
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M         GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M             BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_HE_RE_M            BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_TXBF_M             GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_BW_M               GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_NSS_M              GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_MCS_M              GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PREAMBLE_M         GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M               GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M              BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M             BIT(29)
+
+struct htt_ppdu_stats_user_rate {
+       uint8_t tid_num;
+       uint8_t reserved0;
+       uint16_t sw_peer_id;
+       uint32_t info0; /* %HTT_PPDU_STATS_USER_RATE_INFO0_*/
+       uint16_t ru_end;
+       uint16_t ru_start;
+       uint16_t resp_ru_end;
+       uint16_t resp_ru_start;
+       uint32_t info1; /* %HTT_PPDU_STATS_USER_RATE_INFO1_ */
+       uint32_t rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */
+       /* Note: resp_rate_flags is only valid if resp_type is UL */
+       uint32_t resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M                GENMASK(7, 0)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M                BIT(8)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M   GENMASK(10, 9)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BW_M              GENMASK(13, 11)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_SGI_M             BIT(14)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M          GENMASK(31, 16)
+
+#define HTT_TX_INFO_IS_AMSDU(_flags) \
+                       FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M, _flags)
+#define HTT_TX_INFO_BA_ACK_FAILED(_flags) \
+                       FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M, _flags)
+#define HTT_TX_INFO_RATECODE(_flags) \
+                       FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M, _flags)
+#define HTT_TX_INFO_PEERID(_flags) \
+                       FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M, _flags)
+
+struct htt_tx_ppdu_stats_info {
+       struct htt_tlv tlv_hdr;
+       uint32_t tx_success_bytes;
+       uint32_t tx_retry_bytes;
+       uint32_t tx_failed_bytes;
+       uint32_t flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
+       uint16_t tx_success_msdus;
+       uint16_t tx_retry_msdus;
+       uint16_t tx_failed_msdus;
+       uint16_t tx_duration; /* units in microseconds */
+} __packed;
+
+enum  htt_ppdu_stats_usr_compln_status {
+       HTT_PPDU_STATS_USER_STATUS_OK,
+       HTT_PPDU_STATS_USER_STATUS_FILTERED,
+       HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT,
+       HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH,
+       HTT_PPDU_STATS_USER_STATUS_ABORT,
+};
+
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M       GENMASK(3, 0)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M      GENMASK(7, 4)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M         BIT(8)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_RESP_TYPE_M                GENMASK(12, 9)
+
+#define HTT_USR_CMPLTN_IS_AMPDU(_val) \
+           FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M, _val)
+#define HTT_USR_CMPLTN_LONG_RETRY(_val) \
+           FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M, _val)
+#define HTT_USR_CMPLTN_SHORT_RETRY(_val) \
+           FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M, _val)
+
+struct htt_ppdu_stats_usr_cmpltn_cmn {
+       uint8_t status;
+       uint8_t tid_num;
+       uint16_t sw_peer_id;
+       /* RSSI value of last ack packet (units = dB above noise floor) */
+       uint32_t ack_rssi;
+       uint16_t mpdu_tried;
+       uint16_t mpdu_success;
+       uint32_t flags; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MPDU_M  GENMASK(8, 0)
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M  GENMASK(24, 9)
+#define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM     GENMASK(31, 25)
+
+#define HTT_PPDU_STATS_NON_QOS_TID     16
+
+struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
+       uint32_t ppdu_id;
+       uint16_t sw_peer_id;
+       uint16_t reserved0;
+       uint32_t info; /* %HTT_PPDU_STATS_ACK_BA_INFO_ */
+       uint16_t current_seq;
+       uint16_t start_seq;
+       uint32_t success_bytes;
+} __packed;
+
+struct htt_ppdu_stats_usr_cmn_array {
+       struct htt_tlv tlv_hdr;
+       uint32_t num_ppdu_stats;
+       /* tx_ppdu_stats_info is filled by multiple struct htt_tx_ppdu_stats_info
+        * elements.
+        * tx_ppdu_stats_info is variable length, with length =
+        *     number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
+        */
+       struct htt_tx_ppdu_stats_info tx_ppdu_info[];
+} __packed;
+
+struct htt_ppdu_user_stats {
+       uint16_t peer_id;
+       uint32_t tlv_flags;
+       bool is_valid_peer_id;
+       struct htt_ppdu_stats_user_rate rate;
+       struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn;
+       struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba;
+};
+
+#define HTT_PPDU_STATS_MAX_USERS       8
+#define HTT_PPDU_DESC_MAX_DEPTH        16
+
+struct htt_ppdu_stats {
+       struct htt_ppdu_stats_common common;
+       struct htt_ppdu_user_stats user_stats[HTT_PPDU_STATS_MAX_USERS];
+};
+
+struct htt_ppdu_stats_info {
+       uint32_t ppdu_id;
+       struct htt_ppdu_stats ppdu_stats;
+#if 0
+       struct list_head list;
+#endif
+};
+
+/* @brief target -> host packet log message
+ *
+ * @details
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable number
+ * of 32-bit character values.
+ *
+ * |31                         16|15  12|11   10|9    8|7            0|
+ * |------------------------------------------------------------------|
+ * |        payload_size         | rsvd |pdev_id|mac_id|   msg type   |
+ * |------------------------------------------------------------------|
+ * |                              payload                             |
+ * |------------------------------------------------------------------|
+ *   - MSG_TYPE
+ *     Bits 7:0
+ *     Purpose: identifies this as a pktlog message
+ *     Value: HTT_T2H_MSG_TYPE_PKTLOG
+ *   - mac_id
+ *     Bits 9:8
+ *     Purpose: identifies which MAC/PHY instance generated this pktlog info
+ *     Value: 0-3
+ *   - pdev_id
+ *     Bits 11:10
+ *     Purpose: pdev_id
+ *     Value: 0-3
+ *     0 (for rings at SOC level),
+ *     1/2/3 PDEV -> 0/1/2
+ *   - payload_size
+ *     Bits 31:16
+ *     Purpose: explicitly specify the payload size
+ *     Value: payload size in bytes (payload size is a multiple of 4 bytes)
+ */
+struct htt_pktlog_msg {
+       uint32_t hdr;
+       uint8_t payload[];
+};
+
+/* @brief host -> target FW extended statistics retrieve
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW extended stats retrieve message.
+ * The message specifies the type of stats the host wants to retrieve.
+ *
+ * |31          24|23          16|15           8|7            0|
+ * |-----------------------------------------------------------|
+ * |   reserved   | stats type   |   pdev_mask  |   msg type   |
+ * |-----------------------------------------------------------|
+ * |                   config param [0]                        |
+ * |-----------------------------------------------------------|
+ * |                   config param [1]                        |
+ * |-----------------------------------------------------------|
+ * |                   config param [2]                        |
+ * |-----------------------------------------------------------|
+ * |                   config param [3]                        |
+ * |-----------------------------------------------------------|
+ * |                         reserved                          |
+ * |-----------------------------------------------------------|
+ * |                        cookie LSBs                        |
+ * |-----------------------------------------------------------|
+ * |                        cookie MSBs                        |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Purpose: identifies this as an extended stats upload request message
+ *    Value: 0x10
+ *  - PDEV_MASK
+ *    Bits 15:8
+ *    Purpose: identifies the mask of PDEVs to retrieve stats from
+ *    Value: This is an overloaded field, refer to usage and interpretation of
+ *           PDEV in interface document.
+ *           Bit   8    :  Reserved for SOC stats
+ *           Bit 9 - 15 :  Indicates PDEV_MASK in DBDC
+ *                         Indicates MACID_MASK in DBS
+ *  - STATS_TYPE
+ *    Bits 23:16
+ *    Purpose: identifies which FW statistics to upload
+ *    Value: Defined by htt_dbg_ext_stats_type (see htt_stats.h)
+ *  - Reserved
+ *    Bits 31:24
+ *  - CONFIG_PARAM [0]
+ *    Bits 31:0
+ *    Purpose: give an opaque configuration value to the specified stats type
+ *    Value: stats-type specific configuration value
+ *           Refer to htt_stats.h for interpretation for each stats sub_type
+ *  - CONFIG_PARAM [1]
+ *    Bits 31:0
+ *    Purpose: give an opaque configuration value to the specified stats type
+ *    Value: stats-type specific configuration value
+ *           Refer to htt_stats.h for interpretation for each stats sub_type
+ *  - CONFIG_PARAM [2]
+ *    Bits 31:0
+ *    Purpose: give an opaque configuration value to the specified stats type
+ *    Value: stats-type specific configuration value
+ *           Refer to htt_stats.h for interpretation for each stats sub_type
+ *  - CONFIG_PARAM [3]
+ *    Bits 31:0
+ *    Purpose: give an opaque configuration value to the specified stats type
+ *    Value: stats-type specific configuration value
+ *           Refer to htt_stats.h for interpretation for each stats sub_type
+ *  - Reserved [31:0] for future use.
+ *  - COOKIE_LSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: LSBs of the opaque cookie specified by the host-side requestor
+ *  - COOKIE_MSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: MSBs of the opaque cookie specified by the host-side requestor
+ */
+
+struct htt_ext_stats_cfg_hdr {
+       uint8_t msg_type;
+       uint8_t pdev_mask;
+       uint8_t stats_type;
+       uint8_t reserved;
+} __packed;
+
+struct htt_ext_stats_cfg_cmd {
+       struct htt_ext_stats_cfg_hdr hdr;
+       uint32_t cfg_param0;
+       uint32_t cfg_param1;
+       uint32_t cfg_param2;
+       uint32_t cfg_param3;
+       uint32_t reserved;
+       uint32_t cookie_lsb;
+       uint32_t cookie_msb;
+} __packed;
+
+/* htt stats config default params */
+#define HTT_STAT_DEFAULT_RESET_START_OFFSET 0
+#define HTT_STAT_DEFAULT_CFG0_ALL_HWQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_TXQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_CMDQS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_RINGS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS 0xff
+#define HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE 0x00
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS 0x00
+
+/* HTT_DBG_EXT_STATS_PEER_INFO
+ * PARAMS:
+ * @config_param0:
+ *  [Bit0] - [0] for sw_peer_id, [1] for mac_addr based request
+ *  [Bit15 : Bit 1] htt_peer_stats_req_mode_t
+ *  [Bit31 : Bit16] sw_peer_id
+ * @config_param1:
+ *  peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum)
+ *   0 bit htt_peer_stats_cmn_tlv
+ *   1 bit htt_peer_details_tlv
+ *   2 bit htt_tx_peer_rate_stats_tlv
+ *   3 bit htt_rx_peer_rate_stats_tlv
+ *   4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv
+ *   5 bit htt_rx_tid_stats_tlv
+ *   6 bit htt_msdu_flow_stats_tlv
+ * @config_param2: [Bit31 : Bit0] mac_addr31to0
+ * @config_param3: [Bit15 : Bit0] mac_addr47to32
+ *                [Bit31 : Bit16] reserved
+ */
+#define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0)
+#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f
+
+/* Used to set different configs to the specified stats type.*/
+struct htt_ext_stats_cfg_params {
+       uint32_t cfg0;
+       uint32_t cfg1;
+       uint32_t cfg2;
+       uint32_t cfg3;
+};
+
+/* @brief target -> host extended statistics upload
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for, and a single stats can span over multiple HTT stats indication
+ * due to the HTT message size limitation so every HTT ext stats indication
+ * will have tag-length-value stats information elements.
+ * The tag-length header for each HTT stats IND message also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A Done bit set to 1 indicates the end of the stats info elements.
+ *
+ *
+ * |31                         16|15    12|11|10 8|7   5|4       0|
+ * |--------------------------------------------------------------|
+ * |                   reserved                   |    msg type   |
+ * |--------------------------------------------------------------|
+ * |                         cookie LSBs                          |
+ * |--------------------------------------------------------------|
+ * |                         cookie MSBs                          |
+ * |--------------------------------------------------------------|
+ * |      stats entry length     | rsvd   | D|  S |   stat type   |
+ * |--------------------------------------------------------------|
+ * |                   type-specific stats info                   |
+ * |                      (see htt_stats.h)                       |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Purpose: Identifies this as an extended statistics upload confirmation
+ *             message.
+ *    Value: 0x1c
+ *  - COOKIE_LSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: LSBs of the opaque cookie specified by the host-side requestor
+ *  - COOKIE_MSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ *  - STAT_TYPE
+ *    Bits 7:0
+ *    Purpose: identifies the type of statistics info held in the
+ *        following information element
+ *    Value: htt_dbg_ext_stats_type
+ *  - STATUS
+ *    Bits 10:8
+ *    Purpose: indicate whether the requested stats are present
+ *    Value: htt_dbg_ext_stats_status
+ *  - DONE
+ *    Bit 11
+ *    Purpose:
+ *        Indicates the completion of the stats entry, this will be the last
+ *        stats conf HTT segment for the requested stats type.
+ *    Value:
+ *        0 -> the stats retrieval is ongoing
+ *        1 -> the stats retrieval is complete
+ *  - LENGTH
+ *    Bits 31:16
+ *    Purpose: indicate the stats information size
+ *    Value: This field specifies the number of bytes of stats information
+ *       that follows the element tag-length header.
+ *       It is expected but not required that this length is a multiple of
+ *       4 bytes.
+ */
+
+#define HTT_T2H_EXT_STATS_INFO1_DONE   BIT(11)
+#define HTT_T2H_EXT_STATS_INFO1_LENGTH   GENMASK(31, 16)
+
+struct ath12k_htt_extd_stats_msg {
+       uint32_t info0;
+       uint64_t cookie;
+       uint32_t info1;
+       uint8_t data[];
+} __packed;
+
+#define        HTT_MAC_ADDR_L32_0      GENMASK(7, 0)
+#define        HTT_MAC_ADDR_L32_1      GENMASK(15, 8)
+#define        HTT_MAC_ADDR_L32_2      GENMASK(23, 16)
+#define        HTT_MAC_ADDR_L32_3      GENMASK(31, 24)
+#define        HTT_MAC_ADDR_H16_0      GENMASK(7, 0)
+#define        HTT_MAC_ADDR_H16_1      GENMASK(15, 8)
diff --git a/sys/dev/ic/qwzvar.h b/sys/dev/ic/qwzvar.h
new file mode 100644 (file)
index 0000000..ee27c3f
--- /dev/null
@@ -0,0 +1,2031 @@
+/*     $OpenBSD: qwzvar.h,v 1.1 2024/08/14 14:40:46 patrick Exp $      */
+
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted (subject to the limitations in the disclaimer
+ * below) provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ *  * Neither the name of [Owner Organization] nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
+ * THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
+ * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef QWZ_DEBUG
+#define DPRINTF(x...)          do { if (qwz_debug) printf(x); } while(0)
+#define DNPRINTF(n,x...)       do { if (qwz_debug & (n)) printf(x); } while(0)
+#define        QWZ_D_MISC              0x00000001
+#define        QWZ_D_MHI               0x00000002
+#define        QWZ_D_QMI               0x00000004
+#define        QWZ_D_WMI               0x00000008
+#define        QWZ_D_HTC               0x00000010
+#define        QWZ_D_HTT               0x00000020
+#define        QWZ_D_MAC               0x00000040
+#define        QWZ_D_MGMT              0x00000080
+#define        QWZ_D_CE                0x00000100
+extern uint32_t        qwz_debug;      /* bitmask of enabled QWZ_D_* classes */
+#else
+#define DPRINTF(x...)
+#define DNPRINTF(n,x...)
+#endif
+
+struct qwz_softc;
+
+#define ATH12K_EXT_IRQ_GRP_NUM_MAX 11
+
+struct ath12k_hw_ring_mask {
+       uint8_t tx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+       uint8_t rx_mon_status[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+       uint8_t rx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+       uint8_t rx_err[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+       uint8_t rx_wbm_rel[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+       uint8_t reo_status[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+       uint8_t rxdma2host[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+       uint8_t host2rxdma[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+};
+
+#define ATH12K_FW_DIR                  "qwz"
+
+#define ATH12K_BOARD_MAGIC             "QCA-ATH12K-BOARD"
+#define ATH12K_BOARD_API2_FILE         "board-2"
+#define ATH12K_DEFAULT_BOARD_FILE      "board"
+#define ATH12K_DEFAULT_CAL_FILE                "caldata"
+#define ATH12K_AMSS_FILE               "amss"
+#define ATH12K_M3_FILE                 "m3"
+#define ATH12K_REGDB_FILE              "regdb"
+
+#define QWZ_FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="
+
+struct ath12k_hw_tcl2wbm_rbm_map {
+       uint8_t tcl_ring_num;
+       uint8_t wbm_ring_num;
+       uint8_t rbm_id;
+};
+
+/**
+ * enum hal_rx_buf_return_buf_manager
+ *
+ * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
+ * @HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ *     descriptor list.
+ * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
+ * @HAL_RX_BUF_RBM_SW0_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host
+ */
+
+enum hal_rx_buf_return_buf_manager {
+       HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
+       HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST,
+       HAL_RX_BUF_RBM_FW_BM,
+       HAL_RX_BUF_RBM_SW0_BM,
+       HAL_RX_BUF_RBM_SW1_BM,
+       HAL_RX_BUF_RBM_SW2_BM,
+       HAL_RX_BUF_RBM_SW3_BM,
+       HAL_RX_BUF_RBM_SW4_BM,
+};
+
+struct ath12k_hw_hal_params {
+       enum hal_rx_buf_return_buf_manager rx_buf_rbm;
+       const struct ath12k_hw_tcl2wbm_rbm_map *tcl2wbm_rbm_map;
+};
+
+struct hal_tx_info {
+       uint16_t meta_data_flags; /* %HAL_TCL_DATA_CMD_INFO0_META_ */
+       uint8_t ring_id;
+       uint32_t desc_id;
+       enum hal_tcl_desc_type type;
+       enum hal_tcl_encap_type encap_type;
+       uint64_t paddr;
+       uint32_t data_len;
+       uint32_t pkt_offset;
+       enum hal_encrypt_type encrypt_type;
+       uint32_t flags0; /* %HAL_TCL_DATA_CMD_INFO1_ */
+       uint32_t flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */
+       uint16_t addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */
+       uint16_t bss_ast_hash;
+       uint16_t bss_ast_idx;
+       uint8_t tid;
+       uint8_t search_type; /* %HAL_TX_ADDR_SEARCH_ */
+       uint8_t lmac_id;
+       uint8_t dscp_tid_tbl_idx;
+       bool enable_mesh;
+       uint8_t rbm_id;
+};
+
+/* TODO: Check if the actual desc macros can be used instead */
+#define HAL_TX_STATUS_FLAGS_FIRST_MSDU         BIT(0)
+#define HAL_TX_STATUS_FLAGS_LAST_MSDU          BIT(1)
+#define HAL_TX_STATUS_FLAGS_MSDU_IN_AMSDU      BIT(2)
+#define HAL_TX_STATUS_FLAGS_RATE_STATS_VALID   BIT(3)
+#define HAL_TX_STATUS_FLAGS_RATE_LDPC          BIT(4)
+#define HAL_TX_STATUS_FLAGS_RATE_STBC          BIT(5)
+#define HAL_TX_STATUS_FLAGS_OFDMA              BIT(6)
+
+#define HAL_TX_STATUS_DESC_LEN         sizeof(struct hal_wbm_release_ring)
+
+/* Tx status parsed from srng desc */
+struct hal_tx_status {
+       enum hal_wbm_rel_src_module buf_rel_source;
+       enum hal_wbm_tqm_rel_reason status;
+       uint8_t ack_rssi;
+       uint32_t flags; /* %HAL_TX_STATUS_FLAGS_ */
+       uint32_t ppdu_id;
+       uint8_t try_cnt;
+       uint8_t tid;
+       uint16_t peer_id;
+       uint32_t rate_stats;
+};
+
+struct ath12k_hw_params {
+       const char *name;
+       uint16_t hw_rev;
+       uint8_t max_radios;
+       uint32_t bdf_addr;
+
+       struct {
+               const char *dir;
+               size_t board_size;
+               size_t cal_offset;
+       } fw;
+
+       const struct ath12k_hw_ops *hw_ops;
+       const struct ath12k_hw_ring_mask *ring_mask;
+
+       bool internal_sleep_clock;
+
+       const struct ath12k_hw_regs *regs;
+       uint32_t qmi_service_ins_id;
+       const struct ce_attr *host_ce_config;
+       uint32_t ce_count;
+       const struct ce_pipe_config *target_ce_config;
+       uint32_t target_ce_count;
+       const struct service_to_pipe *svc_to_ce_map;
+       uint32_t svc_to_ce_map_len;
+
+       bool single_pdev_only;
+
+       bool rxdma1_enable;
+       int num_rxmda_per_pdev;
+       bool rx_mac_buf_ring;
+       bool vdev_start_delay;
+       bool htt_peer_map_v2;
+#if notyet
+       struct {
+               uint8_t fft_sz;
+               uint8_t fft_pad_sz;
+               uint8_t summary_pad_sz;
+               uint8_t fft_hdr_len;
+               uint16_t max_fft_bins;
+               bool fragment_160mhz;
+       } spectral;
+
+       uint16_t interface_modes;
+       bool supports_monitor;
+       bool full_monitor_mode;
+#endif
+       bool supports_shadow_regs;
+       bool idle_ps;
+       bool supports_sta_ps;
+       bool cold_boot_calib;
+       bool cbcal_restart_fw;
+       int fw_mem_mode;
+       uint32_t num_vdevs;
+       uint32_t num_peers;
+       bool supports_suspend;
+       uint32_t hal_desc_sz;
+       bool supports_regdb;
+       bool fix_l1ss;
+       bool credit_flow;
+       uint8_t max_tx_ring;
+       const struct ath12k_hw_hal_params *hal_params;
+#if notyet
+       bool supports_dynamic_smps_6ghz;
+       bool alloc_cacheable_memory;
+       bool supports_rssi_stats;
+#endif
+       bool fw_wmi_diag_event;
+       bool current_cc_support;
+       bool dbr_debug_support;
+       bool global_reset;
+#ifdef notyet
+       const struct cfg80211_sar_capa *bios_sar_capa;
+#endif
+       bool m3_fw_support;
+       bool fixed_bdf_addr;
+       bool fixed_mem_region;
+       bool static_window_map;
+       bool hybrid_bus_type;
+       bool fixed_fw_mem;
+#if notyet
+       bool support_off_channel_tx;
+       bool supports_multi_bssid;
+
+       struct {
+               uint32_t start;
+               uint32_t end;
+       } sram_dump;
+
+       bool tcl_ring_retry;
+#endif
+       uint32_t tx_ring_size;
+       bool smp2p_wow_exit;
+};
+
+struct ath12k_hw_ops {
+       uint8_t (*get_hw_mac_from_pdev_id)(int pdev_id);
+       void (*wmi_init_config)(struct qwz_softc *sc,
+           struct target_resource_config *config);
+       int (*mac_id_to_pdev_id)(struct ath12k_hw_params *hw, int mac_id);
+       int (*mac_id_to_srng_id)(struct ath12k_hw_params *hw, int mac_id);
+#if notyet
+       void (*tx_mesh_enable)(struct ath12k_base *ab,
+                              struct hal_tcl_data_cmd *tcl_cmd);
+#endif
+       int (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
+#if notyet
+       bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
+#endif
+       uint8_t (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
+       uint8_t *(*rx_desc_get_hdr_status)(struct hal_rx_desc *desc);
+       int (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc);
+       uint32_t (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
+       uint8_t (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
+#ifdef notyet
+       uint8_t (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
+       bool (*rx_desc_get_ldpc_support)(struct hal_rx_desc *desc);
+       bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
+       bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
+       uint16_t (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
+#endif
+       uint16_t (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc);
+#ifdef notyet
+       uint8_t (*rx_desc_get_msdu_sgi)(struct hal_rx_desc *desc);
+       uint8_t (*rx_desc_get_msdu_rate_mcs)(struct hal_rx_desc *desc);
+       uint8_t (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc);
+#endif
+       uint32_t (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc);
+#ifdef notyet
+       uint8_t (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc);
+       uint8_t (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc);
+       uint8_t (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc);
+       uint16_t (*rx_desc_get_mpdu_peer_id)(struct hal_rx_desc *desc);
+       void (*rx_desc_copy_attn_end_tlv)(struct hal_rx_desc *fdesc,
+                                         struct hal_rx_desc *ldesc);
+       uint32_t (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc);
+       uint32_t (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc);
+       void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, uint16_t len);
+#endif
+       struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
+#ifdef notyet
+       uint8_t *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
+#endif
+       void (*reo_setup)(struct qwz_softc *);
+#ifdef notyet
+       uint16_t (*mpdu_info_get_peerid)(uint8_t *tlv_data);
+       bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
+       uint8_t* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
+       uint32_t (*get_ring_selector)(struct sk_buff *skb);
+#endif
+};
+
+extern const struct ath12k_hw_ops ipq8074_ops;
+extern const struct ath12k_hw_ops ipq6018_ops;
+extern const struct ath12k_hw_ops qca6390_ops;
+extern const struct ath12k_hw_ops qcn9074_ops;
+extern const struct ath12k_hw_ops wcn6855_ops;
+extern const struct ath12k_hw_ops wcn6750_ops;
+
+extern const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_ipq8074;
+extern const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qca6390;
+extern const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9074;
+extern const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn6750;
+
+struct ath12k_hw_regs {
+       uint32_t hal_tcl1_ring_base_lsb;
+       uint32_t hal_tcl1_ring_base_msb;
+       uint32_t hal_tcl1_ring_id;
+       uint32_t hal_tcl1_ring_misc;
+       uint32_t hal_tcl1_ring_tp_addr_lsb;
+       uint32_t hal_tcl1_ring_tp_addr_msb;
+       uint32_t hal_tcl1_ring_consumer_int_setup_ix0;
+       uint32_t hal_tcl1_ring_consumer_int_setup_ix1;
+       uint32_t hal_tcl1_ring_msi1_base_lsb;
+       uint32_t hal_tcl1_ring_msi1_base_msb;
+       uint32_t hal_tcl1_ring_msi1_data;
+       uint32_t hal_tcl2_ring_base_lsb;
+       uint32_t hal_tcl_ring_base_lsb;
+
+       uint32_t hal_tcl_status_ring_base_lsb;
+
+       uint32_t hal_reo1_ring_base_lsb;
+       uint32_t hal_reo1_ring_base_msb;
+       uint32_t hal_reo1_ring_id;
+       uint32_t hal_reo1_ring_misc;
+       uint32_t hal_reo1_ring_hp_addr_lsb;
+       uint32_t hal_reo1_ring_hp_addr_msb;
+       uint32_t hal_reo1_ring_producer_int_setup;
+       uint32_t hal_reo1_ring_msi1_base_lsb;
+       uint32_t hal_reo1_ring_msi1_base_msb;
+       uint32_t hal_reo1_ring_msi1_data;
+       uint32_t hal_reo2_ring_base_lsb;
+       uint32_t hal_reo1_aging_thresh_ix_0;
+       uint32_t hal_reo1_aging_thresh_ix_1;
+       uint32_t hal_reo1_aging_thresh_ix_2;
+       uint32_t hal_reo1_aging_thresh_ix_3;
+
+       uint32_t hal_reo1_ring_hp;
+       uint32_t hal_reo1_ring_tp;
+       uint32_t hal_reo2_ring_hp;
+
+       uint32_t hal_reo_tcl_ring_base_lsb;
+       uint32_t hal_reo_tcl_ring_hp;
+
+       uint32_t hal_reo_status_ring_base_lsb;
+       uint32_t hal_reo_status_hp;
+
+       uint32_t hal_reo_cmd_ring_base_lsb;
+       uint32_t hal_reo_cmd_ring_hp;
+
+       uint32_t hal_sw2reo_ring_base_lsb;
+       uint32_t hal_sw2reo_ring_hp;
+
+       uint32_t hal_seq_wcss_umac_ce0_src_reg;
+       uint32_t hal_seq_wcss_umac_ce0_dst_reg;
+       uint32_t hal_seq_wcss_umac_ce1_src_reg;
+       uint32_t hal_seq_wcss_umac_ce1_dst_reg;
+
+       uint32_t hal_wbm_idle_link_ring_base_lsb;
+       uint32_t hal_wbm_idle_link_ring_misc;
+
+       uint32_t hal_wbm_release_ring_base_lsb;
+
+       uint32_t hal_wbm0_release_ring_base_lsb;
+       uint32_t hal_wbm1_release_ring_base_lsb;
+
+       uint32_t pcie_qserdes_sysclk_en_sel;
+       uint32_t pcie_pcs_osc_dtct_config_base;
+
+       uint32_t hal_shadow_base_addr;
+       uint32_t hal_reo1_misc_ctl;
+};
+
+extern const struct ath12k_hw_regs ipq8074_regs;
+extern const struct ath12k_hw_regs qca6390_regs;
+extern const struct ath12k_hw_regs qcn9074_regs;
+extern const struct ath12k_hw_regs wcn6855_regs;
+extern const struct ath12k_hw_regs wcn6750_regs;
+
+/*
+ * Global driver/device state flags.
+ * NOTE(review): presumably used as bit positions in a flags word, as in the
+ * Linux driver these names come from — verify against the users in qwz.c.
+ */
+enum ath12k_dev_flags {
+       ATH12K_CAC_RUNNING,
+       ATH12K_FLAG_CORE_REGISTERED,
+       ATH12K_FLAG_CRASH_FLUSH,
+       ATH12K_FLAG_RAW_MODE,
+       ATH12K_FLAG_HW_CRYPTO_DISABLED,
+       ATH12K_FLAG_BTCOEX,
+       ATH12K_FLAG_RECOVERY,
+       ATH12K_FLAG_UNREGISTERING,
+       ATH12K_FLAG_REGISTERED,
+       ATH12K_FLAG_QMI_FAIL,
+       ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
+       ATH12K_FLAG_CE_IRQ_ENABLED,
+       ATH12K_FLAG_EXT_IRQ_ENABLED,
+       ATH12K_FLAG_FIXED_MEM_RGN,
+       ATH12K_FLAG_DEVICE_INIT_DONE,
+       ATH12K_FLAG_MULTI_MSI_VECTORS,
+};
+
+/* State machine for hardware scans. */
+enum ath12k_scan_state {
+       ATH12K_SCAN_IDLE,
+       ATH12K_SCAN_STARTING,
+       ATH12K_SCAN_RUNNING,
+       ATH12K_SCAN_ABORTING,
+};
+
+/* State machine for 802.11d (regulatory country detection) scans. */
+enum ath12k_11d_state {
+       ATH12K_11D_IDLE,
+       ATH12K_11D_PREPARING,
+       ATH12K_11D_RUNNING,
+};
+
+/* enum ath12k_spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ *     something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ *     is performed manually.
+ */
+enum ath12k_spectral_mode {
+       ATH12K_SPECTRAL_DISABLED = 0,
+       ATH12K_SPECTRAL_BACKGROUND,
+       ATH12K_SPECTRAL_MANUAL,
+};
+
+/* 11d scan interval (ms — TODO confirm units) and invalid-vdev sentinel. */
+#define QWZ_SCAN_11D_INTERVAL          600000
+#define QWZ_11D_INVALID_VDEV_ID                0xFFFF
+
+/*
+ * Bus-specific operations, supplied by the bus attachment
+ * (e.g. if_qwz_pci.c for PCI devices).
+ */
+struct qwz_ops {
+       uint32_t        (*read32)(struct qwz_softc *, uint32_t);
+       void            (*write32)(struct qwz_softc *, uint32_t, uint32_t);
+       int             (*start)(struct qwz_softc *);
+       void            (*stop)(struct qwz_softc *);
+       int             (*power_up)(struct qwz_softc *);
+       void            (*power_down)(struct qwz_softc *);
+       int             (*submit_xfer)(struct qwz_softc *, struct mbuf *);
+       void            (*irq_enable)(struct qwz_softc *sc);
+       void            (*irq_disable)(struct qwz_softc *sc);
+       int             (*map_service_to_pipe)(struct qwz_softc *, uint16_t,
+                           uint8_t *, uint8_t *);
+       int             (*get_user_msi_vector)(struct qwz_softc *, char *,
+                           int *, uint32_t *, uint32_t *);
+};
+
+/* A single bus_dma(9) memory allocation mapped into kernel VA. */
+struct qwz_dmamem {
+       bus_dmamap_t            map;
+       bus_dma_segment_t       seg;
+       size_t                  size;
+       caddr_t                 kva;
+};
+
+struct qwz_dmamem *qwz_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t);
+void qwz_dmamem_free(bus_dma_tag_t, struct qwz_dmamem *);
+
+/* Accessors for struct qwz_dmamem: map, length, device and kernel addresses. */
+#define QWZ_DMA_MAP(_adm)      ((_adm)->map)
+#define QWZ_DMA_LEN(_adm)      ((_adm)->size)
+#define QWZ_DMA_DVA(_adm)      ((_adm)->map->dm_segs[0].ds_addr)
+#define QWZ_DMA_KVA(_adm)      ((void *)(_adm)->kva)
+
+/* Setup-time parameters for a HAL SRNG (status/ring engine) ring. */
+struct hal_srng_params {
+       bus_addr_t ring_base_paddr;
+       uint32_t *ring_base_vaddr;
+       int num_entries;
+       uint32_t intr_batch_cntr_thres_entries;
+       uint32_t intr_timer_thres_us;
+       uint32_t flags;
+       uint32_t max_buffer_len;
+       uint32_t low_threshold;
+       uint64_t msi_addr;
+       uint32_t msi_data;
+
+       /* Add more params as needed */
+};
+
+/* Ring direction relative to the host: source (host->hw) or destination. */
+enum hal_srng_dir {
+       HAL_SRNG_DIR_SRC,
+       HAL_SRNG_DIR_DST
+};
+
+/* srng flags */
+#define HAL_SRNG_FLAGS_MSI_SWAP                        0x00000008
+#define HAL_SRNG_FLAGS_RING_PTR_SWAP           0x00000010
+#define HAL_SRNG_FLAGS_DATA_TLV_SWAP           0x00000020
+#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN      0x00010000
+#define HAL_SRNG_FLAGS_MSI_INTR                        0x00020000
+#define HAL_SRNG_FLAGS_CACHED                   0x20000000
+#define HAL_SRNG_FLAGS_LMAC_RING               0x80000000
+#define HAL_SRNG_FLAGS_REMAP_CE_RING        0x10000000
+
+/* Tag/length fields of the TLV header preceding ring entries. */
+#define HAL_SRNG_TLV_HDR_TAG           GENMASK(9, 1)
+#define HAL_SRNG_TLV_HDR_LEN           GENMASK(25, 10)
+
+/* Common SRNG ring structure for source and destination rings */
+struct hal_srng {
+       /* Unique SRNG ring ID */
+       uint8_t ring_id;
+
+       /* Ring initialization done */
+       uint8_t initialized;
+
+       /* Interrupt/MSI value assigned to this ring */
+       int irq;
+
+       /* Physical base address of the ring */
+       bus_addr_t ring_base_paddr;
+
+       /* Virtual base address of the ring */
+       uint32_t *ring_base_vaddr;
+
+       /* Number of entries in ring */
+       uint32_t num_entries;
+
+       /* Ring size */
+       uint32_t ring_size;
+
+       /* Ring size mask */
+       uint32_t ring_size_mask;
+
+       /* Size of ring entry */
+       uint32_t entry_size;
+
+       /* Interrupt timer threshold - in micro seconds */
+       uint32_t intr_timer_thres_us;
+
+       /* Interrupt batch counter threshold - in number of ring entries */
+       uint32_t intr_batch_cntr_thres_entries;
+
+       /* MSI Address */
+       bus_addr_t msi_addr;
+
+       /* MSI data */
+       uint32_t msi_data;
+
+       /* Misc flags */
+       uint32_t flags;
+#ifdef notyet
+       /* Lock for serializing ring index updates */
+       spinlock_t lock;
+#endif
+       /* Start offset of SRNG register groups for this ring
+        * TBD: See if this is required - register address can be derived
+        * from ring ID
+        */
+       uint32_t hwreg_base[HAL_SRNG_NUM_REG_GRP];
+
+       /* Time of last ring access (units defined by the users of this
+        * field — TODO confirm). */
+       uint64_t timestamp;
+
+       /* Source or Destination ring */
+       enum hal_srng_dir ring_dir;
+
+       /* Direction-specific pointer state; which member is valid depends
+        * on ring_dir. */
+       union {
+               struct {
+                       /* SW tail pointer */
+                       uint32_t tp;
+
+                       /* Shadow head pointer location to be updated by HW */
+                       volatile uint32_t *hp_addr;
+
+                       /* Cached head pointer */
+                       uint32_t cached_hp;
+
+                       /* Tail pointer location to be updated by SW - This
+                        * will be a register address and need not be
+                        * accessed through SW structure
+                        */
+                       uint32_t *tp_addr;
+
+                       /* Current SW loop cnt */
+                       uint32_t loop_cnt;
+
+                       /* max transfer size */
+                       uint16_t max_buffer_length;
+
+                       /* head pointer at access end */
+                       uint32_t last_hp;
+               } dst_ring;
+
+               struct {
+                       /* SW head pointer */
+                       uint32_t hp;
+
+                       /* SW reap head pointer */
+                       uint32_t reap_hp;
+
+                       /* Shadow tail pointer location to be updated by HW */
+                       uint32_t *tp_addr;
+
+                       /* Cached tail pointer */
+                       uint32_t cached_tp;
+
+                       /* Head pointer location to be updated by SW - This
+                        * will be a register address and need not be accessed
+                        * through SW structure
+                        */
+                       uint32_t *hp_addr;
+
+                       /* Low threshold - in number of ring entries */
+                       uint32_t low_threshold;
+
+                       /* tail pointer at access end */
+                       uint32_t last_tp;
+               } src_ring;
+       } u;
+};
+
+/* All SRNG ring types known to the HAL layer. */
+enum hal_ring_type {
+       HAL_REO_DST,
+       HAL_REO_EXCEPTION,
+       HAL_REO_REINJECT,
+       HAL_REO_CMD,
+       HAL_REO_STATUS,
+       HAL_TCL_DATA,
+       HAL_TCL_CMD,
+       HAL_TCL_STATUS,
+       HAL_CE_SRC,
+       HAL_CE_DST,
+       HAL_CE_DST_STATUS,
+       HAL_WBM_IDLE_LINK,
+       HAL_SW2WBM_RELEASE,
+       HAL_WBM2SW_RELEASE,
+       HAL_RXDMA_BUF,
+       HAL_RXDMA_DST,
+       HAL_RXDMA_MONITOR_BUF,
+       HAL_RXDMA_MONITOR_STATUS,
+       HAL_RXDMA_MONITOR_DST,
+       HAL_RXDMA_MONITOR_DESC,
+       HAL_RXDMA_DIR_BUF,
+       HAL_MAX_RING_TYPES,
+};
+
+/* HW SRNG configuration table */
+struct hal_srng_config {
+       int start_ring_id;
+       uint16_t max_rings;
+       uint16_t entry_size;
+       uint32_t reg_start[HAL_SRNG_NUM_REG_GRP];
+       uint16_t reg_size[HAL_SRNG_NUM_REG_GRP];
+       uint8_t lmac_ring;
+       enum hal_srng_dir ring_dir;
+       uint32_t max_size;
+};
+
+/* Number of entries in the SRNG configuration table (ath12k_hal.srng_config). */
+#define QWZ_NUM_SRNG_CFG       21
+
+/*
+ * Decoded REO status descriptors, one variant per REO command type.
+ * They are collected in the union of struct hal_reo_status below.
+ */
+struct hal_reo_status_header {
+       uint16_t cmd_num;
+       enum hal_reo_cmd_status cmd_status;
+       uint16_t cmd_exe_time;
+       uint32_t timestamp;
+};
+
+struct hal_reo_status_queue_stats {
+       uint16_t ssn;
+       uint16_t curr_idx;
+       uint32_t pn[4];
+       uint32_t last_rx_queue_ts;
+       uint32_t last_rx_dequeue_ts;
+       uint32_t rx_bitmap[8]; /* Bitmap from 0-255 */
+       uint32_t curr_mpdu_cnt;
+       uint32_t curr_msdu_cnt;
+       uint16_t fwd_due_to_bar_cnt;
+       uint16_t dup_cnt;
+       uint32_t frames_in_order_cnt;
+       uint32_t num_mpdu_processed_cnt;
+       uint32_t num_msdu_processed_cnt;
+       uint32_t total_num_processed_byte_cnt;
+       uint32_t late_rx_mpdu_cnt;
+       uint32_t reorder_hole_cnt;
+       uint8_t timeout_cnt;
+       uint8_t bar_rx_cnt;
+       uint8_t num_window_2k_jump_cnt;
+};
+
+struct hal_reo_status_flush_queue {
+       bool err_detected;
+};
+
+enum hal_reo_status_flush_cache_err_code {
+       HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS,
+       HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE,
+       HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND,
+};
+
+struct hal_reo_status_flush_cache {
+       bool err_detected;
+       enum hal_reo_status_flush_cache_err_code err_code;
+       bool cache_controller_flush_status_hit;
+       uint8_t cache_controller_flush_status_desc_type;
+       uint8_t cache_controller_flush_status_client_id;
+       uint8_t cache_controller_flush_status_err;
+       uint8_t cache_controller_flush_status_cnt;
+};
+
+enum hal_reo_status_unblock_cache_type {
+       HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE,
+       HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE,
+};
+
+struct hal_reo_status_unblock_cache {
+       bool err_detected;
+       enum hal_reo_status_unblock_cache_type unblock_type;
+};
+
+struct hal_reo_status_flush_timeout_list {
+       bool err_detected;
+       bool list_empty;
+       uint16_t release_desc_cnt;
+       uint16_t fwd_buf_cnt;
+};
+
+enum hal_reo_threshold_idx {
+       HAL_REO_THRESHOLD_IDX_DESC_COUNTER0,
+       HAL_REO_THRESHOLD_IDX_DESC_COUNTER1,
+       HAL_REO_THRESHOLD_IDX_DESC_COUNTER2,
+       HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM,
+};
+
+struct hal_reo_status_desc_thresh_reached {
+       enum hal_reo_threshold_idx threshold_idx;
+       uint32_t link_desc_counter0;
+       uint32_t link_desc_counter1;
+       uint32_t link_desc_counter2;
+       uint32_t link_desc_counter_sum;
+};
+
+/* Common header plus the command-specific status payload. */
+struct hal_reo_status {
+       struct hal_reo_status_header uniform_hdr;
+       uint8_t loop_cnt;
+       union {
+               struct hal_reo_status_queue_stats queue_stats;
+               struct hal_reo_status_flush_queue flush_queue;
+               struct hal_reo_status_flush_cache flush_cache;
+               struct hal_reo_status_unblock_cache unblock_cache;
+               struct hal_reo_status_flush_timeout_list timeout_list;
+               struct hal_reo_status_desc_thresh_reached desc_thresh_reached;
+       } u;
+};
+
+/* HAL context to be used to access SRNG APIs (currently used by data path
+ * and transport (CE) modules)
+ */
+struct ath12k_hal {
+       /* HAL internal state for all SRNG rings.
+        */
+       struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX];
+
+       /* SRNG configuration table */
+       struct hal_srng_config srng_config[QWZ_NUM_SRNG_CFG];
+
+       /* Remote pointer memory for HW/FW updates */
+       struct qwz_dmamem *rdpmem;
+       struct {
+               uint32_t *vaddr;
+               bus_addr_t paddr;
+       } rdp;
+
+       /* Shared memory for ring pointer updates from host to FW */
+       struct qwz_dmamem *wrpmem;
+       struct {
+               uint32_t *vaddr;
+               bus_addr_t paddr;
+       } wrp;
+
+       /* Available REO blocking resources bitmap */
+       uint8_t avail_blk_resource;
+
+       uint8_t current_blk_index;
+
+       /* shadow register configuration */
+       uint32_t shadow_reg_addr[HAL_SHADOW_NUM_REGS];
+       int num_shadow_reg_configured;
+#ifdef notyet
+       struct lock_class_key srng_key[HAL_SRNG_RING_ID_MAX];
+#endif
+};
+
+/* Packet number (replay counter) schemes used by REO queues. */
+enum hal_pn_type {
+       HAL_PN_TYPE_NONE,
+       HAL_PN_TYPE_WPA,
+       HAL_PN_TYPE_WAPI_EVEN,
+       HAL_PN_TYPE_WAPI_UNEVEN,
+};
+
+/* Copy engine descriptor flavors. */
+enum hal_ce_desc {
+       HAL_CE_DESC_SRC,
+       HAL_CE_DESC_DST,
+       HAL_CE_DESC_DST_STATUS,
+};
+
+/* Copy engine interrupt register addresses ("ie" presumably means
+ * interrupt enable — TODO confirm against users). */
+struct ce_ie_addr {
+       uint32_t ie1_reg_addr;
+       uint32_t ie2_reg_addr;
+       uint32_t ie3_reg_addr;
+};
+
+/* A remapped copy-engine register window. */
+struct ce_remap {
+       uint32_t base;
+       uint32_t size;
+};
+
+/* Static per-pipe copy engine attributes. */
+struct ce_attr {
+       /* CE_ATTR_* values */
+       unsigned int flags;
+
+       /* #entries in source ring - Must be a power of 2 */
+       unsigned int src_nentries;
+
+       /*
+        * Max source send size for this CE.
+        * This is also the minimum size of a destination buffer.
+        */
+       unsigned int src_sz_max;
+
+       /* #entries in destination ring - Must be a power of 2 */
+       unsigned int dest_nentries;
+
+       /* Completion callbacks for received and sent frames. */
+       void (*recv_cb)(struct qwz_softc *, struct mbuf *);
+       void (*send_cb)(struct qwz_softc *, struct mbuf *);
+};
+
+#define CE_DESC_RING_ALIGN 8
+
+/* Per-MSDU receive state, parsed out of the hardware rx descriptor. */
+struct qwz_rx_msdu {
+       TAILQ_ENTRY(qwz_rx_msdu) entry;
+       struct mbuf *m;
+       struct ieee80211_rxinfo rxi;
+       int is_first_msdu;
+       int is_last_msdu;
+       int is_continuation;
+       int is_mcbc;
+       int is_eapol;
+       struct hal_rx_desc *rx_desc;
+       uint8_t err_rel_src;
+       uint8_t err_code;
+       uint8_t mac_id;
+       uint8_t unmapped;
+       uint8_t is_frag;
+       uint8_t tid;
+       uint16_t peer_id;
+       uint16_t seq_no;
+};
+
+TAILQ_HEAD(qwz_rx_msdu_list, qwz_rx_msdu);
+
+/* Per-buffer receive bookkeeping: mbuf, its DMA map, and parsed MSDU info. */
+struct qwz_rx_data {
+       struct mbuf     *m;
+       bus_dmamap_t    map;
+       struct qwz_rx_msdu rx_msdu;
+};
+
+/* Per-buffer transmit bookkeeping. */
+struct qwz_tx_data {
+       struct ieee80211_node *ni;
+       struct mbuf     *m;
+       bus_dmamap_t    map;
+       uint8_t eid;
+       uint8_t flags;
+       uint32_t cipher;
+};
+
+/* A single copy engine descriptor ring plus its per-transfer context. */
+struct qwz_ce_ring {
+       /* Number of entries in this ring; must be power of 2 */
+       unsigned int nentries;
+       unsigned int nentries_mask;
+
+       /* For dest ring, this is the next index to be processed
+        * by software after it was/is received into.
+        *
+        * For src ring, this is the last descriptor that was sent
+        * and completion processed by software.
+        *
+        * Regardless of src or dest ring, this is an invariant
+        * (modulo ring size):
+        *     write index >= read index >= sw_index
+        */
+       unsigned int sw_index;
+       /* cached copy */
+       unsigned int write_index;
+
+       /* Start of DMA-coherent area reserved for descriptors */
+       /* Host address space */
+       caddr_t base_addr;
+
+       /* DMA map for Tx/Rx descriptors. */
+       bus_dmamap_t            dmap;
+       bus_dma_segment_t       dsegs;
+       int                     nsegs;
+       size_t                  desc_sz;
+
+       /* HAL ring id */
+       uint32_t hal_ring_id;
+
+       /*
+        * Per-transfer data.
+        * Size and type of this data depends on how the ring is used.
+        *
+        * For transfers using DMA, the context contains pointers to
+        * struct qwz_rx_data if this ring is a dest ring, or struct
+        * qwz_tx_data if this ring is a src ring. DMA maps are allocated
+        * when the device is started via sc->ops.start, and will be used
+        * to load mbufs for DMA transfers.
+        * In this case, the pointers MUST NOT be cleared until the device
+        * is stopped. Otherwise we'd lose track of our DMA mappings!
+        * The Linux ath12k driver works differently because it can store
+        * DMA mapping information in a Linux socket buffer structure, which
+        * is not possible with mbufs.
+        *
+        * Keep last.
+        */
+       void *per_transfer_context[0];
+};
+
+void qwz_htc_tx_completion_handler(struct qwz_softc *, struct mbuf *);
+void qwz_htc_rx_completion_handler(struct qwz_softc *, struct mbuf *);
+void qwz_dp_htt_htc_t2h_msg_handler(struct qwz_softc *, struct mbuf *);
+
+struct qwz_dp;
+
+/* Tx completion status reported via HTT/WBM for one MSDU. */
+struct qwz_dp_htt_wbm_tx_status {
+       uint32_t msdu_id;
+       int acked;
+       int ack_rssi;
+       uint16_t peer_id;
+};
+
+/* Data path (DP) sizing parameters and ring sizes (entries, not bytes). */
+#define DP_NUM_CLIENTS_MAX 64
+#define DP_AVG_TIDS_PER_CLIENT 2
+#define DP_NUM_TIDS_MAX (DP_NUM_CLIENTS_MAX * DP_AVG_TIDS_PER_CLIENT)
+#define DP_AVG_MSDUS_PER_FLOW 128
+#define DP_AVG_FLOWS_PER_TID 2
+#define DP_AVG_MPDUS_PER_TID_MAX 128
+#define DP_AVG_MSDUS_PER_MPDU 4
+
+#define DP_RX_HASH_ENABLE      1 /* Enable hash based Rx steering */
+
+#define DP_BA_WIN_SZ_MAX       256
+
+#define DP_TCL_NUM_RING_MAX    3
+#define DP_TCL_NUM_RING_MAX_QCA6390    1
+
+#define DP_IDLE_SCATTER_BUFS_MAX 16
+
+#define DP_WBM_RELEASE_RING_SIZE       64
+#define DP_TCL_DATA_RING_SIZE          512
+#define DP_TCL_DATA_RING_SIZE_WCN6750  2048
+#define DP_TX_COMP_RING_SIZE           32768
+#define DP_TX_IDR_SIZE                 DP_TX_COMP_RING_SIZE
+#define DP_TCL_CMD_RING_SIZE           32
+#define DP_TCL_STATUS_RING_SIZE                32
+#define DP_REO_DST_RING_MAX            4
+#define DP_REO_DST_RING_SIZE           2048
+#define DP_REO_REINJECT_RING_SIZE      32
+#define DP_RX_RELEASE_RING_SIZE                1024
+#define DP_REO_EXCEPTION_RING_SIZE     128
+#define DP_REO_CMD_RING_SIZE           256
+#define DP_REO_STATUS_RING_SIZE                2048
+#define DP_RXDMA_BUF_RING_SIZE         4096
+#define DP_RXDMA_REFILL_RING_SIZE      2048
+#define DP_RXDMA_ERR_DST_RING_SIZE     1024
+#define DP_RXDMA_MON_STATUS_RING_SIZE  1024
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
+#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
+#define DP_RXDMA_MONITOR_DESC_RING_SIZE        4096
+
+#define DP_RX_RELEASE_RING_NUM 3
+
+/* Rx buffer sizes in bytes. */
+#define DP_RX_BUFFER_SIZE      2048
+#define        DP_RX_BUFFER_SIZE_LITE  1024
+#define DP_RX_BUFFER_ALIGN_SIZE        128
+
+#define DP_RXDMA_BUF_COOKIE_BUF_ID     GENMASK(17, 0)
+#define DP_RXDMA_BUF_COOKIE_PDEV_ID    GENMASK(20, 18)
+
+/* Translate between hardware (1-based) and software (0-based) MAC ids. */
+#define DP_HW2SW_MACID(mac_id) ((mac_id) ? ((mac_id) - 1) : 0)
+#define DP_SW2HW_MACID(mac_id) ((mac_id) + 1)
+
+/* Bitfield layout of a tx descriptor id. */
+#define DP_TX_DESC_ID_MAC_ID  GENMASK(1, 0)
+#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
+#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
+
+/* Deferred head-pointer update timer for shadow-register ring updates. */
+struct qwz_hp_update_timer {
+       struct timeout timer;
+       int started;
+       int init;
+       uint32_t tx_num;
+       uint32_t timer_tx_num;
+       uint32_t ring_id;
+       uint32_t interval;
+       struct qwz_softc *sc;
+};
+
+/* Per-TID receive reorder queue state (DMA memory shared with hardware). */
+struct dp_rx_tid {
+       uint8_t tid;
+       struct qwz_dmamem *mem;
+       uint32_t *vaddr;
+       uint64_t paddr;
+       uint32_t size;
+       uint32_t ba_win_sz;
+       int active;
+
+       /* Info related to rx fragments */
+       uint32_t cur_sn;
+       uint16_t last_frag_no;
+       uint16_t rx_frag_bitmap;
+#if 0
+       struct sk_buff_head rx_frags;
+       struct hal_reo_dest_ring *dst_ring_desc;
+
+       /* Timer info related to fragments */
+       struct timer_list frag_timer;
+       struct ath12k_base *ab;
+#endif
+};
+
+#define DP_REO_DESC_FREE_THRESHOLD  64
+#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
+#define DP_MON_PURGE_TIMEOUT_MS     100
+#define DP_MON_SERVICE_BUDGET       128
+
+/* A REO cache flush request queued for later processing; ts is the
+ * queueing timestamp. */
+struct dp_reo_cache_flush_elem {
+       TAILQ_ENTRY(dp_reo_cache_flush_elem) entry;
+       struct dp_rx_tid data;
+       uint64_t ts;
+};
+
+TAILQ_HEAD(dp_reo_cmd_cache_flush_head, dp_reo_cache_flush_elem);
+
+/* An in-flight REO command awaiting its status; handler runs on completion. */
+struct dp_reo_cmd {
+       TAILQ_ENTRY(dp_reo_cmd) entry;
+       struct dp_rx_tid data;
+       int cmd_num;
+       void (*handler)(struct qwz_dp *, void *,
+           enum hal_reo_cmd_status status);
+};
+
+TAILQ_HEAD(dp_reo_cmd_head, dp_reo_cmd);
+
+/* DMA memory backing one data-path SRNG ring. */
+struct dp_srng {
+       struct qwz_dmamem *mem;
+       uint32_t *vaddr;
+       bus_addr_t paddr;
+       int size;
+       uint32_t ring_id;
+       uint8_t cached;
+};
+
+/* One TCL transmit ring pair (data ring plus its completion ring). */
+struct dp_tx_ring {
+       uint8_t tcl_data_ring_id;
+       struct dp_srng tcl_data_ring;
+       struct dp_srng tcl_comp_ring;
+       int cur;
+       int queued;
+       struct qwz_tx_data *data;
+       struct hal_wbm_release_ring *tx_status;
+       int tx_status_head;
+       int tx_status_tail;
+};
+
+
+/* DMA memory holding a bank of link descriptors. */
+struct dp_link_desc_bank {
+       struct qwz_dmamem *mem;
+       caddr_t *vaddr;
+       bus_addr_t paddr;
+       uint32_t size;
+};
+
+/* Size to enforce scatter idle list mode */
+#define DP_LINK_DESC_ALLOC_SIZE_THRESH 0x200000
+#define DP_LINK_DESC_BANKS_MAX 8
+
+/* One buffer of the WBM idle scatter list. */
+struct hal_wbm_idle_scatter_list {
+       struct qwz_dmamem *mem;
+       bus_addr_t paddr;
+       struct hal_wbm_link_desc *vaddr;
+};
+
+/* Top-level data path (DP) state: all DP rings, link descriptor banks,
+ * and REO command bookkeeping. */
+struct qwz_dp {
+       struct qwz_softc *sc;
+       enum ath12k_htc_ep_id eid;
+       int htt_tgt_version_received;
+       uint8_t htt_tgt_ver_major;
+       uint8_t htt_tgt_ver_minor;
+       struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+       struct dp_srng wbm_idle_ring;
+       struct dp_srng wbm_desc_rel_ring;
+       struct dp_srng tcl_cmd_ring;
+       struct dp_srng tcl_status_ring;
+       struct dp_srng reo_reinject_ring;
+       struct dp_srng rx_rel_ring;
+       struct dp_srng reo_except_ring;
+       struct dp_srng reo_cmd_ring;
+       struct dp_srng reo_status_ring;
+       struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
+       struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
+       struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
+       struct dp_reo_cmd_head reo_cmd_list;
+       struct dp_reo_cmd_cache_flush_head reo_cmd_cache_flush_list;
+#if 0
+       struct list_head dp_full_mon_mpdu_list;
+#endif
+       uint32_t reo_cmd_cache_flush_count;
+#if 0
+       /**
+        * protects access to below fields,
+        * - reo_cmd_list
+        * - reo_cmd_cache_flush_list
+        * - reo_cmd_cache_flush_count
+        */
+       spinlock_t reo_cmd_lock;
+#endif
+       struct qwz_hp_update_timer reo_cmd_timer;
+       struct qwz_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
+};
+
+/* Shadow-register update timer intervals (ms — TODO confirm units). */
+#define ATH12K_SHADOW_DP_TIMER_INTERVAL 20
+#define ATH12K_SHADOW_CTRL_TIMER_INTERVAL 10
+
+/* One copy engine pipe: its rings, attributes, and completion callbacks. */
+struct qwz_ce_pipe {
+       struct qwz_softc *sc;
+       uint16_t pipe_num;
+       unsigned int attr_flags;
+       unsigned int buf_sz;
+       unsigned int rx_buf_needed;
+
+       void (*send_cb)(struct qwz_softc *, struct mbuf *);
+       void (*recv_cb)(struct qwz_softc *, struct mbuf *);
+
+#ifdef notyet
+       struct tasklet_struct intr_tq;
+#endif
+       struct qwz_ce_ring *src_ring;
+       struct qwz_ce_ring *dest_ring;
+       struct qwz_ce_ring *status_ring;
+       uint64_t timestamp;
+};
+
+/* All copy engine pipes plus their head-pointer update timers. */
+struct qwz_ce {
+       struct qwz_ce_pipe ce_pipe[CE_COUNT_MAX];
+#ifdef notyet
+       /* Protects rings of all ce pipes */
+       spinlock_t ce_lock;
+#endif
+       struct qwz_hp_update_timer hp_timer[CE_COUNT_MAX];
+};
+
+
+/* XXX This may be non-zero on AHB but is always zero on PCI. */
+#define ATH12K_CE_OFFSET(sc)   (0)
+
+/* Shadow register configuration passed to firmware via QMI. */
+struct qwz_qmi_ce_cfg {
+       const uint8_t *shadow_reg;
+       int shadow_reg_len;
+       uint32_t *shadow_reg_v2;
+       uint32_t shadow_reg_v2_len;
+};
+
+/* Target identification reported by firmware over QMI. */
+struct qwz_qmi_target_info {
+       uint32_t chip_id;
+       uint32_t chip_family;
+       uint32_t board_id;
+       uint32_t soc_id;
+       uint32_t fw_version;
+       uint32_t eeprom_caldata;
+       char fw_build_timestamp[ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+       char fw_build_id[ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+       char bdf_ext[ATH12K_QMI_BDF_EXT_STR_LENGTH];
+};
+
+/* How to search for a matching board data file (BDF). */
+enum ath12k_bdf_search {
+       ATH12K_BDF_SEARCH_DEFAULT,
+       ATH12K_BDF_SEARCH_BUS_AND_BOARD,
+};
+
+/* PCI-style device identity used when matching board data. */
+struct qwz_device_id {
+       enum ath12k_bdf_search bdf_search;
+       uint32_t vendor;
+       uint32_t device;
+       uint32_t subsystem_vendor;
+       uint32_t subsystem_device;
+};
+
+struct qwz_wmi_base;
+
+/* Per-pdev WMI endpoint state. */
+struct qwz_pdev_wmi {
+       struct qwz_wmi_base *wmi;
+       enum ath12k_htc_ep_id eid;
+       const struct wmi_peer_flags_map *peer_flags;
+       uint32_t rx_decap_mode;
+       int tx_ce_desc;
+};
+
+#define QWZ_MAX_RADIOS 3
+
+/* Shared WMI (wireless module interface) state across all radios. */
+struct qwz_wmi_base {
+       struct qwz_softc *sc;
+       struct qwz_pdev_wmi wmi[QWZ_MAX_RADIOS];
+       enum ath12k_htc_ep_id wmi_endpoint_id[QWZ_MAX_RADIOS];
+       uint32_t max_msg_len[QWZ_MAX_RADIOS];
+       int service_ready;
+       int unified_ready;
+       uint8_t svc_map[howmany(WMI_MAX_EXT2_SERVICE, 8)];
+       int tx_credits;
+       const struct wmi_peer_flags_map *peer_flags;
+       uint32_t num_mem_chunks;
+       uint32_t rx_decap_mode;
+       struct wmi_host_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
+       enum wmi_host_hw_mode_config_type preferred_hw_mode;
+       struct target_resource_config  wlan_resource_config;
+       struct ath12k_targ_cap *targ_cap;
+};
+
+/*
+ * Scratch state used while parsing WMI TLV event payloads.
+ * One struct per event family; the *_done fields track parse progress.
+ */
+struct wmi_tlv_policy {
+       size_t min_len;
+};
+
+struct wmi_tlv_svc_ready_parse {
+       int wmi_svc_bitmap_done;
+};
+
+struct wmi_tlv_dma_ring_caps_parse {
+       struct wmi_dma_ring_capabilities *dma_ring_caps;
+       uint32_t n_dma_ring_caps;
+};
+
+struct wmi_tlv_svc_rdy_ext_parse {
+       struct ath12k_service_ext_param param;
+       struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
+       struct wmi_hw_mode_capabilities *hw_mode_caps;
+       uint32_t n_hw_mode_caps;
+       uint32_t tot_phy_id;
+       struct wmi_hw_mode_capabilities pref_hw_mode_caps;
+       struct wmi_mac_phy_capabilities *mac_phy_caps;
+       size_t mac_phy_caps_size;
+       uint32_t n_mac_phy_caps;
+       struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
+       struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
+       uint32_t n_ext_hal_reg_caps;
+       struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
+       int hw_mode_done;
+       int mac_phy_done;
+       int ext_hal_reg_done;
+       int mac_phy_chainmask_combo_done;
+       int mac_phy_chainmask_cap_done;
+       int oem_dma_ring_cap_done;
+       int dma_ring_cap_done;
+};
+
+struct wmi_tlv_svc_rdy_ext2_parse {
+       struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
+       bool dma_ring_cap_done;
+};
+
+struct wmi_tlv_rdy_parse {
+       uint32_t num_extra_mac_addr;
+};
+
+struct wmi_tlv_dma_buf_release_parse {
+       struct ath12k_wmi_dma_buf_release_fixed_param fixed;
+       struct wmi_dma_buf_release_entry *buf_entry;
+       struct wmi_dma_buf_release_meta_data *meta_data;
+       uint32_t num_buf_entry;
+       uint32_t num_meta;
+       bool buf_entry_done;
+       bool meta_data_done;
+};
+
+struct wmi_tlv_fw_stats_parse {
+       const struct wmi_stats_event *ev;
+       const struct wmi_per_chain_rssi_stats *rssi;
+       struct ath12k_fw_stats *stats;
+       int rssi_num;
+       bool chain_rssi_done;
+};
+
+struct wmi_tlv_mgmt_rx_parse {
+       const struct wmi_mgmt_rx_hdr *fixed;
+       const uint8_t *frame_buf;
+       bool frame_buf_done;
+};
+
+struct qwz_htc;
+
+/* Per-endpoint HTC (host-target communication) completion callbacks. */
+struct qwz_htc_ep_ops {
+       void (*ep_tx_complete)(struct qwz_softc *, struct mbuf *);
+       void (*ep_rx_complete)(struct qwz_softc *, struct mbuf *);
+       void (*ep_tx_credits)(struct qwz_softc *);
+};
+
+/* service connection information */
+struct qwz_htc_svc_conn_req {
+       uint16_t service_id;
+       struct qwz_htc_ep_ops ep_ops;
+       int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct qwz_htc_svc_conn_resp {
+       uint8_t buffer_len;
+       uint8_t actual_len;
+       enum ath12k_htc_ep_id eid;
+       unsigned int max_msg_len;
+       uint8_t connect_resp_code;
+};
+
+#define ATH12K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH12K_HTC_MAX_LEN 4096
+#define ATH12K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH12K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
+#define ATH12K_HTC_CONTROL_BUFFER_SIZE (ATH12K_HTC_MAX_CTRL_MSG_LEN + \
+                                       sizeof(struct ath12k_htc_hdr))
+#define ATH12K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
+#define ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES 8
+
+/* State of one connected HTC endpoint. */
+struct qwz_htc_ep {
+       struct qwz_htc *htc;
+       enum ath12k_htc_ep_id eid;
+       enum ath12k_htc_svc_id service_id;
+       struct qwz_htc_ep_ops ep_ops;
+
+       int max_tx_queue_depth;
+       int max_ep_message_len;
+       uint8_t ul_pipe_id;
+       uint8_t dl_pipe_id;
+
+       uint8_t seq_no; /* for debugging */
+       int tx_credits;
+       bool tx_credit_flow_enabled;
+};
+
+/* Tx credit allocation for one HTC service. */
+struct qwz_htc_svc_tx_credits {
+       uint16_t service_id;
+       uint8_t  credit_allocation;
+};
+
+/* Top-level HTC state: endpoints, control buffers, and credit accounting. */
+struct qwz_htc {
+       struct qwz_softc *sc;
+       struct qwz_htc_ep endpoint[ATH12K_HTC_EP_COUNT];
+#ifdef notyet
+       /* protects endpoints */
+       spinlock_t tx_lock;
+#endif
+       uint8_t control_resp_buffer[ATH12K_HTC_MAX_CTRL_MSG_LEN];
+       int control_resp_len;
+
+       int ctl_resp;
+
+       int total_transmit_credits;
+       struct qwz_htc_svc_tx_credits
+               service_alloc_table[ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES];
+       int target_credit_size;
+       uint8_t wmi_ep_count;
+};
+
+/* One consumer ("user") of a contiguous range of MSI vectors. */
+struct qwz_msi_user {
+       char *name;
+       int num_vectors;
+       uint32_t base_vector;
+};
+
+/* Per hw-revision MSI vector layout. */
+struct qwz_msi_config {
+       int total_vectors;
+       int total_users;
+       struct qwz_msi_user *users;
+       uint16_t hw_rev;
+};
+
+/* Per-band PHY capabilities reported by firmware. */
+struct ath12k_band_cap {
+       uint32_t phy_id;
+       uint32_t max_bw_supported;
+       uint32_t ht_cap_info;
+       uint32_t he_cap_info[2];
+       uint32_t he_mcs;
+       uint32_t he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
+       struct ath12k_ppe_threshold he_ppet;
+       uint16_t he_6ghz_capa;
+};
+
+/* Per-pdev (physical device/radio) capabilities. */
+struct ath12k_pdev_cap {
+       uint32_t supported_bands;
+       uint32_t ampdu_density;
+       uint32_t vht_cap;
+       uint32_t vht_mcs;
+       uint32_t he_mcs;
+       uint32_t tx_chain_mask;
+       uint32_t rx_chain_mask;
+       uint32_t tx_chain_mask_shift;
+       uint32_t rx_chain_mask_shift;
+       struct ath12k_band_cap band[WMI_NUM_SUPPORTED_BAND_MAX];
+       int nss_ratio_enabled;
+       uint8_t nss_ratio_info;
+};
+
+/* One radio instance of the device. */
+struct qwz_pdev {
+       struct qwz_softc *sc;
+       uint32_t pdev_id;
+       struct ath12k_pdev_cap cap;
+       uint8_t mac_addr[IEEE80211_ADDR_LEN];
+};
+
+/* Direct buffer ring capability advertised by firmware. */
+struct qwz_dbring_cap {
+       uint32_t pdev_id;
+       enum wmi_direct_buffer_module id;
+       uint32_t min_elem;
+       uint32_t min_buf_sz;
+       uint32_t min_buf_align;
+};
+
+/* An RXDMA refill ring and the rx buffers currently loaded into it. */
+struct dp_rxdma_ring {
+       struct dp_srng refill_buf_ring;
+#if 0
+       struct idr bufs_idr;
+       /* Protects bufs_idr */
+       spinlock_t idr_lock;
+#else
+       struct qwz_rx_data *rx_data;
+#endif
+       int bufs_max;
+       uint8_t freemap[howmany(DP_RXDMA_BUF_RING_SIZE, 8)];
+};
+
+/* Progress of monitor status ring TLV parsing. */
+enum hal_rx_mon_status {
+       HAL_RX_MON_STATUS_PPDU_NOT_DONE,
+       HAL_RX_MON_STATUS_PPDU_DONE,
+       HAL_RX_MON_STATUS_BUF_DONE,
+};
+
+/* Per-user (per-STA) statistics parsed from monitor-mode status TLVs. */
+struct hal_rx_user_status {
+       uint32_t mcs:4,
+       nss:3,
+       ofdma_info_valid:1,
+       dl_ofdma_ru_start_index:7,
+       dl_ofdma_ru_width:7,
+       dl_ofdma_ru_size:8;
+       uint32_t ul_ofdma_user_v0_word0;
+       uint32_t ul_ofdma_user_v0_word1;
+       uint32_t ast_index;
+       uint32_t tid;
+       uint16_t tcp_msdu_count;
+       uint16_t udp_msdu_count;
+       uint16_t other_msdu_count;
+       uint16_t frame_control;
+       uint8_t frame_control_info_valid;
+       uint8_t data_sequence_control_info_valid;
+       uint16_t first_data_seq_ctrl;
+       uint32_t preamble_type;
+       uint16_t ht_flags;
+       uint16_t vht_flags;
+       uint16_t he_flags;
+       uint8_t rs_flags;
+       uint32_t mpdu_cnt_fcs_ok;
+       uint32_t mpdu_cnt_fcs_err;
+       uint32_t mpdu_fcs_ok_bitmap[8];
+       uint32_t mpdu_ok_byte_count;
+       uint32_t mpdu_err_byte_count;
+};
+
+/* Error information for a buffer released via the WBM error ring. */
+struct hal_rx_wbm_rel_info {
+       uint32_t cookie;
+       enum hal_wbm_rel_src_module err_rel_src;
+       enum hal_reo_dest_ring_push_reason push_reason;
+       uint32_t err_code;
+       int first_msdu;
+       int last_msdu;
+};
+
+#define HAL_INVALID_PEERID 0xffff
+#define VHT_SIG_SU_NSS_MASK 0x7
+
+#define HAL_RX_MAX_MCS 12
+#define HAL_RX_MAX_NSS 8
+
+#define HAL_TLV_STATUS_PPDU_NOT_DONE    HAL_RX_MON_STATUS_PPDU_NOT_DONE
+#define HAL_TLV_STATUS_PPDU_DONE        HAL_RX_MON_STATUS_PPDU_DONE
+#define HAL_TLV_STATUS_BUF_DONE         HAL_RX_MON_STATUS_BUF_DONE
+
+/*
+ * Per-PPDU receive information accumulated while parsing monitor-mode
+ * status TLVs (see HAL_TLV_STATUS_* above).  Field names follow the
+ * upstream ath12k layout; many are only meaningful for specific PHY
+ * modes (HT/VHT/HE).
+ */
+struct hal_rx_mon_ppdu_info {
+	uint32_t ppdu_id;
+	uint32_t ppdu_ts;
+	uint32_t num_mpdu_fcs_ok;
+	uint32_t num_mpdu_fcs_err;
+	uint32_t preamble_type;
+	uint16_t chan_num;
+	uint16_t tcp_msdu_count;
+	uint16_t tcp_ack_msdu_count;
+	uint16_t udp_msdu_count;
+	uint16_t other_msdu_count;
+	uint16_t peer_id;
+	uint8_t rate;
+	uint8_t mcs;
+	uint8_t nss;
+	uint8_t bw;
+	uint8_t vht_flag_values1;
+	uint8_t vht_flag_values2;
+	uint8_t vht_flag_values3[4];
+	uint8_t vht_flag_values4;
+	uint8_t vht_flag_values5;
+	uint16_t vht_flag_values6;
+	uint8_t is_stbc;
+	uint8_t gi;
+	uint8_t ldpc;
+	uint8_t beamformed;
+	uint8_t rssi_comb;
+	uint8_t rssi_chain_pri20[HAL_RX_MAX_NSS];
+	uint8_t tid;
+	uint16_t ht_flags;
+	uint16_t vht_flags;
+	uint16_t he_flags;
+	uint16_t he_mu_flags;
+	uint8_t dcm;
+	uint8_t ru_alloc;
+	uint8_t reception_type;
+	uint64_t tsft;
+	uint64_t rx_duration;
+	uint16_t frame_control;
+	uint32_t ast_index;
+	uint8_t rs_fcs_err;
+	uint8_t rs_flags;
+	uint8_t cck_flag;
+	uint8_t ofdm_flag;
+	uint8_t ulofdma_flag;
+	uint8_t frame_control_info_valid;
+	uint16_t he_per_user_1;
+	uint16_t he_per_user_2;
+	uint8_t he_per_user_position;
+	uint8_t he_per_user_known;
+	uint16_t he_flags1;
+	uint16_t he_flags2;
+	uint8_t he_RU[4];
+	uint16_t he_data1;
+	uint16_t he_data2;
+	uint16_t he_data3;
+	uint16_t he_data4;
+	uint16_t he_data5;
+	uint16_t he_data6;
+	uint32_t ppdu_len;
+	uint32_t prev_ppdu_id;
+	uint32_t device_id;
+	uint16_t first_data_seq_ctrl;
+	uint8_t monitor_direct_used;
+	uint8_t data_sequence_control_info_valid;
+	uint8_t ltf_size;
+	uint8_t rxpcu_filter_pass;
+	char rssi_chain[8][8];
+	struct hal_rx_user_status userstats;
+};
+
+/*
+ * State of the monitor status ring relative to the monitor destination
+ * ring, used to decide how the two rings should be reaped.
+ */
+enum dp_mon_status_buf_state {
+	/* PPDU id matches in dst ring and status ring */
+	DP_MON_STATUS_MATCH,
+	/* status ring dma is not done */
+	DP_MON_STATUS_NO_DMA,
+	/* status ring is lagging, reap status ring */
+	DP_MON_STATUS_LAG,
+	/* status ring is leading, reap dst ring and drop */
+	DP_MON_STATUS_LEAD,
+	/* replenish monitor status ring (identifier spelling kept from upstream) */
+	DP_MON_STATUS_REPLINISH,
+};
+
+/*
+ * Counters tracking monitor-mode status/destination ring processing;
+ * useful when debugging monitor ring stalls and PPDU mismatches.
+ */
+struct qwz_pdev_mon_stats {
+	uint32_t status_ppdu_state;
+	uint32_t status_ppdu_start;
+	uint32_t status_ppdu_end;
+	uint32_t status_ppdu_compl;
+	uint32_t status_ppdu_start_mis;
+	uint32_t status_ppdu_end_mis;
+	uint32_t status_ppdu_done;
+	uint32_t dest_ppdu_done;
+	uint32_t dest_mpdu_done;
+	uint32_t dest_mpdu_drop;
+	uint32_t dup_mon_linkdesc_cnt;
+	uint32_t dup_mon_buf_cnt;
+	uint32_t dest_mon_stuck;
+	uint32_t dest_mon_not_reaped;
+};
+
+/*
+ * Monitor-mode state for one pdev: link descriptor banks, the PPDU
+ * info currently being decoded, and ring bookkeeping.  The #ifdef'd
+ * fields are Linux leftovers not yet ported.
+ */
+struct qwz_mon_data {
+	struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+	struct hal_rx_mon_ppdu_info mon_ppdu_info;
+
+	uint32_t mon_ppdu_status;
+	uint32_t mon_last_buf_cookie;
+	uint64_t mon_last_linkdesc_paddr;
+	uint16_t chan_noise_floor;
+	bool hold_mon_dst_ring;
+	enum dp_mon_status_buf_state buf_state;
+	bus_addr_t mon_status_paddr;
+	struct dp_full_mon_mpdu *mon_mpdu;
+#ifdef notyet
+	struct hal_sw_mon_ring_entries sw_mon_entries;
+#endif
+	struct qwz_pdev_mon_stats rx_mon_stats;
+#ifdef notyet
+	/* lock for monitor data */
+	spinlock_t mon_lock;
+	struct sk_buff_head rx_status_q;
+#endif
+};
+
+
+#define MAX_RXDMA_PER_PDEV     2
+
+/*
+ * Per-pdev (per-radio) datapath state: RXDMA buffer/error/monitor
+ * rings plus the monitor-mode state above.
+ */
+struct qwz_pdev_dp {
+	uint32_t mac_id;
+	uint32_t mon_dest_ring_stuck_cnt;
+#if 0
+	atomic_t num_tx_pending;
+	wait_queue_head_t tx_empty_waitq;
+#endif
+	struct dp_rxdma_ring rx_refill_buf_ring;
+	struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
+	struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
+	struct dp_srng rxdma_mon_dst_ring;
+	struct dp_srng rxdma_mon_desc_ring;
+	struct dp_rxdma_ring rxdma_mon_buf_ring;
+	struct dp_rxdma_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
+#if 0
+	struct ieee80211_rx_status rx_status;
+#endif
+	struct qwz_mon_data mon_data;
+};
+
+/*
+ * Small per-vif ring of in-flight management frames;
+ * 'cur' indexes the ring, 'queued' counts occupied slots.
+ */
+struct qwz_txmgmt_queue {
+	struct qwz_tx_data data[8];
+	int cur;
+	int queued;
+};
+
+/*
+ * Per-virtual-interface (firmware vdev) state.  Mode-specific state
+ * lives in the 'u' union; #if 0 blocks are unported Linux fields.
+ */
+struct qwz_vif {
+	uint32_t vdev_id;
+	enum wmi_vdev_type vdev_type;
+	enum wmi_vdev_subtype vdev_subtype;
+	uint32_t beacon_interval;
+	uint32_t dtim_period;
+	uint16_t ast_hash;
+	uint16_t ast_idx;
+	uint16_t tcl_metadata;
+	uint8_t hal_addr_search_flags;
+	uint8_t search_type;
+
+	struct qwz_softc *sc;
+
+	uint16_t tx_seq_no;
+	struct wmi_wmm_params_all_arg wmm_params;
+	TAILQ_ENTRY(qwz_vif) entry;
+	union {
+		struct {
+			uint32_t uapsd;
+		} sta;
+		struct {
+			/* 127 stations; wmi limit */
+			uint8_t tim_bitmap[16];
+			uint8_t tim_len;
+			uint32_t ssid_len;
+			uint8_t ssid[IEEE80211_NWID_LEN];
+			bool hidden_ssid;
+			/* P2P_IE with NoA attribute for P2P_GO case */
+			uint32_t noa_len;
+			uint8_t *noa_data;
+		} ap;
+	} u;
+
+	bool is_started;
+	bool is_up;
+	bool ftm_responder;
+	bool spectral_enabled;
+	bool ps;
+	uint32_t aid;
+	uint8_t bssid[IEEE80211_ADDR_LEN];
+#if 0
+	struct cfg80211_bitrate_mask bitrate_mask;
+	struct delayed_work connection_loss_work;
+#endif
+	int num_legacy_stations;
+	int rtscts_prot_mode;
+	int txpower;
+	bool rsnie_present;
+	bool wpaie_present;
+	bool bcca_zero_sent;
+	bool do_not_send_tmpl;
+	struct ieee80211_channel *chan;
+#if 0
+	struct ath12k_arp_ns_offload arp_ns_offload;
+	struct ath12k_rekey_data rekey_data;
+#endif
+#ifdef CONFIG_ATH12K_DEBUGFS
+	struct dentry *debugfs_twt;
+#endif /* CONFIG_ATH12K_DEBUGFS */
+
+	struct qwz_txmgmt_queue txmgmt;
+};
+
+TAILQ_HEAD(qwz_vif_list, qwz_vif);
+
+/*
+ * Per-channel survey results: noise floor and channel active/busy
+ * time counters (units as reported by firmware — TODO confirm).
+ */
+struct qwz_survey_info {
+	int8_t noise;
+	uint64_t time;
+	uint64_t time_busy;
+};
+
+#define ATH12K_IRQ_NUM_MAX 52
+#define ATH12K_EXT_IRQ_NUM_MAX 16
+
+/*
+ * A group of external (datapath) interrupt vectors serviced together.
+ * The #if 0 NAPI fields are Linux leftovers with no OpenBSD analogue.
+ */
+struct qwz_ext_irq_grp {
+	struct qwz_softc *sc;
+	uint32_t irqs[ATH12K_EXT_IRQ_NUM_MAX];
+	uint32_t num_irq;
+	uint32_t grp_id;
+	uint64_t timestamp;
+#if 0
+	bool napi_enabled;
+	struct napi_struct napi;
+	struct net_device napi_ndev;
+#endif
+};
+
+/* Radiotap headers prepended to frames handed to bpf(4) listeners. */
+struct qwz_rx_radiotap_header {
+	struct ieee80211_radiotap_header wr_ihdr;
+} __packed;
+
+/*
+ * NOTE(review): the IWX_ prefix below was inherited from iwx(4) via
+ * qwx(4); consider renaming to QWZ_ once the driver settles.
+ */
+#define IWX_RX_RADIOTAP_PRESENT	0 /* TODO add more information */
+
+struct qwz_tx_radiotap_header {
+	struct ieee80211_radiotap_header wt_ihdr;
+} __packed;
+
+#define IWX_TX_RADIOTAP_PRESENT	0 /* TODO add more information */
+
+/*
+ * Argument for the setkey task: the node and key to operate on, and
+ * whether the key should be installed or removed.
+ */
+struct qwz_setkey_task_arg {
+	struct ieee80211_node *ni;
+	struct ieee80211_key *k;
+	int cmd;
+#define QWZ_ADD_KEY	1
+#define QWZ_DEL_KEY	2
+};
+
+/*
+ * Main per-device softc, shared by bus attachment drivers (qwz_pci).
+ * Combines net80211 state, firmware/QMI bring-up state, and the
+ * CE/DP/HAL datapath state for the whole chip.
+ */
+struct qwz_softc {
+	struct device			sc_dev;
+	struct ieee80211com		sc_ic;
+	uint32_t			sc_flags;
+	int				sc_node;
+
+	int (*sc_newstate)(struct ieee80211com *, enum ieee80211_state, int);
+
+	struct rwlock ioctl_rwl;
+
+	struct task		init_task; /* NB: not reference-counted */
+	struct refcnt		task_refs;
+	struct taskq		*sc_nswq;
+	struct task		newstate_task;
+	enum ieee80211_state	ns_nstate;
+	int			ns_arg;
+
+	/* Task for setting encryption keys and its arguments. */
+	struct task		setkey_task;
+	/*
+	 * At present we need to process at most two keys at once:
+	 * Our pairwise key and a group key.
+	 * When hostap mode is implemented this array needs to grow or
+	 * it might become a bottleneck for associations that occur at
+	 * roughly the same time.
+	 */
+	struct qwz_setkey_task_arg setkey_arg[2];
+	int setkey_cur;
+	int setkey_tail;
+	int setkey_nkeys;
+
+	int install_key_done;
+	int install_key_status;
+
+	enum ath12k_11d_state	state_11d;
+	int			completed_11d_scan;
+	uint32_t		vdev_id_11d_scan;
+	struct {
+		int started;
+		int completed;
+		int on_channel;
+		struct timeout timeout;
+		enum ath12k_scan_state state;
+		int vdev_id;
+		int is_roc;
+		int roc_freq;
+		int roc_notify;
+	} scan;
+	u_int			scan_channel;
+	struct qwz_survey_info	survey[IEEE80211_CHAN_MAX];
+
+	int			attached;
+	/* Firmware image buffers, indexed by QWZ_FW_*. */
+	struct {
+		u_char *data;
+		size_t size;
+	} fw_img[4];
+#define QWZ_FW_AMSS	0
+#define QWZ_FW_BOARD	1
+#define QWZ_FW_M3	2
+#define QWZ_FW_REGDB	3
+
+	int			sc_tx_timer;
+	uint32_t		qfullmsk;
+#define	QWZ_MGMT_QUEUE_ID	31
+
+	bus_addr_t			mem;
+	struct ath12k_hw_params		hw_params;
+	struct ath12k_hal		hal;
+	struct qwz_ce			ce;
+	struct qwz_dp			dp;
+	struct qwz_pdev_dp		pdev_dp;
+	struct qwz_wmi_base		wmi;
+	struct qwz_htc			htc;
+
+	enum ath12k_firmware_mode	fw_mode;
+	enum ath12k_crypt_mode		crypto_mode;
+	enum ath12k_hw_txrx_mode	frame_mode;
+
+	struct qwz_ext_irq_grp		ext_irq_grp[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+
+	uint16_t			qmi_txn_id;
+	int				qmi_cal_done;
+	struct qwz_qmi_ce_cfg		qmi_ce_cfg;
+	struct qwz_qmi_target_info	qmi_target;
+	struct ath12k_targ_cap		target_caps;
+	int				num_radios;
+	uint32_t			cc_freq_hz;
+	uint32_t			cfg_tx_chainmask;
+	uint32_t			cfg_rx_chainmask;
+	int				num_tx_chains;
+	int				num_rx_chains;
+	int				num_created_vdevs;
+	int				num_started_vdevs;
+	uint32_t			allocated_vdev_map;
+	uint32_t			free_vdev_map;
+	int				num_peers;
+	int				peer_mapped;
+	int				peer_delete_done;
+	int				vdev_setup_done;
+	int				peer_assoc_done;
+
+	struct qwz_dbring_cap	*db_caps;
+	uint32_t		 num_db_cap;
+
+	uint8_t		mac_addr[IEEE80211_ADDR_LEN];
+	int		wmi_ready;
+	uint32_t	wlan_init_status;
+
+	uint32_t pktlog_defs_checksum;
+
+	struct qwz_vif_list vif_list;
+	struct qwz_pdev pdevs[MAX_RADIOS];
+	struct {
+		enum WMI_HOST_WLAN_BAND supported_bands;
+		uint32_t pdev_id;
+	} target_pdev_ids[MAX_RADIOS];
+	uint8_t target_pdev_count;
+	uint32_t pdevs_active;
+	int pdevs_macaddr_valid;
+	struct ath12k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
+
+	/* QRTR address of the firmware's QMI service. */
+	struct {
+		uint32_t service;
+		uint32_t instance;
+		uint32_t node;
+		uint32_t port;
+	} qrtr_server;
+
+	struct qmi_response_type_v01	qmi_resp;
+
+	struct qwz_dmamem		*fwmem;
+	int				 expect_fwmem_req;
+	int				 fwmem_ready;
+	int				 fw_init_done;
+
+	int				 ctl_resp;
+
+	struct qwz_dmamem		*m3_mem;
+
+	struct timeout			 mon_reap_timer;
+#define ATH12K_MON_TIMER_INTERVAL	10
+
+	/* Provided by attachment driver: */
+	struct qwz_ops			ops;
+	bus_dma_tag_t			sc_dmat;
+	enum ath12k_hw_rev		sc_hw_rev;
+	struct qwz_device_id		id;
+	char				sc_bus_str[4]; /* "pci" or "ahb" */
+	int				num_msivec;
+	uint32_t			msi_addr_lo;
+	uint32_t			msi_addr_hi;
+	uint32_t			msi_data_start;
+	const struct qwz_msi_config	*msi_cfg;
+	uint32_t			msi_ce_irqmask;
+
+	struct qmi_wlanfw_request_mem_ind_msg_v01 *sc_req_mem_ind;
+
+	caddr_t			sc_drvbpf;
+
+	union {
+		struct qwz_rx_radiotap_header th;
+		uint8_t pad[IEEE80211_RADIOTAP_HDRLEN];
+	} sc_rxtapu;
+#define sc_rxtap	sc_rxtapu.th
+	int			sc_rxtap_len;
+
+	union {
+		struct qwz_tx_radiotap_header th;
+		uint8_t pad[IEEE80211_RADIOTAP_HDRLEN];
+	} sc_txtapu;
+#define sc_txtap	sc_txtapu.th
+	int			sc_txtap_len;
+};
+
+int    qwz_ce_intr(void *);
+int    qwz_ext_intr(void *);
+int    qwz_dp_service_srng(struct qwz_softc *, int);
+
+int    qwz_init_hw_params(struct qwz_softc *);
+int    qwz_attach(struct qwz_softc *);
+void   qwz_detach(struct qwz_softc *);
+int    qwz_activate(struct device *, int);
+
+void   qwz_core_deinit(struct qwz_softc *);
+void   qwz_ce_cleanup_pipes(struct qwz_softc *);
+
+int    qwz_ioctl(struct ifnet *, u_long, caddr_t);
+void   qwz_start(struct ifnet *);
+void   qwz_watchdog(struct ifnet *);
+int    qwz_media_change(struct ifnet *);
+void   qwz_init_task(void *);
+int    qwz_newstate(struct ieee80211com *, enum ieee80211_state, int);
+void   qwz_newstate_task(void *);
+
+/*
+ * Firmware peer state, retained from the Linux ath12k layout; the
+ * #if 0 blocks are unported Linux fields.  rx_tid is sized
+ * IEEE80211_NUM_TID + 1 (extra slot presumably for the non-QoS
+ * TID — confirm against the datapath code).
+ */
+struct ath12k_peer {
+#if 0
+	struct list_head list;
+	struct ieee80211_sta *sta;
+#endif
+	int vdev_id;
+#if 0
+	u8 addr[ETH_ALEN];
+#endif
+	int peer_id;
+	uint16_t ast_hash;
+	uint8_t pdev_id;
+	uint16_t hw_peer_id;
+#if 0
+	/* protected by ab->data_lock */
+	struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+#endif
+	struct dp_rx_tid rx_tid[IEEE80211_NUM_TID + 1];
+#if 0
+	/* peer id based rhashtable list pointer */
+	struct rhash_head rhash_id;
+	/* peer addr based rhashtable list pointer */
+	struct rhash_head rhash_addr;
+
+	/* Info used in MMIC verification of
+	 * RX fragments
+	 */
+	struct crypto_shash *tfm_mmic;
+	u8 mcast_keyidx;
+	u8 ucast_keyidx;
+	u16 sec_type;
+	u16 sec_type_grp;
+	bool is_authorized;
+	bool dp_setup_done;
+#endif
+};
+
+/*
+ * Driver node: extends net80211's ieee80211_node with the associated
+ * firmware peer and flags tracking which keys have been installed.
+ */
+struct qwz_node {
+	struct ieee80211_node ni;
+	struct ath12k_peer peer;
+	unsigned int flags;
+#define QWZ_NODE_FLAG_HAVE_PAIRWISE_KEY	0x01
+#define QWZ_NODE_FLAG_HAVE_GROUP_KEY	0x02
+};
+
+struct ieee80211_node *qwz_node_alloc(struct ieee80211com *);
+int    qwz_set_key(struct ieee80211com *, struct ieee80211_node *,
+    struct ieee80211_key *);
+void   qwz_delete_key(struct ieee80211com *, struct ieee80211_node *,
+    struct ieee80211_key *);
+
+void   qwz_qrtr_recv_msg(struct qwz_softc *, struct mbuf *);
+
+int    qwz_hal_srng_init(struct qwz_softc *);
+
+int    qwz_ce_alloc_pipes(struct qwz_softc *);
+void   qwz_ce_free_pipes(struct qwz_softc *);
+void   qwz_ce_rx_post_buf(struct qwz_softc *);
+void   qwz_ce_get_shadow_config(struct qwz_softc *, uint32_t **, uint32_t *);
+
+/*
+ * Round i up to the next power of two (i itself if already a power of
+ * two), matching Linux roundup_pow_of_two() semantics which the ported
+ * ath12k ring-sizing code expects.
+ *
+ * fls() returns the 1-based index of the most significant set bit, so
+ * for a non-power-of-two the next power of two is 1 << fls(i).  The
+ * previous expression, 1 << (fls(i) - 1), rounded *down* instead
+ * (e.g. 5 -> 4 rather than 8).  Result is undefined for i == 0 and
+ * for i > (1U << 31), as in Linux.
+ */
+static inline unsigned int
+qwz_roundup_pow_of_two(unsigned int i)
+{
+	return (powerof2(i) ? i : (1U << fls(i)));
+}
+
+/*
+ * Return the host copy-engine configuration flags for the given CE id.
+ * The id must lie within the range supported by this hardware revision.
+ */
+static inline unsigned int
+qwz_ce_get_attr_flags(struct qwz_softc *sc, int ce_id)
+{
+	KASSERT(ce_id < sc->hw_params.ce_count);
+
+	return (sc->hw_params.host_ce_config[ce_id].flags);
+}
+
+/*
+ * Map an 802.11 traffic identifier (TID) to its EDCA access category
+ * per the standard WMM UP-to-AC mapping; anything above TID 5
+ * (including out-of-range values) falls through to voice.
+ */
+static inline enum ieee80211_edca_ac qwz_tid_to_ac(uint32_t tid)
+{
+	switch (tid) {
+	case 0:
+	case 3:
+		return EDCA_AC_BE;
+	case 1:
+	case 2:
+		return EDCA_AC_BK;
+	case 4:
+	case 5:
+		return EDCA_AC_VI;
+	default:
+		return EDCA_AC_VO;
+	}
+}
index a0d7d40..76cecec 100644 (file)
@@ -1,4 +1,4 @@
-#      $OpenBSD: files.pci,v 1.365 2024/04/09 14:58:41 mglocker Exp $
+#      $OpenBSD: files.pci,v 1.366 2024/08/14 14:40:46 patrick Exp $
 #      $NetBSD: files.pci,v 1.20 1996/09/24 17:47:15 christos Exp $
 #
 # Config file and device description for machine-independent PCI code.
@@ -559,6 +559,10 @@ file       dev/pci/if_iwx.c                iwx
 attach qwx at pci with qwx_pci
 file   dev/pci/if_qwx_pci.c            qwx_pci
 
+# Qualcomm 802.11be
+attach qwz at pci with qwz_pci
+file   dev/pci/if_qwz_pci.c            qwz_pci
+
 # C-Media CMI8x38 Audio Chip
 device cmpci {}: audio
 attach cmpci at pci
diff --git a/sys/dev/pci/if_qwz_pci.c b/sys/dev/pci/if_qwz_pci.c
new file mode 100644 (file)
index 0000000..3a45b27
--- /dev/null
@@ -0,0 +1,4142 @@
+/*     $OpenBSD: if_qwz_pci.c,v 1.1 2024/08/14 14:40:46 patrick Exp $  */
+
+/*
+ * Copyright 2023 Stefan Sperling <stsp@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc.
+ * Copyright (c) 2018-2021 The Linux Foundation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted (subject to the limitations in the disclaimer
+ * below) provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ *  * Neither the name of [Owner Organization] nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
+ * THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
+ * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bpfilter.h"
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/lock.h>
+#include <sys/socket.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+#include <sys/endian.h>
+
+#include <machine/bus.h>
+#include <machine/intr.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_radiotap.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcidevs.h>
+
+/* XXX linux porting goo */
+#ifdef __LP64__
+#define BITS_PER_LONG          64
+#else
+#define BITS_PER_LONG          32
+#endif
+#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+#define FIELD_GET(_m, _v) ((typeof(_m))(((_v) & (_m)) >> __bf_shf(_m)))
+#define BIT(x)               (1UL << (x))
+#define test_bit(i, a)  ((a) & (1 << (i)))
+#define clear_bit(i, a) ((a)) &= ~(1 << (i))
+#define set_bit(i, a)   ((a)) |= (1 << (i))
+
+/* #define QWZ_DEBUG */
+
+#include <dev/ic/qwzreg.h>
+#include <dev/ic/qwzvar.h>
+
+#ifdef QWZ_DEBUG 
+/* Headers needed for RDDM dump */
+#include <sys/namei.h>
+#include <sys/pledge.h>
+#include <sys/vnode.h>
+#include <sys/fcntl.h>
+#include <sys/stat.h>
+#include <sys/proc.h>
+#endif
+
+#define ATH12K_PCI_IRQ_CE0_OFFSET      3
+#define ATH12K_PCI_IRQ_DP_OFFSET       14
+
+#define ATH12K_PCI_CE_WAKE_IRQ         2
+
+#define ATH12K_PCI_WINDOW_ENABLE_BIT   0x40000000
+#define ATH12K_PCI_WINDOW_REG_ADDRESS  0x310c
+#define ATH12K_PCI_WINDOW_VALUE_MASK   GENMASK(24, 19)
+#define ATH12K_PCI_WINDOW_START                0x80000
+#define ATH12K_PCI_WINDOW_RANGE_MASK   GENMASK(18, 0)
+
+/* BAR0 + 4k is always accessible, and no need to force wakeup. */
+#define ATH12K_PCI_ACCESS_ALWAYS_OFF   0xFE0   /* 4K - 32 = 0xFE0 */
+
+#define TCSR_SOC_HW_VERSION            0x0224
+#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
+#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
+
+/*
+ * pci.h
+ */
+#define PCIE_SOC_GLOBAL_RESET                  0x3008
+#define PCIE_SOC_GLOBAL_RESET_V                        1
+
+#define WLAON_WARM_SW_ENTRY                    0x1f80504
+#define WLAON_SOC_RESET_CAUSE_REG              0x01f8060c
+
+#define PCIE_Q6_COOKIE_ADDR                    0x01f80500
+#define PCIE_Q6_COOKIE_DATA                    0xc0000000
+
+/* register to wake the UMAC from power collapse */
+#define PCIE_SCRATCH_0_SOC_PCIE_REG            0x4040
+
+/* register used for handshake mechanism to validate UMAC is awake */
+#define PCIE_SOC_WAKE_PCIE_LOCAL_REG           0x3004
+
+#define PCIE_PCIE_PARF_LTSSM                   0x1e081b0
+#define PARM_LTSSM_VALUE                       0x111
+
+#define GCC_GCC_PCIE_HOT_RST                   0x1e402bc
+#define GCC_GCC_PCIE_HOT_RST_VAL               0x10
+
+#define PCIE_PCIE_INT_ALL_CLEAR                        0x1e08228
+#define PCIE_SMLH_REQ_RST_LINK_DOWN            0x2
+#define PCIE_INT_CLEAR_ALL                     0xffffffff
+
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(sc) \
+               (sc->hw_params.regs->pcie_qserdes_sysclk_en_sel)
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL     0x10
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK     0xffffffff
+#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(sc) \
+               (sc->hw_params.regs->pcie_pcs_osc_dtct_config_base)
+#define PCIE_PCS_OSC_DTCT_CONFIG1_VAL          0x02
+#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(sc) \
+               (sc->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0x4)
+#define PCIE_PCS_OSC_DTCT_CONFIG2_VAL          0x52
+#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(sc) \
+               (sc->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0xc)
+#define PCIE_PCS_OSC_DTCT_CONFIG4_VAL          0xff
+#define PCIE_PCS_OSC_DTCT_CONFIG_MSK           0x000000ff
+
+#define WLAON_QFPROM_PWR_CTRL_REG              0x01f8031c
+#define QFPROM_PWR_CTRL_VDD4BLOW_MASK          0x4
+
+/*
+ * mhi.h
+ */
+#define PCIE_TXVECDB                           0x360
+#define PCIE_TXVECSTATUS                       0x368
+#define PCIE_RXVECDB                           0x394
+#define PCIE_RXVECSTATUS                       0x39C
+
+#define MHI_CHAN_CTX_CHSTATE_MASK              GENMASK(7, 0)
+#define   MHI_CHAN_CTX_CHSTATE_DISABLED                0
+#define   MHI_CHAN_CTX_CHSTATE_ENABLED         1
+#define   MHI_CHAN_CTX_CHSTATE_RUNNING         2
+#define   MHI_CHAN_CTX_CHSTATE_SUSPENDED       3
+#define   MHI_CHAN_CTX_CHSTATE_STOP            4
+#define   MHI_CHAN_CTX_CHSTATE_ERROR           5
+#define MHI_CHAN_CTX_BRSTMODE_MASK             GENMASK(9, 8)
+#define MHI_CHAN_CTX_BRSTMODE_SHFT             8
+#define   MHI_CHAN_CTX_BRSTMODE_DISABLE                2
+#define   MHI_CHAN_CTX_BRSTMODE_ENABLE         3
+#define MHI_CHAN_CTX_POLLCFG_MASK              GENMASK(15, 10)
+#define MHI_CHAN_CTX_RESERVED_MASK             GENMASK(31, 16)
+
+#define QWZ_MHI_CONFIG_QCA6390_MAX_CHANNELS    128
+#define QWZ_MHI_CONFIG_QCA6390_TIMEOUT_MS      2000
+#define QWZ_MHI_CONFIG_QCA9074_MAX_CHANNELS    30
+
+#define MHI_CHAN_TYPE_INVALID          0
+#define MHI_CHAN_TYPE_OUTBOUND         1 /* to device */
+#define MHI_CHAN_TYPE_INBOUND          2 /* from device */
+#define MHI_CHAN_TYPE_INBOUND_COALESCED        3
+
+#define MHI_EV_CTX_RESERVED_MASK       GENMASK(7, 0)
+#define MHI_EV_CTX_INTMODC_MASK                GENMASK(15, 8)
+#define MHI_EV_CTX_INTMODT_MASK                GENMASK(31, 16)
+#define MHI_EV_CTX_INTMODT_SHFT                16
+
+#define MHI_ER_TYPE_INVALID    0
+#define MHI_ER_TYPE_VALID      1
+
+#define MHI_ER_DATA    0
+#define MHI_ER_CTRL    1
+
+#define MHI_CH_STATE_DISABLED  0
+#define MHI_CH_STATE_ENABLED   1
+#define MHI_CH_STATE_RUNNING   2
+#define MHI_CH_STATE_SUSPENDED 3
+#define MHI_CH_STATE_STOP      4
+#define MHI_CH_STATE_ERROR     5
+
+#define QWZ_NUM_EVENT_CTX      2
+
+/* Event context. Shared with device. */
+struct qwz_mhi_event_ctxt {
+	uint32_t intmod;	/* interrupt moderation (MHI_EV_CTX_INTMOD* fields) */
+	uint32_t ertype;	/* MHI_ER_TYPE_* */
+	uint32_t msivec;	/* MSI vector this event ring signals */
+
+	uint64_t rbase;		/* ring base address */
+	uint64_t rlen;		/* ring length in bytes */
+	uint64_t rp;		/* read pointer */
+	uint64_t wp;		/* write pointer */
+} __packed;
+
+/* Channel context. Shared with device. */
+struct qwz_mhi_chan_ctxt {
+	uint32_t chcfg;		/* MHI_CHAN_CTX_CHSTATE/BRSTMODE/POLLCFG fields */
+	uint32_t chtype;	/* MHI_CHAN_TYPE_* (transfer direction) */
+	uint32_t erindex;	/* index of the associated event ring */
+
+	uint64_t rbase;		/* ring base address */
+	uint64_t rlen;		/* ring length in bytes */
+	uint64_t rp;		/* read pointer */
+	uint64_t wp;		/* write pointer */
+} __packed;
+
+/* Command context. Shared with device. */
+struct qwz_mhi_cmd_ctxt {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t reserved2;
+
+	uint64_t rbase;		/* ring base address */
+	uint64_t rlen;		/* ring length in bytes */
+	uint64_t rp;		/* read pointer */
+	uint64_t wp;		/* write pointer */
+} __packed;
+
+/* Generic MHI ring element: a 64-bit pointer plus two control words. */
+struct qwz_mhi_ring_element {
+	uint64_t ptr;
+	uint32_t dword[2];
+};
+
+/* Host-side DMA bookkeeping for one element of an MHI transfer ring. */
+struct qwz_xfer_data {
+	bus_dmamap_t	map;
+	struct mbuf	*m;
+};
+
+#define QWZ_PCI_XFER_MAX_DATA_SIZE     0xffff
+#define QWZ_PCI_XFER_RING_MAX_ELEMENTS 64
+
+/*
+ * Host view of one MHI transfer (channel) ring, together with the
+ * channel context shared with the device that it is bound to.
+ */
+struct qwz_pci_xfer_ring {
+	struct qwz_dmamem	*dmamem;
+	bus_size_t		size;
+	uint32_t		mhi_chan_id;
+	uint32_t		mhi_chan_state;
+	uint32_t		mhi_chan_direction;
+	uint32_t		mhi_chan_event_ring_index;
+	uint32_t		db_addr;
+	uint32_t		cmd_status;
+	int			num_elements;
+	int			queued;
+	struct qwz_xfer_data	data[QWZ_PCI_XFER_RING_MAX_ELEMENTS];
+	uint64_t		rp;
+	uint64_t		wp;
+	struct qwz_mhi_chan_ctxt *chan_ctxt;
+};
+
+
+#define QWZ_PCI_EVENT_RING_MAX_ELEMENTS        256
+
+/*
+ * Host view of one MHI event ring, together with the event context
+ * shared with the device that it is bound to.
+ */
+struct qwz_pci_event_ring {
+	struct qwz_dmamem	*dmamem;
+	bus_size_t		size;
+	uint32_t		mhi_er_type;
+	uint32_t		mhi_er_irq;
+	uint32_t		mhi_er_irq_moderation_ms;
+	uint32_t		db_addr;
+	int			num_elements;
+	uint64_t		rp;
+	uint64_t		wp;
+	struct qwz_mhi_event_ctxt *event_ctxt;
+};
+
+/* Host-side DMA bookkeeping for one element of the MHI command ring. */
+struct qwz_cmd_data {
+	bus_dmamap_t	map;
+	struct mbuf	*m;
+};
+
+#define QWZ_PCI_CMD_RING_MAX_ELEMENTS  128
+
+/* MHI command ring used to issue channel commands to the device. */
+struct qwz_pci_cmd_ring {
+	struct qwz_dmamem	*dmamem;
+	bus_size_t		size;
+	uint64_t		rp;
+	uint64_t		wp;
+	int			num_elements;
+	int			queued;
+};
+
+struct qwz_pci_ops;
+struct qwz_msi_config;
+
+#define QWZ_NUM_MSI_VEC        32
+
+/*
+ * PCI attachment softc: embeds the common qwz_softc and adds PCI bus
+ * resources, MSI vectors, and the MHI transport state (BHI/BHIE
+ * offsets, context memory, transfer/event/command rings).
+ */
+struct qwz_pci_softc {
+	struct qwz_softc	sc_sc;
+	pci_chipset_tag_t	sc_pc;
+	pcitag_t		sc_tag;
+	int			sc_cap_off;
+	int			sc_msi_off;
+	pcireg_t		sc_msi_cap;
+	void			*sc_ih[QWZ_NUM_MSI_VEC];
+	char			sc_ivname[QWZ_NUM_MSI_VEC][16];
+	struct qwz_ext_irq_grp	ext_irq_grp[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+	int			mhi_irq[2];
+	bus_space_tag_t		sc_st;
+	bus_space_handle_t	sc_sh;
+	bus_addr_t		sc_map;
+	bus_size_t		sc_mapsize;
+
+	pcireg_t		sc_lcsr;
+	uint32_t		sc_flags;
+#define ATH12K_PCI_ASPM_RESTORE	1
+
+	uint32_t		register_window;
+	const struct qwz_pci_ops *sc_pci_ops;
+
+	uint32_t		 bhi_off;
+	uint32_t		 bhi_ee;
+	uint32_t		 bhie_off;
+	uint32_t		 mhi_state;
+	uint32_t		 max_chan;
+
+	uint64_t		 wake_db;
+
+	/*
+	 * DMA memory for AMSS.bin firmware image.
+	 * This memory must remain available to the device until
+	 * the device is powered down.
+	 */
+	struct qwz_dmamem	*amss_data;
+	struct qwz_dmamem	*amss_vec;
+
+	struct qwz_dmamem	 *rddm_vec;
+	struct qwz_dmamem	 *rddm_data;
+	int			 rddm_triggered;
+	struct task		 rddm_task;
+#define	QWZ_RDDM_DUMP_SIZE	0x420000
+
+	struct qwz_dmamem	*chan_ctxt;
+	struct qwz_dmamem	*event_ctxt;
+	struct qwz_dmamem	*cmd_ctxt;
+
+
+	struct qwz_pci_xfer_ring xfer_rings[4];
+#define QWZ_PCI_XFER_RING_LOOPBACK_OUTBOUND	0
+#define QWZ_PCI_XFER_RING_LOOPBACK_INBOUND	1
+#define QWZ_PCI_XFER_RING_IPCR_OUTBOUND		2
+#define QWZ_PCI_XFER_RING_IPCR_INBOUND		3
+	struct qwz_pci_event_ring event_rings[QWZ_NUM_EVENT_CTX];
+	struct qwz_pci_cmd_ring cmd_ring;
+};
+
+int    qwz_pci_match(struct device *, void *, void *);
+void   qwz_pci_attach(struct device *, struct device *, void *);
+int    qwz_pci_detach(struct device *, int);
+void   qwz_pci_attach_hook(struct device *);
+void   qwz_pci_free_xfer_rings(struct qwz_pci_softc *);
+int    qwz_pci_alloc_xfer_ring(struct qwz_softc *, struct qwz_pci_xfer_ring *,
+           uint32_t, uint32_t, uint32_t, size_t);
+int    qwz_pci_alloc_xfer_rings_qca6390(struct qwz_pci_softc *);
+int    qwz_pci_alloc_xfer_rings_qcn9074(struct qwz_pci_softc *);
+void   qwz_pci_free_event_rings(struct qwz_pci_softc *);
+int    qwz_pci_alloc_event_ring(struct qwz_softc *,
+           struct qwz_pci_event_ring *, uint32_t, uint32_t, uint32_t, size_t);
+int    qwz_pci_alloc_event_rings(struct qwz_pci_softc *);
+void   qwz_pci_free_cmd_ring(struct qwz_pci_softc *);
+int    qwz_pci_init_cmd_ring(struct qwz_softc *, struct qwz_pci_cmd_ring *);
+uint32_t qwz_pci_read(struct qwz_softc *, uint32_t);
+void   qwz_pci_write(struct qwz_softc *, uint32_t, uint32_t);
+
+void   qwz_pci_read_hw_version(struct qwz_softc *, uint32_t *, uint32_t *);
+uint32_t qwz_pcic_read32(struct qwz_softc *, uint32_t);
+void    qwz_pcic_write32(struct qwz_softc *, uint32_t, uint32_t);
+
+void   qwz_pcic_ext_irq_enable(struct qwz_softc *);
+void   qwz_pcic_ext_irq_disable(struct qwz_softc *);
+int    qwz_pcic_config_irq(struct qwz_softc *, struct pci_attach_args *);
+
+int    qwz_pci_start(struct qwz_softc *);
+void   qwz_pci_stop(struct qwz_softc *);
+void   qwz_pci_aspm_disable(struct qwz_softc *);
+void   qwz_pci_aspm_restore(struct qwz_softc *);
+int    qwz_pci_power_up(struct qwz_softc *);
+void   qwz_pci_power_down(struct qwz_softc *);
+
+int    qwz_pci_bus_wake_up(struct qwz_softc *);
+void   qwz_pci_bus_release(struct qwz_softc *);
+void   qwz_pci_window_write32(struct qwz_softc *, uint32_t, uint32_t);
+uint32_t qwz_pci_window_read32(struct qwz_softc *, uint32_t);
+
+int    qwz_mhi_register(struct qwz_softc *);
+void   qwz_mhi_unregister(struct qwz_softc *);
+void   qwz_mhi_ring_doorbell(struct qwz_softc *sc, uint64_t, uint64_t);
+void   qwz_mhi_device_wake(struct qwz_softc *);
+void   qwz_mhi_device_zzz(struct qwz_softc *);
+int    qwz_mhi_wake_db_clear_valid(struct qwz_softc *);
+void   qwz_mhi_init_xfer_rings(struct qwz_pci_softc *);
+void   qwz_mhi_init_event_rings(struct qwz_pci_softc *);
+void   qwz_mhi_init_cmd_ring(struct qwz_pci_softc *);
+void   qwz_mhi_init_dev_ctxt(struct qwz_pci_softc *);
+int    qwz_mhi_send_cmd(struct qwz_pci_softc *psc, uint32_t, uint32_t);
+void * qwz_pci_xfer_ring_get_elem(struct qwz_pci_xfer_ring *, uint64_t);
+struct qwz_xfer_data *qwz_pci_xfer_ring_get_data(struct qwz_pci_xfer_ring *,
+           uint64_t);
+int    qwz_mhi_submit_xfer(struct qwz_softc *sc, struct mbuf *m);
+int    qwz_mhi_start_channel(struct qwz_pci_softc *,
+           struct qwz_pci_xfer_ring *);
+int    qwz_mhi_start_channels(struct qwz_pci_softc *);
+int    qwz_mhi_start(struct qwz_pci_softc *);
+void   qwz_mhi_stop(struct qwz_softc *);
+int    qwz_mhi_reset_device(struct qwz_softc *, int);
+void   qwz_mhi_clear_vector(struct qwz_softc *);
+int    qwz_mhi_fw_load_handler(struct qwz_pci_softc *);
+int    qwz_mhi_await_device_reset(struct qwz_softc *);
+int    qwz_mhi_await_device_ready(struct qwz_softc *);
+void   qwz_mhi_ready_state_transition(struct qwz_pci_softc *);
+void   qwz_mhi_mission_mode_state_transition(struct qwz_pci_softc *);
+void   qwz_mhi_low_power_mode_state_transition(struct qwz_pci_softc *);
+void   qwz_mhi_set_state(struct qwz_softc *, uint32_t);
+void   qwz_mhi_init_mmio(struct qwz_pci_softc *);
+int    qwz_mhi_fw_load_bhi(struct qwz_pci_softc *, uint8_t *, size_t);
+int    qwz_mhi_fw_load_bhie(struct qwz_pci_softc *, uint8_t *, size_t);
+void   qwz_rddm_prepare(struct qwz_pci_softc *);
+#ifdef QWZ_DEBUG
+void   qwz_rddm_task(void *);
+#endif
+void * qwz_pci_event_ring_get_elem(struct qwz_pci_event_ring *, uint64_t);
+void   qwz_pci_intr_ctrl_event_mhi(struct qwz_pci_softc *, uint32_t);
+void   qwz_pci_intr_ctrl_event_ee(struct qwz_pci_softc *, uint32_t);
+void   qwz_pci_intr_ctrl_event_cmd_complete(struct qwz_pci_softc *,
+           uint64_t, uint32_t);
+int    qwz_pci_intr_ctrl_event(struct qwz_pci_softc *,
+           struct qwz_pci_event_ring *);
+void   qwz_pci_intr_data_event_tx(struct qwz_pci_softc *,
+           struct qwz_mhi_ring_element *);
+int    qwz_pci_intr_data_event(struct qwz_pci_softc *,
+           struct qwz_pci_event_ring *);
+int    qwz_pci_intr_mhi_ctrl(void *);
+int    qwz_pci_intr_mhi_data(void *);
+int    qwz_pci_intr(void *);
+
/*
 * Chip-specific PCI operations.  wakeup/release bracket register access
 * on chips that need an explicit bus wakeup (QCA6390 class); they are
 * NULL for chips that do not (QCN9074 class).
 */
struct qwz_pci_ops {
	int	 (*wakeup)(struct qwz_softc *);		/* may be NULL */
	void	 (*release)(struct qwz_softc *);	/* may be NULL */
	int	 (*get_msi_irq)(struct qwz_softc *, unsigned int);
	void	 (*window_write32)(struct qwz_softc *, uint32_t, uint32_t);
	uint32_t (*window_read32)(struct qwz_softc *, uint32_t);
	/* Allocate the chip's set of MHI transfer (channel) rings. */
	int	 (*alloc_xfer_rings)(struct qwz_pci_softc *);
};
+
+
/* PCI ops for QCA6390-class chips (also used for WCN6855/QCNFA765). */
static const struct qwz_pci_ops qwz_pci_ops_qca6390 = {
	.wakeup = qwz_pci_bus_wake_up,
	.release = qwz_pci_bus_release,
#if notyet
	.get_msi_irq = qwz_pci_get_msi_irq,
#endif
	.window_write32 = qwz_pci_window_write32,
	.window_read32 = qwz_pci_window_read32,
	.alloc_xfer_rings = qwz_pci_alloc_xfer_rings_qca6390,
};
+
/* PCI ops for QCN9074-class chips; no explicit bus wakeup required. */
static const struct qwz_pci_ops qwz_pci_ops_qcn9074 = {
	.wakeup = NULL,
	.release = NULL,
#if notyet
	.get_msi_irq = qwz_pci_get_msi_irq,
#endif
	.window_write32 = qwz_pci_window_write32,
	.window_read32 = qwz_pci_window_read32,
	.alloc_xfer_rings = qwz_pci_alloc_xfer_rings_qcn9074,
};
+
/* Autoconf glue: softc size and match/attach/detach/activate hooks. */
const struct cfattach qwz_pci_ca = {
	sizeof(struct qwz_pci_softc),
	qwz_pci_match,
	qwz_pci_attach,
	qwz_pci_detach,
	qwz_activate
};
+
/* XXX pcidev */
#define PCI_PRODUCT_QUALCOMM_QCA6390	0x1101
#define PCI_PRODUCT_QUALCOMM_QCN9074	0x1104

/*
 * Supported devices.  Only QCNFA765 is enabled for now; QCA6390 and
 * QCN9074 are kept behind "notyet" until they are brought up.
 */
static const struct pci_matchid qwz_pci_devices[] = {
#if notyet
	{ PCI_VENDOR_QUALCOMM, PCI_PRODUCT_QUALCOMM_QCA6390 },
	{ PCI_VENDOR_QUALCOMM, PCI_PRODUCT_QUALCOMM_QCN9074 },
#endif
	{ PCI_VENDOR_QUALCOMM, PCI_PRODUCT_QUALCOMM_QCNFA765 }
};
+
+int
+qwz_pci_match(struct device *parent, void *match, void *aux)
+{
+       return pci_matchbyid(aux, qwz_pci_devices, nitems(qwz_pci_devices));
+}
+
+void
+qwz_pci_init_qmi_ce_config(struct qwz_softc *sc)
+{
+       struct qwz_qmi_ce_cfg *cfg = &sc->qmi_ce_cfg;
+
+       qwz_ce_get_shadow_config(sc, &cfg->shadow_reg_v2,
+           &cfg->shadow_reg_v2_len);
+}
+
/*
 * Fallback MSI layout used when only a single MSI vector is available:
 * all users (MHI, CE, WAKE, DP) share vector 0.
 */
const struct qwz_msi_config qwz_msi_config_one_msi = {
	.total_vectors = 1,
	.total_users = 4,
	.users = (struct qwz_msi_user[]) {
		{ .name = "MHI", .num_vectors = 1, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
	},
};
+
/*
 * Per-chip MSI vector layouts, selected by hw_rev in
 * qwz_pcic_init_msi_config() when multiple MSI vectors are in use.
 */
const struct qwz_msi_config qwz_msi_config[] = {
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct qwz_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH12K_HW_QCA6390_HW20,
	},
	{
		/* QCN9074 has no WAKE user and fewer vectors overall. */
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct qwz_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
		.hw_rev = ATH12K_HW_QCN9074_HW10,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct qwz_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH12K_HW_WCN6855_HW20,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct qwz_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH12K_HW_WCN6855_HW21,
	},
	{
		/* WCN6750: MHI is not routed over PCI-style MSI here. */
		.total_vectors = 28,
		.total_users = 2,
		.users = (struct qwz_msi_user[]) {
			{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
		},
		.hw_rev = ATH12K_HW_WCN6750_HW10,
	},
};
+
+int
+qwz_pcic_init_msi_config(struct qwz_softc *sc)
+{
+       const struct qwz_msi_config *msi_config;
+       int i;
+
+       if (!test_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) {
+               sc->msi_cfg = &qwz_msi_config_one_msi;
+               return 0;
+       }
+       for (i = 0; i < nitems(qwz_msi_config); i++) {
+               msi_config = &qwz_msi_config[i];
+
+               if (msi_config->hw_rev == sc->sc_hw_rev)
+                       break;
+       }
+
+       if (i == nitems(qwz_msi_config)) {
+               printf("%s: failed to fetch msi config, "
+                   "unsupported hw version: 0x%x\n",
+                   sc->sc_dev.dv_xname, sc->sc_hw_rev);
+               return EINVAL;
+       }
+
+       sc->msi_cfg = msi_config;
+       return 0;
+}
+
/*
 * Read back the MSI message address and data programmed into the
 * device's MSI capability, so the driver can pass these values to
 * firmware and interrupt rings.  Handles both the 64-bit and 32-bit
 * MSI capability layouts (data register lives at a different offset).
 */
int
qwz_pci_alloc_msi(struct qwz_softc *sc)
{
	struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
	uint64_t addr;
	pcireg_t data;

	if (psc->sc_msi_cap & PCI_MSI_MC_C64) {
		/* 64-bit capable: address is split into low/high words. */
		uint64_t addr_hi;
		pcireg_t addr_lo;

		addr_lo = pci_conf_read(psc->sc_pc, psc->sc_tag,
		    psc->sc_msi_off + PCI_MSI_MA);
		addr_hi = pci_conf_read(psc->sc_pc, psc->sc_tag,
		    psc->sc_msi_off + PCI_MSI_MAU32);
		addr = addr_hi << 32 | addr_lo;
		data = pci_conf_read(psc->sc_pc, psc->sc_tag,
		    psc->sc_msi_off + PCI_MSI_MD64);
	} else {
		addr = pci_conf_read(psc->sc_pc, psc->sc_tag,
		    psc->sc_msi_off + PCI_MSI_MA);
		data = pci_conf_read(psc->sc_pc, psc->sc_tag,
		    psc->sc_msi_off + PCI_MSI_MD32);
	}

	sc->msi_addr_lo = addr & 0xffffffff;
	sc->msi_addr_hi = ((uint64_t)addr) >> 32;
	sc->msi_data_start = data;

	DPRINTF("%s: MSI addr: 0x%llx MSI data: 0x%x\n", sc->sc_dev.dv_xname,
	    addr, data);

	return 0;
}
+
+int
+qwz_pcic_map_service_to_pipe(struct qwz_softc *sc, uint16_t service_id,
+    uint8_t *ul_pipe, uint8_t *dl_pipe)
+{
+       const struct service_to_pipe *entry;
+       int ul_set = 0, dl_set = 0;
+       int i;
+
+       for (i = 0; i < sc->hw_params.svc_to_ce_map_len; i++) {
+               entry = &sc->hw_params.svc_to_ce_map[i];
+
+               if (le32toh(entry->service_id) != service_id)
+                       continue;
+
+               switch (le32toh(entry->pipedir)) {
+               case PIPEDIR_NONE:
+                       break;
+               case PIPEDIR_IN:
+                       *dl_pipe = le32toh(entry->pipenum);
+                       dl_set = 1;
+                       break;
+               case PIPEDIR_OUT:
+                       *ul_pipe = le32toh(entry->pipenum);
+                       ul_set = 1;
+                       break;
+               case PIPEDIR_INOUT:
+                       *dl_pipe = le32toh(entry->pipenum);
+                       *ul_pipe = le32toh(entry->pipenum);
+                       dl_set = 1;
+                       ul_set = 1;
+                       break;
+               }
+       }
+
+       if (!ul_set || !dl_set) {
+               DPRINTF("%s: found no uplink and no downlink\n", __func__);
+               return ENOENT;
+       }
+
+       return 0;
+}
+
+int
+qwz_pcic_get_user_msi_vector(struct qwz_softc *sc, char *user_name,
+    int *num_vectors, uint32_t *user_base_data, uint32_t *base_vector)
+{
+       const struct qwz_msi_config *msi_config = sc->msi_cfg;
+       int idx;
+
+       for (idx = 0; idx < msi_config->total_users; idx++) {
+               if (strcmp(user_name, msi_config->users[idx].name) == 0) {
+                       *num_vectors = msi_config->users[idx].num_vectors;
+                       *base_vector =  msi_config->users[idx].base_vector;
+                       *user_base_data = *base_vector + sc->msi_data_start;
+
+                       DPRINTF("%s: MSI assignment %s num_vectors %d "
+                           "user_base_data %u base_vector %u\n", __func__,
+                           user_name, *num_vectors, *user_base_data,
+                           *base_vector);
+                       return 0;
+               }
+       }
+
+       DPRINTF("%s: Failed to find MSI assignment for %s\n",
+           sc->sc_dev.dv_xname, user_name);
+
+       return EINVAL;
+}
+
/*
 * Autoconf attach: map the BAR, set up MSI vectors and interrupt
 * handlers, identify the chip revision, allocate MHI channel/event/
 * command contexts and rings, initialize HAL SRNG and CE pipes, and
 * attach the 802.11 layer.  Firmware load (and thus reading the MAC
 * address) is deferred to mountroot via qwz_pci_attach_hook().
 */
void
qwz_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct qwz_pci_softc *psc = (struct qwz_pci_softc *)self;
	struct qwz_softc *sc = &psc->sc_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	uint32_t soc_hw_version_major, soc_hw_version_minor;
	const struct qwz_pci_ops *pci_ops;
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	pcireg_t memtype, reg;
	const char *intrstr;
	int error;
	pcireg_t sreg;

	sc->sc_dmat = pa->pa_dmat;
	psc->sc_pc = pa->pa_pc;
	psc->sc_tag = pa->pa_tag;

#ifdef __HAVE_FDT
	sc->sc_node = PCITAG_NODE(pa->pa_tag);
#endif

	rw_init(&sc->ioctl_rwl, "qwzioctl");

	/* Record PCI IDs; subsystem IDs are used for board-data lookup. */
	sreg = pci_conf_read(psc->sc_pc, psc->sc_tag, PCI_SUBSYS_ID_REG);
	sc->id.bdf_search = ATH12K_BDF_SEARCH_DEFAULT;
	sc->id.vendor = PCI_VENDOR(pa->pa_id);
	sc->id.device = PCI_PRODUCT(pa->pa_id);
	sc->id.subsystem_vendor = PCI_VENDOR(sreg);
	sc->id.subsystem_device = PCI_PRODUCT(sreg);

	strlcpy(sc->sc_bus_str, "pci", sizeof(sc->sc_bus_str));

	/* Bus-independent ops used by the qwz(4) core. */
	sc->ops.read32 = qwz_pcic_read32;
	sc->ops.write32 = qwz_pcic_write32;
	sc->ops.start = qwz_pci_start;
	sc->ops.stop = qwz_pci_stop;
	sc->ops.power_up = qwz_pci_power_up;
	sc->ops.power_down = qwz_pci_power_down;
	sc->ops.submit_xfer = qwz_mhi_submit_xfer;
	sc->ops.irq_enable = qwz_pcic_ext_irq_enable;
	sc->ops.irq_disable = qwz_pcic_ext_irq_disable;
	sc->ops.map_service_to_pipe = qwz_pcic_map_service_to_pipe;
	sc->ops.get_user_msi_vector = qwz_pcic_get_user_msi_vector;

	if (pci_get_capability(psc->sc_pc, psc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &psc->sc_cap_off, NULL) == 0) {
		printf(": can't find PCIe capability structure\n");
		return;
	}

	if (pci_get_capability(psc->sc_pc, psc->sc_tag, PCI_CAP_MSI,
	    &psc->sc_msi_off, &psc->sc_msi_cap) == 0) {
		printf(": can't find MSI capability structure\n");
		return;
	}

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, reg);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	if (pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &psc->sc_st, &psc->sc_sh, &psc->sc_map, &psc->sc_mapsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	sc->mem = psc->sc_map;

	/*
	 * Try to enable 32 MSI vectors; fall back to a single shared
	 * vector when that fails.  With multiple vectors, MHI control
	 * and data events get dedicated vectors 1 and 2.
	 */
	sc->num_msivec = 32;
	if (pci_intr_enable_msivec(pa, sc->num_msivec) != 0) {
		sc->num_msivec = 1;
		if (pci_intr_map_msi(pa, &ih) != 0) {
			printf(": can't map interrupt\n");
			return;
		}
		clear_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags);
	} else {
		if (pci_intr_map_msivec(pa, 0, &ih) != 0 &&
		    pci_intr_map_msi(pa, &ih) != 0) {
			printf(": can't map interrupt\n");
			return;
		}
		set_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags);
		psc->mhi_irq[MHI_ER_CTRL] = 1;
		psc->mhi_irq[MHI_ER_DATA] = 2;
	}

	/* Vector 0 handles BHI (and everything in single-vector mode). */
	intrstr = pci_intr_string(psc->sc_pc, ih);
	snprintf(psc->sc_ivname[0], sizeof(psc->sc_ivname[0]), "%s:bhi",
	    sc->sc_dev.dv_xname);
	psc->sc_ih[0] = pci_intr_establish(psc->sc_pc, ih, IPL_NET,
	    qwz_pci_intr, psc, psc->sc_ivname[0]);
	if (psc->sc_ih[0] == NULL) {
		printf(": can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s\n", intrstr);

	if (test_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) {
		int msivec;

		/* Dedicated vector for MHI control events. */
		msivec = psc->mhi_irq[MHI_ER_CTRL];
		if (pci_intr_map_msivec(pa, msivec, &ih) != 0 &&
		    pci_intr_map_msi(pa, &ih) != 0) {
			printf(": can't map interrupt\n");
			return;
		}
		snprintf(psc->sc_ivname[msivec],
		    sizeof(psc->sc_ivname[msivec]),
		    "%s:mhic", sc->sc_dev.dv_xname);
		psc->sc_ih[msivec] = pci_intr_establish(psc->sc_pc, ih,
		    IPL_NET, qwz_pci_intr_mhi_ctrl, psc,
		    psc->sc_ivname[msivec]);
		if (psc->sc_ih[msivec] == NULL) {
			printf("%s: can't establish interrupt\n",
			    sc->sc_dev.dv_xname);
			return;
		}

		/* Dedicated vector for MHI data events. */
		msivec = psc->mhi_irq[MHI_ER_DATA];
		if (pci_intr_map_msivec(pa, msivec, &ih) != 0 &&
		    pci_intr_map_msi(pa, &ih) != 0) {
			printf(": can't map interrupt\n");
			return;
		}
		snprintf(psc->sc_ivname[msivec],
		    sizeof(psc->sc_ivname[msivec]),
		    "%s:mhid", sc->sc_dev.dv_xname);
		psc->sc_ih[msivec] = pci_intr_establish(psc->sc_pc, ih,
		    IPL_NET, qwz_pci_intr_mhi_data, psc,
		    psc->sc_ivname[msivec]);
		if (psc->sc_ih[msivec] == NULL) {
			printf("%s: can't establish interrupt\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/* Identify the chip and pick matching PCI ops and MHI limits. */
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_QUALCOMM_QCA6390:
		qwz_pci_read_hw_version(sc, &soc_hw_version_major,
		    &soc_hw_version_minor);
		switch (soc_hw_version_major) {
		case 2:
			sc->sc_hw_rev = ATH12K_HW_QCA6390_HW20;
			break;
		default:
			printf(": unsupported QCA6390 SOC version: %d %d\n",
				soc_hw_version_major, soc_hw_version_minor);
			return;
		}

		pci_ops = &qwz_pci_ops_qca6390;
		psc->max_chan = QWZ_MHI_CONFIG_QCA6390_MAX_CHANNELS;
		break;
	case PCI_PRODUCT_QUALCOMM_QCN9074:
		pci_ops = &qwz_pci_ops_qcn9074;
		sc->sc_hw_rev = ATH12K_HW_QCN9074_HW10;
		psc->max_chan = QWZ_MHI_CONFIG_QCA9074_MAX_CHANNELS;
		break;
	case PCI_PRODUCT_QUALCOMM_QCNFA765:
		sc->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD;
		qwz_pci_read_hw_version(sc, &soc_hw_version_major,
		    &soc_hw_version_minor);
		switch (soc_hw_version_major) {
		case 2:
			switch (soc_hw_version_minor) {
			case 0x00:
			case 0x01:
				sc->sc_hw_rev = ATH12K_HW_WCN6855_HW20;
				break;
			case 0x10:
			case 0x11:
				sc->sc_hw_rev = ATH12K_HW_WCN6855_HW21;
				break;
			default:
				goto unsupported_wcn6855_soc;
			}
			break;
		default:
unsupported_wcn6855_soc:
			printf(": unsupported WCN6855 SOC version: %d %d\n",
				soc_hw_version_major, soc_hw_version_minor);
			return;
		}

		pci_ops = &qwz_pci_ops_qca6390;
		psc->max_chan = QWZ_MHI_CONFIG_QCA6390_MAX_CHANNELS;
		break;
	default:
		printf(": unsupported chip\n");
		return;
	}

	/* register PCI ops */
	psc->sc_pci_ops = pci_ops;

	error = qwz_pcic_init_msi_config(sc);
	if (error)
		goto err_pci_free_region;

	error = qwz_pci_alloc_msi(sc);
	if (error) {
		printf("%s: failed to enable msi: %d\n", sc->sc_dev.dv_xname,
		    error);
		goto err_pci_free_region;
	}

	error = qwz_init_hw_params(sc);
	if (error)
		goto err_pci_disable_msi;

	/* MHI channel context array shared with the device. */
	psc->chan_ctxt = qwz_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct qwz_mhi_chan_ctxt) * psc->max_chan, 0);
	if (psc->chan_ctxt == NULL) {
		printf("%s: could not allocate channel context array\n",
		    sc->sc_dev.dv_xname);
		goto err_pci_disable_msi;
	}

	if (psc->sc_pci_ops->alloc_xfer_rings(psc)) {
		printf("%s: could not allocate transfer rings\n",
		    sc->sc_dev.dv_xname);
		goto err_pci_free_chan_ctxt;
	}

	psc->event_ctxt = qwz_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct qwz_mhi_event_ctxt) * QWZ_NUM_EVENT_CTX, 0);
	if (psc->event_ctxt == NULL) {
		printf("%s: could not allocate event context array\n",
		    sc->sc_dev.dv_xname);
		goto err_pci_free_xfer_rings;
	}

	if (qwz_pci_alloc_event_rings(psc)) {
		printf("%s: could not allocate event rings\n",
		    sc->sc_dev.dv_xname);
		goto err_pci_free_event_ctxt;
	}

	psc->cmd_ctxt = qwz_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct qwz_mhi_cmd_ctxt), 0);
	if (psc->cmd_ctxt == NULL) {
		printf("%s: could not allocate command context array\n",
		    sc->sc_dev.dv_xname);
		goto err_pci_free_event_rings;
	}

	if (qwz_pci_init_cmd_ring(sc, &psc->cmd_ring))  {
		printf("%s: could not allocate command ring\n",
		    sc->sc_dev.dv_xname);
		goto err_pci_free_cmd_ctxt;
	}

	error = qwz_mhi_register(sc);
	if (error) {
		printf(": failed to register mhi: %d\n", error);
		goto err_pci_free_cmd_ring;
	}

	error = qwz_hal_srng_init(sc);
	if (error)
		goto err_mhi_unregister;

	error = qwz_ce_alloc_pipes(sc);
	if (error) {
		printf(": failed to allocate ce pipes: %d\n", error);
		goto err_hal_srng_deinit;
	}

	sc->sc_nswq = taskq_create("qwzns", 1, IPL_NET, 0);
	if (sc->sc_nswq == NULL)
		goto err_ce_free;

	qwz_pci_init_qmi_ce_config(sc);

	error = qwz_pcic_config_irq(sc, pa);
	if (error) {
		printf("%s: failed to config irq: %d\n",
		    sc->sc_dev.dv_xname, error);
		goto err_ce_free;
	}
#if notyet
	ret = ath12k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
	if (ret) {
		ath12k_err(ab, "failed to set irq affinity %d\n", ret);
		goto err_free_irq;
	}

	/* kernel may allocate a dummy vector before request_irq and
	 * then allocate a real vector when request_irq is called.
	 * So get msi_data here again to avoid spurious interrupt
	 * as msi_data will configured to srngs.
	 */
	ret = ath12k_pci_config_msi_data(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to config msi_data: %d\n", ret);
		goto err_irq_affinity_cleanup;
	}
#endif
#ifdef QWZ_DEBUG
	task_set(&psc->rddm_task, qwz_rddm_task, psc);
#endif
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
#if 0
	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
#endif
	    IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_RSN |		/* WPA/RSN */
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
#if 0
	    IEEE80211_C_MONITOR |	/* monitor mode supported */
#endif
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qwz_ioctl;
	ifp->if_start = qwz_start;
	ifp->if_watchdog = qwz_watchdog;
	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	if_attach(ifp);
	ieee80211_ifattach(ifp);
	ieee80211_media_init(ifp, qwz_media_change, ieee80211_media_status);

	ic->ic_node_alloc = qwz_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = qwz_newstate;
	ic->ic_set_key = qwz_set_key;
	ic->ic_delete_key = qwz_delete_key;
#if 0
	ic->ic_updatechan = qwz_updatechan;
	ic->ic_updateprot = qwz_updateprot;
	ic->ic_updateslot = qwz_updateslot;
	ic->ic_updateedca = qwz_updateedca;
	ic->ic_updatedtim = qwz_updatedtim;
#endif
	/*
	 * We cannot read the MAC address without loading the
	 * firmware from disk. Postpone until mountroot is done.
	 */
	config_mountroot(self, qwz_pci_attach_hook);
	return;

err_ce_free:
	qwz_ce_free_pipes(sc);
	/*
	 * NOTE(review): srng deinit and mhi unregister are not performed
	 * on these paths yet — confirm whether teardown is still TODO.
	 */
err_hal_srng_deinit:
err_mhi_unregister:
err_pci_free_cmd_ring:
	qwz_pci_free_cmd_ring(psc);
err_pci_free_cmd_ctxt:
	qwz_dmamem_free(sc->sc_dmat, psc->cmd_ctxt);
	psc->cmd_ctxt = NULL;
err_pci_free_event_rings:
	qwz_pci_free_event_rings(psc);
err_pci_free_event_ctxt:
	qwz_dmamem_free(sc->sc_dmat, psc->event_ctxt);
	psc->event_ctxt = NULL;
err_pci_free_xfer_rings:
	qwz_pci_free_xfer_rings(psc);
err_pci_free_chan_ctxt:
	qwz_dmamem_free(sc->sc_dmat, psc->chan_ctxt);
	psc->chan_ctxt = NULL;
err_pci_disable_msi:
err_pci_free_region:
	/*
	 * NOTE(review): only interrupt vector 0 is disestablished and the
	 * BAR mapping is left in place — verify this is intentional.
	 */
	pci_intr_disestablish(psc->sc_pc, psc->sc_ih[0]);
	return;
}
+
+int
+qwz_pci_detach(struct device *self, int flags)
+{
+       struct qwz_pci_softc *psc = (struct qwz_pci_softc *)self;
+       struct qwz_softc *sc = &psc->sc_sc;
+
+       if (psc->sc_ih[0]) {
+               pci_intr_disestablish(psc->sc_pc, psc->sc_ih[0]);
+               psc->sc_ih[0] = NULL;
+       }
+
+       qwz_detach(sc);
+
+       qwz_pci_free_event_rings(psc);
+       qwz_pci_free_xfer_rings(psc);
+       qwz_pci_free_cmd_ring(psc);
+
+       if (psc->event_ctxt) {
+               qwz_dmamem_free(sc->sc_dmat, psc->event_ctxt);
+               psc->event_ctxt = NULL;
+       }
+       if (psc->chan_ctxt) {
+               qwz_dmamem_free(sc->sc_dmat, psc->chan_ctxt);
+               psc->chan_ctxt = NULL;
+       }
+       if (psc->cmd_ctxt) {
+               qwz_dmamem_free(sc->sc_dmat, psc->cmd_ctxt);
+               psc->cmd_ctxt = NULL;
+       }
+
+       if (psc->amss_data) {
+               qwz_dmamem_free(sc->sc_dmat, psc->amss_data);
+               psc->amss_data = NULL;
+       }
+       if (psc->amss_vec) {
+               qwz_dmamem_free(sc->sc_dmat, psc->amss_vec);
+               psc->amss_vec = NULL;
+       }
+
+       return 0;
+}
+
/*
 * Mountroot hook: finish attachment once the filesystem (and thus
 * firmware files) are available.
 */
void
qwz_pci_attach_hook(struct device *self)
{
	struct qwz_softc *sc = (struct qwz_softc *)self;
	int s;

	s = splnet();
	qwz_attach(sc);
	splx(s);
}
+
+void
+qwz_pci_free_xfer_rings(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       int i;
+
+       for (i = 0; i < nitems(psc->xfer_rings); i++) {
+               struct qwz_pci_xfer_ring *ring = &psc->xfer_rings[i];
+               if (ring->dmamem) {
+                       qwz_dmamem_free(sc->sc_dmat, ring->dmamem);
+                       ring->dmamem = NULL;
+               }
+               memset(ring, 0, sizeof(*ring));
+       }
+}
+
+int
+qwz_pci_alloc_xfer_ring(struct qwz_softc *sc, struct qwz_pci_xfer_ring *ring,
+    uint32_t id, uint32_t direction, uint32_t event_ring_index,
+    size_t num_elements)
+{
+       bus_size_t size;
+       int i, err;
+
+       memset(ring, 0, sizeof(*ring));
+
+       size = sizeof(struct qwz_mhi_ring_element) * num_elements;
+       /* Hardware requires that rings are aligned to ring size. */
+       ring->dmamem = qwz_dmamem_alloc(sc->sc_dmat, size, size);
+       if (ring->dmamem == NULL)
+               return ENOMEM;
+
+       ring->size = size;
+       ring->mhi_chan_id = id;
+       ring->mhi_chan_state = MHI_CH_STATE_DISABLED;
+       ring->mhi_chan_direction = direction;
+       ring->mhi_chan_event_ring_index = event_ring_index;
+       ring->num_elements = num_elements;
+
+       memset(ring->data, 0, sizeof(ring->data));
+       for (i = 0; i < ring->num_elements; i++) {
+               struct qwz_xfer_data *xfer = &ring->data[i];
+               
+               err = bus_dmamap_create(sc->sc_dmat, QWZ_PCI_XFER_MAX_DATA_SIZE,
+                   1, QWZ_PCI_XFER_MAX_DATA_SIZE, 0, BUS_DMA_NOWAIT,
+                   &xfer->map);
+               if (err) {
+                       printf("%s: could not create xfer DMA map\n",
+                           sc->sc_dev.dv_xname);
+                       goto fail;
+               }
+
+               if (direction == MHI_CHAN_TYPE_INBOUND) {
+                       struct mbuf *m;
+
+                       m = m_gethdr(M_DONTWAIT, MT_DATA);
+                       if (m == NULL) {
+                               err = ENOBUFS;
+                               goto fail;
+                       }
+
+                       MCLGETL(m, M_DONTWAIT, QWZ_PCI_XFER_MAX_DATA_SIZE);
+                       if ((m->m_flags & M_EXT) == 0) {
+                               m_freem(m);
+                               err = ENOBUFS;
+                               goto fail;
+                       }
+
+                       m->m_len = m->m_pkthdr.len = QWZ_PCI_XFER_MAX_DATA_SIZE;
+                       err = bus_dmamap_load_mbuf(sc->sc_dmat, xfer->map,
+                           m, BUS_DMA_READ | BUS_DMA_NOWAIT);
+                       if (err) {
+                               printf("%s: can't map mbuf (error %d)\n",
+                                   sc->sc_dev.dv_xname, err);
+                               m_freem(m);
+                               goto fail;
+                       }
+
+                       bus_dmamap_sync(sc->sc_dmat, xfer->map, 0,
+                           QWZ_PCI_XFER_MAX_DATA_SIZE, BUS_DMASYNC_PREREAD);
+                       xfer->m = m;
+               }
+       }
+
+       return 0;
+fail:
+       for (i = 0; i < ring->num_elements; i++) {
+               struct qwz_xfer_data *xfer = &ring->data[i];
+
+               if (xfer->map) {
+                       bus_dmamap_sync(sc->sc_dmat, xfer->map, 0,
+                           xfer->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(sc->sc_dmat, xfer->map);
+                       bus_dmamap_destroy(sc->sc_dmat, xfer->map);
+                       xfer->map = NULL;
+               }
+
+               if (xfer->m) {
+                       m_freem(xfer->m);
+                       xfer->m = NULL;
+               }
+       }
+       return 1;
+}
+
+int
+qwz_pci_alloc_xfer_rings_qca6390(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       int ret;
+
+       ret = qwz_pci_alloc_xfer_ring(sc,
+           &psc->xfer_rings[QWZ_PCI_XFER_RING_LOOPBACK_OUTBOUND],
+           0, MHI_CHAN_TYPE_OUTBOUND, 0, 32);
+       if (ret)
+               goto fail;
+
+       ret = qwz_pci_alloc_xfer_ring(sc,
+           &psc->xfer_rings[QWZ_PCI_XFER_RING_LOOPBACK_INBOUND],
+           1, MHI_CHAN_TYPE_INBOUND, 0, 32);
+       if (ret)
+               goto fail;
+
+       ret = qwz_pci_alloc_xfer_ring(sc,
+           &psc->xfer_rings[QWZ_PCI_XFER_RING_IPCR_OUTBOUND],
+           20, MHI_CHAN_TYPE_OUTBOUND, 1, 64);
+       if (ret)
+               goto fail;
+
+       ret = qwz_pci_alloc_xfer_ring(sc,
+           &psc->xfer_rings[QWZ_PCI_XFER_RING_IPCR_INBOUND],
+           21, MHI_CHAN_TYPE_INBOUND, 1, 64);
+       if (ret)
+               goto fail;
+
+       return 0;
+fail:
+       qwz_pci_free_xfer_rings(psc);
+       return ret;
+}
+
+int
+qwz_pci_alloc_xfer_rings_qcn9074(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       int ret;
+
+       ret = qwz_pci_alloc_xfer_ring(sc,
+           &psc->xfer_rings[QWZ_PCI_XFER_RING_LOOPBACK_OUTBOUND],
+           0, MHI_CHAN_TYPE_OUTBOUND, 1, 32);
+       if (ret)
+               goto fail;
+
+       ret = qwz_pci_alloc_xfer_ring(sc,
+           &psc->xfer_rings[QWZ_PCI_XFER_RING_LOOPBACK_INBOUND],
+           1, MHI_CHAN_TYPE_INBOUND, 1, 32);
+       if (ret)
+               goto fail;
+
+       ret = qwz_pci_alloc_xfer_ring(sc,
+           &psc->xfer_rings[QWZ_PCI_XFER_RING_IPCR_OUTBOUND],
+           20, MHI_CHAN_TYPE_OUTBOUND, 1, 32);
+       if (ret)
+               goto fail;
+
+       ret = qwz_pci_alloc_xfer_ring(sc,
+           &psc->xfer_rings[QWZ_PCI_XFER_RING_IPCR_INBOUND],
+           21, MHI_CHAN_TYPE_INBOUND, 1, 32);
+       if (ret)
+               goto fail;
+
+       return 0;
+fail:
+       qwz_pci_free_xfer_rings(psc);
+       return ret;
+}
+
+void
+qwz_pci_free_event_rings(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       int i;
+
+       for (i = 0; i < nitems(psc->event_rings); i++) {
+               struct qwz_pci_event_ring *ring = &psc->event_rings[i];
+               if (ring->dmamem) {
+                       qwz_dmamem_free(sc->sc_dmat, ring->dmamem);
+                       ring->dmamem = NULL;
+               }
+               memset(ring, 0, sizeof(*ring));
+       }
+}
+
+int
+qwz_pci_alloc_event_ring(struct qwz_softc *sc, struct qwz_pci_event_ring *ring,
+    uint32_t type, uint32_t irq, uint32_t intmod, size_t num_elements)
+{
+       bus_size_t size;
+
+       memset(ring, 0, sizeof(*ring));
+
+       size = sizeof(struct qwz_mhi_ring_element) * num_elements;
+       /* Hardware requires that rings are aligned to ring size. */
+       ring->dmamem = qwz_dmamem_alloc(sc->sc_dmat, size, size);
+       if (ring->dmamem == NULL)
+               return ENOMEM;
+
+       ring->size = size;
+       ring->mhi_er_type = type;
+       ring->mhi_er_irq = irq;
+       ring->mhi_er_irq_moderation_ms = intmod;
+       ring->num_elements = num_elements;
+       return 0;
+}
+
+int
+qwz_pci_alloc_event_rings(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       int ret;
+
+       ret = qwz_pci_alloc_event_ring(sc, &psc->event_rings[0],
+           MHI_ER_CTRL, psc->mhi_irq[MHI_ER_CTRL], 0, 32);
+       if (ret)
+               goto fail;
+
+       ret = qwz_pci_alloc_event_ring(sc, &psc->event_rings[1],
+           MHI_ER_DATA, psc->mhi_irq[MHI_ER_DATA], 1, 256);
+       if (ret)
+               goto fail;
+
+       return 0;
+fail:
+       qwz_pci_free_event_rings(psc);
+       return ret;
+}
+
+void
+qwz_pci_free_cmd_ring(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       struct qwz_pci_cmd_ring *ring = &psc->cmd_ring;
+
+       if (ring->dmamem)
+               qwz_dmamem_free(sc->sc_dmat, ring->dmamem);
+
+       memset(ring, 0, sizeof(*ring));
+}
+
+int
+qwz_pci_init_cmd_ring(struct qwz_softc *sc, struct qwz_pci_cmd_ring *ring)
+{
+       memset(ring, 0, sizeof(*ring));
+
+       ring->num_elements = QWZ_PCI_CMD_RING_MAX_ELEMENTS;
+       ring->size = sizeof(struct qwz_mhi_ring_element) * ring->num_elements;
+
+       /* Hardware requires that rings are aligned to ring size. */
+       ring->dmamem = qwz_dmamem_alloc(sc->sc_dmat, ring->size, ring->size);
+       if (ring->dmamem == NULL)
+               return ENOMEM;
+
+       return 0;
+}
+
+uint32_t
+qwz_pci_read(struct qwz_softc *sc, uint32_t addr)
+{
+       struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
+
+       return (bus_space_read_4(psc->sc_st, psc->sc_sh, addr));
+}
+
+void
+qwz_pci_write(struct qwz_softc *sc, uint32_t addr, uint32_t val)
+{
+       struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
+
+       bus_space_write_4(psc->sc_st, psc->sc_sh, addr, val);
+}
+
/*
 * Read the TCSR SoC hardware version register and split it into its
 * major and minor revision fields for chip identification.
 */
void
qwz_pci_read_hw_version(struct qwz_softc *sc, uint32_t *major,
    uint32_t *minor)
{
	uint32_t soc_hw_version;

	soc_hw_version = qwz_pcic_read32(sc, TCSR_SOC_HW_VERSION);
	*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, soc_hw_version);
	*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, soc_hw_version);
	DPRINTF("%s: pci tcsr_soc_hw_version major %d minor %d\n",
	    sc->sc_dev.dv_xname, *major, *minor);
}
+
/*
 * Generic 32-bit register read.  Low offsets are read directly from
 * the BAR; higher offsets go through the bus-specific register window
 * and may require waking the device first.
 */
uint32_t
qwz_pcic_read32(struct qwz_softc *sc, uint32_t offset)
{
	struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
	int ret = 0;
	uint32_t val;
	bool wakeup_required;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup the device to access.
	 */
	wakeup_required = test_bit(ATH12K_FLAG_DEVICE_INIT_DONE, sc->sc_flags)
	    && offset >= ATH12K_PCI_ACCESS_ALWAYS_OFF;
	if (wakeup_required && psc->sc_pci_ops->wakeup)
		ret = psc->sc_pci_ops->wakeup(sc);

	if (offset < ATH12K_PCI_WINDOW_START)
		val = qwz_pci_read(sc, offset);
	else
		val = psc->sc_pci_ops->window_read32(sc, offset);

	/* Only release the device if the wakeup above succeeded. */
	if (wakeup_required && !ret && psc->sc_pci_ops->release)
		psc->sc_pci_ops->release(sc);

	return val;
}
+
/*
 * Generic 32-bit register write; mirrors qwz_pcic_read32(): direct
 * BAR access for low offsets, windowed access (with optional device
 * wakeup) for high offsets.
 */
void
qwz_pcic_write32(struct qwz_softc *sc, uint32_t offset, uint32_t value)
{
	struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
	int ret = 0;
	bool wakeup_required;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup the device to access.
	 */
	wakeup_required = test_bit(ATH12K_FLAG_DEVICE_INIT_DONE, sc->sc_flags)
	    && offset >= ATH12K_PCI_ACCESS_ALWAYS_OFF;
	if (wakeup_required && psc->sc_pci_ops->wakeup)
		ret = psc->sc_pci_ops->wakeup(sc);

	if (offset < ATH12K_PCI_WINDOW_START)
		qwz_pci_write(sc, offset, value);
	else
		psc->sc_pci_ops->window_write32(sc, offset, value);

	/* Only release the device if the wakeup above succeeded. */
	if (wakeup_required && !ret && psc->sc_pci_ops->release)
		psc->sc_pci_ops->release(sc);
}
+
+void
+qwz_pcic_ext_irq_disable(struct qwz_softc *sc)
+{
+       clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags);
+
+       /* In case of one MSI vector, we handle irq enable/disable in a
+        * uniform way since we only have one irq
+        */
+       if (!test_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
+               return;
+
+       DPRINTF("%s not implemented\n", __func__);
+}
+
+void
+qwz_pcic_ext_irq_enable(struct qwz_softc *sc)
+{
+       set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags);
+
+       /* In case of one MSI vector, we handle irq enable/disable in a
+        * uniform way since we only have one irq
+        */
+       if (!test_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
+               return;
+
+       DPRINTF("%s not implemented\n", __func__);
+}
+
+void
+qwz_pcic_ce_irq_enable(struct qwz_softc *sc, uint16_t ce_id)
+{
+       /* In case of one MSI vector, we handle irq enable/disable in a
+        * uniform way since we only have one irq
+        */
+       if (!test_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
+               return;
+
+       /* OpenBSD PCI stack does not yet implement MSI interrupt masking. */
+       sc->msi_ce_irqmask |= (1U << ce_id);
+}
+
+void
+qwz_pcic_ce_irq_disable(struct qwz_softc *sc, uint16_t ce_id)
+{
+       /* In case of one MSI vector, we handle irq enable/disable in a
+        * uniform way since we only have one irq
+        */
+       if (!test_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
+               return;
+
+       /* OpenBSD PCI stack does not yet implement MSI interrupt masking. */
+       sc->msi_ce_irqmask &= ~(1U << ce_id);
+}
+
/*
 * Disable one extended irq group.  Currently a no-op in the
 * multi-vector case as well; only the early return distinguishes the
 * single-vector path, which is handled uniformly elsewhere.
 */
void
qwz_pcic_ext_grp_disable(struct qwz_ext_irq_grp *irq_grp)
{
	struct qwz_softc *sc = irq_grp->sc;

	/* In case of one MSI vector, we handle irq enable/disable
	 * in a uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
		return;
}
+
/*
 * Configure extended (DP) interrupts: establish one MSI vector per
 * interrupt group that has any rings assigned in the hw ring masks.
 * Only used when multiple MSI vectors are available.
 */
int
qwz_pcic_ext_irq_config(struct qwz_softc *sc, struct pci_attach_args *pa)
{
	struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
	int i, ret, num_vectors = 0;
	uint32_t msi_data_start = 0;
	uint32_t base_vector = 0;

	if (!test_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
		return 0;

	ret = qwz_pcic_get_user_msi_vector(sc, "DP", &num_vectors,
	    &msi_data_start, &base_vector);
	if (ret < 0)
		return ret;

	for (i = 0; i < nitems(sc->ext_irq_grp); i++) {
		struct qwz_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
		uint32_t num_irq = 0;

		irq_grp->sc = sc;
		irq_grp->grp_id = i;
#if 0	
		init_dummy_netdev(&irq_grp->napi_ndev);
		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
			       ath12k_pcic_ext_grp_napi_poll);
#endif
		/* A group needs an irq if any of its rings is in use. */
		if (sc->hw_params.ring_mask->tx[i] ||
		    sc->hw_params.ring_mask->rx[i] ||
		    sc->hw_params.ring_mask->rx_err[i] ||
		    sc->hw_params.ring_mask->rx_wbm_rel[i] ||
		    sc->hw_params.ring_mask->reo_status[i] ||
		    sc->hw_params.ring_mask->rxdma2host[i] ||
		    sc->hw_params.ring_mask->host2rxdma[i] ||
		    sc->hw_params.ring_mask->rx_mon_status[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = ATH12K_PCI_IRQ_DP_OFFSET + i;

		if (num_irq) {
			int irq_idx = irq_grp->irqs[0];
			pci_intr_handle_t ih;

			/* Prefer a dedicated MSI vector; fall back to
			 * the plain interrupt mapping. */
			if (pci_intr_map_msivec(pa, irq_idx, &ih) != 0 &&
			    pci_intr_map(pa, &ih) != 0) {
				printf("%s: can't map interrupt\n",
				    sc->sc_dev.dv_xname);
				return EIO;
			}

			snprintf(psc->sc_ivname[irq_idx], sizeof(psc->sc_ivname[0]),
			    "%s:ex%d", sc->sc_dev.dv_xname, i);
			psc->sc_ih[irq_idx] = pci_intr_establish(psc->sc_pc, ih,
			    IPL_NET, qwz_ext_intr, irq_grp, psc->sc_ivname[irq_idx]);
			if (psc->sc_ih[irq_idx] == NULL) {
				printf("%s: failed to request irq %d\n",
				    sc->sc_dev.dv_xname, irq_idx);
				return EIO;
			}
		}

		/* Leave the group disabled until the device is started. */
		qwz_pcic_ext_grp_disable(irq_grp);
	}

	return 0;
}
+
/*
 * Map and establish MSI interrupts for all Copy Engine (CE) pipes
 * that use interrupts, then configure the extended (DP) interrupt
 * groups.  Only used with multiple MSI vectors.
 */
int
qwz_pcic_config_irq(struct qwz_softc *sc, struct pci_attach_args *pa)
{
	struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
	struct qwz_ce_pipe *ce_pipe;
	uint32_t msi_data_start;
	uint32_t msi_data_count, msi_data_idx;
	uint32_t msi_irq_start;
	int i, ret, irq_idx;
	pci_intr_handle_t ih;

	if (!test_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
		return 0;

	ret = qwz_pcic_get_user_msi_vector(sc, "CE", &msi_data_count,
	    &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	/* Configure CE irqs */
	for (i = 0, msi_data_idx = 0; i < sc->hw_params.ce_count; i++) {
		/* Skip pipes that operate without interrupts. */
		if (qwz_ce_get_attr_flags(sc, i) & CE_ATTR_DIS_INTR)
			continue;

		ce_pipe = &sc->ce.ce_pipe[i];
		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;

		/* Prefer a dedicated MSI vector; fall back to the
		 * plain interrupt mapping. */
		if (pci_intr_map_msivec(pa, irq_idx, &ih) != 0 &&
		    pci_intr_map(pa, &ih) != 0) {
			printf("%s: can't map interrupt\n",
			    sc->sc_dev.dv_xname);
			return EIO;
		}

		snprintf(psc->sc_ivname[irq_idx], sizeof(psc->sc_ivname[0]),
		    "%s:ce%d", sc->sc_dev.dv_xname, ce_pipe->pipe_num);
		psc->sc_ih[irq_idx] = pci_intr_establish(psc->sc_pc, ih,
		    IPL_NET, qwz_ce_intr, ce_pipe, psc->sc_ivname[irq_idx]);
		if (psc->sc_ih[irq_idx] == NULL) {
			printf("%s: failed to request irq %d\n",
			    sc->sc_dev.dv_xname, irq_idx);
			return EIO;
		}

		/* NOTE(review): msi_data_idx is incremented but never
		 * read anywhere in this function — presumably a
		 * leftover from the Linux driver; confirm and remove. */
		msi_data_idx++;

		/* Leave the CE irq masked until the device is started. */
		qwz_pcic_ce_irq_disable(sc, i);
	}

	ret = qwz_pcic_ext_irq_config(sc, pa);
	if (ret)
		return ret;

	return 0;
}
+
+void
+qwz_pcic_ce_irqs_enable(struct qwz_softc *sc)
+{
+       int i;
+
+       set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, sc->sc_flags);
+
+       for (i = 0; i < sc->hw_params.ce_count; i++) {
+               if (qwz_ce_get_attr_flags(sc, i) & CE_ATTR_DIS_INTR)
+                       continue;
+               qwz_pcic_ce_irq_enable(sc, i);
+       }
+}
+
+void
+qwz_pcic_ce_irqs_disable(struct qwz_softc *sc)
+{
+       int i;
+
+       clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, sc->sc_flags);
+
+       for (i = 0; i < sc->hw_params.ce_count; i++) {
+               if (qwz_ce_get_attr_flags(sc, i) & CE_ATTR_DIS_INTR)
+                       continue;
+               qwz_pcic_ce_irq_disable(sc, i);
+       }
+}
+
/*
 * Final bring-up step: optionally restore ASPM, mark the device
 * initialized, post CE rx buffers and enable CE interrupts.
 * Always returns 0.
 */
int
qwz_pci_start(struct qwz_softc *sc)
{
	/* TODO: for now don't restore ASPM in case of single MSI
	 * vector as MHI register reading in M2 causes system hang.
	 */
	if (test_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
		qwz_pci_aspm_restore(sc);
	else
		DPRINTF("%s: leaving PCI ASPM disabled to avoid MHI M2 problems"
		    "\n", sc->sc_dev.dv_xname);

	/* From here on register access beyond the always-on range may
	 * require waking the device; see qwz_pcic_read32(). */
	set_bit(ATH12K_FLAG_DEVICE_INIT_DONE, sc->sc_flags);

	qwz_ce_rx_post_buf(sc);
	qwz_pcic_ce_irqs_enable(sc);

	return 0;
}
+
/*
 * Disable CE interrupts.  The Linux driver additionally synchronizes
 * irqs and kills tasklets here; that part is not ported yet (see the
 * #if 0 block).
 */
void
qwz_pcic_ce_irq_disable_sync(struct qwz_softc *sc)
{
	qwz_pcic_ce_irqs_disable(sc);
#if 0
	ath12k_pcic_sync_ce_irqs(ab);
	ath12k_pcic_kill_tasklets(ab);
#endif
}
+
/* Stop the device: mask CE interrupts and tear down the CE pipes. */
void
qwz_pci_stop(struct qwz_softc *sc)
{
	qwz_pcic_ce_irq_disable_sync(sc);
	qwz_ce_cleanup_pipes(sc);
}
+
/*
 * pci_ops "wakeup" hook: ring the MHI device-wake doorbell when the
 * current MHI state permits it.  Always reports success.
 */
int
qwz_pci_bus_wake_up(struct qwz_softc *sc)
{
	if (qwz_mhi_wake_db_clear_valid(sc))
		qwz_mhi_device_wake(sc);

	return 0;
}
+
/*
 * pci_ops "release" hook: allow the device to sleep again when the
 * current MHI state permits it.
 */
void
qwz_pci_bus_release(struct qwz_softc *sc)
{
	if (qwz_mhi_wake_db_clear_valid(sc))
		qwz_mhi_device_zzz(sc);
}
+
/*
 * Return the base of the register window an offset must be accessed
 * through.  Chips without a static window map always use the single
 * dynamic (first) window.
 */
uint32_t
qwz_pci_get_window_start(struct qwz_softc *sc, uint32_t offset)
{
	if (!sc->hw_params.static_window_map)
		return ATH12K_PCI_WINDOW_START;

	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH12K_PCI_WINDOW_RANGE_MASK)
		/* if offset lies within DP register range, use 3rd window */
		return 3 * ATH12K_PCI_WINDOW_START;
	else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc)) <
		 ATH12K_PCI_WINDOW_RANGE_MASK)
		 /* if offset lies within CE register range, use 2nd window */
		return 2 * ATH12K_PCI_WINDOW_START;
	else
		return ATH12K_PCI_WINDOW_START;
}
+
/*
 * Select the dynamic register window by programming the upper address
 * bits into the window register.  The last selected window is cached
 * in psc->register_window to avoid redundant writes.
 */
void
qwz_pci_select_window(struct qwz_softc *sc, uint32_t offset)
{
	struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
	uint32_t window = FIELD_GET(ATH12K_PCI_WINDOW_VALUE_MASK, offset);

#if notyet
	lockdep_assert_held(&ab_pci->window_lock);
#endif

	if (window != psc->register_window) {
		qwz_pci_write(sc, ATH12K_PCI_WINDOW_REG_ADDRESS,
		    ATH12K_PCI_WINDOW_ENABLE_BIT | window);
		/* Read back, presumably to post the write before the
		 * window is used — mirrors the Linux driver. */
		(void) qwz_pci_read(sc, ATH12K_PCI_WINDOW_REG_ADDRESS);
		psc->register_window = window;
	}
}
+
+void
+qwz_pci_window_write32(struct qwz_softc *sc, uint32_t offset, uint32_t value)
+{
+       uint32_t window_start;
+
+       window_start = qwz_pci_get_window_start(sc, offset);
+
+       if (window_start == ATH12K_PCI_WINDOW_START) {
+#if notyet
+               spin_lock_bh(&ab_pci->window_lock);
+#endif
+               qwz_pci_select_window(sc, offset);
+               qwz_pci_write(sc, window_start +
+                   (offset & ATH12K_PCI_WINDOW_RANGE_MASK), value);
+#if notyet
+               spin_unlock_bh(&ab_pci->window_lock);
+#endif
+       } else {
+               qwz_pci_write(sc, window_start +
+                   (offset & ATH12K_PCI_WINDOW_RANGE_MASK), value);
+       }
+}
+
+uint32_t
+qwz_pci_window_read32(struct qwz_softc *sc, uint32_t offset)
+{
+       uint32_t window_start, val;
+
+       window_start = qwz_pci_get_window_start(sc, offset);
+
+       if (window_start == ATH12K_PCI_WINDOW_START) {
+#if notyet
+               spin_lock_bh(&ab_pci->window_lock);
+#endif
+               qwz_pci_select_window(sc, offset);
+               val = qwz_pci_read(sc, window_start +
+                   (offset & ATH12K_PCI_WINDOW_RANGE_MASK));
+#if notyet
+               spin_unlock_bh(&ab_pci->window_lock);
+#endif
+       } else {
+               val = qwz_pci_read(sc, window_start +
+                   (offset & ATH12K_PCI_WINDOW_RANGE_MASK));
+       }
+
+       return val;
+}
+
/*
 * Program the static window register so UMAC and CE register ranges
 * are reachable without dynamic window switching.
 * NOTE(review): the bit positions (12 for the UMAC field, 6 for the
 * CE field) are assumed from the previous generation — confirm
 * against this chip's register layout.
 */
void
qwz_pci_select_static_window(struct qwz_softc *sc)
{
	uint32_t umac_window;
	uint32_t ce_window;
	uint32_t window;

	umac_window = FIELD_GET(ATH12K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
	ce_window = FIELD_GET(ATH12K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
	window = (umac_window << 12) | (ce_window << 6);

	qwz_pci_write(sc, ATH12K_PCI_WINDOW_REG_ADDRESS,
	    ATH12K_PCI_WINDOW_ENABLE_BIT | window);
}
+
/*
 * Reset the SoC by toggling the V bit in PCIE_SOC_GLOBAL_RESET.
 * A readback of all-ones afterwards indicates the PCIe link is down.
 */
void
qwz_pci_soc_global_reset(struct qwz_softc *sc)
{
	uint32_t val, msecs;

	val = qwz_pcic_read32(sc, PCIE_SOC_GLOBAL_RESET);

	val |= PCIE_SOC_GLOBAL_RESET_V;

	qwz_pcic_write32(sc, PCIE_SOC_GLOBAL_RESET, val);

	/* TODO: exact time to sleep is uncertain */
	msecs = 10;
	DELAY(msecs * 1000);

	/* Need to toggle V bit back otherwise stuck in reset status */
	val &= ~PCIE_SOC_GLOBAL_RESET_V;

	qwz_pcic_write32(sc, PCIE_SOC_GLOBAL_RESET, val);

	DELAY(msecs * 1000);

	val = qwz_pcic_read32(sc, PCIE_SOC_GLOBAL_RESET);
	if (val == 0xffffffff)
		printf("%s: link down error during global reset\n",
		    sc->sc_dev.dv_xname);
}
+
/*
 * Clear debug/warm-boot state registers so the Q6 firmware core does
 * not take a stale warm-boot path after reset.
 */
void
qwz_pci_clear_dbg_registers(struct qwz_softc *sc)
{
	uint32_t val;

	/* read cookie */
	val = qwz_pcic_read32(sc, PCIE_Q6_COOKIE_ADDR);
	DPRINTF("%s: cookie:0x%x\n", sc->sc_dev.dv_xname, val);

	val = qwz_pcic_read32(sc, WLAON_WARM_SW_ENTRY);
	DPRINTF("%s: WLAON_WARM_SW_ENTRY 0x%x\n", sc->sc_dev.dv_xname, val);

	/* TODO: exact time to sleep is uncertain */
	DELAY(10 * 1000);

	/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
	 * continuing warm path and entering dead loop.
	 */
	qwz_pcic_write32(sc, WLAON_WARM_SW_ENTRY, 0);
	DELAY(10 * 1000);

	val = qwz_pcic_read32(sc, WLAON_WARM_SW_ENTRY);
	DPRINTF("%s: WLAON_WARM_SW_ENTRY 0x%x\n", sc->sc_dev.dv_xname, val);

	/* A read clear register. clear the register to prevent
	 * Q6 from entering wrong code path.
	 */
	val = qwz_pcic_read32(sc, WLAON_SOC_RESET_CAUSE_REG);
	DPRINTF("%s: soc reset cause:%d\n", sc->sc_dev.dv_xname, val);
}
+
+int
+qwz_pci_set_link_reg(struct qwz_softc *sc, uint32_t offset, uint32_t value,
+    uint32_t mask)
+{
+       uint32_t v;
+       int i;
+
+       v = qwz_pcic_read32(sc, offset);
+       if ((v & mask) == value)
+               return 0;
+
+       for (i = 0; i < 10; i++) {
+               qwz_pcic_write32(sc, offset, (v & ~mask) | value);
+
+               v = qwz_pcic_read32(sc, offset);
+               if ((v & mask) == value)
+                       return 0;
+
+               delay((2 * 1000));
+       }
+
+       DPRINTF("failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n",
+           offset, v & mask, value);
+
+       return ETIMEDOUT;
+}
+
+int
+qwz_pci_fix_l1ss(struct qwz_softc *sc)
+{
+       int ret;
+
+       ret = qwz_pci_set_link_reg(sc,
+                                     PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(sc),
+                                     PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
+                                     PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
+       if (ret) {
+               DPRINTF("failed to set sysclk: %d\n", ret);
+               return ret;
+       }
+
+       ret = qwz_pci_set_link_reg(sc,
+                                     PCIE_PCS_OSC_DTCT_CONFIG1_REG(sc),
+                                     PCIE_PCS_OSC_DTCT_CONFIG1_VAL,
+                                     PCIE_PCS_OSC_DTCT_CONFIG_MSK);
+       if (ret) {
+               DPRINTF("failed to set dtct config1 error: %d\n", ret);
+               return ret;
+       }
+
+       ret = qwz_pci_set_link_reg(sc,
+                                     PCIE_PCS_OSC_DTCT_CONFIG2_REG(sc),
+                                     PCIE_PCS_OSC_DTCT_CONFIG2_VAL,
+                                     PCIE_PCS_OSC_DTCT_CONFIG_MSK);
+       if (ret) {
+               DPRINTF("failed to set dtct config2: %d\n", ret);
+               return ret;
+       }
+
+       ret = qwz_pci_set_link_reg(sc,
+                                     PCIE_PCS_OSC_DTCT_CONFIG4_REG(sc),
+                                     PCIE_PCS_OSC_DTCT_CONFIG4_VAL,
+                                     PCIE_PCS_OSC_DTCT_CONFIG_MSK);
+       if (ret) {
+               DPRINTF("failed to set dtct config4: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
/*
 * Kick the PCIe LTSSM (link training state machine) and then issue a
 * hot reset through the GCC block.  Both register writes are retried/
 * read back because the link can be unstable around reset.
 */
void
qwz_pci_enable_ltssm(struct qwz_softc *sc)
{
	uint32_t val;
	int i;

	val = qwz_pcic_read32(sc, PCIE_PCIE_PARF_LTSSM);

	/* PCIE link seems very unstable after the Hot Reset*/
	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
		/* All-ones reads indicate the link is (still) down. */
		if (val == 0xffffffff)
			DELAY(5 * 1000);

		qwz_pcic_write32(sc, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
		val = qwz_pcic_read32(sc, PCIE_PCIE_PARF_LTSSM);
	}

	DPRINTF("%s: pci ltssm 0x%x\n", sc->sc_dev.dv_xname, val);

	val = qwz_pcic_read32(sc, GCC_GCC_PCIE_HOT_RST);
	val |= GCC_GCC_PCIE_HOT_RST_VAL;
	qwz_pcic_write32(sc, GCC_GCC_PCIE_HOT_RST, val);
	val = qwz_pcic_read32(sc, GCC_GCC_PCIE_HOT_RST);

	DPRINTF("%s: pci pcie_hot_rst 0x%x\n", sc->sc_dev.dv_xname, val);

	DELAY(5 * 1000);
}
+
/* Clear all pending PCIe interrupts left set by a hot reset. */
void
qwz_pci_clear_all_intrs(struct qwz_softc *sc)
{
	/* This is a WAR for PCIE Hotreset.
	 * When target receive Hotreset, but will set the interrupt.
	 * So when download SBL again, SBL will open Interrupt and
	 * receive it, and crash immediately.
	 */
	qwz_pcic_write32(sc, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}
+
+void
+qwz_pci_set_wlaon_pwr_ctrl(struct qwz_softc *sc)
+{
+       uint32_t val;
+
+       val = qwz_pcic_read32(sc, WLAON_QFPROM_PWR_CTRL_REG);
+       val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
+       qwz_pcic_write32(sc, WLAON_QFPROM_PWR_CTRL_REG, val);
+}
+
/*
 * Force the device awake via the SOC_WAKE register and give it 5 ms
 * to come up.
 */
void
qwz_pci_force_wake(struct qwz_softc *sc)
{
	qwz_pcic_write32(sc, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
	DELAY(5 * 1000);
}
+
/*
 * Software-reset the SoC.  With power_on set (cold power-up), extra
 * PCIe link bring-up steps are performed first: LTSSM kick, interrupt
 * clearing, WLAON power control, and optional L1ss fixups.
 */
void
qwz_pci_sw_reset(struct qwz_softc *sc, bool power_on)
{
	DELAY(100 * 1000); /* msecs */

	if (power_on) {
		qwz_pci_enable_ltssm(sc);
		qwz_pci_clear_all_intrs(sc);
		qwz_pci_set_wlaon_pwr_ctrl(sc);
		/* Some chips need PCIe PHY fixups; see hw_params. */
		if (sc->hw_params.fix_l1ss)
			qwz_pci_fix_l1ss(sc);
	}

	qwz_mhi_clear_vector(sc);
	qwz_pci_clear_dbg_registers(sc);
	qwz_pci_soc_global_reset(sc);
	qwz_mhi_reset_device(sc, 0);
}
+
+void
+qwz_pci_msi_config(struct qwz_softc *sc, bool enable)
+{
+       struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
+       uint32_t val;
+
+       val = pci_conf_read(psc->sc_pc, psc->sc_tag,
+           psc->sc_msi_off + PCI_MSI_MC);
+
+       if (enable)
+               val |= PCI_MSI_MC_MSIE;
+       else
+               val &= ~PCI_MSI_MC_MSIE;
+
+       pci_conf_write(psc->sc_pc, psc->sc_tag,  psc->sc_msi_off + PCI_MSI_MC,
+           val);
+}
+
/* Enable MSI delivery in the device's MSI capability. */
void
qwz_pci_msi_enable(struct qwz_softc *sc)
{
	qwz_pci_msi_config(sc, true);
}
+
/* Disable MSI delivery in the device's MSI capability. */
void
qwz_pci_msi_disable(struct qwz_softc *sc)
{
	qwz_pci_msi_config(sc, false);
}
+
/*
 * Save the PCIe Link Control/Status register and disable ASPM L0s and
 * L1.  The saved value is written back by qwz_pci_aspm_restore().
 */
void
qwz_pci_aspm_disable(struct qwz_softc *sc)
{
	struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;

	psc->sc_lcsr = pci_conf_read(psc->sc_pc, psc->sc_tag,
	    psc->sc_cap_off + PCI_PCIE_LCSR);

	DPRINTF("%s: pci link_ctl 0x%04x L0s %d L1 %d\n", sc->sc_dev.dv_xname,
	    (uint16_t)psc->sc_lcsr, (psc->sc_lcsr & PCI_PCIE_LCSR_ASPM_L0S),
	    (psc->sc_lcsr & PCI_PCIE_LCSR_ASPM_L1));

	/* disable L0s and L1 */
	pci_conf_write(psc->sc_pc, psc->sc_tag, psc->sc_cap_off + PCI_PCIE_LCSR,
	    psc->sc_lcsr & ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1));

	/* Remember that there is a saved value to restore. */
	psc->sc_flags |= ATH12K_PCI_ASPM_RESTORE;
}
+
+void
+qwz_pci_aspm_restore(struct qwz_softc *sc)
+{
+       struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
+
+       if (psc->sc_flags & ATH12K_PCI_ASPM_RESTORE) {
+               pci_conf_write(psc->sc_pc, psc->sc_tag,
+                   psc->sc_cap_off + PCI_PCIE_LCSR, psc->sc_lcsr);
+               psc->sc_flags &= ~ATH12K_PCI_ASPM_RESTORE;
+       }
+}
+
/*
 * Power the device up: reset the SoC, disable ASPM for the firmware
 * download phase, enable MSI, and boot firmware via MHI.  Returns 0
 * on success or an error from qwz_mhi_start().
 */
int
qwz_pci_power_up(struct qwz_softc *sc)
{
	struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
	int error;

	/* The cached window selection is stale after a reset. */
	psc->register_window = 0;
	clear_bit(ATH12K_FLAG_DEVICE_INIT_DONE, sc->sc_flags);

	qwz_pci_sw_reset(sc, true);

	/* Disable ASPM during firmware download due to problems switching
	 * to AMSS state.
	 */
	qwz_pci_aspm_disable(sc);

	qwz_pci_msi_enable(sc);

	error = qwz_mhi_start(psc);
	if (error)
		return error;

	if (sc->hw_params.static_window_map)
		qwz_pci_select_static_window(sc);

	return 0;
}
+
/*
 * Power the device down: undo what qwz_pci_power_up() did, then reset
 * the SoC (without the cold-boot link bring-up steps).
 */
void
qwz_pci_power_down(struct qwz_softc *sc)
{
	/* restore aspm in case firmware bootup fails */
	qwz_pci_aspm_restore(sc);

	/* Make sure the device is awake before poking it further. */
	qwz_pci_force_wake(sc);

	qwz_pci_msi_disable(sc);

	qwz_mhi_stop(sc);
	clear_bit(ATH12K_FLAG_DEVICE_INIT_DONE, sc->sc_flags);
	qwz_pci_sw_reset(sc, false);
}
+
+/*
+ * MHI
+ */
/*
 * Stub.  The Linux driver registers with the MHI bus framework here;
 * this driver carries its own bare-bones MHI implementation and needs
 * no registration.  Always returns 0.
 */
int
qwz_mhi_register(struct qwz_softc *sc)
{
	DNPRINTF(QWZ_D_MHI, "%s: STUB %s()\n", sc->sc_dev.dv_xname, __func__);
	return 0;
}
+
/* Stub counterpart of qwz_mhi_register(); nothing to undo. */
void
qwz_mhi_unregister(struct qwz_softc *sc)
{
	DNPRINTF(QWZ_D_MHI, "%s: STUB %s()\n", sc->sc_dev.dv_xname, __func__);
}
+
+// XXX MHI is GPLd - we provide a compatible bare-bones implementation
+#define MHI_CFG                                0x10
+#define   MHI_CFG_NHWER_MASK           GENMASK(31, 24)
+#define   MHI_CFG_NHWER_SHFT           24
+#define   MHI_CFG_NER_MASK             GENMASK(23, 16)
+#define   MHI_CFG_NER_SHFT             16
+#define   MHI_CFG_NHWCH_MASK           GENMASK(15, 8)
+#define   MHI_CFG_NHWCH_SHFT           8
+#define   MHI_CFG_NCH_MASK             GENMASK(7, 0)
+#define MHI_CHDBOFF                    0x18
+#define MHI_DEV_WAKE_DB                        127
+#define MHI_ERDBOFF                    0x20
+#define MHI_BHI_OFFSET                 0x28
+#define   MHI_BHI_IMGADDR_LOW                  0x08
+#define   MHI_BHI_IMGADDR_HIGH                 0x0c
+#define   MHI_BHI_IMGSIZE                      0x10
+#define   MHI_BHI_IMGTXDB                      0x18
+#define   MHI_BHI_INTVEC                       0x20
+#define   MHI_BHI_EXECENV                      0x28
+#define   MHI_BHI_STATUS                       0x2c
+#define          MHI_BHI_SERIALNU                      0x40
+#define MHI_BHIE_OFFSET                        0x2c
+#define   MHI_BHIE_TXVECADDR_LOW_OFFS          0x2c
+#define   MHI_BHIE_TXVECADDR_HIGH_OFFS         0x30
+#define   MHI_BHIE_TXVECSIZE_OFFS              0x34
+#define   MHI_BHIE_TXVECDB_OFFS                        0x3c
+#define   MHI_BHIE_TXVECSTATUS_OFFS            0x44
+#define   MHI_BHIE_RXVECADDR_LOW_OFFS          0x60
+#define   MHI_BHIE_RXVECSTATUS_OFFS            0x78
+#define MHI_CTRL                       0x38
+#define    MHI_CTRL_READY_MASK                 0x1
+#define    MHI_CTRL_RESET_MASK                 0x2
+#define    MHI_CTRL_MHISTATE_MASK              GENMASK(15, 8)
+#define    MHI_CTRL_MHISTATE_SHFT              8
+#define MHI_STATUS                     0x48
+#define    MHI_STATUS_MHISTATE_MASK            GENMASK(15, 8)
+#define    MHI_STATUS_MHISTATE_SHFT            8
+#define        MHI_STATE_RESET                 0x0
+#define        MHI_STATE_READY                 0x1
+#define        MHI_STATE_M0                    0x2
+#define        MHI_STATE_M1                    0x3
+#define        MHI_STATE_M2                    0x4
+#define        MHI_STATE_M3                    0x5
+#define        MHI_STATE_M3_FAST               0x6
+#define        MHI_STATE_BHI                   0x7
+#define        MHI_STATE_SYS_ERR               0xff
+#define    MHI_STATUS_READY_MASK               0x1
+#define    MHI_STATUS_SYSERR_MASK              0x4
+#define MHI_CCABAP_LOWER               0x58
+#define MHI_CCABAP_HIGHER              0x5c
+#define MHI_ECABAP_LOWER               0x60
+#define MHI_ECABAP_HIGHER              0x64
+#define MHI_CRCBAP_LOWER               0x68
+#define MHI_CRCBAP_HIGHER              0x6c
+#define MHI_CRDB_LOWER                 0x70
+#define MHI_CRDB_HIGHER                        0x74
+#define MHI_CTRLBASE_LOWER             0x80
+#define MHI_CTRLBASE_HIGHER            0x84
+#define MHI_CTRLLIMIT_LOWER            0x88
+#define MHI_CTRLLIMIT_HIGHER           0x8c
+#define MHI_DATABASE_LOWER             0x98
+#define MHI_DATABASE_HIGHER            0x9c
+#define MHI_DATALIMIT_LOWER            0xa0
+#define MHI_DATALIMIT_HIGHER           0xa4
+
+#define MHI_EE_PBL     0x0     /* Primary Bootloader */
+#define MHI_EE_SBL     0x1     /* Secondary Bootloader */
+#define MHI_EE_AMSS    0x2     /* Modem, aka the primary runtime EE */
+#define MHI_EE_RDDM    0x3     /* Ram dump download mode */
+#define MHI_EE_WFW     0x4     /* WLAN firmware mode */
+#define MHI_EE_PTHRU   0x5     /* Passthrough */
+#define MHI_EE_EDL     0x6     /* Embedded downloader */
+#define MHI_EE_FP      0x7     /* Flash Programmer Environment */
+
+#define MHI_IN_PBL(e) (e == MHI_EE_PBL || e == MHI_EE_PTHRU || e == MHI_EE_EDL)
+#define MHI_POWER_UP_CAPABLE(e) (MHI_IN_PBL(e) || e == MHI_EE_AMSS)
+#define MHI_IN_MISSION_MODE(e) \
+       (e == MHI_EE_AMSS || e == MHI_EE_WFW || e == MHI_EE_FP)
+
+/* BHI register bits */
+#define MHI_BHI_TXDB_SEQNUM_BMSK       GENMASK(29, 0)
+#define MHI_BHI_TXDB_SEQNUM_SHFT       0
+#define MHI_BHI_STATUS_MASK            GENMASK(31, 30)
+#define MHI_BHI_STATUS_SHFT            30
+#define MHI_BHI_STATUS_ERROR           0x03
+#define MHI_BHI_STATUS_SUCCESS         0x02
+#define MHI_BHI_STATUS_RESET           0x00
+
+/* MHI BHIE registers */
+#define MHI_BHIE_MSMSOCID_OFFS         0x00
+#define MHI_BHIE_RXVECADDR_LOW_OFFS    0x60
+#define MHI_BHIE_RXVECADDR_HIGH_OFFS   0x64
+#define MHI_BHIE_RXVECSIZE_OFFS                0x68
+#define MHI_BHIE_RXVECDB_OFFS          0x70
+#define MHI_BHIE_RXVECSTATUS_OFFS      0x78
+
+/* BHIE register bits */
+#define MHI_BHIE_TXVECDB_SEQNUM_BMSK           GENMASK(29, 0)
+#define MHI_BHIE_TXVECDB_SEQNUM_SHFT           0
+#define MHI_BHIE_TXVECSTATUS_SEQNUM_BMSK       GENMASK(29, 0)
+#define MHI_BHIE_TXVECSTATUS_SEQNUM_SHFT       0
+#define MHI_BHIE_TXVECSTATUS_STATUS_BMSK       GENMASK(31, 30)
+#define MHI_BHIE_TXVECSTATUS_STATUS_SHFT       30
+#define MHI_BHIE_TXVECSTATUS_STATUS_RESET      0x00
+#define MHI_BHIE_TXVECSTATUS_STATUS_XFER_COMPL 0x02
+#define MHI_BHIE_TXVECSTATUS_STATUS_ERROR      0x03
+#define MHI_BHIE_RXVECDB_SEQNUM_BMSK           GENMASK(29, 0)
+#define MHI_BHIE_RXVECDB_SEQNUM_SHFT           0
+#define MHI_BHIE_RXVECSTATUS_SEQNUM_BMSK       GENMASK(29, 0)
+#define MHI_BHIE_RXVECSTATUS_SEQNUM_SHFT       0
+#define MHI_BHIE_RXVECSTATUS_STATUS_BMSK       GENMASK(31, 30)
+#define MHI_BHIE_RXVECSTATUS_STATUS_SHFT       30
+#define MHI_BHIE_RXVECSTATUS_STATUS_RESET      0x00
+#define MHI_BHIE_RXVECSTATUS_STATUS_XFER_COMPL 0x02
+#define MHI_BHIE_RXVECSTATUS_STATUS_ERROR      0x03
+
+#define MHI_EV_CC_INVALID      0x0
+#define MHI_EV_CC_SUCCESS      0x1
+#define MHI_EV_CC_EOT          0x2
+#define MHI_EV_CC_OVERFLOW     0x3
+#define MHI_EV_CC_EOB          0x4
+#define MHI_EV_CC_OOB          0x5
+#define MHI_EV_CC_DB_MODE      0x6
+#define MHI_EV_CC_UNDEFINED_ERR        0x10
+#define MHI_EV_CC_BAD_TRE      0x11
+
+#define MHI_CMD_NOP            01
+#define MHI_CMD_RESET_CHAN     16
+#define MHI_CMD_STOP_CHAN      17
+#define MHI_CMD_START_CHAN     18
+
+#define MHI_TRE_CMD_CHID_MASK  GENMASK(31, 24)
+#define MHI_TRE_CMD_CHID_SHFT  24
+#define MHI_TRE_CMD_CMDID_MASK GENMASK(23, 16)
+#define MHI_TRE_CMD_CMDID_SHFT 16
+
+#define MHI_TRE0_EV_LEN_MASK   GENMASK(15, 0)
+#define MHI_TRE0_EV_LEN_SHFT   0
+#define MHI_TRE0_EV_CODE_MASK  GENMASK(31, 24)
+#define MHI_TRE0_EV_CODE_SHFT  24
+#define MHI_TRE1_EV_TYPE_MASK  GENMASK(23, 16)
+#define MHI_TRE1_EV_TYPE_SHFT  16
+#define MHI_TRE1_EV_CHID_MASK  GENMASK(31, 24)
+#define MHI_TRE1_EV_CHID_SHFT  24
+
+#define MHI_TRE0_DATA_LEN_MASK GENMASK(15, 0)
+#define MHI_TRE0_DATA_LEN_SHFT 0
+#define MHI_TRE1_DATA_CHAIN    (1 << 0)
+#define MHI_TRE1_DATA_IEOB     (1 << 8)
+#define MHI_TRE1_DATA_IEOT     (1 << 9)
+#define MHI_TRE1_DATA_BEI      (1 << 10)
+#define MHI_TRE1_DATA_TYPE_MASK                GENMASK(23, 16)
+#define MHI_TRE1_DATA_TYPE_SHIFT       16
+#define MHI_TRE1_DATA_TYPE_TRANSFER    0x2
+
+#define MHI_PKT_TYPE_INVALID                   0x00
+#define MHI_PKT_TYPE_NOOP_CMD                  0x01
+#define MHI_PKT_TYPE_TRANSFER                  0x02
+#define MHI_PKT_TYPE_COALESCING                        0x08
+#define MHI_PKT_TYPE_RESET_CHAN_CMD            0x10
+#define MHI_PKT_TYPE_STOP_CHAN_CMD             0x11
+#define MHI_PKT_TYPE_START_CHAN_CMD            0x12
+#define MHI_PKT_TYPE_STATE_CHANGE_EVENT                0x20
+#define MHI_PKT_TYPE_CMD_COMPLETION_EVENT      0x21
+#define MHI_PKT_TYPE_TX_EVENT                  0x22
+#define MHI_PKT_TYPE_RSC_TX_EVENT              0x28
+#define MHI_PKT_TYPE_EE_EVENT                  0x40
+#define MHI_PKT_TYPE_TSYNC_EVENT               0x48
+#define MHI_PKT_TYPE_BW_REQ_EVENT              0x50
+
+
+#define MHI_DMA_VEC_CHUNK_SIZE                 524288 /* 512 KB */
+/*
+ * One entry of the BHIE scatter/gather vector used to hand a firmware
+ * image to the device in MHI_DMA_VEC_CHUNK_SIZE pieces.
+ */
+struct qwz_dma_vec_entry {
+       uint64_t paddr;         /* device-visible address of the chunk */
+       uint64_t size;          /* chunk length in bytes */
+};
+
+/*
+ * Write a 64-bit value to an MHI doorbell register pair.
+ * The high word is written first; presumably the device latches the
+ * value on the low-word write, so do not reorder these two writes.
+ */
+void
+qwz_mhi_ring_doorbell(struct qwz_softc *sc, uint64_t db_addr, uint64_t val)
+{
+       qwz_pci_write(sc, db_addr + 4, val >> 32);
+       qwz_pci_write(sc, db_addr, val & 0xffffffff);
+}
+
+/*
+ * Ring the device-wake doorbell to request that the device leave a
+ * low-power state.  Paired with qwz_mhi_device_zzz().
+ */
+void
+qwz_mhi_device_wake(struct qwz_softc *sc)
+{
+       struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
+
+       /*
+        * Device wake is async only for now because we do not
+        * keep track of PM state in software.
+        */
+       qwz_mhi_ring_doorbell(sc, psc->wake_db, 1);
+}
+
+/* Drop the wake request made by qwz_mhi_device_wake(). */
+void
+qwz_mhi_device_zzz(struct qwz_softc *sc)
+{
+       struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
+
+       qwz_mhi_ring_doorbell(sc, psc->wake_db, 0);
+}
+
+/*
+ * Tell whether it is valid to clear the wake doorbell, i.e. whether
+ * the device is currently in the M0 state.
+ */
+int
+qwz_mhi_wake_db_clear_valid(struct qwz_softc *sc)
+{
+       struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
+       int in_m0;
+
+       /* TODO other states? */
+       in_m0 = (psc->mhi_state == MHI_STATE_M0);
+       return in_m0;
+}
+
+/*
+ * Initialize the channel context array shared with the device and
+ * bind each configured transfer ring to its channel context entry.
+ */
+void
+qwz_mhi_init_xfer_rings(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       int i;
+       uint32_t chcfg;
+       struct qwz_pci_xfer_ring *ring;
+       struct qwz_mhi_chan_ctxt *cbase, *c;
+
+       cbase = (struct qwz_mhi_chan_ctxt *)QWZ_DMA_KVA(psc->chan_ctxt);
+       /* Default every channel to disabled, burst mode off, invalid type. */
+       for (i = 0; i < psc->max_chan; i++) {
+               c = &cbase[i];
+               chcfg = le32toh(c->chcfg);
+               chcfg &= ~(MHI_CHAN_CTX_CHSTATE_MASK |
+                   MHI_CHAN_CTX_BRSTMODE_MASK |
+                   MHI_CHAN_CTX_POLLCFG_MASK);
+               chcfg |= (MHI_CHAN_CTX_CHSTATE_DISABLED |
+                   (MHI_CHAN_CTX_BRSTMODE_DISABLE <<
+                   MHI_CHAN_CTX_BRSTMODE_SHFT));
+               c->chcfg = htole32(chcfg);
+               c->chtype = htole32(MHI_CHAN_TYPE_INVALID);
+               c->erindex = 0;
+       }
+
+       /* Bind the rings we actually use to their channel contexts. */
+       for (i = 0; i < nitems(psc->xfer_rings); i++) {
+               ring = &psc->xfer_rings[i];
+               KASSERT(ring->mhi_chan_id < psc->max_chan);
+               c = &cbase[ring->mhi_chan_id];
+               c->chtype = htole32(ring->mhi_chan_direction);
+               c->erindex = htole32(ring->mhi_chan_event_ring_index);
+               ring->chan_ctxt = c;
+       }
+
+       /* Flush the context array before the device reads it. */
+       bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(psc->chan_ctxt), 0,
+           QWZ_DMA_LEN(psc->chan_ctxt), BUS_DMASYNC_PREWRITE);
+}
+
+/*
+ * Initialize the event ring context array shared with the device:
+ * interrupt moderation, MSI vector, and ring base/read/write pointers
+ * for every event ring, then flush the contexts to the device.
+ */
+void
+qwz_mhi_init_event_rings(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       int i;
+       uint32_t intmod;
+       uint64_t paddr, len;
+       struct qwz_pci_event_ring *ring;
+       struct qwz_mhi_event_ctxt *c;
+
+       c = (struct qwz_mhi_event_ctxt *)QWZ_DMA_KVA(psc->event_ctxt);
+       for (i = 0; i < nitems(psc->event_rings); i++, c++) {
+               ring = &psc->event_rings[i];
+
+               ring->event_ctxt = c;
+
+               /* Program interrupt moderation; clear the modcount field. */
+               intmod = le32toh(c->intmod);
+               intmod &= ~(MHI_EV_CTX_INTMODC_MASK | MHI_EV_CTX_INTMODT_MASK);
+               intmod |= (ring->mhi_er_irq_moderation_ms <<
+                   MHI_EV_CTX_INTMODT_SHFT) & MHI_EV_CTX_INTMODT_MASK;
+               c->intmod = htole32(intmod);
+
+               c->ertype = htole32(MHI_ER_TYPE_VALID);
+               c->msivec = htole32(ring->mhi_er_irq);
+
+               /* rp starts at the base; wp at the last ring element. */
+               paddr = QWZ_DMA_DVA(ring->dmamem);
+               ring->rp = paddr;
+               ring->wp = paddr + ring->size -
+                   sizeof(struct qwz_mhi_ring_element);
+               c->rbase = htole64(paddr);
+               c->rp = htole64(ring->rp);
+               c->wp = htole64(ring->wp);
+
+               len = sizeof(struct qwz_mhi_ring_element) * ring->num_elements;
+               c->rlen = htole64(len);
+       }
+
+       /* Flush the context array before the device reads it. */
+       bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(psc->event_ctxt), 0,
+           QWZ_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_PREWRITE);
+}
+
+/*
+ * Initialize the command ring context shared with the device: ring
+ * base, read/write pointers (both at the base, i.e. ring empty) and
+ * ring length, then flush the context to the device.
+ */
+void
+qwz_mhi_init_cmd_ring(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       struct qwz_pci_cmd_ring *ring = &psc->cmd_ring;
+       struct qwz_mhi_cmd_ctxt *c;
+       uint64_t paddr, len;
+
+       paddr = QWZ_DMA_DVA(ring->dmamem);
+       len = ring->size;
+
+       ring->rp = ring->wp = paddr;
+
+       c = (struct qwz_mhi_cmd_ctxt *)QWZ_DMA_KVA(psc->cmd_ctxt);
+       c->rbase = htole64(paddr);
+       c->rp = htole64(paddr);
+       c->wp = htole64(paddr);
+       c->rlen = htole64(len);
+
+       bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(psc->cmd_ctxt), 0,
+           QWZ_DMA_LEN(psc->cmd_ctxt), BUS_DMASYNC_PREWRITE);
+}
+
+/*
+ * Initialize the complete host-resident MHI device context: the
+ * transfer-ring, event-ring and command-ring context arrays.
+ */
+void
+qwz_mhi_init_dev_ctxt(struct qwz_pci_softc *psc)
+{
+       qwz_mhi_init_xfer_rings(psc);
+       qwz_mhi_init_event_rings(psc);
+       qwz_mhi_init_cmd_ring(psc);
+}
+
+/*
+ * Translate a device-side command ring pointer into a kernel virtual
+ * address inside the ring's DMA memory.
+ *
+ * Returns NULL if the pointer lies outside the ring.  A single range
+ * check suffices here; the previous second check on the computed
+ * offset was unreachable once ptr < base + ring->size held.
+ */
+void *
+qwz_pci_cmd_ring_get_elem(struct qwz_pci_cmd_ring *ring, uint64_t ptr)
+{
+       uint64_t base = QWZ_DMA_DVA(ring->dmamem);
+
+       if (ptr < base || ptr >= base + ring->size)
+               return NULL;
+
+       return QWZ_DMA_KVA(ring->dmamem) + (ptr - base);
+}
+
+/*
+ * Advance the command ring write pointer past the element written by
+ * the caller, publish the new pointer in the command ring context and
+ * ring the command doorbell.
+ *
+ * Returns 0 on success, 1 if the ring is full.
+ */
+int
+qwz_mhi_cmd_ring_submit(struct qwz_pci_softc *psc,
+    struct qwz_pci_cmd_ring *ring)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       uint64_t base = QWZ_DMA_DVA(ring->dmamem);
+       struct qwz_mhi_cmd_ctxt *c;
+
+       if (ring->queued >= ring->num_elements)
+               return 1;
+
+       /* Advance wp, wrapping at the end of the ring. */
+       if (ring->wp + sizeof(struct qwz_mhi_ring_element) >= base + ring->size)
+               ring->wp = base;
+       else
+               ring->wp += sizeof(struct qwz_mhi_ring_element);
+
+       bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(psc->cmd_ctxt), 0,
+           QWZ_DMA_LEN(psc->cmd_ctxt), BUS_DMASYNC_POSTREAD);
+
+       /* Publish the new write pointer in the shared context. */
+       c = (struct qwz_mhi_cmd_ctxt *)QWZ_DMA_KVA(psc->cmd_ctxt);
+       c->wp = htole64(ring->wp);
+
+       bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(psc->cmd_ctxt), 0,
+           QWZ_DMA_LEN(psc->cmd_ctxt), BUS_DMASYNC_PREWRITE);
+
+       ring->queued++;
+       qwz_mhi_ring_doorbell(sc, MHI_CRDB_LOWER, ring->wp);
+       return 0;
+}
+
+/*
+ * Write an MHI command (e.g. MHI_CMD_START_CHAN) for the given channel
+ * into the command ring element at the current write pointer and
+ * submit it to the device.
+ *
+ * Returns 0 on success, 1 on ring overflow or pointer translation
+ * failure.
+ */
+int
+qwz_mhi_send_cmd(struct qwz_pci_softc *psc, uint32_t cmd, uint32_t chan)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       struct qwz_pci_cmd_ring *ring = &psc->cmd_ring;
+       struct qwz_mhi_ring_element *e;
+
+       if (ring->queued >= ring->num_elements) {
+               printf("%s: command ring overflow\n", sc->sc_dev.dv_xname);
+               return 1;
+       }
+
+       e = qwz_pci_cmd_ring_get_elem(ring, ring->wp);
+       if (e == NULL)
+               return 1;
+
+       /* Channel id and command id are packed into the second dword. */
+       e->ptr = 0ULL;
+       e->dword[0] = 0;
+       e->dword[1] = htole32(
+           ((chan << MHI_TRE_CMD_CHID_SHFT) & MHI_TRE_CMD_CHID_MASK) |
+           ((cmd << MHI_TRE_CMD_CMDID_SHFT) & MHI_TRE_CMD_CMDID_MASK));
+
+       return qwz_mhi_cmd_ring_submit(psc, ring);
+}
+
+/*
+ * Map a device-side ring pointer to the corresponding element in the
+ * transfer ring's DMA memory, or NULL if it is out of range.
+ */
+void *
+qwz_pci_xfer_ring_get_elem(struct qwz_pci_xfer_ring *ring, uint64_t wp)
+{
+       uint64_t base = QWZ_DMA_DVA(ring->dmamem);
+       uint64_t off;
+
+       if (wp < base)
+               return NULL;
+       off = wp - base;
+       if (off >= ring->size)
+               return NULL;
+
+       return QWZ_DMA_KVA(ring->dmamem) + off;
+}
+
+/*
+ * Map a device-side ring pointer to the qwz_xfer_data slot that
+ * shadows the corresponding ring element, or NULL if out of range.
+ */
+struct qwz_xfer_data *
+qwz_pci_xfer_ring_get_data(struct qwz_pci_xfer_ring *ring, uint64_t wp)
+{
+       uint64_t off;
+
+       if (wp < QWZ_DMA_DVA(ring->dmamem))
+               return NULL;
+       off = wp - QWZ_DMA_DVA(ring->dmamem);
+       if (off >= ring->size)
+               return NULL;
+
+       /* One data slot per ring element. */
+       return &ring->data[off / sizeof(ring->data[0])];
+}
+
+/*
+ * Queue an mbuf on the outbound IPCR transfer ring and notify the
+ * device via the ring's doorbell.
+ *
+ * Returns 0 on success, 1 if the ring is full or the transfer cannot
+ * be queued, or an errno-style value on DMA mapping failure.
+ */
+int
+qwz_mhi_submit_xfer(struct qwz_softc *sc, struct mbuf *m)
+{
+       struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
+       struct qwz_pci_xfer_ring *ring;
+       struct qwz_mhi_ring_element *e;
+       struct qwz_xfer_data *xfer;
+       uint64_t paddr, base;
+       int err;
+
+       ring = &psc->xfer_rings[QWZ_PCI_XFER_RING_IPCR_OUTBOUND];
+
+       if (ring->queued >= ring->num_elements)
+               return 1;
+
+       if (m->m_pkthdr.len > QWZ_PCI_XFER_MAX_DATA_SIZE) {
+               /* TODO: chunk xfers */
+               printf("%s: xfer too large: %d bytes\n", __func__, m->m_pkthdr.len);
+               return 1;
+
+       }
+
+       e = qwz_pci_xfer_ring_get_elem(ring, ring->wp);
+       if (e == NULL)
+               return 1;
+
+       /* The slot at wp must be free (no mbuf still in flight). */
+       xfer = qwz_pci_xfer_ring_get_data(ring, ring->wp);
+       if (xfer == NULL || xfer->m != NULL)
+               return 1;
+
+       err = bus_dmamap_load_mbuf(sc->sc_dmat, xfer->map, m,
+           BUS_DMA_NOWAIT | BUS_DMA_WRITE);
+       if (err && err != EFBIG) {
+               printf("%s: can't map mbuf (error %d)\n",
+                   sc->sc_dev.dv_xname, err);
+               return err;
+       }
+       if (err) {
+               /* Too many DMA segments, linearize mbuf. */
+               if (m_defrag(m, M_DONTWAIT))
+                       return ENOBUFS;
+               err = bus_dmamap_load_mbuf(sc->sc_dmat, xfer->map, m,
+                   BUS_DMA_NOWAIT | BUS_DMA_WRITE);
+               if (err) {
+                       printf("%s: can't map mbuf (error %d)\n",
+                           sc->sc_dev.dv_xname, err);
+                       return err;
+               }
+       }
+
+       bus_dmamap_sync(sc->sc_dmat, xfer->map, 0, m->m_pkthdr.len,
+           BUS_DMASYNC_PREWRITE);
+
+       /*
+        * Ownership of the mbuf passes to the ring; presumably it is
+        * released from the completion event path -- confirm there.
+        */
+       xfer->m = m;
+       paddr = xfer->map->dm_segs[0].ds_addr;
+
+       /* Fill in the transfer ring element describing this buffer. */
+       e->ptr = htole64(paddr);
+       e->dword[0] = htole32((m->m_pkthdr.len << MHI_TRE0_DATA_LEN_SHFT) &
+           MHI_TRE0_DATA_LEN_MASK);
+       e->dword[1] = htole32(MHI_TRE1_DATA_IEOT |
+           MHI_TRE1_DATA_TYPE_TRANSFER << MHI_TRE1_DATA_TYPE_SHIFT);
+
+       bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(ring->dmamem),
+           0, QWZ_DMA_LEN(ring->dmamem), BUS_DMASYNC_PREWRITE);
+
+       /* Advance the write pointer, wrapping at the end of the ring. */
+       base = QWZ_DMA_DVA(ring->dmamem);
+       if (ring->wp + sizeof(struct qwz_mhi_ring_element) >= base + ring->size)
+               ring->wp = base;
+       else
+               ring->wp += sizeof(struct qwz_mhi_ring_element);
+       ring->queued++;
+
+       /* Publish the new write pointer in the channel context. */
+       ring->chan_ctxt->wp = htole64(ring->wp);
+
+       bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(psc->chan_ctxt), 0,
+           QWZ_DMA_LEN(psc->chan_ctxt), BUS_DMASYNC_PREWRITE);
+
+       qwz_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
+       return 0;
+}
+
+/*
+ * Enable an MHI channel: mark its context enabled, reset the ring
+ * pointers, issue MHI_CMD_START_CHAN and wait (up to 5 seconds) for
+ * the command-completion event to set cmd_status.  For inbound
+ * channels, pre-post a receive buffer in every ring element and ring
+ * the channel doorbell so the device can start delivering data.
+ *
+ * Returns 0 on success, 1 on failure.
+ */
+int
+qwz_mhi_start_channel(struct qwz_pci_softc *psc,
+       struct qwz_pci_xfer_ring *ring)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       struct qwz_mhi_chan_ctxt *c;
+       int ret = 0;
+       uint32_t chcfg;
+       uint64_t paddr, len;
+
+       DNPRINTF(QWZ_D_MHI, "%s: start MHI channel %d in state %d\n", __func__,
+           ring->mhi_chan_id, ring->mhi_chan_state);
+
+       c = ring->chan_ctxt;
+
+       /* Mark the channel enabled in its shared context. */
+       chcfg = le32toh(c->chcfg);
+       chcfg &= ~MHI_CHAN_CTX_CHSTATE_MASK;
+       chcfg |= MHI_CHAN_CTX_CHSTATE_ENABLED;
+       c->chcfg = htole32(chcfg);
+
+       /* Reset ring base and read/write pointers (ring empty). */
+       paddr = QWZ_DMA_DVA(ring->dmamem);
+       ring->rp = ring->wp = paddr;
+       c->rbase = htole64(paddr);
+       c->rp = htole64(ring->rp);
+       c->wp = htole64(ring->wp);
+       len = sizeof(struct qwz_mhi_ring_element) * ring->num_elements;
+       c->rlen = htole64(len);
+
+       bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(psc->chan_ctxt), 0,
+           QWZ_DMA_LEN(psc->chan_ctxt), BUS_DMASYNC_PREWRITE);
+
+       /* cmd_status is updated by the command-completion event path. */
+       ring->cmd_status = MHI_EV_CC_INVALID;
+       if (qwz_mhi_send_cmd(psc, MHI_CMD_START_CHAN, ring->mhi_chan_id))
+               return 1;
+
+       while (ring->cmd_status != MHI_EV_CC_SUCCESS) {
+               ret = tsleep_nsec(&ring->cmd_status, 0, "qwzcmd",
+                   SEC_TO_NSEC(5));
+               if (ret)
+                       break;
+       }
+
+       if (ret) {
+               printf("%s: could not start MHI channel %d in state %d: status 0x%x\n",
+                   sc->sc_dev.dv_xname, ring->mhi_chan_id,
+                   ring->mhi_chan_state, ring->cmd_status);
+               return 1;
+       }
+
+       if (ring->mhi_chan_direction == MHI_CHAN_TYPE_INBOUND) {
+               uint64_t wp = QWZ_DMA_DVA(ring->dmamem);
+               int i;
+
+               /* Post a receive buffer in every ring element. */
+               for (i = 0; i < ring->num_elements; i++) {
+                       struct qwz_mhi_ring_element *e;
+                       struct qwz_xfer_data *xfer;
+                       uint64_t paddr;
+
+                       e = qwz_pci_xfer_ring_get_elem(ring, wp);
+                       xfer = qwz_pci_xfer_ring_get_data(ring, wp);
+                       paddr = xfer->map->dm_segs[0].ds_addr;
+
+                       e->ptr = htole64(paddr);
+                       e->dword[0] = htole32((QWZ_PCI_XFER_MAX_DATA_SIZE <<
+                           MHI_TRE0_DATA_LEN_SHFT) &
+                           MHI_TRE0_DATA_LEN_MASK);
+                       e->dword[1] = htole32(MHI_TRE1_DATA_IEOT |
+                           MHI_TRE1_DATA_BEI |
+                           MHI_TRE1_DATA_TYPE_TRANSFER <<
+                           MHI_TRE1_DATA_TYPE_SHIFT);
+
+                       /* ring->wp ends up at the last posted element. */
+                       ring->wp = wp;
+                       wp += sizeof(*e);
+               }
+
+               bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(ring->dmamem), 0,
+                   QWZ_DMA_LEN(ring->dmamem), BUS_DMASYNC_PREWRITE);
+
+               qwz_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
+       }
+
+       return 0;
+}
+
+/*
+ * Bring up the IPCR transfer channels, outbound first, then inbound,
+ * holding a device wake reference across the whole sequence.  Stops
+ * at the first channel that fails to start.
+ *
+ * Returns 0 on success, 1 on failure.
+ */
+int
+qwz_mhi_start_channels(struct qwz_pci_softc *psc)
+{
+       const int chans[] = {
+               QWZ_PCI_XFER_RING_IPCR_OUTBOUND,
+               QWZ_PCI_XFER_RING_IPCR_INBOUND,
+       };
+       int i, ret = 0;
+
+       qwz_mhi_device_wake(&psc->sc_sc);
+
+       for (i = 0; i < nitems(chans); i++) {
+               if (qwz_mhi_start_channel(psc, &psc->xfer_rings[chans[i]])) {
+                       ret = 1;
+                       break;
+               }
+       }
+
+       qwz_mhi_device_zzz(&psc->sc_sc);
+       return ret;
+}
+
+/*
+ * Power on the MHI layer: initialize the host-side device context,
+ * read the BHI/BHIE register offsets from the device, validate the
+ * execution environment and MHI state, then either load firmware
+ * (if the device is still in a PBL environment) or verify that the
+ * device is already in mission mode.
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+int
+qwz_mhi_start(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       uint32_t off;
+       uint32_t ee, state;
+       int ret;
+
+       qwz_mhi_init_dev_ctxt(psc);
+
+       /* BHI/BHIE register blocks live at device-reported offsets. */
+       psc->bhi_off = qwz_pci_read(sc, MHI_BHI_OFFSET);
+       DNPRINTF(QWZ_D_MHI, "%s: BHI offset 0x%x\n", __func__, psc->bhi_off);
+
+       psc->bhie_off = qwz_pci_read(sc, MHI_BHIE_OFFSET);
+       DNPRINTF(QWZ_D_MHI, "%s: BHIE offset 0x%x\n", __func__, psc->bhie_off);
+
+       /* Clean BHIE RX registers */
+       for (off = MHI_BHIE_RXVECADDR_LOW_OFFS;
+            off < (MHI_BHIE_RXVECSTATUS_OFFS - 4);
+            off += 4)
+               qwz_pci_write(sc, psc->bhie_off + off, 0x0);
+
+       qwz_rddm_prepare(psc);
+
+       /* Program BHI INTVEC */
+       qwz_pci_write(sc, psc->bhi_off + MHI_BHI_INTVEC, 0x00);
+
+       /*
+        * Get BHI execution environment and confirm that it is valid
+        * for power on.
+        */
+       ee = qwz_pci_read(sc, psc->bhi_off + MHI_BHI_EXECENV);
+       if (!MHI_POWER_UP_CAPABLE(ee)) {
+               printf("%s: invalid EE for power on: 0x%x\n",
+                    sc->sc_dev.dv_xname, ee);
+               return 1;
+       }
+
+       /*
+        * Get MHI state of the device and reset it if it is in system
+        * error.
+        */
+       state = qwz_pci_read(sc, MHI_STATUS);
+       DNPRINTF(QWZ_D_MHI, "%s: MHI power on with EE: 0x%x, status: 0x%x\n",
+            sc->sc_dev.dv_xname, ee, state);
+       state = (state & MHI_STATUS_MHISTATE_MASK) >> MHI_STATUS_MHISTATE_SHFT;
+       if (state == MHI_STATE_SYS_ERR) {
+               if (qwz_mhi_reset_device(sc, 0))
+                       return 1;
+               state = qwz_pci_read(sc, MHI_STATUS);
+               DNPRINTF(QWZ_D_MHI, "%s: MHI state after reset: 0x%x\n",
+                   sc->sc_dev.dv_xname, state);
+               state = (state & MHI_STATUS_MHISTATE_MASK) >>
+                   MHI_STATUS_MHISTATE_SHFT;
+               if (state == MHI_STATE_SYS_ERR) {
+                       printf("%s: MHI stuck in system error state\n",
+                           sc->sc_dev.dv_xname);
+                       return 1;
+               }
+       }
+
+       psc->bhi_ee = ee;
+       psc->mhi_state = state;
+
+#if notyet
+       /* Enable IRQs */
+       //  XXX todo?
+#endif
+
+       /* Transition to primary runtime. */
+       if (MHI_IN_PBL(ee)) {
+               ret = qwz_mhi_fw_load_handler(psc);
+               if (ret)
+                       return ret;
+
+               /* XXX without this delay starting the channels may fail */
+               delay(1000);
+               qwz_mhi_start_channels(psc);
+       } else {
+               /* XXX Handle partially initialized device...?!? */
+               ee = qwz_pci_read(sc, psc->bhi_off + MHI_BHI_EXECENV);
+               if (!MHI_IN_MISSION_MODE(ee)) {
+                       printf("%s: failed to power up MHI, ee=0x%x\n",
+                           sc->sc_dev.dv_xname, ee);
+                       return EIO;
+               }
+       }
+
+       return 0;
+}
+
+/* Shut MHI down by forcing a device reset. */
+void
+qwz_mhi_stop(struct qwz_softc *sc)
+{
+       qwz_mhi_reset_device(sc, 1);
+}
+
+/*
+ * Reset the device through MHI_CTRL if 'force' is set or MHI_STATUS
+ * reports SYSERR, then re-program the BHI interrupt vector, which the
+ * device clears across reset.
+ *
+ * Returns 0 on success or the error from qwz_mhi_await_device_reset().
+ */
+int
+qwz_mhi_reset_device(struct qwz_softc *sc, int force)
+{
+       struct qwz_pci_softc *psc = (struct qwz_pci_softc *)sc;
+       uint32_t reg;
+       int ret = 0;
+
+       reg = qwz_pcic_read32(sc, MHI_STATUS);
+
+       DNPRINTF(QWZ_D_MHI, "%s: MHISTATUS 0x%x\n", sc->sc_dev.dv_xname, reg);
+       /*
+        * Observed on QCA6390 that after SOC_GLOBAL_RESET, MHISTATUS
+        * has SYSERR bit set and thus need to set MHICTRL_RESET
+        * to clear SYSERR.
+        */
+       if (force || (reg & MHI_STATUS_SYSERR_MASK)) {
+               /* Trigger MHI Reset in device. */
+               qwz_pcic_write32(sc, MHI_CTRL, MHI_CTRL_RESET_MASK);
+
+               /* Wait for the reset bit to be cleared by the device. */
+               ret = qwz_mhi_await_device_reset(sc);
+               if (ret)
+                       return ret;
+
+               /* bhi_off may not have been discovered yet at this point. */
+               if (psc->bhi_off == 0)
+                       psc->bhi_off = qwz_pci_read(sc, MHI_BHI_OFFSET);
+
+               /* Device clear BHI INTVEC so re-program it. */
+               qwz_pci_write(sc, psc->bhi_off + MHI_BHI_INTVEC, 0x00);
+       }
+
+       return 0;
+}
+
+/* Clear the BHIE TX vector doorbell register. */
+static inline void
+qwz_mhi_reset_txvecdb(struct qwz_softc *sc)
+{
+       qwz_pcic_write32(sc, PCIE_TXVECDB, 0);
+}
+
+/* Clear the BHIE TX vector status register. */
+static inline void
+qwz_mhi_reset_txvecstatus(struct qwz_softc *sc)
+{
+       qwz_pcic_write32(sc, PCIE_TXVECSTATUS, 0);
+}
+
+/* Clear the BHIE RX vector doorbell register. */
+static inline void
+qwz_mhi_reset_rxvecdb(struct qwz_softc *sc)
+{
+       qwz_pcic_write32(sc, PCIE_RXVECDB, 0);
+}
+
+/* Clear the BHIE RX vector status register. */
+static inline void
+qwz_mhi_reset_rxvecstatus(struct qwz_softc *sc)
+{
+       qwz_pcic_write32(sc, PCIE_RXVECSTATUS, 0);
+}
+
+/* Reset all BHIE TX/RX vector doorbell and status registers. */
+void
+qwz_mhi_clear_vector(struct qwz_softc *sc)
+{
+       qwz_mhi_reset_txvecdb(sc);
+       qwz_mhi_reset_txvecstatus(sc);
+       qwz_mhi_reset_rxvecdb(sc);
+       qwz_mhi_reset_rxvecstatus(sc);
+}
+
+/*
+ * Load the AMSS firmware image into the device: the second-stage boot
+ * loader via BHI, then the full image via BHIE, and wait for the
+ * device to enter the AMSS execution environment.
+ *
+ * Returns 0 on success or an errno-style value on failure.
+ */
+int
+qwz_mhi_fw_load_handler(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       int ret;
+       char amss_path[PATH_MAX];
+       u_char *data;
+       size_t len;
+
+       /*
+        * Build the firmware path up front so the error messages below
+        * always print a valid name; previously amss_path was left
+        * uninitialized when a cached image was used.
+        */
+       ret = snprintf(amss_path, sizeof(amss_path), "%s-%s-%s",
+           ATH12K_FW_DIR, sc->hw_params.fw.dir, ATH12K_AMSS_FILE);
+       if (ret < 0 || ret >= sizeof(amss_path))
+               return ENOSPC;
+
+       if (sc->fw_img[QWZ_FW_AMSS].data) {
+               /* Reuse the image cached by a previous load. */
+               data = sc->fw_img[QWZ_FW_AMSS].data;
+               len = sc->fw_img[QWZ_FW_AMSS].size;
+       } else {
+               ret = loadfirmware(amss_path, &data, &len);
+               if (ret) {
+                       printf("%s: could not read %s (error %d)\n",
+                           sc->sc_dev.dv_xname, amss_path, ret);
+                       return ret;
+               }
+
+               /* The BHI stage below needs at least one full chunk. */
+               if (len < MHI_DMA_VEC_CHUNK_SIZE) {
+                       printf("%s: %s is too short, have only %zu bytes\n",
+                           sc->sc_dev.dv_xname, amss_path, len);
+                       free(data, M_DEVBUF, len);
+                       return EINVAL;
+               }
+
+               /* Cache the image for subsequent device resets. */
+               sc->fw_img[QWZ_FW_AMSS].data = data;
+               sc->fw_img[QWZ_FW_AMSS].size = len;
+       }
+
+       /* Second-stage boot loader sits in the first 512 KB of image. */
+       ret = qwz_mhi_fw_load_bhi(psc, data, MHI_DMA_VEC_CHUNK_SIZE);
+       if (ret != 0) {
+               printf("%s: could not load firmware %s\n",
+                   sc->sc_dev.dv_xname, amss_path);
+               return ret;
+       }
+
+       /* Now load the full image. */
+       ret = qwz_mhi_fw_load_bhie(psc, data, len);
+       if (ret != 0) {
+               printf("%s: could not load firmware %s\n",
+                   sc->sc_dev.dv_xname, amss_path);
+               return ret;
+       }
+
+       /* Wait for the interrupt path to report the AMSS EE. */
+       while (psc->bhi_ee < MHI_EE_AMSS) {
+               ret = tsleep_nsec(&psc->bhi_ee, 0, "qwzamss",
+                   SEC_TO_NSEC(5));
+               if (ret)
+                       break;
+       }
+       if (ret != 0) {
+               printf("%s: device failed to enter AMSS EE\n",
+                   sc->sc_dev.dv_xname);
+       }
+
+       return ret;
+}
+
+/*
+ * Poll for the device to clear the MHI_CTRL reset bit.
+ * Returns 0 once the bit is clear, ETIMEDOUT otherwise.
+ */
+int
+qwz_mhi_await_device_reset(struct qwz_softc *sc)
+{
+       const uint32_t msecs = 24, retries = 2;
+       uint32_t reg;
+       int i;
+
+       /* Poll for CTRL RESET to clear. */
+       for (i = 0; i < retries; i++) {
+               reg = qwz_pci_read(sc, MHI_CTRL);
+               DNPRINTF(QWZ_D_MHI, "%s: MHI_CTRL is 0x%x\n", __func__, reg);
+               if ((reg & MHI_CTRL_RESET_MASK) == 0)
+                       return 0;
+               DELAY((msecs / retries) * 1000);
+       }
+
+       DNPRINTF(QWZ_D_MHI, "%s: MHI reset failed\n", __func__);
+       return ETIMEDOUT;
+}
+
+/*
+ * Poll for the device to assert READY in MHI_STATUS.  The READY bit
+ * is acknowledged by writing it back as zero.
+ * Returns 0 on success, ETIMEDOUT if the device never becomes ready.
+ */
+int
+qwz_mhi_await_device_ready(struct qwz_softc *sc)
+{
+       const uint32_t msecs = 2000, retries = 4;
+       uint32_t reg;
+       int i;
+
+       /* Poll for READY to be set. */
+       for (i = 0; i < retries; i++) {
+               reg = qwz_pci_read(sc, MHI_STATUS);
+               DNPRINTF(QWZ_D_MHI, "%s: MHI_STATUS is 0x%x\n", __func__, reg);
+               if (reg & MHI_STATUS_READY_MASK) {
+                       reg &= ~MHI_STATUS_READY_MASK;
+                       qwz_pci_write(sc, MHI_STATUS, reg);
+                       return 0;
+               }
+               DELAY((msecs / retries) * 1000);
+       }
+
+       printf("%s: MHI not ready\n", sc->sc_dev.dv_xname);
+       return ETIMEDOUT;
+}
+
+/*
+ * Transition the device through the READY state: wait for the reset
+ * bit to clear and READY to assert, program the MMIO context
+ * registers, ring the event-ring doorbells, and finally request the
+ * M0 state.
+ */
+void
+qwz_mhi_ready_state_transition(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       int ret, i;
+
+       ret = qwz_mhi_await_device_reset(sc);
+       if (ret)
+               return;
+
+       ret = qwz_mhi_await_device_ready(sc);
+       if (ret)
+               return;
+
+       /* Set up memory-mapped IO for channels, events, etc. */
+       qwz_mhi_init_mmio(psc);
+
+       /* Notify event rings. */
+       for (i = 0; i < nitems(psc->event_rings); i++) {
+               struct qwz_pci_event_ring *ring = &psc->event_rings[i];
+               qwz_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
+       }
+
+       /*
+        * Set the device into M0 state. The device will transition
+        * into M0 and the execution environment will switch to SBL.
+        */
+       qwz_mhi_set_state(sc, MHI_STATE_M0);
+}
+
+/*
+ * Called once the device reaches mission mode: re-notify the event
+ * rings while holding a device wake reference.
+ */
+void
+qwz_mhi_mission_mode_state_transition(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       int i;
+
+       qwz_mhi_device_wake(sc);
+
+       /* Notify event rings. */
+       for (i = 0; i < nitems(psc->event_rings); i++) {
+               struct qwz_pci_event_ring *ring = &psc->event_rings[i];
+               qwz_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
+       }
+
+       /* TODO: Notify transfer/command rings? */
+
+       qwz_mhi_device_zzz(sc);
+}
+
+/* Request the M2 low-power state. */
+void
+qwz_mhi_low_power_mode_state_transition(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+
+       qwz_mhi_set_state(sc, MHI_STATE_M2);
+}
+
+/*
+ * Request an MHI state transition via MHI_CTRL.  MHI_STATE_RESET is
+ * special: it is requested through the RESET bit rather than the
+ * MHISTATE field.
+ */
+void
+qwz_mhi_set_state(struct qwz_softc *sc, uint32_t state)
+{
+       uint32_t ctrl;
+
+       ctrl = qwz_pci_read(sc, MHI_CTRL);
+       if (state == MHI_STATE_RESET) {
+               ctrl |= MHI_CTRL_RESET_MASK;
+       } else {
+               ctrl &= ~MHI_CTRL_MHISTATE_MASK;
+               ctrl |= (state << MHI_CTRL_MHISTATE_SHFT) & MHI_CTRL_MHISTATE_MASK;
+       }
+       qwz_pci_write(sc, MHI_CTRL, ctrl);
+}
+
+/*
+ * Program the MHI MMIO registers: doorbell addresses for the wake
+ * doorbell and the transfer/event rings, the DMA addresses of the
+ * channel, event and command context arrays, the control/data address
+ * ranges, and the number of event rings in MHI_CFG.
+ */
+void
+qwz_mhi_init_mmio(struct qwz_pci_softc *psc)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       uint64_t paddr;
+       uint32_t reg;
+       int i;
+
+       /* Channel doorbells start at the device-reported offset. */
+       reg = qwz_pci_read(sc, MHI_CHDBOFF);
+
+       /* Set device wake doorbell address. */
+       psc->wake_db = reg + 8 * MHI_DEV_WAKE_DB;
+
+       /* Set doorbell address for each transfer ring. */
+       for (i = 0; i < nitems(psc->xfer_rings); i++) {
+               struct qwz_pci_xfer_ring *ring = &psc->xfer_rings[i];
+               ring->db_addr = reg + (8 * ring->mhi_chan_id);
+       }
+
+       reg = qwz_pci_read(sc, MHI_ERDBOFF);
+       /* Set doorbell address for each event ring. */
+       for (i = 0; i < nitems(psc->event_rings); i++) {
+               struct qwz_pci_event_ring *ring = &psc->event_rings[i];
+               ring->db_addr = reg + (8 * i);
+       }
+
+       /* Tell the device where the context arrays live. */
+       paddr = QWZ_DMA_DVA(psc->chan_ctxt);
+       qwz_pci_write(sc, MHI_CCABAP_HIGHER, paddr >> 32);
+       qwz_pci_write(sc, MHI_CCABAP_LOWER, paddr & 0xffffffff);
+
+       paddr = QWZ_DMA_DVA(psc->event_ctxt);
+       qwz_pci_write(sc, MHI_ECABAP_HIGHER, paddr >> 32);
+       qwz_pci_write(sc, MHI_ECABAP_LOWER, paddr & 0xffffffff);
+
+       paddr = QWZ_DMA_DVA(psc->cmd_ctxt);
+       qwz_pci_write(sc, MHI_CRCBAP_HIGHER, paddr >> 32);
+       qwz_pci_write(sc, MHI_CRCBAP_LOWER, paddr & 0xffffffff);
+
+       /* Not (yet?) using fixed memory space from a device-tree. */
+       qwz_pci_write(sc, MHI_CTRLBASE_HIGHER, 0);
+       qwz_pci_write(sc, MHI_CTRLBASE_LOWER, 0);
+       qwz_pci_write(sc, MHI_DATABASE_HIGHER, 0);
+       qwz_pci_write(sc, MHI_DATABASE_LOWER, 0);
+       qwz_pci_write(sc, MHI_CTRLLIMIT_HIGHER, 0x0);
+       qwz_pci_write(sc, MHI_CTRLLIMIT_LOWER, 0xffffffff);
+       qwz_pci_write(sc, MHI_DATALIMIT_HIGHER, 0x0);
+       qwz_pci_write(sc, MHI_DATALIMIT_LOWER, 0xffffffff);
+
+       /* Advertise the number of (software) event rings. */
+       reg = qwz_pci_read(sc, MHI_CFG);
+       reg &= ~(MHI_CFG_NER_MASK | MHI_CFG_NHWER_MASK);
+       reg |= QWZ_NUM_EVENT_CTX << MHI_CFG_NER_SHFT;
+       qwz_pci_write(sc, MHI_CFG, reg);
+}
+
/*
 * Load a firmware image into the device via the MHI Boot Host Interface
 * (BHI), used while the device is still running its primary boot loader.
 * The image is staged in a freshly allocated DMA buffer, its address and
 * size are programmed into the BHI registers, and a doorbell write kicks
 * off the transfer.
 * Returns 0 on success, non-zero on allocation failure or timeout.
 */
int
qwz_mhi_fw_load_bhi(struct qwz_pci_softc *psc, uint8_t *data, size_t len)
{
	struct qwz_softc *sc = &psc->sc_sc;
	struct qwz_dmamem *data_adm;
	uint32_t seq, reg, status = MHI_BHI_STATUS_RESET;
	uint64_t paddr;
	int ret;

	data_adm = qwz_dmamem_alloc(sc->sc_dmat, len, 0);
	if (data_adm == NULL) {
		printf("%s: could not allocate BHI DMA data buffer\n",
		    sc->sc_dev.dv_xname);
		return 1;
	}

	/* Copy firmware image to DMA memory. */
	memcpy(QWZ_DMA_KVA(data_adm), data, len);

	/* Clear BHI status before starting a new transfer. */
	qwz_pci_write(sc, psc->bhi_off + MHI_BHI_STATUS, 0);

	/* Set data physical address and length. */
	paddr = QWZ_DMA_DVA(data_adm);
	qwz_pci_write(sc, psc->bhi_off + MHI_BHI_IMGADDR_HIGH, paddr >> 32);
	qwz_pci_write(sc, psc->bhi_off + MHI_BHI_IMGADDR_LOW,
	    paddr & 0xffffffff);
	qwz_pci_write(sc, psc->bhi_off + MHI_BHI_IMGSIZE, len);

	/* Set a random transaction sequence number. */
	do {
		seq = arc4random_uniform(MHI_BHI_TXDB_SEQNUM_BMSK);
	} while (seq == 0);
	/* The doorbell write starts the image transfer. */
	qwz_pci_write(sc, psc->bhi_off + MHI_BHI_IMGTXDB, seq);

	/*
	 * Wait for completion.  The interrupt path updates psc->bhi_ee and
	 * wakes this channel on execution-environment changes; the loop
	 * also ends once the device has moved past PBL (bhi_ee >= SBL).
	 */
	ret = 0;
	while (status != MHI_BHI_STATUS_SUCCESS && psc->bhi_ee < MHI_EE_SBL) {
		ret = tsleep_nsec(&psc->bhi_ee, 0, "qwzbhi", SEC_TO_NSEC(5));
		if (ret)
			break;
		reg = qwz_pci_read(sc, psc->bhi_off + MHI_BHI_STATUS);
		status = (reg & MHI_BHI_STATUS_MASK) >> MHI_BHI_STATUS_SHFT;
	}

	if (ret) {
		printf("%s: BHI load timeout\n", sc->sc_dev.dv_xname);
		/* Re-read status once more for the debug printout. */
		reg = qwz_pci_read(sc, psc->bhi_off + MHI_BHI_STATUS);
		status = (reg & MHI_BHI_STATUS_MASK) >> MHI_BHI_STATUS_SHFT;
		DNPRINTF(QWZ_D_MHI, "%s: BHI status is 0x%x EE is 0x%x\n",
		    __func__, status, psc->bhi_ee);
	}

	qwz_dmamem_free(sc->sc_dmat, data_adm);
	return ret;
}
+
/*
 * Load a firmware image via the extended Boot Host Interface (BHIE),
 * which DMA-copies the image in chunks described by a vector table the
 * host builds in DMA memory.  The data and vector buffers are cached in
 * the softc (psc->amss_data/amss_vec) and reused across calls; they are
 * only reallocated if a larger image arrives.
 * Returns 0 on success, non-zero on allocation failure or timeout.
 */
int
qwz_mhi_fw_load_bhie(struct qwz_pci_softc *psc, uint8_t *data, size_t len)
{
	struct qwz_softc *sc = &psc->sc_sc;
	struct qwz_dma_vec_entry *vec;
	uint32_t seq, reg, state = MHI_BHIE_TXVECSTATUS_STATUS_RESET;
	uint64_t paddr;
	const size_t chunk_size = MHI_DMA_VEC_CHUNK_SIZE;
	size_t nseg, remain, vec_size;
	int i, ret;

	nseg = howmany(len, chunk_size);
	if (nseg == 0) {
		printf("%s: BHIE data too short, have only %zu bytes\n",
		    sc->sc_dev.dv_xname, len);
		return 1;
	}

	/* (Re)allocate the data buffer if the cached one is too small. */
	if (psc->amss_data == NULL || QWZ_DMA_LEN(psc->amss_data) < len) {
		if (psc->amss_data)
			qwz_dmamem_free(sc->sc_dmat, psc->amss_data);
		psc->amss_data = qwz_dmamem_alloc(sc->sc_dmat, len, 0);
		if (psc->amss_data == NULL) {
			printf("%s: could not allocate BHIE DMA data buffer\n",
			    sc->sc_dev.dv_xname);
			return 1;
		}
	}

	/* (Re)allocate the vector table if the cached one is too small. */
	vec_size = nseg * sizeof(*vec);
	if (psc->amss_vec == NULL || QWZ_DMA_LEN(psc->amss_vec) < vec_size) {
		if (psc->amss_vec)
			qwz_dmamem_free(sc->sc_dmat, psc->amss_vec);
		psc->amss_vec = qwz_dmamem_alloc(sc->sc_dmat, vec_size, 0);
		if (psc->amss_vec == NULL) {
			printf("%s: could not allocate BHIE DMA vec buffer\n",
			    sc->sc_dev.dv_xname);
			qwz_dmamem_free(sc->sc_dmat, psc->amss_data);
			psc->amss_data = NULL;
			return 1;
		}
	}

	/* Copy firmware image to DMA memory. */
	memcpy(QWZ_DMA_KVA(psc->amss_data), data, len);

	/* Create vector which controls chunk-wise DMA copy in hardware. */
	paddr = QWZ_DMA_DVA(psc->amss_data);
	vec = QWZ_DMA_KVA(psc->amss_vec);
	remain = len;
	for (i = 0; i < nseg; i++) {
		vec[i].paddr = paddr;
		if (remain >= chunk_size) {
			vec[i].size = chunk_size;
			remain -= chunk_size;
			paddr += chunk_size;
		} else
			vec[i].size = remain;	/* final, partial chunk */
	}

	/* Set vector physical address and length. */
	paddr = QWZ_DMA_DVA(psc->amss_vec);
	qwz_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECADDR_HIGH_OFFS,
	    paddr >> 32);
	qwz_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECADDR_LOW_OFFS,
	    paddr & 0xffffffff);
	qwz_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECSIZE_OFFS, vec_size);

	/* Set a random transaction sequence number. */
	do {
		seq = arc4random_uniform(MHI_BHIE_TXVECSTATUS_SEQNUM_BMSK);
	} while (seq == 0);
	reg = qwz_pci_read(sc, psc->bhie_off + MHI_BHIE_TXVECDB_OFFS);
	reg &= ~MHI_BHIE_TXVECDB_SEQNUM_BMSK;
	reg |= seq << MHI_BHIE_TXVECDB_SEQNUM_SHFT;
	/* The doorbell write starts the chunked transfer. */
	qwz_pci_write(sc, psc->bhie_off + MHI_BHIE_TXVECDB_OFFS, reg);

	/*
	 * Wait for completion.  qwz_mhi_state_change() wakes &psc->bhie_off
	 * when the device enters the AMSS execution environment.
	 */
	ret = 0;
	while (state != MHI_BHIE_TXVECSTATUS_STATUS_XFER_COMPL) {
		ret = tsleep_nsec(&psc->bhie_off, 0, "qwzbhie",
		    SEC_TO_NSEC(5));
		if (ret)
			break;
		reg = qwz_pci_read(sc,
		    psc->bhie_off + MHI_BHIE_TXVECSTATUS_OFFS);
		state = (reg & MHI_BHIE_TXVECSTATUS_STATUS_BMSK) >>
		    MHI_BHIE_TXVECSTATUS_STATUS_SHFT;
		DNPRINTF(QWZ_D_MHI, "%s: txvec state is 0x%x\n", __func__,
		    state);
	}

	if (ret) {
		printf("%s: BHIE load timeout\n", sc->sc_dev.dv_xname);
		return ret;
	}
	return 0;
}
+
/*
 * Arm the BHIE RX vector transfer used for RDDM firmware crash dumps.
 * Allocates a dump buffer and a chunk-descriptor vector, programs their
 * addresses into the BHIE RX registers and rings the doorbell.  The
 * transfer itself only happens after a firmware crash; completion is
 * polled later by qwz_rddm_task().  On success the buffers are stored
 * in psc->rddm_data/rddm_vec; on failure everything is freed again.
 */
void
qwz_rddm_prepare(struct qwz_pci_softc *psc)
{
	struct qwz_softc *sc = &psc->sc_sc;
	struct qwz_dma_vec_entry *vec;
	struct qwz_dmamem *data_adm, *vec_adm;
	uint32_t seq, reg;
	uint64_t paddr;
	const size_t len = QWZ_RDDM_DUMP_SIZE;
	const size_t chunk_size = MHI_DMA_VEC_CHUNK_SIZE;
	size_t nseg, remain, vec_size;
	int i;

	nseg = howmany(len, chunk_size);
	if (nseg == 0) {
		printf("%s: RDDM data too short, have only %zu bytes\n",
		    sc->sc_dev.dv_xname, len);
		return;
	}

	data_adm = qwz_dmamem_alloc(sc->sc_dmat, len, 0);
	if (data_adm == NULL) {
		printf("%s: could not allocate BHIE DMA data buffer\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	vec_size = nseg * sizeof(*vec);
	vec_adm = qwz_dmamem_alloc(sc->sc_dmat, vec_size, 0);
	if (vec_adm == NULL) {
		printf("%s: could not allocate BHIE DMA vector buffer\n",
		    sc->sc_dev.dv_xname);
		qwz_dmamem_free(sc->sc_dmat, data_adm);
		return;
	}

	/* Create vector which controls chunk-wise DMA copy from hardware. */
	paddr = QWZ_DMA_DVA(data_adm);
	vec = QWZ_DMA_KVA(vec_adm);
	remain = len;
	for (i = 0; i < nseg; i++) {
		vec[i].paddr = paddr;
		if (remain >= chunk_size) {
			vec[i].size = chunk_size;
			remain -= chunk_size;
			paddr += chunk_size;
		} else
			vec[i].size = remain;	/* final, partial chunk */
	}

	/* Set vector physical address and length. */
	paddr = QWZ_DMA_DVA(vec_adm);
	qwz_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECADDR_HIGH_OFFS,
	    paddr >> 32);
	qwz_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECADDR_LOW_OFFS,
	    paddr & 0xffffffff);
	qwz_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECSIZE_OFFS, vec_size);

	/* Set a random transaction sequence number. */
	do {
		seq = arc4random_uniform(MHI_BHIE_RXVECSTATUS_SEQNUM_BMSK);
	} while (seq == 0);

	reg = qwz_pci_read(sc, psc->bhie_off + MHI_BHIE_RXVECDB_OFFS);
	reg &= ~MHI_BHIE_RXVECDB_SEQNUM_BMSK;
	reg |= seq << MHI_BHIE_RXVECDB_SEQNUM_SHFT;
	qwz_pci_write(sc, psc->bhie_off + MHI_BHIE_RXVECDB_OFFS, reg);

	/* Keep the buffers around for qwz_rddm_task(). */
	psc->rddm_data = data_adm;
	psc->rddm_vec = vec_adm;
}
+
#ifdef QWZ_DEBUG
/*
 * Debug-only task: after a firmware crash (RDDM), poll for the dump
 * transfer armed by qwz_rddm_prepare() to complete, print a snippet of
 * the dump, then write the whole dump to /root/<dev>-rddm.bin so it can
 * be inspected from userland.  Frees the RDDM buffers when done.
 */
void
qwz_rddm_task(void *arg)
{
	struct qwz_pci_softc *psc = arg;
	struct qwz_softc *sc = &psc->sc_sc;
	uint32_t reg, state = MHI_BHIE_RXVECSTATUS_STATUS_RESET;
	const size_t len = QWZ_RDDM_DUMP_SIZE;
	int i, timeout;
	const uint32_t msecs = 100, retries = 20;
	uint8_t *rddm;
	struct nameidata nd;
	struct vnode *vp = NULL;
	struct iovec iov[3];
	struct uio uio;
	char path[PATH_MAX];
	int error = 0;

	if (psc->rddm_data == NULL) {
		DPRINTF("%s: RDDM not prepared\n", __func__);
		return;
	}

	/* Poll for completion */
	timeout = retries;
	while (timeout > 0 && state != MHI_BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
		reg = qwz_pci_read(sc,
		    psc->bhie_off + MHI_BHIE_RXVECSTATUS_OFFS);
		state = (reg & MHI_BHIE_RXVECSTATUS_STATUS_BMSK) >>
		    MHI_BHIE_RXVECSTATUS_STATUS_SHFT;
		DPRINTF("%s: txvec state is 0x%x\n", __func__, state);
		/* 5ms per retry, 100ms worst case in total. */
		DELAY((msecs / retries) * 1000);
		timeout--;
	}

	if (timeout == 0) {
		DPRINTF("%s: RDDM dump failed\n", sc->sc_dev.dv_xname);
		return;
	}

	/* Print the first 64 bytes of the dump as a sanity check. */
	rddm = QWZ_DMA_KVA(psc->rddm_data);
	DPRINTF("%s: RDDM snippet:\n", __func__);
	for (i = 0; i < MIN(64, len); i++) {
		DPRINTF("%s %.2x", i % 16 == 0 ? "\n" : "", rddm[i]);
	}
	DPRINTF("\n");

	/* Crash may happen early; give the root fs a chance to be writable. */
	DPRINTF("%s: sleeping for 30 seconds to allow userland to boot\n", __func__);
	tsleep_nsec(&psc->rddm_data, 0, "qwzrddm", SEC_TO_NSEC(30));

	snprintf(path, sizeof(path), "/root/%s-rddm.bin", sc->sc_dev.dv_xname);
	DPRINTF("%s: saving RDDM to %s\n", __func__, path);
	NDINIT(&nd, 0, 0, UIO_SYSSPACE, path, curproc);
	nd.ni_pledge = PLEDGE_CPATH | PLEDGE_WPATH;
	nd.ni_unveil = UNVEIL_CREATE | UNVEIL_WRITE;
	error = vn_open(&nd, FWRITE | O_CREAT | O_NOFOLLOW | O_TRUNC,
	    S_IRUSR | S_IWUSR);
	if (error) {
		DPRINTF("%s: vn_open: error %d\n", __func__, error);
		goto done;
	}
	vp = nd.ni_vp;
	VOP_UNLOCK(vp);

	/* NOTE(review): iov[1] is set but uio_iovcnt is 1 — iov[1] unused. */
	iov[0].iov_base = (void *)rddm;
	iov[0].iov_len = len;
	iov[1].iov_len = 0;
	uio.uio_iov = &iov[0];
	uio.uio_offset = 0;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_resid = len;
	uio.uio_iovcnt = 1;
	uio.uio_procp = curproc;
	error = vget(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error) {
		DPRINTF("%s: vget: error %d\n", __func__, error);
		goto done;
	}
	error = VOP_WRITE(vp, &uio, IO_UNIT|IO_APPEND, curproc->p_ucred);
	vput(vp);
	if (error)
		DPRINTF("%s: VOP_WRITE: error %d\n", __func__, error);
	/*
	 * NOTE(review): with vn_close disabled below, the reference taken
	 * by vn_open appears to be leaked — confirm and re-enable or drop.
	 */
	#if 0
	error = vn_close(vp, FWRITE, curproc->p_ucred, curproc);
	if (error)
		DPRINTF("%s: vn_close: error %d\n", __func__, error);
	#endif
done:
	qwz_dmamem_free(sc->sc_dmat, psc->rddm_data);
	qwz_dmamem_free(sc->sc_dmat, psc->rddm_vec);
	psc->rddm_data = NULL;
	psc->rddm_vec = NULL;
	DPRINTF("%s: done, error %d\n", __func__, error);
}
#endif
+
+void *
+qwz_pci_event_ring_get_elem(struct qwz_pci_event_ring *ring, uint64_t rp)
+{
+       uint64_t base = QWZ_DMA_DVA(ring->dmamem), offset;
+       void *addr = QWZ_DMA_KVA(ring->dmamem);
+
+       if (rp < base)
+               return NULL;
+
+       offset = rp - base;
+       if (offset >= ring->size)
+               return NULL;
+
+       return addr + offset;
+}
+
+void
+qwz_mhi_state_change(struct qwz_pci_softc *psc, int ee, int mhi_state)
+{
+       struct qwz_softc *sc = &psc->sc_sc;
+       uint32_t old_ee = psc->bhi_ee;
+       uint32_t old_mhi_state = psc->mhi_state;
+
+       if (ee != -1 && psc->bhi_ee != ee) {
+               switch (ee) {
+               case MHI_EE_PBL:
+                       DNPRINTF(QWZ_D_MHI, "%s: new EE PBL\n",
+                           sc->sc_dev.dv_xname);
+                       psc->bhi_ee = ee;
+                       break;
+               case MHI_EE_SBL:
+                       psc->bhi_ee = ee;
+                       DNPRINTF(QWZ_D_MHI, "%s: new EE SBL\n",
+                           sc->sc_dev.dv_xname);
+                       break;
+               case MHI_EE_AMSS:
+                       DNPRINTF(QWZ_D_MHI, "%s: new EE AMSS\n",
+                           sc->sc_dev.dv_xname);
+                       psc->bhi_ee = ee;
+                       /* Wake thread loading the full AMSS image. */
+                       wakeup(&psc->bhie_off);
+                       break;
+               case MHI_EE_WFW:
+                       DNPRINTF(QWZ_D_MHI, "%s: new EE WFW\n",
+                           sc->sc_dev.dv_xname);
+                       psc->bhi_ee = ee;
+                       break;
+               default:
+                       printf("%s: unhandled EE change to %x\n",
+                           sc->sc_dev.dv_xname, ee);
+                       break;
+               }
+       }
+
+       if (mhi_state != -1 && psc->mhi_state != mhi_state) {
+               switch (mhi_state) {
+               case -1:
+                       break;
+               case MHI_STATE_RESET:
+                       DNPRINTF(QWZ_D_MHI, "%s: new MHI state RESET\n",
+                           sc->sc_dev.dv_xname);
+                       psc->mhi_state = mhi_state;
+                       break;
+               case MHI_STATE_READY:
+                       DNPRINTF(QWZ_D_MHI, "%s: new MHI state READY\n",
+                           sc->sc_dev.dv_xname);
+                       psc->mhi_state = mhi_state;
+                       qwz_mhi_ready_state_transition(psc);
+                       break;
+               case MHI_STATE_M0:
+                       DNPRINTF(QWZ_D_MHI, "%s: new MHI state M0\n",
+                           sc->sc_dev.dv_xname);
+                       psc->mhi_state = mhi_state;
+                       qwz_mhi_mission_mode_state_transition(psc);
+                       break;
+               case MHI_STATE_M1:
+                       DNPRINTF(QWZ_D_MHI, "%s: new MHI state M1\n",
+                           sc->sc_dev.dv_xname);
+                       psc->mhi_state = mhi_state;
+                       qwz_mhi_low_power_mode_state_transition(psc);
+                       break;
+               case MHI_STATE_SYS_ERR:
+                       DNPRINTF(QWZ_D_MHI,
+                           "%s: new MHI state SYS ERR\n",
+                           sc->sc_dev.dv_xname);
+                       psc->mhi_state = mhi_state;
+                       break;
+               default:
+                       printf("%s: unhandled MHI state change to %x\n",
+                           sc->sc_dev.dv_xname, mhi_state);
+                       break;
+               }
+       }
+
+       if (old_ee != psc->bhi_ee)
+               wakeup(&psc->bhi_ee);
+       if (old_mhi_state != psc->mhi_state)
+               wakeup(&psc->mhi_state);
+}
+
+void
+qwz_pci_intr_ctrl_event_mhi(struct qwz_pci_softc *psc, uint32_t mhi_state)
+{
+       DNPRINTF(QWZ_D_MHI, "%s: MHI state change 0x%x -> 0x%x\n", __func__,
+           psc->mhi_state, mhi_state);
+
+       if (psc->mhi_state != mhi_state)
+               qwz_mhi_state_change(psc, -1, mhi_state);
+}
+
+void
+qwz_pci_intr_ctrl_event_ee(struct qwz_pci_softc *psc, uint32_t ee)
+{
+       DNPRINTF(QWZ_D_MHI, "%s: EE change 0x%x to 0x%x\n", __func__,
+           psc->bhi_ee, ee);
+
+       if (psc->bhi_ee != ee)
+               qwz_mhi_state_change(psc, ee, -1);
+}
+
+void
+qwz_pci_intr_ctrl_event_cmd_complete(struct qwz_pci_softc *psc,
+    uint64_t ptr, uint32_t cmd_status)
+{
+       struct qwz_pci_cmd_ring *cmd_ring = &psc->cmd_ring;
+       uint64_t base = QWZ_DMA_DVA(cmd_ring->dmamem);
+       struct qwz_pci_xfer_ring *xfer_ring = NULL;
+       struct qwz_mhi_ring_element *e;
+       uint32_t tre1, chid;
+       size_t i;
+
+       e = qwz_pci_cmd_ring_get_elem(cmd_ring, ptr);
+       if (e == NULL)
+               return;
+
+       tre1 = le32toh(e->dword[1]);
+       chid = (tre1 & MHI_TRE1_EV_CHID_MASK) >> MHI_TRE1_EV_CHID_SHFT;
+
+       for (i = 0; i < nitems(psc->xfer_rings); i++) {
+               if (psc->xfer_rings[i].mhi_chan_id == chid) {
+                       xfer_ring = &psc->xfer_rings[i];
+                       break;
+               }
+       }
+       if (xfer_ring == NULL) {
+               printf("%s: no transfer ring found for command completion "
+                   "on channel %u\n", __func__, chid);
+               return;
+       }
+
+       xfer_ring->cmd_status = cmd_status;
+       wakeup(&xfer_ring->cmd_status);
+
+       if (cmd_ring->rp + sizeof(*e) >= base + cmd_ring->size)
+               cmd_ring->rp = base;
+       else
+               cmd_ring->rp += sizeof(*e);
+}
+
/*
 * Service the MHI control event ring: consume all events the device has
 * written between our read pointer and the device's, dispatch them by
 * type (state change, EE change, command completion), then publish the
 * updated write pointer back to the device and ring its doorbell.
 * Returns 1 if any event was handled, 0 otherwise.
 */
int
qwz_pci_intr_ctrl_event(struct qwz_pci_softc *psc, struct qwz_pci_event_ring *ring)
{
	struct qwz_softc *sc = &psc->sc_sc;
	struct qwz_mhi_event_ctxt *c;
	uint64_t rp, wp, base;
	struct qwz_mhi_ring_element *e;
	uint32_t tre0, tre1, type, code, chid, len;

	c = ring->event_ctxt;
	if (c == NULL) {
		/*
		 * Interrupts can trigger before mhi_init_event_rings()
		 * if the device is still active after a warm reboot.
		 */
		return 0;
	}

	/* The device writes rp/wp into the shared event context via DMA. */
	bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(psc->event_ctxt), 0,
	    QWZ_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_POSTREAD);

	rp = le64toh(c->rp);
	wp = le64toh(c->wp);

	DNPRINTF(QWZ_D_MHI, "%s: kernel rp=0x%llx\n", __func__, ring->rp);
	DNPRINTF(QWZ_D_MHI, "%s: device rp=0x%llx\n", __func__, rp);
	DNPRINTF(QWZ_D_MHI, "%s: kernel wp=0x%llx\n", __func__, ring->wp);
	DNPRINTF(QWZ_D_MHI, "%s: device wp=0x%llx\n", __func__, wp);

	/* Nothing to do, or device pointers are out of bounds. */
	base = QWZ_DMA_DVA(ring->dmamem);
	if (ring->rp == rp || rp < base || rp >= base + ring->size)
		return 0;
	if (wp < base || wp >= base + ring->size)
		return 0;

	bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(ring->dmamem),
	    0, QWZ_DMA_LEN(ring->dmamem), BUS_DMASYNC_POSTREAD);

	/* Walk from our read pointer up to the device's. */
	while (ring->rp != rp) {
		e = qwz_pci_event_ring_get_elem(ring, ring->rp);
		if (e == NULL)
			return 0;

		tre0 = le32toh(e->dword[0]);
		tre1 = le32toh(e->dword[1]);

		len = (tre0 & MHI_TRE0_EV_LEN_MASK) >> MHI_TRE0_EV_LEN_SHFT;
		code = (tre0 & MHI_TRE0_EV_CODE_MASK) >> MHI_TRE0_EV_CODE_SHFT;
		type = (tre1 & MHI_TRE1_EV_TYPE_MASK) >> MHI_TRE1_EV_TYPE_SHFT;
		chid = (tre1 & MHI_TRE1_EV_CHID_MASK) >> MHI_TRE1_EV_CHID_SHFT;
		DNPRINTF(QWZ_D_MHI, "%s: len=%u code=0x%x type=0x%x chid=%d\n",
		    __func__, len, code, type, chid);

		switch (type) {
		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
			qwz_pci_intr_ctrl_event_mhi(psc, code);
			break;
		case MHI_PKT_TYPE_EE_EVENT:
			qwz_pci_intr_ctrl_event_ee(psc, code);
			break;
		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
			qwz_pci_intr_ctrl_event_cmd_complete(psc,
			    le64toh(e->ptr), code);
			break;
		default:
			printf("%s: unhandled event type 0x%x\n",
			    __func__, type);
			break;
		}

		/* Advance read and write pointers, wrapping at ring end. */
		if (ring->rp + sizeof(*e) >= base + ring->size)
			ring->rp = base;
		else
			ring->rp += sizeof(*e);

		if (ring->wp + sizeof(*e) >= base + ring->size)
			ring->wp = base;
		else
			ring->wp += sizeof(*e);
	}

	/* Tell the device how far we have consumed. */
	c->wp = htole64(ring->wp);

	bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(psc->event_ctxt), 0,
	    QWZ_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_PREWRITE);

	qwz_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
	return 1;
}
+
/*
 * Handle a transfer-completion (TX) event for an MHI data channel.
 * For inbound channels the completed RX mbufs are passed up to
 * qwz_qrtr_recv_msg(), then reset and requeued on the ring; for
 * outbound channels the completed TX mbufs are unloaded and freed.
 */
void
qwz_pci_intr_data_event_tx(struct qwz_pci_softc *psc, struct qwz_mhi_ring_element *e)
{
	struct qwz_softc *sc = &psc->sc_sc;
	struct qwz_pci_xfer_ring *ring;
	struct qwz_xfer_data *xfer;
	uint64_t rp, evrp, base, paddr;
	uint32_t tre0, tre1, code, chid, evlen, len;
	int i;

	tre0 = le32toh(e->dword[0]);
	tre1 = le32toh(e->dword[1]);

	evlen = (tre0 & MHI_TRE0_EV_LEN_MASK) >> MHI_TRE0_EV_LEN_SHFT;
	code = (tre0 & MHI_TRE0_EV_CODE_MASK) >> MHI_TRE0_EV_CODE_SHFT;
	chid = (tre1 & MHI_TRE1_EV_CHID_MASK) >> MHI_TRE1_EV_CHID_SHFT;

	switch (code) {
	case MHI_EV_CC_EOT:
		/* Map the event's channel id to its transfer ring. */
		for (i = 0; i < nitems(psc->xfer_rings); i++) {
			ring = &psc->xfer_rings[i];
			if (ring->mhi_chan_id == chid)
				break;
		}
		if (i == nitems(psc->xfer_rings)) {
			printf("%s: unhandled channel 0x%x\n",
			    __func__, chid);
			break;
		}
		base = QWZ_DMA_DVA(ring->dmamem);
		/* PTR contains the entry that was last written */
		evrp = letoh64(e->ptr);
		rp = evrp;
		if (rp < base || rp >= base + ring->size) {
			printf("%s: invalid ptr 0x%llx\n",
			    __func__, rp);
			break;
		}
		/* Point rp to next empty slot */
		if (rp + sizeof(*e) >= base + ring->size)
			rp = base;
		else
			rp += sizeof(*e);
		/* Parse until next empty slot */
		while (ring->rp != rp) {
			DNPRINTF(QWZ_D_MHI, "%s:%d: ring->rp 0x%llx "
			    "ring->wp 0x%llx rp 0x%llx\n", __func__,
			    __LINE__, ring->rp, ring->wp, rp);
			e = qwz_pci_xfer_ring_get_elem(ring, ring->rp);
			xfer = qwz_pci_xfer_ring_get_data(ring, ring->rp);

			/*
			 * Only the last entry carries the event's byte
			 * count; earlier ones completed their full size.
			 */
			if (ring->rp == evrp)
				len = evlen;
			else
				len = xfer->m->m_pkthdr.len;

			bus_dmamap_sync(sc->sc_dmat, xfer->map, 0,
			    xfer->m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
#ifdef QWZ_DEBUG
			{
			int i;
			DNPRINTF(QWZ_D_MHI, "%s: chan %u data (len %u): ",
			    __func__,
			    ring->mhi_chan_id, len);
			for (i = 0; i < MIN(32, len); i++) {
				DNPRINTF(QWZ_D_MHI, "%02x ",
				    (unsigned char)mtod(xfer->m, caddr_t)[i]);
			}
			if (i < len)
				DNPRINTF(QWZ_D_MHI, "...");
			DNPRINTF(QWZ_D_MHI, "\n");
			}
#endif
			if (ring->mhi_chan_direction == MHI_CHAN_TYPE_INBOUND) {
				/* Save m_data as upper layers use m_adj(9) */
				void *o_data = xfer->m->m_data;

				/* Pass mbuf to upper layers */
				qwz_qrtr_recv_msg(sc, xfer->m);

				/* Reset RX mbuf instead of free/alloc */
				KASSERT(xfer->m->m_next == NULL);
				xfer->m->m_data = o_data;
				xfer->m->m_len = xfer->m->m_pkthdr.len =
				    QWZ_PCI_XFER_MAX_DATA_SIZE;

				paddr = xfer->map->dm_segs[0].ds_addr;

				/* Re-arm this slot as a fresh RX transfer. */
				e->ptr = htole64(paddr);
				e->dword[0] = htole32((
				    QWZ_PCI_XFER_MAX_DATA_SIZE <<
				    MHI_TRE0_DATA_LEN_SHFT) &
				    MHI_TRE0_DATA_LEN_MASK);
				e->dword[1] = htole32(MHI_TRE1_DATA_IEOT |
				    MHI_TRE1_DATA_BEI |
				    MHI_TRE1_DATA_TYPE_TRANSFER <<
				    MHI_TRE1_DATA_TYPE_SHIFT);

				if (ring->wp + sizeof(*e) >= base + ring->size)
					ring->wp = base;
				else
					ring->wp += sizeof(*e);
			} else {
				/* Unload and free TX mbuf */
				bus_dmamap_unload(sc->sc_dmat, xfer->map);
				m_freem(xfer->m);
				xfer->m = NULL;
				ring->queued--;
			}

			if (ring->rp + sizeof(*e) >= base + ring->size)
				ring->rp = base;
			else
				ring->rp += sizeof(*e);
		}

		/* Hand the refilled RX slots back to the device. */
		if (ring->mhi_chan_direction == MHI_CHAN_TYPE_INBOUND) {
			ring->chan_ctxt->wp = htole64(ring->wp);

			bus_dmamap_sync(sc->sc_dmat,
			    QWZ_DMA_MAP(psc->chan_ctxt), 0,
			    QWZ_DMA_LEN(psc->chan_ctxt),
			    BUS_DMASYNC_PREWRITE);

			qwz_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
		}
		break;
	default:
		printf("%s: unhandled event code 0x%x\n",
		    __func__, code);
	}
}
+
/*
 * Service an MHI data event ring: consume transfer-completion events
 * written by the device, dispatch them to qwz_pci_intr_data_event_tx(),
 * then publish the updated write pointer and ring the doorbell.
 * Returns 1 if any event was handled, 0 otherwise.
 */
int
qwz_pci_intr_data_event(struct qwz_pci_softc *psc, struct qwz_pci_event_ring *ring)
{
	struct qwz_softc *sc = &psc->sc_sc;
	struct qwz_mhi_event_ctxt *c;
	uint64_t rp, wp, base;
	struct qwz_mhi_ring_element *e;
	uint32_t tre0, tre1, type, code, chid, len;

	c = ring->event_ctxt;
	if (c == NULL) {
		/*
		 * Interrupts can trigger before mhi_init_event_rings()
		 * if the device is still active after a warm reboot.
		 */
		return 0;
	}

	/* The device writes rp/wp into the shared event context via DMA. */
	bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(psc->event_ctxt), 0,
	    QWZ_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_POSTREAD);

	rp = le64toh(c->rp);
	wp = le64toh(c->wp);

	DNPRINTF(QWZ_D_MHI, "%s: kernel rp=0x%llx\n", __func__, ring->rp);
	DNPRINTF(QWZ_D_MHI, "%s: device rp=0x%llx\n", __func__, rp);
	DNPRINTF(QWZ_D_MHI, "%s: kernel wp=0x%llx\n", __func__, ring->wp);
	DNPRINTF(QWZ_D_MHI, "%s: device wp=0x%llx\n", __func__, wp);

	/*
	 * NOTE(review): unlike qwz_pci_intr_ctrl_event(), the device wp
	 * is not bounds-checked here — confirm whether that is intended.
	 */
	base = QWZ_DMA_DVA(ring->dmamem);
	if (ring->rp == rp || rp < base || rp >= base + ring->size)
		return 0;

	bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(ring->dmamem),
	    0, QWZ_DMA_LEN(ring->dmamem), BUS_DMASYNC_POSTREAD);

	/* Walk from our read pointer up to the device's. */
	while (ring->rp != rp) {
		e = qwz_pci_event_ring_get_elem(ring, ring->rp);
		if (e == NULL)
			return 0;

		tre0 = le32toh(e->dword[0]);
		tre1 = le32toh(e->dword[1]);

		len = (tre0 & MHI_TRE0_EV_LEN_MASK) >> MHI_TRE0_EV_LEN_SHFT;
		code = (tre0 & MHI_TRE0_EV_CODE_MASK) >> MHI_TRE0_EV_CODE_SHFT;
		type = (tre1 & MHI_TRE1_EV_TYPE_MASK) >> MHI_TRE1_EV_TYPE_SHFT;
		chid = (tre1 & MHI_TRE1_EV_CHID_MASK) >> MHI_TRE1_EV_CHID_SHFT;
		DNPRINTF(QWZ_D_MHI, "%s: len=%u code=0x%x type=0x%x chid=%d\n",
		    __func__, len, code, type, chid);

		switch (type) {
		case MHI_PKT_TYPE_TX_EVENT:
			qwz_pci_intr_data_event_tx(psc, e);
			break;
		default:
			printf("%s: unhandled event type 0x%x\n",
			    __func__, type);
			break;
		}

		/* Advance read and write pointers, wrapping at ring end. */
		if (ring->rp + sizeof(*e) >= base + ring->size)
			ring->rp = base;
		else
			ring->rp += sizeof(*e);

		if (ring->wp + sizeof(*e) >= base + ring->size)
			ring->wp = base;
		else
			ring->wp += sizeof(*e);
	}

	/* Tell the device how far we have consumed. */
	c->wp = htole64(ring->wp);

	bus_dmamap_sync(sc->sc_dmat, QWZ_DMA_MAP(psc->event_ctxt), 0,
	    QWZ_DMA_LEN(psc->event_ctxt), BUS_DMASYNC_PREWRITE);

	qwz_mhi_ring_doorbell(sc, ring->db_addr, ring->wp);
	return 1;
}
+
+int
+qwz_pci_intr_mhi_ctrl(void *arg)
+{
+       struct qwz_pci_softc *psc = arg;
+
+       if (qwz_pci_intr_ctrl_event(psc, &psc->event_rings[0]))
+               return 1;
+
+       return 0;
+}
+
+int
+qwz_pci_intr_mhi_data(void *arg)
+{
+       struct qwz_pci_softc *psc = arg;
+
+       if (qwz_pci_intr_data_event(psc, &psc->event_rings[1]))
+               return 1;
+
+       return 0;
+}
+
/*
 * Main (shared/legacy) interrupt handler.  Reads the current execution
 * environment and MHI state, handles a firmware crash (RDDM), forwards
 * boot-time EE/state changes to the MHI state machine, and — when only
 * a single interrupt vector is available — also polls the MHI event
 * rings, copy-engine pipes and DP srngs.
 * Returns 1 if the interrupt was handled, 0 otherwise.
 */
int
qwz_pci_intr(void *arg)
{
	struct qwz_pci_softc *psc = arg;
	struct qwz_softc *sc = (void *)psc;
	uint32_t ee, state;
	int ret = 0;

	/*
	 * Interrupts can trigger before mhi_start() during boot if the device
	 * is still active after a warm reboot.
	 */
	if (psc->bhi_off == 0)
		psc->bhi_off = qwz_pci_read(sc, MHI_BHI_OFFSET);

	ee = qwz_pci_read(sc, psc->bhi_off + MHI_BHI_EXECENV);
	state = qwz_pci_read(sc, MHI_STATUS);
	state = (state & MHI_STATUS_MHISTATE_MASK) >>
	    MHI_STATUS_MHISTATE_SHFT;

	DNPRINTF(QWZ_D_MHI,
	    "%s: BHI interrupt with EE: 0x%x -> 0x%x state: 0x%x -> 0x%x\n",
	     sc->sc_dev.dv_xname, psc->bhi_ee, ee, psc->mhi_state, state);

	if (ee == MHI_EE_RDDM) {
		/* Firmware crash, e.g. due to invalid DMA memory access. */
		psc->bhi_ee = ee;
#ifdef QWZ_DEBUG
		if (!psc->rddm_triggered) {
			/* Write fw memory dump to root's home directory. */
			task_add(systq, &psc->rddm_task);
			psc->rddm_triggered = 1;
		}
#else
		printf("%s: fatal firmware error\n",
		   sc->sc_dev.dv_xname);
		if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags) &&
		    (sc->sc_ic.ic_if.if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			/* Try to reset the device. */
			set_bit(ATH12K_FLAG_CRASH_FLUSH, sc->sc_flags);
			task_add(systq, &sc->init_task);
		}
#endif
		return 1;
	} else if (psc->bhi_ee == MHI_EE_PBL || psc->bhi_ee == MHI_EE_SBL) {
		/* During boot, feed EE/state changes to the state machine. */
		int new_ee = -1, new_mhi_state = -1;

		if (psc->bhi_ee != ee)
			new_ee = ee;

		if (psc->mhi_state != state)
			new_mhi_state = state;

		if (new_ee != -1 || new_mhi_state != -1)
			qwz_mhi_state_change(psc, new_ee, new_mhi_state);

		ret = 1;
	}

	/* With a single vector, all event sources share this handler. */
	if (!test_bit(ATH12K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) {
		int i;

		if (qwz_pci_intr_ctrl_event(psc, &psc->event_rings[0]))
			ret = 1;
		if (qwz_pci_intr_data_event(psc, &psc->event_rings[1]))
			ret = 1;

		for (i = 0; i < sc->hw_params.ce_count; i++) {
			struct qwz_ce_pipe *ce_pipe = &sc->ce.ce_pipe[i];

			if (qwz_ce_intr(ce_pipe))
				ret = 1;
		}

		if (test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags)) {
			for (i = 0; i < nitems(sc->ext_irq_grp); i++) {
				if (qwz_dp_service_srng(sc, i))
					ret = 1;
			}
		}
	}

	return ret;
}