These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index ee0c5f6..6fbd17b 100644
  */
 #include "core.h"
 #include "debug.h"
+#include "mac.h"
 #include "hw.h"
+#include "mac.h"
 #include "wmi.h"
 #include "wmi-ops.h"
 #include "wmi-tlv.h"
+#include "p2p.h"
+#include "testmode.h"
 
 /***************/
 /* TLV helpers */
@@ -31,9 +35,9 @@ struct wmi_tlv_policy {
 
 static const struct wmi_tlv_policy wmi_tlv_policies[] = {
        [WMI_TLV_TAG_ARRAY_BYTE]
-               = { .min_len = sizeof(u8) },
+               = { .min_len = 0 },
        [WMI_TLV_TAG_ARRAY_UINT32]
-               = { .min_len = sizeof(u32) },
+               = { .min_len = 0 },
        [WMI_TLV_TAG_STRUCT_SCAN_EVENT]
                = { .min_len = sizeof(struct wmi_scan_event) },
        [WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
@@ -62,6 +66,14 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
                = { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
        [WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
                = { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
+       [WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
+               = { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
+       [WMI_TLV_TAG_STRUCT_ROAM_EVENT]
+               = { .min_len = sizeof(struct wmi_tlv_roam_ev) },
+       [WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
+               = { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
+       [WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
+               = { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
 };
 
 static int
@@ -168,6 +180,7 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
 {
        const void **tb;
        const struct wmi_tlv_bcn_tx_status_ev *ev;
+       struct ath10k_vif *arvif;
        u32 vdev_id, tx_status;
        int ret;
 
@@ -201,6 +214,10 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
                break;
        }
 
+       arvif = ath10k_get_arvif(ar, vdev_id);
+       if (arvif && arvif->is_up && arvif->vif->csa_active)
+               ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
+
        kfree(tb);
        return 0;
 }
@@ -296,6 +313,105 @@ static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
        return 0;
 }
 
+static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
+                                       struct sk_buff *skb)
+{
+       const void **tb;
+       const struct wmi_tlv_p2p_noa_ev *ev;
+       const struct wmi_p2p_noa_info *noa;
+       int ret, vdev_id;
+
+       tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+       if (IS_ERR(tb)) {
+               ret = PTR_ERR(tb);
+               ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+               return ret;
+       }
+
+       ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
+       noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
+
+       if (!ev || !noa) {
+               kfree(tb);
+               return -EPROTO;
+       }
+
+       vdev_id = __le32_to_cpu(ev->vdev_id);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
+                  vdev_id, noa->num_descriptors);
+
+       ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+       kfree(tb);
+       return 0;
+}
+
+static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
+                                        struct sk_buff *skb)
+{
+       const void **tb;
+       const struct wmi_tlv_tx_pause_ev *ev;
+       int ret, vdev_id;
+       u32 pause_id, action, vdev_map, peer_id, tid_map;
+
+       tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+       if (IS_ERR(tb)) {
+               ret = PTR_ERR(tb);
+               ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+               return ret;
+       }
+
+       ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
+       if (!ev) {
+               kfree(tb);
+               return -EPROTO;
+       }
+
+       pause_id = __le32_to_cpu(ev->pause_id);
+       action = __le32_to_cpu(ev->action);
+       vdev_map = __le32_to_cpu(ev->vdev_map);
+       peer_id = __le32_to_cpu(ev->peer_id);
+       tid_map = __le32_to_cpu(ev->tid_map);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
+                  pause_id, action, vdev_map, peer_id, tid_map);
+
+       switch (pause_id) {
+       case WMI_TLV_TX_PAUSE_ID_MCC:
+       case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+       case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+       case WMI_TLV_TX_PAUSE_ID_AP_PS:
+       case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+               for (vdev_id = 0; vdev_map; vdev_id++) {
+                       if (!(vdev_map & BIT(vdev_id)))
+                               continue;
+
+                       vdev_map &= ~BIT(vdev_id);
+                       ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
+                                                       action);
+               }
+               break;
+       case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+       case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+       case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+       case WMI_TLV_TX_PAUSE_ID_HOST:
+               ath10k_dbg(ar, ATH10K_DBG_MAC,
+                          "mac ignoring unsupported tx pause id %d\n",
+                          pause_id);
+               break;
+       default:
+               ath10k_dbg(ar, ATH10K_DBG_MAC,
+                          "mac ignoring unknown tx pause vdev %d\n",
+                          pause_id);
+               break;
+       }
+
+       kfree(tb);
+       return 0;
+}
+
 /***********/
 /* TLV ops */
 /***********/
@@ -304,15 +420,28 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_cmd_hdr *cmd_hdr;
        enum wmi_tlv_event_id id;
+       bool consumed;
 
        cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
        id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
 
        if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
-               return;
+               goto out;
 
        trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 
+       consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+       /* The ready event must be handled normally also in UTF mode so that
+        * we know the UTF firmware has booted; other WMI events are simply
+        * passed through to testmode.
+        */
+       if (consumed && id != WMI_TLV_READY_EVENTID) {
+               ath10k_dbg(ar, ATH10K_DBG_WMI,
+                          "wmi tlv testmode consumed 0x%x\n", id);
+               goto out;
+       }
+
        switch (id) {
        case WMI_TLV_MGMT_RX_EVENTID:
                ath10k_wmi_event_mgmt_rx(ar, skb);
@@ -404,7 +533,7 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_TLV_SERVICE_READY_EVENTID:
                ath10k_wmi_event_service_ready(ar, skb);
-               break;
+               return;
        case WMI_TLV_READY_EVENTID:
                ath10k_wmi_event_ready(ar, skb);
                break;
@@ -417,11 +546,18 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
        case WMI_TLV_DIAG_EVENTID:
                ath10k_wmi_tlv_event_diag(ar, skb);
                break;
+       case WMI_TLV_P2P_NOA_EVENTID:
+               ath10k_wmi_tlv_event_p2p_noa(ar, skb);
+               break;
+       case WMI_TLV_TX_PAUSE_EVENTID:
+               ath10k_wmi_tlv_event_tx_pause(ar, skb);
+               break;
        default:
                ath10k_warn(ar, "Unknown eventid: %d\n", id);
                break;
        }
 
+out:
        dev_kfree_skb(skb);
 }
 
@@ -609,6 +745,8 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
                                         const void *ptr, void *data)
 {
        struct wmi_tlv_swba_parse *swba = data;
+       struct wmi_tim_info_arg *tim_info_arg;
+       const struct wmi_tim_info *tim_info_ev = ptr;
 
        if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
                return -EPROTO;
@@ -616,7 +754,21 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
        if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
                return -ENOBUFS;
 
-       swba->arg->tim_info[swba->n_tim++] = ptr;
+       if (__le32_to_cpu(tim_info_ev->tim_len) >
+            sizeof(tim_info_ev->tim_bitmap)) {
+               ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+               return -EPROTO;
+       }
+
+       tim_info_arg = &swba->arg->tim_info[swba->n_tim];
+       tim_info_arg->tim_len = tim_info_ev->tim_len;
+       tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
+       tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
+       tim_info_arg->tim_changed = tim_info_ev->tim_changed;
+       tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
+
+       swba->n_tim++;
+
        return 0;
 }
 
@@ -700,9 +852,9 @@ static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
        return 0;
 }
 
-static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar,
-                                           struct sk_buff *skb,
-                                           struct wmi_phyerr_ev_arg *arg)
+static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+                                               struct sk_buff *skb,
+                                               struct wmi_phyerr_hdr_arg *arg)
 {
        const void **tb;
        const struct wmi_tlv_phyerr_ev *ev;
@@ -724,10 +876,10 @@ static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar,
                return -EPROTO;
        }
 
-       arg->num_phyerrs  = ev->num_phyerrs;
-       arg->tsf_l32 = ev->tsf_l32;
-       arg->tsf_u32 = ev->tsf_u32;
-       arg->buf_len = ev->buf_len;
+       arg->num_phyerrs  = __le32_to_cpu(ev->num_phyerrs);
+       arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+       arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+       arg->buf_len = __le32_to_cpu(ev->buf_len);
        arg->phyerrs = phyerrs;
 
        kfree(tb);
@@ -1012,6 +1164,65 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
        return 0;
 }
 
+static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
+                                         struct sk_buff *skb,
+                                         struct wmi_roam_ev_arg *arg)
+{
+       const void **tb;
+       const struct wmi_tlv_roam_ev *ev;
+       int ret;
+
+       tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+       if (IS_ERR(tb)) {
+               ret = PTR_ERR(tb);
+               ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+               return ret;
+       }
+
+       ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
+       if (!ev) {
+               kfree(tb);
+               return -EPROTO;
+       }
+
+       arg->vdev_id = ev->vdev_id;
+       arg->reason = ev->reason;
+       arg->rssi = ev->rssi;
+
+       kfree(tb);
+       return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
+                             struct wmi_wow_ev_arg *arg)
+{
+       const void **tb;
+       const struct wmi_tlv_wow_event_info *ev;
+       int ret;
+
+       tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+       if (IS_ERR(tb)) {
+               ret = PTR_ERR(tb);
+               ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+               return ret;
+       }
+
+       ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
+       if (!ev) {
+               kfree(tb);
+               return -EPROTO;
+       }
+
+       arg->vdev_id = __le32_to_cpu(ev->vdev_id);
+       arg->flag = __le32_to_cpu(ev->flag);
+       arg->wake_reason = __le32_to_cpu(ev->wake_reason);
+       arg->data_len = __le32_to_cpu(ev->data_len);
+
+       kfree(tb);
+       return 0;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
 {
@@ -1082,6 +1293,11 @@ ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
        return skb;
 }
 
+static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
+{
+       return WMI_TXBF_CONF_AFTER_ASSOC;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
                                     u32 param_value)
@@ -1160,8 +1376,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
        cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
 
        if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
-               cfg->num_offload_peers = __cpu_to_le32(3);
-               cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
+               cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+               cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
        } else {
                cfg->num_offload_peers = __cpu_to_le32(0);
                cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
@@ -1176,10 +1392,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
        cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
        cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
        cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
-       cfg->rx_decap_mode = __cpu_to_le32(1);
+       cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
        cfg->scan_max_pending_reqs = __cpu_to_le32(4);
-       cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
-       cfg->roam_offload_max_vdev = __cpu_to_le32(3);
+       cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+       cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
        cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
        cfg->num_mcast_groups = __cpu_to_le32(0);
        cfg->num_mcast_table_elems = __cpu_to_le32(0);
@@ -1193,11 +1409,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
        cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
        cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
        cfg->max_frag_entries = __cpu_to_le32(2);
-       cfg->num_tdls_vdevs = __cpu_to_le32(1);
+       cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
        cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
        cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
        cfg->num_multicast_filter_entries = __cpu_to_le32(5);
-       cfg->num_wow_filters = __cpu_to_le32(0x16);
+       cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
        cfg->num_keep_alive_pattern = __cpu_to_le32(6);
        cfg->keep_alive_pattern_size = __cpu_to_le32(0);
        cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
@@ -1248,7 +1464,7 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
        cmd = (void *)tlv->value;
 
        ath10k_wmi_put_start_scan_common(&cmd->common, arg);
-       cmd->burst_duration_ms = __cpu_to_le32(0);
+       cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
        cmd->num_channels = __cpu_to_le32(arg->n_channels);
        cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
        cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
@@ -1408,8 +1624,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
        void *ptr;
        u32 flags = 0;
 
-       if (WARN_ON(arg->ssid && arg->ssid_len == 0))
-               return ERR_PTR(-EINVAL);
        if (WARN_ON(arg->hidden_ssid && !arg->ssid))
                return ERR_PTR(-EINVAL);
        if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
@@ -1782,7 +1996,8 @@ ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
 
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
-                                 const u8 peer_addr[ETH_ALEN])
+                                 const u8 peer_addr[ETH_ALEN],
+                                 enum wmi_peer_type peer_type)
 {
        struct wmi_tlv_peer_create_cmd *cmd;
        struct wmi_tlv *tlv;
@@ -1797,7 +2012,7 @@ ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
        tlv->len = __cpu_to_le16(sizeof(*cmd));
        cmd = (void *)tlv->value;
        cmd->vdev_id = __cpu_to_le32(vdev_id);
-       cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
+       cmd->peer_type = __cpu_to_le32(peer_type);
        ether_addr_copy(cmd->peer_addr.addr, peer_addr);
 
        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
@@ -2027,7 +2242,7 @@ ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
        if (!mac)
                return ERR_PTR(-EINVAL);
 
-       skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+       skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
@@ -2485,6 +2700,387 @@ ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
        return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+                                          enum wmi_tdls_state state)
+{
+       struct wmi_tdls_set_state_cmd *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       void *ptr;
+       size_t len;
+       /* Options are taken from wmi_tlv_tdls_options;
+        * for now none of them are enabled.
+        */
+       u32 options = 0;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       ptr = (void *)skb->data;
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+       cmd = (void *)tlv->value;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       cmd->state = __cpu_to_le32(state);
+       cmd->notification_interval_ms = __cpu_to_le32(5000);
+       cmd->tx_discovery_threshold = __cpu_to_le32(100);
+       cmd->tx_teardown_threshold = __cpu_to_le32(5);
+       cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+       cmd->rssi_delta = __cpu_to_le32(-20);
+       cmd->tdls_options = __cpu_to_le32(options);
+       cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+       cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+       cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+       cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+       cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*cmd);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
+                  state, vdev_id);
+       return skb;
+}
+
+static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+       u32 peer_qos = 0;
+
+       if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+               peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
+       if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+               peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
+       if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+               peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
+       if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+               peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
+
+       peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
+
+       return peer_qos;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
+                                      const struct wmi_tdls_peer_update_cmd_arg *arg,
+                                      const struct wmi_tdls_peer_capab_arg *cap,
+                                      const struct wmi_channel_arg *chan_arg)
+{
+       struct wmi_tdls_peer_update_cmd *cmd;
+       struct wmi_tdls_peer_capab *peer_cap;
+       struct wmi_channel *chan;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       u32 peer_qos;
+       void *ptr;
+       int len;
+       int i;
+
+       len = sizeof(*tlv) + sizeof(*cmd) +
+             sizeof(*tlv) + sizeof(*peer_cap) +
+             sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
+
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       ptr = (void *)skb->data;
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+       cmd = (void *)tlv->value;
+       cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+       ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+       cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*cmd);
+
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
+       tlv->len = __cpu_to_le16(sizeof(*peer_cap));
+       peer_cap = (void *)tlv->value;
+       peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
+                                                  cap->peer_max_sp);
+       peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+       peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+       peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+       peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
+       peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
+       peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
+       peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
+
+       for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
+               peer_cap->peer_operclass[i] = cap->peer_operclass[i];
+
+       peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
+       peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
+       peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*peer_cap);
+
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+       tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
+
+       ptr += sizeof(*tlv);
+
+       for (i = 0; i < cap->peer_chan_len; i++) {
+               tlv = ptr;
+               tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+               tlv->len = __cpu_to_le16(sizeof(*chan));
+               chan = (void *)tlv->value;
+               ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+
+               ptr += sizeof(*tlv);
+               ptr += sizeof(*chan);
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
+                  arg->vdev_id, arg->peer_state, cap->peer_chan_len);
+       return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
+{
+       struct wmi_tlv_wow_enable_cmd *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       tlv = (struct wmi_tlv *)skb->data;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+
+       cmd->enable = __cpu_to_le32(1);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
+       return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
+                                          u32 vdev_id,
+                                          enum wmi_wow_wakeup_event event,
+                                          u32 enable)
+{
+       struct wmi_tlv_wow_add_del_event_cmd *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       tlv = (struct wmi_tlv *)skb->data;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       cmd->is_add = __cpu_to_le32(enable);
+       cmd->event_bitmap = __cpu_to_le32(1 << event);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+                  wow_wakeup_event(event), enable, vdev_id);
+       return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
+{
+       struct wmi_tlv_wow_host_wakeup_ind *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       tlv = (struct wmi_tlv *)skb->data;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+       return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
+                                     u32 pattern_id, const u8 *pattern,
+                                     const u8 *bitmask, int pattern_len,
+                                     int pattern_offset)
+{
+       struct wmi_tlv_wow_add_pattern_cmd *cmd;
+       struct wmi_tlv_wow_bitmap_pattern *bitmap;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       void *ptr;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd) +
+             sizeof(*tlv) +                    /* array struct */
+             sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
+             sizeof(*tlv) +                    /* empty ipv4 sync */
+             sizeof(*tlv) +                    /* empty ipv6 sync */
+             sizeof(*tlv) +                    /* empty magic */
+             sizeof(*tlv) +                    /* empty info timeout */
+             sizeof(*tlv) + sizeof(u32);       /* ratelimit interval */
+
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       /* cmd */
+       ptr = (void *)skb->data;
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       cmd->pattern_id = __cpu_to_le32(pattern_id);
+       cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*cmd);
+
+       /* bitmap */
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+       tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
+
+       ptr += sizeof(*tlv);
+
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
+       tlv->len = __cpu_to_le16(sizeof(*bitmap));
+       bitmap = (void *)tlv->value;
+
+       memcpy(bitmap->patternbuf, pattern, pattern_len);
+       memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
+       bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
+       bitmap->pattern_len = __cpu_to_le32(pattern_len);
+       bitmap->bitmask_len = __cpu_to_le32(pattern_len);
+       bitmap->pattern_id = __cpu_to_le32(pattern_id);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*bitmap);
+
+       /* ipv4 sync */
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+       tlv->len = __cpu_to_le16(0);
+
+       ptr += sizeof(*tlv);
+
+       /* ipv6 sync */
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+       tlv->len = __cpu_to_le16(0);
+
+       ptr += sizeof(*tlv);
+
+       /* magic */
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+       tlv->len = __cpu_to_le16(0);
+
+       ptr += sizeof(*tlv);
+
+       /* pattern info timeout */
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+       tlv->len = __cpu_to_le16(0);
+
+       ptr += sizeof(*tlv);
+
+       /* ratelimit interval */
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+       tlv->len = __cpu_to_le16(sizeof(u32));
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
+                  vdev_id, pattern_id, pattern_offset);
+       return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
+                                     u32 pattern_id)
+{
+       struct wmi_tlv_wow_del_pattern_cmd *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       tlv = (struct wmi_tlv *)skb->data;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       cmd->pattern_id = __cpu_to_le32(pattern_id);
+       cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+                  vdev_id, pattern_id);
+       return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+       struct wmi_tlv_adaptive_qcs *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       void *ptr;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       ptr = (void *)skb->data;
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+       cmd->enable = __cpu_to_le32(enable ? 1 : 0);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*cmd);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
+       return skb;
+}
+
 /****************/
 /* TLV mappings */
 /****************/
@@ -2609,6 +3205,41 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
        .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
        .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+       .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
+       .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+       .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
+       .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+       .nan_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+       .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+       .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+       .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -2662,6 +3293,48 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
        .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
        .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
        .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+       .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+       .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+       .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
@@ -2720,6 +3393,22 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
        .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                                        WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+       .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+       .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+       .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+       .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+       .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+       .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static const struct wmi_ops wmi_tlv_ops = {
@@ -2732,10 +3421,14 @@ static const struct wmi_ops wmi_tlv_ops = {
        .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
        .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
        .pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
-       .pull_phyerr = ath10k_wmi_tlv_op_pull_phyerr_ev,
+       .pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
+       .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
        .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
        .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
        .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
+       .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
+       .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+       .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
 
        .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
@@ -2781,6 +3474,15 @@ static const struct wmi_ops wmi_tlv_ops = {
        .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
        .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
        .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+       .gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
+       .gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
+       .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
+       .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
+       .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
+       .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
+       .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
+       .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
+       .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
 };
 
 /************/