/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
25 void (*rx)(struct ath10k *ar, struct sk_buff *skb);
26 void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
28 int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
29 struct wmi_scan_ev_arg *arg);
30 int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
31 struct wmi_mgmt_rx_ev_arg *arg);
32 int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
33 struct wmi_ch_info_ev_arg *arg);
34 int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
35 struct wmi_vdev_start_ev_arg *arg);
36 int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
37 struct wmi_peer_kick_ev_arg *arg);
38 int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
39 struct wmi_swba_ev_arg *arg);
40 int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
41 struct wmi_phyerr_ev_arg *arg);
42 int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
43 struct wmi_svc_rdy_ev_arg *arg);
44 int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
45 struct wmi_rdy_ev_arg *arg);
46 int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
47 struct ath10k_fw_stats *stats);
49 struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
50 struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
51 struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
52 u16 rd5g, u16 ctl2g, u16 ctl5g,
53 enum wmi_dfs_region dfs_reg);
54 struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
56 struct sk_buff *(*gen_init)(struct ath10k *ar);
57 struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
58 const struct wmi_start_scan_arg *arg);
59 struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
60 const struct wmi_stop_scan_arg *arg);
61 struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
62 enum wmi_vdev_type type,
63 enum wmi_vdev_subtype subtype,
64 const u8 macaddr[ETH_ALEN]);
65 struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
66 struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
67 const struct wmi_vdev_start_request_arg *arg,
69 struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
70 struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
72 struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
73 struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
74 u32 param_id, u32 param_value);
75 struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
76 const struct wmi_vdev_install_key_arg *arg);
77 struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
78 const struct wmi_vdev_spectral_conf_arg *arg);
79 struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
80 u32 trigger, u32 enable);
81 struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
82 const struct wmi_wmm_params_all_arg *arg);
83 struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
84 const u8 peer_addr[ETH_ALEN]);
85 struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
86 const u8 peer_addr[ETH_ALEN]);
87 struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
88 const u8 peer_addr[ETH_ALEN],
90 struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
92 enum wmi_peer_param param_id,
94 struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
95 const struct wmi_peer_assoc_complete_arg *arg);
96 struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
97 enum wmi_sta_ps_mode psmode);
98 struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
99 enum wmi_sta_powersave_param param_id,
101 struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
103 enum wmi_ap_ps_peer_param param_id,
105 struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
106 const struct wmi_scan_chan_list_arg *arg);
107 struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
108 const void *bcn, size_t bcn_len,
109 u32 bcn_paddr, bool dtim_zero,
111 struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
112 const struct wmi_wmm_params_all_arg *arg);
113 struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
114 struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
115 enum wmi_force_fw_hang_type type,
117 struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
118 struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
120 struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
121 struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
122 struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
123 u32 period, u32 duration,
126 struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
127 struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
129 struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
130 const u8 *mac, u32 tid, u32 buf_size);
131 struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
132 const u8 *mac, u32 tid,
134 struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
135 const u8 *mac, u32 tid, u32 initiator,
137 struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
138 u32 tim_ie_offset, struct sk_buff *bcn,
139 u32 prb_caps, u32 prb_erp,
140 void *prb_ies, size_t prb_ies_len);
141 struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
142 struct sk_buff *bcn);
143 struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
145 struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
146 const u8 peer_addr[ETH_ALEN],
147 const struct wmi_sta_uapsd_auto_trig_arg *args,
149 struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
150 const struct wmi_sta_keepalive_arg *arg);
153 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
156 ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
158 if (WARN_ON_ONCE(!ar->wmi.ops->rx))
161 ar->wmi.ops->rx(ar, skb);
166 ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
169 if (!ar->wmi.ops->map_svc)
172 ar->wmi.ops->map_svc(in, out, len);
177 ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
178 struct wmi_scan_ev_arg *arg)
180 if (!ar->wmi.ops->pull_scan)
183 return ar->wmi.ops->pull_scan(ar, skb, arg);
187 ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
188 struct wmi_mgmt_rx_ev_arg *arg)
190 if (!ar->wmi.ops->pull_mgmt_rx)
193 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
197 ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
198 struct wmi_ch_info_ev_arg *arg)
200 if (!ar->wmi.ops->pull_ch_info)
203 return ar->wmi.ops->pull_ch_info(ar, skb, arg);
207 ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
208 struct wmi_vdev_start_ev_arg *arg)
210 if (!ar->wmi.ops->pull_vdev_start)
213 return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
217 ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
218 struct wmi_peer_kick_ev_arg *arg)
220 if (!ar->wmi.ops->pull_peer_kick)
223 return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
227 ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
228 struct wmi_swba_ev_arg *arg)
230 if (!ar->wmi.ops->pull_swba)
233 return ar->wmi.ops->pull_swba(ar, skb, arg);
237 ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
238 struct wmi_phyerr_ev_arg *arg)
240 if (!ar->wmi.ops->pull_phyerr)
243 return ar->wmi.ops->pull_phyerr(ar, skb, arg);
247 ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
248 struct wmi_svc_rdy_ev_arg *arg)
250 if (!ar->wmi.ops->pull_svc_rdy)
253 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
257 ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
258 struct wmi_rdy_ev_arg *arg)
260 if (!ar->wmi.ops->pull_rdy)
263 return ar->wmi.ops->pull_rdy(ar, skb, arg);
267 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
268 struct ath10k_fw_stats *stats)
270 if (!ar->wmi.ops->pull_fw_stats)
273 return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
277 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
279 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
283 if (!ar->wmi.ops->gen_mgmt_tx)
286 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
290 ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
294 /* FIXME There's no ACK event for Management Tx. This probably
295 * shouldn't be called here either. */
296 info->flags |= IEEE80211_TX_STAT_ACK;
297 ieee80211_tx_status_irqsafe(ar->hw, msdu);
303 ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
304 u16 ctl2g, u16 ctl5g,
305 enum wmi_dfs_region dfs_reg)
309 if (!ar->wmi.ops->gen_pdev_set_rd)
312 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
317 return ath10k_wmi_cmd_send(ar, skb,
318 ar->wmi.cmd->pdev_set_regdomain_cmdid);
322 ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
326 if (!ar->wmi.ops->gen_pdev_suspend)
329 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
333 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
337 ath10k_wmi_pdev_resume_target(struct ath10k *ar)
341 if (!ar->wmi.ops->gen_pdev_resume)
344 skb = ar->wmi.ops->gen_pdev_resume(ar);
348 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
352 ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
356 if (!ar->wmi.ops->gen_pdev_set_param)
359 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
363 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
367 ath10k_wmi_cmd_init(struct ath10k *ar)
371 if (!ar->wmi.ops->gen_init)
374 skb = ar->wmi.ops->gen_init(ar);
378 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
382 ath10k_wmi_start_scan(struct ath10k *ar,
383 const struct wmi_start_scan_arg *arg)
387 if (!ar->wmi.ops->gen_start_scan)
390 skb = ar->wmi.ops->gen_start_scan(ar, arg);
394 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
398 ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
402 if (!ar->wmi.ops->gen_stop_scan)
405 skb = ar->wmi.ops->gen_stop_scan(ar, arg);
409 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
413 ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
414 enum wmi_vdev_type type,
415 enum wmi_vdev_subtype subtype,
416 const u8 macaddr[ETH_ALEN])
420 if (!ar->wmi.ops->gen_vdev_create)
423 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
427 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
431 ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
435 if (!ar->wmi.ops->gen_vdev_delete)
438 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
442 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
446 ath10k_wmi_vdev_start(struct ath10k *ar,
447 const struct wmi_vdev_start_request_arg *arg)
451 if (!ar->wmi.ops->gen_vdev_start)
454 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
458 return ath10k_wmi_cmd_send(ar, skb,
459 ar->wmi.cmd->vdev_start_request_cmdid);
463 ath10k_wmi_vdev_restart(struct ath10k *ar,
464 const struct wmi_vdev_start_request_arg *arg)
468 if (!ar->wmi.ops->gen_vdev_start)
471 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
475 return ath10k_wmi_cmd_send(ar, skb,
476 ar->wmi.cmd->vdev_restart_request_cmdid);
480 ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
484 if (!ar->wmi.ops->gen_vdev_stop)
487 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
491 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
495 ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
499 if (!ar->wmi.ops->gen_vdev_up)
502 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
506 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
510 ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
514 if (!ar->wmi.ops->gen_vdev_down)
517 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
521 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
525 ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
530 if (!ar->wmi.ops->gen_vdev_set_param)
533 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
538 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
542 ath10k_wmi_vdev_install_key(struct ath10k *ar,
543 const struct wmi_vdev_install_key_arg *arg)
547 if (!ar->wmi.ops->gen_vdev_install_key)
550 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
554 return ath10k_wmi_cmd_send(ar, skb,
555 ar->wmi.cmd->vdev_install_key_cmdid);
559 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
560 const struct wmi_vdev_spectral_conf_arg *arg)
565 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
569 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
570 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
574 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
580 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
585 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
586 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
590 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
591 const u8 peer_addr[ETH_ALEN],
592 const struct wmi_sta_uapsd_auto_trig_arg *args,
598 if (!ar->wmi.ops->gen_vdev_sta_uapsd)
601 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
606 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
607 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
611 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
612 const struct wmi_wmm_params_all_arg *arg)
617 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
621 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
622 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
626 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
627 const u8 peer_addr[ETH_ALEN])
631 if (!ar->wmi.ops->gen_peer_create)
634 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
638 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
642 ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
643 const u8 peer_addr[ETH_ALEN])
647 if (!ar->wmi.ops->gen_peer_delete)
650 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
654 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
658 ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
659 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
663 if (!ar->wmi.ops->gen_peer_flush)
666 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
670 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
674 ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
675 enum wmi_peer_param param_id, u32 param_value)
679 if (!ar->wmi.ops->gen_peer_set_param)
682 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
687 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
691 ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
692 enum wmi_sta_ps_mode psmode)
696 if (!ar->wmi.ops->gen_set_psmode)
699 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
703 return ath10k_wmi_cmd_send(ar, skb,
704 ar->wmi.cmd->sta_powersave_mode_cmdid);
708 ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
709 enum wmi_sta_powersave_param param_id, u32 value)
713 if (!ar->wmi.ops->gen_set_sta_ps)
716 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
720 return ath10k_wmi_cmd_send(ar, skb,
721 ar->wmi.cmd->sta_powersave_param_cmdid);
725 ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
726 enum wmi_ap_ps_peer_param param_id, u32 value)
730 if (!ar->wmi.ops->gen_set_ap_ps)
733 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
737 return ath10k_wmi_cmd_send(ar, skb,
738 ar->wmi.cmd->ap_ps_peer_param_cmdid);
742 ath10k_wmi_scan_chan_list(struct ath10k *ar,
743 const struct wmi_scan_chan_list_arg *arg)
747 if (!ar->wmi.ops->gen_scan_chan_list)
750 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
754 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
758 ath10k_wmi_peer_assoc(struct ath10k *ar,
759 const struct wmi_peer_assoc_complete_arg *arg)
763 if (!ar->wmi.ops->gen_peer_assoc)
766 skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
770 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
774 ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
775 const void *bcn, size_t bcn_len,
776 u32 bcn_paddr, bool dtim_zero,
782 if (!ar->wmi.ops->gen_beacon_dma)
785 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
786 dtim_zero, deliver_cab);
790 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
791 ar->wmi.cmd->pdev_send_bcn_cmdid);
801 ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
802 const struct wmi_wmm_params_all_arg *arg)
806 if (!ar->wmi.ops->gen_pdev_set_wmm)
809 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
813 return ath10k_wmi_cmd_send(ar, skb,
814 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
818 ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
822 if (!ar->wmi.ops->gen_request_stats)
825 skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
829 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
833 ath10k_wmi_force_fw_hang(struct ath10k *ar,
834 enum wmi_force_fw_hang_type type, u32 delay_ms)
838 if (!ar->wmi.ops->gen_force_fw_hang)
841 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
845 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
849 ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
853 if (!ar->wmi.ops->gen_dbglog_cfg)
856 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
860 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
864 ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
868 if (!ar->wmi.ops->gen_pktlog_enable)
871 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
875 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
879 ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
883 if (!ar->wmi.ops->gen_pktlog_disable)
886 skb = ar->wmi.ops->gen_pktlog_disable(ar);
890 return ath10k_wmi_cmd_send(ar, skb,
891 ar->wmi.cmd->pdev_pktlog_disable_cmdid);
895 ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
896 u32 next_offset, u32 enabled)
900 if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
903 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
904 next_offset, enabled);
908 return ath10k_wmi_cmd_send(ar, skb,
909 ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
913 ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
917 if (!ar->wmi.ops->gen_pdev_get_temperature)
920 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
924 return ath10k_wmi_cmd_send(ar, skb,
925 ar->wmi.cmd->pdev_get_temperature_cmdid);
929 ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
933 if (!ar->wmi.ops->gen_addba_clear_resp)
936 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
940 return ath10k_wmi_cmd_send(ar, skb,
941 ar->wmi.cmd->addba_clear_resp_cmdid);
945 ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
946 u32 tid, u32 buf_size)
950 if (!ar->wmi.ops->gen_addba_send)
953 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
957 return ath10k_wmi_cmd_send(ar, skb,
958 ar->wmi.cmd->addba_send_cmdid);
962 ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
967 if (!ar->wmi.ops->gen_addba_set_resp)
970 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
974 return ath10k_wmi_cmd_send(ar, skb,
975 ar->wmi.cmd->addba_set_resp_cmdid);
979 ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
980 u32 tid, u32 initiator, u32 reason)
984 if (!ar->wmi.ops->gen_delba_send)
987 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
992 return ath10k_wmi_cmd_send(ar, skb,
993 ar->wmi.cmd->delba_send_cmdid);
997 ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
998 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
999 void *prb_ies, size_t prb_ies_len)
1001 struct sk_buff *skb;
1003 if (!ar->wmi.ops->gen_bcn_tmpl)
1006 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1007 prb_caps, prb_erp, prb_ies,
1010 return PTR_ERR(skb);
1012 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1016 ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1018 struct sk_buff *skb;
1020 if (!ar->wmi.ops->gen_prb_tmpl)
1023 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1025 return PTR_ERR(skb);
1027 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1031 ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1033 struct sk_buff *skb;
1035 if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1038 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1040 return PTR_ERR(skb);
1042 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1046 ath10k_wmi_sta_keepalive(struct ath10k *ar,
1047 const struct wmi_sta_keepalive_arg *arg)
1049 struct sk_buff *skb;
1052 if (!ar->wmi.ops->gen_sta_keepalive)
1055 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1057 return PTR_ERR(skb);
1059 cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1060 return ath10k_wmi_cmd_send(ar, skb, cmd_id);