These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / net / ethernet / qlogic / qlcnic / qlcnic_sriov_common.c
1 /*
2  * QLogic qlcnic NIC Driver
3  * Copyright (c) 2009-2013 QLogic Corporation
4  *
5  * See LICENSE.qlcnic for copyright and licensing details.
6  */
7
8 #include <linux/types.h>
9
10 #include "qlcnic_sriov.h"
11 #include "qlcnic.h"
12 #include "qlcnic_83xx_hw.h"
13
/* Back-channel (BC) message direction */
#define QLC_BC_COMMAND  0
#define QLC_BC_RESPONSE 1

/* Mailbox wait limits, in jiffies */
#define QLC_MBOX_RESP_TIMEOUT           (10 * HZ)
#define QLC_MBOX_CH_FREE_TIMEOUT        (10 * HZ)

/* Bit positions signalled in the back-channel event value */
#define QLC_BC_MSG              0
#define QLC_BC_CFREE            1
#define QLC_BC_FLR              2
/* A 1 KB BC message = 16-byte header + payload */
#define QLC_BC_HDR_SZ           16
#define QLC_BC_PAYLOAD_SZ       (1024 - QLC_BC_HDR_SZ)

/* Default receive ring sizes used by an SR-IOV VF */
#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF            2048
#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF      512

#define QLC_83XX_VF_RESET_FAIL_THRESH   8
#define QLC_BC_CMD_MAX_RETRY_CNT        5

/* Forward declarations for the ops tables and work handlers below */
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
				  struct qlcnic_cmd_args *);
static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
					struct qlcnic_cmd_args *);
/* Hardware ops installed when this function runs as an SR-IOV VF.
 * Most entry points are shared with the generic 83xx code; mailbox
 * command submission (.mbx_cmd) and MAC list teardown
 * (.free_mac_list) are overridden with VF-specific implementations.
 */
static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
	.read_crb			= qlcnic_83xx_read_crb,
	.write_crb			= qlcnic_83xx_write_crb,
	.read_reg			= qlcnic_83xx_rd_reg_indirect,
	.write_reg			= qlcnic_83xx_wrt_reg_indirect,
	.get_mac_address		= qlcnic_83xx_get_mac_address,
	.setup_intr			= qlcnic_83xx_setup_intr,
	.alloc_mbx_args			= qlcnic_83xx_alloc_mbx_args,
	.mbx_cmd			= qlcnic_sriov_issue_cmd,
	.get_func_no			= qlcnic_83xx_get_func_no,
	.api_lock			= qlcnic_83xx_cam_lock,
	.api_unlock			= qlcnic_83xx_cam_unlock,
	.process_lb_rcv_ring_diag	= qlcnic_83xx_process_rcv_ring_diag,
	.create_rx_ctx			= qlcnic_83xx_create_rx_ctx,
	.create_tx_ctx			= qlcnic_83xx_create_tx_ctx,
	.del_rx_ctx			= qlcnic_83xx_del_rx_ctx,
	.del_tx_ctx			= qlcnic_83xx_del_tx_ctx,
	.setup_link_event		= qlcnic_83xx_setup_link_event,
	.get_nic_info			= qlcnic_83xx_get_nic_info,
	.get_pci_info			= qlcnic_83xx_get_pci_info,
	.set_nic_info			= qlcnic_83xx_set_nic_info,
	.change_macvlan			= qlcnic_83xx_sre_macaddr_change,
	.napi_enable			= qlcnic_83xx_napi_enable,
	.napi_disable			= qlcnic_83xx_napi_disable,
	.config_intr_coal		= qlcnic_83xx_config_intr_coal,
	.config_rss			= qlcnic_83xx_config_rss,
	.config_hw_lro			= qlcnic_83xx_config_hw_lro,
	.config_promisc_mode		= qlcnic_83xx_nic_set_promisc,
	.change_l2_filter		= qlcnic_83xx_change_l2_filter,
	.get_board_info			= qlcnic_83xx_get_port_info,
	.free_mac_list			= qlcnic_sriov_vf_free_mac_list,
	.enable_sds_intr		= qlcnic_83xx_enable_sds_intr,
	.disable_sds_intr		= qlcnic_83xx_disable_sds_intr,
};
80
/* NIC-level driver template for the VF: uses the local shutdown,
 * resume and fw-work cancel callbacks, everything else from the
 * common/83xx implementations.
 */
static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
	.config_bridged_mode	= qlcnic_config_bridged_mode,
	.config_led		= qlcnic_config_led,
	.cancel_idc_work	= qlcnic_sriov_vf_cancel_fw_work,
	.napi_add		= qlcnic_83xx_napi_add,
	.napi_del		= qlcnic_83xx_napi_del,
	.shutdown		= qlcnic_sriov_vf_shutdown,
	.resume			= qlcnic_sriov_vf_resume,
	.config_ipaddr		= qlcnic_83xx_config_ipaddr,
	.clear_legacy_intr	= qlcnic_83xx_clear_legacy_intr,
};
92
/* Back-channel mailbox command metadata:
 * {opcode, number of request args, number of response args}.
 */
static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
	{QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
	{QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
	{QLCNIC_BC_CMD_GET_ACL, 3, 14},
	{QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
};
99
100 static inline bool qlcnic_sriov_bc_msg_check(u32 val)
101 {
102         return (val & (1 << QLC_BC_MSG)) ? true : false;
103 }
104
105 static inline bool qlcnic_sriov_channel_free_check(u32 val)
106 {
107         return (val & (1 << QLC_BC_CFREE)) ? true : false;
108 }
109
110 static inline bool qlcnic_sriov_flr_check(u32 val)
111 {
112         return (val & (1 << QLC_BC_FLR)) ? true : false;
113 }
114
115 static inline u8 qlcnic_sriov_target_func_id(u32 val)
116 {
117         return (val >> 4) & 0xff;
118 }
119
/* Map VF index @vf_id to its virtual PCI function number.
 *
 * Meaningless on a VF (returns 0).  On the PF, the SR-IOV extended
 * capability supplies the VF offset and stride:
 * devfn = PF devfn + offset + stride * vf_id, truncated to 8 bits.
 *
 * NOTE(review): pci_find_ext_capability() returns 0 when the SR-IOV
 * capability is absent, which would make the config reads below hit
 * the wrong offsets -- presumably callers only reach this on SR-IOV
 * capable hardware; confirm.
 */
static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
{
	struct pci_dev *dev = adapter->pdev;
	int pos;
	u16 stride, offset;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	return (dev->devfn + offset + stride * vf_id) & 0xff;
}
135
136 int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
137 {
138         struct qlcnic_sriov *sriov;
139         struct qlcnic_back_channel *bc;
140         struct workqueue_struct *wq;
141         struct qlcnic_vport *vp;
142         struct qlcnic_vf_info *vf;
143         int err, i;
144
145         if (!qlcnic_sriov_enable_check(adapter))
146                 return -EIO;
147
148         sriov  = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
149         if (!sriov)
150                 return -ENOMEM;
151
152         adapter->ahw->sriov = sriov;
153         sriov->num_vfs = num_vfs;
154         bc = &sriov->bc;
155         sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
156                                  num_vfs, GFP_KERNEL);
157         if (!sriov->vf_info) {
158                 err = -ENOMEM;
159                 goto qlcnic_free_sriov;
160         }
161
162         wq = create_singlethread_workqueue("bc-trans");
163         if (wq == NULL) {
164                 err = -ENOMEM;
165                 dev_err(&adapter->pdev->dev,
166                         "Cannot create bc-trans workqueue\n");
167                 goto qlcnic_free_vf_info;
168         }
169
170         bc->bc_trans_wq = wq;
171
172         wq = create_singlethread_workqueue("async");
173         if (wq == NULL) {
174                 err = -ENOMEM;
175                 dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
176                 goto qlcnic_destroy_trans_wq;
177         }
178
179         bc->bc_async_wq =  wq;
180         INIT_LIST_HEAD(&bc->async_list);
181
182         for (i = 0; i < num_vfs; i++) {
183                 vf = &sriov->vf_info[i];
184                 vf->adapter = adapter;
185                 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
186                 mutex_init(&vf->send_cmd_lock);
187                 spin_lock_init(&vf->vlan_list_lock);
188                 INIT_LIST_HEAD(&vf->rcv_act.wait_list);
189                 INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
190                 spin_lock_init(&vf->rcv_act.lock);
191                 spin_lock_init(&vf->rcv_pend.lock);
192                 init_completion(&vf->ch_free_cmpl);
193
194                 INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
195
196                 if (qlcnic_sriov_pf_check(adapter)) {
197                         vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
198                         if (!vp) {
199                                 err = -ENOMEM;
200                                 goto qlcnic_destroy_async_wq;
201                         }
202                         sriov->vf_info[i].vp = vp;
203                         vp->vlan_mode = QLC_GUEST_VLAN_MODE;
204                         vp->max_tx_bw = MAX_BW;
205                         vp->min_tx_bw = MIN_BW;
206                         vp->spoofchk = false;
207                         random_ether_addr(vp->mac);
208                         dev_info(&adapter->pdev->dev,
209                                  "MAC Address %pM is configured for VF %d\n",
210                                  vp->mac, i);
211                 }
212         }
213
214         return 0;
215
216 qlcnic_destroy_async_wq:
217         destroy_workqueue(bc->bc_async_wq);
218
219 qlcnic_destroy_trans_wq:
220         destroy_workqueue(bc->bc_trans_wq);
221
222 qlcnic_free_vf_info:
223         kfree(sriov->vf_info);
224
225 qlcnic_free_sriov:
226         kfree(adapter->ahw->sriov);
227         return err;
228 }
229
/* Drain and free every transaction queued on @t_list.
 *
 * Runs with the list lock held and interrupts disabled.  Each
 * transaction's mailbox argument buffers (carried in req_pay/rsp_pay)
 * are released through a temporary qlcnic_cmd_args before the
 * transaction itself is freed.
 */
void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_cmd_args cmd;
	unsigned long flags;

	spin_lock_irqsave(&t_list->lock, flags);

	while (!list_empty(&t_list->wait_list)) {
		trans = list_first_entry(&t_list->wait_list,
					 struct qlcnic_bc_trans, list);
		list_del(&trans->list);
		t_list->count--;
		/* The payloads are the raw mailbox arg arrays */
		cmd.req.arg = (u32 *)trans->req_pay;
		cmd.rsp.arg = (u32 *)trans->rsp_pay;
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
	}

	spin_unlock_irqrestore(&t_list->lock, flags);
}
251
/* Tear down the state allocated by qlcnic_sriov_init().
 *
 * Order matters: the async list and its workqueue are torn down
 * first, then each VF's pending/active transaction lists are drained
 * (with the per-VF work cancelled in between), the transaction
 * workqueue is destroyed, and finally the vports and the vf_info
 * array are freed.
 */
void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_back_channel *bc = &sriov->bc;
	struct qlcnic_vf_info *vf;
	int i;

	if (!qlcnic_sriov_enable_check(adapter))
		return;

	qlcnic_sriov_cleanup_async_list(bc);
	destroy_workqueue(bc->bc_async_wq);

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		qlcnic_sriov_cleanup_list(&vf->rcv_pend);
		cancel_work_sync(&vf->trans_work);
		qlcnic_sriov_cleanup_list(&vf->rcv_act);
	}

	destroy_workqueue(bc->bc_trans_wq);

	for (i = 0; i < sriov->num_vfs; i++)
		kfree(sriov->vf_info[i].vp);

	kfree(sriov->vf_info);
	kfree(adapter->ahw->sriov);
}
280
/* VF-side teardown: tell the PF the channel is terminating, disable
 * back-channel interrupts, then free the SR-IOV context.  This is the
 * reverse of the bring-up order in qlcnic_sriov_setup_vf().
 */
static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
{
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	__qlcnic_sriov_cleanup(adapter);
}
287
/* Common SR-IOV cleanup entry point; a no-op unless SR-IOV was
 * enabled on this adapter.  Guest VLAN state is freed first, then the
 * PF- or VF-specific teardown runs depending on this function's role.
 */
void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
		return;

	qlcnic_sriov_free_vlans(adapter);

	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_cleanup(adapter);

	if (qlcnic_sriov_vf_check(adapter))
		qlcnic_sriov_vf_cleanup(adapter);
}
301
/* Post one back-channel fragment (@hdr + @pay, @size payload words)
 * to the firmware mailbox as a QLC_83XX_MBX_POST_BC_OP command and
 * wait for it to complete.
 *
 * Returns the enqueue error when the command could not be queued,
 * otherwise cmd.rsp_opcode as filled in by the mailbox handler.
 * NOTE(review): on completion timeout the mailbox workqueue is
 * flushed and cmd.rsp_opcode is still returned -- presumably the
 * flush lets the handler finish first; confirm.
 */
static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
				    u32 *pay, u8 pci_func, u8 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct qlcnic_cmd_args cmd;
	unsigned long timeout;
	int err;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd.hdr = hdr;
	cmd.pay = pay;
	cmd.pay_size = size;
	cmd.func_num = pci_func;
	cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
	cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;

	err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		return err;
	}

	if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		flush_workqueue(mbx->work_q);
	}

	return cmd.rsp_opcode;
}
338
/* Apply the VF default ring geometry: VF-sized receive and jumbo
 * rings, with the generic maxima for ring limits, tx descriptors and
 * RDS ring count.
 */
static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
{
	adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
	adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
	adapter->num_txd = MAX_CMD_DESCRIPTORS;
	adapter->max_rds_rings = MAX_RDS_RINGS;
}
348
349 int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
350                                    struct qlcnic_info *npar_info, u16 vport_id)
351 {
352         struct device *dev = &adapter->pdev->dev;
353         struct qlcnic_cmd_args cmd;
354         int err;
355         u32 status;
356
357         err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
358         if (err)
359                 return err;
360
361         cmd.req.arg[1] = vport_id << 16 | 0x1;
362         err = qlcnic_issue_cmd(adapter, &cmd);
363         if (err) {
364                 dev_err(&adapter->pdev->dev,
365                         "Failed to get vport info, err=%d\n", err);
366                 qlcnic_free_mbx_args(&cmd);
367                 return err;
368         }
369
370         status = cmd.rsp.arg[2] & 0xffff;
371         if (status & BIT_0)
372                 npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
373         if (status & BIT_1)
374                 npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
375         if (status & BIT_2)
376                 npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
377         if (status & BIT_3)
378                 npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
379         if (status & BIT_4)
380                 npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
381         if (status & BIT_5)
382                 npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
383         if (status & BIT_6)
384                 npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
385         if (status & BIT_7)
386                 npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
387         if (status & BIT_8)
388                 npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
389         if (status & BIT_9)
390                 npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);
391
392         npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
393         npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
394         npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
395         npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);
396
397         dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
398                  "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
399                  "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
400                  "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
401                  "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
402                  "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
403                  npar_info->min_tx_bw, npar_info->max_tx_bw,
404                  npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
405                  npar_info->max_rx_mcast_mac_filters,
406                  npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
407                  npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
408                  npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
409                  npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
410                  npar_info->max_remote_ipv6_addrs);
411
412         qlcnic_free_mbx_args(&cmd);
413         return err;
414 }
415
416 static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
417                                       struct qlcnic_cmd_args *cmd)
418 {
419         adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
420         adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
421         return 0;
422 }
423
424 static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
425                                             struct qlcnic_cmd_args *cmd)
426 {
427         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
428         int i, num_vlans;
429         u16 *vlans;
430
431         if (sriov->allowed_vlans)
432                 return 0;
433
434         sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
435         sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
436         dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
437                  sriov->num_allowed_vlans);
438
439         qlcnic_sriov_alloc_vlans(adapter);
440
441         if (!sriov->any_vlan)
442                 return 0;
443
444         num_vlans = sriov->num_allowed_vlans;
445         sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
446         if (!sriov->allowed_vlans)
447                 return -ENOMEM;
448
449         vlans = (u16 *)&cmd->rsp.arg[3];
450         for (i = 0; i < num_vlans; i++)
451                 sriov->allowed_vlans[i] = vlans[i];
452
453         return 0;
454 }
455
456 static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
457 {
458         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
459         struct qlcnic_cmd_args cmd;
460         int ret = 0;
461
462         memset(&cmd, 0, sizeof(cmd));
463         ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
464         if (ret)
465                 return ret;
466
467         ret = qlcnic_issue_cmd(adapter, &cmd);
468         if (ret) {
469                 dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
470                         ret);
471         } else {
472                 sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
473                 switch (sriov->vlan_mode) {
474                 case QLC_GUEST_VLAN_MODE:
475                         ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
476                         break;
477                 case QLC_PVID_MODE:
478                         ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
479                         break;
480                 }
481         }
482
483         qlcnic_free_mbx_args(&cmd);
484         return ret;
485 }
486
/* First-contact initialisation of a VF: pull vport resource limits,
 * per-function NIC info and port info from firmware, then size the
 * receive rings and cache the hardware attributes on the context.
 *
 * Returns 0 on success, a negative errno from the vport query, or
 * -EIO when NIC/port info cannot be read (the underlying error code
 * is deliberately collapsed to -EIO).
 */
static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_info nic_info;
	int err;

	err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
	if (err)
		return err;

	ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;

	err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
	if (err)
		return -EIO;

	if (qlcnic_83xx_get_port_info(adapter))
		return -EIO;

	qlcnic_sriov_vf_cfg_buff_desc(adapter);
	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
	dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
		 adapter->ahw->fw_hal_version);

	/* Cache the firmware-reported attributes for later use */
	ahw->physical_port = (u8) nic_info.phys_port;
	ahw->switch_mode = nic_info.switch_mode;
	ahw->max_mtu = nic_info.max_mtu;
	ahw->op_mode = nic_info.op_mode;
	ahw->capabilities = nic_info.capabilities;
	return 0;
}
518
/* Full VF bring-up sequence: interrupts, mailbox, SR-IOV context,
 * back-channel interrupt + channel-init handshake with the PF, driver
 * init, netdev registration, and finally the periodic device-state
 * poll.  Each failure unwinds exactly the steps completed so far via
 * the goto chain at the bottom.
 *
 * Returns 0 on success or a negative errno.
 */
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
				 int pci_using_dac)
{
	int err;

	adapter->flags |= QLCNIC_VLAN_FILTERING;
	adapter->ahw->total_nic_func = 1;
	INIT_LIST_HEAD(&adapter->vf_mc_list);
	if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
		dev_warn(&adapter->pdev->dev,
			 "Device does not support MSI interrupts\n");

	/* compute and set default and max tx/sds rings */
	qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
	qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);

	err = qlcnic_setup_intr(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
		goto err_out_disable_msi;
	}

	err = qlcnic_83xx_setup_mbx_intr(adapter);
	if (err)
		goto err_out_disable_msi;

	/* A VF manages exactly one function, hence num_vfs = 1 */
	err = qlcnic_sriov_init(adapter, 1);
	if (err)
		goto err_out_disable_mbx_intr;

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		goto err_out_cleanup_sriov;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_disable_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_sriov_get_vf_acl(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
	if (err)
		goto err_out_send_channel_term;

	pci_set_drvdata(adapter->pdev, adapter);
	dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
		 adapter->netdev->name);

	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     adapter->ahw->idc.delay);
	return 0;

err_out_send_channel_term:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_disable_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);

err_out_cleanup_sriov:
	__qlcnic_sriov_cleanup(adapter);

err_out_disable_mbx_intr:
	qlcnic_83xx_free_mbx_intr(adapter);

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);
	return err;
}
593
594 static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
595 {
596         u32 state;
597
598         do {
599                 msleep(20);
600                 if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
601                         return -EIO;
602                 state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
603         } while (state != QLC_83XX_IDC_DEV_READY);
604
605         return 0;
606 }
607
/* Bring up the adapter in SR-IOV VF mode: reset the IDC bookkeeping,
 * wait for the device state to reach READY, run the VF setup
 * sequence, read the MAC address and arm the idc AEN work item.
 *
 * Returns 0 on success or a negative errno from the ready-wait or
 * setup steps.
 */
int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int err;

	set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
	ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
	ahw->reset_context = 0;
	adapter->fw_fail_cnt = 0;
	ahw->msix_supported = 1;
	adapter->need_fw_reset = 0;
	adapter->flags |= QLCNIC_TX_INTR_SHARED;

	err = qlcnic_sriov_check_dev_ready(adapter);
	if (err)
		return err;

	err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
	if (err)
		return err;

	/* A missing MAC is non-fatal; continue bring-up with a warning */
	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");

	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}
637
638 void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
639 {
640         struct qlcnic_hardware_context *ahw = adapter->ahw;
641
642         ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
643         dev_info(&adapter->pdev->dev,
644                  "HAL Version: %d Non Privileged SRIOV function\n",
645                  ahw->fw_hal_version);
646         adapter->nic_ops = &qlcnic_sriov_vf_ops;
647         set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
648         return;
649 }
650
/* Point the hardware context at the VF hardware ops table and the
 * shared 83xx register tables.
 */
void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
{
	ahw->hw_ops		= &qlcnic_sriov_vf_hw_ops;
	ahw->reg_tbl		= (u32 *)qlcnic_83xx_reg_tbl;
	ahw->ext_reg_tbl	= (u32 *)qlcnic_83xx_ext_reg_tbl;
}
657
658 static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
659 {
660         u32 pay_size;
661
662         pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
663
664         if (pay_size)
665                 pay_size = QLC_BC_PAYLOAD_SZ;
666         else
667                 pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
668
669         return pay_size;
670 }
671
672 int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
673 {
674         struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
675         u8 i;
676
677         if (qlcnic_sriov_vf_check(adapter))
678                 return 0;
679
680         for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
681                 if (vf_info[i].pci_func == pci_func)
682                         return i;
683         }
684
685         return -EINVAL;
686 }
687
688 static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
689 {
690         *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
691         if (!*trans)
692                 return -ENOMEM;
693
694         init_completion(&(*trans)->resp_cmpl);
695         return 0;
696 }
697
698 static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
699                                             u32 size)
700 {
701         *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
702         if (!*hdr)
703                 return -ENOMEM;
704
705         return 0;
706 }
707
708 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
709 {
710         const struct qlcnic_mailbox_metadata *mbx_tbl;
711         int i, size;
712
713         mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
714         size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
715
716         for (i = 0; i < size; i++) {
717                 if (type == mbx_tbl[i].cmd) {
718                         mbx->op_type = QLC_BC_CMD;
719                         mbx->req.num = mbx_tbl[i].in_args;
720                         mbx->rsp.num = mbx_tbl[i].out_args;
721                         mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
722                                                GFP_ATOMIC);
723                         if (!mbx->req.arg)
724                                 return -ENOMEM;
725                         mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
726                                                GFP_ATOMIC);
727                         if (!mbx->rsp.arg) {
728                                 kfree(mbx->req.arg);
729                                 mbx->req.arg = NULL;
730                                 return -ENOMEM;
731                         }
732                         mbx->req.arg[0] = (type | (mbx->req.num << 16) |
733                                            (3 << 29));
734                         mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
735                         return 0;
736                 }
737         }
738         return -EINVAL;
739 }
740
/* Populate the fragment headers of a back-channel transaction.
 *
 * For a command (@msg_type == QLC_BC_COMMAND) the mailbox argument
 * arrays become the request/response payloads and one header per
 * QLC_BC_PAYLOAD_SZ-sized fragment is allocated for each direction.
 * For a response, the payloads already attached to @trans are exposed
 * back through @cmd and the response headers are stamped instead.
 *
 * Returns 0 on success or -ENOMEM when a header array cannot be
 * allocated.
 * NOTE(review): when the rsp_hdr allocation fails, the already
 * allocated req_hdr stays attached to @trans -- presumably released
 * later via qlcnic_sriov_cleanup_transaction(); confirm callers
 * always take that path.
 */
static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
				       struct qlcnic_cmd_args *cmd,
				       u16 seq, u8 msg_type)
{
	struct qlcnic_bc_hdr *hdr;
	int i;
	u32 num_regs, bc_pay_sz;
	u16 remainder;
	u8 cmd_op, num_frags, t_num_frags;

	bc_pay_sz = QLC_BC_PAYLOAD_SZ;
	if (msg_type == QLC_BC_COMMAND) {
		trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
		trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
		num_regs = cmd->req.num;
		trans->req_pay_size = (num_regs * 4);
		num_regs = cmd->rsp.num;
		trans->rsp_pay_size = (num_regs * 4);
		cmd_op = cmd->req.arg[0] & 0xff;
		/* Round the request fragment count up for any remainder */
		remainder = (trans->req_pay_size) % (bc_pay_sz);
		num_frags = (trans->req_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		t_num_frags = num_frags;
		if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
			return -ENOMEM;
		/* Same rounding for the response direction */
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
			return -ENOMEM;
		num_frags  = t_num_frags;
		hdr = trans->req_hdr;
	}  else {
		/* Response path: reuse the payloads carried by @trans */
		cmd->req.arg = (u32 *)trans->req_pay;
		cmd->rsp.arg = (u32 *)trans->rsp_pay;
		cmd_op = cmd->req.arg[0] & 0xff;
		cmd->cmd_op = cmd_op;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		cmd->req.num = trans->req_pay_size / 4;
		cmd->rsp.num = trans->rsp_pay_size / 4;
		hdr = trans->rsp_hdr;
		cmd->op_type = trans->req_hdr->op_type;
	}

	trans->trans_id = seq;
	trans->cmd_id = cmd_op;
	/* Stamp every fragment header; frag_num is 1-based */
	for (i = 0; i < num_frags; i++) {
		hdr[i].version = 2;
		hdr[i].msg_type = msg_type;
		hdr[i].op_type = cmd->op_type;
		hdr[i].num_cmds = 1;
		hdr[i].num_frags = num_frags;
		hdr[i].frag_num = i + 1;
		hdr[i].cmd_op = cmd_op;
		hdr[i].seq_id = seq;
	}
	return 0;
}
804
805 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
806 {
807         if (!trans)
808                 return;
809         kfree(trans->req_hdr);
810         kfree(trans->rsp_hdr);
811         kfree(trans);
812 }
813
814 static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
815                                     struct qlcnic_bc_trans *trans, u8 type)
816 {
817         struct qlcnic_trans_list *t_list;
818         unsigned long flags;
819         int ret = 0;
820
821         if (type == QLC_BC_RESPONSE) {
822                 t_list = &vf->rcv_act;
823                 spin_lock_irqsave(&t_list->lock, flags);
824                 t_list->count--;
825                 list_del(&trans->list);
826                 if (t_list->count > 0)
827                         ret = 1;
828                 spin_unlock_irqrestore(&t_list->lock, flags);
829         }
830         if (type == QLC_BC_COMMAND) {
831                 while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
832                         msleep(100);
833                 vf->send_cmd = NULL;
834                 clear_bit(QLC_BC_VF_SEND, &vf->state);
835         }
836         return ret;
837 }
838
839 static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
840                                          struct qlcnic_vf_info *vf,
841                                          work_func_t func)
842 {
843         if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
844             vf->adapter->need_fw_reset)
845                 return;
846
847         queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
848 }
849
850 static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
851 {
852         struct completion *cmpl = &trans->resp_cmpl;
853
854         if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
855                 trans->trans_state = QLC_END;
856         else
857                 trans->trans_state = QLC_ABORT;
858
859         return;
860 }
861
862 static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
863                                             u8 type)
864 {
865         if (type == QLC_BC_RESPONSE) {
866                 trans->curr_rsp_frag++;
867                 if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
868                         trans->trans_state = QLC_INIT;
869                 else
870                         trans->trans_state = QLC_END;
871         } else {
872                 trans->curr_req_frag++;
873                 if (trans->curr_req_frag < trans->req_hdr->num_frags)
874                         trans->trans_state = QLC_INIT;
875                 else
876                         trans->trans_state = QLC_WAIT_FOR_RESP;
877         }
878 }
879
880 static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
881                                                u8 type)
882 {
883         struct qlcnic_vf_info *vf = trans->vf;
884         struct completion *cmpl = &vf->ch_free_cmpl;
885
886         if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
887                 trans->trans_state = QLC_ABORT;
888                 return;
889         }
890
891         clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
892         qlcnic_sriov_handle_multi_frags(trans, type);
893 }
894
/* Copy one back-channel message fragment out of the FW mailbox registers.
 *
 * The fragment header (struct qlcnic_bc_hdr) is read starting at mailbox
 * register 2, followed by @size bytes of payload; both are copied one u32
 * at a time into @hdr and @pay respectively.
 *
 * NOTE(review): mailbox register 0 is read into fw_mbx but the value is
 * never used -- presumably the read itself is part of the mailbox
 * protocol; confirm before removing it.
 */
static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
				     u32 *hdr, u32 *pay, u32 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	u32 fw_mbx;
	u8 i, max = 2, hdr_size, j;

	/* header length in u32 words; total words = header + payload */
	hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
	max = (size / sizeof(u32)) + hdr_size;

	fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
	for (i = 2, j = 0; j < hdr_size; i++, j++)
		*(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
	for (; j < max; i++, j++)
		*(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
}
911
912 static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
913 {
914         int ret = -EBUSY;
915         u32 timeout = 10000;
916
917         do {
918                 if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
919                         ret = 0;
920                         break;
921                 }
922                 mdelay(1);
923         } while (--timeout);
924
925         return ret;
926 }
927
928 static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
929 {
930         struct qlcnic_vf_info *vf = trans->vf;
931         u32 pay_size, hdr_size;
932         u32 *hdr, *pay;
933         int ret;
934         u8 pci_func = trans->func_id;
935
936         if (__qlcnic_sriov_issue_bc_post(vf))
937                 return -EBUSY;
938
939         if (type == QLC_BC_COMMAND) {
940                 hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
941                 pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
942                 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
943                 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
944                                                        trans->curr_req_frag);
945                 pay_size = (pay_size / sizeof(u32));
946         } else {
947                 hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
948                 pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
949                 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
950                 pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
951                                                        trans->curr_rsp_frag);
952                 pay_size = (pay_size / sizeof(u32));
953         }
954
955         ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
956                                        pci_func, pay_size);
957         return ret;
958 }
959
/* Drive a back-channel transaction's state machine to completion.
 *
 * Repeatedly dispatches on trans->trans_state: QLC_INIT posts the next
 * fragment, QLC_WAIT_FOR_CHANNEL_FREE waits for the mailbox channel
 * between fragments, and QLC_WAIT_FOR_RESP waits for the peer's answer.
 * A pending VF FLR or firmware reset forces QLC_ABORT.
 *
 * Returns 0 when the transaction reaches QLC_END, -EIO otherwise.
 */
static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
				      struct qlcnic_vf_info *vf, u8 type)
{
	bool flag = true;
	int err = -EIO;

	while (flag) {
		/* abort immediately if the VF is being torn down/reset */
		if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
		    vf->adapter->need_fw_reset)
			trans->trans_state = QLC_ABORT;

		switch (trans->trans_state) {
		case QLC_INIT:
			trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
			if (qlcnic_sriov_issue_bc_post(trans, type))
				trans->trans_state = QLC_ABORT;
			break;
		case QLC_WAIT_FOR_CHANNEL_FREE:
			qlcnic_sriov_wait_for_channel_free(trans, type);
			break;
		case QLC_WAIT_FOR_RESP:
			qlcnic_sriov_wait_for_resp(trans);
			break;
		case QLC_END:
			err = 0;
			flag = false;
			break;
		case QLC_ABORT:
			err = -EIO;
			flag = false;
			/* release channel ownership a failed post may hold */
			clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
			break;
		default:
			err = -EIO;
			flag = false;
		}
	}
	return err;
}
999
1000 static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
1001                                     struct qlcnic_bc_trans *trans, int pci_func)
1002 {
1003         struct qlcnic_vf_info *vf;
1004         int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
1005
1006         if (index < 0)
1007                 return -EIO;
1008
1009         vf = &adapter->ahw->sriov->vf_info[index];
1010         trans->vf = vf;
1011         trans->func_id = pci_func;
1012
1013         if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
1014                 if (qlcnic_sriov_pf_check(adapter))
1015                         return -EIO;
1016                 if (qlcnic_sriov_vf_check(adapter) &&
1017                     trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
1018                         return -EIO;
1019         }
1020
1021         mutex_lock(&vf->send_cmd_lock);
1022         vf->send_cmd = trans;
1023         err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
1024         qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
1025         mutex_unlock(&vf->send_cmd_lock);
1026         return err;
1027 }
1028
1029 static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
1030                                           struct qlcnic_bc_trans *trans,
1031                                           struct qlcnic_cmd_args *cmd)
1032 {
1033 #ifdef CONFIG_QLCNIC_SRIOV
1034         if (qlcnic_sriov_pf_check(adapter)) {
1035                 qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
1036                 return;
1037         }
1038 #endif
1039         cmd->rsp.arg[0] |= (0x9 << 25);
1040         return;
1041 }
1042
/* Workqueue handler: service the oldest command on the VF's receive-active
 * list, send the response back over the mailbox, and reschedule itself
 * while further commands remain queued.
 *
 * NOTE(review): rcv_act.wait_list is consumed with list_first_entry()
 * without an emptiness check; the scheduling in
 * __qlcnic_sriov_add_act_list()/qlcnic_sriov_clear_trans() appears to
 * guarantee the list is non-empty whenever this runs -- confirm before
 * relying on it.
 */
static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
{
	struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
						 trans_work);
	struct qlcnic_bc_trans *trans = NULL;
	struct qlcnic_adapter *adapter  = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u8 req;

	/* skip processing during firmware reset or while this VF is in FLR */
	if (adapter->need_fw_reset)
		return;

	if (test_bit(QLC_BC_VF_FLR, &vf->state))
		return;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	trans = list_first_entry(&vf->rcv_act.wait_list,
				 struct qlcnic_bc_trans, list);
	adapter = vf->adapter;

	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
					QLC_BC_RESPONSE))
		goto cleanup_trans;

	__qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
	trans->trans_state = QLC_INIT;
	__qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);

cleanup_trans:
	qlcnic_free_mbx_args(&cmd);
	/* clear_trans() returns non-zero when more commands are queued */
	req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
	qlcnic_sriov_cleanup_transaction(trans);
	if (req)
		qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
}
1079
/* Consume one response fragment for the command this VF has in flight.
 *
 * The QLC_BC_VF_SEND bit serializes access to vf->send_cmd with the
 * sender; when the bit is already held the event is silently dropped.
 * The fragment is ignored unless its sequence id matches the outstanding
 * transaction.  When the final fragment arrives, the sender blocked in
 * qlcnic_sriov_wait_for_resp() is woken via resp_cmpl.
 */
static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
					struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	u32 pay_size;

	if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
		return;

	trans = vf->send_cmd;

	if (trans == NULL)
		goto clear_send;

	if (trans->trans_id != hdr->seq_id)
		goto clear_send;

	/* copy this fragment into the per-fragment slot of the response */
	pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
					       trans->curr_rsp_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
				 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
				 pay_size);
	if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
		goto clear_send;

	complete(&trans->resp_cmpl);

clear_send:
	clear_bit(QLC_BC_VF_SEND, &vf->state);
}
1111
1112 int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1113                                 struct qlcnic_vf_info *vf,
1114                                 struct qlcnic_bc_trans *trans)
1115 {
1116         struct qlcnic_trans_list *t_list = &vf->rcv_act;
1117
1118         t_list->count++;
1119         list_add_tail(&trans->list, &t_list->wait_list);
1120         if (t_list->count == 1)
1121                 qlcnic_sriov_schedule_bc_cmd(sriov, vf,
1122                                              qlcnic_sriov_process_bc_cmd);
1123         return 0;
1124 }
1125
1126 static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1127                                      struct qlcnic_vf_info *vf,
1128                                      struct qlcnic_bc_trans *trans)
1129 {
1130         struct qlcnic_trans_list *t_list = &vf->rcv_act;
1131
1132         spin_lock(&t_list->lock);
1133
1134         __qlcnic_sriov_add_act_list(sriov, vf, trans);
1135
1136         spin_unlock(&t_list->lock);
1137         return 0;
1138 }
1139
/* Continue a multi-fragment command whose earlier fragments were already
 * received (the transaction sits on vf->rcv_pend).
 *
 * Looks the transaction up by sequence id, pulls the next fragment from
 * the mailbox, and once all fragments have arrived moves the transaction
 * from the pending list to the receive-active list for processing.
 */
static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
					      struct qlcnic_vf_info *vf,
					      struct qlcnic_bc_hdr *hdr)
{
	struct qlcnic_bc_trans *trans = NULL;
	struct list_head *node;
	u32 pay_size, curr_frag;
	u8 found = 0, active = 0;

	spin_lock(&vf->rcv_pend.lock);
	if (vf->rcv_pend.count > 0) {
		list_for_each(node, &vf->rcv_pend.wait_list) {
			trans = list_entry(node, struct qlcnic_bc_trans, list);
			if (trans->trans_id == hdr->seq_id) {
				found = 1;
				break;
			}
		}
	}

	if (found) {
		/* copy this fragment into its slot of the request buffer */
		curr_frag = trans->curr_req_frag;
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       curr_frag);
		qlcnic_sriov_pull_bc_msg(vf->adapter,
					 (u32 *)(trans->req_hdr + curr_frag),
					 (u32 *)(trans->req_pay + curr_frag),
					 pay_size);
		trans->curr_req_frag++;
		/* last fragment: promote from pending to active */
		if (trans->curr_req_frag >= hdr->num_frags) {
			vf->rcv_pend.count--;
			list_del(&trans->list);
			active = 1;
		}
	}
	spin_unlock(&vf->rcv_pend.lock);

	if (active)
		if (qlcnic_sriov_add_act_list(sriov, vf, trans))
			qlcnic_sriov_cleanup_transaction(trans);

	return;
}
1183
/* Handle the first fragment of an incoming back-channel command.
 *
 * Fragments after the first are routed to the pending-transaction path.
 * For a first fragment: allocate a transaction and mailbox args, pull
 * the fragment from the mailbox, then either queue the transaction on
 * the receive-active list (single-fragment command complete) or park it
 * on rcv_pend until the remaining fragments arrive.
 *
 * NOTE(review): when qlcnic_sriov_soft_flr_check() returns non-zero this
 * function returns without freeing cmd's mailbox args -- presumably the
 * soft-FLR path takes ownership of the transaction; confirm.
 */
static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
				       struct qlcnic_bc_hdr *hdr,
				       struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u32 pay_size;
	int err;
	u8 cmd_op;

	if (adapter->need_fw_reset)
		return;

	/* before the channel is up, only CHANNEL_INIT is accepted */
	if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
	    hdr->op_type != QLC_BC_CMD &&
	    hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
		return;

	if (hdr->frag_num > 1) {
		qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
		return;
	}

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd_op = hdr->cmd_op;
	if (qlcnic_sriov_alloc_bc_trans(&trans))
		return;

	if (hdr->op_type == QLC_BC_CMD)
		err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
	else
		err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);

	if (err) {
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	cmd.op_type = hdr->op_type;
	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
					QLC_BC_COMMAND)) {
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	/* copy the first fragment out of the mailbox registers */
	pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
					 trans->curr_req_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->req_hdr + trans->curr_req_frag),
				 (u32 *)(trans->req_pay + trans->curr_req_frag),
				 pay_size);
	trans->func_id = vf->pci_func;
	trans->vf = vf;
	trans->trans_id = hdr->seq_id;
	trans->curr_req_frag++;

	if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
		return;

	if (trans->curr_req_frag == trans->req_hdr->num_frags) {
		if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
			qlcnic_free_mbx_args(&cmd);
			qlcnic_sriov_cleanup_transaction(trans);
		}
	} else {
		/* more fragments to come: park on the pending list */
		spin_lock(&vf->rcv_pend.lock);
		list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
		vf->rcv_pend.count++;
		spin_unlock(&vf->rcv_pend.lock);
	}
}
1257
1258 static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
1259                                           struct qlcnic_vf_info *vf)
1260 {
1261         struct qlcnic_bc_hdr hdr;
1262         u32 *ptr = (u32 *)&hdr;
1263         u8 msg_type, i;
1264
1265         for (i = 2; i < 6; i++)
1266                 ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
1267         msg_type = hdr.msg_type;
1268
1269         switch (msg_type) {
1270         case QLC_BC_COMMAND:
1271                 qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
1272                 break;
1273         case QLC_BC_RESPONSE:
1274                 qlcnic_sriov_handle_bc_resp(&hdr, vf);
1275                 break;
1276         }
1277 }
1278
1279 static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
1280                                           struct qlcnic_vf_info *vf)
1281 {
1282         struct qlcnic_adapter *adapter = vf->adapter;
1283
1284         if (qlcnic_sriov_pf_check(adapter))
1285                 qlcnic_sriov_pf_handle_flr(sriov, vf);
1286         else
1287                 dev_err(&adapter->pdev->dev,
1288                         "Invalid event to VF. VF should not get FLR event\n");
1289 }
1290
1291 void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
1292 {
1293         struct qlcnic_vf_info *vf;
1294         struct qlcnic_sriov *sriov;
1295         int index;
1296         u8 pci_func;
1297
1298         sriov = adapter->ahw->sriov;
1299         pci_func = qlcnic_sriov_target_func_id(event);
1300         index = qlcnic_sriov_func_to_index(adapter, pci_func);
1301
1302         if (index < 0)
1303                 return;
1304
1305         vf = &sriov->vf_info[index];
1306         vf->pci_func = pci_func;
1307
1308         if (qlcnic_sriov_channel_free_check(event))
1309                 complete(&vf->ch_free_cmpl);
1310
1311         if (qlcnic_sriov_flr_check(event)) {
1312                 qlcnic_sriov_handle_flr_event(sriov, vf);
1313                 return;
1314         }
1315
1316         if (qlcnic_sriov_bc_msg_check(event))
1317                 qlcnic_sriov_handle_msg_event(sriov, vf);
1318 }
1319
1320 int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
1321 {
1322         struct qlcnic_cmd_args cmd;
1323         int err;
1324
1325         if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
1326                 return 0;
1327
1328         if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
1329                 return -ENOMEM;
1330
1331         if (enable)
1332                 cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1333
1334         err = qlcnic_83xx_issue_cmd(adapter, &cmd);
1335
1336         if (err != QLCNIC_RCODE_SUCCESS) {
1337                 dev_err(&adapter->pdev->dev,
1338                         "Failed to %s bc events, err=%d\n",
1339                         (enable ? "enable" : "disable"), err);
1340         }
1341
1342         qlcnic_free_mbx_args(&cmd);
1343         return err;
1344 }
1345
1346 static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
1347                                      struct qlcnic_bc_trans *trans)
1348 {
1349         u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
1350         u32 state;
1351
1352         state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1353         if (state == QLC_83XX_IDC_DEV_READY) {
1354                 msleep(20);
1355                 clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
1356                 trans->trans_state = QLC_INIT;
1357                 if (++adapter->fw_fail_cnt > max)
1358                         return -EIO;
1359                 else
1360                         return 0;
1361         }
1362
1363         return -EIO;
1364 }
1365
/* Issue a mailbox command from the VF to the PF over the back channel.
 *
 * Builds a transaction around @cmd, sends it, and interprets the
 * response status.  A timeout on CHANNEL_INIT is retried (see
 * qlcnic_sriov_retry_bc_cmd()); any other timeout marks the adapter for
 * firmware reset.  For QLC_83XX_MBX_CMD_NO_WAIT commands this function
 * owns @cmd and frees both its args and the struct before returning.
 *
 * Returns QLCNIC_RCODE_SUCCESS, a mailbox error code, QLCNIC_RCODE_TIMEOUT,
 * or a negative errno from setup failures.
 */
static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
				  struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_bc_trans *trans;
	int err;
	u32 rsp_data, opcode, mbx_err_code, rsp;
	u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
	u8 func = ahw->pci_func;

	rsp = qlcnic_sriov_alloc_bc_trans(&trans);
	if (rsp)
		goto free_cmd;

	rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
	if (rsp)
		goto cleanup_transaction;

retry:
	if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
		rsp = -EIO;
		QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
		      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
		goto err_out;
	}

	err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
	if (err) {
		dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
			(cmd->req.arg[0] & 0xffff), func);
		rsp = QLCNIC_RCODE_TIMEOUT;

		/* After adapter reset PF driver may take some time to
		 * respond to VF's request. Retry request till maximum retries.
		 */
		if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
		    !qlcnic_sriov_retry_bc_cmd(adapter, trans))
			goto retry;

		goto err_out;
	}

	rsp_data = cmd->rsp.arg[0];
	mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
	opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);

	if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
	    (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
		rsp = QLCNIC_RCODE_SUCCESS;
	} else {
		/* fire-and-forget commands ignore mailbox errors */
		if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
			rsp = QLCNIC_RCODE_SUCCESS;
		} else {
			rsp = mbx_err_code;
			if (!rsp)
				rsp = 1;

			dev_err(dev,
				"MBX command 0x%x failed with err:0x%x for VF %d\n",
				opcode, mbx_err_code, func);
		}
	}

err_out:
	/* a timeout leaves the mailbox in an unknown state: force reset */
	if (rsp == QLCNIC_RCODE_TIMEOUT) {
		ahw->reset_context = 1;
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
	}

cleanup_transaction:
	qlcnic_sriov_cleanup_transaction(trans);

free_cmd:
	if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
		qlcnic_free_mbx_args(cmd);
		kfree(cmd);
	}

	return rsp;
}
1449
1450
1451 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1452                                   struct qlcnic_cmd_args *cmd)
1453 {
1454         if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
1455                 return qlcnic_sriov_async_issue_cmd(adapter, cmd);
1456         else
1457                 return __qlcnic_sriov_issue_cmd(adapter, cmd);
1458 }
1459
1460 static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
1461 {
1462         struct qlcnic_cmd_args cmd;
1463         struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
1464         int ret;
1465
1466         memset(&cmd, 0, sizeof(cmd));
1467         if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
1468                 return -ENOMEM;
1469
1470         ret = qlcnic_issue_cmd(adapter, &cmd);
1471         if (ret) {
1472                 dev_err(&adapter->pdev->dev,
1473                         "Failed bc channel %s %d\n", cmd_op ? "term" : "init",
1474                         ret);
1475                 goto out;
1476         }
1477
1478         cmd_op = (cmd.rsp.arg[0] & 0xff);
1479         if (cmd.rsp.arg[0] >> 25 == 2)
1480                 return 2;
1481         if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
1482                 set_bit(QLC_BC_VF_STATE, &vf->state);
1483         else
1484                 clear_bit(QLC_BC_VF_STATE, &vf->state);
1485
1486 out:
1487         qlcnic_free_mbx_args(&cmd);
1488         return ret;
1489 }
1490
1491 static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
1492                                   enum qlcnic_mac_type mac_type)
1493 {
1494         struct qlcnic_adapter *adapter = netdev_priv(netdev);
1495         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1496         struct qlcnic_vf_info *vf;
1497         u16 vlan_id;
1498         int i;
1499
1500         vf = &adapter->ahw->sriov->vf_info[0];
1501
1502         if (!qlcnic_sriov_check_any_vlan(vf)) {
1503                 qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
1504         } else {
1505                 spin_lock(&vf->vlan_list_lock);
1506                 for (i = 0; i < sriov->num_allowed_vlans; i++) {
1507                         vlan_id = vf->sriov_vlans[i];
1508                         if (vlan_id)
1509                                 qlcnic_nic_add_mac(adapter, mac, vlan_id,
1510                                                    mac_type);
1511                 }
1512                 spin_unlock(&vf->vlan_list_lock);
1513                 if (qlcnic_84xx_check(adapter))
1514                         qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
1515         }
1516 }
1517
1518 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1519 {
1520         struct list_head *head = &bc->async_list;
1521         struct qlcnic_async_work_list *entry;
1522
1523         flush_workqueue(bc->bc_async_wq);
1524         while (!list_empty(head)) {
1525                 entry = list_entry(head->next, struct qlcnic_async_work_list,
1526                                    list);
1527                 cancel_work_sync(&entry->work);
1528                 list_del(&entry->list);
1529                 kfree(entry);
1530         }
1531 }
1532
/* ndo_set_rx_mode handler for the VF: program MAC filters and choose the
 * VPORT miss mode (drop / accept-multi / accept-all) from the netdev
 * flags and the number of configured unicast/multicast addresses.
 */
void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	static const u8 bcast_addr[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	struct netdev_hw_addr *ha;
	u32 mode = VPORT_MISS_MODE_DROP;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return;

	if (netdev->flags & IFF_PROMISC) {
		if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
			mode = VPORT_MISS_MODE_ACCEPT_ALL;
	} else if ((netdev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
		/* too many multicast entries to filter individually */
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
	} else {
		qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC);
		if (!netdev_mc_empty(netdev)) {
			qlcnic_flush_mcast_mac(adapter);
			netdev_for_each_mc_addr(ha, netdev)
				qlcnic_vf_add_mc_list(netdev, ha->addr,
						      QLCNIC_MULTICAST_MAC);
		}
	}

	/* configure unicast MAC address, if there is not sufficient space
	 * to store all the unicast addresses then enable promiscuous mode
	 */
	if (netdev_uc_count(netdev) > ahw->max_uc_count) {
		mode = VPORT_MISS_MODE_ACCEPT_ALL;
	} else if (!netdev_uc_empty(netdev)) {
		netdev_for_each_uc_addr(ha, netdev)
			qlcnic_vf_add_mc_list(netdev, ha->addr,
					      QLCNIC_UNICAST_MAC);
	}

	/* on a virtual function, driver MAC learning tracks promisc mode */
	if (adapter->pdev->is_virtfn) {
		if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
		    !adapter->fdb_mac_learn) {
			qlcnic_alloc_lb_filters_mem(adapter);
			adapter->drv_mac_learn = 1;
			adapter->rx_mac_learn = true;
		} else {
			adapter->drv_mac_learn = 0;
			adapter->rx_mac_learn = false;
		}
	}

	qlcnic_nic_set_promisc(adapter, mode);
}
1587
1588 static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
1589 {
1590         struct qlcnic_async_work_list *entry;
1591         struct qlcnic_adapter *adapter;
1592         struct qlcnic_cmd_args *cmd;
1593
1594         entry = container_of(work, struct qlcnic_async_work_list, work);
1595         adapter = entry->ptr;
1596         cmd = entry->cmd;
1597         __qlcnic_sriov_issue_cmd(adapter, cmd);
1598         return;
1599 }
1600
1601 static struct qlcnic_async_work_list *
1602 qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
1603 {
1604         struct list_head *node;
1605         struct qlcnic_async_work_list *entry = NULL;
1606         u8 empty = 0;
1607
1608         list_for_each(node, &bc->async_list) {
1609                 entry = list_entry(node, struct qlcnic_async_work_list, list);
1610                 if (!work_pending(&entry->work)) {
1611                         empty = 1;
1612                         break;
1613                 }
1614         }
1615
1616         if (!empty) {
1617                 entry = kzalloc(sizeof(struct qlcnic_async_work_list),
1618                                 GFP_ATOMIC);
1619                 if (entry == NULL)
1620                         return NULL;
1621                 list_add_tail(&entry->list, &bc->async_list);
1622         }
1623
1624         return entry;
1625 }
1626
1627 static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
1628                                             work_func_t func, void *data,
1629                                             struct qlcnic_cmd_args *cmd)
1630 {
1631         struct qlcnic_async_work_list *entry = NULL;
1632
1633         entry = qlcnic_sriov_get_free_node_async_work(bc);
1634         if (!entry)
1635                 return;
1636
1637         entry->ptr = data;
1638         entry->cmd = cmd;
1639         INIT_WORK(&entry->work, func);
1640         queue_work(bc->bc_async_wq, &entry->work);
1641 }
1642
1643 static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
1644                                         struct qlcnic_cmd_args *cmd)
1645 {
1646
1647         struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1648
1649         if (adapter->need_fw_reset)
1650                 return -EIO;
1651
1652         qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
1653                                         adapter, cmd);
1654         return 0;
1655 }
1656
/* Bring the VF driver back up after a firmware reset: rearm the
 * mailbox work, re-enable mailbox interrupts, re-enable back-channel
 * interrupts, re-open the channel to the PF and re-run VF driver
 * init.  On failure the completed steps are undone in reverse order.
 *
 * Returns 0 on success or a negative errno from the failing step.
 */
static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
{
	int err;

	adapter->need_fw_reset = 0;
	qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
	qlcnic_83xx_enable_mbx_interrupt(adapter);

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		return err;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_cleanup_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_term_channel;

	return 0;

err_out_term_channel:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_cleanup_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	return err;
}
1686
1687 static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
1688 {
1689         struct net_device *netdev = adapter->netdev;
1690
1691         if (netif_running(netdev)) {
1692                 if (!qlcnic_up(adapter, netdev))
1693                         qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1694         }
1695
1696         netif_device_attach(netdev);
1697 }
1698
1699 static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
1700 {
1701         struct qlcnic_hardware_context *ahw = adapter->ahw;
1702         struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
1703         struct net_device *netdev = adapter->netdev;
1704         u8 i, max_ints = ahw->num_msix - 1;
1705
1706         netif_device_detach(netdev);
1707         qlcnic_83xx_detach_mailbox_work(adapter);
1708         qlcnic_83xx_disable_mbx_intr(adapter);
1709
1710         if (netif_running(netdev))
1711                 qlcnic_down(adapter, netdev);
1712
1713         for (i = 0; i < max_ints; i++) {
1714                 intr_tbl[i].id = i;
1715                 intr_tbl[i].enabled = 0;
1716                 intr_tbl[i].src = 0;
1717         }
1718         ahw->reset_context = 0;
1719 }
1720
1721 static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
1722 {
1723         struct qlcnic_hardware_context *ahw = adapter->ahw;
1724         struct device *dev = &adapter->pdev->dev;
1725         struct qlc_83xx_idc *idc = &ahw->idc;
1726         u8 func = ahw->pci_func;
1727         u32 state;
1728
1729         if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
1730             (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
1731                 if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1732                         qlcnic_sriov_vf_attach(adapter);
1733                         adapter->fw_fail_cnt = 0;
1734                         dev_info(dev,
1735                                  "%s: Reinitialization of VF 0x%x done after FW reset\n",
1736                                  __func__, func);
1737                 } else {
1738                         dev_err(dev,
1739                                 "%s: Reinitialization of VF 0x%x failed after FW reset\n",
1740                                 __func__, func);
1741                         state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1742                         dev_info(dev, "Current state 0x%x after FW reset\n",
1743                                  state);
1744                 }
1745         }
1746
1747         return 0;
1748 }
1749
/* Handle a context-reset request raised while the device stayed in
 * READY state.  The first two attempts only flag a firmware reset and
 * wait to see whether FW is actually hung; once the attempt count
 * exceeds QLC_83XX_VF_RESET_FAIL_THRESH the VF interface is shut
 * down.  Otherwise the VF is detached and fully reinitialized.
 *
 * Returns 0, or -EIO when the reset threshold has been exceeded.
 */
static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	adapter->reset_ctx_cnt++;

	/* Skip the context reset and check if FW is hung */
	if (adapter->reset_ctx_cnt < 3) {
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		dev_info(dev,
			 "Resetting context, wait here to check if FW is in failed state\n");
		return 0;
	}

	/* Check if number of resets exceed the threshold.
	 * If it exceeds the threshold just fail the VF.
	 */
	if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
		clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
		adapter->tx_timeo_cnt = 0;
		adapter->fw_fail_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		qlcnic_sriov_vf_detach(adapter);
		dev_err(dev,
			"Device context resets have exceeded the threshold, device interface will be shutdown\n");
		return -EIO;
	}

	dev_info(dev, "Resetting context of VF 0x%x\n", func);
	dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
		 __func__, adapter->reset_ctx_cnt, func);
	set_bit(__QLCNIC_RESETTING, &adapter->state);
	/* need_fw_reset is held only across the detach so async
	 * back-channel commands are refused meanwhile (see
	 * qlcnic_sriov_async_issue_cmd).
	 */
	adapter->need_fw_reset = 1;
	clear_bit(QLC_83XX_MBX_READY, &mbx->status);
	qlcnic_sriov_vf_detach(adapter);
	adapter->need_fw_reset = 0;

	if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
		qlcnic_sriov_vf_attach(adapter);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		adapter->fw_fail_cnt = 0;
		dev_info(dev, "Done resetting context for VF 0x%x\n", func);
	} else {
		dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
			__func__, func);
		state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
		dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
	}

	return 0;
}
1808
1809 static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
1810 {
1811         struct qlcnic_hardware_context *ahw = adapter->ahw;
1812         int ret = 0;
1813
1814         if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
1815                 ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
1816         else if (ahw->reset_context)
1817                 ret = qlcnic_sriov_vf_handle_context_reset(adapter);
1818
1819         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1820         return ret;
1821 }
1822
1823 static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
1824 {
1825         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1826
1827         dev_err(&adapter->pdev->dev, "Device is in failed state\n");
1828         if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
1829                 qlcnic_sriov_vf_detach(adapter);
1830
1831         clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1832         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1833         return -EIO;
1834 }
1835
1836 static int
1837 qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
1838 {
1839         struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1840         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1841
1842         dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
1843         if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1844                 set_bit(__QLCNIC_RESETTING, &adapter->state);
1845                 adapter->tx_timeo_cnt = 0;
1846                 adapter->reset_ctx_cnt = 0;
1847                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1848                 qlcnic_sriov_vf_detach(adapter);
1849         }
1850
1851         return 0;
1852 }
1853
1854 static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
1855 {
1856         struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1857         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1858         u8 func = adapter->ahw->pci_func;
1859
1860         if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1861                 dev_err(&adapter->pdev->dev,
1862                         "Firmware hang detected by VF 0x%x\n", func);
1863                 set_bit(__QLCNIC_RESETTING, &adapter->state);
1864                 adapter->tx_timeo_cnt = 0;
1865                 adapter->reset_ctx_cnt = 0;
1866                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1867                 qlcnic_sriov_vf_detach(adapter);
1868         }
1869         return 0;
1870 }
1871
1872 static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
1873 {
1874         dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
1875         return 0;
1876 }
1877
1878 static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
1879 {
1880         if (adapter->fhash.fnum)
1881                 qlcnic_prune_lb_filters(adapter);
1882 }
1883
1884 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1885 {
1886         struct qlcnic_adapter *adapter;
1887         struct qlc_83xx_idc *idc;
1888         int ret = 0;
1889
1890         adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
1891         idc = &adapter->ahw->idc;
1892         idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1893
1894         switch (idc->curr_state) {
1895         case QLC_83XX_IDC_DEV_READY:
1896                 ret = qlcnic_sriov_vf_idc_ready_state(adapter);
1897                 break;
1898         case QLC_83XX_IDC_DEV_NEED_RESET:
1899         case QLC_83XX_IDC_DEV_INIT:
1900                 ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
1901                 break;
1902         case QLC_83XX_IDC_DEV_NEED_QUISCENT:
1903                 ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
1904                 break;
1905         case QLC_83XX_IDC_DEV_FAILED:
1906                 ret = qlcnic_sriov_vf_idc_failed_state(adapter);
1907                 break;
1908         case QLC_83XX_IDC_DEV_QUISCENT:
1909                 break;
1910         default:
1911                 ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
1912         }
1913
1914         idc->prev_state = idc->curr_state;
1915         qlcnic_sriov_vf_periodic_tasks(adapter);
1916
1917         if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
1918                 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
1919                                      idc->delay);
1920 }
1921
1922 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
1923 {
1924         while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1925                 msleep(20);
1926
1927         clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
1928         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1929         cancel_delayed_work_sync(&adapter->fw_work);
1930 }
1931
1932 static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1933                                       struct qlcnic_vf_info *vf, u16 vlan_id)
1934 {
1935         int i, err = -EINVAL;
1936
1937         if (!vf->sriov_vlans)
1938                 return err;
1939
1940         spin_lock_bh(&vf->vlan_list_lock);
1941
1942         for (i = 0; i < sriov->num_allowed_vlans; i++) {
1943                 if (vf->sriov_vlans[i] == vlan_id) {
1944                         err = 0;
1945                         break;
1946                 }
1947         }
1948
1949         spin_unlock_bh(&vf->vlan_list_lock);
1950         return err;
1951 }
1952
1953 static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
1954                                            struct qlcnic_vf_info *vf)
1955 {
1956         int err = 0;
1957
1958         spin_lock_bh(&vf->vlan_list_lock);
1959
1960         if (vf->num_vlan >= sriov->num_allowed_vlans)
1961                 err = -EINVAL;
1962
1963         spin_unlock_bh(&vf->vlan_list_lock);
1964         return err;
1965 }
1966
1967 static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
1968                                           u16 vid, u8 enable)
1969 {
1970         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1971         struct qlcnic_vf_info *vf;
1972         bool vlan_exist;
1973         u8 allowed = 0;
1974         int i;
1975
1976         vf = &adapter->ahw->sriov->vf_info[0];
1977         vlan_exist = qlcnic_sriov_check_any_vlan(vf);
1978         if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
1979                 return -EINVAL;
1980
1981         if (enable) {
1982                 if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
1983                         return -EINVAL;
1984
1985                 if (qlcnic_sriov_validate_num_vlans(sriov, vf))
1986                         return -EINVAL;
1987
1988                 if (sriov->any_vlan) {
1989                         for (i = 0; i < sriov->num_allowed_vlans; i++) {
1990                                 if (sriov->allowed_vlans[i] == vid)
1991                                         allowed = 1;
1992                         }
1993
1994                         if (!allowed)
1995                                 return -EINVAL;
1996                 }
1997         } else {
1998                 if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
1999                         return -EINVAL;
2000         }
2001
2002         return 0;
2003 }
2004
2005 static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
2006                                         enum qlcnic_vlan_operations opcode)
2007 {
2008         struct qlcnic_adapter *adapter = vf->adapter;
2009         struct qlcnic_sriov *sriov;
2010
2011         sriov = adapter->ahw->sriov;
2012
2013         if (!vf->sriov_vlans)
2014                 return;
2015
2016         spin_lock_bh(&vf->vlan_list_lock);
2017
2018         switch (opcode) {
2019         case QLC_VLAN_ADD:
2020                 qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
2021                 break;
2022         case QLC_VLAN_DELETE:
2023                 qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
2024                 break;
2025         default:
2026                 netdev_err(adapter->netdev, "Invalid VLAN operation\n");
2027         }
2028
2029         spin_unlock_bh(&vf->vlan_list_lock);
2030         return;
2031 }
2032
/* Configure (@enable != 0) or remove guest VLAN @vid on the VF by
 * sending a back-channel mailbox command to the PF, then refresh the
 * driver's MAC/VLAN bookkeeping to match.  vid 0 is silently ignored.
 *
 * Returns 0 on success or a negative errno from validation, mailbox
 * argument allocation or command execution.
 */
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
				   u16 vid, u8 enable)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_vf_info *vf;
	struct qlcnic_cmd_args cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	if (vid == 0)
		return 0;

	vf = &adapter->ahw->sriov->vf_info[0];
	ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
	if (ret)
		return ret;

	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
					     QLCNIC_BC_CMD_CFG_GUEST_VLAN);
	if (ret)
		return ret;

	/* arg[1]: bit 0 = enable/disable, bits 31:16 = VLAN id */
	cmd.req.arg[1] = (enable & 1) | vid << 16;

	qlcnic_sriov_cleanup_async_list(&sriov->bc);
	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure guest VLAN, err=%d\n", ret);
	} else {
		/* Drop stale MAC filters, record the VLAN change, then
		 * reprogram the address lists for the new VLAN setup.
		 */
		netif_addr_lock_bh(netdev);
		qlcnic_free_mac_list(adapter);
		netif_addr_unlock_bh(netdev);

		if (enable)
			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
		else
			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);

		netif_addr_lock_bh(netdev);
		qlcnic_set_multi(netdev);
		netif_addr_unlock_bh(netdev);
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}
2081
2082 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
2083 {
2084         struct list_head *head = &adapter->mac_list;
2085         struct qlcnic_mac_vlan_list *cur;
2086
2087         while (!list_empty(head)) {
2088                 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
2089                 qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
2090                                           QLCNIC_MAC_DEL);
2091                 list_del(&cur->list);
2092                 kfree(cur);
2093         }
2094 }
2095
2096
2097 static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
2098 {
2099         struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2100         struct net_device *netdev = adapter->netdev;
2101         int retval;
2102
2103         netif_device_detach(netdev);
2104         qlcnic_cancel_idc_work(adapter);
2105
2106         if (netif_running(netdev))
2107                 qlcnic_down(adapter, netdev);
2108
2109         qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
2110         qlcnic_sriov_cfg_bc_intr(adapter, 0);
2111         qlcnic_83xx_disable_mbx_intr(adapter);
2112         cancel_delayed_work_sync(&adapter->idc_aen_work);
2113
2114         retval = pci_save_state(pdev);
2115         if (retval)
2116                 return retval;
2117
2118         return 0;
2119 }
2120
2121 static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
2122 {
2123         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
2124         struct net_device *netdev = adapter->netdev;
2125         int err;
2126
2127         set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
2128         qlcnic_83xx_enable_mbx_interrupt(adapter);
2129         err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
2130         if (err)
2131                 return err;
2132
2133         err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
2134         if (!err) {
2135                 if (netif_running(netdev)) {
2136                         err = qlcnic_up(adapter, netdev);
2137                         if (!err)
2138                                 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2139                 }
2140         }
2141
2142         netif_device_attach(netdev);
2143         qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
2144                              idc->delay);
2145         return err;
2146 }
2147
2148 void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
2149 {
2150         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2151         struct qlcnic_vf_info *vf;
2152         int i;
2153
2154         for (i = 0; i < sriov->num_vfs; i++) {
2155                 vf = &sriov->vf_info[i];
2156                 vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
2157                                           sizeof(*vf->sriov_vlans), GFP_KERNEL);
2158         }
2159 }
2160
2161 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
2162 {
2163         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2164         struct qlcnic_vf_info *vf;
2165         int i;
2166
2167         for (i = 0; i < sriov->num_vfs; i++) {
2168                 vf = &sriov->vf_info[i];
2169                 kfree(vf->sriov_vlans);
2170                 vf->sriov_vlans = NULL;
2171         }
2172 }
2173
2174 void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
2175                               struct qlcnic_vf_info *vf, u16 vlan_id)
2176 {
2177         int i;
2178
2179         for (i = 0; i < sriov->num_allowed_vlans; i++) {
2180                 if (!vf->sriov_vlans[i]) {
2181                         vf->sriov_vlans[i] = vlan_id;
2182                         vf->num_vlan++;
2183                         return;
2184                 }
2185         }
2186 }
2187
2188 void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
2189                               struct qlcnic_vf_info *vf, u16 vlan_id)
2190 {
2191         int i;
2192
2193         for (i = 0; i < sriov->num_allowed_vlans; i++) {
2194                 if (vf->sriov_vlans[i] == vlan_id) {
2195                         vf->sriov_vlans[i] = 0;
2196                         vf->num_vlan--;
2197                         return;
2198                 }
2199         }
2200 }
2201
2202 bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
2203 {
2204         bool err = false;
2205
2206         spin_lock_bh(&vf->vlan_list_lock);
2207
2208         if (vf->num_vlan)
2209                 err = true;
2210
2211         spin_unlock_bh(&vf->vlan_list_lock);
2212         return err;
2213 }