These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
kvmfornfv.git: kernel/drivers/net/ethernet/qlogic/qed/qed_dev.c
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

/* API common to all protocols */
void qed_init_dp(struct qed_dev *cdev,
                 u32 dp_module, u8 dp_level)
{
        u32 i;

        cdev->dp_level = dp_level;
        cdev->dp_module = dp_module;
        for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->dp_level = dp_level;
                p_hwfn->dp_module = dp_module;
        }
}

void qed_init_struct(struct qed_dev *cdev)
{
        u8 i;

        for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->cdev = cdev;
                p_hwfn->my_id = i;
                p_hwfn->b_active = false;

                mutex_init(&p_hwfn->dmae_info.mutex);
        }

        /* hwfn 0 is always active */
        cdev->hwfns[0].b_active = true;

        /* set the default cache alignment to 128 */
        cdev->cache_shift = 7;
}
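
/* Editor's note: cache_shift is an exponent, not a byte count - a shift
 * of 7 yields 1 << 7 == 128, the 128-byte default cache alignment that
 * the comment in qed_init_struct() above refers to.
 */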

static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;

        kfree(qm_info->qm_pq_params);
        qm_info->qm_pq_params = NULL;
        kfree(qm_info->qm_vport_params);
        qm_info->qm_vport_params = NULL;
        kfree(qm_info->qm_port_params);
        qm_info->qm_port_params = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
        int i;

        kfree(cdev->fw_data);
        cdev->fw_data = NULL;

        kfree(cdev->reset_stats);

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                kfree(p_hwfn->p_tx_cids);
                p_hwfn->p_tx_cids = NULL;
                kfree(p_hwfn->p_rx_cids);
                p_hwfn->p_rx_cids = NULL;
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_cxt_mngr_free(p_hwfn);
                qed_qm_info_free(p_hwfn);
                qed_spq_free(p_hwfn);
                qed_eq_free(p_hwfn, p_hwfn->p_eq);
                qed_consq_free(p_hwfn, p_hwfn->p_consq);
                qed_int_free(p_hwfn);
                qed_dmae_info_free(p_hwfn);
        }
}
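
/* kfree(NULL) is a no-op, so qed_resc_free() is safe to use as the
 * unified error path of qed_resc_alloc() below even when only part of
 * the resources were successfully allocated (the remaining pointers are
 * either zero-initialized or reset to NULL above).
 */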

static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct init_qm_port_params *p_qm_port;
        u8 num_vports, i, vport_id, num_ports;
        u16 num_pqs, multi_cos_tcs = 1;

        memset(qm_info, 0, sizeof(*qm_info));

        num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
        num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

        /* Sanity check that the setup requires a legal number of resources */
        if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
                DP_ERR(p_hwfn,
                       "Need too many Physical queues - 0x%04x when only 0x%04x are available\n",
                       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
                return -EINVAL;
        }

        /* PQs will be arranged as follows: first the per-TC PQs, then the
         * pure-LB queue.
         */
        qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
                                        num_pqs, GFP_ATOMIC);
        if (!qm_info->qm_pq_params)
                goto alloc_err;

        qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
                                           num_vports, GFP_ATOMIC);
        if (!qm_info->qm_vport_params)
                goto alloc_err;

        qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
                                          MAX_NUM_PORTS, GFP_ATOMIC);
        if (!qm_info->qm_port_params)
                goto alloc_err;

        vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

        /* First init per-TC PQs */
        for (i = 0; i < multi_cos_tcs; i++) {
                struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];

                params->vport_id = vport_id;
                params->tc_id = p_hwfn->hw_info.non_offload_tc;
                params->wrr_group = 1;
        }

        /* Then init pure-LB PQ */
        qm_info->pure_lb_pq = i;
        qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
        qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
        qm_info->qm_pq_params[i].wrr_group = 1;
        i++;

        qm_info->offload_pq = 0;
        qm_info->num_pqs = num_pqs;
        qm_info->num_vports = num_vports;

        /* Initialize qm port parameters */
        num_ports = p_hwfn->cdev->num_ports_in_engines;
        for (i = 0; i < num_ports; i++) {
                p_qm_port = &qm_info->qm_port_params[i];
                p_qm_port->active = 1;
                p_qm_port->num_active_phys_tcs = 4;
                p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
                p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
        }

        qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

        qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

        qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);

        qm_info->pf_wfq = 0;
        qm_info->pf_rl = 0;
        qm_info->vport_rl_en = 1;

        return 0;

alloc_err:
        DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
        kfree(qm_info->qm_pq_params);
        kfree(qm_info->qm_vport_params);
        kfree(qm_info->qm_port_params);

        return -ENOMEM;
}
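
/* Illustrative sketch (not part of the driver): with multi_cos_tcs == 1,
 * qed_init_qm_info() above lays out qm_pq_params as
 *
 *   index 0              - per-TC PQ  (tc_id = non_offload_tc)
 *   index 1 (pure_lb_pq) - pure-LB PQ (tc_id = PURE_LB_TC)
 *
 * with start_pq/start_vport offsetting these indices into the PF's
 * share of the global PQ/vport resources.
 */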

int qed_resc_alloc(struct qed_dev *cdev)
{
        struct qed_consq *p_consq;
        struct qed_eq *p_eq;
        int i, rc = 0;

        cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
        if (!cdev->fw_data)
                return -ENOMEM;

        /* Allocate memory for the Queue->CID mapping */
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                int tx_size = sizeof(struct qed_hw_cid_data) *
                                     RESC_NUM(p_hwfn, QED_L2_QUEUE);
                int rx_size = sizeof(struct qed_hw_cid_data) *
                                     RESC_NUM(p_hwfn, QED_L2_QUEUE);

                p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
                if (!p_hwfn->p_tx_cids) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for Tx Cids\n");
                        rc = -ENOMEM;
                        goto alloc_err;
                }

                p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
                if (!p_hwfn->p_rx_cids) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for Rx Cids\n");
                        rc = -ENOMEM;
                        goto alloc_err;
                }
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                /* First allocate the context manager structure */
                rc = qed_cxt_mngr_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Set the HW cid/tid numbers (in the context manager).
                 * Must be done prior to any further computations.
                 */
                rc = qed_cxt_set_pf_params(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Prepare and process QM requirements */
                rc = qed_init_qm_info(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Compute the ILT client partition */
                rc = qed_cxt_cfg_ilt_compute(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* CID map / ILT shadow table / T2
                 * The table sizes are determined by the computations above
                 */
                rc = qed_cxt_tables_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* SPQ, must follow ILT because it initializes SPQ context */
                rc = qed_spq_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* SP status block allocation */
                p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
                                                         RESERVED_PTT_DPC);

                rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
                if (rc)
                        goto alloc_err;

                /* EQ */
                p_eq = qed_eq_alloc(p_hwfn, 256);
                if (!p_eq) {
                        rc = -ENOMEM;
                        goto alloc_err;
                }
                p_hwfn->p_eq = p_eq;

                p_consq = qed_consq_alloc(p_hwfn);
                if (!p_consq) {
                        rc = -ENOMEM;
                        goto alloc_err;
                }
                p_hwfn->p_consq = p_consq;

                /* DMA info initialization */
                rc = qed_dmae_info_alloc(p_hwfn);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for dmae_info structure\n");
                        goto alloc_err;
                }
        }

        cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
        if (!cdev->reset_stats) {
                DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
                rc = -ENOMEM;
                goto alloc_err;
        }

        return 0;

alloc_err:
        qed_resc_free(cdev);
        return rc;
}

void qed_resc_setup(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_cxt_mngr_setup(p_hwfn);
                qed_spq_setup(p_hwfn);
                qed_eq_setup(p_hwfn, p_hwfn->p_eq);
                qed_consq_setup(p_hwfn, p_hwfn->p_consq);

                /* Read shadow of current MFW mailbox */
                qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
                memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
                       p_hwfn->mcp_info->mfw_mb_cur,
                       p_hwfn->mcp_info->mfw_mb_length);

                qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
        }
}

#define FINAL_CLEANUP_CMD_OFFSET        (0)
#define FINAL_CLEANUP_CMD (0x1)
#define FINAL_CLEANUP_VALID_OFFSET      (6)
#define FINAL_CLEANUP_VFPF_ID_SHIFT     (7)
#define FINAL_CLEANUP_COMP (0x2)
#define FINAL_CLEANUP_POLL_CNT          (100)
#define FINAL_CLEANUP_POLL_TIME         (10)
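
/* Worked example (illustrative only): for VF/PF id 3 the command word
 * built in qed_final_cleanup() below is
 *
 *   command = (0x1 << 0)                          [FINAL_CLEANUP_CMD]
 *           | (1   << 6)                          [valid bit]
 *           | (3   << 7)                          [VF/PF id]
 *           | (0x2 << SDM_OP_GEN_COMP_TYPE_SHIFT) [completion type]
 *
 * Completion is then signalled by firmware through the USTORM RAM word
 * polled at GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET.
 */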
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u16 id)
{
        u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
        int rc = -EBUSY;

        addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;

        command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
        command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
        command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
        command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;

        /* Make sure notification is not set before initiating final cleanup */
        if (REG_RD(p_hwfn, addr)) {
                DP_NOTICE(p_hwfn,
                          "Unexpected; Found final cleanup notification before initiating final cleanup\n");
                REG_WR(p_hwfn, addr, 0);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
                   id, command);

        qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

        /* Poll until completion */
        while (!REG_RD(p_hwfn, addr) && count--)
                msleep(FINAL_CLEANUP_POLL_TIME);

        if (REG_RD(p_hwfn, addr))
                rc = 0;
        else
                DP_NOTICE(p_hwfn,
                          "Failed to receive FW final cleanup notification\n");

        /* Cleanup afterwards */
        REG_WR(p_hwfn, addr, 0);

        return rc;
}

static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
        int hw_mode = 0;

        hw_mode = (1 << MODE_BB_A0);

        switch (p_hwfn->cdev->num_ports_in_engines) {
        case 1:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
                break;
        case 2:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
                break;
        case 4:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
                break;
        default:
                DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
                          p_hwfn->cdev->num_ports_in_engines);
                return;
        }

        switch (p_hwfn->cdev->mf_mode) {
        case SF:
                hw_mode |= 1 << MODE_SF;
                break;
        case MF_OVLAN:
                hw_mode |= 1 << MODE_MF_SD;
                break;
        case MF_NPAR:
                hw_mode |= 1 << MODE_MF_SI;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
                hw_mode |= 1 << MODE_SF;
        }

        hw_mode |= 1 << MODE_ASIC;

        p_hwfn->hw_info.hw_mode = hw_mode;
}
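
/* hw_mode computed above is a bitmask of MODE_* flags (chip revision,
 * ports-per-engine, MF mode, ASIC); it is handed to the init tool as the
 * hw_mode argument of qed_init_run() in the ENGINE/PORT/PF phases below.
 */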

/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
        u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
        int i, sb_id;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                struct qed_igu_info *p_igu_info;
                struct qed_igu_block *p_block;
                struct cau_sb_entry sb_entry;

                p_igu_info = p_hwfn->hw_info.p_igu_info;

                for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
                     sb_id++) {
                        p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
                        if (!p_block->is_pf)
                                continue;

                        qed_init_cau_sb_entry(p_hwfn, &sb_entry,
                                              p_block->function_id,
                                              0, 0);
                        STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
                                         sb_entry);
                }
        }
}

static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              int hw_mode)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct qed_qm_common_rt_init_params params;
        struct qed_dev *cdev = p_hwfn->cdev;
        int rc = 0;

        qed_init_cau_rt_data(cdev);

        /* Program GTT windows */
        qed_gtt_init(p_hwfn);

        if (p_hwfn->mcp_info) {
                if (p_hwfn->mcp_info->func_info.bandwidth_max)
                        qm_info->pf_rl_en = 1;
                if (p_hwfn->mcp_info->func_info.bandwidth_min)
                        qm_info->pf_wfq_en = 1;
        }

        memset(&params, 0, sizeof(params));
        params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
        params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
        params.pf_rl_en = qm_info->pf_rl_en;
        params.pf_wfq_en = qm_info->pf_wfq_en;
        params.vport_rl_en = qm_info->vport_rl_en;
        params.vport_wfq_en = qm_info->vport_wfq_en;
        params.port_params = qm_info->qm_port_params;

        qed_qm_common_rt_init(p_hwfn, &params);

        qed_cxt_hw_init_common(p_hwfn);

        /* Close gate from NIG to BRB/Storm; By default they are open, but
         * we close them to prevent NIG from passing data to reset blocks.
         * Should have been done in the ENGINE phase, but init-tool lacks
         * proper port-pretend capabilities.
         */
        qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
        qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
        qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
        qed_port_unpretend(p_hwfn, p_ptt);

        rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
        if (rc != 0)
                return rc;

        qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
        qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

        /* Disable relaxed ordering in the PCI config space */
        qed_wr(p_hwfn, p_ptt, 0x20b4,
               qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

        return rc;
}

static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            int hw_mode)
{
        int rc = 0;

        rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
                          hw_mode);
        return rc;
}

static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          int hw_mode,
                          bool b_hw_start,
                          enum qed_int_mode int_mode,
                          bool allow_npar_tx_switch)
{
        u8 rel_pf_id = p_hwfn->rel_pf_id;
        int rc = 0;

        if (p_hwfn->mcp_info) {
                struct qed_mcp_function_info *p_info;

                p_info = &p_hwfn->mcp_info->func_info;
                if (p_info->bandwidth_min)
                        p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

                /* Update rate limit once we'll actually have a link */
                p_hwfn->qm_info.pf_rl = 100;
        }

        qed_cxt_hw_init_pf(p_hwfn);

        qed_int_igu_init_rt(p_hwfn);

        /* Set VLAN in NIG if needed */
        if (hw_mode & (1 << MODE_MF_SD)) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
                             p_hwfn->hw_info.ovlan);
        }

        /* Enable classification by MAC if needed */
        if (hw_mode & (1 << MODE_MF_SI)) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                           "Configuring TAGMAC_CLS_TYPE\n");
                STORE_RT_REG(p_hwfn,
                             NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
        }

        /* Protocol Configuration */
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

        /* Clean the chip of any remains left by a previous driver */
        rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
        if (rc != 0)
                return rc;

        /* PF Init sequence */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
        if (rc)
                return rc;

        /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
        if (rc)
                return rc;

        /* Pure runtime initializations - directly to the HW */
        qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

        if (b_hw_start) {
                /* enable interrupts */
                qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

                /* send function start command */
                rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
                if (rc)
                        DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
        }
        return rc;
}

static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt,
                               u8 enable)
{
        u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

        /* Change PF in PXP */
        qed_wr(p_hwfn, p_ptt,
               PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

        /* wait until value is set - try for 1 second every 50us */
        for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
                val = qed_rd(p_hwfn, p_ptt,
                             PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
                if (val == set_val)
                        break;

                usleep_range(50, 60);
        }

        if (val != set_val) {
                DP_NOTICE(p_hwfn,
                          "PFID_ENABLE_MASTER wasn't changed after a second\n");
                return -EAGAIN;
        }

        return 0;
}
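
/* The poll above performs at most 20000 iterations of
 * usleep_range(50, 60), i.e. roughly 20000 * 50us = 1 second, which is
 * where the "try for 1 second every 50us" comment comes from.
 */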

static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_main_ptt)
{
        /* Read shadow of current MFW mailbox */
        qed_mcp_read_mb(p_hwfn, p_main_ptt);
        memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
               p_hwfn->mcp_info->mfw_mb_cur,
               p_hwfn->mcp_info->mfw_mb_length);
}

int qed_hw_init(struct qed_dev *cdev,
                bool b_hw_start,
                enum qed_int_mode int_mode,
                bool allow_npar_tx_switch,
                const u8 *bin_fw_data)
{
        struct qed_storm_stats *p_stat;
        u32 load_code, param, *p_address;
        int rc, mfw_rc, i;
        u8 fw_vport = 0;

        rc = qed_init_fw_data(cdev, bin_fw_data);
        if (rc != 0)
                return rc;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
                if (rc != 0)
                        return rc;

                /* Enable DMAE in PXP */
                rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

                qed_calc_hw_mode(p_hwfn);

                rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
                                      &load_code);
                if (rc) {
                        DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
                        return rc;
                }

                qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
                           rc, load_code);

                p_hwfn->first_on_engine = (load_code ==
                                           FW_MSG_CODE_DRV_LOAD_ENGINE);

                switch (load_code) {
                case FW_MSG_CODE_DRV_LOAD_ENGINE:
                        rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
                                                p_hwfn->hw_info.hw_mode);
                        if (rc)
                                break;
                /* Fall through */
                case FW_MSG_CODE_DRV_LOAD_PORT:
                        rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
                                              p_hwfn->hw_info.hw_mode);
                        if (rc)
                                break;

                /* Fall through */
                case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                        rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
                                            p_hwfn->hw_info.hw_mode,
                                            b_hw_start, int_mode,
                                            allow_npar_tx_switch);
                        break;
                default:
                        rc = -EINVAL;
                        break;
                }

                if (rc)
                        DP_NOTICE(p_hwfn,
                                  "init phase failed for loadcode 0x%x (rc %d)\n",
                                   load_code, rc);

                /* ACK mfw regardless of success or failure of initialization */
                mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                     DRV_MSG_CODE_LOAD_DONE,
                                     0, &load_code, &param);
                if (rc)
                        return rc;
                if (mfw_rc) {
                        DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
                        return mfw_rc;
                }

                p_hwfn->hw_init_done = true;

                /* init PF stats */
                p_stat = &p_hwfn->storm_stats;
                p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
                                         MSTORM_QUEUE_STAT_OFFSET(fw_vport);
                p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);

                p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
                                         USTORM_QUEUE_STAT_OFFSET(fw_vport);
                p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);

                p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
                                         PSTORM_QUEUE_STAT_OFFSET(fw_vport);
                p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);

                p_address = &p_stat->tstats.address;
                *p_address = BAR0_MAP_REG_TSDM_RAM +
                             TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
                p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
        }

        return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
int qed_hw_stop(struct qed_dev *cdev)
{
        int rc = 0, t_rc;
        int i, j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

                /* mark the hw as uninitialized... */
                p_hwfn->hw_init_done = false;

                rc = qed_sp_pf_stop(p_hwfn);
                if (rc)
                        return rc;

                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

                qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
                qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
                for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
                        if ((!qed_rd(p_hwfn, p_ptt,
                                     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
                            (!qed_rd(p_hwfn, p_ptt,
                                     TM_REG_PF_SCAN_ACTIVE_TASK)))
                                break;

                        usleep_range(1000, 2000);
                }
                if (i == QED_HW_STOP_RETRY_LIMIT)
                        DP_NOTICE(p_hwfn,
                                  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
                                  (u8)qed_rd(p_hwfn, p_ptt,
                                             TM_REG_PF_SCAN_ACTIVE_CONN),
                                  (u8)qed_rd(p_hwfn, p_ptt,
                                             TM_REG_PF_SCAN_ACTIVE_TASK));

                /* Disable Attention Generation */
                qed_int_igu_disable_int(p_hwfn, p_ptt);

                qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
                qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

                qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
        }

        /* Disable DMAE in PXP - in CMT, this should only be done for
         * first hw-function, and only after all transactions have
         * stopped for all active hw-functions.
         */
        t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
                                   cdev->hwfns[0].p_main_ptt,
                                   false);
        if (t_rc != 0)
                rc = t_rc;

        return rc;
}

void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
        int i, j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                DP_VERBOSE(p_hwfn,
                           NETIF_MSG_IFDOWN,
                           "Shutting down the fastpath\n");

                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

                qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
                qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
                for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
                        if ((!qed_rd(p_hwfn, p_ptt,
                                     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
                            (!qed_rd(p_hwfn, p_ptt,
                                     TM_REG_PF_SCAN_ACTIVE_TASK)))
                                break;

                        usleep_range(1000, 2000);
                }
                if (i == QED_HW_STOP_RETRY_LIMIT)
                        DP_NOTICE(p_hwfn,
                                  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
                                  (u8)qed_rd(p_hwfn, p_ptt,
                                             TM_REG_PF_SCAN_ACTIVE_CONN),
                                  (u8)qed_rd(p_hwfn, p_ptt,
                                             TM_REG_PF_SCAN_ACTIVE_TASK));

                qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
        }
}

void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
        /* Re-open incoming traffic */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}

static int qed_reg_assert(struct qed_hwfn *hwfn,
                          struct qed_ptt *ptt, u32 reg,
                          bool expected)
{
        u32 assert_val = qed_rd(hwfn, ptt, reg);

        if (assert_val != expected) {
                DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
                          reg, expected);
                return -EINVAL;
        }

        return 0;
}

int qed_hw_reset(struct qed_dev *cdev)
{
        int rc = 0;
        u32 unload_resp, unload_param;
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

                /* Check for incorrect states */
                qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
                               QM_REG_USG_CNT_PF_TX, 0);
                qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
                               QM_REG_USG_CNT_PF_OTHER, 0);

                /* Disable PF in HW blocks */
                qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt,
                       TCFC_REG_STRONG_ENABLE_PF, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt,
                       CCFC_REG_STRONG_ENABLE_PF, 0);

                /* Send unload command to MCP */
                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_UNLOAD_REQ,
                                 DRV_MB_PARAM_UNLOAD_WOL_MCP,
                                 &unload_resp, &unload_param);
                if (rc) {
                        DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
                        unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
                }

                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_UNLOAD_DONE,
                                 0, &unload_resp, &unload_param);
                if (rc) {
                        DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
                        return rc;
                }
        }

        return rc;
}

/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
        qed_ptt_pool_free(p_hwfn);
        kfree(p_hwfn->hw_info.p_igu_info);
}

/* Setup bar access */
static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
        int rc;

        /* Allocate PTT pool */
        rc = qed_ptt_pool_alloc(p_hwfn);
        if (rc)
                return rc;

        /* Allocate the main PTT */
        p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

        /* clear indirect access */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

        /* Clean previous errors if such exist */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
               1 << p_hwfn->abs_pf_id);

        /* enable internal target-read */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

        return 0;
}

static void get_function_id(struct qed_hwfn *p_hwfn)
{
        /* ME Register */
        p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);

        p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

        p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
        p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
                                      PXP_CONCRETE_FID_PFID);
        p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
                                    PXP_CONCRETE_FID_PORT);
}

static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
        u32 *feat_num = p_hwfn->hw_info.feat_num;
        int num_features = 1;

        feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
                                                num_features,
                                        RESC_NUM(p_hwfn, QED_L2_QUEUE));
        DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
                   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
                   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
                   num_features);
}

static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
        u32 *resc_start = p_hwfn->hw_info.resc_start;
        u32 *resc_num = p_hwfn->hw_info.resc_num;
        int num_funcs, i;

        num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
                                  : p_hwfn->cdev->num_ports_in_engines;

        resc_num[QED_SB] = min_t(u32,
                                 (MAX_SB_PER_PATH_BB / num_funcs),
                                 qed_int_get_num_sbs(p_hwfn, NULL));
        resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
        resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
        resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
        resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
        resc_num[QED_RL] = 8;
        resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
        resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /* For vlan0 */) /
                             num_funcs;
        resc_num[QED_ILT] = 950;

        for (i = 0; i < QED_MAX_RESC; i++)
                resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;

        qed_hw_set_feat(p_hwfn);

        DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
                   "The numbers for each resource are:\n"
                   "SB = %d start = %d\n"
                   "L2_QUEUE = %d start = %d\n"
                   "VPORT = %d start = %d\n"
                   "PQ = %d start = %d\n"
                   "RL = %d start = %d\n"
                   "MAC = %d start = %d\n"
                   "VLAN = %d start = %d\n"
                   "ILT = %d start = %d\n",
                   p_hwfn->hw_info.resc_num[QED_SB],
                   p_hwfn->hw_info.resc_start[QED_SB],
                   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
                   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
                   p_hwfn->hw_info.resc_num[QED_VPORT],
                   p_hwfn->hw_info.resc_start[QED_VPORT],
                   p_hwfn->hw_info.resc_num[QED_PQ],
                   p_hwfn->hw_info.resc_start[QED_PQ],
                   p_hwfn->hw_info.resc_num[QED_RL],
                   p_hwfn->hw_info.resc_start[QED_RL],
                   p_hwfn->hw_info.resc_num[QED_MAC],
                   p_hwfn->hw_info.resc_start[QED_MAC],
                   p_hwfn->hw_info.resc_num[QED_VLAN],
                   p_hwfn->hw_info.resc_start[QED_VLAN],
                   p_hwfn->hw_info.resc_num[QED_ILT],
                   p_hwfn->hw_info.resc_start[QED_ILT]);
}
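
/* Resources are partitioned evenly between functions: each PF receives
 * resc_num[i] units starting at resc_num[i] * rel_pf_id. For example,
 * with two functions sharing MAX_NUM_L2_QUEUES_BB L2 queues, PF0's share
 * starts at 0 and PF1's at MAX_NUM_L2_QUEUES_BB / 2.
 */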

static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt)
{
        u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
        u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
        struct qed_mcp_link_params *link;

        /* Read global nvm_cfg address */
        nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

        /* Verify MCP has initialized it */
        if (!nvm_cfg_addr) {
                DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
                return -EINVAL;
        }

        /* Read nvm_cfg1 (Note this is just the offset, not the offsize (TBD)) */
        nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

        /* Read Vendor Id / Device Id */
        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, pci_id);
        p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
                                    NVM_CFG1_GLOB_VENDOR_ID_MASK;

        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, core_cfg);

        core_cfg = qed_rd(p_hwfn, p_ptt, addr);

        switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
                NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
                          core_cfg);
                break;
        }

        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
               offsetof(struct nvm_cfg1_func, device_id);
        val = qed_rd(p_hwfn, p_ptt, addr);

        if (IS_MF(p_hwfn)) {
                p_hwfn->hw_info.device_id =
                        (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
                        NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
        } else {
                p_hwfn->hw_info.device_id =
                        (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
                        NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
        }

        /* Read default link configuration */
        link = &p_hwfn->mcp_info->link_input;
        port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
                        offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
                           offsetof(struct nvm_cfg1_port, speed_cap_mask));
        link->speed.advertised_speeds =
                link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

        p_hwfn->mcp_info->link_capabilities.speed_capabilities =
                                                link->speed.advertised_speeds;

        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
                           offsetof(struct nvm_cfg1_port, link_settings));
        switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
                NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
        case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
                link->speed.autoneg = true;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
                link->speed.forced_speed = 1000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
                link->speed.forced_speed = 10000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
                link->speed.forced_speed = 25000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
                link->speed.forced_speed = 40000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
                link->speed.forced_speed = 50000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
                link->speed.forced_speed = 100000;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
                          link_temp);
        }

        link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
        link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
        link->pause.autoneg = !!(link_temp &
                                 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
        link->pause.forced_rx = !!(link_temp &
                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
        link->pause.forced_tx = !!(link_temp &
                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
        link->loopback_mode = 0;

        DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
                   link->speed.forced_speed, link->speed.advertised_speeds,
                   link->speed.autoneg, link->pause.autoneg);

        /* Read Multi-function information from shmem */
        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, generic_cont0);

        generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

        mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
                  NVM_CFG1_GLOB_MF_MODE_OFFSET;

        switch (mf_mode) {
        case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
                p_hwfn->cdev->mf_mode = MF_OVLAN;
                break;
        case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
                p_hwfn->cdev->mf_mode = MF_NPAR;
                break;
        case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
                p_hwfn->cdev->mf_mode = SF;
                break;
        }
        DP_INFO(p_hwfn, "Multi function mode is %08x\n",
                p_hwfn->cdev->mf_mode);

        return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}

static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
                struct qed_ptt *p_ptt,
                enum qed_pci_personality personality)
{
        u32 port_mode;
        int rc;

        /* Read the port mode */
        port_mode = qed_rd(p_hwfn, p_ptt,
                           CNIG_REG_NW_PORT_MODE_BB_B0);

        if (port_mode < 3) {
                p_hwfn->cdev->num_ports_in_engines = 1;
        } else if (port_mode <= 5) {
                p_hwfn->cdev->num_ports_in_engines = 2;
        } else {
                DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
                          port_mode);

                /* Default num_ports_in_engines to something */
                p_hwfn->cdev->num_ports_in_engines = 1;
        }

        qed_hw_get_nvm_info(p_hwfn, p_ptt);

        rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
        if (rc)
                return rc;

        if (qed_mcp_is_init(p_hwfn))
                ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
                                p_hwfn->mcp_info->func_info.mac);
        else
                eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

        if (qed_mcp_is_init(p_hwfn)) {
                if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
                        p_hwfn->hw_info.ovlan =
                                p_hwfn->mcp_info->func_info.ovlan;

                qed_mcp_cmd_port_init(p_hwfn, p_ptt);
        }

        if (qed_mcp_is_init(p_hwfn)) {
                enum qed_pci_personality protocol;

                protocol = p_hwfn->mcp_info->func_info.protocol;
                p_hwfn->hw_info.personality = protocol;
        }

        qed_hw_get_resc(p_hwfn);

        return rc;
}

static void qed_get_dev_info(struct qed_dev *cdev)
{
        u32 tmp;

        cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
                                     MISCS_REG_CHIP_NUM);
        cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
                                     MISCS_REG_CHIP_REV);
        MASK_FIELD(CHIP_REV, cdev->chip_rev);

        /* Learn number of HW-functions */
        tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
                     MISCS_REG_CMT_ENABLED_FOR_PAIR);

        if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
                DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
                cdev->num_hwfns = 2;
        } else {
                cdev->num_hwfns = 1;
        }

        cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
                                    MISCS_REG_CHIP_TEST_REG) >> 4;
        MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
        cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
                                       MISCS_REG_CHIP_METAL);
        MASK_FIELD(CHIP_METAL, cdev->chip_metal);

        DP_INFO(cdev->hwfns,
                "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
                cdev->chip_num, cdev->chip_rev,
                cdev->chip_bond_id, cdev->chip_metal);
}

static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
                                 void __iomem *p_regview,
                                 void __iomem *p_doorbells,
                                 enum qed_pci_personality personality)
{
        int rc = 0;

        /* Split PCI bars evenly between hwfns */
        p_hwfn->regview = p_regview;
        p_hwfn->doorbells = p_doorbells;

        /* Validate that chip access is feasible */
        if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
                DP_ERR(p_hwfn,
                       "Reading the ME register returns all Fs; Preventing further chip access\n");
                return -EINVAL;
        }

        get_function_id(p_hwfn);

        rc = qed_hw_hwfn_prepare(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
                goto err0;
        }

        /* First hwfn learns basic information, e.g., number of hwfns */
        if (!p_hwfn->my_id)
                qed_get_dev_info(p_hwfn->cdev);

        /* Initialize MCP structure */
        rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
                goto err1;
        }

        /* Read the device configuration information from the HW and SHMEM */
        rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to get HW information\n");
                goto err2;
        }

        /* Allocate the init RT array and initialize the init-ops engine */
        rc = qed_init_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
                goto err2;
        }

        return rc;
err2:
        qed_mcp_free(p_hwfn);
err1:
        qed_hw_hwfn_free(p_hwfn);
err0:
        return rc;
}

static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
                           u8 bar_id)
{
        u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
                       : PGLUE_B_REG_PF_BAR1_SIZE);
        u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);

        /* Get the BAR size (in KB) from hardware given val */
        return 1 << (val + 15);
}
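
/* val read above appears to be a log2-encoded size; e.g. val == 0 gives
 * 1 << 15 == 32768 units (presumably KB, per the comment above). In CMT
 * mode, qed_hw_prepare() below halves this value to locate the second
 * engine's half of each BAR.
 */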
1398
1399 int qed_hw_prepare(struct qed_dev *cdev,
1400                    int personality)
1401 {
1402         struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1403         int rc;
1404
1405         /* Store the precompiled init data ptrs */
1406         qed_init_iro_array(cdev);
1407
1408         /* Initialize the first hwfn - will learn number of hwfns */
1409         rc = qed_hw_prepare_single(p_hwfn,
1410                                    cdev->regview,
1411                                    cdev->doorbells, personality);
1412         if (rc)
1413                 return rc;
1414
1415         personality = p_hwfn->hw_info.personality;
1416
1417         /* Initialize the rest of the hwfns */
1418         if (cdev->num_hwfns > 1) {
1419                 void __iomem *p_regview, *p_doorbell;
1420                 u8 __iomem *addr;
1421
1422                 /* adjust bar offset for second engine */
1423                 addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
1424                 p_regview = addr;
1425
1426                 /* adjust doorbell bar offset for second engine */
1427                 addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
1428                 p_doorbell = addr;
1429
1430                 /* prepare second hw function */
1431                 rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
1432                                            p_doorbell, personality);
1433
1434                 /* In case of an error, free the previously
1435                  * initialized hwfn 0.
1436                  */
1437                 if (rc) {
1438                         qed_init_free(p_hwfn);
1439                         qed_mcp_free(p_hwfn);
1440                         qed_hw_hwfn_free(p_hwfn);
1441                 }
1442         }
1443
1444         return rc;
1445 }
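/* Illustration of the BAR split above, with a hypothetical size: for a
 * 32 MB BAR0, qed_hw_bar_size(p_hwfn, 0) returns 0x2000000, so the
 * second engine's regview begins at cdev->regview + 0x1000000; each
 * engine simply owns one half of each mapped BAR.
 */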
1446
1447 void qed_hw_remove(struct qed_dev *cdev)
1448 {
1449         int i;
1450
1451         for_each_hwfn(cdev, i) {
1452                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1453
1454                 qed_init_free(p_hwfn);
1455                 qed_hw_hwfn_free(p_hwfn);
1456                 qed_mcp_free(p_hwfn);
1457         }
1458 }
1459
1460 int qed_chain_alloc(struct qed_dev *cdev,
1461                     enum qed_chain_use_mode intended_use,
1462                     enum qed_chain_mode mode,
1463                     u16 num_elems,
1464                     size_t elem_size,
1465                     struct qed_chain *p_chain)
1466 {
1467         dma_addr_t p_pbl_phys = 0;
1468         void *p_pbl_virt = NULL;
1469         dma_addr_t p_phys = 0;
1470         void *p_virt = NULL;
1471         u16 page_cnt = 0;
1472         size_t size;
1473
1474         if (mode == QED_CHAIN_MODE_SINGLE)
1475                 page_cnt = 1;
1476         else
1477                 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1478
1479         size = page_cnt * QED_CHAIN_PAGE_SIZE;
1480         p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1481                                     size, &p_phys, GFP_KERNEL);
1482         if (!p_virt) {
1483                 DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1484                 goto nomem;
1485         }
1486
1487         if (mode == QED_CHAIN_MODE_PBL) {
1488                 size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1489                 p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1490                                                 size, &p_pbl_phys,
1491                                                 GFP_KERNEL);
1492                 if (!p_pbl_virt) {
1493                         DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1494                         goto nomem;
1495                 }
1496
1497                 qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1498                                    (u8)elem_size, intended_use,
1499                                    p_pbl_phys, p_pbl_virt);
1500         } else {
1501                 qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1502                                (u8)elem_size, intended_use, mode);
1503         }
1504
1505         return 0;
1506
1507 nomem:
1508         if (p_virt)
1509                 dma_free_coherent(&cdev->pdev->dev,
1510                                   page_cnt * QED_CHAIN_PAGE_SIZE,
1511                                   p_virt, p_phys);
1512         if (p_pbl_virt)
1513                 dma_free_coherent(&cdev->pdev->dev,
1514                                   page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1515                                   p_pbl_virt, p_pbl_phys);
1516
1517         return -ENOMEM;
1516 }
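/* Sizing sketch for the allocation above, assuming the 4 KB
 * QED_CHAIN_PAGE_SIZE from qed_chain.h: 1024 elements of 8 bytes fit
 * in two pages (512 elements per page; QED_CHAIN_MODE_NEXT_PTR loses
 * one element per page to the next-page pointer, so it may need an
 * extra page). QED_CHAIN_MODE_PBL additionally allocates a table with
 * one physical-page entry per chain page for the page-based list.
 */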
1517
1518 void qed_chain_free(struct qed_dev *cdev,
1519                     struct qed_chain *p_chain)
1520 {
1521         size_t size;
1522
1523         if (!p_chain->p_virt_addr)
1524                 return;
1525
1526         if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1527                 size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1528                 dma_free_coherent(&cdev->pdev->dev, size,
1529                                   p_chain->pbl.p_virt_table,
1530                                   p_chain->pbl.p_phys_table);
1531         }
1532
1533         size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1534         dma_free_coherent(&cdev->pdev->dev, size,
1535                           p_chain->p_virt_addr,
1536                           p_chain->p_phys_addr);
1537 }
1538
1539 static void __qed_get_vport_stats(struct qed_dev *cdev,
1540                                   struct qed_eth_stats  *stats)
1541 {
1542         int i, j;
1543
1544         memset(stats, 0, sizeof(*stats));
1545
1546         for_each_hwfn(cdev, i) {
1547                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1548                 struct eth_mstorm_per_queue_stat mstats;
1549                 struct eth_ustorm_per_queue_stat ustats;
1550                 struct eth_pstorm_per_queue_stat pstats;
1551                 struct tstorm_per_port_stat tstats;
1552                 struct port_stats port_stats;
1553                 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
1554
1555                 if (!p_ptt) {
1556                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1557                         continue;
1558                 }
1559
1560                 memset(&mstats, 0, sizeof(mstats));
1561                 qed_memcpy_from(p_hwfn, p_ptt, &mstats,
1562                                 p_hwfn->storm_stats.mstats.address,
1563                                 p_hwfn->storm_stats.mstats.len);
1564
1565                 memset(&ustats, 0, sizeof(ustats));
1566                 qed_memcpy_from(p_hwfn, p_ptt, &ustats,
1567                                 p_hwfn->storm_stats.ustats.address,
1568                                 p_hwfn->storm_stats.ustats.len);
1569
1570                 memset(&pstats, 0, sizeof(pstats));
1571                 qed_memcpy_from(p_hwfn, p_ptt, &pstats,
1572                                 p_hwfn->storm_stats.pstats.address,
1573                                 p_hwfn->storm_stats.pstats.len);
1574
1575                 memset(&tstats, 0, sizeof(tstats));
1576                 qed_memcpy_from(p_hwfn, p_ptt, &tstats,
1577                                 p_hwfn->storm_stats.tstats.address,
1578                                 p_hwfn->storm_stats.tstats.len);
1579
1580                 memset(&port_stats, 0, sizeof(port_stats));
1581
1582                 if (p_hwfn->mcp_info)
1583                         qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1584                                         p_hwfn->mcp_info->port_addr +
1585                                         offsetof(struct public_port, stats),
1586                                         sizeof(port_stats));
1587                 qed_ptt_release(p_hwfn, p_ptt);
1588
1589                 stats->no_buff_discards +=
1590                         HILO_64_REGPAIR(mstats.no_buff_discard);
1591                 stats->packet_too_big_discard +=
1592                         HILO_64_REGPAIR(mstats.packet_too_big_discard);
1593                 stats->ttl0_discard +=
1594                         HILO_64_REGPAIR(mstats.ttl0_discard);
1595                 stats->tpa_coalesced_pkts +=
1596                         HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1597                 stats->tpa_coalesced_events +=
1598                         HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1599                 stats->tpa_aborts_num +=
1600                         HILO_64_REGPAIR(mstats.tpa_aborts_num);
1601                 stats->tpa_coalesced_bytes +=
1602                         HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1603
1604                 stats->rx_ucast_bytes +=
1605                         HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1606                 stats->rx_mcast_bytes +=
1607                         HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1608                 stats->rx_bcast_bytes +=
1609                         HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1610                 stats->rx_ucast_pkts +=
1611                         HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1612                 stats->rx_mcast_pkts +=
1613                         HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1614                 stats->rx_bcast_pkts +=
1615                         HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1616
1617                 stats->mftag_filter_discards +=
1618                         HILO_64_REGPAIR(tstats.mftag_filter_discard);
1619                 stats->mac_filter_discards +=
1620                         HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1621
1622                 stats->tx_ucast_bytes +=
1623                         HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1624                 stats->tx_mcast_bytes +=
1625                         HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1626                 stats->tx_bcast_bytes +=
1627                         HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1628                 stats->tx_ucast_pkts +=
1629                         HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1630                 stats->tx_mcast_pkts +=
1631                         HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1632                 stats->tx_bcast_pkts +=
1633                         HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1634                 stats->tx_err_drop_pkts +=
1635                         HILO_64_REGPAIR(pstats.error_drop_pkts);
1636                 stats->rx_64_byte_packets             += port_stats.pmm.r64;
1637                 stats->rx_127_byte_packets            += port_stats.pmm.r127;
1638                 stats->rx_255_byte_packets            += port_stats.pmm.r255;
1639                 stats->rx_511_byte_packets            += port_stats.pmm.r511;
1640                 stats->rx_1023_byte_packets           += port_stats.pmm.r1023;
1641                 stats->rx_1518_byte_packets           += port_stats.pmm.r1518;
1642                 stats->rx_1522_byte_packets           += port_stats.pmm.r1522;
1643                 stats->rx_2047_byte_packets           += port_stats.pmm.r2047;
1644                 stats->rx_4095_byte_packets           += port_stats.pmm.r4095;
1645                 stats->rx_9216_byte_packets           += port_stats.pmm.r9216;
1646                 stats->rx_16383_byte_packets          += port_stats.pmm.r16383;
1647                 stats->rx_crc_errors                  += port_stats.pmm.rfcs;
1648                 stats->rx_mac_crtl_frames             += port_stats.pmm.rxcf;
1649                 stats->rx_pause_frames                += port_stats.pmm.rxpf;
1650                 stats->rx_pfc_frames                  += port_stats.pmm.rxpp;
1651                 stats->rx_align_errors                += port_stats.pmm.raln;
1652                 stats->rx_carrier_errors              += port_stats.pmm.rfcr;
1653                 stats->rx_oversize_packets            += port_stats.pmm.rovr;
1654                 stats->rx_jabbers                     += port_stats.pmm.rjbr;
1655                 stats->rx_undersize_packets           += port_stats.pmm.rund;
1656                 stats->rx_fragments                   += port_stats.pmm.rfrg;
1657                 stats->tx_64_byte_packets             += port_stats.pmm.t64;
1658                 stats->tx_65_to_127_byte_packets      += port_stats.pmm.t127;
1659                 stats->tx_128_to_255_byte_packets     += port_stats.pmm.t255;
1660                 stats->tx_256_to_511_byte_packets     += port_stats.pmm.t511;
1661                 stats->tx_512_to_1023_byte_packets    += port_stats.pmm.t1023;
1662                 stats->tx_1024_to_1518_byte_packets   += port_stats.pmm.t1518;
1663                 stats->tx_1519_to_2047_byte_packets   += port_stats.pmm.t2047;
1664                 stats->tx_2048_to_4095_byte_packets   += port_stats.pmm.t4095;
1665                 stats->tx_4096_to_9216_byte_packets   += port_stats.pmm.t9216;
1666                 stats->tx_9217_to_16383_byte_packets  += port_stats.pmm.t16383;
1667                 stats->tx_pause_frames                += port_stats.pmm.txpf;
1668                 stats->tx_pfc_frames                  += port_stats.pmm.txpp;
1669                 stats->tx_lpi_entry_count             += port_stats.pmm.tlpiec;
1670                 stats->tx_total_collisions            += port_stats.pmm.tncl;
1671                 stats->rx_mac_bytes                   += port_stats.pmm.rbyte;
1672                 stats->rx_mac_uc_packets              += port_stats.pmm.rxuca;
1673                 stats->rx_mac_mc_packets              += port_stats.pmm.rxmca;
1674                 stats->rx_mac_bc_packets              += port_stats.pmm.rxbca;
1675                 stats->rx_mac_frames_ok               += port_stats.pmm.rxpok;
1676                 stats->tx_mac_bytes                   += port_stats.pmm.tbyte;
1677                 stats->tx_mac_uc_packets              += port_stats.pmm.txuca;
1678                 stats->tx_mac_mc_packets              += port_stats.pmm.txmca;
1679                 stats->tx_mac_bc_packets              += port_stats.pmm.txbca;
1680                 stats->tx_mac_ctrl_frames             += port_stats.pmm.txcf;
1681
1682                 for (j = 0; j < 8; j++) {
1683                         stats->brb_truncates += port_stats.brb.brb_truncate[j];
1684                         stats->brb_discards += port_stats.brb.brb_discard[j];
1685                 }
1686         }
1687 }
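/* Each storm exports its counters as hi/lo register pairs;
 * HILO_64_REGPAIR() above folds a pair into one u64, conceptually
 * (((u64)hi << 32) + lo). The final loop sums what appear to be the
 * eight per-priority BRB truncate/discard buckets into single totals.
 */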
1688
1689 void qed_get_vport_stats(struct qed_dev *cdev,
1690                          struct qed_eth_stats *stats)
1691 {
1692         u32 i;
1693
1694         if (!cdev) {
1695                 memset(stats, 0, sizeof(*stats));
1696                 return;
1697         }
1698
1699         __qed_get_vport_stats(cdev, stats);
1700
1701         if (!cdev->reset_stats)
1702                 return;
1703
1704         /* Reduce the statistics baseline */
1705         for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1706                 ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1707 }
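/* The baseline subtraction above treats struct qed_eth_stats as a
 * flat array of u64 counters; that is only sound while every field in
 * the struct is a u64. A hypothetical compile-time guard for the
 * assumption could look like:
 *
 *	BUILD_BUG_ON(sizeof(struct qed_eth_stats) % sizeof(u64));
 */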
1708
1709 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
1710 void qed_reset_vport_stats(struct qed_dev *cdev)
1711 {
1712         int i;
1713
1714         for_each_hwfn(cdev, i) {
1715                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1716                 struct eth_mstorm_per_queue_stat mstats;
1717                 struct eth_ustorm_per_queue_stat ustats;
1718                 struct eth_pstorm_per_queue_stat pstats;
1719                 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
1720
1721                 if (!p_ptt) {
1722                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1723                         continue;
1724                 }
1725
1726                 memset(&mstats, 0, sizeof(mstats));
1727                 qed_memcpy_to(p_hwfn, p_ptt,
1728                               p_hwfn->storm_stats.mstats.address,
1729                               &mstats,
1730                               p_hwfn->storm_stats.mstats.len);
1731
1732                 memset(&ustats, 0, sizeof(ustats));
1733                 qed_memcpy_to(p_hwfn, p_ptt,
1734                               p_hwfn->storm_stats.ustats.address,
1735                               &ustats,
1736                               p_hwfn->storm_stats.ustats.len);
1737
1738                 memset(&pstats, 0, sizeof(pstats));
1739                 qed_memcpy_to(p_hwfn, p_ptt,
1740                               p_hwfn->storm_stats.pstats.address,
1741                               &pstats,
1742                               p_hwfn->storm_stats.pstats.len);
1743
1744                 qed_ptt_release(p_hwfn, p_ptt);
1745         }
1746
1747         /* PORT statistics are not necessarily reset, so we need to
1748          * read and create a baseline for future statistics.
1749          */
1750         if (!cdev->reset_stats)
1751                 DP_INFO(cdev, "Reset stats not allocated\n");
1752         else
1753                 __qed_get_vport_stats(cdev, cdev->reset_stats);
1754 }
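/* Reset therefore happens in two halves: the vport (storm) counters
 * are physically zeroed by the writes above, while the port counters,
 * which the hardware never clears, are snapshotted into
 * cdev->reset_stats so qed_get_vport_stats() can subtract them later.
 */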
1755
1756 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1757                     u16 src_id, u16 *dst_id)
1758 {
1759         if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1760                 u16 min, max;
1761
1762                 min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
1763         max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE) - 1;
1764                 DP_NOTICE(p_hwfn,
1765                           "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
1766                           src_id, min, max);
1767
1768                 return -EINVAL;
1769         }
1770
1771         *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
1772
1773         return 0;
1774 }
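/* Example of the relative-to-absolute translation above, with a
 * hypothetical resource layout: if RESC_START is 16 and RESC_NUM is 8,
 * the valid relative ids are 0-7 and src_id 3 maps to absolute queue
 * 19. qed_fw_vport() and qed_fw_rss_eng() below apply the same scheme
 * to their respective resources.
 */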
1775
1776 int qed_fw_vport(struct qed_hwfn *p_hwfn,
1777                  u8 src_id, u8 *dst_id)
1778 {
1779         if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
1780                 u8 min, max;
1781
1782                 min = (u8)RESC_START(p_hwfn, QED_VPORT);
1783         max = min + RESC_NUM(p_hwfn, QED_VPORT) - 1;
1784                 DP_NOTICE(p_hwfn,
1785                           "vport id [%d] is not valid, available indices [%d - %d]\n",
1786                           src_id, min, max);
1787
1788                 return -EINVAL;
1789         }
1790
1791         *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
1792
1793         return 0;
1794 }
1795
1796 int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1797                    u8 src_id, u8 *dst_id)
1798 {
1799         if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
1800                 u8 min, max;
1801
1802                 min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
1803         max = min + RESC_NUM(p_hwfn, QED_RSS_ENG) - 1;
1804                 DP_NOTICE(p_hwfn,
1805                           "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1806                           src_id, min, max);
1807
1808                 return -EINVAL;
1809         }
1810
1811         *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
1812
1813         return 0;
1814 }