1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <linux/crash_dump.h>
25 #include <net/tcp.h>
26 #include <net/ipv6.h>
27 #include <net/ip6_checksum.h>
28 #include <net/busy_poll.h>
29 #include <linux/prefetch.h>
30 #include "bnx2x_cmn.h"
31 #include "bnx2x_init.h"
32 #include "bnx2x_sp.h"
33
34 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
35 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
36 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
37 static int bnx2x_poll(struct napi_struct *napi, int budget);
38
39 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
40 {
41         int i;
42
43         /* Add NAPI objects */
44         for_each_rx_queue_cnic(bp, i) {
45                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
46                                bnx2x_poll, NAPI_POLL_WEIGHT);
47                 napi_hash_add(&bnx2x_fp(bp, i, napi));
48         }
49 }
50
51 static void bnx2x_add_all_napi(struct bnx2x *bp)
52 {
53         int i;
54
55         /* Add NAPI objects */
56         for_each_eth_queue(bp, i) {
57                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
58                                bnx2x_poll, NAPI_POLL_WEIGHT);
59                 napi_hash_add(&bnx2x_fp(bp, i, napi));
60         }
61 }
62
63 static int bnx2x_calc_num_queues(struct bnx2x *bp)
64 {
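        /* Use bnx2x_num_queues when it is non-zero; otherwise fall back to
         * the stack's default RSS queue count, which is typically
         * min(8, num_online_cpus()).
         */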
65         int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66
67         /* Reduce memory usage in kdump environment by using only one queue */
68         if (is_kdump_kernel())
69                 nq = 1;
70
71         nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
72         return nq;
73 }
74
75 /**
76  * bnx2x_move_fp - move content of the fastpath structure.
77  *
78  * @bp:         driver handle
79  * @from:       source FP index
80  * @to:         destination FP index
81  *
82  * Makes sure the contents of the bp->fp[to].napi are kept
83  * intact. This is done by first copying the napi struct from
84  * the target to the source, and then mem copying the entire
85  * source onto the target. Update txdata pointers and related
86  * content.
87  */
88 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89 {
90         struct bnx2x_fastpath *from_fp = &bp->fp[from];
91         struct bnx2x_fastpath *to_fp = &bp->fp[to];
92         struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
93         struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
94         struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
95         struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
96         int old_max_eth_txqs, new_max_eth_txqs;
97         int old_txdata_index = 0, new_txdata_index = 0;
98         struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99
100         /* Copy the NAPI object as it has been already initialized */
101         from_fp->napi = to_fp->napi;
102
103         /* Move bnx2x_fastpath contents */
104         memcpy(to_fp, from_fp, sizeof(*to_fp));
105         to_fp->index = to;
106
107         /* Retain the tpa_info of the original `to' version as we don't want
108          * 2 FPs to contain the same tpa_info pointer.
109          */
110         to_fp->tpa_info = old_tpa_info;
111
112         /* move sp_objs contents as well, as their indices match fp ones */
113         memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114
115         /* move fp_stats contents as well, as their indices match fp ones */
116         memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117
118         /* Update txdata pointers in fp and move txdata content accordingly:
119          * Each fp consumes 'max_cos' txdata structures, so the index should be
120          * decremented by max_cos x delta.
121          */
122
123         old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
124         new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125                                 (bp)->max_cos;
126         if (from == FCOE_IDX(bp)) {
127                 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128                 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
129         }
130
131         memcpy(&bp->bnx2x_txq[new_txdata_index],
132                &bp->bnx2x_txq[old_txdata_index],
133                sizeof(struct bnx2x_fp_txdata));
134         to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
135 }
136
137 /**
138  * bnx2x_fill_fw_str - Fill buffer with FW version string.
139  *
140  * @bp:        driver handle
141  * @buf:       character buffer to fill with the fw name
142  * @buf_len:   length of the above buffer
143  *
144  */
145 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
146 {
147         if (IS_PF(bp)) {
148                 u8 phy_fw_ver[PHY_FW_VER_LEN];
149
150                 phy_fw_ver[0] = '\0';
151                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
152                                              phy_fw_ver, PHY_FW_VER_LEN);
153                 strlcpy(buf, bp->fw_ver, buf_len);
154                 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155                          "bc %d.%d.%d%s%s",
156                          (bp->common.bc_ver & 0xff0000) >> 16,
157                          (bp->common.bc_ver & 0xff00) >> 8,
158                          (bp->common.bc_ver & 0xff),
159                          ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160         } else {
161                 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
162         }
163 }
164
165 /**
166  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
167  *
168  * @bp: driver handle
169  * @delta:      number of eth queues which were not allocated
170  */
171 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172 {
173         int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174
175         /* Queue pointer cannot be re-set on an fp-basis, as moving a pointer
176          * backward along the array could cause memory to be overwritten
177          */
178         for (cos = 1; cos < bp->max_cos; cos++) {
179                 for (i = 0; i < old_eth_num - delta; i++) {
180                         struct bnx2x_fastpath *fp = &bp->fp[i];
181                         int new_idx = cos * (old_eth_num - delta) + i;
182
183                         memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
184                                sizeof(struct bnx2x_fp_txdata));
185                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
186                 }
187         }
188 }
189
190 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
191
192 /* free skb in the packet ring at pos idx
193  * return idx of last bd freed
194  */
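/* A transmitted packet is described by a chain of BDs: a start BD, one or
 * two parse BDs (the second is flagged by BNX2X_HAS_SECOND_PBD), an
 * optional TSO split BD that shares its DMA mapping with the headers
 * (BNX2X_TSO_SPLIT_BD), and one BD per page fragment.  The walk below
 * unmaps the header mapping with dma_unmap_single() and every remaining
 * fragment with dma_unmap_page().
 */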
195 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
196                              u16 idx, unsigned int *pkts_compl,
197                              unsigned int *bytes_compl)
198 {
199         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
200         struct eth_tx_start_bd *tx_start_bd;
201         struct eth_tx_bd *tx_data_bd;
202         struct sk_buff *skb = tx_buf->skb;
203         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204         int nbd;
205         u16 split_bd_len = 0;
206
207         /* prefetch skb end pointer to speedup dev_kfree_skb() */
208         prefetch(&skb->end);
209
210         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
211            txdata->txq_index, idx, tx_buf, skb);
212
213         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214
215         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
216 #ifdef BNX2X_STOP_ON_ERROR
217         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
218                 BNX2X_ERR("BAD nbd!\n");
219                 bnx2x_panic();
220         }
221 #endif
222         new_cons = nbd + tx_buf->first_bd;
223
224         /* Get the next bd */
225         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226
227         /* Skip a parse bd... */
228         --nbd;
229         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230
231         if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
232                 /* Skip second parse bd... */
233                 --nbd;
234                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
235         }
236
237         /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
238         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
239                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
240                 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
241                 --nbd;
242                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
243         }
244
245         /* unmap first bd */
246         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
247                          BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
248                          DMA_TO_DEVICE);
249
250         /* now free frags */
251         while (nbd > 0) {
252
253                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
254                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
255                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256                 if (--nbd)
257                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
258         }
259
260         /* release skb */
261         WARN_ON(!skb);
262         if (likely(skb)) {
263                 (*pkts_compl)++;
264                 (*bytes_compl) += skb->len;
265         }
266
267         dev_kfree_skb_any(skb);
268         tx_buf->first_bd = 0;
269         tx_buf->skb = NULL;
270
271         return new_cons;
272 }
273
274 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
275 {
276         struct netdev_queue *txq;
277         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
278         unsigned int pkts_compl = 0, bytes_compl = 0;
279
280 #ifdef BNX2X_STOP_ON_ERROR
281         if (unlikely(bp->panic))
282                 return -1;
283 #endif
284
285         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
286         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
287         sw_cons = txdata->tx_pkt_cons;
288
289         while (sw_cons != hw_cons) {
290                 u16 pkt_cons;
291
292                 pkt_cons = TX_BD(sw_cons);
293
294                 DP(NETIF_MSG_TX_DONE,
295                    "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
296                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
297
298                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
299                                             &pkts_compl, &bytes_compl);
300
301                 sw_cons++;
302         }
303
304         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
305
306         txdata->tx_pkt_cons = sw_cons;
307         txdata->tx_bd_cons = bd_cons;
308
309         /* Need to make the tx_bd_cons update visible to start_xmit()
310          * before checking for netif_tx_queue_stopped().  Without the
311          * memory barrier, there is a small possibility that
312          * start_xmit() will miss it and cause the queue to be stopped
313          * forever.
314          * On the other hand we need an rmb() here to ensure the proper
315          * ordering of bit testing in the following
316          * netif_tx_queue_stopped(txq) call.
317          */
318         smp_mb();
319
320         if (unlikely(netif_tx_queue_stopped(txq))) {
321                 /* Taking tx_lock() is needed to prevent re-enabling the queue
322                  * while it's empty. This could have happened if rx_action() gets
323                  * suspended in bnx2x_tx_int() after the condition before
324                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
325                  *
326                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
327                  * sends some packets consuming the whole queue again->
328                  * stops the queue
329                  */
330
331                 __netif_tx_lock(txq, smp_processor_id());
332
333                 if ((netif_tx_queue_stopped(txq)) &&
334                     (bp->state == BNX2X_STATE_OPEN) &&
335                     (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
336                         netif_tx_wake_queue(txq);
337
338                 __netif_tx_unlock(txq);
339         }
340         return 0;
341 }
342
343 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
344                                              u16 idx)
345 {
346         u16 last_max = fp->last_max_sge;
347
348         if (SUB_S16(idx, last_max) > 0)
349                 fp->last_max_sge = idx;
350 }
351
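/* fp->sge_mask is a bit vector with one bit per SGE entry, grouped into
 * 64-bit elements.  Bits are cleared as the corresponding pages are
 * consumed by an aggregation; once a whole element has been cleared, the
 * SGE producer is advanced past it and the element is re-armed to
 * all-ones so its entries can be reused.
 */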
352 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
353                                          u16 sge_len,
354                                          struct eth_end_agg_rx_cqe *cqe)
355 {
356         struct bnx2x *bp = fp->bp;
357         u16 last_max, last_elem, first_elem;
358         u16 delta = 0;
359         u16 i;
360
361         if (!sge_len)
362                 return;
363
364         /* First mark all used pages */
365         for (i = 0; i < sge_len; i++)
366                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
367                         RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
368
369         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
370            sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
371
372         /* Here we assume that the last SGE index is the biggest */
373         prefetch((void *)(fp->sge_mask));
374         bnx2x_update_last_max_sge(fp,
375                 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
376
377         last_max = RX_SGE(fp->last_max_sge);
378         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
379         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
380
381         /* If ring is not full */
382         if (last_elem + 1 != first_elem)
383                 last_elem++;
384
385         /* Now update the prod */
386         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
387                 if (likely(fp->sge_mask[i]))
388                         break;
389
390                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
391                 delta += BIT_VEC64_ELEM_SZ;
392         }
393
394         if (delta > 0) {
395                 fp->rx_sge_prod += delta;
396                 /* clear page-end entries */
397                 bnx2x_clear_sge_mask_next_elems(fp);
398         }
399
400         DP(NETIF_MSG_RX_STATUS,
401            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
402            fp->last_max_sge, fp->rx_sge_prod);
403 }
404
405 /* Get Toeplitz hash value in the skb using the value from the
406  * CQE (calculated by HW).
407  */
408 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
409                             const struct eth_fast_path_rx_cqe *cqe,
410                             enum pkt_hash_types *rxhash_type)
411 {
412         /* Get Toeplitz hash from CQE */
413         if ((bp->dev->features & NETIF_F_RXHASH) &&
414             (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
415                 enum eth_rss_hash_type htype;
416
417                 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
418                 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
419                                 (htype == TCP_IPV6_HASH_TYPE)) ?
420                                PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
421
422                 return le32_to_cpu(cqe->rss_hash_result);
423         }
424         *rxhash_type = PKT_HASH_TYPE_NONE;
425         return 0;
426 }
427
428 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
429                             u16 cons, u16 prod,
430                             struct eth_fast_path_rx_cqe *cqe)
431 {
432         struct bnx2x *bp = fp->bp;
433         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
434         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
435         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
436         dma_addr_t mapping;
437         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
438         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
439
440         /* print error if current state != stop */
441         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
442                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
443
444         /* Try to map an empty data buffer from the aggregation info  */
445         mapping = dma_map_single(&bp->pdev->dev,
446                                  first_buf->data + NET_SKB_PAD,
447                                  fp->rx_buf_size, DMA_FROM_DEVICE);
448         /*
449          *  ...if it fails - move the skb from the consumer to the producer
450          *  and set the current aggregation state as ERROR to drop it
451          *  when TPA_STOP arrives.
452          */
453
454         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
455                 /* Move the BD from the consumer to the producer */
456                 bnx2x_reuse_rx_data(fp, cons, prod);
457                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
458                 return;
459         }
460
461         /* move empty data from pool to prod */
462         prod_rx_buf->data = first_buf->data;
463         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
464         /* point prod_bd to new data */
465         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
466         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
467
468         /* move partial skb from cons to pool (don't unmap yet) */
469         *first_buf = *cons_rx_buf;
470
471         /* mark bin state as START */
472         tpa_info->parsing_flags =
473                 le16_to_cpu(cqe->pars_flags.flags);
474         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
475         tpa_info->tpa_state = BNX2X_TPA_START;
476         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
477         tpa_info->placement_offset = cqe->placement_offset;
478         tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
479         if (fp->mode == TPA_MODE_GRO) {
480                 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
481                 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
482                 tpa_info->gro_size = gro_size;
483         }
484
485 #ifdef BNX2X_STOP_ON_ERROR
486         fp->tpa_queue_used |= (1 << queue);
487         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
488            fp->tpa_queue_used);
489 #endif
490 }
491
492 /* Timestamp option length allowed for TPA aggregation:
493  *
494  *              nop nop kind length echo val
495  */
496 #define TPA_TSTAMP_OPT_LEN      12
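/* i.e. nop(1) + nop(1) + kind(1) + length(1) + echo(4) + val(4) = 12 bytes;
 * any other TCP option would have prevented the FW from aggregating.
 */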
497 /**
498  * bnx2x_set_gro_params - compute GRO values
499  *
500  * @skb:                packet skb
501  * @parsing_flags:      parsing flags from the START CQE
502  * @len_on_bd:          total length of the first packet for the
503  *                      aggregation.
504  * @pkt_len:            length of all segments
505  *
506  * Approximate value of the MSS for this aggregation, calculated using
507  * its first packet.
508  * Compute the number of aggregated segments and the gso_type.
509  */
510 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
511                                  u16 len_on_bd, unsigned int pkt_len,
512                                  u16 num_of_coalesced_segs)
513 {
514         /* TPA aggregation won't have either IP options or TCP options
515          * other than timestamp or IPv6 extension headers.
516          */
517         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
518
519         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
520             PRS_FLAG_OVERETH_IPV6) {
521                 hdrs_len += sizeof(struct ipv6hdr);
522                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
523         } else {
524                 hdrs_len += sizeof(struct iphdr);
525                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
526         }
527
528         /* Check if there was a TCP timestamp; if there was, it will
529          * always be 12 bytes long: nop nop kind length echo val.
530          *
531          * Otherwise FW would close the aggregation.
532          */
533         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
534                 hdrs_len += TPA_TSTAMP_OPT_LEN;
535
536         skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
537
538         /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
539          * to skb_shinfo(skb)->gso_segs
540          */
541         NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
542 }
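/* For example, an IPv4 aggregation carrying TCP timestamps gives
 * hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 * sizeof(struct tcphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66,
 * so gso_size is set to len_on_bd - 66.
 */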
543
544 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
545                               u16 index, gfp_t gfp_mask)
546 {
547         struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
548         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
549         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
550         dma_addr_t mapping;
551
552         if (unlikely(page == NULL)) {
553                 BNX2X_ERR("Can't alloc sge\n");
554                 return -ENOMEM;
555         }
556
557         mapping = dma_map_page(&bp->pdev->dev, page, 0,
558                                SGE_PAGES, DMA_FROM_DEVICE);
559         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
560                 __free_pages(page, PAGES_PER_SGE_SHIFT);
561                 BNX2X_ERR("Can't map sge\n");
562                 return -ENOMEM;
563         }
564
565         sw_buf->page = page;
566         dma_unmap_addr_set(sw_buf, mapping, mapping);
567
568         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
569         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
570
571         return 0;
572 }
573
574 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
575                                struct bnx2x_agg_info *tpa_info,
576                                u16 pages,
577                                struct sk_buff *skb,
578                                struct eth_end_agg_rx_cqe *cqe,
579                                u16 cqe_idx)
580 {
581         struct sw_rx_page *rx_pg, old_rx_pg;
582         u32 i, frag_len, frag_size;
583         int err, j, frag_id = 0;
584         u16 len_on_bd = tpa_info->len_on_bd;
585         u16 full_page = 0, gro_size = 0;
586
587         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
588
589         if (fp->mode == TPA_MODE_GRO) {
590                 gro_size = tpa_info->gro_size;
591                 full_page = tpa_info->full_page;
592         }
593
594         /* This is needed in order to enable forwarding support */
595         if (frag_size)
596                 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
597                                      le16_to_cpu(cqe->pkt_len),
598                                      le16_to_cpu(cqe->num_of_coalesced_segs));
599
600 #ifdef BNX2X_STOP_ON_ERROR
601         if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
602                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
603                           pages, cqe_idx);
604                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
605                 bnx2x_panic();
606                 return -EINVAL;
607         }
608 #endif
609
610         /* Run through the SGL and compose the fragmented skb */
611         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
612                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
613
614                 /* FW gives the indices of the SGE as if the ring is an array
615                    (meaning that "next" element will consume 2 indices) */
616                 if (fp->mode == TPA_MODE_GRO)
617                         frag_len = min_t(u32, frag_size, (u32)full_page);
618                 else /* LRO */
619                         frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
620
621                 rx_pg = &fp->rx_page_ring[sge_idx];
622                 old_rx_pg = *rx_pg;
623
624                 /* If we fail to allocate a substitute page, we simply stop
625                    where we are and drop the whole packet */
626                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
627                 if (unlikely(err)) {
628                         bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
629                         return err;
630                 }
631
632                 /* Unmap the page as we're going to pass it to the stack */
633                 dma_unmap_page(&bp->pdev->dev,
634                                dma_unmap_addr(&old_rx_pg, mapping),
635                                SGE_PAGES, DMA_FROM_DEVICE);
636                 /* Add one frag and update the appropriate fields in the skb */
637                 if (fp->mode == TPA_MODE_LRO)
638                         skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
639                 else { /* GRO */
640                         int rem;
641                         int offset = 0;
642                         for (rem = frag_len; rem > 0; rem -= gro_size) {
643                                 int len = rem > gro_size ? gro_size : rem;
644                                 skb_fill_page_desc(skb, frag_id++,
645                                                    old_rx_pg.page, offset, len);
646                                 if (offset)
647                                         get_page(old_rx_pg.page);
648                                 offset += len;
649                         }
650                 }
651
652                 skb->data_len += frag_len;
653                 skb->truesize += SGE_PAGES;
654                 skb->len += frag_len;
655
656                 frag_size -= frag_len;
657         }
658
659         return 0;
660 }
661
662 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
663 {
664         if (fp->rx_frag_size)
665                 put_page(virt_to_head_page(data));
666         else
667                 kfree(data);
668 }
669
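/* A zero fp->rx_frag_size means the rx buffer does not fit in a page
 * fragment; in that case the ring falls back to plain kmalloc()/kfree()
 * buffers, as seen in bnx2x_frag_free() above and bnx2x_frag_alloc() below.
 */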
670 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
671 {
672         if (fp->rx_frag_size) {
673                 /* GFP_KERNEL allocations are used only during initialization */
674                 if (unlikely(gfp_mask & __GFP_WAIT))
675                         return (void *)__get_free_page(gfp_mask);
676
677                 return netdev_alloc_frag(fp->rx_frag_size);
678         }
679
680         return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
681 }
682
683 #ifdef CONFIG_INET
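/* tcp_gro_complete() hands the aggregated skb up as CHECKSUM_PARTIAL, which
 * requires the TCP pseudo-header checksum to already be in th->check.  The
 * helpers below seed that value for IPv4 and IPv6 respectively.
 */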
684 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
685 {
686         const struct iphdr *iph = ip_hdr(skb);
687         struct tcphdr *th;
688
689         skb_set_transport_header(skb, sizeof(struct iphdr));
690         th = tcp_hdr(skb);
691
692         th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
693                                   iph->saddr, iph->daddr, 0);
694 }
695
696 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
697 {
698         struct ipv6hdr *iph = ipv6_hdr(skb);
699         struct tcphdr *th;
700
701         skb_set_transport_header(skb, sizeof(struct ipv6hdr));
702         th = tcp_hdr(skb);
703
704         th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
705                                   &iph->saddr, &iph->daddr, 0);
706 }
707
708 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
709                             void (*gro_func)(struct bnx2x*, struct sk_buff*))
710 {
711         skb_set_network_header(skb, 0);
712         gro_func(bp, skb);
713         tcp_gro_complete(skb);
714 }
715 #endif
716
717 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
718                                struct sk_buff *skb)
719 {
720 #ifdef CONFIG_INET
721         if (skb_shinfo(skb)->gso_size) {
722                 switch (be16_to_cpu(skb->protocol)) {
723                 case ETH_P_IP:
724                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
725                         break;
726                 case ETH_P_IPV6:
727                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
728                         break;
729                 default:
730                         BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
731                                   be16_to_cpu(skb->protocol));
732                 }
733         }
734 #endif
735         skb_record_rx_queue(skb, fp->rx_queue);
736         napi_gro_receive(&fp->napi, skb);
737 }
738
739 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
740                            struct bnx2x_agg_info *tpa_info,
741                            u16 pages,
742                            struct eth_end_agg_rx_cqe *cqe,
743                            u16 cqe_idx)
744 {
745         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
746         u8 pad = tpa_info->placement_offset;
747         u16 len = tpa_info->len_on_bd;
748         struct sk_buff *skb = NULL;
749         u8 *new_data, *data = rx_buf->data;
750         u8 old_tpa_state = tpa_info->tpa_state;
751
752         tpa_info->tpa_state = BNX2X_TPA_STOP;
753
754         /* If there was an error during the handling of the TPA_START -
755          * drop this aggregation.
756          */
757         if (old_tpa_state == BNX2X_TPA_ERROR)
758                 goto drop;
759
760         /* Try to allocate the new data */
761         new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
762         /* Unmap skb in the pool anyway, as we are going to change
763            pool entry status to BNX2X_TPA_STOP even if new skb allocation
764            fails. */
765         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
766                          fp->rx_buf_size, DMA_FROM_DEVICE);
767         if (likely(new_data))
768                 skb = build_skb(data, fp->rx_frag_size);
769
770         if (likely(skb)) {
771 #ifdef BNX2X_STOP_ON_ERROR
772                 if (pad + len > fp->rx_buf_size) {
773                         BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
774                                   pad, len, fp->rx_buf_size);
775                         bnx2x_panic();
776                         return;
777                 }
778 #endif
779
780                 skb_reserve(skb, pad + NET_SKB_PAD);
781                 skb_put(skb, len);
782                 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
783
784                 skb->protocol = eth_type_trans(skb, bp->dev);
785                 skb->ip_summed = CHECKSUM_UNNECESSARY;
786
787                 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
788                                          skb, cqe, cqe_idx)) {
789                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
790                                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
791                         bnx2x_gro_receive(bp, fp, skb);
792                 } else {
793                         DP(NETIF_MSG_RX_STATUS,
794                            "Failed to allocate new pages - dropping packet!\n");
795                         dev_kfree_skb_any(skb);
796                 }
797
798                 /* put new data in bin */
799                 rx_buf->data = new_data;
800
801                 return;
802         }
803         if (new_data)
804                 bnx2x_frag_free(fp, new_data);
805 drop:
806         /* drop the packet and keep the buffer in the bin */
807         DP(NETIF_MSG_RX_STATUS,
808            "Failed to allocate or map a new skb - dropping packet!\n");
809         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
810 }
811
812 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
813                                u16 index, gfp_t gfp_mask)
814 {
815         u8 *data;
816         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
817         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
818         dma_addr_t mapping;
819
820         data = bnx2x_frag_alloc(fp, gfp_mask);
821         if (unlikely(data == NULL))
822                 return -ENOMEM;
823
824         mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
825                                  fp->rx_buf_size,
826                                  DMA_FROM_DEVICE);
827         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
828                 bnx2x_frag_free(fp, data);
829                 BNX2X_ERR("Can't map rx data\n");
830                 return -ENOMEM;
831         }
832
833         rx_buf->data = data;
834         dma_unmap_addr_set(rx_buf, mapping, mapping);
835
836         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
837         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
838
839         return 0;
840 }
841
842 static
843 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
844                                  struct bnx2x_fastpath *fp,
845                                  struct bnx2x_eth_q_stats *qstats)
846 {
847         /* Do nothing if no L4 csum validation was done.
848          * We do not check whether IP csum was validated. For IPv4 we assume
849          * that if the card got as far as validating the L4 csum, it also
850          * validated the IP csum. IPv6 has no IP csum.
851          */
852         if (cqe->fast_path_cqe.status_flags &
853             ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
854                 return;
855
856         /* If L4 validation was done, check if an error was found. */
857
858         if (cqe->fast_path_cqe.type_error_flags &
859             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
860              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
861                 qstats->hw_csum_err++;
862         else
863                 skb->ip_summed = CHECKSUM_UNNECESSARY;
864 }
865
866 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
867 {
868         struct bnx2x *bp = fp->bp;
869         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
870         u16 sw_comp_cons, sw_comp_prod;
871         int rx_pkt = 0;
872         union eth_rx_cqe *cqe;
873         struct eth_fast_path_rx_cqe *cqe_fp;
874
875 #ifdef BNX2X_STOP_ON_ERROR
876         if (unlikely(bp->panic))
877                 return 0;
878 #endif
879         if (budget <= 0)
880                 return rx_pkt;
881
882         bd_cons = fp->rx_bd_cons;
883         bd_prod = fp->rx_bd_prod;
884         bd_prod_fw = bd_prod;
885         sw_comp_cons = fp->rx_comp_cons;
886         sw_comp_prod = fp->rx_comp_prod;
887
888         comp_ring_cons = RCQ_BD(sw_comp_cons);
889         cqe = &fp->rx_comp_ring[comp_ring_cons];
890         cqe_fp = &cqe->fast_path_cqe;
891
892         DP(NETIF_MSG_RX_STATUS,
893            "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
894
895         while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
896                 struct sw_rx_bd *rx_buf = NULL;
897                 struct sk_buff *skb;
898                 u8 cqe_fp_flags;
899                 enum eth_rx_cqe_type cqe_fp_type;
900                 u16 len, pad, queue;
901                 u8 *data;
902                 u32 rxhash;
903                 enum pkt_hash_types rxhash_type;
904
905 #ifdef BNX2X_STOP_ON_ERROR
906                 if (unlikely(bp->panic))
907                         return 0;
908 #endif
909
910                 bd_prod = RX_BD(bd_prod);
911                 bd_cons = RX_BD(bd_cons);
912
913                 /* A rmb() is required to ensure that the CQE is not read
914                  * before it is written by the adapter DMA.  PCI ordering
915                  * rules will make sure the other fields are written before
916                  * the marker at the end of struct eth_fast_path_rx_cqe
917                  * but without rmb() a weakly ordered processor can process
918                  * stale data.  Without the barrier TPA state-machine might
919                  * enter inconsistent state and kernel stack might be
920                  * provided with incorrect packet description - these lead
921                  * to various kernel crashed.
922                  * to various kernel crashes.
923                 rmb();
924
925                 cqe_fp_flags = cqe_fp->type_error_flags;
926                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
927
928                 DP(NETIF_MSG_RX_STATUS,
929                    "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
930                    CQE_TYPE(cqe_fp_flags),
931                    cqe_fp_flags, cqe_fp->status_flags,
932                    le32_to_cpu(cqe_fp->rss_hash_result),
933                    le16_to_cpu(cqe_fp->vlan_tag),
934                    le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
935
936                 /* is this a slowpath msg? */
937                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
938                         bnx2x_sp_event(fp, cqe);
939                         goto next_cqe;
940                 }
941
942                 rx_buf = &fp->rx_buf_ring[bd_cons];
943                 data = rx_buf->data;
944
945                 if (!CQE_TYPE_FAST(cqe_fp_type)) {
946                         struct bnx2x_agg_info *tpa_info;
947                         u16 frag_size, pages;
948 #ifdef BNX2X_STOP_ON_ERROR
949                         /* sanity check */
950                         if (fp->mode == TPA_MODE_DISABLED &&
951                             (CQE_TYPE_START(cqe_fp_type) ||
952                              CQE_TYPE_STOP(cqe_fp_type)))
953                                 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
954                                           CQE_TYPE(cqe_fp_type));
955 #endif
956
957                         if (CQE_TYPE_START(cqe_fp_type)) {
958                                 u16 queue = cqe_fp->queue_index;
959                                 DP(NETIF_MSG_RX_STATUS,
960                                    "calling tpa_start on queue %d\n",
961                                    queue);
962
963                                 bnx2x_tpa_start(fp, queue,
964                                                 bd_cons, bd_prod,
965                                                 cqe_fp);
966
967                                 goto next_rx;
968                         }
969                         queue = cqe->end_agg_cqe.queue_index;
970                         tpa_info = &fp->tpa_info[queue];
971                         DP(NETIF_MSG_RX_STATUS,
972                            "calling tpa_stop on queue %d\n",
973                            queue);
974
975                         frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
976                                     tpa_info->len_on_bd;
977
978                         if (fp->mode == TPA_MODE_GRO)
979                                 pages = (frag_size + tpa_info->full_page - 1) /
980                                          tpa_info->full_page;
981                         else
982                                 pages = SGE_PAGE_ALIGN(frag_size) >>
983                                         SGE_PAGE_SHIFT;
984
985                         bnx2x_tpa_stop(bp, fp, tpa_info, pages,
986                                        &cqe->end_agg_cqe, comp_ring_cons);
987 #ifdef BNX2X_STOP_ON_ERROR
988                         if (bp->panic)
989                                 return 0;
990 #endif
991
992                         bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
993                         goto next_cqe;
994                 }
995                 /* non TPA */
996                 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
997                 pad = cqe_fp->placement_offset;
998                 dma_sync_single_for_cpu(&bp->pdev->dev,
999                                         dma_unmap_addr(rx_buf, mapping),
1000                                         pad + RX_COPY_THRESH,
1001                                         DMA_FROM_DEVICE);
1002                 pad += NET_SKB_PAD;
1003                 prefetch(data + pad); /* speedup eth_type_trans() */
1004                 /* is this an error packet? */
1005                 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1006                         DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1007                            "ERROR  flags %x  rx packet %u\n",
1008                            cqe_fp_flags, sw_comp_cons);
1009                         bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1010                         goto reuse_rx;
1011                 }
1012
1013                 /* Since we don't have a jumbo ring,
1014                  * copy small packets if mtu > 1500
1015                  */
1016                 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1017                     (len <= RX_COPY_THRESH)) {
1018                         skb = napi_alloc_skb(&fp->napi, len);
1019                         if (skb == NULL) {
1020                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1021                                    "ERROR  packet dropped because of alloc failure\n");
1022                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1023                                 goto reuse_rx;
1024                         }
1025                         memcpy(skb->data, data + pad, len);
1026                         bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1027                 } else {
1028                         if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1029                                                        GFP_ATOMIC) == 0)) {
1030                                 dma_unmap_single(&bp->pdev->dev,
1031                                                  dma_unmap_addr(rx_buf, mapping),
1032                                                  fp->rx_buf_size,
1033                                                  DMA_FROM_DEVICE);
1034                                 skb = build_skb(data, fp->rx_frag_size);
1035                                 if (unlikely(!skb)) {
1036                                         bnx2x_frag_free(fp, data);
1037                                         bnx2x_fp_qstats(bp, fp)->
1038                                                         rx_skb_alloc_failed++;
1039                                         goto next_rx;
1040                                 }
1041                                 skb_reserve(skb, pad);
1042                         } else {
1043                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1044                                    "ERROR  packet dropped because of alloc failure\n");
1045                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1046 reuse_rx:
1047                                 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1048                                 goto next_rx;
1049                         }
1050                 }
1051
1052                 skb_put(skb, len);
1053                 skb->protocol = eth_type_trans(skb, bp->dev);
1054
1055                 /* Set Toeplitz hash for a non-LRO skb */
1056                 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1057                 skb_set_hash(skb, rxhash, rxhash_type);
1058
1059                 skb_checksum_none_assert(skb);
1060
1061                 if (bp->dev->features & NETIF_F_RXCSUM)
1062                         bnx2x_csum_validate(skb, cqe, fp,
1063                                             bnx2x_fp_qstats(bp, fp));
1064
1065                 skb_record_rx_queue(skb, fp->rx_queue);
1066
1067                 /* Check if this packet was timestamped */
1068                 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1069                              (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1070                         bnx2x_set_rx_ts(bp, skb);
1071
1072                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1073                     PARSING_FLAGS_VLAN)
1074                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1075                                                le16_to_cpu(cqe_fp->vlan_tag));
1076
1077                 skb_mark_napi_id(skb, &fp->napi);
1078
1079                 if (bnx2x_fp_ll_polling(fp))
1080                         netif_receive_skb(skb);
1081                 else
1082                         napi_gro_receive(&fp->napi, skb);
1083 next_rx:
1084                 rx_buf->data = NULL;
1085
1086                 bd_cons = NEXT_RX_IDX(bd_cons);
1087                 bd_prod = NEXT_RX_IDX(bd_prod);
1088                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1089                 rx_pkt++;
1090 next_cqe:
1091                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1092                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1093
1094                 /* mark CQE as free */
1095                 BNX2X_SEED_CQE(cqe_fp);
1096
1097                 if (rx_pkt == budget)
1098                         break;
1099
1100                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1101                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1102                 cqe_fp = &cqe->fast_path_cqe;
1103         } /* while */
1104
1105         fp->rx_bd_cons = bd_cons;
1106         fp->rx_bd_prod = bd_prod_fw;
1107         fp->rx_comp_cons = sw_comp_cons;
1108         fp->rx_comp_prod = sw_comp_prod;
1109
1110         /* Update producers */
1111         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1112                              fp->rx_sge_prod);
1113
1114         fp->rx_pkt += rx_pkt;
1115         fp->rx_calls++;
1116
1117         return rx_pkt;
1118 }
1119
1120 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1121 {
1122         struct bnx2x_fastpath *fp = fp_cookie;
1123         struct bnx2x *bp = fp->bp;
1124         u8 cos;
1125
1126         DP(NETIF_MSG_INTR,
1127            "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1128            fp->index, fp->fw_sb_id, fp->igu_sb_id);
1129
1130         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1131
1132 #ifdef BNX2X_STOP_ON_ERROR
1133         if (unlikely(bp->panic))
1134                 return IRQ_HANDLED;
1135 #endif
1136
1137         /* Handle Rx and Tx according to MSI-X vector */
1138         for_each_cos_in_tx_queue(fp, cos)
1139                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1140
1141         prefetch(&fp->sb_running_index[SM_RX_ID]);
1142         napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1143
1144         return IRQ_HANDLED;
1145 }
1146
1147 /* HW Lock for shared dual port PHYs */
1148 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1149 {
1150         mutex_lock(&bp->port.phy_mutex);
1151
1152         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1153 }
1154
1155 void bnx2x_release_phy_lock(struct bnx2x *bp)
1156 {
1157         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1158
1159         mutex_unlock(&bp->port.phy_mutex);
1160 }
1161
1162 /* calculates MF speed according to current linespeed and MF configuration */
1163 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1164 {
1165         u16 line_speed = bp->link_vars.line_speed;
1166         if (IS_MF(bp)) {
1167                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1168                                                    bp->mf_config[BP_VN(bp)]);
1169
1170                 /* Calculate the current MAX line speed limit for the MF
1171                  * devices
1172                  */
1173                 if (IS_MF_SI(bp))
1174                         line_speed = (line_speed * maxCfg) / 100;
1175                 else { /* SD mode */
1176                         u16 vn_max_rate = maxCfg * 100;
1177
1178                         if (vn_max_rate < line_speed)
1179                                 line_speed = vn_max_rate;
1180                 }
1181         }
1182
1183         return line_speed;
1184 }
1185
1186 /**
1187  * bnx2x_fill_report_data - fill link report data to report
1188  *
1189  * @bp:         driver handle
1190  * @data:       link state to update
1191  *
1192  * It uses non-atomic bit operations because it is called under the mutex.
1193  */
1194 static void bnx2x_fill_report_data(struct bnx2x *bp,
1195                                    struct bnx2x_link_report_data *data)
1196 {
1197         memset(data, 0, sizeof(*data));
1198
1199         if (IS_PF(bp)) {
1200                 /* Fill the report data: effective line speed */
1201                 data->line_speed = bnx2x_get_mf_speed(bp);
1202
1203                 /* Link is down */
1204                 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1205                         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1206                                   &data->link_report_flags);
1207
1208                 if (!BNX2X_NUM_ETH_QUEUES(bp))
1209                         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1210                                   &data->link_report_flags);
1211
1212                 /* Full DUPLEX */
1213                 if (bp->link_vars.duplex == DUPLEX_FULL)
1214                         __set_bit(BNX2X_LINK_REPORT_FD,
1215                                   &data->link_report_flags);
1216
1217                 /* Rx Flow Control is ON */
1218                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1219                         __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1220                                   &data->link_report_flags);
1221
1222                 /* Tx Flow Control is ON */
1223                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1224                         __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1225                                   &data->link_report_flags);
1226         } else { /* VF */
1227                 *data = bp->vf_link_vars;
1228         }
1229 }
1230
1231 /**
1232  * bnx2x_link_report - report link status to OS.
1233  *
1234  * @bp:         driver handle
1235  *
1236  * Calls the __bnx2x_link_report() under the same locking scheme
1237  * as the link/PHY state managing code to ensure consistent link
1238  * reporting.
1239  */
1240
1241 void bnx2x_link_report(struct bnx2x *bp)
1242 {
1243         bnx2x_acquire_phy_lock(bp);
1244         __bnx2x_link_report(bp);
1245         bnx2x_release_phy_lock(bp);
1246 }
1247
1248 /**
1249  * __bnx2x_link_report - report link status to OS.
1250  *
1251  * @bp:         driver handle
1252  *
1253  * Non-atomic implementation.
1254  * Should be called under the phy_lock.
1255  */
1256 void __bnx2x_link_report(struct bnx2x *bp)
1257 {
1258         struct bnx2x_link_report_data cur_data;
1259
1260         /* reread mf_cfg */
1261         if (IS_PF(bp) && !CHIP_IS_E1(bp))
1262                 bnx2x_read_mf_cfg(bp);
1263
1264         /* Read the current link report info */
1265         bnx2x_fill_report_data(bp, &cur_data);
1266
1267         /* Don't report link down or exactly the same link status twice */
1268         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1269             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1270                       &bp->last_reported_link.link_report_flags) &&
1271              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1272                       &cur_data.link_report_flags)))
1273                 return;
1274
1275         bp->link_cnt++;
1276
1277         /* We are going to report new link parameters now -
1278          * remember the current data for the next time.
1279          */
1280         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1281
1282         /* propagate status to VFs */
1283         if (IS_PF(bp))
1284                 bnx2x_iov_link_update(bp);
1285
1286         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1287                      &cur_data.link_report_flags)) {
1288                 netif_carrier_off(bp->dev);
1289                 netdev_err(bp->dev, "NIC Link is Down\n");
1290                 return;
1291         } else {
1292                 const char *duplex;
1293                 const char *flow;
1294
1295                 netif_carrier_on(bp->dev);
1296
1297                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1298                                        &cur_data.link_report_flags))
1299                         duplex = "full";
1300                 else
1301                         duplex = "half";
1302
1303                 /* Handle the FC at the end so that only these flags can
1304                  * possibly be set. This way we can easily check whether FC
1305                  * is enabled.
1306                  */
1307                 if (cur_data.link_report_flags) {
1308                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1309                                      &cur_data.link_report_flags)) {
1310                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1311                                      &cur_data.link_report_flags))
1312                                         flow = "ON - receive & transmit";
1313                                 else
1314                                         flow = "ON - receive";
1315                         } else {
1316                                 flow = "ON - transmit";
1317                         }
1318                 } else {
1319                         flow = "none";
1320                 }
1321                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1322                             cur_data.line_speed, duplex, flow);
1323         }
1324 }
1325
1326 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1327 {
1328         int i;
1329
1330         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1331                 struct eth_rx_sge *sge;
1332
1333                 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1334                 sge->addr_hi =
1335                         cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1336                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1337
1338                 sge->addr_lo =
1339                         cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1340                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1341         }
1342 }
1343
1344 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1345                                 struct bnx2x_fastpath *fp, int last)
1346 {
1347         int i;
1348
1349         for (i = 0; i < last; i++) {
1350                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1351                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1352                 u8 *data = first_buf->data;
1353
1354                 if (data == NULL) {
1355                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1356                         continue;
1357                 }
1358                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1359                         dma_unmap_single(&bp->pdev->dev,
1360                                          dma_unmap_addr(first_buf, mapping),
1361                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1362                 bnx2x_frag_free(fp, data);
1363                 first_buf->data = NULL;
1364         }
1365 }
1366
1367 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1368 {
1369         int j;
1370
1371         for_each_rx_queue_cnic(bp, j) {
1372                 struct bnx2x_fastpath *fp = &bp->fp[j];
1373
1374                 fp->rx_bd_cons = 0;
1375
1376                 /* Activate BD ring */
1377                 /* Warning!
1378                  * This will generate an interrupt (to the TSTORM);
1379                  * it must only be done after the chip is initialized.
1380                  */
1381                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1382                                      fp->rx_sge_prod);
1383         }
1384 }
1385
1386 void bnx2x_init_rx_rings(struct bnx2x *bp)
1387 {
1388         int func = BP_FUNC(bp);
1389         u16 ring_prod;
1390         int i, j;
1391
1392         /* Allocate TPA resources */
1393         for_each_eth_queue(bp, j) {
1394                 struct bnx2x_fastpath *fp = &bp->fp[j];
1395
1396                 DP(NETIF_MSG_IFUP,
1397                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1398
1399                 if (fp->mode != TPA_MODE_DISABLED) {
1400                         /* Fill the per-aggregation pool */
1401                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1402                                 struct bnx2x_agg_info *tpa_info =
1403                                         &fp->tpa_info[i];
1404                                 struct sw_rx_bd *first_buf =
1405                                         &tpa_info->first_buf;
1406
1407                                 first_buf->data =
1408                                         bnx2x_frag_alloc(fp, GFP_KERNEL);
1409                                 if (!first_buf->data) {
1410                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1411                                                   j);
1412                                         bnx2x_free_tpa_pool(bp, fp, i);
1413                                         fp->mode = TPA_MODE_DISABLED;
1414                                         break;
1415                                 }
1416                                 dma_unmap_addr_set(first_buf, mapping, 0);
1417                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1418                         }
1419
1420                         /* "next page" elements initialization */
1421                         bnx2x_set_next_page_sgl(fp);
1422
1423                         /* set SGEs bit mask */
1424                         bnx2x_init_sge_ring_bit_mask(fp);
1425
1426                         /* Allocate SGEs and initialize the ring elements */
1427                         for (i = 0, ring_prod = 0;
1428                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1429
1430                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1431                                                        GFP_KERNEL) < 0) {
1432                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1433                                                   i);
1434                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1435                                                   j);
1436                                         /* Cleanup already allocated elements */
1437                                         bnx2x_free_rx_sge_range(bp, fp,
1438                                                                 ring_prod);
1439                                         bnx2x_free_tpa_pool(bp, fp,
1440                                                             MAX_AGG_QS(bp));
1441                                         fp->mode = TPA_MODE_DISABLED;
1442                                         ring_prod = 0;
1443                                         break;
1444                                 }
1445                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1446                         }
1447
1448                         fp->rx_sge_prod = ring_prod;
1449                 }
1450         }
1451
1452         for_each_eth_queue(bp, j) {
1453                 struct bnx2x_fastpath *fp = &bp->fp[j];
1454
1455                 fp->rx_bd_cons = 0;
1456
1457                 /* Activate BD ring */
1458                 /* Warning!
1459                  * this will generate an interrupt (to the TSTORM);
1460                  * it must only be done after the chip is initialized
1461                  */
1462                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1463                                      fp->rx_sge_prod);
1464
1465                 if (j != 0)
1466                         continue;
1467
1468                 if (CHIP_IS_E1(bp)) {
1469                         REG_WR(bp, BAR_USTRORM_INTMEM +
1470                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1471                                U64_LO(fp->rx_comp_mapping));
1472                         REG_WR(bp, BAR_USTRORM_INTMEM +
1473                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1474                                U64_HI(fp->rx_comp_mapping));
1475                 }
1476         }
1477 }
1478
1479 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1480 {
1481         u8 cos;
1482         struct bnx2x *bp = fp->bp;
1483
1484         for_each_cos_in_tx_queue(fp, cos) {
1485                 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1486                 unsigned pkts_compl = 0, bytes_compl = 0;
1487
1488                 u16 sw_prod = txdata->tx_pkt_prod;
1489                 u16 sw_cons = txdata->tx_pkt_cons;
1490
1491                 while (sw_cons != sw_prod) {
1492                         bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1493                                           &pkts_compl, &bytes_compl);
1494                         sw_cons++;
1495                 }
1496
1497                 netdev_tx_reset_queue(
1498                         netdev_get_tx_queue(bp->dev,
1499                                             txdata->txq_index));
1500         }
1501 }
1502
1503 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1504 {
1505         int i;
1506
1507         for_each_tx_queue_cnic(bp, i) {
1508                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1509         }
1510 }
1511
1512 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1513 {
1514         int i;
1515
1516         for_each_eth_queue(bp, i) {
1517                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1518         }
1519 }
1520
1521 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1522 {
1523         struct bnx2x *bp = fp->bp;
1524         int i;
1525
1526         /* ring wasn't allocated */
1527         if (fp->rx_buf_ring == NULL)
1528                 return;
1529
1530         for (i = 0; i < NUM_RX_BD; i++) {
1531                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1532                 u8 *data = rx_buf->data;
1533
1534                 if (data == NULL)
1535                         continue;
1536                 dma_unmap_single(&bp->pdev->dev,
1537                                  dma_unmap_addr(rx_buf, mapping),
1538                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1539
1540                 rx_buf->data = NULL;
1541                 bnx2x_frag_free(fp, data);
1542         }
1543 }
1544
1545 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1546 {
1547         int j;
1548
1549         for_each_rx_queue_cnic(bp, j) {
1550                 bnx2x_free_rx_bds(&bp->fp[j]);
1551         }
1552 }
1553
1554 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1555 {
1556         int j;
1557
1558         for_each_eth_queue(bp, j) {
1559                 struct bnx2x_fastpath *fp = &bp->fp[j];
1560
1561                 bnx2x_free_rx_bds(fp);
1562
1563                 if (fp->mode != TPA_MODE_DISABLED)
1564                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1565         }
1566 }
1567
1568 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1569 {
1570         bnx2x_free_tx_skbs_cnic(bp);
1571         bnx2x_free_rx_skbs_cnic(bp);
1572 }
1573
1574 void bnx2x_free_skbs(struct bnx2x *bp)
1575 {
1576         bnx2x_free_tx_skbs(bp);
1577         bnx2x_free_rx_skbs(bp);
1578 }
1579
1580 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1581 {
1582         /* load old values */
1583         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1584
1585         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1586                 /* leave all but MAX value */
1587                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1588
1589                 /* set new MAX value */
1590                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1591                                 & FUNC_MF_CFG_MAX_BW_MASK;
1592
1593                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1594         }
1595 }
1596
1597 /**
1598  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1599  *
1600  * @bp:         driver handle
1601  * @nvecs:      number of vectors to be released
1602  */
1603 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1604 {
1605         int i, offset = 0;
1606
1607         if (nvecs == offset)
1608                 return;
1609
1610         /* VFs don't have a default SB */
1611         if (IS_PF(bp)) {
1612                 free_irq(bp->msix_table[offset].vector, bp->dev);
1613                 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1614                    bp->msix_table[offset].vector);
1615                 offset++;
1616         }
1617
1618         if (CNIC_SUPPORT(bp)) {
1619                 if (nvecs == offset)
1620                         return;
1621                 offset++;
1622         }
1623
1624         for_each_eth_queue(bp, i) {
1625                 if (nvecs == offset)
1626                         return;
1627                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1628                    i, bp->msix_table[offset].vector);
1629
1630                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1631         }
1632 }
1633
1634 void bnx2x_free_irq(struct bnx2x *bp)
1635 {
1636         if (bp->flags & USING_MSIX_FLAG &&
1637             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1638                 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1639
1640                 /* VFs don't have a default status block */
1641                 if (IS_PF(bp))
1642                         nvecs++;
1643
1644                 bnx2x_free_msix_irqs(bp, nvecs);
1645         } else {
1646                 free_irq(bp->dev->irq, bp->dev);
1647         }
1648 }
1649
1650 int bnx2x_enable_msix(struct bnx2x *bp)
1651 {
1652         int msix_vec = 0, i, rc;
1653
1654         /* VFs don't have a default status block */
1655         if (IS_PF(bp)) {
1656                 bp->msix_table[msix_vec].entry = msix_vec;
1657                 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1658                                bp->msix_table[0].entry);
1659                 msix_vec++;
1660         }
1661
1662         /* CNIC requires an MSI-X vector for itself */
1663         if (CNIC_SUPPORT(bp)) {
1664                 bp->msix_table[msix_vec].entry = msix_vec;
1665                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1666                                msix_vec, bp->msix_table[msix_vec].entry);
1667                 msix_vec++;
1668         }
1669
1670         /* We need separate vectors for ETH queues only (not FCoE) */
1671         for_each_eth_queue(bp, i) {
1672                 bp->msix_table[msix_vec].entry = msix_vec;
1673                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1674                                msix_vec, msix_vec, i);
1675                 msix_vec++;
1676         }
1677
1678         DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1679            msix_vec);
1680
1681         rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1682                                    BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1683         /*
1684          * reconfigure number of tx/rx queues according to available
1685          * MSI-X vectors
1686          */
1687         if (rc == -ENOSPC) {
1688                 /* Get by with single vector */
1689                 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1690                 if (rc < 0) {
1691                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1692                                        rc);
1693                         goto no_msix;
1694                 }
1695
1696                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1697                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1698
1699                 BNX2X_DEV_INFO("set number of queues to 1\n");
1700                 bp->num_ethernet_queues = 1;
1701                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1702         } else if (rc < 0) {
1703                 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1704                 goto no_msix;
1705         } else if (rc < msix_vec) {
1706                 /* how many fewer vectors will we have? */
1707                 int diff = msix_vec - rc;
1708
1709                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1710
1711                 /*
1712                  * decrease number of queues by number of unallocated entries
1713                  */
1714                 bp->num_ethernet_queues -= diff;
1715                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1716
1717                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1718                                bp->num_queues);
1719         }
1720
1721         bp->flags |= USING_MSIX_FLAG;
1722
1723         return 0;
1724
1725 no_msix:
1726         /* fall back to INTx if there is not enough memory */
1727         if (rc == -ENOMEM)
1728                 bp->flags |= DISABLE_MSI_FLAG;
1729
1730         return rc;
1731 }
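
/* A rough sketch (not part of the driver) of the vector budget requested by
 * bnx2x_enable_msix() above, for a hypothetical PF with CNIC support and
 * 8 ETH queues; example_msix_budget() is purely illustrative:
 */
#if 0
static int example_msix_budget(struct bnx2x *bp)
{
	/* 1 slowpath vector (PF only) + 1 for CNIC + one per ETH queue */
	int want = (IS_PF(bp) ? 1 : 0) + CNIC_SUPPORT(bp) +
		   BNX2X_NUM_ETH_QUEUES(bp);		/* 1 + 1 + 8 = 10 */

	/* pci_enable_msix_range() may grant anything between
	 * BNX2X_MIN_MSIX_VEC_CNT(bp) and 'want'. If fewer than 'want' are
	 * granted, the ETH queue count is trimmed by the shortfall; on
	 * -ENOSPC a single vector is requested instead.
	 */
	return want;
}
#endif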
1732
1733 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1734 {
1735         int i, rc, offset = 0;
1736
1737         /* no default status block for vf */
1738         if (IS_PF(bp)) {
1739                 rc = request_irq(bp->msix_table[offset++].vector,
1740                                  bnx2x_msix_sp_int, 0,
1741                                  bp->dev->name, bp->dev);
1742                 if (rc) {
1743                         BNX2X_ERR("request sp irq failed\n");
1744                         return -EBUSY;
1745                 }
1746         }
1747
1748         if (CNIC_SUPPORT(bp))
1749                 offset++;
1750
1751         for_each_eth_queue(bp, i) {
1752                 struct bnx2x_fastpath *fp = &bp->fp[i];
1753                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1754                          bp->dev->name, i);
1755
1756                 rc = request_irq(bp->msix_table[offset].vector,
1757                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1758                 if (rc) {
1759                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1760                               bp->msix_table[offset].vector, rc);
1761                         bnx2x_free_msix_irqs(bp, offset);
1762                         return -EBUSY;
1763                 }
1764
1765                 offset++;
1766         }
1767
1768         i = BNX2X_NUM_ETH_QUEUES(bp);
1769         if (IS_PF(bp)) {
1770                 offset = 1 + CNIC_SUPPORT(bp);
1771                 netdev_info(bp->dev,
1772                             "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1773                             bp->msix_table[0].vector,
1774                             0, bp->msix_table[offset].vector,
1775                             i - 1, bp->msix_table[offset + i - 1].vector);
1776         } else {
1777                 offset = CNIC_SUPPORT(bp);
1778                 netdev_info(bp->dev,
1779                             "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1780                             0, bp->msix_table[offset].vector,
1781                             i - 1, bp->msix_table[offset + i - 1].vector);
1782         }
1783         return 0;
1784 }
1785
1786 int bnx2x_enable_msi(struct bnx2x *bp)
1787 {
1788         int rc;
1789
1790         rc = pci_enable_msi(bp->pdev);
1791         if (rc) {
1792                 BNX2X_DEV_INFO("MSI is not attainable\n");
1793                 return -1;
1794         }
1795         bp->flags |= USING_MSI_FLAG;
1796
1797         return 0;
1798 }
1799
1800 static int bnx2x_req_irq(struct bnx2x *bp)
1801 {
1802         unsigned long flags;
1803         unsigned int irq;
1804
1805         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1806                 flags = 0;
1807         else
1808                 flags = IRQF_SHARED;
1809
1810         if (bp->flags & USING_MSIX_FLAG)
1811                 irq = bp->msix_table[0].vector;
1812         else
1813                 irq = bp->pdev->irq;
1814
1815         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1816 }
1817
1818 static int bnx2x_setup_irqs(struct bnx2x *bp)
1819 {
1820         int rc = 0;
1821         if (bp->flags & USING_MSIX_FLAG &&
1822             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1823                 rc = bnx2x_req_msix_irqs(bp);
1824                 if (rc)
1825                         return rc;
1826         } else {
1827                 rc = bnx2x_req_irq(bp);
1828                 if (rc) {
1829                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1830                         return rc;
1831                 }
1832                 if (bp->flags & USING_MSI_FLAG) {
1833                         bp->dev->irq = bp->pdev->irq;
1834                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1835                                     bp->dev->irq);
1836                 }
1837                 if (bp->flags & USING_MSIX_FLAG) {
1838                         bp->dev->irq = bp->msix_table[0].vector;
1839                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1840                                     bp->dev->irq);
1841                 }
1842         }
1843
1844         return 0;
1845 }
1846
1847 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1848 {
1849         int i;
1850
1851         for_each_rx_queue_cnic(bp, i) {
1852                 bnx2x_fp_busy_poll_init(&bp->fp[i]);
1853                 napi_enable(&bnx2x_fp(bp, i, napi));
1854         }
1855 }
1856
1857 static void bnx2x_napi_enable(struct bnx2x *bp)
1858 {
1859         int i;
1860
1861         for_each_eth_queue(bp, i) {
1862                 bnx2x_fp_busy_poll_init(&bp->fp[i]);
1863                 napi_enable(&bnx2x_fp(bp, i, napi));
1864         }
1865 }
1866
1867 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1868 {
1869         int i;
1870
1871         for_each_rx_queue_cnic(bp, i) {
1872                 napi_disable(&bnx2x_fp(bp, i, napi));
1873                 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1874                         usleep_range(1000, 2000);
1875         }
1876 }
1877
1878 static void bnx2x_napi_disable(struct bnx2x *bp)
1879 {
1880         int i;
1881
1882         for_each_eth_queue(bp, i) {
1883                 napi_disable(&bnx2x_fp(bp, i, napi));
1884                 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1885                         usleep_range(1000, 2000);
1886         }
1887 }
1888
1889 void bnx2x_netif_start(struct bnx2x *bp)
1890 {
1891         if (netif_running(bp->dev)) {
1892                 bnx2x_napi_enable(bp);
1893                 if (CNIC_LOADED(bp))
1894                         bnx2x_napi_enable_cnic(bp);
1895                 bnx2x_int_enable(bp);
1896                 if (bp->state == BNX2X_STATE_OPEN)
1897                         netif_tx_wake_all_queues(bp->dev);
1898         }
1899 }
1900
1901 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1902 {
1903         bnx2x_int_disable_sync(bp, disable_hw);
1904         bnx2x_napi_disable(bp);
1905         if (CNIC_LOADED(bp))
1906                 bnx2x_napi_disable_cnic(bp);
1907 }
1908
1909 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1910                        void *accel_priv, select_queue_fallback_t fallback)
1911 {
1912         struct bnx2x *bp = netdev_priv(dev);
1913
1914         if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1915                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1916                 u16 ether_type = ntohs(hdr->h_proto);
1917
1918                 /* Skip VLAN tag if present */
1919                 if (ether_type == ETH_P_8021Q) {
1920                         struct vlan_ethhdr *vhdr =
1921                                 (struct vlan_ethhdr *)skb->data;
1922
1923                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1924                 }
1925
1926                 /* If ethertype is FCoE or FIP - use FCoE ring */
1927                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1928                         return bnx2x_fcoe_tx(bp, txq_index);
1929         }
1930
1931         /* select a non-FCoE queue */
1932         return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1933 }
1934
1935 void bnx2x_set_num_queues(struct bnx2x *bp)
1936 {
1937         /* RSS queues */
1938         bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1939
1940         /* override in STORAGE SD modes */
1941         if (IS_MF_STORAGE_ONLY(bp))
1942                 bp->num_ethernet_queues = 1;
1943
1944         /* Add special queues */
1945         bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1946         bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1947
1948         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1949 }
1950
1951 /**
1952  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1953  *
1954  * @bp:         Driver handle
1955  *
1956  * We currently support at most 16 Tx queues for each CoS, thus we will
1957  * allocate a multiple of 16 for ETH L2 rings according to the value of the
1958  * bp->max_cos.
1959  *
1960  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1961  * index after all ETH L2 indices.
1962  *
1963  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1964  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1965  * 16..31,...) with indices that are not coupled with any real Tx queue.
1966  *
1967  * The proper configuration of skb->queue_mapping is handled by
1968  * bnx2x_select_queue() and __skb_tx_hash().
1969  *
1970  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1971  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1972  */
1973 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1974 {
1975         int rc, tx, rx;
1976
1977         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1978         rx = BNX2X_NUM_ETH_QUEUES(bp);
1979
1980         /* account for fcoe queue */
1981         if (include_cnic && !NO_FCOE(bp)) {
1982                 rx++;
1983                 tx++;
1984         }
1985
1986         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1987         if (rc) {
1988                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1989                 return rc;
1990         }
1991         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1992         if (rc) {
1993                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1994                 return rc;
1995         }
1996
1997         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1998                           tx, rx);
1999
2000         return rc;
2001 }
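
/* A worked example (hypothetical numbers) of the computation above: with
 * 8 ETH queues, bp->max_cos == 3 and an FCoE L2 queue present, the driver
 * registers tx = 8 * 3 + 1 = 25 and rx = 8 + 1 = 9 real queues with the
 * stack.
 */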
2002
2003 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2004 {
2005         int i;
2006
2007         for_each_queue(bp, i) {
2008                 struct bnx2x_fastpath *fp = &bp->fp[i];
2009                 u32 mtu;
2010
2011                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2012                 if (IS_FCOE_IDX(i))
2013                         /*
2014                          * Although there are no IP frames expected to arrive on
2015                          * this ring, we still want to add an
2016                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2017                          * overrun attack.
2018                          */
2019                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2020                 else
2021                         mtu = bp->dev->mtu;
2022                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2023                                   IP_HEADER_ALIGNMENT_PADDING +
2024                                   ETH_OVREHEAD +
2025                                   mtu +
2026                                   BNX2X_FW_RX_ALIGN_END;
2027                 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
2028                 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2029                         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2030                 else
2031                         fp->rx_frag_size = 0;
2032         }
2033 }
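
/* A sketch (not part of the driver) of the sizing decision above for a
 * hypothetical 1500-byte MTU on a regular (non-FCoE) ring:
 */
#if 0
	fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + IP_HEADER_ALIGNMENT_PADDING +
			  ETH_OVREHEAD + 1500 + BNX2X_FW_RX_ALIGN_END;
	/* If rx_buf_size + NET_SKB_PAD still fits in a single page, buffers
	 * come from the page-fragment allocator (rx_frag_size != 0);
	 * otherwise rx_frag_size stays 0 and an ordinary allocation is used.
	 */
#endif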
2034
2035 static int bnx2x_init_rss(struct bnx2x *bp)
2036 {
2037         int i;
2038         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2039
2040         /* Prepare the initial contents for the indirection table if RSS is
2041          * enabled
2042          */
2043         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2044                 bp->rss_conf_obj.ind_table[i] =
2045                         bp->fp->cl_id +
2046                         ethtool_rxfh_indir_default(i, num_eth_queues);
2047
2048         /*
2049          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2050          * per-port, so if explicit configuration is needed, do it only
2051          * for a PMF.
2052          *
2053          * For 57712 and newer on the other hand it's a per-function
2054          * configuration.
2055          */
2056         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2057 }
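
/* A worked example of the table fill above: ethtool_rxfh_indir_default(i, n)
 * evaluates to i % n, so with (hypothetically) four ETH queues the
 * indirection table becomes cl_id+0, cl_id+1, cl_id+2, cl_id+3, cl_id+0, ...
 * i.e. RSS hash buckets are spread round-robin over the ETH clients.
 */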
2058
2059 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2060               bool config_hash, bool enable)
2061 {
2062         struct bnx2x_config_rss_params params = {NULL};
2063
2064         /* Although RSS is meaningless when there is a single HW queue we
2065          * still need it enabled in order to have HW Rx hash generated.
2066          *
2067          * if (!is_eth_multi(bp))
2068          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2069          */
2070
2071         params.rss_obj = rss_obj;
2072
2073         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2074
2075         if (enable) {
2076                 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2077
2078                 /* RSS configuration */
2079                 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2080                 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2081                 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2082                 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2083                 if (rss_obj->udp_rss_v4)
2084                         __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2085                 if (rss_obj->udp_rss_v6)
2086                         __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2087
2088                 if (!CHIP_IS_E1x(bp))
2089                         /* valid only for TUNN_MODE_GRE tunnel mode */
2090                         __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
2091         } else {
2092                 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2093         }
2094
2095         /* Hash bits */
2096         params.rss_result_mask = MULTI_MASK;
2097
2098         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2099
2100         if (config_hash) {
2101                 /* RSS keys */
2102                 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2103                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2104         }
2105
2106         if (IS_PF(bp))
2107                 return bnx2x_config_rss(bp, &params);
2108         else
2109                 return bnx2x_vfpf_config_rss(bp, &params);
2110 }
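
/* A minimal usage sketch (not part of the driver): enabling regular RSS on
 * the ETH RSS object with a freshly generated hash key boils down to a call
 * of the form below ('rc' is a hypothetical local):
 */
#if 0
	rc = bnx2x_rss(bp, &bp->rss_conf_obj, true /* config_hash */,
		       true /* enable */);
#endif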
2111
2112 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2113 {
2114         struct bnx2x_func_state_params func_params = {NULL};
2115
2116         /* Prepare parameters for function state transitions */
2117         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2118
2119         func_params.f_obj = &bp->func_obj;
2120         func_params.cmd = BNX2X_F_CMD_HW_INIT;
2121
2122         func_params.params.hw_init.load_phase = load_code;
2123
2124         return bnx2x_func_state_change(bp, &func_params);
2125 }
2126
2127 /*
2128  * Cleans the objects that have internal lists without sending
2129  * ramrods. Should be run when interrupts are disabled.
2130  */
2131 void bnx2x_squeeze_objects(struct bnx2x *bp)
2132 {
2133         int rc;
2134         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2135         struct bnx2x_mcast_ramrod_params rparam = {NULL};
2136         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2137
2138         /***************** Cleanup MACs' object first *************************/
2139
2140         /* Wait for completion of the requested commands */
2141         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2142         /* Perform a dry cleanup */
2143         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2144
2145         /* Clean ETH primary MAC */
2146         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2147         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2148                                  &ramrod_flags);
2149         if (rc != 0)
2150                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2151
2152         /* Cleanup UC list */
2153         vlan_mac_flags = 0;
2154         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2155         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2156                                  &ramrod_flags);
2157         if (rc != 0)
2158                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2159
2160         /***************** Now clean mcast object *****************************/
2161         rparam.mcast_obj = &bp->mcast_obj;
2162         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2163
2164         /* Add a DEL command... - Since we're doing a driver cleanup only,
2165          * we take a lock surrounding both the initial send and the CONTs,
2166          * as we don't want a true completion to disrupt us in the middle.
2167          */
2168         netif_addr_lock_bh(bp->dev);
2169         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2170         if (rc < 0)
2171                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2172                           rc);
2173
2174         /* ...and wait until all pending commands are cleared */
2175         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2176         while (rc != 0) {
2177                 if (rc < 0) {
2178                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2179                                   rc);
2180                         netif_addr_unlock_bh(bp->dev);
2181                         return;
2182                 }
2183
2184                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2185         }
2186         netif_addr_unlock_bh(bp->dev);
2187 }
2188
2189 #ifndef BNX2X_STOP_ON_ERROR
2190 #define LOAD_ERROR_EXIT(bp, label) \
2191         do { \
2192                 (bp)->state = BNX2X_STATE_ERROR; \
2193                 goto label; \
2194         } while (0)
2195
2196 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2197         do { \
2198                 bp->cnic_loaded = false; \
2199                 goto label; \
2200         } while (0)
2201 #else /*BNX2X_STOP_ON_ERROR*/
2202 #define LOAD_ERROR_EXIT(bp, label) \
2203         do { \
2204                 (bp)->state = BNX2X_STATE_ERROR; \
2205                 (bp)->panic = 1; \
2206                 return -EBUSY; \
2207         } while (0)
2208 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2209         do { \
2210                 bp->cnic_loaded = false; \
2211                 (bp)->panic = 1; \
2212                 return -EBUSY; \
2213         } while (0)
2214 #endif /*BNX2X_STOP_ON_ERROR*/
2215
2216 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2217 {
2218         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2219                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2221 }
2222
2223 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2224 {
2225         int num_groups, vf_headroom = 0;
2226         int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2227
2228         /* number of queues for statistics is number of eth queues + FCoE */
2229         u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2230
2231         /* Total number of FW statistics requests =
2232          * 1 for port stats + 1 for PF stats + potentially 2 for FCoE (fcoe proper
2233          * and fcoe l2 queue) stats + num of queues (which includes another 1
2234          * for fcoe l2 queue if applicable)
2235          */
2236         bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2237
2238         /* vf stats appear in the request list, but their data is allocated by
2239          * the VFs themselves. We don't include them in the bp->fw_stats_num as
2240          * it is used to determine where to place the vf stats queries in the
2241          * request struct
2242          */
2243         if (IS_SRIOV(bp))
2244                 vf_headroom = bnx2x_vf_headroom(bp);
2245
2246         /* Request is built from stats_query_header and an array of
2247          * stats_query_cmd_group each of which contains
2248          * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2249          * configured in the stats_query_header.
2250          */
2251         num_groups =
2252                 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2253                  (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2254                  1 : 0));
2255
2256         DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2257            bp->fw_stats_num, vf_headroom, num_groups);
2258         bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2259                 num_groups * sizeof(struct stats_query_cmd_group);
2260
2261         /* Data for statistics requests + stats_counter
2262          * stats_counter holds per-STORM counters that are incremented
2263          * when STORM has finished with the current request.
2264          * memory for FCoE offloaded statistics is counted anyway,
2265          * even if they will not be sent.
2266          * VF stats are not accounted for here as the data of VF stats is stored
2267          * in memory allocated by the VF, not here.
2268          */
2269         bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2270                 sizeof(struct per_pf_stats) +
2271                 sizeof(struct fcoe_statistics_params) +
2272                 sizeof(struct per_queue_stats) * num_queue_stats +
2273                 sizeof(struct stats_counter);
2274
2275         bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2276                                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2277         if (!bp->fw_stats)
2278                 goto alloc_mem_err;
2279
2280         /* Set shortcuts */
2281         bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2282         bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2283         bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2284                 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2285         bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2286                 bp->fw_stats_req_sz;
2287
2288         DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2289            U64_HI(bp->fw_stats_req_mapping),
2290            U64_LO(bp->fw_stats_req_mapping));
2291         DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2292            U64_HI(bp->fw_stats_data_mapping),
2293            U64_LO(bp->fw_stats_data_mapping));
2294         return 0;
2295
2296 alloc_mem_err:
2297         bnx2x_free_fw_stats_mem(bp);
2298         BNX2X_ERR("Can't allocate FW stats memory\n");
2299         return -ENOMEM;
2300 }
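
/* A worked example (hypothetical numbers) of the sizing above: with 8 ETH
 * queues and FCoE present, num_queue_stats = 9 and fw_stats_num = 2 + 1 + 9
 * = 12. With no VFs (vf_headroom = 0) and assuming STATS_QUERY_CMD_COUNT
 * were 16, num_groups = 12 / 16 + 1 = 1, so the request buffer holds one
 * stats_query_header followed by a single stats_query_cmd_group.
 */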
2301
2302 /* send load request to mcp and analyze response */
2303 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2304 {
2305         u32 param;
2306
2307         /* init fw_seq */
2308         bp->fw_seq =
2309                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2310                  DRV_MSG_SEQ_NUMBER_MASK);
2311         BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2312
2313         /* Get current FW pulse sequence */
2314         bp->fw_drv_pulse_wr_seq =
2315                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2316                  DRV_PULSE_SEQ_MASK);
2317         BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2318
2319         param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2320
2321         if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2322                 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2323
2324         /* load request */
2325         (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2326
2327         /* if mcp fails to respond we must abort */
2328         if (!(*load_code)) {
2329                 BNX2X_ERR("MCP response failure, aborting\n");
2330                 return -EBUSY;
2331         }
2332
2333         /* If mcp refused (e.g. other port is in diagnostic mode) we
2334          * must abort
2335          */
2336         if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2337                 BNX2X_ERR("MCP refused load request, aborting\n");
2338                 return -EBUSY;
2339         }
2340         return 0;
2341 }
2342
2343 /* check whether another PF has already loaded FW to chip. In
2344  * virtualized environments a pf from another VM may have already
2345  * initialized the device including loading FW
2346  */
2347 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2348 {
2349         /* is another pf loaded on this engine? */
2350         if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2351             load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2352                 /* build my FW version dword */
2353                 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2354                         (BCM_5710_FW_MINOR_VERSION << 8) +
2355                         (BCM_5710_FW_REVISION_VERSION << 16) +
2356                         (BCM_5710_FW_ENGINEERING_VERSION << 24);
2357
2358                 /* read loaded FW from chip */
2359                 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2360
2361                 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2362                    loaded_fw, my_fw);
2363
2364                 /* abort nic load if version mismatch */
2365                 if (my_fw != loaded_fw) {
2366                         if (print_err)
2367                                 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2368                                           loaded_fw, my_fw);
2369                         else
2370                                 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2371                                                loaded_fw, my_fw);
2372                         return -EBUSY;
2373                 }
2374         }
2375         return 0;
2376 }
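
/* A sketch (hypothetical version numbers, not part of the driver) of the
 * encoding compared above: a 7.13.11.0 firmware would be packed as
 */
#if 0
	u32 my_fw = 7 + (13 << 8) + (11 << 16) + (0 << 24);	/* 0x000b0d07 */
	/* ...and the load is aborted if the value read back from
	 * XSEM_REG_PRAM differs from it.
	 */
#endif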
2377
2378 /* returns the "mcp load_code" according to global load_count array */
2379 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2380 {
2381         int path = BP_PATH(bp);
2382
2383         DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2384            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2385            bnx2x_load_count[path][2]);
2386         bnx2x_load_count[path][0]++;
2387         bnx2x_load_count[path][1 + port]++;
2388         DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2389            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2390            bnx2x_load_count[path][2]);
2391         if (bnx2x_load_count[path][0] == 1)
2392                 return FW_MSG_CODE_DRV_LOAD_COMMON;
2393         else if (bnx2x_load_count[path][1 + port] == 1)
2394                 return FW_MSG_CODE_DRV_LOAD_PORT;
2395         else
2396                 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2397 }
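
/* A short example of the bookkeeping above, assuming two ports on one path:
 * the first function to load on the path sees counts (1, 1, 0) and gets
 * LOAD_COMMON; the first function on the other port then sees (2, 1, 1) and
 * gets LOAD_PORT; every later function on either port gets LOAD_FUNCTION.
 */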
2398
2399 /* mark PMF if applicable */
2400 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2401 {
2402         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2403             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2404             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2405                 bp->port.pmf = 1;
2406                 /* We need the barrier to ensure the ordering between the
2407                  * writing to bp->port.pmf here and reading it from the
2408                  * bnx2x_periodic_task().
2409                  */
2410                 smp_mb();
2411         } else {
2412                 bp->port.pmf = 0;
2413         }
2414
2415         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2416 }
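
/* A hedged sketch (not the actual bnx2x_periodic_task() code) of the barrier
 * pairing the comment above relies on; the reader side is expected to issue
 * a matching barrier before testing bp->port.pmf:
 */
#if 0
	smp_mb();	/* pairs with the smp_mb() after bp->port.pmf is set */
	if (bp->port.pmf)
		;	/* ... PMF-only periodic work ... */
#endif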
2417
2418 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2419 {
2420         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2421              (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2422             (bp->common.shmem2_base)) {
2423                 if (SHMEM2_HAS(bp, dcc_support))
2424                         SHMEM2_WR(bp, dcc_support,
2425                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2426                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2427                 if (SHMEM2_HAS(bp, afex_driver_support))
2428                         SHMEM2_WR(bp, afex_driver_support,
2429                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2430         }
2431
2432         /* Set AFEX default VLAN tag to an invalid value */
2433         bp->afex_def_vlan_tag = -1;
2434 }
2435
2436 /**
2437  * bnx2x_bz_fp - zero content of the fastpath structure.
2438  *
2439  * @bp:         driver handle
2440  * @index:      fastpath index to be zeroed
2441  *
2442  * Makes sure the contents of bp->fp[index].napi are kept
2443  * intact.
2444  */
2445 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2446 {
2447         struct bnx2x_fastpath *fp = &bp->fp[index];
2448         int cos;
2449         struct napi_struct orig_napi = fp->napi;
2450         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2451
2452         /* bzero bnx2x_fastpath contents */
2453         if (fp->tpa_info)
2454                 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2455                        sizeof(struct bnx2x_agg_info));
2456         memset(fp, 0, sizeof(*fp));
2457
2458         /* Restore the NAPI object as it has been already initialized */
2459         fp->napi = orig_napi;
2460         fp->tpa_info = orig_tpa_info;
2461         fp->bp = bp;
2462         fp->index = index;
2463         if (IS_ETH_FP(fp))
2464                 fp->max_cos = bp->max_cos;
2465         else
2466                 /* Special queues support only one CoS */
2467                 fp->max_cos = 1;
2468
2469         /* Init txdata pointers */
2470         if (IS_FCOE_FP(fp))
2471                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2472         if (IS_ETH_FP(fp))
2473                 for_each_cos_in_tx_queue(fp, cos)
2474                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2475                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
2476
2477         /* set the tpa flag for each queue. The tpa flag determines the queue
2478          * minimal size so it must be set prior to queue memory allocation
2479          */
2480         if (bp->dev->features & NETIF_F_LRO)
2481                 fp->mode = TPA_MODE_LRO;
2482         else if (bp->dev->features & NETIF_F_GRO &&
2483                  bnx2x_mtu_allows_gro(bp->dev->mtu))
2484                 fp->mode = TPA_MODE_GRO;
2485         else
2486                 fp->mode = TPA_MODE_DISABLED;
2487
2488         /* We don't want TPA if it's disabled in bp
2489          * or if this is an FCoE L2 ring.
2490          */
2491         if (bp->disable_tpa || IS_FCOE_FP(fp))
2492                 fp->mode = TPA_MODE_DISABLED;
2493 }
2494
2495 int bnx2x_load_cnic(struct bnx2x *bp)
2496 {
2497         int i, rc, port = BP_PORT(bp);
2498
2499         DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2500
2501         mutex_init(&bp->cnic_mutex);
2502
2503         if (IS_PF(bp)) {
2504                 rc = bnx2x_alloc_mem_cnic(bp);
2505                 if (rc) {
2506                         BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2507                         LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2508                 }
2509         }
2510
2511         rc = bnx2x_alloc_fp_mem_cnic(bp);
2512         if (rc) {
2513                 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2514                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2515         }
2516
2517         /* Update the number of queues with the cnic queues */
2518         rc = bnx2x_set_real_num_queues(bp, 1);
2519         if (rc) {
2520                 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2521                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2522         }
2523
2524         /* Add all CNIC NAPI objects */
2525         bnx2x_add_all_napi_cnic(bp);
2526         DP(NETIF_MSG_IFUP, "cnic napi added\n");
2527         bnx2x_napi_enable_cnic(bp);
2528
2529         rc = bnx2x_init_hw_func_cnic(bp);
2530         if (rc)
2531                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2532
2533         bnx2x_nic_init_cnic(bp);
2534
2535         if (IS_PF(bp)) {
2536                 /* Enable Timer scan */
2537                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2538
2539                 /* setup cnic queues */
2540                 for_each_cnic_queue(bp, i) {
2541                         rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2542                         if (rc) {
2543                                 BNX2X_ERR("Queue setup failed\n");
2544                                 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2545                         }
2546                 }
2547         }
2548
2549         /* Initialize Rx filter. */
2550         bnx2x_set_rx_mode_inner(bp);
2551
2552         /* re-read iscsi info */
2553         bnx2x_get_iscsi_info(bp);
2554         bnx2x_setup_cnic_irq_info(bp);
2555         bnx2x_setup_cnic_info(bp);
2556         bp->cnic_loaded = true;
2557         if (bp->state == BNX2X_STATE_OPEN)
2558                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2559
2560         DP(NETIF_MSG_IFUP, "CNIC-related load ended successfully\n");
2561
2562         return 0;
2563
2564 #ifndef BNX2X_STOP_ON_ERROR
2565 load_error_cnic2:
2566         /* Disable Timer scan */
2567         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2568
2569 load_error_cnic1:
2570         bnx2x_napi_disable_cnic(bp);
2571         /* Update the number of queues without the cnic queues */
2572         if (bnx2x_set_real_num_queues(bp, 0))
2573                 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2574 load_error_cnic0:
2575         BNX2X_ERR("CNIC-related load failed\n");
2576         bnx2x_free_fp_mem_cnic(bp);
2577         bnx2x_free_mem_cnic(bp);
2578         return rc;
2579 #endif /* ! BNX2X_STOP_ON_ERROR */
2580 }
2581
2582 /* must be called with rtnl_lock */
2583 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2584 {
2585         int port = BP_PORT(bp);
2586         int i, rc = 0, load_code = 0;
2587
2588         DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2589         DP(NETIF_MSG_IFUP,
2590            "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2591
2592 #ifdef BNX2X_STOP_ON_ERROR
2593         if (unlikely(bp->panic)) {
2594                 BNX2X_ERR("Can't load NIC when there is panic\n");
2595                 return -EPERM;
2596         }
2597 #endif
2598
2599         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2600
2601         /* zero the structure w/o any lock, before SP handler is initialized */
2602         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2603         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2604                 &bp->last_reported_link.link_report_flags);
2605
2606         if (IS_PF(bp))
2607                 /* must be called before memory allocation and HW init */
2608                 bnx2x_ilt_set_info(bp);
2609
2610         /*
2611          * Zero fastpath structures while preserving invariants: napi (which is
2612          * allocated only once), the fp index, max_cos and the bp pointer.
2613          * Also set fp->mode and txdata_ptr.
2614          */
2615         DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2616         for_each_queue(bp, i)
2617                 bnx2x_bz_fp(bp, i);
2618         memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2619                                   bp->num_cnic_queues) *
2620                                   sizeof(struct bnx2x_fp_txdata));
2621
2622         bp->fcoe_init = false;
2623
2624         /* Set the receive queues buffer size */
2625         bnx2x_set_rx_buf_size(bp);
2626
2627         if (IS_PF(bp)) {
2628                 rc = bnx2x_alloc_mem(bp);
2629                 if (rc) {
2630                         BNX2X_ERR("Unable to allocate bp memory\n");
2631                         return rc;
2632                 }
2633         }
2634
2635         /* needs to be done after alloc mem, since it self-adjusts to the amount
2636          * of memory available for RSS queues
2637          */
2638         rc = bnx2x_alloc_fp_mem(bp);
2639         if (rc) {
2640                 BNX2X_ERR("Unable to allocate memory for fps\n");
2641                 LOAD_ERROR_EXIT(bp, load_error0);
2642         }
2643
2644         /* Allocate memory for FW statistics */
2645         if (bnx2x_alloc_fw_stats_mem(bp))
2646                 LOAD_ERROR_EXIT(bp, load_error0);
2647
2648         /* request pf to initialize status blocks */
2649         if (IS_VF(bp)) {
2650                 rc = bnx2x_vfpf_init(bp);
2651                 if (rc)
2652                         LOAD_ERROR_EXIT(bp, load_error0);
2653         }
2654
2655         /* Since bnx2x_alloc_mem() may update
2656          * bp->num_queues, bnx2x_set_real_num_queues() should always
2657          * come after it. At this stage cnic queues are not counted.
2658          */
2659         rc = bnx2x_set_real_num_queues(bp, 0);
2660         if (rc) {
2661                 BNX2X_ERR("Unable to set real_num_queues\n");
2662                 LOAD_ERROR_EXIT(bp, load_error0);
2663         }
2664
2665         /* configure multi cos mappings in kernel.
2666          * this configuration may be overridden by a multi class queue
2667          * discipline or by a dcbx negotiation result.
2668          */
2669         bnx2x_setup_tc(bp->dev, bp->max_cos);
2670
2671         /* Add all NAPI objects */
2672         bnx2x_add_all_napi(bp);
2673         DP(NETIF_MSG_IFUP, "napi added\n");
2674         bnx2x_napi_enable(bp);
2675
2676         if (IS_PF(bp)) {
2677                 /* set pf load just before approaching the MCP */
2678                 bnx2x_set_pf_load(bp);
2679
2680                 /* if mcp exists send load request and analyze response */
2681                 if (!BP_NOMCP(bp)) {
2682                         /* attempt to load pf */
2683                         rc = bnx2x_nic_load_request(bp, &load_code);
2684                         if (rc)
2685                                 LOAD_ERROR_EXIT(bp, load_error1);
2686
2687                         /* what did mcp say? */
2688                         rc = bnx2x_compare_fw_ver(bp, load_code, true);
2689                         if (rc) {
2690                                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2691                                 LOAD_ERROR_EXIT(bp, load_error2);
2692                         }
2693                 } else {
2694                         load_code = bnx2x_nic_load_no_mcp(bp, port);
2695                 }
2696
2697                 /* mark pmf if applicable */
2698                 bnx2x_nic_load_pmf(bp, load_code);
2699
2700                 /* Init Function state controlling object */
2701                 bnx2x__init_func_obj(bp);
2702
2703                 /* Initialize HW */
2704                 rc = bnx2x_init_hw(bp, load_code);
2705                 if (rc) {
2706                         BNX2X_ERR("HW init failed, aborting\n");
2707                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2708                         LOAD_ERROR_EXIT(bp, load_error2);
2709                 }
2710         }
2711
2712         bnx2x_pre_irq_nic_init(bp);
2713
2714         /* Connect to IRQs */
2715         rc = bnx2x_setup_irqs(bp);
2716         if (rc) {
2717                 BNX2X_ERR("setup irqs failed\n");
2718                 if (IS_PF(bp))
2719                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2720                 LOAD_ERROR_EXIT(bp, load_error2);
2721         }
2722
2723         /* Init per-function objects */
2724         if (IS_PF(bp)) {
2725                 /* Setup NIC internals and enable interrupts */
2726                 bnx2x_post_irq_nic_init(bp, load_code);
2727
2728                 bnx2x_init_bp_objs(bp);
2729                 bnx2x_iov_nic_init(bp);
2730
2731                 /* Set AFEX default VLAN tag to an invalid value */
2732                 bp->afex_def_vlan_tag = -1;
2733                 bnx2x_nic_load_afex_dcc(bp, load_code);
2734                 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2735                 rc = bnx2x_func_start(bp);
2736                 if (rc) {
2737                         BNX2X_ERR("Function start failed!\n");
2738                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2739
2740                         LOAD_ERROR_EXIT(bp, load_error3);
2741                 }
2742
2743                 /* Send LOAD_DONE command to MCP */
2744                 if (!BP_NOMCP(bp)) {
2745                         load_code = bnx2x_fw_command(bp,
2746                                                      DRV_MSG_CODE_LOAD_DONE, 0);
2747                         if (!load_code) {
2748                                 BNX2X_ERR("MCP response failure, aborting\n");
2749                                 rc = -EBUSY;
2750                                 LOAD_ERROR_EXIT(bp, load_error3);
2751                         }
2752                 }
2753
2754                 /* initialize FW coalescing state machines in RAM */
2755                 bnx2x_update_coalesce(bp);
2756         }
2757
2758         /* setup the leading queue */
2759         rc = bnx2x_setup_leading(bp);
2760         if (rc) {
2761                 BNX2X_ERR("Setup leading failed!\n");
2762                 LOAD_ERROR_EXIT(bp, load_error3);
2763         }
2764
2765         /* set up the rest of the queues */
2766         for_each_nondefault_eth_queue(bp, i) {
2767                 if (IS_PF(bp))
2768                         rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2769                 else /* VF */
2770                         rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2771                 if (rc) {
2772                         BNX2X_ERR("Queue %d setup failed\n", i);
2773                         LOAD_ERROR_EXIT(bp, load_error3);
2774                 }
2775         }
2776
2777         /* setup rss */
2778         rc = bnx2x_init_rss(bp);
2779         if (rc) {
2780                 BNX2X_ERR("PF RSS init failed\n");
2781                 LOAD_ERROR_EXIT(bp, load_error3);
2782         }
2783
2784         /* Now that clients are configured we are ready to work */
2785         bp->state = BNX2X_STATE_OPEN;
2786
2787         /* Configure a ucast MAC */
2788         if (IS_PF(bp))
2789                 rc = bnx2x_set_eth_mac(bp, true);
2790         else /* vf */
2791                 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2792                                            true);
2793         if (rc) {
2794                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2795                 LOAD_ERROR_EXIT(bp, load_error3);
2796         }
2797
2798         if (IS_PF(bp) && bp->pending_max) {
2799                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2800                 bp->pending_max = 0;
2801         }
2802
2803         if (bp->port.pmf) {
2804                 rc = bnx2x_initial_phy_init(bp, load_mode);
2805                 if (rc)
2806                         LOAD_ERROR_EXIT(bp, load_error3);
2807         }
2808         bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2809
2810         /* Start fast path */
2811
2812         /* Initialize Rx filter. */
2813         bnx2x_set_rx_mode_inner(bp);
2814
2815         if (bp->flags & PTP_SUPPORTED) {
2816                 bnx2x_init_ptp(bp);
2817                 bnx2x_configure_ptp_filters(bp);
2818         }
2819         /* Start Tx */
2820         switch (load_mode) {
2821         case LOAD_NORMAL:
2822                 /* Tx queues should only be re-enabled */
2823                 netif_tx_wake_all_queues(bp->dev);
2824                 break;
2825
2826         case LOAD_OPEN:
2827                 netif_tx_start_all_queues(bp->dev);
2828                 smp_mb__after_atomic();
2829                 break;
2830
2831         case LOAD_DIAG:
2832         case LOAD_LOOPBACK_EXT:
2833                 bp->state = BNX2X_STATE_DIAG;
2834                 break;
2835
2836         default:
2837                 break;
2838         }
2839
2840         if (bp->port.pmf)
2841                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2842         else
2843                 bnx2x__link_status_update(bp);
2844
2845         /* start the timer */
2846         mod_timer(&bp->timer, jiffies + bp->current_interval);
2847
2848         if (CNIC_ENABLED(bp))
2849                 bnx2x_load_cnic(bp);
2850
2851         if (IS_PF(bp))
2852                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2853
2854         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2855                 /* mark driver is loaded in shmem2 */
2856                 u32 val;
2857                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2858                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2859                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2860                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2861         }
2862
2863         /* Wait for all pending SP commands to complete */
2864         if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2865                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2866                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2867                 return -EBUSY;
2868         }
2869
2870         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2871         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2872                 bnx2x_dcbx_init(bp, false);
2873
2874         DP(NETIF_MSG_IFUP, "Ending NIC load successfully\n");
2875
2876         return 0;
2877
2878 #ifndef BNX2X_STOP_ON_ERROR
2879 load_error3:
2880         if (IS_PF(bp)) {
2881                 bnx2x_int_disable_sync(bp, 1);
2882
2883                 /* Clean queueable objects */
2884                 bnx2x_squeeze_objects(bp);
2885         }
2886
2887         /* Free SKBs, SGEs, TPA pool and driver internals */
2888         bnx2x_free_skbs(bp);
2889         for_each_rx_queue(bp, i)
2890                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2891
2892         /* Release IRQs */
2893         bnx2x_free_irq(bp);
2894 load_error2:
2895         if (IS_PF(bp) && !BP_NOMCP(bp)) {
2896                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2897                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2898         }
2899
2900         bp->port.pmf = 0;
2901 load_error1:
2902         bnx2x_napi_disable(bp);
2903         bnx2x_del_all_napi(bp);
2904
2905         /* clear pf_load status, as it was already set */
2906         if (IS_PF(bp))
2907                 bnx2x_clear_pf_load(bp);
2908 load_error0:
2909         bnx2x_free_fw_stats_mem(bp);
2910         bnx2x_free_fp_mem(bp);
2911         bnx2x_free_mem(bp);
2912
2913         return rc;
2914 #endif /* ! BNX2X_STOP_ON_ERROR */
2915 }
2916
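/* Flush all Tx fastpath rings: for every queue and every CoS ring, wait
 * until the consumer catches up with the producer.  Returns the non-zero
 * status of bnx2x_clean_tx_queue() if a ring fails to drain in time.
 */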
2917 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2918 {
2919         u8 rc = 0, cos, i;
2920
2921         /* Wait until tx fastpath tasks complete */
2922         for_each_tx_queue(bp, i) {
2923                 struct bnx2x_fastpath *fp = &bp->fp[i];
2924
2925                 for_each_cos_in_tx_queue(fp, cos)
2926                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2927                 if (rc)
2928                         return rc;
2929         }
2930         return 0;
2931 }
2932
2933 /* must be called with rtnl_lock */
2934 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2935 {
2936         int i;
2937         bool global = false;
2938
2939         DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2940
2941         /* mark driver is unloaded in shmem2 */
2942         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2943                 u32 val;
2944                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2945                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2946                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2947         }
2948
2949         if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2950             (bp->state == BNX2X_STATE_CLOSED ||
2951              bp->state == BNX2X_STATE_ERROR)) {
2952                 /* We can get here if the driver has been unloaded
2953                  * during parity error recovery and is either waiting for a
2954                  * leader to complete or for other functions to unload and
2955                  * then ifdown has been issued. In this case we want to
2956                  * unload and let other functions complete the recovery
2957                  * process.
2958                  */
2959                 bp->recovery_state = BNX2X_RECOVERY_DONE;
2960                 bp->is_leader = 0;
2961                 bnx2x_release_leader_lock(bp);
2962                 smp_mb();
2963
2964                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2965                 BNX2X_ERR("Can't unload in closed or error state\n");
2966                 return -EINVAL;
2967         }
2968
2969         /* Nothing to do during unload if the previous bnx2x_nic_load()
2970          * has not completed successfully - all resources are released.
2971          *
2972          * We can get here only after an unsuccessful ndo_* callback, during
2973          * which the dev->IFF_UP flag is still on.
2974          */
2975         if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2976                 return 0;
2977
2978         /* It's important to set bp->state to a value different from
2979          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2980          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2981          */
2982         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2983         smp_mb();
2984
2985         /* indicate to VFs that the PF is going down */
2986         bnx2x_iov_channel_down(bp);
2987
2988         if (CNIC_LOADED(bp))
2989                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2990
2991         /* Stop Tx */
2992         bnx2x_tx_disable(bp);
2993         netdev_reset_tc(bp->dev);
2994
2995         bp->rx_mode = BNX2X_RX_MODE_NONE;
2996
2997         del_timer_sync(&bp->timer);
2998
2999         if (IS_PF(bp)) {
3000                 /* Set ALWAYS_ALIVE bit in shmem */
3001                 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3002                 bnx2x_drv_pulse(bp);
3003                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3004                 bnx2x_save_statistics(bp);
3005         }
3006
3007         /* wait till consumers catch up with producers in all queues */
3008         bnx2x_drain_tx_queues(bp);
3009
3010         /* if VF, indicate to the PF that this function is going down
3011          * (the PF will delete sp elements and clear initializations)
3012          */
3013         if (IS_VF(bp))
3014                 bnx2x_vfpf_close_vf(bp);
3015         else if (unload_mode != UNLOAD_RECOVERY)
3016                 /* if this is a normal/close unload, we need to clean up the chip */
3017                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3018         else {
3019                 /* Send the UNLOAD_REQUEST to the MCP */
3020                 bnx2x_send_unload_req(bp, unload_mode);
3021
3022                 /* Prevent transactions to the host from the functions on the
3023                  * engine that doesn't reset global blocks in case of a global
3024                  * attention, once the global blocks are reset and the gates are
3025                  * opened (the engine whose leader will perform the recovery
3026                  * last).
3027                  */
3028                 if (!CHIP_IS_E1x(bp))
3029                         bnx2x_pf_disable(bp);
3030
3031                 /* Disable HW interrupts, NAPI */
3032                 bnx2x_netif_stop(bp, 1);
3033                 /* Delete all NAPI objects */
3034                 bnx2x_del_all_napi(bp);
3035                 if (CNIC_LOADED(bp))
3036                         bnx2x_del_all_napi_cnic(bp);
3037                 /* Release IRQs */
3038                 bnx2x_free_irq(bp);
3039
3040                 /* Report UNLOAD_DONE to MCP */
3041                 bnx2x_send_unload_done(bp, false);
3042         }
3043
3044         /*
3045          * At this stage no more interrupts will arrive so we may safely clean
3046          * the queueable objects here in case they failed to get cleaned so far.
3047          */
3048         if (IS_PF(bp))
3049                 bnx2x_squeeze_objects(bp);
3050
3051         /* There should be no more pending SP commands at this stage */
3052         bp->sp_state = 0;
3053
3054         bp->port.pmf = 0;
3055
3056         /* clear pending work in rtnl task */
3057         bp->sp_rtnl_state = 0;
3058         smp_mb();
3059
3060         /* Free SKBs, SGEs, TPA pool and driver internals */
3061         bnx2x_free_skbs(bp);
3062         if (CNIC_LOADED(bp))
3063                 bnx2x_free_skbs_cnic(bp);
3064         for_each_rx_queue(bp, i)
3065                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3066
3067         bnx2x_free_fp_mem(bp);
3068         if (CNIC_LOADED(bp))
3069                 bnx2x_free_fp_mem_cnic(bp);
3070
3071         if (IS_PF(bp)) {
3072                 if (CNIC_LOADED(bp))
3073                         bnx2x_free_mem_cnic(bp);
3074         }
3075         bnx2x_free_mem(bp);
3076
3077         bp->state = BNX2X_STATE_CLOSED;
3078         bp->cnic_loaded = false;
3079
3080         /* Clear driver version indication in shmem */
3081         if (IS_PF(bp))
3082                 bnx2x_update_mng_version(bp);
3083
3084         /* Check if there are pending parity attentions. If there are - set
3085          * RECOVERY_IN_PROGRESS.
3086          */
3087         if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3088                 bnx2x_set_reset_in_progress(bp);
3089
3090                 /* Set RESET_IS_GLOBAL if needed */
3091                 if (global)
3092                         bnx2x_set_reset_global(bp);
3093         }
3094
3095         /* The last driver must disable the "close the gate" functionality
3096          * if there is no parity attention or "process kill" pending.
3097          */
3098         if (IS_PF(bp) &&
3099             !bnx2x_clear_pf_load(bp) &&
3100             bnx2x_reset_is_done(bp, BP_PATH(bp)))
3101                 bnx2x_disable_close_the_gate(bp);
3102
3103         DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3104
3105         return 0;
3106 }
3107
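/* Transition the device between PCI power states.  D0 clears the PME status
 * and waits out the exit from D3hot; D3hot is entered only when this is the
 * last enabled client and the chip is not emulation/FPGA, with PME enabled
 * according to the WoL setting.
 */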
3108 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3109 {
3110         u16 pmcsr;
3111
3112         /* If there is no power capability, silently succeed */
3113         if (!bp->pdev->pm_cap) {
3114                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3115                 return 0;
3116         }
3117
3118         pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3119
3120         switch (state) {
3121         case PCI_D0:
3122                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3123                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3124                                        PCI_PM_CTRL_PME_STATUS));
3125
3126                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3127                         /* delay required during transition out of D3hot */
3128                         msleep(20);
3129                 break;
3130
3131         case PCI_D3hot:
3132                 /* If there are other clients above, don't
3133                    shut down the power */
3134                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3135                         return 0;
3136                 /* Don't shut down the power for emulation and FPGA */
3137                 if (CHIP_REV_IS_SLOW(bp))
3138                         return 0;
3139
3140                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3141                 pmcsr |= 3;
3142
3143                 if (bp->wol)
3144                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3145
3146                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3147                                       pmcsr);
3148
3149                 /* No more memory access after this point until
3150                  * device is brought back to D0.
3151                  */
3152                 break;
3153
3154         default:
3155                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3156                 return -EINVAL;
3157         }
3158         return 0;
3159 }
3160
3161 /*
3162  * net_device service functions
3163  */
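/* NAPI poll handler: service Tx completions on every CoS ring of the
 * fastpath, then Rx up to the remaining budget.  When no work is left,
 * the status block index is re-read (see the rmb() comment below) before
 * completing NAPI and re-enabling the IGU interrupt.
 */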
3164 static int bnx2x_poll(struct napi_struct *napi, int budget)
3165 {
3166         int work_done = 0;
3167         u8 cos;
3168         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3169                                                  napi);
3170         struct bnx2x *bp = fp->bp;
3171
3172         while (1) {
3173 #ifdef BNX2X_STOP_ON_ERROR
3174                 if (unlikely(bp->panic)) {
3175                         napi_complete(napi);
3176                         return 0;
3177                 }
3178 #endif
3179                 if (!bnx2x_fp_lock_napi(fp))
3180                         return budget;
3181
3182                 for_each_cos_in_tx_queue(fp, cos)
3183                         if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3184                                 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3185
3186                 if (bnx2x_has_rx_work(fp)) {
3187                         work_done += bnx2x_rx_int(fp, budget - work_done);
3188
3189                         /* must not complete if we consumed full budget */
3190                         if (work_done >= budget) {
3191                                 bnx2x_fp_unlock_napi(fp);
3192                                 break;
3193                         }
3194                 }
3195
3196                 bnx2x_fp_unlock_napi(fp);
3197
3198                 /* Fall out from the NAPI loop if needed */
3199                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3200
3201                         /* No need to update SB for FCoE L2 ring as long as
3202                          * it's connected to the default SB and the SB
3203                          * has been updated when NAPI was scheduled.
3204                          */
3205                         if (IS_FCOE_FP(fp)) {
3206                                 napi_complete(napi);
3207                                 break;
3208                         }
3209                         bnx2x_update_fpsb_idx(fp);
3210                         /* bnx2x_has_rx_work() reads the status block,
3211                          * thus we need to ensure that status block indices
3212                          * have been actually read (bnx2x_update_fpsb_idx)
3213                          * prior to this check (bnx2x_has_rx_work) so that
3214                          * we won't write the "newer" value of the status block
3215                          * to IGU (if there was a DMA right after
3216                          * bnx2x_has_rx_work and if there is no rmb, the memory
3217                          * reading (bnx2x_update_fpsb_idx) may be postponed
3218                          * to right before bnx2x_ack_sb). In this case there
3219                          * will never be another interrupt until there is
3220                          * another update of the status block, while there
3221                          * is still unhandled work.
3222                          */
3223                         rmb();
3224
3225                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3226                                 napi_complete(napi);
3227                                 /* Re-enable interrupts */
3228                                 DP(NETIF_MSG_RX_STATUS,
3229                                    "Update index to %d\n", fp->fp_hc_idx);
3230                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3231                                              le16_to_cpu(fp->fp_hc_idx),
3232                                              IGU_INT_ENABLE, 1);
3233                                 break;
3234                         }
3235                 }
3236         }
3237
3238         return work_done;
3239 }
3240
3241 #ifdef CONFIG_NET_RX_BUSY_POLL
3242 /* must be called with local_bh_disable()d */
3243 int bnx2x_low_latency_recv(struct napi_struct *napi)
3244 {
3245         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3246                                                  napi);
3247         struct bnx2x *bp = fp->bp;
3248         int found = 0;
3249
3250         if ((bp->state == BNX2X_STATE_CLOSED) ||
3251             (bp->state == BNX2X_STATE_ERROR) ||
3252             (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
3253                 return LL_FLUSH_FAILED;
3254
3255         if (!bnx2x_fp_lock_poll(fp))
3256                 return LL_FLUSH_BUSY;
3257
3258         if (bnx2x_has_rx_work(fp))
3259                 found = bnx2x_rx_int(fp, 4);
3260
3261         bnx2x_fp_unlock_poll(fp);
3262
3263         return found;
3264 }
3265 #endif
3266
3267 /* we split the first BD into headers and data BDs
3268  * to ease the pain of our fellow microcode engineers;
3269  * we use one mapping for both BDs
3270  */
3271 static u16 bnx2x_tx_split(struct bnx2x *bp,
3272                           struct bnx2x_fp_txdata *txdata,
3273                           struct sw_tx_bd *tx_buf,
3274                           struct eth_tx_start_bd **tx_bd, u16 hlen,
3275                           u16 bd_prod)
3276 {
3277         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3278         struct eth_tx_bd *d_tx_bd;
3279         dma_addr_t mapping;
3280         int old_len = le16_to_cpu(h_tx_bd->nbytes);
3281
3282         /* first fix first BD */
3283         h_tx_bd->nbytes = cpu_to_le16(hlen);
3284
3285         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3286            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3287
3288         /* now get a new data BD
3289          * (after the pbd) and fill it */
3290         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3291         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3292
3293         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3294                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3295
3296         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3297         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3298         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3299
3300         /* this marks the BD as one that has no individual mapping */
3301         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3302
3303         DP(NETIF_MSG_TX_QUEUED,
3304            "TSO split data size is %d (%x:%x)\n",
3305            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3306
3307         /* update tx_bd */
3308         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3309
3310         return bd_prod;
3311 }
3312
3313 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3314 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
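/* Adjust a checksum that was computed over the wrong range: subtract
 * (fix > 0) or add (fix < 0) the partial checksum of the |fix| bytes
 * adjacent to the transport header, then return the folded result
 * byte-swapped for the FW.
 */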
3315 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3316 {
3317         __sum16 tsum = (__force __sum16) csum;
3318
3319         if (fix > 0)
3320                 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3321                                   csum_partial(t_header - fix, fix, 0)));
3322
3323         else if (fix < 0)
3324                 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3325                                   csum_partial(t_header, -fix, 0)));
3326
3327         return bswab16(tsum);
3328 }
3329
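/* Classify an outgoing skb and return a bitmask of XMIT_* flags describing
 * the required offloads: L3/L4 checksum (IPv4/IPv6, TCP vs. UDP), tunnel
 * encapsulation and GSO variants.  The result drives how the parsing BDs
 * are built in bnx2x_start_xmit().
 */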
3330 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3331 {
3332         u32 rc;
3333         __u8 prot = 0;
3334         __be16 protocol;
3335
3336         if (skb->ip_summed != CHECKSUM_PARTIAL)
3337                 return XMIT_PLAIN;
3338
3339         protocol = vlan_get_protocol(skb);
3340         if (protocol == htons(ETH_P_IPV6)) {
3341                 rc = XMIT_CSUM_V6;
3342                 prot = ipv6_hdr(skb)->nexthdr;
3343         } else {
3344                 rc = XMIT_CSUM_V4;
3345                 prot = ip_hdr(skb)->protocol;
3346         }
3347
3348         if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3349                 if (inner_ip_hdr(skb)->version == 6) {
3350                         rc |= XMIT_CSUM_ENC_V6;
3351                         if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3352                                 rc |= XMIT_CSUM_TCP;
3353                 } else {
3354                         rc |= XMIT_CSUM_ENC_V4;
3355                         if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3356                                 rc |= XMIT_CSUM_TCP;
3357                 }
3358         }
3359         if (prot == IPPROTO_TCP)
3360                 rc |= XMIT_CSUM_TCP;
3361
3362         if (skb_is_gso(skb)) {
3363                 if (skb_is_gso_v6(skb)) {
3364                         rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3365                         if (rc & XMIT_CSUM_ENC)
3366                                 rc |= XMIT_GSO_ENC_V6;
3367                 } else {
3368                         rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3369                         if (rc & XMIT_CSUM_ENC)
3370                                 rc |= XMIT_GSO_ENC_V4;
3371                 }
3372         }
3373
3374         return rc;
3375 }
3376
3377 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3378 /* check if packet requires linearization (packet is too fragmented)
3379    no need to check fragmentation if page size > 8K (there will be no
3380    violation of FW restrictions) */
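/* FW restriction being checked below: within any window of (MAX_FETCH_BD - 3)
 * consecutive BDs of an LSO packet there must be at least gso_size (MSS)
 * bytes of data; otherwise the skb has to be linearized before transmission.
 * Non-LSO packets with too many fragments are always linearized.
 */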
3381 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3382                              u32 xmit_type)
3383 {
3384         int to_copy = 0;
3385         int hlen = 0;
3386         int first_bd_sz = 0;
3387
3388         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3389         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3390
3391                 if (xmit_type & XMIT_GSO) {
3392                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3393                         /* Check if LSO packet needs to be copied:
3394                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3395                         int wnd_size = MAX_FETCH_BD - 3;
3396                         /* Number of windows to check */
3397                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3398                         int wnd_idx = 0;
3399                         int frag_idx = 0;
3400                         u32 wnd_sum = 0;
3401
3402                         /* Headers length */
3403                         hlen = (int)(skb_transport_header(skb) - skb->data) +
3404                                 tcp_hdrlen(skb);
3405
3406                         /* Amount of data (w/o headers) in the linear part of the SKB */
3407                         first_bd_sz = skb_headlen(skb) - hlen;
3408
3409                         wnd_sum  = first_bd_sz;
3410
3411                         /* Calculate the first sum - it's special */
3412                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3413                                 wnd_sum +=
3414                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3415
3416                         /* If there was data in the linear part of the skb - check it */
3417                         if (first_bd_sz > 0) {
3418                                 if (unlikely(wnd_sum < lso_mss)) {
3419                                         to_copy = 1;
3420                                         goto exit_lbl;
3421                                 }
3422
3423                                 wnd_sum -= first_bd_sz;
3424                         }
3425
3426                         /* Others are easier: run through the frag list and
3427                            check all windows */
3428                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3429                                 wnd_sum +=
3430                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3431
3432                                 if (unlikely(wnd_sum < lso_mss)) {
3433                                         to_copy = 1;
3434                                         break;
3435                                 }
3436                                 wnd_sum -=
3437                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3438                         }
3439                 } else {
3440                         /* in the non-LSO case, a too-fragmented packet should
3441                            always be linearized */
3442                         to_copy = 1;
3443                 }
3444         }
3445
3446 exit_lbl:
3447         if (unlikely(to_copy))
3448                 DP(NETIF_MSG_TX_QUEUED,
3449                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3450                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3451                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3452
3453         return to_copy;
3454 }
3455 #endif
3456
3457 /**
3458  * bnx2x_set_pbd_gso - update PBD in GSO case.
3459  *
3460  * @skb:        packet skb
3461  * @pbd:        parse BD
3462  * @xmit_type:  xmit flags
3463  */
3464 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3465                               struct eth_tx_parse_bd_e1x *pbd,
3466                               u32 xmit_type)
3467 {
3468         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3469         pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3470         pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3471
3472         if (xmit_type & XMIT_GSO_V4) {
3473                 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3474                 pbd->tcp_pseudo_csum =
3475                         bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3476                                                    ip_hdr(skb)->daddr,
3477                                                    0, IPPROTO_TCP, 0));
3478         } else {
3479                 pbd->tcp_pseudo_csum =
3480                         bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3481                                                  &ipv6_hdr(skb)->daddr,
3482                                                  0, IPPROTO_TCP, 0));
3483         }
3484
3485         pbd->global_data |=
3486                 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3487 }
3488
3489 /**
3490  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3491  *
3492  * @bp:                 driver handle
3493  * @skb:                packet skb
3494  * @parsing_data:       data to be updated
3495  * @xmit_type:          xmit flags
3496  *
3497  * 57712/578xx related, when skb has encapsulation
3498  */
3499 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3500                                  u32 *parsing_data, u32 xmit_type)
3501 {
3502         *parsing_data |=
3503                 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3504                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3505                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3506
3507         if (xmit_type & XMIT_CSUM_TCP) {
3508                 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3509                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3510                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3511
3512                 return skb_inner_transport_header(skb) +
3513                         inner_tcp_hdrlen(skb) - skb->data;
3514         }
3515
3516         /* We support checksum offload for TCP and UDP only.
3517          * No need to pass the UDP header length - it's a constant.
3518          */
3519         return skb_inner_transport_header(skb) +
3520                 sizeof(struct udphdr) - skb->data;
3521 }
3522
3523 /**
3524  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3525  *
3526  * @bp:                 driver handle
3527  * @skb:                packet skb
3528  * @parsing_data:       data to be updated
3529  * @xmit_type:          xmit flags
3530  *
3531  * 57712/578xx related
3532  */
3533 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3534                                 u32 *parsing_data, u32 xmit_type)
3535 {
3536         *parsing_data |=
3537                 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3538                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3539                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3540
3541         if (xmit_type & XMIT_CSUM_TCP) {
3542                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3543                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3544                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3545
3546                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3547         }
3548         /* We support checksum offload for TCP and UDP only.
3549          * No need to pass the UDP header length - it's a constant.
3550          */
3551         return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3552 }
3553
3554 /* set FW indication according to inner or outer protocols if tunneled */
3555 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3556                                struct eth_tx_start_bd *tx_start_bd,
3557                                u32 xmit_type)
3558 {
3559         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3560
3561         if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3562                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3563
3564         if (!(xmit_type & XMIT_CSUM_TCP))
3565                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3566 }
3567
3568 /**
3569  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3570  *
3571  * @bp:         driver handle
3572  * @skb:        packet skb
3573  * @pbd:        parse BD to be updated
3574  * @xmit_type:  xmit flags
3575  */
3576 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3577                              struct eth_tx_parse_bd_e1x *pbd,
3578                              u32 xmit_type)
3579 {
3580         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3581
3582         /* for now NS flag is not used in Linux */
3583         pbd->global_data =
3584                 cpu_to_le16(hlen |
3585                             ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3586                              ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3587
3588         pbd->ip_hlen_w = (skb_transport_header(skb) -
3589                         skb_network_header(skb)) >> 1;
3590
3591         hlen += pbd->ip_hlen_w;
3592
3593         /* We support checksum offload for TCP and UDP only */
3594         if (xmit_type & XMIT_CSUM_TCP)
3595                 hlen += tcp_hdrlen(skb) / 2;
3596         else
3597                 hlen += sizeof(struct udphdr) / 2;
3598
3599         pbd->total_hlen_w = cpu_to_le16(hlen);
3600         hlen = hlen*2;
3601
3602         if (xmit_type & XMIT_CSUM_TCP) {
3603                 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3604
3605         } else {
3606                 s8 fix = SKB_CS_OFF(skb); /* signed! */
3607
3608                 DP(NETIF_MSG_TX_QUEUED,
3609                    "hlen %d  fix %d  csum before fix %x\n",
3610                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3611
3612                 /* HW bug: fixup the CSUM */
3613                 pbd->tcp_pseudo_csum =
3614                         bnx2x_csum_fix(skb_transport_header(skb),
3615                                        SKB_CS(skb), fix);
3616
3617                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3618                    pbd->tcp_pseudo_csum);
3619         }
3620
3621         return hlen;
3622 }
3623
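/* Fill the second parsing BD and the e2 tunnel data for a GSO packet with
 * encapsulation: header-to-payload length in words, outer IP checksum info
 * (or the IPv6 outer-header flag), inner TCP sequence/flags, inner IP pseudo
 * checksum, and the outer header offsets/flags merged into global_data.
 */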
3624 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3625                                       struct eth_tx_parse_bd_e2 *pbd_e2,
3626                                       struct eth_tx_parse_2nd_bd *pbd2,
3627                                       u16 *global_data,
3628                                       u32 xmit_type)
3629 {
3630         u16 hlen_w = 0;
3631         u8 outerip_off, outerip_len = 0;
3632
3633         /* from outer IP to transport */
3634         hlen_w = (skb_inner_transport_header(skb) -
3635                   skb_network_header(skb)) >> 1;
3636
3637         /* transport len */
3638         hlen_w += inner_tcp_hdrlen(skb) >> 1;
3639
3640         pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3641
3642         /* outer IP header info */
3643         if (xmit_type & XMIT_CSUM_V4) {
3644                 struct iphdr *iph = ip_hdr(skb);
3645                 u32 csum = (__force u32)(~iph->check) -
3646                            (__force u32)iph->tot_len -
3647                            (__force u32)iph->frag_off;
3648
3649                 outerip_len = iph->ihl << 1;
3650
3651                 pbd2->fw_ip_csum_wo_len_flags_frag =
3652                         bswab16(csum_fold((__force __wsum)csum));
3653         } else {
3654                 pbd2->fw_ip_hdr_to_payload_w =
3655                         hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3656                 pbd_e2->data.tunnel_data.flags |=
3657                         ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
3658         }
3659
3660         pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3661
3662         pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3663
3664         /* inner IP header info */
3665         if (xmit_type & XMIT_CSUM_ENC_V4) {
3666                 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3667
3668                 pbd_e2->data.tunnel_data.pseudo_csum =
3669                         bswab16(~csum_tcpudp_magic(
3670                                         inner_ip_hdr(skb)->saddr,
3671                                         inner_ip_hdr(skb)->daddr,
3672                                         0, IPPROTO_TCP, 0));
3673         } else {
3674                 pbd_e2->data.tunnel_data.pseudo_csum =
3675                         bswab16(~csum_ipv6_magic(
3676                                         &inner_ipv6_hdr(skb)->saddr,
3677                                         &inner_ipv6_hdr(skb)->daddr,
3678                                         0, IPPROTO_TCP, 0));
3679         }
3680
3681         outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3682
3683         *global_data |=
3684                 outerip_off |
3685                 (outerip_len <<
3686                         ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3687                 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3688                         ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3689
3690         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3691                 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3692                 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3693         }
3694 }
3695
3696 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3697                                          u32 xmit_type)
3698 {
3699         struct ipv6hdr *ipv6;
3700
3701         if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3702                 return;
3703
3704         if (xmit_type & XMIT_GSO_ENC_V6)
3705                 ipv6 = inner_ipv6_hdr(skb);
3706         else /* XMIT_GSO_V6 */
3707                 ipv6 = ipv6_hdr(skb);
3708
3709         if (ipv6->nexthdr == NEXTHDR_IPV6)
3710                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3711 }
3712
3713 /* called with netif_tx_lock
3714  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3715  * netif_wake_queue()
3716  */
3717 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3718 {
3719         struct bnx2x *bp = netdev_priv(dev);
3720
3721         struct netdev_queue *txq;
3722         struct bnx2x_fp_txdata *txdata;
3723         struct sw_tx_bd *tx_buf;
3724         struct eth_tx_start_bd *tx_start_bd, *first_bd;
3725         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3726         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3727         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3728         struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3729         u32 pbd_e2_parsing_data = 0;
3730         u16 pkt_prod, bd_prod;
3731         int nbd, txq_index;
3732         dma_addr_t mapping;
3733         u32 xmit_type = bnx2x_xmit_type(bp, skb);
3734         int i;
3735         u8 hlen = 0;
3736         __le16 pkt_size = 0;
3737         struct ethhdr *eth;
3738         u8 mac_type = UNICAST_ADDRESS;
3739
3740 #ifdef BNX2X_STOP_ON_ERROR
3741         if (unlikely(bp->panic))
3742                 return NETDEV_TX_BUSY;
3743 #endif
3744
3745         txq_index = skb_get_queue_mapping(skb);
3746         txq = netdev_get_tx_queue(dev, txq_index);
3747
3748         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3749
3750         txdata = &bp->bnx2x_txq[txq_index];
3751
3752         /* enable this debug print to view the transmission queue being used
3753         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3754            txq_index, fp_index, txdata_index); */
3755
3756         /* enable this debug print to view the transmission details
3757         DP(NETIF_MSG_TX_QUEUED,
3758            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3759            txdata->cid, fp_index, txdata_index, txdata, fp); */
3760
3761         if (unlikely(bnx2x_tx_avail(bp, txdata) <
3762                         skb_shinfo(skb)->nr_frags +
3763                         BDS_PER_TX_PKT +
3764                         NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3765                 /* Handle special storage cases separately */
3766                 if (txdata->tx_ring_size == 0) {
3767                         struct bnx2x_eth_q_stats *q_stats =
3768                                 bnx2x_fp_qstats(bp, txdata->parent_fp);
3769                         q_stats->driver_filtered_tx_pkt++;
3770                         dev_kfree_skb(skb);
3771                         return NETDEV_TX_OK;
3772                 }
3773                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3774                 netif_tx_stop_queue(txq);
3775                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3776
3777                 return NETDEV_TX_BUSY;
3778         }
3779
3780         DP(NETIF_MSG_TX_QUEUED,
3781            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3782            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3783            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3784            skb->len);
3785
3786         eth = (struct ethhdr *)skb->data;
3787
3788         /* set flag according to packet type (UNICAST_ADDRESS is the default) */
3789         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3790                 if (is_broadcast_ether_addr(eth->h_dest))
3791                         mac_type = BROADCAST_ADDRESS;
3792                 else
3793                         mac_type = MULTICAST_ADDRESS;
3794         }
3795
3796 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3797         /* First, check if we need to linearize the skb (due to FW
3798            restrictions). No need to check fragmentation if page size > 8K
3799            (there will be no violation of FW restrictions) */
3800         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3801                 /* Statistics of linearization */
3802                 bp->lin_cnt++;
3803                 if (skb_linearize(skb) != 0) {
3804                         DP(NETIF_MSG_TX_QUEUED,
3805                            "SKB linearization failed - silently dropping this SKB\n");
3806                         dev_kfree_skb_any(skb);
3807                         return NETDEV_TX_OK;
3808                 }
3809         }
3810 #endif
3811         /* Map skb linear data for DMA */
3812         mapping = dma_map_single(&bp->pdev->dev, skb->data,
3813                                  skb_headlen(skb), DMA_TO_DEVICE);
3814         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3815                 DP(NETIF_MSG_TX_QUEUED,
3816                    "SKB mapping failed - silently dropping this SKB\n");
3817                 dev_kfree_skb_any(skb);
3818                 return NETDEV_TX_OK;
3819         }
3820         /*
3821         Please read carefully. First we use one BD which we mark as start,
3822         then we have a parsing info BD (used for TSO or xsum),
3823         and only then we have the rest of the TSO BDs.
3824         (don't forget to mark the last one as last,
3825         and to unmap only AFTER you write to the BD ...)
3826         And above all, all pbd sizes are in words - NOT DWORDS!
3827         */
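        /* Resulting BD chain for a typical offloaded packet:
         *   start BD (linear data / headers) -> parsing BD (e1x or e2, plus a
         *   2nd parsing BD for tunneled packets) -> optional split data BD for
         *   the rest of the linear data (TSO) -> one data BD per fragment.
         * nbd accounts for all of these and is written to first_bd->nbd before
         * the doorbell is rung.
         */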
3828
3829         /* get current pkt produced now - advance it just before sending packet
3830          * since mapping of pages may fail and cause packet to be dropped
3831          */
3832         pkt_prod = txdata->tx_pkt_prod;
3833         bd_prod = TX_BD(txdata->tx_bd_prod);
3834
3835         /* get a tx_buf and first BD
3836          * tx_start_bd may be changed during SPLIT,
3837          * but first_bd will always stay first
3838          */
3839         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3840         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3841         first_bd = tx_start_bd;
3842
3843         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3844
3845         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3846                 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3847                         BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3848                 } else if (bp->ptp_tx_skb) {
3849                         BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3850                 } else {
3851                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3852                         /* schedule check for Tx timestamp */
3853                         bp->ptp_tx_skb = skb_get(skb);
3854                         bp->ptp_tx_start = jiffies;
3855                         schedule_work(&bp->ptp_task);
3856                 }
3857         }
3858
3859         /* header nbd: indirectly zero other flags! */
3860         tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3861
3862         /* remember the first BD of the packet */
3863         tx_buf->first_bd = txdata->tx_bd_prod;
3864         tx_buf->skb = skb;
3865         tx_buf->flags = 0;
3866
3867         DP(NETIF_MSG_TX_QUEUED,
3868            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3869            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3870
3871         if (skb_vlan_tag_present(skb)) {
3872                 tx_start_bd->vlan_or_ethertype =
3873                     cpu_to_le16(skb_vlan_tag_get(skb));
3874                 tx_start_bd->bd_flags.as_bitfield |=
3875                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3876         } else {
3877                 /* when transmitting in a vf, start bd must hold the ethertype
3878                  * for fw to enforce it
3879                  */
3880 #ifndef BNX2X_STOP_ON_ERROR
3881                 if (IS_VF(bp))
3882 #endif
3883                         tx_start_bd->vlan_or_ethertype =
3884                                 cpu_to_le16(ntohs(eth->h_proto));
3885 #ifndef BNX2X_STOP_ON_ERROR
3886                 else
3887                         /* used by FW for packet accounting */
3888                         tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3889 #endif
3890         }
3891
3892         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3893
3894         /* turn on parsing and get a BD */
3895         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3896
3897         if (xmit_type & XMIT_CSUM)
3898                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3899
3900         if (!CHIP_IS_E1x(bp)) {
3901                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3902                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3903
3904                 if (xmit_type & XMIT_CSUM_ENC) {
3905                         u16 global_data = 0;
3906
3907                         /* Set PBD in enc checksum offload case */
3908                         hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3909                                                       &pbd_e2_parsing_data,
3910                                                       xmit_type);
3911
3912                         /* turn on 2nd parsing and get a BD */
3913                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3914
3915                         pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3916
3917                         memset(pbd2, 0, sizeof(*pbd2));
3918
3919                         pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3920                                 (skb_inner_network_header(skb) -
3921                                  skb->data) >> 1;
3922
3923                         if (xmit_type & XMIT_GSO_ENC)
3924                                 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3925                                                           &global_data,
3926                                                           xmit_type);
3927
3928                         pbd2->global_data = cpu_to_le16(global_data);
3929
3930                         /* add additional parse BD indication to start BD */
3931                         SET_FLAG(tx_start_bd->general_data,
3932                                  ETH_TX_START_BD_PARSE_NBDS, 1);
3933                         /* set encapsulation flag in start BD */
3934                         SET_FLAG(tx_start_bd->general_data,
3935                                  ETH_TX_START_BD_TUNNEL_EXIST, 1);
3936
3937                         tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3938
3939                         nbd++;
3940                 } else if (xmit_type & XMIT_CSUM) {
3941                         /* Set PBD in checksum offload case w/o encapsulation */
3942                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3943                                                      &pbd_e2_parsing_data,
3944                                                      xmit_type);
3945                 }
3946
3947                 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3948                 /* Add the macs to the parsing BD if this is a vf or if
3949                  * Tx Switching is enabled.
3950                  */
3951                 if (IS_VF(bp)) {
3952                         /* override GRE parameters in BD */
3953                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3954                                               &pbd_e2->data.mac_addr.src_mid,
3955                                               &pbd_e2->data.mac_addr.src_lo,
3956                                               eth->h_source);
3957
3958                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3959                                               &pbd_e2->data.mac_addr.dst_mid,
3960                                               &pbd_e2->data.mac_addr.dst_lo,
3961                                               eth->h_dest);
3962                 } else {
3963                         if (bp->flags & TX_SWITCHING)
3964                                 bnx2x_set_fw_mac_addr(
3965                                                 &pbd_e2->data.mac_addr.dst_hi,
3966                                                 &pbd_e2->data.mac_addr.dst_mid,
3967                                                 &pbd_e2->data.mac_addr.dst_lo,
3968                                                 eth->h_dest);
3969 #ifdef BNX2X_STOP_ON_ERROR
3970                         /* Enforce security is always set in Stop on Error -
3971                          * source mac should be present in the parsing BD
3972                          */
3973                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3974                                               &pbd_e2->data.mac_addr.src_mid,
3975                                               &pbd_e2->data.mac_addr.src_lo,
3976                                               eth->h_source);
3977 #endif
3978                 }
3979
3980                 SET_FLAG(pbd_e2_parsing_data,
3981                          ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3982         } else {
3983                 u16 global_data = 0;
3984                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3985                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3986                 /* Set PBD in checksum offload case */
3987                 if (xmit_type & XMIT_CSUM)
3988                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3989
3990                 SET_FLAG(global_data,
3991                          ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3992                 pbd_e1x->global_data |= cpu_to_le16(global_data);
3993         }
3994
3995         /* Setup the data pointer of the first BD of the packet */
3996         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3997         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3998         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3999         pkt_size = tx_start_bd->nbytes;
4000
4001         DP(NETIF_MSG_TX_QUEUED,
4002            "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4003            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4004            le16_to_cpu(tx_start_bd->nbytes),
4005            tx_start_bd->bd_flags.as_bitfield,
4006            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4007
4008         if (xmit_type & XMIT_GSO) {
4009
4010                 DP(NETIF_MSG_TX_QUEUED,
4011                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4012                    skb->len, hlen, skb_headlen(skb),
4013                    skb_shinfo(skb)->gso_size);
4014
4015                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4016
4017                 if (unlikely(skb_headlen(skb) > hlen)) {
4018                         nbd++;
4019                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4020                                                  &tx_start_bd, hlen,
4021                                                  bd_prod);
4022                 }
4023                 if (!CHIP_IS_E1x(bp))
4024                         pbd_e2_parsing_data |=
4025                                 (skb_shinfo(skb)->gso_size <<
4026                                  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4027                                  ETH_TX_PARSE_BD_E2_LSO_MSS;
4028                 else
4029                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4030         }
4031
4032         /* Set the PBD's parsing_data field if not zero
4033          * (for the chips newer than 57711).
4034          */
4035         if (pbd_e2_parsing_data)
4036                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4037
4038         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4039
4040         /* Handle fragmented skb */
4041         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4042                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4043
4044                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4045                                            skb_frag_size(frag), DMA_TO_DEVICE);
4046                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4047                         unsigned int pkts_compl = 0, bytes_compl = 0;
4048
4049                         DP(NETIF_MSG_TX_QUEUED,
4050                            "Unable to map page - dropping packet...\n");
4051
4052                         /* we need to unmap all buffers already mapped
4053                          * for this SKB;
4054                          * first_bd->nbd needs to be properly updated
4055                          * before the call to bnx2x_free_tx_pkt
4056                          */
4057                         first_bd->nbd = cpu_to_le16(nbd);
4058                         bnx2x_free_tx_pkt(bp, txdata,
4059                                           TX_BD(txdata->tx_pkt_prod),
4060                                           &pkts_compl, &bytes_compl);
4061                         return NETDEV_TX_OK;
4062                 }
4063
4064                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4065                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4066                 if (total_pkt_bd == NULL)
4067                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4068
4069                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4070                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4071                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4072                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4073                 nbd++;
4074
4075                 DP(NETIF_MSG_TX_QUEUED,
4076                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4077                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4078                    le16_to_cpu(tx_data_bd->nbytes));
4079         }
4080
4081         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4082
4083         /* update with actual num BDs */
4084         first_bd->nbd = cpu_to_le16(nbd);
4085
4086         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4087
4088         /* now send a tx doorbell, counting the next BD
4089          * if the packet contains or ends with it
4090          */
4091         if (TX_BD_POFF(bd_prod) < nbd)
4092                 nbd++;
4093
4094         /* total_pkt_bytes should be set on the first data BD if
4095          * it's not an LSO packet and there is more than one
4096          * data BD. In this case pkt_size is limited by an MTU value.
4097          * However we prefer to set it for an LSO packet (while we don't
4098          * have to) in order to save some CPU cycles in the non-LSO
4099          * case, where we care about them much more.
4100          */
4101         if (total_pkt_bd != NULL)
4102                 total_pkt_bd->total_pkt_bytes = pkt_size;
4103
4104         if (pbd_e1x)
4105                 DP(NETIF_MSG_TX_QUEUED,
4106                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4107                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4108                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4109                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4110                     le16_to_cpu(pbd_e1x->total_hlen_w));
4111         if (pbd_e2)
4112                 DP(NETIF_MSG_TX_QUEUED,
4113                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4114                    pbd_e2,
4115                    pbd_e2->data.mac_addr.dst_hi,
4116                    pbd_e2->data.mac_addr.dst_mid,
4117                    pbd_e2->data.mac_addr.dst_lo,
4118                    pbd_e2->data.mac_addr.src_hi,
4119                    pbd_e2->data.mac_addr.src_mid,
4120                    pbd_e2->data.mac_addr.src_lo,
4121                    pbd_e2->parsing_data);
4122         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4123
4124         netdev_tx_sent_queue(txq, skb->len);
4125
4126         skb_tx_timestamp(skb);
4127
4128         txdata->tx_pkt_prod++;
4129         /*
4130          * Make sure that the BD data is updated before updating the producer
4131          * since FW might read the BD right after the producer is updated.
4132          * This is only applicable for weak-ordered memory-model archs such
4133          * as IA-64. The following barrier is also mandatory since FW
4134          * assumes packets always have BDs.
4135          */
4136         wmb();
4137
4138         txdata->tx_db.data.prod += nbd;
4139         barrier();
4140
4141         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4142
4143         mmiowb();
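        /* The DOORBELL() write above tells the chip that tx_db.data.prod new
         * BDs are available; barrier() keeps the compiler from reordering the
         * producer update in tx_db.data past the read of tx_db.raw, and
         * mmiowb() orders the posted doorbell write against doorbell writes
         * issued by another CPU that takes over this tx queue afterwards.
         */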
4144
4145         txdata->tx_bd_prod += nbd;
4146
4147         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4148                 netif_tx_stop_queue(txq);
4149
4150                 /* the paired memory barrier is in bnx2x_tx_int(); we have to
4151                  * keep the ordering of the set_bit() in netif_tx_stop_queue()
4152                  * and the read of fp->bd_tx_cons */
4153                 smp_mb();
4154
4155                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4156                 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4157                         netif_tx_wake_queue(txq);
4158         }
4159         txdata->tx_pkt++;
4160
4161         return NETDEV_TX_OK;
4162 }
4163
4164 /**
4165  * bnx2x_setup_tc - routine to configure net_device for multi tc
4166  *
4167  * @dev: net device to configure
4168  * @num_tc: number of traffic classes to enable
4169  *
4170  * callback connected to the ndo_setup_tc function pointer
4171  */
4172 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4173 {
4174         int cos, prio, count, offset;
4175         struct bnx2x *bp = netdev_priv(dev);
4176
4177         /* setup tc must be called under rtnl lock */
4178         ASSERT_RTNL();
4179
4180         /* no traffic classes requested. Aborting */
4181         if (!num_tc) {
4182                 netdev_reset_tc(dev);
4183                 return 0;
4184         }
4185
4186         /* requested to support too many traffic classes */
4187         if (num_tc > bp->max_cos) {
4188                 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4189                           num_tc, bp->max_cos);
4190                 return -EINVAL;
4191         }
4192
4193         /* declare the number of supported traffic classes */
4194         if (netdev_set_num_tc(dev, num_tc)) {
4195                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4196                 return -EINVAL;
4197         }
4198
4199         /* configure priority to traffic class mapping */
4200         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4201                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4202                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4203                    "mapping priority %d to tc %d\n",
4204                    prio, bp->prio_to_cos[prio]);
4205         }
4206
4207         /* Use this configuration to differentiate tc0 from other COSes.
4208            This can be used for ETS or PFC, and saves the effort of setting
4209            up a multi-class queue disc or negotiating DCBX with a switch:
4210         netdev_set_prio_tc_map(dev, 0, 0);
4211         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4212         for (prio = 1; prio < 16; prio++) {
4213                 netdev_set_prio_tc_map(dev, prio, 1);
4214                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4215         } */
4216
4217         /* configure traffic class to transmission queue mapping */
4218         for (cos = 0; cos < bp->max_cos; cos++) {
4219                 count = BNX2X_NUM_ETH_QUEUES(bp);
4220                 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4221                 netdev_set_tc_queue(dev, cos, count, offset);
4222                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4223                    "mapping tc %d to offset %d count %d\n",
4224                    cos, offset, count);
4225         }
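        /* For example, with 4 ETH queues and max_cos == 3 this yields
         * tc0 -> txqs 0-3, tc1 -> txqs 4-7 and tc2 -> txqs 8-11, i.e. each
         * traffic class owns its own contiguous block of transmission queues.
         */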
4226
4227         return 0;
4228 }
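/* This callback is typically reached through the mqprio qdisc, e.g.
 * (illustrative command only):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 ... hw 1
 *
 * which ends up calling ndo_setup_tc() with num_tc == 3.
 */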
4229
4230 /* called with rtnl_lock */
4231 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4232 {
4233         struct sockaddr *addr = p;
4234         struct bnx2x *bp = netdev_priv(dev);
4235         int rc = 0;
4236
4237         if (!is_valid_ether_addr(addr->sa_data)) {
4238                 BNX2X_ERR("Requested MAC address is not valid\n");
4239                 return -EINVAL;
4240         }
4241
4242         if (IS_MF_STORAGE_ONLY(bp)) {
4243                 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4244                 return -EINVAL;
4245         }
4246
4247         if (netif_running(dev))  {
4248                 rc = bnx2x_set_eth_mac(bp, false);
4249                 if (rc)
4250                         return rc;
4251         }
4252
4253         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4254
4255         if (netif_running(dev))
4256                 rc = bnx2x_set_eth_mac(bp, true);
4257
4258         return rc;
4259 }
4260
4261 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4262 {
4263         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4264         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4265         u8 cos;
4266
4267         /* Common */
4268
4269         if (IS_FCOE_IDX(fp_index)) {
4270                 memset(sb, 0, sizeof(union host_hc_status_block));
4271                 fp->status_blk_mapping = 0;
4272         } else {
4273                 /* status blocks */
4274                 if (!CHIP_IS_E1x(bp))
4275                         BNX2X_PCI_FREE(sb->e2_sb,
4276                                        bnx2x_fp(bp, fp_index,
4277                                                 status_blk_mapping),
4278                                        sizeof(struct host_hc_status_block_e2));
4279                 else
4280                         BNX2X_PCI_FREE(sb->e1x_sb,
4281                                        bnx2x_fp(bp, fp_index,
4282                                                 status_blk_mapping),
4283                                        sizeof(struct host_hc_status_block_e1x));
4284         }
4285
4286         /* Rx */
4287         if (!skip_rx_queue(bp, fp_index)) {
4288                 bnx2x_free_rx_bds(fp);
4289
4290                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4291                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4292                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4293                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
4294                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4295
4296                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4297                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
4298                                sizeof(struct eth_fast_path_rx_cqe) *
4299                                NUM_RCQ_BD);
4300
4301                 /* SGE ring */
4302                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4303                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4304                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
4305                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4306         }
4307
4308         /* Tx */
4309         if (!skip_tx_queue(bp, fp_index)) {
4310                 /* fastpath tx rings: tx_buf tx_desc */
4311                 for_each_cos_in_tx_queue(fp, cos) {
4312                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4313
4314                         DP(NETIF_MSG_IFDOWN,
4315                            "freeing tx memory of fp %d cos %d cid %d\n",
4316                            fp_index, cos, txdata->cid);
4317
4318                         BNX2X_FREE(txdata->tx_buf_ring);
4319                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
4320                                 txdata->tx_desc_mapping,
4321                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4322                 }
4323         }
4324         /* end of fastpath */
4325 }
4326
4327 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4328 {
4329         int i;
4330         for_each_cnic_queue(bp, i)
4331                 bnx2x_free_fp_mem_at(bp, i);
4332 }
4333
4334 void bnx2x_free_fp_mem(struct bnx2x *bp)
4335 {
4336         int i;
4337         for_each_eth_queue(bp, i)
4338                 bnx2x_free_fp_mem_at(bp, i);
4339 }
4340
4341 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4342 {
4343         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4344         if (!CHIP_IS_E1x(bp)) {
4345                 bnx2x_fp(bp, index, sb_index_values) =
4346                         (__le16 *)status_blk.e2_sb->sb.index_values;
4347                 bnx2x_fp(bp, index, sb_running_index) =
4348                         (__le16 *)status_blk.e2_sb->sb.running_index;
4349         } else {
4350                 bnx2x_fp(bp, index, sb_index_values) =
4351                         (__le16 *)status_blk.e1x_sb->sb.index_values;
4352                 bnx2x_fp(bp, index, sb_running_index) =
4353                         (__le16 *)status_blk.e1x_sb->sb.running_index;
4354         }
4355 }
4356
4357 /* Returns the number of actually allocated BDs */
4358 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4359                               int rx_ring_size)
4360 {
4361         struct bnx2x *bp = fp->bp;
4362         u16 ring_prod, cqe_ring_prod;
4363         int i, failure_cnt = 0;
4364
4365         fp->rx_comp_cons = 0;
4366         cqe_ring_prod = ring_prod = 0;
4367
4368         /* This routine is called only during init, so
4369          * fp->eth_q_stats.rx_skb_alloc_failed = 0
4370          */
4371         for (i = 0; i < rx_ring_size; i++) {
4372                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4373                         failure_cnt++;
4374                         continue;
4375                 }
4376                 ring_prod = NEXT_RX_IDX(ring_prod);
4377                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4378                 WARN_ON(ring_prod <= (i - failure_cnt));
4379         }
4380
4381         if (failure_cnt)
4382                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4383                           i - failure_cnt, fp->index);
4384
4385         fp->rx_bd_prod = ring_prod;
4386         /* Limit the CQE producer by the CQE ring size */
4387         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4388                                cqe_ring_prod);
4389         fp->rx_pkt = fp->rx_calls = 0;
4390
4391         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4392
4393         return i - failure_cnt;
4394 }
4395
4396 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4397 {
4398         int i;
4399
4400         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4401                 struct eth_rx_cqe_next_page *nextpg;
4402
4403                 nextpg = (struct eth_rx_cqe_next_page *)
4404                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4405                 nextpg->addr_hi =
4406                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4407                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4408                 nextpg->addr_lo =
4409                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4410                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4411         }
4412 }
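/* The loop above chains the CQE ring pages together: the last CQE slot of
 * each page is turned into a "next page" pointer to the physical address of
 * the following page, and the last page wraps back to the first, so the
 * completion ring appears circular to the hardware.
 */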
4413
4414 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4415 {
4416         union host_hc_status_block *sb;
4417         struct bnx2x_fastpath *fp = &bp->fp[index];
4418         int ring_size = 0;
4419         u8 cos;
4420         int rx_ring_size = 0;
4421
4422         if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4423                 rx_ring_size = MIN_RX_SIZE_NONTPA;
4424                 bp->rx_ring_size = rx_ring_size;
4425         } else if (!bp->rx_ring_size) {
4426                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4427
4428                 if (CHIP_IS_E3(bp)) {
4429                         u32 cfg = SHMEM_RD(bp,
4430                                            dev_info.port_hw_config[BP_PORT(bp)].
4431                                            default_cfg);
4432
4433                         /* Decrease ring size for 1G functions */
4434                         if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4435                             PORT_HW_CFG_NET_SERDES_IF_SGMII)
4436                                 rx_ring_size /= 10;
4437                 }
4438
4439                 /* allocate at least the number of buffers required by FW */
4440                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4441                                      MIN_RX_SIZE_TPA, rx_ring_size);
4442
4443                 bp->rx_ring_size = rx_ring_size;
4444         } else /* if rx_ring_size specified - use it */
4445                 rx_ring_size = bp->rx_ring_size;
4446
4447         DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
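        /* In other words: unless the user forced a ring size, a regular
         * function splits MAX_RX_AVAIL evenly across its RX queues (a tenth
         * of that on E3 SGMII/1G ports), but never gets fewer buffers than
         * the FW minimum of MIN_RX_SIZE_TPA (or MIN_RX_SIZE_NONTPA when TPA
         * is disabled).
         */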
4448
4449         /* Common */
4450         sb = &bnx2x_fp(bp, index, status_blk);
4451
4452         if (!IS_FCOE_IDX(index)) {
4453                 /* status blocks */
4454                 if (!CHIP_IS_E1x(bp)) {
4455                         sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4456                                                     sizeof(struct host_hc_status_block_e2));
4457                         if (!sb->e2_sb)
4458                                 goto alloc_mem_err;
4459                 } else {
4460                         sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4461                                                      sizeof(struct host_hc_status_block_e1x));
4462                         if (!sb->e1x_sb)
4463                                 goto alloc_mem_err;
4464                 }
4465         }
4466
4467         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4468          * set shortcuts for it.
4469          */
4470         if (!IS_FCOE_IDX(index))
4471                 set_sb_shortcuts(bp, index);
4472
4473         /* Tx */
4474         if (!skip_tx_queue(bp, index)) {
4475                 /* fastpath tx rings: tx_buf tx_desc */
4476                 for_each_cos_in_tx_queue(fp, cos) {
4477                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4478
4479                         DP(NETIF_MSG_IFUP,
4480                            "allocating tx memory of fp %d cos %d\n",
4481                            index, cos);
4482
4483                         txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4484                                                       sizeof(struct sw_tx_bd),
4485                                                       GFP_KERNEL);
4486                         if (!txdata->tx_buf_ring)
4487                                 goto alloc_mem_err;
4488                         txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4489                                                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4490                         if (!txdata->tx_desc_ring)
4491                                 goto alloc_mem_err;
4492                 }
4493         }
4494
4495         /* Rx */
4496         if (!skip_rx_queue(bp, index)) {
4497                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4498                 bnx2x_fp(bp, index, rx_buf_ring) =
4499                         kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4500                 if (!bnx2x_fp(bp, index, rx_buf_ring))
4501                         goto alloc_mem_err;
4502                 bnx2x_fp(bp, index, rx_desc_ring) =
4503                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4504                                         sizeof(struct eth_rx_bd) * NUM_RX_BD);
4505                 if (!bnx2x_fp(bp, index, rx_desc_ring))
4506                         goto alloc_mem_err;
4507
4508                 /* Seed all CQEs with ones */
4509                 bnx2x_fp(bp, index, rx_comp_ring) =
4510                         BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4511                                          sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4512                 if (!bnx2x_fp(bp, index, rx_comp_ring))
4513                         goto alloc_mem_err;
4514
4515                 /* SGE ring */
4516                 bnx2x_fp(bp, index, rx_page_ring) =
4517                         kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4518                                 GFP_KERNEL);
4519                 if (!bnx2x_fp(bp, index, rx_page_ring))
4520                         goto alloc_mem_err;
4521                 bnx2x_fp(bp, index, rx_sge_ring) =
4522                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4523                                         BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4524                 if (!bnx2x_fp(bp, index, rx_sge_ring))
4525                         goto alloc_mem_err;
4526                 /* RX BD ring */
4527                 bnx2x_set_next_page_rx_bd(fp);
4528
4529                 /* CQ ring */
4530                 bnx2x_set_next_page_rx_cq(fp);
4531
4532                 /* BDs */
4533                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4534                 if (ring_size < rx_ring_size)
4535                         goto alloc_mem_err;
4536         }
4537
4538         return 0;
4539
4540 /* handles low memory cases */
4541 alloc_mem_err:
4542         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4543                                                 index, ring_size);
4544         /* FW will drop all packets if the queue is not big enough;
4545          * in that case we disable the queue.
4546          * The minimum size differs for OOO, TPA and non-TPA queues.
4547          */
4548         if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4549                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4550                         /* release memory allocated for this queue */
4551                         bnx2x_free_fp_mem_at(bp, index);
4552                         return -ENOMEM;
4553         }
4554         return 0;
4555 }
4556
4557 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4558 {
4559         if (!NO_FCOE(bp))
4560                 /* FCoE */
4561                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4562                         /* we will fail the load process instead of
4563                          * marking NO_FCOE_FLAG
4564                          */
4565                         return -ENOMEM;
4566
4567         return 0;
4568 }
4569
4570 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4571 {
4572         int i;
4573
4574         /* 1. Allocate FP for leading - fatal if error
4575          * 2. Allocate RSS - fix number of queues if error
4576          */
4577
4578         /* leading */
4579         if (bnx2x_alloc_fp_mem_at(bp, 0))
4580                 return -ENOMEM;
4581
4582         /* RSS */
4583         for_each_nondefault_eth_queue(bp, i)
4584                 if (bnx2x_alloc_fp_mem_at(bp, i))
4585                         break;
4586
4587         /* handle memory failures */
4588         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4589                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4590
4591                 WARN_ON(delta < 0);
4592                 bnx2x_shrink_eth_fp(bp, delta);
4593                 if (CNIC_SUPPORT(bp))
4594                         /* move non-eth FPs next to the last eth FP;
4595                          * must be done in that order:
4596                          * FCOE_IDX < FWD_IDX < OOO_IDX
4597                          */
4598
4599                         /* move the FCoE fp even if NO_FCOE_FLAG is on */
4600                         bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4601                 bp->num_ethernet_queues -= delta;
4602                 bp->num_queues = bp->num_ethernet_queues +
4603                                  bp->num_cnic_queues;
4604                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4605                           bp->num_queues + delta, bp->num_queues);
4606         }
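        /* For example, if allocation succeeded for queues 0-4 out of 8 ETH
         * queues, delta is 3: the RSS queue count shrinks to 5 and, when CNIC
         * is supported, the FCoE fastpath is moved down by 3 slots so that it
         * stays adjacent to the last ETH queue.
         */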
4607
4608         return 0;
4609 }
4610
4611 void bnx2x_free_mem_bp(struct bnx2x *bp)
4612 {
4613         int i;
4614
4615         for (i = 0; i < bp->fp_array_size; i++)
4616                 kfree(bp->fp[i].tpa_info);
4617         kfree(bp->fp);
4618         kfree(bp->sp_objs);
4619         kfree(bp->fp_stats);
4620         kfree(bp->bnx2x_txq);
4621         kfree(bp->msix_table);
4622         kfree(bp->ilt);
4623 }
4624
4625 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4626 {
4627         struct bnx2x_fastpath *fp;
4628         struct msix_entry *tbl;
4629         struct bnx2x_ilt *ilt;
4630         int msix_table_size = 0;
4631         int fp_array_size, txq_array_size;
4632         int i;
4633
4634         /*
4635          * The biggest MSI-X table we might need is the maximum number of
4636          * fast-path IGU SBs plus the default SB (PF only).
4637          */
4638         msix_table_size = bp->igu_sb_cnt;
4639         if (IS_PF(bp))
4640                 msix_table_size++;
4641         BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4642
4643         /* fp array: RSS plus CNIC related L2 queues */
4644         fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4645         bp->fp_array_size = fp_array_size;
4646         BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4647
4648         fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4649         if (!fp)
4650                 goto alloc_err;
4651         for (i = 0; i < bp->fp_array_size; i++) {
4652                 fp[i].tpa_info =
4653                         kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4654                                 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4655                 if (!(fp[i].tpa_info))
4656                         goto alloc_err;
4657         }
4658
4659         bp->fp = fp;
4660
4661         /* allocate sp objs */
4662         bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4663                               GFP_KERNEL);
4664         if (!bp->sp_objs)
4665                 goto alloc_err;
4666
4667         /* allocate fp_stats */
4668         bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4669                                GFP_KERNEL);
4670         if (!bp->fp_stats)
4671                 goto alloc_err;
4672
4673         /* Allocate memory for the transmission queues array */
4674         txq_array_size =
4675                 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4676         BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4677
4678         bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4679                                 GFP_KERNEL);
4680         if (!bp->bnx2x_txq)
4681                 goto alloc_err;
4682
4683         /* msix table */
4684         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4685         if (!tbl)
4686                 goto alloc_err;
4687         bp->msix_table = tbl;
4688
4689         /* ilt */
4690         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4691         if (!ilt)
4692                 goto alloc_err;
4693         bp->ilt = ilt;
4694
4695         return 0;
4696 alloc_err:
4697         bnx2x_free_mem_bp(bp);
4698         return -ENOMEM;
4699 }
4700
4701 int bnx2x_reload_if_running(struct net_device *dev)
4702 {
4703         struct bnx2x *bp = netdev_priv(dev);
4704
4705         if (unlikely(!netif_running(dev)))
4706                 return 0;
4707
4708         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4709         return bnx2x_nic_load(bp, LOAD_NORMAL);
4710 }
4711
4712 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4713 {
4714         u32 sel_phy_idx = 0;
4715         if (bp->link_params.num_phys <= 1)
4716                 return INT_PHY;
4717
4718         if (bp->link_vars.link_up) {
4719                 sel_phy_idx = EXT_PHY1;
4720                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4721                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4722                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4723                         sel_phy_idx = EXT_PHY2;
4724         } else {
4725
4726                 switch (bnx2x_phy_selection(&bp->link_params)) {
4727                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4728                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4729                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4730                        sel_phy_idx = EXT_PHY1;
4731                        break;
4732                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4733                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4734                        sel_phy_idx = EXT_PHY2;
4735                        break;
4736                 }
4737         }
4738
4739         return sel_phy_idx;
4740 }
4741 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4742 {
4743         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4744         /*
4745          * The selected (activated) PHY index is always the post-swap one
4746          * (when PHY swapping is enabled), so in that case we need to
4747          * reverse it to get the configuration index.
4748          */
4749
4750         if (bp->link_params.multi_phy_config &
4751             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4752                 if (sel_phy_idx == EXT_PHY1)
4753                         sel_phy_idx = EXT_PHY2;
4754                 else if (sel_phy_idx == EXT_PHY2)
4755                         sel_phy_idx = EXT_PHY1;
4756         }
4757         return LINK_CONFIG_IDX(sel_phy_idx);
4758 }
4759
4760 #ifdef NETDEV_FCOE_WWNN
4761 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4762 {
4763         struct bnx2x *bp = netdev_priv(dev);
4764         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4765
4766         switch (type) {
4767         case NETDEV_FCOE_WWNN:
4768                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4769                                 cp->fcoe_wwn_node_name_lo);
4770                 break;
4771         case NETDEV_FCOE_WWPN:
4772                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4773                                 cp->fcoe_wwn_port_name_lo);
4774                 break;
4775         default:
4776                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4777                 return -EINVAL;
4778         }
4779
4780         return 0;
4781 }
4782 #endif
4783
4784 /* called with rtnl_lock */
4785 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4786 {
4787         struct bnx2x *bp = netdev_priv(dev);
4788
4789         if (pci_num_vf(bp->pdev)) {
4790                 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4791                 return -EPERM;
4792         }
4793
4794         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4795                 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4796                 return -EAGAIN;
4797         }
4798
4799         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4800             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4801                 BNX2X_ERR("Can't support requested MTU size\n");
4802                 return -EINVAL;
4803         }
4804
4805         /* This does not race with packet allocation
4806          * because the actual alloc size is
4807          * only updated as part of load
4808          */
4809         dev->mtu = new_mtu;
4810
4811         return bnx2x_reload_if_running(dev);
4812 }
4813
4814 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4815                                      netdev_features_t features)
4816 {
4817         struct bnx2x *bp = netdev_priv(dev);
4818
4819         if (pci_num_vf(bp->pdev)) {
4820                 netdev_features_t changed = dev->features ^ features;
4821
4822                 /* Revert the requested feature changes if they would
4823                  * require an internal reload of the PF in bnx2x_set_features().
4824                  */
4825                 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4826                         features &= ~NETIF_F_RXCSUM;
4827                         features |= dev->features & NETIF_F_RXCSUM;
4828                 }
4829
4830                 if (changed & NETIF_F_LOOPBACK) {
4831                         features &= ~NETIF_F_LOOPBACK;
4832                         features |= dev->features & NETIF_F_LOOPBACK;
4833                 }
4834         }
4835
4836         /* TPA requires Rx CSUM offloading */
4837         if (!(features & NETIF_F_RXCSUM)) {
4838                 features &= ~NETIF_F_LRO;
4839                 features &= ~NETIF_F_GRO;
4840         }
4841
4842         return features;
4843 }
4844
4845 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4846 {
4847         struct bnx2x *bp = netdev_priv(dev);
4848         netdev_features_t changes = features ^ dev->features;
4849         bool bnx2x_reload = false;
4850         int rc;
4851
4852         /* VFs or non-SRIOV PFs should be able to change the loopback feature */
4853         if (!pci_num_vf(bp->pdev)) {
4854                 if (features & NETIF_F_LOOPBACK) {
4855                         if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4856                                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4857                                 bnx2x_reload = true;
4858                         }
4859                 } else {
4860                         if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4861                                 bp->link_params.loopback_mode = LOOPBACK_NONE;
4862                                 bnx2x_reload = true;
4863                         }
4864                 }
4865         }
4866
4867         /* if GRO is changed while LRO is enabled, don't force a reload */
4868         if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4869                 changes &= ~NETIF_F_GRO;
4870
4871         /* if GRO is changed while HW TPA is off, don't force a reload */
4872         if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4873                 changes &= ~NETIF_F_GRO;
4874
4875         if (changes)
4876                 bnx2x_reload = true;
4877
4878         if (bnx2x_reload) {
4879                 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4880                         dev->features = features;
4881                         rc = bnx2x_reload_if_running(dev);
4882                         return rc ? rc : 1;
4883                 }
4884                 /* else: bnx2x_nic_load() will be called at end of recovery */
4885         }
4886
4887         return 0;
4888 }
4889
4890 void bnx2x_tx_timeout(struct net_device *dev)
4891 {
4892         struct bnx2x *bp = netdev_priv(dev);
4893
4894 #ifdef BNX2X_STOP_ON_ERROR
4895         if (!bp->panic)
4896                 bnx2x_panic();
4897 #endif
4898
4899         /* This allows the netif to be shut down gracefully before resetting */
4900         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4901 }
4902
4903 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4904 {
4905         struct net_device *dev = pci_get_drvdata(pdev);
4906         struct bnx2x *bp;
4907
4908         if (!dev) {
4909                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4910                 return -ENODEV;
4911         }
4912         bp = netdev_priv(dev);
4913
4914         rtnl_lock();
4915
4916         pci_save_state(pdev);
4917
4918         if (!netif_running(dev)) {
4919                 rtnl_unlock();
4920                 return 0;
4921         }
4922
4923         netif_device_detach(dev);
4924
4925         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4926
4927         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4928
4929         rtnl_unlock();
4930
4931         return 0;
4932 }
4933
4934 int bnx2x_resume(struct pci_dev *pdev)
4935 {
4936         struct net_device *dev = pci_get_drvdata(pdev);
4937         struct bnx2x *bp;
4938         int rc;
4939
4940         if (!dev) {
4941                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4942                 return -ENODEV;
4943         }
4944         bp = netdev_priv(dev);
4945
4946         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4947                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4948                 return -EAGAIN;
4949         }
4950
4951         rtnl_lock();
4952
4953         pci_restore_state(pdev);
4954
4955         if (!netif_running(dev)) {
4956                 rtnl_unlock();
4957                 return 0;
4958         }
4959
4960         bnx2x_set_power_state(bp, PCI_D0);
4961         netif_device_attach(dev);
4962
4963         rc = bnx2x_nic_load(bp, LOAD_OPEN);
4964
4965         rtnl_unlock();
4966
4967         return rc;
4968 }
4969
4970 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4971                               u32 cid)
4972 {
4973         if (!cxt) {
4974                 BNX2X_ERR("bad context pointer %p\n", cxt);
4975                 return;
4976         }
4977
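        /* Fill in the CDU validation bytes so the hardware can verify, when it
         * fetches this context, that it really belongs to the given HW CID,
         * CDU region and ETH connection type (see CDU_RSRVD_VALUE_TYPE_A).
         */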
4978         /* ustorm cxt validation */
4979         cxt->ustorm_ag_context.cdu_usage =
4980                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4981                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4982         /* xcontext validation */
4983         cxt->xstorm_ag_context.cdu_reserved =
4984                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4985                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4986 }
4987
4988 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4989                                     u8 fw_sb_id, u8 sb_index,
4990                                     u8 ticks)
4991 {
4992         u32 addr = BAR_CSTRORM_INTMEM +
4993                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4994         REG_WR8(bp, addr, ticks);
4995         DP(NETIF_MSG_IFUP,
4996            "port %x fw_sb_id %d sb_index %d ticks %d\n",
4997            port, fw_sb_id, sb_index, ticks);
4998 }
4999
5000 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5001                                     u16 fw_sb_id, u8 sb_index,
5002                                     u8 disable)
5003 {
5004         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5005         u32 addr = BAR_CSTRORM_INTMEM +
5006                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5007         u8 flags = REG_RD8(bp, addr);
5008         /* clear and set */
5009         flags &= ~HC_INDEX_DATA_HC_ENABLED;
5010         flags |= enable_flag;
5011         REG_WR8(bp, addr, flags);
5012         DP(NETIF_MSG_IFUP,
5013            "port %x fw_sb_id %d sb_index %d disable %d\n",
5014            port, fw_sb_id, sb_index, disable);
5015 }
5016
5017 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5018                                     u8 sb_index, u8 disable, u16 usec)
5019 {
5020         int port = BP_PORT(bp);
5021         u8 ticks = usec / BNX2X_BTR;
5022
5023         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5024
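        /* a zero usec interval makes no sense for coalescing, so treat it as
         * an explicit request to disable this status-block index
         */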
5025         disable = disable ? 1 : (usec ? 0 : 1);
5026         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5027 }
5028
5029 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5030                             u32 verbose)
5031 {
5032         smp_mb__before_atomic();
5033         set_bit(flag, &bp->sp_rtnl_state);
5034         smp_mb__after_atomic();
5035         DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5036            flag);
5037         schedule_delayed_work(&bp->sp_rtnl_task, 0);
5038 }
5039 EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);