/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM, 0xA134) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);

static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");

static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
	if (nic->sqs_mode)
		return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
	else
		return qidx;
}

static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
					  struct sk_buff *skb)
{
	if (skb->len <= 64)
		nic->drv_stats.rx_frames_64++;
	else if (skb->len <= 127)
		nic->drv_stats.rx_frames_127++;
	else if (skb->len <= 255)
		nic->drv_stats.rx_frames_255++;
	else if (skb->len <= 511)
		nic->drv_stats.rx_frames_511++;
	else if (skb->len <= 1023)
		nic->drv_stats.rx_frames_1023++;
	else if (skb->len <= 1518)
		nic->drv_stats.rx_frames_1518++;
	else
		nic->drv_stats.rx_frames_jumbo++;
}

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant and only
 * add overhead.
 */

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}
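
/* Per-queue register blocks are strided by (1 << NIC_Q_NUM_SHIFT) bytes,
 * so 'qidx' selects which queue's copy of the register is accessed.
 * For example, the CQ doorbell is rung later in this file with
 * nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe).
 */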

/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return -EINVAL;
		msleep(sleep);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't ack to mbox msg %d from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EBUSY;
		}
	}
	return 0;
}

/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated with.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}

static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			ether_addr_copy(nic->netdev->dev_addr,
					mbx.nic_cfg.mac_addr);
		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
				    nic->netdev->name, nic->speed,
				    nic->duplex == DUPLEX_FULL ?
				    "Full duplex" : "Half duplex");
			netif_carrier_on(nic->netdev);
			netif_tx_start_all_queues(nic->netdev);
		} else {
			netdev_info(nic->netdev, "%s: Link is Down\n",
				    nic->netdev->name);
			netif_carrier_off(nic->netdev);
			netif_tx_stop_all_queues(nic->netdev);
		}
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic->sqs_count = mbx.sqs_alloc.qs_count;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_SNICVF_PTR:
		/* Primary VF: make note of secondary VF's pointer
		 * to be used while packet transmission.
		 */
		nic->snicvf[mbx.nicvf.sqs_id] =
			(struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		/* Secondary VF/Qset: make note of primary VF's pointer
		 * to be used while packet reception, to handover packet
		 * to primary VF's netdev.
		 */
		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}

void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}
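
/* Worked example of the chunked transfer above: with an indirection table
 * of 128 entries and RSS_IND_TBL_LEN_PER_MBX_MSG entries per message
 * (assumed to be 8 here; the actual value is defined elsewhere in the
 * driver), the table goes out as 16 mailbox messages: the first tagged
 * NIC_MBOX_MSG_RSS_CFG and the remaining 15 NIC_MBOX_MSG_RSS_CFG_CONT,
 * each carrying an updated tbl_offset.
 */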

void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}

static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if (cpi_alg != CPI_ALG_NONE) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	/* Using the HW reset value for now */
	rss->key[0] = 0xFEED0BADFEED0BADULL;
	rss->key[1] = 0xFEED0BADFEED0BADULL;
	rss->key[2] = 0xFEED0BADFEED0BADULL;
	rss->key[3] = 0xFEED0BADFEED0BADULL;
	rss->key[4] = 0xFEED0BADFEED0BADULL;

	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->rx_queues);
	nicvf_config_rss(nic);
	return 1;
}
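
/* Note on the hash_bits computation in nicvf_rss_init(): the value is the
 * number of usable indirection-table index bits. For example, an rss_size
 * of 128 gives ilog2(rounddown_pow_of_two(128)) = 7, while a
 * non-power-of-two size such as 100 rounds down to 64 and gives 6.
 */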

/* Request PF to allocate additional Qsets */
static void nicvf_request_sqs(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	int sqs;
	int sqs_count = nic->sqs_count;
	int rx_queues = 0, tx_queues = 0;

	/* Only primary VF should request */
	if (nic->sqs_mode || !nic->sqs_count)
		return;

	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = nic->vf_id;
	mbx.sqs_alloc.qs_count = nic->sqs_count;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		/* No response from PF */
		nic->sqs_count = 0;
		return;
	}

	/* Return if no Secondary Qsets available */
	if (!nic->sqs_count)
		return;

	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
	if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
		tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;

	/* Set no of Rx/Tx queues in each of the SQsets */
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
		mbx.nicvf.vf_id = nic->vf_id;
		mbx.nicvf.sqs_id = sqs;
		nicvf_send_msg_to_pf(nic, &mbx);

		nic->snicvf[sqs]->sqs_id = sqs;
		if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
			rx_queues -= MAX_RCV_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
			rx_queues = 0;
		}

		if (tx_queues > MAX_SND_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
			tx_queues -= MAX_SND_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
			tx_queues = 0;
		}

		nic->snicvf[sqs]->qs->cq_cnt =
		max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);

		/* Initialize secondary Qset's queues and its interrupts */
		nicvf_open(nic->snicvf[sqs]->netdev);
	}

	/* Update stack with actual Rx/Tx queue count allocated */
	if (sqs_count != nic->sqs_count)
		nicvf_set_real_num_queues(nic->netdev,
					  nic->tx_queues, nic->rx_queues);
}

/* Send this Qset's nicvf pointer to PF.
 * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
 * so that packets received by these Qsets can use primary VF's netdev.
 */
static void nicvf_send_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
	mbx.nicvf.sqs_mode = nic->sqs_mode;
	mbx.nicvf.nicvf = (u64)nic;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_primary_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	nicvf_send_msg_to_pf(nic, &mbx);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}

static int nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return 0;
}

static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cmp_queue *cq,
				  struct cqe_send_t *cqe_tx, int cqe_type)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	netdev_dbg(nic->netdev,
		   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	/* For TSO offloaded packets only one head SKB needs to be freed */
	if (skb) {
		prefetch(skb);
		dev_consume_skb_any(skb);
		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
	}
}

static inline void nicvf_set_rxhash(struct net_device *netdev,
				    struct cqe_rx_t *cqe_rx,
				    struct sk_buff *skb)
{
	u8 hash_type;
	u32 hash;

	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (cqe_rx->rss_alg) {
	case RSS_ALG_TCP_IP:
	case RSS_ALG_UDP_IP:
		hash_type = PKT_HASH_TYPE_L4;
		hash = cqe_rx->rss_tag;
		break;
	case RSS_ALG_IP:
		hash_type = PKT_HASH_TYPE_L3;
		hash = cqe_rx->rss_tag;
		break;
	default:
		hash_type = PKT_HASH_TYPE_NONE;
		hash = 0;
	}

	skb_set_hash(skb, hash, hash_type);
}

static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cqe_rx_t *cqe_rx)
{
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	int err = 0;
	int rq_idx;

	rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);

	if (nic->sqs_mode) {
		/* Use primary VF's 'nicvf' struct */
		nic = nic->pnicvf;
		netdev = nic->netdev;
	}

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return;

	skb = nicvf_get_rcv_skb(nic, cqe_rx);
	if (!skb) {
		netdev_dbg(nic->netdev, "Packet not received\n");
		return;
	}

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
			    skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	/* If error packet, drop it here */
	if (err) {
		dev_kfree_skb_any(skb);
		return;
	}

	nicvf_set_rx_frame_cnt(nic, skb);

	nicvf_set_rxhash(netdev, cqe_rx, skb);

	skb_record_rx_queue(skb, rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/* Check for stripped VLAN */
	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs((__force __be16)cqe_rx->vlan_tci));

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}

static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
		   __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
			   cq_idx, cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev, cq,
					      (void *)cq_desc, CQE_TYPE_SEND);
			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
	netdev_dbg(nic->netdev,
		   "%s CQ%d processed_cqe %d work_done %d budget %d\n",
		   __func__, cq_idx, processed_cqe, work_done, budget);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	/* Wakeup TXQ if its stopped earlier due to SQ full */
	if (tx_done) {
		netdev = nic->pnicvf->netdev;
		txq = netdev_get_tx_queue(netdev,
					  nicvf_netdev_qidx(nic, cq_idx));
		nic = nic->pnicvf;
		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
			netif_tx_start_queue(txq);
			nic->drv_stats.txq_wake++;
			if (netif_msg_tx_err(nic))
				netdev_warn(netdev,
					    "%s: Transmit queue wakeup SQ%d\n",
					    netdev->name, cq_idx);
		}
	}

	spin_unlock_bh(&cq->lock);
	return work_done;
}

static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete(napi);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static void nicvf_dump_intr_status(struct nicvf *nic)
{
	if (netif_msg_intr(nic))
		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
			    nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
}

static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	nicvf_dump_intr_status(nic);

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_HANDLED;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
{
	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
	struct nicvf *nic = cq_poll->nicvf;
	int qidx = cq_poll->cq_idx;

	nicvf_dump_intr_status(nic);

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Schedule NAPI */
	napi_schedule(&cq_poll->napi);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u8 qidx;

	nicvf_dump_intr_status(nic);

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		tasklet_hi_schedule(&nic->rbdr_task);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	tasklet_hi_schedule(&nic->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	return IRQ_HANDLED;
}

static int nicvf_enable_msix(struct nicvf *nic)
{
	int ret, vec;

	nic->num_vec = NIC_VF_MSIX_VECTORS;

	for (vec = 0; vec < nic->num_vec; vec++)
		nic->msix_entries[vec].entry = vec;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return 0;
	}
	nic->msix_enabled = 1;
	return 1;
}
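
/* Note on pci_enable_msix() above: with this older kernel API a positive
 * return value means fewer vectors were available than requested, while a
 * negative value means an error; the driver treats any nonzero return as
 * failure, i.e. MSI-X vector allocation is all-or-nothing here.
 */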

static void nicvf_disable_msix(struct nicvf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, ret = 0;
	int vector;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
			nic->vf_id, irq);

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
			nic->vf_id, irq - NICVF_INTR_ID_SQ);

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
			nic->vf_id, irq - NICVF_INTR_ID_RBDR);

	/* Register CQ interrupts */
	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic->napi[irq]);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register RBDR interrupt */
	for (irq = NICVF_INTR_ID_RBDR;
	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_rbdr_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register QS error interrupt */
	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
		"NICVF%d Qset error", nic->vf_id);
	irq = NICVF_INTR_ID_QS_ERR;
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_qs_err_intr_handler,
			  0, nic->irq_name[irq], nic);
	if (!ret)
		nic->irq_allocated[irq] = true;

err:
	if (ret)
		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);

	return ret;
}

static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (!nic->irq_allocated[irq])
			continue;

		if (irq < NICVF_INTR_ID_SQ)
			free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
		else
			free_irq(nic->msix_entries[irq].vector, nic);

		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}

/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it is alive.
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return 0;

	/* Enable MSI-X */
	if (!nicvf_enable_msix(nic))
		return 1;

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return 1;
	}

	return 0;
}

static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
		netif_tx_stop_queue(txq);
		nic->drv_stats.txq_stop++;
		if (netif_msg_tx_err(nic))
			netdev_warn(netdev,
				    "%s: Transmit ring full, stopping SQ%d\n",
				    netdev->name, qid);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
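
/* Returning NETDEV_TX_BUSY above makes the network core requeue the skb
 * and retry later; the stopped queue is woken again from
 * nicvf_cq_intr_handler() once send completions free up SQ descriptors.
 */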

static inline void nicvf_free_cq_poll(struct nicvf *nic)
{
	struct nicvf_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		kfree(cq_poll);
	}
}

int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(nic->netdev);
	nic->link_up = false;

	/* Teardown secondary qsets first */
	if (!nic->sqs_mode) {
		for (qidx = 0; qidx < nic->sqs_count; qidx++) {
			if (!nic->snicvf[qidx])
				continue;
			nicvf_stop(nic->snicvf[qidx]->netdev);
			nic->snicvf[qidx] = NULL;
		}
	}

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(nic->msix_entries[irq].vector);

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_synchronize(&cq_poll->napi);
		/* CQ intr is enabled while napi_complete,
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}

	netif_tx_disable(netdev);

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	nicvf_free_cq_poll(nic);

	/* Clear multiqset info */
	nic->pnicvf = nic;
	nic->sqs_count = 0;

	return 0;
}

int nicvf_open(struct net_device *netdev)
{
	int err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	nic->mtu = netdev->mtu;

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		cq_poll->nicvf = nic;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF or else generate a random MAC */
	if (is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	if (!nic->sqs_mode)
		nicvf_config_cpi(nic);

	nicvf_request_sqs(nic);
	if (nic->sqs_mode)
		nicvf_get_primary_vf_struct(nic);

	/* Configure receive side scaling */
	if (!nic->sqs_mode)
		nicvf_rss_init(nic);

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	nic->drv_stats.txq_stop = 0;
	nic->drv_stats.txq_wake = 0;

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
	tasklet_kill(&nic->qs_err_task);
	tasklet_kill(&nic->rbdr_task);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
	nicvf_free_cq_poll(nic);
	return err;
}

static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);

	if (new_mtu > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (new_mtu < NIC_HW_MIN_FRS)
		return -EINVAL;

	if (nicvf_update_hw_max_frs(nic, new_mtu))
		return -EINVAL;
	netdev->mtu = new_mtu;
	nic->mtu = new_mtu;

	return 0;
}

static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		nic->set_mac_pending = true;
	}

	return 0;
}

void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}
}

void nicvf_update_stats(struct nicvf *nic)
{
	int qidx;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
				  stats->tx_bcast_frames_ok +
				  stats->tx_mcast_frames_ok;
	drv_stats->rx_frames_ok = stats->rx_ucast_frames +
				  stats->rx_bcast_frames +
				  stats->rx_mcast_frames;
	drv_stats->rx_drops = stats->rx_drop_red +
			      stats->rx_drop_overrun;
	drv_stats->tx_drops = stats->tx_drops;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}
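
/* The GET_RX_STATS()/GET_TX_STATS() macros above index the per-VNIC
 * statistics register files: each counter is a 64-bit register, so the
 * counter number shifted left by 3 is its byte offset, ORed into the
 * (aligned) base address, e.g. RX_OCTS lives at
 * NIC_VNIC_RX_STAT_0_13 + (RX_OCTS * 8).
 */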

static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes;
	stats->rx_packets = drv_stats->rx_frames_ok;
	stats->rx_dropped = drv_stats->rx_drops;
	stats->multicast = hw_stats->rx_mcast_frames;

	stats->tx_bytes = hw_stats->tx_bytes_ok;
	stats->tx_packets = drv_stats->tx_frames_ok;
	stats->tx_dropped = drv_stats->tx_drops;

	return stats;
}

static void nicvf_tx_timeout(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	if (netif_msg_tx_err(nic))
		netdev_warn(dev, "%s: Transmit timed out, resetting\n",
			    dev->name);

	schedule_work(&nic->reset_task);
}

static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	nic->netdev->trans_start = jiffies;
}

static int nicvf_config_loopback(struct nicvf *nic,
				 netdev_features_t features)
{
	union nic_mbx mbx = {};

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static netdev_features_t nicvf_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);

	if ((features & NETIF_F_LOOPBACK) &&
	    netif_running(netdev) && !nic->loopback_supported)
		features &= ~NETIF_F_LOOPBACK;

	return features;
}

static int nicvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		nicvf_config_vlan_stripping(nic, features);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return nicvf_config_loopback(nic, features);

	return 0;
}
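
/* Loopback is toggled from userspace through the netdev feature flag,
 * e.g. (hypothetical interface name, assuming the standard "loopback"
 * ethtool feature string):
 *
 *   ethtool -K eth0 loopback on
 *
 * nicvf_fix_features() above clears the flag if the interface is running
 * but the PF did not report loopback support.
 */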

static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
	.ndo_fix_features	= nicvf_fix_features,
	.ndo_set_features	= nicvf_set_features,
};

static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	int err, qcount;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	qcount = MAX_CMP_QUEUES_PER_QS;

	/* Restrict multiqset support only for host bound VFs */
	if (pdev->is_virtfn) {
		/* Set max number of queues per VF */
		qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS);
		qcount = min(qcount,
			     (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->pnicvf = nic;
	nic->max_queues = qcount;

	/* MAP VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	nicvf_send_vf_struct(nic);

	/* Check if this VF is in QS only mode */
	if (nic->sqs_mode)
		return 0;

	err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
	if (err)
		goto err_unregister_interrupts;

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
			       NETIF_F_TSO | NETIF_F_GRO |
			       NETIF_F_HW_VLAN_CTAG_RX);

	netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic;
	struct net_device *pnetdev;

	if (!netdev)
		return;

	nic = netdev_priv(netdev);
	pnetdev = nic->pnicvf->netdev;

	/* Check if this Qset is assigned to different VF.
	 * If yes, clean primary and all secondary Qsets.
	 */
	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
		unregister_netdev(pnetdev);
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void nicvf_shutdown(struct pci_dev *pdev)
{
	nicvf_remove(pdev);
}

static struct pci_driver nicvf_driver = {
	.name = DRV_NAME,
	.id_table = nicvf_id_table,
	.probe = nicvf_probe,
	.remove = nicvf_remove,
	.shutdown = nicvf_shutdown,
};

static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);