2 * Copyright (C) 2013-2015 Chelsio Communications. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
18 #include <linux/firmware.h>
19 #include <linux/mdio.h>
25 #define EEPROM_MAGIC 0x38E2F10C
27 static u32 get_msglevel(struct net_device *dev)
29 return netdev2adap(dev)->msg_enable;
32 static void set_msglevel(struct net_device *dev, u32 val)
34 netdev2adap(dev)->msg_enable = val;
/* Per-port statistics names exported via "ethtool -S".  The order must
 * match the order in which values are emitted by get_stats() below
 * (hardware port stats followed by struct queue_port_stats).
 * NOTE(review): entries appear to be missing from this excerpt (the
 * embedded line numbers are non-contiguous) -- verify the full list
 * against struct port_stats.
 */
37 static const char stats_strings[][ETH_GSTRING_LEN] = {
40 "tx_broadcast_frames ",
41 "tx_multicast_frames ",
46 "tx_frames_65_to_127 ",
47 "tx_frames_128_to_255 ",
48 "tx_frames_256_to_511 ",
49 "tx_frames_512_to_1023 ",
50 "tx_frames_1024_to_1518 ",
51 "tx_frames_1519_to_max ",
66 "rx_broadcast_frames ",
67 "rx_multicast_frames ",
70 "rx_frames_too_long ",
78 "rx_frames_65_to_127 ",
79 "rx_frames_128_to_255 ",
80 "rx_frames_256_to_511 ",
81 "rx_frames_512_to_1023 ",
82 "rx_frames_1024_to_1518 ",
83 "rx_frames_1519_to_max ",
95 "rx_bg0_frames_dropped ",
96 "rx_bg1_frames_dropped ",
97 "rx_bg2_frames_dropped ",
98 "rx_bg3_frames_dropped ",
99 "rx_bg0_frames_trunc ",
100 "rx_bg1_frames_trunc ",
101 "rx_bg2_frames_trunc ",
102 "rx_bg3_frames_trunc ",
/* Adapter-wide statistics names; order must match struct adapter_stats
 * as filled in by collect_adapter_stats().
 */
113 static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
117 "tcp_ipv4_out_rsts ",
119 "tcp_ipv4_out_segs ",
120 "tcp_ipv4_retrans_segs ",
121 "tcp_ipv6_out_rsts ",
123 "tcp_ipv6_out_segs ",
124 "tcp_ipv6_retrans_segs ",
128 "rdma_no_rqe_mod_defer ",
129 "rdma_no_rqe_pkt_defer ",
130 "tp_err_ofld_no_neigh ",
131 "tp_err_ofld_cong_defer ",
132 "write_coal_success ",
/* Per-channel statistics names; order must match struct channel_stats
 * as filled in by collect_channel_stats().
 */
136 static char channel_stats_strings[][ETH_GSTRING_LEN] = {
137 "--------Channel--------- ",
144 "tp_tnl_cong_drops ",
146 "tp_ofld_vlan_drops ",
147 "tp_ofld_chan_drops ",
/* Loopback-port statistics names; values come from t4_get_lb_stats()
 * in get_stats().  The first entry is a section separator, which is why
 * get_stats() copies ARRAY_SIZE(loopback_stats_strings) - 1 values.
 */
153 static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
154 "-------Loopback----------- ",
163 "frames_128_to_255 ",
164 "frames_256_to_511 ",
165 "frames_512_to_1023 ",
166 "frames_1024_to_1518 ",
167 "frames_1519_to_max ",
169 "bg0_frames_dropped ",
170 "bg1_frames_dropped ",
171 "bg2_frames_dropped ",
172 "bg3_frames_dropped ",
/* ethtool get_sset_count(): total number of statistics strings exported,
 * i.e. the sum of the four string-table sizes above.
 * NOTE(review): the switch on 'sset' (and its default error path) is not
 * visible in this excerpt -- confirm against the full file.
 */
179 static int get_sset_count(struct net_device *dev, int sset)
183 return ARRAY_SIZE(stats_strings) +
184 ARRAY_SIZE(adapter_stats_strings) +
185 ARRAY_SIZE(channel_stats_strings) +
186 ARRAY_SIZE(loopback_stats_strings);
/* Report the size in bytes of the adapter register dump (ethtool). */
static int get_regs_len(struct net_device *dev)
{
	return t4_get_regs_len(netdev2adap(dev));
}
/* ethtool get_eeprom_len(): size of the exposed EEPROM.
 * NOTE(review): the body is not visible in this excerpt; presumably it
 * returns EEPROMSIZE -- confirm against the full file.
 */
199 static int get_eeprom_len(struct net_device *dev)
/* ethtool get_drvinfo(): fill in driver name/version, PCI bus info,
 * register-dump length, firmware + TP microcode versions and, when
 * readable, the expansion-ROM version.
 * NOTE(review): the declaration of 'exprom_vers' and the else-branch
 * pairing for the fw_vers check are not visible in this excerpt.
 */
204 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
206 struct adapter *adapter = netdev2adap(dev);
209 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
210 strlcpy(info->version, cxgb4_driver_version,
211 sizeof(info->version));
212 strlcpy(info->bus_info, pci_name(adapter->pdev),
213 sizeof(info->bus_info));
214 info->regdump_len = get_regs_len(dev);
/* "N/A" when the firmware version has not been retrieved */
216 if (!adapter->params.fw_vers)
217 strcpy(info->fw_version, "N/A");
219 snprintf(info->fw_version, sizeof(info->fw_version),
220 "%u.%u.%u.%u, TP %u.%u.%u.%u",
221 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
222 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
223 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
224 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
225 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
226 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
227 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
228 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
/* expansion-ROM version is best-effort: only filled in on success */
230 if (!t4_get_exprom_version(adapter, &exprom_vers))
231 snprintf(info->erom_version, sizeof(info->erom_version),
233 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
234 FW_HDR_FW_VER_MINOR_G(exprom_vers),
235 FW_HDR_FW_VER_MICRO_G(exprom_vers),
236 FW_HDR_FW_VER_BUILD_G(exprom_vers));
/* ethtool get_strings(): concatenate the four statistics string tables
 * into 'data', in the same order get_stats() emits values.
 */
239 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
241 if (stringset == ETH_SS_STATS) {
242 memcpy(data, stats_strings, sizeof(stats_strings));
243 data += sizeof(stats_strings);
244 memcpy(data, adapter_stats_strings,
245 sizeof(adapter_stats_strings));
246 data += sizeof(adapter_stats_strings);
247 memcpy(data, channel_stats_strings,
248 sizeof(channel_stats_strings));
249 data += sizeof(channel_stats_strings);
250 memcpy(data, loopback_stats_strings,
251 sizeof(loopback_stats_strings));
255 /* port stats maintained per queue of the port. They should be in the same
256 * order as in stats_strings above.
258 struct queue_port_stats {
/* Adapter-wide stats; member order must match adapter_stats_strings.
 * NOTE(review): most members of these structs are not visible in this
 * excerpt (embedded line numbers are non-contiguous).
 */
268 struct adapter_stats {
275 u64 tcp_v4_retrans_segs;
279 u64 tcp_v6_retrans_segs;
/* Per-channel stats; member order must match channel_stats_strings. */
291 struct channel_stats {
/* Sum software SGE statistics (checksum offloads, VLAN insert/extract,
 * GRO/LRO) over all of the port's queue sets into *s.
 */
307 static void collect_sge_port_stats(const struct adapter *adap,
308 const struct port_info *p,
309 struct queue_port_stats *s)
312 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
313 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
315 memset(s, 0, sizeof(*s));
/* tx/rx queues of one port are contiguous starting at first_qset */
316 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
318 s->tx_csum += tx->tx_cso;
319 s->rx_csum += rx->stats.rx_cso;
320 s->vlan_ex += rx->stats.vlan_ex;
321 s->vlan_ins += tx->vlan_ins;
322 s->gro_pkts += rx->stats.lro_pkts;
323 s->gro_merged += rx->stats.lro_merged;
/* Fill *s with adapter-wide statistics: doorbell counters, TP TCP v4/v6
 * stats, and (for offload-capable adapters) USM/RDMA counters, plus TP
 * offload error counters and the T5+ write-combining success count.
 */
327 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
329 struct tp_tcp_stats v4, v6;
330 struct tp_rdma_stats rdma_stats;
331 struct tp_err_stats err_stats;
332 struct tp_usm_stats usm_stats;
335 memset(s, 0, sizeof(*s));
/* snapshot the TP/RDMA/USM/error counters under the stats lock so the
 * reads are consistent with other statistics readers
 */
337 spin_lock(&adap->stats_lock);
338 t4_tp_get_tcp_stats(adap, &v4, &v6);
339 t4_tp_get_rdma_stats(adap, &rdma_stats);
340 t4_get_usm_stats(adap, &usm_stats);
341 t4_tp_get_err_stats(adap, &err_stats);
342 spin_unlock(&adap->stats_lock);
344 s->db_drop = adap->db_stats.db_drop;
345 s->db_full = adap->db_stats.db_full;
346 s->db_empty = adap->db_stats.db_empty;
348 s->tcp_v4_out_rsts = v4.tcp_out_rsts;
349 s->tcp_v4_in_segs = v4.tcp_in_segs;
350 s->tcp_v4_out_segs = v4.tcp_out_segs;
351 s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
352 s->tcp_v6_out_rsts = v6.tcp_out_rsts;
353 s->tcp_v6_in_segs = v6.tcp_in_segs;
354 s->tcp_v6_out_segs = v6.tcp_out_segs;
355 s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
/* USM/RDMA counters are only meaningful on offload-capable adapters */
357 if (is_offload(adap)) {
358 s->frames = usm_stats.frames;
359 s->octets = usm_stats.octets;
360 s->drops = usm_stats.drops;
361 s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
362 s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
365 s->ofld_no_neigh = err_stats.ofld_no_neigh;
366 s->ofld_cong_defer = err_stats.ofld_cong_defer;
/* T5 and later: derive write-combining successes from SGE counters
 * when the stat source selector is set to 7
 */
368 if (!is_t4(adap->params.chip)) {
371 v = t4_read_reg(adap, SGE_STAT_CFG_A);
372 if (STATSOURCE_T5_G(v) == 7) {
373 val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
374 val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
375 s->wc_success = val1 - val2;
/* Fill *s with per-channel statistics for channel index i: CPL request/
 * response counts, TP error counters and FCoE DDP counters.
 */
381 static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
384 struct tp_cpl_stats cpl_stats;
385 struct tp_err_stats err_stats;
386 struct tp_fcoe_stats fcoe_stats;
388 memset(s, 0, sizeof(*s));
/* snapshot all counter groups consistently under the stats lock */
390 spin_lock(&adap->stats_lock);
391 t4_tp_get_cpl_stats(adap, &cpl_stats);
392 t4_tp_get_err_stats(adap, &err_stats);
393 t4_get_fcoe_stats(adap, i, &fcoe_stats);
394 spin_unlock(&adap->stats_lock);
396 s->cpl_req = cpl_stats.req[i];
397 s->cpl_rsp = cpl_stats.rsp[i];
398 s->mac_in_errs = err_stats.mac_in_errs[i];
399 s->hdr_in_errs = err_stats.hdr_in_errs[i];
400 s->tcp_in_errs = err_stats.tcp_in_errs[i];
401 s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
402 s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
403 s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
404 s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
405 s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
406 s->octets_ddp = fcoe_stats.octets_ddp;
407 s->frames_ddp = fcoe_stats.frames_ddp;
408 s->frames_drop = fcoe_stats.frames_drop;
/* ethtool get_ethtool_stats(): emit all statistics values into 'data'
 * in the same order as the string tables -- hardware port stats, SGE
 * queue stats, adapter stats, channel stats (prefixed by the port id as
 * a separator value), then loopback stats (likewise prefixed).
 */
411 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
414 struct port_info *pi = netdev_priv(dev);
415 struct adapter *adapter = pi->adapter;
416 struct lb_port_stats s;
420 t4_get_port_stats_offset(adapter, pi->tx_chan,
421 (struct port_stats *)data,
/* advance past each group by its size in u64 words */
424 data += sizeof(struct port_stats) / sizeof(u64);
425 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
426 data += sizeof(struct queue_port_stats) / sizeof(u64);
427 collect_adapter_stats(adapter, (struct adapter_stats *)data);
428 data += sizeof(struct adapter_stats) / sizeof(u64);
/* the port id fills the "--------Channel---------" separator slot */
430 *data++ = (u64)pi->port_id;
431 collect_channel_stats(adapter, (struct channel_stats *)data,
433 data += sizeof(struct channel_stats) / sizeof(u64);
/* likewise for the "-------Loopback-----------" separator slot */
435 *data++ = (u64)pi->port_id;
436 memset(&s, 0, sizeof(s));
437 t4_get_lb_stats(adapter, pi->port_id, &s);
/* -1: the first loopback string is the separator consumed above */
440 for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
441 *data++ = (unsigned long long)*p0++;
/* ethtool get_regs(): dump the adapter registers into the caller's
 * buffer and record the register-map version.
 * NOTE(review): the buffer parameter line and the buf_size declaration
 * are not visible in this excerpt.
 */
444 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
447 struct adapter *adap = netdev2adap(dev);
450 buf_size = t4_get_regs_len(adap);
451 regs->version = mk_adap_vers(adap);
452 t4_get_regs(adap, buf, buf_size);
/* ethtool nway_reset(): restart link autonegotiation on this port.
 * Requires the interface to be up and autoneg to be enabled.
 * NOTE(review): the error-return lines for the two guard conditions are
 * not visible in this excerpt.
 */
455 static int restart_autoneg(struct net_device *dev)
457 struct port_info *p = netdev_priv(dev);
459 if (!netif_running(dev))
461 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
463 t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
/* ethtool set_phys_id(): blink the port LED to identify the port.
 * NOTE(review): the 'val' assignments for the ACTIVE/INACTIVE states are
 * not visible in this excerpt.
 */
467 static int identify_port(struct net_device *dev,
468 enum ethtool_phys_id_state state)
471 struct adapter *adap = netdev2adap(dev);
473 if (state == ETHTOOL_ID_ACTIVE)
475 else if (state == ETHTOOL_ID_INACTIVE)
480 return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
/* Translate firmware port type + FW_PORT_CAP_* capability bits into the
 * ethtool SUPPORTED_* bitmap for this port.
 * NOTE(review): the accumulator 'v' declaration and the final return are
 * not visible in this excerpt.
 */
483 static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
/* copper BASE-T ports */
487 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
488 type == FW_PORT_TYPE_BT_XAUI) {
490 if (caps & FW_PORT_CAP_SPEED_100M)
491 v |= SUPPORTED_100baseT_Full;
492 if (caps & FW_PORT_CAP_SPEED_1G)
493 v |= SUPPORTED_1000baseT_Full;
494 if (caps & FW_PORT_CAP_SPEED_10G)
495 v |= SUPPORTED_10000baseT_Full;
/* backplane KX/KX4/KR variants */
496 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
497 v |= SUPPORTED_Backplane;
498 if (caps & FW_PORT_CAP_SPEED_1G)
499 v |= SUPPORTED_1000baseKX_Full;
500 if (caps & FW_PORT_CAP_SPEED_10G)
501 v |= SUPPORTED_10000baseKX4_Full;
502 } else if (type == FW_PORT_TYPE_KR) {
503 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
504 } else if (type == FW_PORT_TYPE_BP_AP) {
505 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
506 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
507 } else if (type == FW_PORT_TYPE_BP4_AP) {
508 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
509 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
510 SUPPORTED_10000baseKX4_Full;
/* fibre / SFP-style pluggable ports */
511 } else if (type == FW_PORT_TYPE_FIBER_XFI ||
512 type == FW_PORT_TYPE_FIBER_XAUI ||
513 type == FW_PORT_TYPE_SFP ||
514 type == FW_PORT_TYPE_QSFP_10G ||
515 type == FW_PORT_TYPE_QSA) {
516 v |= SUPPORTED_FIBRE;
517 if (caps & FW_PORT_CAP_SPEED_1G)
518 v |= SUPPORTED_1000baseT_Full;
519 if (caps & FW_PORT_CAP_SPEED_10G)
520 v |= SUPPORTED_10000baseT_Full;
/* 40G QSFP ports */
521 } else if (type == FW_PORT_TYPE_BP40_BA ||
522 type == FW_PORT_TYPE_QSFP) {
523 v |= SUPPORTED_40000baseSR4_Full;
524 v |= SUPPORTED_FIBRE;
527 if (caps & FW_PORT_CAP_ANEG)
528 v |= SUPPORTED_Autoneg;
532 static unsigned int to_fw_linkcaps(unsigned int caps)
536 if (caps & ADVERTISED_100baseT_Full)
537 v |= FW_PORT_CAP_SPEED_100M;
538 if (caps & ADVERTISED_1000baseT_Full)
539 v |= FW_PORT_CAP_SPEED_1G;
540 if (caps & ADVERTISED_10000baseT_Full)
541 v |= FW_PORT_CAP_SPEED_10G;
542 if (caps & ADVERTISED_40000baseSR4_Full)
543 v |= FW_PORT_CAP_SPEED_40G;
/* ethtool get_settings(): report port/PHY type, MDIO addressing, link
 * capabilities, current speed (0 when the carrier is down), duplex and
 * autoneg state.
 */
547 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
549 const struct port_info *p = netdev_priv(dev);
/* map the firmware port type (and SFP module type) to an ethtool PORT_* */
551 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
552 p->port_type == FW_PORT_TYPE_BT_XFI ||
553 p->port_type == FW_PORT_TYPE_BT_XAUI) {
555 } else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
556 p->port_type == FW_PORT_TYPE_FIBER_XAUI) {
557 cmd->port = PORT_FIBRE;
558 } else if (p->port_type == FW_PORT_TYPE_SFP ||
559 p->port_type == FW_PORT_TYPE_QSFP_10G ||
560 p->port_type == FW_PORT_TYPE_QSA ||
561 p->port_type == FW_PORT_TYPE_QSFP) {
/* pluggable modules: report FIBRE for optics, OTHER otherwise */
562 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
563 p->mod_type == FW_PORT_MOD_TYPE_SR ||
564 p->mod_type == FW_PORT_MOD_TYPE_ER ||
565 p->mod_type == FW_PORT_MOD_TYPE_LRM)
566 cmd->port = PORT_FIBRE;
567 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
568 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
571 cmd->port = PORT_OTHER;
573 cmd->port = PORT_OTHER;
/* a valid MDIO address implies an external PHY */
576 if (p->mdio_addr >= 0) {
577 cmd->phy_address = p->mdio_addr;
578 cmd->transceiver = XCVR_EXTERNAL;
579 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
580 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
582 cmd->phy_address = 0; /* not really, but no better option */
583 cmd->transceiver = XCVR_INTERNAL;
584 cmd->mdio_support = 0;
587 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
588 cmd->advertising = from_fw_linkcaps(p->port_type,
589 p->link_cfg.advertising);
/* report speed 0 when the link is down */
590 ethtool_cmd_speed_set(cmd,
591 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
592 cmd->duplex = DUPLEX_FULL;
593 cmd->autoneg = p->link_cfg.autoneg;
/* Map an ethtool speed value to the matching FW_PORT_CAP_SPEED_* bit.
 * NOTE(review): the per-speed comparison lines (and the fallthrough for
 * unknown speeds) are not visible in this excerpt.
 */
599 static unsigned int speed_to_caps(int speed)
602 return FW_PORT_CAP_SPEED_100M;
604 return FW_PORT_CAP_SPEED_1G;
606 return FW_PORT_CAP_SPEED_10G;
608 return FW_PORT_CAP_SPEED_40G;
/* ethtool set_settings(): validate and apply a requested link
 * configuration (fixed speed or autoneg advertisement), then push it to
 * the firmware via t4_link_l1cfg(), rolling back on failure.
 */
612 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
615 struct port_info *p = netdev_priv(dev);
616 struct link_config *lc = &p->link_cfg;
617 u32 speed = ethtool_cmd_speed(cmd);
/* kept so the link config can be restored if the FW rejects it */
618 struct link_config old_lc;
621 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
/* PHY without autoneg: only its single fixed speed is acceptable */
624 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
625 /* PHY offers a single speed. See if that's what's
628 if (cmd->autoneg == AUTONEG_DISABLE &&
629 (lc->supported & speed_to_caps(speed)))
635 if (cmd->autoneg == AUTONEG_DISABLE) {
636 cap = speed_to_caps(speed);
638 if (!(lc->supported & cap))
640 lc->requested_speed = cap;
643 cap = to_fw_linkcaps(cmd->advertising);
644 if (!(lc->supported & cap))
646 lc->requested_speed = 0;
647 lc->advertising = cap | FW_PORT_CAP_ANEG;
649 lc->autoneg = cmd->autoneg;
651 /* If the firmware rejects the Link Configuration request, back out
652 * the changes and report the error.
654 ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc);
661 static void get_pauseparam(struct net_device *dev,
662 struct ethtool_pauseparam *epause)
664 struct port_info *p = netdev_priv(dev);
666 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
667 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
668 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
/* ethtool set_pauseparam(): record the requested flow-control settings
 * and, if the interface is up, apply them through t4_link_l1cfg().
 * NOTE(review): the error branch when autoneg of pause is requested but
 * unsupported, and the final argument/return lines, are not visible in
 * this excerpt.
 */
671 static int set_pauseparam(struct net_device *dev,
672 struct ethtool_pauseparam *epause)
674 struct port_info *p = netdev_priv(dev);
675 struct link_config *lc = &p->link_cfg;
677 if (epause->autoneg == AUTONEG_DISABLE)
678 lc->requested_fc = 0;
679 else if (lc->supported & FW_PORT_CAP_ANEG)
680 lc->requested_fc = PAUSE_AUTONEG;
684 if (epause->rx_pause)
685 lc->requested_fc |= PAUSE_RX;
686 if (epause->tx_pause)
687 lc->requested_fc |= PAUSE_TX;
/* only push to the firmware when the interface is running */
688 if (netif_running(dev))
689 return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
694 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
696 const struct port_info *pi = netdev_priv(dev);
697 const struct sge *s = &pi->adapter->sge;
699 e->rx_max_pending = MAX_RX_BUFFERS;
700 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
701 e->rx_jumbo_max_pending = 0;
702 e->tx_max_pending = MAX_TXQ_ENTRIES;
704 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
705 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
706 e->rx_jumbo_pending = 0;
707 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
/* ethtool set_ringparam(): validate the requested ring sizes and store
 * them for every queue set of the port.  Rejected once the adapter is
 * fully initialized, since rings cannot be resized while in use.
 * NOTE(review): the 'int i' declaration and the error/success return
 * lines are not visible in this excerpt.
 */
710 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
713 const struct port_info *pi = netdev_priv(dev);
714 struct adapter *adapter = pi->adapter;
715 struct sge *s = &adapter->sge;
/* range-check every requested ring size; jumbo rings are unsupported */
717 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
718 e->tx_pending > MAX_TXQ_ENTRIES ||
719 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
720 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
721 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
/* rings can't be resized after full initialization */
724 if (adapter->flags & FULL_INIT_DONE)
727 for (i = 0; i < pi->nqsets; ++i) {
728 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
/* +8 mirrors the -8 reserve reported by get_sge_param() */
729 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
730 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
736 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
737 * @dev: the network device
738 * @us: the hold-off time in us, or 0 to disable timer
739 * @cnt: the hold-off packet count, or 0 to disable counter
741 * Set the RX interrupt hold-off parameters for a network device.
/* Apply the RX interrupt hold-off timer (us) and packet count (cnt) to
 * every response queue of the port; see the kerneldoc comment above.
 * NOTE(review): the 'err' declaration, its error check inside the loop,
 * and the final return are not visible in this excerpt.
 */
743 static int set_rx_intr_params(struct net_device *dev,
744 unsigned int us, unsigned int cnt)
747 struct port_info *pi = netdev_priv(dev);
748 struct adapter *adap = pi->adapter;
749 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
751 for (i = 0; i < pi->nqsets; i++, q++) {
752 err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
/* Enable/disable adaptive RX interrupt moderation on every response
 * queue of the port.
 * NOTE(review): the 'int i' declaration and the return statement are
 * not visible in this excerpt.
 */
759 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
762 struct port_info *pi = netdev_priv(dev);
763 struct adapter *adap = pi->adapter;
764 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
766 for (i = 0; i < pi->nqsets; i++, q++)
767 q->rspq.adaptive_rx = adaptive_rx;
772 static int get_adaptive_rx_setting(struct net_device *dev)
774 struct port_info *pi = netdev_priv(dev);
775 struct adapter *adap = pi->adapter;
776 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
778 return q->rspq.adaptive_rx;
781 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
783 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
784 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
785 c->rx_max_coalesced_frames);
/* ethtool get_coalesce(): report the RX hold-off timer, the packet-count
 * threshold (0 when counting is disabled) and the adaptive-RX flag,
 * read from the port's first response queue.
 */
788 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
790 const struct port_info *pi = netdev_priv(dev);
791 const struct adapter *adap = pi->adapter;
792 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
794 c->rx_coalesce_usecs = qtimer_val(adap, rq);
/* threshold only applies when the counter is enabled for this queue */
795 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
796 adap->sge.counter_val[rq->pktcnt_idx] : 0;
797 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
802 * eeprom_ptov - translate a physical EEPROM address to virtual
803 * @phys_addr: the physical EEPROM address
804 * @fn: the PCI function number
805 * @sz: size of function-specific area
807 * Translate a physical EEPROM address to virtual. The first 1K is
808 * accessed through virtual addresses starting at 31K, the rest is
809 * accessed through virtual addresses starting at 0.
811 * The mapping is as follows:
812 * [0..1K) -> [31K..32K)
813 * [1K..1K+A) -> [31K-A..31K)
814 * [1K+A..ES) -> [0..ES-A-1K)
816 * where A = @fn * @sz, and ES = EEPROM size.
818 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
821 if (phys_addr < 1024)
822 return phys_addr + (31 << 10);
823 if (phys_addr < 1024 + fn)
824 return 31744 - fn + phys_addr - 1024;
825 if (phys_addr < EEPROMSIZE)
826 return phys_addr - 1024 - fn;
830 /* The next two routines implement eeprom read/write from physical addresses.
/* Read one 32-bit word of EEPROM at a physical address via PCI VPD.
 * Returns 0 on success or a negative errno.
 * NOTE(review): the guard that skips the VPD access when eeprom_ptov()
 * fails is not visible in this excerpt.
 */
832 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
834 int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
837 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
/* pci_read_vpd() returns bytes read or a negative errno */
838 return vaddr < 0 ? vaddr : 0;
/* Write one 32-bit word of EEPROM at a physical address via PCI VPD.
 * Returns 0 on success or a negative errno (mirrors eeprom_rd_phys()).
 */
841 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
843 int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
846 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
847 return vaddr < 0 ? vaddr : 0;
/* NOTE(review): EEPROM_MAGIC is already defined near the top of this
 * excerpt with the same value -- one of the two defines is redundant.
 */
850 #define EEPROM_MAGIC 0x38E2F10C
/* ethtool get_eeprom(): read the requested EEPROM range word-by-word
 * into a scratch buffer, then copy the caller's exact byte range out.
 * NOTE(review): the allocation-failure check, buffer free and final
 * return are not visible in this excerpt.
 */
852 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
856 struct adapter *adapter = netdev2adap(dev);
857 u8 *buf = t4_alloc_mem(EEPROMSIZE);
862 e->magic = EEPROM_MAGIC;
/* read whole 32-bit words covering [offset, offset+len) */
863 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
864 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
867 memcpy(data, buf + e->offset, e->len);
/* ethtool set_eeprom(): validate magic and (for PF > 0) that the write
 * stays inside this PF's private EEPROM area, read-modify-write any
 * partial first/last words, unprotect the EEPROM, write word-by-word,
 * then re-enable write protection.
 */
872 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
877 u32 aligned_offset, aligned_len, *p;
878 struct adapter *adapter = netdev2adap(dev);
880 if (eeprom->magic != EEPROM_MAGIC)
/* expand the range to 32-bit word alignment */
883 aligned_offset = eeprom->offset & ~3;
884 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
/* non-primary PFs may only write their own private EEPROM area */
886 if (adapter->pf > 0) {
887 u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
889 if (aligned_offset < start ||
890 aligned_offset + aligned_len > start + EEPROMPFSIZE)
894 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
895 /* RMW possibly needed for first or last words.
897 buf = t4_alloc_mem(aligned_len);
/* pre-read the boundary words, then overlay the caller's bytes */
900 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
901 if (!err && aligned_len > 4)
902 err = eeprom_rd_phys(adapter,
903 aligned_offset + aligned_len - 4,
904 (u32 *)&buf[aligned_len - 4]);
907 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
/* disable write protection for the duration of the update */
912 err = t4_seeprom_wp(adapter, false);
916 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
917 err = eeprom_wr_phys(adapter, aligned_offset, *p);
/* re-enable write protection regardless of write outcome */
922 err = t4_seeprom_wp(adapter, true);
/* ethtool flash_device(): flash a new firmware image named by ef->data.
 * Refuses to run unless this driver instance is (or can become) the
 * firmware master, loads the image via request_firmware() and hands it
 * to t4_fw_upgrade().
 */
929 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
932 const struct firmware *fw;
933 struct adapter *adap = netdev2adap(netdev);
/* out-of-range mailbox => "no cooperating firmware" upgrade path */
934 unsigned int mbox = PCIE_FW_MASTER_M + 1;
939 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
940 master = PCIE_FW_MASTER_G(pcie_fw);
941 if (pcie_fw & PCIE_FW_MASTER_VLD_F)
943 /* if csiostor is the master return */
944 if (master_vld && (master != adap->pf)) {
945 dev_warn(adap->pdev_dev,
946 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
/* ensure the firmware name is NUL-terminated before use */
950 ef->data[sizeof(ef->data) - 1] = '\0';
951 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
955 /* If the adapter has been fully initialized then we'll go ahead and
956 * try to get the firmware's cooperation in upgrading to the new
957 * firmware image otherwise we'll try to do the entire job from the
958 * host ... and we always "force" the operation in this path.
960 if (adap->flags & FULL_INIT_DONE)
963 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
964 release_firmware(fw);
966 dev_info(adap->pdev_dev,
967 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
/* ethtool get_ts_info(): advertise software TX/RX timestamping plus raw
 * hardware RX timestamping; no PHC clock is exposed (phc_index = -1).
 */
971 static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
973 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
974 SOF_TIMESTAMPING_RX_SOFTWARE |
975 SOF_TIMESTAMPING_SOFTWARE;
977 ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
978 SOF_TIMESTAMPING_RAW_HARDWARE;
980 ts_info->phc_index = -1;
/* ethtool get_rxfh_indir_size(): size of the port's RSS indirection
 * table.  NOTE(review): the return statement (presumably pi->rss_size)
 * is not visible in this excerpt.
 */
985 static u32 get_rss_table_size(struct net_device *dev)
987 const struct port_info *pi = netdev_priv(dev);
/* ethtool get_rxfh(): report the Toeplitz hash function and copy the
 * port's RSS indirection table into 'p'.
 * NOTE(review): the copy loop and return are not visible in this
 * excerpt.
 */
992 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
994 const struct port_info *pi = netdev_priv(dev);
995 unsigned int n = pi->rss_size;
998 *hfunc = ETH_RSS_HASH_TOP;
/* ethtool set_rxfh(): update the port's RSS indirection table.  Only
 * the indirection table may change (hash key / function changes are
 * rejected); the new table is written to hardware only once the
 * interface has been fully initialized.
 */
1006 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
1010 struct port_info *pi = netdev_priv(dev);
1012 /* We require at least one supported parameter to be changed and no
1013 * change in any of the unsupported parameters
1016 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
1021 /* Interface must be brought up atleast once */
1022 if (pi->adapter->flags & FULL_INIT_DONE) {
1023 for (i = 0; i < pi->rss_size; i++)
1026 return cxgb4_write_rss(pi, pi->rss);
/* ethtool get_rxnfc(): report RSS hash fields per flow type (GRXFH)
 * based on the VI's configured 2-tuple/4-tuple hashing modes, and the
 * RX ring count (GRXRINGS).
 */
1032 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1035 const struct port_info *pi = netdev_priv(dev);
1037 switch (info->cmd) {
1038 case ETHTOOL_GRXFH: {
1039 unsigned int v = pi->rss_mode;
1042 switch (info->flow_type) {
/* IPv4 TCP: 4-tuple when enabled, else 2-tuple src/dst IP */
1044 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
1045 info->data = RXH_IP_SRC | RXH_IP_DST |
1046 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1047 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1048 info->data = RXH_IP_SRC | RXH_IP_DST;
/* IPv4 UDP additionally requires the UDP hashing enable bit */
1051 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
1052 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1053 info->data = RXH_IP_SRC | RXH_IP_DST |
1054 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1055 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1056 info->data = RXH_IP_SRC | RXH_IP_DST;
1059 case AH_ESP_V4_FLOW:
1061 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1062 info->data = RXH_IP_SRC | RXH_IP_DST;
/* IPv6 mirrors the IPv4 logic with the IP6 enable bits */
1065 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
1066 info->data = RXH_IP_SRC | RXH_IP_DST |
1067 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1068 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1069 info->data = RXH_IP_SRC | RXH_IP_DST;
1072 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
1073 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1074 info->data = RXH_IP_SRC | RXH_IP_DST |
1075 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1076 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1077 info->data = RXH_IP_SRC | RXH_IP_DST;
1080 case AH_ESP_V6_FLOW:
1082 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1083 info->data = RXH_IP_SRC | RXH_IP_DST;
1088 case ETHTOOL_GRXRINGS:
1089 info->data = pi->nqsets;
/* ethtool operations table for cxgb4 network devices; installed by
 * cxgb4_set_ethtool_ops() below.
 */
1095 static const struct ethtool_ops cxgb_ethtool_ops = {
1096 .get_settings = get_settings,
1097 .set_settings = set_settings,
1098 .get_drvinfo = get_drvinfo,
1099 .get_msglevel = get_msglevel,
1100 .set_msglevel = set_msglevel,
1101 .get_ringparam = get_sge_param,
1102 .set_ringparam = set_sge_param,
1103 .get_coalesce = get_coalesce,
1104 .set_coalesce = set_coalesce,
1105 .get_eeprom_len = get_eeprom_len,
1106 .get_eeprom = get_eeprom,
1107 .set_eeprom = set_eeprom,
1108 .get_pauseparam = get_pauseparam,
1109 .set_pauseparam = set_pauseparam,
1110 .get_link = ethtool_op_get_link,
1111 .get_strings = get_strings,
1112 .set_phys_id = identify_port,
1113 .nway_reset = restart_autoneg,
1114 .get_sset_count = get_sset_count,
1115 .get_ethtool_stats = get_stats,
1116 .get_regs_len = get_regs_len,
1117 .get_regs = get_regs,
1118 .get_rxnfc = get_rxnfc,
1119 .get_rxfh_indir_size = get_rss_table_size,
1120 .get_rxfh = get_rss_table,
1121 .set_rxfh = set_rss_table,
1122 .flash_device = set_flash,
1123 .get_ts_info = get_ts_info
1126 void cxgb4_set_ethtool_ops(struct net_device *netdev)
1128 netdev->ethtool_ops = &cxgb_ethtool_ops;