2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
4 * Copyright (C) 2012 Marvell
6 * Rami Rosen <rosenr@marvell.com>
7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
14 #include <linux/kernel.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/platform_device.h>
18 #include <linux/skbuff.h>
19 #include <linux/inetdevice.h>
20 #include <linux/mbus.h>
21 #include <linux/module.h>
22 #include <linux/interrupt.h>
23 #include <linux/if_vlan.h>
29 #include <linux/of_irq.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
32 #include <linux/of_address.h>
33 #include <linux/phy.h>
34 #include <linux/clk.h>
35 #include <linux/cpu.h>
38 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
39 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
40 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
41 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
42 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
43 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
44 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
45 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
46 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19
47 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
48 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
49 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
50 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
51 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
52 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
53 #define MVNETA_PORT_RX_RESET 0x1cc0
54 #define MVNETA_PORT_RX_DMA_RESET BIT(0)
55 #define MVNETA_PHY_ADDR 0x2000
56 #define MVNETA_PHY_ADDR_MASK 0x1f
57 #define MVNETA_MBUS_RETRY 0x2010
58 #define MVNETA_UNIT_INTR_CAUSE 0x2080
59 #define MVNETA_UNIT_CONTROL 0x20B0
60 #define MVNETA_PHY_POLLING_ENABLE BIT(1)
61 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
62 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
63 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
64 #define MVNETA_BASE_ADDR_ENABLE 0x2290
65 #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
66 #define MVNETA_PORT_CONFIG 0x2400
67 #define MVNETA_UNI_PROMISC_MODE BIT(0)
68 #define MVNETA_DEF_RXQ(q) ((q) << 1)
69 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
70 #define MVNETA_TX_UNSET_ERR_SUM BIT(12)
71 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
72 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
73 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
74 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
75 #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
76 MVNETA_DEF_RXQ_ARP(q) | \
77 MVNETA_DEF_RXQ_TCP(q) | \
78 MVNETA_DEF_RXQ_UDP(q) | \
79 MVNETA_DEF_RXQ_BPDU(q) | \
80 MVNETA_TX_UNSET_ERR_SUM | \
81 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
82 #define MVNETA_PORT_CONFIG_EXTEND 0x2404
83 #define MVNETA_MAC_ADDR_LOW 0x2414
84 #define MVNETA_MAC_ADDR_HIGH 0x2418
85 #define MVNETA_SDMA_CONFIG 0x241c
86 #define MVNETA_SDMA_BRST_SIZE_16 4
87 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
88 #define MVNETA_RX_NO_DATA_SWAP BIT(4)
89 #define MVNETA_TX_NO_DATA_SWAP BIT(5)
90 #define MVNETA_DESC_SWAP BIT(6)
91 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
92 #define MVNETA_PORT_STATUS 0x2444
93 #define MVNETA_TX_IN_PRGRS BIT(1)
94 #define MVNETA_TX_FIFO_EMPTY BIT(8)
95 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
96 #define MVNETA_SERDES_CFG 0x24A0
97 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
98 #define MVNETA_QSGMII_SERDES_PROTO 0x0667
99 #define MVNETA_TYPE_PRIO 0x24bc
100 #define MVNETA_FORCE_UNI BIT(21)
101 #define MVNETA_TXQ_CMD_1 0x24e4
102 #define MVNETA_TXQ_CMD 0x2448
103 #define MVNETA_TXQ_DISABLE_SHIFT 8
104 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff
105 #define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
106 #define MVNETA_OVERRUN_FRAME_COUNT 0x2488
107 #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
108 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
109 #define MVNETA_ACC_MODE 0x2500
110 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
111 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
112 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
113 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
115 /* Exception Interrupt Port/Queue Cause register */
117 #define MVNETA_INTR_NEW_CAUSE 0x25a0
118 #define MVNETA_INTR_NEW_MASK 0x25a4
120 /* bits 0..7 = TXQ SENT, one bit per queue.
121 * bits 8..15 = RXQ OCCUP, one bit per queue.
122 * bits 16..23 = RXQ FREE, one bit per queue.
123 * bit 29 = OLD_REG_SUM, see old reg ?
124 * bit 30 = TX_ERR_SUM, one bit for 4 ports
125 * bit 31 = MISC_SUM, one bit for 4 ports
127 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
128 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
129 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
130 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
131 #define MVNETA_MISCINTR_INTR_MASK BIT(31)
133 #define MVNETA_INTR_OLD_CAUSE 0x25a8
134 #define MVNETA_INTR_OLD_MASK 0x25ac
136 /* Data Path Port/Queue Cause Register */
137 #define MVNETA_INTR_MISC_CAUSE 0x25b0
138 #define MVNETA_INTR_MISC_MASK 0x25b4
140 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
141 #define MVNETA_CAUSE_LINK_CHANGE BIT(1)
142 #define MVNETA_CAUSE_PTP BIT(4)
144 #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
145 #define MVNETA_CAUSE_RX_OVERRUN BIT(8)
146 #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
147 #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
148 #define MVNETA_CAUSE_TX_UNDERUN BIT(11)
149 #define MVNETA_CAUSE_PRBS_ERR BIT(12)
150 #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
151 #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
153 #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
154 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
155 #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
157 #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
158 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
159 #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
161 #define MVNETA_INTR_ENABLE 0x25b8
162 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
163 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff
165 #define MVNETA_RXQ_CMD 0x2680
166 #define MVNETA_RXQ_DISABLE_SHIFT 8
167 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff
168 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
169 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
170 #define MVNETA_GMAC_CTRL_0 0x2c00
171 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
172 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
173 #define MVNETA_GMAC0_PORT_ENABLE BIT(0)
174 #define MVNETA_GMAC_CTRL_2 0x2c08
175 #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
176 #define MVNETA_GMAC2_PCS_ENABLE BIT(3)
177 #define MVNETA_GMAC2_PORT_RGMII BIT(4)
178 #define MVNETA_GMAC2_PORT_RESET BIT(6)
179 #define MVNETA_GMAC_STATUS 0x2c10
180 #define MVNETA_GMAC_LINK_UP BIT(0)
181 #define MVNETA_GMAC_SPEED_1000 BIT(1)
182 #define MVNETA_GMAC_SPEED_100 BIT(2)
183 #define MVNETA_GMAC_FULL_DUPLEX BIT(3)
184 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
185 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
186 #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
187 #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
188 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
189 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
190 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
191 #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
192 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
193 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
194 #define MVNETA_GMAC_AN_SPEED_EN BIT(7)
195 #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
196 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
197 #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
198 #define MVNETA_MIB_COUNTERS_BASE 0x3000
199 #define MVNETA_MIB_LATE_COLLISION 0x7c
200 #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
201 #define MVNETA_DA_FILT_OTH_MCAST 0x3500
202 #define MVNETA_DA_FILT_UCAST_BASE 0x3600
203 #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
204 #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
205 #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
206 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
207 #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
208 #define MVNETA_TXQ_DEC_SENT_SHIFT 16
209 #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
210 #define MVNETA_TXQ_SENT_DESC_SHIFT 16
211 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
212 #define MVNETA_PORT_TX_RESET 0x3cf0
213 #define MVNETA_PORT_TX_DMA_RESET BIT(0)
214 #define MVNETA_TX_MTU 0x3e0c
215 #define MVNETA_TX_TOKEN_SIZE 0x3e14
216 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
217 #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
218 #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
220 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
222 /* Descriptor ring Macros */
223 #define MVNETA_QUEUE_NEXT_DESC(q, index) \
224 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
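/* Example: with last_desc == 127 (a 128-entry ring), index 126 advances to
 * 127 and index 127 wraps back to 0, so the ring is walked circularly.
 */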
226 /* Various constants */
229 #define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
230 #define MVNETA_RX_COAL_PKTS 32
231 #define MVNETA_RX_COAL_USEC 100
233 /* The two-byte Marvell header. Either contains a special value used
234 * by Marvell switches when a specific hardware mode is enabled (not
235 * supported by this driver) or is filled with zeroes automatically on
236 * the RX side. Since those two bytes sit in front of the Ethernet
237 * header, they cause the IP header to be aligned on a 4-byte boundary
238 * automatically: the hardware skips those two bytes on its own.
241 #define MVNETA_MH_SIZE 2
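/* With the 2-byte Marvell header the data in front of the IP header is
 * 14 (Ethernet header) + 2 = 16 bytes, a multiple of 4, so a 4-byte-aligned
 * buffer start yields a 4-byte-aligned IP header.
 */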
243 #define MVNETA_VLAN_TAG_LEN 4
245 #define MVNETA_CPU_D_CACHE_LINE_SIZE 32
246 #define MVNETA_TX_CSUM_DEF_SIZE 1600
247 #define MVNETA_TX_CSUM_MAX_SIZE 9800
248 #define MVNETA_ACC_MODE_EXT 1
250 /* Timeout constants */
251 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
252 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
253 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
255 #define MVNETA_TX_MTU_MAX 0x3ffff
257 /* TSO header size */
258 #define TSO_HEADER_SIZE 128
260 /* Max number of Rx descriptors */
261 #define MVNETA_MAX_RXD 128
263 /* Max number of Tx descriptors */
264 #define MVNETA_MAX_TXD 532
266 /* Max number of allowed TCP segments for software TSO */
267 #define MVNETA_MAX_TSO_SEGS 100
269 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
271 /* descriptor aligned size */
272 #define MVNETA_DESC_ALIGNED_SIZE 32
274 #define MVNETA_RX_PKT_SIZE(mtu) \
275 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
276 ETH_HLEN + ETH_FCS_LEN, \
277 MVNETA_CPU_D_CACHE_LINE_SIZE)
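/* Example: for a standard 1500-byte MTU this evaluates to
 * 1500 + 2 (MH) + 4 (VLAN) + 14 (Ethernet header) + 4 (FCS) = 1524 bytes,
 * rounded up to the 32-byte cache line size, i.e. 1536 bytes.
 */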
279 #define IS_TSO_HEADER(txq, addr) \
280 ((addr >= txq->tso_hdrs_phys) && \
281 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
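/* IS_TSO_HEADER() is true when a descriptor's buffer address lies inside the
 * per-queue TSO header area; such buffers come from txq->tso_hdrs and must
 * not be DMA-unmapped when the descriptor is released.
 */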
283 #define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
285 struct mvneta_statistic {
286 unsigned short offset;
287 unsigned short type; /* T_REG_32 or T_REG_64 */
288 const char name[ETH_GSTRING_LEN];
294 static const struct mvneta_statistic mvneta_statistics[] = {
295 { 0x3000, T_REG_64, "good_octets_received", },
296 { 0x3010, T_REG_32, "good_frames_received", },
297 { 0x3008, T_REG_32, "bad_octets_received", },
298 { 0x3014, T_REG_32, "bad_frames_received", },
299 { 0x3018, T_REG_32, "broadcast_frames_received", },
300 { 0x301c, T_REG_32, "multicast_frames_received", },
301 { 0x3050, T_REG_32, "unrec_mac_control_received", },
302 { 0x3058, T_REG_32, "good_fc_received", },
303 { 0x305c, T_REG_32, "bad_fc_received", },
304 { 0x3060, T_REG_32, "undersize_received", },
305 { 0x3064, T_REG_32, "fragments_received", },
306 { 0x3068, T_REG_32, "oversize_received", },
307 { 0x306c, T_REG_32, "jabber_received", },
308 { 0x3070, T_REG_32, "mac_receive_error", },
309 { 0x3074, T_REG_32, "bad_crc_event", },
310 { 0x3078, T_REG_32, "collision", },
311 { 0x307c, T_REG_32, "late_collision", },
312 { 0x2484, T_REG_32, "rx_discard", },
313 { 0x2488, T_REG_32, "rx_overrun", },
314 { 0x3020, T_REG_32, "frames_64_octets", },
315 { 0x3024, T_REG_32, "frames_65_to_127_octets", },
316 { 0x3028, T_REG_32, "frames_128_to_255_octets", },
317 { 0x302c, T_REG_32, "frames_256_to_511_octets", },
318 { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
319 { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
320 { 0x3038, T_REG_64, "good_octets_sent", },
321 { 0x3040, T_REG_32, "good_frames_sent", },
322 { 0x3044, T_REG_32, "excessive_collision", },
323 { 0x3048, T_REG_32, "multicast_frames_sent", },
324 { 0x304c, T_REG_32, "broadcast_frames_sent", },
325 { 0x3054, T_REG_32, "fc_sent", },
326 { 0x300c, T_REG_32, "internal_mac_transmit_err", },
329 struct mvneta_pcpu_stats {
330 struct u64_stats_sync syncp;
337 struct mvneta_pcpu_port {
338 /* Pointer to the shared port */
339 struct mvneta_port *pp;
341 /* Pointer to the CPU-local NAPI struct */
342 struct napi_struct napi;
344 /* Cause of the previous interrupt */
349 struct mvneta_pcpu_port __percpu *ports;
350 struct mvneta_pcpu_stats __percpu *stats;
353 unsigned int frag_size;
355 struct mvneta_rx_queue *rxqs;
356 struct mvneta_tx_queue *txqs;
357 struct net_device *dev;
358 struct notifier_block cpu_notifier;
366 struct mii_bus *mii_bus;
367 struct phy_device *phy_dev;
368 phy_interface_t phy_interface;
369 struct device_node *phy_node;
373 unsigned int tx_csum_limit;
374 int use_inband_status:1;
376 u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
379 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
380 * layout of the transmit and reception DMA descriptors, and their
381 * layout is therefore defined by the hardware design
384 #define MVNETA_TX_L3_OFF_SHIFT 0
385 #define MVNETA_TX_IP_HLEN_SHIFT 8
386 #define MVNETA_TX_L4_UDP BIT(16)
387 #define MVNETA_TX_L3_IP6 BIT(17)
388 #define MVNETA_TXD_IP_CSUM BIT(18)
389 #define MVNETA_TXD_Z_PAD BIT(19)
390 #define MVNETA_TXD_L_DESC BIT(20)
391 #define MVNETA_TXD_F_DESC BIT(21)
392 #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
393 MVNETA_TXD_L_DESC | \
395 #define MVNETA_TX_L4_CSUM_FULL BIT(30)
396 #define MVNETA_TX_L4_CSUM_NOT BIT(31)
398 #define MVNETA_RXD_ERR_CRC 0x0
399 #define MVNETA_RXD_ERR_SUMMARY BIT(16)
400 #define MVNETA_RXD_ERR_OVERRUN BIT(17)
401 #define MVNETA_RXD_ERR_LEN BIT(18)
402 #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
403 #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
404 #define MVNETA_RXD_L3_IP4 BIT(25)
405 #define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
406 #define MVNETA_RXD_L4_CSUM_OK BIT(30)
408 #if defined(__LITTLE_ENDIAN)
409 struct mvneta_tx_desc {
410 u32 command; /* Options used by HW for packet transmitting.*/
411 u16 reserved1; /* csum_l4 (for future use) */
412 u16 data_size; /* Data size of transmitted packet in bytes */
413 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
414 u32 reserved2; /* hw_cmd - (for future use, PMT) */
415 u32 reserved3[4]; /* Reserved - (for future use) */
418 struct mvneta_rx_desc {
419 u32 status; /* Info about received packet */
420 u16 reserved1; /* pnc_info - (for future use, PnC) */
421 u16 data_size; /* Size of received packet in bytes */
423 u32 buf_phys_addr; /* Physical address of the buffer */
424 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
426 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
427 u16 reserved3; /* prefetch_cmd, for future use */
428 u16 reserved4; /* csum_l4 - (for future use, PnC) */
430 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
431 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
434 struct mvneta_tx_desc {
435 u16 data_size; /* Data size of transmitted packet in bytes */
436 u16 reserved1; /* csum_l4 (for future use) */
437 u32 command; /* Options used by HW for packet transmitting.*/
438 u32 reserved2; /* hw_cmd - (for future use, PMT) */
439 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
440 u32 reserved3[4]; /* Reserved - (for future use) */
443 struct mvneta_rx_desc {
444 u16 data_size; /* Size of received packet in bytes */
445 u16 reserved1; /* pnc_info - (for future use, PnC) */
446 u32 status; /* Info about received packet */
448 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
449 u32 buf_phys_addr; /* Physical address of the buffer */
451 u16 reserved4; /* csum_l4 - (for future use, PnC) */
452 u16 reserved3; /* prefetch_cmd, for future use */
453 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
455 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
456 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
460 struct mvneta_tx_queue {
461 /* Number of this TX queue, in the range 0-7 */
464 /* Number of TX DMA descriptors in the descriptor ring */
467 /* Number of currently used TX DMA descriptors in the descriptor ring */
471 int tx_stop_threshold;
472 int tx_wake_threshold;
474 /* Array of transmitted skb */
475 struct sk_buff **tx_skb;
477 /* Index of last TX DMA descriptor that was inserted */
480 /* Index of the TX DMA descriptor to be cleaned up */
485 /* Virtual address of the TX DMA descriptors array */
486 struct mvneta_tx_desc *descs;
488 /* DMA address of the TX DMA descriptors array */
489 dma_addr_t descs_phys;
491 /* Index of the last TX DMA descriptor */
494 /* Index of the next TX DMA descriptor to process */
495 int next_desc_to_proc;
497 /* DMA buffers for TSO headers */
500 /* DMA address of TSO headers */
501 dma_addr_t tso_hdrs_phys;
504 struct mvneta_rx_queue {
505 /* rx queue number, in the range 0-7 */
508 /* num of rx descriptors in the rx descriptor ring */
511 /* counter of times when mvneta_refill() failed */
517 /* Virtual address of the RX DMA descriptors array */
518 struct mvneta_rx_desc *descs;
520 /* DMA address of the RX DMA descriptors array */
521 dma_addr_t descs_phys;
523 /* Index of the last RX DMA descriptor */
526 /* Index of the next RX DMA descriptor to process */
527 int next_desc_to_proc;
530 /* The hardware supports eight (8) RX queues, but only the default queue
531 * (rxq_def) is currently used; all eight queue structures are still allocated.
533 static int rxq_number = 8;
534 static int txq_number = 8;
538 static int rx_copybreak __read_mostly = 256;
540 #define MVNETA_DRIVER_NAME "mvneta"
541 #define MVNETA_DRIVER_VERSION "1.0"
543 /* Utility/helper methods */
545 /* Write helper method */
546 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
548 writel(data, pp->base + offset);
551 /* Read helper method */
552 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
554 return readl(pp->base + offset);
557 /* Increment txq get counter */
558 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
560 txq->txq_get_index++;
561 if (txq->txq_get_index == txq->size)
562 txq->txq_get_index = 0;
565 /* Increment txq put counter */
566 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
568 txq->txq_put_index++;
569 if (txq->txq_put_index == txq->size)
570 txq->txq_put_index = 0;
574 /* Clear all MIB counters */
575 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
580 /* Perform dummy reads from MIB counters */
581 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
582 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
583 dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
584 dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
587 /* Get System Network Statistics */
588 struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
589 struct rtnl_link_stats64 *stats)
591 struct mvneta_port *pp = netdev_priv(dev);
595 for_each_possible_cpu(cpu) {
596 struct mvneta_pcpu_stats *cpu_stats;
602 cpu_stats = per_cpu_ptr(pp->stats, cpu);
604 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
605 rx_packets = cpu_stats->rx_packets;
606 rx_bytes = cpu_stats->rx_bytes;
607 tx_packets = cpu_stats->tx_packets;
608 tx_bytes = cpu_stats->tx_bytes;
609 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
611 stats->rx_packets += rx_packets;
612 stats->rx_bytes += rx_bytes;
613 stats->tx_packets += tx_packets;
614 stats->tx_bytes += tx_bytes;
617 stats->rx_errors = dev->stats.rx_errors;
618 stats->rx_dropped = dev->stats.rx_dropped;
620 stats->tx_dropped = dev->stats.tx_dropped;
625 /* Rx descriptors helper methods */
627 /* Checks whether the RX descriptor having this status is both the first
628 * and the last descriptor for the RX packet. Each RX packet is currently
629 * received through a single RX descriptor, so not having each RX
630 * descriptor with its first and last bits set is an error
632 static int mvneta_rxq_desc_is_first_last(u32 status)
634 return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
635 MVNETA_RXD_FIRST_LAST_DESC;
638 /* Add number of descriptors ready to receive new packets */
639 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
640 struct mvneta_rx_queue *rxq,
643 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
646 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
647 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
648 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
649 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
650 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
653 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
654 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
657 /* Get number of RX descriptors occupied by received packets */
658 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
659 struct mvneta_rx_queue *rxq)
663 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
664 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
667 /* Update the RX queue descriptor counters; called on return from the
668 * RX path or from mvneta_rxq_drop_pkts().
670 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
671 struct mvneta_rx_queue *rxq,
672 int rx_done, int rx_filled)
676 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
678 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
679 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
683 /* Only 255 descriptors can be added at once */
684 while ((rx_done > 0) || (rx_filled > 0)) {
685 if (rx_done <= 0xff) {
692 if (rx_filled <= 0xff) {
693 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
696 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
699 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
703 /* Get pointer to next RX descriptor to be processed by SW */
704 static struct mvneta_rx_desc *
705 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
707 int rx_desc = rxq->next_desc_to_proc;
709 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
710 prefetch(rxq->descs + rxq->next_desc_to_proc);
711 return rxq->descs + rx_desc;
714 /* Change maximum receive size of the port. */
715 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
719 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
720 val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
721 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
722 MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
723 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
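/* Note: the value programmed above excludes the 2-byte Marvell header and is
 * divided by two, which suggests the GMAC max-RX-size field counts 2-byte
 * units.
 */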
727 /* Set rx queue offset */
728 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
729 struct mvneta_rx_queue *rxq,
734 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
735 val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
738 val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
739 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
743 /* Tx descriptors helper methods */
745 /* Update HW with number of TX descriptors to be sent */
746 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
747 struct mvneta_tx_queue *txq,
752 /* Only 255 descriptors can be added at once; assume the caller
753 * processes TX descriptors in quanta of less than 256
756 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
759 /* Get pointer to next TX descriptor to be processed (send) by HW */
760 static struct mvneta_tx_desc *
761 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
763 int tx_desc = txq->next_desc_to_proc;
765 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
766 return txq->descs + tx_desc;
769 /* Release the last allocated TX descriptor. Useful to handle DMA
770 * mapping failures in the TX path.
772 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
774 if (txq->next_desc_to_proc == 0)
775 txq->next_desc_to_proc = txq->last_desc - 1;
777 txq->next_desc_to_proc--;
780 /* Set rxq buf size */
781 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
782 struct mvneta_rx_queue *rxq,
787 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
789 val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
790 val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
792 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
795 /* Disable buffer management (BM) */
796 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
797 struct mvneta_rx_queue *rxq)
801 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
802 val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
803 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
806 /* Start the Ethernet port RX and TX activity */
807 static void mvneta_port_up(struct mvneta_port *pp)
812 /* Enable all initialized TXs. */
814 for (queue = 0; queue < txq_number; queue++) {
815 struct mvneta_tx_queue *txq = &pp->txqs[queue];
816 if (txq->descs != NULL)
817 q_map |= (1 << queue);
819 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
821 /* Enable all initialized RXQs. */
822 mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
825 /* Stop the Ethernet port activity */
826 static void mvneta_port_down(struct mvneta_port *pp)
831 /* Stop Rx port activity. Check port Rx activity. */
832 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
834 /* Issue stop command for active channels only */
836 mvreg_write(pp, MVNETA_RXQ_CMD,
837 val << MVNETA_RXQ_DISABLE_SHIFT);
839 /* Wait for all Rx activity to terminate. */
842 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
844 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
850 val = mvreg_read(pp, MVNETA_RXQ_CMD);
851 } while (val & 0xff);
853 /* Stop Tx port activity. Check port Tx activity. Issue stop
854 * command for active channels only
856 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
859 mvreg_write(pp, MVNETA_TXQ_CMD,
860 (val << MVNETA_TXQ_DISABLE_SHIFT));
862 /* Wait for all Tx activity to terminate. */
865 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
867 "TIMEOUT for TX stopped status=0x%08x\n",
873 /* Check TX Command reg that all Txqs are stopped */
874 val = mvreg_read(pp, MVNETA_TXQ_CMD);
876 } while (val & 0xff);
878 /* Double check to verify that TX FIFO is empty */
881 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
883 "TX FIFO empty timeout status=0x08%x\n",
889 val = mvreg_read(pp, MVNETA_PORT_STATUS);
890 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
891 (val & MVNETA_TX_IN_PRGRS));
896 /* Enable the port by setting the port enable bit of the MAC control register */
897 static void mvneta_port_enable(struct mvneta_port *pp)
902 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
903 val |= MVNETA_GMAC0_PORT_ENABLE;
904 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
907 /* Disable the port and wait for about 200 usec before returning */
908 static void mvneta_port_disable(struct mvneta_port *pp)
912 /* Reset the Enable bit in the Serial Control Register */
913 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
914 val &= ~MVNETA_GMAC0_PORT_ENABLE;
915 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
920 /* Multicast tables methods */
922 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
923 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
931 val = 0x1 | (queue << 1);
932 val |= (val << 24) | (val << 16) | (val << 8);
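/* Each byte of val is now 0x1 | (queue << 1): bit 0 accepts the frame and
 * bits 3:1 select the destination RX queue, so every 32-bit write below
 * initializes four consecutive table entries.
 */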
935 for (offset = 0; offset <= 0xc; offset += 4)
936 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
939 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
940 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
948 val = 0x1 | (queue << 1);
949 val |= (val << 24) | (val << 16) | (val << 8);
952 for (offset = 0; offset <= 0xfc; offset += 4)
953 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
957 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
958 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
964 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
967 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
968 val = 0x1 | (queue << 1);
969 val |= (val << 24) | (val << 16) | (val << 8);
972 for (offset = 0; offset <= 0xfc; offset += 4)
973 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
976 /* This method sets defaults to the NETA port:
977 * Clears interrupt Cause and Mask registers.
978 * Clears all MAC tables.
979 * Sets defaults to all registers.
980 * Resets RX and TX descriptor rings.
982 * This method can be called after mvneta_port_down() to return the port
983 * settings to defaults.
985 static void mvneta_defaults_set(struct mvneta_port *pp)
991 /* Clear all Cause registers */
992 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
993 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
994 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
996 /* Mask all interrupts */
997 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
998 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
999 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1000 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1002 /* Enable MBUS Retry bit16 */
1003 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1005 /* Set CPU queue access map - all CPUs have access to all RX
1006 * queues and to all TX queues
1008 for_each_present_cpu(cpu)
1009 mvreg_write(pp, MVNETA_CPU_MAP(cpu),
1010 (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
1011 MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
1013 /* Reset RX and TX DMAs */
1014 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1015 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1017 /* Disable Legacy WRR, Disable EJP, Release from reset */
1018 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1019 for (queue = 0; queue < txq_number; queue++) {
1020 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1021 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1024 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1025 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1027 /* Set Port Acceleration Mode */
1028 val = MVNETA_ACC_MODE_EXT;
1029 mvreg_write(pp, MVNETA_ACC_MODE, val);
1031 /* Update the portCfg register value according to all RxQueue types */
1032 val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
1033 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1036 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1037 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1039 /* Build PORT_SDMA_CONFIG_REG */
1042 /* Default burst size */
1043 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1044 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1045 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1047 #if defined(__BIG_ENDIAN)
1048 val |= MVNETA_DESC_SWAP;
1051 /* Assign port SDMA configuration */
1052 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1054 /* Disable PHY polling in hardware, since we're using the
1055 * kernel phylib to do this.
1057 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1058 val &= ~MVNETA_PHY_POLLING_ENABLE;
1059 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1061 if (pp->use_inband_status) {
1062 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
1063 val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
1064 MVNETA_GMAC_FORCE_LINK_DOWN |
1065 MVNETA_GMAC_AN_FLOW_CTRL_EN);
1066 val |= MVNETA_GMAC_INBAND_AN_ENABLE |
1067 MVNETA_GMAC_AN_SPEED_EN |
1068 MVNETA_GMAC_AN_DUPLEX_EN;
1069 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1070 val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
1071 val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
1072 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
1074 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
1075 val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
1076 MVNETA_GMAC_AN_SPEED_EN |
1077 MVNETA_GMAC_AN_DUPLEX_EN);
1078 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1081 mvneta_set_ucast_table(pp, -1);
1082 mvneta_set_special_mcast_table(pp, -1);
1083 mvneta_set_other_mcast_table(pp, -1);
1085 /* Set port interrupt enable register - default enable all */
1086 mvreg_write(pp, MVNETA_INTR_ENABLE,
1087 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1088 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1090 mvneta_mib_counters_clear(pp);
1093 /* Set max sizes for tx queues */
1094 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1100 mtu = max_tx_size * 8;
1101 if (mtu > MVNETA_TX_MTU_MAX)
1102 mtu = MVNETA_TX_MTU_MAX;
1105 val = mvreg_read(pp, MVNETA_TX_MTU);
1106 val &= ~MVNETA_TX_MTU_MAX;
1108 mvreg_write(pp, MVNETA_TX_MTU, val);
1110 /* TX token size and all TXQs token size must be larger than the MTU */
1111 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1113 size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1116 val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1118 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1120 for (queue = 0; queue < txq_number; queue++) {
1121 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1123 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1126 val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1128 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1133 /* Set unicast address */
1134 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1137 unsigned int unicast_reg;
1138 unsigned int tbl_offset;
1139 unsigned int reg_offset;
1141 /* Locate the Unicast table entry */
1142 last_nibble = (0xf & last_nibble);
1144 /* offset from unicast tbl base */
1145 tbl_offset = (last_nibble / 4) * 4;
1147 /* offset within the above reg */
1148 reg_offset = last_nibble % 4;
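/* Example: a last nibble of 0x6 selects byte 2 of the second 32-bit table
 * register (tbl_offset = 4, reg_offset = 2).
 */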
1150 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1153 /* Clear accepts frame bit at specified unicast DA tbl entry */
1154 unicast_reg &= ~(0xff << (8 * reg_offset));
1156 unicast_reg &= ~(0xff << (8 * reg_offset));
1157 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1160 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1163 /* Set mac address */
1164 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1171 mac_l = (addr[4] << 8) | (addr[5]);
1172 mac_h = (addr[0] << 24) | (addr[1] << 16) |
1173 (addr[2] << 8) | (addr[3] << 0);
1175 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1176 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1179 /* Accept frames of this address */
1180 mvneta_set_ucast_addr(pp, addr[5], queue);
1183 /* Set the number of packets that will be received before RX interrupt
1184 * will be generated by HW.
1186 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1187 struct mvneta_rx_queue *rxq, u32 value)
1189 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1190 value | MVNETA_RXQ_NON_OCCUPIED(0));
1191 rxq->pkts_coal = value;
1194 /* Set the time delay in usec before an RX interrupt is generated by the HW */
1197 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1198 struct mvneta_rx_queue *rxq, u32 value)
1201 unsigned long clk_rate;
1203 clk_rate = clk_get_rate(pp->clk);
1204 val = (clk_rate / 1000000) * value;
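/* (clk_rate / 1000000) is the number of clock cycles per usec; for example,
 * assuming a 250 MHz port clock and value = 100 usec, 25000 cycles would be
 * programmed.
 */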
1206 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1207 rxq->time_coal = value;
1210 /* Set threshold for TX_DONE pkts coalescing */
1211 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1212 struct mvneta_tx_queue *txq, u32 value)
1216 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1218 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1219 val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1221 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1223 txq->done_pkts_coal = value;
1226 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1227 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1228 u32 phys_addr, u32 cookie)
1230 rx_desc->buf_cookie = cookie;
1231 rx_desc->buf_phys_addr = phys_addr;
1234 /* Decrement sent descriptors counter */
1235 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1236 struct mvneta_tx_queue *txq,
1241 /* Only 255 TX descriptors can be updated at once */
1242 while (sent_desc > 0xff) {
1243 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1244 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1245 sent_desc = sent_desc - 0xff;
1248 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1249 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1252 /* Get number of TX descriptors already sent by HW */
1253 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1254 struct mvneta_tx_queue *txq)
1259 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1260 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1261 MVNETA_TXQ_SENT_DESC_SHIFT;
1266 /* Get number of sent descriptors and decrement counter.
1267 * The number of sent descriptors is returned.
1269 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1270 struct mvneta_tx_queue *txq)
1274 /* Get number of sent descriptors */
1275 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1277 /* Decrement sent descriptors counter */
1279 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1284 /* Set TXQ descriptors fields relevant for CSUM calculation */
1285 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1286 int ip_hdr_len, int l4_proto)
1290 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1291 * G_L4_chk, L4_type; required only for checksum calculation
1294 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1295 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1297 if (l3_proto == htons(ETH_P_IP))
1298 command |= MVNETA_TXD_IP_CSUM;
1300 command |= MVNETA_TX_L3_IP6;
1302 if (l4_proto == IPPROTO_TCP)
1303 command |= MVNETA_TX_L4_CSUM_FULL;
1304 else if (l4_proto == IPPROTO_UDP)
1305 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1307 command |= MVNETA_TX_L4_CSUM_NOT;
1313 /* Display more error info */
1314 static void mvneta_rx_error(struct mvneta_port *pp,
1315 struct mvneta_rx_desc *rx_desc)
1317 u32 status = rx_desc->status;
1319 if (!mvneta_rxq_desc_is_first_last(status)) {
1321 "bad rx status %08x (buffer oversize), size=%d\n",
1322 status, rx_desc->data_size);
1326 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1327 case MVNETA_RXD_ERR_CRC:
1328 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1329 status, rx_desc->data_size);
1331 case MVNETA_RXD_ERR_OVERRUN:
1332 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1333 status, rx_desc->data_size);
1335 case MVNETA_RXD_ERR_LEN:
1336 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1337 status, rx_desc->data_size);
1339 case MVNETA_RXD_ERR_RESOURCE:
1340 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1341 status, rx_desc->data_size);
1346 /* Handle RX checksum offload based on the descriptor's status */
1347 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1348 struct sk_buff *skb)
1350 if ((status & MVNETA_RXD_L3_IP4) &&
1351 (status & MVNETA_RXD_L4_CSUM_OK)) {
1353 skb->ip_summed = CHECKSUM_UNNECESSARY;
1357 skb->ip_summed = CHECKSUM_NONE;
1360 /* Return tx queue pointer (find last set bit) according to <cause> returned
1361 * from tx_done reg. <cause> must not be 0. The return value is always a
1362 * valid queue for matching the first one found in <cause>.
1364 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1367 int queue = fls(cause) - 1;
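/* fls() returns the 1-based index of the highest set bit, so e.g.
 * cause = 0x14 selects TX queue 4 first; lower queues are handled on later
 * iterations of the caller's loop.
 */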
1369 return &pp->txqs[queue];
1372 /* Free tx queue skbuffs */
1373 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1374 struct mvneta_tx_queue *txq, int num)
1378 for (i = 0; i < num; i++) {
1379 struct mvneta_tx_desc *tx_desc = txq->descs +
1381 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1383 mvneta_txq_inc_get(txq);
1385 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1386 dma_unmap_single(pp->dev->dev.parent,
1387 tx_desc->buf_phys_addr,
1388 tx_desc->data_size, DMA_TO_DEVICE);
1391 dev_kfree_skb_any(skb);
1395 /* Handle end of transmission */
1396 static void mvneta_txq_done(struct mvneta_port *pp,
1397 struct mvneta_tx_queue *txq)
1399 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1402 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1406 mvneta_txq_bufs_free(pp, txq, tx_done);
1408 txq->count -= tx_done;
1410 if (netif_tx_queue_stopped(nq)) {
1411 if (txq->count <= txq->tx_wake_threshold)
1412 netif_tx_wake_queue(nq);
1416 static void *mvneta_frag_alloc(const struct mvneta_port *pp)
1418 if (likely(pp->frag_size <= PAGE_SIZE))
1419 return netdev_alloc_frag(pp->frag_size);
1421 return kmalloc(pp->frag_size, GFP_ATOMIC);
1424 static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
1426 if (likely(pp->frag_size <= PAGE_SIZE))
1427 skb_free_frag(data);
1432 /* Refill processing */
1433 static int mvneta_rx_refill(struct mvneta_port *pp,
1434 struct mvneta_rx_desc *rx_desc)
1437 dma_addr_t phys_addr;
1440 data = mvneta_frag_alloc(pp);
1444 phys_addr = dma_map_single(pp->dev->dev.parent, data,
1445 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1447 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1448 mvneta_frag_free(pp, data);
1452 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
1456 /* Handle tx checksum */
1457 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1459 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1461 __be16 l3_proto = vlan_get_protocol(skb);
1464 if (l3_proto == htons(ETH_P_IP)) {
1465 struct iphdr *ip4h = ip_hdr(skb);
1467 /* Calculate IPv4 checksum and L4 checksum */
1468 ip_hdr_len = ip4h->ihl;
1469 l4_proto = ip4h->protocol;
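/* ihl is the IPv4 header length in 32-bit words; the IPv6 branch below
 * derives the same unit, so mvneta_txq_desc_csum() receives the header
 * length in words in both cases.
 */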
1470 } else if (l3_proto == htons(ETH_P_IPV6)) {
1471 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1473 /* Read l4_protocol from one of IPv6 extra headers */
1474 if (skb_network_header_len(skb) > 0)
1475 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1476 l4_proto = ip6h->nexthdr;
1478 return MVNETA_TX_L4_CSUM_NOT;
1480 return mvneta_txq_desc_csum(skb_network_offset(skb),
1481 l3_proto, ip_hdr_len, l4_proto);
1484 return MVNETA_TX_L4_CSUM_NOT;
1487 /* Drop packets received by the RXQ and free buffers */
1488 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1489 struct mvneta_rx_queue *rxq)
1493 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1494 for (i = 0; i < rxq->size; i++) {
1495 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1496 void *data = (void *)rx_desc->buf_cookie;
1498 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1499 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1500 mvneta_frag_free(pp, data);
1504 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1507 /* Main rx processing */
1508 static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1509 struct mvneta_rx_queue *rxq)
1511 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
1512 struct net_device *dev = pp->dev;
1517 /* Get number of received packets */
1518 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1520 if (rx_todo > rx_done)
1525 /* Fairness NAPI loop */
1526 while (rx_done < rx_todo) {
1527 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1528 struct sk_buff *skb;
1529 unsigned char *data;
1530 dma_addr_t phys_addr;
1535 rx_status = rx_desc->status;
1536 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1537 data = (unsigned char *)rx_desc->buf_cookie;
1538 phys_addr = rx_desc->buf_phys_addr;
1540 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1541 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1543 dev->stats.rx_errors++;
1544 mvneta_rx_error(pp, rx_desc);
1545 /* leave the descriptor untouched */
1549 if (rx_bytes <= rx_copybreak) {
1550 /* better copy a small frame and not unmap the DMA region */
1551 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
1553 goto err_drop_frame;
1555 dma_sync_single_range_for_cpu(dev->dev.parent,
1556 rx_desc->buf_phys_addr,
1557 MVNETA_MH_SIZE + NET_SKB_PAD,
1560 memcpy(skb_put(skb, rx_bytes),
1561 data + MVNETA_MH_SIZE + NET_SKB_PAD,
1564 skb->protocol = eth_type_trans(skb, dev);
1565 mvneta_rx_csum(pp, rx_status, skb);
1566 napi_gro_receive(&port->napi, skb);
1569 rcvd_bytes += rx_bytes;
1571 /* leave the descriptor and buffer untouched */
1575 /* Refill processing */
1576 err = mvneta_rx_refill(pp, rx_desc);
1578 netdev_err(dev, "Linux processing - Can't refill\n");
1580 goto err_drop_frame;
1583 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1585 /* After the refill, the old buffer has to be unmapped regardless of
1586 * whether the skb was successfully built or not.
1588 dma_unmap_single(dev->dev.parent, phys_addr,
1589 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1592 goto err_drop_frame;
1595 rcvd_bytes += rx_bytes;
1597 /* Linux processing */
1598 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
1599 skb_put(skb, rx_bytes);
1601 skb->protocol = eth_type_trans(skb, dev);
1603 mvneta_rx_csum(pp, rx_status, skb);
1605 napi_gro_receive(&port->napi, skb);
1609 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1611 u64_stats_update_begin(&stats->syncp);
1612 stats->rx_packets += rcvd_pkts;
1613 stats->rx_bytes += rcvd_bytes;
1614 u64_stats_update_end(&stats->syncp);
1617 /* Update rxq management counters */
1618 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1624 mvneta_tso_put_hdr(struct sk_buff *skb,
1625 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
1627 struct mvneta_tx_desc *tx_desc;
1628 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1630 txq->tx_skb[txq->txq_put_index] = NULL;
1631 tx_desc = mvneta_txq_next_desc_get(txq);
1632 tx_desc->data_size = hdr_len;
1633 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
1634 tx_desc->command |= MVNETA_TXD_F_DESC;
1635 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
1636 txq->txq_put_index * TSO_HEADER_SIZE;
1637 mvneta_txq_inc_put(txq);
1641 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
1642 struct sk_buff *skb, char *data, int size,
1643 bool last_tcp, bool is_last)
1645 struct mvneta_tx_desc *tx_desc;
1647 tx_desc = mvneta_txq_next_desc_get(txq);
1648 tx_desc->data_size = size;
1649 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
1650 size, DMA_TO_DEVICE);
1651 if (unlikely(dma_mapping_error(dev->dev.parent,
1652 tx_desc->buf_phys_addr))) {
1653 mvneta_txq_desc_put(txq);
1657 tx_desc->command = 0;
1658 txq->tx_skb[txq->txq_put_index] = NULL;
1661 /* last descriptor in the TCP packet */
1662 tx_desc->command = MVNETA_TXD_L_DESC;
1664 /* last descriptor in SKB */
1666 txq->tx_skb[txq->txq_put_index] = skb;
1668 mvneta_txq_inc_put(txq);
1672 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
1673 struct mvneta_tx_queue *txq)
1675 int total_len, data_left;
1677 struct mvneta_port *pp = netdev_priv(dev);
1679 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1682 /* Count needed descriptors */
1683 if ((txq->count + tso_count_descs(skb)) >= txq->size)
1686 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
1687 pr_info("*** Is this even possible???!?!?\n");
1691 /* Initialize the TSO handler, and prepare the first payload */
1692 tso_start(skb, &tso);
1694 total_len = skb->len - hdr_len;
1695 while (total_len > 0) {
1698 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1699 total_len -= data_left;
1702 /* prepare packet headers: MAC + IP + TCP */
1703 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
1704 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1706 mvneta_tso_put_hdr(skb, pp, txq);
1708 while (data_left > 0) {
1712 size = min_t(int, tso.size, data_left);
1714 if (mvneta_tso_put_data(dev, txq, skb,
1721 tso_build_data(skb, &tso, size);
1728 /* Release all used data descriptors; header descriptors must not be unmapped */
1731 for (i = desc_count - 1; i >= 0; i--) {
1732 struct mvneta_tx_desc *tx_desc = txq->descs + i;
1733 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1734 dma_unmap_single(pp->dev->dev.parent,
1735 tx_desc->buf_phys_addr,
1738 mvneta_txq_desc_put(txq);
1743 /* Handle tx fragmentation processing */
1744 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1745 struct mvneta_tx_queue *txq)
1747 struct mvneta_tx_desc *tx_desc;
1748 int i, nr_frags = skb_shinfo(skb)->nr_frags;
1750 for (i = 0; i < nr_frags; i++) {
1751 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1752 void *addr = page_address(frag->page.p) + frag->page_offset;
1754 tx_desc = mvneta_txq_next_desc_get(txq);
1755 tx_desc->data_size = frag->size;
1757 tx_desc->buf_phys_addr =
1758 dma_map_single(pp->dev->dev.parent, addr,
1759 tx_desc->data_size, DMA_TO_DEVICE);
1761 if (dma_mapping_error(pp->dev->dev.parent,
1762 tx_desc->buf_phys_addr)) {
1763 mvneta_txq_desc_put(txq);
1767 if (i == nr_frags - 1) {
1768 /* Last descriptor */
1769 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
1770 txq->tx_skb[txq->txq_put_index] = skb;
1772 /* Descriptor in the middle: Not First, Not Last */
1773 tx_desc->command = 0;
1774 txq->tx_skb[txq->txq_put_index] = NULL;
1776 mvneta_txq_inc_put(txq);
1782 /* Release all descriptors that were used to map fragments of
1783 * this packet, as well as the corresponding DMA mappings
1785 for (i = i - 1; i >= 0; i--) {
1786 tx_desc = txq->descs + i;
1787 dma_unmap_single(pp->dev->dev.parent,
1788 tx_desc->buf_phys_addr,
1791 mvneta_txq_desc_put(txq);
1797 /* Main tx processing */
1798 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1800 struct mvneta_port *pp = netdev_priv(dev);
1801 u16 txq_id = skb_get_queue_mapping(skb);
1802 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
1803 struct mvneta_tx_desc *tx_desc;
1808 if (!netif_running(dev))
1811 if (skb_is_gso(skb)) {
1812 frags = mvneta_tx_tso(skb, dev, txq);
1816 frags = skb_shinfo(skb)->nr_frags + 1;
1818 /* Get a descriptor for the first part of the packet */
1819 tx_desc = mvneta_txq_next_desc_get(txq);
1821 tx_cmd = mvneta_skb_tx_csum(pp, skb);
1823 tx_desc->data_size = skb_headlen(skb);
1825 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1828 if (unlikely(dma_mapping_error(dev->dev.parent,
1829 tx_desc->buf_phys_addr))) {
1830 mvneta_txq_desc_put(txq);
1836 /* First and Last descriptor */
1837 tx_cmd |= MVNETA_TXD_FLZ_DESC;
1838 tx_desc->command = tx_cmd;
1839 txq->tx_skb[txq->txq_put_index] = skb;
1840 mvneta_txq_inc_put(txq);
1842 /* First but not Last */
1843 tx_cmd |= MVNETA_TXD_F_DESC;
1844 txq->tx_skb[txq->txq_put_index] = NULL;
1845 mvneta_txq_inc_put(txq);
1846 tx_desc->command = tx_cmd;
1847 /* Continue with other skb fragments */
1848 if (mvneta_tx_frag_process(pp, skb, txq)) {
1849 dma_unmap_single(dev->dev.parent,
1850 tx_desc->buf_phys_addr,
1853 mvneta_txq_desc_put(txq);
1861 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1862 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
1864 txq->count += frags;
1865 mvneta_txq_pend_desc_add(pp, txq, frags);
1867 if (txq->count >= txq->tx_stop_threshold)
1868 netif_tx_stop_queue(nq);
1870 u64_stats_update_begin(&stats->syncp);
1871 stats->tx_packets++;
1872 stats->tx_bytes += len;
1873 u64_stats_update_end(&stats->syncp);
1875 dev->stats.tx_dropped++;
1876 dev_kfree_skb_any(skb);
1879 return NETDEV_TX_OK;
1883 /* Free tx resources when resetting a port */
1884 static void mvneta_txq_done_force(struct mvneta_port *pp,
1885 struct mvneta_tx_queue *txq)
1888 int tx_done = txq->count;
1890 mvneta_txq_bufs_free(pp, txq, tx_done);
1894 txq->txq_put_index = 0;
1895 txq->txq_get_index = 0;
1898 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
1899 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
1901 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
1903 struct mvneta_tx_queue *txq;
1904 struct netdev_queue *nq;
1906 while (cause_tx_done) {
1907 txq = mvneta_tx_done_policy(pp, cause_tx_done);
1909 nq = netdev_get_tx_queue(pp->dev, txq->id);
1910 __netif_tx_lock(nq, smp_processor_id());
1913 mvneta_txq_done(pp, txq);
1915 __netif_tx_unlock(nq);
1916 cause_tx_done &= ~((1 << txq->id));
1920 /* Compute the CRC-8 of the specified address, using a unique algorithm
1921 * defined by the HW spec, different from the generic CRC-8 algorithm
1923 static int mvneta_addr_crc(unsigned char *addr)
1928 for (i = 0; i < ETH_ALEN; i++) {
1931 crc = (crc ^ addr[i]) << 8;
1932 for (j = 7; j >= 0; j--) {
1933 if (crc & (0x100 << j))
1941 /* This method controls the net device special MAC multicast support.
1942 * The Special Multicast Table for MAC addresses supports MAC of the form
1943 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1944 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1945 * Table entries in the DA-Filter table. This method sets the appropriate
1946 * Special Multicast Table entry.
1948 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
1949 unsigned char last_byte,
1952 unsigned int smc_table_reg;
1953 unsigned int tbl_offset;
1954 unsigned int reg_offset;
1956 /* Register offset from SMC table base */
1957 tbl_offset = (last_byte / 4);
1958 /* Entry offset within the above reg */
1959 reg_offset = last_byte % 4;
1961 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
1965 smc_table_reg &= ~(0xff << (8 * reg_offset));
1967 smc_table_reg &= ~(0xff << (8 * reg_offset));
1968 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1971 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
1975 /* This method controls the network device Other MAC multicast support.
1976 * The Other Multicast Table is used for multicast of another type.
1977 * A CRC-8 is used as an index to the Other Multicast Table entries
1978 * in the DA-Filter table.
1979 * The method gets the CRC-8 value from the calling routine and
1980 * sets the appropriate Other Multicast Table entry according to the specified CRC-8 value.
1983 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
1987 unsigned int omc_table_reg;
1988 unsigned int tbl_offset;
1989 unsigned int reg_offset;
1991 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
1992 reg_offset = crc8 % 4; /* Entry offset within the above reg */
1994 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
1997 /* Clear accepts frame bit at specified Other DA table entry */
1998 omc_table_reg &= ~(0xff << (8 * reg_offset));
2000 omc_table_reg &= ~(0xff << (8 * reg_offset));
2001 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2004 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2007 /* The network device supports multicast using two tables:
2008 * 1) Special Multicast Table for MAC addresses of the form
2009 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2010 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2011 * Table entries in the DA-Filter table.
2012 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
2013 * is used as an index to the Other Multicast Table entries in the DA-Filter table.
2016 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2019 unsigned char crc_result = 0;
2021 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2022 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2026 crc_result = mvneta_addr_crc(p_addr);
2028 if (pp->mcast_count[crc_result] == 0) {
2029 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2034 pp->mcast_count[crc_result]--;
2035 if (pp->mcast_count[crc_result] != 0) {
2036 netdev_info(pp->dev,
2037 "After delete there are %d valid Mcast for crc8=0x%02x\n",
2038 pp->mcast_count[crc_result], crc_result);
2042 pp->mcast_count[crc_result]++;
2044 mvneta_set_other_mcast_addr(pp, crc_result, queue);
2049 /* Configure the filtering mode of the Ethernet port */
2050 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2053 u32 port_cfg_reg, val;
2055 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2057 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2059 /* Set / Clear UPM bit in port configuration register */
2061 /* Accept all Unicast addresses */
2062 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2063 val |= MVNETA_FORCE_UNI;
2064 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2065 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2067 /* Reject all Unicast addresses */
2068 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2069 val &= ~MVNETA_FORCE_UNI;
2072 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2073 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2076 /* register unicast and multicast addresses */
2077 static void mvneta_set_rx_mode(struct net_device *dev)
2079 struct mvneta_port *pp = netdev_priv(dev);
2080 struct netdev_hw_addr *ha;
2082 if (dev->flags & IFF_PROMISC) {
2083 /* Accept all: Multicast + Unicast */
2084 mvneta_rx_unicast_promisc_set(pp, 1);
2085 mvneta_set_ucast_table(pp, rxq_def);
2086 mvneta_set_special_mcast_table(pp, rxq_def);
2087 mvneta_set_other_mcast_table(pp, rxq_def);
2089 /* Accept single Unicast */
2090 mvneta_rx_unicast_promisc_set(pp, 0);
2091 mvneta_set_ucast_table(pp, -1);
2092 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2094 if (dev->flags & IFF_ALLMULTI) {
2095 /* Accept all multicast */
2096 mvneta_set_special_mcast_table(pp, rxq_def);
2097 mvneta_set_other_mcast_table(pp, rxq_def);
2099 /* Accept only initialized multicast */
2100 mvneta_set_special_mcast_table(pp, -1);
2101 mvneta_set_other_mcast_table(pp, -1);
2103 if (!netdev_mc_empty(dev)) {
2104 netdev_for_each_mc_addr(ha, dev) {
2105 mvneta_mcast_addr_set(pp, ha->addr, rxq_def);
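/* Summary of the policy above: in promiscuous mode every unicast and
 * multicast frame is steered to rxq_def; otherwise only the interface's
 * own MAC address is accepted, and multicast reception is either fully
 * open (IFF_ALLMULTI) or restricted to the addresses currently present
 * in the device's multicast list.
 */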
2113 /* Interrupt handling - the callback for request_irq() */
2114 static irqreturn_t mvneta_isr(int irq, void *dev_id)
2116 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
2118 disable_percpu_irq(port->pp->dev->irq);
2119 napi_schedule(&port->napi);
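/* Standard NAPI hand-off: the per-CPU interrupt stays disabled here and is
 * only re-enabled from mvneta_poll() once the pending work has been
 * processed.
 */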
2124 static int mvneta_fixed_link_update(struct mvneta_port *pp,
2125 struct phy_device *phy)
2127 struct fixed_phy_status status;
2128 struct fixed_phy_status changed = {};
2129 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2131 status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
2132 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
2133 status.speed = SPEED_1000;
2134 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
2135 status.speed = SPEED_100;
2137 status.speed = SPEED_10;
2138 status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
2142 fixed_phy_update_state(phy, &status, &changed);
2147 * Bits 0 - 7 of the causeRxTx register indicate that packets were
2148 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
2149 * Bits 8 - 15 of the causeRxTx register indicate that packets were
2150 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
2151 * Each CPU has its own causeRxTx register
2153 static int mvneta_poll(struct napi_struct *napi, int budget)
2157 struct mvneta_port *pp = netdev_priv(napi->dev);
2158 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2160 if (!netif_running(pp->dev)) {
2161 napi_complete(&port->napi);
2165 /* Read cause register */
2166 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2167 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2168 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2170 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2171 if (pp->use_inband_status && (cause_misc &
2172 (MVNETA_CAUSE_PHY_STATUS_CHANGE |
2173 MVNETA_CAUSE_LINK_CHANGE |
2174 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
2175 mvneta_fixed_link_update(pp, pp->phy_dev);
2179 /* Release Tx descriptors */
2180 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
2181 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
2182 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2185 /* For the case where the last mvneta_poll did not process all RX packets */
2188 cause_rx_tx |= port->cause_rx_tx;
2189 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
2194 napi_complete(&port->napi);
2195 enable_percpu_irq(pp->dev->irq, 0);
2198 port->cause_rx_tx = cause_rx_tx;
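/* Example of the cause layout documented above: cause_rx_tx = 0x0103 flags
 * TX-done work on the two lowest TX queues (bits 0-1) and RX work on the
 * first RX queue (bit 8). The TX bits are handled and cleared immediately;
 * whatever RX cause remains when the budget is exhausted is saved in
 * port->cause_rx_tx and picked up again on the next poll.
 */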
2202 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2203 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2208 for (i = 0; i < num; i++) {
2209 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2210 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
2211 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
2212 __func__, rxq->id, i, num);
2217 /* Add this number of RX descriptors as non occupied (ready to get packets) */
2220 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2225 /* Free all packets pending transmit from all TXQs and reset TX port */
2226 static void mvneta_tx_reset(struct mvneta_port *pp)
2230 /* free the skb's in the tx ring */
2231 for (queue = 0; queue < txq_number; queue++)
2232 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2234 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2235 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2238 static void mvneta_rx_reset(struct mvneta_port *pp)
2240 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2241 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2244 /* Rx/Tx queue initialization/cleanup methods */
2246 /* Create a specified RX queue */
2247 static int mvneta_rxq_init(struct mvneta_port *pp,
2248 struct mvneta_rx_queue *rxq)
2251 rxq->size = pp->rx_ring_size;
2253 /* Allocate memory for RX descriptors */
2254 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2255 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2256 &rxq->descs_phys, GFP_KERNEL);
2257 if (rxq->descs == NULL)
2260 BUG_ON(rxq->descs !=
2261 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2263 rxq->last_desc = rxq->size - 1;
2265 /* Set Rx descriptors queue starting address */
2266 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2267 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2270 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
2272 /* Set coalescing pkts and time */
2273 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2274 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2276 /* Fill RXQ with buffers from RX pool */
2277 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
2278 mvneta_rxq_bm_disable(pp, rxq);
2279 mvneta_rxq_fill(pp, rxq, rxq->size);
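/* The whole descriptor ring lives in a single coherent DMA allocation; the
 * controller is only told its physical base and length (the two register
 * writes above) and steps through descriptors at a fixed
 * MVNETA_DESC_ALIGNED_SIZE stride. The BUG_ON above additionally checks that
 * the base address is cache-line aligned.
 */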
2284 /* Cleanup Rx queue */
2285 static void mvneta_rxq_deinit(struct mvneta_port *pp,
2286 struct mvneta_rx_queue *rxq)
2288 mvneta_rxq_drop_pkts(pp, rxq);
2291 dma_free_coherent(pp->dev->dev.parent,
2292 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2298 rxq->next_desc_to_proc = 0;
2299 rxq->descs_phys = 0;
2302 /* Create and initialize a tx queue */
2303 static int mvneta_txq_init(struct mvneta_port *pp,
2304 struct mvneta_tx_queue *txq)
2306 txq->size = pp->tx_ring_size;
2308 /* A queue must always have room for at least one skb.
2309 * Therefore, stop the queue when the number of free entries reaches
2310 * the maximum number of descriptors per skb.
2312 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2313 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2316 /* Allocate memory for TX descriptors */
2317 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2318 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2319 &txq->descs_phys, GFP_KERNEL);
2320 if (txq->descs == NULL)
2323 /* Make sure descriptor address is cache line size aligned */
2324 BUG_ON(txq->descs !=
2325 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2327 txq->last_desc = txq->size - 1;
2329 /* Set maximum bandwidth for enabled TXQs */
2330 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2331 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2333 /* Set Tx descriptors queue starting address */
2334 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2335 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2337 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2338 if (txq->tx_skb == NULL) {
2339 dma_free_coherent(pp->dev->dev.parent,
2340 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2341 txq->descs, txq->descs_phys);
2345 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2346 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2347 txq->size * TSO_HEADER_SIZE,
2348 &txq->tso_hdrs_phys, GFP_KERNEL);
2349 if (txq->tso_hdrs == NULL) {
2351 dma_free_coherent(pp->dev->dev.parent,
2352 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2353 txq->descs, txq->descs_phys);
2356 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
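/* The two thresholds set above give the queue some hysteresis: transmission
 * is stopped once fewer than MVNETA_MAX_SKB_DESCS descriptors are free (so a
 * maximally fragmented skb always fits), and the completion path only wakes
 * the queue again once usage has dropped back to tx_wake_threshold, i.e.
 * half of the stop threshold, which avoids rapid stop/start toggling.
 */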
2361 /* Free all resources allocated for a TX queue; also called when mvneta_txq_init() fails to allocate memory */
2362 static void mvneta_txq_deinit(struct mvneta_port *pp,
2363 struct mvneta_tx_queue *txq)
2368 dma_free_coherent(pp->dev->dev.parent,
2369 txq->size * TSO_HEADER_SIZE,
2370 txq->tso_hdrs, txq->tso_hdrs_phys);
2372 dma_free_coherent(pp->dev->dev.parent,
2373 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2374 txq->descs, txq->descs_phys);
2378 txq->next_desc_to_proc = 0;
2379 txq->descs_phys = 0;
2381 /* Set minimum bandwidth for disabled TXQs */
2382 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2383 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2385 /* Set Tx descriptors queue starting address and size */
2386 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2387 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2390 /* Cleanup all Tx queues */
2391 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2395 for (queue = 0; queue < txq_number; queue++)
2396 mvneta_txq_deinit(pp, &pp->txqs[queue]);
2399 /* Cleanup all Rx queues */
2400 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2402 mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
2406 /* Init all Rx queues */
2407 static int mvneta_setup_rxqs(struct mvneta_port *pp)
2409 int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);
2411 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2413 mvneta_cleanup_rxqs(pp);
2420 /* Init all tx queues */
2421 static int mvneta_setup_txqs(struct mvneta_port *pp)
2425 for (queue = 0; queue < txq_number; queue++) {
2426 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2428 netdev_err(pp->dev, "%s: can't create txq=%d\n",
2430 mvneta_cleanup_txqs(pp);
2438 static void mvneta_start_dev(struct mvneta_port *pp)
2442 mvneta_max_rx_size_set(pp, pp->pkt_size);
2443 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2445 /* start the Rx/Tx activity */
2446 mvneta_port_enable(pp);
2448 /* Enable polling on the port */
2449 for_each_present_cpu(cpu) {
2450 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2452 napi_enable(&port->napi);
2455 /* Unmask interrupts */
2456 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2457 MVNETA_RX_INTR_MASK(rxq_number) |
2458 MVNETA_TX_INTR_MASK(txq_number) |
2459 MVNETA_MISCINTR_INTR_MASK);
2460 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2461 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2462 MVNETA_CAUSE_LINK_CHANGE |
2463 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2465 phy_start(pp->phy_dev);
2466 netif_tx_start_all_queues(pp->dev);
2469 static void mvneta_stop_dev(struct mvneta_port *pp)
2473 phy_stop(pp->phy_dev);
2475 for_each_present_cpu(cpu) {
2476 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2478 napi_disable(&port->napi);
2481 netif_carrier_off(pp->dev);
2483 mvneta_port_down(pp);
2484 netif_tx_stop_all_queues(pp->dev);
2486 /* Stop the port activity */
2487 mvneta_port_disable(pp);
2489 /* Clear all ethernet port interrupts */
2490 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2491 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2493 /* Mask all ethernet port interrupts */
2494 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2495 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2496 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2498 mvneta_tx_reset(pp);
2499 mvneta_rx_reset(pp);
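/* Teardown order matters here: the PHY is stopped first so no further link
 * callbacks arrive, per-CPU NAPI is disabled, the carrier and TX queues are
 * taken down, and only then is the port disabled, its interrupts cleared and
 * masked, and both DMA engines reset so no descriptors remain in flight.
 */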
2502 /* Return positive if MTU is valid */
2503 static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2506 netdev_err(dev, "cannot change mtu to less than 68\n");
2510 /* 9676 == 9700 - 20 and rounding to 8 */
2512 netdev_info(dev, "Illegal MTU value %d, rounding to 9676\n", mtu);
2516 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2517 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2518 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2519 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2525 /* Change the device mtu */
2526 static int mvneta_change_mtu(struct net_device *dev, int mtu)
2528 struct mvneta_port *pp = netdev_priv(dev);
2531 mtu = mvneta_check_mtu_valid(dev, mtu);
2537 if (!netif_running(dev)) {
2538 netdev_update_features(dev);
2542 /* The interface is running, so we have to force a
2543 * reallocation of the queues
2545 mvneta_stop_dev(pp);
2547 mvneta_cleanup_txqs(pp);
2548 mvneta_cleanup_rxqs(pp);
2550 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
2551 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2552 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2554 ret = mvneta_setup_rxqs(pp);
2556 netdev_err(dev, "unable to setup rxqs after MTU change\n");
2560 ret = mvneta_setup_txqs(pp);
2562 netdev_err(dev, "unable to setup txqs after MTU change\n");
2566 mvneta_start_dev(pp);
2569 netdev_update_features(dev);
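/* A running interface must rebuild its queues on an MTU change because
 * pp->pkt_size and pp->frag_size (and hence every preallocated RX buffer)
 * are derived from the MTU; when the interface is down only the software
 * state is touched and the buffers are sized at the next mvneta_open().
 */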
2574 static netdev_features_t mvneta_fix_features(struct net_device *dev,
2575 netdev_features_t features)
2577 struct mvneta_port *pp = netdev_priv(dev);
2579 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
2580 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
2582 "Disable IP checksum for MTU greater than %dB\n",
2589 /* Get mac address */
2590 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2592 u32 mac_addr_l, mac_addr_h;
2594 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
2595 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
2596 addr[0] = (mac_addr_h >> 24) & 0xFF;
2597 addr[1] = (mac_addr_h >> 16) & 0xFF;
2598 addr[2] = (mac_addr_h >> 8) & 0xFF;
2599 addr[3] = mac_addr_h & 0xFF;
2600 addr[4] = (mac_addr_l >> 8) & 0xFF;
2601 addr[5] = mac_addr_l & 0xFF;
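/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * inverse of the unpacking above, assuming the same register layout -
 * bytes 0-3 of the address in MVNETA_MAC_ADDR_HIGH and bytes 4-5 in the
 * low 16 bits of MVNETA_MAC_ADDR_LOW.
 */
static inline void example_mac_to_regs(const unsigned char *addr,
				       u32 *mac_h, u32 *mac_l)
{
	*mac_h = ((u32)addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	*mac_l = (addr[4] << 8) | addr[5];
}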
2604 /* Handle setting mac address */
2605 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2607 struct mvneta_port *pp = netdev_priv(dev);
2608 struct sockaddr *sockaddr = addr;
2611 ret = eth_prepare_mac_addr_change(dev, addr);
2614 /* Remove previous address table entry */
2615 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2617 /* Set new addr in hw */
2618 mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
2620 eth_commit_mac_addr_change(dev, addr);
2624 static void mvneta_adjust_link(struct net_device *ndev)
2626 struct mvneta_port *pp = netdev_priv(ndev);
2627 struct phy_device *phydev = pp->phy_dev;
2628 int status_change = 0;
2631 if ((pp->speed != phydev->speed) ||
2632 (pp->duplex != phydev->duplex)) {
2635 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2636 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2637 MVNETA_GMAC_CONFIG_GMII_SPEED |
2638 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
2641 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2643 if (phydev->speed == SPEED_1000)
2644 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2645 else if (phydev->speed == SPEED_100)
2646 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2648 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2650 pp->duplex = phydev->duplex;
2651 pp->speed = phydev->speed;
2655 if (phydev->link != pp->link) {
2656 if (!phydev->link) {
2661 pp->link = phydev->link;
2665 if (status_change) {
2667 if (!pp->use_inband_status) {
2668 u32 val = mvreg_read(pp,
2669 MVNETA_GMAC_AUTONEG_CONFIG);
2670 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
2671 val |= MVNETA_GMAC_FORCE_LINK_PASS;
2672 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2677 if (!pp->use_inband_status) {
2678 u32 val = mvreg_read(pp,
2679 MVNETA_GMAC_AUTONEG_CONFIG);
2680 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
2681 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
2682 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2685 mvneta_port_down(pp);
2687 phy_print_status(phydev);
2691 static int mvneta_mdio_probe(struct mvneta_port *pp)
2693 struct phy_device *phy_dev;
2695 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2698 netdev_err(pp->dev, "could not find the PHY\n");
2702 phy_dev->supported &= PHY_GBIT_FEATURES;
2703 phy_dev->advertising = phy_dev->supported;
2705 pp->phy_dev = phy_dev;
2713 static void mvneta_mdio_remove(struct mvneta_port *pp)
2715 phy_disconnect(pp->phy_dev);
2719 static void mvneta_percpu_enable(void *arg)
2721 struct mvneta_port *pp = arg;
2723 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
2726 static void mvneta_percpu_disable(void *arg)
2728 struct mvneta_port *pp = arg;
2730 disable_percpu_irq(pp->dev->irq);
2733 static void mvneta_percpu_elect(struct mvneta_port *pp)
2735 int online_cpu_idx, cpu, i = 0;
2737 online_cpu_idx = rxq_def % num_online_cpus();
2739 for_each_online_cpu(cpu) {
2740 if (i == online_cpu_idx)
2741 /* Enable per-CPU interrupt on the one CPU we care about */
2744 smp_call_function_single(cpu, mvneta_percpu_enable, pp, true);
2747 /* Disable per-CPU interrupt on all the other CPUs */
2748 smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
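/* Only one CPU "wins" the election: with a single RX queue in use, the CPU
 * selected by rxq_def modulo the number of online CPUs keeps the per-CPU
 * interrupt enabled and therefore handles all RX processing, while every
 * other CPU has its copy of the interrupt disabled.
 */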
2754 static int mvneta_percpu_notifier(struct notifier_block *nfb,
2755 unsigned long action, void *hcpu)
2757 struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
2759 int cpu = (unsigned long)hcpu, other_cpu;
2760 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2764 case CPU_ONLINE_FROZEN:
2765 netif_tx_stop_all_queues(pp->dev);
2767 /* We have to synchronise on the napi of each CPU
2768 * except the one just being woken up
2770 for_each_online_cpu(other_cpu) {
2771 if (other_cpu != cpu) {
2772 struct mvneta_pcpu_port *other_port =
2773 per_cpu_ptr(pp->ports, other_cpu);
2775 napi_synchronize(&other_port->napi);
2779 /* Mask all ethernet port interrupts */
2780 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2781 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2782 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2783 napi_enable(&port->napi);
2785 /* Enable per-CPU interrupt on the one CPU we care about */
2788 mvneta_percpu_elect(pp);
2790 /* Unmask all ethernet port interrupts */
2791 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2792 MVNETA_RX_INTR_MASK(rxq_number) |
2793 MVNETA_TX_INTR_MASK(txq_number) |
2794 MVNETA_MISCINTR_INTR_MASK);
2795 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2796 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2797 MVNETA_CAUSE_LINK_CHANGE |
2798 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2799 netif_tx_start_all_queues(pp->dev);
2801 case CPU_DOWN_PREPARE:
2802 case CPU_DOWN_PREPARE_FROZEN:
2803 netif_tx_stop_all_queues(pp->dev);
2804 /* Mask all ethernet port interrupts */
2805 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2806 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2807 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2809 napi_synchronize(&port->napi);
2810 napi_disable(&port->napi);
2811 /* Disable per-CPU interrupts on the CPU that is brought down */
2814 smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
2819 case CPU_DEAD_FROZEN:
2820 /* Check if a new CPU must be elected now that this one is down */
2821 mvneta_percpu_elect(pp);
2822 /* Unmask all ethernet port interrupts */
2823 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2824 MVNETA_RX_INTR_MASK(rxq_number) |
2825 MVNETA_TX_INTR_MASK(txq_number) |
2826 MVNETA_MISCINTR_INTR_MASK);
2827 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2828 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2829 MVNETA_CAUSE_LINK_CHANGE |
2830 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2831 netif_tx_start_all_queues(pp->dev);
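/* When a CPU comes online, or after one has died, the handling above masks
 * the port interrupts, re-runs the CPU election so the interrupt ends up on
 * a CPU that is still online, and only then restores the interrupt masks and
 * restarts the TX queues.
 */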
2838 static int mvneta_open(struct net_device *dev)
2840 struct mvneta_port *pp = netdev_priv(dev);
2843 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2844 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2845 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2847 ret = mvneta_setup_rxqs(pp);
2851 ret = mvneta_setup_txqs(pp);
2853 goto err_cleanup_rxqs;
2855 /* Connect to port interrupt line */
2856 ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
2857 MVNETA_DRIVER_NAME, pp->ports);
2859 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
2860 goto err_cleanup_txqs;
2863 /* Even though the documentation says that request_percpu_irq
2864 * doesn't enable the interrupts automatically, it actually
2865 * does so on the local CPU.
2867 * Make sure it's disabled.
2869 mvneta_percpu_disable(pp);
2871 /* Elect a CPU to handle our RX queue interrupt */
2872 mvneta_percpu_elect(pp);
2874 /* Register a CPU notifier to handle the case where our CPU
2875 * might be taken offline.
2877 register_cpu_notifier(&pp->cpu_notifier);
2879 /* By default, the link is down */
2880 netif_carrier_off(pp->dev);
2882 ret = mvneta_mdio_probe(pp);
2884 netdev_err(dev, "cannot probe MDIO bus\n");
2888 mvneta_start_dev(pp);
2893 free_percpu_irq(pp->dev->irq, pp->ports);
2895 mvneta_cleanup_txqs(pp);
2897 mvneta_cleanup_rxqs(pp);
2901 /* Stop the port, free port interrupt line */
2902 static int mvneta_stop(struct net_device *dev)
2904 struct mvneta_port *pp = netdev_priv(dev);
2907 mvneta_stop_dev(pp);
2908 mvneta_mdio_remove(pp);
2909 unregister_cpu_notifier(&pp->cpu_notifier);
2910 for_each_present_cpu(cpu)
2911 smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
2912 free_percpu_irq(dev->irq, pp->ports);
2913 mvneta_cleanup_rxqs(pp);
2914 mvneta_cleanup_txqs(pp);
2919 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2921 struct mvneta_port *pp = netdev_priv(dev);
2926 return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
2929 /* Ethtool methods */
2931 /* Get settings (phy address, speed) for ethtool */
2932 int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2934 struct mvneta_port *pp = netdev_priv(dev);
2939 return phy_ethtool_gset(pp->phy_dev, cmd);
2942 /* Set settings (phy address, speed) for ethtool */
2943 int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2945 struct mvneta_port *pp = netdev_priv(dev);
2950 return phy_ethtool_sset(pp->phy_dev, cmd);
2953 /* Set interrupt coalescing for ethtool */
2954 static int mvneta_ethtool_set_coalesce(struct net_device *dev,
2955 struct ethtool_coalesce *c)
2957 struct mvneta_port *pp = netdev_priv(dev);
2960 for (queue = 0; queue < rxq_number; queue++) {
2961 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2962 rxq->time_coal = c->rx_coalesce_usecs;
2963 rxq->pkts_coal = c->rx_max_coalesced_frames;
2964 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2965 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2968 for (queue = 0; queue < txq_number; queue++) {
2969 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2970 txq->done_pkts_coal = c->tx_max_coalesced_frames;
2971 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2977 /* Get interrupt coalescing for ethtool */
2978 static int mvneta_ethtool_get_coalesce(struct net_device *dev,
2979 struct ethtool_coalesce *c)
2981 struct mvneta_port *pp = netdev_priv(dev);
2983 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
2984 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
2986 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
2991 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
2992 struct ethtool_drvinfo *drvinfo)
2994 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
2995 sizeof(drvinfo->driver));
2996 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
2997 sizeof(drvinfo->version));
2998 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
2999 sizeof(drvinfo->bus_info));
3003 static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
3004 struct ethtool_ringparam *ring)
3006 struct mvneta_port *pp = netdev_priv(netdev);
3008 ring->rx_max_pending = MVNETA_MAX_RXD;
3009 ring->tx_max_pending = MVNETA_MAX_TXD;
3010 ring->rx_pending = pp->rx_ring_size;
3011 ring->tx_pending = pp->tx_ring_size;
3014 static int mvneta_ethtool_set_ringparam(struct net_device *dev,
3015 struct ethtool_ringparam *ring)
3017 struct mvneta_port *pp = netdev_priv(dev);
3019 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
3021 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
3022 ring->rx_pending : MVNETA_MAX_RXD;
3024 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
3025 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
3026 if (pp->tx_ring_size != ring->tx_pending)
3027 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
3028 pp->tx_ring_size, ring->tx_pending);
3030 if (netif_running(dev)) {
3032 if (mvneta_open(dev)) {
3034 "error on opening device after ring param change\n");
3042 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
3045 if (sset == ETH_SS_STATS) {
3048 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3049 memcpy(data + i * ETH_GSTRING_LEN,
3050 mvneta_statistics[i].name, ETH_GSTRING_LEN);
3054 static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
3056 const struct mvneta_statistic *s;
3057 void __iomem *base = pp->base;
3061 for (i = 0, s = mvneta_statistics;
3062 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
3068 val = readl_relaxed(base + s->offset);
3071 /* Docs say to read low 32-bit then high */
3072 low = readl_relaxed(base + s->offset);
3073 high = readl_relaxed(base + s->offset + 4);
3074 val = (u64)high << 32 | low;
3078 pp->ethtool_stats[i] += val;
3082 static void mvneta_ethtool_get_stats(struct net_device *dev,
3083 struct ethtool_stats *stats, u64 *data)
3085 struct mvneta_port *pp = netdev_priv(dev);
3088 mvneta_ethtool_update_stats(pp);
3090 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3091 *data++ = pp->ethtool_stats[i];
3094 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
3096 if (sset == ETH_SS_STATS)
3097 return ARRAY_SIZE(mvneta_statistics);
3101 static const struct net_device_ops mvneta_netdev_ops = {
3102 .ndo_open = mvneta_open,
3103 .ndo_stop = mvneta_stop,
3104 .ndo_start_xmit = mvneta_tx,
3105 .ndo_set_rx_mode = mvneta_set_rx_mode,
3106 .ndo_set_mac_address = mvneta_set_mac_addr,
3107 .ndo_change_mtu = mvneta_change_mtu,
3108 .ndo_fix_features = mvneta_fix_features,
3109 .ndo_get_stats64 = mvneta_get_stats64,
3110 .ndo_do_ioctl = mvneta_ioctl,
3113 const struct ethtool_ops mvneta_eth_tool_ops = {
3114 .get_link = ethtool_op_get_link,
3115 .get_settings = mvneta_ethtool_get_settings,
3116 .set_settings = mvneta_ethtool_set_settings,
3117 .set_coalesce = mvneta_ethtool_set_coalesce,
3118 .get_coalesce = mvneta_ethtool_get_coalesce,
3119 .get_drvinfo = mvneta_ethtool_get_drvinfo,
3120 .get_ringparam = mvneta_ethtool_get_ringparam,
3121 .set_ringparam = mvneta_ethtool_set_ringparam,
3122 .get_strings = mvneta_ethtool_get_strings,
3123 .get_ethtool_stats = mvneta_ethtool_get_stats,
3124 .get_sset_count = mvneta_ethtool_get_sset_count,
3128 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
3133 mvneta_port_disable(pp);
3135 /* Set port default values */
3136 mvneta_defaults_set(pp);
3138 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
3143 /* Initialize TX descriptor rings */
3144 for (queue = 0; queue < txq_number; queue++) {
3145 struct mvneta_tx_queue *txq = &pp->txqs[queue];
3147 txq->size = pp->tx_ring_size;
3148 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
3151 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
3156 /* Create Rx descriptor rings */
3157 for (queue = 0; queue < rxq_number; queue++) {
3158 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3160 rxq->size = pp->rx_ring_size;
3161 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
3162 rxq->time_coal = MVNETA_RX_COAL_USEC;
3168 /* platform glue: initialize decoding windows */
3169 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
3170 const struct mbus_dram_target_info *dram)
3176 for (i = 0; i < 6; i++) {
3177 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
3178 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
3181 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
3187 for (i = 0; i < dram->num_cs; i++) {
3188 const struct mbus_dram_window *cs = dram->cs + i;
3189 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
3190 (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
3192 mvreg_write(pp, MVNETA_WIN_SIZE(i),
3193 (cs->size - 1) & 0xffff0000);
3195 win_enable &= ~(1 << i);
3196 win_protect |= 3 << (2 * i);
3199 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
3200 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
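/* Each DRAM chip-select gets its own decoding window: the base register
 * combines the CS base address with the MBus target/attribute, the size
 * register holds (size - 1) masked to 64 KiB granularity, the window's bit
 * is cleared in MVNETA_BASE_ADDR_ENABLE (leaving unused windows disabled),
 * and full read/write access is granted in the protection register.
 */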
3203 /* Power up the port */
3204 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
3208 /* MAC Cause register should be cleared */
3209 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
3211 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
3213 /* Even though it might look weird, when we're configured in
3214 * SGMII or QSGMII mode, the RGMII bit needs to be set.
3217 case PHY_INTERFACE_MODE_QSGMII:
3218 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
3219 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
3221 case PHY_INTERFACE_MODE_SGMII:
3222 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
3223 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
3225 case PHY_INTERFACE_MODE_RGMII:
3226 case PHY_INTERFACE_MODE_RGMII_ID:
3227 ctrl |= MVNETA_GMAC2_PORT_RGMII;
3233 if (pp->use_inband_status)
3234 ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE;
3236 /* Cancel Port Reset */
3237 ctrl &= ~MVNETA_GMAC2_PORT_RESET;
3238 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
3240 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
3241 MVNETA_GMAC2_PORT_RESET) != 0)
3247 /* Device initialization routine */
3248 static int mvneta_probe(struct platform_device *pdev)
3250 const struct mbus_dram_target_info *dram_target_info;
3251 struct resource *res;
3252 struct device_node *dn = pdev->dev.of_node;
3253 struct device_node *phy_node;
3254 struct mvneta_port *pp;
3255 struct net_device *dev;
3256 const char *dt_mac_addr;
3257 char hw_mac_addr[ETH_ALEN];
3258 const char *mac_from;
3259 const char *managed;
3265 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
3269 dev->irq = irq_of_parse_and_map(dn, 0);
3270 if (dev->irq == 0) {
3272 goto err_free_netdev;
3275 phy_node = of_parse_phandle(dn, "phy", 0);
3277 if (!of_phy_is_fixed_link(dn)) {
3278 dev_err(&pdev->dev, "no PHY specified\n");
3283 err = of_phy_register_fixed_link(dn);
3285 dev_err(&pdev->dev, "cannot register fixed PHY\n");
3289 /* In the case of a fixed PHY, the DT node associated
3290 * with the PHY is the Ethernet MAC DT node.
3292 phy_node = of_node_get(dn);
3295 phy_mode = of_get_phy_mode(dn);
3297 dev_err(&pdev->dev, "incorrect phy-mode\n");
3299 goto err_put_phy_node;
3302 dev->tx_queue_len = MVNETA_MAX_TXD;
3303 dev->watchdog_timeo = 5 * HZ;
3304 dev->netdev_ops = &mvneta_netdev_ops;
3306 dev->ethtool_ops = &mvneta_eth_tool_ops;
3308 pp = netdev_priv(dev);
3309 pp->phy_node = phy_node;
3310 pp->phy_interface = phy_mode;
3312 err = of_property_read_string(dn, "managed", &managed);
3313 pp->use_inband_status = (err == 0 &&
3314 strcmp(managed, "in-band-status") == 0);
3315 pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
3317 pp->clk = devm_clk_get(&pdev->dev, NULL);
3318 if (IS_ERR(pp->clk)) {
3319 err = PTR_ERR(pp->clk);
3320 goto err_put_phy_node;
3323 clk_prepare_enable(pp->clk);
3325 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3326 pp->base = devm_ioremap_resource(&pdev->dev, res);
3327 if (IS_ERR(pp->base)) {
3328 err = PTR_ERR(pp->base);
3332 /* Alloc per-cpu port structure */
3333 pp->ports = alloc_percpu(struct mvneta_pcpu_port);
3339 /* Alloc per-cpu stats */
3340 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
3343 goto err_free_ports;
3346 dt_mac_addr = of_get_mac_address(dn);
3348 mac_from = "device tree";
3349 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
3351 mvneta_get_mac_addr(pp, hw_mac_addr);
3352 if (is_valid_ether_addr(hw_mac_addr)) {
3353 mac_from = "hardware";
3354 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
3356 mac_from = "random";
3357 eth_hw_addr_random(dev);
3361 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
3362 if (tx_csum_limit < 0 ||
3363 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
3364 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
3365 dev_info(&pdev->dev,
3366 "Wrong TX csum limit in DT, set to %dB\n",
3367 MVNETA_TX_CSUM_DEF_SIZE);
3369 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
3370 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
3372 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
3375 pp->tx_csum_limit = tx_csum_limit;
3377 pp->tx_ring_size = MVNETA_MAX_TXD;
3378 pp->rx_ring_size = MVNETA_MAX_RXD;
3381 SET_NETDEV_DEV(dev, &pdev->dev);
3383 err = mvneta_init(&pdev->dev, pp);
3385 goto err_free_stats;
3387 err = mvneta_port_power_up(pp, phy_mode);
3389 dev_err(&pdev->dev, "can't power up port\n");
3390 goto err_free_stats;
3393 dram_target_info = mv_mbus_dram_info();
3394 if (dram_target_info)
3395 mvneta_conf_mbus_windows(pp, dram_target_info);
3397 for_each_present_cpu(cpu) {
3398 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3400 netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
3404 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3405 dev->hw_features |= dev->features;
3406 dev->vlan_features |= dev->features;
3407 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
3408 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
3410 err = register_netdev(dev);
3412 dev_err(&pdev->dev, "failed to register\n");
3413 goto err_free_stats;
3416 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
3419 platform_set_drvdata(pdev, pp->dev);
3421 if (pp->use_inband_status) {
3422 struct phy_device *phy = of_phy_find_device(dn);
3424 mvneta_fixed_link_update(pp, phy);
3426 put_device(&phy->dev);
3432 free_percpu(pp->stats);
3434 free_percpu(pp->ports);
3436 clk_disable_unprepare(pp->clk);
3438 of_node_put(phy_node);
3440 irq_dispose_mapping(dev->irq);
3446 /* Device removal routine */
3447 static int mvneta_remove(struct platform_device *pdev)
3449 struct net_device *dev = platform_get_drvdata(pdev);
3450 struct mvneta_port *pp = netdev_priv(dev);
3452 unregister_netdev(dev);
3453 clk_disable_unprepare(pp->clk);
3454 free_percpu(pp->ports);
3455 free_percpu(pp->stats);
3456 irq_dispose_mapping(dev->irq);
3457 of_node_put(pp->phy_node);
3463 static const struct of_device_id mvneta_match[] = {
3464 { .compatible = "marvell,armada-370-neta" },
3465 { .compatible = "marvell,armada-xp-neta" },
3468 MODULE_DEVICE_TABLE(of, mvneta_match);
3470 static struct platform_driver mvneta_driver = {
3471 .probe = mvneta_probe,
3472 .remove = mvneta_remove,
3474 .name = MVNETA_DRIVER_NAME,
3475 .of_match_table = mvneta_match,
3479 module_platform_driver(mvneta_driver);
3481 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
3482 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
3483 MODULE_LICENSE("GPL");
3485 module_param(rxq_number, int, S_IRUGO);
3486 module_param(txq_number, int, S_IRUGO);
3488 module_param(rxq_def, int, S_IRUGO);
3489 module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);