/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP2_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP2_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP2_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
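/* The low half-word of MVPP2_ISR_ENABLE_REG sets enable bits while the
 * high half-word clears them, so enabling or disabling interrupts for a
 * set of CPUs is a single write, e.g. (illustration only):
 *	mvpp2_write(priv, MVPP2_ISR_ENABLE_REG(port),
 *		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
 */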
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP2_BM_MC_RLS_REG			0x64c4
#define MVPP2_BM_MC_ID_MASK			0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))
/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MIB_COUNTERS_BASE(port)		(0x1000 + ((port) >> 1) * \
						0x400 + (port) * 0x400)
#define MVPP2_MIB_LATE_COLLISION		0x7c
#define MVPP2_ISR_SUM_MASK_REG			0x220c
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
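/* For example, with a 128-descriptor ring last_desc is 127, so
 * MVPP2_QUEUE_NEXT_DESC(q, 127) wraps back to 0 while
 * MVPP2_QUEUE_NEXT_DESC(q, 5) simply yields 6.
 */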
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
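/* Concretely: MVPP2_MH_SIZE (2) plus the 14-byte Ethernet header puts
 * the IP header 16 bytes into the buffer, i.e. on a 4-byte boundary.
 */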
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ			8
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		4
/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM		(MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
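/* For instance, an MTU of 1500 gives 1500 + 2 (MH) + 4 (VLAN) +
 * 14 (Ethernet header) + 4 (FCS) = 1524 bytes, aligned up to a
 * 1536-byte packet size on the 32-byte cache line.
 */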
#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)

#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)
/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS		8
#define MVPP2_PRS_PORT_MASK		0xff
#define MVPP2_PRS_LU_MASK		0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
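/* These macros interleave data and enable bytes in pairs: offsets 0,1
 * map to data bytes 0,1 with enable bytes 2,3; offsets 2,3 map to data
 * bytes 4,5 with enable bytes 6,7; and so on.
 */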
#define MVPP2_PRS_TCAM_AI_BYTE		16
#define MVPP2_PRS_TCAM_PORT_BYTE	17
#define MVPP2_PRS_TCAM_LU_BYTE		20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)	((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD		5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
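/* With MVPP2_PRS_TCAM_SRAM_SIZE at 256, the fixed entries above occupy
 * index 0 (drop-all) and indexes 226-255, leaving indexes 1-225 as the
 * dynamically allocated FIRST/LAST_FREE_TID range.
 */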
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			0xc
#define MVPP2_PRS_RI_VLAN_NONE			~(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		0x600
#define MVPP2_PRS_RI_L2_UCAST			~(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		0x7000
#define MVPP2_PRS_RI_L3_UN			~(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		0x18000
#define MVPP2_PRS_RI_L3_UCAST			~(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64
/* BM constants */
#define MVPP2_BM_POOLS_NUM		8
#define MVPP2_BM_LONG_BUF_NUM		1024
#define MVPP2_BM_SHORT_BUF_NUM		2048
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL		3

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24
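/* Illustration only: a cookie carries the originating CPU in bits
 * [31:24] and the pool number in bits [15:8], roughly
 *	cookie = (cpu << MVPP2_BM_COOKIE_CPU_OFFS) |
 *		 (pool << MVPP2_BM_COOKIE_POOL_OFFS);
 */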
/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
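/* Expanding the macro: MVPP2_RX_MAX_PKT_SIZE(512) is
 * 512 - NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
 * so packet data, headroom and the shared info together fit in a
 * 512-byte buffer.
 */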
enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;

	/* Common clocks */
	struct clk *pp_clk;
	struct clk *gop_clk;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;
};
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_port {
	u8 id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	int pkt_size;

	u32 pending_cause_rx;
	struct napi_struct napi;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, which is
 * therefore defined by the hardware design.
 */
#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)
struct mvpp2_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting */
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

struct mvpp2_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_phys_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};
struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t phys;

	/* Size transmitted */
	size_t size;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE physical base address */
	dma_addr_t phys_addr;

	/* Ports using BM pool */
	u32 port_map;

	/* Occupied buffers indicator */
	atomic_t in_use;
	int in_use_thresh;
};

struct mvpp2_buff_hdr {
	u32 next_buff_phys_addr;
	u32 next_buff_virt_addr;
	u16 byte_count;
	u16 info;
	u8  reserved1;		/* bm_qset (for future use, BM)	*/
};
/* Buffer header info bits */
#define MVPP2_B_HDR_INFO_MC_ID_MASK	0xfff
#define MVPP2_B_HDR_INFO_MC_ID(info)	((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
#define MVPP2_B_HDR_INFO_LAST_OFFS	12
#define MVPP2_B_HDR_INFO_LAST_MASK	BIT(12)
#define MVPP2_B_HDR_INFO_IS_LAST(info) \
	(((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_MAX_TXQ;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}
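/* Typical read-modify-write usage of these accessors (illustration
 * only, not part of the init flow here):
 *	u32 val = mvpp2_read(priv, MVPP2_RX_CTRL_REG(port));
 *	val |= MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK;
 *	mvpp2_write(priv, MVPP2_RX_CTRL_REG(port), val);
 */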
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = tx_desc->data_size;
	tx_buf->phys = tx_desc->buf_phys_addr;
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
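/* E.g. with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8, txq 0 of
 * port 0 maps to physical TXQ (16 + 0) * 8 + 0 = 128.
 */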
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exist - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
		mvpp2_prs_sram_shift_set(&pe, shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
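/* Illustration only (the actual calls live in the parser init code
 * further down the driver): installing the tagged EDSA entry for a
 * port would look like
 *	mvpp2_prs_dsa_tag_set(priv, port, true,
 *			      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
 */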
/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Search for existing single/triple vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
						   unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;

		mvpp2_prs_hw_read(priv, pe);
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}
1718 static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1719 unsigned int port_map)
1721 struct mvpp2_prs_entry *pe;
1725 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1728 /* Create new tcam entry */
1729 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
1730 MVPP2_PE_FIRST_FREE_TID);
1734 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1738 /* Get last double vlan tid */
1739 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
1740 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
1741 unsigned int ri_bits;
1743 if (!priv->prs_shadow[tid_aux].valid ||
1744 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1747 pe->index = tid_aux;
1748 mvpp2_prs_hw_read(priv, pe);
1749 ri_bits = mvpp2_prs_sram_ri_get(pe);
1750 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
1751 MVPP2_PRS_RI_VLAN_DOUBLE)
1755 if (tid <= tid_aux) {
1760 memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
1761 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1764 mvpp2_prs_match_etype(pe, 0, tpid);
1766 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
1767 /* Shift 4 bytes - skip 1 vlan tag */
1768 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
1769 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1770 /* Clear all ai bits for next iteration */
1771 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1773 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
1774 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
1775 MVPP2_PRS_RI_VLAN_MASK);
1777 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
1778 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
1779 MVPP2_PRS_RI_VLAN_MASK);
1781 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
1783 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1785 /* Update ports' mask */
1786 mvpp2_prs_tcam_port_map_set(pe, port_map);
1788 mvpp2_prs_hw_write(priv, pe);
/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}
/* Search for existing double vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
							  unsigned short tpid1,
							  unsigned short tpid2)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);

		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
			&& mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 8 bytes - skip 2 vlan tags */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);
	return ret;
}
1931 /* IPv4 header parsing for fragmentation and L4 offset */
1932 static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
1933 unsigned int ri, unsigned int ri_mask)
1935 struct mvpp2_prs_entry pe;
1938 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
1939 (proto != IPPROTO_IGMP))
1942 /* Fragmented packet */
1943 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1944 MVPP2_PE_LAST_FREE_TID);
1948 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1949 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1952 /* Set next lu to IPv4 */
1953 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1954 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1956 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1957 sizeof(struct iphdr) - 4,
1958 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1959 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1960 MVPP2_PRS_IPV4_DIP_AI_BIT);
1961 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
1962 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
1964 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1965 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1966 /* Unmask all ports */
1967 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1969 /* Update shadow table and hw entry */
1970 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1971 mvpp2_prs_hw_write(priv, &pe);
1973 /* Not fragmented packet */
1974 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1975 MVPP2_PE_LAST_FREE_TID);
1980 /* Clear ri before updating */
1981 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1982 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1983 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1985 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
1986 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
1988 /* Update shadow table and hw entry */
1989 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1990 mvpp2_prs_hw_write(priv, &pe);
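/*
 * Illustrative sketch, not part of the original driver: the L2 IPv4
 * entry shifts by MVPP2_ETH_TYPE_LEN + 4, so the IPv4 lookup window
 * used above starts at byte 4 of the IP header.  Window byte 5 is thus
 * the protocol field (IP header byte 9), and window bytes 2-3 are
 * frag_off (IP header bytes 6-7), which the "not fragmented" entry
 * requires to be zero.  The helper is hypothetical and only shows the
 * offset mapping.
 */
static inline int example_ip4_window_off(int ip_hdr_off)
{
	/* the first 4 IP header bytes are consumed by the L2 shift */
	return ip_hdr_off - 4;	/* protocol: 9 - 4 == 5, as matched above */
}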
1995 /* IPv4 L3 multicast or broadcast */
1996 static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
1998 struct mvpp2_prs_entry pe;
2001 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2002 MVPP2_PE_LAST_FREE_TID);
2006 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2007 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2011 case MVPP2_PRS_L3_MULTI_CAST:
2012 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2013 MVPP2_PRS_IPV4_MC_MASK);
2014 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2015 MVPP2_PRS_RI_L3_ADDR_MASK);
2017 case MVPP2_PRS_L3_BROAD_CAST:
2018 mask = MVPP2_PRS_IPV4_BC_MASK;
2019 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2020 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2021 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2022 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2023 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2024 MVPP2_PRS_RI_L3_ADDR_MASK);
2030 /* Finished: go to flowid generation */
2031 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2032 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2034 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2035 MVPP2_PRS_IPV4_DIP_AI_BIT);
2036 /* Unmask all ports */
2037 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2039 /* Update shadow table and hw entry */
2040 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2041 mvpp2_prs_hw_write(priv, &pe);
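/*
 * Illustrative sketch, not part of the original driver: IPv4 multicast
 * destinations live in 224.0.0.0/4, i.e. the top nibble of the first
 * DIP byte is 0xe - presumably what the MVPP2_PRS_IPV4_MC /
 * MVPP2_PRS_IPV4_MC_MASK pair used above encodes - while broadcast is
 * matched as four all-ones bytes.  Hypothetical standalone check:
 */
static inline bool example_is_ip4_mcast_first_byte(u8 b)
{
	return (b & 0xf0) == 0xe0;	/* 224.0.0.0/4 */
}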
2046 /* Set entries for protocols over IPv6 */
2047 static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2048 unsigned int ri, unsigned int ri_mask)
2050 struct mvpp2_prs_entry pe;
2053 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2054 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2057 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2058 MVPP2_PE_LAST_FREE_TID);
2062 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2063 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2066 /* Finished: go to flowid generation */
2067 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2068 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2069 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2070 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2071 sizeof(struct ipv6hdr) - 6,
2072 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2074 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2075 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2076 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2077 /* Unmask all ports */
2078 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2081 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2082 mvpp2_prs_hw_write(priv, &pe);
2087 /* IPv6 L3 multicast entry */
2088 static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2090 struct mvpp2_prs_entry pe;
2093 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2096 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2097 MVPP2_PE_LAST_FREE_TID);
2101 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2102 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2105 /* Finished: go to flowid generation */
2106 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2107 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2108 MVPP2_PRS_RI_L3_ADDR_MASK);
2109 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2110 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2111 /* Shift back to IPv6 NH */
2112 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2114 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2115 MVPP2_PRS_IPV6_MC_MASK);
2116 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2117 /* Unmask all ports */
2118 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2120 /* Update shadow table and hw entry */
2121 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2122 mvpp2_prs_hw_write(priv, &pe);
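/*
 * Illustrative sketch, not part of the original driver: after the L2
 * IPv6 entry, the lookup window sits at the start of the destination
 * address (IPv6 header offset 8 + MVPP2_MAX_L3_ADDR_SIZE, assuming the
 * latter is the 16-byte IPv6 address size), while the Next Header
 * field lives at offset 6 - hence the -18 back-shift used above.
 */
static inline int example_ip6_nh_backshift(void)
{
	return 6 - (8 + MVPP2_MAX_L3_ADDR_SIZE);	/* == -18 */
}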
2127 /* Parser per-port initialization */
2128 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2129 int lu_max, int offset)
2134 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2135 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2136 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2137 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2139 /* Set maximum number of loops for packet received from port */
2140 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2141 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2142 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2143 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2145 /* Set initial offset for packet header extraction for the first searching loop */
2148 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2149 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2150 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2151 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2154 /* Default flow entries initialization for all ports */
2155 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2157 struct mvpp2_prs_entry pe;
2160 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2161 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2162 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2163 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2165 /* Mask all ports */
2166 mvpp2_prs_tcam_port_map_set(&pe, 0);
2169 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2170 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2172 /* Update shadow table and hw entry */
2173 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2174 mvpp2_prs_hw_write(priv, &pe);
2178 /* Set default entry for Marvell Header field */
2179 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2181 struct mvpp2_prs_entry pe;
2183 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2185 pe.index = MVPP2_PE_MH_DEFAULT;
2186 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2187 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2188 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2189 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2191 /* Unmask all ports */
2192 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2194 /* Update shadow table and hw entry */
2195 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2196 mvpp2_prs_hw_write(priv, &pe);
2199 /* Set default entries (placeholders) for promiscuous, non-promiscuous and
2200 * multicast MAC addresses */
2202 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2204 struct mvpp2_prs_entry pe;
2206 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2208 /* Non-promiscuous mode for all ports - DROP unknown packets */
2209 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2210 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2212 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2213 MVPP2_PRS_RI_DROP_MASK);
2214 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2215 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2217 /* Unmask all ports */
2218 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2220 /* Update shadow table and hw entry */
2221 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2222 mvpp2_prs_hw_write(priv, &pe);
2224 /* Placeholders only - no ports */
2225 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2226 mvpp2_prs_mac_promisc_set(priv, 0, false);
2227 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2228 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2231 /* Set default entries for various types of dsa packets */
2232 static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2234 struct mvpp2_prs_entry pe;
2236 /* Untagged EDSA entry - placeholder */
2237 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2240 /* Tagged EDSA entry - placeholder */
2241 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2243 /* Untagged DSA entry - placeholder */
2244 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2247 /* Tagged DSA entry - placeholder */
2248 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2250 /* Untagged EDSA ethertype entry - placeholder */
2251 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2252 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2254 /* Tagged EDSA ethertype entry - placeholder */
2255 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2256 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2258 /* Untagged DSA ethertype entry */
2259 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2260 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2262 /* Tagged DSA ethertype entry */
2263 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2264 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2266 /* Set default entry, in case the DSA or EDSA tag is not found */
2267 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2268 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2269 pe.index = MVPP2_PE_DSA_DEFAULT;
2270 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2273 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2274 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2276 /* Clear all sram ai bits for next iteration */
2277 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2279 /* Unmask all ports */
2280 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2282 mvpp2_prs_hw_write(priv, &pe);
2285 /* Match basic ethertypes */
2286 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2288 struct mvpp2_prs_entry pe;
2291 /* Ethertype: PPPoE */
2292 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2293 MVPP2_PE_LAST_FREE_TID);
2297 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2298 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2301 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2303 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2304 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2305 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2306 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2307 MVPP2_PRS_RI_PPPOE_MASK);
2309 /* Update shadow table and hw entry */
2310 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2311 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2312 priv->prs_shadow[pe.index].finish = false;
2313 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2314 MVPP2_PRS_RI_PPPOE_MASK);
2315 mvpp2_prs_hw_write(priv, &pe);
2317 /* Ethertype: ARP */
2318 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2319 MVPP2_PE_LAST_FREE_TID);
2323 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2324 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2327 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2329 /* Generate flow in the next iteration */
2330 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2331 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2332 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2333 MVPP2_PRS_RI_L3_PROTO_MASK);
2335 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2337 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2339 /* Update shadow table and hw entry */
2340 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2341 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2342 priv->prs_shadow[pe.index].finish = true;
2343 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2344 MVPP2_PRS_RI_L3_PROTO_MASK);
2345 mvpp2_prs_hw_write(priv, &pe);
2347 /* Ethertype: LBTD */
2348 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2349 MVPP2_PE_LAST_FREE_TID);
2353 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2354 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2357 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2359 /* Generate flow in the next iteration */
2360 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2361 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2362 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2363 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2364 MVPP2_PRS_RI_CPU_CODE_MASK |
2365 MVPP2_PRS_RI_UDF3_MASK);
2367 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2369 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2371 /* Update shadow table and hw entry */
2372 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2373 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2374 priv->prs_shadow[pe.index].finish = true;
2375 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2376 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2377 MVPP2_PRS_RI_CPU_CODE_MASK |
2378 MVPP2_PRS_RI_UDF3_MASK);
2379 mvpp2_prs_hw_write(priv, &pe);
2381 /* Ethertype: IPv4 without options */
2382 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2383 MVPP2_PE_LAST_FREE_TID);
2387 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2388 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2391 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2392 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2393 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2394 MVPP2_PRS_IPV4_HEAD_MASK |
2395 MVPP2_PRS_IPV4_IHL_MASK);
2397 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2398 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2399 MVPP2_PRS_RI_L3_PROTO_MASK);
2400 /* Skip eth_type + 4 bytes of IP header */
2401 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2402 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2404 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2406 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2408 /* Update shadow table and hw entry */
2409 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2410 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2411 priv->prs_shadow[pe.index].finish = false;
2412 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2413 MVPP2_PRS_RI_L3_PROTO_MASK);
2414 mvpp2_prs_hw_write(priv, &pe);
2416 /* Ethertype: IPv4 with options */
2417 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2418 MVPP2_PE_LAST_FREE_TID);
2424 /* Clear tcam data before updating */
2425 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2426 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2428 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2429 MVPP2_PRS_IPV4_HEAD,
2430 MVPP2_PRS_IPV4_HEAD_MASK);
2432 /* Clear ri before updating */
2433 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2434 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2435 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2436 MVPP2_PRS_RI_L3_PROTO_MASK);
2438 /* Update shadow table and hw entry */
2439 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2440 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2441 priv->prs_shadow[pe.index].finish = false;
2442 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2443 MVPP2_PRS_RI_L3_PROTO_MASK);
2444 mvpp2_prs_hw_write(priv, &pe);
2446 /* Ethertype: IPv6 without options */
2447 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2448 MVPP2_PE_LAST_FREE_TID);
2452 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2453 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2456 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2458 /* Jump to the DIP of the IPv6 header (past the fixed fields and SA) */
2459 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2460 MVPP2_MAX_L3_ADDR_SIZE,
2461 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2462 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2463 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2464 MVPP2_PRS_RI_L3_PROTO_MASK);
2466 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2468 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2470 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2471 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2472 priv->prs_shadow[pe.index].finish = false;
2473 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2474 MVPP2_PRS_RI_L3_PROTO_MASK);
2475 mvpp2_prs_hw_write(priv, &pe);
2477 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2478 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2479 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2480 pe.index = MVPP2_PE_ETH_TYPE_UN;
2482 /* Unmask all ports */
2483 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2485 /* Generate flow in the next iteration */
2486 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2487 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2488 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2489 MVPP2_PRS_RI_L3_PROTO_MASK);
2490 /* Set L3 offset even if it's an unknown L3 */
2491 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2493 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2495 /* Update shadow table and hw entry */
2496 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2497 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2498 priv->prs_shadow[pe.index].finish = true;
2499 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2500 MVPP2_PRS_RI_L3_PROTO_MASK);
2501 mvpp2_prs_hw_write(priv, &pe);
2506 /* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible combinations:
 *     0x8100, 0x88A8
 *     0x8100, 0x8100
 *     0x8100
 *     0x88A8
 */
2513 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2515 struct mvpp2_prs_entry pe;
2518 priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
2519 MVPP2_PRS_DBL_VLANS_MAX, sizeof(bool),
2521 if (!priv->prs_double_vlans)
2524 /* Double VLAN: 0x8100, 0x88A8 */
2525 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2526 MVPP2_PRS_PORT_MASK);
2530 /* Double VLAN: 0x8100, 0x8100 */
2531 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2532 MVPP2_PRS_PORT_MASK);
2536 /* Single VLAN: 0x88A8 */
2537 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2538 MVPP2_PRS_PORT_MASK);
2542 /* Single VLAN: 0x8100 */
2543 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2544 MVPP2_PRS_PORT_MASK);
2548 /* Set default double vlan entry */
2549 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2550 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2551 pe.index = MVPP2_PE_VLAN_DBL;
2553 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2554 /* Clear ai for next iterations */
2555 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2556 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2557 MVPP2_PRS_RI_VLAN_MASK);
2559 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2560 MVPP2_PRS_DBL_VLAN_AI_BIT);
2561 /* Unmask all ports */
2562 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2564 /* Update shadow table and hw entry */
2565 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2566 mvpp2_prs_hw_write(priv, &pe);
2568 /* Set default vlan none entry */
2569 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2570 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2571 pe.index = MVPP2_PE_VLAN_NONE;
2573 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2574 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2575 MVPP2_PRS_RI_VLAN_MASK);
2577 /* Unmask all ports */
2578 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2580 /* Update shadow table and hw entry */
2581 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2582 mvpp2_prs_hw_write(priv, &pe);
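/*
 * Illustrative sketch, not part of the original driver: a double-tag
 * entry stores its free ai number plus MVPP2_PRS_DBL_VLAN_AI_BIT in
 * the sram ai, which becomes the tcam ai of the next lookup pass -
 * that is the bit the default double-VLAN entry above keys on.
 * Hypothetical helper showing the marker encoding:
 */
static inline unsigned int example_dbl_vlan_sram_ai(int free_ai)
{
	return free_ai | MVPP2_PRS_DBL_VLAN_AI_BIT;
}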
2587 /* Set entries for PPPoE ethertype */
2588 static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2590 struct mvpp2_prs_entry pe;
2593 /* IPv4 over PPPoE with options */
2594 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2595 MVPP2_PE_LAST_FREE_TID);
2599 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2600 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2603 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2605 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2606 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2607 MVPP2_PRS_RI_L3_PROTO_MASK);
2608 /* Skip eth_type + 4 bytes of IP header */
2609 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2610 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2612 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2614 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2616 /* Update shadow table and hw entry */
2617 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2618 mvpp2_prs_hw_write(priv, &pe);
2620 /* IPv4 over PPPoE without options */
2621 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2622 MVPP2_PE_LAST_FREE_TID);
2628 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2629 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2630 MVPP2_PRS_IPV4_HEAD_MASK |
2631 MVPP2_PRS_IPV4_IHL_MASK);
2633 /* Clear ri before updating */
2634 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2635 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2636 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2637 MVPP2_PRS_RI_L3_PROTO_MASK);
2639 /* Update shadow table and hw entry */
2640 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2641 mvpp2_prs_hw_write(priv, &pe);
2643 /* IPv6 over PPPoE */
2644 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2645 MVPP2_PE_LAST_FREE_TID);
2649 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2650 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2653 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2655 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2656 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2657 MVPP2_PRS_RI_L3_PROTO_MASK);
2658 /* Skip eth_type + 4 bytes of IPv6 header */
2659 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2660 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2662 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2664 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2666 /* Update shadow table and hw entry */
2667 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2668 mvpp2_prs_hw_write(priv, &pe);
2670 /* Non-IP over PPPoE */
2671 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2672 MVPP2_PE_LAST_FREE_TID);
2676 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2677 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2680 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2681 MVPP2_PRS_RI_L3_PROTO_MASK);
2683 /* Finished: go to flowid generation */
2684 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2685 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2686 /* Set L3 offset even if it's unknown L3 */
2687 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2689 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2691 /* Update shadow table and hw entry */
2692 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2693 mvpp2_prs_hw_write(priv, &pe);
2698 /* Initialize entries for IPv4 */
2699 static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2701 struct mvpp2_prs_entry pe;
2704 /* Set entries for TCP, UDP and IGMP over IPv4 */
2705 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2706 MVPP2_PRS_RI_L4_PROTO_MASK);
2710 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2711 MVPP2_PRS_RI_L4_PROTO_MASK);
2715 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2716 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2717 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2718 MVPP2_PRS_RI_CPU_CODE_MASK |
2719 MVPP2_PRS_RI_UDF3_MASK);
2723 /* IPv4 Broadcast */
2724 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2728 /* IPv4 Multicast */
2729 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2733 /* Default IPv4 entry for unknown protocols */
2734 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2735 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2736 pe.index = MVPP2_PE_IP4_PROTO_UN;
2738 /* Set next lu to IPv4 */
2739 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2740 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2742 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2743 sizeof(struct iphdr) - 4,
2744 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2745 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2746 MVPP2_PRS_IPV4_DIP_AI_BIT);
2747 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2748 MVPP2_PRS_RI_L4_PROTO_MASK);
2750 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2751 /* Unmask all ports */
2752 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2754 /* Update shadow table and hw entry */
2755 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2756 mvpp2_prs_hw_write(priv, &pe);
2758 /* Default IPv4 entry for unicast address */
2759 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2760 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2761 pe.index = MVPP2_PE_IP4_ADDR_UN;
2763 /* Finished: go to flowid generation */
2764 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2765 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2766 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2767 MVPP2_PRS_RI_L3_ADDR_MASK);
2769 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2770 MVPP2_PRS_IPV4_DIP_AI_BIT);
2771 /* Unmask all ports */
2772 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2774 /* Update shadow table and hw entry */
2775 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2776 mvpp2_prs_hw_write(priv, &pe);
2781 /* Initialize entries for IPv6 */
2782 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
2784 struct mvpp2_prs_entry pe;
2787 /* Set entries for TCP, UDP and ICMP over IPv6 */
2788 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
2789 MVPP2_PRS_RI_L4_TCP,
2790 MVPP2_PRS_RI_L4_PROTO_MASK);
2794 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
2795 MVPP2_PRS_RI_L4_UDP,
2796 MVPP2_PRS_RI_L4_PROTO_MASK);
2800 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
2801 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2802 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2803 MVPP2_PRS_RI_CPU_CODE_MASK |
2804 MVPP2_PRS_RI_UDF3_MASK);
2808 /* IPv4 is the last header. This is a case similar to 6-TCP or 17-UDP */
2809 /* Result Info: UDF7=1, DS lite */
2810 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
2811 MVPP2_PRS_RI_UDF7_IP6_LITE,
2812 MVPP2_PRS_RI_UDF7_MASK);
2816 /* IPv6 multicast */
2817 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2821 /* Entry for checking hop limit */
2822 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2823 MVPP2_PE_LAST_FREE_TID);
2827 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2828 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2831 /* Finished: go to flowid generation */
2832 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2833 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2834 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
2835 MVPP2_PRS_RI_DROP_MASK,
2836 MVPP2_PRS_RI_L3_PROTO_MASK |
2837 MVPP2_PRS_RI_DROP_MASK);
2839 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
2840 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2841 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2843 /* Update shadow table and hw entry */
2844 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2845 mvpp2_prs_hw_write(priv, &pe);
2847 /* Default IPv6 entry for unknown protocols */
2848 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2849 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2850 pe.index = MVPP2_PE_IP6_PROTO_UN;
2852 /* Finished: go to flowid generation */
2853 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2854 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2855 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2856 MVPP2_PRS_RI_L4_PROTO_MASK);
2857 /* Set L4 offset relative to our current place */
2858 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2859 sizeof(struct ipv6hdr) - 4,
2860 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2862 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2863 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2864 /* Unmask all ports */
2865 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2867 /* Update shadow table and hw entry */
2868 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2869 mvpp2_prs_hw_write(priv, &pe);
2871 /* Default IPv6 entry for unknown ext protocols */
2872 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2873 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2874 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
2876 /* Finished: go to flowid generation */
2877 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2878 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2879 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2880 MVPP2_PRS_RI_L4_PROTO_MASK);
2882 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
2883 MVPP2_PRS_IPV6_EXT_AI_BIT);
2884 /* Unmask all ports */
2885 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2887 /* Update shadow table and hw entry */
2888 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2889 mvpp2_prs_hw_write(priv, &pe);
2891 /* Default IPv6 entry for unicast address */
2892 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2893 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2894 pe.index = MVPP2_PE_IP6_ADDR_UN;
2896 /* Finished: go to IPv6 again */
2897 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2898 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2899 MVPP2_PRS_RI_L3_ADDR_MASK);
2900 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2901 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2902 /* Shift back to IPv6 NH */
2903 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2905 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2906 /* Unmask all ports */
2907 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2909 /* Update shadow table and hw entry */
2910 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2911 mvpp2_prs_hw_write(priv, &pe);
2916 /* Parser default initialization */
2917 static int mvpp2_prs_default_init(struct platform_device *pdev,
2922 /* Enable tcam table */
2923 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2925 /* Clear all tcam and sram entries */
2926 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2927 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2928 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2929 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2931 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2932 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2933 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2936 /* Invalidate all tcam entries */
2937 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2938 mvpp2_prs_hw_inv(priv, index);
2940 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2941 sizeof(struct mvpp2_prs_shadow),
2943 if (!priv->prs_shadow)
2946 /* Always start from lookup = 0 */
2947 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2948 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2949 MVPP2_PRS_PORT_LU_MAX, 0);
2951 mvpp2_prs_def_flow_init(priv);
2953 mvpp2_prs_mh_init(priv);
2955 mvpp2_prs_mac_init(priv);
2957 mvpp2_prs_dsa_init(priv);
2959 err = mvpp2_prs_etype_init(priv);
2963 err = mvpp2_prs_vlan_init(pdev, priv);
2967 err = mvpp2_prs_pppoe_init(priv);
2971 err = mvpp2_prs_ip6_init(priv);
2975 err = mvpp2_prs_ip4_init(priv);
2982 /* Compare MAC DA with tcam entry data */
2983 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2984 const u8 *da, unsigned char *mask)
2986 unsigned char tcam_byte, tcam_mask;
2989 for (index = 0; index < ETH_ALEN; index++) {
2990 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2991 if (tcam_mask != mask[index])
2994 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
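/*
 * Illustrative sketch, not part of the original driver: the comparison
 * above mirrors what the TCAM does in hardware - every byte is matched
 * only under its enable mask.  Standalone form of the same semantics:
 */
static bool example_masked_match(const u8 *data, const u8 *pattern,
				 const u8 *mask, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if ((data[i] & mask[i]) != (pattern[i] & mask[i]))
			return false;
	return true;
}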
3001 /* Find tcam entry with matched pair <MAC DA, port> */
3002 static struct mvpp2_prs_entry *
3003 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3004 unsigned char *mask, int udf_type)
3006 struct mvpp2_prs_entry *pe;
3009 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3012 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3014 /* Go through all the entries with MVPP2_PRS_LU_MAC */
3015 for (tid = MVPP2_PE_FIRST_FREE_TID;
3016 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3017 unsigned int entry_pmap;
3019 if (!priv->prs_shadow[tid].valid ||
3020 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3021 (priv->prs_shadow[tid].udf != udf_type))
3025 mvpp2_prs_hw_read(priv, pe);
3026 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3028 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3037 /* Update parser's mac da entry */
3038 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3039 const u8 *da, bool add)
3041 struct mvpp2_prs_entry *pe;
3042 unsigned int pmap, len, ri;
3043 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3046 /* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
3047 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3048 MVPP2_PRS_UDF_MAC_DEF);
3055 /* Create new TCAM entry */
3056 /* Find the first range MAC entry */
3057 for (tid = MVPP2_PE_FIRST_FREE_TID;
3058 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3059 if (priv->prs_shadow[tid].valid &&
3060 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3061 (priv->prs_shadow[tid].udf ==
3062 MVPP2_PRS_UDF_MAC_RANGE))
3065 /* Go through all the entries from first to last */
3066 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3071 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3074 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3077 /* Mask all ports */
3078 mvpp2_prs_tcam_port_map_set(pe, 0);
3081 /* Update port mask */
3082 mvpp2_prs_tcam_port_set(pe, port, add);
3084 /* Invalidate the entry if no ports are left enabled */
3085 pmap = mvpp2_prs_tcam_port_map_get(pe);
3091 mvpp2_prs_hw_inv(priv, pe->index);
3092 priv->prs_shadow[pe->index].valid = false;
3097 /* Continue - set next lookup */
3098 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3100 /* Set match on DA */
3103 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3105 /* Set result info bits */
3106 if (is_broadcast_ether_addr(da))
3107 ri = MVPP2_PRS_RI_L2_BCAST;
3108 else if (is_multicast_ether_addr(da))
3109 ri = MVPP2_PRS_RI_L2_MCAST;
3111 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3113 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3114 MVPP2_PRS_RI_MAC_ME_MASK);
3115 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3116 MVPP2_PRS_RI_MAC_ME_MASK);
3118 /* Shift to ethertype */
3119 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3120 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3122 /* Update shadow table and hw entry */
3123 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3124 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3125 mvpp2_prs_hw_write(priv, pe);
3132 static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3134 struct mvpp2_port *port = netdev_priv(dev);
3137 /* Remove old parser entry */
3138 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3143 /* Add new parser entry */
3144 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3148 /* Set addr in the device */
3149 ether_addr_copy(dev->dev_addr, da);
3154 /* Delete all the port's simple (non-range) multicast entries */
3155 static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3157 struct mvpp2_prs_entry pe;
3160 for (tid = MVPP2_PE_FIRST_FREE_TID;
3161 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3162 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3164 if (!priv->prs_shadow[tid].valid ||
3165 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3166 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3169 /* Only simple mac entries */
3171 mvpp2_prs_hw_read(priv, &pe);
3173 /* Read mac addr from entry */
3174 for (index = 0; index < ETH_ALEN; index++)
3175 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3178 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3179 /* Delete this entry */
3180 mvpp2_prs_mac_da_accept(priv, port, da, false);
3184 static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3187 case MVPP2_TAG_TYPE_EDSA:
3188 /* Add port to EDSA entries */
3189 mvpp2_prs_dsa_tag_set(priv, port, true,
3190 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3191 mvpp2_prs_dsa_tag_set(priv, port, true,
3192 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3193 /* Remove port from DSA entries */
3194 mvpp2_prs_dsa_tag_set(priv, port, false,
3195 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3196 mvpp2_prs_dsa_tag_set(priv, port, false,
3197 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3200 case MVPP2_TAG_TYPE_DSA:
3201 /* Add port to DSA entries */
3202 mvpp2_prs_dsa_tag_set(priv, port, true,
3203 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3204 mvpp2_prs_dsa_tag_set(priv, port, true,
3205 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3206 /* Remove port from EDSA entries */
3207 mvpp2_prs_dsa_tag_set(priv, port, false,
3208 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3209 mvpp2_prs_dsa_tag_set(priv, port, false,
3210 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3213 case MVPP2_TAG_TYPE_MH:
3214 case MVPP2_TAG_TYPE_NONE:
3215 /* Remove port from EDSA and DSA entries */
3216 mvpp2_prs_dsa_tag_set(priv, port, false,
3217 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3218 mvpp2_prs_dsa_tag_set(priv, port, false,
3219 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3220 mvpp2_prs_dsa_tag_set(priv, port, false,
3221 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3222 mvpp2_prs_dsa_tag_set(priv, port, false,
3223 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3227 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3234 /* Set prs flow for the port */
3235 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3237 struct mvpp2_prs_entry *pe;
3240 pe = mvpp2_prs_flow_find(port->priv, port->id);
3242 /* Such an entry doesn't exist */
3244 /* Go through all the entries from last to first */
3245 tid = mvpp2_prs_tcam_first_free(port->priv,
3246 MVPP2_PE_LAST_FREE_TID,
3247 MVPP2_PE_FIRST_FREE_TID);
3251 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3255 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3259 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3260 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3262 /* Update shadow table */
3263 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3266 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3267 mvpp2_prs_hw_write(port->priv, pe);
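/*
 * Illustrative sketch, not part of the original driver: a hypothetical
 * caller would install the port's default flow once, typically when
 * the port is opened.
 */
static int example_port_flow_setup(struct mvpp2_port *port)
{
	return mvpp2_prs_def_flow(port);
}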
3273 /* Classifier configuration routines */
3275 /* Update classification flow table registers */
3276 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3277 struct mvpp2_cls_flow_entry *fe)
3279 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3280 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3281 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3282 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3285 /* Update classification lookup table register */
3286 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3287 struct mvpp2_cls_lookup_entry *le)
3291 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3292 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3293 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3296 /* Classifier default initialization */
3297 static void mvpp2_cls_init(struct mvpp2 *priv)
3299 struct mvpp2_cls_lookup_entry le;
3300 struct mvpp2_cls_flow_entry fe;
3303 /* Enable classifier */
3304 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3306 /* Clear classifier flow table */
3307 memset(&fe.data, 0, sizeof(fe.data));
3308 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3310 mvpp2_cls_flow_write(priv, &fe);
3313 /* Clear classifier lookup table */
3315 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3318 mvpp2_cls_lookup_write(priv, &le);
3321 mvpp2_cls_lookup_write(priv, &le);
3325 static void mvpp2_cls_port_config(struct mvpp2_port *port)
3327 struct mvpp2_cls_lookup_entry le;
3330 /* Set way for the port */
3331 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3332 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3333 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3335 /* Pick the entry to be accessed in lookup ID decoding table
3336 * according to the way and lkpid. */
3338 le.lkpid = port->id;
3342 /* Set initial CPU queue for receiving packets */
3343 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3344 le.data |= port->first_rxq;
3346 /* Disable classification engines */
3347 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3349 /* Update lookup ID table entry */
3350 mvpp2_cls_lookup_write(port->priv, &le);
3353 /* Set CPU queue number for oversize packets */
3354 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3358 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3359 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3361 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3362 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3364 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3365 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3366 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
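/*
 * Illustrative sketch, not part of the original driver: the oversize
 * RX queue number is split across two registers - the low
 * MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS go into the LOW register and the
 * remaining high bits into the SWFWD_P2HQ register, as done above.
 * Hypothetical helper showing the split:
 */
static inline void example_split_oversize_rxq(int rxq, u32 *low, u32 *high)
{
	*low = rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK;
	*high = rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS;
}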
3369 /* Buffer Manager configuration routines */
3372 static int mvpp2_bm_pool_create(struct platform_device *pdev,
3374 struct mvpp2_bm_pool *bm_pool, int size)
3379 size_bytes = sizeof(u32) * size;
3380 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
3381 &bm_pool->phys_addr,
3383 if (!bm_pool->virt_addr)
3386 if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
3387 dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
3388 bm_pool->phys_addr);
3389 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3390 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3394 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3395 bm_pool->phys_addr);
3396 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3398 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3399 val |= MVPP2_BM_START_MASK;
3400 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3402 bm_pool->type = MVPP2_BM_FREE;
3403 bm_pool->size = size;
3404 bm_pool->pkt_size = 0;
3405 bm_pool->buf_num = 0;
3406 atomic_set(&bm_pool->in_use, 0);
3411 /* Set pool buffer size */
3412 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3413 struct mvpp2_bm_pool *bm_pool,
3418 bm_pool->buf_size = buf_size;
3420 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3421 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3424 /* Free all buffers from the pool */
3425 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3426 struct mvpp2_bm_pool *bm_pool)
3430 for (i = 0; i < bm_pool->buf_num; i++) {
3431 dma_addr_t buf_phys_addr;
3434 /* Get buffer virtual address (indirect access) */
3435 buf_phys_addr = mvpp2_read(priv,
3436 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3437 vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
3439 dma_unmap_single(dev, buf_phys_addr,
3440 bm_pool->buf_size, DMA_FROM_DEVICE);
3444 dev_kfree_skb_any((struct sk_buff *)vaddr);
3447 /* Update BM driver with number of buffers removed from pool */
3448 bm_pool->buf_num -= i;
3452 static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3454 struct mvpp2_bm_pool *bm_pool)
3458 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
3459 if (bm_pool->buf_num) {
3460 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3464 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3465 val |= MVPP2_BM_STOP_MASK;
3466 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3468 dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
3470 bm_pool->phys_addr);
3474 static int mvpp2_bm_pools_init(struct platform_device *pdev,
3478 struct mvpp2_bm_pool *bm_pool;
3480 /* Create all pools with maximum size */
3481 size = MVPP2_BM_POOL_SIZE_MAX;
3482 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3483 bm_pool = &priv->bm_pools[i];
3485 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3487 goto err_unroll_pools;
3488 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3493 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3494 for (i = i - 1; i >= 0; i--)
3495 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3499 static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3503 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3504 /* Mask BM all interrupts */
3505 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3506 /* Clear BM cause register */
3507 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3510 /* Allocate and initialize BM pools */
3511 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3512 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
3513 if (!priv->bm_pools)
3516 err = mvpp2_bm_pools_init(pdev, priv);
3522 /* Attach long pool to rxq */
3523 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3524 int lrxq, int long_pool)
3529 /* Get queue physical ID */
3530 prxq = port->rxqs[lrxq]->id;
3532 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3533 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3534 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
3535 MVPP2_RXQ_POOL_LONG_MASK);
3537 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3540 /* Attach short pool to rxq */
3541 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3542 int lrxq, int short_pool)
3547 /* Get queue physical ID */
3548 prxq = port->rxqs[lrxq]->id;
3550 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3551 val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3552 val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
3553 MVPP2_RXQ_POOL_SHORT_MASK);
3555 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3558 /* Allocate skb for BM pool */
3559 static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
3560 struct mvpp2_bm_pool *bm_pool,
3561 dma_addr_t *buf_phys_addr,
3564 struct sk_buff *skb;
3565 dma_addr_t phys_addr;
3567 skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
3571 phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
3572 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3574 if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
3575 dev_kfree_skb_any(skb);
3578 *buf_phys_addr = phys_addr;
3583 /* Set pool number in a BM cookie */
3584 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3588 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3589 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3594 /* Get pool number from a BM cookie */
3595 static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
3597 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
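/*
 * Illustrative sketch, not part of the original driver: the pool id is
 * an 8-bit field at MVPP2_BM_COOKIE_POOL_OFFS, so a set followed by a
 * get round-trips the pool number.  Hypothetical self-check:
 */
static inline bool example_bm_cookie_roundtrip(int pool)
{
	u32 cookie = mvpp2_bm_cookie_pool_set(0, pool);

	return mvpp2_bm_cookie_pool_get(cookie) == (pool & 0xff);
}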
3600 /* Release buffer to BM */
3601 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3602 u32 buf_phys_addr, u32 buf_virt_addr)
3604 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
3605 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
3608 /* Release multicast buffer */
3609 static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
3610 u32 buf_phys_addr, u32 buf_virt_addr,
3615 val |= (mc_id & MVPP2_BM_MC_ID_MASK);
3616 mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
3618 mvpp2_bm_pool_put(port, pool,
3619 buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
3623 /* Refill BM pool */
3624 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
3625 u32 phys_addr, u32 cookie)
3627 int pool = mvpp2_bm_cookie_pool_get(bm);
3629 mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
3632 /* Allocate buffers for the pool */
3633 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3634 struct mvpp2_bm_pool *bm_pool, int buf_num)
3636 struct sk_buff *skb;
3637 int i, buf_size, total_size;
3639 dma_addr_t phys_addr;
3641 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3642 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3645 (buf_num + bm_pool->buf_num > bm_pool->size)) {
3646 netdev_err(port->dev,
3647 "cannot allocate %d buffers for pool %d\n",
3648 buf_num, bm_pool->id);
3652 bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
3653 for (i = 0; i < buf_num; i++) {
3654 skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
3658 mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
3661 /* Update BM driver with number of buffers added to pool */
3662 bm_pool->buf_num += i;
3663 bm_pool->in_use_thresh = bm_pool->buf_num / 4;
3665 netdev_dbg(port->dev,
3666 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3667 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3668 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3670 netdev_dbg(port->dev,
3671 "%s pool %d: %d of %d buffers added\n",
3672 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3673 bm_pool->id, i, buf_num);
3677 /* Notify the driver that the BM pool is being used as a specific type
3678 * and return the pool pointer on success */
3680 static struct mvpp2_bm_pool *
3681 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3684 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3687 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
3688 netdev_err(port->dev, "mixing pool types is forbidden\n");
3692 if (new_pool->type == MVPP2_BM_FREE)
3693 new_pool->type = type;
3695 /* Allocate buffers in case the BM pool is used as a long pool, but the
3696 * packet size doesn't match the MTU or the BM pool hasn't been used yet */
3698 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
3699 (new_pool->pkt_size == 0)) {
3702 /* Set default buffer number or free all the buffers in case
3703 * the pool is not empty */
3705 pkts_num = new_pool->buf_num;
3707 pkts_num = type == MVPP2_BM_SWF_LONG ?
3708 MVPP2_BM_LONG_BUF_NUM :
3709 MVPP2_BM_SHORT_BUF_NUM;
3711 mvpp2_bm_bufs_free(port->dev->dev.parent,
3712 port->priv, new_pool);
3714 new_pool->pkt_size = pkt_size;
3716 /* Allocate buffers for this pool */
3717 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
3718 if (num != pkts_num) {
3719 WARN(1, "pool %d: %d of %d allocated\n",
3720 new_pool->id, num, pkts_num);
3725 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3726 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3731 /* Initialize pools for software forwarding (swf) */
3732 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3736 if (!port->pool_long) {
3738 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
3741 if (!port->pool_long)
3744 port->pool_long->port_map |= (1 << port->id);
3746 for (rxq = 0; rxq < rxq_number; rxq++)
3747 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
3750 if (!port->pool_short) {
3752 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
3754 MVPP2_BM_SHORT_PKT_SIZE);
3755 if (!port->pool_short)
3758 port->pool_short->port_map |= (1 << port->id);
3760 for (rxq = 0; rxq < rxq_number; rxq++)
3761 mvpp2_rxq_short_pool_set(port, rxq,
3762 port->pool_short->id);
3768 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
3770 struct mvpp2_port *port = netdev_priv(dev);
3771 struct mvpp2_bm_pool *port_pool = port->pool_long;
3772 int num, pkts_num = port_pool->buf_num;
3773 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3775 /* Update BM pool with new buffer size */
3776 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
3777 if (port_pool->buf_num) {
3778 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
3782 port_pool->pkt_size = pkt_size;
3783 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
3784 if (num != pkts_num) {
3785 WARN(1, "pool %d: %d of %d allocated\n",
3786 port_pool->id, num, pkts_num);
3790 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
3791 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
3793 netdev_update_features(dev);
3797 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
3799 int cpu, cpu_mask = 0;
3801 for_each_present_cpu(cpu)
3802 cpu_mask |= 1 << cpu;
3803 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3804 MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3807 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
3809 int cpu, cpu_mask = 0;
3811 for_each_present_cpu(cpu)
3812 cpu_mask |= 1 << cpu;
3813 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3814 MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
3817 /* Mask the current CPU's Rx/Tx interrupts */
3818 static void mvpp2_interrupts_mask(void *arg)
3820 struct mvpp2_port *port = arg;
3822 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
3825 /* Unmask the current CPU's Rx/Tx interrupts */
3826 static void mvpp2_interrupts_unmask(void *arg)
3828 struct mvpp2_port *port = arg;
3830 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3831 (MVPP2_CAUSE_MISC_SUM_MASK |
3832 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3835 /* Port configuration routines */
3837 static void mvpp2_port_mii_set(struct mvpp2_port *port)
3841 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3843 switch (port->phy_interface) {
3844 case PHY_INTERFACE_MODE_SGMII:
3845 val |= MVPP2_GMAC_INBAND_AN_MASK;
3847 case PHY_INTERFACE_MODE_RGMII:
3848 val |= MVPP2_GMAC_PORT_RGMII_MASK;
3850 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3853 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3856 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
3860 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3861 val |= MVPP2_GMAC_FC_ADV_EN;
3862 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3865 static void mvpp2_port_enable(struct mvpp2_port *port)
3869 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3870 val |= MVPP2_GMAC_PORT_EN_MASK;
3871 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3872 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3875 static void mvpp2_port_disable(struct mvpp2_port *port)
3879 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3880 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
3881 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3884 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
3885 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
3889 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
3890 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3891 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3894 /* Configure loopback port */
3895 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
3899 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3901 if (port->speed == 1000)
3902 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
3904 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
3906 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
3907 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
3909 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
3911 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3914 static void mvpp2_port_reset(struct mvpp2_port *port)
3918 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3919 ~MVPP2_GMAC_PORT_RESET_MASK;
3920 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3922 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3923 MVPP2_GMAC_PORT_RESET_MASK)
3927 /* Change maximum receive size of the port */
3928 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
3932 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3933 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3934 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
3935 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
3936 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
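/*
 * Illustrative sketch, not part of the original driver: the halving
 * above suggests the GMAC max-RX-size field counts 2-byte units, taken
 * after the 2-byte Marvell header is subtracted.  Hypothetical helper
 * computing the same field value:
 */
static inline u32 example_gmac_max_rx_field(int pkt_size)
{
	return ((u32)(pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP2_GMAC_MAX_RX_SIZE_OFFS;
}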
3939 /* Set defaults to the MVPP2 port */
3940 static void mvpp2_defaults_set(struct mvpp2_port *port)
3942 int tx_port_num, val, queue, ptxq, lrxq;
3944 /* Configure port to loopback if needed */
3945 if (port->flags & MVPP2_F_LOOPBACK)
3946 mvpp2_port_loopback_set(port);
3948 /* Update TX FIFO MIN Threshold */
3949 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3950 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3951 /* Min. TX threshold must be less than minimal packet length */
3952 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3953 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3955 /* Disable Legacy WRR, Disable EJP, Release from reset */
3956 tx_port_num = mvpp2_egress_port(port);
3957 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3959 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3961 /* Close bandwidth for all queues */
3962 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
3963 ptxq = mvpp2_txq_phys(port->id, queue);
3964 mvpp2_write(port->priv,
3965 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
3968 /* Set refill period to 1 usec, refill tokens
3969 * and bucket size to maximum */
3971 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
3972 port->priv->tclk / USEC_PER_SEC);
3973 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3974 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3975 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3976 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3977 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3978 val = MVPP2_TXP_TOKEN_SIZE_MAX;
3979 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3981 /* Set MaximumLowLatencyPacketSize value to 256 */
3982 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3983 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3984 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3986 /* Enable Rx cache snoop */
3987 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3988 queue = port->rxqs[lrxq]->id;
3989 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3990 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
3991 MVPP2_SNOOP_BUF_HDR_MASK;
3992 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3995 /* By default, mask all interrupts to all present CPUs */
3996 mvpp2_interrupts_disable(port);
3999 /* Enable/disable receiving packets */
4000 static void mvpp2_ingress_enable(struct mvpp2_port *port)
4005 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4006 queue = port->rxqs[lrxq]->id;
4007 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4008 val &= ~MVPP2_RXQ_DISABLE_MASK;
4009 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4013 static void mvpp2_ingress_disable(struct mvpp2_port *port)
4018 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4019 queue = port->rxqs[lrxq]->id;
4020 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4021 val |= MVPP2_RXQ_DISABLE_MASK;
4022 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4026 /* Enable transmit via physical egress queue
4027  * - HW starts taking descriptors from DRAM
4029 static void mvpp2_egress_enable(struct mvpp2_port *port)
4033 int tx_port_num = mvpp2_egress_port(port);
4035 /* Enable all initialized TXs. */
4037 for (queue = 0; queue < txq_number; queue++) {
4038 struct mvpp2_tx_queue *txq = port->txqs[queue];
4040 if (txq->descs != NULL)
4041 qmap |= (1 << queue);
4044 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4045 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
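/* Example: with the first four TXQs initialized (descs != NULL), qmap is
 * 0b1111 = 0xf, so a single write to the command register enables
 * queues 0-3 at once.
 */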
4048 /* Disable transmit via physical egress queue
4049 * - HW doesn't take descriptors from DRAM
4051 static void mvpp2_egress_disable(struct mvpp2_port *port)
4055 int tx_port_num = mvpp2_egress_port(port);
4057 /* Issue stop command for active channels only */
4058 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4059 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4060 MVPP2_TXP_SCHED_ENQ_MASK;
4062 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4063 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4065 /* Wait for all Tx activity to terminate. */
4068 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4069 netdev_warn(port->dev,
4070 "Tx stop timed out, status=0x%08x\n",
4077 /* Check the port TX Command register to verify that all
4078  * Tx queues are stopped
4080 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4081 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4084 /* Rx descriptors helper methods */
4086 /* Get number of Rx descriptors occupied by received packets */
4088 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4090 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4092 return val & MVPP2_RXQ_OCCUPIED_MASK;
4095 /* Update Rx queue status with the number of occupied and available
4096 * Rx descriptor slots.
4099 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4100 int used_count, int free_count)
4102 /* Decrement the number of used descriptors and increment the
4103  * number of free descriptors.
4105 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4107 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
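/* Example: after the CPU has processed 32 descriptors and refilled 32
 * buffers, val = 32 | (32 << MVPP2_RXQ_NUM_NEW_OFFSET), returning the
 * slots to the HW in a single register write.
 */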
4110 /* Get pointer to next RX descriptor to be processed by SW */
4111 static inline struct mvpp2_rx_desc *
4112 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4114 int rx_desc = rxq->next_desc_to_proc;
4116 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4117 prefetch(rxq->descs + rxq->next_desc_to_proc);
4118 return rxq->descs + rx_desc;
4121 /* Set rx queue offset */
4122 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4123 int prxq, int offset)
4127 /* Convert offset from bytes to units of 32 bytes */
4128 offset = offset >> 5;
4130 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4131 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4134 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4135 MVPP2_RXQ_PACKET_OFFSET_MASK);
4137 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
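/* Example: a 64-byte packet offset becomes 64 >> 5 = 2, i.e. two 32-byte
 * units in the MVPP2_RXQ_PACKET_OFFSET field; offsets are therefore only
 * applied at a 32-byte granularity.
 */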
4140 /* Obtain BM cookie information from descriptor */
4141 static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
4143 int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4144 MVPP2_RXD_BM_POOL_ID_OFFS;
4145 int cpu = smp_processor_id();
4147 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4148 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
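/* Example: a packet from pool 2 received on CPU 1 yields
 * (2 << MVPP2_BM_COOKIE_POOL_OFFS) | (1 << MVPP2_BM_COOKIE_CPU_OFFS),
 * so both the pool and the CPU can later be recovered from the cookie.
 */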
4151 /* Tx descriptors helper methods */
4153 /* Get number of Tx descriptors waiting to be transmitted by HW */
4154 static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
4155 struct mvpp2_tx_queue *txq)
4159 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4160 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4162 return val & MVPP2_TXQ_PENDING_MASK;
4165 /* Get pointer to next Tx descriptor to be processed (sent) by HW */
4166 static struct mvpp2_tx_desc *
4167 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4169 int tx_desc = txq->next_desc_to_proc;
4171 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4172 return txq->descs + tx_desc;
4175 /* Update HW with number of aggregated Tx descriptors to be sent */
4176 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4178 /* aggregated access - relevant TXQ number is written in TX desc */
4179 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4183 /* Check if there are enough free descriptors in aggregated txq.
4184 * If not, update the number of occupied descriptors and repeat the check.
4186 static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4187 struct mvpp2_tx_queue *aggr_txq, int num)
4189 if ((aggr_txq->count + num) > aggr_txq->size) {
4190 /* Update number of occupied aggregated Tx descriptors */
4191 int cpu = smp_processor_id();
4192 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4194 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4197 if ((aggr_txq->count + num) > aggr_txq->size)
4203 /* Reserved Tx descriptors allocation request */
4204 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4205 struct mvpp2_tx_queue *txq, int num)
4209 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4210 mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
4212 val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
4214 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4217 /* Check if there are enough reserved descriptors for transmission.
4218  * If not, request a chunk of reserved descriptors and check again.
4220 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4221 struct mvpp2_tx_queue *txq,
4222 struct mvpp2_txq_pcpu *txq_pcpu,
4225 int req, cpu, desc_count;
4227 if (txq_pcpu->reserved_num >= num)
4230 /* Not enough descriptors reserved! Update the reserved descriptor
4231 * count and check again.
4235 /* Compute total of used descriptors */
4236 for_each_present_cpu(cpu) {
4237 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4239 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4240 desc_count += txq_pcpu_aux->count;
4241 desc_count += txq_pcpu_aux->reserved_num;
4244 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4248 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4251 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
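/* Worked example: with num = 5 and reserved_num = 2, req becomes
 * max(MVPP2_CPU_DESC_CHUNK, 3), i.e. at least a full chunk is requested
 * so that later packets can use the surplus without another round-trip
 * to the HW; the desc_count guard above bounds the total reservation.
 */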
4253 /* OK, the descriptor count has been updated: check again. */
4254 if (txq_pcpu->reserved_num < num)
4259 /* Release the last allocated Tx descriptor. Useful to handle DMA
4260 * mapping failures in the Tx path.
4262 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4264 if (txq->next_desc_to_proc == 0)
4265 txq->next_desc_to_proc = txq->last_desc - 1;
4267 txq->next_desc_to_proc--;
4270 /* Set Tx descriptors fields relevant for CSUM calculation */
4271 static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4272 int ip_hdr_len, int l4_proto)
4276 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4277 * G_L4_chk, L4_type required only for checksum calculation
4279 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4280 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4281 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4283 if (l3_proto == swab16(ETH_P_IP)) {
4284 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
4285 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
4287 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
4290 if (l4_proto == IPPROTO_TCP) {
4291 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
4292 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4293 } else if (l4_proto == IPPROTO_UDP) {
4294 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
4295 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4297 command |= MVPP2_TXD_L4_CSUM_NOT;
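/* Worked example: for TCP over IPv4 at the usual l3_offs = 14 (Ethernet
 * header) with ihl = 5, the command carries
 * (14 << MVPP2_TXD_L3_OFF_SHIFT) | (5 << MVPP2_TXD_IP_HLEN_SHIFT) with
 * the IPv4-checksum-disable and L4-fragment bits cleared, i.e. the HW
 * generates both the IPv4 and the TCP checksums.
 */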
4303 /* Get number of sent descriptors and decrement counter.
4304 * The number of sent descriptors is returned.
4307 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4308 struct mvpp2_tx_queue *txq)
4312 /* Reading status reg resets transmitted descriptor counter */
4313 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
4315 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4316 MVPP2_TRANSMITTED_COUNT_OFFSET;
4319 static void mvpp2_txq_sent_counter_clear(void *arg)
4321 struct mvpp2_port *port = arg;
4324 for (queue = 0; queue < txq_number; queue++) {
4325 int id = port->txqs[queue]->id;
4327 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
4331 /* Set max sizes for Tx queues */
4332 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4335 int txq, tx_port_num;
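/* pkt_size is in bytes; the egress scheduler MTU appears to be
 * programmed in bits, hence the multiplication by 8 below (an
 * assumption inferred from the code).
 */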
4337 mtu = port->pkt_size * 8;
4338 if (mtu > MVPP2_TXP_MTU_MAX)
4339 mtu = MVPP2_TXP_MTU_MAX;
4341 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
4344 /* Indirect access to registers */
4345 tx_port_num = mvpp2_egress_port(port);
4346 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4349 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4350 val &= ~MVPP2_TXP_MTU_MAX;
4352 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4354 /* TXP token size and all TXQs token size must be larger than the MTU */
4355 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4356 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4359 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4361 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4364 for (txq = 0; txq < txq_number; txq++) {
4365 val = mvpp2_read(port->priv,
4366 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4367 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4371 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4373 mvpp2_write(port->priv,
4374 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4380 /* Set the number of packets that will be received before an Rx
4381  * interrupt is generated by the HW.
4383 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4384 struct mvpp2_rx_queue *rxq, u32 pkts)
4388 val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
4389 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4390 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
4392 rxq->pkts_coal = pkts;
4395 /* Set the time delay in usec before an Rx interrupt */
4396 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4397 struct mvpp2_rx_queue *rxq, u32 usec)
4401 val = (port->priv->tclk / USEC_PER_SEC) * usec;
4402 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
4404 rxq->time_coal = usec;
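/* Worked example (assuming a 250 MHz tclk): tclk / USEC_PER_SEC = 250
 * ticks per usec, so usec = 100 programs a threshold of 25000 clock
 * ticks before the Rx interrupt fires.
 */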
4407 /* Free Tx queue skbuffs */
4408 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4409 struct mvpp2_tx_queue *txq,
4410 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4414 for (i = 0; i < num; i++) {
4415 struct mvpp2_txq_pcpu_buf *tx_buf =
4416 txq_pcpu->buffs + txq_pcpu->txq_get_index;
4418 mvpp2_txq_inc_get(txq_pcpu);
4420 dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
4421 tx_buf->size, DMA_TO_DEVICE);
4424 dev_kfree_skb_any(tx_buf->skb);
4428 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4431 int queue = fls(cause) - 1;
4433 return port->rxqs[queue];
4436 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4439 int queue = fls(cause) - 1;
4441 return port->txqs[queue];
4444 /* Handle end of transmission */
4445 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4446 struct mvpp2_txq_pcpu *txq_pcpu)
4448 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4451 if (txq_pcpu->cpu != smp_processor_id())
4452 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
4454 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4457 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4459 txq_pcpu->count -= tx_done;
4461 if (netif_tx_queue_stopped(nq))
4462 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4463 netif_tx_wake_queue(nq);
4466 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4468 struct mvpp2_tx_queue *txq;
4469 struct mvpp2_txq_pcpu *txq_pcpu;
4470 unsigned int tx_todo = 0;
4473 txq = mvpp2_get_tx_queue(port, cause);
4477 txq_pcpu = this_cpu_ptr(txq->pcpu);
4479 if (txq_pcpu->count) {
4480 mvpp2_txq_done(port, txq, txq_pcpu);
4481 tx_todo += txq_pcpu->count;
4484 cause &= ~(1 << txq->log_id);
4489 /* Rx/Tx queue initialization/cleanup methods */
4491 /* Allocate and initialize descriptors for aggr TXQ */
4492 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4493 struct mvpp2_tx_queue *aggr_txq,
4494 int desc_num, int cpu,
4497 /* Allocate memory for TX descriptors */
4498 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4499 desc_num * MVPP2_DESC_ALIGNED_SIZE,
4500 &aggr_txq->descs_phys, GFP_KERNEL);
4501 if (!aggr_txq->descs)
4504 /* Make sure descriptor address is cache line size aligned */
4505 BUG_ON(aggr_txq->descs !=
4506 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4508 aggr_txq->last_desc = aggr_txq->size - 1;
4510 /* Workaround: the aggregated TXQ is not reset, so resume from the index currently held by HW */
4511 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4512 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4514 /* Set Tx descriptors queue starting address */
4515 /* indirect access */
4516 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
4517 aggr_txq->descs_phys);
4518 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4523 /* Create a specified Rx queue */
4524 static int mvpp2_rxq_init(struct mvpp2_port *port,
4525 struct mvpp2_rx_queue *rxq)
4528 rxq->size = port->rx_ring_size;
4530 /* Allocate memory for RX descriptors */
4531 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4532 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4533 &rxq->descs_phys, GFP_KERNEL);
4537 BUG_ON(rxq->descs !=
4538 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4540 rxq->last_desc = rxq->size - 1;
4542 /* Zero occupied and non-occupied counters - direct access */
4543 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4545 /* Set Rx descriptors queue starting address - indirect access */
4546 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4547 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
4548 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4549 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
4552 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4554 /* Set coalescing pkts and time */
4555 mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
4556 mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
4558 /* Add number of descriptors ready for receiving packets */
4559 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4564 /* Push packets received by the RXQ to BM pool */
4565 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4566 struct mvpp2_rx_queue *rxq)
4570 rx_received = mvpp2_rxq_received(port, rxq->id);
4574 for (i = 0; i < rx_received; i++) {
4575 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4576 u32 bm = mvpp2_bm_cookie_build(rx_desc);
4578 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
4579 rx_desc->buf_cookie);
4581 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4584 /* Cleanup Rx queue */
4585 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4586 struct mvpp2_rx_queue *rxq)
4588 mvpp2_rxq_drop_pkts(port, rxq);
4591 dma_free_coherent(port->dev->dev.parent,
4592 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4598 rxq->next_desc_to_proc = 0;
4599 rxq->descs_phys = 0;
4601 /* Clear the Rx descriptors queue starting address and size,
4602  * and zero the free descriptor count
4604 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4605 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4606 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
4607 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
4610 /* Create and initialize a Tx queue */
4611 static int mvpp2_txq_init(struct mvpp2_port *port,
4612 struct mvpp2_tx_queue *txq)
4615 int cpu, desc, desc_per_txq, tx_port_num;
4616 struct mvpp2_txq_pcpu *txq_pcpu;
4618 txq->size = port->tx_ring_size;
4620 /* Allocate memory for Tx descriptors */
4621 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
4622 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4623 &txq->descs_phys, GFP_KERNEL);
4627 /* Make sure descriptor address is cache line size aligned */
4628 BUG_ON(txq->descs !=
4629 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4631 txq->last_desc = txq->size - 1;
4633 /* Set Tx descriptors queue starting address - indirect access */
4634 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4635 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
4636 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
4637 MVPP2_TXQ_DESC_SIZE_MASK);
4638 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
4639 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
4640 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
4641 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4642 val &= ~MVPP2_TXQ_PENDING_MASK;
4643 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
4645 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
4646 * for each existing TXQ.
4647  * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT
4648  * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS
4651 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4652 (txq->log_id * desc_per_txq);
4654 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
4655 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
4656 MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
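/* Worked example: with desc_per_txq = 16, port 1 / log_id 2 gets a
 * prefetch window starting at 1 * MVPP2_MAX_TXQ * 16 + 2 * 16
 * descriptors, so every (port, TXQ) pair owns a disjoint 16-descriptor
 * slice of the prefetch buffer.
 */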
4658 /* WRR / EJP configuration - indirect access */
4659 tx_port_num = mvpp2_egress_port(port);
4660 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4662 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
4663 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
4664 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
4665 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
4666 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
4668 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
4669 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
4672 for_each_present_cpu(cpu) {
4673 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4674 txq_pcpu->size = txq->size;
4675 txq_pcpu->buffs = kmalloc(txq_pcpu->size *
4676 sizeof(struct mvpp2_txq_pcpu_buf),
4678 if (!txq_pcpu->buffs)
4681 txq_pcpu->count = 0;
4682 txq_pcpu->reserved_num = 0;
4683 txq_pcpu->txq_put_index = 0;
4684 txq_pcpu->txq_get_index = 0;
4690 for_each_present_cpu(cpu) {
4691 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4692 kfree(txq_pcpu->buffs);
4695 dma_free_coherent(port->dev->dev.parent,
4696 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4697 txq->descs, txq->descs_phys);
4702 /* Free allocated TXQ resources */
4703 static void mvpp2_txq_deinit(struct mvpp2_port *port,
4704 struct mvpp2_tx_queue *txq)
4706 struct mvpp2_txq_pcpu *txq_pcpu;
4709 for_each_present_cpu(cpu) {
4710 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4711 kfree(txq_pcpu->buffs);
4715 dma_free_coherent(port->dev->dev.parent,
4716 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4717 txq->descs, txq->descs_phys);
4721 txq->next_desc_to_proc = 0;
4722 txq->descs_phys = 0;
4724 /* Set minimum bandwidth for disabled TXQs */
4725 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4727 /* Clear Tx descriptors queue starting address and size */
4728 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4729 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
4730 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
4733 /* Clean up a Tx queue: drain pending descriptors and release buffers */
4734 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4736 struct mvpp2_txq_pcpu *txq_pcpu;
4737 int delay, pending, cpu;
4740 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4741 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4742 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4743 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4745 /* The napi queue has been stopped so wait for all packets
4746 * to be transmitted.
4750 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
4751 netdev_warn(port->dev,
4752 "port %d: cleaning queue %d timed out\n",
4753 port->id, txq->log_id);
4759 pending = mvpp2_txq_pend_desc_num_get(port, txq);
4762 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4763 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4765 for_each_present_cpu(cpu) {
4766 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4768 /* Release all packets */
4769 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
4772 txq_pcpu->count = 0;
4773 txq_pcpu->txq_put_index = 0;
4774 txq_pcpu->txq_get_index = 0;
4778 /* Cleanup all Tx queues */
4779 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
4781 struct mvpp2_tx_queue *txq;
4785 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
4787 /* Reset Tx ports and delete Tx queues */
4788 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
4789 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4791 for (queue = 0; queue < txq_number; queue++) {
4792 txq = port->txqs[queue];
4793 mvpp2_txq_clean(port, txq);
4794 mvpp2_txq_deinit(port, txq);
4797 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4799 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
4800 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4803 /* Cleanup all Rx queues */
4804 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
4808 for (queue = 0; queue < rxq_number; queue++)
4809 mvpp2_rxq_deinit(port, port->rxqs[queue]);
4812 /* Init all Rx queues for port */
4813 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
4817 for (queue = 0; queue < rxq_number; queue++) {
4818 err = mvpp2_rxq_init(port, port->rxqs[queue]);
4825 mvpp2_cleanup_rxqs(port);
4829 /* Init all tx queues for port */
4830 static int mvpp2_setup_txqs(struct mvpp2_port *port)
4832 struct mvpp2_tx_queue *txq;
4835 for (queue = 0; queue < txq_number; queue++) {
4836 txq = port->txqs[queue];
4837 err = mvpp2_txq_init(port, txq);
4842 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4846 mvpp2_cleanup_txqs(port);
4850 /* The callback for per-port interrupt */
4851 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
4853 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
4855 mvpp2_interrupts_disable(port);
4857 napi_schedule(&port->napi);
4863 static void mvpp2_link_event(struct net_device *dev)
4865 struct mvpp2_port *port = netdev_priv(dev);
4866 struct phy_device *phydev = port->phy_dev;
4867 int status_change = 0;
4871 if ((port->speed != phydev->speed) ||
4872 (port->duplex != phydev->duplex)) {
4875 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4876 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
4877 MVPP2_GMAC_CONFIG_GMII_SPEED |
4878 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
4879 MVPP2_GMAC_AN_SPEED_EN |
4880 MVPP2_GMAC_AN_DUPLEX_EN);
4883 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4885 if (phydev->speed == SPEED_1000)
4886 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
4887 else if (phydev->speed == SPEED_100)
4888 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
4890 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4892 port->duplex = phydev->duplex;
4893 port->speed = phydev->speed;
4897 if (phydev->link != port->link) {
4898 if (!phydev->link) {
4903 port->link = phydev->link;
4907 if (status_change) {
4909 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4910 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
4911 MVPP2_GMAC_FORCE_LINK_DOWN);
4912 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4913 mvpp2_egress_enable(port);
4914 mvpp2_ingress_enable(port);
4916 mvpp2_ingress_disable(port);
4917 mvpp2_egress_disable(port);
4919 phy_print_status(phydev);
4923 static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
4927 if (!port_pcpu->timer_scheduled) {
4928 port_pcpu->timer_scheduled = true;
4929 interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
4930 hrtimer_start(&port_pcpu->tx_done_timer, interval,
4931 HRTIMER_MODE_REL_PINNED);
4935 static void mvpp2_tx_proc_cb(unsigned long data)
4937 struct net_device *dev = (struct net_device *)data;
4938 struct mvpp2_port *port = netdev_priv(dev);
4939 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
4940 unsigned int tx_todo, cause;
4942 if (!netif_running(dev))
4944 port_pcpu->timer_scheduled = false;
4946 /* Process all the Tx queues */
4947 cause = (1 << txq_number) - 1;
4948 tx_todo = mvpp2_tx_done(port, cause);
4950 /* Set the timer in case not all the packets were processed */
4952 mvpp2_timer_set(port_pcpu);
4955 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
4957 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
4958 struct mvpp2_port_pcpu,
4961 tasklet_schedule(&port_pcpu->tx_done_tasklet);
4963 return HRTIMER_NORESTART;
4966 /* Main RX/TX processing routines */
4968 /* Display more error info */
4969 static void mvpp2_rx_error(struct mvpp2_port *port,
4970 struct mvpp2_rx_desc *rx_desc)
4972 u32 status = rx_desc->status;
4974 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
4975 case MVPP2_RXD_ERR_CRC:
4976 netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
4977 status, rx_desc->data_size);
4979 case MVPP2_RXD_ERR_OVERRUN:
4980 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
4981 status, rx_desc->data_size);
4983 case MVPP2_RXD_ERR_RESOURCE:
4984 netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
4985 status, rx_desc->data_size);
4990 /* Handle RX checksum offload */
4991 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
4992 struct sk_buff *skb)
4994 if (((status & MVPP2_RXD_L3_IP4) &&
4995 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
4996 (status & MVPP2_RXD_L3_IP6))
4997 if (((status & MVPP2_RXD_L4_UDP) ||
4998 (status & MVPP2_RXD_L4_TCP)) &&
4999 (status & MVPP2_RXD_L4_CSUM_OK)) {
5001 skb->ip_summed = CHECKSUM_UNNECESSARY;
5005 skb->ip_summed = CHECKSUM_NONE;
5008 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
5009 static int mvpp2_rx_refill(struct mvpp2_port *port,
5010 struct mvpp2_bm_pool *bm_pool,
5011 u32 bm, int is_recycle)
5013 struct sk_buff *skb;
5014 dma_addr_t phys_addr;
5017 (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
5020 /* No recycle or too many buffers are in use, so allocate a new skb */
5021 skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
5025 mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
5026 atomic_dec(&bm_pool->in_use);
5030 /* Handle tx checksum */
5031 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5033 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5037 if (skb->protocol == htons(ETH_P_IP)) {
5038 struct iphdr *ip4h = ip_hdr(skb);
5040 /* Calculate IPv4 checksum and L4 checksum */
5041 ip_hdr_len = ip4h->ihl;
5042 l4_proto = ip4h->protocol;
5043 } else if (skb->protocol == htons(ETH_P_IPV6)) {
5044 struct ipv6hdr *ip6h = ipv6_hdr(skb);
5046 /* Read l4_protocol from one of the IPv6 extension headers */
5047 if (skb_network_header_len(skb) > 0)
5048 ip_hdr_len = (skb_network_header_len(skb) >> 2);
5049 l4_proto = ip6h->nexthdr;
5051 return MVPP2_TXD_L4_CSUM_NOT;
5054 return mvpp2_txq_desc_csum(skb_network_offset(skb),
5055 skb->protocol, ip_hdr_len, l4_proto);
5058 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5061 static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
5062 struct mvpp2_rx_desc *rx_desc)
5064 struct mvpp2_buff_hdr *buff_hdr;
5065 struct sk_buff *skb;
5066 u32 rx_status = rx_desc->status;
5069 u32 buff_phys_addr_next;
5070 u32 buff_virt_addr_next;
5074 pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5075 MVPP2_RXD_BM_POOL_ID_OFFS;
5076 buff_phys_addr = rx_desc->buf_phys_addr;
5077 buff_virt_addr = rx_desc->buf_cookie;
5080 skb = (struct sk_buff *)buff_virt_addr;
5081 buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
5083 mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
5085 buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
5086 buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
5088 /* Release buffer */
5089 mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
5090 buff_virt_addr, mc_id);
5092 buff_phys_addr = buff_phys_addr_next;
5093 buff_virt_addr = buff_virt_addr_next;
5095 } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
5098 /* Main rx processing */
5099 static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5100 struct mvpp2_rx_queue *rxq)
5102 struct net_device *dev = port->dev;
5108 /* Get the number of received packets and clamp the number to process */
5109 rx_received = mvpp2_rxq_received(port, rxq->id);
5110 if (rx_todo > rx_received)
5111 rx_todo = rx_received;
5113 while (rx_done < rx_todo) {
5114 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5115 struct mvpp2_bm_pool *bm_pool;
5116 struct sk_buff *skb;
5117 dma_addr_t phys_addr;
5119 int pool, rx_bytes, err;
5122 rx_status = rx_desc->status;
5123 rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
5124 phys_addr = rx_desc->buf_phys_addr;
5126 bm = mvpp2_bm_cookie_build(rx_desc);
5127 pool = mvpp2_bm_cookie_pool_get(bm);
5128 bm_pool = &port->priv->bm_pools[pool];
5129 /* Check if buffer header is used */
5130 if (rx_status & MVPP2_RXD_BUF_HDR) {
5131 mvpp2_buff_hdr_rx(port, rx_desc);
5135 /* In case of an error, release the requested buffer pointer
5136  * to the Buffer Manager. This request process is controlled
5137  * by the hardware, and the information about the buffer is
5138  * carried in the RX descriptor.
5140 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5142 dev->stats.rx_errors++;
5143 mvpp2_rx_error(port, rx_desc);
5144 /* Return the buffer to the pool */
5145 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
5146 rx_desc->buf_cookie);
5150 skb = (struct sk_buff *)rx_desc->buf_cookie;
5152 err = mvpp2_rx_refill(port, bm_pool, bm, 0);
5154 netdev_err(port->dev, "failed to refill BM pools\n");
5155 goto err_drop_frame;
5158 dma_unmap_single(dev->dev.parent, phys_addr,
5159 bm_pool->buf_size, DMA_FROM_DEVICE);
5162 rcvd_bytes += rx_bytes;
5163 atomic_inc(&bm_pool->in_use);
5165 skb_reserve(skb, MVPP2_MH_SIZE);
5166 skb_put(skb, rx_bytes);
5167 skb->protocol = eth_type_trans(skb, dev);
5168 mvpp2_rx_csum(port, rx_status, skb);
5170 napi_gro_receive(&port->napi, skb);
5174 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5176 u64_stats_update_begin(&stats->syncp);
5177 stats->rx_packets += rcvd_pkts;
5178 stats->rx_bytes += rcvd_bytes;
5179 u64_stats_update_end(&stats->syncp);
5182 /* Update Rx queue management counters */
5184 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
5190 tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
5191 struct mvpp2_tx_desc *desc)
5193 dma_unmap_single(dev, desc->buf_phys_addr,
5194 desc->data_size, DMA_TO_DEVICE);
5195 mvpp2_txq_desc_put(txq);
5198 /* Handle tx fragmentation processing */
5199 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5200 struct mvpp2_tx_queue *aggr_txq,
5201 struct mvpp2_tx_queue *txq)
5203 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5204 struct mvpp2_tx_desc *tx_desc;
5206 dma_addr_t buf_phys_addr;
5208 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5209 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5210 void *addr = page_address(frag->page.p) + frag->page_offset;
5212 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5213 tx_desc->phys_txq = txq->id;
5214 tx_desc->data_size = frag->size;
5216 buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
5219 if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
5220 mvpp2_txq_desc_put(txq);
5224 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5225 tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
5227 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5228 /* Last descriptor */
5229 tx_desc->command = MVPP2_TXD_L_DESC;
5230 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5232 /* Descriptor in the middle: Not First, Not Last */
5233 tx_desc->command = 0;
5234 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5241 /* Release all descriptors that were used to map fragments of
5242 * this packet, as well as the corresponding DMA mappings
5244 for (i = i - 1; i >= 0; i--) {
5245 tx_desc = txq->descs + i;
5246 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5252 /* Main tx processing */
5253 static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5255 struct mvpp2_port *port = netdev_priv(dev);
5256 struct mvpp2_tx_queue *txq, *aggr_txq;
5257 struct mvpp2_txq_pcpu *txq_pcpu;
5258 struct mvpp2_tx_desc *tx_desc;
5259 dma_addr_t buf_phys_addr;
5264 txq_id = skb_get_queue_mapping(skb);
5265 txq = port->txqs[txq_id];
5266 txq_pcpu = this_cpu_ptr(txq->pcpu);
5267 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5269 frags = skb_shinfo(skb)->nr_frags + 1;
5271 /* Check number of available descriptors */
5272 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5273 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5279 /* Get a descriptor for the first part of the packet */
5280 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5281 tx_desc->phys_txq = txq->id;
5282 tx_desc->data_size = skb_headlen(skb);
5284 buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
5285 tx_desc->data_size, DMA_TO_DEVICE);
5286 if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
5287 mvpp2_txq_desc_put(txq);
5291 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5292 tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
5294 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5297 /* First and Last descriptor */
5298 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5299 tx_desc->command = tx_cmd;
5300 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5302 /* First but not Last */
5303 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5304 tx_desc->command = tx_cmd;
5305 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5307 /* Continue with other skb fragments */
5308 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5309 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5315 txq_pcpu->reserved_num -= frags;
5316 txq_pcpu->count += frags;
5317 aggr_txq->count += frags;
5319 /* Enable transmit */
5321 mvpp2_aggr_txq_pend_desc_add(port, frags);
5323 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5324 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5326 netif_tx_stop_queue(nq);
5330 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5332 u64_stats_update_begin(&stats->syncp);
5333 stats->tx_packets++;
5334 stats->tx_bytes += skb->len;
5335 u64_stats_update_end(&stats->syncp);
5337 dev->stats.tx_dropped++;
5338 dev_kfree_skb_any(skb);
5341 /* Finalize TX processing */
5342 if (txq_pcpu->count >= txq->done_pkts_coal)
5343 mvpp2_txq_done(port, txq, txq_pcpu);
5345 /* Set the timer in case not all frags were processed */
5346 if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5347 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5349 mvpp2_timer_set(port_pcpu);
5352 return NETDEV_TX_OK;
5355 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5357 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5358 netdev_err(dev, "FCS error\n");
5359 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5360 netdev_err(dev, "rx fifo overrun error\n");
5361 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5362 netdev_err(dev, "tx fifo underrun error\n");
5365 static int mvpp2_poll(struct napi_struct *napi, int budget)
5367 u32 cause_rx_tx, cause_rx, cause_misc;
5369 struct mvpp2_port *port = netdev_priv(napi->dev);
5371 /* Rx/Tx cause register
5373 * Bits 0-15: each bit indicates received packets on the Rx queue
5374 * (bit 0 is for Rx queue 0).
5376 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5377 * (bit 16 is for Tx queue 0).
5379 * Each CPU has its own Rx/Tx cause register
5381 cause_rx_tx = mvpp2_read(port->priv,
5382 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5383 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5384 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5387 mvpp2_cause_error(port->dev, cause_misc);
5389 /* Clear the cause register */
5390 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5391 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5392 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
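/* Example: cause_rx_tx = 0x00010005 would flag pending work on Rx
 * queues 0 and 2 (bits 0 and 2) and on Tx queue 0 (bit 16); the Tx
 * bits are masked off above and handled by the tx-done machinery.
 */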
5395 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5397 /* Process RX packets */
5398 cause_rx |= port->pending_cause_rx;
5399 while (cause_rx && budget > 0) {
5401 struct mvpp2_rx_queue *rxq;
5403 rxq = mvpp2_get_rx_queue(port, cause_rx);
5407 count = mvpp2_rx(port, budget, rxq);
5411 /* Clear the bit associated with this Rx queue
5412  * so that the next iteration will continue from
5413  * the next Rx queue.
5415 cause_rx &= ~(1 << rxq->logic_rxq);
5421 napi_complete(napi);
5423 mvpp2_interrupts_enable(port);
5425 port->pending_cause_rx = cause_rx;
5429 /* Set hw internals when starting port */
5430 static void mvpp2_start_dev(struct mvpp2_port *port)
5432 mvpp2_gmac_max_rx_size_set(port);
5433 mvpp2_txp_max_tx_size_set(port);
5435 napi_enable(&port->napi);
5437 /* Enable interrupts on all CPUs */
5438 mvpp2_interrupts_enable(port);
5440 mvpp2_port_enable(port);
5441 phy_start(port->phy_dev);
5442 netif_tx_start_all_queues(port->dev);
5445 /* Set hw internals when stopping port */
5446 static void mvpp2_stop_dev(struct mvpp2_port *port)
5448 /* Stop new packets from arriving to RXQs */
5449 mvpp2_ingress_disable(port);
5453 /* Disable interrupts on all CPUs */
5454 mvpp2_interrupts_disable(port);
5456 napi_disable(&port->napi);
5458 netif_carrier_off(port->dev);
5459 netif_tx_stop_all_queues(port->dev);
5461 mvpp2_egress_disable(port);
5462 mvpp2_port_disable(port);
5463 phy_stop(port->phy_dev);
5466 /* Return positive if MTU is valid */
5467 static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
5470 netdev_err(dev, "cannot change mtu to less than 68\n");
5474 /* 9676 == 9700 - 20 and rounding to 8 */
5476 netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
5480 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5481 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
5482 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5483 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5489 static int mvpp2_check_ringparam_valid(struct net_device *dev,
5490 struct ethtool_ringparam *ring)
5492 u16 new_rx_pending = ring->rx_pending;
5493 u16 new_tx_pending = ring->tx_pending;
5495 if (ring->rx_pending == 0 || ring->tx_pending == 0)
5498 if (ring->rx_pending > MVPP2_MAX_RXD)
5499 new_rx_pending = MVPP2_MAX_RXD;
5500 else if (!IS_ALIGNED(ring->rx_pending, 16))
5501 new_rx_pending = ALIGN(ring->rx_pending, 16);
5503 if (ring->tx_pending > MVPP2_MAX_TXD)
5504 new_tx_pending = MVPP2_MAX_TXD;
5505 else if (!IS_ALIGNED(ring->tx_pending, 32))
5506 new_tx_pending = ALIGN(ring->tx_pending, 32);
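/* Example: a requested rx_pending of 100 is not 16-aligned and is
 * rounded up to ALIGN(100, 16) = 112; a tx_pending of 100 rounds up to
 * ALIGN(100, 32) = 128.
 */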
5508 if (ring->rx_pending != new_rx_pending) {
5509 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
5510 ring->rx_pending, new_rx_pending);
5511 ring->rx_pending = new_rx_pending;
5514 if (ring->tx_pending != new_tx_pending) {
5515 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
5516 ring->tx_pending, new_tx_pending);
5517 ring->tx_pending = new_tx_pending;
5523 static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
5525 u32 mac_addr_l, mac_addr_m, mac_addr_h;
5527 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5528 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
5529 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
5530 addr[0] = (mac_addr_h >> 24) & 0xFF;
5531 addr[1] = (mac_addr_h >> 16) & 0xFF;
5532 addr[2] = (mac_addr_h >> 8) & 0xFF;
5533 addr[3] = mac_addr_h & 0xFF;
5534 addr[4] = mac_addr_m & 0xFF;
5535 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
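/* Example: for 00:11:22:33:44:55 the registers would hold
 * mac_addr_h = 0x00112233, 0x44 in the low byte of mac_addr_m, and
 * 0x55 in the MVPP2_GMAC_SA_LOW_OFFS field of mac_addr_l.
 */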
5538 static int mvpp2_phy_connect(struct mvpp2_port *port)
5540 struct phy_device *phy_dev;
5542 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
5543 port->phy_interface);
5545 netdev_err(port->dev, "cannot connect to phy\n");
5548 phy_dev->supported &= PHY_GBIT_FEATURES;
5549 phy_dev->advertising = phy_dev->supported;
5551 port->phy_dev = phy_dev;
5559 static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5561 phy_disconnect(port->phy_dev);
5562 port->phy_dev = NULL;
5565 static int mvpp2_open(struct net_device *dev)
5567 struct mvpp2_port *port = netdev_priv(dev);
5568 unsigned char mac_bcast[ETH_ALEN] = {
5569 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5572 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
5574 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
5577 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
5578 dev->dev_addr, true);
5580 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
5583 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
5585 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
5588 err = mvpp2_prs_def_flow(port);
5590 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
5594 /* Allocate the Rx/Tx queues */
5595 err = mvpp2_setup_rxqs(port);
5597 netdev_err(port->dev, "cannot allocate Rx queues\n");
5601 err = mvpp2_setup_txqs(port);
5603 netdev_err(port->dev, "cannot allocate Tx queues\n");
5604 goto err_cleanup_rxqs;
5607 err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
5609 netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
5610 goto err_cleanup_txqs;
5613 /* By default, the link is down */
5614 netif_carrier_off(port->dev);
5616 err = mvpp2_phy_connect(port);
5620 /* Unmask interrupts on all CPUs */
5621 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
5623 mvpp2_start_dev(port);
5628 free_irq(port->irq, port);
5630 mvpp2_cleanup_txqs(port);
5632 mvpp2_cleanup_rxqs(port);
5636 static int mvpp2_stop(struct net_device *dev)
5638 struct mvpp2_port *port = netdev_priv(dev);
5639 struct mvpp2_port_pcpu *port_pcpu;
5642 mvpp2_stop_dev(port);
5643 mvpp2_phy_disconnect(port);
5645 /* Mask interrupts on all CPUs */
5646 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5648 free_irq(port->irq, port);
5649 for_each_present_cpu(cpu) {
5650 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
5652 hrtimer_cancel(&port_pcpu->tx_done_timer);
5653 port_pcpu->timer_scheduled = false;
5654 tasklet_kill(&port_pcpu->tx_done_tasklet);
5656 mvpp2_cleanup_rxqs(port);
5657 mvpp2_cleanup_txqs(port);
5662 static void mvpp2_set_rx_mode(struct net_device *dev)
5664 struct mvpp2_port *port = netdev_priv(dev);
5665 struct mvpp2 *priv = port->priv;
5666 struct netdev_hw_addr *ha;
5668 bool allmulti = dev->flags & IFF_ALLMULTI;
5670 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
5671 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
5672 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
5674 /* Remove all port->id's mcast entries */
5675 mvpp2_prs_mcast_del_all(priv, id);
5677 if (allmulti && !netdev_mc_empty(dev)) {
5678 netdev_for_each_mc_addr(ha, dev)
5679 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
5683 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
5685 struct mvpp2_port *port = netdev_priv(dev);
5686 const struct sockaddr *addr = p;
5689 if (!is_valid_ether_addr(addr->sa_data)) {
5690 err = -EADDRNOTAVAIL;
5694 if (!netif_running(dev)) {
5695 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5698 /* Reconfigure parser to accept the original MAC address */
5699 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5704 mvpp2_stop_dev(port);
5706 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5710 /* Reconfigure the parser to accept the original MAC address */
5711 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5715 mvpp2_start_dev(port);
5716 mvpp2_egress_enable(port);
5717 mvpp2_ingress_enable(port);
5721 netdev_err(dev, "failed to change MAC address\n");
5725 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5727 struct mvpp2_port *port = netdev_priv(dev);
5730 mtu = mvpp2_check_mtu_valid(dev, mtu);
5736 if (!netif_running(dev)) {
5737 err = mvpp2_bm_update_mtu(dev, mtu);
5739 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5743 /* Reconfigure BM to the original MTU */
5744 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5749 mvpp2_stop_dev(port);
5751 err = mvpp2_bm_update_mtu(dev, mtu);
5753 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5757 /* Reconfigure BM to the original MTU */
5758 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5763 mvpp2_start_dev(port);
5764 mvpp2_egress_enable(port);
5765 mvpp2_ingress_enable(port);
5770 netdev_err(dev, "failed to change MTU\n");
5774 static struct rtnl_link_stats64 *
5775 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5777 struct mvpp2_port *port = netdev_priv(dev);
5781 for_each_possible_cpu(cpu) {
5782 struct mvpp2_pcpu_stats *cpu_stats;
5788 cpu_stats = per_cpu_ptr(port->stats, cpu);
5790 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
5791 rx_packets = cpu_stats->rx_packets;
5792 rx_bytes = cpu_stats->rx_bytes;
5793 tx_packets = cpu_stats->tx_packets;
5794 tx_bytes = cpu_stats->tx_bytes;
5795 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
5797 stats->rx_packets += rx_packets;
5798 stats->rx_bytes += rx_bytes;
5799 stats->tx_packets += tx_packets;
5800 stats->tx_bytes += tx_bytes;
5803 stats->rx_errors = dev->stats.rx_errors;
5804 stats->rx_dropped = dev->stats.rx_dropped;
5805 stats->tx_dropped = dev->stats.tx_dropped;
5810 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5812 struct mvpp2_port *port = netdev_priv(dev);
5818 ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
5820 mvpp2_link_event(dev);
5825 /* Ethtool methods */
5827 /* Get settings (phy address, speed) for ethtool */
5828 static int mvpp2_ethtool_get_settings(struct net_device *dev,
5829 struct ethtool_cmd *cmd)
5831 struct mvpp2_port *port = netdev_priv(dev);
5835 return phy_ethtool_gset(port->phy_dev, cmd);
5838 /* Set settings (phy address, speed) for ethtool */
5839 static int mvpp2_ethtool_set_settings(struct net_device *dev,
5840 struct ethtool_cmd *cmd)
5842 struct mvpp2_port *port = netdev_priv(dev);
5846 return phy_ethtool_sset(port->phy_dev, cmd);
5849 /* Set interrupt coalescing for ethtool */
5850 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5851 struct ethtool_coalesce *c)
5853 struct mvpp2_port *port = netdev_priv(dev);
5856 for (queue = 0; queue < rxq_number; queue++) {
5857 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5859 rxq->time_coal = c->rx_coalesce_usecs;
5860 rxq->pkts_coal = c->rx_max_coalesced_frames;
5861 mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
5862 mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
5865 for (queue = 0; queue < txq_number; queue++) {
5866 struct mvpp2_tx_queue *txq = port->txqs[queue];
5868 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5874 /* Get interrupt coalescing for ethtool */
5875 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
5876 struct ethtool_coalesce *c)
5878 struct mvpp2_port *port = netdev_priv(dev);
5880 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5881 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5882 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5886 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5887 struct ethtool_drvinfo *drvinfo)
5889 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5890 sizeof(drvinfo->driver));
5891 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5892 sizeof(drvinfo->version));
5893 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5894 sizeof(drvinfo->bus_info));
5897 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
5898 struct ethtool_ringparam *ring)
5900 struct mvpp2_port *port = netdev_priv(dev);
5902 ring->rx_max_pending = MVPP2_MAX_RXD;
5903 ring->tx_max_pending = MVPP2_MAX_TXD;
5904 ring->rx_pending = port->rx_ring_size;
5905 ring->tx_pending = port->tx_ring_size;
5908 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
5909 struct ethtool_ringparam *ring)
5911 struct mvpp2_port *port = netdev_priv(dev);
5912 u16 prev_rx_ring_size = port->rx_ring_size;
5913 u16 prev_tx_ring_size = port->tx_ring_size;
5916 err = mvpp2_check_ringparam_valid(dev, ring);
5920 if (!netif_running(dev)) {
5921 port->rx_ring_size = ring->rx_pending;
5922 port->tx_ring_size = ring->tx_pending;
5926 /* The interface is running, so we have to force a
5927 * reallocation of the queues
5929 mvpp2_stop_dev(port);
5930 mvpp2_cleanup_rxqs(port);
5931 mvpp2_cleanup_txqs(port);
5933 port->rx_ring_size = ring->rx_pending;
5934 port->tx_ring_size = ring->tx_pending;
5936 err = mvpp2_setup_rxqs(port);
5938 /* Reallocate Rx queues with the original ring size */
5939 port->rx_ring_size = prev_rx_ring_size;
5940 ring->rx_pending = prev_rx_ring_size;
5941 err = mvpp2_setup_rxqs(port);
5945 err = mvpp2_setup_txqs(port);
5947 /* Reallocate Tx queues with the original ring size */
5948 port->tx_ring_size = prev_tx_ring_size;
5949 ring->tx_pending = prev_tx_ring_size;
5950 err = mvpp2_setup_txqs(port);
5952 goto err_clean_rxqs;
5955 mvpp2_start_dev(port);
5956 mvpp2_egress_enable(port);
5957 mvpp2_ingress_enable(port);
5962 mvpp2_cleanup_rxqs(port);
5964 netdev_err(dev, "failed to change ring parameters\n");
5970 static const struct net_device_ops mvpp2_netdev_ops = {
5971 .ndo_open = mvpp2_open,
5972 .ndo_stop = mvpp2_stop,
5973 .ndo_start_xmit = mvpp2_tx,
5974 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5975 .ndo_set_mac_address = mvpp2_set_mac_address,
5976 .ndo_change_mtu = mvpp2_change_mtu,
5977 .ndo_get_stats64 = mvpp2_get_stats64,
5978 .ndo_do_ioctl = mvpp2_ioctl,
5981 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5982 .get_link = ethtool_op_get_link,
5983 .get_settings = mvpp2_ethtool_get_settings,
5984 .set_settings = mvpp2_ethtool_set_settings,
5985 .set_coalesce = mvpp2_ethtool_set_coalesce,
5986 .get_coalesce = mvpp2_ethtool_get_coalesce,
5987 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5988 .get_ringparam = mvpp2_ethtool_get_ringparam,
5989 .set_ringparam = mvpp2_ethtool_set_ringparam,
5992 /* Driver initialization */
5994 static void mvpp2_port_power_up(struct mvpp2_port *port)
5996 mvpp2_port_mii_set(port);
5997 mvpp2_port_periodic_xon_disable(port);
5998 mvpp2_port_fc_adv_enable(port);
5999 mvpp2_port_reset(port);
6002 /* Initialize port HW */
6003 static int mvpp2_port_init(struct mvpp2_port *port)
6005 struct device *dev = port->dev->dev.parent;
6006 struct mvpp2 *priv = port->priv;
6007 struct mvpp2_txq_pcpu *txq_pcpu;
6008 int queue, cpu, err;
6010 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
6014 mvpp2_egress_disable(port);
6015 mvpp2_port_disable(port);
6017 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
6022 /* Associate physical Tx queues to this port and initialize.
6023 * The mapping is predefined.
6025 for (queue = 0; queue < txq_number; queue++) {
6026 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
6027 struct mvpp2_tx_queue *txq;
6029 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
6033 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
6036 goto err_free_percpu;
6039 txq->id = queue_phy_id;
6040 txq->log_id = queue;
6041 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
6042 for_each_present_cpu(cpu) {
6043 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6044 txq_pcpu->cpu = cpu;
6047 port->txqs[queue] = txq;
6050 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
6054 goto err_free_percpu;
6057 /* Allocate and initialize Rx queue for this port */
6058 for (queue = 0; queue < rxq_number; queue++) {
6059 struct mvpp2_rx_queue *rxq;
6061 /* Map physical Rx queue to port's logical Rx queue */
6062 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6064 goto err_free_percpu;
6065 /* Map this Rx queue to a physical queue */
6066 rxq->id = port->first_rxq + queue;
6067 rxq->port = port->id;
6068 rxq->logic_rxq = queue;
6070 port->rxqs[queue] = rxq;
6073 /* Configure Rx queue group interrupt for this port */
6074 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
6076 /* Create Rx descriptor rings */
6077 for (queue = 0; queue < rxq_number; queue++) {
6078 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6080 rxq->size = port->rx_ring_size;
6081 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6082 rxq->time_coal = MVPP2_RX_COAL_USEC;
6085 mvpp2_ingress_disable(port);
6087 /* Port default configuration */
6088 mvpp2_defaults_set(port);
6090 /* Port's classifier configuration */
6091 mvpp2_cls_oversize_rxq_set(port);
6092 mvpp2_cls_port_config(port);
6094 /* Provide an initial Rx packet size */
6095 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6097 /* Initialize BM pools for software forwarding (swf) */
6098 err = mvpp2_swf_bm_pool_init(port);
6100 goto err_free_percpu;
6105 for (queue = 0; queue < txq_number; queue++) {
6106 if (!port->txqs[queue])
6108 free_percpu(port->txqs[queue]->pcpu);
6113 /* Ports initialization */
6114 static int mvpp2_port_probe(struct platform_device *pdev,
6115 struct device_node *port_node,
6117 int *next_first_rxq)
6119 struct device_node *phy_node;
6120 struct mvpp2_port *port;
6121 struct mvpp2_port_pcpu *port_pcpu;
6122 struct net_device *dev;
6123 struct resource *res;
6124 const char *dt_mac_addr;
6125 const char *mac_from;
6126 char hw_mac_addr[ETH_ALEN];
6130 int priv_common_regs_num = 2;
6133 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6138 phy_node = of_parse_phandle(port_node, "phy", 0);
6140 dev_err(&pdev->dev, "missing phy\n");
6142 goto err_free_netdev;
6145 phy_mode = of_get_phy_mode(port_node);
6147 dev_err(&pdev->dev, "incorrect phy mode\n");
6149 goto err_free_netdev;
6152 if (of_property_read_u32(port_node, "port-id", &id)) {
6154 dev_err(&pdev->dev, "missing port-id value\n");
6155 goto err_free_netdev;
6158 dev->tx_queue_len = MVPP2_MAX_TXD;
6159 dev->watchdog_timeo = 5 * HZ;
6160 dev->netdev_ops = &mvpp2_netdev_ops;
6161 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6163 port = netdev_priv(dev);
6165 port->irq = irq_of_parse_and_map(port_node, 0);
6166 if (port->irq <= 0) {
6168 goto err_free_netdev;
6171 if (of_property_read_bool(port_node, "marvell,loopback"))
6172 port->flags |= MVPP2_F_LOOPBACK;
6176 port->first_rxq = *next_first_rxq;
6177 port->phy_node = phy_node;
6178 port->phy_interface = phy_mode;
6180 res = platform_get_resource(pdev, IORESOURCE_MEM,
6181 priv_common_regs_num + id);
6182 port->base = devm_ioremap_resource(&pdev->dev, res);
6183 if (IS_ERR(port->base)) {
6184 err = PTR_ERR(port->base);
6188 /* Alloc per-cpu stats */
6189 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6195 dt_mac_addr = of_get_mac_address(port_node);
6196 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6197 mac_from = "device tree";
6198 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6200 mvpp2_get_mac_address(port, hw_mac_addr);
6201 if (is_valid_ether_addr(hw_mac_addr)) {
6202 mac_from = "hardware";
6203 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6205 mac_from = "random";
6206 eth_hw_addr_random(dev);
6210 port->tx_ring_size = MVPP2_MAX_TXD;
6211 port->rx_ring_size = MVPP2_MAX_RXD;
6213 SET_NETDEV_DEV(dev, &pdev->dev);
6215 err = mvpp2_port_init(port);
6217 dev_err(&pdev->dev, "failed to init port %d\n", id);
6218 goto err_free_stats;
6220 mvpp2_port_power_up(port);
6222 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6225 goto err_free_txq_pcpu;
6228 for_each_present_cpu(cpu) {
6229 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6231 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6232 HRTIMER_MODE_REL_PINNED);
6233 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6234 port_pcpu->timer_scheduled = false;
6236 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6237 (unsigned long)dev);
	netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
	features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	/* Increment the first Rx queue number to be used by the next port */
	*next_first_rxq += rxq_number;
	priv->port_list[id] = port;
	return 0;
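	/*
	 * The labels below unwind in reverse order of allocation, so each
	 * failure point above jumps to the label that frees exactly what
	 * has been set up so far.
	 */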
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	irq_dispose_mapping(port->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
	irq_dispose_mapping(port->irq);
	free_netdev(port->dev);
}
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
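/*
 * Note on the window encoding above: both the window base and the window
 * size are programmed at 64 KiB granularity, which is why cs->base and
 * (cs->size - 1) are masked with 0xffff0000; the low half of the base
 * register carries the MBus target id and attribute instead.  One decoding
 * window is enabled per DRAM chip-select reported by mv_mbus_dram_info().
 */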
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints */
	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	/* Disable HW PHY polling */
	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
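	/*
	 * Note: hardware SMI autopolling is stopped here presumably
	 * because link management is left to the kernel's PHY library
	 * through the PHY node taken from the device tree, rather than
	 * to the MAC itself.
	 */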
	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}
	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
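	/*
	 * Note: each port owns a contiguous group of rxq_number RX queues
	 * (see the first_rxq handling in mvpp2_port_probe()), so writing
	 * the per-port queue count here appears to restore the default RX
	 * queue group layout before the ports configure their own.
	 */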
	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
	       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	int port_count, first_rxq;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->lms_base))
		return PTR_ERR(priv->lms_base);

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_gop_clk;
	}
	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_gop_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(struct mvpp2_port *),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_gop_clk;
	}

	/* Initialize ports */
	first_rxq = 0;
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
		if (err < 0)
			goto err_gop_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_phys);
	}

	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{ .compatible = "marvell,armada-375-pp2" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");