1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2014 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
27 #include <linux/prefetch.h>
28 #include <net/busy_poll.h>
30 #include "i40e_prototype.h"
32 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
35 return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
36 ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
37 ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
38 ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
39 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
42 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
43 #define I40E_FD_CLEAN_DELAY 10
45 * i40e_program_fdir_filter - Program a Flow Director filter
46 * @fdir_data: Packet data that will be filter parameters
47 * @raw_packet: the pre-allocated packet buffer for FDir
49 * @add: True for add/update, False for remove
51 int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
52 struct i40e_pf *pf, bool add)
54 struct i40e_filter_program_desc *fdir_desc;
55 struct i40e_tx_buffer *tx_buf, *first;
56 struct i40e_tx_desc *tx_desc;
57 struct i40e_ring *tx_ring;
58 unsigned int fpt, dcc;
66 /* find existing FDIR VSI */
68 for (i = 0; i < pf->num_alloc_vsi; i++)
69 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
74 tx_ring = vsi->tx_rings[0];
77 /* we need two descriptors to add/del a filter and we can wait */
79 if (I40E_DESC_UNUSED(tx_ring) > 1)
81 msleep_interruptible(1);
83 } while (delay < I40E_FD_CLEAN_DELAY);
85 if (!(I40E_DESC_UNUSED(tx_ring) > 1))
88 dma = dma_map_single(dev, raw_packet,
89 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
90 if (dma_mapping_error(dev, dma))
93 /* grab the next descriptor */
94 i = tx_ring->next_to_use;
95 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
96 first = &tx_ring->tx_bi[i];
97 memset(first, 0, sizeof(struct i40e_tx_buffer));
99 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
101 fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
102 I40E_TXD_FLTR_QW0_QINDEX_MASK;
104 fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
105 I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
107 fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
108 I40E_TXD_FLTR_QW0_PCTYPE_MASK;
110 /* Use LAN VSI Id if not programmed by user */
111 if (fdir_data->dest_vsi == 0)
112 fpt |= (pf->vsi[pf->lan_vsi]->id) <<
113 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
115 fpt |= ((u32)fdir_data->dest_vsi <<
116 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
117 I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
119 dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
122 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
123 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
125 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
126 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
128 dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
129 I40E_TXD_FLTR_QW1_DEST_MASK;
131 dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
132 I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
134 if (fdir_data->cnt_index != 0) {
135 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
136 dcc |= ((u32)fdir_data->cnt_index <<
137 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
138 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
141 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
142 fdir_desc->rsvd = cpu_to_le32(0);
143 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
144 fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
146 /* Now program a dummy descriptor */
147 i = tx_ring->next_to_use;
148 tx_desc = I40E_TX_DESC(tx_ring, i);
149 tx_buf = &tx_ring->tx_bi[i];
151 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
153 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
155 /* record length, and DMA address */
156 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
157 dma_unmap_addr_set(tx_buf, dma, dma);
159 tx_desc->buffer_addr = cpu_to_le64(dma);
160 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
162 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
163 tx_buf->raw_buf = (void *)raw_packet;
165 tx_desc->cmd_type_offset_bsz =
166 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
168 /* Force memory writes to complete before letting h/w
169 * know there are new descriptors to fetch.
173 /* Mark the data descriptor to be watched */
174 first->next_to_watch = tx_desc;
176 writel(tx_ring->next_to_use, tx_ring->tail);
183 #define IP_HEADER_OFFSET 14
184 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
186 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
187 * @vsi: pointer to the targeted VSI
188 * @fd_data: the flow director data required for the FDir descriptor
189 * @add: true adds a filter, false removes it
191 * Returns 0 if the filters were successfully added or removed
193 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
194 struct i40e_fdir_filter *fd_data,
197 struct i40e_pf *pf = vsi->back;
203 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
204 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
205 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
207 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
210 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
212 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
213 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
214 + sizeof(struct iphdr));
216 ip->daddr = fd_data->dst_ip[0];
217 udp->dest = fd_data->dst_port;
218 ip->saddr = fd_data->src_ip[0];
219 udp->source = fd_data->src_port;
221 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
222 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
224 dev_info(&pf->pdev->dev,
225 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
226 fd_data->pctype, fd_data->fd_id, ret);
228 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
230 dev_info(&pf->pdev->dev,
231 "Filter OK for PCTYPE %d loc = %d\n",
232 fd_data->pctype, fd_data->fd_id);
234 dev_info(&pf->pdev->dev,
235 "Filter deleted for PCTYPE %d loc = %d\n",
236 fd_data->pctype, fd_data->fd_id);
241 return err ? -EOPNOTSUPP : 0;
244 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
246 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
247 * @vsi: pointer to the targeted VSI
248 * @fd_data: the flow director data required for the FDir descriptor
249 * @add: true adds a filter, false removes it
251 * Returns 0 if the filters were successfully added or removed
253 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
254 struct i40e_fdir_filter *fd_data,
257 struct i40e_pf *pf = vsi->back;
264 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
265 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
266 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
267 0x0, 0x72, 0, 0, 0, 0};
269 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
272 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
274 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
275 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
276 + sizeof(struct iphdr));
278 ip->daddr = fd_data->dst_ip[0];
279 tcp->dest = fd_data->dst_port;
280 ip->saddr = fd_data->src_ip[0];
281 tcp->source = fd_data->src_port;
285 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
286 if (I40E_DEBUG_FD & pf->hw.debug_mask)
287 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
288 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
291 pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
292 (pf->fd_tcp_rule - 1) : 0;
293 if (pf->fd_tcp_rule == 0) {
294 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
295 if (I40E_DEBUG_FD & pf->hw.debug_mask)
296 dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
300 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
301 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
304 dev_info(&pf->pdev->dev,
305 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
306 fd_data->pctype, fd_data->fd_id, ret);
308 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
310 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
311 fd_data->pctype, fd_data->fd_id);
313 dev_info(&pf->pdev->dev,
314 "Filter deleted for PCTYPE %d loc = %d\n",
315 fd_data->pctype, fd_data->fd_id);
321 return err ? -EOPNOTSUPP : 0;
325 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
326 * a specific flow spec
327 * @vsi: pointer to the targeted VSI
328 * @fd_data: the flow director data required for the FDir descriptor
329 * @add: true adds a filter, false removes it
331 * Always returns -EOPNOTSUPP
333 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
334 struct i40e_fdir_filter *fd_data,
340 #define I40E_IP_DUMMY_PACKET_LEN 34
342 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
343 * a specific flow spec
344 * @vsi: pointer to the targeted VSI
345 * @fd_data: the flow director data required for the FDir descriptor
346 * @add: true adds a filter, false removes it
348 * Returns 0 if the filters were successfully added or removed
350 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
351 struct i40e_fdir_filter *fd_data,
354 struct i40e_pf *pf = vsi->back;
360 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
361 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
364 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
365 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
366 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
369 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
370 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
372 ip->saddr = fd_data->src_ip[0];
373 ip->daddr = fd_data->dst_ip[0];
377 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
380 dev_info(&pf->pdev->dev,
381 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
382 fd_data->pctype, fd_data->fd_id, ret);
384 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
386 dev_info(&pf->pdev->dev,
387 "Filter OK for PCTYPE %d loc = %d\n",
388 fd_data->pctype, fd_data->fd_id);
390 dev_info(&pf->pdev->dev,
391 "Filter deleted for PCTYPE %d loc = %d\n",
392 fd_data->pctype, fd_data->fd_id);
399 return err ? -EOPNOTSUPP : 0;
403 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
404 * @vsi: pointer to the targeted VSI
405 * @cmd: command to get or set RX flow classification rules
406 * @add: true adds a filter, false removes it
409 int i40e_add_del_fdir(struct i40e_vsi *vsi,
410 struct i40e_fdir_filter *input, bool add)
412 struct i40e_pf *pf = vsi->back;
415 switch (input->flow_type & ~FLOW_EXT) {
417 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
420 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
423 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
426 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
429 switch (input->ip4_proto) {
431 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
434 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
437 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
440 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
445 dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
450 /* The buffer allocated here is freed by the i40e_clean_tx_ring() */
455 * i40e_fd_handle_status - check the Programming Status for FD
456 * @rx_ring: the Rx ring for this descriptor
457 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
458 * @prog_id: the id originally used for programming
460 * This is used to verify if the FD programming or invalidation
461 * requested by SW to the HW is successful or not and take actions accordingly.
463 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
464 union i40e_rx_desc *rx_desc, u8 prog_id)
466 struct i40e_pf *pf = rx_ring->vsi->back;
467 struct pci_dev *pdev = pf->pdev;
468 u32 fcnt_prog, fcnt_avail;
472 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
473 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
474 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
476 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
477 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
478 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
479 (I40E_DEBUG_FD & pf->hw.debug_mask))
480 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
483 /* Check if the programming error is for ATR.
484 * If so, auto disable ATR and set a state for
485 * flush in progress. Next time we come here if flush is in
486 * progress do nothing, once flush is complete the state will
489 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
493 /* store the current atr filter count */
494 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
496 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
497 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
498 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
499 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
502 /* filter programming failed most likely due to table full */
503 fcnt_prog = i40e_get_global_fd_count(pf);
504 fcnt_avail = pf->fdir_pf_filter_count;
505 /* If ATR is running fcnt_prog can quickly change,
506 * if we are very close to full, it makes sense to disable
507 * FD ATR/SB and then re-enable it when there is room.
509 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
510 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
511 !(pf->auto_disable_flags &
512 I40E_FLAG_FD_SB_ENABLED)) {
513 if (I40E_DEBUG_FD & pf->hw.debug_mask)
514 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
515 pf->auto_disable_flags |=
516 I40E_FLAG_FD_SB_ENABLED;
520 "FD filter programming failed due to incorrect filter parameters\n");
522 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
523 if (I40E_DEBUG_FD & pf->hw.debug_mask)
524 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
525 rx_desc->wb.qword0.hi_dword.fd_id);
530 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
531 * @ring: the ring that owns the buffer
532 * @tx_buffer: the buffer to free
534 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
535 struct i40e_tx_buffer *tx_buffer)
537 if (tx_buffer->skb) {
538 dev_kfree_skb_any(tx_buffer->skb);
539 if (dma_unmap_len(tx_buffer, len))
540 dma_unmap_single(ring->dev,
541 dma_unmap_addr(tx_buffer, dma),
542 dma_unmap_len(tx_buffer, len),
544 } else if (dma_unmap_len(tx_buffer, len)) {
545 dma_unmap_page(ring->dev,
546 dma_unmap_addr(tx_buffer, dma),
547 dma_unmap_len(tx_buffer, len),
551 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
552 kfree(tx_buffer->raw_buf);
554 tx_buffer->next_to_watch = NULL;
555 tx_buffer->skb = NULL;
556 dma_unmap_len_set(tx_buffer, len, 0);
557 /* tx_buffer must be completely set up in the transmit path */
561 * i40e_clean_tx_ring - Free any empty Tx buffers
562 * @tx_ring: ring to be cleaned
564 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
566 unsigned long bi_size;
569 /* ring already cleared, nothing to do */
573 /* Free all the Tx ring sk_buffs */
574 for (i = 0; i < tx_ring->count; i++)
575 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
577 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
578 memset(tx_ring->tx_bi, 0, bi_size);
580 /* Zero out the descriptor ring */
581 memset(tx_ring->desc, 0, tx_ring->size);
583 tx_ring->next_to_use = 0;
584 tx_ring->next_to_clean = 0;
586 if (!tx_ring->netdev)
589 /* cleanup Tx queue statistics */
590 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
591 tx_ring->queue_index));
595 * i40e_free_tx_resources - Free Tx resources per queue
596 * @tx_ring: Tx descriptor ring for a specific queue
598 * Free all transmit software resources
600 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
602 i40e_clean_tx_ring(tx_ring);
603 kfree(tx_ring->tx_bi);
604 tx_ring->tx_bi = NULL;
607 dma_free_coherent(tx_ring->dev, tx_ring->size,
608 tx_ring->desc, tx_ring->dma);
609 tx_ring->desc = NULL;
614 * i40e_get_tx_pending - how many tx descriptors not processed
615 * @tx_ring: the ring of descriptors
617 * Since there is no access to the ring head register
618 * in XL710, we need to use our local copies
620 u32 i40e_get_tx_pending(struct i40e_ring *ring)
624 head = i40e_get_head(ring);
625 tail = readl(ring->tail);
628 return (head < tail) ?
629 tail - head : (tail + ring->count - head);
634 #define WB_STRIDE 0x3
637 * i40e_clean_tx_irq - Reclaim resources after transmit completes
638 * @tx_ring: tx ring to clean
639 * @budget: how many cleans we're allowed
641 * Returns true if there's any budget left (e.g. the clean is finished)
643 static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
645 u16 i = tx_ring->next_to_clean;
646 struct i40e_tx_buffer *tx_buf;
647 struct i40e_tx_desc *tx_head;
648 struct i40e_tx_desc *tx_desc;
649 unsigned int total_packets = 0;
650 unsigned int total_bytes = 0;
652 tx_buf = &tx_ring->tx_bi[i];
653 tx_desc = I40E_TX_DESC(tx_ring, i);
656 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
659 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
661 /* if next_to_watch is not set then there is no work pending */
665 /* prevent any other reads prior to eop_desc */
666 read_barrier_depends();
668 /* we have caught up to head, no work left to do */
669 if (tx_head == tx_desc)
672 /* clear next_to_watch to prevent false hangs */
673 tx_buf->next_to_watch = NULL;
675 /* update the statistics for this packet */
676 total_bytes += tx_buf->bytecount;
677 total_packets += tx_buf->gso_segs;
680 dev_consume_skb_any(tx_buf->skb);
682 /* unmap skb header data */
683 dma_unmap_single(tx_ring->dev,
684 dma_unmap_addr(tx_buf, dma),
685 dma_unmap_len(tx_buf, len),
688 /* clear tx_buffer data */
690 dma_unmap_len_set(tx_buf, len, 0);
692 /* unmap remaining buffers */
693 while (tx_desc != eop_desc) {
700 tx_buf = tx_ring->tx_bi;
701 tx_desc = I40E_TX_DESC(tx_ring, 0);
704 /* unmap any remaining paged data */
705 if (dma_unmap_len(tx_buf, len)) {
706 dma_unmap_page(tx_ring->dev,
707 dma_unmap_addr(tx_buf, dma),
708 dma_unmap_len(tx_buf, len),
710 dma_unmap_len_set(tx_buf, len, 0);
714 /* move us one more past the eop_desc for start of next pkt */
720 tx_buf = tx_ring->tx_bi;
721 tx_desc = I40E_TX_DESC(tx_ring, 0);
726 /* update budget accounting */
728 } while (likely(budget));
731 tx_ring->next_to_clean = i;
732 u64_stats_update_begin(&tx_ring->syncp);
733 tx_ring->stats.bytes += total_bytes;
734 tx_ring->stats.packets += total_packets;
735 u64_stats_update_end(&tx_ring->syncp);
736 tx_ring->q_vector->tx.total_bytes += total_bytes;
737 tx_ring->q_vector->tx.total_packets += total_packets;
739 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
742 /* check to see if there are < 4 descriptors
743 * waiting to be written back, then kick the hardware to force
744 * them to be written back in case we stay in NAPI.
745 * In this mode on X722 we do not enable Interrupt.
747 j = i40e_get_tx_pending(tx_ring);
750 ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
751 !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
752 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
753 tx_ring->arm_wb = true;
756 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
757 tx_ring->queue_index),
758 total_packets, total_bytes);
760 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
761 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
762 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
763 /* Make sure that anybody stopping the queue after this
764 * sees the new next_to_clean.
767 if (__netif_subqueue_stopped(tx_ring->netdev,
768 tx_ring->queue_index) &&
769 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
770 netif_wake_subqueue(tx_ring->netdev,
771 tx_ring->queue_index);
772 ++tx_ring->tx_stats.restart_queue;
780 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
781 * @vsi: the VSI we care about
782 * @q_vector: the vector on which to force writeback
785 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
787 u16 flags = q_vector->tx.ring[0].flags;
789 if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
792 if (q_vector->arm_wb_state)
795 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
798 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
799 vsi->base_vector - 1),
801 q_vector->arm_wb_state = true;
802 } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
803 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
804 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
805 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
806 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
807 /* allow 00 to be written to the index */
810 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
811 vsi->base_vector - 1), val);
813 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
814 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
815 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
816 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
817 /* allow 00 to be written to the index */
819 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
824 * i40e_set_new_dynamic_itr - Find new ITR level
825 * @rc: structure containing ring performance data
827 * Returns true if ITR changed, false if not
829 * Stores a new ITR value based on packets and byte counts during
830 * the last interrupt. The advantage of per interrupt computation
831 * is faster updates and more accurate ITR for the current traffic
832 * pattern. Constants in this function were computed based on
833 * theoretical maximum wire speed and thresholds were set based on
834 * testing data as well as attempting to minimize response time
835 * while increasing bulk throughput.
837 static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
839 enum i40e_latency_range new_latency_range = rc->latency_range;
840 struct i40e_q_vector *qv = rc->ring->q_vector;
841 u32 new_itr = rc->itr;
845 if (rc->total_packets == 0 || !rc->itr)
848 /* simple throttlerate management
849 * 0-10MB/s lowest (50000 ints/s)
850 * 10-20MB/s low (20000 ints/s)
851 * 20-1249MB/s bulk (18000 ints/s)
852 * > 40000 Rx packets per second (8000 ints/s)
854 * The math works out because the divisor is in 10^(-6) which
855 * turns the bytes/us input value into MB/s values, but
856 * make sure to use usecs, as the register values written
857 * are in 2 usec increments in the ITR registers, and make sure
858 * to use the smoothed values that the countdown timer gives us.
860 usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
861 bytes_per_int = rc->total_bytes / usecs;
863 switch (new_latency_range) {
864 case I40E_LOWEST_LATENCY:
865 if (bytes_per_int > 10)
866 new_latency_range = I40E_LOW_LATENCY;
868 case I40E_LOW_LATENCY:
869 if (bytes_per_int > 20)
870 new_latency_range = I40E_BULK_LATENCY;
871 else if (bytes_per_int <= 10)
872 new_latency_range = I40E_LOWEST_LATENCY;
874 case I40E_BULK_LATENCY:
875 case I40E_ULTRA_LATENCY:
877 if (bytes_per_int <= 20)
878 new_latency_range = I40E_LOW_LATENCY;
882 /* this is to adjust RX more aggressively when streaming small
883 * packets. The value of 40000 was picked as it is just beyond
884 * what the hardware can receive per second if in low latency
887 #define RX_ULTRA_PACKET_RATE 40000
889 if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
891 new_latency_range = I40E_ULTRA_LATENCY;
893 rc->latency_range = new_latency_range;
895 switch (new_latency_range) {
896 case I40E_LOWEST_LATENCY:
897 new_itr = I40E_ITR_50K;
899 case I40E_LOW_LATENCY:
900 new_itr = I40E_ITR_20K;
902 case I40E_BULK_LATENCY:
903 new_itr = I40E_ITR_18K;
905 case I40E_ULTRA_LATENCY:
906 new_itr = I40E_ITR_8K;
913 rc->total_packets = 0;
915 if (new_itr != rc->itr) {
924 * i40e_clean_programming_status - clean the programming status descriptor
925 * @rx_ring: the rx ring that has this descriptor
926 * @rx_desc: the rx descriptor written back by HW
928 * Flow director should handle FD_FILTER_STATUS to check its filter programming
929 * status being successful or not and take actions accordingly. FCoE should
930 * handle its context/filter programming/invalidation status and take actions.
933 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
934 union i40e_rx_desc *rx_desc)
939 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
940 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
941 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
943 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
944 i40e_fd_handle_status(rx_ring, rx_desc, id);
946 else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
947 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
948 i40e_fcoe_handle_status(rx_ring, rx_desc, id);
953 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
954 * @tx_ring: the tx ring to set up
956 * Return 0 on success, negative on error
958 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
960 struct device *dev = tx_ring->dev;
966 /* warn if we are about to overwrite the pointer */
967 WARN_ON(tx_ring->tx_bi);
968 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
969 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
973 /* round up to nearest 4K */
974 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
975 /* add u32 for head writeback, align after this takes care of
976 * guaranteeing this is at least one cache line in size
978 tx_ring->size += sizeof(u32);
979 tx_ring->size = ALIGN(tx_ring->size, 4096);
980 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
981 &tx_ring->dma, GFP_KERNEL);
982 if (!tx_ring->desc) {
983 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
988 tx_ring->next_to_use = 0;
989 tx_ring->next_to_clean = 0;
993 kfree(tx_ring->tx_bi);
994 tx_ring->tx_bi = NULL;
999 * i40e_clean_rx_ring - Free Rx buffers
1000 * @rx_ring: ring to be cleaned
1002 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1004 struct device *dev = rx_ring->dev;
1005 struct i40e_rx_buffer *rx_bi;
1006 unsigned long bi_size;
1009 /* ring already cleared, nothing to do */
1010 if (!rx_ring->rx_bi)
1013 if (ring_is_ps_enabled(rx_ring)) {
1014 int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
1016 rx_bi = &rx_ring->rx_bi[0];
1017 if (rx_bi->hdr_buf) {
1018 dma_free_coherent(dev,
1022 for (i = 0; i < rx_ring->count; i++) {
1023 rx_bi = &rx_ring->rx_bi[i];
1025 rx_bi->hdr_buf = NULL;
1029 /* Free all the Rx ring sk_buffs */
1030 for (i = 0; i < rx_ring->count; i++) {
1031 rx_bi = &rx_ring->rx_bi[i];
1033 dma_unmap_single(dev,
1035 rx_ring->rx_buf_len,
1040 dev_kfree_skb(rx_bi->skb);
1044 if (rx_bi->page_dma) {
1049 rx_bi->page_dma = 0;
1051 __free_page(rx_bi->page);
1053 rx_bi->page_offset = 0;
1057 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1058 memset(rx_ring->rx_bi, 0, bi_size);
1060 /* Zero out the descriptor ring */
1061 memset(rx_ring->desc, 0, rx_ring->size);
1063 rx_ring->next_to_clean = 0;
1064 rx_ring->next_to_use = 0;
1068 * i40e_free_rx_resources - Free Rx resources
1069 * @rx_ring: ring to clean the resources from
1071 * Free all receive software resources
1073 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1075 i40e_clean_rx_ring(rx_ring);
1076 kfree(rx_ring->rx_bi);
1077 rx_ring->rx_bi = NULL;
1079 if (rx_ring->desc) {
1080 dma_free_coherent(rx_ring->dev, rx_ring->size,
1081 rx_ring->desc, rx_ring->dma);
1082 rx_ring->desc = NULL;
1087 * i40e_alloc_rx_headers - allocate rx header buffers
1088 * @rx_ring: ring to alloc buffers
1090 * Allocate rx header buffers for the entire ring. As these are static,
1091 * this is only called when setting up a new ring.
1093 void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
1095 struct device *dev = rx_ring->dev;
1096 struct i40e_rx_buffer *rx_bi;
1102 if (rx_ring->rx_bi[0].hdr_buf)
1104 /* Make sure the buffers don't cross cache line boundaries. */
1105 buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
1106 buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
1110 for (i = 0; i < rx_ring->count; i++) {
1111 rx_bi = &rx_ring->rx_bi[i];
1112 rx_bi->dma = dma + (i * buf_size);
1113 rx_bi->hdr_buf = buffer + (i * buf_size);
1118 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1119 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1121 * Returns 0 on success, negative on failure
1123 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1125 struct device *dev = rx_ring->dev;
1128 /* warn if we are about to overwrite the pointer */
1129 WARN_ON(rx_ring->rx_bi);
1130 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1131 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1132 if (!rx_ring->rx_bi)
1135 u64_stats_init(&rx_ring->syncp);
1137 /* Round up to nearest 4K */
1138 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1139 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
1140 : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1141 rx_ring->size = ALIGN(rx_ring->size, 4096);
1142 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1143 &rx_ring->dma, GFP_KERNEL);
1145 if (!rx_ring->desc) {
1146 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1151 rx_ring->next_to_clean = 0;
1152 rx_ring->next_to_use = 0;
1156 kfree(rx_ring->rx_bi);
1157 rx_ring->rx_bi = NULL;
1162 * i40e_release_rx_desc - Store the new tail and head values
1163 * @rx_ring: ring to bump
1164 * @val: new head index
1166 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1168 rx_ring->next_to_use = val;
1169 /* Force memory writes to complete before letting h/w
1170 * know there are new descriptors to fetch. (Only
1171 * applicable for weak-ordered memory model archs,
1175 writel(val, rx_ring->tail);
1179 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
1180 * @rx_ring: ring to place buffers on
1181 * @cleaned_count: number of buffers to replace
1183 void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
1185 u16 i = rx_ring->next_to_use;
1186 union i40e_rx_desc *rx_desc;
1187 struct i40e_rx_buffer *bi;
1189 /* do nothing if no valid netdev defined */
1190 if (!rx_ring->netdev || !cleaned_count)
1193 while (cleaned_count--) {
1194 rx_desc = I40E_RX_DESC(rx_ring, i);
1195 bi = &rx_ring->rx_bi[i];
1197 if (bi->skb) /* desc is in use */
1200 bi->page = alloc_page(GFP_ATOMIC);
1202 rx_ring->rx_stats.alloc_page_failed++;
1207 if (!bi->page_dma) {
1208 /* use a half page if we're re-using */
1209 bi->page_offset ^= PAGE_SIZE / 2;
1210 bi->page_dma = dma_map_page(rx_ring->dev,
1215 if (dma_mapping_error(rx_ring->dev,
1217 rx_ring->rx_stats.alloc_page_failed++;
1223 dma_sync_single_range_for_device(rx_ring->dev,
1226 rx_ring->rx_hdr_len,
1228 /* Refresh the desc even if buffer_addrs didn't change
1229 * because each write-back erases this info.
1231 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1232 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1234 if (i == rx_ring->count)
1239 if (rx_ring->next_to_use != i)
1240 i40e_release_rx_desc(rx_ring, i);
1244 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
1245 * @rx_ring: ring to place buffers on
1246 * @cleaned_count: number of buffers to replace
1248 void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
1250 u16 i = rx_ring->next_to_use;
1251 union i40e_rx_desc *rx_desc;
1252 struct i40e_rx_buffer *bi;
1253 struct sk_buff *skb;
1255 /* do nothing if no valid netdev defined */
1256 if (!rx_ring->netdev || !cleaned_count)
1259 while (cleaned_count--) {
1260 rx_desc = I40E_RX_DESC(rx_ring, i);
1261 bi = &rx_ring->rx_bi[i];
1265 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1266 rx_ring->rx_buf_len);
1268 rx_ring->rx_stats.alloc_buff_failed++;
1271 /* initialize queue mapping */
1272 skb_record_rx_queue(skb, rx_ring->queue_index);
1277 bi->dma = dma_map_single(rx_ring->dev,
1279 rx_ring->rx_buf_len,
1281 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1282 rx_ring->rx_stats.alloc_buff_failed++;
1288 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1289 rx_desc->read.hdr_addr = 0;
1291 if (i == rx_ring->count)
1296 if (rx_ring->next_to_use != i)
1297 i40e_release_rx_desc(rx_ring, i);
1301 * i40e_receive_skb - Send a completed packet up the stack
1302 * @rx_ring: rx ring in play
1303 * @skb: packet to send up
1304 * @vlan_tag: vlan tag for packet
1306 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1307 struct sk_buff *skb, u16 vlan_tag)
1309 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1311 if (vlan_tag & VLAN_VID_MASK)
1312 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1314 napi_gro_receive(&q_vector->napi, skb);
1318 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1319 * @vsi: the VSI we care about
1320 * @skb: skb currently being received and modified
1321 * @rx_status: status value of last descriptor in packet
1322 * @rx_error: error value of last descriptor in packet
1323 * @rx_ptype: ptype value of last descriptor in packet
1325 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1326 struct sk_buff *skb,
1331 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1332 bool ipv4 = false, ipv6 = false;
1333 bool ipv4_tunnel, ipv6_tunnel;
1338 ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1339 (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1340 ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1341 (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
1343 skb->ip_summed = CHECKSUM_NONE;
1345 /* Rx csum enabled and ip headers found? */
1346 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1349 /* did the hardware decode the packet and checksum? */
1350 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1353 /* both known and outer_ip must be set for the below code to work */
1354 if (!(decoded.known && decoded.outer_ip))
1357 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1358 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1360 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1361 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1365 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1366 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1369 /* likely incorrect csum if alternate IP extension headers found */
1371 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1372 /* don't increment checksum err here, non-fatal err */
1375 /* there was some L4 error, count error and punt packet to the stack */
1376 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1379 /* handle packets that were not able to be checksummed due
1380 * to arrival speed, in this case the stack can compute
1383 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1386 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1387 * it in the driver, hardware does not do it for us.
1388 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1389 * so the total length of IPv4 header is IHL*4 bytes
1390 * The UDP_0 bit *may* bet set if the *inner* header is UDP
1392 if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
1394 skb->transport_header = skb->mac_header +
1395 sizeof(struct ethhdr) +
1396 (ip_hdr(skb)->ihl * 4);
1398 /* Add 4 bytes for VLAN tagged packets */
1399 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
1400 skb->protocol == htons(ETH_P_8021AD))
1403 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
1404 (udp_hdr(skb)->check != 0)) {
1405 rx_udp_csum = udp_csum(skb);
1407 csum = csum_tcpudp_magic(
1408 iph->saddr, iph->daddr,
1409 (skb->len - skb_transport_offset(skb)),
1410 IPPROTO_UDP, rx_udp_csum);
1412 if (udp_hdr(skb)->check != csum)
1415 } /* else its GRE and so no outer UDP header */
1418 skb->ip_summed = CHECKSUM_UNNECESSARY;
1419 skb->csum_level = ipv4_tunnel || ipv6_tunnel;
1424 vsi->back->hw_csum_rx_error++;
1428 * i40e_ptype_to_htype - get a hash type
1429 * @ptype: the ptype value from the descriptor
1431 * Returns a hash type to be used by skb_set_hash
1433 static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
1435 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1438 return PKT_HASH_TYPE_NONE;
1440 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1441 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1442 return PKT_HASH_TYPE_L4;
1443 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1444 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1445 return PKT_HASH_TYPE_L3;
1447 return PKT_HASH_TYPE_L2;
1451 * i40e_rx_hash - set the hash value in the skb
1452 * @ring: descriptor ring
1453 * @rx_desc: specific descriptor
1455 static inline void i40e_rx_hash(struct i40e_ring *ring,
1456 union i40e_rx_desc *rx_desc,
1457 struct sk_buff *skb,
1461 const __le64 rss_mask =
1462 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1463 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1465 if (ring->netdev->features & NETIF_F_RXHASH)
1468 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1469 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1470 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1475 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
1476 * @rx_ring: rx ring to clean
1477 * @budget: how many cleans we're allowed
1479 * Returns true if there's any budget left (e.g. the clean is finished)
1481 static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
1483 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1484 u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1485 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1486 const int current_node = numa_mem_id();
1487 struct i40e_vsi *vsi = rx_ring->vsi;
1488 u16 i = rx_ring->next_to_clean;
1489 union i40e_rx_desc *rx_desc;
1490 u32 rx_error, rx_status;
1498 struct i40e_rx_buffer *rx_bi;
1499 struct sk_buff *skb;
1501 /* return some buffers to hardware, one at a time is too slow */
1502 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1503 i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
1507 i = rx_ring->next_to_clean;
1508 rx_desc = I40E_RX_DESC(rx_ring, i);
1509 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1510 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1511 I40E_RXD_QW1_STATUS_SHIFT;
1513 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1516 /* This memory barrier is needed to keep us from reading
1517 * any other fields out of the rx_desc until we know the
1521 if (i40e_rx_is_programming_status(qword)) {
1522 i40e_clean_programming_status(rx_ring, rx_desc);
1523 I40E_RX_INCREMENT(rx_ring, i);
1526 rx_bi = &rx_ring->rx_bi[i];
1529 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1530 rx_ring->rx_hdr_len);
1532 rx_ring->rx_stats.alloc_buff_failed++;
1536 /* initialize queue mapping */
1537 skb_record_rx_queue(skb, rx_ring->queue_index);
1538 /* we are reusing so sync this buffer for CPU use */
1539 dma_sync_single_range_for_cpu(rx_ring->dev,
1542 rx_ring->rx_hdr_len,
1545 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1546 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1547 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1548 I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1549 rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1550 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
1552 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1553 I40E_RXD_QW1_ERROR_SHIFT;
1554 rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1555 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1557 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1558 I40E_RXD_QW1_PTYPE_SHIFT;
1559 prefetch(rx_bi->page);
1562 if (rx_hbo || rx_sph) {
1566 len = I40E_RX_HDR_SIZE;
1568 len = rx_header_len;
1569 memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
1570 } else if (skb->len == 0) {
1573 len = (rx_packet_len > skb_headlen(skb) ?
1574 skb_headlen(skb) : rx_packet_len);
1575 memcpy(__skb_put(skb, len),
1576 rx_bi->page + rx_bi->page_offset,
1578 rx_bi->page_offset += len;
1579 rx_packet_len -= len;
1582 /* Get the rest of the data if this was a header split */
1583 if (rx_packet_len) {
1584 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1589 skb->len += rx_packet_len;
1590 skb->data_len += rx_packet_len;
1591 skb->truesize += rx_packet_len;
1593 if ((page_count(rx_bi->page) == 1) &&
1594 (page_to_nid(rx_bi->page) == current_node))
1595 get_page(rx_bi->page);
1599 dma_unmap_page(rx_ring->dev,
1603 rx_bi->page_dma = 0;
1605 I40E_RX_INCREMENT(rx_ring, i);
1608 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1609 struct i40e_rx_buffer *next_buffer;
1611 next_buffer = &rx_ring->rx_bi[i];
1612 next_buffer->skb = skb;
1613 rx_ring->rx_stats.non_eop_descs++;
1617 /* ERR_MASK will only have valid bits if EOP set */
1618 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1619 dev_kfree_skb_any(skb);
1623 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1625 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1626 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1627 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1628 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1629 rx_ring->last_rx_timestamp = jiffies;
1632 /* probably a little skewed due to removing CRC */
1633 total_rx_bytes += skb->len;
1636 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1638 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1640 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1641 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1644 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1645 dev_kfree_skb_any(skb);
1649 skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
1650 i40e_receive_skb(rx_ring, skb, vlan_tag);
1652 rx_desc->wb.qword1.status_error_len = 0;
1654 } while (likely(total_rx_packets < budget));
1656 u64_stats_update_begin(&rx_ring->syncp);
1657 rx_ring->stats.packets += total_rx_packets;
1658 rx_ring->stats.bytes += total_rx_bytes;
1659 u64_stats_update_end(&rx_ring->syncp);
1660 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1661 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1663 return total_rx_packets;
1667 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
1668 * @rx_ring: rx ring to clean
1669 * @budget: how many cleans we're allowed
1671 * Returns number of packets cleaned
1673 static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1675 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1676 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1677 struct i40e_vsi *vsi = rx_ring->vsi;
1678 union i40e_rx_desc *rx_desc;
1679 u32 rx_error, rx_status;
1686 struct i40e_rx_buffer *rx_bi;
1687 struct sk_buff *skb;
1689 /* return some buffers to hardware, one at a time is too slow */
1690 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1691 i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
1695 i = rx_ring->next_to_clean;
1696 rx_desc = I40E_RX_DESC(rx_ring, i);
1697 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1698 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1699 I40E_RXD_QW1_STATUS_SHIFT;
1701 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1704 /* This memory barrier is needed to keep us from reading
1705 * any other fields out of the rx_desc until we know the
1710 if (i40e_rx_is_programming_status(qword)) {
1711 i40e_clean_programming_status(rx_ring, rx_desc);
1712 I40E_RX_INCREMENT(rx_ring, i);
1715 rx_bi = &rx_ring->rx_bi[i];
1717 prefetch(skb->data);
1719 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1720 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1722 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1723 I40E_RXD_QW1_ERROR_SHIFT;
1724 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1726 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1727 I40E_RXD_QW1_PTYPE_SHIFT;
1731 /* Get the header and possibly the whole packet
1732 * If this is an skb from previous receive dma will be 0
1734 skb_put(skb, rx_packet_len);
1735 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
1739 I40E_RX_INCREMENT(rx_ring, i);
1742 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1743 rx_ring->rx_stats.non_eop_descs++;
1747 /* ERR_MASK will only have valid bits if EOP set */
1748 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1749 dev_kfree_skb_any(skb);
1753 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1754 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1755 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1756 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1757 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1758 rx_ring->last_rx_timestamp = jiffies;
1761 /* probably a little skewed due to removing CRC */
1762 total_rx_bytes += skb->len;
1765 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1767 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1769 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1770 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1773 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1774 dev_kfree_skb_any(skb);
1778 i40e_receive_skb(rx_ring, skb, vlan_tag);
1780 rx_desc->wb.qword1.status_error_len = 0;
1781 } while (likely(total_rx_packets < budget));
1783 u64_stats_update_begin(&rx_ring->syncp);
1784 rx_ring->stats.packets += total_rx_packets;
1785 rx_ring->stats.bytes += total_rx_bytes;
1786 u64_stats_update_end(&rx_ring->syncp);
1787 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1788 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1790 return total_rx_packets;
1793 static u32 i40e_buildreg_itr(const int type, const u16 itr)
1797 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1798 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1799 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1800 (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1805 /* a small macro to shorten up some long lines */
1806 #define INTREG I40E_PFINT_DYN_CTLN
1809 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1810 * @vsi: the VSI we care about
1811 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1814 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1815 struct i40e_q_vector *q_vector)
1817 struct i40e_hw *hw = &vsi->back->hw;
1818 bool rx = false, tx = false;
1822 vector = (q_vector->v_idx + vsi->base_vector);
1824 /* avoid dynamic calculation if in countdown mode OR if
1825 * all dynamic is disabled
1827 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
1829 if (q_vector->itr_countdown > 0 ||
1830 (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
1831 !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
1835 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
1836 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
1837 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
1840 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
1841 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
1842 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
1846 /* get the higher of the two ITR adjustments and
1847 * use the same value for both ITR registers
1848 * when in adaptive mode (Rx and/or Tx)
1850 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
1852 q_vector->tx.itr = q_vector->rx.itr = itr;
1853 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
1855 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
1859 /* only need to enable the interrupt once, but need
1860 * to possibly update both ITR values
1863 /* set the INTENA_MSK_MASK so that this first write
1864 * won't actually enable the interrupt, instead just
1865 * updating the ITR (it's bit 31 PF and VF)
1868 /* don't check _DOWN because interrupt isn't being enabled */
1869 wr32(hw, INTREG(vector - 1), rxval);
1873 if (!test_bit(__I40E_DOWN, &vsi->state))
1874 wr32(hw, INTREG(vector - 1), txval);
1876 if (q_vector->itr_countdown)
1877 q_vector->itr_countdown--;
1879 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1884 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1885 * @napi: napi struct with our devices info in it
1886 * @budget: amount of work driver is allowed to do this pass, in packets
1888 * This function will clean all queues associated with a q_vector.
1890 * Returns the amount of work done
1892 int i40e_napi_poll(struct napi_struct *napi, int budget)
1894 struct i40e_q_vector *q_vector =
1895 container_of(napi, struct i40e_q_vector, napi);
1896 struct i40e_vsi *vsi = q_vector->vsi;
1897 struct i40e_ring *ring;
1898 bool clean_complete = true;
1899 bool arm_wb = false;
1900 int budget_per_ring;
1903 if (test_bit(__I40E_DOWN, &vsi->state)) {
1904 napi_complete(napi);
1908 /* Since the actual Tx work is minimal, we can give the Tx a larger
1909 * budget and be more aggressive about cleaning up the Tx descriptors.
1911 i40e_for_each_ring(ring, q_vector->tx) {
1912 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1913 arm_wb |= ring->arm_wb;
1914 ring->arm_wb = false;
1917 /* Handle case where we are called by netpoll with a budget of 0 */
1921 /* We attempt to distribute budget to each Rx queue fairly, but don't
1922 * allow the budget to go below 1 because that would exit polling early.
1924 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1926 i40e_for_each_ring(ring, q_vector->rx) {
1929 if (ring_is_ps_enabled(ring))
1930 cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
1932 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
1934 work_done += cleaned;
1935 /* if we didn't clean as many as budgeted, we must be done */
1936 clean_complete &= (budget_per_ring != cleaned);
1939 /* If work not completed, return budget and polling will return */
1940 if (!clean_complete) {
1943 i40e_force_wb(vsi, q_vector);
1947 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
1948 q_vector->arm_wb_state = false;
1950 /* Work is done so exit the polling mode and re-enable the interrupt */
1951 napi_complete_done(napi, work_done);
1952 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1953 i40e_update_enable_itr(vsi, q_vector);
1954 } else { /* Legacy mode */
1955 struct i40e_hw *hw = &vsi->back->hw;
1956 /* We re-enable the queue 0 cause, but
1957 * don't worry about dynamic_enable
1958 * because we left it on for the other
1959 * possible interrupts during napi
1961 u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
1962 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1964 wr32(hw, I40E_QINT_RQCTL(0), qval);
1965 qval = rd32(hw, I40E_QINT_TQCTL(0)) |
1966 I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1967 wr32(hw, I40E_QINT_TQCTL(0), qval);
1968 i40e_irq_dynamic_enable_icr0(vsi->back);
1974 * i40e_atr - Add a Flow Director ATR filter
1975 * @tx_ring: ring to add programming descriptor to
1977 * @tx_flags: send tx flags
1978 * @protocol: wire protocol
1980 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1981 u32 tx_flags, __be16 protocol)
1983 struct i40e_filter_program_desc *fdir_desc;
1984 struct i40e_pf *pf = tx_ring->vsi->back;
1986 unsigned char *network;
1988 struct ipv6hdr *ipv6;
1992 u32 flex_ptype, dtype_cmd;
1995 /* make sure ATR is enabled */
1996 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
1999 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2002 /* if sampling is disabled do nothing */
2003 if (!tx_ring->atr_sample_rate)
2006 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2009 if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
2010 /* snag network header to get L4 type and address */
2011 hdr.network = skb_network_header(skb);
2013 /* Currently only IPv4/IPv6 with TCP is supported
2014 * access ihl as u8 to avoid unaligned access on ia64
2016 if (tx_flags & I40E_TX_FLAGS_IPV4)
2017 hlen = (hdr.network[0] & 0x0F) << 2;
2018 else if (protocol == htons(ETH_P_IPV6))
2019 hlen = sizeof(struct ipv6hdr);
2023 hdr.network = skb_inner_network_header(skb);
2024 hlen = skb_inner_network_header_len(skb);
2027 /* Currently only IPv4/IPv6 with TCP is supported
2028 * Note: tx_flags gets modified to reflect inner protocols in
2029 * tx_enable_csum function if encap is enabled.
2031 if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
2032 (hdr.ipv4->protocol != IPPROTO_TCP))
2034 else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
2035 (hdr.ipv6->nexthdr != IPPROTO_TCP))
2038 th = (struct tcphdr *)(hdr.network + hlen);
2040 /* Due to lack of space, no more new filters can be programmed */
2041 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2043 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
2044 /* HW ATR eviction will take care of removing filters on FIN
2047 if (th->fin || th->rst)
2051 tx_ring->atr_count++;
2053 /* sample on all syn/fin/rst packets or once every atr sample rate */
2057 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2060 tx_ring->atr_count = 0;
2062 /* grab the next descriptor */
2063 i = tx_ring->next_to_use;
2064 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2067 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2069 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2070 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2071 flex_ptype |= (protocol == htons(ETH_P_IP)) ?
2072 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2073 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2074 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2075 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2077 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2079 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2081 dtype_cmd |= (th->fin || th->rst) ?
2082 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2083 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2084 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2085 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2087 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2088 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2090 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2091 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2093 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2094 if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
2096 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2097 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2098 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2101 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2102 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2103 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2105 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
2106 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2108 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2109 fdir_desc->rsvd = cpu_to_le32(0);
2110 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2111 fdir_desc->fd_id = cpu_to_le32(0);
2115 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2117 * @tx_ring: ring to send buffer on
2118 * @flags: the tx flags to be set
2120 * Checks the skb and set up correspondingly several generic transmit flags
2121 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2123 * Returns error code indicate the frame should be dropped upon error and the
2124 * otherwise returns 0 to indicate the flags has been set properly.
2127 inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2128 struct i40e_ring *tx_ring,
2131 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2132 struct i40e_ring *tx_ring,
2136 __be16 protocol = skb->protocol;
2139 if (protocol == htons(ETH_P_8021Q) &&
2140 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2141 /* When HW VLAN acceleration is turned off by the user the
2142 * stack sets the protocol to 8021q so that the driver
2143 * can take any steps required to support the SW only
2144 * VLAN handling. In our case the driver doesn't need
2145 * to take any further steps so just set the protocol
2146 * to the encapsulated ethertype.
2148 skb->protocol = vlan_get_protocol(skb);
2152 /* if we have a HW VLAN tag being added, default to the HW one */
2153 if (skb_vlan_tag_present(skb)) {
2154 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2155 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2156 /* else if it is a SW VLAN, check the next protocol and store the tag */
2157 } else if (protocol == htons(ETH_P_8021Q)) {
2158 struct vlan_hdr *vhdr, _vhdr;
2160 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2164 protocol = vhdr->h_vlan_encapsulated_proto;
2165 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2166 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2169 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2172 /* Insert 802.1p priority into VLAN header */
2173 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2174 (skb->priority != TC_PRIO_CONTROL)) {
2175 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2176 tx_flags |= (skb->priority & 0x7) <<
2177 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2178 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2179 struct vlan_ethhdr *vhdr;
2182 rc = skb_cow_head(skb, 0);
2185 vhdr = (struct vlan_ethhdr *)skb->data;
2186 vhdr->h_vlan_TCI = htons(tx_flags >>
2187 I40E_TX_FLAGS_VLAN_SHIFT);
2189 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2199 * i40e_tso - set up the tso context descriptor
2200 * @tx_ring: ptr to the ring to send
2201 * @skb: ptr to the skb we're sending
2202 * @hdr_len: ptr to the size of the packet header
2203 * @cd_type_cmd_tso_mss: ptr to u64 object
2204 * @cd_tunneling: ptr to context descriptor bits
2206 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2208 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
2209 u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
2212 u32 cd_cmd, cd_tso_len, cd_mss;
2213 struct ipv6hdr *ipv6h;
2214 struct tcphdr *tcph;
2219 if (!skb_is_gso(skb))
2222 err = skb_cow_head(skb, 0);
2226 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
2227 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
2229 if (iph->version == 4) {
2230 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2233 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2235 } else if (ipv6h->version == 6) {
2236 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2237 ipv6h->payload_len = 0;
2238 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
2242 l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
2243 *hdr_len = (skb->encapsulation
2244 ? (skb_inner_transport_header(skb) - skb->data)
2245 : skb_transport_offset(skb)) + l4len;
2247 /* find the field values */
2248 cd_cmd = I40E_TX_CTX_DESC_TSO;
2249 cd_tso_len = skb->len - *hdr_len;
2250 cd_mss = skb_shinfo(skb)->gso_size;
2251 *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2253 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2254 ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2259 * i40e_tsyn - set up the tsyn context descriptor
2260 * @tx_ring: ptr to the ring to send
2261 * @skb: ptr to the skb we're sending
2262 * @tx_flags: the collected send information
2263 * @cd_type_cmd_tso_mss: ptr to u64 object
2265 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2267 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2268 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2272 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2275 /* Tx timestamps cannot be sampled when doing TSO */
2276 if (tx_flags & I40E_TX_FLAGS_TSO)
2279 /* only timestamp the outbound packet if the user has requested it and
2280 * we are not already transmitting a packet to be timestamped
2282 pf = i40e_netdev_to_pf(tx_ring->netdev);
2283 if (!(pf->flags & I40E_FLAG_PTP))
2287 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
2288 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2289 pf->ptp_tx_skb = skb_get(skb);
2294 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2295 I40E_TXD_CTX_QW1_CMD_SHIFT;
2301 * i40e_tx_enable_csum - Enable Tx checksum offloads
2303 * @tx_flags: pointer to Tx flags currently set
2304 * @td_cmd: Tx descriptor command bits to set
2305 * @td_offset: Tx descriptor header offsets to set
2306 * @tx_ring: Tx descriptor ring
2307 * @cd_tunneling: ptr to context desc bits
2309 static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2310 u32 *td_cmd, u32 *td_offset,
2311 struct i40e_ring *tx_ring,
2314 struct ipv6hdr *this_ipv6_hdr;
2315 unsigned int this_tcp_hdrlen;
2316 struct iphdr *this_ip_hdr;
2317 u32 network_hdr_len;
2319 struct udphdr *oudph;
2323 if (skb->encapsulation) {
2324 switch (ip_hdr(skb)->protocol) {
2326 oudph = udp_hdr(skb);
2328 l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
2329 *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
2332 l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
2337 network_hdr_len = skb_inner_network_header_len(skb);
2338 this_ip_hdr = inner_ip_hdr(skb);
2339 this_ipv6_hdr = inner_ipv6_hdr(skb);
2340 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
2342 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2343 if (*tx_flags & I40E_TX_FLAGS_TSO) {
2344 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
2345 ip_hdr(skb)->check = 0;
2348 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2350 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2351 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
2352 if (*tx_flags & I40E_TX_FLAGS_TSO)
2353 ip_hdr(skb)->check = 0;
2356 /* Now set the ctx descriptor fields */
2357 *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
2358 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
2360 ((skb_inner_network_offset(skb) -
2361 skb_transport_offset(skb)) >> 1) <<
2362 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2363 if (this_ip_hdr->version == 6) {
2364 *tx_flags &= ~I40E_TX_FLAGS_IPV4;
2365 *tx_flags |= I40E_TX_FLAGS_IPV6;
2367 if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
2368 (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
2369 (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
2370 oudph->check = ~csum_tcpudp_magic(oiph->saddr,
2372 (skb->len - skb_transport_offset(skb)),
2374 *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2377 network_hdr_len = skb_network_header_len(skb);
2378 this_ip_hdr = ip_hdr(skb);
2379 this_ipv6_hdr = ipv6_hdr(skb);
2380 this_tcp_hdrlen = tcp_hdrlen(skb);
2383 /* Enable IP checksum offloads */
2384 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2385 l4_hdr = this_ip_hdr->protocol;
2386 /* the stack computes the IP header already, the only time we
2387 * need the hardware to recompute it is in the case of TSO.
2389 if (*tx_flags & I40E_TX_FLAGS_TSO) {
2390 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
2391 this_ip_hdr->check = 0;
2393 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
2395 /* Now set the td_offset for IP header length */
2396 *td_offset = (network_hdr_len >> 2) <<
2397 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2398 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2399 l4_hdr = this_ipv6_hdr->nexthdr;
2400 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2401 /* Now set the td_offset for IP header length */
2402 *td_offset = (network_hdr_len >> 2) <<
2403 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2405 /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
2406 *td_offset |= (skb_network_offset(skb) >> 1) <<
2407 I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2409 /* Enable L4 checksum offloads */
2412 /* enable checksum offloads */
2413 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2414 *td_offset |= (this_tcp_hdrlen >> 2) <<
2415 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2418 /* enable SCTP checksum offload */
2419 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2420 *td_offset |= (sizeof(struct sctphdr) >> 2) <<
2421 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2424 /* enable UDP checksum offload */
2425 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2426 *td_offset |= (sizeof(struct udphdr) >> 2) <<
2427 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2435 * i40e_create_tx_ctx Build the Tx context descriptor
2436 * @tx_ring: ring to create the descriptor on
2437 * @cd_type_cmd_tso_mss: Quad Word 1
2438 * @cd_tunneling: Quad Word 0 - bits 0-31
2439 * @cd_l2tag2: Quad Word 0 - bits 32-63
2441 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2442 const u64 cd_type_cmd_tso_mss,
2443 const u32 cd_tunneling, const u32 cd_l2tag2)
2445 struct i40e_tx_context_desc *context_desc;
2446 int i = tx_ring->next_to_use;
2448 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2449 !cd_tunneling && !cd_l2tag2)
2452 /* grab the next descriptor */
2453 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2456 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2458 /* cpu_to_le32 and assign to struct fields */
2459 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2460 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2461 context_desc->rsvd = cpu_to_le16(0);
2462 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2466 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2467 * @tx_ring: the ring to be checked
2468 * @size: the size buffer we want to assure is available
2470 * Returns -EBUSY if a stop is needed, else 0
2472 static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2474 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2475 /* Memory barrier before checking head and tail */
2478 /* Check again in a case another CPU has just made room available. */
2479 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2482 /* A reprieve! - use start_queue because it doesn't call schedule */
2483 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2484 ++tx_ring->tx_stats.restart_queue;
2489 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
2490 * @tx_ring: the ring to be checked
2491 * @size: the size buffer we want to assure is available
2493 * Returns 0 if stop is not needed
2496 inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2498 static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2501 if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
2503 return __i40e_maybe_stop_tx(tx_ring, size);
2507 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2509 * @tx_flags: collected send information
2511 * Note: Our HW can't scatter-gather more than 8 fragments to build
2512 * a packet on the wire and so we need to figure out the cases where we
2513 * need to linearize the skb.
2515 static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
2517 struct skb_frag_struct *frag;
2518 bool linearize = false;
2519 unsigned int size = 0;
2523 num_frags = skb_shinfo(skb)->nr_frags;
2524 gso_segs = skb_shinfo(skb)->gso_segs;
2526 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
2529 if (num_frags < (I40E_MAX_BUFFER_TXD))
2530 goto linearize_chk_done;
2531 /* try the simple math, if we have too many frags per segment */
2532 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
2533 I40E_MAX_BUFFER_TXD) {
2535 goto linearize_chk_done;
2537 frag = &skb_shinfo(skb)->frags[0];
2538 /* we might still have more fragments per segment */
2540 size += skb_frag_size(frag);
2542 if ((size >= skb_shinfo(skb)->gso_size) &&
2543 (j < I40E_MAX_BUFFER_TXD)) {
2544 size = (size % skb_shinfo(skb)->gso_size);
2547 if (j == I40E_MAX_BUFFER_TXD) {
2552 } while (num_frags);
2554 if (num_frags >= I40E_MAX_BUFFER_TXD)
2563 * i40e_tx_map - Build the Tx descriptor
2564 * @tx_ring: ring to send buffer on
2566 * @first: first buffer info buffer to use
2567 * @tx_flags: collected send information
2568 * @hdr_len: size of the packet header
2569 * @td_cmd: the command field in the descriptor
2570 * @td_offset: offset for checksum or crc
2573 inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2574 struct i40e_tx_buffer *first, u32 tx_flags,
2575 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2577 static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2578 struct i40e_tx_buffer *first, u32 tx_flags,
2579 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2582 unsigned int data_len = skb->data_len;
2583 unsigned int size = skb_headlen(skb);
2584 struct skb_frag_struct *frag;
2585 struct i40e_tx_buffer *tx_bi;
2586 struct i40e_tx_desc *tx_desc;
2587 u16 i = tx_ring->next_to_use;
2592 bool tail_bump = true;
2595 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2596 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2597 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2598 I40E_TX_FLAGS_VLAN_SHIFT;
2601 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2602 gso_segs = skb_shinfo(skb)->gso_segs;
2606 /* multiply data chunks by size of headers */
2607 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2608 first->gso_segs = gso_segs;
2610 first->tx_flags = tx_flags;
2612 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2614 tx_desc = I40E_TX_DESC(tx_ring, i);
2617 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2618 if (dma_mapping_error(tx_ring->dev, dma))
2621 /* record length, and DMA address */
2622 dma_unmap_len_set(tx_bi, len, size);
2623 dma_unmap_addr_set(tx_bi, dma, dma);
2625 tx_desc->buffer_addr = cpu_to_le64(dma);
2627 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2628 tx_desc->cmd_type_offset_bsz =
2629 build_ctob(td_cmd, td_offset,
2630 I40E_MAX_DATA_PER_TXD, td_tag);
2636 if (i == tx_ring->count) {
2637 tx_desc = I40E_TX_DESC(tx_ring, 0);
2641 dma += I40E_MAX_DATA_PER_TXD;
2642 size -= I40E_MAX_DATA_PER_TXD;
2644 tx_desc->buffer_addr = cpu_to_le64(dma);
2647 if (likely(!data_len))
2650 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2657 if (i == tx_ring->count) {
2658 tx_desc = I40E_TX_DESC(tx_ring, 0);
2662 size = skb_frag_size(frag);
2665 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2668 tx_bi = &tx_ring->tx_bi[i];
2671 /* set next_to_watch value indicating a packet is present */
2672 first->next_to_watch = tx_desc;
2675 if (i == tx_ring->count)
2678 tx_ring->next_to_use = i;
2680 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
2681 tx_ring->queue_index),
2683 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
2685 /* Algorithm to optimize tail and RS bit setting:
2686 * if xmit_more is supported
2687 * if xmit_more is true
2688 * do not update tail and do not mark RS bit.
2689 * if xmit_more is false and last xmit_more was false
2690 * if every packet spanned less than 4 desc
2691 * then set RS bit on 4th packet and update tail
2694 * update tail and set RS bit on every packet.
2695 * if xmit_more is false and last_xmit_more was true
2696 * update tail and set RS bit.
2698 * Optimization: wmb to be issued only in case of tail update.
2699 * Also optimize the Descriptor WB path for RS bit with the same
2702 * Note: If there are less than 4 packets
2703 * pending and interrupts were disabled the service task will
2704 * trigger a force WB.
2706 if (skb->xmit_more &&
2707 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2708 tx_ring->queue_index))) {
2709 tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2711 } else if (!skb->xmit_more &&
2712 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2713 tx_ring->queue_index)) &&
2714 (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
2715 (tx_ring->packet_stride < WB_STRIDE) &&
2716 (desc_count < WB_STRIDE)) {
2717 tx_ring->packet_stride++;
2719 tx_ring->packet_stride = 0;
2720 tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2724 tx_ring->packet_stride = 0;
2726 tx_desc->cmd_type_offset_bsz =
2727 build_ctob(td_cmd, td_offset, size, td_tag) |
2728 cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
2729 I40E_TX_DESC_CMD_EOP) <<
2730 I40E_TXD_QW1_CMD_SHIFT);
2732 /* notify HW of packet */
2734 prefetchw(tx_desc + 1);
2737 /* Force memory writes to complete before letting h/w
2738 * know there are new descriptors to fetch. (Only
2739 * applicable for weak-ordered memory model archs,
2743 writel(i, tx_ring->tail);
2749 dev_info(tx_ring->dev, "TX DMA map failed\n");
2751 /* clear dma mappings for failed tx_bi map */
2753 tx_bi = &tx_ring->tx_bi[i];
2754 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
2762 tx_ring->next_to_use = i;
2766 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
2768 * @tx_ring: ring to send buffer on
2770 * Returns number of data descriptors needed for this skb. Returns 0 to indicate
2771 * there is not enough descriptors available in this ring since we need at least
2775 inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
2776 struct i40e_ring *tx_ring)
2778 static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
2779 struct i40e_ring *tx_ring)
2785 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2786 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2787 * + 4 desc gap to avoid the cache line where head is,
2788 * + 1 desc for context descriptor,
2789 * otherwise try next time
2791 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2792 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2794 count += TXD_USE_COUNT(skb_headlen(skb));
2795 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2796 tx_ring->tx_stats.tx_busy++;
2803 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2805 * @tx_ring: ring to send buffer on
2807 * Returns NETDEV_TX_OK if sent, else an error code
2809 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2810 struct i40e_ring *tx_ring)
2812 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2813 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2814 struct i40e_tx_buffer *first;
2823 if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
2824 return NETDEV_TX_BUSY;
2826 /* prepare the xmit flags */
2827 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2830 /* obtain protocol of skb */
2831 protocol = vlan_get_protocol(skb);
2833 /* record the location of the first descriptor for this packet */
2834 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2836 /* setup IPv4/IPv6 offloads */
2837 if (protocol == htons(ETH_P_IP))
2838 tx_flags |= I40E_TX_FLAGS_IPV4;
2839 else if (protocol == htons(ETH_P_IPV6))
2840 tx_flags |= I40E_TX_FLAGS_IPV6;
2842 tso = i40e_tso(tx_ring, skb, &hdr_len,
2843 &cd_type_cmd_tso_mss, &cd_tunneling);
2848 tx_flags |= I40E_TX_FLAGS_TSO;
2850 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2853 tx_flags |= I40E_TX_FLAGS_TSYN;
2855 if (i40e_chk_linearize(skb, tx_flags)) {
2856 if (skb_linearize(skb))
2858 tx_ring->tx_stats.tx_linearize++;
2860 skb_tx_timestamp(skb);
2862 /* always enable CRC insertion offload */
2863 td_cmd |= I40E_TX_DESC_CMD_ICRC;
2865 /* Always offload the checksum, since it's in the data descriptor */
2866 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2867 tx_flags |= I40E_TX_FLAGS_CSUM;
2869 i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2870 tx_ring, &cd_tunneling);
2873 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2874 cd_tunneling, cd_l2tag2);
2876 /* Add Flow Director ATR if it's enabled.
2878 * NOTE: this must always be directly before the data descriptor.
2880 i40e_atr(tx_ring, skb, tx_flags, protocol);
2882 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2885 return NETDEV_TX_OK;
2888 dev_kfree_skb_any(skb);
2889 return NETDEV_TX_OK;
2893 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2895 * @netdev: network interface device structure
2897 * Returns NETDEV_TX_OK if sent, else an error code
2899 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2901 struct i40e_netdev_priv *np = netdev_priv(netdev);
2902 struct i40e_vsi *vsi = np->vsi;
2903 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
2905 /* hardware can't handle really short frames, hardware padding works
2908 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
2909 return NETDEV_TX_OK;
2911 return i40e_xmit_frame_ring(skb, tx_ring);