// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <rte_ethdev.h>
#include <rte_version.h>

#include "rx_pkt.h"
#include "tx_pkt.h"
#include "task_base.h"
#include "prox_assert.h"
#include "log.h"
#include "mbuf_utils.h"
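
/* Each task buffers egress packets per output in a small circular
   array (tbase->ws_mbuf): idx[out].prod/cons are free-running 16-bit
   indices and WS_MBUF_MASK wraps them onto the array. The helpers
   below fill that buffer; the tx_pkt_* and flush_queues_* functions
   further down drain it towards a NIC queue or an rte_ring. */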
static void buf_pkt_single(struct task_base *tbase, struct rte_mbuf *mbuf, const uint8_t out)
{
	const uint16_t prod = tbase->ws_mbuf->idx[out].prod++;
	tbase->ws_mbuf->mbuf[out][prod & WS_MBUF_MASK] = mbuf;
}
static inline void buf_pkt_all(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
	for (uint16_t j = 0; j < n_pkts; ++j) {
		if (unlikely(out[j] >= OUT_HANDLED)) {
			rte_pktmbuf_free(mbufs[j]);
			if (out[j] == OUT_HANDLED)
				TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, 1);
			else
				TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
		}
		else {
			buf_pkt_single(tbase, mbufs[j], out[j]);
		}
	}
}
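
/* Convention for the out[] array throughout this file: values below
   OUT_HANDLED select an output port/ring; OUT_HANDLED means the task
   consumed the packet itself (counted as "handled"); higher values
   (OUT_DISCARD) mean the packet is dropped on purpose (counted as
   "discarded"). */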
/* The following helper functions also report stats. Therefore we
   need to pass the task_base struct. They return the number of
   packets that could not be sent (always 0 for the no_drop flavors,
   which block until everything has been sent). */
static inline int txhw_drop(const struct port_queue *port_queue, struct rte_mbuf **mbufs, uint16_t n_pkts, __attribute__((unused)) struct task_base *tbase)
{
	uint16_t ntx;
	int ret;

	/* TX vector mode can't transmit more than 32 packets */
	if (n_pkts > MAX_PMD_TX) {
		ntx = rte_eth_tx_burst(port_queue->port, port_queue->queue, mbufs, MAX_PMD_TX);
		ntx += rte_eth_tx_burst(port_queue->port, port_queue->queue, mbufs + ntx, n_pkts - ntx);
	} else {
		ntx = rte_eth_tx_burst(port_queue->port, port_queue->queue, mbufs, n_pkts);
	}

	TASK_STATS_ADD_TX(&tbase->aux->stats, ntx);
	ret = n_pkts - ntx;
	if (ntx < n_pkts) {
		TASK_STATS_ADD_DROP_TX_FAIL(&tbase->aux->stats, n_pkts - ntx);
		if (tbase->tx_pkt == tx_pkt_bw) {
			uint32_t drop_bytes = 0;
			do {
				drop_bytes += mbuf_wire_size(mbufs[ntx]);
				rte_pktmbuf_free(mbufs[ntx++]);
			} while (ntx < n_pkts);
			TASK_STATS_ADD_DROP_BYTES(&tbase->aux->stats, drop_bytes);
		}
		else {
			do {
				rte_pktmbuf_free(mbufs[ntx++]);
			} while (ntx < n_pkts);
		}
	}
	return ret;
}
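
/* no_drop flavor: keep calling rte_eth_tx_burst() on the remainder
   until the whole burst has been accepted by the PMD. This can spin
   for a while if the queue stays full, but no packet is ever freed
   without having been sent. */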
static inline int txhw_no_drop(const struct port_queue *port_queue, struct rte_mbuf **mbufs, uint16_t n_pkts, __attribute__((unused)) struct task_base *tbase)
{
	uint16_t ret;

	TASK_STATS_ADD_TX(&tbase->aux->stats, n_pkts);

	do {
		ret = rte_eth_tx_burst(port_queue->port, port_queue->queue, mbufs, n_pkts);
		mbufs += ret;
		n_pkts -= ret;
	} while (n_pkts);

	return 0;
}
static inline int ring_enq_drop(struct rte_ring *ring, struct rte_mbuf *const *mbufs, uint16_t n_pkts, __attribute__((unused)) struct task_base *tbase)
{
	int ret = 0;
	/* rte_ring_enqueue_bulk returns 0 on success, -ENOBUFS on failure (pre-17.05 API) */
	// Rings can be single or multi producer (ctrl rings are multi producer)
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
	if (unlikely(rte_ring_enqueue_bulk(ring, (void *const *)mbufs, n_pkts))) {
#else
	if (unlikely(rte_ring_enqueue_bulk(ring, (void *const *)mbufs, n_pkts, NULL) == 0)) {
#endif
		ret = n_pkts;
		if (tbase->tx_pkt == tx_pkt_bw) {
			uint32_t drop_bytes = 0;
			for (uint16_t i = 0; i < n_pkts; ++i) {
				drop_bytes += mbuf_wire_size(mbufs[i]);
				rte_pktmbuf_free(mbufs[i]);
			}
			TASK_STATS_ADD_DROP_BYTES(&tbase->aux->stats, drop_bytes);
			TASK_STATS_ADD_DROP_TX_FAIL(&tbase->aux->stats, n_pkts);
		}
		else {
			for (uint16_t i = 0; i < n_pkts; ++i)
				rte_pktmbuf_free(mbufs[i]);
			TASK_STATS_ADD_DROP_TX_FAIL(&tbase->aux->stats, n_pkts);
		}
	}
	else {
		TASK_STATS_ADD_TX(&tbase->aux->stats, n_pkts);
	}
	return ret;
}
static inline int ring_enq_no_drop(struct rte_ring *ring, struct rte_mbuf *const *mbufs, uint16_t n_pkts, __attribute__((unused)) struct task_base *tbase)
{
	/* Spin until the bulk enqueue succeeds; nothing is ever dropped. */
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
	while (rte_ring_enqueue_bulk(ring, (void *const *)mbufs, n_pkts)) {
#else
	while (rte_ring_enqueue_bulk(ring, (void *const *)mbufs, n_pkts, NULL) == 0) {
#endif
	}
	TASK_STATS_ADD_TX(&tbase->aux->stats, n_pkts);
	return 0;
}
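
/* The flush_queues_* functions run when a task has been idle for a
   while (FLAG_TX_FLUSH set): they push out whatever is still sitting
   in the per-output ws_mbuf buffers, then clear the flag. */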
void flush_queues_hw(struct task_base *tbase)
{
	uint16_t prod, cons;

	for (uint8_t i = 0; i < tbase->tx_params_hw.nb_txports; ++i) {
		prod = tbase->ws_mbuf->idx[i].prod;
		cons = tbase->ws_mbuf->idx[i].cons;

		if (prod != cons) {
			tbase->ws_mbuf->idx[i].prod = 0;
			tbase->ws_mbuf->idx[i].cons = 0;
			txhw_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase);
		}
	}

	tbase->flags &= ~FLAG_TX_FLUSH;
}
void flush_queues_sw(struct task_base *tbase)
{
	uint16_t prod, cons;

	for (uint8_t i = 0; i < tbase->tx_params_sw.nb_txrings; ++i) {
		prod = tbase->ws_mbuf->idx[i].prod;
		cons = tbase->ws_mbuf->idx[i].cons;

		if (prod != cons) {
			tbase->ws_mbuf->idx[i].prod = 0;
			tbase->ws_mbuf->idx[i].cons = 0;
			ring_enq_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase);
		}
	}

	tbase->flags &= ~FLAG_TX_FLUSH;
}
void flush_queues_no_drop_hw(struct task_base *tbase)
{
	uint16_t prod, cons;

	for (uint8_t i = 0; i < tbase->tx_params_hw.nb_txports; ++i) {
		prod = tbase->ws_mbuf->idx[i].prod;
		cons = tbase->ws_mbuf->idx[i].cons;

		if (prod != cons) {
			tbase->ws_mbuf->idx[i].prod = 0;
			tbase->ws_mbuf->idx[i].cons = 0;
			txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase);
		}
	}

	tbase->flags &= ~FLAG_TX_FLUSH;
}
void flush_queues_no_drop_sw(struct task_base *tbase)
{
	uint16_t prod, cons;

	for (uint8_t i = 0; i < tbase->tx_params_sw.nb_txrings; ++i) {
		prod = tbase->ws_mbuf->idx[i].prod;
		cons = tbase->ws_mbuf->idx[i].cons;

		if (prod != cons) {
			tbase->ws_mbuf->idx[i].prod = 0;
			tbase->ws_mbuf->idx[i].cons = 0;
			ring_enq_no_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase);
		}
	}

	tbase->flags &= ~FLAG_TX_FLUSH;
}
220 /* "try" functions try to send packets to sw/hw w/o failing or blocking;
221 They return if ring/queue is full and are used by aggregators.
222 "try" functions do not have drop/no drop flavors
223 They are only implemented in never_discard mode (as by default they
224 use only one outgoing ring. */
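/* They return how many packets were actually queued; the caller
   (e.g. an aggregator) is expected to keep the unsent tail and retry
   it later. */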
uint16_t tx_try_self(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	if (n_pkts < 64) {
		tx_pkt_never_discard_self(tbase, mbufs, n_pkts, NULL);
		return n_pkts;
	} else {
		tx_pkt_never_discard_self(tbase, mbufs, 64, NULL);
		return 64;
	}
}
uint16_t tx_try_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	const int bulk_size = 64;
	uint16_t ret = bulk_size, sent = 0, n_bulks;
	n_bulks = n_pkts >> __builtin_ctz(bulk_size);

	for (int i = 0; i < n_bulks; i++) {
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
		ret = rte_ring_enqueue_burst(tbase->tx_params_sw.tx_rings[0], (void *const *)mbufs, bulk_size);
#else
		ret = rte_ring_enqueue_burst(tbase->tx_params_sw.tx_rings[0], (void *const *)mbufs, bulk_size, NULL);
#endif
		mbufs += bulk_size;
		sent += ret;
		if (ret != bulk_size)
			break;
	}
	if ((ret == bulk_size) && (n_pkts & (bulk_size - 1))) {
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
		ret = rte_ring_enqueue_burst(tbase->tx_params_sw.tx_rings[0], (void *const *)mbufs, (n_pkts & (bulk_size - 1)));
#else
		ret = rte_ring_enqueue_burst(tbase->tx_params_sw.tx_rings[0], (void *const *)mbufs, (n_pkts & (bulk_size - 1)), NULL);
#endif
		sent += ret;
	}
	TASK_STATS_ADD_TX(&tbase->aux->stats, sent);
	return sent;
}
uint16_t tx_try_hw1(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	const struct port_queue *port_queue = &tbase->tx_params_hw.tx_port_queue[0];
	const int bulk_size = 64;
	uint16_t ret = bulk_size, n_bulks, sent = 0;
	n_bulks = n_pkts >> __builtin_ctz(bulk_size);

	for (int i = 0; i < n_bulks; i++) {
		ret = rte_eth_tx_burst(port_queue->port, port_queue->queue, mbufs, bulk_size);
		mbufs += bulk_size;
		sent += ret;
		if (ret != bulk_size)
			break;
	}
	if ((ret == bulk_size) && (n_pkts & (bulk_size - 1))) {
		ret = rte_eth_tx_burst(port_queue->port, port_queue->queue, mbufs, (n_pkts & (bulk_size - 1)));
		sent += ret;
	}
	TASK_STATS_ADD_TX(&tbase->aux->stats, sent);
	return sent;
}
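
/* Two specializations of the single hw-port transmit path: the
   _lat_opt flavor sends the burst straight away (lowest latency),
   while the _thrpt_opt flavor first drains anything still queued in
   ws_mbuf so that full MAX_PKT_BURST bursts reach the PMD (best
   throughput). */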
int tx_pkt_no_drop_never_discard_hw1_lat_opt(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
{
	return txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_pkts, tbase);
}
int tx_pkt_no_drop_never_discard_hw1_thrpt_opt(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
{
	static uint8_t fake_out[MAX_PKT_BURST] = {0};
	int ret = 0;
	if (n_pkts == MAX_PKT_BURST) {
		// First xmit what was queued
		uint16_t prod, cons;

		prod = tbase->ws_mbuf->idx[0].prod;
		cons = tbase->ws_mbuf->idx[0].cons;

		if ((uint16_t)(prod - cons)) {
			tbase->flags &= ~FLAG_TX_FLUSH;
			tbase->ws_mbuf->idx[0].prod = 0;
			tbase->ws_mbuf->idx[0].cons = 0;
			ret += txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], tbase->ws_mbuf->mbuf[0] + (cons & WS_MBUF_MASK), (uint16_t)(prod - cons), tbase);
		}
		ret += txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_pkts, tbase);
	} else {
		ret += tx_pkt_no_drop_hw(tbase, mbufs, n_pkts, fake_out);
	}
	return ret;
}
int tx_pkt_never_discard_hw1_lat_opt(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
{
	return txhw_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_pkts, tbase);
}
int tx_pkt_never_discard_hw1_thrpt_opt(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
{
	static uint8_t fake_out[MAX_PKT_BURST] = {0};
	int ret = 0;
	if (n_pkts == MAX_PKT_BURST) {
		// First xmit what was queued
		uint16_t prod, cons;

		prod = tbase->ws_mbuf->idx[0].prod;
		cons = tbase->ws_mbuf->idx[0].cons;

		if ((uint16_t)(prod - cons)) {
			tbase->flags &= ~FLAG_TX_FLUSH;
			tbase->ws_mbuf->idx[0].prod = 0;
			tbase->ws_mbuf->idx[0].cons = 0;
			ret += txhw_drop(&tbase->tx_params_hw.tx_port_queue[0], tbase->ws_mbuf->mbuf[0] + (cons & WS_MBUF_MASK), (uint16_t)(prod - cons), tbase);
		}
		ret += txhw_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_pkts, tbase);
	} else {
		ret += tx_pkt_hw(tbase, mbufs, n_pkts, fake_out);
	}
	return ret;
}
/* Transmit to hw using the tx_params_hw_sw structure. This function
   is used to transmit to hw when tx_params_hw_sw should be used,
   i.e. when the task needs to transmit both to hw and sw. */
int tx_pkt_no_drop_never_discard_hw1_no_pointer(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
{
	txhw_no_drop(&tbase->tx_params_hw_sw.tx_port_queue, mbufs, n_pkts, tbase);
	return 0;
}
int tx_pkt_no_drop_never_discard_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
{
	return ring_enq_no_drop(tbase->tx_params_sw.tx_rings[0], mbufs, n_pkts, tbase);
}
int tx_pkt_never_discard_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
{
	return ring_enq_drop(tbase->tx_params_sw.tx_rings[0], mbufs, n_pkts, tbase);
}
static uint16_t tx_pkt_free_dropped(__attribute__((unused)) struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out)
{
	uint64_t v = 0;
	uint16_t i;

	/* The most probable and most important case to optimize for is
	   when no packets should be dropped: v stays 0 only if every
	   out[] entry is 0. */
	for (i = 0; i + 8 < n_pkts; i += 8) {
		v |= *((uint64_t*)(&out[i]));
	}
	for (; i < n_pkts; ++i) {
		v |= out[i];
	}

	if (v) {
		/* At least some packets need to be dropped, so the
		   mbufs array needs to be updated. */
		uint16_t n_kept = 0;
		uint16_t n_discard = 0;
		for (uint16_t i = 0; i < n_pkts; ++i) {
			if (unlikely(out[i] >= OUT_HANDLED)) {
				rte_pktmbuf_free(mbufs[i]);
				n_discard += out[i] == OUT_DISCARD;
				continue;
			}
			mbufs[n_kept++] = mbufs[i];
		}
		TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, n_discard);
		TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, n_pkts - n_kept - n_discard);
		return n_kept;
	}
	return n_pkts;
}
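
/* The *1 flavors below serve tasks with a single output: dropped
   packets are freed and filtered out first, then the survivors are
   sent in one go to NIC queue 0 / ring 0. */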
int tx_pkt_no_drop_hw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out)
{
	const uint16_t n_kept = tx_pkt_free_dropped(tbase, mbufs, n_pkts, out);
	int ret = 0;

	if (n_kept)
		ret = txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_kept, tbase);
	return ret;
}
int tx_pkt_no_drop_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out)
{
	const uint16_t n_kept = tx_pkt_free_dropped(tbase, mbufs, n_pkts, out);
	int ret = 0;

	if (n_kept)
		ret = ring_enq_no_drop(tbase->tx_params_sw.tx_rings[0], mbufs, n_kept, tbase);
	return ret;
}
int tx_pkt_hw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out)
{
	const uint16_t n_kept = tx_pkt_free_dropped(tbase, mbufs, n_pkts, out);

	if (n_kept)
		return txhw_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_kept, tbase);
	return 0;
}
int tx_pkt_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out)
{
	const uint16_t n_kept = tx_pkt_free_dropped(tbase, mbufs, n_pkts, out);

	if (n_kept)
		return ring_enq_drop(tbase->tx_params_sw.tx_rings[0], mbufs, n_kept, tbase);
	return 0;
}
int tx_pkt_self(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out)
{
	const uint16_t n_kept = tx_pkt_free_dropped(tbase, mbufs, n_pkts, out);

	TASK_STATS_ADD_TX(&tbase->aux->stats, n_kept);
	tbase->ws_mbuf->idx[0].nb_rx = n_kept;
	struct rte_mbuf **tx_mbuf = tbase->ws_mbuf->mbuf[0] + (tbase->ws_mbuf->idx[0].prod & WS_MBUF_MASK);
	for (uint16_t i = 0; i < n_kept; ++i) {
		tx_mbuf[i] = mbufs[i];
	}
	return 0;
}
int tx_pkt_never_discard_self(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
{
	TASK_STATS_ADD_TX(&tbase->aux->stats, n_pkts);
	tbase->ws_mbuf->idx[0].nb_rx = n_pkts;
	struct rte_mbuf **tx_mbuf = tbase->ws_mbuf->mbuf[0] + (tbase->ws_mbuf->idx[0].prod & WS_MBUF_MASK);
	for (uint16_t i = 0; i < n_pkts; ++i) {
		tx_mbuf[i] = mbufs[i];
	}
	return 0;
}
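
/* The generic multi-output path: buf_pkt_all() sorts the burst into
   the per-output ws_mbuf buffers, and each buffer is drained towards
   its port/ring as soon as a full MAX_PKT_BURST chunk is available;
   partial chunks are left behind for flush_queues_*() to pick up. */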
int tx_pkt_no_drop_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
	int ret = 0;
	buf_pkt_all(tbase, mbufs, n_pkts, out);

	const uint8_t nb_bufs = tbase->tx_params_hw.nb_txports;
	uint16_t prod, cons;

	for (uint8_t i = 0; i < nb_bufs; ++i) {
		prod = tbase->ws_mbuf->idx[i].prod;
		cons = tbase->ws_mbuf->idx[i].cons;

		if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
			tbase->flags &= ~FLAG_TX_FLUSH;
			tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
			ret += txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
		}
	}
	return ret;
}
int tx_pkt_no_drop_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
	int ret = 0;
	buf_pkt_all(tbase, mbufs, n_pkts, out);

	const uint8_t nb_bufs = tbase->tx_params_sw.nb_txrings;
	uint16_t prod, cons;

	for (uint8_t i = 0; i < nb_bufs; ++i) {
		prod = tbase->ws_mbuf->idx[i].prod;
		cons = tbase->ws_mbuf->idx[i].cons;

		if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
			tbase->flags &= ~FLAG_TX_FLUSH;
			tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
			ret += ring_enq_no_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
		}
	}
	return ret;
}
int tx_pkt_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
	int ret = 0;
	buf_pkt_all(tbase, mbufs, n_pkts, out);

	const uint8_t nb_bufs = tbase->tx_params_hw.nb_txports;
	uint16_t prod, cons;

	for (uint8_t i = 0; i < nb_bufs; ++i) {
		prod = tbase->ws_mbuf->idx[i].prod;
		cons = tbase->ws_mbuf->idx[i].cons;

		if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
			tbase->flags &= ~FLAG_TX_FLUSH;
			tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
			ret += txhw_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
		}
	}
	return ret;
}
int tx_pkt_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
	int ret = 0;
	buf_pkt_all(tbase, mbufs, n_pkts, out);

	const uint8_t nb_bufs = tbase->tx_params_sw.nb_txrings;
	uint16_t prod, cons;

	for (uint8_t i = 0; i < nb_bufs; ++i) {
		prod = tbase->ws_mbuf->idx[i].prod;
		cons = tbase->ws_mbuf->idx[i].cons;

		if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
			tbase->flags &= ~FLAG_TX_FLUSH;
			tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
			ret += ring_enq_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
		}
	}
	return ret;
}
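
/* tx_pkt_trace/tx_pkt_dump/tx_pkt_distr/tx_pkt_bw are diagnostic
   wrappers: they are swapped in as tbase->tx_pkt at runtime, do their
   bookkeeping, then forward the burst to the saved original handler
   (tbase->aux->tx_pkt_orig) and restore it once done. */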
int tx_pkt_trace(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
	int ret = 0;
	if (tbase->aux->task_rt_dump.cur_trace == 0) {
		// No packet received since dumping...
		// So the transmitted packets should not be linked to received packets
		tbase->aux->task_rt_dump.n_print_tx = tbase->aux->task_rt_dump.n_trace;
		tbase->aux->task_rt_dump.n_trace = 0;
		task_base_del_rx_pkt_function(tbase, rx_pkt_trace);
		return tx_pkt_dump(tbase, mbufs, n_pkts, out);
	}
	plog_info("Tracing %d pkts\n", tbase->aux->task_rt_dump.cur_trace);

	for (uint32_t i = 0; i < tbase->aux->task_rt_dump.cur_trace; ++i) {
		/* For each packet being transmitted, find which
		   buffer represents the packet as it was before
		   the task processed it. */
		uint32_t j = 0;
		uint32_t len = sizeof(tbase->aux->task_rt_dump.pkt_mbuf_addr)/sizeof(tbase->aux->task_rt_dump.pkt_mbuf_addr[0]);
		for (; j < len; ++j) {
			if (tbase->aux->task_rt_dump.pkt_mbuf_addr[j] == mbufs[i])
				break;
		}
		if (j == len) {
			plog_info("Trace RX: missing!\n");
		}
		else {
			struct rte_mbuf tmp;
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
			tmp.data_off = 0;
#endif
			rte_pktmbuf_data_len(&tmp) = tbase->aux->task_rt_dump.pkt_cpy_len[j];
			rte_pktmbuf_pkt_len(&tmp) = tbase->aux->task_rt_dump.pkt_cpy_len[j];
			tmp.buf_addr = tbase->aux->task_rt_dump.pkt_cpy[j];
			plogd_info(&tmp, "Trace RX: ");
		}

		if (out) {
			if (out[i] != OUT_DISCARD)
				plogd_info(mbufs[i], "Trace TX[%d]: ", out[i]);
			else
				plogd_info(mbufs[i], "Trace Dropped: ");
		} else
			plogd_info(mbufs[i], "Trace TX: ");
	}
	ret = tbase->aux->tx_pkt_orig(tbase, mbufs, n_pkts, out);

	/* Unset by TX when n_trace = 0 */
	if (0 == tbase->aux->task_rt_dump.n_trace) {
		tbase->tx_pkt = tbase->aux->tx_pkt_orig;
		tbase->aux->tx_pkt_orig = NULL;
		task_base_del_rx_pkt_function(tbase, rx_pkt_trace);
	}
	return ret;
}
int tx_pkt_dump(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
	uint32_t n_dump = tbase->aux->task_rt_dump.n_print_tx;
	int ret = 0;

	n_dump = n_pkts < n_dump ? n_pkts : n_dump;
	for (uint32_t i = 0; i < n_dump; ++i) {
		if (out)
			plogd_info(mbufs[i], "TX[%d]: ", out[i]);
		else
			plogd_info(mbufs[i], "TX: ");
	}
	tbase->aux->task_rt_dump.n_print_tx -= n_dump;

	ret = tbase->aux->tx_pkt_orig(tbase, mbufs, n_pkts, out);

	if (0 == tbase->aux->task_rt_dump.n_print_tx) {
		tbase->tx_pkt = tbase->aux->tx_pkt_orig;
		tbase->aux->tx_pkt_orig = NULL;
	}
	return ret;
}
/* Gather the distribution of the number of packets that have been
   transmitted from one TX call. Since the value is only modified by
   the task that transmits the packets, no atomic operation is needed. */
int tx_pkt_distr(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
	tbase->aux->tx_bucket[n_pkts]++;
	return tbase->aux->tx_pkt_orig(tbase, mbufs, n_pkts, out);
}
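
/* Per-burst byte accounting for the bandwidth stats: everything that
   is about to be sent counts as TX bytes, everything flagged
   OUT_HANDLED/OUT_DISCARD counts as dropped bytes, using the wire
   size of each mbuf as computed by mbuf_wire_size(). */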
int tx_pkt_bw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
	uint32_t tx_bytes = 0;
	uint32_t drop_bytes = 0;

	for (uint16_t i = 0; i < n_pkts; ++i) {
		if (!out || out[i] < OUT_HANDLED)
			tx_bytes += mbuf_wire_size(mbufs[i]);
		else
			drop_bytes += mbuf_wire_size(mbufs[i]);
	}

	TASK_STATS_ADD_TX_BYTES(&tbase->aux->stats, tx_bytes);
	TASK_STATS_ADD_DROP_BYTES(&tbase->aux->stats, drop_bytes);
	return tbase->aux->tx_pkt_orig(tbase, mbufs, n_pkts, out);
}
int tx_pkt_drop_all(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
	for (uint16_t j = 0; j < n_pkts; ++j) {
		rte_pktmbuf_free(mbufs[j]);
	}
	if (!out)
		TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, n_pkts);
	else {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			if (out[j] == OUT_HANDLED)
				TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, 1);
			else
				TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
		}
	}
	return n_pkts;
}