// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <rte_mbuf.h>
#include <pcap.h>
#include <string.h>
#include <stdlib.h>
#include <rte_cycles.h>
#include <rte_version.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_hash_crc.h>
#include <rte_prefetch.h>

#include "prox_shared.h"
#include "random.h"
#include "prox_malloc.h"
#include "handle_gen.h"
#include "handle_lat.h"
#include "task_init.h"
#include "task_base.h"
#include "prox_port_cfg.h"
#include "lconf.h"
#include "log.h"
#include "quit.h"
#include "mbuf_utils.h"
#include "etypes.h"
#include "prox_cksum.h"
#include "prox_assert.h"
#include "token_time.h"
#include "local_mbuf.h"
#include "handle_master.h"

#define MAX_TEMPLATE_INDEX	65536
#define TEMPLATE_INDEX_MASK	(MAX_TEMPLATE_INDEX - 1)
#define MBUF_ARP		MAX_TEMPLATE_INDEX

#define IP4(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, (x) >> 24
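
/* The template index of each generated packet is kept in the low 16 bits
 * of mbuf->udata64 (TEMPLATE_INDEX_MASK); the MBUF_ARP bit, just above
 * that range, marks mbufs carrying ARP traffic, which are skipped when
 * timestamps, signatures and unique ids are written (see the
 * (udata64 & MBUF_ARP) tests below). */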
static void pkt_template_init_mbuf(struct pkt_template *pkt_template, struct rte_mbuf *mbuf, uint8_t *pkt)
{
	const uint32_t pkt_size = pkt_template->len;

	rte_pktmbuf_pkt_len(mbuf) = pkt_size;
	rte_pktmbuf_data_len(mbuf) = pkt_size;
	init_mbuf_seg(mbuf);
	rte_memcpy(pkt, pkt_template->buf, pkt_template->len);
}

struct task_gen_pcap {
	struct task_base base;
	uint64_t hz;
	struct local_mbuf local_mbuf;
	uint32_t pkt_idx;
	struct pkt_template *proto;
	uint32_t loop;
	uint32_t n_pkts;
	uint64_t last_tsc;
	uint64_t *proto_tsc;
};

struct task_gen {
	struct task_base base;
	uint64_t hz;
	uint64_t link_speed;
	struct token_time token_time;
	struct local_mbuf local_mbuf;
	struct pkt_template *pkt_template; /* packet templates used at runtime */
	uint64_t write_duration_estimate; /* how long it took previously to write the time stamps in the packets */
	uint64_t earliest_tsc_next_pkt;
	uint64_t new_rate_bps;
	uint64_t pkt_queue_index;
	uint32_t n_pkts; /* number of packets in pcap */
	uint32_t pkt_idx; /* current packet from pcap */
	uint32_t pkt_count; /* how many packets to generate */
	uint32_t max_frame_size;
	uint32_t runtime_flags;
	uint16_t lat_pos;
	uint16_t packet_id_pos;
	uint16_t accur_pos;
	uint16_t sig_pos;
	uint32_t sig;
	uint8_t generator_id;
	uint8_t n_rands; /* number of randoms */
	uint8_t min_bulk_size;
	uint8_t max_bulk_size;
	uint8_t lat_enabled;
	uint8_t runtime_checksum_needed;
	struct {
		struct random state;
		uint32_t rand_mask; /* since the random vals are uniform, masks don't introduce bias */
		uint32_t fixed_bits; /* bits that are ORed into each random */
		uint16_t rand_offset; /* each random has an offset */
		uint8_t rand_len; /* # bytes to take from random (max len = 4, no bias introduced) */
	} rand[64];
	uint64_t accur[64];
	uint64_t pkt_tsc_offset[64];
	struct pkt_template *pkt_template_orig; /* packet templates (from inline or from pcap) */
	struct ether_addr src_mac;
	uint8_t flags;
	uint8_t cksum_offload;
	struct prox_port_cfg *port;
} __rte_cache_aligned;

static inline uint8_t ipv4_get_hdr_len(struct ipv4_hdr *ip)
{
	/* Optimize for common case of IPv4 header without options. */
	if (ip->version_ihl == 0x45)
		return sizeof(struct ipv4_hdr);
	if (unlikely(ip->version_ihl >> 4 != 4)) {
		plog_warn("IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4);
		return 0;
	}
	return (ip->version_ihl & 0xF) * 4;
}

static void parse_l2_l3_len(uint8_t *pkt, uint16_t *l2_len, uint16_t *l3_len, uint16_t len)
{
	*l2_len = sizeof(struct ether_hdr);
	*l3_len = 0;
	struct vlan_hdr *vlan_hdr;
	struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt;
	uint16_t ether_type = eth_hdr->ether_type;

	// Unstack VLAN tags
	while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (*l2_len + sizeof(struct vlan_hdr) < len)) {
		vlan_hdr = (struct vlan_hdr *)(pkt + *l2_len);
		*l2_len += 4;
		ether_type = vlan_hdr->eth_proto;
	}

	// No L3 cksum offload for IPv6, but TODO L4 offload
	// ETYPE_EoGRE CRC not implemented yet
	switch (ether_type) {
	case ETYPE_MPLSU:
	case ETYPE_MPLSM:
		*l2_len += 4;
		break;
	case ETYPE_IPv4:
		break;
	case ETYPE_EoGRE:
	case ETYPE_ARP:
	case ETYPE_IPv6:
		*l2_len = 0;
		break;
	default:
		*l2_len = 0;
		plog_warn("Unsupported packet type %x - CRC might be wrong\n", ether_type);
		break;
	}

	if (*l2_len) {
		struct ipv4_hdr *ip = (struct ipv4_hdr *)(pkt + *l2_len);
		*l3_len = ipv4_get_hdr_len(ip);
	}
}

static void checksum_packet(uint8_t *hdr, struct rte_mbuf *mbuf, struct pkt_template *pkt_template, int cksum_offload)
{
	uint16_t l2_len = pkt_template->l2_len;
	uint16_t l3_len = pkt_template->l3_len;

	if (l2_len) {
		struct ipv4_hdr *ip = (struct ipv4_hdr*)(hdr + l2_len);
		prox_ip_udp_cksum(mbuf, ip, l2_len, l3_len, cksum_offload);
	}
}

static void task_gen_reset_token_time(struct task_gen *task)
{
	token_time_set_bpp(&task->token_time, task->new_rate_bps);
	token_time_reset(&task->token_time, rte_rdtsc(), 0);
}

static void task_gen_take_count(struct task_gen *task, uint32_t send_bulk)
{
	if (task->pkt_count == (uint32_t)-1)
		return;

	if (task->pkt_count >= send_bulk)
		task->pkt_count -= send_bulk;
	else
		task->pkt_count = 0;
}

static int handle_gen_pcap_bulk(struct task_base *tbase, struct rte_mbuf **mbuf, uint16_t n_pkts)
{
	struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
	uint64_t now = rte_rdtsc();
	uint64_t send_bulk = 0;
	uint32_t pkt_idx_tmp = task->pkt_idx;

	if (pkt_idx_tmp == task->n_pkts) {
		PROX_ASSERT(task->loop);
		task->pkt_idx = 0;
		pkt_idx_tmp = 0;
	}

	for (uint16_t j = 0; j < 64; ++j) {
		uint64_t tsc = task->proto_tsc[pkt_idx_tmp];
		if (task->last_tsc + tsc <= now) {
			task->last_tsc += tsc;
			send_bulk++;
			pkt_idx_tmp++;
			if (pkt_idx_tmp == task->n_pkts) {
				if (task->loop)
					pkt_idx_tmp = 0;
				else
					break;
			}
		}
		else
			break;
	}

	struct rte_mbuf **new_pkts = local_mbuf_refill_and_take(&task->local_mbuf, send_bulk);
	if (new_pkts == NULL)
		return 0;

	for (uint16_t j = 0; j < send_bulk; ++j) {
		struct rte_mbuf *next_pkt = new_pkts[j];
		struct pkt_template *pkt_template = &task->proto[task->pkt_idx];
		uint8_t *hdr = rte_pktmbuf_mtod(next_pkt, uint8_t *);

		pkt_template_init_mbuf(pkt_template, next_pkt, hdr);

		task->pkt_idx++;
		if (task->pkt_idx == task->n_pkts) {
			if (task->loop)
				task->pkt_idx = 0;
			else
				break;
		}
	}

	return task->base.tx_pkt(&task->base, new_pkts, send_bulk, NULL);
}
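
/* Convert a byte count into TSC cycles at the current link rate:
 * cycles = hz * bytes / bytes_per_sec. task->link_speed stays UINT64_MAX
 * until the real speed is known; in that case 0 is returned and no
 * inter-packet gap is accounted for. */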
static uint64_t bytes_to_tsc(struct task_gen *task, uint32_t bytes)
{
	const uint64_t hz = task->hz;
	const uint64_t bytes_per_hz = task->link_speed;

	if (bytes_per_hz == UINT64_MAX)
		return 0;

	return hz * bytes / bytes_per_hz;
}

static uint32_t task_gen_next_pkt_idx(const struct task_gen *task, uint32_t pkt_idx)
{
	return pkt_idx + 1 == task->n_pkts ? 0 : pkt_idx + 1;
}

static uint32_t task_gen_offset_pkt_idx(const struct task_gen *task, uint32_t offset)
{
	return (task->pkt_idx + offset) % task->n_pkts;
}

static uint32_t task_gen_calc_send_bulk(const struct task_gen *task, uint32_t *total_bytes)
{
	/* The biggest bulk we allow to send is task->max_bulk_size
	   packets. The max bulk size can also be limited by the
	   pkt_count field. At the same time, we are rate limiting
	   based on the specified speed (in bytes per second) so token
	   bucket based rate limiting must also be applied. The
	   minimum bulk size is also constrained. If the calculated
	   bulk size is less than the minimum, then don't send
	   anything. */
	const uint32_t min_bulk = task->min_bulk_size;
	uint32_t max_bulk = task->max_bulk_size;

	if (task->pkt_count != (uint32_t)-1 && task->pkt_count < max_bulk) {
		max_bulk = task->pkt_count;
	}

	uint32_t send_bulk = 0;
	uint32_t pkt_idx_tmp = task->pkt_idx;
	uint32_t would_send_bytes = 0;
	uint32_t pkt_size;

	/*
	 * TODO - this must be improved to take into account the fact that,
	 * after applying randoms, the packet can be replaced by an ARP.
	 */
	for (uint16_t j = 0; j < max_bulk; ++j) {
		struct pkt_template *pktpl = &task->pkt_template[pkt_idx_tmp];
		pkt_size = pktpl->len;
		uint32_t pkt_len = pkt_len_to_wire_size(pkt_size);
		if (pkt_len + would_send_bytes > task->token_time.bytes_now)
			break;

		pkt_idx_tmp = task_gen_next_pkt_idx(task, pkt_idx_tmp);

		send_bulk++;
		would_send_bytes += pkt_len;
	}

	if (send_bulk < min_bulk)
		return 0;
	*total_bytes = would_send_bytes;
	return send_bulk;
}
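
/* Each configured random overwrites rand_len bytes at rand_offset with
 * (32-bit uniform random & rand_mask) | fixed_bits. For example (values
 * for illustration only), rand_mask = 0x0000ffff with fixed_bits =
 * 0x0a010000 on a 4-byte field generates addresses 10.1.0.0 through
 * 10.1.255.255. */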
static void task_gen_apply_random_fields(struct task_gen *task, uint8_t *hdr)
{
	uint32_t ret, ret_tmp;

	for (uint16_t i = 0; i < task->n_rands; ++i) {
		ret = random_next(&task->rand[i].state);
		ret_tmp = (ret & task->rand[i].rand_mask) | task->rand[i].fixed_bits;

		ret_tmp = rte_bswap32(ret_tmp);
		/* At this point, the lower order bytes (BE) contain
		   the generated value. The address where the value
		   of interest starts is at ret_tmp + 4 - rand_len. */
		uint8_t *pret_tmp = (uint8_t*)&ret_tmp;
		rte_memcpy(hdr + task->rand[i].rand_offset, pret_tmp + 4 - task->rand[i].rand_len, task->rand[i].rand_len);
	}
}

static void task_gen_apply_all_random_fields(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count)
{
	if (!task->n_rands)
		return;

	for (uint16_t i = 0; i < count; ++i)
		task_gen_apply_random_fields(task, pkt_hdr[i]);
}

static void task_gen_apply_accur_pos(struct task_gen *task, uint8_t *pkt_hdr, uint32_t accuracy)
{
	*(uint32_t *)(pkt_hdr + task->accur_pos) = accuracy;
}

static void task_gen_apply_sig(struct task_gen *task, uint8_t *pkt_hdr)
{
	*(uint32_t *)(pkt_hdr + task->sig_pos) = task->sig;
}

static void task_gen_apply_all_accur_pos(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
{
	if (!task->accur_pos)
		return;

	/* The accuracy of task->pkt_queue_index - 64 is stored in
	   packet task->pkt_queue_index. The ID modulo 64 is the
	   same. */
	for (uint16_t j = 0; j < count; ++j) {
		if ((mbufs[j]->udata64 & MBUF_ARP) == 0) {
			uint32_t accuracy = task->accur[(task->pkt_queue_index + j) & 63];
			task_gen_apply_accur_pos(task, pkt_hdr[j], accuracy);
		}
	}
}

static void task_gen_apply_all_sig(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
{
	if (!task->sig_pos)
		return;

	for (uint16_t j = 0; j < count; ++j) {
		if ((mbufs[j]->udata64 & MBUF_ARP) == 0) {
			task_gen_apply_sig(task, pkt_hdr[j]);
		}
	}
}

static void task_gen_apply_unique_id(struct task_gen *task, uint8_t *pkt_hdr, const struct unique_id *id)
{
	struct unique_id *dst = (struct unique_id *)(pkt_hdr + task->packet_id_pos);

	*dst = *id;
}

static void task_gen_apply_all_unique_id(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
{
	if (!task->packet_id_pos)
		return;

	for (uint16_t i = 0; i < count; ++i) {
		if ((mbufs[i]->udata64 & MBUF_ARP) == 0) {
			struct unique_id id;
			unique_id_init(&id, task->generator_id, task->pkt_queue_index++);
			task_gen_apply_unique_id(task, pkt_hdr[i], &id);
		}
	}
}

static void task_gen_checksum_packets(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
{
	if (!(task->runtime_flags & TASK_TX_CRC))
		return;

	if (!task->runtime_checksum_needed)
		return;

	uint32_t pkt_idx = task_gen_offset_pkt_idx(task, - count);
	for (uint16_t i = 0; i < count; ++i) {
		if ((mbufs[i]->udata64 & MBUF_ARP) == 0) {
			struct pkt_template *pkt_template = &task->pkt_template[pkt_idx];
			checksum_packet(pkt_hdr[i], mbufs[i], pkt_template, task->cksum_offload);
			pkt_idx = task_gen_next_pkt_idx(task, pkt_idx);
		}
	}
}

static void task_gen_consume_tokens(struct task_gen *task, uint32_t tokens, uint32_t send_count)
{
	/* If max burst has been sent, we can't keep up so just assume
	   that we can (leaving a "gap" in the packet stream on the
	   wire). */
	task->token_time.bytes_now -= tokens;
	if (send_count == task->max_bulk_size && task->token_time.bytes_now > tokens) {
		task->token_time.bytes_now = tokens;
	}
}
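
/* task->pkt_tsc_offset[i] holds the transmit time of packet i relative
 * to the first packet of the bulk, so the duration of the whole bulk is
 * the offset of its last packet plus that packet's own wire time. */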
static uint64_t task_gen_calc_bulk_duration(struct task_gen *task, uint32_t count)
{
	uint32_t pkt_idx = task_gen_offset_pkt_idx(task, - 1);
	struct pkt_template *last_pkt_template = &task->pkt_template[pkt_idx];
	uint32_t last_pkt_len = pkt_len_to_wire_size(last_pkt_template->len);
	uint64_t last_pkt_duration = bytes_to_tsc(task, last_pkt_len);
	uint64_t bulk_duration = task->pkt_tsc_offset[count - 1] + last_pkt_duration;

	return bulk_duration;
}
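
/* Timestamping works in two steps: the expected transmit time of the
 * bulk (tx_tsc) is estimated from how long the previous write took, and
 * delta_t carries over any lateness from the previous bulk so that the
 * stamps stay monotonic. The busy-wait at the end keeps transmission
 * from starting before tx_tsc, the time at which the estimate said the
 * stamps would be written. */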
static uint64_t task_gen_write_latency(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count)
{
	if (!task->lat_enabled)
		return 0;

	uint64_t tx_tsc, delta_t;
	uint64_t tsc_before_tx = 0;

	/* Just before sending the packets, apply the time stamp
	   relative to when the first packet will be sent. The first
	   packet will be sent now. The time is read for each packet
	   to reduce the error towards the actual time the packet will
	   be sent. */
	uint64_t write_tsc_after, write_tsc_before;

	write_tsc_before = rte_rdtsc();

	/* The time it took previously to write the time stamps in the
	   packets is used as an estimate for how long it will take to
	   write the time stamps now. The estimated time at which the
	   packets will actually be sent will be at tx_tsc. */
	tx_tsc = write_tsc_before + task->write_duration_estimate;

	/* The offset delta_t tracks the difference between the actual
	   time and the time written in the packets. Adding the offset
	   to the actual time ensures that the time written in the
	   packets is monotonically increasing. At the same time,
	   simply sleeping until delta_t is zero would leave a period
	   of silence on the line. The error has been introduced
	   earlier, but the packets have already been sent. */
	if (tx_tsc < task->earliest_tsc_next_pkt)
		delta_t = task->earliest_tsc_next_pkt - tx_tsc;
	else
		delta_t = 0;

	for (uint16_t i = 0; i < count; ++i) {
		uint32_t *pos = (uint32_t *)(pkt_hdr[i] + task->lat_pos);
		const uint64_t pkt_tsc = tx_tsc + delta_t + task->pkt_tsc_offset[i];

		*pos = pkt_tsc >> LATENCY_ACCURACY;
	}

	uint64_t bulk_duration = task_gen_calc_bulk_duration(task, count);

	task->earliest_tsc_next_pkt = tx_tsc + delta_t + bulk_duration;
	write_tsc_after = rte_rdtsc();
	task->write_duration_estimate = write_tsc_after - write_tsc_before;

	/* Make sure that the time stamps that were written
	   are valid. The offset must be taken into account. */
	do {
		tsc_before_tx = rte_rdtsc();
	} while (tsc_before_tx < tx_tsc);
	return tsc_before_tx;
}

static void task_gen_store_accuracy(struct task_gen *task, uint32_t count, uint64_t tsc_before_tx)
{
	if (!task->accur_pos)
		return;

	uint64_t accur = rte_rdtsc() - tsc_before_tx;
	uint64_t first_accuracy_idx = task->pkt_queue_index - count;

	for (uint32_t i = 0; i < count; ++i) {
		uint32_t accuracy_idx = (first_accuracy_idx + i) & 63;

		task->accur[accuracy_idx] = accur;
	}
}
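
/* Three separate passes, so the prefetch of every mbuf header is already
 * in flight before the packet data addresses are computed, and those are
 * prefetched in turn; this hides memory latency across the whole bulk. */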
static void task_gen_load_and_prefetch(struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
{
	for (uint16_t i = 0; i < count; ++i)
		rte_prefetch0(mbufs[i]);
	for (uint16_t i = 0; i < count; ++i)
		pkt_hdr[i] = rte_pktmbuf_mtod(mbufs[i], uint8_t *);
	for (uint16_t i = 0; i < count; ++i)
		rte_prefetch0(pkt_hdr[i]);
}

static void task_gen_build_packets(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
{
	uint64_t will_send_bytes = 0;

	for (uint16_t i = 0; i < count; ++i) {
		struct pkt_template *pkt_template = &task->pkt_template[task->pkt_idx];
		pkt_template_init_mbuf(pkt_template, mbufs[i], pkt_hdr[i]);
		mbufs[i]->udata64 = task->pkt_idx & TEMPLATE_INDEX_MASK;
		if (task->lat_enabled) {
			task->pkt_tsc_offset[i] = bytes_to_tsc(task, will_send_bytes);
			will_send_bytes += pkt_len_to_wire_size(pkt_template->len);
		}
		task->pkt_idx = task_gen_next_pkt_idx(task, task->pkt_idx);
	}
}

static void task_gen_update_config(struct task_gen *task)
{
	if (task->token_time.cfg.bpp != task->new_rate_bps)
		task_gen_reset_token_time(task);
}
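
/* Recursively walk the 32 mask bits and register with the control plane
 * every IP a random field can produce; each set mask bit doubles the
 * number of values. E.g. (illustration only) mask = 0x3 with fixed_bits
 * = 0x0a000000 registers 10.0.0.0 through 10.0.0.3. */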
static inline void build_value(struct task_gen *task, uint32_t mask, int bit_pos, uint32_t val, uint32_t fixed_bits)
{
	struct task_base *tbase = (struct task_base *)task;
	if (bit_pos < 32) {
		build_value(task, mask >> 1, bit_pos + 1, val, fixed_bits);
		if (mask & 1) {
			build_value(task, mask >> 1, bit_pos + 1, val | (1 << bit_pos), fixed_bits);
		}
	} else {
		register_ip_to_ctrl_plane(tbase->l3.tmaster, rte_cpu_to_be_32(val | fixed_bits), tbase->l3.reachable_port_id, tbase->l3.core_id, tbase->l3.task_id);
	}
}

static inline void register_all_ip_to_ctrl_plane(struct task_gen *task)
{
	struct task_base *tbase = (struct task_base *)task;
	uint32_t mask, fixed;
	unsigned int offset;
	int len;

	for (uint32_t i = 0; i < task->n_pkts; ++i) {
		struct pkt_template *pktpl = &task->pkt_template[i];
		unsigned int ip_src_pos = 0;
		int maybe_ipv4 = 0;
		unsigned int l2_len = sizeof(struct ether_hdr);

		uint8_t *pkt = pktpl->buf;
		struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt;
		uint16_t ether_type = eth_hdr->ether_type;
		struct vlan_hdr *vlan_hdr;

		// Unstack VLAN tags
		while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(struct vlan_hdr) < pktpl->len)) {
			vlan_hdr = (struct vlan_hdr *)(pkt + l2_len);
			l2_len += 4;
			ether_type = vlan_hdr->eth_proto;
		}
		if ((ether_type == ETYPE_MPLSU) || (ether_type == ETYPE_MPLSM)) {
			l2_len += 4;
			maybe_ipv4 = 1;
		}
		if ((ether_type != ETYPE_IPv4) && !maybe_ipv4)
			continue;

		struct ipv4_hdr *ip = (struct ipv4_hdr *)(pkt + l2_len);
		PROX_PANIC(ip->version_ihl >> 4 != 4, "IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4);

		// Even if IPv4 header contains options, options are after ip src and dst
		ip_src_pos = l2_len + sizeof(struct ipv4_hdr) - 2 * sizeof(uint32_t);
		uint32_t *ip_src = ((uint32_t *)(pktpl->buf + ip_src_pos));
		plog_info("\tip_src_pos = %d, ip_src = %x\n", ip_src_pos, *ip_src);
		register_ip_to_ctrl_plane(tbase->l3.tmaster, *ip_src, tbase->l3.reachable_port_id, tbase->l3.core_id, tbase->l3.task_id);

		for (int j = 0; j < task->n_rands; j++) {
			offset = task->rand[j].rand_offset;
			len = task->rand[j].rand_len;
			mask = task->rand[j].rand_mask;
			fixed = task->rand[j].fixed_bits;
			plog_info("offset = %d, len = %d, mask = %x, fixed = %x\n", offset, len, mask, fixed);
			if ((offset < ip_src_pos + 4) && (offset + len >= ip_src_pos)) {
				if (offset >= ip_src_pos) {
					int32_t ip_src_mask = (1 << (4 + ip_src_pos - offset) * 8) - 1;
					mask = mask & ip_src_mask;
					fixed = (fixed & ip_src_mask) | (rte_be_to_cpu_32(*ip_src) & ~ip_src_mask);
					build_value(task, mask, 0, 0, fixed);
				} else {
					int32_t bits = ((ip_src_pos + 4 - offset - len) * 8);
					mask = mask << bits;
					fixed = (fixed << bits) | (rte_be_to_cpu_32(*ip_src) & ((1 << bits) - 1));
					build_value(task, mask, 0, 0, fixed);
				}
			}
		}
	}
}
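
/* Fast-path entry point: refresh the rate configuration, ask the token
 * bucket how many packets may go out, then build, randomize, stamp,
 * checksum and transmit the bulk, and finally record how accurately it
 * was sent. */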
static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_gen *task = (struct task_gen *)tbase;
	uint8_t out[MAX_PKT_BURST] = {0};
	int ret;

	// If link is down, link_speed is 0
	if (unlikely(task->link_speed == 0)) {
		if (task->port && task->port->link_speed != 0) {
			task->link_speed = task->port->link_speed * 125000L;
			plog_info("\tPort %u: link speed is %ld Mbps\n",
				  (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
		} else
			return 0;
	}

	task_gen_update_config(task);

	if (task->pkt_count == 0) {
		task_gen_reset_token_time(task);
		return 0;
	}
	if (!task->token_time.cfg.bpp)
		return 0;

	token_time_update(&task->token_time, rte_rdtsc());

	uint32_t would_send_bytes;
	uint32_t send_bulk = task_gen_calc_send_bulk(task, &would_send_bytes);

	if (send_bulk == 0)
		return 0;
	task_gen_take_count(task, send_bulk);
	task_gen_consume_tokens(task, would_send_bytes, send_bulk);

	struct rte_mbuf **new_pkts = local_mbuf_refill_and_take(&task->local_mbuf, send_bulk);
	if (new_pkts == NULL)
		return 0;
	uint8_t *pkt_hdr[MAX_RING_BURST];

	task_gen_load_and_prefetch(new_pkts, pkt_hdr, send_bulk);
	task_gen_build_packets(task, new_pkts, pkt_hdr, send_bulk);
	task_gen_apply_all_random_fields(task, pkt_hdr, send_bulk);
	task_gen_apply_all_accur_pos(task, new_pkts, pkt_hdr, send_bulk);
	task_gen_apply_all_sig(task, new_pkts, pkt_hdr, send_bulk);
	task_gen_apply_all_unique_id(task, new_pkts, pkt_hdr, send_bulk);

	uint64_t tsc_before_tx;

	tsc_before_tx = task_gen_write_latency(task, pkt_hdr, send_bulk);
	task_gen_checksum_packets(task, new_pkts, pkt_hdr, send_bulk);
	ret = task->base.tx_pkt(&task->base, new_pkts, send_bulk, out);
	task_gen_store_accuracy(task, send_bulk, tsc_before_tx);

	return ret;
}

static void init_task_gen_seeds(struct task_gen *task)
{
	for (size_t i = 0; i < sizeof(task->rand)/sizeof(task->rand[0]); ++i)
		random_init_seed(&task->rand[i].state);
}

static uint32_t pcap_count_pkts(pcap_t *handle, uint32_t *max_frame_size)
{
	struct pcap_pkthdr header;
	const uint8_t *buf;
	uint32_t ret = 0;
	*max_frame_size = 0;
	long pkt1_fpos = ftell(pcap_file(handle));

	while ((buf = pcap_next(handle, &header))) {
		if (header.len > *max_frame_size)
			*max_frame_size = header.len;
		ret++;
	}
	int ret2 = fseek(pcap_file(handle), pkt1_fpos, SEEK_SET);
	PROX_PANIC(ret2 != 0, "Failed to reset reading pcap file\n");
	return ret;
}

static uint64_t avg_time_stamp(uint64_t *time_stamp, uint32_t n)
{
	uint64_t tot_inter_pkt = 0;

	for (uint32_t i = 0; i < n; ++i)
		tot_inter_pkt += time_stamp[i];
	return (tot_inter_pkt + n / 2) / n;
}
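
/* Load up to n_pkts packets into the templates. When time_stamp is not
 * NULL, each packet's pcap timestamp is first converted to an absolute
 * TSC value and then turned into inter-packet deltas; the missing
 * last-to-first gap (needed when looping) is filled in with the average
 * delta. */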
static int pcap_read_pkts(pcap_t *handle, const char *file_name, uint32_t n_pkts, struct pkt_template *proto, uint64_t *time_stamp, uint32_t max_frame_size)
{
	struct pcap_pkthdr header;
	const uint8_t *buf;
	size_t len;

	for (uint32_t i = 0; i < n_pkts; ++i) {
		buf = pcap_next(handle, &header);

		PROX_PANIC(buf == NULL, "Failed to read packet %d from pcap %s\n", i, file_name);
		proto[i].len = header.len;
		/* proto[i].buf points to a max_frame_size byte buffer, so clamp
		   against the buffer size rather than sizeof() of the pointer */
		len = RTE_MIN(header.len, max_frame_size);
		if (header.len > len)
			plogx_warn("Packet truncated from %u to %zu bytes\n", header.len, len);

		if (time_stamp) {
			static struct timeval beg;
			struct timeval tv;

			if (i == 0)
				beg = header.ts;

			tv = tv_diff(&beg, &header.ts);
			tv_to_tsc(&tv, time_stamp + i);
		}
		rte_memcpy(proto[i].buf, buf, len);
	}

	if (time_stamp && n_pkts) {
		for (uint32_t i = n_pkts - 1; i > 0; --i)
			time_stamp[i] -= time_stamp[i - 1];
		/* Since the handle function will loop the packets,
		   there is one time-stamp that is not provided by the
		   pcap file. This is the time between the last and
		   the first packet. This implementation takes the
		   average of the inter-packet times here. */
		if (n_pkts > 1)
			time_stamp[0] = avg_time_stamp(time_stamp + 1, n_pkts - 1);
	}

	return 0;
}

static int check_pkt_size(struct task_gen *task, uint32_t pkt_size, int do_panic)
{
	const uint16_t min_len = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
	const uint16_t max_len = task->max_frame_size;

	if (do_panic) {
		PROX_PANIC(pkt_size == 0, "Invalid packet size length (no packet defined?)\n");
		PROX_PANIC(pkt_size > max_len, "pkt_size out of range (must be <= %u)\n", max_len);
		PROX_PANIC(pkt_size < min_len, "pkt_size out of range (must be >= %u)\n", min_len);
		return 0;
	}
	if (pkt_size == 0) {
		plog_err("Invalid packet size length (no packet defined?)\n");
		return -1;
	}
	if (pkt_size > max_len) {
		plog_err("pkt_size out of range (must be <= %u)\n", max_len);
		return -1;
	}
	if (pkt_size < min_len) {
		plog_err("pkt_size out of range (must be >= %u)\n", min_len);
		return -1;
	}
	return 0;
}

static int check_all_pkt_size(struct task_gen *task, int do_panic)
{
	int rc;
	for (uint32_t i = 0; i < task->n_pkts; ++i) {
		if ((rc = check_pkt_size(task, task->pkt_template[i].len, do_panic)) != 0)
			return rc;
	}
	return 0;
}

static int check_fields_in_bounds(struct task_gen *task, uint32_t pkt_size, int do_panic)
{
	if (task->lat_enabled) {
		uint32_t pos_beg = task->lat_pos;
		uint32_t pos_end = task->lat_pos + 3U;

		if (do_panic)
			PROX_PANIC(pkt_size <= pos_end, "Writing latency at %u-%u, but packet size is %u bytes\n",
				   pos_beg, pos_end, pkt_size);
		else if (pkt_size <= pos_end) {
			plog_err("Writing latency at %u-%u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
			return -1;
		}
	}
	if (task->packet_id_pos) {
		uint32_t pos_beg = task->packet_id_pos;
		uint32_t pos_end = task->packet_id_pos + 4U;

		if (do_panic)
			PROX_PANIC(pkt_size <= pos_end, "Writing packet at %u-%u, but packet size is %u bytes\n",
				   pos_beg, pos_end, pkt_size);
		else if (pkt_size <= pos_end) {
			plog_err("Writing packet at %u-%u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
			return -1;
		}
	}
	if (task->accur_pos) {
		uint32_t pos_beg = task->accur_pos;
		uint32_t pos_end = task->accur_pos + 3U;

		if (do_panic)
			PROX_PANIC(pkt_size <= pos_end, "Writing accuracy at %u-%u, but packet size is %u bytes\n",
				   pos_beg, pos_end, pkt_size);
		else if (pkt_size <= pos_end) {
			plog_err("Writing accuracy at %u-%u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
			return -1;
		}
	}
	return 0;
}

static void task_gen_pkt_template_recalc_metadata(struct task_gen *task)
{
	struct pkt_template *template;

	for (size_t i = 0; i < task->n_pkts; ++i) {
		template = &task->pkt_template[i];
		parse_l2_l3_len(template->buf, &template->l2_len, &template->l3_len, template->len);
	}
}

static void task_gen_pkt_template_recalc_checksum(struct task_gen *task)
{
	struct pkt_template *template;
	struct ipv4_hdr *ip;

	task->runtime_checksum_needed = 0;
	for (size_t i = 0; i < task->n_pkts; ++i) {
		template = &task->pkt_template[i];
		if (template->l2_len == 0)
			continue;
		ip = (struct ipv4_hdr *)(template->buf + template->l2_len);

		ip->hdr_checksum = 0;
		prox_ip_cksum_sw(ip);
		uint32_t l4_len = rte_bswap16(ip->total_length) - template->l3_len;

		if (ip->next_proto_id == IPPROTO_UDP) {
			struct udp_hdr *udp = (struct udp_hdr *)(((uint8_t *)ip) + template->l3_len);
			prox_udp_cksum_sw(udp, l4_len, ip->src_addr, ip->dst_addr);
		} else if (ip->next_proto_id == IPPROTO_TCP) {
			struct tcp_hdr *tcp = (struct tcp_hdr *)(((uint8_t *)ip) + template->l3_len);
			prox_tcp_cksum_sw(tcp, l4_len, ip->src_addr, ip->dst_addr);
		}

		/* The current implementation avoids checksum
		   calculation by determining that at packet
		   construction time, no fields are applied that would
		   require a recalculation of the checksum. */
		if (task->lat_enabled && task->lat_pos > template->l2_len)
			task->runtime_checksum_needed = 1;
		if (task->accur_pos > template->l2_len)
			task->runtime_checksum_needed = 1;
		if (task->packet_id_pos > template->l2_len)
			task->runtime_checksum_needed = 1;
	}
}

static void task_gen_pkt_template_recalc_all(struct task_gen *task)
{
	task_gen_pkt_template_recalc_metadata(task);
	task_gen_pkt_template_recalc_checksum(task);
}
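
/* Templates are double-buffered: pkt_template_orig keeps the pristine
 * config-time packets (inline or pcap) while pkt_template is the working
 * copy that randoms and runtime "set value" commands modify. A reset
 * re-copies lengths and contents and recalculates the metadata. */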
static void task_gen_reset_pkt_templates_len(struct task_gen *task)
{
	struct pkt_template *src, *dst;

	for (size_t i = 0; i < task->n_pkts; ++i) {
		src = &task->pkt_template_orig[i];
		dst = &task->pkt_template[i];
		dst->len = src->len;
	}
}

static void task_gen_reset_pkt_templates_content(struct task_gen *task)
{
	struct pkt_template *src, *dst;

	for (size_t i = 0; i < task->n_pkts; ++i) {
		src = &task->pkt_template_orig[i];
		dst = &task->pkt_template[i];
		memcpy(dst->buf, src->buf, dst->len);
	}
}

static void task_gen_reset_pkt_templates(struct task_gen *task)
{
	task_gen_reset_pkt_templates_len(task);
	task_gen_reset_pkt_templates_content(task);
	task_gen_pkt_template_recalc_all(task);
}

static void task_init_gen_load_pkt_inline(struct task_gen *task, struct task_args *targ)
{
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	task->n_pkts = 1;

	size_t mem_size = task->n_pkts * sizeof(*task->pkt_template);
	task->pkt_template = prox_zmalloc(mem_size, socket_id);
	task->pkt_template_orig = prox_zmalloc(mem_size, socket_id);

	PROX_PANIC(task->pkt_template == NULL ||
		   task->pkt_template_orig == NULL,
		   "Failed to allocate %lu bytes (in huge pages) for packet template\n", mem_size);

	task->pkt_template->buf = prox_zmalloc(task->max_frame_size, socket_id);
	task->pkt_template_orig->buf = prox_zmalloc(task->max_frame_size, socket_id);
	PROX_PANIC(task->pkt_template->buf == NULL ||
		   task->pkt_template_orig->buf == NULL,
		   "Failed to allocate %u bytes (in huge pages) for packet\n", task->max_frame_size);

	PROX_PANIC(targ->pkt_size > task->max_frame_size,
		   targ->pkt_size > ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE - 4 ?
		   "pkt_size too high and jumbo frames disabled" : "pkt_size > mtu");

	rte_memcpy(task->pkt_template_orig[0].buf, targ->pkt_inline, targ->pkt_size);
	task->pkt_template_orig[0].len = targ->pkt_size;
	task_gen_reset_pkt_templates(task);
	check_all_pkt_size(task, 1);
	check_fields_in_bounds(task, task->pkt_template[0].len, 1);
}

static void task_init_gen_load_pcap(struct task_gen *task, struct task_args *targ)
{
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	char err[PCAP_ERRBUF_SIZE];
	uint32_t max_frame_size;
	pcap_t *handle = pcap_open_offline(targ->pcap_file, err);
	PROX_PANIC(handle == NULL, "Failed to open PCAP file: %s\n", err);

	task->n_pkts = pcap_count_pkts(handle, &max_frame_size);
	plogx_info("%u packets in pcap file '%s'\n", task->n_pkts, targ->pcap_file);
	PROX_PANIC(max_frame_size > task->max_frame_size,
		   max_frame_size > ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE - 4 ?
		   "pkt_size too high and jumbo frames disabled" : "pkt_size > mtu");

	if (targ->n_pkts)
		task->n_pkts = RTE_MIN(task->n_pkts, targ->n_pkts);
	PROX_PANIC(task->n_pkts > MAX_TEMPLATE_INDEX, "Too many packets specified in pcap - increase MAX_TEMPLATE_INDEX\n");
	plogx_info("Loading %u packets from pcap\n", task->n_pkts);
	size_t mem_size = task->n_pkts * sizeof(*task->pkt_template);
	task->pkt_template = prox_zmalloc(mem_size, socket_id);
	task->pkt_template_orig = prox_zmalloc(mem_size, socket_id);
	PROX_PANIC(task->pkt_template == NULL ||
		   task->pkt_template_orig == NULL,
		   "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size);

	for (uint i = 0; i < task->n_pkts; i++) {
		task->pkt_template[i].buf = prox_zmalloc(max_frame_size, socket_id);
		task->pkt_template_orig[i].buf = prox_zmalloc(max_frame_size, socket_id);

		PROX_PANIC(task->pkt_template[i].buf == NULL ||
			   task->pkt_template_orig[i].buf == NULL,
			   "Failed to allocate %u bytes (in huge pages) for pcap file\n", max_frame_size);
	}

	pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->pkt_template_orig, NULL, max_frame_size);
	pcap_close(handle);
	task_gen_reset_pkt_templates(task);
}

static struct rte_mempool *task_gen_create_mempool(struct task_args *targ, uint16_t max_frame_size)
{
	static char name[] = "gen_pool";
	struct rte_mempool *ret;
	const int sock_id = rte_lcore_to_socket_id(targ->lconf->id);

	name[0]++;	/* unique pool name per generator task: "gen_pool", "hen_pool", ... */
	uint32_t mbuf_size = TX_MBUF_SIZE;
	if (max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > mbuf_size)
		mbuf_size = max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
	plog_info("\t\tCreating mempool with name '%s'\n", name);
	ret = rte_mempool_create(name, targ->nb_mbuf - 1, mbuf_size,
				 targ->nb_cache_mbuf, sizeof(struct rte_pktmbuf_pool_private),
				 rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0,
				 sock_id, 0);
	PROX_PANIC(ret == NULL, "Failed to allocate dummy memory pool on socket %u with %u elements\n",
		   sock_id, targ->nb_mbuf - 1);

	plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", ret,
		  targ->nb_mbuf - 1, mbuf_size, targ->nb_cache_mbuf, sock_id);

	return ret;
}

void task_gen_set_pkt_count(struct task_base *tbase, uint32_t count)
{
	struct task_gen *task = (struct task_gen *)tbase;

	task->pkt_count = count;
}

int task_gen_set_pkt_size(struct task_base *tbase, uint32_t pkt_size)
{
	struct task_gen *task = (struct task_gen *)tbase;
	int rc;

	if ((rc = check_pkt_size(task, pkt_size, 0)) != 0)
		return rc;
	if ((rc = check_fields_in_bounds(task, pkt_size, 0)) != 0)
		return rc;
	task->pkt_template[0].len = pkt_size;
	return 0;
}

void task_gen_set_rate(struct task_base *tbase, uint64_t bps)
{
	struct task_gen *task = (struct task_gen *)tbase;

	task->new_rate_bps = bps;
}

void task_gen_reset_randoms(struct task_base *tbase)
{
	struct task_gen *task = (struct task_gen *)tbase;

	for (uint32_t i = 0; i < task->n_rands; ++i) {
		task->rand[i].rand_mask = 0;
		task->rand[i].fixed_bits = 0;
		task->rand[i].rand_offset = 0;
	}
	task->n_rands = 0;
}

int task_gen_set_value(struct task_base *tbase, uint32_t value, uint32_t offset, uint32_t len)
{
	struct task_gen *task = (struct task_gen *)tbase;

	for (size_t i = 0; i < task->n_pkts; ++i) {
		uint32_t to_write = rte_cpu_to_be_32(value) >> ((4 - len) * 8);
		uint8_t *dst = task->pkt_template[i].buf;

		rte_memcpy(dst + offset, &to_write, len);
	}

	task_gen_pkt_template_recalc_all(task);

	return 0;
}

void task_gen_reset_values(struct task_base *tbase)
{
	struct task_gen *task = (struct task_gen *)tbase;

	task_gen_reset_pkt_templates_content(task);
}

uint32_t task_gen_get_n_randoms(struct task_base *tbase)
{
	struct task_gen *task = (struct task_gen *)tbase;

	return task->n_rands;
}

static void init_task_gen_pcap(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
	const uint32_t sockid = rte_lcore_to_socket_id(targ->lconf->id);
	uint32_t max_frame_size;

	task->loop = targ->loop;
	task->pkt_idx = 0;
	task->hz = rte_get_tsc_hz();

	char err[PCAP_ERRBUF_SIZE];
	pcap_t *handle = pcap_open_offline(targ->pcap_file, err);
	PROX_PANIC(handle == NULL, "Failed to open PCAP file: %s\n", err);

	task->n_pkts = pcap_count_pkts(handle, &max_frame_size);
	plogx_info("%u packets in pcap file '%s'\n", task->n_pkts, targ->pcap_file);

	task->local_mbuf.mempool = task_gen_create_mempool(targ, max_frame_size);

	PROX_PANIC(!strcmp(targ->pcap_file, ""), "No pcap file defined\n");

	if (targ->n_pkts) {
		plogx_info("Configured to load %u packets\n", targ->n_pkts);
		if (task->n_pkts > targ->n_pkts)
			task->n_pkts = targ->n_pkts;
	}
	PROX_PANIC(task->n_pkts > MAX_TEMPLATE_INDEX, "Too many packets specified in pcap - increase MAX_TEMPLATE_INDEX\n");

	plogx_info("Loading %u packets from pcap\n", task->n_pkts);

	size_t mem_size = task->n_pkts * (sizeof(*task->proto) + sizeof(*task->proto_tsc));
	uint8_t *mem = prox_zmalloc(mem_size, sockid);

	PROX_PANIC(mem == NULL, "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size);
	task->proto = (struct pkt_template *) mem;
	task->proto_tsc = (uint64_t *)(mem + task->n_pkts * sizeof(*task->proto));

	/* allocate for the packets actually loaded (task->n_pkts), not for
	   targ->n_pkts, which is 0 when no explicit count is configured */
	for (uint i = 0; i < task->n_pkts; i++) {
		task->proto[i].buf = prox_zmalloc(max_frame_size, sockid);
		PROX_PANIC(task->proto[i].buf == NULL, "Failed to allocate %u bytes (in huge pages) for pcap file\n", max_frame_size);
	}

	pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->proto, task->proto_tsc, max_frame_size);
	pcap_close(handle);
}

static uint32_t task_gen_find_random_with_offset(struct task_gen *task, uint32_t offset)
{
	for (uint32_t i = 0; i < task->n_rands; ++i) {
		if (task->rand[i].rand_offset == offset) {
			return i;
		}
	}

	return UINT32_MAX;
}

int task_gen_add_rand(struct task_base *tbase, const char *rand_str, uint32_t offset, uint32_t rand_id)
{
	struct task_gen *task = (struct task_gen *)tbase;
	uint32_t existing_rand;

	if (rand_id == UINT32_MAX && task->n_rands == 64) {
		plog_err("Too many randoms\n");
		return -1;
	}
	uint32_t mask, fixed, len;

	if (parse_random_str(&mask, &fixed, &len, rand_str)) {
		plog_err("%s\n", get_parse_err());
		return -1;
	}
	task->runtime_checksum_needed = 1;

	existing_rand = task_gen_find_random_with_offset(task, offset);
	if (existing_rand != UINT32_MAX) {
		plog_warn("Random at offset %d already set => overwriting len = %d %s\n", offset, len, rand_str);
		rand_id = existing_rand;
		task->rand[rand_id].rand_len = len;
		task->rand[rand_id].rand_offset = offset;
		task->rand[rand_id].rand_mask = mask;
		task->rand[rand_id].fixed_bits = fixed;
		return 0;
	}

	task->rand[task->n_rands].rand_len = len;
	task->rand[task->n_rands].rand_offset = offset;
	task->rand[task->n_rands].rand_mask = mask;
	task->rand[task->n_rands].fixed_bits = fixed;

	task->n_rands++;
	return 0;
}

static void start(struct task_base *tbase)
{
	struct task_gen *task = (struct task_gen *)tbase;
	task->pkt_queue_index = 0;

	task_gen_reset_token_time(task);
	if (tbase->l3.tmaster) {
		register_all_ip_to_ctrl_plane(task);
	}
	if (task->port) {
		// task->port->link_speed reports the link speed in Mbps e.g. 40k for a 40 Gbps NIC.
		// task->link_speed reports link speed in Bytes per sec.
		// It can be 0 if link is down, and must hence be updated in the fast path.
		task->link_speed = task->port->link_speed * 125000L;
		if (task->link_speed)
			plog_info("\tPort %u: link speed is %ld Mbps\n",
				  (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
		else
			plog_info("\tPort %u: link speed is %ld Mbps - link might be down\n",
				  (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
	}
	/* TODO
	   Handle the case when two tasks transmit to the same port
	   and one of them is stopped. In that case ARP (requests or replies)
	   might not be sent. Master will have to keep a list of rings.
	   stop() will have to de-register the IP from the ctrl plane;
	   un-registration will remove the ring. When having more than
	   one active ring, master can always use the first one. */
}

static void start_pcap(struct task_base *tbase)
{
	struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
	/* When we start, the first packet is sent immediately. */
	task->last_tsc = rte_rdtsc() - task->proto_tsc[0];
	task->pkt_idx = 0;
}

static void init_task_gen_early(struct task_args *targ)
{
	uint8_t *generator_count = prox_sh_find_system("generator_count");

	if (generator_count == NULL) {
		generator_count = prox_zmalloc(sizeof(*generator_count), rte_lcore_to_socket_id(targ->lconf->id));
		PROX_PANIC(generator_count == NULL, "Failed to allocate generator count\n");
		prox_sh_add_system("generator_count", generator_count);
	}
	targ->generator_id = *generator_count;
	(*generator_count)++;
}

static void init_task_gen(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen *task = (struct task_gen *)tbase;

	task->packet_id_pos = targ->packet_id_pos;

	struct prox_port_cfg *port = find_reachable_port(targ);
	// TODO: check that all reachable ports have the same mtu...
	if (port) {
		task->cksum_offload = port->capabilities.tx_offload_cksum;
		task->port = port;
		task->max_frame_size = port->mtu + ETHER_HDR_LEN + 2 * PROX_VLAN_TAG_SIZE;
	} else {
		// Not generating to any port...
		task->max_frame_size = ETHER_MAX_LEN;
	}
	task->local_mbuf.mempool = task_gen_create_mempool(targ, task->max_frame_size);
	PROX_PANIC(task->local_mbuf.mempool == NULL, "Failed to create mempool\n");

	task->hz = rte_get_tsc_hz();
	task->lat_pos = targ->lat_pos;
	task->accur_pos = targ->accur_pos;
	task->sig_pos = targ->sig_pos;
	task->sig = targ->sig;
	task->new_rate_bps = targ->rate_bps;

	/*
	 * For tokens, use 10 Gbps as base rate.
	 * Scripts can then use the speed command, with speed=100 as 10 Gbps and speed=400 as 40 Gbps.
	 * A script can query the prox "port info" command to find out the port link speed and know
	 * at which rate to start. Note that virtio running on OVS returns 10 Gbps, so a script
	 * probably also has to check the driver (as returned by the same "port info" command).
	 */
	struct token_time_cfg tt_cfg = token_time_cfg_create(1250000000, rte_get_tsc_hz(), -1);
	token_time_init(&task->token_time, &tt_cfg);

	init_task_gen_seeds(task);

	task->min_bulk_size = targ->min_bulk_size;
	task->max_bulk_size = targ->max_bulk_size;
	if (task->min_bulk_size < 1)
		task->min_bulk_size = 1;
	if (task->max_bulk_size < 1)
		task->max_bulk_size = 64;
	PROX_PANIC(task->max_bulk_size > 64, "max_bulk_size higher than 64\n");
	PROX_PANIC(task->max_bulk_size < task->min_bulk_size, "max_bulk_size must be >= min_bulk_size\n");

	task->pkt_count = -1;
	task->lat_enabled = targ->lat_enabled;
	task->runtime_flags = targ->runtime_flags;
	PROX_PANIC((task->lat_pos || task->accur_pos) && !task->lat_enabled, "lat pos or accur pos configured but latency not enabled\n");

	task->generator_id = targ->generator_id;
	plog_info("\tGenerator id = %d\n", task->generator_id);
	task->link_speed = UINT64_MAX;

	if (!strcmp(targ->pcap_file, "")) {
		plog_info("\tUsing inline definition of a packet\n");
		task_init_gen_load_pkt_inline(task, targ);
	} else {
		plog_info("Loading from pcap %s\n", targ->pcap_file);
		task_init_gen_load_pcap(task, targ);
	}

	if ((targ->flags & DSF_KEEP_SRC_MAC) == 0 && (targ->nb_txrings || targ->nb_txports)) {
		uint8_t *src_addr = prox_port_cfg[tbase->tx_params_hw.tx_port_queue->port].eth_addr.addr_bytes;
		for (uint32_t i = 0; i < task->n_pkts; ++i) {
			rte_memcpy(&task->pkt_template[i].buf[6], src_addr, 6);
		}
	}
	memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(struct ether_addr));
	for (uint32_t i = 0; i < targ->n_rand_str; ++i) {
		PROX_PANIC(task_gen_add_rand(tbase, targ->rand_str[i], targ->rand_offset[i], UINT32_MAX),
			   "Failed to add random\n");
	}
}

static struct task_init task_init_gen = {
	.mode_str = "gen",
	.init = init_task_gen,
	.handle = handle_gen_bulk,
	.start = start,
	.early_init = init_task_gen_early,
#ifdef SOFT_CRC
	// For SOFT_CRC, no offload is needed. If both NOOFFLOADS and NOMULTSEGS flags are set the
	// vector mode is used by DPDK, resulting (theoretically) in higher performance.
	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
#else
	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
#endif
	.size = sizeof(struct task_gen)
};

static struct task_init task_init_gen_l3 = {
	.mode_str = "gen",
	.sub_mode_str = "l3",
	.init = init_task_gen,
	.handle = handle_gen_bulk,
	.start = start,
	.early_init = init_task_gen_early,
#ifdef SOFT_CRC
	// For SOFT_CRC, no offload is needed. If both NOOFFLOADS and NOMULTSEGS flags are set the
	// vector mode is used by DPDK, resulting (theoretically) in higher performance.
	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
#else
	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
#endif
	.size = sizeof(struct task_gen)
};

static struct task_init task_init_gen_pcap = {
	.mode_str = "gen",
	.sub_mode_str = "pcap",
	.init = init_task_gen_pcap,
	.handle = handle_gen_pcap_bulk,
	.start = start_pcap,
	.early_init = init_task_gen_early,
#ifdef SOFT_CRC
	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
#else
	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
#endif
	.size = sizeof(struct task_gen_pcap)
};

__attribute__((constructor)) static void reg_task_gen(void)
{
	reg_task(&task_init_gen);
	reg_task(&task_init_gen_l3);
	reg_task(&task_init_gen_pcap);
}