// Copyright (c) 2010-2017 Intel Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <rte_cycles.h>
#include <rte_version.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_hash_crc.h>

#include "prox_shared.h"
#include "prox_malloc.h"
#include "handle_gen.h"
#include "handle_lat.h"
#include "task_init.h"
#include "task_base.h"
#include "prox_port_cfg.h"
#include "mbuf_utils.h"
#include "prox_cksum.h"
#include "prox_assert.h"
#include "token_time.h"
#include "local_mbuf.h"
#include "handle_master.h"
#define MAX_TEMPLATE_INDEX 65536
#define TEMPLATE_INDEX_MASK (MAX_TEMPLATE_INDEX - 1)
#define IP4(x) ((x) & 0xff), (((x) >> 8) & 0xff), (((x) >> 16) & 0xff), ((x) >> 24)
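/* Copy a packet template into an mbuf: set both the packet and data
   length to the template length and copy the template bytes into the
   mbuf data area pointed to by pkt. */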
static void pkt_template_init_mbuf(struct pkt_template *pkt_template, struct rte_mbuf *mbuf, uint8_t *pkt)
const uint32_t pkt_size = pkt_template->len;
rte_pktmbuf_pkt_len(mbuf) = pkt_size;
rte_pktmbuf_data_len(mbuf) = pkt_size;
rte_memcpy(pkt, pkt_template->buf, pkt_template->len);
struct task_gen_pcap {
struct task_base base;
struct local_mbuf local_mbuf;
struct pkt_template *proto;
struct task_base base;
struct token_time token_time;
struct local_mbuf local_mbuf;
struct pkt_template *pkt_template; /* packet templates used at runtime */
uint64_t write_duration_estimate; /* how long it took previously to write the time stamps in the packets */
uint64_t earliest_tsc_next_pkt;
uint64_t new_rate_bps;
uint64_t pkt_queue_index;
uint32_t n_pkts; /* number of packets in pcap */
uint32_t pkt_idx; /* current packet from pcap */
uint32_t pkt_count; /* how many packets to generate */
uint32_t max_frame_size;
uint32_t runtime_flags;
uint16_t packet_id_pos;
uint8_t generator_id;
uint8_t n_rands; /* number of randoms */
uint8_t min_bulk_size;
uint8_t max_bulk_size;
uint8_t runtime_checksum_needed;
uint32_t rand_mask; /* since the random vals are uniform, masks don't introduce bias */
uint32_t fixed_bits; /* value of the bits that are not randomized */
uint16_t rand_offset; /* each random has an offset */
uint8_t rand_len; /* # bytes to take from the random value (max len = 4, no bias introduced) */
uint64_t pkt_tsc_offset[64];
struct pkt_template *pkt_template_orig; /* packet templates (from inline or from pcap) */
struct ether_addr src_mac;
uint8_t cksum_offload;
struct prox_port_cfg *port;
uint64_t *bytes_to_tsc;
} __rte_cache_aligned;
static inline uint8_t ipv4_get_hdr_len(struct ipv4_hdr *ip)
/* Optimize for common case of IPv4 header without options. */
if (ip->version_ihl == 0x45)
return sizeof(struct ipv4_hdr);
if (unlikely(ip->version_ihl >> 4 != 4)) {
plog_warn("IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4);
return (ip->version_ihl & 0xF) * 4;
static void parse_l2_l3_len(uint8_t *pkt, uint16_t *l2_len, uint16_t *l3_len, uint16_t len)
*l2_len = sizeof(struct ether_hdr);
struct vlan_hdr *vlan_hdr;
struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt;
uint16_t ether_type = eth_hdr->ether_type;
while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (*l2_len + sizeof(struct vlan_hdr) < len)) {
vlan_hdr = (struct vlan_hdr *)(pkt + *l2_len);
ether_type = vlan_hdr->eth_proto;
// No L3 cksum offload for IPv6, but TODO L4 offload
// ETYPE_EoGRE CRC not implemented yet
switch (ether_type) {
plog_warn("Unsupported packet type %x - CRC might be wrong\n", ether_type);
struct ipv4_hdr *ip = (struct ipv4_hdr *)(pkt + *l2_len);
*l3_len = ipv4_get_hdr_len(ip);
static void checksum_packet(uint8_t *hdr, struct rte_mbuf *mbuf, struct pkt_template *pkt_template, int cksum_offload)
uint16_t l2_len = pkt_template->l2_len;
uint16_t l3_len = pkt_template->l3_len;
struct ipv4_hdr *ip = (struct ipv4_hdr*)(hdr + l2_len);
prox_ip_udp_cksum(mbuf, ip, l2_len, l3_len, cksum_offload);
static void task_gen_reset_token_time(struct task_gen *task)
token_time_set_bpp(&task->token_time, task->new_rate_bps);
token_time_reset(&task->token_time, rte_rdtsc(), 0);
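/* Decrement the remaining packet budget by the size of the bulk that
   is about to be sent. A pkt_count of (uint32_t)-1 means "unlimited"
   and is left untouched. */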
static void task_gen_take_count(struct task_gen *task, uint32_t send_bulk)
if (task->pkt_count == (uint32_t)-1)
if (task->pkt_count >= send_bulk)
task->pkt_count -= send_bulk;
static int handle_gen_pcap_bulk(struct task_base *tbase, struct rte_mbuf **mbuf, uint16_t n_pkts)
struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
uint64_t now = rte_rdtsc();
uint64_t send_bulk = 0;
uint32_t pkt_idx_tmp = task->pkt_idx;
if (pkt_idx_tmp == task->n_pkts) {
PROX_ASSERT(task->loop);
for (uint16_t j = 0; j < 64; ++j) {
uint64_t tsc = task->proto_tsc[pkt_idx_tmp];
if (task->last_tsc + tsc <= now) {
task->last_tsc += tsc;
if (pkt_idx_tmp == task->n_pkts) {
struct rte_mbuf **new_pkts = local_mbuf_refill_and_take(&task->local_mbuf, send_bulk);
if (new_pkts == NULL)
for (uint16_t j = 0; j < send_bulk; ++j) {
struct rte_mbuf *next_pkt = new_pkts[j];
struct pkt_template *pkt_template = &task->proto[task->pkt_idx];
uint8_t *hdr = rte_pktmbuf_mtod(next_pkt, uint8_t *);
pkt_template_init_mbuf(pkt_template, next_pkt, hdr);
if (task->pkt_idx == task->n_pkts) {
return task->base.tx_pkt(&task->base, new_pkts, send_bulk, NULL);
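/* Convert a number of bytes on the wire into the TSC cycles needed to
   transmit them at the port's maximum link speed. The table is
   precomputed at init time (see init_task_gen); entries are 0 when no
   link speed is known, which effectively disables the extrapolation. */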
static inline uint64_t bytes_to_tsc(struct task_gen *task, uint32_t bytes)
return task->bytes_to_tsc[bytes];
static uint32_t task_gen_next_pkt_idx(const struct task_gen *task, uint32_t pkt_idx)
return pkt_idx + 1 == task->n_pkts ? 0 : pkt_idx + 1;
static uint32_t task_gen_offset_pkt_idx(const struct task_gen *task, uint32_t offset)
return (task->pkt_idx + offset) % task->n_pkts;
static uint32_t task_gen_calc_send_bulk(const struct task_gen *task, uint32_t *total_bytes)
/* The biggest bulk we allow to send is task->max_bulk_size
packets. The max bulk size can also be limited by the
pkt_count field. At the same time, we are rate limiting
based on the specified speed (in bytes per second) so token
bucket based rate limiting must also be applied. The
minimum bulk size is also constrained. If the calculated
bulk size is less than the minimum, then don't send
const uint32_t min_bulk = task->min_bulk_size;
uint32_t max_bulk = task->max_bulk_size;
if (task->pkt_count != (uint32_t)-1 && task->pkt_count < max_bulk) {
max_bulk = task->pkt_count;
uint32_t send_bulk = 0;
uint32_t pkt_idx_tmp = task->pkt_idx;
uint32_t would_send_bytes = 0;
* TODO - this must be improved to take into account the fact that, after applying randoms,
* the packet can be replaced by an ARP
for (uint16_t j = 0; j < max_bulk; ++j) {
struct pkt_template *pktpl = &task->pkt_template[pkt_idx_tmp];
pkt_size = pktpl->len;
uint32_t pkt_len = pkt_len_to_wire_size(pkt_size);
if (pkt_len + would_send_bytes > task->token_time.bytes_now)
pkt_idx_tmp = task_gen_next_pkt_idx(task, pkt_idx_tmp);
would_send_bytes += pkt_len;
if (send_bulk < min_bulk)
*total_bytes = would_send_bytes;
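/* Overwrite the configured pseudo-random fields of one packet. Each
   configured random draws a fresh 32-bit value, keeps only the bits
   selected by rand_mask, ORs in fixed_bits, and writes the rand_len
   least significant bytes (in big-endian order) at rand_offset in the
   packet. The mask/fixed/len triplet comes from the random strings
   parsed in task_gen_add_rand(). */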
static void task_gen_apply_random_fields(struct task_gen *task, uint8_t *hdr)
uint32_t ret, ret_tmp;
for (uint16_t i = 0; i < task->n_rands; ++i) {
ret = random_next(&task->rand[i].state);
ret_tmp = (ret & task->rand[i].rand_mask) | task->rand[i].fixed_bits;
ret_tmp = rte_bswap32(ret_tmp);
/* At this point, the lower order bytes (BE) contain
the generated value. The address where the value of
interest starts is at ret_tmp + 4 - rand_len. */
uint8_t *pret_tmp = (uint8_t*)&ret_tmp;
rte_memcpy(hdr + task->rand[i].rand_offset, pret_tmp + 4 - task->rand[i].rand_len, task->rand[i].rand_len);
static void task_gen_apply_all_random_fields(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count)
for (uint16_t i = 0; i < count; ++i)
task_gen_apply_random_fields(task, pkt_hdr[i]);
static void task_gen_apply_accur_pos(struct task_gen *task, uint8_t *pkt_hdr, uint32_t accuracy)
*(uint32_t *)(pkt_hdr + task->accur_pos) = accuracy;
static void task_gen_apply_sig(struct task_gen *task, struct pkt_template *dst)
*(uint32_t *)(dst->buf + task->sig_pos) = task->sig;
static void task_gen_apply_all_accur_pos(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
if (!task->accur_pos)
/* The accuracy of task->pkt_queue_index - 64 is stored in
packet task->pkt_queue_index. The ID modulo 64 is the
index into the accur array. */
for (uint16_t j = 0; j < count; ++j) {
uint32_t accuracy = task->accur[(task->pkt_queue_index + j) & 63];
task_gen_apply_accur_pos(task, pkt_hdr[j], accuracy);
static void task_gen_apply_unique_id(struct task_gen *task, uint8_t *pkt_hdr, const struct unique_id *id)
struct unique_id *dst = (struct unique_id *)(pkt_hdr + task->packet_id_pos);
static void task_gen_apply_all_unique_id(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
if (!task->packet_id_pos)
for (uint16_t i = 0; i < count; ++i) {
unique_id_init(&id, task->generator_id, task->pkt_queue_index++);
task_gen_apply_unique_id(task, pkt_hdr[i], &id);
static void task_gen_checksum_packets(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
if (!(task->runtime_flags & TASK_TX_CRC))
if (!task->runtime_checksum_needed)
uint32_t pkt_idx = task_gen_offset_pkt_idx(task, - count);
for (uint16_t i = 0; i < count; ++i) {
struct pkt_template *pkt_template = &task->pkt_template[pkt_idx];
checksum_packet(pkt_hdr[i], mbufs[i], pkt_template, task->cksum_offload);
pkt_idx = task_gen_next_pkt_idx(task, pkt_idx);
static void task_gen_consume_tokens(struct task_gen *task, uint32_t tokens, uint32_t send_count)
/* If max burst has been sent, we can't keep up so just assume
that we can (leaving a "gap" in the packet stream on the
wire). */
task->token_time.bytes_now -= tokens;
if (send_count == task->max_bulk_size && task->token_time.bytes_now > tokens) {
task->token_time.bytes_now = tokens;
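/* Estimate how many TSC cycles the bulk that was just built will
   occupy on the wire: the relative TSC offset of the last packet plus
   (unless NO_EXTRAPOLATION is defined) the transmit time of that last
   packet. Used to keep the written timestamps monotonic across bulks. */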
static uint64_t task_gen_calc_bulk_duration(struct task_gen *task, uint32_t count)
uint32_t pkt_idx = task_gen_offset_pkt_idx(task, - 1);
struct pkt_template *last_pkt_template = &task->pkt_template[pkt_idx];
uint32_t last_pkt_len = pkt_len_to_wire_size(last_pkt_template->len);
#ifdef NO_EXTRAPOLATION
uint64_t bulk_duration = task->pkt_tsc_offset[count - 1];
uint64_t last_pkt_duration = bytes_to_tsc(task, last_pkt_len);
uint64_t bulk_duration = task->pkt_tsc_offset[count - 1] + last_pkt_duration;
return bulk_duration;
static uint64_t task_gen_write_latency(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count)
if (!task->lat_enabled)
uint64_t tx_tsc, delta_t;
uint64_t tsc_before_tx = 0;
/* Just before sending the packets, apply the time stamp
relative to when the first packet will be sent. The first
packet will be sent now. The time is read for each packet
to reduce the error towards the actual time the packet will
be sent. */
uint64_t write_tsc_after, write_tsc_before;
write_tsc_before = rte_rdtsc();
/* The time it took previously to write the time stamps in the
packets is used as an estimate for how long it will take to
write the time stamps now. The estimated time at which the
packets will actually be sent will be at tx_tsc. */
tx_tsc = write_tsc_before + task->write_duration_estimate;
/* The offset delta_t tracks the difference between the actual
time and the time written in the packets. Adding the offset
to the actual time ensures that the time written in the
packets is monotonically increasing. At the same time,
simply sleeping until delta_t is zero would leave a period
of silence on the line. The error has been introduced
earlier, but the packets have already been sent. */
/* This happens typically if the previous bulk was delayed
by an interrupt, e.g. (with time in nsec):
Time x: sleep 4 microsec
Time x+4000: send 64 packets (64 packets take 4000 nsec at 10 Gbps with 64 byte frames)
Time x+5000: send 16 packets (16 packets take 1000 nsec)
When we send the 16 packets, the 64 earlier packets are not yet
if (tx_tsc < task->earliest_tsc_next_pkt)
delta_t = task->earliest_tsc_next_pkt - tx_tsc;
for (uint16_t i = 0; i < count; ++i) {
uint32_t *pos = (uint32_t *)(pkt_hdr[i] + task->lat_pos);
const uint64_t pkt_tsc = tx_tsc + delta_t + task->pkt_tsc_offset[i];
*pos = pkt_tsc >> LATENCY_ACCURACY;
uint64_t bulk_duration = task_gen_calc_bulk_duration(task, count);
task->earliest_tsc_next_pkt = tx_tsc + delta_t + bulk_duration;
write_tsc_after = rte_rdtsc();
task->write_duration_estimate = write_tsc_after - write_tsc_before;
/* Make sure that the time stamps that were written
are valid. The offset must be taken into account */
tsc_before_tx = rte_rdtsc();
} while (tsc_before_tx < tx_tsc);
return tsc_before_tx;
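/* Measure the delay between the TSC read at the end of
   task_gen_write_latency() (just before checksumming/transmit) and
   now, after the bulk has been handed to tx. The value is stored in a
   64-entry ring indexed by packet id; per the comment in
   task_gen_apply_all_accur_pos(), it is written into a packet sent 64
   ids later. */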
static void task_gen_store_accuracy(struct task_gen *task, uint32_t count, uint64_t tsc_before_tx)
if (!task->accur_pos)
uint64_t accur = rte_rdtsc() - tsc_before_tx;
uint64_t first_accuracy_idx = task->pkt_queue_index - count;
for (uint32_t i = 0; i < count; ++i) {
uint32_t accuracy_idx = (first_accuracy_idx + i) & 63;
task->accur[accuracy_idx] = accur;
static void task_gen_load_and_prefetch(struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
for (uint16_t i = 0; i < count; ++i)
rte_prefetch0(mbufs[i]);
for (uint16_t i = 0; i < count; ++i)
pkt_hdr[i] = rte_pktmbuf_mtod(mbufs[i], uint8_t *);
for (uint16_t i = 0; i < count; ++i)
rte_prefetch0(pkt_hdr[i]);
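/* Copy the template of each packet of the bulk into its mbuf, tag the
   mbuf with the template index, and, when latency measurement is
   enabled, precompute the per-packet TSC offset (relative to the first
   packet of the bulk) from the number of wire bytes sent before it. */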
static void task_gen_build_packets(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
uint64_t will_send_bytes = 0;
for (uint16_t i = 0; i < count; ++i) {
struct pkt_template *pkt_template = &task->pkt_template[task->pkt_idx];
pkt_template_init_mbuf(pkt_template, mbufs[i], pkt_hdr[i]);
mbufs[i]->udata64 = task->pkt_idx & TEMPLATE_INDEX_MASK;
struct ether_hdr *hdr = (struct ether_hdr *)pkt_hdr[i];
if (task->lat_enabled) {
#ifdef NO_EXTRAPOLATION
task->pkt_tsc_offset[i] = 0;
task->pkt_tsc_offset[i] = bytes_to_tsc(task, will_send_bytes);
will_send_bytes += pkt_len_to_wire_size(pkt_template->len);
task->pkt_idx = task_gen_next_pkt_idx(task, task->pkt_idx);
static void task_gen_update_config(struct task_gen *task)
if (task->token_time.cfg.bpp != task->new_rate_bps)
task_gen_reset_token_time(task);
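/* Recursively walk the bits of a random mask and register, with the
   master task, every IPv4 source address that the random field can
   produce (the fixed bits are ORed in at the leaves); for example a
   mask with two random bits leads to four registered addresses. Used
   when an L3 master task is configured so that the control plane can
   answer ARP for all of them. */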
static inline void build_value(struct task_gen *task, uint32_t mask, int bit_pos, uint32_t val, uint32_t fixed_bits)
struct task_base *tbase = (struct task_base *)task;
build_value(task, mask >> 1, bit_pos + 1, val, fixed_bits);
build_value(task, mask >> 1, bit_pos + 1, val | (1 << bit_pos), fixed_bits);
register_ip_to_ctrl_plane(tbase->l3.tmaster, rte_cpu_to_be_32(val | fixed_bits), tbase->l3.reachable_port_id, tbase->l3.core_id, tbase->l3.task_id);
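/* For each packet template, locate the IPv4 source address (skipping
   any VLAN and MPLS headers), register it with the master task, and,
   for every configured random that overlaps the source address, also
   register all the addresses that random can generate via
   build_value(). */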
static inline void register_all_ip_to_ctrl_plane(struct task_gen *task)
struct task_base *tbase = (struct task_base *)task;
for (uint32_t i = 0; i < task->n_pkts; ++i) {
struct pkt_template *pktpl = &task->pkt_template[i];
unsigned int ip_src_pos = 0;
unsigned int l2_len = sizeof(struct ether_hdr);
uint8_t *pkt = pktpl->buf;
struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt;
uint16_t ether_type = eth_hdr->ether_type;
struct vlan_hdr *vlan_hdr;
while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(struct vlan_hdr) < pktpl->len)) {
vlan_hdr = (struct vlan_hdr *)(pkt + l2_len);
ether_type = vlan_hdr->eth_proto;
if ((ether_type == ETYPE_MPLSU) || (ether_type == ETYPE_MPLSM)) {
if ((ether_type != ETYPE_IPv4) && !maybe_ipv4)
struct ipv4_hdr *ip = (struct ipv4_hdr *)(pkt + l2_len);
PROX_PANIC(ip->version_ihl >> 4 != 4, "IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4);
// Even if IPv4 header contains options, options are after ip src and dst
ip_src_pos = l2_len + sizeof(struct ipv4_hdr) - 2 * sizeof(uint32_t);
uint32_t *ip_src = ((uint32_t *)(pktpl->buf + ip_src_pos));
plog_info("\tip_src_pos = %d, ip_src = %x\n", ip_src_pos, *ip_src);
register_ip_to_ctrl_plane(tbase->l3.tmaster, *ip_src, tbase->l3.reachable_port_id, tbase->l3.core_id, tbase->l3.task_id);
for (int j = 0; j < task->n_rands; j++) {
offset = task->rand[j].rand_offset;
len = task->rand[j].rand_len;
mask = task->rand[j].rand_mask;
fixed = task->rand[j].fixed_bits;
plog_info("offset = %d, len = %d, mask = %x, fixed = %x\n", offset, len, mask, fixed);
if ((offset < ip_src_pos + 4) && (offset + len >= ip_src_pos)) {
if (offset >= ip_src_pos) {
int32_t ip_src_mask = (1 << (4 + ip_src_pos - offset) * 8) - 1;
mask = mask & ip_src_mask;
fixed = (fixed & ip_src_mask) | (rte_be_to_cpu_32(*ip_src) & ~ip_src_mask);
build_value(task, mask, 0, 0, fixed);
int32_t bits = ((ip_src_pos + 4 - offset - len) * 8);
fixed = (fixed << bits) | (rte_be_to_cpu_32(*ip_src) & ((1 << bits) - 1));
build_value(task, mask, 0, 0, fixed);
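/* Main handler of the gen task. It refills the token bucket, decides
   how many packets can be sent in this iteration based on the token
   bucket and the min/max bulk sizes, takes mbufs from the local cache,
   builds the packets from their templates, applies randoms, packet
   ids, accuracy values and latency timestamps, fixes checksums when
   needed and finally transmits the bulk. */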
static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
struct task_gen *task = (struct task_gen *)tbase;
uint8_t out[MAX_PKT_BURST] = {0};
task_gen_update_config(task);
if (task->pkt_count == 0) {
task_gen_reset_token_time(task);
if (!task->token_time.cfg.bpp)
token_time_update(&task->token_time, rte_rdtsc());
uint32_t would_send_bytes;
uint32_t send_bulk = task_gen_calc_send_bulk(task, &would_send_bytes);
task_gen_take_count(task, send_bulk);
task_gen_consume_tokens(task, would_send_bytes, send_bulk);
struct rte_mbuf **new_pkts = local_mbuf_refill_and_take(&task->local_mbuf, send_bulk);
if (new_pkts == NULL)
uint8_t *pkt_hdr[MAX_RING_BURST];
task_gen_load_and_prefetch(new_pkts, pkt_hdr, send_bulk);
task_gen_build_packets(task, new_pkts, pkt_hdr, send_bulk);
task_gen_apply_all_random_fields(task, pkt_hdr, send_bulk);
task_gen_apply_all_accur_pos(task, new_pkts, pkt_hdr, send_bulk);
task_gen_apply_all_unique_id(task, new_pkts, pkt_hdr, send_bulk);
uint64_t tsc_before_tx;
tsc_before_tx = task_gen_write_latency(task, pkt_hdr, send_bulk);
task_gen_checksum_packets(task, new_pkts, pkt_hdr, send_bulk);
ret = task->base.tx_pkt(&task->base, new_pkts, send_bulk, out);
task_gen_store_accuracy(task, send_bulk, tsc_before_tx);
static void init_task_gen_seeds(struct task_gen *task)
for (size_t i = 0; i < sizeof(task->rand)/sizeof(task->rand[0]); ++i)
random_init_seed(&task->rand[i].state);
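/* Count the number of packets in a pcap file and report the largest
   frame it contains, then seek the file back to the first packet so
   the caller can read them. */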
static uint32_t pcap_count_pkts(pcap_t *handle, uint32_t *max_frame_size)
struct pcap_pkthdr header;
long pkt1_fpos = ftell(pcap_file(handle));
while ((buf = pcap_next(handle, &header))) {
if (header.len > *max_frame_size)
*max_frame_size = header.len;
int ret2 = fseek(pcap_file(handle), pkt1_fpos, SEEK_SET);
PROX_PANIC(ret2 != 0, "Failed to reset reading pcap file\n");
static uint64_t avg_time_stamp(uint64_t *time_stamp, uint32_t n)
uint64_t tot_inter_pkt = 0;
for (uint32_t i = 0; i < n; ++i)
tot_inter_pkt += time_stamp[i];
return (tot_inter_pkt + n / 2)/n;
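/* Read n_pkts packets from a pcap file into the given templates,
   truncating frames that do not fit in the template buffer. When
   time_stamp is not NULL, the absolute capture times are converted to
   inter-packet gaps in TSC cycles; the missing gap between the last
   and the first packet (needed when looping the pcap) is set to the
   average of the other gaps. */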
static int pcap_read_pkts(pcap_t *handle, const char *file_name, uint32_t n_pkts, struct pkt_template *proto, uint64_t *time_stamp)
struct pcap_pkthdr header;
for (uint32_t i = 0; i < n_pkts; ++i) {
buf = pcap_next(handle, &header);
PROX_PANIC(buf == NULL, "Failed to read packet %d from pcap %s\n", i, file_name);
proto[i].len = header.len;
len = RTE_MIN(header.len, sizeof(proto[i].buf));
if (header.len > len)
plogx_warn("Packet truncated from %u to %zu bytes\n", header.len, len);
static struct timeval beg;
tv = tv_diff(&beg, &header.ts);
tv_to_tsc(&tv, time_stamp + i);
rte_memcpy(proto[i].buf, buf, len);
if (time_stamp && n_pkts) {
for (uint32_t i = n_pkts - 1; i > 0; --i)
time_stamp[i] -= time_stamp[i - 1];
/* Since the handle function will loop the packets,
there is one time-stamp that is not provided by the
pcap file. This is the time between the last and
the first packet. This implementation takes the
average of the inter-packet times here. */
time_stamp[0] = avg_time_stamp(time_stamp + 1, n_pkts - 1);
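/* Validate a packet size against the supported range (at least an
   Ethernet + IPv4 header, at most the configured max frame size).
   With do_panic set the function aborts on error, otherwise it logs
   and returns a non-zero value so runtime commands can be rejected
   gracefully. */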
static int check_pkt_size(struct task_gen *task, uint32_t pkt_size, int do_panic)
const uint16_t min_len = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
const uint16_t max_len = task->max_frame_size;
PROX_PANIC(pkt_size == 0, "Invalid packet size length (no packet defined?)\n");
PROX_PANIC(pkt_size > max_len, "pkt_size out of range (must be <= %u)\n", max_len);
PROX_PANIC(pkt_size < min_len, "pkt_size out of range (must be >= %u)\n", min_len);
plog_err("Invalid packet size length (no packet defined?)\n");
if (pkt_size > max_len) {
plog_err("pkt_size out of range (must be <= %u)\n", max_len);
if (pkt_size < min_len) {
plog_err("pkt_size out of range (must be >= %u)\n", min_len);
static int check_all_pkt_size(struct task_gen *task, int do_panic)
for (uint32_t i = 0; i < task->n_pkts; ++i) {
if ((rc = check_pkt_size(task, task->pkt_template[i].len, do_panic)) != 0)
static int check_fields_in_bounds(struct task_gen *task, uint32_t pkt_size, int do_panic)
if (task->lat_enabled) {
uint32_t pos_beg = task->lat_pos;
uint32_t pos_end = task->lat_pos + 3U;
PROX_PANIC(pkt_size <= pos_end, "Writing latency at %u-%u, but packet size is %u bytes\n",
pos_beg, pos_end, pkt_size);
else if (pkt_size <= pos_end) {
plog_err("Writing latency at %u-%u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
if (task->packet_id_pos) {
uint32_t pos_beg = task->packet_id_pos;
uint32_t pos_end = task->packet_id_pos + 4U;
PROX_PANIC(pkt_size <= pos_end, "Writing packet at %u-%u, but packet size is %u bytes\n",
pos_beg, pos_end, pkt_size);
else if (pkt_size <= pos_end) {
plog_err("Writing packet at %u-%u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
if (task->accur_pos) {
uint32_t pos_beg = task->accur_pos;
uint32_t pos_end = task->accur_pos + 3U;
819 PROX_PANIC(pkt_size <= pos_end, "Writing accuracy at %u%-u, but packet size is %u bytes\n",
820 pos_beg, pos_end, pkt_size);
821 else if (pkt_size <= pos_end) {
822 plog_err("Writing accuracy at %u%-u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
static void task_gen_pkt_template_recalc_metadata(struct task_gen *task)
struct pkt_template *template;
for (size_t i = 0; i < task->n_pkts; ++i) {
template = &task->pkt_template[i];
parse_l2_l3_len(template->buf, &template->l2_len, &template->l3_len, template->len);
static void task_gen_pkt_template_recalc_checksum(struct task_gen *task)
struct pkt_template *template;
task->runtime_checksum_needed = 0;
for (size_t i = 0; i < task->n_pkts; ++i) {
template = &task->pkt_template[i];
if (template->l2_len == 0)
ip = (struct ipv4_hdr *)(template->buf + template->l2_len);
ip->hdr_checksum = 0;
prox_ip_cksum_sw(ip);
uint32_t l4_len = rte_bswap16(ip->total_length) - template->l3_len;
if (ip->next_proto_id == IPPROTO_UDP) {
struct udp_hdr *udp = (struct udp_hdr *)(((uint8_t *)ip) + template->l3_len);
prox_udp_cksum_sw(udp, l4_len, ip->src_addr, ip->dst_addr);
} else if (ip->next_proto_id == IPPROTO_TCP) {
struct tcp_hdr *tcp = (struct tcp_hdr *)(((uint8_t *)ip) + template->l3_len);
prox_tcp_cksum_sw(tcp, l4_len, ip->src_addr, ip->dst_addr);
/* Checksums are precomputed here; they only need to be
recalculated at runtime if one of the fields written at
packet construction time (latency, accuracy, packet id)
lands beyond the L2 header, i.e. inside the IP payload. */
if (task->lat_enabled && task->lat_pos > template->l2_len)
task->runtime_checksum_needed = 1;
if (task->accur_pos > template->l2_len)
task->runtime_checksum_needed = 1;
if (task->packet_id_pos > template->l2_len)
task->runtime_checksum_needed = 1;
static void task_gen_pkt_template_recalc_all(struct task_gen *task)
task_gen_pkt_template_recalc_metadata(task);
task_gen_pkt_template_recalc_checksum(task);
static void task_gen_reset_pkt_templates_len(struct task_gen *task)
struct pkt_template *src, *dst;
for (size_t i = 0; i < task->n_pkts; ++i) {
src = &task->pkt_template_orig[i];
dst = &task->pkt_template[i];
static void task_gen_reset_pkt_templates_content(struct task_gen *task)
struct pkt_template *src, *dst;
for (size_t i = 0; i < task->n_pkts; ++i) {
src = &task->pkt_template_orig[i];
dst = &task->pkt_template[i];
memcpy(dst->buf, src->buf, dst->len);
task_gen_apply_sig(task, dst);
static void task_gen_reset_pkt_templates(struct task_gen *task)
task_gen_reset_pkt_templates_len(task);
task_gen_reset_pkt_templates_content(task);
task_gen_pkt_template_recalc_all(task);
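/* Load the packet defined inline in the configuration: allocate the
   runtime and original template arrays (a single entry here), copy
   the configured bytes into template 0 and validate its size and the
   configured field offsets. */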
static void task_init_gen_load_pkt_inline(struct task_gen *task, struct task_args *targ)
const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
size_t mem_size = task->n_pkts * sizeof(*task->pkt_template);
task->pkt_template = prox_zmalloc(mem_size, socket_id);
task->pkt_template_orig = prox_zmalloc(mem_size, socket_id);
PROX_PANIC(task->pkt_template == NULL ||
task->pkt_template_orig == NULL,
"Failed to allocate %lu bytes (in huge pages) for packet template\n", mem_size);
task->pkt_template->buf = prox_zmalloc(task->max_frame_size, socket_id);
task->pkt_template_orig->buf = prox_zmalloc(task->max_frame_size, socket_id);
PROX_PANIC(task->pkt_template->buf == NULL ||
task->pkt_template_orig->buf == NULL,
"Failed to allocate %u bytes (in huge pages) for packet\n", task->max_frame_size);
PROX_PANIC(targ->pkt_size > task->max_frame_size,
targ->pkt_size > ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE - 4 ?
"pkt_size too high and jumbo frames disabled" : "pkt_size > mtu");
rte_memcpy(task->pkt_template_orig[0].buf, targ->pkt_inline, targ->pkt_size);
task->pkt_template_orig[0].len = targ->pkt_size;
task_gen_reset_pkt_templates(task);
check_all_pkt_size(task, 1);
check_fields_in_bounds(task, task->pkt_template[0].len, 1);
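/* Load the packet templates from a pcap file: count the packets, cap
   the count to the configured limit, allocate one template per packet
   and read the frames in. Inter-packet timing is ignored here; the
   gen task paces itself with its token bucket instead. */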
static void task_init_gen_load_pcap(struct task_gen *task, struct task_args *targ)
const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
char err[PCAP_ERRBUF_SIZE];
uint32_t max_frame_size;
pcap_t *handle = pcap_open_offline(targ->pcap_file, err);
PROX_PANIC(handle == NULL, "Failed to open PCAP file: %s\n", err);
task->n_pkts = pcap_count_pkts(handle, &max_frame_size);
plogx_info("%u packets in pcap file '%s'\n", task->n_pkts, targ->pcap_file);
PROX_PANIC(max_frame_size > task->max_frame_size,
max_frame_size > ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE - 4 ?
"pkt_size too high and jumbo frames disabled" : "pkt_size > mtu");
task->n_pkts = RTE_MIN(task->n_pkts, targ->n_pkts);
PROX_PANIC(task->n_pkts > MAX_TEMPLATE_INDEX, "Too many packets specified in pcap - increase MAX_TEMPLATE_INDEX\n");
plogx_info("Loading %u packets from pcap\n", task->n_pkts);
size_t mem_size = task->n_pkts * sizeof(*task->pkt_template);
task->pkt_template = prox_zmalloc(mem_size, socket_id);
task->pkt_template_orig = prox_zmalloc(mem_size, socket_id);
PROX_PANIC(task->pkt_template == NULL ||
task->pkt_template_orig == NULL,
"Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size);
for (uint i = 0; i < task->n_pkts; i++) {
task->pkt_template[i].buf = prox_zmalloc(max_frame_size, socket_id);
task->pkt_template_orig[i].buf = prox_zmalloc(max_frame_size, socket_id);
PROX_PANIC(task->pkt_template[i].buf == NULL ||
task->pkt_template_orig[i].buf == NULL,
"Failed to allocate %u bytes (in huge pages) for pcap file\n", max_frame_size);
pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->pkt_template_orig, NULL);
task_gen_reset_pkt_templates(task);
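/* Create the mempool used by the gen task. The mbuf size is enlarged
   beyond TX_MBUF_SIZE when the maximum frame size (e.g. with jumbo
   frames) would not fit in the default mbuf. */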
static struct rte_mempool *task_gen_create_mempool(struct task_args *targ, uint16_t max_frame_size)
static char name[] = "gen_pool";
struct rte_mempool *ret;
const int sock_id = rte_lcore_to_socket_id(targ->lconf->id);
uint32_t mbuf_size = TX_MBUF_SIZE;
if (max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > mbuf_size)
mbuf_size = max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
plog_info("\t\tCreating mempool with name '%s'\n", name);
ret = rte_mempool_create(name, targ->nb_mbuf - 1, mbuf_size,
targ->nb_cache_mbuf, sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0,
PROX_PANIC(ret == NULL, "Failed to allocate dummy memory pool on socket %u with %u elements\n",
sock_id, targ->nb_mbuf - 1);
plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", ret,
targ->nb_mbuf - 1, mbuf_size, targ->nb_cache_mbuf, sock_id);
void task_gen_set_pkt_count(struct task_base *tbase, uint32_t count)
struct task_gen *task = (struct task_gen *)tbase;
task->pkt_count = count;
int task_gen_set_pkt_size(struct task_base *tbase, uint32_t pkt_size)
struct task_gen *task = (struct task_gen *)tbase;
if ((rc = check_pkt_size(task, pkt_size, 0)) != 0)
if ((rc = check_fields_in_bounds(task, pkt_size, 0)) != 0)
task->pkt_template[0].len = pkt_size;
void task_gen_set_rate(struct task_base *tbase, uint64_t bps)
struct task_gen *task = (struct task_gen *)tbase;
task->new_rate_bps = bps;
void task_gen_reset_randoms(struct task_base *tbase)
struct task_gen *task = (struct task_gen *)tbase;
for (uint32_t i = 0; i < task->n_rands; ++i) {
task->rand[i].rand_mask = 0;
task->rand[i].fixed_bits = 0;
task->rand[i].rand_offset = 0;
int task_gen_set_value(struct task_base *tbase, uint32_t value, uint32_t offset, uint32_t len)
struct task_gen *task = (struct task_gen *)tbase;
for (size_t i = 0; i < task->n_pkts; ++i) {
uint32_t to_write = rte_cpu_to_be_32(value) >> ((4 - len) * 8);
uint8_t *dst = task->pkt_template[i].buf;
rte_memcpy(dst + offset, &to_write, len);
task_gen_pkt_template_recalc_all(task);
void task_gen_reset_values(struct task_base *tbase)
struct task_gen *task = (struct task_gen *)tbase;
task_gen_reset_pkt_templates_content(task);
uint32_t task_gen_get_n_randoms(struct task_base *tbase)
struct task_gen *task = (struct task_gen *)tbase;
return task->n_rands;
static void init_task_gen_pcap(struct task_base *tbase, struct task_args *targ)
struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
const uint32_t sockid = rte_lcore_to_socket_id(targ->lconf->id);
uint32_t max_frame_size;
task->loop = targ->loop;
task->hz = rte_get_tsc_hz();
char err[PCAP_ERRBUF_SIZE];
pcap_t *handle = pcap_open_offline(targ->pcap_file, err);
PROX_PANIC(handle == NULL, "Failed to open PCAP file: %s\n", err);
task->n_pkts = pcap_count_pkts(handle, &max_frame_size);
plogx_info("%u packets in pcap file '%s'\n", task->n_pkts, targ->pcap_file);
task->local_mbuf.mempool = task_gen_create_mempool(targ, max_frame_size);
PROX_PANIC(!strcmp(targ->pcap_file, ""), "No pcap file defined\n");
plogx_info("Configured to load %u packets\n", targ->n_pkts);
if (task->n_pkts > targ->n_pkts)
task->n_pkts = targ->n_pkts;
PROX_PANIC(task->n_pkts > MAX_TEMPLATE_INDEX, "Too many packets specified in pcap - increase MAX_TEMPLATE_INDEX\n");
plogx_info("Loading %u packets from pcap\n", task->n_pkts);
size_t mem_size = task->n_pkts * (sizeof(*task->proto) + sizeof(*task->proto_tsc));
uint8_t *mem = prox_zmalloc(mem_size, sockid);
PROX_PANIC(mem == NULL, "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size);
task->proto = (struct pkt_template *) mem;
task->proto_tsc = (uint64_t *)(mem + task->n_pkts * sizeof(*task->proto));
for (uint i = 0; i < task->n_pkts; i++) {
task->proto[i].buf = prox_zmalloc(max_frame_size, sockid);
PROX_PANIC(task->proto[i].buf == NULL, "Failed to allocate %u bytes (in huge pages) for pcap file\n", max_frame_size);
pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->proto, task->proto_tsc);
static int task_gen_find_random_with_offset(struct task_gen *task, uint32_t offset)
for (uint32_t i = 0; i < task->n_rands; ++i) {
if (task->rand[i].rand_offset == offset) {
int task_gen_add_rand(struct task_base *tbase, const char *rand_str, uint32_t offset, uint32_t rand_id)
struct task_gen *task = (struct task_gen *)tbase;
uint32_t existing_rand;
if (rand_id == UINT32_MAX && task->n_rands == 64) {
plog_err("Too many randoms\n");
uint32_t mask, fixed, len;
if (parse_random_str(&mask, &fixed, &len, rand_str)) {
plog_err("%s\n", get_parse_err());
task->runtime_checksum_needed = 1;
existing_rand = task_gen_find_random_with_offset(task, offset);
if (existing_rand != UINT32_MAX) {
plog_warn("Random at offset %d already set => overwriting len = %d %s\n", offset, len, rand_str);
rand_id = existing_rand;
task->rand[rand_id].rand_len = len;
task->rand[rand_id].rand_offset = offset;
task->rand[rand_id].rand_mask = mask;
task->rand[rand_id].fixed_bits = fixed;
task->rand[task->n_rands].rand_len = len;
task->rand[task->n_rands].rand_offset = offset;
task->rand[task->n_rands].rand_mask = mask;
task->rand[task->n_rands].fixed_bits = fixed;
static void start(struct task_base *tbase)
struct task_gen *task = (struct task_gen *)tbase;
task->pkt_queue_index = 0;
task_gen_reset_token_time(task);
if (tbase->l3.tmaster) {
register_all_ip_to_ctrl_plane(task);
Handle the case when two tasks transmit to the same port
and one of them is stopped. In that case ARP (requests or replies)
might not be sent. The master will have to keep a list of rings:
stop will have to de-register the IP from the ctrl plane,
un-registration will remove the ring, and when more than one
ring is active, the master can always use the first one.
static void start_pcap(struct task_base *tbase)
struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
/* When we start, the first packet is sent immediately. */
task->last_tsc = rte_rdtsc() - task->proto_tsc[0];
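/* Assign each gen task a unique generator id by incrementing a
   process-wide counter kept in prox_shared; the id later ends up in
   the unique packet id so a receiver can tell generators apart. */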
static void init_task_gen_early(struct task_args *targ)
uint8_t *generator_count = prox_sh_find_system("generator_count");
if (generator_count == NULL) {
generator_count = prox_zmalloc(sizeof(*generator_count), rte_lcore_to_socket_id(targ->lconf->id));
PROX_PANIC(generator_count == NULL, "Failed to allocate generator count\n");
prox_sh_add_system("generator_count", generator_count);
targ->generator_id = *generator_count;
(*generator_count)++;
static void init_task_gen(struct task_base *tbase, struct task_args *targ)
struct task_gen *task = (struct task_gen *)tbase;
task->packet_id_pos = targ->packet_id_pos;
struct prox_port_cfg *port = find_reachable_port(targ);
// TODO: check that all reachable ports have the same mtu...
task->cksum_offload = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
task->max_frame_size = port->mtu + ETHER_HDR_LEN + 2 * PROX_VLAN_TAG_SIZE;
// Not generating to any port...
task->max_frame_size = ETHER_MAX_LEN;
task->local_mbuf.mempool = task_gen_create_mempool(targ, task->max_frame_size);
PROX_PANIC(task->local_mbuf.mempool == NULL, "Failed to create mempool\n");
task->hz = rte_get_tsc_hz();
task->lat_pos = targ->lat_pos;
task->accur_pos = targ->accur_pos;
task->sig_pos = targ->sig_pos;
task->sig = targ->sig;
task->new_rate_bps = targ->rate_bps;
* For tokens, use 10 Gbps as the base rate.
* Scripts can then use the speed command, with speed=100 meaning 10 Gbps and speed=400 meaning 40 Gbps.
* A script can query the prox "port info" command to find out the port link speed and know
* at which rate to start. Note that virtio running on OVS returns 10 Gbps, so a script
* probably also has to check the driver (as returned by the same "port info" command).
struct token_time_cfg tt_cfg = token_time_cfg_create(1250000000, rte_get_tsc_hz(), -1);
token_time_init(&task->token_time, &tt_cfg);
init_task_gen_seeds(task);
task->min_bulk_size = targ->min_bulk_size;
task->max_bulk_size = targ->max_bulk_size;
if (task->min_bulk_size < 1)
task->min_bulk_size = 1;
if (task->max_bulk_size < 1)
task->max_bulk_size = 64;
PROX_PANIC(task->max_bulk_size > 64, "max_bulk_size higher than 64\n");
PROX_PANIC(task->max_bulk_size < task->min_bulk_size, "max_bulk_size must be >= min_bulk_size\n");
task->pkt_count = -1;
task->lat_enabled = targ->lat_enabled;
task->runtime_flags = targ->runtime_flags;
PROX_PANIC((task->lat_pos || task->accur_pos) && !task->lat_enabled, "lat_pos or accur_pos configured but latency not enabled\n");
task->generator_id = targ->generator_id;
plog_info("\tGenerator id = %d\n", task->generator_id);
// Allocate array holding bytes to tsc for supported frame sizes
task->bytes_to_tsc = prox_zmalloc(task->max_frame_size * MAX_PKT_BURST * sizeof(task->bytes_to_tsc[0]), rte_lcore_to_socket_id(targ->lconf->id));
PROX_PANIC(task->bytes_to_tsc == NULL,
"Failed to allocate %u bytes (in huge pages) for bytes_to_tsc\n", task->max_frame_size);
// task->port->max_link_speed reports the maximum, non-negotiated link speed in Mbps, e.g. 40k for a 40 Gbps NIC.
// It can be UINT32_MAX (virtual devices or not supported by DPDK < 16.04)
uint64_t bytes_per_hz = UINT64_MAX;
if ((task->port) && (task->port->max_link_speed != UINT32_MAX)) {
bytes_per_hz = task->port->max_link_speed * 125000L;
plog_info("\tPort %u: max link speed is %ld Mbps\n",
(uint8_t)(task->port - prox_port_cfg), 8 * bytes_per_hz / 1000000);
for (unsigned int i = 0; i < task->max_frame_size * MAX_PKT_BURST; i++) {
if (bytes_per_hz == UINT64_MAX)
task->bytes_to_tsc[i] = 0;
task->bytes_to_tsc[i] = (task->hz * i) / bytes_per_hz;
if (!strcmp(targ->pcap_file, "")) {
plog_info("\tUsing inline definition of a packet\n");
task_init_gen_load_pkt_inline(task, targ);
plog_info("Loading from pcap %s\n", targ->pcap_file);
task_init_gen_load_pcap(task, targ);
PROX_PANIC(((targ->nb_txrings == 0) && (targ->nb_txports == 0)), "Gen mode requires a tx ring or a tx port");
if ((targ->flags & DSF_KEEP_SRC_MAC) == 0) {
uint8_t *src_addr = prox_port_cfg[tbase->tx_params_hw.tx_port_queue->port].eth_addr.addr_bytes;
for (uint32_t i = 0; i < task->n_pkts; ++i) {
rte_memcpy(&task->pkt_template[i].buf[6], src_addr, 6);
memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(struct ether_addr));
for (uint32_t i = 0; i < targ->n_rand_str; ++i) {
PROX_PANIC(task_gen_add_rand(tbase, targ->rand_str[i], targ->rand_offset[i], UINT32_MAX),
"Failed to add random\n");
static struct task_init task_init_gen = {
.init = init_task_gen,
.handle = handle_gen_bulk,
.early_init = init_task_gen_early,
// For SOFT_CRC, no offload is needed. If both NOOFFLOADS and NOMULTSEGS flags are set the
// vector mode is used by DPDK, resulting (theoretically) in higher performance.
.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
.size = sizeof(struct task_gen)
static struct task_init task_init_gen_l3 = {
.sub_mode_str = "l3",
.init = init_task_gen,
.handle = handle_gen_bulk,
.early_init = init_task_gen_early,
// For SOFT_CRC, no offload is needed. If both NOOFFLOADS and NOMULTSEGS flags are set the
// vector mode is used by DPDK, resulting (theoretically) in higher performance.
.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
.size = sizeof(struct task_gen)
static struct task_init task_init_gen_pcap = {
.sub_mode_str = "pcap",
.init = init_task_gen_pcap,
.handle = handle_gen_pcap_bulk,
.start = start_pcap,
.early_init = init_task_gen_early,
.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
.size = sizeof(struct task_gen_pcap)
__attribute__((constructor)) static void reg_task_gen(void)
reg_task(&task_init_gen);
reg_task(&task_init_gen_l3);
reg_task(&task_init_gen_pcap);