2 // Copyright (c) 2010-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
21 #include <rte_cycles.h>
22 #include <rte_version.h>
23 #include <rte_byteorder.h>
24 #include <rte_ether.h>
25 #include <rte_hash_crc.h>
27 #include "prox_shared.h"
29 #include "prox_malloc.h"
30 #include "handle_gen.h"
31 #include "handle_lat.h"
32 #include "task_init.h"
33 #include "task_base.h"
34 #include "prox_port_cfg.h"
39 #include "mbuf_utils.h"
41 #include "prox_cksum.h"
43 #include "prox_assert.h"
45 #include "token_time.h"
46 #include "local_mbuf.h"
49 #include "handle_master.h"
58 #define MAX_TEMPLATE_INDEX 65536
59 #define TEMPLATE_INDEX_MASK (MAX_TEMPLATE_INDEX - 1)
61 #define IP4(x) ((x) & 0xff), (((x) >> 8) & 0xff), (((x) >> 16) & 0xff), ((x) >> 24)
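/* Copy a packet template into a freshly taken mbuf and set the mbuf's
   packet and data length to the template length. */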
63 static void pkt_template_init_mbuf(struct pkt_template *pkt_template, struct rte_mbuf *mbuf, uint8_t *pkt)
65 const uint32_t pkt_size = pkt_template->len;
67 rte_pktmbuf_pkt_len(mbuf) = pkt_size;
68 rte_pktmbuf_data_len(mbuf) = pkt_size;
70 rte_memcpy(pkt, pkt_template->buf, pkt_template->len);
73 struct task_gen_pcap {
74 struct task_base base;
76 struct local_mbuf local_mbuf;
78 struct pkt_template *proto;
86 struct task_base base;
88 struct token_time token_time;
89 struct local_mbuf local_mbuf;
90 struct pkt_template *pkt_template; /* packet templates used at runtime */
91 uint64_t write_duration_estimate; /* how long it took previously to write the time stamps in the packets */
92 uint64_t earliest_tsc_next_pkt;
93 uint64_t new_rate_bps;
94 uint64_t pkt_queue_index;
95 uint32_t n_pkts; /* number of packets in pcap */
96 uint32_t pkt_idx; /* current packet from pcap */
97 uint32_t pkt_count; /* how many packets to generate */
98 uint32_t max_frame_size;
99 uint32_t runtime_flags;
101 uint16_t packet_id_pos;
105 uint8_t generator_id;
106 uint8_t n_rands; /* number of randoms */
107 uint8_t min_bulk_size;
108 uint8_t max_bulk_size;
110 uint8_t runtime_checksum_needed;
113 uint32_t rand_mask; /* since the random vals are uniform, masks don't introduce bias */
114 uint32_t fixed_bits; /* fixed bits ORed in on top of the masked random value */
115 uint16_t rand_offset; /* each random has an offset */
116 uint8_t rand_len; /* # bytes to take from random (max 4, no bias introduced) */
119 uint64_t pkt_tsc_offset[64];
120 struct pkt_template *pkt_template_orig; /* packet templates (from inline or from pcap) */
121 struct ether_addr src_mac;
123 uint8_t cksum_offload;
124 struct prox_port_cfg *port;
125 uint64_t *bytes_to_tsc;
126 } __rte_cache_aligned;
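/* Return the IPv4 header length in bytes as encoded in the IHL field,
   e.g. version_ihl 0x45 -> 20 bytes, 0x46 -> 24 bytes (options present). */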
128 static inline uint8_t ipv4_get_hdr_len(struct ipv4_hdr *ip)
130 /* Optimize for common case of IPv4 header without options. */
131 if (ip->version_ihl == 0x45)
132 return sizeof(struct ipv4_hdr);
133 if (unlikely(ip->version_ihl >> 4 != 4)) {
134 plog_warn("IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4);
137 return (ip->version_ihl & 0xF) * 4;
140 static void parse_l2_l3_len(uint8_t *pkt, uint16_t *l2_len, uint16_t *l3_len, uint16_t len)
142 *l2_len = sizeof(struct ether_hdr);
144 struct vlan_hdr *vlan_hdr;
145 struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt;
147 uint16_t ether_type = eth_hdr->ether_type;
150 while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (*l2_len + sizeof(struct vlan_hdr) < len)) {
151 vlan_hdr = (struct vlan_hdr *)(pkt + *l2_len);
153 ether_type = vlan_hdr->eth_proto;
156 // No L3 cksum offload for IPv6, but TODO L4 offload
157 // ETYPE_EoGRE CRC not implemented yet
159 switch (ether_type) {
173 plog_warn("Unsupported packet type %x - CRC might be wrong\n", ether_type);
178 struct ipv4_hdr *ip = (struct ipv4_hdr *)(pkt + *l2_len);
179 *l3_len = ipv4_get_hdr_len(ip);
183 static void checksum_packet(uint8_t *hdr, struct rte_mbuf *mbuf, struct pkt_template *pkt_template, int cksum_offload)
185 uint16_t l2_len = pkt_template->l2_len;
186 uint16_t l3_len = pkt_template->l3_len;
189 struct ipv4_hdr *ip = (struct ipv4_hdr*)(hdr + l2_len);
190 prox_ip_udp_cksum(mbuf, ip, l2_len, l3_len, cksum_offload);
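/* Reprogram the token bucket for the currently requested rate (new_rate_bps)
   and restart it from the current TSC. */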
194 static void task_gen_reset_token_time(struct task_gen *task)
196 token_time_set_bpp(&task->token_time, task->new_rate_bps);
197 token_time_reset(&task->token_time, rte_rdtsc(), 0);
200 static void task_gen_take_count(struct task_gen *task, uint32_t send_bulk)
202 if (task->pkt_count == (uint32_t)-1)
205 if (task->pkt_count >= send_bulk)
206 task->pkt_count -= send_bulk;
212 static int handle_gen_pcap_bulk(struct task_base *tbase, struct rte_mbuf **mbuf, uint16_t n_pkts)
214 struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
215 uint64_t now = rte_rdtsc();
216 uint64_t send_bulk = 0;
217 uint32_t pkt_idx_tmp = task->pkt_idx;
219 if (pkt_idx_tmp == task->n_pkts) {
220 PROX_ASSERT(task->loop);
224 for (uint16_t j = 0; j < 64; ++j) {
225 uint64_t tsc = task->proto_tsc[pkt_idx_tmp];
226 if (task->last_tsc + tsc <= now) {
227 task->last_tsc += tsc;
230 if (pkt_idx_tmp == task->n_pkts) {
241 struct rte_mbuf **new_pkts = local_mbuf_refill_and_take(&task->local_mbuf, send_bulk);
242 if (new_pkts == NULL)
245 for (uint16_t j = 0; j < send_bulk; ++j) {
246 struct rte_mbuf *next_pkt = new_pkts[j];
247 struct pkt_template *pkt_template = &task->proto[task->pkt_idx];
248 uint8_t *hdr = rte_pktmbuf_mtod(next_pkt, uint8_t *);
250 pkt_template_init_mbuf(pkt_template, next_pkt, hdr);
253 if (task->pkt_idx == task->n_pkts) {
261 return task->base.tx_pkt(&task->base, new_pkts, send_bulk, NULL);
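/* Look up the precomputed number of TSC cycles needed to put 'bytes' on the
   wire; the table is filled in init_task_gen() from the port link speed. */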
264 static inline uint64_t bytes_to_tsc(struct task_gen *task, uint32_t bytes)
266 return task->bytes_to_tsc[bytes];
269 static uint32_t task_gen_next_pkt_idx(const struct task_gen *task, uint32_t pkt_idx)
271 return pkt_idx + 1 == task->n_pkts ? 0 : pkt_idx + 1;
274 static uint32_t task_gen_offset_pkt_idx(const struct task_gen *task, uint32_t offset)
276 return (task->pkt_idx + offset) % task->n_pkts;
279 static uint32_t task_gen_calc_send_bulk(const struct task_gen *task, uint32_t *total_bytes)
281 /* The biggest bulk we allow to send is task->max_bulk_size
282 packets. The max bulk size can also be limited by the
283 pkt_count field. At the same time, we are rate limiting
284 based on the specified speed (in bytes per second) so token
285 bucket based rate limiting must also be applied. The
286 minimum bulk size is also constrained. If the calculated
287 bulk size is less than the minimum, then don't send anything. */
290 const uint32_t min_bulk = task->min_bulk_size;
291 uint32_t max_bulk = task->max_bulk_size;
293 if (task->pkt_count != (uint32_t)-1 && task->pkt_count < max_bulk) {
294 max_bulk = task->pkt_count;
297 uint32_t send_bulk = 0;
298 uint32_t pkt_idx_tmp = task->pkt_idx;
299 uint32_t would_send_bytes = 0;
303 * TODO - this must be improved to take into account the fact that, after applying randoms
304 * the packet can be replaced by an ARP
306 for (uint16_t j = 0; j < max_bulk; ++j) {
307 struct pkt_template *pktpl = &task->pkt_template[pkt_idx_tmp];
308 pkt_size = pktpl->len;
309 uint32_t pkt_len = pkt_len_to_wire_size(pkt_size);
310 if (pkt_len + would_send_bytes > task->token_time.bytes_now)
313 pkt_idx_tmp = task_gen_next_pkt_idx(task, pkt_idx_tmp);
316 would_send_bytes += pkt_len;
319 if (send_bulk < min_bulk)
321 *total_bytes = would_send_bytes;
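/* Overwrite the configured random fields of one packet. Illustrative example
   (hypothetical values, not taken from any config): with rand_mask 0x0000ffff,
   fixed_bits 0x0a000000 and rand_len 4, a 32-bit random r produces
   (r & 0x0000ffff) | 0x0a000000, written big-endian at rand_offset. */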
325 static void task_gen_apply_random_fields(struct task_gen *task, uint8_t *hdr)
327 uint32_t ret, ret_tmp;
329 for (uint16_t i = 0; i < task->n_rands; ++i) {
330 ret = random_next(&task->rand[i].state);
331 ret_tmp = (ret & task->rand[i].rand_mask) | task->rand[i].fixed_bits;
333 ret_tmp = rte_bswap32(ret_tmp);
334 /* At this point, the lower order bytes (BE) contain
335 the generated value. The address where the value
336 of interest starts is at ret_tmp + 4 - rand_len. */
337 uint8_t *pret_tmp = (uint8_t*)&ret_tmp;
338 rte_memcpy(hdr + task->rand[i].rand_offset, pret_tmp + 4 - task->rand[i].rand_len, task->rand[i].rand_len);
342 static void task_gen_apply_all_random_fields(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count)
347 for (uint16_t i = 0; i < count; ++i)
348 task_gen_apply_random_fields(task, pkt_hdr[i]);
351 static void task_gen_apply_accur_pos(struct task_gen *task, uint8_t *pkt_hdr, uint32_t accuracy)
353 *(uint32_t *)(pkt_hdr + task->accur_pos) = accuracy;
356 static void task_gen_apply_sig(struct task_gen *task, uint8_t *pkt_hdr)
358 *(uint32_t *)(pkt_hdr + task->sig_pos) = task->sig;
361 static void task_gen_apply_all_accur_pos(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
363 if (!task->accur_pos)
366 /* The accuracy of task->pkt_queue_index - 64 is stored in
367 packet task->pkt_queue_index. The ID modulo 64 is the same. */
369 for (uint16_t j = 0; j < count; ++j) {
370 uint32_t accuracy = task->accur[(task->pkt_queue_index + j) & 63];
371 task_gen_apply_accur_pos(task, pkt_hdr[j], accuracy);
375 static void task_gen_apply_all_sig(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
380 for (uint16_t j = 0; j < count; ++j) {
381 task_gen_apply_sig(task, pkt_hdr[j]);
385 static void task_gen_apply_unique_id(struct task_gen *task, uint8_t *pkt_hdr, const struct unique_id *id)
387 struct unique_id *dst = (struct unique_id *)(pkt_hdr + task->packet_id_pos);
392 static void task_gen_apply_all_unique_id(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
394 if (!task->packet_id_pos)
397 for (uint16_t i = 0; i < count; ++i) {
399 unique_id_init(&id, task->generator_id, task->pkt_queue_index++);
400 task_gen_apply_unique_id(task, pkt_hdr[i], &id);
404 static void task_gen_checksum_packets(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
406 if (!(task->runtime_flags & TASK_TX_CRC))
409 if (!task->runtime_checksum_needed)
412 uint32_t pkt_idx = task_gen_offset_pkt_idx(task, - count);
413 for (uint16_t i = 0; i < count; ++i) {
414 struct pkt_template *pkt_template = &task->pkt_template[pkt_idx];
415 checksum_packet(pkt_hdr[i], mbufs[i], pkt_template, task->cksum_offload);
416 pkt_idx = task_gen_next_pkt_idx(task, pkt_idx);
420 static void task_gen_consume_tokens(struct task_gen *task, uint32_t tokens, uint32_t send_count)
422 /* If max burst has been sent, we can't keep up so just assume
423 that we can (leaving a "gap" in the packet stream on the wire). */
425 task->token_time.bytes_now -= tokens;
426 if (send_count == task->max_bulk_size && task->token_time.bytes_now > tokens) {
427 task->token_time.bytes_now = tokens;
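/* Estimate how long (in TSC cycles) the whole bulk occupies the wire: the
   precomputed TSC offset of the last packet plus, unless NO_EXTRAPOLATION is
   defined, the wire time of that last packet itself. */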
431 static uint64_t task_gen_calc_bulk_duration(struct task_gen *task, uint32_t count)
433 uint32_t pkt_idx = task_gen_offset_pkt_idx(task, - 1);
434 struct pkt_template *last_pkt_template = &task->pkt_template[pkt_idx];
435 uint32_t last_pkt_len = pkt_len_to_wire_size(last_pkt_template->len);
436 #ifdef NO_EXTRAPOLATION
437 uint64_t bulk_duration = task->pkt_tsc_offset[count - 1];
439 uint64_t last_pkt_duration = bytes_to_tsc(task, last_pkt_len);
440 uint64_t bulk_duration = task->pkt_tsc_offset[count - 1] + last_pkt_duration;
443 return bulk_duration;
446 static uint64_t task_gen_write_latency(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count)
448 if (!task->lat_enabled)
451 uint64_t tx_tsc, delta_t;
452 uint64_t tsc_before_tx = 0;
454 /* Just before sending the packets, apply the time stamp
455 relative to when the first packet will be sent. The first
456 packet will be sent now. The time is read for each packet
457 to reduce the error towards the actual time the packet will be sent. */
459 uint64_t write_tsc_after, write_tsc_before;
461 write_tsc_before = rte_rdtsc();
463 /* The time it took previously to write the time stamps in the
464 packets is used as an estimate for how long it will take to
465 write the time stamps now. The estimated time at which the
466 packets will actually be sent will be at tx_tsc. */
467 tx_tsc = write_tsc_before + task->write_duration_estimate;
469 /* The offset delta_t tracks the difference between the actual
470 time and the time written in the packets. Adding the offset
471 to the actual time ensures that the time written in the
472 packets is monotonically increasing. At the same time,
473 simply sleeping until delta_t is zero would leave a period
474 of silence on the line. The error has been introduced
475 earlier, but the packets have already been sent. */
477 /* This happens typically if previous bulk was delayed
478 by an interrupt e.g. (with Time in nsec)
479 Time x: sleep 4 microsec
480 Time x+4000: send 64 packets (64 packets take 4000 nsec at 10 Gbps with 64-byte frames)
481 Time x+5000: send 16 packets (16 packets take 1000 nsec)
482 When we send the 16 packets, the 64 earlier packets are not yet fully sent. */
484 if (tx_tsc < task->earliest_tsc_next_pkt)
485 delta_t = task->earliest_tsc_next_pkt - tx_tsc;
489 for (uint16_t i = 0; i < count; ++i) {
490 uint32_t *pos = (uint32_t *)(pkt_hdr[i] + task->lat_pos);
491 const uint64_t pkt_tsc = tx_tsc + delta_t + task->pkt_tsc_offset[i];
492 *pos = pkt_tsc >> LATENCY_ACCURACY;
495 uint64_t bulk_duration = task_gen_calc_bulk_duration(task, count);
496 task->earliest_tsc_next_pkt = tx_tsc + delta_t + bulk_duration;
497 write_tsc_after = rte_rdtsc();
498 task->write_duration_estimate = write_tsc_after - write_tsc_before;
500 /* Make sure that the time stamps that were written
501 are valid. The offset must be taken into account */
503 tsc_before_tx = rte_rdtsc();
504 } while (tsc_before_tx < tx_tsc);
506 return tsc_before_tx;
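/* Record the time elapsed between just before and just after transmission as
   a per-packet accuracy value; slots are indexed by pkt_queue_index modulo 64
   and written into a later bulk by task_gen_apply_all_accur_pos(). */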
509 static void task_gen_store_accuracy(struct task_gen *task, uint32_t count, uint64_t tsc_before_tx)
511 if (!task->accur_pos)
514 uint64_t accur = rte_rdtsc() - tsc_before_tx;
515 uint64_t first_accuracy_idx = task->pkt_queue_index - count;
517 for (uint32_t i = 0; i < count; ++i) {
518 uint32_t accuracy_idx = (first_accuracy_idx + i) & 63;
520 task->accur[accuracy_idx] = accur;
524 static void task_gen_load_and_prefetch(struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
526 for (uint16_t i = 0; i < count; ++i)
527 rte_prefetch0(mbufs[i]);
528 for (uint16_t i = 0; i < count; ++i)
529 pkt_hdr[i] = rte_pktmbuf_mtod(mbufs[i], uint8_t *);
530 for (uint16_t i = 0; i < count; ++i)
531 rte_prefetch0(pkt_hdr[i]);
534 static void task_gen_build_packets(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
536 uint64_t will_send_bytes = 0;
538 for (uint16_t i = 0; i < count; ++i) {
539 struct pkt_template *pkt_template = &task->pkt_template[task->pkt_idx];
541 pkt_template_init_mbuf(pkt_template, mbufs[i], pkt_hdr[i]);
542 mbufs[i]->udata64 = task->pkt_idx & TEMPLATE_INDEX_MASK;
543 struct ether_hdr *hdr = (struct ether_hdr *)pkt_hdr[i];
544 if (task->lat_enabled) {
545 #ifdef NO_EXTRAPOLATION
546 task->pkt_tsc_offset[i] = 0;
548 task->pkt_tsc_offset[i] = bytes_to_tsc(task, will_send_bytes);
550 will_send_bytes += pkt_len_to_wire_size(pkt_template->len);
552 task->pkt_idx = task_gen_next_pkt_idx(task, task->pkt_idx);
556 static void task_gen_update_config(struct task_gen *task)
558 if (task->token_time.cfg.bpp != task->new_rate_bps)
559 task_gen_reset_token_time(task);
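/* Recursively enumerate every value the masked random bits can take and
   register each resulting IPv4 address with the master task (l3 sub-mode). */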
562 static inline void build_value(struct task_gen *task, uint32_t mask, int bit_pos, uint32_t val, uint32_t fixed_bits)
564 struct task_base *tbase = (struct task_base *)task;
566 build_value(task, mask >> 1, bit_pos + 1, val, fixed_bits);
568 build_value(task, mask >> 1, bit_pos + 1, val | (1 << bit_pos), fixed_bits);
571 register_ip_to_ctrl_plane(tbase->l3.tmaster, rte_cpu_to_be_32(val | fixed_bits), tbase->l3.reachable_port_id, tbase->l3.core_id, tbase->l3.task_id);
574 static inline void register_all_ip_to_ctrl_plane(struct task_gen *task)
576 struct task_base *tbase = (struct task_base *)task;
581 for (uint32_t i = 0; i < task->n_pkts; ++i) {
582 struct pkt_template *pktpl = &task->pkt_template[i];
583 unsigned int ip_src_pos = 0;
585 unsigned int l2_len = sizeof(struct ether_hdr);
587 uint8_t *pkt = pktpl->buf;
588 struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt;
589 uint16_t ether_type = eth_hdr->ether_type;
590 struct vlan_hdr *vlan_hdr;
593 while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(struct vlan_hdr) < pktpl->len)) {
594 vlan_hdr = (struct vlan_hdr *)(pkt + l2_len);
596 ether_type = vlan_hdr->eth_proto;
598 if ((ether_type == ETYPE_MPLSU) || (ether_type == ETYPE_MPLSM)) {
602 if ((ether_type != ETYPE_IPv4) && !maybe_ipv4)
605 struct ipv4_hdr *ip = (struct ipv4_hdr *)(pkt + l2_len);
606 PROX_PANIC(ip->version_ihl >> 4 != 4, "IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4);
608 // Even if the IPv4 header contains options, the options come after the IP src and dst addresses
609 ip_src_pos = l2_len + sizeof(struct ipv4_hdr) - 2 * sizeof(uint32_t);
610 uint32_t *ip_src = ((uint32_t *)(pktpl->buf + ip_src_pos));
611 plog_info("\tip_src_pos = %d, ip_src = %x\n", ip_src_pos, *ip_src);
612 register_ip_to_ctrl_plane(tbase->l3.tmaster, *ip_src, tbase->l3.reachable_port_id, tbase->l3.core_id, tbase->l3.task_id);
614 for (int j = 0; j < task->n_rands; j++) {
615 offset = task->rand[j].rand_offset;
616 len = task->rand[j].rand_len;
617 mask = task->rand[j].rand_mask;
618 fixed = task->rand[j].fixed_bits;
619 plog_info("offset = %d, len = %d, mask = %x, fixed = %x\n", offset, len, mask, fixed);
620 if ((offset < ip_src_pos + 4) && (offset + len >= ip_src_pos)) {
621 if (offset >= ip_src_pos) {
622 int32_t ip_src_mask = (1 << (4 + ip_src_pos - offset) * 8) - 1;
623 mask = mask & ip_src_mask;
624 fixed = (fixed & ip_src_mask) | (rte_be_to_cpu_32(*ip_src) & ~ip_src_mask);
625 build_value(task, mask, 0, 0, fixed);
627 int32_t bits = ((ip_src_pos + 4 - offset - len) * 8);
629 fixed = (fixed << bits) | (rte_be_to_cpu_32(*ip_src) & ((1 << bits) - 1));
630 build_value(task, mask, 0, 0, fixed);
637 static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
639 struct task_gen *task = (struct task_gen *)tbase;
640 uint8_t out[MAX_PKT_BURST] = {0};
645 task_gen_update_config(task);
647 if (task->pkt_count == 0) {
648 task_gen_reset_token_time(task);
651 if (!task->token_time.cfg.bpp)
654 token_time_update(&task->token_time, rte_rdtsc());
656 uint32_t would_send_bytes;
657 uint32_t send_bulk = task_gen_calc_send_bulk(task, &would_send_bytes);
661 task_gen_take_count(task, send_bulk);
662 task_gen_consume_tokens(task, would_send_bytes, send_bulk);
664 struct rte_mbuf **new_pkts = local_mbuf_refill_and_take(&task->local_mbuf, send_bulk);
665 if (new_pkts == NULL)
667 uint8_t *pkt_hdr[MAX_RING_BURST];
669 task_gen_load_and_prefetch(new_pkts, pkt_hdr, send_bulk);
670 task_gen_build_packets(task, new_pkts, pkt_hdr, send_bulk);
671 task_gen_apply_all_random_fields(task, pkt_hdr, send_bulk);
672 task_gen_apply_all_accur_pos(task, new_pkts, pkt_hdr, send_bulk);
673 task_gen_apply_all_sig(task, new_pkts, pkt_hdr, send_bulk);
674 task_gen_apply_all_unique_id(task, new_pkts, pkt_hdr, send_bulk);
676 uint64_t tsc_before_tx;
678 tsc_before_tx = task_gen_write_latency(task, pkt_hdr, send_bulk);
679 task_gen_checksum_packets(task, new_pkts, pkt_hdr, send_bulk);
680 ret = task->base.tx_pkt(&task->base, new_pkts, send_bulk, out);
681 task_gen_store_accuracy(task, send_bulk, tsc_before_tx);
685 static void init_task_gen_seeds(struct task_gen *task)
687 for (size_t i = 0; i < sizeof(task->rand)/sizeof(task->rand[0]); ++i)
688 random_init_seed(&task->rand[i].state);
691 static uint32_t pcap_count_pkts(pcap_t *handle, uint32_t *max_frame_size)
693 struct pcap_pkthdr header;
697 long pkt1_fpos = ftell(pcap_file(handle));
699 while ((buf = pcap_next(handle, &header))) {
700 if (header.len > *max_frame_size)
701 *max_frame_size = header.len;
704 int ret2 = fseek(pcap_file(handle), pkt1_fpos, SEEK_SET);
705 PROX_PANIC(ret2 != 0, "Failed to reset reading pcap file\n");
709 static uint64_t avg_time_stamp(uint64_t *time_stamp, uint32_t n)
711 uint64_t tot_inter_pkt = 0;
713 for (uint32_t i = 0; i < n; ++i)
714 tot_inter_pkt += time_stamp[i];
715 return (tot_inter_pkt + n / 2)/n;
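/* Read n_pkts packets from the pcap into the template array; when time_stamp
   is non-NULL, also convert the capture time stamps into per-packet TSC
   deltas (the missing gap before the first packet is the average of the others). */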
718 static int pcap_read_pkts(pcap_t *handle, const char *file_name, uint32_t n_pkts, struct pkt_template *proto, uint64_t *time_stamp)
720 struct pcap_pkthdr header;
724 for (uint32_t i = 0; i < n_pkts; ++i) {
725 buf = pcap_next(handle, &header);
727 PROX_PANIC(buf == NULL, "Failed to read packet %d from pcap %s\n", i, file_name);
728 proto[i].len = header.len;
729 len = RTE_MIN(header.len, sizeof(proto[i].buf));
730 if (header.len > len)
731 plogx_warn("Packet truncated from %u to %zu bytes\n", header.len, len);
734 static struct timeval beg;
740 tv = tv_diff(&beg, &header.ts);
741 tv_to_tsc(&tv, time_stamp + i);
743 rte_memcpy(proto[i].buf, buf, len);
746 if (time_stamp && n_pkts) {
747 for (uint32_t i = n_pkts - 1; i > 0; --i)
748 time_stamp[i] -= time_stamp[i - 1];
749 /* Since the handle function will loop the packets,
750 there is one time-stamp that is not provided by the
751 pcap file. This is the time between the last and
752 the first packet. This implementation takes the
753 average of the inter-packet times here. */
755 time_stamp[0] = avg_time_stamp(time_stamp + 1, n_pkts - 1);
761 static int check_pkt_size(struct task_gen *task, uint32_t pkt_size, int do_panic)
763 const uint16_t min_len = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
764 const uint16_t max_len = task->max_frame_size;
767 PROX_PANIC(pkt_size == 0, "Invalid packet size (no packet defined?)\n");
768 PROX_PANIC(pkt_size > max_len, "pkt_size out of range (must be <= %u)\n", max_len);
769 PROX_PANIC(pkt_size < min_len, "pkt_size out of range (must be >= %u)\n", min_len);
773 plog_err("Invalid packet size length (no packet defined?)\n");
776 if (pkt_size > max_len) {
777 plog_err("pkt_size out of range (must be <= %u)\n", max_len);
780 if (pkt_size < min_len) {
781 plog_err("pkt_size out of range (must be >= %u)\n", min_len);
788 static int check_all_pkt_size(struct task_gen *task, int do_panic)
791 for (uint32_t i = 0; i < task->n_pkts;++i) {
792 if ((rc = check_pkt_size(task, task->pkt_template[i].len, do_panic)) != 0)
798 static int check_fields_in_bounds(struct task_gen *task, uint32_t pkt_size, int do_panic)
800 if (task->lat_enabled) {
801 uint32_t pos_beg = task->lat_pos;
802 uint32_t pos_end = task->lat_pos + 3U;
805 PROX_PANIC(pkt_size <= pos_end, "Writing latency at %u-%u, but packet size is %u bytes\n",
806 pos_beg, pos_end, pkt_size);
807 else if (pkt_size <= pos_end) {
808 plog_err("Writing latency at %u-%u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
812 if (task->packet_id_pos) {
813 uint32_t pos_beg = task->packet_id_pos;
814 uint32_t pos_end = task->packet_id_pos + 4U;
817 PROX_PANIC(pkt_size <= pos_end, "Writing packet id at %u-%u, but packet size is %u bytes\n",
818 pos_beg, pos_end, pkt_size);
819 else if (pkt_size <= pos_end) {
820 plog_err("Writing packet at %u-%u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
824 if (task->accur_pos) {
825 uint32_t pos_beg = task->accur_pos;
826 uint32_t pos_end = task->accur_pos + 3U;
829 PROX_PANIC(pkt_size <= pos_end, "Writing accuracy at %u-%u, but packet size is %u bytes\n",
830 pos_beg, pos_end, pkt_size);
831 else if (pkt_size <= pos_end) {
832 plog_err("Writing accuracy at %u%-u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
839 static void task_gen_pkt_template_recalc_metadata(struct task_gen *task)
841 struct pkt_template *template;
843 for (size_t i = 0; i < task->n_pkts; ++i) {
844 template = &task->pkt_template[i];
845 parse_l2_l3_len(template->buf, &template->l2_len, &template->l3_len, template->len);
849 static void task_gen_pkt_template_recalc_checksum(struct task_gen *task)
851 struct pkt_template *template;
854 task->runtime_checksum_needed = 0;
855 for (size_t i = 0; i < task->n_pkts; ++i) {
856 template = &task->pkt_template[i];
857 if (template->l2_len == 0)
859 ip = (struct ipv4_hdr *)(template->buf + template->l2_len);
861 ip->hdr_checksum = 0;
862 prox_ip_cksum_sw(ip);
863 uint32_t l4_len = rte_be_to_cpu_16(ip->total_length) - template->l3_len;
865 if (ip->next_proto_id == IPPROTO_UDP) {
866 struct udp_hdr *udp = (struct udp_hdr *)(((uint8_t *)ip) + template->l3_len);
867 prox_udp_cksum_sw(udp, l4_len, ip->src_addr, ip->dst_addr);
868 } else if (ip->next_proto_id == IPPROTO_TCP) {
869 struct tcp_hdr *tcp = (struct tcp_hdr *)(((uint8_t *)ip) + template->l3_len);
870 prox_tcp_cksum_sw(tcp, l4_len, ip->src_addr, ip->dst_addr);
873 /* The current implementation avoids checksum
874 calculation by determining that at packet
875 construction time, no fields are applied that would
876 require a recalculation of the checksum. */
877 if (task->lat_enabled && task->lat_pos > template->l2_len)
878 task->runtime_checksum_needed = 1;
879 if (task->accur_pos > template->l2_len)
880 task->runtime_checksum_needed = 1;
881 if (task->packet_id_pos > template->l2_len)
882 task->runtime_checksum_needed = 1;
886 static void task_gen_pkt_template_recalc_all(struct task_gen *task)
888 task_gen_pkt_template_recalc_metadata(task);
889 task_gen_pkt_template_recalc_checksum(task);
892 static void task_gen_reset_pkt_templates_len(struct task_gen *task)
894 struct pkt_template *src, *dst;
896 for (size_t i = 0; i < task->n_pkts; ++i) {
897 src = &task->pkt_template_orig[i];
898 dst = &task->pkt_template[i];
903 static void task_gen_reset_pkt_templates_content(struct task_gen *task)
905 struct pkt_template *src, *dst;
907 for (size_t i = 0; i < task->n_pkts; ++i) {
908 src = &task->pkt_template_orig[i];
909 dst = &task->pkt_template[i];
910 memcpy(dst->buf, src->buf, dst->len);
914 static void task_gen_reset_pkt_templates(struct task_gen *task)
916 task_gen_reset_pkt_templates_len(task);
917 task_gen_reset_pkt_templates_content(task);
918 task_gen_pkt_template_recalc_all(task);
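/* Build the packet templates from the inline packet definition passed in the
   task arguments (targ->pkt_inline), then validate sizes and field offsets. */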
921 static void task_init_gen_load_pkt_inline(struct task_gen *task, struct task_args *targ)
923 const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
927 size_t mem_size = task->n_pkts * sizeof(*task->pkt_template);
928 task->pkt_template = prox_zmalloc(mem_size, socket_id);
929 task->pkt_template_orig = prox_zmalloc(mem_size, socket_id);
931 PROX_PANIC(task->pkt_template == NULL ||
932 task->pkt_template_orig == NULL,
933 "Failed to allocate %lu bytes (in huge pages) for packet template\n", mem_size);
935 task->pkt_template->buf = prox_zmalloc(task->max_frame_size, socket_id);
936 task->pkt_template_orig->buf = prox_zmalloc(task->max_frame_size, socket_id);
937 PROX_PANIC(task->pkt_template->buf == NULL ||
938 task->pkt_template_orig->buf == NULL,
939 "Failed to allocate %u bytes (in huge pages) for packet\n", task->max_frame_size);
941 PROX_PANIC(targ->pkt_size > task->max_frame_size,
942 targ->pkt_size > ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE - 4 ?
943 "pkt_size too high and jumbo frames disabled" : "pkt_size > mtu");
945 rte_memcpy(task->pkt_template_orig[0].buf, targ->pkt_inline, targ->pkt_size);
946 task->pkt_template_orig[0].len = targ->pkt_size;
947 task_gen_reset_pkt_templates(task);
948 check_all_pkt_size(task, 1);
949 check_fields_in_bounds(task, task->pkt_template[0].len, 1);
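/* Build the packet templates by loading packets from the pcap file named in
   the task arguments; the template count can be capped by targ->n_pkts. */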
952 static void task_init_gen_load_pcap(struct task_gen *task, struct task_args *targ)
954 const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
955 char err[PCAP_ERRBUF_SIZE];
956 uint32_t max_frame_size;
957 pcap_t *handle = pcap_open_offline(targ->pcap_file, err);
958 PROX_PANIC(handle == NULL, "Failed to open PCAP file: %s\n", err);
960 task->n_pkts = pcap_count_pkts(handle, &max_frame_size);
961 plogx_info("%u packets in pcap file '%s'\n", task->n_pkts, targ->pcap_file);
962 PROX_PANIC(max_frame_size > task->max_frame_size,
963 max_frame_size > ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE -4 ?
964 "pkt_size too high and jumbo frames disabled" : "pkt_size > mtu");
967 task->n_pkts = RTE_MIN(task->n_pkts, targ->n_pkts);
968 PROX_PANIC(task->n_pkts > MAX_TEMPLATE_INDEX, "Too many packets specified in pcap - increase MAX_TEMPLATE_INDEX\n");
969 plogx_info("Loading %u packets from pcap\n", task->n_pkts);
970 size_t mem_size = task->n_pkts * sizeof(*task->pkt_template);
971 task->pkt_template = prox_zmalloc(mem_size, socket_id);
972 task->pkt_template_orig = prox_zmalloc(mem_size, socket_id);
973 PROX_PANIC(task->pkt_template == NULL ||
974 task->pkt_template_orig == NULL,
975 "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size);
977 for (uint i = 0; i < task->n_pkts; i++) {
978 task->pkt_template[i].buf = prox_zmalloc(max_frame_size, socket_id);
979 task->pkt_template_orig[i].buf = prox_zmalloc(max_frame_size, socket_id);
981 PROX_PANIC(task->pkt_template[i].buf == NULL ||
982 task->pkt_template_orig[i].buf == NULL,
983 "Failed to allocate %u bytes (in huge pages) for pcap file\n", max_frame_size);
986 pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->pkt_template_orig, NULL);
988 task_gen_reset_pkt_templates(task);
991 static struct rte_mempool *task_gen_create_mempool(struct task_args *targ, uint16_t max_frame_size)
993 static char name[] = "gen_pool";
994 struct rte_mempool *ret;
995 const int sock_id = rte_lcore_to_socket_id(targ->lconf->id);
998 uint32_t mbuf_size = TX_MBUF_SIZE;
999 if (max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > mbuf_size)
1000 mbuf_size = max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
1001 plog_info("\t\tCreating mempool with name '%s'\n", name);
1002 ret = rte_mempool_create(name, targ->nb_mbuf - 1, mbuf_size,
1003 targ->nb_cache_mbuf, sizeof(struct rte_pktmbuf_pool_private),
1004 rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0,
1006 PROX_PANIC(ret == NULL, "Failed to allocate dummy memory pool on socket %u with %u elements\n",
1007 sock_id, targ->nb_mbuf - 1);
1009 plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", ret,
1010 targ->nb_mbuf - 1, mbuf_size, targ->nb_cache_mbuf, sock_id);
1015 void task_gen_set_pkt_count(struct task_base *tbase, uint32_t count)
1017 struct task_gen *task = (struct task_gen *)tbase;
1019 task->pkt_count = count;
1022 int task_gen_set_pkt_size(struct task_base *tbase, uint32_t pkt_size)
1024 struct task_gen *task = (struct task_gen *)tbase;
1027 if ((rc = check_pkt_size(task, pkt_size, 0)) != 0)
1029 if ((rc = check_fields_in_bounds(task, pkt_size, 0)) != 0)
1031 task->pkt_template[0].len = pkt_size;
1035 void task_gen_set_rate(struct task_base *tbase, uint64_t bps)
1037 struct task_gen *task = (struct task_gen *)tbase;
1039 task->new_rate_bps = bps;
1042 void task_gen_reset_randoms(struct task_base *tbase)
1044 struct task_gen *task = (struct task_gen *)tbase;
1046 for (uint32_t i = 0; i < task->n_rands; ++i) {
1047 task->rand[i].rand_mask = 0;
1048 task->rand[i].fixed_bits = 0;
1049 task->rand[i].rand_offset = 0;
1054 int task_gen_set_value(struct task_base *tbase, uint32_t value, uint32_t offset, uint32_t len)
1056 struct task_gen *task = (struct task_gen *)tbase;
1058 for (size_t i = 0; i < task->n_pkts; ++i) {
1059 uint32_t to_write = rte_cpu_to_be_32(value) >> ((4 - len) * 8);
1060 uint8_t *dst = task->pkt_template[i].buf;
1062 rte_memcpy(dst + offset, &to_write, len);
1065 task_gen_pkt_template_recalc_all(task);
1070 void task_gen_reset_values(struct task_base *tbase)
1072 struct task_gen *task = (struct task_gen *)tbase;
1074 task_gen_reset_pkt_templates_content(task);
1077 uint32_t task_gen_get_n_randoms(struct task_base *tbase)
1079 struct task_gen *task = (struct task_gen *)tbase;
1081 return task->n_rands;
1084 static void init_task_gen_pcap(struct task_base *tbase, struct task_args *targ)
1086 struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
1087 const uint32_t sockid = rte_lcore_to_socket_id(targ->lconf->id);
1088 uint32_t max_frame_size;
1090 task->loop = targ->loop;
1092 task->hz = rte_get_tsc_hz();
1094 char err[PCAP_ERRBUF_SIZE];
1095 pcap_t *handle = pcap_open_offline(targ->pcap_file, err);
1096 PROX_PANIC(handle == NULL, "Failed to open PCAP file: %s\n", err);
1098 task->n_pkts = pcap_count_pkts(handle, &max_frame_size);
1099 plogx_info("%u packets in pcap file '%s'\n", task->n_pkts, targ->pcap_file);
1101 task->local_mbuf.mempool = task_gen_create_mempool(targ, max_frame_size);
1103 PROX_PANIC(!strcmp(targ->pcap_file, ""), "No pcap file defined\n");
1106 plogx_info("Configured to load %u packets\n", targ->n_pkts);
1107 if (task->n_pkts > targ->n_pkts)
1108 task->n_pkts = targ->n_pkts;
1110 PROX_PANIC(task->n_pkts > MAX_TEMPLATE_INDEX, "Too many packets specified in pcap - increase MAX_TEMPLATE_INDEX\n");
1112 plogx_info("Loading %u packets from pcap\n", task->n_pkts);
1114 size_t mem_size = task->n_pkts * (sizeof(*task->proto) + sizeof(*task->proto_tsc));
1115 uint8_t *mem = prox_zmalloc(mem_size, sockid);
1117 PROX_PANIC(mem == NULL, "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size);
1118 task->proto = (struct pkt_template *) mem;
1119 task->proto_tsc = (uint64_t *)(mem + task->n_pkts * sizeof(*task->proto));
1121 for (uint i = 0; i < task->n_pkts; i++) {
1122 task->proto[i].buf = prox_zmalloc(max_frame_size, sockid);
1123 PROX_PANIC(task->proto[i].buf == NULL, "Failed to allocate %u bytes (in huge pages) for pcap file\n", max_frame_size);
1126 pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->proto, task->proto_tsc);
1130 static int task_gen_find_random_with_offset(struct task_gen *task, uint32_t offset)
1132 for (uint32_t i = 0; i < task->n_rands; ++i) {
1133 if (task->rand[i].rand_offset == offset) {
1141 int task_gen_add_rand(struct task_base *tbase, const char *rand_str, uint32_t offset, uint32_t rand_id)
1143 struct task_gen *task = (struct task_gen *)tbase;
1144 uint32_t existing_rand;
1146 if (rand_id == UINT32_MAX && task->n_rands == 64) {
1147 plog_err("Too many randoms\n");
1150 uint32_t mask, fixed, len;
1152 if (parse_random_str(&mask, &fixed, &len, rand_str)) {
1153 plog_err("%s\n", get_parse_err());
1156 task->runtime_checksum_needed = 1;
1158 existing_rand = task_gen_find_random_with_offset(task, offset);
1159 if (existing_rand != UINT32_MAX) {
1160 plog_warn("Random at offset %d already set => overwriting len = %d %s\n", offset, len, rand_str);
1161 rand_id = existing_rand;
1162 task->rand[rand_id].rand_len = len;
1163 task->rand[rand_id].rand_offset = offset;
1164 task->rand[rand_id].rand_mask = mask;
1165 task->rand[rand_id].fixed_bits = fixed;
1169 task->rand[task->n_rands].rand_len = len;
1170 task->rand[task->n_rands].rand_offset = offset;
1171 task->rand[task->n_rands].rand_mask = mask;
1172 task->rand[task->n_rands].fixed_bits = fixed;
1178 static void start(struct task_base *tbase)
1180 struct task_gen *task = (struct task_gen *)tbase;
1181 task->pkt_queue_index = 0;
1183 task_gen_reset_token_time(task);
1184 if (tbase->l3.tmaster) {
1185 register_all_ip_to_ctrl_plane(task);
1189 Handle the case when two tasks transmit to the same port
1190 and one of them is stopped. In that case ARP (requests or replies)
1191 might not be sent. Master will have to keep a list of rings.
1192 Stop will have to de-register the IP from the ctrl plane.
1193 Un-registration will remove the ring. When more than one
1194 ring is active, the master can always use the first one.
1198 static void start_pcap(struct task_base *tbase)
1200 struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
1201 /* When we start, the first packet is sent immediately. */
1202 task->last_tsc = rte_rdtsc() - task->proto_tsc[0];
1206 static void init_task_gen_early(struct task_args *targ)
1208 uint8_t *generator_count = prox_sh_find_system("generator_count");
1210 if (generator_count == NULL) {
1211 generator_count = prox_zmalloc(sizeof(*generator_count), rte_lcore_to_socket_id(targ->lconf->id));
1212 PROX_PANIC(generator_count == NULL, "Failed to allocate generator count\n");
1213 prox_sh_add_system("generator_count", generator_count);
1215 targ->generator_id = *generator_count;
1216 (*generator_count)++;
1219 static void init_task_gen(struct task_base *tbase, struct task_args *targ)
1221 struct task_gen *task = (struct task_gen *)tbase;
1223 task->packet_id_pos = targ->packet_id_pos;
1225 struct prox_port_cfg *port = find_reachable_port(targ);
1226 // TODO: check that all reachable ports have the same mtu...
1228 task->cksum_offload = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
1230 task->max_frame_size = port->mtu + ETHER_HDR_LEN + 2 * PROX_VLAN_TAG_SIZE;
1232 // Not generating to any port...
1233 task->max_frame_size = ETHER_MAX_LEN;
1235 task->local_mbuf.mempool = task_gen_create_mempool(targ, task->max_frame_size);
1236 PROX_PANIC(task->local_mbuf.mempool == NULL, "Failed to create mempool\n");
1238 task->hz = rte_get_tsc_hz();
1239 task->lat_pos = targ->lat_pos;
1240 task->accur_pos = targ->accur_pos;
1241 task->sig_pos = targ->sig_pos;
1242 task->sig = targ->sig;
1243 task->new_rate_bps = targ->rate_bps;
1246 * For tokens, use 10 Gbps as base rate
1247 * Scripts can then use speed command, with speed=100 as 10 Gbps and speed=400 as 40 Gbps
1248 * A script can query the prox "port info" command to find out the port link speed and know
1249 * at which rate to start. Note that virtio running on OVS reports 10 Gbps, so a script
1250 * probably also has to check the driver (as returned by the same "port info" command).
1252 struct token_time_cfg tt_cfg = token_time_cfg_create(1250000000, rte_get_tsc_hz(), -1);
1253 token_time_init(&task->token_time, &tt_cfg);
1255 init_task_gen_seeds(task);
1257 task->min_bulk_size = targ->min_bulk_size;
1258 task->max_bulk_size = targ->max_bulk_size;
1259 if (task->min_bulk_size < 1)
1260 task->min_bulk_size = 1;
1261 if (task->max_bulk_size < 1)
1262 task->max_bulk_size = 64;
1263 PROX_PANIC(task->max_bulk_size > 64, "max_bulk_size higher than 64\n");
1264 PROX_PANIC(task->max_bulk_size < task->min_bulk_size, "max_bulk_size must be >= min_bulk_size\n");
1266 task->pkt_count = -1;
1267 task->lat_enabled = targ->lat_enabled;
1268 task->runtime_flags = targ->runtime_flags;
1269 PROX_PANIC((task->lat_pos || task->accur_pos) && !task->lat_enabled, "lat_pos or accur_pos configured but latency is not enabled\n");
1271 task->generator_id = targ->generator_id;
1272 plog_info("\tGenerator id = %d\n", task->generator_id);
1274 // Allocate array holding bytes to tsc for supported frame sizes
1275 task->bytes_to_tsc = prox_zmalloc(task->max_frame_size * MAX_PKT_BURST * sizeof(task->bytes_to_tsc[0]), rte_lcore_to_socket_id(targ->lconf->id));
1276 PROX_PANIC(task->bytes_to_tsc == NULL,
1277 "Failed to allocate bytes_to_tsc array of %u entries (in huge pages)\n", task->max_frame_size * MAX_PKT_BURST);
1279 // task->port->max_link_speed reports the maximum, non-negotiated link speed in Mbps e.g. 40k for a 40 Gbps NIC.
1280 // It can be UINT32_MAX (virtual devices or not supported by DPDK < 16.04)
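// Illustrative example (hypothetical numbers): a 40 Gbps port reports
// max_link_speed = 40000, giving bytes_per_hz = 5e9 bytes/s; with a 2.5 GHz
// TSC, bytes_to_tsc[1500] = 2500000000 * 1500 / 5000000000 = 750 cycles.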
1281 uint64_t bytes_per_hz = UINT64_MAX;
1282 if ((task->port) && (task->port->max_link_speed != UINT32_MAX)) {
1283 bytes_per_hz = task->port->max_link_speed * 125000L;
1284 plog_info("\tPort %u: max link speed is %ld Mbps\n",
1285 (uint8_t)(task->port - prox_port_cfg), 8 * bytes_per_hz / 1000000);
1287 for (unsigned int i = 0; i < task->max_frame_size * MAX_PKT_BURST ; i++) {
1288 if (bytes_per_hz == UINT64_MAX)
1289 task->bytes_to_tsc[i] = 0;
1291 task->bytes_to_tsc[i] = (task->hz * i) / bytes_per_hz;
1294 if (!strcmp(targ->pcap_file, "")) {
1295 plog_info("\tUsing inline definition of a packet\n");
1296 task_init_gen_load_pkt_inline(task, targ);
1298 plog_info("Loading from pcap %s\n", targ->pcap_file);
1299 task_init_gen_load_pcap(task, targ);
1302 PROX_PANIC(((targ->nb_txrings == 0) && (targ->nb_txports == 0)), "Gen mode requires a tx ring or a tx port");
1303 if ((targ->flags & DSF_KEEP_SRC_MAC) == 0) {
1304 uint8_t *src_addr = prox_port_cfg[tbase->tx_params_hw.tx_port_queue->port].eth_addr.addr_bytes;
1305 for (uint32_t i = 0; i < task->n_pkts; ++i) {
1306 rte_memcpy(&task->pkt_template[i].buf[6], src_addr, 6);
1309 memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(struct ether_addr));
1310 for (uint32_t i = 0; i < targ->n_rand_str; ++i) {
1311 PROX_PANIC(task_gen_add_rand(tbase, targ->rand_str[i], targ->rand_offset[i], UINT32_MAX),
1312 "Failed to add random\n");
1316 static struct task_init task_init_gen = {
1318 .init = init_task_gen,
1319 .handle = handle_gen_bulk,
1321 .early_init = init_task_gen_early,
1323 // For SOFT_CRC, no offload is needed. If both NOOFFLOADS and NOMULTSEGS flags are set the
1324 // vector mode is used by DPDK, resulting (theoretically) in higher performance.
1325 .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
1327 .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
1329 .size = sizeof(struct task_gen)
1332 static struct task_init task_init_gen_l3 = {
1334 .sub_mode_str = "l3",
1335 .init = init_task_gen,
1336 .handle = handle_gen_bulk,
1338 .early_init = init_task_gen_early,
1340 // For SOFT_CRC, no offload is needed. If both NOOFFLOADS and NOMULTSEGS flags are set the
1341 // vector mode is used by DPDK, resulting (theoretically) in higher performance.
1342 .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
1344 .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
1346 .size = sizeof(struct task_gen)
1349 static struct task_init task_init_gen_pcap = {
1351 .sub_mode_str = "pcap",
1352 .init = init_task_gen_pcap,
1353 .handle = handle_gen_pcap_bulk,
1354 .start = start_pcap,
1355 .early_init = init_task_gen_early,
1357 .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
1359 .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
1361 .size = sizeof(struct task_gen_pcap)
1364 __attribute__((constructor)) static void reg_task_gen(void)
1366 reg_task(&task_init_gen);
1367 reg_task(&task_init_gen_l3);
1368 reg_task(&task_init_gen_pcap);