2 // Copyright (c) 2016-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
18 Correlated traffic VNF :
19 ------------------------
21 2. Modify received packet
22 a. exchange src mac and destination mac
23 b. exchange src ip and destination IP for both IPv4 and IPv6 cases
24 c. exchange UDP src port and UDP destination port
25 d. change the len of the response according to the IMIX definition (
26 option to make traffic more realistic to emulate some IoT payloads)
27 3. send modified packet to the port where it was received.
29 Such VNF does not need LPM and routing table implementations.
30 As the packet modification is minimal, and there is no main-memory access because the packet stays in the L3 cache, the
31 performance of the solution should be sufficient for testing the UDP NAT performance.
37 #include <sys/types.h>
39 #include <sys/queue.h>
44 #include <rte_common.h>
46 #include <rte_byteorder.h>
48 #include <rte_memory.h>
49 #include <rte_memcpy.h>
50 #include <rte_memzone.h>
52 #include <rte_per_lcore.h>
53 #include <rte_launch.h>
54 #include <rte_atomic.h>
55 #include <rte_cycles.h>
56 #include <rte_prefetch.h>
57 #include <rte_lcore.h>
58 #include <rte_per_lcore.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_interrupts.h>
62 #include <rte_random.h>
63 #include <rte_debug.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
67 #include <rte_mempool.h>
72 #include <rte_string_fns.h>
73 #include <rte_version.h>
75 #include <cmdline_parse.h>
76 #include <cmdline_parse_etheraddr.h>
77 #include <cmdline_rdline.h>
78 #include <cmdline_socket.h>
80 #include <cmdline_parse_num.h>
81 #include <cmdline_parse_string.h>
82 #include <cmdline_parse_ipaddr.h>
84 #include "parse_obj_list.h"
86 #define APP_LOOKUP_EXACT_MATCH 0
87 #define APP_LOOKUP_LPM 1
88 #define DO_RFC_1812_CHECKS
90 #ifndef APP_LOOKUP_METHOD
91 #define APP_LOOKUP_METHOD APP_LOOKUP_EXACT_MATCH
96 #include <netinet/in.h>
100 * When set to zero, simple forwarding path is enabled.
101 * When set to one, optimized forwarding path is enabled.
102 * Note that LPM optimisation path uses SSE4.1 instructions.
104 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && !defined(__SSE4_1__))
105 #define ENABLE_MULTI_BUFFER_OPTIMIZE 0
107 #define ENABLE_MULTI_BUFFER_OPTIMIZE 1
110 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
111 #include <rte_hash.h>
112 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
114 #include <rte_lpm6.h>
116 #error "APP_LOOKUP_METHOD set to incorrect value"
120 #define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
121 "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
122 #define IPv6_BYTES(addr) \
123 addr[0], addr[1], addr[2], addr[3], \
124 addr[4], addr[5], addr[6], addr[7], \
125 addr[8], addr[9], addr[10], addr[11],\
126 addr[12], addr[13],addr[14], addr[15]
130 #define RTE_LOGTYPE_UDP_Replay RTE_LOGTYPE_USER1
132 #define MAX_JUMBO_PKT_LEN 9600
134 #define IPV6_ADDR_LEN 16
136 #define MEMPOOL_CACHE_SIZE 256
139 * This expression is used to calculate the number of mbufs needed depending on user input, taking
140 * into account memory for rx and tx hardware rings, cache per lcore and mtable per port per lcore.
141 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum value of 8192
144 #define NB_MBUF RTE_MAX ( \
145 (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
146 nb_ports*nb_lcores*MAX_PKT_BURST + \
147 nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
148 nb_lcores*MEMPOOL_CACHE_SIZE), \
151 #define MAX_PKT_BURST 32
152 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
155 * Try to avoid TX buffering if we have at least MAX_TX_BURST packets to send.
157 #define MAX_TX_BURST (MAX_PKT_BURST / 2)
161 /* Configure how many packets ahead to prefetch, when reading packets */
162 #define PREFETCH_OFFSET 3
164 /* Used to mark destination port as 'invalid'. */
165 #define BAD_PORT ((uint16_t)-1)
170 * Configurable number of RX/TX ring descriptors
172 #define RTE_TEST_RX_DESC_DEFAULT 128
173 #define RTE_TEST_TX_DESC_DEFAULT 512
174 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
175 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
176 static uint64_t rcv_pkt_count[32] = {0};
177 static uint64_t tx_pkt_count[32] = {0};
179 /* ethernet addresses of ports */
180 static uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
182 static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
184 static __m128i val_eth[RTE_MAX_ETHPORTS];
186 cmdline_parse_ctx_t main_ctx[];
188 /* replace first 12B of the ethernet header. */
189 #define MASK_ETH 0x3f
191 /* mask of enabled ports */
192 static uint32_t enabled_port_mask = 0;
193 static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */
194 static int numa_on = 1; /**< NUMA is enabled by default. */
195 static int csum_on = 1; /**< NUMA is enabled by default. */
197 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
198 static int ipv6 = 0; /**< ipv6 is false by default. */
203 print_ethaddr(const char *name, const struct ether_addr *eth_addr);
205 int print_stats(void);
206 int clear_stats(void);
210 struct rte_mbuf *m_table[MAX_PKT_BURST];
213 struct lcore_rx_queue {
216 } __rte_cache_aligned;
218 #define MAX_RX_QUEUE_PER_LCORE 16
219 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
220 #define MAX_RX_QUEUE_PER_PORT 128
222 #define MAX_LCORE_PARAMS 1024
223 struct lcore_params {
227 } __rte_cache_aligned;
229 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
230 static struct lcore_params lcore_params_array_default[] = {
242 static struct lcore_params * lcore_params = lcore_params_array_default;
243 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
244 sizeof(lcore_params_array_default[0]);
246 static struct rte_eth_conf port_conf = {
248 .mq_mode = ETH_MQ_RX_RSS,
249 .max_rx_pkt_len = ETHER_MAX_LEN,
251 .header_split = 0, /**< Header Split disabled */
252 .hw_ip_checksum = 1, /**< IP checksum offload enabled */
253 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
254 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
255 .hw_strip_crc = 0, /**< CRC stripped by hardware */
260 .rss_hf = ETH_RSS_IP,
264 .mq_mode = ETH_MQ_TX_NONE,
268 static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
270 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
272 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
273 #include <rte_hash_crc.h>
274 #define DEFAULT_HASH_FUNC rte_hash_crc
276 #include <rte_jhash.h>
277 #define DEFAULT_HASH_FUNC rte_jhash
286 } __attribute__((__packed__));
288 union ipv4_5tuple_host {
301 #define XMM_NUM_IN_IPV6_5TUPLE 3
304 uint8_t ip_dst[IPV6_ADDR_LEN];
305 uint8_t ip_src[IPV6_ADDR_LEN];
309 } __attribute__((__packed__));
311 union ipv6_5tuple_host {
316 uint8_t ip_src[IPV6_ADDR_LEN];
317 uint8_t ip_dst[IPV6_ADDR_LEN];
322 __m128i xmm[XMM_NUM_IN_IPV6_5TUPLE];
325 struct ipv4_udp_replay_route {
326 struct ipv4_5tuple key;
330 struct ipv6_udp_replay_route {
331 struct ipv6_5tuple key;
335 static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
336 {{IPv4(101,0,0,0), IPv4(100,10,0,1), 101, 11, IPPROTO_TCP}, 0},
337 {{IPv4(201,0,0,0), IPv4(200,20,0,1), 102, 12, IPPROTO_TCP}, 1},
338 {{IPv4(111,0,0,0), IPv4(100,30,0,1), 101, 11, IPPROTO_TCP}, 2},
339 {{IPv4(211,0,0,0), IPv4(200,40,0,1), 102, 12, IPPROTO_TCP}, 3},
342 static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
344 {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
345 {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
346 101, 11, IPPROTO_TCP}, 0},
349 {0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
350 {0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
351 102, 12, IPPROTO_TCP}, 1},
354 {0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
355 {0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
356 101, 11, IPPROTO_TCP}, 2},
359 {0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
360 {0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
361 102, 12, IPPROTO_TCP}, 3},
364 typedef struct rte_hash lookup_struct_t;
365 static lookup_struct_t *ipv4_udp_replay_lookup_struct[NB_SOCKETS];
366 static lookup_struct_t *ipv6_udp_replay_lookup_struct[NB_SOCKETS];
368 #ifdef RTE_ARCH_X86_64
369 /* default to 4 million hash entries (approx) */
370 #define UDP_Replay_HASH_ENTRIES 1024*1024*4
372 /* 32-bit has less address-space for hugepage memory, limit to 1M entries */
373 #define UDP_Replay_HASH_ENTRIES 1024*1024*1
375 #define HASH_ENTRY_NUMBER_DEFAULT 4
377 static uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
379 static inline uint32_t
380 ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
383 const union ipv4_5tuple_host *k;
389 p = (const uint32_t *)&k->port_src;
391 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
392 init_val = rte_hash_crc_4byte(t, init_val);
393 init_val = rte_hash_crc_4byte(k->ip_src, init_val);
394 init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
395 init_val = rte_hash_crc_4byte(*p, init_val);
396 #else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
397 init_val = rte_jhash_1word(t, init_val);
398 init_val = rte_jhash_1word(k->ip_src, init_val);
399 init_val = rte_jhash_1word(k->ip_dst, init_val);
400 init_val = rte_jhash_1word(*p, init_val);
401 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
405 static inline uint32_t
406 ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len, uint32_t init_val)
408 const union ipv6_5tuple_host *k;
411 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
412 const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
413 const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
414 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
418 p = (const uint32_t *)&k->port_src;
420 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
421 ip_src0 = (const uint32_t *) k->ip_src;
422 ip_src1 = (const uint32_t *)(k->ip_src+4);
423 ip_src2 = (const uint32_t *)(k->ip_src+8);
424 ip_src3 = (const uint32_t *)(k->ip_src+12);
425 ip_dst0 = (const uint32_t *) k->ip_dst;
426 ip_dst1 = (const uint32_t *)(k->ip_dst+4);
427 ip_dst2 = (const uint32_t *)(k->ip_dst+8);
428 ip_dst3 = (const uint32_t *)(k->ip_dst+12);
429 init_val = rte_hash_crc_4byte(t, init_val);
430 init_val = rte_hash_crc_4byte(*ip_src0, init_val);
431 init_val = rte_hash_crc_4byte(*ip_src1, init_val);
432 init_val = rte_hash_crc_4byte(*ip_src2, init_val);
433 init_val = rte_hash_crc_4byte(*ip_src3, init_val);
434 init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
435 init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
436 init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
437 init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
438 init_val = rte_hash_crc_4byte(*p, init_val);
439 #else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
440 init_val = rte_jhash_1word(t, init_val);
441 init_val = rte_jhash(k->ip_src, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
442 init_val = rte_jhash(k->ip_dst, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
443 init_val = rte_jhash_1word(*p, init_val);
444 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
448 #define IPV4_UDP_Replay_NUM_ROUTES \
449 (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
451 #define IPV6_UDP_Replay_NUM_ROUTES \
452 (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
454 static uint8_t ipv4_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
455 static uint8_t ipv6_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
459 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
460 struct ipv4_udp_replay_route {
466 struct ipv6_udp_replay_route {
472 static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
473 {IPv4(1,1,1,0), 24, 0},
474 {IPv4(2,1,1,0), 24, 1},
475 {IPv4(3,1,1,0), 24, 2},
476 {IPv4(4,1,1,0), 24, 3},
477 {IPv4(5,1,1,0), 24, 4},
478 {IPv4(6,1,1,0), 24, 5},
479 {IPv4(7,1,1,0), 24, 6},
480 {IPv4(8,1,1,0), 24, 7},
483 static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
484 {{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
485 {{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
486 {{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
487 {{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
488 {{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
489 {{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
490 {{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
491 {{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
494 #define IPV4_UDP_Replay_NUM_ROUTES \
495 (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
496 #define IPV6_UDP_Replay_NUM_ROUTES \
497 (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
499 #define IPV4_UDP_Replay_LPM_MAX_RULES 1024
500 #define IPV6_UDP_Replay_LPM_MAX_RULES 1024
501 #define IPV6_UDP_Replay_LPM_NUMBER_TBL8S (1 << 16)
503 typedef struct rte_lpm lookup_struct_t;
504 typedef struct rte_lpm6 lookup6_struct_t;
505 static lookup_struct_t *ipv4_udp_replay_lookup_struct[NB_SOCKETS];
506 static lookup6_struct_t *ipv6_udp_replay_lookup_struct[NB_SOCKETS];
511 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
512 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
513 struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
514 lookup_struct_t * ipv4_lookup_struct;
515 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
516 lookup6_struct_t * ipv6_lookup_struct;
518 lookup_struct_t * ipv6_lookup_struct;
520 } __rte_cache_aligned;
522 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
524 /* Send burst of packets on an output interface */
526 send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
528 struct rte_mbuf **m_table;
532 queueid = qconf->tx_queue_id[port];
533 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
535 ret = rte_eth_tx_burst(port, queueid, m_table, n);
536 if (unlikely(ret < n)) {
538 rte_pktmbuf_free(m_table[ret]);
542 tx_pkt_count[port] += ret;
546 /* Enqueue a single packet, and send burst if queue is filled */
548 send_single_packet(struct rte_mbuf *m, uint8_t port)
552 struct lcore_conf *qconf;
554 lcore_id = rte_lcore_id();
556 qconf = &lcore_conf[lcore_id];
557 len = qconf->tx_mbufs[port].len;
558 qconf->tx_mbufs[port].m_table[len] = m;
561 /* enough pkts to be sent */
562 if (unlikely(len == MAX_PKT_BURST)) {
563 send_burst(qconf, MAX_PKT_BURST, port);
567 qconf->tx_mbufs[port].len = len;
571 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
572 static inline __attribute__((always_inline)) void
573 send_packetsx4(struct lcore_conf *qconf, uint8_t port,
574 struct rte_mbuf *m[], uint32_t num)
578 len = qconf->tx_mbufs[port].len;
581 * If TX buffer for that queue is empty, and we have enough packets,
582 * then send them straightway.
584 if (num >= MAX_TX_BURST && len == 0) {
585 n = rte_eth_tx_burst(port, qconf->tx_queue_id[port], m, num);
586 if (unlikely(n < num)) {
588 rte_pktmbuf_free(m[n]);
595 * Put packets into TX buffer for that queue.
599 n = (n > MAX_PKT_BURST) ? MAX_PKT_BURST - len : num;
602 switch (n % FWDSTEP) {
605 qconf->tx_mbufs[port].m_table[len + j] = m[j];
608 qconf->tx_mbufs[port].m_table[len + j] = m[j];
611 qconf->tx_mbufs[port].m_table[len + j] = m[j];
614 qconf->tx_mbufs[port].m_table[len + j] = m[j];
621 /* enough pkts to be sent */
622 if (unlikely(len == MAX_PKT_BURST)) {
624 send_burst(qconf, MAX_PKT_BURST, port);
626 /* copy rest of the packets into the TX buffer. */
629 switch (len % FWDSTEP) {
632 qconf->tx_mbufs[port].m_table[j] = m[n + j];
635 qconf->tx_mbufs[port].m_table[j] = m[n + j];
638 qconf->tx_mbufs[port].m_table[j] = m[n + j];
641 qconf->tx_mbufs[port].m_table[j] = m[n + j];
647 qconf->tx_mbufs[port].len = len;
649 #endif /* APP_LOOKUP_LPM */
651 #ifdef DO_RFC_1812_CHECKS
653 is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
655 /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
657 * 1. The packet length reported by the Link Layer must be large
658 * enough to hold the minimum length legal IP datagram (20 bytes).
660 if (link_len < sizeof(struct ipv4_hdr))
663 /* 2. The IP checksum must be correct. */
664 /* this is checked in H/W */
667 * 3. The IP version number must be 4. If the version number is not 4
668 * then the packet may be another version of IP, such as IPng or
671 if (((pkt->version_ihl) >> 4) != 4)
674 * 4. The IP header length field must be large enough to hold the
675 * minimum length legal IP datagram (20 bytes = 5 words).
677 if ((pkt->version_ihl & 0xf) < 5)
681 * 5. The IP total length field must be large enough to hold the IP
682 * datagram header, whose length is specified in the IP header length
685 if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
692 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
694 static __m128i mask0;
695 static __m128i mask1;
696 static __m128i mask2;
697 static inline uint8_t
698 get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
701 union ipv4_5tuple_host key;
703 ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
704 __m128i data = _mm_loadu_si128((__m128i*)(ipv4_hdr));
705 /* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */
706 key.xmm = _mm_and_si128(data, mask0);
707 /* Find destination port */
708 ret = rte_hash_lookup(ipv4_udp_replay_lookup_struct, (const void *)&key);
709 return (uint8_t)((ret < 0)? portid : ipv4_udp_replay_out_if[ret]);
712 static inline uint8_t
713 get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup_struct_t * ipv6_udp_replay_lookup_struct)
716 union ipv6_5tuple_host key;
718 ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
719 __m128i data0 = _mm_loadu_si128((__m128i*)(ipv6_hdr));
720 __m128i data1 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)));
721 __m128i data2 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)+sizeof(__m128i)));
722 /* Get part of 5 tuple: src IP address lower 96 bits and protocol */
723 key.xmm[0] = _mm_and_si128(data0, mask1);
724 /* Get part of 5 tuple: dst IP address lower 96 bits and src IP address higher 32 bits */
726 /* Get part of 5 tuple: dst port and src port and dst IP address higher 32 bits */
727 key.xmm[2] = _mm_and_si128(data2, mask2);
729 /* Find destination port */
730 ret = rte_hash_lookup(ipv6_udp_replay_lookup_struct, (const void *)&key);
731 return (uint8_t)((ret < 0)? portid : ipv6_udp_replay_out_if[ret]);
735 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
737 static inline uint8_t
738 get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
742 return (uint8_t) ((rte_lpm_lookup(ipv4_udp_replay_lookup_struct,
743 rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
744 &next_hop) == 0) ? next_hop : portid);
747 static inline uint8_t
748 get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup6_struct_t * ipv6_udp_replay_lookup_struct)
751 return (uint8_t) ((rte_lpm6_lookup(ipv6_udp_replay_lookup_struct,
752 ((struct ipv6_hdr*)ipv6_hdr)->dst_addr, &next_hop) == 0)?
757 static inline void udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid,
758 struct lcore_conf *qconf) __attribute__((unused));
760 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
761 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
763 #define MASK_ALL_PKTS 0xff
764 #define EXCLUDE_1ST_PKT 0xfe
765 #define EXCLUDE_2ND_PKT 0xfd
766 #define EXCLUDE_3RD_PKT 0xfb
767 #define EXCLUDE_4TH_PKT 0xf7
768 #define EXCLUDE_5TH_PKT 0xef
769 #define EXCLUDE_6TH_PKT 0xdf
770 #define EXCLUDE_7TH_PKT 0xbf
771 #define EXCLUDE_8TH_PKT 0x7f
774 simple_ipv4_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
776 struct ether_hdr *eth_hdr[8];
777 struct ether_hdr tmp;
778 struct ipv4_hdr *ipv4_hdr[8];
779 struct udp_hdr *udp_hdr[8];
781 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
782 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
783 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
784 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
785 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
786 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
787 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
788 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
791 memset(&tmp,0,sizeof (struct ether_hdr));
797 ether_addr_copy(ð_hdr[i]->s_addr, &tmp.s_addr);
798 ether_addr_copy(ð_hdr[i]->d_addr, ð_hdr[i]->s_addr);
799 ether_addr_copy(&tmp.s_addr, ð_hdr[i]->d_addr);
802 /* Handle IPv4 headers.*/
803 ipv4_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv4_hdr *,
804 sizeof(struct ether_hdr));
805 ipv4_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv4_hdr *,
806 sizeof(struct ether_hdr));
807 ipv4_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv4_hdr *,
808 sizeof(struct ether_hdr));
809 ipv4_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv4_hdr *,
810 sizeof(struct ether_hdr));
811 ipv4_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv4_hdr *,
812 sizeof(struct ether_hdr));
813 ipv4_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv4_hdr *,
814 sizeof(struct ether_hdr));
815 ipv4_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv4_hdr *,
816 sizeof(struct ether_hdr));
817 ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv4_hdr *,
818 sizeof(struct ether_hdr));
819 struct ipv4_hdr temp_ipv4;
822 temp_ipv4.dst_addr = ipv4_hdr[i]->dst_addr;
823 ipv4_hdr[i]->dst_addr = ipv4_hdr[i]->src_addr;
824 ipv4_hdr[i]->src_addr = temp_ipv4.dst_addr;
827 /* Handle UDP headers.*/
828 udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
829 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
831 udp_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct udp_hdr *,
832 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
833 udp_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct udp_hdr *,
834 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
835 udp_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct udp_hdr *,
836 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
837 udp_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct udp_hdr *,
838 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
839 udp_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct udp_hdr *,
840 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
841 udp_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct udp_hdr *,
842 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
843 udp_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct udp_hdr *,
844 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
845 /*1) memcpy or assignment.*/
847 struct udp_hdr temp_udp;
850 temp_udp.dst_port = udp_hdr[i]->dst_port;
851 udp_hdr[i]->dst_port = udp_hdr[i]->src_port;
852 udp_hdr[i]->src_port = temp_udp.dst_port;
854 #ifdef DO_RFC_1812_CHECKS
855 /* Check to make sure the packet is valid (RFC1812) */
856 uint8_t valid_mask = MASK_ALL_PKTS;
857 if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt_len) < 0) {
858 rte_pktmbuf_free(m[0]);
859 valid_mask &= EXCLUDE_1ST_PKT;
861 if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt_len) < 0) {
862 rte_pktmbuf_free(m[1]);
863 valid_mask &= EXCLUDE_2ND_PKT;
865 if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt_len) < 0) {
866 rte_pktmbuf_free(m[2]);
867 valid_mask &= EXCLUDE_3RD_PKT;
869 if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt_len) < 0) {
870 rte_pktmbuf_free(m[3]);
871 valid_mask &= EXCLUDE_4TH_PKT;
873 if (is_valid_ipv4_pkt(ipv4_hdr[4], m[4]->pkt_len) < 0) {
874 rte_pktmbuf_free(m[4]);
875 valid_mask &= EXCLUDE_5TH_PKT;
877 if (is_valid_ipv4_pkt(ipv4_hdr[5], m[5]->pkt_len) < 0) {
878 rte_pktmbuf_free(m[5]);
879 valid_mask &= EXCLUDE_6TH_PKT;
881 if (is_valid_ipv4_pkt(ipv4_hdr[6], m[6]->pkt_len) < 0) {
882 rte_pktmbuf_free(m[6]);
883 valid_mask &= EXCLUDE_7TH_PKT;
885 if (is_valid_ipv4_pkt(ipv4_hdr[7], m[7]->pkt_len) < 0) {
886 rte_pktmbuf_free(m[7]);
887 valid_mask &= EXCLUDE_8TH_PKT;
889 if (unlikely(valid_mask != MASK_ALL_PKTS)) {
890 if (valid_mask == 0){
894 for (i = 0; i < 8; i++) {
895 if ((0x1 << i) & valid_mask) {
896 udp_replay_simple_replay(m[i], portid, qconf);
902 #endif // End of #ifdef DO_RFC_1812_CHECKS
904 #ifdef DO_RFC_1812_CHECKS
905 /* Update time to live and header checksum */
906 --(ipv4_hdr[0]->time_to_live);
907 --(ipv4_hdr[1]->time_to_live);
908 --(ipv4_hdr[2]->time_to_live);
909 --(ipv4_hdr[3]->time_to_live);
910 ++(ipv4_hdr[0]->hdr_checksum);
911 ++(ipv4_hdr[1]->hdr_checksum);
912 ++(ipv4_hdr[2]->hdr_checksum);
913 ++(ipv4_hdr[3]->hdr_checksum);
914 --(ipv4_hdr[4]->time_to_live);
915 --(ipv4_hdr[5]->time_to_live);
916 --(ipv4_hdr[6]->time_to_live);
917 --(ipv4_hdr[7]->time_to_live);
918 ++(ipv4_hdr[4]->hdr_checksum);
919 ++(ipv4_hdr[5]->hdr_checksum);
920 ++(ipv4_hdr[6]->hdr_checksum);
921 ++(ipv4_hdr[7]->hdr_checksum);
924 send_single_packet(m[0],portid );
925 send_single_packet(m[1],portid );
926 send_single_packet(m[2],portid );
927 send_single_packet(m[3],portid);
928 send_single_packet(m[4],portid);
929 send_single_packet(m[5],portid);
930 send_single_packet(m[6],portid);
931 send_single_packet(m[7],portid);
935 static inline void get_ipv6_5tuple(struct rte_mbuf* m0, __m128i mask0, __m128i mask1,
936 union ipv6_5tuple_host * key)
938 __m128i tmpdata0 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)));
939 __m128i tmpdata1 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i)));
940 __m128i tmpdata2 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i) + sizeof(__m128i)));
941 key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
942 key->xmm[1] = tmpdata1;
943 key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
948 simple_ipv6_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
950 struct ether_hdr *eth_hdr[8],tmp;
951 __attribute__((unused)) struct ipv6_hdr *ipv6_hdr[8], temp_ipv6;
953 union ipv6_5tuple_host key[8];
954 struct udp_hdr *udp_hdr[8];
956 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
957 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
958 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
959 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
960 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
961 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
962 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
963 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
965 memset(&tmp,0,sizeof (struct ether_hdr));
970 ether_addr_copy(ð_hdr[i]->s_addr, &tmp.s_addr);
971 ether_addr_copy(ð_hdr[i]->d_addr, ð_hdr[i]->s_addr);
972 ether_addr_copy(&tmp.s_addr, ð_hdr[i]->d_addr);
974 /* Handle IPv6 headers.*/
975 ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv6_hdr *,
976 sizeof(struct ether_hdr));
977 ipv6_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv6_hdr *,
978 sizeof(struct ether_hdr));
979 ipv6_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv6_hdr *,
980 sizeof(struct ether_hdr));
981 ipv6_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv6_hdr *,
982 sizeof(struct ether_hdr));
983 ipv6_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv6_hdr *,
984 sizeof(struct ether_hdr));
985 ipv6_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv6_hdr *,
986 sizeof(struct ether_hdr));
987 ipv6_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv6_hdr *,
988 sizeof(struct ether_hdr));
989 ipv6_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv6_hdr *,
990 sizeof(struct ether_hdr));
993 memcpy(temp_ipv6.dst_addr,ipv6_hdr[i]->dst_addr,16);
994 memcpy(ipv6_hdr[i]->dst_addr,ipv6_hdr[i]->src_addr,16);
995 memcpy(ipv6_hdr[i]->src_addr,temp_ipv6.dst_addr,16);
998 /* Handle UDP headers.*/
999 udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
1000 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1002 udp_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct udp_hdr *,
1003 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1004 udp_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct udp_hdr *,
1005 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1006 udp_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct udp_hdr *,
1007 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1008 udp_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct udp_hdr *,
1009 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1010 udp_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct udp_hdr *,
1011 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1012 udp_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct udp_hdr *,
1013 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1014 udp_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct udp_hdr *,
1015 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1016 /*1) memcpy or assignment.*/
1018 struct udp_hdr temp_udp;
1021 temp_udp.dst_port = udp_hdr[i]->dst_port;
1022 udp_hdr[i]->dst_port = udp_hdr[i]->src_port;
1023 udp_hdr[i]->src_port = temp_udp.dst_port;
1025 const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
1026 &key[4], &key[5], &key[6], &key[7]};
1027 #if RTE_VERSION < 0x100b0000
1028 rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
1030 rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
1032 send_single_packet(m[0],portid);
1033 send_single_packet(m[1],portid);
1034 send_single_packet(m[2],portid);
1035 send_single_packet(m[3],portid);
1036 send_single_packet(m[4],portid);
1037 send_single_packet(m[5],portid);
1038 send_single_packet(m[6],portid);
1039 send_single_packet(m[7],portid);
1042 #endif /* APP_LOOKUP_METHOD */
1044 static inline __attribute__((always_inline)) void
1045 udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf)
/*
 * Interior of the single-packet UDP replay fast path (the enclosing
 * function's signature is above this extract; several original lines are
 * elided here).  Swaps L2 MACs, L3 addresses (IPv4 or IPv6) and L4 UDP
 * ports in place, then sends the packet back out, per the "correlated
 * traffic VNF" description at the top of the file.
 */
1047 struct ether_hdr *eth_hdr,tmp;
1048 struct ipv4_hdr *ipv4_hdr,temp_ipv4;
1050 struct udp_hdr *udp_hdr,temp_udp;
/* Swap source and destination MAC addresses, using tmp as scratch.
 * NOTE(review): "ð_hdr" below is a mis-encoded "&eth_hdr" (the HTML
 * entity "&eth;" rendered as U+00F0); must be restored before compiling. */
1052 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
1053 ether_addr_copy(ð_hdr->s_addr, &tmp.s_addr);
1054 ether_addr_copy(ð_hdr->d_addr, ð_hdr->s_addr);
1055 ether_addr_copy(&tmp.s_addr, ð_hdr->d_addr);
1056 struct ether_hdr *eth_h = rte_pktmbuf_mtod(m, struct ether_hdr *);
/* ether_type is big-endian in the frame; on little-endian CPUs the
 * rte_cpu_to_be_16() swap is involutive, so this compares equal to the
 * host-order ETHER_TYPE_IPv4 constant.  Clearer would be
 * eth_h->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4). */
1058 if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
1059 /* Handle IPv4 headers.*/
1060 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
1061 sizeof(struct ether_hdr));
/* Swap src/dst IPv4 addresses in place (no byte-order change needed). */
1062 temp_ipv4.dst_addr = ipv4_hdr->dst_addr;
1063 ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
1064 ipv4_hdr->src_addr = temp_ipv4.dst_addr;
1065 #ifdef DO_RFC_1812_CHECKS
1066 /* Check to make sure the packet is valid (RFC1812) */
1067 if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
1068 rte_pktmbuf_free(m);
1073 dst_port = get_ipv4_dst_port(ipv4_hdr, portid,
1074 qconf->ipv4_lookup_struct);
1077 #ifdef DO_RFC_1812_CHECKS
1078 /* Update time to live and header checksum */
/* Incrementing the checksum compensates for the TTL decrement
 * (l3fwd-style approximate incremental checksum update). */
1079 --(ipv4_hdr->time_to_live);
1080 ++(ipv4_hdr->hdr_checksum);
1082 /* Handle UDP headers.*/
1083 udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
1084 (sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr)));
1086 /*Swapping Src and Dst Port*/
1087 temp_udp.dst_port = udp_hdr->dst_port;
1088 udp_hdr->dst_port = udp_hdr->src_port;
1089 udp_hdr->src_port = temp_udp.dst_port;
/* IPv4 reply goes out via the looked-up dst_port. */
1091 send_single_packet(m, dst_port);
1092 } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
1093 /* Handle IPv6 headers.*/
1094 struct ipv6_hdr *ipv6_hdr,temp_ipv6;
1096 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
1097 sizeof(struct ether_hdr));
1099 /*Swapping of Src and Dst IP address*/
1100 memcpy(temp_ipv6.dst_addr,ipv6_hdr->dst_addr,16);
1101 memcpy(ipv6_hdr->dst_addr,ipv6_hdr->src_addr,16);
1102 memcpy(ipv6_hdr->src_addr,temp_ipv6.dst_addr,16);
1105 dst_port = get_ipv6_dst_port(ipv6_hdr, portid, qconf->ipv6_lookup_struct);
1106 /* Handle UDP headers.*/
1107 udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
1108 (sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr)));
1109 /*Swapping Src and Dst Port*/
1110 temp_udp.dst_port = udp_hdr->dst_port;
1111 udp_hdr->dst_port = udp_hdr->src_port;
1112 udp_hdr->src_port = temp_udp.dst_port;
/* NOTE(review): the IPv6 path sends out the RX port (portid) even though
 * dst_port was just looked up above — confirm this asymmetry with the
 * IPv4 path (which uses dst_port) is intended. */
1113 send_single_packet(m, portid);
1115 /* Free the mbuf that contains non-IPV4/IPV6 packet */
1116 rte_pktmbuf_free(m);
/*
 * RFC 1812 header sanity checks for the multi-buffer LPM path.  Compiled
 * only when APP_LOOKUP_LPM and ENABLE_MULTI_BUFFER_OPTIMIZE are both set;
 * otherwise a no-op macro stub is defined below.
 */
1119 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1120 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1121 #ifdef DO_RFC_1812_CHECKS
/* version_ihl byte: version 4, IHL from 5 words (20 B) to 15 words (60 B). */
1123 #define IPV4_MIN_VER_IHL 0x45
1124 #define IPV4_MAX_VER_IHL 0x4f
1125 #define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)
1127 /* Minimum value of IPV4 total length (20B) in network byte order. */
1128 #define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
1131 * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
1132 * - The IP version number must be 4.
1133 * - The IP header length field must be large enough to hold the
1134 * minimum length legal IP datagram (20 bytes = 5 words).
1135 * - The IP total length field must be large enough to hold the IP
1136 * datagram header, whose length is specified in the IP header length
1138 * If we encounter invalid IPV4 packet, then set destination port for it
1139 * to BAD_PORT value.
/* Validates version/IHL and total length for IPv4 packets; the branch
 * body that sets *dp to BAD_PORT is elided in this extract — confirm
 * against the full source. */
1141 static inline __attribute__((always_inline)) void
1142 rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
1146 if (RTE_ETH_IS_IPV4_HDR(ptype)) {
1147 ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;
/* TTL decrement with compensating checksum increment (approximate
 * incremental update, same trick as DPDK l3fwd). */
1149 ipv4_hdr->time_to_live--;
1150 ipv4_hdr->hdr_checksum++;
/* total_length is big-endian here: low byte zero plus value below
 * IPV4_MIN_LEN_BE means the host-order length is under 20 bytes. */
1152 if (ihl > IPV4_MAX_VER_IHL_DIFF ||
1153 ((uint8_t)ipv4_hdr->total_length == 0 &&
1154 ipv4_hdr->total_length < IPV4_MIN_LEN_BE)) {
/* Stub used when RFC 1812 checks are compiled out. */
1161 #define rfc1812_process(mb, dp) do { } while (0)
1162 #endif /* DO_RFC_1812_CHECKS */
1163 #endif /* APP_LOOKUP_LPM && ENABLE_MULTI_BUFFER_OPTIMIZE */
/*
 * Resolve the output port for one packet (LPM path).  IPv4 uses the
 * pre-extracted host-order dst_ipv4 against the LPM table; IPv6 re-reads
 * the header from the mbuf and queries LPM6.  The lookup-failure /
 * return lines are elided in this extract.
 */
1166 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1167 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1169 static inline __attribute__((always_inline)) uint16_t
1170 get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
1171 uint32_t dst_ipv4, uint8_t portid)
1174 struct ipv6_hdr *ipv6_hdr;
1175 struct ether_hdr *eth_hdr;
/* NOTE(review): "m" is not a parameter of this function (the mbuf is
 * "pkt" here) — looks like a copy/paste slip from the single-buffer
 * path; confirm against the full source. */
1176 struct ether_hdr *eth_h = rte_pktmbuf_mtod(m, struct ether_hdr *);
1178 if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
1179 if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
1182 } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
/* IPv6 header immediately follows the ethernet header. */
1183 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1184 ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
1185 if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
1186 ipv6_hdr->dst_addr, &next_hop) != 0)
/*
 * Process one packet on the LPM multi-buffer path: resolve its
 * destination port, run RFC 1812 checks, and rewrite the first 12 bytes
 * of the frame (both MAC addresses) via an SSE blend with the
 * precomputed per-port ethernet header (ve).  Some declarations and
 * the ve load are elided in this extract.
 */
1196 process_packet(struct lcore_conf *qconf, struct rte_mbuf *pkt,
1197 uint16_t *dst_port, uint8_t portid)
1199 struct ether_hdr *eth_hdr;
1200 struct ipv4_hdr *ipv4_hdr;
1205 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1206 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
/* Destination address converted to host order for the LPM lookup. */
1209 dst_ipv4 = ipv4_hdr->dst_addr;
1210 dst_ipv4 = rte_be_to_cpu_32(dst_ipv4);
1212 /*Changing the dp to incoming port*/
1213 dp = get_dst_port(qconf, pkt, dst_ipv4, portid);
1216 te = _mm_loadu_si128((__m128i *)eth_hdr);
1220 rfc1812_process(ipv4_hdr, dst_port, pkt->packet_type);
/* Blend keeps payload bytes from te and takes the MAC bytes from ve
 * (MASK_ETH selects the first 12 bytes), then stores back in place. */
1222 te = _mm_blend_epi16(te, ve, MASK_ETH);
1223 _mm_storeu_si128((__m128i *)eth_hdr, te);
1225 /* Won't be using the following function. */
1228 * Read packet_type and destination IPV4 addresses from 4 mbufs.
/* Step 1 of the 4-wide pipeline: gather the four IPv4 destination
 * addresses into one __m128i (dip) and AND the four packet_type fields
 * together so the caller can tell whether ALL four packets are IPv4.
 * The dip parameter line is elided in this extract. */
1231 processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
1233 uint32_t *ipv4_flag)
1235 struct ipv4_hdr *ipv4_hdr;
1236 struct ether_hdr *eth_hdr;
1237 uint32_t x0, x1, x2, x3;
1239 eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
1240 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1241 x0 = ipv4_hdr->dst_addr;
1242 ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
1244 eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
1245 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1246 x1 = ipv4_hdr->dst_addr;
/* AND-accumulate: flag stays non-zero only if every packet is IPv4. */
1247 ipv4_flag[0] &= pkt[1]->packet_type;
1249 eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
1250 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1251 x2 = ipv4_hdr->dst_addr;
1252 ipv4_flag[0] &= pkt[2]->packet_type;
1254 eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
1255 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1256 x3 = ipv4_hdr->dst_addr;
1257 ipv4_flag[0] &= pkt[3]->packet_type;
/* Pack the four big-endian addresses into one vector register. */
1259 dip[0] = _mm_set_epi32(x3, x2, x1, x0);
1263 * Lookup into LPM for destination port.
1264 * If lookup fails, use incoming port (portid) as destination port.
/* Step 2: byte-swap the four gathered addresses to host order, then
 * either do one vectorized rte_lpm_lookupx4 (all four packets IPv4) or
 * fall back to four scalar get_dst_port calls.  The dip/ipv4_flag/portid
 * parameter lines and the else branch header are elided in this extract. */
1267 processx4_step2(const struct lcore_conf *qconf,
1271 struct rte_mbuf *pkt[FWDSTEP],
1272 uint16_t dprt[FWDSTEP])
/* Shuffle mask reverses the 4 bytes within each 32-bit lane. */
1275 const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
1276 4, 5, 6, 7, 0, 1, 2, 3);
1278 /* Byte swap 4 IPV4 addresses. */
1279 dip = _mm_shuffle_epi8(dip, bswap_mask);
1281 /* if all 4 packets are IPV4. */
1282 if (likely(ipv4_flag)) {
1283 rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid);
/* Scalar fallback: per-packet lookup through get_dst_port. */
1286 dprt[0] = get_dst_port(qconf, pkt[0], dst.u32[0], portid);
1287 dprt[1] = get_dst_port(qconf, pkt[1], dst.u32[1], portid);
1288 dprt[2] = get_dst_port(qconf, pkt[2], dst.u32[2], portid);
1289 dprt[3] = get_dst_port(qconf, pkt[3], dst.u32[3], portid);
1294 * Update source and destination MAC addresses in the ethernet header.
1295 * Perform RFC1812 checks and updates for IPV4 packets.
/* Step 3: for four packets at once, blend the precomputed per-port
 * ethernet header (val_eth[dst_port]) into the first 12 bytes of each
 * frame, then run rfc1812_process on each (it may rewrite dst_port to
 * BAD_PORT for invalid packets). */
1298 processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
1300 __m128i te[FWDSTEP];
1301 __m128i ve[FWDSTEP];
1302 __m128i *p[FWDSTEP];
1304 p[0] = rte_pktmbuf_mtod(pkt[0], __m128i *);
1305 p[1] = rte_pktmbuf_mtod(pkt[1], __m128i *);
1306 p[2] = rte_pktmbuf_mtod(pkt[2], __m128i *);
1307 p[3] = rte_pktmbuf_mtod(pkt[3], __m128i *);
/* Load current frame head (te) and the replacement MACs (ve). */
1309 ve[0] = val_eth[dst_port[0]];
1310 te[0] = _mm_loadu_si128(p[0]);
1312 ve[1] = val_eth[dst_port[1]];
1313 te[1] = _mm_loadu_si128(p[1]);
1315 ve[2] = val_eth[dst_port[2]];
1316 te[2] = _mm_loadu_si128(p[2]);
1318 ve[3] = val_eth[dst_port[3]];
1319 te[3] = _mm_loadu_si128(p[3]);
1321 /* Update first 12 bytes, keep rest bytes intact. */
1322 te[0] = _mm_blend_epi16(te[0], ve[0], MASK_ETH);
1323 te[1] = _mm_blend_epi16(te[1], ve[1], MASK_ETH);
1324 te[2] = _mm_blend_epi16(te[2], ve[2], MASK_ETH);
1325 te[3] = _mm_blend_epi16(te[3], ve[3], MASK_ETH);
1327 _mm_storeu_si128(p[0], te[0]);
1328 _mm_storeu_si128(p[1], te[1]);
1329 _mm_storeu_si128(p[2], te[2]);
1330 _mm_storeu_si128(p[3], te[3]);
/* IPv4 header starts right after the 14-byte ethernet header. */
1332 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
1333 &dst_port[0], pkt[0]->packet_type);
1334 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
1335 &dst_port[1], pkt[1]->packet_type);
1336 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
1337 &dst_port[2], pkt[2]->packet_type);
1338 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
1339 &dst_port[3], pkt[3]->packet_type);
1343 * We group consecutive packets with the same destination port into one burst.
1344 * To avoid extra latency this is done together with some other packet
1345 * processing, but after we made a final decision about packet's destination.
1346 * To do this we maintain:
1347 * pnum - array of number of consecutive packets with the same dest port for
1348 * each packet in the input burst.
1349 * lp - pointer to the last updated element in the pnum.
1350 * dlp - dest port value lp corresponds to.
/* GRPSZ = 2^FWDSTEP entries in the prebuilt group table; GRPMSK masks a
 * comparison bitmask into that range. */
1353 #define GRPSZ (1 << FWDSTEP)
1354 #define GRPMSK (GRPSZ - 1)
/* Fold one packet into the current port group: extend the run if its
 * dest port matches dlp, otherwise start a new run at pnum[idx].  The
 * middle of the macro (increment / else) is elided in this extract. */
1356 #define GROUP_PORT_STEP(dlp, dcp, lp, pn, idx) do { \
1357 if (likely((dlp) == (dcp)[(idx)])) { \
1360 (dlp) = (dcp)[idx]; \
1361 (lp) = (pn) + (idx); \
1367 * Group consecutive packets with the same destination port in bursts of 4.
1368 * Suppose we have an array of destination ports:
1369 * dst_port[] = {a, b, c, d, e, ... }
1370 * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
1371 * We do 4 comparisons at once and the result is a 4 bit mask.
1372 * This mask is used as an index into prebuild array of pnum values.
/*
 * Group 4 consecutive packets by destination port using a 4-bit equality
 * mask as an index into a prebuilt table (gptbl) of run lengths.
 * Returns the updated "last pnum element" pointer; several table fields
 * (idx/lpv) and the return are elided in this extract.
 */
1374 static inline uint16_t *
1375 port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
1377 static const struct {
1378 uint64_t pnum; /* prebuild 4 values for pnum[]. */
1379 int32_t idx; /* index for new last updated elemnet. */
1380 uint16_t lpv; /* add value to the last updated element. */
/* 16 entries — one per equality pattern of (a==b, b==c, c==d, d==e). */
1383 /* 0: a != b, b != c, c != d, d != e */
1384 .pnum = UINT64_C(0x0001000100010001),
1389 /* 1: a == b, b != c, c != d, d != e */
1390 .pnum = UINT64_C(0x0001000100010002),
1395 /* 2: a != b, b == c, c != d, d != e */
1396 .pnum = UINT64_C(0x0001000100020001),
1401 /* 3: a == b, b == c, c != d, d != e */
1402 .pnum = UINT64_C(0x0001000100020003),
1407 /* 4: a != b, b != c, c == d, d != e */
1408 .pnum = UINT64_C(0x0001000200010001),
1413 /* 5: a == b, b != c, c == d, d != e */
1414 .pnum = UINT64_C(0x0001000200010002),
1419 /* 6: a != b, b == c, c == d, d != e */
1420 .pnum = UINT64_C(0x0001000200030001),
1425 /* 7: a == b, b == c, c == d, d != e */
1426 .pnum = UINT64_C(0x0001000200030004),
1431 /* 8: a != b, b != c, c != d, d == e */
1432 .pnum = UINT64_C(0x0002000100010001),
1437 /* 9: a == b, b != c, c != d, d == e */
1438 .pnum = UINT64_C(0x0002000100010002),
1443 /* 0xa: a != b, b == c, c != d, d == e */
1444 .pnum = UINT64_C(0x0002000100020001),
1449 /* 0xb: a == b, b == c, c != d, d == e */
1450 .pnum = UINT64_C(0x0002000100020003),
1455 /* 0xc: a != b, b != c, c == d, d == e */
1456 .pnum = UINT64_C(0x0002000300010001),
1461 /* 0xd: a == b, b != c, c == d, d == e */
1462 .pnum = UINT64_C(0x0002000300010002),
1467 /* 0xe: a != b, b == c, c == d, d == e */
1468 .pnum = UINT64_C(0x0002000300040001),
1473 /* 0xf: a == b, b == c, c == d, d == e */
1474 .pnum = UINT64_C(0x0002000300040005),
/* Punning view over pn[] so the 4 counters can be written as one u64. */
1481 uint16_t u16[FWDSTEP + 1];
1483 } *pnum = (void *)pn;
/* Compare dp1 (<a,b,c,d>) against dp2 (<b,c,d,e>) and compress the four
 * 16-bit equality results into the 4-bit table index v. */
1487 dp1 = _mm_cmpeq_epi16(dp1, dp2);
1488 dp1 = _mm_unpacklo_epi16(dp1, dp1);
1489 v = _mm_movemask_ps((__m128)dp1);
1491 /* update last port counter. */
1492 lp[0] += gptbl[v].lpv;
1494 /* if dest port value has changed. */
1496 lp = pnum->u16 + gptbl[v].idx;
1498 pnum->u64 = gptbl[v].pnum;
1506 /* main processing loop */
/*
 * Per-lcore forwarding loop: periodically drains the per-port TX buffers,
 * then polls each assigned RX queue and replays received packets.  Three
 * compile-time variants exist: exact-match 8-at-a-time, LPM 4-at-a-time
 * with port grouping, and a plain prefetch+scalar path.  Many original
 * lines (braces, some locals, continue/return statements) are elided in
 * this extract.
 */
1508 main_loop(__attribute__((unused)) void *dummy)
1510 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1512 uint64_t prev_tsc, diff_tsc, cur_tsc;
1514 uint8_t portid, queueid;
1515 struct lcore_conf *qconf;
/* TSC ticks between TX drains (BURST_TX_DRAIN_US microseconds). */
1516 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
1517 US_PER_S * BURST_TX_DRAIN_US;
1519 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1520 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1524 uint16_t dst_port[MAX_PKT_BURST];
1525 __m128i dip[MAX_PKT_BURST / FWDSTEP];
1526 uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
1527 uint16_t pnum[MAX_PKT_BURST + 1];
1532 lcore_id = rte_lcore_id();
1533 qconf = &lcore_conf[lcore_id];
/* Lcores with no RX queues assigned exit early. */
1535 if (qconf->n_rx_queue == 0) {
1536 RTE_LOG(INFO, UDP_Replay, "lcore %u has nothing to do\n", lcore_id);
1540 RTE_LOG(INFO, UDP_Replay, "entering main loop on lcore %u\n", lcore_id);
1542 for (i = 0; i < qconf->n_rx_queue; i++) {
1544 portid = qconf->rx_queue_list[i].port_id;
1545 queueid = qconf->rx_queue_list[i].queue_id;
1546 RTE_LOG(INFO, UDP_Replay, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
1552 cur_tsc = rte_rdtsc();
1555 * TX burst queue drain
1557 diff_tsc = cur_tsc - prev_tsc;
1558 if (unlikely(diff_tsc > drain_tsc)) {
1561 * This could be optimized (use queueid instead of
1562 * portid), but it is not called so often
1564 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1565 if (qconf->tx_mbufs[portid].len == 0)
1568 qconf->tx_mbufs[portid].len,
1570 qconf->tx_mbufs[portid].len = 0;
1577 * Read packet from RX queues
1579 for (i = 0; i < qconf->n_rx_queue; ++i) {
1580 portid = qconf->rx_queue_list[i].port_id;
1581 queueid = qconf->rx_queue_list[i].queue_id;
1582 nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
/* Per-port RX counter used by the stats dump. */
1585 rcv_pkt_count[portid] += nb_rx;
1589 #if (ENABLE_MULTI_BUFFER_OPTIMIZE == 1)
1590 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
1593 * Send nb_rx - nb_rx%8 packets
/* Exact-match path: AND the 8 packet_type fields so the 8-wide helpers
 * run only when the whole group shares the same L3 family. */
1596 int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
1597 for (j = 0; j < n; j += 8) {
1599 pkts_burst[j]->packet_type &
1600 pkts_burst[j+1]->packet_type &
1601 pkts_burst[j+2]->packet_type &
1602 pkts_burst[j+3]->packet_type &
1603 pkts_burst[j+4]->packet_type &
1604 pkts_burst[j+5]->packet_type &
1605 pkts_burst[j+6]->packet_type &
1606 pkts_burst[j+7]->packet_type;
1607 if (pkt_type & RTE_PTYPE_L3_IPV4) {
1608 simple_ipv4_replay_8pkts(
1609 &pkts_burst[j], portid, qconf);
1610 } else if (pkt_type &
1611 RTE_PTYPE_L3_IPV6) {
1612 simple_ipv6_replay_8pkts(&pkts_burst[j],
/* Mixed group: fall back to per-packet replay. */
1615 udp_replay_simple_replay(pkts_burst[j],
1617 udp_replay_simple_replay(pkts_burst[j+1],
1619 udp_replay_simple_replay(pkts_burst[j+2],
1621 udp_replay_simple_replay(pkts_burst[j+3],
1623 udp_replay_simple_replay(pkts_burst[j+4],
1625 udp_replay_simple_replay(pkts_burst[j+5],
1627 udp_replay_simple_replay(pkts_burst[j+6],
1629 udp_replay_simple_replay(pkts_burst[j+7],
/* Remainder (< 8 packets) handled one at a time. */
1633 for (; j < nb_rx ; j++) {
1634 udp_replay_simple_replay(pkts_burst[j],
1638 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
/* LPM path: 4-wide pipeline — step1 gathers addresses, step2 looks up
 * ports, step3 rewrites headers; then runs of equal ports are grouped. */
1640 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
1641 for (j = 0; j != k; j += FWDSTEP) {
1642 processx4_step1(&pkts_burst[j],
1644 &ipv4_flag[j / FWDSTEP]);
1647 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
1648 for (j = 0; j != k; j += FWDSTEP) {
1649 processx4_step2(qconf, dip[j / FWDSTEP],
1650 ipv4_flag[j / FWDSTEP], portid,
1651 &pkts_burst[j], &dst_port[j]);
1655 * Finish packet processing and group consecutive
1656 * packets with the same destination port.
1658 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
1665 processx4_step3(pkts_burst, dst_port);
1667 /* dp1: <d[0], d[1], d[2], d[3], ... > */
1668 dp1 = _mm_loadu_si128((__m128i *)dst_port);
1670 for (j = FWDSTEP; j != k; j += FWDSTEP) {
1671 processx4_step3(&pkts_burst[j],
1676 * <d[j-3], d[j-2], d[j-1], d[j], ... >
1678 dp2 = _mm_loadu_si128((__m128i *)
1679 &dst_port[j - FWDSTEP + 1]);
1680 lp = port_groupx4(&pnum[j - FWDSTEP],
1685 * <d[j], d[j+1], d[j+2], d[j+3], ... >
1687 dp1 = _mm_srli_si128(dp2,
1689 sizeof(dst_port[0]));
1693 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
1695 dp2 = _mm_shufflelo_epi16(dp1, 0xf9)_
1696 lp = port_groupx4(&pnum[j - FWDSTEP], lp,
1700 * remove values added by the last repeated
1704 dlp = dst_port[j - 1];
1706 /* set dlp and lp to the never used values. */
1708 lp = pnum + MAX_PKT_BURST;
1711 /* Process up to last 3 packets one by one. */
1712 switch (nb_rx % FWDSTEP) {
1714 process_packet(qconf, pkts_burst[j],
1715 dst_port + j, portid);
1716 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
1719 process_packet(qconf, pkts_burst[j],
1720 dst_port + j, portid);
1721 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
1724 process_packet(qconf, pkts_burst[j],
1725 dst_port + j, portid);
1726 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
1731 * Send packets out, through destination port.
1732 * Consecuteve pacekts with the same destination port
1733 * are already grouped together.
1734 * If destination port for the packet equals BAD_PORT,
1735 * then free the packet without sending it out.
1737 for (j = 0; j < nb_rx; j += k) {
1745 if (likely(pn != BAD_PORT)) {
1746 send_packetsx4(qconf, pn,
/* BAD_PORT run: free the whole group without sending. */
1749 for (m = j; m != j + k; m++)
1750 rte_pktmbuf_free(pkts_burst[m]);
1754 #endif /* APP_LOOKUP_METHOD */
1755 #else /* ENABLE_MULTI_BUFFER_OPTIMIZE == 0 */
1757 /* Prefetch first packets */
1758 for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
1759 rte_prefetch0(rte_pktmbuf_mtod(
1760 pkts_burst[j], void *));
1763 /* Prefetch and forward already prefetched packets */
1764 for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
1765 rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
1766 j + PREFETCH_OFFSET], void *));
1767 udp_replay_simple_replay(pkts_burst[j], portid,
1771 /* Forward remaining prefetched packets */
1772 for (; j < nb_rx; j++) {
1773 udp_replay_simple_replay(pkts_burst[j], portid,
1776 #endif /* ENABLE_MULTI_BUFFER_OPTIMIZE */
/*
 * Per-port statistics dump and counter reset.  The enclosing function's
 * signature is elided in this extract (presumably a stats/CLI handler —
 * confirm against the full source).  Drop columns are hard-coded to 0.
 */
1788 printf ("UDP_Replay stats:\n");
1789 printf ("--------------\n");
1790 printf (" Port Rx Packet Tx Packet Rx Pkt Drop Tx Pkt Drop \n");
1791 for (i = 0; i < nb_lcore_params; ++i) {
1792 portid = lcore_params[i].port_id;
1793 printf (" %u %lu %lu 0 0", portid, rcv_pkt_count[(uint64_t)portid], tx_pkt_count[(uint64_t)portid]);
/* Reset counters for all 32 tracked ports after reporting. */
1805 for (i = 0; i < 32; i++) {
1806 rcv_pkt_count[i] = 0;
1807 tx_pkt_count[i] = 0;
/*
 * Validate the --config (port,queue,lcore) tuples: queue ids must be in
 * range and every referenced lcore must be enabled in the coremask.
 * Return statements are elided in this extract.
 */
1814 check_lcore_params(void)
1816 uint8_t queue, lcore;
1820 for (i = 0; i < nb_lcore_params; ++i) {
1821 queue = lcore_params[i].queue_id;
1822 if (queue >= MAX_RX_QUEUE_PER_PORT) {
1823 printf("invalid queue number: %hhu\n", queue);
1826 lcore = lcore_params[i].lcore_id;
1827 if (!rte_lcore_is_enabled(lcore)) {
1828 printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
/* NOTE(review): due to operator precedence, socketid is assigned the
 * boolean result of (rte_lcore_to_socket_id(lcore) != 0), not the
 * socket id itself — the warning still fires for non-zero sockets but
 * prints 1 instead of the real socket number. */
1831 if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
1833 printf("warning: lcore %hhu is on socket %d with numa off \n",
/*
 * Verify that every port referenced by the lcore config is both enabled
 * in the port mask and physically present.  Return statements are
 * elided in this extract.
 */
1841 check_port_config(const unsigned nb_ports)
1846 for (i = 0; i < nb_lcore_params; ++i) {
1847 portid = lcore_params[i].port_id;
1848 if ((enabled_port_mask & (1 << portid)) == 0) {
1849 printf("port %u is not enabled in port mask\n", portid);
1852 if (portid >= nb_ports) {
1853 printf("port %u is not present on the board\n", portid);
/*
 * Return the number of RX queues configured for a port: the highest
 * queue_id referenced for it in lcore_params, plus one.
 */
1861 get_port_n_rx_queues(const uint8_t port)
1866 for (i = 0; i < nb_lcore_params; ++i) {
1867 if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
1868 queue = lcore_params[i].queue_id;
/* Max queue id + 1 == queue count. */
1870 return (uint8_t)(++queue);
/*
 * Distribute the (port,queue) pairs from lcore_params into each lcore's
 * rx_queue_list, rejecting lcores that exceed MAX_RX_QUEUE_PER_LCORE.
 * Return statements are elided in this extract.
 */
1874 init_lcore_rx_queues(void)
1876 uint16_t i, nb_rx_queue;
1879 for (i = 0; i < nb_lcore_params; ++i) {
1880 lcore = lcore_params[i].lcore_id;
1881 nb_rx_queue = lcore_conf[lcore].n_rx_queue;
1882 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
1883 printf("error: too many queues (%u) for lcore: %u\n",
1884 (unsigned)nb_rx_queue + 1, (unsigned)lcore);
/* Append this (port,queue) to the lcore's RX list. */
1887 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
1888 lcore_params[i].port_id;
1889 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
1890 lcore_params[i].queue_id;
1891 lcore_conf[lcore].n_rx_queue++;
/* Print command-line usage for the application. */
1899 print_usage(const char *prgname)
1901 printf ("%s [EAL options] -- -p PORTMASK -P"
1902 " [--config (port,queue,lcore)[,(port,queue,lcore]]"
1903 " [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
1904 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
1905 " -P : enable promiscuous mode\n"
1906 " --config (port,queue,lcore): rx queues configuration\n"
1907 " --eth-dest=X,MM:MM:MM:MM:MM:MM: optional, ethernet destination for port X\n"
1908 " --no-numa: optional, disable numa awareness\n"
1909 " --no-hw-csum: optional, disable hw ip checksum\n"
1910 " --ipv6: optional, specify it if running ipv6 packets\n"
1911 " --enable-jumbo: enable jumbo frame"
1912 " which max packet len is PKTLEN in decimal (64-9600)\n"
1913 " --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n",
/*
 * Parse --max-pkt-len as a decimal integer; rejects empty or trailing
 * garbage input.  The error/success return lines are elided here.
 */
1917 static int parse_max_pkt_len(const char *pktlen)
1922 /* parse decimal string */
1923 len = strtoul(pktlen, &end, 10);
1924 if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
/*
 * Parse -p PORTMASK as a hexadecimal bitmask; rejects empty or trailing
 * garbage input.  The return lines are elided here.
 */
1934 parse_portmask(const char *portmask)
1939 /* parse hexadecimal string */
1940 pm = strtoul(portmask, &end, 16);
1941 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
/*
 * Parse --hash-entry-num (exact-match build only) as a hexadecimal
 * count.  The return lines are elided here.
 */
1950 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
1952 parse_hash_entry_number(const char *hash_entry_num)
1955 unsigned long hash_en;
1956 /* parse hexadecimal string */
1957 hash_en = strtoul(hash_entry_num, &end, 16);
1958 if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
/*
 * Parse the --config "(port,queue,lcore)[,(...)]" option.  Resets
 * nb_lcore_params, then extracts each parenthesised 3-tuple with
 * strtoul and appends it to lcore_params_array.  Several lines (field
 * enum, error returns) are elided in this extract.
 */
1969 parse_config(const char *q_arg)
1972 const char *p, *p0 = q_arg;
1980 unsigned long int_fld[_NUM_FLD];
1981 char *str_fld[_NUM_FLD];
1985 nb_lcore_params = 0;
/* Walk each "(...)" group; p points at '(' and p0 at the matching ')'. */
1987 while ((p = strchr(p0,'(')) != NULL) {
1989 if((p0 = strchr(p,')')) == NULL)
1993 if(size >= sizeof(s))
1996 snprintf(s, sizeof(s), "%.*s", size, p);
1997 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
/* Each field must be a number in [0, 255]. */
1999 for (i = 0; i < _NUM_FLD; i++){
2001 int_fld[i] = strtoul(str_fld[i], &end, 0);
2002 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
2005 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
2006 printf("exceeded max number of lcore params: %hu\n",
2010 lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
2011 lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
2012 lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
/* Point the global config at the freshly parsed array. */
2015 lcore_params = lcore_params_array;
/*
 * Parse --eth-dest=PORT,MM:MM:MM:MM:MM:MM.  Validates the port id,
 * parses the MAC with cmdline_parse_etheraddr, and stores it in both
 * dest_eth_addr[] and the val_eth[] header template for that port.
 * Exits the process on any parse error.
 */
2020 parse_eth_dest(const char *optarg)
2024 uint8_t c, *dest, peer_addr[6];
2027 portid = strtoul(optarg, &port_end, 10);
2028 if (errno != 0 || port_end == optarg || *port_end++ != ',')
2029 rte_exit(EXIT_FAILURE,
2030 "Invalid eth-dest: %s", optarg);
2031 if (portid >= RTE_MAX_ETHPORTS)
2032 rte_exit(EXIT_FAILURE,
2033 "eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
2034 portid, RTE_MAX_ETHPORTS);
2036 if (cmdline_parse_etheraddr(NULL, port_end,
2037 &peer_addr, sizeof(peer_addr)) < 0)
2038 rte_exit(EXIT_FAILURE,
2039 "Invalid ethernet address: %s\n",
/* Copy the 6 MAC bytes and mirror them into the TX header template. */
2041 dest = (uint8_t *)&dest_eth_addr[portid];
2042 for (c = 0; c < 6; c++)
2043 dest[c] = peer_addr[c];
2044 *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
/* Long-option names recognised by parse_args below. */
2047 #define CMD_LINE_OPT_CONFIG "config"
2048 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
2049 #define CMD_LINE_OPT_NO_NUMA "no-numa"
2050 #define CMD_LINE_OPT_NO_HW_CSUM "no-hw-csum"
2051 #define CMD_LINE_OPT_IPV6 "ipv6"
2052 #define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
2053 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
2055 /* Parse the argument given in the command line of the application */
/*
 * Parse application (non-EAL) arguments: -p portmask, -P promiscuous,
 * and the long options defined above.  Several lines (switch labels,
 * returns, flag assignments) are elided in this extract.
 */
2057 parse_args(int argc, char **argv)
2062 char *prgname = argv[0];
2063 static struct option lgopts[] = {
2064 {CMD_LINE_OPT_CONFIG, 1, 0, 0},
2065 {CMD_LINE_OPT_ETH_DEST, 1, 0, 0},
2066 {CMD_LINE_OPT_NO_NUMA, 0, 0, 0},
2067 {CMD_LINE_OPT_NO_HW_CSUM, 0, 0, 0},
2068 {CMD_LINE_OPT_IPV6, 0, 0, 0},
2069 {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0},
2070 {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, 0},
2076 while ((opt = getopt_long(argc, argvopt, "p:P",
2077 lgopts, &option_index)) != EOF) {
/* -p: hexadecimal mask of ports to use. */
2082 enabled_port_mask = parse_portmask(optarg);
2083 if (enabled_port_mask == 0) {
2084 printf("invalid portmask\n");
2085 print_usage(prgname);
2090 printf("Promiscuous mode selected\n");
2096 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_CONFIG,
2097 sizeof (CMD_LINE_OPT_CONFIG))) {
2098 ret = parse_config(optarg);
2100 printf("invalid config\n");
2101 print_usage(prgname);
2106 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ETH_DEST,
2107 sizeof(CMD_LINE_OPT_ETH_DEST))) {
2108 parse_eth_dest(optarg);
2111 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_NUMA,
2112 sizeof(CMD_LINE_OPT_NO_NUMA))) {
2113 printf("numa is disabled \n");
2117 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_HW_CSUM,
2118 sizeof(CMD_LINE_OPT_NO_HW_CSUM))) {
/* NOTE(review): message text looks wrong — should say something like
 * "hw ip checksum is disabled"; the action below is the disable. */
2119 printf("numa is hw ip checksum \n");
2120 port_conf.rxmode.hw_ip_checksum = 0;
2124 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2125 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_IPV6,
2126 sizeof(CMD_LINE_OPT_IPV6))) {
2127 printf("ipv6 is specified \n");
2132 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ENABLE_JUMBO,
2133 sizeof (CMD_LINE_OPT_ENABLE_JUMBO))) {
2134 struct option lenopts = {"max-pkt-len", required_argument, 0, 0};
2136 printf("jumbo frame is enabled - disabling simple TX path\n");
2137 port_conf.rxmode.jumbo_frame = 1;
2139 /* if no max-pkt-len set, use the default value ETHER_MAX_LEN */
2140 if (0 == getopt_long(argc, argvopt, "", &lenopts, &option_index)) {
2141 ret = parse_max_pkt_len(optarg);
2142 if ((ret < 64) || (ret > MAX_JUMBO_PKT_LEN)){
2143 printf("invalid packet length\n");
2144 print_usage(prgname);
2147 port_conf.rxmode.max_rx_pkt_len = ret;
2149 printf("set jumbo frame max packet length to %u\n",
2150 (unsigned int)port_conf.rxmode.max_rx_pkt_len);
2152 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2153 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_HASH_ENTRY_NUM,
2154 sizeof(CMD_LINE_OPT_HASH_ENTRY_NUM))) {
2155 ret = parse_hash_entry_number(optarg);
2156 if ((ret > 0) && (ret <= UDP_Replay_HASH_ENTRIES)) {
2157 hash_entry_number = ret;
2159 printf("invalid hash entry number\n");
2160 print_usage(prgname);
2168 print_usage(prgname);
/* Rewrite argv so the EAL-consumed options disappear for the caller. */
2174 argv[optind-1] = prgname;
2177 optind = 0; /* reset getopt lib */
/* Print "<name><formatted MAC>" for one ethernet address. */
2182 print_ethaddr(const char *name, const struct ether_addr *eth_addr)
2184 char buf[ETHER_ADDR_FMT_SIZE];
2185 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
2186 printf("%s%s", name, buf);
2189 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
/* Convert a host-order IPv4 5-tuple into the big-endian packed key
 * layout used by the exact-match hash table. */
2191 static void convert_ipv4_5tuple(struct ipv4_5tuple* key1,
2192 union ipv4_5tuple_host* key2)
2194 key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst);
2195 key2->ip_src = rte_cpu_to_be_32(key1->ip_src);
2196 key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
2197 key2->port_src = rte_cpu_to_be_16(key1->port_src);
2198 key2->proto = key1->proto;
/* Convert an IPv6 5-tuple into the hash-key layout: 16-byte addresses
 * are copied as-is (already network order), ports are byte-swapped. */
2204 static void convert_ipv6_5tuple(struct ipv6_5tuple* key1,
2205 union ipv6_5tuple_host* key2)
2208 for (i = 0; i < 16; i++)
2210 key2->ip_dst[i] = key1->ip_dst[i];
2211 key2->ip_src[i] = key1->ip_src[i];
2213 key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
2214 key2->port_src = rte_cpu_to_be_16(key1->port_src);
2215 key2->proto = key1->proto;
2222 #define BYTE_VALUE_MAX 256
2223 #define ALL_32_BITS 0xffffffff
2224 #define BIT_8_TO_15 0x0000ff00
/*
 * Seed the IPv4 exact-match hash with the small static route array.
 * Side effect: initialises the global mask0 used when masking lookup
 * keys.  Exits the process if any insertion fails.
 */
2226 populate_ipv4_few_flow_into_table(const struct rte_hash* h)
2230 uint32_t array_len = sizeof(ipv4_udp_replay_route_array)/sizeof(ipv4_udp_replay_route_array[0]);
2232 mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
2233 for (i = 0; i < array_len; i++) {
2234 struct ipv4_udp_replay_route entry;
2235 union ipv4_5tuple_host newkey;
2236 entry = ipv4_udp_replay_route_array[i];
2237 convert_ipv4_5tuple(&entry.key, &newkey);
2238 ret = rte_hash_add_key (h,(void *) &newkey);
2240 rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
2241 " to the udp_replay hash.\n", i);
/* rte_hash_add_key returns the slot index; use it to map key -> port. */
2243 ipv4_udp_replay_out_if[ret] = entry.if_out;
2245 printf("Hash: Adding 0x%" PRIx32 " keys\n", array_len);
2248 #define BIT_16_TO_23 0x00ff0000
/*
 * Seed the IPv6 exact-match hash with the small static route array.
 * Side effect: initialises the global key masks mask1/mask2.  Exits the
 * process if any insertion fails.
 */
2250 populate_ipv6_few_flow_into_table(const struct rte_hash* h)
2254 uint32_t array_len = sizeof(ipv6_udp_replay_route_array)/sizeof(ipv6_udp_replay_route_array[0]);
2256 mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
2257 mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
2258 for (i = 0; i < array_len; i++) {
2259 struct ipv6_udp_replay_route entry;
2260 union ipv6_5tuple_host newkey;
2261 entry = ipv6_udp_replay_route_array[i];
2262 convert_ipv6_5tuple(&entry.key, &newkey);
2263 ret = rte_hash_add_key (h, (void *) &newkey);
2265 rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
2266 " to the udp_replay hash.\n", i);
/* Slot index returned by the hash maps this key to its output port. */
2268 ipv6_udp_replay_out_if[ret] = entry.if_out;
2270 printf("Hash: Adding 0x%" PRIx32 "keys\n", array_len);
2273 #define NUMBER_PORT_USED 4
/*
 * Stress-test population: generate nr_flow synthetic IPv4 5-tuples by
 * cycling through the 4 base routes and incrementing the low 3 octets
 * of the destination address (a = lowest).  Exits on insertion failure.
 */
2275 populate_ipv4_many_flow_into_table(const struct rte_hash* h,
2276 unsigned int nr_flow)
2279 mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
2280 for (i = 0; i < nr_flow; i++) {
2281 struct ipv4_udp_replay_route entry;
2282 union ipv4_5tuple_host newkey;
/* Decompose i/NUMBER_PORT_USED into three base-256 digits c.b.a. */
2283 uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
2284 uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
2285 uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
2286 /* Create the ipv4 exact match flow */
2287 memset(&entry, 0, sizeof(entry));
2288 switch (i & (NUMBER_PORT_USED -1)) {
2290 entry = ipv4_udp_replay_route_array[0];
2291 entry.key.ip_dst = IPv4(101,c,b,a);
2294 entry = ipv4_udp_replay_route_array[1];
2295 entry.key.ip_dst = IPv4(201,c,b,a);
2298 entry = ipv4_udp_replay_route_array[2];
2299 entry.key.ip_dst = IPv4(111,c,b,a);
2302 entry = ipv4_udp_replay_route_array[3];
2303 entry.key.ip_dst = IPv4(211,c,b,a);
2306 convert_ipv4_5tuple(&entry.key, &newkey);
2307 int32_t ret = rte_hash_add_key(h,(void *) &newkey);
2309 rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
2311 ipv4_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
2314 printf("Hash: Adding 0x%x keys\n", nr_flow);
/*
 * Stress-test population, IPv6 variant: cycle through the 4 base routes
 * and vary the last three bytes of the destination address with the
 * base-256 digits of i/NUMBER_PORT_USED.  Exits on insertion failure.
 */
2318 populate_ipv6_many_flow_into_table(const struct rte_hash* h,
2319 unsigned int nr_flow)
2322 mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
2323 mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
2324 for (i = 0; i < nr_flow; i++) {
2325 struct ipv6_udp_replay_route entry;
2326 union ipv6_5tuple_host newkey;
2327 uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
2328 uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
2329 uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
2330 /* Create the ipv6 exact match flow */
2331 memset(&entry, 0, sizeof(entry));
2332 switch (i & (NUMBER_PORT_USED - 1)) {
2333 case 0: entry = ipv6_udp_replay_route_array[0]; break;
2334 case 1: entry = ipv6_udp_replay_route_array[1]; break;
2335 case 2: entry = ipv6_udp_replay_route_array[2]; break;
2336 case 3: entry = ipv6_udp_replay_route_array[3]; break;
/* Vary the low-order address bytes to make each flow unique. */
2338 entry.key.ip_dst[13] = c;
2339 entry.key.ip_dst[14] = b;
2340 entry.key.ip_dst[15] = a;
2341 convert_ipv6_5tuple(&entry.key, &newkey);
2342 int32_t ret = rte_hash_add_key(h,(void *) &newkey);
2344 rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
2346 ipv6_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
2349 printf("Hash: Adding 0x%x keys\n", nr_flow);
/*
 * Create the per-socket IPv4 and IPv6 exact-match hash tables and
 * populate them: many synthetic flows when --hash-entry-num was given,
 * otherwise the small static route arrays.  Exits on creation failure.
 */
2353 setup_hash(int socketid)
2355 struct rte_hash_parameters ipv4_udp_replay_hash_params = {
2357 .entries = UDP_Replay_HASH_ENTRIES,
2358 .key_len = sizeof(union ipv4_5tuple_host),
2359 .hash_func = ipv4_hash_crc,
2360 .hash_func_init_val = 0,
2363 struct rte_hash_parameters ipv6_udp_replay_hash_params = {
2365 .entries = UDP_Replay_HASH_ENTRIES,
2366 .key_len = sizeof(union ipv6_5tuple_host),
2367 .hash_func = ipv6_hash_crc,
2368 .hash_func_init_val = 0,
2373 /* create ipv4 hash */
2374 snprintf(s, sizeof(s), "ipv4_udp_replay_hash_%d", socketid);
2375 ipv4_udp_replay_hash_params.name = s;
2376 ipv4_udp_replay_hash_params.socket_id = socketid;
2377 ipv4_udp_replay_lookup_struct[socketid] = rte_hash_create(&ipv4_udp_replay_hash_params);
2378 if (ipv4_udp_replay_lookup_struct[socketid] == NULL)
2379 rte_exit(EXIT_FAILURE, "Unable to create the udp_replay hash on "
2380 "socket %d\n", socketid);
2382 /* create ipv6 hash */
2383 snprintf(s, sizeof(s), "ipv6_udp_replay_hash_%d", socketid);
2384 ipv6_udp_replay_hash_params.name = s;
2385 ipv6_udp_replay_hash_params.socket_id = socketid;
2386 ipv6_udp_replay_lookup_struct[socketid] = rte_hash_create(&ipv6_udp_replay_hash_params);
2387 if (ipv6_udp_replay_lookup_struct[socketid] == NULL)
2388 rte_exit(EXIT_FAILURE, "Unable to create the udp_replay hash on "
2389 "socket %d\n", socketid);
2391 if (hash_entry_number != HASH_ENTRY_NUMBER_DEFAULT) {
2392 /* For testing hash matching with a large number of flows we
2393 * generate millions of IP 5-tuples with an incremented dst
2394 * address to initialize the hash table. */
2396 /* populate the ipv4 hash */
2397 populate_ipv4_many_flow_into_table(
2398 ipv4_udp_replay_lookup_struct[socketid], hash_entry_number);
2400 /* populate the ipv6 hash */
2401 populate_ipv6_many_flow_into_table(
2402 ipv6_udp_replay_lookup_struct[socketid], hash_entry_number);
2405 /* Use data in ipv4/ipv6 udp_replay lookup table directly to initialize the hash table */
2407 /* populate the ipv4 hash */
2408 populate_ipv4_few_flow_into_table(ipv4_udp_replay_lookup_struct[socketid]);
2410 /* populate the ipv6 hash */
2411 populate_ipv6_few_flow_into_table(ipv6_udp_replay_lookup_struct[socketid]);
2417 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
/*
 * Create the per-socket IPv4 LPM and IPv6 LPM6 tables and load the
 * static route arrays into them, skipping routes whose output port is
 * not in enabled_port_mask.  Exits on any creation/insertion failure.
 */
2419 setup_lpm(int socketid)
2421 struct rte_lpm6_config config;
2426 /* create the LPM table */
2427 snprintf(s, sizeof(s), "IPV4_UDP_Replay_LPM_%d", socketid);
2428 ipv4_udp_replay_lookup_struct[socketid] = rte_lpm_create(s, socketid,
2429 IPV4_UDP_Replay_LPM_MAX_RULES, 0);
2430 if (ipv4_udp_replay_lookup_struct[socketid] == NULL)
2431 rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
2432 " on socket %d\n", socketid);
2434 /* populate the LPM table */
2435 for (i = 0; i < IPV4_UDP_Replay_NUM_ROUTES; i++) {
2437 /* skip unused ports */
2438 if ((1 << ipv4_udp_replay_route_array[i].if_out &
2439 enabled_port_mask) == 0)
2442 ret = rte_lpm_add(ipv4_udp_replay_lookup_struct[socketid],
2443 ipv4_udp_replay_route_array[i].ip,
2444 ipv4_udp_replay_route_array[i].depth,
2445 ipv4_udp_replay_route_array[i].if_out);
2448 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
2449 "udp_replay LPM table on socket %d\n",
2453 printf("LPM: Adding route 0x%08x / %d (%d)\n",
2454 (unsigned)ipv4_udp_replay_route_array[i].ip,
2455 ipv4_udp_replay_route_array[i].depth,
2456 ipv4_udp_replay_route_array[i].if_out);
2459 /* create the LPM6 table */
2460 snprintf(s, sizeof(s), "IPV6_UDP_Replay_LPM_%d", socketid);
2462 config.max_rules = IPV6_UDP_Replay_LPM_MAX_RULES;
2463 config.number_tbl8s = IPV6_UDP_Replay_LPM_NUMBER_TBL8S;
2465 ipv6_udp_replay_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
2467 if (ipv6_udp_replay_lookup_struct[socketid] == NULL)
2468 rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
2469 " on socket %d\n", socketid);
2471 /* populate the LPM table */
2472 for (i = 0; i < IPV6_UDP_Replay_NUM_ROUTES; i++) {
2474 /* skip unused ports */
2475 if ((1 << ipv6_udp_replay_route_array[i].if_out &
2476 enabled_port_mask) == 0)
2479 ret = rte_lpm6_add(ipv6_udp_replay_lookup_struct[socketid],
2480 ipv6_udp_replay_route_array[i].ip,
2481 ipv6_udp_replay_route_array[i].depth,
2482 ipv6_udp_replay_route_array[i].if_out);
2485 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
2486 "udp_replay LPM table on socket %d\n",
2490 printf("LPM: Adding route %s / %d (%d)\n",
2492 ipv6_udp_replay_route_array[i].depth,
2493 ipv6_udp_replay_route_array[i].if_out);
/*
 * init_mem() - per-enabled-lcore memory setup: validate the lcore's
 * socket id, create one mbuf pool per NUMA socket (shared by all
 * lcores on that socket), build the lookup structure for that socket
 * (LPM or hash, selected at compile time), and cache the per-socket
 * lookup pointers in each lcore's lcore_conf.
 *
 * NOTE(review): lossy excerpt — the return-type line, declarations of
 * 'lcore_id'/'socketid'/'s', the 'continue;' for disabled lcores, the
 * '#else' between setup_lpm/setup_hash, '#endif', 'return 0;' and the
 * closing brace are elided from this view.
 */
2499 init_mem(unsigned nb_mbuf)
2501 struct lcore_conf *qconf;
2506 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2507 if (rte_lcore_is_enabled(lcore_id) == 0)
2511 socketid = rte_lcore_to_socket_id(lcore_id);
/* Abort early rather than index pktmbuf_pool[] out of bounds. */
2515 if (socketid >= NB_SOCKETS) {
2516 rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is out of range %d\n",
2517 socketid, lcore_id, NB_SOCKETS);
/* First lcore seen on each socket creates that socket's pool. */
2519 if (pktmbuf_pool[socketid] == NULL) {
2520 snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
2521 pktmbuf_pool[socketid] =
2522 rte_pktmbuf_pool_create(s, nb_mbuf,
2523 MEMPOOL_CACHE_SIZE, 0,
2524 RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
2525 if (pktmbuf_pool[socketid] == NULL)
2526 rte_exit(EXIT_FAILURE,
2527 "Cannot init mbuf pool on socket %d\n", socketid);
2529 printf("Allocated mbuf pool on socket %d\n", socketid);
/* Compile-time choice of lookup method; the '#else' arm is elided. */
2531 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
2532 setup_lpm(socketid);
2534 setup_hash(socketid);
2537 qconf = &lcore_conf[lcore_id];
2538 qconf->ipv4_lookup_struct = ipv4_udp_replay_lookup_struct[socketid];
2539 qconf->ipv6_lookup_struct = ipv6_udp_replay_lookup_struct[socketid];
2544 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * Polls rte_eth_link_get_nowait() on every enabled port every 100 ms,
 * up to 90 times; once all ports report link-up (or the timeout hits)
 * it prints the final per-port status and returns.
 *
 * NOTE(review): lossy excerpt — the 'continue;' for masked-out ports,
 * the all_ports_up bookkeeping, the 'break' statements and closing
 * braces are elided between the numbered lines below.
 */
2546 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
2548 #define CHECK_INTERVAL 100 /* 100ms */
2549 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2550 uint8_t portid, count, all_ports_up, print_flag = 0;
2551 struct rte_eth_link link;
2553 printf("\nChecking link status");
2555 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2557 for (portid = 0; portid < port_num; portid++) {
2558 if ((port_mask & (1 << portid)) == 0)
2560 memset(&link, 0, sizeof(link));
/* _nowait: does not block waiting for link negotiation to finish. */
2561 rte_eth_link_get_nowait(portid, &link);
2562 /* print link status if flag set */
2563 if (print_flag == 1) {
2564 if (link.link_status)
2565 printf("Port %d Link Up - speed %u "
2566 "Mbps - %s\n", (uint8_t)portid,
2567 (unsigned)link.link_speed,
2568 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* NOTE(review): defect — the format string already ends in '\n', so
 * the "half-duplex\n" literal prints a duplicate blank line. */
2569 ("full-duplex") : ("half-duplex\n"));
2571 printf("Port %d Link Down\n",
2575 /* clear all_ports_up flag if any link down */
2576 if (link.link_status == 0) {
2581 /* after finally printing all link status, get out */
2582 if (print_flag == 1)
2585 if (all_ports_up == 0) {
2588 rte_delay_ms(CHECK_INTERVAL);
2591 /* set the print_flag if all ports up or timeout */
/* NOTE(review): loop runs count <= MAX_CHECK_TIME but the timeout test
 * is count == (MAX_CHECK_TIME - 1) — the final iteration never sets
 * print_flag via timeout; verify this off-by-one is intended. */
2592 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * main() - application entry point: EAL init, argument parsing, per-port
 * device/queue configuration, mbuf-pool and lookup-table setup via
 * init_mem(), port start, link-status check, launch of main_loop on all
 * lcores, then an interactive cmdline ("Replay>") on the master lcore.
 *
 * NOTE(review): lossy excerpt — 'int ret', 'unsigned lcore_id',
 * 'uint16_t queueid', 'uint8_t nb_ports', the 'if (ret < 0)' guards
 * before most rte_exit calls, 'continue;' statements and closing braces
 * are elided between the numbered lines below.
 */
2600 main(int argc, char **argv)
2602 struct lcore_conf *qconf;
2603 struct rte_eth_dev_info dev_info;
2604 struct rte_eth_txconf *txconf;
2609 uint32_t n_tx_queue, nb_lcores;
2610 uint8_t portid, nb_rx_queue, queue, socketid;
/* EAL must come first; it consumes its own argv segment. */
2614 ret = rte_eal_init(argc, argv);
2616 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2619 /* parse application arguments (after the EAL ones) */
2620 ret = parse_args(argc, argv);
2622 rte_exit(EXIT_FAILURE, "Invalid UDP_Replay parameters\n");
2624 if (check_lcore_params() < 0)
2625 rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
2627 ret = init_lcore_rx_queues();
2629 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
2631 nb_ports = rte_eth_dev_count();
2632 if (nb_ports > RTE_MAX_ETHPORTS)
2633 nb_ports = RTE_MAX_ETHPORTS;
2635 if (check_port_config(nb_ports) < 0)
2636 rte_exit(EXIT_FAILURE, "check_port_config failed\n");
2638 nb_lcores = rte_lcore_count();
2640 /* initialize all ports */
2641 for (portid = 0; portid < nb_ports; portid++) {
2642 /* skip ports that are not enabled */
2643 if ((enabled_port_mask & (1 << portid)) == 0) {
2644 printf("\nSkipping disabled port %d\n", portid);
2649 printf("Initializing port %d ... ", portid );
/* One TX queue per RX queue, capped at the per-port maximum. */
2652 nb_rx_queue = get_port_n_rx_queues(portid);
2653 n_tx_queue = nb_rx_queue;
2654 if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
2655 n_tx_queue = MAX_TX_QUEUE_PER_PORT;
2656 printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
2657 nb_rx_queue, (unsigned)n_tx_queue );
2658 ret = rte_eth_dev_configure(portid, nb_rx_queue,
2659 (uint16_t)n_tx_queue, &port_conf);
/* Fallback: retry once without HW IP checksum offload, since some
 * NICs/PMDs reject that rxmode flag. */
2661 printf("Port configuration failed : port: %d... Try with hw-ip-checksum disabled\n", portid);
2662 port_conf.rxmode.hw_ip_checksum = 0;
2663 ret = rte_eth_dev_configure(portid, nb_rx_queue,
2664 (uint16_t)n_tx_queue, &port_conf);
2666 rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
2669 /*Since its just swapping of MAC we dont have to fill our own src mac*/
2670 rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
2671 print_ethaddr(" Address:", &ports_eth_addr[portid]);
2674 ret = init_mem(NB_MBUF);
2676 rte_exit(EXIT_FAILURE, "init_mem failed\n");
2678 /* init one TX queue per couple (lcore,port) */
/* NOTE(review): suspect bug — this iterates lcore ids bounded by
 * n_tx_queue (the TX-queue count) rather than RTE_MAX_LCORE; lcores
 * with ids >= n_tx_queue get no TX queue. Confirm against upstream. */
2680 for (lcore_id = 0; lcore_id < n_tx_queue; lcore_id++) {
2681 if (rte_lcore_is_enabled(lcore_id) == 0)
2685 socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2689 printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
2692 rte_eth_dev_info_get(portid, &dev_info);
2693 txconf = &dev_info.default_txconf;
/* Clearing txq_flags re-enables multi-segment TX, needed for jumbo. */
2694 if (port_conf.rxmode.jumbo_frame)
2695 txconf->txq_flags = 0;
2696 ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
2699 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
2700 "port=%d\n", ret, portid);
2702 qconf = &lcore_conf[lcore_id];
2703 qconf->tx_queue_id[portid] = queueid;
2709 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2710 if (rte_lcore_is_enabled(lcore_id) == 0)
2712 qconf = &lcore_conf[lcore_id];
2713 printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
2715 /* init RX queues */
2716 for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
2717 portid = qconf->rx_queue_list[queue].port_id;
2718 queueid = qconf->rx_queue_list[queue].queue_id;
2721 socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2725 printf("rxq=%d,%d,%d ", portid, queueid, socketid);
2728 ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
2731 pktmbuf_pool[socketid]);
2733 rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d,"
2734 "port=%d\n", ret, portid);
/* start ports only after all queues are configured */
2741 for (portid = 0; portid < nb_ports; portid++) {
2742 if ((enabled_port_mask & (1 << portid)) == 0) {
2746 ret = rte_eth_dev_start(portid);
2748 rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
2752 * If enabled, put device in promiscuous mode.
2753 * This allows IO forwarding mode to forward packets
2754 * to itself through 2 cross-connected ports of the
2758 rte_eth_promiscuous_enable(portid);
2761 check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
2763 /* launch per-lcore init on every lcore */
2764 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
/* Master lcore drops into the interactive "Replay>" shell. */
2765 cl = cmdline_stdin_new(main_ctx, "Replay>");
2767 rte_panic("Cannot create cmdline instance\n");
2768 cmdline_interact(cl);
2769 cmdline_stdin_exit(cl);
/* NOTE(review): rte_exit() terminates the process here, so the
 * rte_eal_wait_lcore loop below is unreachable dead code. */
2770 rte_exit(0, "Bye!\n");
2771 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
2772 if (rte_eal_wait_lcore(lcore_id) < 0)
2778 /**********************************************************/
2780 struct cmd_obj_clear_result {
2781 cmdline_fixed_string_t clear;
2782 cmdline_fixed_string_t udp_replay;
2783 cmdline_fixed_string_t stats;
2786 static void cmd_clear_udp_replay_stats_parsed(
2787 __rte_unused void *parsed_result,
2788 __rte_unused struct cmdline *cl,
2789 __attribute__((unused)) void *data)
2795 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_udp_replay_string =
2796 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, udp_replay, "UDP_Replay");
2797 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_clear_string =
2798 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, clear, "clear");
2799 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_stats_string =
2800 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, stats, "stats");
2802 cmdline_parse_inst_t cmd_clear_udp_replay_stats = {
2803 .f = cmd_clear_udp_replay_stats_parsed, /* function to call */
2804 .data = NULL, /* 2nd arg of func */
2805 .help_str = "clears UDP_Replay stats for rx/tx",
2806 .tokens = { /* token list, NULL terminated */
2807 (void *)&cmd_clear_udp_replay_stats_udp_replay_string,
2808 (void *)&cmd_clear_udp_replay_stats_clear_string,
2809 (void *)&cmd_clear_udp_replay_stats_stats_string,
2813 /**********************************************************/
2814 struct cmd_obj_add_result {
2815 cmdline_fixed_string_t action;
2816 cmdline_fixed_string_t name;
2819 static void cmd_udp_replay_stats_parsed(
2820 __rte_unused void *parsed_result,
2821 __rte_unused struct cmdline *cl,
2822 __attribute__((unused)) void *data)
2827 cmdline_parse_token_string_t cmd_udp_replay_stats_udp_replay_string =
2828 TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, action, "UDP_Replay");
2829 cmdline_parse_token_string_t cmd_udp_replay_stats_stats_string =
2830 TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, name, "stats");
2832 cmdline_parse_inst_t cmd_udp_replay_stats = {
2833 .f = cmd_udp_replay_stats_parsed, /* function to call */
2834 .data = NULL, /* 2nd arg of func */
2835 .help_str = "UDP_Replay stats for rx/tx",
2836 .tokens = { /* token list, NULL terminated */
2837 (void *)&cmd_udp_replay_stats_udp_replay_string,
2838 (void *)&cmd_udp_replay_stats_stats_string,
/*
 * cmdline command "quit": terminates the interactive shell. The
 * handler's name line (original line 2848, presumably
 * "static void cmd_quit_parsed(") and its body (2852-2854, presumably
 * cmdline_quit(cl)) are elided from this excerpt — verify against the
 * full source.
 */
2843 struct cmd_quit_result {
2844 cmdline_fixed_string_t quit;
2849 __rte_unused void *parsed_result,
2851 __rte_unused void *data)
2856 static cmdline_parse_token_string_t cmd_quit_quit =
2857 TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
2859 static cmdline_parse_inst_t cmd_quit = {
2860 .f = cmd_quit_parsed,
2864 (void *) &cmd_quit_quit,
2869 /**********************************************************/
2870 /****** CONTEXT (list of instruction) */
2871 cmdline_parse_ctx_t main_ctx[] = {
2872 (cmdline_parse_inst_t *)&cmd_udp_replay_stats,
2873 (cmdline_parse_inst_t *)&cmd_clear_udp_replay_stats,
2874 (cmdline_parse_inst_t *)&cmd_quit,