2 // Copyright (c) 2016-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
18 Correlated traffic VNF :
19 ------------------------
21 2. Modify received packet
22 a. exchange src mac and destination mac
23 b. exchange src ip and destination IP for both IPv4 and IPv6 cases
24 c. exchange UDP src port and UDP destination port
25 d. change the length of the response according to the IMIX definition
26 (an option to make the traffic more realistic, e.g. to emulate some IoT payloads)
27 3. Send the modified packet back out of the port on which it was received.
29 Such a VNF does not need LPM or routing-table implementations.
30 Because the packet modification is minimal and there is no main-memory access (the packet stays in the L3 cache),
31 the performance of the solution should be sufficient for testing UDP NAT performance. A minimal sketch of the per-packet swap is given below.
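A minimal sketch of the swap for a single IPv4/UDP packet, assuming m is the received
mbuf (illustrative only; the fast path below works on bursts of eight packets and also
handles IPv6):

    struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
    struct ether_addr mac = eth->s_addr;            /* a. swap MAC addresses */
    eth->s_addr = eth->d_addr;
    eth->d_addr = mac;
    struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
    uint32_t addr = ip->src_addr;                   /* b. swap IP addresses */
    ip->src_addr = ip->dst_addr;
    ip->dst_addr = addr;
    struct udp_hdr *udp = (struct udp_hdr *)(ip + 1);
    uint16_t port = udp->src_port;                  /* c. swap UDP ports */
    udp->src_port = udp->dst_port;
    udp->dst_port = port;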
37 #include <sys/types.h>
39 #include <sys/queue.h>
44 #include <rte_common.h>
46 #include <rte_byteorder.h>
48 #include <rte_memory.h>
49 #include <rte_memcpy.h>
50 #include <rte_memzone.h>
52 #include <rte_per_lcore.h>
53 #include <rte_launch.h>
54 #include <rte_atomic.h>
55 #include <rte_cycles.h>
56 #include <rte_prefetch.h>
57 #include <rte_lcore.h>
58 #include <rte_per_lcore.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_interrupts.h>
62 #include <rte_random.h>
63 #include <rte_debug.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
67 #include <rte_mempool.h>
72 #include <rte_string_fns.h>
73 #include <rte_version.h>
75 #include <cmdline_parse.h>
76 #include <cmdline_parse_etheraddr.h>
77 #include <cmdline_rdline.h>
78 #include <cmdline_socket.h>
80 #include <cmdline_parse_num.h>
81 #include <cmdline_parse_string.h>
82 #include <cmdline_parse_ipaddr.h>
83 #include <rte_errno.h>
84 #include <rte_cfgfile.h>
86 #include "parse_obj_list.h"
90 #include "interface.h"
91 #include "l3fwd_common.h"
92 #include "l3fwd_lpm4.h"
93 #include "l3fwd_lpm6.h"
94 #include "lib_icmpv6.h"
96 #include "vnf_common.h"
100 #define APP_LOOKUP_EXACT_MATCH 0
101 #define APP_LOOKUP_LPM 1
102 #define DO_RFC_1812_CHECKS
104 #ifndef APP_LOOKUP_METHOD
105 #define APP_LOOKUP_METHOD APP_LOOKUP_EXACT_MATCH
110 #include <netinet/in.h>
114 * When set to zero, the simple forwarding path is enabled.
115 * When set to one, optimized forwarding path is enabled.
116 * Note that LPM optimisation path uses SSE4.1 instructions.
118 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && !defined(__SSE4_1__))
119 #define ENABLE_MULTI_BUFFER_OPTIMIZE 0
121 #define ENABLE_MULTI_BUFFER_OPTIMIZE 1
124 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
125 #include <rte_hash.h>
126 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
128 #include <rte_lpm6.h>
130 #error "APP_LOOKUP_METHOD set to incorrect value"
134 #define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
135 "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
136 #define IPv6_BYTES(addr) \
137 addr[0], addr[1], addr[2], addr[3], \
138 addr[4], addr[5], addr[6], addr[7], \
139 addr[8], addr[9], addr[10], addr[11],\
140 addr[12], addr[13],addr[14], addr[15]
144 #define RTE_LOGTYPE_UDP_Replay RTE_LOGTYPE_USER1
146 #define MAX_JUMBO_PKT_LEN 9600
148 #define IPV6_ADDR_LEN 16
150 #define MEMPOOL_CACHE_SIZE 256
153 * This expression is used to calculate the number of mbufs needed depending on user input, taking
154 * into account memory for rx and tx hardware rings, cache per lcore and mtable per port per lcore.
155 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum value of 8192.
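 * For example (illustrative numbers only): with 2 ports, 1 RX queue per port, 2 TX queues,
 * 2 lcores and the default descriptor counts this gives 2*1*128 + 2*2*32 + 2*2*512 + 2*256 = 2944,
 * which RTE_MAX then raises to the 8192 floor.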
158 #define NB_MBUF RTE_MAX ( \
159 (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
160 nb_ports*nb_lcores*MAX_PKT_BURST + \
161 nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
162 nb_lcores*MEMPOOL_CACHE_SIZE), \
165 #define MAX_PKT_BURST 32
166 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
169 * Try to avoid TX buffering if we have at least MAX_TX_BURST packets to send.
171 #define MAX_TX_BURST (MAX_PKT_BURST / 2)
175 /* Configure how many packets ahead to prefetch, when reading packets */
176 #define PREFETCH_OFFSET 3
178 /* Used to mark destination port as 'invalid'. */
179 #define BAD_PORT ((uint16_t)-1)
184 * Configurable number of RX/TX ring descriptors
186 #define RTE_TEST_RX_DESC_DEFAULT 128
187 #define RTE_TEST_TX_DESC_DEFAULT 512
188 static uint64_t rcv_pkt_count[32] = {0};
189 static uint64_t tx_pkt_count[32] = {0};
190 static uint32_t arp_support;
193 struct sockaddr_in ipaddr1, ipaddr2;
194 /* ethernet addresses of ports */
195 static uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
197 static __m128i val_eth[RTE_MAX_ETHPORTS];
199 cmdline_parse_ctx_t main_ctx[];
201 uint32_t timer_lcore;
202 uint32_t exit_loop = 1;
203 port_config_t *port_config;
204 #define MEMPOOL_SIZE 32 * 1024
205 #define BUFFER_SIZE 2048
206 #define CACHE_SIZE 256
207 /* replace first 12B of the ethernet header. */
208 #define MASK_ETH 0x3f
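/*
 * 0x3f = 0b00111111: used with _mm_blend_epi16() it selects the six low 16-bit lanes,
 * i.e. the 12 bytes holding the destination and source MAC addresses.
 */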
210 #define IP_TYPE_IPV4 0
211 #define IP_TYPE_IPV6 1
213 const char* ipv4[MAX_IP];
214 uint8_t link_ipv6[MAX_IP][16];
215 uint32_t type, numports;
216 /* mask of enabled ports */
217 static uint32_t enabled_port_mask = 0;
218 static int promiscuous_on = 0; /**< Promiscuous mode is off by default. */
219 static int numa_on = 1; /**< NUMA is enabled by default. */
220 static int csum_on = 1; /**< Hardware IP checksum offload is enabled by default. */
221 struct pipeline_params def_pipeline_params = {
230 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
231 static int ipv6 = 0; /**< ipv6 is false by default. */
234 void convert_ipstr_to_numeric(void);
236 int print_l4stats(void);
237 int clear_stats(void);
241 struct rte_mbuf *m_table[MAX_PKT_BURST];
244 struct lcore_rx_queue {
247 } __rte_cache_aligned;
249 #define MAX_RX_QUEUE_PER_LCORE 16
250 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
251 #define MAX_RX_QUEUE_PER_PORT 128
253 #define MAX_LCORE_PARAMS 1024
254 struct lcore_params {
258 } __rte_cache_aligned;
260 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
261 static struct lcore_params lcore_params_array_default[] = {
273 static struct lcore_params * lcore_params = lcore_params_array_default;
274 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
275 sizeof(lcore_params_array_default[0]);
277 static struct rte_eth_conf port_conf = {
279 .mq_mode = ETH_MQ_RX_RSS,
280 .max_rx_pkt_len = ETHER_MAX_LEN,
282 .header_split = 0, /**< Header Split disabled */
283 .hw_ip_checksum = 1, /**< IP checksum offload enabled */
284 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
285 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
286 .hw_strip_crc = 0, /**< CRC stripped by hardware */
291 .rss_hf = ETH_RSS_IP,
295 .mq_mode = ETH_MQ_TX_NONE,
299 /* Empty VMDq configuration structure. Filled in programmatically. */
300 static struct rte_eth_rxconf rx_conf = {
306 .rx_free_thresh = 64,
308 .rx_deferred_start = 0,
310 static struct rte_eth_txconf tx_conf = {
318 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
319 ETH_TXQ_FLAGS_NOOFFLOADS,
320 .tx_deferred_start = 0,
323 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
325 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
326 #include <rte_hash_crc.h>
327 #define DEFAULT_HASH_FUNC rte_hash_crc
329 #include <rte_jhash.h>
330 #define DEFAULT_HASH_FUNC rte_jhash
339 } __attribute__((__packed__));
341 union ipv4_5tuple_host {
354 #define XMM_NUM_IN_IPV6_5TUPLE 3
357 uint8_t ip_dst[IPV6_ADDR_LEN];
358 uint8_t ip_src[IPV6_ADDR_LEN];
362 } __attribute__((__packed__));
364 union ipv6_5tuple_host {
369 uint8_t ip_src[IPV6_ADDR_LEN];
370 uint8_t ip_dst[IPV6_ADDR_LEN];
375 __m128i xmm[XMM_NUM_IN_IPV6_5TUPLE];
378 struct ipv4_udp_replay_route {
379 struct ipv4_5tuple key;
383 struct ipv6_udp_replay_route {
384 struct ipv6_5tuple key;
388 static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
389 {{IPv4(101,0,0,0), IPv4(100,10,0,1), 101, 11, IPPROTO_TCP}, 0},
390 {{IPv4(201,0,0,0), IPv4(200,20,0,1), 102, 12, IPPROTO_TCP}, 1},
391 {{IPv4(111,0,0,0), IPv4(100,30,0,1), 101, 11, IPPROTO_TCP}, 2},
392 {{IPv4(211,0,0,0), IPv4(200,40,0,1), 102, 12, IPPROTO_TCP}, 3},
395 static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
397 {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
398 {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
399 101, 11, IPPROTO_TCP}, 0},
402 {0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
403 {0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
404 102, 12, IPPROTO_TCP}, 1},
407 {0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
408 {0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
409 101, 11, IPPROTO_TCP}, 2},
412 {0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
413 {0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
414 102, 12, IPPROTO_TCP}, 3},
417 typedef struct rte_hash lookup_struct_t;
419 #ifdef RTE_ARCH_X86_64
420 /* default to 4 million hash entries (approx) */
421 #define UDP_Replay_HASH_ENTRIES 1024*1024*4
423 /* 32-bit has less address space for hugepage memory, so limit to 1M entries */
424 #define UDP_Replay_HASH_ENTRIES 1024*1024*1
426 #define HASH_ENTRY_NUMBER_DEFAULT 4
428 static uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
430 app_link_up_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
435 app_link_down_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
441 * inet_pton_ipv4(src, dst)
442 * like inet_aton() but without all the hexadecimal and shorthand.
444 * 1 if `src' is a valid dotted quad, else 0.
446 * does not touch `dst' unless it's returning 1.
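 * Example (illustrative): inet_pton_ipv4("10.1.1.1", buf) returns 1 and writes the
 * four bytes {10, 1, 1, 1} to buf; inet_pton_ipv4("10.1.1", buf) returns 0 and
 * leaves buf untouched.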
450 static int inet_pton_ipv4(const char *src, unsigned char *dst)
452 static const char digits[] = "0123456789";
453 int saw_digit, octets, ch;
454 unsigned char tmp[INADDRSZ], *tp;
458 while ((ch = *src++) != '\0') {
460 if ((pch = strchr(digits, ch)) != NULL) {
461 unsigned int new = *tp * 10 + (pch - digits);
469 *tp = (unsigned char)new;
470 } else if (ch == '.' && saw_digit) {
480 memcpy(dst, tmp, INADDRSZ);
485 * inet_pton_ipv6(src, dst)
486 * convert presentation level address to network order binary form.
488 * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
490 * (1) does not touch `dst' unless it's returning 1.
491 * (2) :: in a full address is silently ignored.
493 * inspired by Mark Andrews.
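 * Example (illustrative): inet_pton_ipv6("fe80::1", buf) returns 1 and writes the
 * 16 bytes fe 80 00 ... 00 01 to buf; any string it cannot parse returns 0 and
 * leaves buf untouched.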
497 static int inet_pton_ipv6(const char *src, unsigned char *dst)
499 static const char xdigits_l[] = "0123456789abcdef",
500 xdigits_u[] = "0123456789ABCDEF";
501 unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
502 const char *xdigits = 0, *curtok = 0;
503 int ch = 0, saw_xdigit = 0, count_xdigit = 0;
504 unsigned int val = 0;
505 unsigned dbloct_count = 0;
506 memset((tp = tmp), '\0', IN6ADDRSZ);
507 endp = tp + IN6ADDRSZ;
513 saw_xdigit = count_xdigit = 0;
515 while ((ch = *src++) != '\0') {
517 if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
518 pch = strchr((xdigits = xdigits_u), ch);
520 if (count_xdigit >= 4)
523 val |= (pch - xdigits);
537 } else if (*src == '\0') {
540 if (tp + sizeof(int16_t) > endp)
542 *tp++ = (unsigned char)((val >> 8) & 0xff);
543 *tp++ = (unsigned char)(val & 0xff);
550 if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
551 inet_pton_ipv4(curtok, tp) > 0) {
555 break; /* '\0' was seen by inet_pton4(). */
560 if (tp + sizeof(int16_t) > endp)
562 *tp++ = (unsigned char)((val >> 8) & 0xff);
563 *tp++ = (unsigned char)(val & 0xff);
566 if (colonp != NULL) {
567 if (dbloct_count == 8)
569 const int n = tp - colonp;
571 for (i = 1; i <= n; i++) {
572 endp[-i] = colonp[n - i];
579 memcpy(dst, tmp, IN6ADDRSZ);
582 static int my_inet_pton_ipv6(int af, const char *src, void *dst)
586 return inet_pton_ipv4(src, dst);
588 return inet_pton_ipv6(src, dst);
590 errno = EAFNOSUPPORT;
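/*
 * Walk the configured ports and assign each one its link IP address:
 * IPv4 addresses are added with a /24 mask, IPv6 addresses with /128.
 */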
594 void convert_ipstr_to_numeric(void)
597 for (i = 0; i < numports; i++)
599 if (type == IP_TYPE_IPV4) {
600 memset(&ipaddr1, '\0', sizeof(struct sockaddr_in));
601 ipaddr1.sin_addr.s_addr = inet_addr(ipv4[i]);
602 ifm_add_ipv4_port(i, ipaddr1.sin_addr.s_addr, 24);
603 } else if (type == IP_TYPE_IPV6) {
604 ifm_add_ipv6_port(i, &link_ipv6[i][0], 128);
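/*
 * 5-tuple hash for the IPv4 exact-match table: CRC32-based when SSE4.2 is
 * available, Jenkins hash otherwise.
 */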
609 static inline uint32_t
610 ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
613 const union ipv4_5tuple_host *k;
619 p = (const uint32_t *)&k->port_src;
621 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
622 init_val = rte_hash_crc_4byte(t, init_val);
623 init_val = rte_hash_crc_4byte(k->ip_src, init_val);
624 init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
625 init_val = rte_hash_crc_4byte(*p, init_val);
626 #else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
627 init_val = rte_jhash_1word(t, init_val);
628 init_val = rte_jhash_1word(k->ip_src, init_val);
629 init_val = rte_jhash_1word(k->ip_dst, init_val);
630 init_val = rte_jhash_1word(*p, init_val);
631 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
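/*
 * Divert ARP requests and ICMP-over-IPv4 packets to the ARP/ICMP handler so that
 * address resolution and ping keep working while UDP traffic is replayed.
 */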
635 static inline int check_arpicmp(struct rte_mbuf *pkt)
637 uint8_t in_port_id = pkt->port;
638 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
639 uint16_t *eth_proto =
640 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
642 uint32_t prot_offset =
643 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST;
644 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset);
645 if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_ARP) ||
646 ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV4)
647 && (*protocol == IP_PROTOCOL_ICMP))) {
648 process_arpicmp_pkt(pkt, ifm_get_port(in_port_id));
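/*
 * IPv6 counterpart: divert ICMPv6 packets (neighbour discovery, ping6) to the
 * ICMPv6 handler.
 */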
654 static inline int check_arpicmpv6(struct rte_mbuf *pkt)
656 struct ether_hdr *eth_h;
657 struct ipv6_hdr *ipv6_h;
658 uint8_t in_port_id = pkt->port;
659 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
660 uint16_t *eth_proto =
661 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
662 eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
663 ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
664 if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV6)
665 && (ipv6_h->proto == ICMPV6_PROTOCOL_ID)) {
666 process_icmpv6_pkt(pkt, ifm_get_port(in_port_id));
672 static inline uint32_t
673 ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len, uint32_t init_val)
675 const union ipv6_5tuple_host *k;
678 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
679 const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
680 const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
681 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
685 p = (const uint32_t *)&k->port_src;
687 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
688 ip_src0 = (const uint32_t *) k->ip_src;
689 ip_src1 = (const uint32_t *)(k->ip_src+4);
690 ip_src2 = (const uint32_t *)(k->ip_src+8);
691 ip_src3 = (const uint32_t *)(k->ip_src+12);
692 ip_dst0 = (const uint32_t *) k->ip_dst;
693 ip_dst1 = (const uint32_t *)(k->ip_dst+4);
694 ip_dst2 = (const uint32_t *)(k->ip_dst+8);
695 ip_dst3 = (const uint32_t *)(k->ip_dst+12);
696 init_val = rte_hash_crc_4byte(t, init_val);
697 init_val = rte_hash_crc_4byte(*ip_src0, init_val);
698 init_val = rte_hash_crc_4byte(*ip_src1, init_val);
699 init_val = rte_hash_crc_4byte(*ip_src2, init_val);
700 init_val = rte_hash_crc_4byte(*ip_src3, init_val);
701 init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
702 init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
703 init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
704 init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
705 init_val = rte_hash_crc_4byte(*p, init_val);
706 #else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
707 init_val = rte_jhash_1word(t, init_val);
708 init_val = rte_jhash(k->ip_src, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
709 init_val = rte_jhash(k->ip_dst, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
710 init_val = rte_jhash_1word(*p, init_val);
711 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
715 #define IPV4_UDP_Replay_NUM_ROUTES \
716 (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
718 #define IPV6_UDP_Replay_NUM_ROUTES \
719 (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
721 static uint8_t ipv4_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
722 static uint8_t ipv6_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
726 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
727 struct ipv4_udp_replay_route {
733 struct ipv6_udp_replay_route {
739 static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
740 {IPv4(1,1,1,0), 24, 0},
741 {IPv4(2,1,1,0), 24, 1},
742 {IPv4(3,1,1,0), 24, 2},
743 {IPv4(4,1,1,0), 24, 3},
744 {IPv4(5,1,1,0), 24, 4},
745 {IPv4(6,1,1,0), 24, 5},
746 {IPv4(7,1,1,0), 24, 6},
747 {IPv4(8,1,1,0), 24, 7},
750 static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
751 {{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
752 {{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
753 {{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
754 {{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
755 {{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
756 {{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
757 {{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
758 {{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
761 #define IPV4_UDP_Replay_NUM_ROUTES \
762 (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
763 #define IPV6_UDP_Replay_NUM_ROUTES \
764 (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
766 #define IPV4_UDP_Replay_LPM_MAX_RULES 1024
767 #define IPV6_UDP_Replay_LPM_MAX_RULES 1024
768 #define IPV6_UDP_Replay_LPM_NUMBER_TBL8S (1 << 16)
770 typedef struct rte_lpm lookup_struct_t;
771 typedef struct rte_lpm6 lookup6_struct_t;
772 static lookup_struct_t *ipv4_udp_replay_lookup_struct[NB_SOCKETS];
773 static lookup6_struct_t *ipv6_udp_replay_lookup_struct[NB_SOCKETS];
778 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
779 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
780 struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
781 lookup_struct_t * ipv4_lookup_struct;
782 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
783 lookup6_struct_t * ipv6_lookup_struct;
785 lookup_struct_t * ipv6_lookup_struct;
787 } __rte_cache_aligned;
789 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
791 /* Send burst of packets on an output interface */
793 send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
795 struct rte_mbuf **m_table;
799 queueid = qconf->tx_queue_id[port];
800 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
802 ret = rte_eth_tx_burst(port, queueid, m_table, n);
803 if (unlikely(ret < n)) {
805 rte_pktmbuf_free(m_table[ret]);
809 tx_pkt_count[port] += ret;
813 /* Enqueue a single packet, and send burst if queue is filled */
815 send_single_packet(struct rte_mbuf *m, uint8_t port)
819 struct lcore_conf *qconf;
821 lcore_id = rte_lcore_id();
823 qconf = &lcore_conf[lcore_id];
824 len = qconf->tx_mbufs[port].len;
825 qconf->tx_mbufs[port].m_table[len] = m;
828 /* enough pkts to be sent */
829 if (unlikely(len == MAX_PKT_BURST)) {
830 send_burst(qconf, MAX_PKT_BURST, port);
834 qconf->tx_mbufs[port].len = len;
838 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
839 static inline __attribute__((always_inline)) void
840 send_packetsx4(struct lcore_conf *qconf, uint8_t port,
841 struct rte_mbuf *m[], uint32_t num)
845 len = qconf->tx_mbufs[port].len;
848 * If the TX buffer for that queue is empty and we have enough packets,
849 * then send them straight away.
851 if (num >= MAX_TX_BURST && len == 0) {
852 n = rte_eth_tx_burst(port, qconf->tx_queue_id[port], m, num);
853 if (unlikely(n < num)) {
855 rte_pktmbuf_free(m[n]);
862 * Put packets into TX buffer for that queue.
866 n = (n > MAX_PKT_BURST) ? MAX_PKT_BURST - len : num;
869 switch (n % FWDSTEP) {
872 qconf->tx_mbufs[port].m_table[len + j] = m[j];
875 qconf->tx_mbufs[port].m_table[len + j] = m[j];
878 qconf->tx_mbufs[port].m_table[len + j] = m[j];
881 qconf->tx_mbufs[port].m_table[len + j] = m[j];
888 /* enough pkts to be sent */
889 if (unlikely(len == MAX_PKT_BURST)) {
891 send_burst(qconf, MAX_PKT_BURST, port);
893 /* copy rest of the packets into the TX buffer. */
896 switch (len % FWDSTEP) {
899 qconf->tx_mbufs[port].m_table[j] = m[n + j];
902 qconf->tx_mbufs[port].m_table[j] = m[n + j];
905 qconf->tx_mbufs[port].m_table[j] = m[n + j];
908 qconf->tx_mbufs[port].m_table[j] = m[n + j];
914 qconf->tx_mbufs[port].len = len;
916 #endif /* APP_LOOKUP_LPM */
918 #ifdef DO_RFC_1812_CHECKS
920 is_valid_pkt_ipv4(struct ipv4_hdr *pkt, uint32_t link_len)
922 /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
924 * 1. The packet length reported by the Link Layer must be large
925 * enough to hold the minimum length legal IP datagram (20 bytes).
927 if (link_len < sizeof(struct ipv4_hdr))
930 /* 2. The IP checksum must be correct. */
931 /* this is checked in H/W */
934 * 3. The IP version number must be 4. If the version number is not 4
935 * then the packet may be another version of IP, such as IPng or
938 if (((pkt->version_ihl) >> 4) != 4)
941 * 4. The IP header length field must be large enough to hold the
942 * minimum length legal IP datagram (20 bytes = 5 words).
944 if ((pkt->version_ihl & 0xf) < 5)
948 * 5. The IP total length field must be large enough to hold the IP
949 * datagram header, whose length is specified in the IP header length
952 if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
959 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
961 static __m128i mask0;
962 static __m128i mask1;
963 static __m128i mask2;
964 static inline uint8_t
965 get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
968 union ipv4_5tuple_host key;
970 ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
971 __m128i data = _mm_loadu_si128((__m128i*)(ipv4_hdr));
972 /* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */
973 key.xmm = _mm_and_si128(data, mask0);
974 /* Find destination port */
975 ret = rte_hash_lookup(ipv4_udp_replay_lookup_struct, (const void *)&key);
976 return (uint8_t)((ret < 0)? portid : ipv4_udp_replay_out_if[ret]);
979 static inline uint8_t
980 get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup_struct_t * ipv6_udp_replay_lookup_struct)
983 union ipv6_5tuple_host key;
985 ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
986 __m128i data0 = _mm_loadu_si128((__m128i*)(ipv6_hdr));
987 __m128i data1 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)));
988 __m128i data2 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)+sizeof(__m128i)));
989 /* Get part of 5 tuple: src IP address lower 96 bits and protocol */
990 key.xmm[0] = _mm_and_si128(data0, mask1);
991 /* Get part of 5 tuple: dst IP address lower 96 bits and src IP address higher 32 bits */
993 /* Get part of 5 tuple: dst port and src port and dst IP address higher 32 bits */
994 key.xmm[2] = _mm_and_si128(data2, mask2);
996 /* Find destination port */
997 ret = rte_hash_lookup(ipv6_udp_replay_lookup_struct, (const void *)&key);
998 return (uint8_t)((ret < 0)? portid : ipv6_udp_replay_out_if[ret]);
1002 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
1004 static inline uint8_t
1005 get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
1009 return (uint8_t) ((rte_lpm_lookup(ipv4_udp_replay_lookup_struct,
1010 rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
1011 &next_hop) == 0) ? next_hop : portid);
1014 static inline uint8_t
1015 get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup6_struct_t * ipv6_udp_replay_lookup_struct)
1018 return (uint8_t) ((rte_lpm6_lookup(ipv6_udp_replay_lookup_struct,
1019 ((struct ipv6_hdr*)ipv6_hdr)->dst_addr, &next_hop) == 0)?
1024 static inline void udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid,
1025 struct lcore_conf *qconf) __attribute__((unused));
1027 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
1028 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1030 #define MASK_ALL_PKTS 0xff
1031 #define EXCLUDE_1ST_PKT 0xfe
1032 #define EXCLUDE_2ND_PKT 0xfd
1033 #define EXCLUDE_3RD_PKT 0xfb
1034 #define EXCLUDE_4TH_PKT 0xf7
1035 #define EXCLUDE_5TH_PKT 0xef
1036 #define EXCLUDE_6TH_PKT 0xdf
1037 #define EXCLUDE_7TH_PKT 0xbf
1038 #define EXCLUDE_8TH_PKT 0x7f
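/*
 * Replay eight IPv4/UDP packets at once: swap the MAC, IP and UDP port pairs of
 * each packet in place, validate the IPv4 headers (RFC 1812) and send every
 * surviving packet back out of the port it arrived on.
 */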
1041 simple_ipv4_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
1043 struct ether_hdr *eth_hdr[8];
1044 struct ether_hdr tmp;
1045 struct ipv4_hdr *ipv4_hdr[8];
1046 struct udp_hdr *udp_hdr[8];
1048 l2_phy_interface_t *port = ifm_get_port(portid);
1050 printf("port may be un initialized\n");
1053 if (unlikely(arp_support)) {
1054 check_arpicmp(m[0]);
1055 check_arpicmp(m[1]);
1056 check_arpicmp(m[2]);
1057 check_arpicmp(m[3]);
1058 check_arpicmp(m[4]);
1059 check_arpicmp(m[5]);
1060 check_arpicmp(m[6]);
1061 check_arpicmp(m[7]);
1064 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
1065 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
1066 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
1067 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
1068 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
1069 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
1070 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
1071 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
1074 memset(&tmp,0,sizeof (struct ether_hdr));
1079 ether_addr_copy(&eth_hdr[i]->s_addr, &tmp.s_addr);
1080 ether_addr_copy(&eth_hdr[i]->d_addr, &eth_hdr[i]->s_addr);
1081 ether_addr_copy(&tmp.s_addr, &eth_hdr[i]->d_addr);
1084 /* Handle IPv4 headers.*/
1085 ipv4_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv4_hdr *,
1086 sizeof(struct ether_hdr));
1087 ipv4_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv4_hdr *,
1088 sizeof(struct ether_hdr));
1089 ipv4_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv4_hdr *,
1090 sizeof(struct ether_hdr));
1091 ipv4_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv4_hdr *,
1092 sizeof(struct ether_hdr));
1093 ipv4_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv4_hdr *,
1094 sizeof(struct ether_hdr));
1095 ipv4_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv4_hdr *,
1096 sizeof(struct ether_hdr));
1097 ipv4_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv4_hdr *,
1098 sizeof(struct ether_hdr));
1099 ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv4_hdr *,
1100 sizeof(struct ether_hdr));
1101 struct ipv4_hdr temp_ipv4;
1102 temp_ipv4.dst_addr = ipv4_hdr[0]->dst_addr;
1103 ipv4_hdr[0]->dst_addr = ipv4_hdr[0]->src_addr;
1104 ipv4_hdr[0]->src_addr = temp_ipv4.dst_addr;
1105 temp_ipv4.dst_addr = ipv4_hdr[1]->dst_addr;
1106 ipv4_hdr[1]->dst_addr = ipv4_hdr[1]->src_addr;
1107 ipv4_hdr[1]->src_addr = temp_ipv4.dst_addr;
1108 temp_ipv4.dst_addr = ipv4_hdr[2]->dst_addr;
1109 ipv4_hdr[2]->dst_addr = ipv4_hdr[2]->src_addr;
1110 ipv4_hdr[2]->src_addr = temp_ipv4.dst_addr;
1111 temp_ipv4.dst_addr = ipv4_hdr[3]->dst_addr;
1112 ipv4_hdr[3]->dst_addr = ipv4_hdr[3]->src_addr;
1113 ipv4_hdr[3]->src_addr = temp_ipv4.dst_addr;
1114 temp_ipv4.dst_addr = ipv4_hdr[4]->dst_addr;
1115 ipv4_hdr[4]->dst_addr = ipv4_hdr[4]->src_addr;
1116 ipv4_hdr[4]->src_addr = temp_ipv4.dst_addr;
1117 temp_ipv4.dst_addr = ipv4_hdr[5]->dst_addr;
1118 ipv4_hdr[5]->dst_addr = ipv4_hdr[5]->src_addr;
1119 ipv4_hdr[5]->src_addr = temp_ipv4.dst_addr;
1120 temp_ipv4.dst_addr = ipv4_hdr[6]->dst_addr;
1121 ipv4_hdr[6]->dst_addr = ipv4_hdr[6]->src_addr;
1122 ipv4_hdr[6]->src_addr = temp_ipv4.dst_addr;
1123 temp_ipv4.dst_addr = ipv4_hdr[7]->dst_addr;
1124 ipv4_hdr[7]->dst_addr = ipv4_hdr[7]->src_addr;
1125 ipv4_hdr[7]->src_addr = temp_ipv4.dst_addr;
1127 /* Handle UDP headers.*/
1128 udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
1129 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1131 udp_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct udp_hdr *,
1132 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1133 udp_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct udp_hdr *,
1134 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1135 udp_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct udp_hdr *,
1136 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1137 udp_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct udp_hdr *,
1138 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1139 udp_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct udp_hdr *,
1140 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1141 udp_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct udp_hdr *,
1142 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1143 udp_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct udp_hdr *,
1144 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1145 /* Swap the UDP source and destination ports (plain assignment; memcpy would work equally well). */
1147 struct udp_hdr temp_udp;
1148 temp_udp.dst_port = udp_hdr[0]->dst_port;
1149 udp_hdr[0]->dst_port = udp_hdr[0]->src_port;
1150 udp_hdr[0]->src_port = temp_udp.dst_port;
1151 temp_udp.dst_port = udp_hdr[1]->dst_port;
1152 udp_hdr[1]->dst_port = udp_hdr[1]->src_port;
1153 udp_hdr[1]->src_port = temp_udp.dst_port;
1154 temp_udp.dst_port = udp_hdr[2]->dst_port;
1155 udp_hdr[2]->dst_port = udp_hdr[2]->src_port;
1156 udp_hdr[2]->src_port = temp_udp.dst_port;
1157 temp_udp.dst_port = udp_hdr[3]->dst_port;
1158 udp_hdr[3]->dst_port = udp_hdr[3]->src_port;
1159 udp_hdr[3]->src_port = temp_udp.dst_port;
1160 temp_udp.dst_port = udp_hdr[4]->dst_port;
1161 udp_hdr[4]->dst_port = udp_hdr[4]->src_port;
1162 udp_hdr[4]->src_port = temp_udp.dst_port;
1163 temp_udp.dst_port = udp_hdr[5]->dst_port;
1164 udp_hdr[5]->dst_port = udp_hdr[5]->src_port;
1165 udp_hdr[5]->src_port = temp_udp.dst_port;
1166 temp_udp.dst_port = udp_hdr[6]->dst_port;
1167 udp_hdr[6]->dst_port = udp_hdr[6]->src_port;
1168 udp_hdr[6]->src_port = temp_udp.dst_port;
1169 temp_udp.dst_port = udp_hdr[7]->dst_port;
1170 udp_hdr[7]->dst_port = udp_hdr[7]->src_port;
1171 udp_hdr[7]->src_port = temp_udp.dst_port;
1172 #ifdef DO_RFC_1812_CHECKS
1173 /* Check to make sure the packet is valid (RFC1812) */
1174 uint8_t valid_mask = MASK_ALL_PKTS;
1175 if (is_valid_pkt_ipv4(ipv4_hdr[0], m[0]->pkt_len) < 0) {
1176 rte_pktmbuf_free(m[0]);
1177 valid_mask &= EXCLUDE_1ST_PKT;
1179 if (is_valid_pkt_ipv4(ipv4_hdr[1], m[1]->pkt_len) < 0) {
1180 rte_pktmbuf_free(m[1]);
1181 valid_mask &= EXCLUDE_2ND_PKT;
1183 if (is_valid_pkt_ipv4(ipv4_hdr[2], m[2]->pkt_len) < 0) {
1184 rte_pktmbuf_free(m[2]);
1185 valid_mask &= EXCLUDE_3RD_PKT;
1187 if (is_valid_pkt_ipv4(ipv4_hdr[3], m[3]->pkt_len) < 0) {
1188 rte_pktmbuf_free(m[3]);
1189 valid_mask &= EXCLUDE_4TH_PKT;
1191 if (is_valid_pkt_ipv4(ipv4_hdr[4], m[4]->pkt_len) < 0) {
1192 rte_pktmbuf_free(m[4]);
1193 valid_mask &= EXCLUDE_5TH_PKT;
1195 if (is_valid_pkt_ipv4(ipv4_hdr[5], m[5]->pkt_len) < 0) {
1196 rte_pktmbuf_free(m[5]);
1197 valid_mask &= EXCLUDE_6TH_PKT;
1199 if (is_valid_pkt_ipv4(ipv4_hdr[6], m[6]->pkt_len) < 0) {
1200 rte_pktmbuf_free(m[6]);
1201 valid_mask &= EXCLUDE_7TH_PKT;
1203 if (is_valid_pkt_ipv4(ipv4_hdr[7], m[7]->pkt_len) < 0) {
1204 rte_pktmbuf_free(m[7]);
1205 valid_mask &= EXCLUDE_8TH_PKT;
1207 if (unlikely(valid_mask != MASK_ALL_PKTS)) {
1208 if (valid_mask == 0){
1212 for (i = 0; i < 8; i++) {
1213 if ((0x1 << i) & valid_mask) {
1214 udp_replay_simple_replay(m[i], portid, qconf);
1220 #endif // End of #ifdef DO_RFC_1812_CHECKS
1222 #ifdef DO_RFC_1812_CHECKS
1223 /* Update time to live and header checksum */
1224 --(ipv4_hdr[0]->time_to_live);
1225 --(ipv4_hdr[1]->time_to_live);
1226 --(ipv4_hdr[2]->time_to_live);
1227 --(ipv4_hdr[3]->time_to_live);
1228 ++(ipv4_hdr[0]->hdr_checksum);
1229 ++(ipv4_hdr[1]->hdr_checksum);
1230 ++(ipv4_hdr[2]->hdr_checksum);
1231 ++(ipv4_hdr[3]->hdr_checksum);
1232 --(ipv4_hdr[4]->time_to_live);
1233 --(ipv4_hdr[5]->time_to_live);
1234 --(ipv4_hdr[6]->time_to_live);
1235 --(ipv4_hdr[7]->time_to_live);
1236 ++(ipv4_hdr[4]->hdr_checksum);
1237 ++(ipv4_hdr[5]->hdr_checksum);
1238 ++(ipv4_hdr[6]->hdr_checksum);
1239 ++(ipv4_hdr[7]->hdr_checksum);
1242 send_single_packet(m[0],portid );
1243 send_single_packet(m[1],portid );
1244 send_single_packet(m[2],portid );
1245 send_single_packet(m[3],portid);
1246 send_single_packet(m[4],portid);
1247 send_single_packet(m[5],portid);
1248 send_single_packet(m[6],portid);
1249 send_single_packet(m[7],portid);
1253 static inline void get_ipv6_5tuple(struct rte_mbuf* m0, __m128i mask0, __m128i mask1,
1254 union ipv6_5tuple_host * key)
1256 __m128i tmpdata0 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)));
1257 __m128i tmpdata1 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i)));
1258 __m128i tmpdata2 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i) + sizeof(__m128i)));
1259 key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
1260 key->xmm[1] = tmpdata1;
1261 key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
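/*
 * IPv6 counterpart of simple_ipv4_replay_8pkts(): swap the MAC, IPv6 and UDP port
 * pairs of eight packets in place and send them back out of the receive port.
 */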
1266 simple_ipv6_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
1268 struct ether_hdr *eth_hdr[8],tmp;
1270 __attribute__((unused)) struct ipv6_hdr *ipv6_hdr[8], temp_ipv6;
1272 union ipv6_5tuple_host key[8];
1273 struct udp_hdr *udp_hdr[8];
1274 l2_phy_interface_t *port = ifm_get_port(portid);
1276 printf("port may be un initialized\n");
1280 if (unlikely(arp_support)) {
1281 check_arpicmpv6(m[0]);
1282 check_arpicmpv6(m[1]);
1283 check_arpicmpv6(m[2]);
1284 check_arpicmpv6(m[3]);
1285 check_arpicmpv6(m[4]);
1286 check_arpicmpv6(m[5]);
1287 check_arpicmpv6(m[6]);
1288 check_arpicmpv6(m[7]);
1292 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
1293 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
1294 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
1295 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
1296 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
1297 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
1298 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
1299 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
1301 memset(&tmp,0,sizeof (struct ether_hdr));
1305 ether_addr_copy(&eth_hdr[i]->s_addr, &tmp.s_addr);
1306 ether_addr_copy(&eth_hdr[i]->d_addr, &eth_hdr[i]->s_addr);
1307 ether_addr_copy(&tmp.s_addr, &eth_hdr[i]->d_addr);
1309 /* Handle IPv6 headers.*/
1310 ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv6_hdr *,
1311 sizeof(struct ether_hdr));
1312 ipv6_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv6_hdr *,
1313 sizeof(struct ether_hdr));
1314 ipv6_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv6_hdr *,
1315 sizeof(struct ether_hdr));
1316 ipv6_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv6_hdr *,
1317 sizeof(struct ether_hdr));
1318 ipv6_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv6_hdr *,
1319 sizeof(struct ether_hdr));
1320 ipv6_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv6_hdr *,
1321 sizeof(struct ether_hdr));
1322 ipv6_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv6_hdr *,
1323 sizeof(struct ether_hdr));
1324 ipv6_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv6_hdr *,
1325 sizeof(struct ether_hdr));
1328 memcpy(temp_ipv6.dst_addr,ipv6_hdr[i]->dst_addr,16);
1329 memcpy(ipv6_hdr[i]->dst_addr,ipv6_hdr[i]->src_addr,16);
1330 memcpy(ipv6_hdr[i]->src_addr,temp_ipv6.dst_addr,16);
1333 /* Handle UDP headers.*/
1334 udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
1335 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1337 udp_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct udp_hdr *,
1338 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1339 udp_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct udp_hdr *,
1340 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1341 udp_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct udp_hdr *,
1342 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1343 udp_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct udp_hdr *,
1344 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1345 udp_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct udp_hdr *,
1346 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1347 udp_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct udp_hdr *,
1348 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1349 udp_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct udp_hdr *,
1350 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1351 /* Swap the UDP source and destination ports (plain assignment; memcpy would work equally well). */
1353 struct udp_hdr temp_udp;
1356 temp_udp.dst_port = udp_hdr[i]->dst_port;
1357 udp_hdr[i]->dst_port = udp_hdr[i]->src_port;
1358 udp_hdr[i]->src_port = temp_udp.dst_port;
1360 const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
1361 &key[4], &key[5], &key[6], &key[7]};
1362 #if RTE_VERSION < 0x100b0000
1363 rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
1365 rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
1367 send_single_packet(m[0],portid);
1368 send_single_packet(m[1],portid);
1369 send_single_packet(m[2],portid);
1370 send_single_packet(m[3],portid);
1371 send_single_packet(m[4],portid);
1372 send_single_packet(m[5],portid);
1373 send_single_packet(m[6],portid);
1374 send_single_packet(m[7],portid);
1377 #endif /* APP_LOOKUP_METHOD */
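/*
 * Scalar replay path for a single packet: swap the MAC addresses, the IPv4/IPv6
 * addresses and the UDP ports in place, then transmit the packet back out of the
 * port it arrived on; anything that is neither IPv4 nor IPv6 is freed.
 */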
1379 static inline __attribute__((always_inline)) void
1380 udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf)
1382 struct ether_hdr *eth_hdr,tmp;
1383 struct ipv4_hdr *ipv4_hdr,temp_ipv4;
1384 struct udp_hdr *udp_hdr,temp_udp;
1385 l2_phy_interface_t *port = ifm_get_port(portid);
1388 printf("port may be un initialized\n");
1392 printf("Null packet received\n");
1395 if (unlikely(arp_support)) {
1396 if (!check_arpicmp(m))
1400 printf("qconf configuration is NULL\n");
1401 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
1402 ether_addr_copy(&eth_hdr->s_addr, &tmp.s_addr);
1403 ether_addr_copy(&eth_hdr->d_addr, &eth_hdr->s_addr);
1404 ether_addr_copy(&tmp.s_addr, &eth_hdr->d_addr);
1405 struct ether_hdr *eth_h = rte_pktmbuf_mtod(m, struct ether_hdr *);
1407 if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
1408 /* Handle IPv4 headers.*/
1409 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
1410 sizeof(struct ether_hdr));
1411 temp_ipv4.dst_addr = ipv4_hdr->dst_addr;
1412 ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
1413 ipv4_hdr->src_addr = temp_ipv4.dst_addr;
1414 #ifdef DO_RFC_1812_CHECKS
1415 /* Check to make sure the packet is valid (RFC1812) */
1416 if (is_valid_pkt_ipv4(ipv4_hdr, m->pkt_len) < 0) {
1417 rte_pktmbuf_free(m);
1423 #ifdef DO_RFC_1812_CHECKS
1424 /* Update time to live and header checksum */
1425 --(ipv4_hdr->time_to_live);
1426 ++(ipv4_hdr->hdr_checksum);
1428 /* Handle UDP headers.*/
1429 udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
1430 (sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr)));
1432 /*Swapping Src and Dst Port*/
1433 temp_udp.dst_port = udp_hdr->dst_port;
1434 udp_hdr->dst_port = udp_hdr->src_port;
1435 udp_hdr->src_port = temp_udp.dst_port;
1437 send_single_packet(m, portid);
1438 } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
1439 /* Handle IPv6 headers.*/
1440 struct ipv6_hdr *ipv6_hdr,temp_ipv6;
1442 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
1443 sizeof(struct ether_hdr));
1445 /*Swapping of Src and Dst IP address*/
1446 memcpy(temp_ipv6.dst_addr,ipv6_hdr->dst_addr,16);
1447 memcpy(ipv6_hdr->dst_addr,ipv6_hdr->src_addr,16);
1448 memcpy(ipv6_hdr->src_addr,temp_ipv6.dst_addr,16);
1450 /* Handle UDP headers.*/
1451 udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
1452 (sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr)));
1453 /*Swapping Src and Dst Port*/
1454 temp_udp.dst_port = udp_hdr->dst_port;
1455 udp_hdr->dst_port = udp_hdr->src_port;
1456 udp_hdr->src_port = temp_udp.dst_port;
1457 send_single_packet(m, portid);
1459 /* Free the mbuf that contains non-IPV4/IPV6 packet */
1460 rte_pktmbuf_free(m);
1463 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1464 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1465 #ifdef DO_RFC_1812_CHECKS
1467 #define IPV4_MIN_VER_IHL 0x45
1468 #define IPV4_MAX_VER_IHL 0x4f
1469 #define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)
1471 /* Minimum value of IPV4 total length (20B) in network byte order. */
1472 #define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
1475 * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
1476 * - The IP version number must be 4.
1477 * - The IP header length field must be large enough to hold the
1478 * minimum length legal IP datagram (20 bytes = 5 words).
1479 * - The IP total length field must be large enough to hold the IP
1480 * datagram header, whose length is specified in the IP header length
1482 * If we encounter an invalid IPv4 packet, then set its destination port
1483 * to the BAD_PORT value.
1485 static inline __attribute__((always_inline)) void
1486 rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
1490 if (RTE_ETH_IS_IPV4_HDR(ptype)) {
1491 ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;
1493 ipv4_hdr->time_to_live--;
1494 ipv4_hdr->hdr_checksum++;
1496 if (ihl > IPV4_MAX_VER_IHL_DIFF ||
1497 ((uint8_t)ipv4_hdr->total_length == 0 &&
1498 ipv4_hdr->total_length < IPV4_MIN_LEN_BE)) {
1505 #define rfc1812_process(mb, dp) do { } while (0)
1506 #endif /* DO_RFC_1812_CHECKS */
1507 #endif /* APP_LOOKUP_LPM && ENABLE_MULTI_BUFFER_OPTIMIZE */
1510 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1511 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1513 static inline __attribute__((always_inline)) uint16_t
1514 get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
1515 uint32_t dst_ipv4, uint8_t portid)
1518 struct ipv6_hdr *ipv6_hdr;
1519 struct ether_hdr *eth_hdr;
1520 struct ether_hdr *eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1522 if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
1523 if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
1526 } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
1527 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1528 ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
1529 if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
1530 ipv6_hdr->dst_addr, &next_hop) != 0)
1540 process_packet(struct lcore_conf *qconf, struct rte_mbuf *pkt,
1541 uint16_t *dst_port, uint8_t portid)
1543 struct ether_hdr *eth_hdr;
1544 struct ipv4_hdr *ipv4_hdr;
1549 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1550 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1553 dst_ipv4 = ipv4_hdr->dst_addr;
1554 dst_ipv4 = rte_be_to_cpu_32(dst_ipv4);
1556 /*Changing the dp to incoming port*/
1557 dp = get_dst_port(qconf, pkt, dst_ipv4, portid);
1560 te = _mm_loadu_si128((__m128i *)eth_hdr);
1564 rfc1812_process(ipv4_hdr, dst_port, pkt->packet_type);
1566 te = _mm_blend_epi16(te, ve, MASK_ETH);
1567 _mm_storeu_si128((__m128i *)eth_hdr, te);
1569 /* Won't be using the following function. */
1572 * Read packet_type and destination IPV4 addresses from 4 mbufs.
1575 processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
1577 uint32_t *ipv4_flag)
1579 struct ipv4_hdr *ipv4_hdr;
1580 struct ether_hdr *eth_hdr;
1581 uint32_t x0, x1, x2, x3;
1583 eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
1584 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1585 x0 = ipv4_hdr->dst_addr;
1586 ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
1588 eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
1589 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1590 x1 = ipv4_hdr->dst_addr;
1591 ipv4_flag[0] &= pkt[1]->packet_type;
1593 eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
1594 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1595 x2 = ipv4_hdr->dst_addr;
1596 ipv4_flag[0] &= pkt[2]->packet_type;
1598 eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
1599 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1600 x3 = ipv4_hdr->dst_addr;
1601 ipv4_flag[0] &= pkt[3]->packet_type;
1603 dip[0] = _mm_set_epi32(x3, x2, x1, x0);
1607 * Lookup into LPM for destination port.
1608 * If lookup fails, use incoming port (portid) as destination port.
1611 processx4_step2(const struct lcore_conf *qconf,
1615 struct rte_mbuf *pkt[FWDSTEP],
1616 uint16_t dprt[FWDSTEP])
1619 const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
1620 4, 5, 6, 7, 0, 1, 2, 3);
1622 /* Byte swap 4 IPV4 addresses. */
1623 dip = _mm_shuffle_epi8(dip, bswap_mask);
1625 /* if all 4 packets are IPV4. */
1626 if (likely(ipv4_flag)) {
1627 rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid);
1630 dprt[0] = get_dst_port(qconf, pkt[0], dst.u32[0], portid);
1631 dprt[1] = get_dst_port(qconf, pkt[1], dst.u32[1], portid);
1632 dprt[2] = get_dst_port(qconf, pkt[2], dst.u32[2], portid);
1633 dprt[3] = get_dst_port(qconf, pkt[3], dst.u32[3], portid);
1638 * Update source and destination MAC addresses in the ethernet header.
1639 * Perform RFC1812 checks and updates for IPV4 packets.
1642 processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
1644 __m128i te[FWDSTEP];
1645 __m128i ve[FWDSTEP];
1646 __m128i *p[FWDSTEP];
1648 p[0] = rte_pktmbuf_mtod(pkt[0], __m128i *);
1649 p[1] = rte_pktmbuf_mtod(pkt[1], __m128i *);
1650 p[2] = rte_pktmbuf_mtod(pkt[2], __m128i *);
1651 p[3] = rte_pktmbuf_mtod(pkt[3], __m128i *);
1653 ve[0] = val_eth[dst_port[0]];
1654 te[0] = _mm_loadu_si128(p[0]);
1656 ve[1] = val_eth[dst_port[1]];
1657 te[1] = _mm_loadu_si128(p[1]);
1659 ve[2] = val_eth[dst_port[2]];
1660 te[2] = _mm_loadu_si128(p[2]);
1662 ve[3] = val_eth[dst_port[3]];
1663 te[3] = _mm_loadu_si128(p[3]);
1665 /* Update the first 12 bytes, keep the remaining bytes intact. */
1666 te[0] = _mm_blend_epi16(te[0], ve[0], MASK_ETH);
1667 te[1] = _mm_blend_epi16(te[1], ve[1], MASK_ETH);
1668 te[2] = _mm_blend_epi16(te[2], ve[2], MASK_ETH);
1669 te[3] = _mm_blend_epi16(te[3], ve[3], MASK_ETH);
1671 _mm_storeu_si128(p[0], te[0]);
1672 _mm_storeu_si128(p[1], te[1]);
1673 _mm_storeu_si128(p[2], te[2]);
1674 _mm_storeu_si128(p[3], te[3]);
1676 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
1677 &dst_port[0], pkt[0]->packet_type);
1678 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
1679 &dst_port[1], pkt[1]->packet_type);
1680 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
1681 &dst_port[2], pkt[2]->packet_type);
1682 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
1683 &dst_port[3], pkt[3]->packet_type);
1687 * We group consecutive packets with the same destination port into one burst.
1688 * To avoid extra latency this is done together with some other packet
1689 * processing, but after we have made a final decision about the packet's destination.
1690 * To do this we maintain:
1691 * pnum - array of number of consecutive packets with the same dest port for
1692 * each packet in the input burst.
1693 * lp - pointer to the last updated element in the pnum.
1694 * dlp - dest port value lp corresponds to.
1697 #define GRPSZ (1 << FWDSTEP)
1698 #define GRPMSK (GRPSZ - 1)
1700 #define GROUP_PORT_STEP(dlp, dcp, lp, pn, idx) do { \
1701 if (likely((dlp) == (dcp)[(idx)])) { \
1704 (dlp) = (dcp)[idx]; \
1705 (lp) = (pn) + (idx); \
1711 * Group consecutive packets with the same destination port in bursts of 4.
1712 * Suppose we have an array of destination ports:
1713 * dst_port[] = {a, b, c, d, e, ... }
1714 * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
1715 * We do 4 comparisons at once and the result is a 4-bit mask.
1716 * This mask is used as an index into a prebuilt array of pnum values.
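 * For example (illustrative), if dst_port[] starts with {a, a, b, b, b}, the four
 * comparisons <a,a,b,b> == <a,b,b,b> yield the mask 0b1101 = 0xd, selecting the
 * gptbl[0xd] entry below.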
1718 static inline uint16_t *
1719 port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
1721 static const struct {
1722 uint64_t pnum; /* prebuilt 4 values for pnum[]. */
1723 int32_t idx; /* index of the new last updated element. */
1724 uint16_t lpv; /* value to add to the last updated element. */
1727 /* 0: a != b, b != c, c != d, d != e */
1728 .pnum = UINT64_C(0x0001000100010001),
1733 /* 1: a == b, b != c, c != d, d != e */
1734 .pnum = UINT64_C(0x0001000100010002),
1739 /* 2: a != b, b == c, c != d, d != e */
1740 .pnum = UINT64_C(0x0001000100020001),
1745 /* 3: a == b, b == c, c != d, d != e */
1746 .pnum = UINT64_C(0x0001000100020003),
1751 /* 4: a != b, b != c, c == d, d != e */
1752 .pnum = UINT64_C(0x0001000200010001),
1757 /* 5: a == b, b != c, c == d, d != e */
1758 .pnum = UINT64_C(0x0001000200010002),
1763 /* 6: a != b, b == c, c == d, d != e */
1764 .pnum = UINT64_C(0x0001000200030001),
1769 /* 7: a == b, b == c, c == d, d != e */
1770 .pnum = UINT64_C(0x0001000200030004),
1775 /* 8: a != b, b != c, c != d, d == e */
1776 .pnum = UINT64_C(0x0002000100010001),
1781 /* 9: a == b, b != c, c != d, d == e */
1782 .pnum = UINT64_C(0x0002000100010002),
1787 /* 0xa: a != b, b == c, c != d, d == e */
1788 .pnum = UINT64_C(0x0002000100020001),
1793 /* 0xb: a == b, b == c, c != d, d == e */
1794 .pnum = UINT64_C(0x0002000100020003),
1799 /* 0xc: a != b, b != c, c == d, d == e */
1800 .pnum = UINT64_C(0x0002000300010001),
1805 /* 0xd: a == b, b != c, c == d, d == e */
1806 .pnum = UINT64_C(0x0002000300010002),
1811 /* 0xe: a != b, b == c, c == d, d == e */
1812 .pnum = UINT64_C(0x0002000300040001),
1817 /* 0xf: a == b, b == c, c == d, d == e */
1818 .pnum = UINT64_C(0x0002000300040005),
1825 uint16_t u16[FWDSTEP + 1];
1827 } *pnum = (void *)pn;
1831 dp1 = _mm_cmpeq_epi16(dp1, dp2);
1832 dp1 = _mm_unpacklo_epi16(dp1, dp1);
1833 v = _mm_movemask_ps((__m128)dp1);
1835 /* update last port counter. */
1836 lp[0] += gptbl[v].lpv;
1838 /* if dest port value has changed. */
1840 lp = pnum->u16 + gptbl[v].idx;
1842 pnum->u64 = gptbl[v].pnum;
1848 #endif /* APP_LOOKUP_METHOD */
1850 /* main processing loop */
1852 main_loop(__attribute__((unused)) void *dummy)
1854 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1856 uint64_t prev_tsc, diff_tsc, cur_tsc;
1858 uint8_t portid, queueid;
1859 struct lcore_conf *qconf;
1860 l2_phy_interface_t *port;
1861 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
1862 US_PER_S * BURST_TX_DRAIN_US;
1864 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1865 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1869 uint16_t dst_port[MAX_PKT_BURST];
1870 __m128i dip[MAX_PKT_BURST / FWDSTEP];
1871 uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
1872 uint16_t pnum[MAX_PKT_BURST + 1];
1877 lcore_id = rte_lcore_id();
1878 qconf = &lcore_conf[lcore_id];
1880 if (qconf->n_rx_queue == 0) {
1881 RTE_LOG(INFO, UDP_Replay, "lcore %u has nothing to do\n", lcore_id);
1885 RTE_LOG(INFO, UDP_Replay, "entering main loop on lcore %u\n", lcore_id);
1887 for (i = 0; i < qconf->n_rx_queue; i++) {
1889 portid = qconf->rx_queue_list[i].port_id;
1890 queueid = qconf->rx_queue_list[i].queue_id;
1891 RTE_LOG(INFO, UDP_Replay, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
1897 cur_tsc = rte_rdtsc();
1900 * TX burst queue drain
1902 diff_tsc = cur_tsc - prev_tsc;
1903 if (unlikely(diff_tsc > drain_tsc)) {
1906 * This could be optimized (use queueid instead of
1907 * portid), but it is not called so often
1909 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1910 if (qconf->tx_mbufs[portid].len == 0)
1913 qconf->tx_mbufs[portid].len,
1915 qconf->tx_mbufs[portid].len = 0;
1922 * Read packet from RX queues
1924 for (i = 0; i < qconf->n_rx_queue; ++i) {
1925 portid = qconf->rx_queue_list[i].port_id;
1926 queueid = qconf->rx_queue_list[i].queue_id;
1927 port = ifm_get_port(portid);
1929 nb_rx = port->retrieve_bulk_pkts(portid,
1930 queueid, pkts_burst);
1931 port->n_rxpkts += nb_rx;
1933 printf("port may be un initialized\n");
1937 rcv_pkt_count[portid] += nb_rx;
1941 #if (ENABLE_MULTI_BUFFER_OPTIMIZE == 1)
1942 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
1945 * Send nb_rx - nb_rx%8 packets
1948 int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
1949 for (j = 0; j < n; j += 8) {
1950 struct ether_hdr *eth_h0 =
1951 rte_pktmbuf_mtod(pkts_burst[j], struct ether_hdr *);
1952 struct ether_hdr *eth_h1 =
1953 rte_pktmbuf_mtod(pkts_burst[j+1], struct ether_hdr *);
1954 struct ether_hdr *eth_h2 =
1955 rte_pktmbuf_mtod(pkts_burst[j+2], struct ether_hdr *);
1956 struct ether_hdr *eth_h3 =
1957 rte_pktmbuf_mtod(pkts_burst[j+3], struct ether_hdr *);
1958 struct ether_hdr *eth_h4 =
1959 rte_pktmbuf_mtod(pkts_burst[j+4], struct ether_hdr *);
1960 struct ether_hdr *eth_h5 =
1961 rte_pktmbuf_mtod(pkts_burst[j+5], struct ether_hdr *);
1962 struct ether_hdr *eth_h6 =
1963 rte_pktmbuf_mtod(pkts_burst[j+6], struct ether_hdr *);
1964 struct ether_hdr *eth_h7 =
1965 rte_pktmbuf_mtod(pkts_burst[j+7], struct ether_hdr *);
1967 uint16_t ether_type;
1968 ether_type = (rte_cpu_to_be_16(eth_h0->ether_type) &
1969 rte_cpu_to_be_16(eth_h1->ether_type) &
1970 rte_cpu_to_be_16(eth_h2->ether_type) &
1971 rte_cpu_to_be_16(eth_h3->ether_type) &
1972 rte_cpu_to_be_16(eth_h4->ether_type) &
1973 rte_cpu_to_be_16(eth_h5->ether_type) &
1974 rte_cpu_to_be_16(eth_h6->ether_type) &
1975 rte_cpu_to_be_16(eth_h7->ether_type));
1977 if (ether_type == ETHER_TYPE_IPv4) {
1978 simple_ipv4_replay_8pkts(
1979 &pkts_burst[j], portid, qconf);
1980 } else if (ether_type == ETHER_TYPE_IPv6) {
1981 simple_ipv6_replay_8pkts(&pkts_burst[j],
1984 udp_replay_simple_replay(pkts_burst[j],
1986 udp_replay_simple_replay(pkts_burst[j+1],
1988 udp_replay_simple_replay(pkts_burst[j+2],
1990 udp_replay_simple_replay(pkts_burst[j+3],
1992 udp_replay_simple_replay(pkts_burst[j+4],
1994 udp_replay_simple_replay(pkts_burst[j+5],
1996 udp_replay_simple_replay(pkts_burst[j+6],
1998 udp_replay_simple_replay(pkts_burst[j+7],
2003 for (; j < nb_rx ; j++) {
2004 udp_replay_simple_replay(pkts_burst[j],
2008 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
2010 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
2011 for (j = 0; j != k; j += FWDSTEP) {
2012 processx4_step1(&pkts_burst[j],
2014 &ipv4_flag[j / FWDSTEP]);
2017 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
2018 for (j = 0; j != k; j += FWDSTEP) {
2019 processx4_step2(qconf, dip[j / FWDSTEP],
2020 ipv4_flag[j / FWDSTEP], portid,
2021 &pkts_burst[j], &dst_port[j]);
2025 * Finish packet processing and group consecutive
2026 * packets with the same destination port.
2028 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
2035 processx4_step3(pkts_burst, dst_port);
2037 /* dp1: <d[0], d[1], d[2], d[3], ... > */
2038 dp1 = _mm_loadu_si128((__m128i *)dst_port);
2040 for (j = FWDSTEP; j != k; j += FWDSTEP) {
2041 processx4_step3(&pkts_burst[j],
2046 * <d[j-3], d[j-2], d[j-1], d[j], ... >
2048 dp2 = _mm_loadu_si128((__m128i *)
2049 &dst_port[j - FWDSTEP + 1]);
2050 lp = port_groupx4(&pnum[j - FWDSTEP],
2055 * <d[j], d[j+1], d[j+2], d[j+3], ... >
2057 dp1 = _mm_srli_si128(dp2,
2059 sizeof(dst_port[0]));
2063 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
2065 dp2 = _mm_shufflelo_epi16(dp1, 0xf9);
2066 lp = port_groupx4(&pnum[j - FWDSTEP], lp,
2070 * remove values added by the last repeated
2074 dlp = dst_port[j - 1];
2076 /* set dlp and lp to the never used values. */
2078 lp = pnum + MAX_PKT_BURST;
2081 /* Process up to last 3 packets one by one. */
2082 switch (nb_rx % FWDSTEP) {
2084 process_packet(qconf, pkts_burst[j],
2085 dst_port + j, portid);
2086 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
2089 process_packet(qconf, pkts_burst[j],
2090 dst_port + j, portid);
2091 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
2094 process_packet(qconf, pkts_burst[j],
2095 dst_port + j, portid);
2096 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
2101 * Send packets out, through destination port.
2102 * Consecutive packets with the same destination port
2103 * are already grouped together.
2104 * If destination port for the packet equals BAD_PORT,
2105 * then free the packet without sending it out.
2107 for (j = 0; j < nb_rx; j += k) {
2115 if (likely(pn != BAD_PORT)) {
2116 send_packetsx4(qconf, pn,
2119 for (m = j; m != j + k; m++)
2120 rte_pktmbuf_free(pkts_burst[m]);
2124 #endif /* APP_LOOKUP_METHOD */
2125 #else /* ENABLE_MULTI_BUFFER_OPTIMIZE == 0 */
2127 /* Prefetch first packets */
2128 for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
2129 rte_prefetch0(rte_pktmbuf_mtod(
2130 pkts_burst[j], void *));
2133 /* Prefetch and forward already prefetched packets */
2134 for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
2135 rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
2136 j + PREFETCH_OFFSET], void *));
2137 udp_replay_simple_replay(pkts_burst[j], portid,
2141 /* Forward remaining prefetched packets */
2142 for (; j < nb_rx; j++) {
2143 udp_replay_simple_replay(pkts_burst[j], portid,
2146 #endif /* ENABLE_MULTI_BUFFER_OPTIMIZE */
2158 printf ("UDP_Replay stats:\n");
2159 printf ("--------------\n");
2160 printf (" Port Rx Packet Tx Packet Rx Pkt Drop Tx Pkt Drop arp_pkts\n");
2161 for (i = 0; i < nb_lcore_params; ++i) {
2162 portid = lcore_params[i].port_id;
2163 printf ("%5u%15lu%15lu%17d%17d%14u",portid, rcv_pkt_count[portid], tx_pkt_count[portid],j,j, arp_pkts);
2175 for (i = 0; i < 32; i++) {
2176 rcv_pkt_count[i] = 0;
2177 tx_pkt_count[i] = 0;
2184 check_lcore_params(void)
2186 uint8_t queue, lcore;
2190 for (i = 0; i < nb_lcore_params; ++i) {
2191 queue = lcore_params[i].queue_id;
2192 if (queue >= MAX_RX_QUEUE_PER_PORT) {
2193 printf("invalid queue number: %hhu\n", queue);
2196 lcore = lcore_params[i].lcore_id;
2197 if (!rte_lcore_is_enabled(lcore)) {
2198 printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
2201 if (((socketid = rte_lcore_to_socket_id(lcore)) != 0) &&
2203 printf("warning: lcore %hhu is on socket %d with numa off \n",
2211 check_port_config(const unsigned nb_ports)
2216 for (i = 0; i < nb_lcore_params; ++i) {
2217 portid = lcore_params[i].port_id;
2218 if ((enabled_port_mask & (1 << portid)) == 0) {
2219 printf("port %u is not enabled in port mask\n", portid);
2222 if (portid >= nb_ports) {
2223 printf("port %u is not present on the board\n", portid);
2231 get_port_n_rx_queues(const uint8_t port)
2236 for (i = 0; i < nb_lcore_params; ++i) {
2237 if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
2238 queue = lcore_params[i].queue_id;
2240 return (uint8_t)(++queue);
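/*
 * Example (illustrative): with --config="(0,0,1),(0,1,2),(1,0,3)" the loop
 * above sees queue ids 0 and 1 for port 0, so get_port_n_rx_queues(0)
 * returns 2 while get_port_n_rx_queues(1) returns 1.
 */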
2244 init_lcore_rx_queues(void)
2246 uint16_t i, nb_rx_queue;
2249 for (i = 0; i < nb_lcore_params; ++i) {
2250 lcore = lcore_params[i].lcore_id;
2251 nb_rx_queue = lcore_conf[lcore].n_rx_queue;
2252 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
2253 printf("error: too many queues (%u) for lcore: %u\n",
2254 (unsigned)nb_rx_queue + 1, (unsigned)lcore);
2257 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
2258 lcore_params[i].port_id;
2259 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
2260 lcore_params[i].queue_id;
2261 lcore_conf[lcore].n_rx_queue++;
2269 print_usage(const char *prgname)
2271 printf ("%s [EAL options] -- -p PORTMASK -P"
2272 " [--config (port,queue,lcore)[,(port,queue,lcore]]"
2273 " [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
2274 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
2275 " -P : enable promiscuous mode\n"
2276 " --config (port,queue,lcore): rx queues configuration\n"
2277 " --eth-dest=X,MM:MM:MM:MM:MM:MM: optional, ethernet destination for port X\n"
2278 " --no-numa: optional, disable numa awareness\n"
2279 " --no-hw-csum: optional, disable hw ip checksum\n"
2280 " --ipv6: optional, specify it if running ipv6 packets\n"
2281 " --enable-jumbo: enable jumbo frame"
2282 " which max packet len is PKTLEN in decimal (64-9600)\n"
2283 " --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n",
2287 static int parse_max_pkt_len(const char *pktlen)
2292 /* parse decimal string */
2293 len = strtoul(pktlen, &end, 10);
2294 if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
2304 parse_link_ip(const char *file_name)
2307 struct rte_cfgfile *file;
2310 file = rte_cfgfile_load(file_name, 0);
2311 entry = rte_cfgfile_get_entry(file, "linkip", "num_ports");
2312 numports = (uint32_t)atoi(entry);
2313 if (numports <= 0 || numports > 32)
2314 rte_panic("numports is not valid\n");
2315 entry = rte_cfgfile_get_entry(file, "linkip", "ip_type");
2316 type = (uint32_t)atoi(entry);
2317 for (i = 0; i < numports; i++) {
2318 sprintf(buf, "port%d", i);
2319 entry = rte_cfgfile_get_entry(file, "linkip", buf);
2323 ipv4[i] = strdup(entry);
2325 my_inet_pton_ipv6(AF_INET6, entry, &link_ipv6[i][0]);
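/*
 * The file passed via -s is expected to carry a [linkip] section like the
 * sketch below (values are illustrative; ip_type is assumed to select
 * IPv4 when 0, and portN gives the link IP of port N):
 *
 *   [linkip]
 *   ip_type = 0
 *   num_ports = 2
 *   port0 = 202.16.100.20
 *   port1 = 172.16.40.20
 */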
2330 parse_portmask(const char *portmask)
2335 /* parse hexadecimal string */
2336 pm = strtoul(portmask, &end, 16);
2337 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
2346 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2348 parse_hash_entry_number(const char *hash_entry_num)
2351 unsigned long hash_en;
2352 /* parse hexadecimal string */
2353 hash_en = strtoul(hash_entry_num, &end, 16);
2354 if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
2365 parse_config(const char *q_arg)
2368 const char *p, *p0 = q_arg;
2376 unsigned long int_fld[_NUM_FLD];
2377 char *str_fld[_NUM_FLD];
2381 nb_lcore_params = 0;
2383 while ((p = strchr(p0,'(')) != NULL) {
2385 if((p0 = strchr(p,')')) == NULL)
2389 if(size >= sizeof(s))
2392 snprintf(s, sizeof(s), "%.*s", size, p);
2393 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
2395 for (i = 0; i < _NUM_FLD; i++){
2397 int_fld[i] = strtoul(str_fld[i], &end, 0);
2398 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
2401 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
2402 printf("exceeded max number of lcore params: %hu\n",
2406 lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
2407 lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
2408 lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
2411 lcore_params = lcore_params_array;
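/*
 * Example (illustrative): --config="(0,0,1),(0,1,2)" produces two entries,
 * {port 0, queue 0, lcore 1} and {port 0, queue 1, lcore 2}. Each field is
 * parsed with strtoul() and must fit in a uint8_t (<= 255).
 */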
2416 parse_eth_dest(const char *optarg)
2420 uint8_t c, *dest, peer_addr[6];
2423 portid = strtoul(optarg, &port_end, 10);
2424 if (errno != 0 || port_end == optarg || *port_end++ != ',')
2425 rte_exit(EXIT_FAILURE,
2426 "Invalid eth-dest: %s", optarg);
2427 if (portid >= RTE_MAX_ETHPORTS)
2428 rte_exit(EXIT_FAILURE,
2429 "eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
2430 portid, RTE_MAX_ETHPORTS);
2432 if (cmdline_parse_etheraddr(NULL, port_end,
2433 &peer_addr, sizeof(peer_addr)) < 0)
2434 rte_exit(EXIT_FAILURE,
2435 "Invalid ethernet address: %s\n",
2437 dest = (uint8_t *)&dest_eth_addr[portid];
2438 for (c = 0; c < 6; c++)
2439 dest[c] = peer_addr[c];
2440 *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
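/*
 * Example (illustrative): --eth-dest=0,00:11:22:33:44:55 sets the
 * destination MAC used on port 0; the number before the comma is the port
 * id, the rest is parsed by cmdline_parse_etheraddr().
 */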
2443 #define CMD_LINE_OPT_CONFIG "config"
2444 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
2445 #define CMD_LINE_OPT_NO_NUMA "no-numa"
2446 #define CMD_LINE_OPT_NO_HW_CSUM "no-hw-csum"
2447 #define CMD_LINE_OPT_IPV6 "ipv6"
2448 #define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
2449 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
2451 /* Parse the argument given in the command line of the application */
2453 parse_args(int argc, char **argv)
2458 char *prgname = argv[0];
2459 static struct option lgopts[] = {
2460 {CMD_LINE_OPT_CONFIG, 1, 0, 0},
2461 {CMD_LINE_OPT_ETH_DEST, 1, 0, 0},
2462 {CMD_LINE_OPT_NO_NUMA, 0, 0, 0},
2463 {CMD_LINE_OPT_NO_HW_CSUM, 0, 0, 0},
2464 {CMD_LINE_OPT_IPV6, 0, 0, 0},
2465 {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0},
2466 {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, 0},
2472 while ((opt = getopt_long(argc, argvopt, "s:p:P",
2473 lgopts, &option_index)) != EOF) {
2477 parse_link_ip(optarg);
2482 enabled_port_mask = parse_portmask(optarg);
2483 if (enabled_port_mask == 0) {
2484 printf("invalid portmask\n");
2485 print_usage(prgname);
2490 printf("Promiscuous mode selected\n");
2496 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_CONFIG,
2497 sizeof (CMD_LINE_OPT_CONFIG))) {
2498 ret = parse_config(optarg);
2500 printf("invalid config\n");
2501 print_usage(prgname);
2506 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ETH_DEST,
2507 sizeof(CMD_LINE_OPT_ETH_DEST))) {
2508 parse_eth_dest(optarg);
2511 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_NUMA,
2512 sizeof(CMD_LINE_OPT_NO_NUMA))) {
2513 printf("numa is disabled \n");
2517 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_HW_CSUM,
2518 sizeof(CMD_LINE_OPT_NO_HW_CSUM))) {
2519 printf("numa is hw ip checksum \n");
2520 port_conf.rxmode.hw_ip_checksum = 0;
2521 rx_conf.rx_free_thresh = 30;
2525 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2526 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_IPV6,
2527 sizeof(CMD_LINE_OPT_IPV6))) {
2528 printf("ipv6 is specified \n");
2533 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ENABLE_JUMBO,
2534 sizeof (CMD_LINE_OPT_ENABLE_JUMBO))) {
2535 struct option lenopts = {"max-pkt-len", required_argument, 0, 0};
2537 printf("jumbo frame is enabled - disabling simple TX path\n");
2538 port_conf.rxmode.jumbo_frame = 1;
2540 /* if no max-pkt-len set, use the default value ETHER_MAX_LEN */
2541 if (0 == getopt_long(argc, argvopt, "", &lenopts, &option_index)) {
2542 ret = parse_max_pkt_len(optarg);
2543 if ((ret < 64) || (ret > MAX_JUMBO_PKT_LEN)){
2544 printf("invalid packet length\n");
2545 print_usage(prgname);
2548 port_conf.rxmode.max_rx_pkt_len = ret;
2550 printf("set jumbo frame max packet length to %u\n",
2551 (unsigned int)port_conf.rxmode.max_rx_pkt_len);
2553 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2554 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_HASH_ENTRY_NUM,
2555 sizeof(CMD_LINE_OPT_HASH_ENTRY_NUM))) {
2556 ret = parse_hash_entry_number(optarg);
2557 if ((ret > 0) && (ret <= UDP_Replay_HASH_ENTRIES)) {
2558 hash_entry_number = ret;
2560 printf("invalid hash entry number\n");
2561 print_usage(prgname);
2569 print_usage(prgname);
2575 argv[optind-1] = prgname;
2578 optind = 0; /* reset getopt lib */
2582 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2584 static void convert_ipv4_5tuple(struct ipv4_5tuple* key1,
2585 union ipv4_5tuple_host* key2)
2587 key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst);
2588 key2->ip_src = rte_cpu_to_be_32(key1->ip_src);
2589 key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
2590 key2->port_src = rte_cpu_to_be_16(key1->port_src);
2591 key2->proto = key1->proto;
2597 static void convert_ipv6_5tuple(struct ipv6_5tuple* key1,
2598 union ipv6_5tuple_host* key2)
2601 for (i = 0; i < 16; i++)
2603 key2->ip_dst[i] = key1->ip_dst[i];
2604 key2->ip_src[i] = key1->ip_src[i];
2606 key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
2607 key2->port_src = rte_cpu_to_be_16(key1->port_src);
2608 key2->proto = key1->proto;
2615 #define BYTE_VALUE_MAX 256
2616 #define ALL_32_BITS 0xffffffff
2617 #define BIT_8_TO_15 0x0000ff00
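/*
 * mask0 built from these constants zeroes the TTL and header-checksum
 * bytes of the 16-byte chunk loaded from the IPv4 header, leaving only the
 * protocol byte, the addresses and the ports in the exact-match key (the
 * same masking trick as the DPDK l3fwd exact-match example this setup
 * mirrors; noted here for readability).
 */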
2619 populate_ipv4_few_flow_into_table(const struct rte_hash* h)
2623 uint32_t array_len = sizeof(ipv4_udp_replay_route_array)/sizeof(ipv4_udp_replay_route_array[0]);
2625 mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
2626 for (i = 0; i < array_len; i++) {
2627 struct ipv4_udp_replay_route entry;
2628 union ipv4_5tuple_host newkey;
2629 entry = ipv4_udp_replay_route_array[i];
2630 convert_ipv4_5tuple(&entry.key, &newkey);
2631 ret = rte_hash_add_key (h,(void *) &newkey);
2633 rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
2634 " to the udp_replay hash.\n", i);
2636 ipv4_udp_replay_out_if[ret] = entry.if_out;
2638 printf("Hash: Adding 0x%" PRIx32 " keys\n", array_len);
2641 #define BIT_16_TO_23 0x00ff0000
2643 populate_ipv6_few_flow_into_table(const struct rte_hash* h)
2647 uint32_t array_len = sizeof(ipv6_udp_replay_route_array)/sizeof(ipv6_udp_replay_route_array[0]);
2649 mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
2650 mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
2651 for (i = 0; i < array_len; i++) {
2652 struct ipv6_udp_replay_route entry;
2653 union ipv6_5tuple_host newkey;
2654 entry = ipv6_udp_replay_route_array[i];
2655 convert_ipv6_5tuple(&entry.key, &newkey);
2656 ret = rte_hash_add_key (h, (void *) &newkey);
2658 rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
2659 " to the udp_replay hash.\n", i);
2661 ipv6_udp_replay_out_if[ret] = entry.if_out;
2663 printf("Hash: Adding 0x%" PRIx32 "keys\n", array_len);
2666 #define NUMBER_PORT_USED 4
2668 populate_ipv4_many_flow_into_table(const struct rte_hash* h,
2669 unsigned int nr_flow)
2672 mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
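/*
 * Worked example (illustrative): nr_flow flows are synthesized from the
 * four template routes. For i = 5, i & 3 selects template 1 and i / 4 = 1
 * gives a = 1, b = 0, c = 0, so the generated destination is 201.0.0.1;
 * walking i spreads flows over the a/b/c bytes, allowing up to
 * 4 * 256 * 256 * 256 distinct destinations.
 */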
2673 for (i = 0; i < nr_flow; i++) {
2674 struct ipv4_udp_replay_route entry;
2675 union ipv4_5tuple_host newkey;
2676 uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
2677 uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
2678 uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
2679 /* Create the ipv4 exact match flow */
2680 memset(&entry, 0, sizeof(entry));
2681 switch (i & (NUMBER_PORT_USED -1)) {
2683 entry = ipv4_udp_replay_route_array[0];
2684 entry.key.ip_dst = IPv4(101,c,b,a);
2687 entry = ipv4_udp_replay_route_array[1];
2688 entry.key.ip_dst = IPv4(201,c,b,a);
2691 entry = ipv4_udp_replay_route_array[2];
2692 entry.key.ip_dst = IPv4(111,c,b,a);
2695 entry = ipv4_udp_replay_route_array[3];
2696 entry.key.ip_dst = IPv4(211,c,b,a);
2699 convert_ipv4_5tuple(&entry.key, &newkey);
2700 int32_t ret = rte_hash_add_key(h,(void *) &newkey);
2702 rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
2704 ipv4_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
2707 printf("Hash: Adding 0x%x keys\n", nr_flow);
2711 populate_ipv6_many_flow_into_table(const struct rte_hash* h,
2712 unsigned int nr_flow)
2715 mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
2716 mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
2717 for (i = 0; i < nr_flow; i++) {
2718 struct ipv6_udp_replay_route entry;
2719 union ipv6_5tuple_host newkey;
2720 uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
2721 uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
2722 uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
2723 /* Create the ipv6 exact match flow */
2724 memset(&entry, 0, sizeof(entry));
2725 switch (i & (NUMBER_PORT_USED - 1)) {
2726 case 0: entry = ipv6_udp_replay_route_array[0]; break;
2727 case 1: entry = ipv6_udp_replay_route_array[1]; break;
2728 case 2: entry = ipv6_udp_replay_route_array[2]; break;
2729 case 3: entry = ipv6_udp_replay_route_array[3]; break;
2731 entry.key.ip_dst[13] = c;
2732 entry.key.ip_dst[14] = b;
2733 entry.key.ip_dst[15] = a;
2734 convert_ipv6_5tuple(&entry.key, &newkey);
2735 int32_t ret = rte_hash_add_key(h,(void *) &newkey);
2737 rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
2739 ipv6_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
2742 printf("Hash: Adding 0x%x keys\n", nr_flow);
2747 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
2749 setup_lpm(int socketid)
2751 struct rte_lpm6_config config;
2756 /* create the LPM table */
2757 snprintf(s, sizeof(s), "IPV4_UDP_Replay_LPM_%d", socketid);
2758 ipv4_udp_replay_lookup_struct[socketid] = rte_lpm_create(s, socketid,
2759 IPV4_UDP_Replay_LPM_MAX_RULES, 0);
2760 if (ipv4_udp_replay_lookup_struct[socketid] == NULL)
2761 rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
2762 " on socket %d\n", socketid);
2764 /* populate the LPM table */
2765 for (i = 0; i < IPV4_UDP_Replay_NUM_ROUTES; i++) {
2767 /* skip unused ports */
2768 if ((1 << ipv4_udp_replay_route_array[i].if_out &
2769 enabled_port_mask) == 0)
2772 ret = rte_lpm_add(ipv4_udp_replay_lookup_struct[socketid],
2773 ipv4_udp_replay_route_array[i].ip,
2774 ipv4_udp_replay_route_array[i].depth,
2775 ipv4_udp_replay_route_array[i].if_out);
2778 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
2779 "udp_replay LPM table on socket %d\n",
2783 printf("LPM: Adding route 0x%08x / %d (%d)\n",
2784 (unsigned)ipv4_udp_replay_route_array[i].ip,
2785 ipv4_udp_replay_route_array[i].depth,
2786 ipv4_udp_replay_route_array[i].if_out);
2789 /* create the LPM6 table */
2790 snprintf(s, sizeof(s), "IPV6_UDP_Replay_LPM_%d", socketid);
2792 config.max_rules = IPV6_UDP_Replay_LPM_MAX_RULES;
2793 config.number_tbl8s = IPV6_UDP_Replay_LPM_NUMBER_TBL8S;
2795 ipv6_udp_replay_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
2797 if (ipv6_udp_replay_lookup_struct[socketid] == NULL)
2798 rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
2799 " on socket %d\n", socketid);
2801 /* populate the LPM table */
2802 for (i = 0; i < IPV6_UDP_Replay_NUM_ROUTES; i++) {
2804 /* skip unused ports */
2805 if ((1 << ipv6_udp_replay_route_array[i].if_out &
2806 enabled_port_mask) == 0)
2809 ret = rte_lpm6_add(ipv6_udp_replay_lookup_struct[socketid],
2810 ipv6_udp_replay_route_array[i].ip,
2811 ipv6_udp_replay_route_array[i].depth,
2812 ipv6_udp_replay_route_array[i].if_out);
2815 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
2816 "udp_replay LPM table on socket %d\n",
2820 printf("LPM: Adding route %s / %d (%d)\n",
2822 ipv6_udp_replay_route_array[i].depth,
2823 ipv6_udp_replay_route_array[i].if_out);
2833 /* Check the link status of all ports for up to 9s, then print the final status */
2835 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
2837 #define CHECK_INTERVAL 100 /* 100ms */
2838 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2839 uint8_t portid, count, all_ports_up, print_flag = 0;
2840 struct rte_eth_link link;
2842 printf("\nChecking link status");
2844 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2846 for (portid = 0; portid < port_num; portid++) {
2847 if ((port_mask & (1 << portid)) == 0)
2849 memset(&link, 0, sizeof(link));
2850 rte_eth_link_get_nowait(portid, &link);
2851 /* print link status if flag set */
2852 if (print_flag == 1) {
2853 if (link.link_status)
2854 printf("Port %d Link Up - speed %u "
2855 "Mbps - %s\n", (uint8_t)portid,
2856 (unsigned)link.link_speed,
2857 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2858 ("full-duplex") : ("half-duplex\n"));
2860 printf("Port %d Link Down\n",
2864 /* clear all_ports_up flag if any link down */
2865 if (link.link_status == 0) {
2870 /* once the final link status has been printed, get out */
2871 if (print_flag == 1)
2874 if (all_ports_up == 0) {
2877 rte_delay_ms(CHECK_INTERVAL);
2880 /* set the print_flag if all ports up or timeout */
2881 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2889 main(int argc, char **argv)
2894 uint32_t n_tx_queue;
2895 uint8_t portid, nb_rx_queue;
2898 struct pipeline_params *params;
2901 ret = rte_eal_init(argc, argv);
2903 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2906 timer_lcore = rte_lcore_id();
2907 /* parse application arguments (after the EAL ones) */
2908 ret = parse_args(argc, argv);
2910 rte_exit(EXIT_FAILURE, "Invalid UDP_Replay parameters\n");
2912 if (check_lcore_params() < 0)
2913 rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
2915 ret = init_lcore_rx_queues();
2917 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
2919 params = rte_malloc(NULL, sizeof(*params), RTE_CACHE_LINE_SIZE);
2920 memcpy(params, &def_pipeline_params, sizeof(def_pipeline_params));
2921 lib_arp_init(params, NULL);
2923 nb_ports = rte_eth_dev_count();
2924 num_ports = nb_ports;
2926 if (nb_ports > RTE_MAX_ETHPORTS)
2927 nb_ports = RTE_MAX_ETHPORTS;
2929 if (check_port_config(nb_ports) < 0)
2930 rte_exit(EXIT_FAILURE, "check_port_config failed\n");
2933 * Configure the port_config_t structure for interface manager initialization
2935 size = RTE_CACHE_LINE_ROUNDUP(sizeof(port_config_t));
2936 port_config = rte_zmalloc(NULL, (RTE_MAX_ETHPORTS * size), RTE_CACHE_LINE_SIZE);
2937 if (port_config == NULL)
2938 rte_panic("port_config is NULL: Memory Allocation failure\n");
2939 /* initialize all ports */
2940 for (portid = 0; portid < nb_ports; portid++) {
2941 /* skip ports that are not enabled */
2942 if ((enabled_port_mask & (1 << portid)) == 0) {
2943 printf("\nSkipping disabled port %d\n", portid);
2949 printf("Initializing port %d ... ", portid );
2952 nb_rx_queue = get_port_n_rx_queues(portid);
2953 n_tx_queue = nb_rx_queue;
2954 if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
2955 n_tx_queue = MAX_TX_QUEUE_PER_PORT;
2957 port_config[portid].port_id = portid;
2958 port_config[portid].nrx_queue = nb_rx_queue;
2959 port_config[portid].ntx_queue = n_tx_queue;
2960 port_config[portid].state = 1;
2961 port_config[portid].promisc = promiscuous_on;
2962 port_config[portid].mempool.pool_size = MEMPOOL_SIZE;
2963 port_config[portid].mempool.buffer_size = BUFFER_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
2964 port_config[portid].mempool.cache_size = CACHE_SIZE;
2965 port_config[portid].mempool.cpu_socket_id = rte_socket_id();
2966 memcpy (&port_config[portid].port_conf, &port_conf, sizeof(struct rte_eth_conf));
2967 memcpy (&port_config[portid].rx_conf, &rx_conf, sizeof(struct rte_eth_rxconf));
2968 memcpy (&port_config[portid].tx_conf, &tx_conf, sizeof(struct rte_eth_txconf));
2970 /* Enable TCP and UDP HW checksum offload, when required */
2971 //port_config[portid].tx_conf.txq_flags &=
2972 // ~(ETH_TXQ_FLAGS_NOXSUMTCP|ETH_TXQ_FLAGS_NOXSUMUDP);
2974 if (ifm_port_setup (portid, &port_config[portid]))
2975 rte_panic ("Port Setup Failed: %"PRIu32"\n", portid);
2978 check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
2983 populate_lpm_routes();
2984 convert_ipstr_to_numeric();
2985 /* launch per-lcore init on every lcore */
2986 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
2987 cl = cmdline_stdin_new(main_ctx, "Replay>");
2989 rte_panic("Cannot create cmdline instance\n");
2990 cmdline_interact(cl);
2991 cmdline_stdin_exit(cl);
2993 rte_exit(0, "Bye!\n");
2994 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
2995 if (rte_eal_wait_lcore(lcore_id) < 0)
3001 /**********************************************************/
3003 struct cmd_obj_clear_result {
3004 cmdline_fixed_string_t clear;
3005 cmdline_fixed_string_t udp_replay;
3006 cmdline_fixed_string_t stats;
3009 static void cmd_clear_udp_replay_stats_parsed(
3010 __rte_unused void *parsed_result,
3011 __rte_unused struct cmdline *cl,
3012 __attribute__((unused)) void *data)
3018 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_udp_replay_string =
3019 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, udp_replay, "UDP_Replay");
3020 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_clear_string =
3021 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, clear, "clear");
3022 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_stats_string =
3023 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, stats, "stats");
3025 cmdline_parse_inst_t cmd_clear_udp_replay_stats = {
3026 .f = cmd_clear_udp_replay_stats_parsed, /* function to call */
3027 .data = NULL, /* 2nd arg of func */
3028 .help_str = "clears UDP_Replay stats for rx/tx",
3029 .tokens = { /* token list, NULL terminated */
3030 (void *)&cmd_clear_udp_replay_stats_udp_replay_string,
3031 (void *)&cmd_clear_udp_replay_stats_clear_string,
3032 (void *)&cmd_clear_udp_replay_stats_stats_string,
3036 /**********************************************************/
3037 struct cmd_obj_add_result {
3038 cmdline_fixed_string_t action;
3039 cmdline_fixed_string_t name;
3042 static void cmd_udp_replay_stats_parsed(
3043 __rte_unused void *parsed_result,
3044 __rte_unused struct cmdline *cl,
3045 __attribute__((unused)) void *data)
3050 cmdline_parse_token_string_t cmd_udp_replay_stats_udp_replay_string =
3051 TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, action, "UDP_Replay");
3052 cmdline_parse_token_string_t cmd_udp_replay_stats_stats_string =
3053 TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, name, "stats");
3055 cmdline_parse_inst_t cmd_udp_replay_stats = {
3056 .f = cmd_udp_replay_stats_parsed, /* function to call */
3057 .data = NULL, /* 2nd arg of func */
3058 .help_str = "UDP_Replay stats for rx/tx",
3059 .tokens = { /* token list, NULL terminated */
3060 (void *)&cmd_udp_replay_stats_udp_replay_string,
3061 (void *)&cmd_udp_replay_stats_stats_string,
3066 struct cmd_quit_result {
3067 cmdline_fixed_string_t quit;
3072 __rte_unused void *parsed_result,
3074 __rte_unused void *data)
3079 static cmdline_parse_token_string_t cmd_quit_quit =
3080 TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
3082 static cmdline_parse_inst_t cmd_quit = {
3083 .f = cmd_quit_parsed,
3087 (void *) &cmd_quit_quit,
3092 /**********************************************************/
3093 /****** CONTEXT (list of instruction) */
3094 cmdline_parse_ctx_t main_ctx[] = {
3095 (cmdline_parse_inst_t *)&cmd_udp_replay_stats,
3096 (cmdline_parse_inst_t *)&cmd_clear_udp_replay_stats,
3097 (cmdline_parse_inst_t *)&cmd_quit,