2 // Copyright (c) 2016-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
18 Correlated traffic VNF:
19 ------------------------
21 2. Modify received packet
22	a. exchange the source MAC and destination MAC
23	b. exchange the source IP and destination IP, for both the IPv4 and IPv6 cases
24	c. exchange the UDP source port and UDP destination port
25	d. change the length of the response according to the IMIX definition
26	(an option to make the traffic more realistic, e.g. to emulate some IoT payloads)
27 3. Send the modified packet back out on the port where it was received.
29 Such a VNF does not need LPM or routing table implementations.
30 Since the packet modification is minimal and there is no main-memory access (the packet stays in the L3 cache),
31 the performance of this solution should be sufficient for testing UDP NAT performance. A minimal sketch of this header swap follows.
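
The sketch below is an illustration only (not part of the build): it uses the same
DPDK structures this file already uses (struct ether_hdr, struct ipv4_hdr,
struct udp_hdr); the variable names are hypothetical and an untagged IPv4/UDP
frame with no IP options is assumed:

    struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
    struct ipv4_hdr  *ip  = (struct ipv4_hdr *)(eth + 1);
    struct udp_hdr   *udp = (struct udp_hdr *)(ip + 1);
    struct ether_addr tmp_mac;
    uint32_t tmp_ip;
    uint16_t tmp_port;

    ether_addr_copy(&eth->s_addr, &tmp_mac);      // swap source and
    ether_addr_copy(&eth->d_addr, &eth->s_addr);  // destination MAC
    ether_addr_copy(&tmp_mac, &eth->d_addr);

    tmp_ip = ip->src_addr;                        // swap source and
    ip->src_addr = ip->dst_addr;                  // destination IPv4 address
    ip->dst_addr = tmp_ip;

    tmp_port = udp->src_port;                     // swap source and
    udp->src_port = udp->dst_port;                // destination UDP port
    udp->dst_port = tmp_port;

The mbuf is then transmitted back out on the port it was received from.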
37 #include <sys/types.h>
39 #include <sys/queue.h>
44 #include <rte_common.h>
46 #include <rte_byteorder.h>
48 #include <rte_memory.h>
49 #include <rte_memcpy.h>
50 #include <rte_memzone.h>
52 #include <rte_per_lcore.h>
53 #include <rte_launch.h>
54 #include <rte_atomic.h>
55 #include <rte_cycles.h>
56 #include <rte_prefetch.h>
57 #include <rte_lcore.h>
58 #include <rte_per_lcore.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_interrupts.h>
62 #include <rte_random.h>
63 #include <rte_debug.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
67 #include <rte_mempool.h>
72 #include <rte_string_fns.h>
73 #include <rte_version.h>
75 #include <cmdline_parse.h>
76 #include <cmdline_parse_etheraddr.h>
77 #include <cmdline_rdline.h>
78 #include <cmdline_socket.h>
80 #include <cmdline_parse_num.h>
81 #include <cmdline_parse_string.h>
82 #include <cmdline_parse_ipaddr.h>
83 #include <rte_errno.h>
84 #include <rte_cfgfile.h>
86 #include "parse_obj_list.h"
90 #include "interface.h"
91 #include "l3fwd_common.h"
92 #include "l3fwd_lpm4.h"
93 #include "l3fwd_lpm6.h"
94 #include "lib_icmpv6.h"
96 #include "vnf_common.h"
99 #define APP_LOOKUP_EXACT_MATCH 0
100 #define APP_LOOKUP_LPM 1
101 #define DO_RFC_1812_CHECKS
103 #ifndef APP_LOOKUP_METHOD
104 #define APP_LOOKUP_METHOD APP_LOOKUP_EXACT_MATCH
109 #include <netinet/in.h>
113  *  When set to zero, simple forwarding path is enabled.
114 * When set to one, optimized forwarding path is enabled.
115 * Note that LPM optimisation path uses SSE4.1 instructions.
117 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && !defined(__SSE4_1__))
118 #define ENABLE_MULTI_BUFFER_OPTIMIZE 0
120 #define ENABLE_MULTI_BUFFER_OPTIMIZE 1
123 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
124 #include <rte_hash.h>
125 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
127 #include <rte_lpm6.h>
129 #error "APP_LOOKUP_METHOD set to incorrect value"
133 #define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
134 "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
135 #define IPv6_BYTES(addr) \
136 addr[0], addr[1], addr[2], addr[3], \
137 addr[4], addr[5], addr[6], addr[7], \
138 addr[8], addr[9], addr[10], addr[11],\
139 addr[12], addr[13],addr[14], addr[15]
143 #define RTE_LOGTYPE_UDP_Replay RTE_LOGTYPE_USER1
145 #define MAX_JUMBO_PKT_LEN 9600
147 #define IPV6_ADDR_LEN 16
149 #define MEMPOOL_CACHE_SIZE 256
152 * This expression is used to calculate the number of mbufs needed depending on user input, taking
153 * into account memory for rx and tx hardware rings, cache per lcore and mtable per port per lcore.
154 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum value of 8192
157 #define NB_MBUF RTE_MAX ( \
158 (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
159 nb_ports*nb_lcores*MAX_PKT_BURST + \
160 nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
161 nb_lcores*MEMPOOL_CACHE_SIZE), \
164 #define MAX_PKT_BURST 32
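/*
 * Worked example for the NB_MBUF sizing above (illustrative numbers only):
 * with nb_ports = 2, nb_rx_queue = 1, n_tx_queue = 1 and nb_lcores = 2,
 * 2*1*128 (RX rings) + 2*2*32 (per-port, per-lcore mtables) +
 * 2*1*512 (TX rings) + 2*256 (mempool caches) = 1920, so RTE_MAX raises
 * the result to the 8192 floor mentioned above.
 */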
165 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
168 * Try to avoid TX buffering if we have at least MAX_TX_BURST packets to send.
170 #define MAX_TX_BURST (MAX_PKT_BURST / 2)
174 /* Configure how many packets ahead to prefetch, when reading packets */
175 #define PREFETCH_OFFSET 3
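/*
 * With PREFETCH_OFFSET = 3, the simple (non-vectorized) RX loop in main_loop()
 * prefetches packet j + 3 while packet j is being replayed, so mbuf data
 * accesses are overlapped with the processing of earlier packets.
 */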
177 /* Used to mark destination port as 'invalid'. */
178 #define BAD_PORT ((uint16_t)-1)
183 * Configurable number of RX/TX ring descriptors
185 #define RTE_TEST_RX_DESC_DEFAULT 128
186 #define RTE_TEST_TX_DESC_DEFAULT 512
187 static uint64_t rcv_pkt_count[32] = {0};
188 static uint64_t tx_pkt_count[32] = {0};
189 static uint32_t arp_support;
192 struct sockaddr_in ipaddr1, ipaddr2;
193 /* ethernet addresses of ports */
194 static uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
196 static __m128i val_eth[RTE_MAX_ETHPORTS];
198 cmdline_parse_ctx_t main_ctx[];
200 uint32_t timer_lcore;
201 uint32_t exit_loop = 1;
202 port_config_t *port_config;
203 #define MEMPOOL_SIZE 32 * 1024
204 #define BUFFER_SIZE 2048
205 #define CACHE_SIZE 256
206 /* replace first 12B of the ethernet header. */
207 #define MASK_ETH 0x3f
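/*
 * 0x3f sets the low six bits of the _mm_blend_epi16() immediate, i.e. the
 * first six 16-bit lanes (12 bytes) of the frame are taken from the
 * replacement value: the destination and source MAC addresses.
 */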
209 #define IP_TYPE_IPV4 0
210 #define IP_TYPE_IPV6 1
212 const char* ipv4[MAX_IP];
213 uint8_t link_ipv6[MAX_IP][16];
214 uint32_t type, numports;
215 /* mask of enabled ports */
216 static uint32_t enabled_port_mask = 0;
217 static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */
218 static int numa_on = 1; /**< NUMA is enabled by default. */
219 static int csum_on = 1; /**< Hardware IP checksum offload is enabled by default. */
220 struct pipeline_params def_pipeline_params = {
229 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
230 static int ipv6 = 0; /**< ipv6 is false by default. */
233 void convert_ipstr_to_numeric(void);
235 int print_l4stats(void);
236 int clear_stats(void);
240 struct rte_mbuf *m_table[MAX_PKT_BURST];
243 struct lcore_rx_queue {
246 } __rte_cache_aligned;
248 #define MAX_RX_QUEUE_PER_LCORE 16
249 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
250 #define MAX_RX_QUEUE_PER_PORT 128
252 #define MAX_LCORE_PARAMS 1024
253 struct lcore_params {
257 } __rte_cache_aligned;
259 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
260 static struct lcore_params lcore_params_array_default[] = {
272 static struct lcore_params * lcore_params = lcore_params_array_default;
273 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
274 sizeof(lcore_params_array_default[0]);
276 static struct rte_eth_conf port_conf = {
278 .mq_mode = ETH_MQ_RX_RSS,
279 .max_rx_pkt_len = ETHER_MAX_LEN,
281 .header_split = 0, /**< Header Split disabled */
282 .hw_ip_checksum = 1, /**< IP checksum offload enabled */
283 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
284 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
285 .hw_strip_crc = 0, /**< CRC stripped by hardware */
290 .rss_hf = ETH_RSS_IP,
294 .mq_mode = ETH_MQ_TX_NONE,
298 /* empty vmdq configuration structure. Filled in programmatically */
299 static struct rte_eth_rxconf rx_conf = {
305 .rx_free_thresh = 64,
307 .rx_deferred_start = 0,
309 static struct rte_eth_txconf tx_conf = {
317 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
318 ETH_TXQ_FLAGS_NOOFFLOADS,
319 .tx_deferred_start = 0,
322 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
324 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
325 #include <rte_hash_crc.h>
326 #define DEFAULT_HASH_FUNC rte_hash_crc
328 #include <rte_jhash.h>
329 #define DEFAULT_HASH_FUNC rte_jhash
338 } __attribute__((__packed__));
340 union ipv4_5tuple_host {
353 #define XMM_NUM_IN_IPV6_5TUPLE 3
356 uint8_t ip_dst[IPV6_ADDR_LEN];
357 uint8_t ip_src[IPV6_ADDR_LEN];
361 } __attribute__((__packed__));
363 union ipv6_5tuple_host {
368 uint8_t ip_src[IPV6_ADDR_LEN];
369 uint8_t ip_dst[IPV6_ADDR_LEN];
374 __m128i xmm[XMM_NUM_IN_IPV6_5TUPLE];
377 struct ipv4_udp_replay_route {
378 struct ipv4_5tuple key;
382 struct ipv6_udp_replay_route {
383 struct ipv6_5tuple key;
387 static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
388 {{IPv4(101,0,0,0), IPv4(100,10,0,1), 101, 11, IPPROTO_TCP}, 0},
389 {{IPv4(201,0,0,0), IPv4(200,20,0,1), 102, 12, IPPROTO_TCP}, 1},
390 {{IPv4(111,0,0,0), IPv4(100,30,0,1), 101, 11, IPPROTO_TCP}, 2},
391 {{IPv4(211,0,0,0), IPv4(200,40,0,1), 102, 12, IPPROTO_TCP}, 3},
394 static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
396 {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
397 {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
398 101, 11, IPPROTO_TCP}, 0},
401 {0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
402 {0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
403 102, 12, IPPROTO_TCP}, 1},
406 {0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
407 {0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
408 101, 11, IPPROTO_TCP}, 2},
411 {0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
412 {0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
413 102, 12, IPPROTO_TCP}, 3},
416 typedef struct rte_hash lookup_struct_t;
418 #ifdef RTE_ARCH_X86_64
419 /* default to 4 million hash entries (approx) */
420 #define UDP_Replay_HASH_ENTRIES 1024*1024*4
422 /* 32-bit has less address-space for hugepage memory, limit to 1M entries */
423 #define UDP_Replay_HASH_ENTRIES 1024*1024*1
425 #define HASH_ENTRY_NUMBER_DEFAULT 4
427 static uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
429 app_link_up_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
434 app_link_down_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
440 * inet_pton_ipv4(src, dst)
441 * like inet_aton() but without all the hexadecimal and shorthand.
443 * 1 if `src' is a valid dotted quad, else 0.
445 * does not touch `dst' unless it's returning 1.
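 *	For example (purely illustrative): inet_pton_ipv4("10.1.1.2", buf)
 *	returns 1 and writes the bytes 0x0a 0x01 0x01 0x02 into buf, while
 *	inputs such as "10.1.1" or "256.1.1.1" return 0 and leave buf untouched.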
449 static int inet_pton_ipv4(const char *src, unsigned char *dst)
451 static const char digits[] = "0123456789";
452 int saw_digit, octets, ch;
453 unsigned char tmp[INADDRSZ], *tp;
457 while ((ch = *src++) != '\0') {
459 if ((pch = strchr(digits, ch)) != NULL) {
460 unsigned int new = *tp * 10 + (pch - digits);
468 *tp = (unsigned char)new;
469 } else if (ch == '.' && saw_digit) {
479 memcpy(dst, tmp, INADDRSZ);
484 * inet_pton_ipv6(src, dst)
485 * convert presentation level address to network order binary form.
487 * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
489 * (1) does not touch `dst' unless it's returning 1.
490 * (2) :: in a full address is silently ignored.
492 * inspired by Mark Andrews.
496 static int inet_pton_ipv6(const char *src, unsigned char *dst)
498 static const char xdigits_l[] = "0123456789abcdef",
499 xdigits_u[] = "0123456789ABCDEF";
500 unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
501 const char *xdigits = 0, *curtok = 0;
502 int ch = 0, saw_xdigit = 0, count_xdigit = 0;
503 unsigned int val = 0;
504 unsigned dbloct_count = 0;
505 memset((tp = tmp), '\0', IN6ADDRSZ);
506 endp = tp + IN6ADDRSZ;
512 saw_xdigit = count_xdigit = 0;
514 while ((ch = *src++) != '\0') {
516 if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
517 pch = strchr((xdigits = xdigits_u), ch);
519 if (count_xdigit >= 4)
522 val |= (pch - xdigits);
536 } else if (*src == '\0') {
539 if (tp + sizeof(int16_t) > endp)
541 *tp++ = (unsigned char)((val >> 8) & 0xff);
542 *tp++ = (unsigned char)(val & 0xff);
549 if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
550 inet_pton_ipv4(curtok, tp) > 0) {
554 break; /* '\0' was seen by inet_pton4(). */
559 if (tp + sizeof(int16_t) > endp)
561 *tp++ = (unsigned char)((val >> 8) & 0xff);
562 *tp++ = (unsigned char)(val & 0xff);
565 if (colonp != NULL) {
566 if (dbloct_count == 8)
568 const int n = tp - colonp;
570 for (i = 1; i <= n; i++) {
571 endp[-i] = colonp[n - i];
578 memcpy(dst, tmp, IN6ADDRSZ);
581 static int my_inet_pton_ipv6(int af, const char *src, void *dst)
585 return inet_pton_ipv4(src, dst);
587 return inet_pton_ipv6(src, dst);
589 errno = EAFNOSUPPORT;
593 void convert_ipstr_to_numeric(void)
596 for (i = 0; i < numports; i++)
598 if (type == IP_TYPE_IPV4) {
599 memset(&ipaddr1, '\0', sizeof(struct sockaddr_in));
600 ipaddr1.sin_addr.s_addr = inet_addr(ipv4[i]);
601 ifm_add_ipv4_port(i, ipaddr1.sin_addr.s_addr, 24);
602 } else if (type == IP_TYPE_IPV6) {
603 ifm_add_ipv6_port(i, &link_ipv6[i][0], 128);
608 static inline uint32_t
609 ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
612 const union ipv4_5tuple_host *k;
618 p = (const uint32_t *)&k->port_src;
620 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
621 init_val = rte_hash_crc_4byte(t, init_val);
622 init_val = rte_hash_crc_4byte(k->ip_src, init_val);
623 init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
624 init_val = rte_hash_crc_4byte(*p, init_val);
625 #else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
626 init_val = rte_jhash_1word(t, init_val);
627 init_val = rte_jhash_1word(k->ip_src, init_val);
628 init_val = rte_jhash_1word(k->ip_dst, init_val);
629 init_val = rte_jhash_1word(*p, init_val);
630 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
634 static inline int check_arpicmp(struct rte_mbuf *pkt)
636 uint8_t in_port_id = pkt->port;
637 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
638 uint16_t *eth_proto =
639 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
641 uint32_t prot_offset =
642 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST;
643 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset);
644 if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_ARP) ||
645 ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV4)
646 && (*protocol == IP_PROTOCOL_ICMP))) {
647 process_arpicmp_pkt(pkt, ifm_get_port(in_port_id));
653 static inline int check_arpicmpv6(struct rte_mbuf *pkt)
655 struct ether_hdr *eth_h;
656 struct ipv6_hdr *ipv6_h;
657 uint8_t in_port_id = pkt->port;
658 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
659 uint16_t *eth_proto =
660 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
661 eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
662 ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
663 if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV6)
664 && (ipv6_h->proto == ICMPV6_PROTOCOL_ID)) {
665 process_icmpv6_pkt(pkt, ifm_get_port(in_port_id));
671 static inline uint32_t
672 ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len, uint32_t init_val)
674 const union ipv6_5tuple_host *k;
677 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
678 const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
679 const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
680 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
684 p = (const uint32_t *)&k->port_src;
686 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
687 ip_src0 = (const uint32_t *) k->ip_src;
688 ip_src1 = (const uint32_t *)(k->ip_src+4);
689 ip_src2 = (const uint32_t *)(k->ip_src+8);
690 ip_src3 = (const uint32_t *)(k->ip_src+12);
691 ip_dst0 = (const uint32_t *) k->ip_dst;
692 ip_dst1 = (const uint32_t *)(k->ip_dst+4);
693 ip_dst2 = (const uint32_t *)(k->ip_dst+8);
694 ip_dst3 = (const uint32_t *)(k->ip_dst+12);
695 init_val = rte_hash_crc_4byte(t, init_val);
696 init_val = rte_hash_crc_4byte(*ip_src0, init_val);
697 init_val = rte_hash_crc_4byte(*ip_src1, init_val);
698 init_val = rte_hash_crc_4byte(*ip_src2, init_val);
699 init_val = rte_hash_crc_4byte(*ip_src3, init_val);
700 init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
701 init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
702 init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
703 init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
704 init_val = rte_hash_crc_4byte(*p, init_val);
705 #else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
706 init_val = rte_jhash_1word(t, init_val);
707 init_val = rte_jhash(k->ip_src, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
708 init_val = rte_jhash(k->ip_dst, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
709 init_val = rte_jhash_1word(*p, init_val);
710 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
714 #define IPV4_UDP_Replay_NUM_ROUTES \
715 (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
717 #define IPV6_UDP_Replay_NUM_ROUTES \
718 (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
720 static uint8_t ipv4_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
721 static uint8_t ipv6_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
725 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
726 struct ipv4_udp_replay_route {
732 struct ipv6_udp_replay_route {
738 static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
739 {IPv4(1,1,1,0), 24, 0},
740 {IPv4(2,1,1,0), 24, 1},
741 {IPv4(3,1,1,0), 24, 2},
742 {IPv4(4,1,1,0), 24, 3},
743 {IPv4(5,1,1,0), 24, 4},
744 {IPv4(6,1,1,0), 24, 5},
745 {IPv4(7,1,1,0), 24, 6},
746 {IPv4(8,1,1,0), 24, 7},
749 static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
750 {{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
751 {{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
752 {{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
753 {{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
754 {{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
755 {{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
756 {{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
757 {{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
760 #define IPV4_UDP_Replay_NUM_ROUTES \
761 (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
762 #define IPV6_UDP_Replay_NUM_ROUTES \
763 (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
765 #define IPV4_UDP_Replay_LPM_MAX_RULES 1024
766 #define IPV6_UDP_Replay_LPM_MAX_RULES 1024
767 #define IPV6_UDP_Replay_LPM_NUMBER_TBL8S (1 << 16)
769 typedef struct rte_lpm lookup_struct_t;
770 typedef struct rte_lpm6 lookup6_struct_t;
771 static lookup_struct_t *ipv4_udp_replay_lookup_struct[NB_SOCKETS];
772 static lookup6_struct_t *ipv6_udp_replay_lookup_struct[NB_SOCKETS];
777 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
778 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
779 struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
780 lookup_struct_t * ipv4_lookup_struct;
781 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
782 lookup6_struct_t * ipv6_lookup_struct;
784 lookup_struct_t * ipv6_lookup_struct;
786 } __rte_cache_aligned;
788 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
790 /* Send burst of packets on an output interface */
792 send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
794 struct rte_mbuf **m_table;
798 queueid = qconf->tx_queue_id[port];
799 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
801 ret = rte_eth_tx_burst(port, queueid, m_table, n);
802 if (unlikely(ret < n)) {
804 rte_pktmbuf_free(m_table[ret]);
808 tx_pkt_count[port] += ret;
812 /* Enqueue a single packet, and send burst if queue is filled */
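/*
 * Packets are buffered per lcore and per port in qconf->tx_mbufs[port] and
 * flushed by send_burst() once MAX_PKT_BURST packets have accumulated; any
 * remainder is drained by the BURST_TX_DRAIN_US timeout in main_loop().
 */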
814 send_single_packet(struct rte_mbuf *m, uint8_t port)
818 struct lcore_conf *qconf;
820 lcore_id = rte_lcore_id();
822 qconf = &lcore_conf[lcore_id];
823 len = qconf->tx_mbufs[port].len;
824 qconf->tx_mbufs[port].m_table[len] = m;
827 /* enough pkts to be sent */
828 if (unlikely(len == MAX_PKT_BURST)) {
829 send_burst(qconf, MAX_PKT_BURST, port);
833 qconf->tx_mbufs[port].len = len;
837 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
838 static inline __attribute__((always_inline)) void
839 send_packetsx4(struct lcore_conf *qconf, uint8_t port,
840 struct rte_mbuf *m[], uint32_t num)
844 len = qconf->tx_mbufs[port].len;
847 * If TX buffer for that queue is empty, and we have enough packets,
848 * then send them straightway.
850 if (num >= MAX_TX_BURST && len == 0) {
851 n = rte_eth_tx_burst(port, qconf->tx_queue_id[port], m, num);
852 if (unlikely(n < num)) {
854 rte_pktmbuf_free(m[n]);
861 * Put packets into TX buffer for that queue.
865 n = (n > MAX_PKT_BURST) ? MAX_PKT_BURST - len : num;
868 switch (n % FWDSTEP) {
871 qconf->tx_mbufs[port].m_table[len + j] = m[j];
874 qconf->tx_mbufs[port].m_table[len + j] = m[j];
877 qconf->tx_mbufs[port].m_table[len + j] = m[j];
880 qconf->tx_mbufs[port].m_table[len + j] = m[j];
887 /* enough pkts to be sent */
888 if (unlikely(len == MAX_PKT_BURST)) {
890 send_burst(qconf, MAX_PKT_BURST, port);
892 /* copy rest of the packets into the TX buffer. */
895 switch (len % FWDSTEP) {
898 qconf->tx_mbufs[port].m_table[j] = m[n + j];
901 qconf->tx_mbufs[port].m_table[j] = m[n + j];
904 qconf->tx_mbufs[port].m_table[j] = m[n + j];
907 qconf->tx_mbufs[port].m_table[j] = m[n + j];
913 qconf->tx_mbufs[port].len = len;
915 #endif /* APP_LOOKUP_LPM */
917 #ifdef DO_RFC_1812_CHECKS
919 is_valid_pkt_ipv4(struct ipv4_hdr *pkt, uint32_t link_len)
921 /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
923 * 1. The packet length reported by the Link Layer must be large
924 * enough to hold the minimum length legal IP datagram (20 bytes).
926 if (link_len < sizeof(struct ipv4_hdr))
929 /* 2. The IP checksum must be correct. */
930 /* this is checked in H/W */
933 * 3. The IP version number must be 4. If the version number is not 4
934 * then the packet may be another version of IP, such as IPng or
937 if (((pkt->version_ihl) >> 4) != 4)
940 * 4. The IP header length field must be large enough to hold the
941 * minimum length legal IP datagram (20 bytes = 5 words).
943 if ((pkt->version_ihl & 0xf) < 5)
947 * 5. The IP total length field must be large enough to hold the IP
948 * datagram header, whose length is specified in the IP header length
951 if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
958 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
960 static __m128i mask0;
961 static __m128i mask1;
962 static __m128i mask2;
963 static inline uint8_t
964 get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
967 union ipv4_5tuple_host key;
969 ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
970 __m128i data = _mm_loadu_si128((__m128i*)(ipv4_hdr));
971 /* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */
972 key.xmm = _mm_and_si128(data, mask0);
973 /* Find destination port */
974 ret = rte_hash_lookup(ipv4_udp_replay_lookup_struct, (const void *)&key);
975 return (uint8_t)((ret < 0)? portid : ipv4_udp_replay_out_if[ret]);
978 static inline uint8_t
979 get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup_struct_t * ipv6_udp_replay_lookup_struct)
982 union ipv6_5tuple_host key;
984 ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
985 __m128i data0 = _mm_loadu_si128((__m128i*)(ipv6_hdr));
986 __m128i data1 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)));
987 __m128i data2 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)+sizeof(__m128i)));
988 /* Get part of 5 tuple: src IP address lower 96 bits and protocol */
989 key.xmm[0] = _mm_and_si128(data0, mask1);
990 /* Get part of 5 tuple: dst IP address lower 96 bits and src IP address higher 32 bits */
992 /* Get part of 5 tuple: dst port and src port and dst IP address higher 32 bits */
993 key.xmm[2] = _mm_and_si128(data2, mask2);
995 /* Find destination port */
996 ret = rte_hash_lookup(ipv6_udp_replay_lookup_struct, (const void *)&key);
997 return (uint8_t)((ret < 0)? portid : ipv6_udp_replay_out_if[ret]);
1001 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
1003 static inline uint8_t
1004 get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
1008 return (uint8_t) ((rte_lpm_lookup(ipv4_udp_replay_lookup_struct,
1009 rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
1010 &next_hop) == 0) ? next_hop : portid);
1013 static inline uint8_t
1014 get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup6_struct_t * ipv6_udp_replay_lookup_struct)
1017 return (uint8_t) ((rte_lpm6_lookup(ipv6_udp_replay_lookup_struct,
1018 ((struct ipv6_hdr*)ipv6_hdr)->dst_addr, &next_hop) == 0)?
1023 static inline void udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid,
1024 struct lcore_conf *qconf) __attribute__((unused));
1026 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
1027 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1029 #define MASK_ALL_PKTS 0xff
1030 #define EXCLUDE_1ST_PKT 0xfe
1031 #define EXCLUDE_2ND_PKT 0xfd
1032 #define EXCLUDE_3RD_PKT 0xfb
1033 #define EXCLUDE_4TH_PKT 0xf7
1034 #define EXCLUDE_5TH_PKT 0xef
1035 #define EXCLUDE_6TH_PKT 0xdf
1036 #define EXCLUDE_7TH_PKT 0xbf
1037 #define EXCLUDE_8TH_PKT 0x7f
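/*
 * valid_mask bookkeeping: bit i corresponds to packet m[i]; an
 * EXCLUDE_nTH_PKT constant clears bit (n - 1) when that packet fails the
 * RFC1812 check, so only packets whose bit is still set are replayed.
 */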
1040 simple_ipv4_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
1042 struct ether_hdr *eth_hdr[8];
1043 struct ether_hdr tmp;
1044 struct ipv4_hdr *ipv4_hdr[8];
1045 struct udp_hdr *udp_hdr[8];
1047 l2_phy_interface_t *port = ifm_get_port(portid);
1049 printf("port may be uninitialized\n");
1052 if (unlikely(arp_support)) {
1053 check_arpicmp(m[0]);
1054 check_arpicmp(m[1]);
1055 check_arpicmp(m[2]);
1056 check_arpicmp(m[3]);
1057 check_arpicmp(m[4]);
1058 check_arpicmp(m[5]);
1059 check_arpicmp(m[6]);
1060 check_arpicmp(m[7]);
1063 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
1064 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
1065 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
1066 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
1067 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
1068 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
1069 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
1070 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
1073 memset(&tmp,0,sizeof (struct ether_hdr));
1078 ether_addr_copy(&eth_hdr[i]->s_addr, &tmp.s_addr);
1079 ether_addr_copy(&eth_hdr[i]->d_addr, &eth_hdr[i]->s_addr);
1080 ether_addr_copy(&tmp.s_addr, &eth_hdr[i]->d_addr);
1083 /* Handle IPv4 headers.*/
1084 ipv4_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv4_hdr *,
1085 sizeof(struct ether_hdr));
1086 ipv4_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv4_hdr *,
1087 sizeof(struct ether_hdr));
1088 ipv4_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv4_hdr *,
1089 sizeof(struct ether_hdr));
1090 ipv4_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv4_hdr *,
1091 sizeof(struct ether_hdr));
1092 ipv4_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv4_hdr *,
1093 sizeof(struct ether_hdr));
1094 ipv4_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv4_hdr *,
1095 sizeof(struct ether_hdr));
1096 ipv4_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv4_hdr *,
1097 sizeof(struct ether_hdr));
1098 ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv4_hdr *,
1099 sizeof(struct ether_hdr));
1100 struct ipv4_hdr temp_ipv4;
1101 temp_ipv4.dst_addr = ipv4_hdr[0]->dst_addr;
1102 ipv4_hdr[0]->dst_addr = ipv4_hdr[0]->src_addr;
1103 ipv4_hdr[0]->src_addr = temp_ipv4.dst_addr;
1104 temp_ipv4.dst_addr = ipv4_hdr[1]->dst_addr;
1105 ipv4_hdr[1]->dst_addr = ipv4_hdr[1]->src_addr;
1106 ipv4_hdr[1]->src_addr = temp_ipv4.dst_addr;
1107 temp_ipv4.dst_addr = ipv4_hdr[2]->dst_addr;
1108 ipv4_hdr[2]->dst_addr = ipv4_hdr[2]->src_addr;
1109 ipv4_hdr[2]->src_addr = temp_ipv4.dst_addr;
1110 temp_ipv4.dst_addr = ipv4_hdr[3]->dst_addr;
1111 ipv4_hdr[3]->dst_addr = ipv4_hdr[3]->src_addr;
1112 ipv4_hdr[3]->src_addr = temp_ipv4.dst_addr;
1113 temp_ipv4.dst_addr = ipv4_hdr[4]->dst_addr;
1114 ipv4_hdr[4]->dst_addr = ipv4_hdr[4]->src_addr;
1115 ipv4_hdr[4]->src_addr = temp_ipv4.dst_addr;
1116 temp_ipv4.dst_addr = ipv4_hdr[5]->dst_addr;
1117 ipv4_hdr[5]->dst_addr = ipv4_hdr[5]->src_addr;
1118 ipv4_hdr[5]->src_addr = temp_ipv4.dst_addr;
1119 temp_ipv4.dst_addr = ipv4_hdr[6]->dst_addr;
1120 ipv4_hdr[6]->dst_addr = ipv4_hdr[6]->src_addr;
1121 ipv4_hdr[6]->src_addr = temp_ipv4.dst_addr;
1122 temp_ipv4.dst_addr = ipv4_hdr[7]->dst_addr;
1123 ipv4_hdr[7]->dst_addr = ipv4_hdr[7]->src_addr;
1124 ipv4_hdr[7]->src_addr = temp_ipv4.dst_addr;
1126 /* Handle UDP headers.*/
1127 udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
1128 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1130 udp_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct udp_hdr *,
1131 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1132 udp_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct udp_hdr *,
1133 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1134 udp_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct udp_hdr *,
1135 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1136 udp_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct udp_hdr *,
1137 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1138 udp_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct udp_hdr *,
1139 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1140 udp_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct udp_hdr *,
1141 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1142 udp_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct udp_hdr *,
1143 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
1144 /*1) memcpy or assignment.*/
1146 struct udp_hdr temp_udp;
1147 temp_udp.dst_port = udp_hdr[0]->dst_port;
1148 udp_hdr[0]->dst_port = udp_hdr[0]->src_port;
1149 udp_hdr[0]->src_port = temp_udp.dst_port;
1150 temp_udp.dst_port = udp_hdr[1]->dst_port;
1151 udp_hdr[1]->dst_port = udp_hdr[1]->src_port;
1152 udp_hdr[1]->src_port = temp_udp.dst_port;
1153 temp_udp.dst_port = udp_hdr[2]->dst_port;
1154 udp_hdr[2]->dst_port = udp_hdr[2]->src_port;
1155 udp_hdr[2]->src_port = temp_udp.dst_port;
1156 temp_udp.dst_port = udp_hdr[3]->dst_port;
1157 udp_hdr[3]->dst_port = udp_hdr[3]->src_port;
1158 udp_hdr[3]->src_port = temp_udp.dst_port;
1159 temp_udp.dst_port = udp_hdr[4]->dst_port;
1160 udp_hdr[4]->dst_port = udp_hdr[4]->src_port;
1161 udp_hdr[4]->src_port = temp_udp.dst_port;
1162 temp_udp.dst_port = udp_hdr[5]->dst_port;
1163 udp_hdr[5]->dst_port = udp_hdr[5]->src_port;
1164 udp_hdr[5]->src_port = temp_udp.dst_port;
1165 temp_udp.dst_port = udp_hdr[6]->dst_port;
1166 udp_hdr[6]->dst_port = udp_hdr[6]->src_port;
1167 udp_hdr[6]->src_port = temp_udp.dst_port;
1168 temp_udp.dst_port = udp_hdr[7]->dst_port;
1169 udp_hdr[7]->dst_port = udp_hdr[7]->src_port;
1170 udp_hdr[7]->src_port = temp_udp.dst_port;
1171 #ifdef DO_RFC_1812_CHECKS
1172 /* Check to make sure the packet is valid (RFC1812) */
1173 uint8_t valid_mask = MASK_ALL_PKTS;
1174 if (is_valid_pkt_ipv4(ipv4_hdr[0], m[0]->pkt_len) < 0) {
1175 rte_pktmbuf_free(m[0]);
1176 valid_mask &= EXCLUDE_1ST_PKT;
1178 if (is_valid_pkt_ipv4(ipv4_hdr[1], m[1]->pkt_len) < 0) {
1179 rte_pktmbuf_free(m[1]);
1180 valid_mask &= EXCLUDE_2ND_PKT;
1182 if (is_valid_pkt_ipv4(ipv4_hdr[2], m[2]->pkt_len) < 0) {
1183 rte_pktmbuf_free(m[2]);
1184 valid_mask &= EXCLUDE_3RD_PKT;
1186 if (is_valid_pkt_ipv4(ipv4_hdr[3], m[3]->pkt_len) < 0) {
1187 rte_pktmbuf_free(m[3]);
1188 valid_mask &= EXCLUDE_4TH_PKT;
1190 if (is_valid_pkt_ipv4(ipv4_hdr[4], m[4]->pkt_len) < 0) {
1191 rte_pktmbuf_free(m[4]);
1192 valid_mask &= EXCLUDE_5TH_PKT;
1194 if (is_valid_pkt_ipv4(ipv4_hdr[5], m[5]->pkt_len) < 0) {
1195 rte_pktmbuf_free(m[5]);
1196 valid_mask &= EXCLUDE_6TH_PKT;
1198 if (is_valid_pkt_ipv4(ipv4_hdr[6], m[6]->pkt_len) < 0) {
1199 rte_pktmbuf_free(m[6]);
1200 valid_mask &= EXCLUDE_7TH_PKT;
1202 if (is_valid_pkt_ipv4(ipv4_hdr[7], m[7]->pkt_len) < 0) {
1203 rte_pktmbuf_free(m[7]);
1204 valid_mask &= EXCLUDE_8TH_PKT;
1206 if (unlikely(valid_mask != MASK_ALL_PKTS)) {
1207 if (valid_mask == 0){
1211 for (i = 0; i < 8; i++) {
1212 if ((0x1 << i) & valid_mask) {
1213 udp_replay_simple_replay(m[i], portid, qconf);
1219 #endif // End of #ifdef DO_RFC_1812_CHECKS
1221 #ifdef DO_RFC_1812_CHECKS
1222 /* Update time to live and header checksum */
1223 --(ipv4_hdr[0]->time_to_live);
1224 --(ipv4_hdr[1]->time_to_live);
1225 --(ipv4_hdr[2]->time_to_live);
1226 --(ipv4_hdr[3]->time_to_live);
1227 ++(ipv4_hdr[0]->hdr_checksum);
1228 ++(ipv4_hdr[1]->hdr_checksum);
1229 ++(ipv4_hdr[2]->hdr_checksum);
1230 ++(ipv4_hdr[3]->hdr_checksum);
1231 --(ipv4_hdr[4]->time_to_live);
1232 --(ipv4_hdr[5]->time_to_live);
1233 --(ipv4_hdr[6]->time_to_live);
1234 --(ipv4_hdr[7]->time_to_live);
1235 ++(ipv4_hdr[4]->hdr_checksum);
1236 ++(ipv4_hdr[5]->hdr_checksum);
1237 ++(ipv4_hdr[6]->hdr_checksum);
1238 ++(ipv4_hdr[7]->hdr_checksum);
1241 send_single_packet(m[0],portid );
1242 send_single_packet(m[1],portid );
1243 send_single_packet(m[2],portid );
1244 send_single_packet(m[3],portid);
1245 send_single_packet(m[4],portid);
1246 send_single_packet(m[5],portid);
1247 send_single_packet(m[6],portid);
1248 send_single_packet(m[7],portid);
1252 static inline void get_ipv6_5tuple(struct rte_mbuf* m0, __m128i mask0, __m128i mask1,
1253 union ipv6_5tuple_host * key)
1255 __m128i tmpdata0 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)));
1256 __m128i tmpdata1 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i)));
1257 __m128i tmpdata2 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i) + sizeof(__m128i)));
1258 key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
1259 key->xmm[1] = tmpdata1;
1260 key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
1265 simple_ipv6_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
1267 struct ether_hdr *eth_hdr[8],tmp;
1269 __attribute__((unused)) struct ipv6_hdr *ipv6_hdr[8], temp_ipv6;
1271 union ipv6_5tuple_host key[8];
1272 struct udp_hdr *udp_hdr[8];
1273 l2_phy_interface_t *port = ifm_get_port(portid);
1275 printf("port may be uninitialized\n");
1279 if (unlikely(arp_support)) {
1280 check_arpicmpv6(m[0]);
1281 check_arpicmpv6(m[1]);
1282 check_arpicmpv6(m[2]);
1283 check_arpicmpv6(m[3]);
1284 check_arpicmpv6(m[4]);
1285 check_arpicmpv6(m[5]);
1286 check_arpicmpv6(m[6]);
1287 check_arpicmpv6(m[7]);
1291 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
1292 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
1293 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
1294 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
1295 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
1296 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
1297 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
1298 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
1300 memset(&tmp,0,sizeof (struct ether_hdr));
1304 ether_addr_copy(&eth_hdr[i]->s_addr, &tmp.s_addr);
1305 ether_addr_copy(&eth_hdr[i]->d_addr, &eth_hdr[i]->s_addr);
1306 ether_addr_copy(&tmp.s_addr, &eth_hdr[i]->d_addr);
1308 /* Handle IPv6 headers.*/
1309 ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv6_hdr *,
1310 sizeof(struct ether_hdr));
1311 ipv6_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv6_hdr *,
1312 sizeof(struct ether_hdr));
1313 ipv6_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv6_hdr *,
1314 sizeof(struct ether_hdr));
1315 ipv6_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv6_hdr *,
1316 sizeof(struct ether_hdr));
1317 ipv6_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv6_hdr *,
1318 sizeof(struct ether_hdr));
1319 ipv6_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv6_hdr *,
1320 sizeof(struct ether_hdr));
1321 ipv6_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv6_hdr *,
1322 sizeof(struct ether_hdr));
1323 ipv6_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv6_hdr *,
1324 sizeof(struct ether_hdr));
1327 memcpy(temp_ipv6.dst_addr,ipv6_hdr[i]->dst_addr,16);
1328 memcpy(ipv6_hdr[i]->dst_addr,ipv6_hdr[i]->src_addr,16);
1329 memcpy(ipv6_hdr[i]->src_addr,temp_ipv6.dst_addr,16);
1332 /* Handle UDP headers.*/
1333 udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
1334 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1336 udp_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct udp_hdr *,
1337 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1338 udp_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct udp_hdr *,
1339 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1340 udp_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct udp_hdr *,
1341 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1342 udp_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct udp_hdr *,
1343 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1344 udp_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct udp_hdr *,
1345 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1346 udp_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct udp_hdr *,
1347 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1348 udp_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct udp_hdr *,
1349 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1350 /*1) memcpy or assignment.*/
1352 struct udp_hdr temp_udp;
1355 temp_udp.dst_port = udp_hdr[i]->dst_port;
1356 udp_hdr[i]->dst_port = udp_hdr[i]->src_port;
1357 udp_hdr[i]->src_port = temp_udp.dst_port;
1359 const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
1360 &key[4], &key[5], &key[6], &key[7]};
1361 #if RTE_VERSION < 0x100b0000
1362 rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
1364 rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
1366 send_single_packet(m[0],portid);
1367 send_single_packet(m[1],portid);
1368 send_single_packet(m[2],portid);
1369 send_single_packet(m[3],portid);
1370 send_single_packet(m[4],portid);
1371 send_single_packet(m[5],portid);
1372 send_single_packet(m[6],portid);
1373 send_single_packet(m[7],portid);
1376 #endif /* APP_LOOKUP_METHOD */
1378 static inline __attribute__((always_inline)) void
1379 udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf)
1381 struct ether_hdr *eth_hdr,tmp;
1382 struct ipv4_hdr *ipv4_hdr,temp_ipv4;
1383 struct udp_hdr *udp_hdr,temp_udp;
1384 l2_phy_interface_t *port = ifm_get_port(portid);
1387 printf("port may be uninitialized\n");
1391 printf("Null packet received\n");
1394 if (unlikely(arp_support)) {
1395 if (!check_arpicmp(m))
1399 printf("qconf configuration is NULL\n");
1400 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
1401 ether_addr_copy(&eth_hdr->s_addr, &tmp.s_addr);
1402 ether_addr_copy(&eth_hdr->d_addr, &eth_hdr->s_addr);
1403 ether_addr_copy(&tmp.s_addr, &eth_hdr->d_addr);
1404 struct ether_hdr *eth_h = rte_pktmbuf_mtod(m, struct ether_hdr *);
1406 if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
1407 /* Handle IPv4 headers.*/
1408 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
1409 sizeof(struct ether_hdr));
1410 temp_ipv4.dst_addr = ipv4_hdr->dst_addr;
1411 ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
1412 ipv4_hdr->src_addr = temp_ipv4.dst_addr;
1413 #ifdef DO_RFC_1812_CHECKS
1414 /* Check to make sure the packet is valid (RFC1812) */
1415 if (is_valid_pkt_ipv4(ipv4_hdr, m->pkt_len) < 0) {
1416 rte_pktmbuf_free(m);
1422 #ifdef DO_RFC_1812_CHECKS
1423 /* Update time to live and header checksum */
1424 --(ipv4_hdr->time_to_live);
1425 ++(ipv4_hdr->hdr_checksum);
1427 /* Handle UDP headers.*/
1428 udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
1429 (sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr)));
1431 /*Swapping Src and Dst Port*/
1432 temp_udp.dst_port = udp_hdr->dst_port;
1433 udp_hdr->dst_port = udp_hdr->src_port;
1434 udp_hdr->src_port = temp_udp.dst_port;
1436 send_single_packet(m, portid);
1437 } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
1438 /* Handle IPv6 headers.*/
1439 struct ipv6_hdr *ipv6_hdr,temp_ipv6;
1441 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
1442 sizeof(struct ether_hdr));
1444 /*Swapping of Src and Dst IP address*/
1445 memcpy(temp_ipv6.dst_addr,ipv6_hdr->dst_addr,16);
1446 memcpy(ipv6_hdr->dst_addr,ipv6_hdr->src_addr,16);
1447 memcpy(ipv6_hdr->src_addr,temp_ipv6.dst_addr,16);
1449 /* Handle UDP headers.*/
1450 udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
1451 (sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr)));
1452 /*Swapping Src and Dst Port*/
1453 temp_udp.dst_port = udp_hdr->dst_port;
1454 udp_hdr->dst_port = udp_hdr->src_port;
1455 udp_hdr->src_port = temp_udp.dst_port;
1456 send_single_packet(m, portid);
1458 /* Free the mbuf that contains non-IPV4/IPV6 packet */
1459 rte_pktmbuf_free(m);
1462 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1463 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1464 #ifdef DO_RFC_1812_CHECKS
1466 #define IPV4_MIN_VER_IHL 0x45
1467 #define IPV4_MAX_VER_IHL 0x4f
1468 #define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)
1470 /* Minimum value of IPV4 total length (20B) in network byte order. */
1471 #define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
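/*
 * On a little-endian host (the usual target here), sizeof(struct ipv4_hdr)
 * is 20 = 0x14, and the network-order 16-bit value 0x0014 is read back as
 * 0x1400 = (20 << 8), so total_length can be range-checked without a byte
 * swap.
 */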
1474 * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
1475 * - The IP version number must be 4.
1476 * - The IP header length field must be large enough to hold the
1477 * minimum length legal IP datagram (20 bytes = 5 words).
1478 * - The IP total length field must be large enough to hold the IP
1479 * datagram header, whose length is specified in the IP header length
1481  * If we encounter an invalid IPv4 packet, then set its destination port
1482  * to the BAD_PORT value.
1484 static inline __attribute__((always_inline)) void
1485 rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
1489 if (RTE_ETH_IS_IPV4_HDR(ptype)) {
1490 ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;
1492 ipv4_hdr->time_to_live--;
1493 ipv4_hdr->hdr_checksum++;
1495 if (ihl > IPV4_MAX_VER_IHL_DIFF ||
1496 ((uint8_t)ipv4_hdr->total_length == 0 &&
1497 ipv4_hdr->total_length < IPV4_MIN_LEN_BE)) {
1504 #define rfc1812_process(mb, dp) do { } while (0)
1505 #endif /* DO_RFC_1812_CHECKS */
1506 #endif /* APP_LOOKUP_LPM && ENABLE_MULTI_BUFFER_OPTIMIZE */
1509 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1510 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1512 static inline __attribute__((always_inline)) uint16_t
1513 get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
1514 uint32_t dst_ipv4, uint8_t portid)
1517 struct ipv6_hdr *ipv6_hdr;
1518 struct ether_hdr *eth_hdr;
1519 struct ether_hdr *eth_h = rte_pktmbuf_mtod(m, struct ether_hdr *);
1521 if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
1522 if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
1525 } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
1526 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1527 ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
1528 if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
1529 ipv6_hdr->dst_addr, &next_hop) != 0)
1539 process_packet(struct lcore_conf *qconf, struct rte_mbuf *pkt,
1540 uint16_t *dst_port, uint8_t portid)
1542 struct ether_hdr *eth_hdr;
1543 struct ipv4_hdr *ipv4_hdr;
1548 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1549 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1552 dst_ipv4 = ipv4_hdr->dst_addr;
1553 dst_ipv4 = rte_be_to_cpu_32(dst_ipv4);
1555 /*Changing the dp to incoming port*/
1556 dp = get_dst_port(qconf, pkt, dst_ipv4, portid);
1559 te = _mm_loadu_si128((__m128i *)eth_hdr);
1563 rfc1812_process(ipv4_hdr, dst_port, pkt->packet_type);
1565 te = _mm_blend_epi16(te, ve, MASK_ETH);
1566 _mm_storeu_si128((__m128i *)eth_hdr, te);
1568 /* Won't be using the following function */
1571 * Read packet_type and destination IPV4 addresses from 4 mbufs.
1574 processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
1576 uint32_t *ipv4_flag)
1578 struct ipv4_hdr *ipv4_hdr;
1579 struct ether_hdr *eth_hdr;
1580 uint32_t x0, x1, x2, x3;
1582 eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
1583 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1584 x0 = ipv4_hdr->dst_addr;
1585 ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
1587 eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
1588 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1589 x1 = ipv4_hdr->dst_addr;
1590 ipv4_flag[0] &= pkt[1]->packet_type;
1592 eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
1593 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1594 x2 = ipv4_hdr->dst_addr;
1595 ipv4_flag[0] &= pkt[2]->packet_type;
1597 eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
1598 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1599 x3 = ipv4_hdr->dst_addr;
1600 ipv4_flag[0] &= pkt[3]->packet_type;
1602 dip[0] = _mm_set_epi32(x3, x2, x1, x0);
1606 * Lookup into LPM for destination port.
1607 * If lookup fails, use incoming port (portid) as destination port.
1610 processx4_step2(const struct lcore_conf *qconf,
1614 struct rte_mbuf *pkt[FWDSTEP],
1615 uint16_t dprt[FWDSTEP])
1618 const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
1619 4, 5, 6, 7, 0, 1, 2, 3);
1621 /* Byte swap 4 IPV4 addresses. */
1622 dip = _mm_shuffle_epi8(dip, bswap_mask);
1624 /* if all 4 packets are IPV4. */
1625 if (likely(ipv4_flag)) {
1626 rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid);
1629 dprt[0] = get_dst_port(qconf, pkt[0], dst.u32[0], portid);
1630 dprt[1] = get_dst_port(qconf, pkt[1], dst.u32[1], portid);
1631 dprt[2] = get_dst_port(qconf, pkt[2], dst.u32[2], portid);
1632 dprt[3] = get_dst_port(qconf, pkt[3], dst.u32[3], portid);
1637 * Update source and destination MAC addresses in the ethernet header.
1638 * Perform RFC1812 checks and updates for IPV4 packets.
1641 processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
1643 __m128i te[FWDSTEP];
1644 __m128i ve[FWDSTEP];
1645 __m128i *p[FWDSTEP];
1647 p[0] = rte_pktmbuf_mtod(pkt[0], __m128i *);
1648 p[1] = rte_pktmbuf_mtod(pkt[1], __m128i *);
1649 p[2] = rte_pktmbuf_mtod(pkt[2], __m128i *);
1650 p[3] = rte_pktmbuf_mtod(pkt[3], __m128i *);
1652 ve[0] = val_eth[dst_port[0]];
1653 te[0] = _mm_loadu_si128(p[0]);
1655 ve[1] = val_eth[dst_port[1]];
1656 te[1] = _mm_loadu_si128(p[1]);
1658 ve[2] = val_eth[dst_port[2]];
1659 te[2] = _mm_loadu_si128(p[2]);
1661 ve[3] = val_eth[dst_port[3]];
1662 te[3] = _mm_loadu_si128(p[3]);
1664 /* Update first 12 bytes, keep rest bytes intact. */
1665 te[0] = _mm_blend_epi16(te[0], ve[0], MASK_ETH);
1666 te[1] = _mm_blend_epi16(te[1], ve[1], MASK_ETH);
1667 te[2] = _mm_blend_epi16(te[2], ve[2], MASK_ETH);
1668 te[3] = _mm_blend_epi16(te[3], ve[3], MASK_ETH);
1670 _mm_storeu_si128(p[0], te[0]);
1671 _mm_storeu_si128(p[1], te[1]);
1672 _mm_storeu_si128(p[2], te[2]);
1673 _mm_storeu_si128(p[3], te[3]);
1675 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
1676 &dst_port[0], pkt[0]->packet_type);
1677 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
1678 &dst_port[1], pkt[1]->packet_type);
1679 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
1680 &dst_port[2], pkt[2]->packet_type);
1681 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
1682 &dst_port[3], pkt[3]->packet_type);
1686  * We group consecutive packets with the same destination port into one burst.
1687  * To avoid extra latency this is done together with some other packet
1688  * processing, but after we have made a final decision about the packet's destination.
1689 * To do this we maintain:
1690 * pnum - array of number of consecutive packets with the same dest port for
1691 * each packet in the input burst.
1692 * lp - pointer to the last updated element in the pnum.
1693 * dlp - dest port value lp corresponds to.
1696 #define GRPSZ (1 << FWDSTEP)
1697 #define GRPMSK (GRPSZ - 1)
1699 #define GROUP_PORT_STEP(dlp, dcp, lp, pn, idx) do { \
1700 if (likely((dlp) == (dcp)[(idx)])) { \
1703 (dlp) = (dcp)[idx]; \
1704 (lp) = (pn) + (idx); \
1710 * Group consecutive packets with the same destination port in bursts of 4.
1711  * Suppose we have an array of destination ports:
1712  * dst_port[] = {a, b, c, d, e, ... }
1713  * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
1714  * We do 4 comparisons at once and the result is a 4-bit mask.
1715  * This mask is used as an index into a prebuilt array of pnum values.
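 * For instance (illustrative values), dst_port[] = {1, 1, 2, 2, 3} gives the
 * equality pattern a == b, b != c, c == d, d != e, i.e. mask value 5, which
 * selects the matching precomputed pnum/idx/lpv entry from the table below.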
1717 static inline uint16_t *
1718 port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
1720 static const struct {
1721 uint64_t pnum; /* prebuilt 4 values for pnum[]. */
1722 int32_t idx; /* index for the new last updated element. */
1723 uint16_t lpv; /* add value to the last updated element. */
1726 /* 0: a != b, b != c, c != d, d != e */
1727 .pnum = UINT64_C(0x0001000100010001),
1732 /* 1: a == b, b != c, c != d, d != e */
1733 .pnum = UINT64_C(0x0001000100010002),
1738 /* 2: a != b, b == c, c != d, d != e */
1739 .pnum = UINT64_C(0x0001000100020001),
1744 /* 3: a == b, b == c, c != d, d != e */
1745 .pnum = UINT64_C(0x0001000100020003),
1750 /* 4: a != b, b != c, c == d, d != e */
1751 .pnum = UINT64_C(0x0001000200010001),
1756 /* 5: a == b, b != c, c == d, d != e */
1757 .pnum = UINT64_C(0x0001000200010002),
1762 /* 6: a != b, b == c, c == d, d != e */
1763 .pnum = UINT64_C(0x0001000200030001),
1768 /* 7: a == b, b == c, c == d, d != e */
1769 .pnum = UINT64_C(0x0001000200030004),
1774 /* 8: a != b, b != c, c != d, d == e */
1775 .pnum = UINT64_C(0x0002000100010001),
1780 /* 9: a == b, b != c, c != d, d == e */
1781 .pnum = UINT64_C(0x0002000100010002),
1786 /* 0xa: a != b, b == c, c != d, d == e */
1787 .pnum = UINT64_C(0x0002000100020001),
1792 /* 0xb: a == b, b == c, c != d, d == e */
1793 .pnum = UINT64_C(0x0002000100020003),
1798 /* 0xc: a != b, b != c, c == d, d == e */
1799 .pnum = UINT64_C(0x0002000300010001),
1804 /* 0xd: a == b, b != c, c == d, d == e */
1805 .pnum = UINT64_C(0x0002000300010002),
1810 /* 0xe: a != b, b == c, c == d, d == e */
1811 .pnum = UINT64_C(0x0002000300040001),
1816 /* 0xf: a == b, b == c, c == d, d == e */
1817 .pnum = UINT64_C(0x0002000300040005),
1824 uint16_t u16[FWDSTEP + 1];
1826 } *pnum = (void *)pn;
1830 dp1 = _mm_cmpeq_epi16(dp1, dp2);
1831 dp1 = _mm_unpacklo_epi16(dp1, dp1);
1832 v = _mm_movemask_ps((__m128)dp1);
1834 /* update last port counter. */
1835 lp[0] += gptbl[v].lpv;
1837 /* if dest port value has changed. */
1839 lp = pnum->u16 + gptbl[v].idx;
1841 pnum->u64 = gptbl[v].pnum;
1847 #endif /* APP_LOOKUP_METHOD */
1849 /* main processing loop */
1851 main_loop(__attribute__((unused)) void *dummy)
1853 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1855 uint64_t prev_tsc, diff_tsc, cur_tsc;
1857 uint8_t portid, queueid;
1858 struct lcore_conf *qconf;
1859 l2_phy_interface_t *port;
1860 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
1861 US_PER_S * BURST_TX_DRAIN_US;
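	/*
	 * drain_tsc is the TX drain interval in TSC ticks: the TSC rate is
	 * rounded up to whole ticks per microsecond and multiplied by
	 * BURST_TX_DRAIN_US (~100us), matching the comment on that define.
	 */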
1863 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1864 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1868 uint16_t dst_port[MAX_PKT_BURST];
1869 __m128i dip[MAX_PKT_BURST / FWDSTEP];
1870 uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
1871 uint16_t pnum[MAX_PKT_BURST + 1];
1876 lcore_id = rte_lcore_id();
1877 qconf = &lcore_conf[lcore_id];
1879 if (qconf->n_rx_queue == 0) {
1880 RTE_LOG(INFO, UDP_Replay, "lcore %u has nothing to do\n", lcore_id);
1884 RTE_LOG(INFO, UDP_Replay, "entering main loop on lcore %u\n", lcore_id);
1886 for (i = 0; i < qconf->n_rx_queue; i++) {
1888 portid = qconf->rx_queue_list[i].port_id;
1889 queueid = qconf->rx_queue_list[i].queue_id;
1890 RTE_LOG(INFO, UDP_Replay, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
1896 cur_tsc = rte_rdtsc();
1899 * TX burst queue drain
1901 diff_tsc = cur_tsc - prev_tsc;
1902 if (unlikely(diff_tsc > drain_tsc)) {
1905 * This could be optimized (use queueid instead of
1906 * portid), but it is not called so often
1908 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1909 if (qconf->tx_mbufs[portid].len == 0)
1912 qconf->tx_mbufs[portid].len,
1914 qconf->tx_mbufs[portid].len = 0;
1921 * Read packet from RX queues
1923 for (i = 0; i < qconf->n_rx_queue; ++i) {
1924 portid = qconf->rx_queue_list[i].port_id;
1925 queueid = qconf->rx_queue_list[i].queue_id;
1926 port = ifm_get_port(portid);
1928 nb_rx = port->retrieve_bulk_pkts(portid,
1929 queueid, pkts_burst);
1930 port->n_rxpkts += nb_rx;
1932 printf("port may be uninitialized\n");
1936 rcv_pkt_count[portid] += nb_rx;
1940 #if (ENABLE_MULTI_BUFFER_OPTIMIZE == 1)
1941 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
1944 * Send nb_rx - nb_rx%8 packets
1947 int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
1948 for (j = 0; j < n; j += 8) {
1949 struct ether_hdr *eth_h0 =
1950 rte_pktmbuf_mtod(pkts_burst[j], struct ether_hdr *);
1951 struct ether_hdr *eth_h1 =
1952 rte_pktmbuf_mtod(pkts_burst[j+1], struct ether_hdr *);
1953 struct ether_hdr *eth_h2 =
1954 rte_pktmbuf_mtod(pkts_burst[j+2], struct ether_hdr *);
1955 struct ether_hdr *eth_h3 =
1956 rte_pktmbuf_mtod(pkts_burst[j+3], struct ether_hdr *);
1957 struct ether_hdr *eth_h4 =
1958 rte_pktmbuf_mtod(pkts_burst[j+4], struct ether_hdr *);
1959 struct ether_hdr *eth_h5 =
1960 rte_pktmbuf_mtod(pkts_burst[j+5], struct ether_hdr *);
1961 struct ether_hdr *eth_h6 =
1962 rte_pktmbuf_mtod(pkts_burst[j+6], struct ether_hdr *);
1963 struct ether_hdr *eth_h7 =
1964 rte_pktmbuf_mtod(pkts_burst[j+7], struct ether_hdr *);
1966 uint16_t ether_type;
1967 ether_type = (rte_cpu_to_be_16(eth_h0->ether_type) &
1968 rte_cpu_to_be_16(eth_h1->ether_type) &
1969 rte_cpu_to_be_16(eth_h2->ether_type) &
1970 rte_cpu_to_be_16(eth_h3->ether_type) &
1971 rte_cpu_to_be_16(eth_h4->ether_type) &
1972 rte_cpu_to_be_16(eth_h5->ether_type) &
1973 rte_cpu_to_be_16(eth_h6->ether_type) &
1974 rte_cpu_to_be_16(eth_h7->ether_type));
1976 if (ether_type == ETHER_TYPE_IPv4) {
1977 simple_ipv4_replay_8pkts(
1978 &pkts_burst[j], portid, qconf);
1979 } else if (ether_type == ETHER_TYPE_IPv6) {
1980 simple_ipv6_replay_8pkts(&pkts_burst[j],
1983 udp_replay_simple_replay(pkts_burst[j],
1985 udp_replay_simple_replay(pkts_burst[j+1],
1987 udp_replay_simple_replay(pkts_burst[j+2],
1989 udp_replay_simple_replay(pkts_burst[j+3],
1991 udp_replay_simple_replay(pkts_burst[j+4],
1993 udp_replay_simple_replay(pkts_burst[j+5],
1995 udp_replay_simple_replay(pkts_burst[j+6],
1997 udp_replay_simple_replay(pkts_burst[j+7],
2002 for (; j < nb_rx ; j++) {
2003 udp_replay_simple_replay(pkts_burst[j],
2007 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
2009 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
2010 for (j = 0; j != k; j += FWDSTEP) {
2011 processx4_step1(&pkts_burst[j],
2013 &ipv4_flag[j / FWDSTEP]);
2016 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
2017 for (j = 0; j != k; j += FWDSTEP) {
2018 processx4_step2(qconf, dip[j / FWDSTEP],
2019 ipv4_flag[j / FWDSTEP], portid,
2020 &pkts_burst[j], &dst_port[j]);
2024 * Finish packet processing and group consecutive
2025 * packets with the same destination port.
2027 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
2034 processx4_step3(pkts_burst, dst_port);
2036 /* dp1: <d[0], d[1], d[2], d[3], ... > */
2037 dp1 = _mm_loadu_si128((__m128i *)dst_port);
2039 for (j = FWDSTEP; j != k; j += FWDSTEP) {
2040 processx4_step3(&pkts_burst[j],
2045 * <d[j-3], d[j-2], d[j-1], d[j], ... >
2047 dp2 = _mm_loadu_si128((__m128i *)
2048 &dst_port[j - FWDSTEP + 1]);
2049 lp = port_groupx4(&pnum[j - FWDSTEP],
2054 * <d[j], d[j+1], d[j+2], d[j+3], ... >
2056 dp1 = _mm_srli_si128(dp2,
2058 sizeof(dst_port[0]));
2062 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
2064 dp2 = _mm_shufflelo_epi16(dp1, 0xf9);
2065 lp = port_groupx4(&pnum[j - FWDSTEP], lp,
2069 * remove values added by the last repeated
2073 dlp = dst_port[j - 1];
2075 /* set dlp and lp to the never used values. */
2077 lp = pnum + MAX_PKT_BURST;
2080 /* Process up to last 3 packets one by one. */
2081 switch (nb_rx % FWDSTEP) {
2083 process_packet(qconf, pkts_burst[j],
2084 dst_port + j, portid);
2085 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
2088 process_packet(qconf, pkts_burst[j],
2089 dst_port + j, portid);
2090 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
2093 process_packet(qconf, pkts_burst[j],
2094 dst_port + j, portid);
2095 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
2100  * Send packets out through the destination port.
2101  * Consecutive packets with the same destination port
2102  * are already grouped together.
2103  * If the destination port for the packet equals BAD_PORT,
2104  * then free the packet without sending it out.
2106 for (j = 0; j < nb_rx; j += k) {
2114 if (likely(pn != BAD_PORT)) {
2115 send_packetsx4(qconf, pn,
2118 for (m = j; m != j + k; m++)
2119 rte_pktmbuf_free(pkts_burst[m]);
2123 #endif /* APP_LOOKUP_METHOD */
2124 #else /* ENABLE_MULTI_BUFFER_OPTIMIZE == 0 */
2126 /* Prefetch first packets */
2127 for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
2128 rte_prefetch0(rte_pktmbuf_mtod(
2129 pkts_burst[j], void *));
2132 /* Prefetch and forward already prefetched packets */
2133 for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
2134 rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
2135 j + PREFETCH_OFFSET], void *));
2136 udp_replay_simple_replay(pkts_burst[j], portid,
2140 /* Forward remaining prefetched packets */
2141 for (; j < nb_rx; j++) {
2142 udp_replay_simple_replay(pkts_burst[j], portid,
2145 #endif /* ENABLE_MULTI_BUFFER_OPTIMIZE */
2157 printf ("UDP_Replay stats:\n");
2158 printf ("--------------\n");
2159 printf (" Port Rx Packet Tx Packet Rx Pkt Drop Tx Pkt Drop arp_pkts\n");
2160 for (i = 0; i < nb_lcore_params; ++i) {
2161 portid = lcore_params[i].port_id;
2162 printf ("%5u%15lu%15lu%17d%17d%14u",portid, rcv_pkt_count[portid], tx_pkt_count[portid],j,j, arp_pkts);
2174 for (i = 0; i < 32; i++) {
2175 rcv_pkt_count[i] = 0;
2176 tx_pkt_count[i] = 0;
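/*
 * Validate the (port, queue, lcore) tuples supplied via --config: queue ids
 * must fit within MAX_RX_QUEUE_PER_PORT and every referenced lcore must be
 * enabled in the EAL core mask.
 */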
2183 check_lcore_params(void)
2185 uint8_t queue, lcore;
2189 for (i = 0; i < nb_lcore_params; ++i) {
2190 queue = lcore_params[i].queue_id;
2191 if (queue >= MAX_RX_QUEUE_PER_PORT) {
2192 printf("invalid queue number: %hhu\n", queue);
2195 lcore = lcore_params[i].lcore_id;
2196 if (!rte_lcore_is_enabled(lcore)) {
2197 printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
2200 if (((socketid = rte_lcore_to_socket_id(lcore)) != 0) &&
2202 printf("warning: lcore %hhu is on socket %d with numa off \n",
2210 check_port_config(const unsigned nb_ports)
2215 for (i = 0; i < nb_lcore_params; ++i) {
2216 portid = lcore_params[i].port_id;
2217 if ((enabled_port_mask & (1 << portid)) == 0) {
2218 printf("port %u is not enabled in port mask\n", portid);
2221 if (portid >= nb_ports) {
2222 printf("port %u is not present on the board\n", portid);
2230 get_port_n_rx_queues(const uint8_t port)
2235 for (i = 0; i < nb_lcore_params; ++i) {
2236 if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
2237 queue = lcore_params[i].queue_id;
2239 return (uint8_t)(++queue);
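/*
 * Distribute the configured RX queues across the lcore_conf table, erroring
 * out when a single lcore would exceed MAX_RX_QUEUE_PER_LCORE.
 */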
2243 init_lcore_rx_queues(void)
2245 uint16_t i, nb_rx_queue;
2248 for (i = 0; i < nb_lcore_params; ++i) {
2249 lcore = lcore_params[i].lcore_id;
2250 nb_rx_queue = lcore_conf[lcore].n_rx_queue;
2251 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
2252 printf("error: too many queues (%u) for lcore: %u\n",
2253 (unsigned)nb_rx_queue + 1, (unsigned)lcore);
2256 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
2257 lcore_params[i].port_id;
2258 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
2259 lcore_params[i].queue_id;
2260 lcore_conf[lcore].n_rx_queue++;
2268 print_usage(const char *prgname)
2270 printf ("%s [EAL options] -- -p PORTMASK -P"
2271 " [--config (port,queue,lcore)[,(port,queue,lcore]]"
2272 " [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
2273 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
2274 " -P : enable promiscuous mode\n"
2275 " --config (port,queue,lcore): rx queues configuration\n"
2276 " --eth-dest=X,MM:MM:MM:MM:MM:MM: optional, ethernet destination for port X\n"
2277 " --no-numa: optional, disable numa awareness\n"
2278 " --no-hw-csum: optional, disable hw ip checksum\n"
2279 " --ipv6: optional, specify it if running ipv6 packets\n"
2280 " --enable-jumbo: enable jumbo frame"
2281 " which max packet len is PKTLEN in decimal (64-9600)\n"
2282 " --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n",
2286 static int parse_max_pkt_len(const char *pktlen)
2291 /* parse decimal string */
2292 len = strtoul(pktlen, &end, 10);
2293 if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
2303 parse_link_ip(const char *file_name)
2306 struct rte_cfgfile *file;
2309 file = rte_cfgfile_load(file_name, 0);
2310 entry = rte_cfgfile_get_entry(file, "linkip", "num_ports");
2311 numports = (uint32_t)atoi(entry);
2312 if (numports <= 0 || numports > 32)
2313 rte_panic("numports is not valid\n");
2314 entry = rte_cfgfile_get_entry(file, "linkip", "ip_type");
2315 type = (uint32_t)atoi(entry);
2316 for (i = 0; i < numports; i++) {
2317 sprintf(buf, "port%d", i);
2318 entry = rte_cfgfile_get_entry(file, "linkip", buf);
2322 ipv4[i] = strdup(entry);
2324 my_inet_pton_ipv6(AF_INET6, entry, &link_ipv6[i][0]);
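	/*
	 * The cfgfile section parsed above is expected to look like the
	 * following (key names taken from the lookups above; the addresses
	 * and the ip_type semantics are illustrative assumptions):
	 *
	 *   [linkip]
	 *   num_ports = 2
	 *   ip_type = 0
	 *   port0 = 202.16.100.20
	 *   port1 = 172.16.40.20
	 */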
2329 parse_portmask(const char *portmask)
2334 /* parse hexadecimal string */
2335 pm = strtoul(portmask, &end, 16);
2336 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
2345 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2347 parse_hash_entry_number(const char *hash_entry_num)
2350 unsigned long hash_en;
2351 /* parse hexadecimal string */
2352 hash_en = strtoul(hash_entry_num, &end, 16);
2353 if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
2364 parse_config(const char *q_arg)
2367 const char *p, *p0 = q_arg;
2375 unsigned long int_fld[_NUM_FLD];
2376 char *str_fld[_NUM_FLD];
2380 nb_lcore_params = 0;
2382 while ((p = strchr(p0,'(')) != NULL) {
2384 if((p0 = strchr(p,')')) == NULL)
2388 if(size >= sizeof(s))
2391 snprintf(s, sizeof(s), "%.*s", size, p);
2392 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
2394 for (i = 0; i < _NUM_FLD; i++){
2396 int_fld[i] = strtoul(str_fld[i], &end, 0);
2397 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
2400 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
2401 printf("exceeded max number of lcore params: %hu\n",
2405 lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
2406 lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
2407 lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
2410 lcore_params = lcore_params_array;
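	/*
	 * Example of the accepted --config value (illustrative):
	 *   --config="(0,0,1),(1,0,2)"
	 * i.e. port 0 / queue 0 handled by lcore 1 and port 1 / queue 0 by lcore 2.
	 */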
2415 parse_eth_dest(const char *optarg)
2419 uint8_t c, *dest, peer_addr[6];
2422 portid = strtoul(optarg, &port_end, 10);
2423 if (errno != 0 || port_end == optarg || *port_end++ != ',')
2424 rte_exit(EXIT_FAILURE,
2425 "Invalid eth-dest: %s", optarg);
2426 if (portid >= RTE_MAX_ETHPORTS)
2427 rte_exit(EXIT_FAILURE,
2428 "eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
2429 portid, RTE_MAX_ETHPORTS);
2431 if (cmdline_parse_etheraddr(NULL, port_end,
2432 &peer_addr, sizeof(peer_addr)) < 0)
2433 rte_exit(EXIT_FAILURE,
2434 "Invalid ethernet address: %s\n",
2436 dest = (uint8_t *)&dest_eth_addr[portid];
2437 for (c = 0; c < 6; c++)
2438 dest[c] = peer_addr[c];
2439 *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
2442 #define CMD_LINE_OPT_CONFIG "config"
2443 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
2444 #define CMD_LINE_OPT_NO_NUMA "no-numa"
2445 #define CMD_LINE_OPT_NO_HW_CSUM "no-hw-csum"
2446 #define CMD_LINE_OPT_IPV6 "ipv6"
2447 #define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
2448 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
2450 /* Parse the argument given in the command line of the application */
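/*
 * Illustrative invocation (binary name and EAL core/memory options are
 * examples only; -s is assumed to take the link-IP cfgfile handled by
 * parse_link_ip()):
 *
 *   ./UDP_Replay -c 0x3 -n 4 -- -s link_ip.cfg -p 0x3 -P \
 *       --config="(0,0,1),(1,0,2)"
 */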
2452 parse_args(int argc, char **argv)
2457 char *prgname = argv[0];
2458 static struct option lgopts[] = {
2459 {CMD_LINE_OPT_CONFIG, 1, 0, 0},
2460 {CMD_LINE_OPT_ETH_DEST, 1, 0, 0},
2461 {CMD_LINE_OPT_NO_NUMA, 0, 0, 0},
2462 {CMD_LINE_OPT_NO_HW_CSUM, 0, 0, 0},
2463 {CMD_LINE_OPT_IPV6, 0, 0, 0},
2464 {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0},
2465 {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, 0},
2471 while ((opt = getopt_long(argc, argvopt, "s:p:P",
2472 lgopts, &option_index)) != EOF) {
2476 parse_link_ip(optarg);
2481 enabled_port_mask = parse_portmask(optarg);
2482 if (enabled_port_mask == 0) {
2483 printf("invalid portmask\n");
2484 print_usage(prgname);
2489 printf("Promiscuous mode selected\n");
2495 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_CONFIG,
2496 sizeof (CMD_LINE_OPT_CONFIG))) {
2497 ret = parse_config(optarg);
2499 printf("invalid config\n");
2500 print_usage(prgname);
2505 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ETH_DEST,
2506 sizeof(CMD_LINE_OPT_ETH_DEST))) {
2507 parse_eth_dest(optarg);
2510 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_NUMA,
2511 sizeof(CMD_LINE_OPT_NO_NUMA))) {
2512 printf("numa is disabled \n");
2516 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_HW_CSUM,
2517 sizeof(CMD_LINE_OPT_NO_HW_CSUM))) {
2518 printf("numa is hw ip checksum \n");
2519 port_conf.rxmode.hw_ip_checksum = 0;
2520 rx_conf.rx_free_thresh = 30;
2524 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2525 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_IPV6,
2526 sizeof(CMD_LINE_OPT_IPV6))) {
2527 printf("ipv6 is specified \n");
2532 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ENABLE_JUMBO,
2533 sizeof (CMD_LINE_OPT_ENABLE_JUMBO))) {
2534 struct option lenopts = {"max-pkt-len", required_argument, 0, 0};
2536 printf("jumbo frame is enabled - disabling simple TX path\n");
2537 port_conf.rxmode.jumbo_frame = 1;
2539 /* if no max-pkt-len set, use the default value ETHER_MAX_LEN */
2540 if (0 == getopt_long(argc, argvopt, "", &lenopts, &option_index)) {
2541 ret = parse_max_pkt_len(optarg);
2542 if ((ret < 64) || (ret > MAX_JUMBO_PKT_LEN)){
2543 printf("invalid packet length\n");
2544 print_usage(prgname);
2547 port_conf.rxmode.max_rx_pkt_len = ret;
2549 printf("set jumbo frame max packet length to %u\n",
2550 (unsigned int)port_conf.rxmode.max_rx_pkt_len);
2552 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2553 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_HASH_ENTRY_NUM,
2554 sizeof(CMD_LINE_OPT_HASH_ENTRY_NUM))) {
2555 ret = parse_hash_entry_number(optarg);
2556 if ((ret > 0) && (ret <= UDP_Replay_HASH_ENTRIES)) {
2557 hash_entry_number = ret;
2559 printf("invalid hash entry number\n");
2560 print_usage(prgname);
2568 print_usage(prgname);
2574 argv[optind-1] = prgname;
2577 optind = 0; /* reset getopt lib */
2581 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
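/*
 * Exact-match helpers: 5-tuple keys are stored in the hash in network
 * (big-endian) byte order, so convert host-order route entries before
 * they are inserted or looked up.
 */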
2583 static void convert_ipv4_5tuple(struct ipv4_5tuple* key1,
2584 union ipv4_5tuple_host* key2)
2586 key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst);
2587 key2->ip_src = rte_cpu_to_be_32(key1->ip_src);
2588 key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
2589 key2->port_src = rte_cpu_to_be_16(key1->port_src);
2590 key2->proto = key1->proto;
2596 static void convert_ipv6_5tuple(struct ipv6_5tuple* key1,
2597 union ipv6_5tuple_host* key2)
2600 for (i = 0; i < 16; i++)
2602 key2->ip_dst[i] = key1->ip_dst[i];
2603 key2->ip_src[i] = key1->ip_src[i];
2605 key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
2606 key2->port_src = rte_cpu_to_be_16(key1->port_src);
2607 key2->proto = key1->proto;
2614 #define BYTE_VALUE_MAX 256
2615 #define ALL_32_BITS 0xffffffff
2616 #define BIT_8_TO_15 0x0000ff00
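/*
 * Populate the exact-match hash with the small static IPv4 route table;
 * the index returned by rte_hash_add_key() is used to record the output
 * interface for that flow.
 */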
2618 populate_ipv4_few_flow_into_table(const struct rte_hash* h)
2622 uint32_t array_len = sizeof(ipv4_udp_replay_route_array)/sizeof(ipv4_udp_replay_route_array[0]);
2624 mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
2625 for (i = 0; i < array_len; i++) {
2626 struct ipv4_udp_replay_route entry;
2627 union ipv4_5tuple_host newkey;
2628 entry = ipv4_udp_replay_route_array[i];
2629 convert_ipv4_5tuple(&entry.key, &newkey);
2630 ret = rte_hash_add_key (h,(void *) &newkey);
2632 rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
2633 " to the udp_replay hash.\n", i);
2635 ipv4_udp_replay_out_if[ret] = entry.if_out;
2637 printf("Hash: Adding 0x%" PRIx32 " keys\n", array_len);
2640 #define BIT_16_TO_23 0x00ff0000
2642 populate_ipv6_few_flow_into_table(const struct rte_hash* h)
2646 uint32_t array_len = sizeof(ipv6_udp_replay_route_array)/sizeof(ipv6_udp_replay_route_array[0]);
2648 mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
2649 mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
2650 for (i = 0; i < array_len; i++) {
2651 struct ipv6_udp_replay_route entry;
2652 union ipv6_5tuple_host newkey;
2653 entry = ipv6_udp_replay_route_array[i];
2654 convert_ipv6_5tuple(&entry.key, &newkey);
2655 ret = rte_hash_add_key (h, (void *) &newkey);
2657 rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
2658 " to the udp_replay hash.\n", i);
2660 ipv6_udp_replay_out_if[ret] = entry.if_out;
2662 printf("Hash: Adding 0x%" PRIx32 "keys\n", array_len);
2665 #define NUMBER_PORT_USED 4
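/*
 * Synthetic many-flow population: derive nr_flow IPv4 keys from the four
 * base routes by varying the low three bytes of the destination address.
 */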
2667 populate_ipv4_many_flow_into_table(const struct rte_hash* h,
2668 unsigned int nr_flow)
2671 mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
2672 for (i = 0; i < nr_flow; i++) {
2673 struct ipv4_udp_replay_route entry;
2674 union ipv4_5tuple_host newkey;
2675 uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
2676 uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
2677 uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
2678 /* Create the ipv4 exact match flow */
2679 memset(&entry, 0, sizeof(entry));
2680 switch (i & (NUMBER_PORT_USED -1)) {
2682 entry = ipv4_udp_replay_route_array[0];
2683 entry.key.ip_dst = IPv4(101,c,b,a);
2686 entry = ipv4_udp_replay_route_array[1];
2687 entry.key.ip_dst = IPv4(201,c,b,a);
2690 entry = ipv4_udp_replay_route_array[2];
2691 entry.key.ip_dst = IPv4(111,c,b,a);
2694 entry = ipv4_udp_replay_route_array[3];
2695 entry.key.ip_dst = IPv4(211,c,b,a);
2698 convert_ipv4_5tuple(&entry.key, &newkey);
2699 int32_t ret = rte_hash_add_key(h,(void *) &newkey);
2701 rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
2703 ipv4_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
2706 printf("Hash: Adding 0x%x keys\n", nr_flow);
2710 populate_ipv6_many_flow_into_table(const struct rte_hash* h,
2711 unsigned int nr_flow)
2714 mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
2715 mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
2716 for (i = 0; i < nr_flow; i++) {
2717 struct ipv6_udp_replay_route entry;
2718 union ipv6_5tuple_host newkey;
2719 uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
2720 uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
2721 uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
2722 /* Create the ipv6 exact match flow */
2723 memset(&entry, 0, sizeof(entry));
2724 switch (i & (NUMBER_PORT_USED - 1)) {
2725 case 0: entry = ipv6_udp_replay_route_array[0]; break;
2726 case 1: entry = ipv6_udp_replay_route_array[1]; break;
2727 case 2: entry = ipv6_udp_replay_route_array[2]; break;
2728 case 3: entry = ipv6_udp_replay_route_array[3]; break;
2730 entry.key.ip_dst[13] = c;
2731 entry.key.ip_dst[14] = b;
2732 entry.key.ip_dst[15] = a;
2733 convert_ipv6_5tuple(&entry.key, &newkey);
2734 int32_t ret = rte_hash_add_key(h,(void *) &newkey);
2736 rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
2738 ipv6_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
2741 printf("Hash: Adding 0x%x keys\n", nr_flow);
2746 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
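/*
 * LPM setup: create one IPv4 LPM table and one IPv6 LPM6 table per socket
 * and load the static route arrays, skipping routes whose output port is
 * not part of the enabled port mask.
 */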
2748 setup_lpm(int socketid)
2750 struct rte_lpm6_config config;
2755 /* create the LPM table */
2756 snprintf(s, sizeof(s), "IPV4_UDP_Replay_LPM_%d", socketid);
2757 ipv4_udp_replay_lookup_struct[socketid] = rte_lpm_create(s, socketid,
2758 IPV4_UDP_Replay_LPM_MAX_RULES, 0);
2759 if (ipv4_udp_replay_lookup_struct[socketid] == NULL)
2760 rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
2761 " on socket %d\n", socketid);
2763 /* populate the LPM table */
2764 for (i = 0; i < IPV4_UDP_Replay_NUM_ROUTES; i++) {
2766 /* skip unused ports */
2767 if ((1 << ipv4_udp_replay_route_array[i].if_out &
2768 enabled_port_mask) == 0)
2771 ret = rte_lpm_add(ipv4_udp_replay_lookup_struct[socketid],
2772 ipv4_udp_replay_route_array[i].ip,
2773 ipv4_udp_replay_route_array[i].depth,
2774 ipv4_udp_replay_route_array[i].if_out);
2777 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
2778 "udp_replay LPM table on socket %d\n",
2782 printf("LPM: Adding route 0x%08x / %d (%d)\n",
2783 (unsigned)ipv4_udp_replay_route_array[i].ip,
2784 ipv4_udp_replay_route_array[i].depth,
2785 ipv4_udp_replay_route_array[i].if_out);
2788 /* create the LPM6 table */
2789 snprintf(s, sizeof(s), "IPV6_UDP_Replay_LPM_%d", socketid);
2791 config.max_rules = IPV6_UDP_Replay_LPM_MAX_RULES;
2792 config.number_tbl8s = IPV6_UDP_Replay_LPM_NUMBER_TBL8S;
2794 ipv6_udp_replay_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
2796 if (ipv6_udp_replay_lookup_struct[socketid] == NULL)
2797 rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
2798 " on socket %d\n", socketid);
2800 /* populate the LPM table */
2801 for (i = 0; i < IPV6_UDP_Replay_NUM_ROUTES; i++) {
2803 /* skip unused ports */
2804 if ((1 << ipv6_udp_replay_route_array[i].if_out &
2805 enabled_port_mask) == 0)
2808 ret = rte_lpm6_add(ipv6_udp_replay_lookup_struct[socketid],
2809 ipv6_udp_replay_route_array[i].ip,
2810 ipv6_udp_replay_route_array[i].depth,
2811 ipv6_udp_replay_route_array[i].if_out);
2814 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
2815 "udp_replay LPM table on socket %d\n",
2819 printf("LPM: Adding route %s / %d (%d)\n",
2821 ipv6_udp_replay_route_array[i].depth,
2822 ipv6_udp_replay_route_array[i].if_out);
2832 /* Check the link status of all enabled ports, waiting up to 9 s, then print the final status */
2834 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
2836 #define CHECK_INTERVAL 100 /* 100ms */
2837 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2838 uint8_t portid, count, all_ports_up, print_flag = 0;
2839 struct rte_eth_link link;
2841 printf("\nChecking link status");
2843 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2845 for (portid = 0; portid < port_num; portid++) {
2846 if ((port_mask & (1 << portid)) == 0)
2848 memset(&link, 0, sizeof(link));
2849 rte_eth_link_get_nowait(portid, &link);
2850 /* print link status if flag set */
2851 if (print_flag == 1) {
2852 if (link.link_status)
2853 printf("Port %d Link Up - speed %u "
2854 "Mbps - %s\n", (uint8_t)portid,
2855 (unsigned)link.link_speed,
2856 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2857 ("full-duplex") : ("half-duplex\n"));
2859 printf("Port %d Link Down\n",
2863 /* clear all_ports_up flag if any link down */
2864 if (link.link_status == 0) {
2869 /* after finally printing all link status, get out */
2870 if (print_flag == 1)
2873 if (all_ports_up == 0) {
2876 rte_delay_ms(CHECK_INTERVAL);
2879 /* set the print_flag if all ports up or timeout */
2880 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2888 main(int argc, char **argv)
2893 uint32_t n_tx_queue, nb_lcores;
2894 uint8_t portid, nb_rx_queue;
2897 struct pipeline_params *params;
2900 ret = rte_eal_init(argc, argv);
2902 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2905 timer_lcore = rte_lcore_id();
2906 /* parse application arguments (after the EAL ones) */
2907 ret = parse_args(argc, argv);
2909 rte_exit(EXIT_FAILURE, "Invalid UDP_Replay parameters\n");
2911 if (check_lcore_params() < 0)
2912 rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
2914 ret = init_lcore_rx_queues();
2916 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
2918 params = rte_malloc(NULL, sizeof(*params), RTE_CACHE_LINE_SIZE);
2919 memcpy(params, &def_pipeline_params, sizeof(def_pipeline_params));
2920 lib_arp_init(params, NULL);
2922 nb_ports = rte_eth_dev_count();
2923 num_ports = nb_ports;
2924 if (nb_ports > RTE_MAX_ETHPORTS)
2925 nb_ports = RTE_MAX_ETHPORTS;
2927 if (check_port_config(nb_ports) < 0)
2928 rte_exit(EXIT_FAILURE, "check_port_config failed\n");
2930 nb_lcores = rte_lcore_count();
2933 * Configure the port_config_t structure for interface manager initialization
2935 size = RTE_CACHE_LINE_ROUNDUP(sizeof(port_config_t));
2936 port_config = rte_zmalloc(NULL, (RTE_MAX_ETHPORTS * size), RTE_CACHE_LINE_SIZE);
2937 if (port_config == NULL)
2938 rte_panic("port_config is NULL: Memory Allocation failure\n");
2939 /* initialize all ports */
2940 for (portid = 0; portid < nb_ports; portid++) {
2941 /* skip ports that are not enabled */
2942 if ((enabled_port_mask & (1 << portid)) == 0) {
2943 printf("\nSkipping disabled port %d\n", portid);
2949 printf("Initializing port %d ... ", portid );
2952 nb_rx_queue = get_port_n_rx_queues(portid);
2953 n_tx_queue = nb_rx_queue;
2954 if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
2955 n_tx_queue = MAX_TX_QUEUE_PER_PORT;
2957 port_config[portid].port_id = portid;
2958 port_config[portid].nrx_queue = nb_rx_queue;
2959 port_config[portid].ntx_queue = n_tx_queue;
2960 port_config[portid].state = 1;
2961 port_config[portid].promisc = promiscuous_on;
2962 port_config[portid].mempool.pool_size = MEMPOOL_SIZE;
2963 port_config[portid].mempool.buffer_size = BUFFER_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
2964 port_config[portid].mempool.cache_size = CACHE_SIZE;
2965 port_config[portid].mempool.cpu_socket_id = rte_socket_id();
2966 memcpy (&port_config[portid].port_conf, &port_conf, sizeof(struct rte_eth_conf));
2967 memcpy (&port_config[portid].rx_conf, &rx_conf, sizeof(struct rte_eth_rxconf));
2968 memcpy (&port_config[portid].tx_conf, &tx_conf, sizeof(struct rte_eth_txconf));
2970 /* Enable TCP and UDP HW checksum, when required */
2971 //port_config[portid].tx_conf.txq_flags &=
2972 // ~(ETH_TXQ_FLAGS_NOXSUMTCP|ETH_TXQ_FLAGS_NOXSUMUDP);
2974 if (ifm_port_setup (portid, &port_config[portid]))
2975 rte_panic ("Port Setup Failed: %"PRIu32"\n", portid);
2978 check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
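	/* Install the static LPM routes and convert the configured link IP
	 * strings to numeric form before the per-lcore main loops start. */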
2983 populate_lpm_routes();
2984 convert_ipstr_to_numeric();
2985 /* launch per-lcore init on every lcore */
2986 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
2987 cl = cmdline_stdin_new(main_ctx, "Replay>");
2989 rte_panic("Cannot create cmdline instance\n");
2990 cmdline_interact(cl);
2991 cmdline_stdin_exit(cl);
2993 rte_exit(0, "Bye!\n");
2994 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
2995 if (rte_eal_wait_lcore(lcore_id) < 0)
3001 /**********************************************************/
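/*
 * Interactive cmdline commands: "UDP_Replay stats", "UDP_Replay clear stats"
 * and "quit", all registered in main_ctx at the bottom of the file.
 */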
3003 struct cmd_obj_clear_result {
3004 cmdline_fixed_string_t clear;
3005 cmdline_fixed_string_t udp_replay;
3006 cmdline_fixed_string_t stats;
3009 static void cmd_clear_udp_replay_stats_parsed(
3010 __rte_unused void *parsed_result,
3011 __rte_unused struct cmdline *cl,
3012 __attribute__((unused)) void *data)
3018 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_udp_replay_string =
3019 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, udp_replay, "UDP_Replay");
3020 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_clear_string =
3021 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, clear, "clear");
3022 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_stats_string =
3023 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, stats, "stats");
3025 cmdline_parse_inst_t cmd_clear_udp_replay_stats = {
3026 .f = cmd_clear_udp_replay_stats_parsed, /* function to call */
3027 .data = NULL, /* 2nd arg of func */
3028 .help_str = "clears UDP_Replay stats for rx/tx",
3029 .tokens = { /* token list, NULL terminated */
3030 (void *)&cmd_clear_udp_replay_stats_udp_replay_string,
3031 (void *)&cmd_clear_udp_replay_stats_clear_string,
3032 (void *)&cmd_clear_udp_replay_stats_stats_string,
3036 /**********************************************************/
3037 struct cmd_obj_add_result {
3038 cmdline_fixed_string_t action;
3039 cmdline_fixed_string_t name;
3042 static void cmd_udp_replay_stats_parsed(
3043 __rte_unused void *parsed_result,
3044 __rte_unused struct cmdline *cl,
3045 __attribute__((unused)) void *data)
3050 cmdline_parse_token_string_t cmd_udp_replay_stats_udp_replay_string =
3051 TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, action, "UDP_Replay");
3052 cmdline_parse_token_string_t cmd_udp_replay_stats_stats_string =
3053 TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, name, "stats");
3055 cmdline_parse_inst_t cmd_udp_replay_stats = {
3056 .f = cmd_udp_replay_stats_parsed, /* function to call */
3057 .data = NULL, /* 2nd arg of func */
3058 .help_str = "UDP_Replay stats for rx/tx",
3059 .tokens = { /* token list, NULL terminated */
3060 (void *)&cmd_udp_replay_stats_udp_replay_string,
3061 (void *)&cmd_udp_replay_stats_stats_string,
3066 struct cmd_quit_result {
3067 cmdline_fixed_string_t quit;
3072 __rte_unused void *parsed_result,
3074 __rte_unused void *data)
3079 static cmdline_parse_token_string_t cmd_quit_quit =
3080 TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
3082 static cmdline_parse_inst_t cmd_quit = {
3083 .f = cmd_quit_parsed,
3087 (void *) &cmd_quit_quit,
3092 /**********************************************************/
3093 /****** CONTEXT (list of instruction) */
3094 cmdline_parse_ctx_t main_ctx[] = {
3095 (cmdline_parse_inst_t *)&cmd_udp_replay_stats,
3096 (cmdline_parse_inst_t *)&cmd_clear_udp_replay_stats,
3097 (cmdline_parse_inst_t *)&cmd_quit,