2 // Copyright (c) 2016-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
18 Correlated traffic VNF :
19 ------------------------
21 2. Modify received packet
22 a. exchange src mac and destination mac
23 b. exchange src ip and destination IP for both IPv4 and IPv6 cases
24 c. exchange UDP src port and UDP destination port
25 d. change the len of the response according to the IMIX definition (
26 option to make traffic more realistic to emulate some IoT payloads)
27 3. send modified packet to the port where it was received.
29 Such VNF does not need LPM and routing table implementations.
30 As the packet modification is very minimal and there is no memory access as the packet is stored in L3 cache the
31 performance of the solution should be sufficient for testing the UDP NAT performance.
37 #include <sys/types.h>
39 #include <sys/queue.h>
44 #include <rte_common.h>
46 #include <rte_byteorder.h>
48 #include <rte_memory.h>
49 #include <rte_memcpy.h>
50 #include <rte_memzone.h>
52 #include <rte_per_lcore.h>
53 #include <rte_launch.h>
54 #include <rte_atomic.h>
55 #include <rte_cycles.h>
56 #include <rte_prefetch.h>
57 #include <rte_lcore.h>
58 #include <rte_per_lcore.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_interrupts.h>
62 #include <rte_random.h>
63 #include <rte_debug.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
67 #include <rte_mempool.h>
72 #include <rte_string_fns.h>
73 #include <rte_version.h>
75 #include <cmdline_parse.h>
76 #include <cmdline_parse_etheraddr.h>
77 #include <cmdline_rdline.h>
78 #include <cmdline_socket.h>
80 #include <cmdline_parse_num.h>
81 #include <cmdline_parse_string.h>
82 #include <cmdline_parse_ipaddr.h>
83 #include <rte_errno.h>
84 #include <rte_cfgfile.h>
88 #include "interface.h"
90 #include "l3fwd_common.h"
91 #include "l3fwd_lpm4.h"
92 #include "l3fwd_lpm6.h"
93 #include "lib_icmpv6.h"
95 #include "vnf_common.h"
99 #define APP_LOOKUP_EXACT_MATCH 0
100 #define APP_LOOKUP_LPM 1
101 #define DO_RFC_1812_CHECKS
103 #ifndef APP_LOOKUP_METHOD
104 #define APP_LOOKUP_METHOD APP_LOOKUP_EXACT_MATCH
109 #include <netinet/in.h>
113 * When set to zero, simple forwarding path is enabled.
114 * When set to one, optimized forwarding path is enabled.
115 * Note that LPM optimisation path uses SSE4.1 instructions.
117 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && !defined(__SSE4_1__))
118 #define ENABLE_MULTI_BUFFER_OPTIMIZE 0
120 #define ENABLE_MULTI_BUFFER_OPTIMIZE 1
123 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
124 #include <rte_hash.h>
125 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
127 #include <rte_lpm6.h>
129 #error "APP_LOOKUP_METHOD set to incorrect value"
133 #define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
134 "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
135 #define IPv6_BYTES(addr) \
136 addr[0], addr[1], addr[2], addr[3], \
137 addr[4], addr[5], addr[6], addr[7], \
138 addr[8], addr[9], addr[10], addr[11],\
139 addr[12], addr[13],addr[14], addr[15]
143 #define RTE_LOGTYPE_UDP_Replay RTE_LOGTYPE_USER1
145 #define MAX_JUMBO_PKT_LEN 9600
147 #define IPV6_ADDR_LEN 16
149 #define MEMPOOL_CACHE_SIZE 256
152 * This expression is used to calculate the number of mbufs needed depending on user input, taking
153 * into account memory for rx and tx hardware rings, cache per lcore and mtable per port per lcore.
154 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum value of 8192
157 #define NB_MBUF RTE_MAX ( \
158 (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
159 nb_ports*nb_lcores*MAX_PKT_BURST + \
160 nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
161 nb_lcores*MEMPOOL_CACHE_SIZE), \
164 #define MAX_PKT_BURST 32
165 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
168 * Try to avoid TX buffering if we have at least MAX_TX_BURST packets to send.
170 #define MAX_TX_BURST (MAX_PKT_BURST / 2)
174 /* Configure how many packets ahead to prefetch, when reading packets */
175 #define PREFETCH_OFFSET 3
177 /* Used to mark destination port as 'invalid'. */
178 #define BAD_PORT ((uint16_t)-1)
183 * Configurable number of RX/TX ring descriptors
185 #define RTE_TEST_RX_DESC_DEFAULT 128
186 #define RTE_TEST_TX_DESC_DEFAULT 512
187 static uint64_t rcv_pkt_count[32] = {0};
188 static uint64_t tx_pkt_count[32] = {0};
189 static uint32_t arp_support;
192 struct sockaddr_in ipaddr1, ipaddr2;
193 /* ethernet addresses of ports */
194 static uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
196 static __m128i val_eth[RTE_MAX_ETHPORTS];
198 cmdline_parse_ctx_t main_ctx[];
200 uint32_t timer_lcore;
201 uint32_t exit_loop = 1;
202 port_config_t *port_config;
204 #define MEMPOOL_SIZE 32 * 1024
205 #define BUFFER_SIZE 2048
206 #define CACHE_SIZE 256
207 /* replace first 12B of the ethernet header. */
208 #define MASK_ETH 0x3f
210 #define IP_TYPE_IPV4 0
211 #define IP_TYPE_IPV6 1
213 const char* ipv4[MAX_IP];
214 uint8_t link_ipv6[MAX_IP][16];
215 uint32_t type, numports;
216 /* mask of enabled ports */
217 static uint32_t enabled_port_mask = 0;
218 static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */
219 static int numa_on = 1; /**< NUMA is enabled by default. */
220 static int csum_on = 1; /**< Checksum offload is enabled by default. */
221 struct pipeline_params def_pipeline_params = {
230 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
231 static int ipv6 = 0; /**< ipv6 is false by default. */
234 void convert_ipstr_to_numeric(void);
236 int print_l4stats(void);
237 int clear_stats(void);
241 struct rte_mbuf *m_table[MAX_PKT_BURST];
244 struct lcore_rx_queue {
247 } __rte_cache_aligned;
249 #define MAX_RX_QUEUE_PER_LCORE 16
250 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
251 #define MAX_RX_QUEUE_PER_PORT 128
253 #define MAX_LCORE_PARAMS 1024
254 struct lcore_params {
258 } __rte_cache_aligned;
260 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
261 static struct lcore_params lcore_params_array_default[] = {
273 static struct lcore_params * lcore_params = lcore_params_array_default;
274 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
275 sizeof(lcore_params_array_default[0]);
277 static struct rte_eth_conf port_conf = {
279 .mq_mode = ETH_MQ_RX_RSS,
280 .max_rx_pkt_len = ETHER_MAX_LEN,
282 .header_split = 0, /**< Header Split disabled */
283 .hw_ip_checksum = 1, /**< IP checksum offload enabled */
284 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
285 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
286 .hw_strip_crc = 0, /**< CRC stripped by hardware */
291 .rss_hf = ETH_RSS_IP,
295 .mq_mode = ETH_MQ_TX_NONE,
299 /* empty vmdq configuration structure. Filled in programmatically */
300 static struct rte_eth_rxconf rx_conf = {
306 .rx_free_thresh = 64,
308 .rx_deferred_start = 0,
310 static struct rte_eth_txconf tx_conf = {
318 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
319 ETH_TXQ_FLAGS_NOOFFLOADS,
320 .tx_deferred_start = 0,
323 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
325 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
326 #include <rte_hash_crc.h>
327 #define DEFAULT_HASH_FUNC rte_hash_crc
329 #include <rte_jhash.h>
330 #define DEFAULT_HASH_FUNC rte_jhash
339 } __attribute__((__packed__));
341 union ipv4_5tuple_host {
354 #define XMM_NUM_IN_IPV6_5TUPLE 3
357 uint8_t ip_dst[IPV6_ADDR_LEN];
358 uint8_t ip_src[IPV6_ADDR_LEN];
362 } __attribute__((__packed__));
364 union ipv6_5tuple_host {
369 uint8_t ip_src[IPV6_ADDR_LEN];
370 uint8_t ip_dst[IPV6_ADDR_LEN];
375 __m128i xmm[XMM_NUM_IN_IPV6_5TUPLE];
378 struct ipv4_udp_replay_route {
379 struct ipv4_5tuple key;
383 struct ipv6_udp_replay_route {
384 struct ipv6_5tuple key;
388 static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
389 {{IPv4(101,0,0,0), IPv4(100,10,0,1), 101, 11, IPPROTO_TCP}, 0},
390 {{IPv4(201,0,0,0), IPv4(200,20,0,1), 102, 12, IPPROTO_TCP}, 1},
391 {{IPv4(111,0,0,0), IPv4(100,30,0,1), 101, 11, IPPROTO_TCP}, 2},
392 {{IPv4(211,0,0,0), IPv4(200,40,0,1), 102, 12, IPPROTO_TCP}, 3},
395 static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
397 {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
398 {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
399 101, 11, IPPROTO_TCP}, 0},
402 {0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
403 {0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
404 102, 12, IPPROTO_TCP}, 1},
407 {0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
408 {0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
409 101, 11, IPPROTO_TCP}, 2},
412 {0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
413 {0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
414 102, 12, IPPROTO_TCP}, 3},
417 typedef struct rte_hash lookup_struct_t;
419 #ifdef RTE_ARCH_X86_64
420 /* default to 4 million hash entries (approx) */
421 #define UDP_Replay_HASH_ENTRIES 1024*1024*4
423 /* 32-bit has less address-space for hugepage memory, limit to 1M entries */
424 #define UDP_Replay_HASH_ENTRIES 1024*1024*1
426 #define HASH_ENTRY_NUMBER_DEFAULT 4
428 static uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
430 app_link_up_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
435 app_link_down_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
/*
 * Assign each port its configured L3 address.
 * IPv4: parses the dotted-quad string in ipv4[i] with inet_addr() and
 * registers it on port i with a /24 prefix; IPv6: registers the raw
 * 16-byte address from link_ipv6[i] with a /128 prefix.
 * NOTE(review): several lines of this function (braces, loop-index
 * declaration, closing) are not visible in this chunk; comments cover
 * only the code shown here.
 */
440 void convert_ipstr_to_numeric(void)
443 for (i = 0; i < numports; i++)
445 if (type == IP_TYPE_IPV4) {
446 memset(&ipaddr1, '\0', sizeof(struct sockaddr_in));
447 ipaddr1.sin_addr.s_addr = inet_addr(ipv4[i]);
448 ifm_add_ipv4_port(i, ipaddr1.sin_addr.s_addr, 24);
449 } else if (type == IP_TYPE_IPV6) {
450 ifm_add_ipv6_port(i, &link_ipv6[i][0], 128);
/*
 * Hash callback for the IPv4 exact-match rte_hash table.
 * Folds the 5-tuple into init_val: protocol (presumably held in `t`,
 * declared on a line missing from this chunk — TODO confirm), src/dst
 * IPv4 addresses, and the packed 16-bit src/dst port pair (`p` points
 * at port_src, so one 4-byte fold covers both ports).
 * Uses hardware CRC32 when SSE4.2 is available, jhash otherwise.
 */
455 static inline uint32_t
456 ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
459 const union ipv4_5tuple_host *k;
465 p = (const uint32_t *)&k->port_src;
467 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
468 init_val = rte_hash_crc_4byte(t, init_val);
469 init_val = rte_hash_crc_4byte(k->ip_src, init_val);
470 init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
471 init_val = rte_hash_crc_4byte(*p, init_val);
472 #else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
473 init_val = rte_jhash_1word(t, init_val);
474 init_val = rte_jhash_1word(k->ip_src, init_val);
475 init_val = rte_jhash_1word(k->ip_dst, init_val);
476 init_val = rte_jhash_1word(*p, init_val);
477 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
/*
 * Detect and consume ARP or IPv4-ICMP control packets.
 * Reads the EtherType at MBUF_HDR_ROOM + 12 and the IPv4 protocol byte;
 * if the frame is ARP, or IPv4 carrying ICMP, it is handed to
 * process_arpicmp_pkt() for the ingress port.
 * NOTE(review): the return statements (lines after the handler call)
 * are not visible in this chunk — presumably the function returns a
 * flag telling the caller whether the packet was consumed; confirm
 * against the full source.
 */
481 static inline int check_arpicmp(struct rte_mbuf *pkt)
483 uint8_t in_port_id = pkt->port;
484 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
485 uint16_t *eth_proto =
486 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
488 uint32_t prot_offset =
489 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST;
490 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset);
491 if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_ARP) ||
492 ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV4)
493 && (*protocol == IP_PROTOCOL_ICMP))) {
494 process_arpicmp_pkt(pkt, ifm_get_port(in_port_id));
/*
 * Detect and consume ICMPv6 (neighbor discovery / ping6) packets.
 * If the EtherType is IPv6 and the IPv6 next-header is ICMPv6, the
 * packet is handed to process_icmpv6_pkt() for the ingress port.
 * NOTE(review): the return statements are not visible in this chunk —
 * likely a consumed/not-consumed flag; confirm against the full source.
 */
500 static inline int check_arpicmpv6(struct rte_mbuf *pkt)
502 struct ether_hdr *eth_h;
503 struct ipv6_hdr *ipv6_h;
504 uint8_t in_port_id = pkt->port;
505 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
506 uint16_t *eth_proto =
507 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
508 eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
509 ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
510 if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV6)
511 && (ipv6_h->proto == ICMPV6_PROTOCOL_ID)) {
512 process_icmpv6_pkt(pkt, ifm_get_port(in_port_id));
/*
 * Hash callback for the IPv6 exact-match rte_hash table.
 * Folds the IPv6 5-tuple into init_val: protocol (presumably in `t`,
 * declared on a missing line — TODO confirm), the 128-bit src and dst
 * addresses (as four 32-bit words each on the CRC path, or via
 * rte_jhash over the 16 bytes otherwise), and the packed src/dst port
 * pair via `p`.
 */
518 static inline uint32_t
519 ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len, uint32_t init_val)
521 const union ipv6_5tuple_host *k;
524 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
525 const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
526 const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
527 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
531 p = (const uint32_t *)&k->port_src;
533 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
534 ip_src0 = (const uint32_t *) k->ip_src;
535 ip_src1 = (const uint32_t *)(k->ip_src+4);
536 ip_src2 = (const uint32_t *)(k->ip_src+8);
537 ip_src3 = (const uint32_t *)(k->ip_src+12);
538 ip_dst0 = (const uint32_t *) k->ip_dst;
539 ip_dst1 = (const uint32_t *)(k->ip_dst+4);
540 ip_dst2 = (const uint32_t *)(k->ip_dst+8);
541 ip_dst3 = (const uint32_t *)(k->ip_dst+12);
542 init_val = rte_hash_crc_4byte(t, init_val);
543 init_val = rte_hash_crc_4byte(*ip_src0, init_val);
544 init_val = rte_hash_crc_4byte(*ip_src1, init_val);
545 init_val = rte_hash_crc_4byte(*ip_src2, init_val);
546 init_val = rte_hash_crc_4byte(*ip_src3, init_val);
547 init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
548 init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
549 init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
550 init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
551 init_val = rte_hash_crc_4byte(*p, init_val);
552 #else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
553 init_val = rte_jhash_1word(t, init_val);
554 init_val = rte_jhash(k->ip_src, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
555 init_val = rte_jhash(k->ip_dst, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
556 init_val = rte_jhash_1word(*p, init_val);
557 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
561 #define IPV4_UDP_Replay_NUM_ROUTES \
562 (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
564 #define IPV6_UDP_Replay_NUM_ROUTES \
565 (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
567 static uint8_t ipv4_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
568 static uint8_t ipv6_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
572 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
573 struct ipv4_udp_replay_route {
579 struct ipv6_udp_replay_route {
585 static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
586 {IPv4(1,1,1,0), 24, 0},
587 {IPv4(2,1,1,0), 24, 1},
588 {IPv4(3,1,1,0), 24, 2},
589 {IPv4(4,1,1,0), 24, 3},
590 {IPv4(5,1,1,0), 24, 4},
591 {IPv4(6,1,1,0), 24, 5},
592 {IPv4(7,1,1,0), 24, 6},
593 {IPv4(8,1,1,0), 24, 7},
596 static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
597 {{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
598 {{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
599 {{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
600 {{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
601 {{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
602 {{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
603 {{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
604 {{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
607 #define IPV4_UDP_Replay_NUM_ROUTES \
608 (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
609 #define IPV6_UDP_Replay_NUM_ROUTES \
610 (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
612 #define IPV4_UDP_Replay_LPM_MAX_RULES 1024
613 #define IPV6_UDP_Replay_LPM_MAX_RULES 1024
614 #define IPV6_UDP_Replay_LPM_NUMBER_TBL8S (1 << 16)
616 typedef struct rte_lpm lookup_struct_t;
617 typedef struct rte_lpm6 lookup6_struct_t;
618 static lookup_struct_t *ipv4_udp_replay_lookup_struct[NB_SOCKETS];
619 static lookup6_struct_t *ipv6_udp_replay_lookup_struct[NB_SOCKETS];
624 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
625 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
626 struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
627 lookup_struct_t * ipv4_lookup_struct;
628 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
629 lookup6_struct_t * ipv6_lookup_struct;
631 lookup_struct_t * ipv6_lookup_struct;
633 } __rte_cache_aligned;
635 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
637 /* Send burst of packets on an output interface */
/*
 * Transmit the first n mbufs buffered for `port` on its per-lcore TX
 * queue.  Any mbufs the NIC did not accept are freed (this VNF drops
 * rather than retries), and the per-port tx_pkt_count is updated with
 * the number actually sent.
 * NOTE(review): local declarations and the loop around the free are on
 * lines missing from this chunk.
 */
639 send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
641 struct rte_mbuf **m_table;
645 queueid = qconf->tx_queue_id[port];
646 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
648 ret = rte_eth_tx_burst(port, queueid, m_table, n);
649 if (unlikely(ret < n)) {
651 rte_pktmbuf_free(m_table[ret]);
655 tx_pkt_count[port] += ret;
659 /* Enqueue a single packet, and send burst if queue is filled */
/*
 * Append mbuf m to the calling lcore's per-port TX buffer; once the
 * buffer reaches MAX_PKT_BURST entries it is flushed via send_burst()
 * (the len reset after the flush is on a line missing from this chunk).
 */
661 send_single_packet(struct rte_mbuf *m, uint8_t port)
665 struct lcore_conf *qconf;
667 lcore_id = rte_lcore_id();
669 qconf = &lcore_conf[lcore_id];
670 len = qconf->tx_mbufs[port].len;
671 qconf->tx_mbufs[port].m_table[len] = m;
674 /* enough pkts to be sent */
675 if (unlikely(len == MAX_PKT_BURST)) {
676 send_burst(qconf, MAX_PKT_BURST, port);
680 qconf->tx_mbufs[port].len = len;
684 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
/*
 * Enqueue up to `num` mbufs for TX on `port`, LPM fast path.
 * If the per-port buffer is empty and the batch is large enough
 * (>= MAX_TX_BURST), transmit directly and free any rejected mbufs;
 * otherwise copy packets into the buffer in FWDSTEP-unrolled chunks
 * (the case labels and loop heads of the two Duff's-device-style
 * switches are on lines missing from this chunk), flush when full,
 * then carry over the remainder.
 * NOTE(review): line 712 compares `n > MAX_PKT_BURST` — `n` is
 * presumably recomputed as len+num on a missing line; verify against
 * the full source, otherwise this bound check looks wrong.
 */
685 static inline __attribute__((always_inline)) void
686 send_packetsx4(struct lcore_conf *qconf, uint8_t port,
687 struct rte_mbuf *m[], uint32_t num)
691 len = qconf->tx_mbufs[port].len;
694 * If TX buffer for that queue is empty, and we have enough packets,
695 * then send them straightway.
697 if (num >= MAX_TX_BURST && len == 0) {
698 n = rte_eth_tx_burst(port, qconf->tx_queue_id[port], m, num);
699 if (unlikely(n < num)) {
701 rte_pktmbuf_free(m[n]);
708 * Put packets into TX buffer for that queue.
712 n = (n > MAX_PKT_BURST) ? MAX_PKT_BURST - len : num;
715 switch (n % FWDSTEP) {
718 qconf->tx_mbufs[port].m_table[len + j] = m[j];
721 qconf->tx_mbufs[port].m_table[len + j] = m[j];
724 qconf->tx_mbufs[port].m_table[len + j] = m[j];
727 qconf->tx_mbufs[port].m_table[len + j] = m[j];
734 /* enough pkts to be sent */
735 if (unlikely(len == MAX_PKT_BURST)) {
737 send_burst(qconf, MAX_PKT_BURST, port);
739 /* copy rest of the packets into the TX buffer. */
742 switch (len % FWDSTEP) {
745 qconf->tx_mbufs[port].m_table[j] = m[n + j];
748 qconf->tx_mbufs[port].m_table[j] = m[n + j];
751 qconf->tx_mbufs[port].m_table[j] = m[n + j];
754 qconf->tx_mbufs[port].m_table[j] = m[n + j];
760 qconf->tx_mbufs[port].len = len;
762 #endif /* APP_LOOKUP_LPM */
764 #ifdef DO_RFC_1812_CHECKS
/*
 * RFC 1812 section 5.2.2 sanity checks on a received IPv4 header.
 * Returns a negative value on failure (the return statements are on
 * lines missing from this chunk).  The IP checksum itself is assumed
 * verified by hardware (hw_ip_checksum is enabled in port_conf).
 * Note: rte_cpu_to_be_16 on total_length is a byte-swap either way, so
 * the comparison is equivalent to rte_be_to_cpu_16 here.
 */
766 is_valid_pkt_ipv4(struct ipv4_hdr *pkt, uint32_t link_len)
768 /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
770 * 1. The packet length reported by the Link Layer must be large
771 * enough to hold the minimum length legal IP datagram (20 bytes).
773 if (link_len < sizeof(struct ipv4_hdr))
776 /* 2. The IP checksum must be correct. */
777 /* this is checked in H/W */
780 * 3. The IP version number must be 4. If the version number is not 4
781 * then the packet may be another version of IP, such as IPng or
784 if (((pkt->version_ihl) >> 4) != 4)
787 * 4. The IP header length field must be large enough to hold the
788 * minimum length legal IP datagram (20 bytes = 5 words).
790 if ((pkt->version_ihl & 0xf) < 5)
794 * 5. The IP total length field must be large enough to hold the IP
795 * datagram header, whose length is specified in the IP header length
798 if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
805 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
/* SSE masks that zero the non-5-tuple bytes of the loaded header data;
 * initialized elsewhere in the file (not visible in this chunk). */
807 static __m128i mask0;
808 static __m128i mask1;
809 static __m128i mask2;
/*
 * Exact-match lookup of the output port for an IPv4 packet.
 * Loads 16 bytes starting at time_to_live (covering proto, src/dst IP
 * and src/dst ports), masks out non-key bytes, and queries the hash
 * table.  On a miss, echoes the input portid (i.e. send back out the
 * receive port).
 */
810 static inline uint8_t
811 get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
814 union ipv4_5tuple_host key;
816 ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
817 __m128i data = _mm_loadu_si128((__m128i*)(ipv4_hdr));
818 /* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */
819 key.xmm = _mm_and_si128(data, mask0);
820 /* Find destination port */
821 ret = rte_hash_lookup(ipv4_udp_replay_lookup_struct, (const void *)&key);
822 return (uint8_t)((ret < 0)? portid : ipv4_udp_replay_out_if[ret]);
/*
 * Exact-match lookup of the output port for an IPv6 packet.
 * Loads three 16-byte chunks starting at payload_len (covering proto,
 * the 128-bit src/dst addresses and the ports), masks the first and
 * last chunks to key fields only (the xmm[1] assignment is on a line
 * missing from this chunk), and queries the hash table.  On a miss,
 * echoes the input portid.
 */
825 static inline uint8_t
826 get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup_struct_t * ipv6_udp_replay_lookup_struct)
829 union ipv6_5tuple_host key;
831 ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
832 __m128i data0 = _mm_loadu_si128((__m128i*)(ipv6_hdr));
833 __m128i data1 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)));
834 __m128i data2 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)+sizeof(__m128i)));
835 /* Get part of 5 tuple: src IP address lower 96 bits and protocol */
836 key.xmm[0] = _mm_and_si128(data0, mask1);
837 /* Get part of 5 tuple: dst IP address lower 96 bits and src IP address higher 32 bits */
839 /* Get part of 5 tuple: dst port and src port and dst IP address higher 32 bits */
840 key.xmm[2] = _mm_and_si128(data2, mask2);
842 /* Find destination port */
843 ret = rte_hash_lookup(ipv6_udp_replay_lookup_struct, (const void *)&key);
844 return (uint8_t)((ret < 0)? portid : ipv6_udp_replay_out_if[ret]);
848 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
/*
 * LPM lookup of the output port for an IPv4 packet: longest-prefix
 * match on the destination address; falls back to the input portid on
 * a miss (next_hop declaration is on a line missing from this chunk).
 */
850 static inline uint8_t
851 get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
855 return (uint8_t) ((rte_lpm_lookup(ipv4_udp_replay_lookup_struct,
856 rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
857 &next_hop) == 0) ? next_hop : portid);
/*
 * LPM6 lookup of the output port for an IPv6 packet; the fallback-to-
 * portid expression continues on a line missing from this chunk.
 */
860 static inline uint8_t
861 get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup6_struct_t * ipv6_udp_replay_lookup_struct)
864 return (uint8_t) ((rte_lpm6_lookup(ipv6_udp_replay_lookup_struct,
865 ((struct ipv6_hdr*)ipv6_hdr)->dst_addr, &next_hop) == 0)?
870 static inline void udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid,
871 struct lcore_conf *qconf) __attribute__((unused));
873 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
874 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
876 #define MASK_ALL_PKTS 0xff
877 #define EXCLUDE_1ST_PKT 0xfe
878 #define EXCLUDE_2ND_PKT 0xfd
879 #define EXCLUDE_3RD_PKT 0xfb
880 #define EXCLUDE_4TH_PKT 0xf7
881 #define EXCLUDE_5TH_PKT 0xef
882 #define EXCLUDE_6TH_PKT 0xdf
883 #define EXCLUDE_7TH_PKT 0xbf
884 #define EXCLUDE_8TH_PKT 0x7f
/*
 * Replay a burst of 8 IPv4/UDP packets back out the receive port:
 * swap src/dst MAC, src/dst IP and src/dst UDP port in place, then
 * enqueue each packet for TX on the same port.  When arp_support is
 * enabled, ARP/ICMP control packets are intercepted first (the eight
 * check_arpicmp() calls are on lines missing from this chunk).
 * NOTE(review): the `ð_hdr` tokens below are HTML-entity mojibake of
 * `&eth_hdr` (encoding corruption in this listing) — restore from the
 * canonical source; left byte-identical here.
 */
887 simple_ipv4_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
889 struct ether_hdr *eth_hdr[8];
890 struct ether_hdr tmp;
891 struct ipv4_hdr *ipv4_hdr[8];
892 struct udp_hdr *udp_hdr[8];
894 l2_phy_interface_t *port = ifm_get_port(portid);
896 printf("port may be un initialized\n");
899 if (unlikely(arp_support)) {
910 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
911 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
912 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
913 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
914 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
915 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
916 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
917 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
/* Swap source and destination MAC for each packet (loop head on a
 * missing line; `tmp` is the scratch header). */
920 memset(&tmp,0,sizeof (struct ether_hdr));
925 ether_addr_copy(ð_hdr[i]->s_addr, &tmp.s_addr);
926 ether_addr_copy(ð_hdr[i]->d_addr, ð_hdr[i]->s_addr);
927 ether_addr_copy(&tmp.s_addr, ð_hdr[i]->d_addr);
930 /* Handle IPv4 headers.*/
931 ipv4_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv4_hdr *,
932 sizeof(struct ether_hdr));
933 ipv4_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv4_hdr *,
934 sizeof(struct ether_hdr));
935 ipv4_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv4_hdr *,
936 sizeof(struct ether_hdr));
937 ipv4_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv4_hdr *,
938 sizeof(struct ether_hdr));
939 ipv4_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv4_hdr *,
940 sizeof(struct ether_hdr));
941 ipv4_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv4_hdr *,
942 sizeof(struct ether_hdr));
943 ipv4_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv4_hdr *,
944 sizeof(struct ether_hdr));
945 ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv4_hdr *,
946 sizeof(struct ether_hdr));
/* Swap src/dst IPv4 addresses, fully unrolled for the 8 packets. */
947 struct ipv4_hdr temp_ipv4;
948 temp_ipv4.dst_addr = ipv4_hdr[0]->dst_addr;
949 ipv4_hdr[0]->dst_addr = ipv4_hdr[0]->src_addr;
950 ipv4_hdr[0]->src_addr = temp_ipv4.dst_addr;
951 temp_ipv4.dst_addr = ipv4_hdr[1]->dst_addr;
952 ipv4_hdr[1]->dst_addr = ipv4_hdr[1]->src_addr;
953 ipv4_hdr[1]->src_addr = temp_ipv4.dst_addr;
954 temp_ipv4.dst_addr = ipv4_hdr[2]->dst_addr;
955 ipv4_hdr[2]->dst_addr = ipv4_hdr[2]->src_addr;
956 ipv4_hdr[2]->src_addr = temp_ipv4.dst_addr;
957 temp_ipv4.dst_addr = ipv4_hdr[3]->dst_addr;
958 ipv4_hdr[3]->dst_addr = ipv4_hdr[3]->src_addr;
959 ipv4_hdr[3]->src_addr = temp_ipv4.dst_addr;
960 temp_ipv4.dst_addr = ipv4_hdr[4]->dst_addr;
961 ipv4_hdr[4]->dst_addr = ipv4_hdr[4]->src_addr;
962 ipv4_hdr[4]->src_addr = temp_ipv4.dst_addr;
963 temp_ipv4.dst_addr = ipv4_hdr[5]->dst_addr;
964 ipv4_hdr[5]->dst_addr = ipv4_hdr[5]->src_addr;
965 ipv4_hdr[5]->src_addr = temp_ipv4.dst_addr;
966 temp_ipv4.dst_addr = ipv4_hdr[6]->dst_addr;
967 ipv4_hdr[6]->dst_addr = ipv4_hdr[6]->src_addr;
968 ipv4_hdr[6]->src_addr = temp_ipv4.dst_addr;
969 temp_ipv4.dst_addr = ipv4_hdr[7]->dst_addr;
970 ipv4_hdr[7]->dst_addr = ipv4_hdr[7]->src_addr;
971 ipv4_hdr[7]->src_addr = temp_ipv4.dst_addr;
973 /* Handle UDP headers.*/
974 udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
975 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
977 udp_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct udp_hdr *,
978 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
979 udp_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct udp_hdr *,
980 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
981 udp_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct udp_hdr *,
982 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
983 udp_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct udp_hdr *,
984 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
985 udp_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct udp_hdr *,
986 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
987 udp_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct udp_hdr *,
988 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
989 udp_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct udp_hdr *,
990 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
991 /*1) memcpy or assignment.*/
/* Swap src/dst UDP ports, fully unrolled.  Note: assumes a UDP header
 * directly follows a 20-byte IPv4 header (no IP options) — TODO
 * confirm the RFC1812 IHL check below is the only guard. */
993 struct udp_hdr temp_udp;
994 temp_udp.dst_port = udp_hdr[0]->dst_port;
995 udp_hdr[0]->dst_port = udp_hdr[0]->src_port;
996 udp_hdr[0]->src_port = temp_udp.dst_port;
997 temp_udp.dst_port = udp_hdr[1]->dst_port;
998 udp_hdr[1]->dst_port = udp_hdr[1]->src_port;
999 udp_hdr[1]->src_port = temp_udp.dst_port;
1000 temp_udp.dst_port = udp_hdr[2]->dst_port;
1001 udp_hdr[2]->dst_port = udp_hdr[2]->src_port;
1002 udp_hdr[2]->src_port = temp_udp.dst_port;
1003 temp_udp.dst_port = udp_hdr[3]->dst_port;
1004 udp_hdr[3]->dst_port = udp_hdr[3]->src_port;
1005 udp_hdr[3]->src_port = temp_udp.dst_port;
1006 temp_udp.dst_port = udp_hdr[4]->dst_port;
1007 udp_hdr[4]->dst_port = udp_hdr[4]->src_port;
1008 udp_hdr[4]->src_port = temp_udp.dst_port;
1009 temp_udp.dst_port = udp_hdr[5]->dst_port;
1010 udp_hdr[5]->dst_port = udp_hdr[5]->src_port;
1011 udp_hdr[5]->src_port = temp_udp.dst_port;
1012 temp_udp.dst_port = udp_hdr[6]->dst_port;
1013 udp_hdr[6]->dst_port = udp_hdr[6]->src_port;
1014 udp_hdr[6]->src_port = temp_udp.dst_port;
1015 temp_udp.dst_port = udp_hdr[7]->dst_port;
1016 udp_hdr[7]->dst_port = udp_hdr[7]->src_port;
1017 udp_hdr[7]->src_port = temp_udp.dst_port;
1018 #ifdef DO_RFC_1812_CHECKS
1019 /* Check to make sure the packet is valid (RFC1812) */
1020 uint8_t valid_mask = MASK_ALL_PKTS;
1021 if (is_valid_pkt_ipv4(ipv4_hdr[0], m[0]->pkt_len) < 0) {
1022 rte_pktmbuf_free(m[0]);
1023 valid_mask &= EXCLUDE_1ST_PKT;
1025 if (is_valid_pkt_ipv4(ipv4_hdr[1], m[1]->pkt_len) < 0) {
1026 rte_pktmbuf_free(m[1]);
1027 valid_mask &= EXCLUDE_2ND_PKT;
1029 if (is_valid_pkt_ipv4(ipv4_hdr[2], m[2]->pkt_len) < 0) {
1030 rte_pktmbuf_free(m[2]);
1031 valid_mask &= EXCLUDE_3RD_PKT;
1033 if (is_valid_pkt_ipv4(ipv4_hdr[3], m[3]->pkt_len) < 0) {
1034 rte_pktmbuf_free(m[3]);
1035 valid_mask &= EXCLUDE_4TH_PKT;
1037 if (is_valid_pkt_ipv4(ipv4_hdr[4], m[4]->pkt_len) < 0) {
1038 rte_pktmbuf_free(m[4]);
1039 valid_mask &= EXCLUDE_5TH_PKT;
1041 if (is_valid_pkt_ipv4(ipv4_hdr[5], m[5]->pkt_len) < 0) {
1042 rte_pktmbuf_free(m[5]);
1043 valid_mask &= EXCLUDE_6TH_PKT;
1045 if (is_valid_pkt_ipv4(ipv4_hdr[6], m[6]->pkt_len) < 0) {
1046 rte_pktmbuf_free(m[6]);
1047 valid_mask &= EXCLUDE_7TH_PKT;
1049 if (is_valid_pkt_ipv4(ipv4_hdr[7], m[7]->pkt_len) < 0) {
1050 rte_pktmbuf_free(m[7]);
1051 valid_mask &= EXCLUDE_8TH_PKT;
/* If any packet was dropped, fall back to the scalar path for the
 * survivors and return (return statements on missing lines). */
1053 if (unlikely(valid_mask != MASK_ALL_PKTS)) {
1054 if (valid_mask == 0){
1058 for (i = 0; i < 8; i++) {
1059 if ((0x1 << i) & valid_mask) {
1060 udp_replay_simple_replay(m[i], portid, qconf);
1066 #endif // End of #ifdef DO_RFC_1812_CHECKS
1068 #ifdef DO_RFC_1812_CHECKS
1069 /* Update time to live and header checksum */
/* l3fwd-style incremental update: TTL-1 is compensated by bumping the
 * one's-complement checksum by one — presumably carried over from
 * l3fwd; TODO confirm a replay VNF should decrement TTL at all. */
1070 --(ipv4_hdr[0]->time_to_live);
1071 --(ipv4_hdr[1]->time_to_live);
1072 --(ipv4_hdr[2]->time_to_live);
1073 --(ipv4_hdr[3]->time_to_live);
1074 ++(ipv4_hdr[0]->hdr_checksum);
1075 ++(ipv4_hdr[1]->hdr_checksum);
1076 ++(ipv4_hdr[2]->hdr_checksum);
1077 ++(ipv4_hdr[3]->hdr_checksum);
1078 --(ipv4_hdr[4]->time_to_live);
1079 --(ipv4_hdr[5]->time_to_live);
1080 --(ipv4_hdr[6]->time_to_live);
1081 --(ipv4_hdr[7]->time_to_live);
1082 ++(ipv4_hdr[4]->hdr_checksum);
1083 ++(ipv4_hdr[5]->hdr_checksum);
1084 ++(ipv4_hdr[6]->hdr_checksum);
1085 ++(ipv4_hdr[7]->hdr_checksum);
/* Send each modified packet back out the port it arrived on. */
1088 send_single_packet(m[0],portid );
1089 send_single_packet(m[1],portid );
1090 send_single_packet(m[2],portid );
1091 send_single_packet(m[3],portid);
1092 send_single_packet(m[4],portid);
1093 send_single_packet(m[5],portid);
1094 send_single_packet(m[6],portid);
1095 send_single_packet(m[7],portid);
/*
 * Extract the IPv6 5-tuple key from packet m0 into *key.
 * Loads three unaligned 16-byte chunks starting at the IPv6
 * payload_len field; mask0/mask1 zero the bytes that are not part of
 * the key (proto + addresses + ports), matching the layout used by
 * ipv6_hash_crc() and the exact-match table.
 */
1099 static inline void get_ipv6_5tuple(struct rte_mbuf* m0, __m128i mask0, __m128i mask1,
1100 union ipv6_5tuple_host * key)
1102 __m128i tmpdata0 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)));
1103 __m128i tmpdata1 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i)));
1104 __m128i tmpdata2 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i) + sizeof(__m128i)));
1105 key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
1106 key->xmm[1] = tmpdata1;
1107 key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
/*
 * simple_ipv6_replay_8pkts() - bounce a burst of 8 IPv6/UDP packets back out
 * on the port they arrived on: swap src/dst MAC, swap src/dst IPv6 address,
 * swap UDP src/dst port, then transmit each mbuf unchanged otherwise.
 * NOTE(review): this is an elided listing; loop headers, braces and the
 * "#else" branches between the numbered lines are not visible here.
 */
1112 simple_ipv6_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
1114 struct ether_hdr *eth_hdr[8],tmp;
1116 __attribute__((unused)) struct ipv6_hdr *ipv6_hdr[8], temp_ipv6;
1118 union ipv6_5tuple_host key[8];
1119 struct udp_hdr *udp_hdr[8];
1120 l2_phy_interface_t *port = ifm_get_port(portid);
/* Failure to resolve the port is only logged (guard body elided from view). */
1122 printf("port may be un initialized\n");
1126 if (unlikely(arp_support)) {
/* ARP/ICMPv6 control packets are intercepted per-mbuf before replay. */
1127 check_arpicmpv6(m[0]);
1128 check_arpicmpv6(m[1]);
1129 check_arpicmpv6(m[2]);
1130 check_arpicmpv6(m[3]);
1131 check_arpicmpv6(m[4]);
1132 check_arpicmpv6(m[5]);
1133 check_arpicmpv6(m[6]);
1134 check_arpicmpv6(m[7]);
/* Resolve the Ethernet header of each of the 8 mbufs. */
1138 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
1139 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
1140 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
1141 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
1142 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
1143 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
1144 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
1145 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
1147 memset(&tmp,0,sizeof (struct ether_hdr));
/*
 * Swap source and destination MAC via the scratch 'tmp' header.
 * NOTE(review): "ð_hdr" below is mojibake for "&eth_hdr" — the HTML
 * entity "&eth" was collapsed to 'ð' by some export step.  Restore the
 * original "&eth_hdr" before this file can compile.
 * (The enclosing per-mbuf loop header is elided from this view.)
 */
1151 ether_addr_copy(ð_hdr[i]->s_addr, &tmp.s_addr);
1152 ether_addr_copy(ð_hdr[i]->d_addr, ð_hdr[i]->s_addr);
1153 ether_addr_copy(&tmp.s_addr, ð_hdr[i]->d_addr);
1155 /* Handle IPv6 headers.*/
1156 ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv6_hdr *,
1157 sizeof(struct ether_hdr));
1158 ipv6_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv6_hdr *,
1159 sizeof(struct ether_hdr));
1160 ipv6_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv6_hdr *,
1161 sizeof(struct ether_hdr));
1162 ipv6_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv6_hdr *,
1163 sizeof(struct ether_hdr));
1164 ipv6_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv6_hdr *,
1165 sizeof(struct ether_hdr));
1166 ipv6_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv6_hdr *,
1167 sizeof(struct ether_hdr));
1168 ipv6_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv6_hdr *,
1169 sizeof(struct ether_hdr));
1170 ipv6_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv6_hdr *,
1171 sizeof(struct ether_hdr));
/* Swap the 16-byte IPv6 src/dst addresses through temp_ipv6 (per-mbuf loop elided). */
1174 memcpy(temp_ipv6.dst_addr,ipv6_hdr[i]->dst_addr,16);
1175 memcpy(ipv6_hdr[i]->dst_addr,ipv6_hdr[i]->src_addr,16);
1176 memcpy(ipv6_hdr[i]->src_addr,temp_ipv6.dst_addr,16);
1179 /* Handle UDP headers.*/
1180 udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
1181 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1183 udp_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct udp_hdr *,
1184 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1185 udp_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct udp_hdr *,
1186 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1187 udp_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct udp_hdr *,
1188 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1189 udp_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct udp_hdr *,
1190 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1191 udp_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct udp_hdr *,
1192 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1193 udp_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct udp_hdr *,
1194 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1195 udp_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct udp_hdr *,
1196 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1197 /*1) memcpy or assignment.*/
1199 struct udp_hdr temp_udp;
/* Swap UDP src/dst port; only dst_port of the scratch header is used. */
1202 temp_udp.dst_port = udp_hdr[i]->dst_port;
1203 udp_hdr[i]->dst_port = udp_hdr[i]->src_port;
1204 udp_hdr[i]->src_port = temp_udp.dst_port;
1206 const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
1207 &key[4], &key[5], &key[6], &key[7]};
/* Bulk hash lookup API was renamed in DPDK; pick by version. */
1208 #if RTE_VERSION < 0x100b0000
1209 rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
1211 rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
/* Reflect all 8 packets back out the ingress port. */
1213 send_single_packet(m[0],portid);
1214 send_single_packet(m[1],portid);
1215 send_single_packet(m[2],portid);
1216 send_single_packet(m[3],portid);
1217 send_single_packet(m[4],portid);
1218 send_single_packet(m[5],portid);
1219 send_single_packet(m[6],portid);
1220 send_single_packet(m[7],portid);
1223 #endif /* APP_LOOKUP_METHOD */
/*
 * udp_replay_simple_replay() - scalar (one-mbuf) replay path.
 * Swaps MAC, IP (v4 or v6) and UDP ports in place and retransmits on the
 * ingress port; frees the mbuf for non-IP ethertypes.
 * NOTE(review): elided listing — if-bodies/braces between numbered lines
 * are not visible; "ð_hdr" is mojibake for "&eth_hdr" (entity collapse).
 */
1225 static inline __attribute__((always_inline)) void
1226 udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf)
1228 struct ether_hdr *eth_hdr,tmp;
1229 struct ipv4_hdr *ipv4_hdr,temp_ipv4;
1230 struct udp_hdr *udp_hdr,temp_udp;
1231 l2_phy_interface_t *port = ifm_get_port(portid);
1234 printf("port may be un initialized\n");
1238 printf("Null packet received\n");
1241 if (unlikely(arp_support)) {
/* ARP/ICMP packets are consumed by check_arpicmp(); guard body elided. */
1242 if (!check_arpicmp(m))
1246 printf("qconf configuration is NULL\n");
1247 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
/* Swap src/dst MAC through the scratch 'tmp' header. */
1248 ether_addr_copy(ð_hdr->s_addr, &tmp.s_addr);
1249 ether_addr_copy(ð_hdr->d_addr, ð_hdr->s_addr);
1250 ether_addr_copy(&tmp.s_addr, ð_hdr->d_addr);
1251 struct ether_hdr *eth_h = rte_pktmbuf_mtod(m, struct ether_hdr *);
/*
 * NOTE(review): rte_cpu_to_be_16() is applied to a field that is already
 * big-endian on the wire; this only works on little-endian hosts where it
 * is the same byte swap as rte_be_to_cpu_16() — the latter is the correct
 * call.
 */
1253 if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
1254 /* Handle IPv4 headers.*/
1255 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
1256 sizeof(struct ether_hdr));
/* Swap src/dst IPv4 address; only dst_addr of the scratch header is used. */
1257 temp_ipv4.dst_addr = ipv4_hdr->dst_addr;
1258 ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
1259 ipv4_hdr->src_addr = temp_ipv4.dst_addr;
1260 #ifdef DO_RFC_1812_CHECKS
1261 /* Check to make sure the packet is valid (RFC1812) */
1262 if (is_valid_pkt_ipv4(ipv4_hdr, m->pkt_len) < 0) {
1263 rte_pktmbuf_free(m);
1269 #ifdef DO_RFC_1812_CHECKS
1270 /* Update time to live and header checksum */
/*
 * Decrementing TTL and incrementing the checksum is the l3fwd-style
 * incremental update shortcut (cf. RFC 1141/1624) — assumes the TTL byte
 * position makes ++checksum the correct adjustment; TODO confirm for this
 * header layout.
 */
1271 --(ipv4_hdr->time_to_live);
1272 ++(ipv4_hdr->hdr_checksum);
1274 /* Handle UDP headers.*/
1275 udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
1276 (sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr)));
1278 /*Swapping Src and Dst Port*/
1279 temp_udp.dst_port = udp_hdr->dst_port;
1280 udp_hdr->dst_port = udp_hdr->src_port;
1281 udp_hdr->src_port = temp_udp.dst_port;
1283 send_single_packet(m, portid);
1284 } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
1285 /* Handle IPv6 headers.*/
1286 struct ipv6_hdr *ipv6_hdr,temp_ipv6;
1288 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
1289 sizeof(struct ether_hdr));
1291 /*Swapping of Src and Dst IP address*/
1292 memcpy(temp_ipv6.dst_addr,ipv6_hdr->dst_addr,16);
1293 memcpy(ipv6_hdr->dst_addr,ipv6_hdr->src_addr,16);
1294 memcpy(ipv6_hdr->src_addr,temp_ipv6.dst_addr,16);
1296 /* Handle UDP headers.*/
1297 udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
1298 (sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr)));
1299 /*Swapping Src and Dst Port*/
1300 temp_udp.dst_port = udp_hdr->dst_port;
1301 udp_hdr->dst_port = udp_hdr->src_port;
1302 udp_hdr->src_port = temp_udp.dst_port;
1303 send_single_packet(m, portid);
1305 /* Free the mbuf that contains non-IPV4/IPV6 packet */
1306 rte_pktmbuf_free(m);
1309 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1310 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1311 #ifdef DO_RFC_1812_CHECKS
/* version_ihl byte for a minimal (5-word) / maximal (15-word) IPv4 header. */
1313 #define IPV4_MIN_VER_IHL 0x45
1314 #define IPV4_MAX_VER_IHL 0x4f
1315 #define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)
1317 /* Minimum value of IPV4 total length (20B) in network byte order. */
1318 #define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
1321 * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
1322 * - The IP version number must be 4.
1323 * - The IP header length field must be large enough to hold the
1324 * minimum length legal IP datagram (20 bytes = 5 words).
1325 * - The IP total length field must be large enough to hold the IP
1326 * datagram header, whose length is specified in the IP header length
1328 * If we encounter invalid IPV4 packet, then set destination port for it
1329 * to BAD_PORT value.
/*
 * rfc1812_process() - validate an IPv4 header per RFC1812 and age TTL.
 * On failure marks *dp = BAD_PORT (assignment elided from this view).
 */
1331 static inline __attribute__((always_inline)) void
1332 rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
1336 if (RTE_ETH_IS_IPV4_HDR(ptype)) {
/* ihl == 0 means a minimal header; > DIFF means version != 4 or IHL < 5. */
1337 ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;
/* Incremental TTL/checksum update, same shortcut as the scalar path. */
1339 ipv4_hdr->time_to_live--;
1340 ipv4_hdr->hdr_checksum++;
/*
 * total_length is big-endian here: the (uint8_t) cast inspects its low
 * byte so the comparison against IPV4_MIN_LEN_BE (20 << 8) is a
 * network-byte-order length check — TODO confirm against upstream l3fwd.
 */
1342 if (ihl > IPV4_MAX_VER_IHL_DIFF ||
1343 ((uint8_t)ipv4_hdr->total_length == 0 &&
1344 ipv4_hdr->total_length < IPV4_MIN_LEN_BE)) {
/* When RFC1812 checks are compiled out the hook is a no-op. */
1351 #define rfc1812_process(mb, dp) do { } while (0)
1352 #endif /* DO_RFC_1812_CHECKS */
1353 #endif /* APP_LOOKUP_LPM && ENABLE_MULTI_BUFFER_OPTIMIZE */
1356 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1357 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
/*
 * get_dst_port() - LPM lookup of the egress port for one packet; falls
 * back to the ingress 'portid' on lookup failure (fallback lines elided).
 */
1359 static inline __attribute__((always_inline)) uint16_t
1360 get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
1361 uint32_t dst_ipv4, uint8_t portid)
1364 struct ipv6_hdr *ipv6_hdr;
1365 struct ether_hdr *eth_hdr;
/*
 * NOTE(review): 'm' is not a parameter or visible local of this function
 * (the parameter is 'pkt') — this line will not compile as shown; it
 * almost certainly should read rte_pktmbuf_mtod(pkt, ...).
 */
1366 struct ether_hdr *eth_h = rte_pktmbuf_mtod(m, struct ether_hdr *);
1368 if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
1369 if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
1372 } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
1373 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1374 ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
1375 if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
1376 ipv6_hdr->dst_addr, &next_hop) != 0)
/*
 * process_packet() - single-packet LPM path: resolve egress port, rewrite
 * the first 16 bytes of the Ethernet header from val_eth[] via an SSE
 * blend, and run the RFC1812 checks.  'te'/'ve'/'dp' declarations and the
 * dst_port store are in lines elided from this view.
 */
1386 process_packet(struct lcore_conf *qconf, struct rte_mbuf *pkt,
1387 uint16_t *dst_port, uint8_t portid)
1389 struct ether_hdr *eth_hdr;
1390 struct ipv4_hdr *ipv4_hdr;
1395 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1396 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
/* dst address is converted to host order for the LPM lookup. */
1399 dst_ipv4 = ipv4_hdr->dst_addr;
1400 dst_ipv4 = rte_be_to_cpu_32(dst_ipv4);
1402 /*Changing the dp to incoming port*/
1403 dp = get_dst_port(qconf, pkt, dst_ipv4, portid);
1406 te = _mm_loadu_si128((__m128i *)eth_hdr);
1410 rfc1812_process(ipv4_hdr, dst_port, pkt->packet_type);
/* MASK_ETH keeps the last 4 bytes of the 16-byte load intact (MAC rewrite only). */
1412 te = _mm_blend_epi16(te, ve, MASK_ETH);
1413 _mm_storeu_si128((__m128i *)eth_hdr, te);
1415 /* Won't be using the following function */
1418 * Read packet_type and destination IPV4 addresses from 4 mbufs.
/*
 * processx4_step1() - gather the IPv4 dst addresses of 4 mbufs into one
 * __m128i and AND their packet_type flags so ipv4_flag[0] is non-zero only
 * if all four are IPv4.  (The 'dip' output parameter line is elided.)
 */
1421 processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
1423 uint32_t *ipv4_flag)
1425 struct ipv4_hdr *ipv4_hdr;
1426 struct ether_hdr *eth_hdr;
1427 uint32_t x0, x1, x2, x3;
1429 eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
1430 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1431 x0 = ipv4_hdr->dst_addr;
1432 ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
1434 eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
1435 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1436 x1 = ipv4_hdr->dst_addr;
/* ANDing the raw packet_type keeps the RTE_PTYPE_L3_IPV4 bit only if set in all. */
1437 ipv4_flag[0] &= pkt[1]->packet_type;
1439 eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
1440 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1441 x2 = ipv4_hdr->dst_addr;
1442 ipv4_flag[0] &= pkt[2]->packet_type;
1444 eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
1445 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1446 x3 = ipv4_hdr->dst_addr;
1447 ipv4_flag[0] &= pkt[3]->packet_type;
1449 dip[0] = _mm_set_epi32(x3, x2, x1, x0);
1453 * Lookup into LPM for destination port.
1454 * If lookup fails, use incoming port (portid) as destination port.
/*
 * processx4_step2() - resolve egress ports for 4 packets at once.
 * Fast path: all four are IPv4 -> one rte_lpm_lookupx4.  Slow path: per
 * packet get_dst_port().  ('dip'/'ipv4_flag' parameter lines and the
 * 'dst' union declaration are elided from this view.)
 */
1457 processx4_step2(const struct lcore_conf *qconf,
1461 struct rte_mbuf *pkt[FWDSTEP],
1462 uint16_t dprt[FWDSTEP])
/* Shuffle mask converting the 4 big-endian addresses to host order. */
1465 const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
1466 4, 5, 6, 7, 0, 1, 2, 3);
1468 /* Byte swap 4 IPV4 addresses. */
1469 dip = _mm_shuffle_epi8(dip, bswap_mask);
1471 /* if all 4 packets are IPV4. */
1472 if (likely(ipv4_flag)) {
1473 rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid);
1476 dprt[0] = get_dst_port(qconf, pkt[0], dst.u32[0], portid);
1477 dprt[1] = get_dst_port(qconf, pkt[1], dst.u32[1], portid);
1478 dprt[2] = get_dst_port(qconf, pkt[2], dst.u32[2], portid);
1479 dprt[3] = get_dst_port(qconf, pkt[3], dst.u32[3], portid);
1484 * Update source and destination MAC addresses in the ethernet header.
1485 * Perform RFC1812 checks and updates for IPV4 packets.
/*
 * processx4_step3() - rewrite the MAC headers of 4 packets from
 * val_eth[dst_port] with a 12-byte SSE blend and run RFC1812 checks;
 * updates dst_port[] to BAD_PORT for invalid IPv4 headers.
 */
1488 processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
1490 __m128i te[FWDSTEP];
1491 __m128i ve[FWDSTEP];
1492 __m128i *p[FWDSTEP];
1494 p[0] = rte_pktmbuf_mtod(pkt[0], __m128i *);
1495 p[1] = rte_pktmbuf_mtod(pkt[1], __m128i *);
1496 p[2] = rte_pktmbuf_mtod(pkt[2], __m128i *);
1497 p[3] = rte_pktmbuf_mtod(pkt[3], __m128i *);
/* Load the precomputed dst/src MAC pair for each egress port. */
1499 ve[0] = val_eth[dst_port[0]];
1500 te[0] = _mm_loadu_si128(p[0]);
1502 ve[1] = val_eth[dst_port[1]];
1503 te[1] = _mm_loadu_si128(p[1]);
1505 ve[2] = val_eth[dst_port[2]];
1506 te[2] = _mm_loadu_si128(p[2]);
1508 ve[3] = val_eth[dst_port[3]];
1509 te[3] = _mm_loadu_si128(p[3]);
1511 /* Update first 12 bytes, keep rest bytes intact. */
1512 te[0] = _mm_blend_epi16(te[0], ve[0], MASK_ETH);
1513 te[1] = _mm_blend_epi16(te[1], ve[1], MASK_ETH);
1514 te[2] = _mm_blend_epi16(te[2], ve[2], MASK_ETH);
1515 te[3] = _mm_blend_epi16(te[3], ve[3], MASK_ETH);
1517 _mm_storeu_si128(p[0], te[0]);
1518 _mm_storeu_si128(p[1], te[1]);
1519 _mm_storeu_si128(p[2], te[2]);
1520 _mm_storeu_si128(p[3], te[3]);
/* The IPv4 header starts immediately after the (untagged) Ethernet header. */
1522 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
1523 &dst_port[0], pkt[0]->packet_type);
1524 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
1525 &dst_port[1], pkt[1]->packet_type);
1526 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
1527 &dst_port[2], pkt[2]->packet_type);
1528 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
1529 &dst_port[3], pkt[3]->packet_type);
1533 * We group consecutive packets with the same destination port into one burst.
1534 * To avoid extra latency this is done together with some other packet
1535 * processing, but after we made a final decision about packet's destination.
1536 * To do this we maintain:
1537 * pnum - array of number of consecutive packets with the same dest port for
1538 * each packet in the input burst.
1539 * lp - pointer to the last updated element in the pnum.
1540 * dlp - dest port value lp corresponds to.
1543 #define GRPSZ (1 << FWDSTEP)
1544 #define GRPMSK (GRPSZ - 1)
1546 #define GROUP_PORT_STEP(dlp, dcp, lp, pn, idx) do { \
1547 if (likely((dlp) == (dcp)[(idx)])) { \
1550 (dlp) = (dcp)[idx]; \
1551 (lp) = (pn) + (idx); \
1557 * Group consecutive packets with the same destination port in bursts of 4.
1558 * Suppose we have an array of destination ports:
1559 * dst_port[] = {a, b, c, d, e, ... }
1560 * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
1561 * We do 4 comparisons at once and the result is a 4-bit mask.
1562 * This mask is used as an index into prebuild array of pnum values.
/*
 * port_groupx4() - fold 4 dst-port comparisons (dp1 vs dp2, i.e. each port
 * against its successor) into a 4-bit mask and use it to index gptbl[],
 * which encodes the resulting run lengths (pnum), the index of the new
 * "last updated" element and the increment for the previous run.
 * Returns the updated 'lp' pointer.  (Table entries' .idx/.lpv fields and
 * the epilogue are elided from this view.)
 */
1564 static inline uint16_t *
1565 port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
1567 static const struct {
1568 uint64_t pnum; /* prebuild 4 values for pnum[]. */
1569 int32_t idx; /* index for new last updated element. */
1570 uint16_t lpv; /* add value to the last updated element. */
1573 /* 0: a != b, b != c, c != d, d != e */
1574 .pnum = UINT64_C(0x0001000100010001),
1579 /* 1: a == b, b != c, c != d, d != e */
1580 .pnum = UINT64_C(0x0001000100010002),
1585 /* 2: a != b, b == c, c != d, d != e */
1586 .pnum = UINT64_C(0x0001000100020001),
1591 /* 3: a == b, b == c, c != d, d != e */
1592 .pnum = UINT64_C(0x0001000100020003),
1597 /* 4: a != b, b != c, c == d, d != e */
1598 .pnum = UINT64_C(0x0001000200010001),
1603 /* 5: a == b, b != c, c == d, d != e */
1604 .pnum = UINT64_C(0x0001000200010002),
1609 /* 6: a != b, b == c, c == d, d != e */
1610 .pnum = UINT64_C(0x0001000200030001),
1615 /* 7: a == b, b == c, c == d, d != e */
1616 .pnum = UINT64_C(0x0001000200030004),
1621 /* 8: a != b, b != c, c != d, d == e */
1622 .pnum = UINT64_C(0x0002000100010001),
1627 /* 9: a == b, b != c, c != d, d == e */
1628 .pnum = UINT64_C(0x0002000100010002),
1633 /* 0xa: a != b, b == c, c != d, d == e */
1634 .pnum = UINT64_C(0x0002000100020001),
1639 /* 0xb: a == b, b == c, c != d, d == e */
1640 .pnum = UINT64_C(0x0002000100020003),
1645 /* 0xc: a != b, b != c, c == d, d == e */
1646 .pnum = UINT64_C(0x0002000300010001),
1651 /* 0xd: a == b, b != c, c == d, d == e */
1652 .pnum = UINT64_C(0x0002000300010002),
1657 /* 0xe: a != b, b == c, c == d, d == e */
1658 .pnum = UINT64_C(0x0002000300040001),
1663 /* 0xf: a == b, b == c, c == d, d == e */
1664 .pnum = UINT64_C(0x0002000300040005),
/* Alias pn[] as one uint64_t so a whole table row is stored at once. */
1671 uint16_t u16[FWDSTEP + 1];
1673 } *pnum = (void *)pn;
/* Compare neighbours, widen each 16-bit result to 32 bits, take sign bits. */
1677 dp1 = _mm_cmpeq_epi16(dp1, dp2);
1678 dp1 = _mm_unpacklo_epi16(dp1, dp1);
1679 v = _mm_movemask_ps((__m128)dp1);
1681 /* update last port counter. */
1682 lp[0] += gptbl[v].lpv;
1684 /* if dest port value has changed. */
1686 lp = pnum->u16 + gptbl[v].idx;
1688 pnum->u64 = gptbl[v].pnum;
1694 #endif /* APP_LOOKUP_METHOD */
1696 /* main processing loop */
/*
 * main_loop() - per-lcore worker: drains pending TX bursts on a TSC timer,
 * then polls every assigned RX queue and replays packets via either the
 * 8-at-a-time exact-match path, the FWDSTEP LPM path, or the scalar
 * prefetch path, depending on compile-time options.
 * NOTE(review): elided listing — the enclosing while(1), many braces and
 * several statements between the numbered lines are not visible here.
 */
1698 main_loop(__attribute__((unused)) void *dummy)
1700 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1702 uint64_t prev_tsc, diff_tsc, cur_tsc;
1704 uint8_t portid, queueid;
1705 struct lcore_conf *qconf;
1706 l2_phy_interface_t *port;
/* TX drain interval expressed in TSC cycles. */
1707 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
1708 US_PER_S * BURST_TX_DRAIN_US;
1710 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1711 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
/* Scratch arrays for the vectorized LPM path. */
1715 uint16_t dst_port[MAX_PKT_BURST];
1716 __m128i dip[MAX_PKT_BURST / FWDSTEP];
1717 uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
1718 uint16_t pnum[MAX_PKT_BURST + 1];
1723 lcore_id = rte_lcore_id();
1724 qconf = &lcore_conf[lcore_id];
1726 if (qconf->n_rx_queue == 0) {
1727 RTE_LOG(INFO, UDP_Replay, "lcore %u has nothing to do\n", lcore_id);
1731 RTE_LOG(INFO, UDP_Replay, "entering main loop on lcore %u\n", lcore_id);
1733 for (i = 0; i < qconf->n_rx_queue; i++) {
1735 portid = qconf->rx_queue_list[i].port_id;
1736 queueid = qconf->rx_queue_list[i].queue_id;
1737 RTE_LOG(INFO, UDP_Replay, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
1743 cur_tsc = rte_rdtsc();
1746 * TX burst queue drain
1748 diff_tsc = cur_tsc - prev_tsc;
1749 if (unlikely(diff_tsc > drain_tsc)) {
1752 * This could be optimized (use queueid instead of
1753 * portid), but it is not called so often
1755 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1756 if (qconf->tx_mbufs[portid].len == 0)
1759 qconf->tx_mbufs[portid].len,
1761 qconf->tx_mbufs[portid].len = 0;
1768 * Read packet from RX queues
1770 for (i = 0; i < qconf->n_rx_queue; ++i) {
1771 portid = qconf->rx_queue_list[i].port_id;
1772 queueid = qconf->rx_queue_list[i].queue_id;
1773 port = ifm_get_port(portid);
1775 nb_rx = port->retrieve_bulk_pkts(portid,
1776 queueid, pkts_burst);
1777 port->n_rxpkts += nb_rx;
1779 printf("port may be un initialized\n");
1783 rcv_pkt_count[portid] += nb_rx;
1787 #if (ENABLE_MULTI_BUFFER_OPTIMIZE == 1)
1788 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
1791 * Send nb_rx - nb_rx%8 packets
1794 int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
1795 for (j = 0; j < n; j += 8) {
1796 struct ether_hdr *eth_h0 =
1797 rte_pktmbuf_mtod(pkts_burst[j], struct ether_hdr *);
1798 struct ether_hdr *eth_h1 =
1799 rte_pktmbuf_mtod(pkts_burst[j+1], struct ether_hdr *);
1800 struct ether_hdr *eth_h2 =
1801 rte_pktmbuf_mtod(pkts_burst[j+2], struct ether_hdr *);
1802 struct ether_hdr *eth_h3 =
1803 rte_pktmbuf_mtod(pkts_burst[j+3], struct ether_hdr *);
1804 struct ether_hdr *eth_h4 =
1805 rte_pktmbuf_mtod(pkts_burst[j+4], struct ether_hdr *);
1806 struct ether_hdr *eth_h5 =
1807 rte_pktmbuf_mtod(pkts_burst[j+5], struct ether_hdr *);
1808 struct ether_hdr *eth_h6 =
1809 rte_pktmbuf_mtod(pkts_burst[j+6], struct ether_hdr *);
1810 struct ether_hdr *eth_h7 =
1811 rte_pktmbuf_mtod(pkts_burst[j+7], struct ether_hdr *);
1813 uint16_t ether_type;
/*
 * ANDing the 8 ethertypes yields a valid type only when all 8 agree;
 * mixed bursts fall through to the scalar per-packet path below.
 */
1814 ether_type = (rte_cpu_to_be_16(eth_h0->ether_type) &
1815 rte_cpu_to_be_16(eth_h1->ether_type) &
1816 rte_cpu_to_be_16(eth_h2->ether_type) &
1817 rte_cpu_to_be_16(eth_h3->ether_type) &
1818 rte_cpu_to_be_16(eth_h4->ether_type) &
1819 rte_cpu_to_be_16(eth_h5->ether_type) &
1820 rte_cpu_to_be_16(eth_h6->ether_type) &
1821 rte_cpu_to_be_16(eth_h7->ether_type));
1823 if (ether_type == ETHER_TYPE_IPv4) {
1824 simple_ipv4_replay_8pkts(
1825 &pkts_burst[j], portid, qconf);
1826 } else if (ether_type == ETHER_TYPE_IPv6) {
1827 simple_ipv6_replay_8pkts(&pkts_burst[j],
1830 udp_replay_simple_replay(pkts_burst[j],
1832 udp_replay_simple_replay(pkts_burst[j+1],
1834 udp_replay_simple_replay(pkts_burst[j+2],
1836 udp_replay_simple_replay(pkts_burst[j+3],
1838 udp_replay_simple_replay(pkts_burst[j+4],
1840 udp_replay_simple_replay(pkts_burst[j+5],
1842 udp_replay_simple_replay(pkts_burst[j+6],
1844 udp_replay_simple_replay(pkts_burst[j+7],
/* Remainder (< 8) packets go through the scalar path. */
1849 for (; j < nb_rx ; j++) {
1850 udp_replay_simple_replay(pkts_burst[j],
1854 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
1856 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
1857 for (j = 0; j != k; j += FWDSTEP) {
1858 processx4_step1(&pkts_burst[j],
1860 &ipv4_flag[j / FWDSTEP]);
1863 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
1864 for (j = 0; j != k; j += FWDSTEP) {
1865 processx4_step2(qconf, dip[j / FWDSTEP],
1866 ipv4_flag[j / FWDSTEP], portid,
1867 &pkts_burst[j], &dst_port[j]);
1871 * Finish packet processing and group consecutive
1872 * packets with the same destination port.
1874 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
1881 processx4_step3(pkts_burst, dst_port);
1883 /* dp1: <d[0], d[1], d[2], d[3], ... > */
1884 dp1 = _mm_loadu_si128((__m128i *)dst_port);
1886 for (j = FWDSTEP; j != k; j += FWDSTEP) {
1887 processx4_step3(&pkts_burst[j],
1892 * <d[j-3], d[j-2], d[j-1], d[j], ... >
1894 dp2 = _mm_loadu_si128((__m128i *)
1895 &dst_port[j - FWDSTEP + 1]);
1896 lp = port_groupx4(&pnum[j - FWDSTEP],
1901 * <d[j], d[j+1], d[j+2], d[j+3], ... >
1903 dp1 = _mm_srli_si128(dp2,
1905 sizeof(dst_port[0]));
1909 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
1911 dp2 = _mm_shufflelo_epi16(dp1, 0xf9);
1912 lp = port_groupx4(&pnum[j - FWDSTEP], lp,
1916 * remove values added by the last repeated
1920 dlp = dst_port[j - 1];
1922 /* set dlp and lp to the never used values. */
1924 lp = pnum + MAX_PKT_BURST;
1927 /* Process up to last 3 packets one by one. */
1928 switch (nb_rx % FWDSTEP) {
1930 process_packet(qconf, pkts_burst[j],
1931 dst_port + j, portid);
1932 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
1935 process_packet(qconf, pkts_burst[j],
1936 dst_port + j, portid);
1937 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
1940 process_packet(qconf, pkts_burst[j],
1941 dst_port + j, portid);
1942 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
1947 * Send packets out, through destination port.
1948 * Consecutive packets with the same destination port
1949 * are already grouped together.
1950 * If destination port for the packet equals BAD_PORT,
1951 * then free the packet without sending it out.
1953 for (j = 0; j < nb_rx; j += k) {
1961 if (likely(pn != BAD_PORT)) {
1962 send_packetsx4(qconf, pn,
1965 for (m = j; m != j + k; m++)
1966 rte_pktmbuf_free(pkts_burst[m]);
1970 #endif /* APP_LOOKUP_METHOD */
1971 #else /* ENABLE_MULTI_BUFFER_OPTIMIZE == 0 */
1973 /* Prefetch first packets */
1974 for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
1975 rte_prefetch0(rte_pktmbuf_mtod(
1976 pkts_burst[j], void *));
1979 /* Prefetch and forward already prefetched packets */
1980 for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
1981 rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
1982 j + PREFETCH_OFFSET], void *));
1983 udp_replay_simple_replay(pkts_burst[j], portid,
1987 /* Forward remaining prefetched packets */
1988 for (; j < nb_rx; j++) {
1989 udp_replay_simple_replay(pkts_burst[j], portid,
1992 #endif /* ENABLE_MULTI_BUFFER_OPTIMIZE */
2005 printf ("UDP_Replay stats:\n");
2006 printf ("--------------\n");
2007 printf (" Port Rx Packet Tx Packet Rx Pkt Drop Tx Pkt Drop arp_pkts\n");
2008 for (i = 0; i < nb_lcore_params; ++i) {
2009 portid = lcore_params[i].port_id;
2010 printf ("%5u%15lu%15lu%17d%17d%14u",portid, rcv_pkt_count[portid], tx_pkt_count[portid],j,j, arp_pkts);
2022 for (i = 0; i < 32; i++) {
2023 rcv_pkt_count[i] = 0;
2024 tx_pkt_count[i] = 0;
/*
 * check_lcore_params() - validate the (port,queue,lcore) tuples parsed
 * from --config: queue id in range, lcore enabled in the coremask, and a
 * NUMA warning for remote sockets.
 */
2031 check_lcore_params(void)
2033 uint8_t queue, lcore;
2037 for (i = 0; i < nb_lcore_params; ++i) {
2038 queue = lcore_params[i].queue_id;
2039 if (queue >= MAX_RX_QUEUE_PER_PORT) {
2040 printf("invalid queue number: %hhu\n", queue);
2043 lcore = lcore_params[i].lcore_id;
2044 if (!rte_lcore_is_enabled(lcore)) {
2045 printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
/*
 * NOTE(review): operator-precedence bug — this parses as
 * socketid = (rte_lcore_to_socket_id(lcore) != 0), so socketid becomes
 * 0/1 rather than the socket id, and the warning prints the wrong value.
 * Intended: ((socketid = rte_lcore_to_socket_id(lcore)) != 0).
 */
2048 if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
2050 printf("warning: lcore %hhu is on socket %d with numa off \n",
/*
 * check_port_config() - verify every port referenced by --config is set in
 * the -p port mask and exists on the board.
 */
2058 check_port_config(const unsigned nb_ports)
2063 for (i = 0; i < nb_lcore_params; ++i) {
2064 portid = lcore_params[i].port_id;
2065 if ((enabled_port_mask & (1 << portid)) == 0) {
2066 printf("port %u is not enabled in port mask\n", portid);
2069 if (portid >= nb_ports) {
2070 printf("port %u is not present on the board\n", portid);
/*
 * get_port_n_rx_queues() - number of RX queues configured for 'port':
 * highest queue id referenced in lcore_params, plus one.
 */
2078 get_port_n_rx_queues(const uint8_t port)
2083 for (i = 0; i < nb_lcore_params; ++i) {
2084 if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
2085 queue = lcore_params[i].queue_id;
2087 return (uint8_t)(++queue);
/*
 * init_lcore_rx_queues() - distribute the parsed (port,queue,lcore)
 * tuples into each lcore's rx_queue_list, enforcing the per-lcore queue
 * limit.
 */
2091 init_lcore_rx_queues(void)
2093 uint16_t i, nb_rx_queue;
2096 for (i = 0; i < nb_lcore_params; ++i) {
2097 lcore = lcore_params[i].lcore_id;
2098 nb_rx_queue = lcore_conf[lcore].n_rx_queue;
2099 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
2100 printf("error: too many queues (%u) for lcore: %u\n",
2101 (unsigned)nb_rx_queue + 1, (unsigned)lcore);
2104 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
2105 lcore_params[i].port_id;
2106 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
2107 lcore_params[i].queue_id;
2108 lcore_conf[lcore].n_rx_queue++;
/* print_usage() - dump command-line help for the application. */
2116 print_usage(const char *prgname)
2118 printf ("%s [EAL options] -- -p PORTMASK -P"
2119 " [--config (port,queue,lcore)[,(port,queue,lcore]]"
2120 " [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
2121 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
2122 " -P : enable promiscuous mode\n"
2123 " --version: display app version\n"
2124 " --config (port,queue,lcore): rx queues configuration\n"
2125 " --eth-dest=X,MM:MM:MM:MM:MM:MM: optional, ethernet destination for port X\n"
2126 " --no-numa: optional, disable numa awareness\n"
2127 " --no-hw-csum: optional, disable hw ip checksum\n"
2128 " --ipv6: optional, specify it if running ipv6 packets\n"
2129 " --enable-jumbo: enable jumbo frame"
2130 " which max packet len is PKTLEN in decimal (64-9600)\n"
2131 " --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n",
/*
 * parse_max_pkt_len() - parse --max-pkt-len as a strict decimal string;
 * rejects empty/trailing-garbage input (error return elided from view).
 */
2135 static int parse_max_pkt_len(const char *pktlen)
2140 /* parse decimal string */
2141 len = strtoul(pktlen, &end, 10);
2142 if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
/*
 * parse_link_ip() - load per-port link IP addresses from the [linkip]
 * section of a config file: num_ports, ip_type, then "portN" entries
 * stored as IPv4 strings or parsed IPv6 addresses.
 * NOTE(review): rte_cfgfile_load()/rte_cfgfile_get_entry() results are
 * passed to atoi()/strdup() with no NULL checks visible — a missing file
 * or key would dereference NULL.  'buf' sizing is not visible; confirm
 * it holds "port%d" for i up to 31.
 */
2152 parse_link_ip(const char *file_name)
2155 struct rte_cfgfile *file;
2158 file = rte_cfgfile_load(file_name, 0);
2159 entry = rte_cfgfile_get_entry(file, "linkip", "num_ports");
2160 numports = (uint32_t)atoi(entry);
2161 if (numports <= 0 || numports > 32)
2162 rte_panic("numports is not valid\n");
2163 entry = rte_cfgfile_get_entry(file, "linkip", "ip_type");
2164 type = (uint32_t)atoi(entry);
2165 for (i = 0;i < numports; i++) {
2166 sprintf(buf, "port%d", i);
2167 entry = rte_cfgfile_get_entry(file, "linkip", buf);
/* ip_type selects between IPv4 string storage and IPv6 binary parse. */
2171 ipv4[i] = strdup(entry);
2173 my_inet_pton_ipv6(AF_INET6, entry, &link_ipv6[i][0]);
/*
 * parse_portmask() - parse -p as a strict hexadecimal port bitmask;
 * rejects empty/trailing-garbage input (error return elided from view).
 */
2178 parse_portmask(const char *portmask)
2183 /* parse hexadecimal string */
2184 pm = strtoul(portmask, &end, 16);
2185 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
2194 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
/*
 * parse_hash_entry_number() - parse --hash-entry-num as strict hex;
 * rejects empty/trailing-garbage input (error return elided from view).
 */
2196 parse_hash_entry_number(const char *hash_entry_num)
2199 unsigned long hash_en;
2200 /* parse hexadecimal string */
2201 hash_en = strtoul(hash_entry_num, &end, 16);
2202 if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
/*
 * parse_config() - parse --config "(port,queue,lcore)[,(...)]" into
 * lcore_params_array[].  Each parenthesized tuple is split on commas and
 * each field must fit in a uint8_t.
 */
2213 parse_config(const char *q_arg)
2216 const char *p, *p0 = q_arg;
2224 unsigned long int_fld[_NUM_FLD];
2225 char *str_fld[_NUM_FLD];
/* Reset any previously parsed configuration before reparsing. */
2229 nb_lcore_params = 0;
2231 while ((p = strchr(p0,'(')) != NULL) {
2233 if((p0 = strchr(p,')')) == NULL)
2237 if(size >= sizeof(s))
2240 snprintf(s, sizeof(s), "%.*s", size, p);
2241 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
2243 for (i = 0; i < _NUM_FLD; i++){
2245 int_fld[i] = strtoul(str_fld[i], &end, 0);
2246 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
2249 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
2250 printf("exceeded max number of lcore params: %hu\n",
2254 lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
2255 lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
2256 lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
2259 lcore_params = lcore_params_array;
/*
 * parse_eth_dest() - parse --eth-dest=PORT,MM:MM:MM:MM:MM:MM and store
 * the peer MAC into dest_eth_addr[] and the packed val_eth[] table.
 * NOTE(review): errno is tested after strtoul() but no visible line
 * clears it first — confirm errno = 0 exists in the elided lines.
 */
2264 parse_eth_dest(const char *optarg)
2268 uint8_t c, *dest, peer_addr[6];
2271 portid = strtoul(optarg, &port_end, 10);
2272 if (errno != 0 || port_end == optarg || *port_end++ != ',')
2273 rte_exit(EXIT_FAILURE,
2274 "Invalid eth-dest: %s", optarg);
2275 if (portid >= RTE_MAX_ETHPORTS)
2276 rte_exit(EXIT_FAILURE,
2277 "eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
2278 portid, RTE_MAX_ETHPORTS);
2280 if (cmdline_parse_etheraddr(NULL, port_end,
2281 &peer_addr, sizeof(peer_addr)) < 0)
2282 rte_exit(EXIT_FAILURE,
2283 "Invalid ethernet address: %s\n",
2285 dest = (uint8_t *)&dest_eth_addr[portid];
2286 for (c = 0; c < 6; c++)
2287 dest[c] = peer_addr[c];
/* Mirror the MAC into the 64-bit val_eth entry used by the SSE rewrite. */
2288 *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
2291 #define CMD_LINE_OPT_CONFIG "config"
2292 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
2293 #define CMD_LINE_OPT_NO_NUMA "no-numa"
2294 #define CMD_LINE_OPT_NO_HW_CSUM "no-hw-csum"
2295 #define CMD_LINE_OPT_IPV6 "ipv6"
2296 #define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
2297 #define CMD_LINE_OPT_VERSION "version"
2298 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
2300 /* Parse the argument given in the command line of the application */
/*
 * parse_args() - parse application (non-EAL) command-line arguments:
 * -s (link-ip config file), -p (port mask), -P (promiscuous) and the
 * long options declared in lgopts[].  Returns via ret (elided lines).
 */
2302 parse_args(int argc, char **argv)
2306 int option_index, v_present = 0;
2307 char *prgname = argv[0];
2308 static struct option lgopts[] = {
2309 {CMD_LINE_OPT_CONFIG, 1, 0, 0},
2310 {CMD_LINE_OPT_ETH_DEST, 1, 0, 0},
2311 {CMD_LINE_OPT_NO_NUMA, 0, 0, 0},
2312 {CMD_LINE_OPT_NO_HW_CSUM, 0, 0, 0},
2313 {CMD_LINE_OPT_IPV6, 0, 0, 0},
2314 {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0},
2315 {CMD_LINE_OPT_VERSION, 0, 0, 0},
2316 {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, 0},
2322 while ((opt = getopt_long(argc, argvopt, "s:p:P",
2323 lgopts, &option_index)) != EOF) {
/* -s FILE: per-port link IP configuration. */
2327 parse_link_ip(optarg);
/* -p PORTMASK */
2332 enabled_port_mask = parse_portmask(optarg);
2333 if (enabled_port_mask == 0) {
2334 printf("invalid portmask\n");
2335 print_usage(prgname);
2340 printf("Promiscuous mode selected\n");
/* Long options are matched by name against lgopts[option_index]. */
2346 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_CONFIG,
2347 sizeof (CMD_LINE_OPT_CONFIG))) {
2348 ret = parse_config(optarg);
2350 printf("invalid config\n");
2351 print_usage(prgname);
2356 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ETH_DEST,
2357 sizeof(CMD_LINE_OPT_ETH_DEST))) {
2358 parse_eth_dest(optarg);
2361 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_NUMA,
2362 sizeof(CMD_LINE_OPT_NO_NUMA))) {
2363 printf("numa is disabled \n");
2367 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_HW_CSUM,
2368 sizeof(CMD_LINE_OPT_NO_HW_CSUM))) {
2369 printf("numa is hw ip checksum \n");
2370 port_conf.rxmode.hw_ip_checksum = 0;
2371 rx_conf.rx_free_thresh = 30;
2375 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2376 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_IPV6,
2377 sizeof(CMD_LINE_OPT_IPV6))) {
2378 printf("ipv6 is specified \n");
/* --version may appear only once; v_present guards re-entry. */
2383 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_VERSION,
2384 sizeof (CMD_LINE_OPT_VERSION))) {
2386 rte_panic("Error: VERSION is provided more than once\n");
2388 printf("Version: %s\n", VERSION_STR);
2392 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ENABLE_JUMBO,
2393 sizeof (CMD_LINE_OPT_ENABLE_JUMBO))) {
2394 struct option lenopts = {"max-pkt-len", required_argument, 0, 0};
2396 printf("jumbo frame is enabled - disabling simple TX path\n");
2397 port_conf.rxmode.jumbo_frame = 1;
2399 /* if no max-pkt-len set, use the default value ETHER_MAX_LEN */
2400 if (0 == getopt_long(argc, argvopt, "", &lenopts, &option_index)) {
2401 ret = parse_max_pkt_len(optarg);
2402 if ((ret < 64) || (ret > MAX_JUMBO_PKT_LEN)){
2403 printf("invalid packet length\n");
2404 print_usage(prgname);
2407 port_conf.rxmode.max_rx_pkt_len = ret;
2409 printf("set jumbo frame max packet length to %u\n",
2410 (unsigned int)port_conf.rxmode.max_rx_pkt_len);
2412 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2413 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_HASH_ENTRY_NUM,
2414 sizeof(CMD_LINE_OPT_HASH_ENTRY_NUM))) {
2415 ret = parse_hash_entry_number(optarg);
2416 if ((ret > 0) && (ret <= UDP_Replay_HASH_ENTRIES)) {
2417 hash_entry_number = ret;
2419 printf("invalid hash entry number\n");
2420 print_usage(prgname);
2428 print_usage(prgname);
/* Rewrite argv so the EAL-style convention (prgname at optind-1) holds. */
2434 argv[optind-1] = prgname;
2437 optind = 0; /* reset getopt lib */
2441 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2441 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
/*
 * convert_ipv4_5tuple() - convert a host-order IPv4 5-tuple (key1) into
 * the network-byte-order hash-key layout (key2) used by the rte_hash.
 */
2443 static void convert_ipv4_5tuple(struct ipv4_5tuple* key1,
2444 union ipv4_5tuple_host* key2)
2446 key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst);
2447 key2->ip_src = rte_cpu_to_be_32(key1->ip_src);
2448 key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
2449 key2->port_src = rte_cpu_to_be_16(key1->port_src);
2450 key2->proto = key1->proto;
/*
 * convert_ipv6_5tuple() - convert an IPv6 5-tuple into the hash-key
 * layout: addresses copied byte-for-byte, ports swapped to network order.
 */
2456 static void convert_ipv6_5tuple(struct ipv6_5tuple* key1,
2457 union ipv6_5tuple_host* key2)
2460 for (i = 0; i < 16; i++)
2462 key2->ip_dst[i] = key1->ip_dst[i];
2463 key2->ip_src[i] = key1->ip_src[i];
2465 key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
2466 key2->port_src = rte_cpu_to_be_16(key1->port_src);
2467 key2->proto = key1->proto;
2474 #define BYTE_VALUE_MAX 256
2475 #define ALL_32_BITS 0xffffffff
2476 #define BIT_8_TO_15 0x0000ff00
2478 populate_ipv4_few_flow_into_table(const struct rte_hash* h)
2482 uint32_t array_len = sizeof(ipv4_udp_replay_route_array)/sizeof(ipv4_udp_replay_route_array[0]);
2484 mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
2485 for (i = 0; i < array_len; i++) {
2486 struct ipv4_udp_replay_route entry;
2487 union ipv4_5tuple_host newkey;
2488 entry = ipv4_udp_replay_route_array[i];
2489 convert_ipv4_5tuple(&entry.key, &newkey);
2490 ret = rte_hash_add_key (h,(void *) &newkey);
2492 rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
2493 " to the udp_replay hash.\n", i);
2495 ipv4_udp_replay_out_if[ret] = entry.if_out;
2497 printf("Hash: Adding 0x%" PRIx32 " keys\n", array_len);
2500 #define BIT_16_TO_23 0x00ff0000
2502 populate_ipv6_few_flow_into_table(const struct rte_hash* h)
2506 uint32_t array_len = sizeof(ipv6_udp_replay_route_array)/sizeof(ipv6_udp_replay_route_array[0]);
2508 mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
2509 mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
2510 for (i = 0; i < array_len; i++) {
2511 struct ipv6_udp_replay_route entry;
2512 union ipv6_5tuple_host newkey;
2513 entry = ipv6_udp_replay_route_array[i];
2514 convert_ipv6_5tuple(&entry.key, &newkey);
2515 ret = rte_hash_add_key (h, (void *) &newkey);
2517 rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
2518 " to the udp_replay hash.\n", i);
2520 ipv6_udp_replay_out_if[ret] = entry.if_out;
2522 printf("Hash: Adding 0x%" PRIx32 "keys\n", array_len);
2525 #define NUMBER_PORT_USED 4
2527 populate_ipv4_many_flow_into_table(const struct rte_hash* h,
2528 unsigned int nr_flow)
2531 mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
2532 for (i = 0; i < nr_flow; i++) {
2533 struct ipv4_udp_replay_route entry;
2534 union ipv4_5tuple_host newkey;
2535 uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
2536 uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
2537 uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
2538 /* Create the ipv4 exact match flow */
2539 memset(&entry, 0, sizeof(entry));
2540 switch (i & (NUMBER_PORT_USED -1)) {
2542 entry = ipv4_udp_replay_route_array[0];
2543 entry.key.ip_dst = IPv4(101,c,b,a);
2546 entry = ipv4_udp_replay_route_array[1];
2547 entry.key.ip_dst = IPv4(201,c,b,a);
2550 entry = ipv4_udp_replay_route_array[2];
2551 entry.key.ip_dst = IPv4(111,c,b,a);
2554 entry = ipv4_udp_replay_route_array[3];
2555 entry.key.ip_dst = IPv4(211,c,b,a);
2558 convert_ipv4_5tuple(&entry.key, &newkey);
2559 int32_t ret = rte_hash_add_key(h,(void *) &newkey);
2561 rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
2563 ipv4_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
2566 printf("Hash: Adding 0x%x keys\n", nr_flow);
2570 populate_ipv6_many_flow_into_table(const struct rte_hash* h,
2571 unsigned int nr_flow)
2574 mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
2575 mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
2576 for (i = 0; i < nr_flow; i++) {
2577 struct ipv6_udp_replay_route entry;
2578 union ipv6_5tuple_host newkey;
2579 uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
2580 uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
2581 uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
2582 /* Create the ipv6 exact match flow */
2583 memset(&entry, 0, sizeof(entry));
2584 switch (i & (NUMBER_PORT_USED - 1)) {
2585 case 0: entry = ipv6_udp_replay_route_array[0]; break;
2586 case 1: entry = ipv6_udp_replay_route_array[1]; break;
2587 case 2: entry = ipv6_udp_replay_route_array[2]; break;
2588 case 3: entry = ipv6_udp_replay_route_array[3]; break;
2590 entry.key.ip_dst[13] = c;
2591 entry.key.ip_dst[14] = b;
2592 entry.key.ip_dst[15] = a;
2593 convert_ipv6_5tuple(&entry.key, &newkey);
2594 int32_t ret = rte_hash_add_key(h,(void *) &newkey);
2596 rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
2598 ipv6_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
2601 printf("Hash: Adding 0x%x keys\n", nr_flow);
2606 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
/*
 * setup_lpm() - create and populate the per-socket IPv4 LPM and IPv6
 * LPM6 lookup tables from the static ipv4/ipv6_udp_replay_route_array
 * tables.  Routes whose output interface is not set in
 * enabled_port_mask are skipped; any create/add failure aborts the
 * process via rte_exit().
 * NOTE(review): the declarations of the name buffer `s`, loop index
 * `i` and return code `ret`, plus config.flags initialisation and the
 * `&config` argument to rte_lpm6_create(), are on lines not visible in
 * this chunk — confirm against the full file.
 */
2608 setup_lpm(int socketid)
2610 	struct rte_lpm6_config config;
2615 	/* create the LPM table */
2616 	snprintf(s, sizeof(s), "IPV4_UDP_Replay_LPM_%d", socketid);
2617 	ipv4_udp_replay_lookup_struct[socketid] = rte_lpm_create(s, socketid,
2618 	IPV4_UDP_Replay_LPM_MAX_RULES, 0);
2619 	if (ipv4_udp_replay_lookup_struct[socketid] == NULL)
2620 	rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
2621 	" on socket %d\n", socketid);
2623 	/* populate the LPM table */
2624 	for (i = 0; i < IPV4_UDP_Replay_NUM_ROUTES; i++) {
2626 	/* skip unused ports */
2627 	if ((1 << ipv4_udp_replay_route_array[i].if_out &
2628 	enabled_port_mask) == 0)
2631 	ret = rte_lpm_add(ipv4_udp_replay_lookup_struct[socketid],
2632 	ipv4_udp_replay_route_array[i].ip,
2633 	ipv4_udp_replay_route_array[i].depth,
2634 	ipv4_udp_replay_route_array[i].if_out);
2637 	rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
2638 	"udp_replay LPM table on socket %d\n",
2642 	printf("LPM: Adding route 0x%08x / %d (%d)\n",
2643 	(unsigned)ipv4_udp_replay_route_array[i].ip,
2644 	ipv4_udp_replay_route_array[i].depth,
2645 	ipv4_udp_replay_route_array[i].if_out);
2648 	/* create the LPM6 table */
2649 	snprintf(s, sizeof(s), "IPV6_UDP_Replay_LPM_%d", socketid);
2651 	config.max_rules = IPV6_UDP_Replay_LPM_MAX_RULES;
2652 	config.number_tbl8s = IPV6_UDP_Replay_LPM_NUMBER_TBL8S;
2654 	ipv6_udp_replay_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
2656 	if (ipv6_udp_replay_lookup_struct[socketid] == NULL)
2657 	rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
2658 	" on socket %d\n", socketid);
2660 	/* populate the LPM table */
2661 	for (i = 0; i < IPV6_UDP_Replay_NUM_ROUTES; i++) {
2663 	/* skip unused ports */
2664 	if ((1 << ipv6_udp_replay_route_array[i].if_out &
2665 	enabled_port_mask) == 0)
2668 	ret = rte_lpm6_add(ipv6_udp_replay_lookup_struct[socketid],
2669 	ipv6_udp_replay_route_array[i].ip,
2670 	ipv6_udp_replay_route_array[i].depth,
2671 	ipv6_udp_replay_route_array[i].if_out);
2674 	rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
2675 	"udp_replay LPM table on socket %d\n",
/* NOTE(review): the %s argument for the IPv6 route printf is on a line
 * not visible here — confirm what address string is printed. */
2679 	printf("LPM: Adding route %s / %d (%d)\n",
2681 	ipv6_udp_replay_route_array[i].depth,
2682 	ipv6_udp_replay_route_array[i].if_out);
2692 /* Check the link status of all ports in up to 9s, and print them finally */
2694 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
2696 #define CHECK_INTERVAL 100 /* 100ms */
2697 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2698 uint8_t portid, count, all_ports_up, print_flag = 0;
2699 struct rte_eth_link link;
2701 printf("\nChecking link status");
2703 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2705 for (portid = 0; portid < port_num; portid++) {
2706 if ((port_mask & (1 << portid)) == 0)
2708 memset(&link, 0, sizeof(link));
2709 rte_eth_link_get_nowait(portid, &link);
2710 /* print link status if flag set */
2711 if (print_flag == 1) {
2712 if (link.link_status)
2713 printf("Port %d Link Up - speed %u "
2714 "Mbps - %s\n", (uint8_t)portid,
2715 (unsigned)link.link_speed,
2716 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2717 ("full-duplex") : ("half-duplex\n"));
2719 printf("Port %d Link Down\n",
2723 /* clear all_ports_up flag if any link down */
2724 if (link.link_status == 0) {
2729 /* after finally printing all link status, get out */
2730 if (print_flag == 1)
2733 if (all_ports_up == 0) {
2736 rte_delay_ms(CHECK_INTERVAL);
2739 /* set the print_flag if all ports up or timeout */
2740 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * main() - EAL init, argument parsing, per-port configuration through
 * the interface manager, lcore launch, then an interactive cmdline
 * loop ("Replay>") until quit.
 */
2748 main(int argc, char **argv)
2753 	uint32_t n_tx_queue;
2754 	uint8_t portid, nb_rx_queue;
2757 	struct pipeline_params *params;
2759 	/* parse application arguments (after the EAL ones) */
2761 	ret = rte_eal_init(argc, argv);
2763 	rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
/* record the master lcore for the stats timer */
2766 	timer_lcore = rte_lcore_id();
2768 	ret = parse_args(argc, argv);
2770 	rte_exit(EXIT_FAILURE, "Invalid UDP_Replay parameters\n");
2772 	if (check_lcore_params() < 0)
2773 	rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
2775 	ret = init_lcore_rx_queues();
2777 	rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
/* NOTE(review): rte_malloc() result is dereferenced by the memcpy
 * below without a NULL check — confirm the out-of-memory policy or
 * add a check. */
2779 	params = rte_malloc(NULL, sizeof(*params), RTE_CACHE_LINE_SIZE);
2780 	memcpy(params, &def_pipeline_params, sizeof(def_pipeline_params));
2781 	lib_arp_init(params, NULL);
2783 	nb_ports = rte_eth_dev_count();
2784 	num_ports = nb_ports;
/* clamp to the compile-time port limit */
2786 	if (nb_ports > RTE_MAX_ETHPORTS)
2787 	nb_ports = RTE_MAX_ETHPORTS;
2789 	if (check_port_config(nb_ports) < 0)
2790 	rte_exit(EXIT_FAILURE, "check_port_config failed\n");
2793 	*Configuring port_config_t structure for interface manager initialization
2795 	size = RTE_CACHE_LINE_ROUNDUP(sizeof(port_config_t));
2796 	port_config = rte_zmalloc(NULL, (RTE_MAX_ETHPORTS * size), RTE_CACHE_LINE_SIZE);
2797 	if (port_config == NULL)
2798 	rte_panic("port_config is NULL: Memory Allocation failure\n");
2799 	/* initialize all ports */
2800 	for (portid = 0; portid < nb_ports; portid++) {
2801 	/* skip ports that are not enabled */
2802 	if ((enabled_port_mask & (1 << portid)) == 0) {
2803 	printf("\nSkipping disabled port %d\n", portid);
2809 	printf("Initializing port %d ... ", portid );
/* one TX queue per RX queue, capped at MAX_TX_QUEUE_PER_PORT */
2812 	nb_rx_queue = get_port_n_rx_queues(portid);
2813 	n_tx_queue = nb_rx_queue;
2814 	if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
2815 	n_tx_queue = MAX_TX_QUEUE_PER_PORT;
/* fill the interface-manager port descriptor for this port */
2817 	port_config[portid].port_id = portid;
2818 	port_config[portid].nrx_queue = nb_rx_queue;
2819 	port_config[portid].ntx_queue = n_tx_queue;
2820 	port_config[portid].state = 1;
2821 	port_config[portid].promisc = promiscuous_on;
2822 	port_config[portid].mempool.pool_size = MEMPOOL_SIZE;
2823 	port_config[portid].mempool.buffer_size = BUFFER_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
2824 	port_config[portid].mempool.cache_size = CACHE_SIZE;
2825 	port_config[portid].mempool.cpu_socket_id = rte_socket_id();
2826 	memcpy (&port_config[portid].port_conf, &port_conf, sizeof(struct rte_eth_conf));
2827 	memcpy (&port_config[portid].rx_conf, &rx_conf, sizeof(struct rte_eth_rxconf));
2828 	memcpy (&port_config[portid].tx_conf, &tx_conf, sizeof(struct rte_eth_txconf));
2830 	/* Enable TCP and UDP HW Checksum , when required */
2831 	//port_config[portid].tx_conf.txq_flags &=
2832 	// ~(ETH_TXQ_FLAGS_NOXSUMTCP|ETH_TXQ_FLAGS_NOXSUMUDP);
2834 	if (ifm_port_setup (portid, &port_config[portid]))
2835 	rte_panic ("Port Setup Failed: %"PRIu32"\n", portid);
/* wait (up to 9s) for links to come up before starting lcores */
2838 	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
2843 	populate_lpm_routes();
2844 	convert_ipstr_to_numeric();
2845 	/* launch per-lcore init on every lcore */
2846 	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
/* interactive command loop; blocks until the user quits */
2847 	cl = cmdline_stdin_new(main_ctx, "Replay>");
2849 	rte_panic("Cannot create cmdline instance\n");
2850 	cmdline_interact(cl);
2851 	cmdline_stdin_exit(cl);
2853 	rte_exit(0, "Bye!\n");
/* NOTE(review): rte_exit() above terminates the process, so this
 * slave-lcore wait loop is unreachable dead code — verify the intended
 * shutdown order (wait first, then exit?). */
2854 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
2855 	if (rte_eal_wait_lcore(lcore_id) < 0)
2861 /**********************************************************/
/* Token buffer for the "UDP_Replay clear stats" cmdline command. */
2863 struct cmd_obj_clear_result {
2864 	cmdline_fixed_string_t clear;
2865 	cmdline_fixed_string_t udp_replay;
2866 	cmdline_fixed_string_t stats;
/*
 * Handler for the "UDP_Replay clear stats" command.  All arguments are
 * unused.  NOTE(review): the body lies on lines not visible in this
 * chunk; presumably it resets the rx/tx counters — confirm.
 */
2869 static void cmd_clear_udp_replay_stats_parsed(
2870 	__rte_unused void *parsed_result,
2871 	__rte_unused struct cmdline *cl,
2872 	__attribute__((unused)) void *data)
/* Literal token matchers for "UDP_Replay", "clear" and "stats". */
2878 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_udp_replay_string =
2879 	TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, udp_replay, "UDP_Replay");
2880 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_clear_string =
2881 	TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, clear, "clear");
2882 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_stats_string =
2883 	TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, stats, "stats");
/* Command instance binding the three tokens above to the clear-stats
 * handler.  (The token list's NULL terminator and the closing braces
 * are on lines not visible in this chunk.) */
2885 cmdline_parse_inst_t cmd_clear_udp_replay_stats = {
2886 	.f = cmd_clear_udp_replay_stats_parsed, /* function to call */
2887 	.data = NULL, /* 2nd arg of func */
2888 	.help_str = "clears UDP_Replay stats for rx/tx",
2889 	.tokens = { /* token list, NULL terminated */
2890 	(void *)&cmd_clear_udp_replay_stats_udp_replay_string,
2891 	(void *)&cmd_clear_udp_replay_stats_clear_string,
2892 	(void *)&cmd_clear_udp_replay_stats_stats_string,
2896 /**********************************************************/
/* Token buffer for the "UDP_Replay stats" cmdline command. */
2897 struct cmd_obj_add_result {
2898 	cmdline_fixed_string_t action;
2899 	cmdline_fixed_string_t name;
/*
 * Handler for the "UDP_Replay stats" command.  All arguments are
 * unused.  NOTE(review): the body lies on lines not visible in this
 * chunk; presumably it prints the rx/tx statistics — confirm.
 */
2902 static void cmd_udp_replay_stats_parsed(
2903 	__rte_unused void *parsed_result,
2904 	__rte_unused struct cmdline *cl,
2905 	__attribute__((unused)) void *data)
/* Literal token matchers for "UDP_Replay" and "stats". */
2910 cmdline_parse_token_string_t cmd_udp_replay_stats_udp_replay_string =
2911 	TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, action, "UDP_Replay");
2912 cmdline_parse_token_string_t cmd_udp_replay_stats_stats_string =
2913 	TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, name, "stats");
/* Command instance binding the two tokens above to the stats handler.
 * (The token list's NULL terminator and the closing braces are on
 * lines not visible in this chunk.) */
2915 cmdline_parse_inst_t cmd_udp_replay_stats = {
2916 	.f = cmd_udp_replay_stats_parsed, /* function to call */
2917 	.data = NULL, /* 2nd arg of func */
2918 	.help_str = "UDP_Replay stats for rx/tx",
2919 	.tokens = { /* token list, NULL terminated */
2920 	(void *)&cmd_udp_replay_stats_udp_replay_string,
2921 	(void *)&cmd_udp_replay_stats_stats_string,
/* Token buffer for the "quit" command. */
2926 struct cmd_quit_result {
2927 	cmdline_fixed_string_t quit;
/* Handler for "quit".  NOTE(review): the function's opening line and
 * body are on lines not visible in this chunk; presumably it calls
 * cmdline_quit(cl) — confirm. */
2932 	__rte_unused void *parsed_result,
2934 	__rte_unused void *data)
/* Literal token matcher and command instance for "quit". */
2939 static cmdline_parse_token_string_t cmd_quit_quit =
2940 	TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
2942 static cmdline_parse_inst_t cmd_quit = {
2943 	.f = cmd_quit_parsed,
2947 	(void *) &cmd_quit_quit,
2952 /**********************************************************/
2953 /****** CONTEXT (list of instruction) */
2954 cmdline_parse_ctx_t main_ctx[] = {
2955 (cmdline_parse_inst_t *)&cmd_udp_replay_stats,
2956 (cmdline_parse_inst_t *)&cmd_clear_udp_replay_stats,
2957 (cmdline_parse_inst_t *)&cmd_quit,