2 // Copyright (c) 2016-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
18 Correlated traffic VNF :
19 ------------------------
21 2. Modify received packet
22 a. exchange src mac and destination mac
23 b. exchange src ip and destination IP for both IPv4 and IPv6 cases
24 c. exchange UDP src port and UDP destination port
25 d. change the len of the response according to the IMIX definition (
26 option to make traffic more realistic to emulate some IoT payloads)
27 3. send modified packet to the port where it was received.
29 Such VNF does not need LPM and routing table implementations.
30 As the packet modification is very minimal and there is no memory access as the packet is stored in L3 cache the
31 performance of the solution should be sufficient for testing the UDP NAT performance.
37 #include <sys/types.h>
39 #include <sys/queue.h>
44 #include <rte_common.h>
46 #include <rte_byteorder.h>
48 #include <rte_memory.h>
49 #include <rte_memcpy.h>
50 #include <rte_memzone.h>
52 #include <rte_per_lcore.h>
53 #include <rte_launch.h>
54 #include <rte_atomic.h>
55 #include <rte_cycles.h>
56 #include <rte_prefetch.h>
57 #include <rte_lcore.h>
58 #include <rte_per_lcore.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_interrupts.h>
62 #include <rte_random.h>
63 #include <rte_debug.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
67 #include <rte_mempool.h>
72 #include <rte_string_fns.h>
73 #include <rte_version.h>
75 #include <cmdline_parse.h>
76 #include <cmdline_parse_etheraddr.h>
77 #include <cmdline_rdline.h>
78 #include <cmdline_socket.h>
80 #include <cmdline_parse_num.h>
81 #include <cmdline_parse_string.h>
82 #include <cmdline_parse_ipaddr.h>
83 #include <rte_errno.h>
84 #include <rte_cfgfile.h>
86 #include "parse_obj_list.h"
90 #include "interface.h"
91 #include "l3fwd_common.h"
92 #include "l3fwd_lpm4.h"
93 #include "l3fwd_lpm6.h"
94 #include "lib_icmpv6.h"
96 #include "vnf_common.h"
100 #define APP_LOOKUP_EXACT_MATCH 0
101 #define APP_LOOKUP_LPM 1
102 #define DO_RFC_1812_CHECKS
104 #ifndef APP_LOOKUP_METHOD
105 #define APP_LOOKUP_METHOD APP_LOOKUP_EXACT_MATCH
110 #include <netinet/in.h>
114 * When set to zero, simple forwarding path is enabled.
115 * When set to one, optimized forwarding path is enabled.
116 * Note that LPM optimisation path uses SSE4.1 instructions.
118 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && !defined(__SSE4_1__))
119 #define ENABLE_MULTI_BUFFER_OPTIMIZE 0
121 #define ENABLE_MULTI_BUFFER_OPTIMIZE 1
124 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
125 #include <rte_hash.h>
126 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
128 #include <rte_lpm6.h>
130 #error "APP_LOOKUP_METHOD set to incorrect value"
134 #define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
135 "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
136 #define IPv6_BYTES(addr) \
137 addr[0], addr[1], addr[2], addr[3], \
138 addr[4], addr[5], addr[6], addr[7], \
139 addr[8], addr[9], addr[10], addr[11],\
140 addr[12], addr[13],addr[14], addr[15]
144 #define RTE_LOGTYPE_UDP_Replay RTE_LOGTYPE_USER1
146 #define MAX_JUMBO_PKT_LEN 9600
148 #define IPV6_ADDR_LEN 16
150 #define MEMPOOL_CACHE_SIZE 256
153 * This expression is used to calculate the number of mbufs needed depending on user input, taking
154 * into account memory for rx and tx hardware rings, cache per lcore and mtable per port per lcore.
155 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum value of 8192
158 #define NB_MBUF RTE_MAX ( \
159 (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
160 nb_ports*nb_lcores*MAX_PKT_BURST + \
161 nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
162 nb_lcores*MEMPOOL_CACHE_SIZE), \
165 #define MAX_PKT_BURST 32
166 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
169 * Try to avoid TX buffering if we have at least MAX_TX_BURST packets to send.
171 #define MAX_TX_BURST (MAX_PKT_BURST / 2)
175 /* Configure how many packets ahead to prefetch, when reading packets */
176 #define PREFETCH_OFFSET 3
178 /* Used to mark destination port as 'invalid'. */
179 #define BAD_PORT ((uint16_t)-1)
184 * Configurable number of RX/TX ring descriptors
186 #define RTE_TEST_RX_DESC_DEFAULT 128
187 #define RTE_TEST_TX_DESC_DEFAULT 512
188 static uint64_t rcv_pkt_count[32] = {0};
189 static uint64_t tx_pkt_count[32] = {0};
190 static uint32_t arp_support;
193 struct sockaddr_in ipaddr1, ipaddr2;
194 /* ethernet addresses of ports */
195 static uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
197 static __m128i val_eth[RTE_MAX_ETHPORTS];
199 cmdline_parse_ctx_t main_ctx[];
201 uint32_t timer_lcore;
202 uint32_t exit_loop = 1;
203 port_config_t *port_config;
205 #define MEMPOOL_SIZE 32 * 1024
206 #define BUFFER_SIZE 2048
207 #define CACHE_SIZE 256
208 /* replace first 12B of the ethernet header. */
209 #define MASK_ETH 0x3f
211 #define IP_TYPE_IPV4 0
212 #define IP_TYPE_IPV6 1
214 const char* ipv4[MAX_IP];
215 uint8_t link_ipv6[MAX_IP][16];
216 uint32_t type, numports;
217 /* mask of enabled ports */
218 static uint32_t enabled_port_mask = 0;
219 static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */
220 static int numa_on = 1; /**< NUMA is enabled by default. */
221 static int csum_on = 1; /**< Hardware checksum offload is enabled by default. */
222 struct pipeline_params def_pipeline_params = {
231 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
232 static int ipv6 = 0; /**< ipv6 is false by default. */
235 void convert_ipstr_to_numeric(void);
237 int print_l4stats(void);
238 int clear_stats(void);
242 struct rte_mbuf *m_table[MAX_PKT_BURST];
245 struct lcore_rx_queue {
248 } __rte_cache_aligned;
250 #define MAX_RX_QUEUE_PER_LCORE 16
251 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
252 #define MAX_RX_QUEUE_PER_PORT 128
254 #define MAX_LCORE_PARAMS 1024
255 struct lcore_params {
259 } __rte_cache_aligned;
261 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
262 static struct lcore_params lcore_params_array_default[] = {
274 static struct lcore_params * lcore_params = lcore_params_array_default;
275 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
276 sizeof(lcore_params_array_default[0]);
278 static struct rte_eth_conf port_conf = {
280 .mq_mode = ETH_MQ_RX_RSS,
281 .max_rx_pkt_len = ETHER_MAX_LEN,
283 .header_split = 0, /**< Header Split disabled */
284 .hw_ip_checksum = 1, /**< IP checksum offload enabled */
285 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
286 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
287 .hw_strip_crc = 0, /**< CRC stripped by hardware */
292 .rss_hf = ETH_RSS_IP,
296 .mq_mode = ETH_MQ_TX_NONE,
300 /* empty vmdq configuration structure. Filled in programmatically */
301 static struct rte_eth_rxconf rx_conf = {
307 .rx_free_thresh = 64,
309 .rx_deferred_start = 0,
311 static struct rte_eth_txconf tx_conf = {
319 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
320 ETH_TXQ_FLAGS_NOOFFLOADS,
321 .tx_deferred_start = 0,
324 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
326 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
327 #include <rte_hash_crc.h>
328 #define DEFAULT_HASH_FUNC rte_hash_crc
330 #include <rte_jhash.h>
331 #define DEFAULT_HASH_FUNC rte_jhash
340 } __attribute__((__packed__));
342 union ipv4_5tuple_host {
355 #define XMM_NUM_IN_IPV6_5TUPLE 3
358 uint8_t ip_dst[IPV6_ADDR_LEN];
359 uint8_t ip_src[IPV6_ADDR_LEN];
363 } __attribute__((__packed__));
365 union ipv6_5tuple_host {
370 uint8_t ip_src[IPV6_ADDR_LEN];
371 uint8_t ip_dst[IPV6_ADDR_LEN];
376 __m128i xmm[XMM_NUM_IN_IPV6_5TUPLE];
379 struct ipv4_udp_replay_route {
380 struct ipv4_5tuple key;
384 struct ipv6_udp_replay_route {
385 struct ipv6_5tuple key;
389 static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
390 {{IPv4(101,0,0,0), IPv4(100,10,0,1), 101, 11, IPPROTO_TCP}, 0},
391 {{IPv4(201,0,0,0), IPv4(200,20,0,1), 102, 12, IPPROTO_TCP}, 1},
392 {{IPv4(111,0,0,0), IPv4(100,30,0,1), 101, 11, IPPROTO_TCP}, 2},
393 {{IPv4(211,0,0,0), IPv4(200,40,0,1), 102, 12, IPPROTO_TCP}, 3},
396 static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
398 {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
399 {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
400 101, 11, IPPROTO_TCP}, 0},
403 {0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
404 {0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
405 102, 12, IPPROTO_TCP}, 1},
408 {0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
409 {0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
410 101, 11, IPPROTO_TCP}, 2},
413 {0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
414 {0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
415 102, 12, IPPROTO_TCP}, 3},
418 typedef struct rte_hash lookup_struct_t;
420 #ifdef RTE_ARCH_X86_64
421 /* default to 4 million hash entries (approx) */
422 #define UDP_Replay_HASH_ENTRIES 1024*1024*4
424 /* 32-bit has less address-space for hugepage memory, limit to 1M entries */
425 #define UDP_Replay_HASH_ENTRIES 1024*1024*1
427 #define HASH_ENTRY_NUMBER_DEFAULT 4
429 static uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
431 app_link_up_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
436 app_link_down_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
/*
 * Register the user-configured address for each port with the interface
 * manager: IPv4 strings are converted with inet_addr() and added with a /24
 * prefix; IPv6 addresses are taken from link_ipv6[] and added as /128.
 * Reads file-scope globals: type, numports, ipv4[], link_ipv6[], ipaddr1.
 * NOTE(review): several interior lines (braces, loop body delimiters) are
 * missing from this extraction; code below left byte-identical.
 */
441 void convert_ipstr_to_numeric(void)
444 for (i = 0; i < numports; i++)
446 if (type == IP_TYPE_IPV4) {
/* zero the scratch sockaddr before filling only sin_addr */
447 memset(&ipaddr1, '\0', sizeof(struct sockaddr_in));
448 ipaddr1.sin_addr.s_addr = inet_addr(ipv4[i]);
449 ifm_add_ipv4_port(i, ipaddr1.sin_addr.s_addr, 24);
450 } else if (type == IP_TYPE_IPV6) {
451 ifm_add_ipv6_port(i, &link_ipv6[i][0], 128);
/*
 * Hash an IPv4 5-tuple key (protocol, src/dst IP, src/dst port) for the
 * exact-match lookup table.  Uses hardware CRC32 (rte_hash_crc_4byte) when
 * SSE4.2 is available, otherwise falls back to software jhash.
 * NOTE(review): the declarations of `t` and `p` and the final return of
 * init_val fall outside this extraction — confirm against full file.
 */
456 static inline uint32_t
457 ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
460 const union ipv4_5tuple_host *k;
/* src and dst ports are adjacent 16-bit fields — folded as one 32-bit word */
466 p = (const uint32_t *)&k->port_src;
468 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
469 init_val = rte_hash_crc_4byte(t, init_val);
470 init_val = rte_hash_crc_4byte(k->ip_src, init_val);
471 init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
472 init_val = rte_hash_crc_4byte(*p, init_val);
473 #else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
474 init_val = rte_jhash_1word(t, init_val);
475 init_val = rte_jhash_1word(k->ip_src, init_val);
476 init_val = rte_jhash_1word(k->ip_dst, init_val);
477 init_val = rte_jhash_1word(*p, init_val);
478 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
/*
 * Detect ARP packets and IPv4 ICMP packets and divert them to the
 * control-plane handler (process_arpicmp_pkt) for the ingress port.
 * Offset 12 into the Ethernet header is the EtherType field.
 * NOTE(review): return statements are outside this extraction — presumably
 * returns nonzero when the packet was consumed; confirm against full file.
 */
482 static inline int check_arpicmp(struct rte_mbuf *pkt)
484 uint8_t in_port_id = pkt->port;
485 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
486 uint16_t *eth_proto =
487 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
/* IP protocol byte located at a fixed offset past the Ethernet header */
489 uint32_t prot_offset =
490 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST;
491 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset);
/* EtherType is big-endian on the wire; convert before comparing */
492 if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_ARP) ||
493 ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV4)
494 && (*protocol == IP_PROTOCOL_ICMP))) {
495 process_arpicmp_pkt(pkt, ifm_get_port(in_port_id));
/*
 * Detect ICMPv6 packets (neighbor discovery / echo) and divert them to the
 * control-plane handler (process_icmpv6_pkt) for the ingress port.
 * NOTE(review): return statements are outside this extraction — confirm
 * the consumed/not-consumed return convention against the full file.
 */
501 static inline int check_arpicmpv6(struct rte_mbuf *pkt)
503 struct ether_hdr *eth_h;
504 struct ipv6_hdr *ipv6_h;
505 uint8_t in_port_id = pkt->port;
506 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
507 uint16_t *eth_proto =
508 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
509 eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
/* IPv6 header immediately follows the (untagged) Ethernet header */
510 ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
511 if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV6)
512 && (ipv6_h->proto == ICMPV6_PROTOCOL_ID)) {
513 process_icmpv6_pkt(pkt, ifm_get_port(in_port_id));
/*
 * Hash an IPv6 5-tuple key (protocol, 128-bit src/dst IP, src/dst port)
 * for the exact-match lookup table.  The SSE4.2 path folds each address in
 * as four 32-bit CRC steps; the fallback path jhashes the full 16-byte
 * addresses.  NOTE(review): declarations of `t` and `p` and the final
 * return of init_val fall outside this extraction.
 */
519 static inline uint32_t
520 ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len, uint32_t init_val)
522 const union ipv6_5tuple_host *k;
525 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
526 const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
527 const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
528 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
/* src and dst ports are adjacent 16-bit fields — folded as one 32-bit word */
532 p = (const uint32_t *)&k->port_src;
534 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
/* view each 16-byte address as four consecutive 32-bit words */
535 ip_src0 = (const uint32_t *) k->ip_src;
536 ip_src1 = (const uint32_t *)(k->ip_src+4);
537 ip_src2 = (const uint32_t *)(k->ip_src+8);
538 ip_src3 = (const uint32_t *)(k->ip_src+12);
539 ip_dst0 = (const uint32_t *) k->ip_dst;
540 ip_dst1 = (const uint32_t *)(k->ip_dst+4);
541 ip_dst2 = (const uint32_t *)(k->ip_dst+8);
542 ip_dst3 = (const uint32_t *)(k->ip_dst+12);
543 init_val = rte_hash_crc_4byte(t, init_val);
544 init_val = rte_hash_crc_4byte(*ip_src0, init_val);
545 init_val = rte_hash_crc_4byte(*ip_src1, init_val);
546 init_val = rte_hash_crc_4byte(*ip_src2, init_val);
547 init_val = rte_hash_crc_4byte(*ip_src3, init_val);
548 init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
549 init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
550 init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
551 init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
552 init_val = rte_hash_crc_4byte(*p, init_val);
553 #else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
554 init_val = rte_jhash_1word(t, init_val);
555 init_val = rte_jhash(k->ip_src, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
556 init_val = rte_jhash(k->ip_dst, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
557 init_val = rte_jhash_1word(*p, init_val);
558 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
562 #define IPV4_UDP_Replay_NUM_ROUTES \
563 (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
565 #define IPV6_UDP_Replay_NUM_ROUTES \
566 (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
568 static uint8_t ipv4_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
569 static uint8_t ipv6_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
573 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
574 struct ipv4_udp_replay_route {
580 struct ipv6_udp_replay_route {
586 static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
587 {IPv4(1,1,1,0), 24, 0},
588 {IPv4(2,1,1,0), 24, 1},
589 {IPv4(3,1,1,0), 24, 2},
590 {IPv4(4,1,1,0), 24, 3},
591 {IPv4(5,1,1,0), 24, 4},
592 {IPv4(6,1,1,0), 24, 5},
593 {IPv4(7,1,1,0), 24, 6},
594 {IPv4(8,1,1,0), 24, 7},
597 static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
598 {{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
599 {{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
600 {{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
601 {{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
602 {{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
603 {{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
604 {{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
605 {{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
608 #define IPV4_UDP_Replay_NUM_ROUTES \
609 (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
610 #define IPV6_UDP_Replay_NUM_ROUTES \
611 (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
613 #define IPV4_UDP_Replay_LPM_MAX_RULES 1024
614 #define IPV6_UDP_Replay_LPM_MAX_RULES 1024
615 #define IPV6_UDP_Replay_LPM_NUMBER_TBL8S (1 << 16)
617 typedef struct rte_lpm lookup_struct_t;
618 typedef struct rte_lpm6 lookup6_struct_t;
619 static lookup_struct_t *ipv4_udp_replay_lookup_struct[NB_SOCKETS];
620 static lookup6_struct_t *ipv6_udp_replay_lookup_struct[NB_SOCKETS];
625 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
626 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
627 struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
628 lookup_struct_t * ipv4_lookup_struct;
629 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
630 lookup6_struct_t * ipv6_lookup_struct;
632 lookup_struct_t * ipv6_lookup_struct;
634 } __rte_cache_aligned;
636 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
638 /* Send burst of packets on an output interface */
/* Drains the per-lcore TX buffer for `port`: hands n mbufs to the NIC TX
 * queue, frees any packets the NIC did not accept, and updates the per-port
 * TX counter.  NOTE(review): several interior lines (declarations, loop
 * around the free) are missing from this extraction. */
640 send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
642 struct rte_mbuf **m_table;
646 queueid = qconf->tx_queue_id[port];
647 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
649 ret = rte_eth_tx_burst(port, queueid, m_table, n);
/* rte_eth_tx_burst may transmit fewer than n; free the unsent tail */
650 if (unlikely(ret < n)) {
652 rte_pktmbuf_free(m_table[ret]);
656 tx_pkt_count[port] += ret;
660 /* Enqueue a single packet, and send burst if queue is filled */
/* Appends mbuf `m` to the calling lcore's TX buffer for `port`; when the
 * buffer reaches MAX_PKT_BURST it is flushed via send_burst().
 * NOTE(review): the increment of `len` (and the post-flush reset to 0)
 * is on lines missing from this extraction — confirm against full file. */
662 send_single_packet(struct rte_mbuf *m, uint8_t port)
666 struct lcore_conf *qconf;
668 lcore_id = rte_lcore_id();
/* per-lcore TX state: no locking needed, each lcore owns its own buffer */
670 qconf = &lcore_conf[lcore_id];
671 len = qconf->tx_mbufs[port].len;
672 qconf->tx_mbufs[port].m_table[len] = m;
675 /* enough pkts to be sent */
676 if (unlikely(len == MAX_PKT_BURST)) {
677 send_burst(qconf, MAX_PKT_BURST, port);
681 qconf->tx_mbufs[port].len = len;
685 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
/* Enqueue `num` mbufs for TX on `port`.  Fast path: if the software TX
 * buffer is empty and we have at least MAX_TX_BURST packets, transmit
 * directly and free any the NIC rejects.  Otherwise buffer the packets,
 * flushing via send_burst() when the buffer fills.  The duffs-device-style
 * switch on FWDSTEP copies packets in groups of four.
 * NOTE(review): many interior lines are missing from this extraction. */
686 static inline __attribute__((always_inline)) void
687 send_packetsx4(struct lcore_conf *qconf, uint8_t port,
688 struct rte_mbuf *m[], uint32_t num)
692 len = qconf->tx_mbufs[port].len;
695 * If TX buffer for that queue is empty, and we have enough packets,
696 * then send them straightway.
698 if (num >= MAX_TX_BURST && len == 0) {
699 n = rte_eth_tx_burst(port, qconf->tx_queue_id[port], m, num);
700 if (unlikely(n < num)) {
/* free the tail of packets the NIC did not accept */
702 rte_pktmbuf_free(m[n]);
709 * Put packets into TX buffer for that queue.
/* NOTE(review): upstream l3fwd computes the copy count from (len + num)
 * vs MAX_PKT_BURST; this condition tests `n` — verify against full file
 * and the lines missing between 709 and 713. */
713 n = (n > MAX_PKT_BURST) ? MAX_PKT_BURST - len : num;
716 switch (n % FWDSTEP) {
719 qconf->tx_mbufs[port].m_table[len + j] = m[j];
722 qconf->tx_mbufs[port].m_table[len + j] = m[j];
725 qconf->tx_mbufs[port].m_table[len + j] = m[j];
728 qconf->tx_mbufs[port].m_table[len + j] = m[j];
735 /* enough pkts to be sent */
736 if (unlikely(len == MAX_PKT_BURST)) {
738 send_burst(qconf, MAX_PKT_BURST, port);
740 /* copy rest of the packets into the TX buffer. */
743 switch (len % FWDSTEP) {
746 qconf->tx_mbufs[port].m_table[j] = m[n + j];
749 qconf->tx_mbufs[port].m_table[j] = m[n + j];
752 qconf->tx_mbufs[port].m_table[j] = m[n + j];
755 qconf->tx_mbufs[port].m_table[j] = m[n + j];
761 qconf->tx_mbufs[port].len = len;
763 #endif /* APP_LOOKUP_LPM */
765 #ifdef DO_RFC_1812_CHECKS
/* Validate an IPv4 header per RFC 1812 section 5.2.2 sanity checks.
 * NOTE(review): the return statements (error codes on failure, success
 * value at the end) are on lines missing from this extraction. */
767 is_valid_pkt_ipv4(struct ipv4_hdr *pkt, uint32_t link_len)
769 /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
771 * 1. The packet length reported by the Link Layer must be large
772 * enough to hold the minimum length legal IP datagram (20 bytes).
774 if (link_len < sizeof(struct ipv4_hdr))
777 /* 2. The IP checksum must be correct. */
778 /* this is checked in H/W */
781 * 3. The IP version number must be 4. If the version number is not 4
782 * then the packet may be another version of IP, such as IPng or
785 if (((pkt->version_ihl) >> 4) != 4)
788 * 4. The IP header length field must be large enough to hold the
789 * minimum length legal IP datagram (20 bytes = 5 words).
791 if ((pkt->version_ihl & 0xf) < 5)
795 * 5. The IP total length field must be large enough to hold the IP
796 * datagram header, whose length is specified in the IP header length
/* total_length is big-endian on the wire; 16-bit byte-swap before compare */
799 if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
806 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
808 static __m128i mask0;
809 static __m128i mask1;
810 static __m128i mask2;
/* Exact-match (hash) lookup of the output port for an IPv4 packet.
 * Loads 16 bytes starting at the TTL field so that protocol, src/dst IP
 * and src/dst ports land in one xmm register, masks out the non-key bytes
 * with mask0, and looks the 5-tuple up in the rte_hash table.  On a miss
 * the packet is sent back out the ingress port (`portid`). */
811 static inline uint8_t
812 get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
815 union ipv4_5tuple_host key;
817 ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
818 __m128i data = _mm_loadu_si128((__m128i*)(ipv4_hdr));
819 /* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */
820 key.xmm = _mm_and_si128(data, mask0);
821 /* Find destination port */
822 ret = rte_hash_lookup(ipv4_udp_replay_lookup_struct, (const void *)&key);
823 return (uint8_t)((ret < 0)? portid : ipv4_udp_replay_out_if[ret]);
/* Exact-match (hash) lookup of the output port for an IPv6 packet.
 * Loads 48 bytes starting at payload_len so the protocol, 128-bit src/dst
 * addresses and ports span three xmm registers; mask1/mask2 strip the
 * non-key bytes before the rte_hash lookup.  On a miss the packet is sent
 * back out the ingress port (`portid`).
 * NOTE(review): the assignment of key.xmm[1] (original line 839) is
 * missing from this extraction. */
826 static inline uint8_t
827 get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup_struct_t * ipv6_udp_replay_lookup_struct)
830 union ipv6_5tuple_host key;
832 ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
833 __m128i data0 = _mm_loadu_si128((__m128i*)(ipv6_hdr));
834 __m128i data1 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)));
835 __m128i data2 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)+sizeof(__m128i)));
836 /* Get part of 5 tuple: src IP address lower 96 bits and protocol */
837 key.xmm[0] = _mm_and_si128(data0, mask1);
838 /* Get part of 5 tuple: dst IP address lower 96 bits and src IP address higher 32 bits */
840 /* Get part of 5 tuple: dst port and src port and dst IP address higher 32 bits */
841 key.xmm[2] = _mm_and_si128(data2, mask2);
843 /* Find destination port */
844 ret = rte_hash_lookup(ipv6_udp_replay_lookup_struct, (const void *)&key);
845 return (uint8_t)((ret < 0)? portid : ipv6_udp_replay_out_if[ret]);
849 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
/* LPM lookup of the output port for an IPv4 packet: longest-prefix match
 * on the destination address (converted to host byte order).  On a miss
 * the packet is sent back out the ingress port (`portid`). */
851 static inline uint8_t
852 get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
856 return (uint8_t) ((rte_lpm_lookup(ipv4_udp_replay_lookup_struct,
857 rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
858 &next_hop) == 0) ? next_hop : portid);
/* LPM6 lookup of the output port for an IPv6 packet: longest-prefix match
 * on the 128-bit destination address.  NOTE(review): the fallback value on
 * lookup failure (presumably `portid`) is on a line missing from this
 * extraction. */
861 static inline uint8_t
862 get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup6_struct_t * ipv6_udp_replay_lookup_struct)
865 return (uint8_t) ((rte_lpm6_lookup(ipv6_udp_replay_lookup_struct,
866 ((struct ipv6_hdr*)ipv6_hdr)->dst_addr, &next_hop) == 0)?
871 static inline void udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid,
872 struct lcore_conf *qconf) __attribute__((unused));
874 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
875 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
877 #define MASK_ALL_PKTS 0xff
878 #define EXCLUDE_1ST_PKT 0xfe
879 #define EXCLUDE_2ND_PKT 0xfd
880 #define EXCLUDE_3RD_PKT 0xfb
881 #define EXCLUDE_4TH_PKT 0xf7
882 #define EXCLUDE_5TH_PKT 0xef
883 #define EXCLUDE_6TH_PKT 0xdf
884 #define EXCLUDE_7TH_PKT 0xbf
885 #define EXCLUDE_8TH_PKT 0x7f
/*
 * Replay eight IPv4/UDP packets at once back out the port they arrived on:
 * swap Ethernet src/dst MACs, swap IPv4 src/dst addresses, swap UDP
 * src/dst ports, then (under DO_RFC_1812_CHECKS) validate each header and
 * adjust TTL/checksum before transmitting each mbuf individually.
 * Unrolled by hand for 8 packets; invalid packets are freed and only the
 * surviving subset is replayed one-by-one.
 * NOTE(review): many interior lines (braces, loop headers, early returns,
 * the ARP dispatch body) are missing from this extraction; code below is
 * left byte-identical, including apparent mojibake on lines 926-928 where
 * "&eth_hdr" appears as "ð_hdr" (HTML-entity corruption) — restore from
 * the original file, do not hand-type from this view.
 */
888 simple_ipv4_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
890 struct ether_hdr *eth_hdr[8];
891 struct ether_hdr tmp;
892 struct ipv4_hdr *ipv4_hdr[8];
893 struct udp_hdr *udp_hdr[8];
895 l2_phy_interface_t *port = ifm_get_port(portid);
897 printf("port may be un initialized\n");
/* divert ARP/ICMP control traffic before touching the headers */
900 if (unlikely(arp_support)) {
911 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
912 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
913 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
914 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
915 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
916 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
917 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
918 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
/* swap source and destination MAC via a zeroed scratch header */
921 memset(&tmp,0,sizeof (struct ether_hdr));
926 ether_addr_copy(ð_hdr[i]->s_addr, &tmp.s_addr);
927 ether_addr_copy(ð_hdr[i]->d_addr, ð_hdr[i]->s_addr);
928 ether_addr_copy(&tmp.s_addr, ð_hdr[i]->d_addr);
931 /* Handle IPv4 headers.*/
932 ipv4_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv4_hdr *,
933 sizeof(struct ether_hdr));
934 ipv4_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv4_hdr *,
935 sizeof(struct ether_hdr));
936 ipv4_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv4_hdr *,
937 sizeof(struct ether_hdr));
938 ipv4_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv4_hdr *,
939 sizeof(struct ether_hdr));
940 ipv4_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv4_hdr *,
941 sizeof(struct ether_hdr));
942 ipv4_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv4_hdr *,
943 sizeof(struct ether_hdr));
944 ipv4_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv4_hdr *,
945 sizeof(struct ether_hdr));
946 ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv4_hdr *,
947 sizeof(struct ether_hdr));
/* swap IPv4 src/dst for each of the 8 packets (unrolled) */
948 struct ipv4_hdr temp_ipv4;
949 temp_ipv4.dst_addr = ipv4_hdr[0]->dst_addr;
950 ipv4_hdr[0]->dst_addr = ipv4_hdr[0]->src_addr;
951 ipv4_hdr[0]->src_addr = temp_ipv4.dst_addr;
952 temp_ipv4.dst_addr = ipv4_hdr[1]->dst_addr;
953 ipv4_hdr[1]->dst_addr = ipv4_hdr[1]->src_addr;
954 ipv4_hdr[1]->src_addr = temp_ipv4.dst_addr;
955 temp_ipv4.dst_addr = ipv4_hdr[2]->dst_addr;
956 ipv4_hdr[2]->dst_addr = ipv4_hdr[2]->src_addr;
957 ipv4_hdr[2]->src_addr = temp_ipv4.dst_addr;
958 temp_ipv4.dst_addr = ipv4_hdr[3]->dst_addr;
959 ipv4_hdr[3]->dst_addr = ipv4_hdr[3]->src_addr;
960 ipv4_hdr[3]->src_addr = temp_ipv4.dst_addr;
961 temp_ipv4.dst_addr = ipv4_hdr[4]->dst_addr;
962 ipv4_hdr[4]->dst_addr = ipv4_hdr[4]->src_addr;
963 ipv4_hdr[4]->src_addr = temp_ipv4.dst_addr;
964 temp_ipv4.dst_addr = ipv4_hdr[5]->dst_addr;
965 ipv4_hdr[5]->dst_addr = ipv4_hdr[5]->src_addr;
966 ipv4_hdr[5]->src_addr = temp_ipv4.dst_addr;
967 temp_ipv4.dst_addr = ipv4_hdr[6]->dst_addr;
968 ipv4_hdr[6]->dst_addr = ipv4_hdr[6]->src_addr;
969 ipv4_hdr[6]->src_addr = temp_ipv4.dst_addr;
970 temp_ipv4.dst_addr = ipv4_hdr[7]->dst_addr;
971 ipv4_hdr[7]->dst_addr = ipv4_hdr[7]->src_addr;
972 ipv4_hdr[7]->src_addr = temp_ipv4.dst_addr;
974 /* Handle UDP headers.*/
975 udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
976 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
978 udp_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct udp_hdr *,
979 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
980 udp_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct udp_hdr *,
981 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
982 udp_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct udp_hdr *,
983 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
984 udp_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct udp_hdr *,
985 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
986 udp_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct udp_hdr *,
987 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
988 udp_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct udp_hdr *,
989 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
990 udp_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct udp_hdr *,
991 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
992 /*1) memcpy or assignment.*/
/* swap UDP src/dst ports for each of the 8 packets (unrolled) */
994 struct udp_hdr temp_udp;
995 temp_udp.dst_port = udp_hdr[0]->dst_port;
996 udp_hdr[0]->dst_port = udp_hdr[0]->src_port;
997 udp_hdr[0]->src_port = temp_udp.dst_port;
998 temp_udp.dst_port = udp_hdr[1]->dst_port;
999 udp_hdr[1]->dst_port = udp_hdr[1]->src_port;
1000 udp_hdr[1]->src_port = temp_udp.dst_port;
1001 temp_udp.dst_port = udp_hdr[2]->dst_port;
1002 udp_hdr[2]->dst_port = udp_hdr[2]->src_port;
1003 udp_hdr[2]->src_port = temp_udp.dst_port;
1004 temp_udp.dst_port = udp_hdr[3]->dst_port;
1005 udp_hdr[3]->dst_port = udp_hdr[3]->src_port;
1006 udp_hdr[3]->src_port = temp_udp.dst_port;
1007 temp_udp.dst_port = udp_hdr[4]->dst_port;
1008 udp_hdr[4]->dst_port = udp_hdr[4]->src_port;
1009 udp_hdr[4]->src_port = temp_udp.dst_port;
1010 temp_udp.dst_port = udp_hdr[5]->dst_port;
1011 udp_hdr[5]->dst_port = udp_hdr[5]->src_port;
1012 udp_hdr[5]->src_port = temp_udp.dst_port;
1013 temp_udp.dst_port = udp_hdr[6]->dst_port;
1014 udp_hdr[6]->dst_port = udp_hdr[6]->src_port;
1015 udp_hdr[6]->src_port = temp_udp.dst_port;
1016 temp_udp.dst_port = udp_hdr[7]->dst_port;
1017 udp_hdr[7]->dst_port = udp_hdr[7]->src_port;
1018 udp_hdr[7]->src_port = temp_udp.dst_port;
1019 #ifdef DO_RFC_1812_CHECKS
1020 /* Check to make sure the packet is valid (RFC1812) */
/* valid_mask tracks which of the 8 packets survive validation; a cleared
 * bit means the corresponding mbuf was freed */
1021 uint8_t valid_mask = MASK_ALL_PKTS;
1022 if (is_valid_pkt_ipv4(ipv4_hdr[0], m[0]->pkt_len) < 0) {
1023 rte_pktmbuf_free(m[0]);
1024 valid_mask &= EXCLUDE_1ST_PKT;
1026 if (is_valid_pkt_ipv4(ipv4_hdr[1], m[1]->pkt_len) < 0) {
1027 rte_pktmbuf_free(m[1]);
1028 valid_mask &= EXCLUDE_2ND_PKT;
1030 if (is_valid_pkt_ipv4(ipv4_hdr[2], m[2]->pkt_len) < 0) {
1031 rte_pktmbuf_free(m[2]);
1032 valid_mask &= EXCLUDE_3RD_PKT;
1034 if (is_valid_pkt_ipv4(ipv4_hdr[3], m[3]->pkt_len) < 0) {
1035 rte_pktmbuf_free(m[3]);
1036 valid_mask &= EXCLUDE_4TH_PKT;
1038 if (is_valid_pkt_ipv4(ipv4_hdr[4], m[4]->pkt_len) < 0) {
1039 rte_pktmbuf_free(m[4]);
1040 valid_mask &= EXCLUDE_5TH_PKT;
1042 if (is_valid_pkt_ipv4(ipv4_hdr[5], m[5]->pkt_len) < 0) {
1043 rte_pktmbuf_free(m[5]);
1044 valid_mask &= EXCLUDE_6TH_PKT;
1046 if (is_valid_pkt_ipv4(ipv4_hdr[6], m[6]->pkt_len) < 0) {
1047 rte_pktmbuf_free(m[6]);
1048 valid_mask &= EXCLUDE_7TH_PKT;
1050 if (is_valid_pkt_ipv4(ipv4_hdr[7], m[7]->pkt_len) < 0) {
1051 rte_pktmbuf_free(m[7]);
1052 valid_mask &= EXCLUDE_8TH_PKT;
/* if any packet was dropped, fall back to per-packet replay for survivors */
1054 if (unlikely(valid_mask != MASK_ALL_PKTS)) {
1055 if (valid_mask == 0){
1059 for (i = 0; i < 8; i++) {
1060 if ((0x1 << i) & valid_mask) {
1061 udp_replay_simple_replay(m[i], portid, qconf);
1067 #endif // End of #ifdef DO_RFC_1812_CHECKS
1069 #ifdef DO_RFC_1812_CHECKS
1070 /* Update time to live and header checksum */
/* NOTE(review): incrementing hdr_checksum is the l3fwd incremental-update
 * shortcut paired with the TTL decrement — confirm it matches the
 * checksum-offload configuration before relying on it */
1071 --(ipv4_hdr[0]->time_to_live);
1072 --(ipv4_hdr[1]->time_to_live);
1073 --(ipv4_hdr[2]->time_to_live);
1074 --(ipv4_hdr[3]->time_to_live);
1075 ++(ipv4_hdr[0]->hdr_checksum);
1076 ++(ipv4_hdr[1]->hdr_checksum);
1077 ++(ipv4_hdr[2]->hdr_checksum);
1078 ++(ipv4_hdr[3]->hdr_checksum);
1079 --(ipv4_hdr[4]->time_to_live);
1080 --(ipv4_hdr[5]->time_to_live);
1081 --(ipv4_hdr[6]->time_to_live);
1082 --(ipv4_hdr[7]->time_to_live);
1083 ++(ipv4_hdr[4]->hdr_checksum);
1084 ++(ipv4_hdr[5]->hdr_checksum);
1085 ++(ipv4_hdr[6]->hdr_checksum);
1086 ++(ipv4_hdr[7]->hdr_checksum);
/* send each packet back out the port it arrived on */
1089 send_single_packet(m[0],portid );
1090 send_single_packet(m[1],portid );
1091 send_single_packet(m[2],portid );
1092 send_single_packet(m[3],portid);
1093 send_single_packet(m[4],portid);
1094 send_single_packet(m[5],portid);
1095 send_single_packet(m[6],portid);
1096 send_single_packet(m[7],portid);
/* Extract the IPv6 5-tuple hash key from mbuf m0: three unaligned 16-byte
 * loads starting at the IPv6 payload_len field, with mask0/mask1 clearing
 * the bytes that are not part of the key (the middle 16 bytes are taken
 * whole).  Result is written into *key for rte_hash lookup. */
1100 static inline void get_ipv6_5tuple(struct rte_mbuf* m0, __m128i mask0, __m128i mask1,
1101 union ipv6_5tuple_host * key)
1103 __m128i tmpdata0 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)));
1104 __m128i tmpdata1 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i)));
1105 __m128i tmpdata2 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i) + sizeof(__m128i)));
1106 key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
1107 key->xmm[1] = tmpdata1;
1108 key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
/*
 * Reflect a burst of 8 IPv6/UDP packets back out the receive port:
 * swap src/dst MAC, swap src/dst IPv6 address, swap UDP ports, then
 * transmit each mbuf on the same portid it arrived on.
 * NOTE: several original lines (braces, loop headers, ret declaration)
 * are elided in this chunk; only visible lines are reproduced.
 * FIX: "ð_hdr" was HTML-entity mojibake for "&eth_hdr" (the sequence
 * "&eth" was rendered as U+00F0); restored the correct C tokens.
 */
1113 simple_ipv6_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
1115 struct ether_hdr *eth_hdr[8],tmp;
1117 __attribute__((unused)) struct ipv6_hdr *ipv6_hdr[8], temp_ipv6;
1119 union ipv6_5tuple_host key[8];
1120 struct udp_hdr *udp_hdr[8];
1121 l2_phy_interface_t *port = ifm_get_port(portid);
/* Diagnostic only: the guard condition for this printf is elided here. */
1123 printf("port may be un initialized\n");
/* Hand ARP/ICMPv6 control traffic to the slow path before replaying. */
1127 if (unlikely(arp_support)) {
1128 check_arpicmpv6(m[0]);
1129 check_arpicmpv6(m[1]);
1130 check_arpicmpv6(m[2]);
1131 check_arpicmpv6(m[3]);
1132 check_arpicmpv6(m[4]);
1133 check_arpicmpv6(m[5]);
1134 check_arpicmpv6(m[6]);
1135 check_arpicmpv6(m[7]);
1139 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
1140 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
1141 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
1142 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
1143 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
1144 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
1145 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
1146 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
1148 memset(&tmp,0,sizeof (struct ether_hdr));
/* Swap src/dst MAC via tmp (loop header over i is elided above). */
1152 ether_addr_copy(&eth_hdr[i]->s_addr, &tmp.s_addr);
1153 ether_addr_copy(&eth_hdr[i]->d_addr, &eth_hdr[i]->s_addr);
1154 ether_addr_copy(&tmp.s_addr, &eth_hdr[i]->d_addr);
1156 /* Handle IPv6 headers.*/
1157 ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv6_hdr *,
1158 sizeof(struct ether_hdr));
1159 ipv6_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv6_hdr *,
1160 sizeof(struct ether_hdr));
1161 ipv6_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv6_hdr *,
1162 sizeof(struct ether_hdr));
1163 ipv6_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv6_hdr *,
1164 sizeof(struct ether_hdr));
1165 ipv6_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv6_hdr *,
1166 sizeof(struct ether_hdr));
1167 ipv6_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv6_hdr *,
1168 sizeof(struct ether_hdr));
1169 ipv6_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv6_hdr *,
1170 sizeof(struct ether_hdr));
1171 ipv6_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv6_hdr *,
1172 sizeof(struct ether_hdr));
/* Swap 128-bit src/dst IPv6 addresses via temp_ipv6 (loop header elided). */
1175 memcpy(temp_ipv6.dst_addr,ipv6_hdr[i]->dst_addr,16);
1176 memcpy(ipv6_hdr[i]->dst_addr,ipv6_hdr[i]->src_addr,16);
1177 memcpy(ipv6_hdr[i]->src_addr,temp_ipv6.dst_addr,16);
1180 /* Handle UDP headers.*/
1181 udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
1182 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1184 udp_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct udp_hdr *,
1185 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1186 udp_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct udp_hdr *,
1187 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1188 udp_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct udp_hdr *,
1189 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1190 udp_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct udp_hdr *,
1191 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1192 udp_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct udp_hdr *,
1193 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1194 udp_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct udp_hdr *,
1195 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1196 udp_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct udp_hdr *,
1197 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1198 /*1) memcpy or assignment.*/
1200 struct udp_hdr temp_udp;
/* Swap UDP src/dst port via temp_udp (loop header elided). */
1203 temp_udp.dst_port = udp_hdr[i]->dst_port;
1204 udp_hdr[i]->dst_port = udp_hdr[i]->src_port;
1205 udp_hdr[i]->src_port = temp_udp.dst_port;
1207 const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
1208 &key[4], &key[5], &key[6], &key[7]};
/* rte_hash_lookup_multi was renamed rte_hash_lookup_bulk in later DPDK. */
1209 #if RTE_VERSION < 0x100b0000
1210 rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
1212 rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
/* Send every modified packet back out the port it was received on. */
1214 send_single_packet(m[0],portid);
1215 send_single_packet(m[1],portid);
1216 send_single_packet(m[2],portid);
1217 send_single_packet(m[3],portid);
1218 send_single_packet(m[4],portid);
1219 send_single_packet(m[5],portid);
1220 send_single_packet(m[6],portid);
1221 send_single_packet(m[7],portid);
1224 #endif /* APP_LOOKUP_METHOD */
/*
 * Scalar (one-packet) replay path: swap MACs, swap L3 addresses
 * (IPv4 or IPv6), swap UDP ports, and send the packet back out the
 * receive port.  Non-IP packets are freed.
 * NOTE: several original lines (braces, guard conditions, returns)
 * are elided in this chunk; only visible lines are reproduced.
 * FIX: "ð_hdr" was HTML-entity mojibake for "&eth_hdr"; restored
 * the correct C tokens.
 */
1226 static inline __attribute__((always_inline)) void
1227 udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf)
1229 struct ether_hdr *eth_hdr,tmp;
1230 struct ipv4_hdr *ipv4_hdr,temp_ipv4;
1231 struct udp_hdr *udp_hdr,temp_udp;
1232 l2_phy_interface_t *port = ifm_get_port(portid);
/* Diagnostics only: guard conditions for these printfs are elided. */
1235 printf("port may be un initialized\n");
1239 printf("Null packet received\n");
/* ARP/ICMP control traffic is consumed by the slow path. */
1242 if (unlikely(arp_support)) {
1243 if (!check_arpicmp(m))
1247 printf("qconf configuration is NULL\n");
1248 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
/* Swap src/dst MAC via tmp. */
1249 ether_addr_copy(&eth_hdr->s_addr, &tmp.s_addr);
1250 ether_addr_copy(&eth_hdr->d_addr, &eth_hdr->s_addr);
1251 ether_addr_copy(&tmp.s_addr, &eth_hdr->d_addr);
1252 struct ether_hdr *eth_h = rte_pktmbuf_mtod(m, struct ether_hdr *);
1254 if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
1255 /* Handle IPv4 headers.*/
1256 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
1257 sizeof(struct ether_hdr));
/* Swap src/dst IPv4 address via temp_ipv4. */
1258 temp_ipv4.dst_addr = ipv4_hdr->dst_addr;
1259 ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
1260 ipv4_hdr->src_addr = temp_ipv4.dst_addr;
1261 #ifdef DO_RFC_1812_CHECKS
1262 /* Check to make sure the packet is valid (RFC1812) */
1263 if (is_valid_pkt_ipv4(ipv4_hdr, m->pkt_len) < 0) {
1264 rte_pktmbuf_free(m);
1270 #ifdef DO_RFC_1812_CHECKS
1271 /* Update time to live and header checksum */
/* Incremental checksum update: TTL-1 pairs with checksum+1 (RFC 1141). */
1272 --(ipv4_hdr->time_to_live);
1273 ++(ipv4_hdr->hdr_checksum);
1275 /* Handle UDP headers.*/
1276 udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
1277 (sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr)));
1279 /*Swapping Src and Dst Port*/
1280 temp_udp.dst_port = udp_hdr->dst_port;
1281 udp_hdr->dst_port = udp_hdr->src_port;
1282 udp_hdr->src_port = temp_udp.dst_port;
1284 send_single_packet(m, portid);
1285 } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
1286 /* Handle IPv6 headers.*/
1287 struct ipv6_hdr *ipv6_hdr,temp_ipv6;
1289 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
1290 sizeof(struct ether_hdr));
1292 /*Swapping of Src and Dst IP address*/
1293 memcpy(temp_ipv6.dst_addr,ipv6_hdr->dst_addr,16);
1294 memcpy(ipv6_hdr->dst_addr,ipv6_hdr->src_addr,16);
1295 memcpy(ipv6_hdr->src_addr,temp_ipv6.dst_addr,16);
1297 /* Handle UDP headers.*/
1298 udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
1299 (sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr)));
1300 /*Swapping Src and Dst Port*/
1301 temp_udp.dst_port = udp_hdr->dst_port;
1302 udp_hdr->dst_port = udp_hdr->src_port;
1303 udp_hdr->src_port = temp_udp.dst_port;
1304 send_single_packet(m, portid);
1306 /* Free the mbuf that contains non-IPV4/IPV6 packet */
1307 rte_pktmbuf_free(m);
1310 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1311 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1312 #ifdef DO_RFC_1812_CHECKS
1314 #define IPV4_MIN_VER_IHL 0x45
1315 #define IPV4_MAX_VER_IHL 0x4f
1316 #define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)
1318 /* Minimum value of IPV4 total length (20B) in network byte order. */
1319 #define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
1322 * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
1323 * - The IP version number must be 4.
1324 * - The IP header length field must be large enough to hold the
1325 * minimum length legal IP datagram (20 bytes = 5 words).
1326 * - The IP total length field must be large enough to hold the IP
1327 * datagram header, whose length is specified in the IP header length
1329 * If we encounter invalid IPV4 packet, then set destination port for it
1330 * to BAD_PORT value.
/*
 * RFC1812 sanity processing for one IPv4 header: decrement TTL with the
 * incremental checksum trick, and validate version/IHL and total length.
 * On failure the destination port is set to BAD_PORT (that assignment is
 * in elided lines); *dp is the per-packet destination-port slot.
 */
1332 static inline __attribute__((always_inline)) void
1333 rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
1337 if (RTE_ETH_IS_IPV4_HDR(ptype)) {
/* ihl is declared in an elided line; 0 means minimal 20-byte header. */
1338 ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;
1340 ipv4_hdr->time_to_live--;
1341 ipv4_hdr->hdr_checksum++;
1343 if (ihl > IPV4_MAX_VER_IHL_DIFF ||
/* NOTE(review): this length test mirrors upstream l3fwd, but requiring the
 * low byte of total_length (big-endian) to be 0 before comparing against
 * IPV4_MIN_LEN_BE looks suspicious — confirm against RFC1812 intent. */
1344 ((uint8_t)ipv4_hdr->total_length == 0 &&
1345 ipv4_hdr->total_length < IPV4_MIN_LEN_BE)) {
/* No-op stub when RFC1812 checks are compiled out. */
1352 #define rfc1812_process(mb, dp) do { } while (0)
1353 #endif /* DO_RFC_1812_CHECKS */
1354 #endif /* APP_LOOKUP_LPM && ENABLE_MULTI_BUFFER_OPTIMIZE */
1357 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1358 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
/*
 * LPM lookup of the next-hop/destination port for one packet.
 * IPv4 uses the caller-extracted host-order dst_ipv4; IPv6 re-reads the
 * destination address from the packet.  Lookup-failure fallbacks and the
 * return statements are in elided lines (per the file comment, a failed
 * lookup falls back to the incoming portid).
 * FIX: the first rte_pktmbuf_mtod() referenced undeclared "m"; the
 * mbuf parameter is named "pkt", so use it.
 */
1360 static inline __attribute__((always_inline)) uint16_t
1361 get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
1362 uint32_t dst_ipv4, uint8_t portid)
1365 struct ipv6_hdr *ipv6_hdr;
1366 struct ether_hdr *eth_hdr;
1367 struct ether_hdr *eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1369 if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
1370 if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
1373 } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
1374 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1375 ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
1376 if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
1377 ipv6_hdr->dst_addr, &next_hop) != 0)
/*
 * Finish processing of a single packet on the LPM path: resolve its
 * destination port, run RFC1812 checks, and rewrite the first 12 bytes
 * of the Ethernet header (dst+src MAC) with a single SSE blend/store.
 * te/ve/dp declarations and the val_eth load are in elided lines.
 */
1387 process_packet(struct lcore_conf *qconf, struct rte_mbuf *pkt,
1388 uint16_t *dst_port, uint8_t portid)
1390 struct ether_hdr *eth_hdr;
1391 struct ipv4_hdr *ipv4_hdr;
1396 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1397 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
/* Convert the big-endian on-wire address to host order for the LPM. */
1400 dst_ipv4 = ipv4_hdr->dst_addr;
1401 dst_ipv4 = rte_be_to_cpu_32(dst_ipv4);
1403 /*Changing the dp to incoming port*/
1404 dp = get_dst_port(qconf, pkt, dst_ipv4, portid);
/* Load the current Ethernet header; ve (new MACs) is set in elided lines. */
1407 te = _mm_loadu_si128((__m128i *)eth_hdr);
1411 rfc1812_process(ipv4_hdr, dst_port, pkt->packet_type);
/* MASK_ETH keeps ether_type, replaces the two MAC addresses. */
1413 te = _mm_blend_epi16(te, ve, MASK_ETH);
1414 _mm_storeu_si128((__m128i *)eth_hdr, te);
1416 /* Won't be using the following function */
1419 * Read packet_type and destination IPV4 addresses from 4 mbufs.
/*
 * Gathers the four big-endian destination addresses into one __m128i
 * (dip) and ANDs the four packet_type words so ipv4_flag[0] has
 * RTE_PTYPE_L3_IPV4 set only when all four packets are IPv4.
 */
1422 processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
1424 uint32_t *ipv4_flag)
1426 struct ipv4_hdr *ipv4_hdr;
1427 struct ether_hdr *eth_hdr;
1428 uint32_t x0, x1, x2, x3;
1430 eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
1431 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1432 x0 = ipv4_hdr->dst_addr;
1433 ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
1435 eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
1436 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1437 x1 = ipv4_hdr->dst_addr;
1438 ipv4_flag[0] &= pkt[1]->packet_type;
1440 eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
1441 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1442 x2 = ipv4_hdr->dst_addr;
1443 ipv4_flag[0] &= pkt[2]->packet_type;
1445 eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
1446 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1447 x3 = ipv4_hdr->dst_addr;
1448 ipv4_flag[0] &= pkt[3]->packet_type;
/* Pack x0..x3 into lanes 0..3 (set_epi32 takes args high-to-low). */
1450 dip[0] = _mm_set_epi32(x3, x2, x1, x0);
1454 * Lookup into LPM for destination port.
1455 * If lookup fails, use incoming port (portid) as destination port.
/*
 * Fast path: one vectorized rte_lpm_lookupx4 when all four packets are
 * IPv4; otherwise (else branch in elided lines) four scalar
 * get_dst_port() calls using the byte-swapped addresses in dst.u32[].
 * The dip/ipv4_flag/portid parameters are declared in elided lines.
 */
1458 processx4_step2(const struct lcore_conf *qconf,
1462 struct rte_mbuf *pkt[FWDSTEP],
1463 uint16_t dprt[FWDSTEP])
/* Shuffle mask that byte-swaps each 32-bit lane (be32 -> host order). */
1466 const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
1467 4, 5, 6, 7, 0, 1, 2, 3);
1469 /* Byte swap 4 IPV4 addresses. */
1470 dip = _mm_shuffle_epi8(dip, bswap_mask);
1472 /* if all 4 packets are IPV4. */
1473 if (likely(ipv4_flag)) {
1474 rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid);
1477 dprt[0] = get_dst_port(qconf, pkt[0], dst.u32[0], portid);
1478 dprt[1] = get_dst_port(qconf, pkt[1], dst.u32[1], portid);
1479 dprt[2] = get_dst_port(qconf, pkt[2], dst.u32[2], portid);
1480 dprt[3] = get_dst_port(qconf, pkt[3], dst.u32[3], portid);
1485 * Update source and destination MAC addresses in the ethernet header.
1486 * Perform RFC1812 checks and updates for IPV4 packets.
1489 processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
1491 __m128i te[FWDSTEP];
1492 __m128i ve[FWDSTEP];
1493 __m128i *p[FWDSTEP];
/* p[i] points at the start of packet i's Ethernet header. */
1495 p[0] = rte_pktmbuf_mtod(pkt[0], __m128i *);
1496 p[1] = rte_pktmbuf_mtod(pkt[1], __m128i *);
1497 p[2] = rte_pktmbuf_mtod(pkt[2], __m128i *);
1498 p[3] = rte_pktmbuf_mtod(pkt[3], __m128i *);
/* ve[i] = precomputed MAC pair for the resolved destination port;
 * te[i] = current header contents. */
1500 ve[0] = val_eth[dst_port[0]];
1501 te[0] = _mm_loadu_si128(p[0]);
1503 ve[1] = val_eth[dst_port[1]];
1504 te[1] = _mm_loadu_si128(p[1]);
1506 ve[2] = val_eth[dst_port[2]];
1507 te[2] = _mm_loadu_si128(p[2]);
1509 ve[3] = val_eth[dst_port[3]];
1510 te[3] = _mm_loadu_si128(p[3]);
1512 /* Update first 12 bytes, keep rest bytes intact. */
1513 te[0] =  _mm_blend_epi16(te[0], ve[0], MASK_ETH);
1514 te[1] =  _mm_blend_epi16(te[1], ve[1], MASK_ETH);
1515 te[2] =  _mm_blend_epi16(te[2], ve[2], MASK_ETH);
1516 te[3] =  _mm_blend_epi16(te[3], ve[3], MASK_ETH);
1518 _mm_storeu_si128(p[0], te[0]);
1519 _mm_storeu_si128(p[1], te[1]);
1520 _mm_storeu_si128(p[2], te[2]);
1521 _mm_storeu_si128(p[3], te[3]);
/* Validate each IPv4 header; may overwrite dst_port[i] with BAD_PORT. */
1523 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
1524 &dst_port[0], pkt[0]->packet_type);
1525 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
1526 &dst_port[1], pkt[1]->packet_type);
1527 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
1528 &dst_port[2], pkt[2]->packet_type);
1529 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
1530 &dst_port[3], pkt[3]->packet_type);
1534 * We group consecutive packets with the same destination port into one burst.
1535 * To avoid extra latency this is done together with some other packet
1536 * processing, but after we have made a final decision about the packet's destination.
1537 * To do this we maintain:
1538 * pnum - array of number of consecutive packets with the same dest port for
1539 * each packet in the input burst.
1540 * lp - pointer to the last updated element in the pnum.
1541 * dlp - dest port value lp corresponds to.
/* Group size = 2^FWDSTEP possible equality masks; GRPMSK selects them. */
1544 #define GRPSZ (1 << FWDSTEP)
1545 #define GRPMSK (GRPSZ - 1)
/* Scalar grouping step for the <FWDSTEP leftover packets: extend the
 * current run if dst port is unchanged, else start a new run at idx.
 * (The increment branch and macro tail are in elided lines.) */
1547 #define GROUP_PORT_STEP(dlp, dcp, lp, pn, idx) do { \
1548 if (likely((dlp) == (dcp)[(idx)])) { \
1551 (dlp) = (dcp)[idx]; \
1552 (lp) = (pn) + (idx); \
1558 * Group consecutive packets with the same destination port in bursts of 4.
1559 * Suppose we have array of destination ports:
1560 * dst_port[] = {a, b, c, d, e, ... }
1561 * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
1562 * We do 4 comparisons at once and the result is a 4-bit mask.
1563 * This mask is used as an index into a prebuilt array of pnum values.
/*
 * Compare dst_port[j..j+3] (dp1) against dst_port[j+1..j+4] (dp2) and,
 * via a 16-entry lookup table indexed by the 4-bit equality mask, update
 * the run-length array pnum in one 64-bit store.  Returns the new "last
 * updated element" pointer (return statement is in elided lines).
 */
1565 static inline uint16_t *
1566 port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
1568 static const struct {
1569 uint64_t pnum; /* prebuilt 4 values for pnum[]. */
1570 int32_t  idx;  /* index for new last updated element. */
1571 uint16_t lpv;  /* add value to the last updated element. */
/* One entry per equality mask; idx/lpv initializers are in elided lines. */
1574 /* 0: a != b, b != c, c != d, d != e */
1575 .pnum = UINT64_C(0x0001000100010001),
1580 /* 1: a == b, b != c, c != d, d != e */
1581 .pnum = UINT64_C(0x0001000100010002),
1586 /* 2: a != b, b == c, c != d, d != e */
1587 .pnum = UINT64_C(0x0001000100020001),
1592 /* 3: a == b, b == c, c != d, d != e */
1593 .pnum = UINT64_C(0x0001000100020003),
1598 /* 4: a != b, b != c, c == d, d != e */
1599 .pnum = UINT64_C(0x0001000200010001),
1604 /* 5: a == b, b != c, c == d, d != e */
1605 .pnum = UINT64_C(0x0001000200010002),
1610 /* 6: a != b, b == c, c == d, d != e */
1611 .pnum = UINT64_C(0x0001000200030001),
1616 /* 7: a == b, b == c, c == d, d != e */
1617 .pnum = UINT64_C(0x0001000200030004),
1622 /* 8: a != b, b != c, c != d, d == e */
1623 .pnum = UINT64_C(0x0002000100010001),
1628 /* 9: a == b, b != c, c != d, d == e */
1629 .pnum = UINT64_C(0x0002000100010002),
1634 /* 0xa: a != b, b == c, c != d, d == e */
1635 .pnum = UINT64_C(0x0002000100020001),
1640 /* 0xb: a == b, b == c, c != d, d == e */
1641 .pnum = UINT64_C(0x0002000100020003),
1646 /* 0xc: a != b, b != c, c == d, d == e */
1647 .pnum = UINT64_C(0x0002000300010001),
1652 /* 0xd: a == b, b != c, c == d, d == e */
1653 .pnum = UINT64_C(0x0002000300010002),
1658 /* 0xe: a != b, b == c, c == d, d == e */
1659 .pnum = UINT64_C(0x0002000300040001),
1664 /* 0xf: a == b, b == c, c == d, d == e */
1665 .pnum = UINT64_C(0x0002000300040005),
/* Overlay pn[] as one u64 so the table value can be stored at once. */
1672 uint16_t u16[FWDSTEP + 1];
1674 } *pnum = (void *)pn;
/* Build the 4-bit equality mask from the two shifted port vectors. */
1678 dp1 = _mm_cmpeq_epi16(dp1, dp2);
1679 dp1 = _mm_unpacklo_epi16(dp1, dp1);
1680 v = _mm_movemask_ps((__m128)dp1);
1682 /* update last port counter. */
1683 lp[0] += gptbl[v].lpv;
1685 /* if dest port value has changed. */
1687 lp = pnum->u16 + gptbl[v].idx;
1689 pnum->u64 = gptbl[v].pnum;
1695 #endif /* APP_LOOKUP_METHOD */
1697 /* main processing loop */
/*
 * Per-lcore event loop: periodically drain the per-port TX buffers,
 * then poll every assigned RX queue and replay the received burst via
 * the exact-match (8-packet), LPM (FWDSTEP), or scalar path depending
 * on compile-time configuration.  Never returns in normal operation.
 * Loop braces, several declarations and the while(1) header are elided.
 */
1699 main_loop(__attribute__((unused)) void *dummy)
1701 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1703 uint64_t prev_tsc, diff_tsc, cur_tsc;
1705 uint8_t portid, queueid;
1706 struct lcore_conf *qconf;
1707 l2_phy_interface_t *port;
/* TX drain period in TSC cycles (BURST_TX_DRAIN_US microseconds). */
1708 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
1709 US_PER_S * BURST_TX_DRAIN_US;
1711 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1712 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1716 uint16_t dst_port[MAX_PKT_BURST];
1717 __m128i dip[MAX_PKT_BURST / FWDSTEP];
1718 uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
1719 uint16_t pnum[MAX_PKT_BURST + 1];
1724 lcore_id = rte_lcore_id();
1725 qconf = &lcore_conf[lcore_id];
1727 if (qconf->n_rx_queue == 0) {
1728 RTE_LOG(INFO, UDP_Replay, "lcore %u has nothing to do\n", lcore_id);
1732 RTE_LOG(INFO, UDP_Replay, "entering main loop on lcore %u\n", lcore_id);
1734 for (i = 0; i < qconf->n_rx_queue; i++) {
1736 portid = qconf->rx_queue_list[i].port_id;
1737 queueid = qconf->rx_queue_list[i].queue_id;
1738 RTE_LOG(INFO, UDP_Replay, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
1744 cur_tsc = rte_rdtsc();
1747 * TX burst queue drain
1749 diff_tsc = cur_tsc - prev_tsc;
1750 if (unlikely(diff_tsc > drain_tsc)) {
1753 * This could be optimized (use queueid instead of
1754 * portid), but it is not called so often
1756 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1757 if (qconf->tx_mbufs[portid].len == 0)
1760 qconf->tx_mbufs[portid].len,
1762 qconf->tx_mbufs[portid].len = 0;
1769 * Read packet from RX queues
1771 for (i = 0; i < qconf->n_rx_queue; ++i) {
1772 portid = qconf->rx_queue_list[i].port_id;
1773 queueid = qconf->rx_queue_list[i].queue_id;
1774 port = ifm_get_port(portid);
1776 nb_rx = port->retrieve_bulk_pkts(portid,
1777 queueid, pkts_burst);
1778 port->n_rxpkts += nb_rx;
/* Diagnostic only; the guard for this printf is elided. */
1780 printf("port may be un initialized\n");
1784 rcv_pkt_count[portid] += nb_rx;
1788 #if (ENABLE_MULTI_BUFFER_OPTIMIZE == 1)
1789 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
1792 * Send nb_rx - nb_rx%8 packets
/* Exact-match path: process in groups of 8 when all 8 share ethertype. */
1795 int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
1796 for (j = 0; j < n; j += 8) {
1797 struct ether_hdr *eth_h0 =
1798 rte_pktmbuf_mtod(pkts_burst[j], struct ether_hdr *);
1799 struct ether_hdr *eth_h1 =
1800 rte_pktmbuf_mtod(pkts_burst[j+1], struct ether_hdr *);
1801 struct ether_hdr *eth_h2 =
1802 rte_pktmbuf_mtod(pkts_burst[j+2], struct ether_hdr *);
1803 struct ether_hdr *eth_h3 =
1804 rte_pktmbuf_mtod(pkts_burst[j+3], struct ether_hdr *);
1805 struct ether_hdr *eth_h4 =
1806 rte_pktmbuf_mtod(pkts_burst[j+4], struct ether_hdr *);
1807 struct ether_hdr *eth_h5 =
1808 rte_pktmbuf_mtod(pkts_burst[j+5], struct ether_hdr *);
1809 struct ether_hdr *eth_h6 =
1810 rte_pktmbuf_mtod(pkts_burst[j+6], struct ether_hdr *);
1811 struct ether_hdr *eth_h7 =
1812 rte_pktmbuf_mtod(pkts_burst[j+7], struct ether_hdr *);
1814 uint16_t ether_type;
/* ANDing the 8 ethertypes yields the common value only if all match. */
1815 ether_type = (rte_cpu_to_be_16(eth_h0->ether_type) &
1816 rte_cpu_to_be_16(eth_h1->ether_type) &
1817 rte_cpu_to_be_16(eth_h2->ether_type) &
1818 rte_cpu_to_be_16(eth_h3->ether_type) &
1819 rte_cpu_to_be_16(eth_h4->ether_type) &
1820 rte_cpu_to_be_16(eth_h5->ether_type) &
1821 rte_cpu_to_be_16(eth_h6->ether_type) &
1822 rte_cpu_to_be_16(eth_h7->ether_type));
1824 if (ether_type == ETHER_TYPE_IPv4) {
1825 simple_ipv4_replay_8pkts(
1826 &pkts_burst[j], portid, qconf);
1827 } else if (ether_type == ETHER_TYPE_IPv6) {
1828 simple_ipv6_replay_8pkts(&pkts_burst[j],
/* Mixed group: fall back to the scalar replay for each packet. */
1831 udp_replay_simple_replay(pkts_burst[j],
1833 udp_replay_simple_replay(pkts_burst[j+1],
1835 udp_replay_simple_replay(pkts_burst[j+2],
1837 udp_replay_simple_replay(pkts_burst[j+3],
1839 udp_replay_simple_replay(pkts_burst[j+4],
1841 udp_replay_simple_replay(pkts_burst[j+5],
1843 udp_replay_simple_replay(pkts_burst[j+6],
1845 udp_replay_simple_replay(pkts_burst[j+7],
/* Tail: remaining nb_rx%8 packets go through the scalar path. */
1850 for (; j < nb_rx ; j++) {
1851 udp_replay_simple_replay(pkts_burst[j],
1855 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
/* LPM path: FWDSTEP-wide SSE pipeline (step1: gather, step2: lookup,
 * step3: header rewrite + grouping by destination port). */
1857 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
1858 for (j = 0; j != k; j += FWDSTEP) {
1859 processx4_step1(&pkts_burst[j],
1861 &ipv4_flag[j / FWDSTEP]);
1864 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
1865 for (j = 0; j != k; j += FWDSTEP) {
1866 processx4_step2(qconf, dip[j / FWDSTEP],
1867 ipv4_flag[j / FWDSTEP], portid,
1868 &pkts_burst[j], &dst_port[j]);
1872 * Finish packet processing and group consecutive
1873 * packets with the same destination port.
1875 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
1882 processx4_step3(pkts_burst, dst_port);
1884 /* dp1: <d[0], d[1], d[2], d[3], ... > */
1885 dp1 = _mm_loadu_si128((__m128i *)dst_port);
1887 for (j = FWDSTEP; j != k; j += FWDSTEP) {
1888 processx4_step3(&pkts_burst[j],
1893 * <d[j-3], d[j-2], d[j-1], d[j], ... >
1895 dp2 = _mm_loadu_si128((__m128i *)
1896 &dst_port[j - FWDSTEP + 1]);
1897 lp = port_groupx4(&pnum[j - FWDSTEP],
1902 * <d[j], d[j+1], d[j+2], d[j+3], ... >
1904 dp1 = _mm_srli_si128(dp2,
1906 sizeof(dst_port[0]));
1910 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
1912 dp2 = _mm_shufflelo_epi16(dp1, 0xf9);
1913 lp = port_groupx4(&pnum[j - FWDSTEP], lp,
1917 * remove values added by the last repeated
1921 dlp = dst_port[j - 1];
1923 /* set dlp and lp to the never used values. */
1925 lp = pnum + MAX_PKT_BURST;
1928 /* Process up to last 3 packets one by one. */
/* switch falls through intentionally: 3 -> 2 -> 1 leftover packets. */
1929 switch (nb_rx % FWDSTEP) {
1931 process_packet(qconf, pkts_burst[j],
1932 dst_port + j, portid);
1933 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
1936 process_packet(qconf, pkts_burst[j],
1937 dst_port + j, portid);
1938 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
1941 process_packet(qconf, pkts_burst[j],
1942 dst_port + j, portid);
1943 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
1948 * Send packets out, through destination port.
1949 * Consecutive packets with the same destination port
1950 * are already grouped together.
1951 * If destination port for the packet equals BAD_PORT,
1952 * then free the packet without sending it out.
1954 for (j = 0; j < nb_rx; j += k) {
1962 if (likely(pn != BAD_PORT)) {
1963 send_packetsx4(qconf, pn,
1966 for (m = j; m != j + k; m++)
1967 rte_pktmbuf_free(pkts_burst[m]);
1971 #endif /* APP_LOOKUP_METHOD */
1972 #else /* ENABLE_MULTI_BUFFER_OPTIMIZE == 0 */
1974 /* Prefetch first packets */
1975 for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
1976 rte_prefetch0(rte_pktmbuf_mtod(
1977 pkts_burst[j], void *));
1980 /* Prefetch and forward already prefetched packets */
1981 for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
1982 rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
1983 j + PREFETCH_OFFSET], void *));
1984 udp_replay_simple_replay(pkts_burst[j], portid,
1988 /* Forward remaining prefetched packets */
1989 for (; j < nb_rx; j++) {
1990 udp_replay_simple_replay(pkts_burst[j], portid,
1993 #endif /* ENABLE_MULTI_BUFFER_OPTIMIZE */
2006 printf ("UDP_Replay stats:\n");
2007 printf ("--------------\n");
2008 printf (" Port Rx Packet Tx Packet Rx Pkt Drop Tx Pkt Drop arp_pkts\n");
2009 for (i = 0; i < nb_lcore_params; ++i) {
2010 portid = lcore_params[i].port_id;
2011 printf ("%5u%15lu%15lu%17d%17d%14u",portid, rcv_pkt_count[portid], tx_pkt_count[portid],j,j, arp_pkts);
2023 for (i = 0; i < 32; i++) {
2024 rcv_pkt_count[i] = 0;
2025 tx_pkt_count[i] = 0;
/*
 * Validate each (port, queue, lcore) tuple from --config: queue id in
 * range, lcore enabled in the coremask, and (warning only) lcore on a
 * non-zero NUMA socket while NUMA awareness is off.  Error returns and
 * closing braces are in elided lines.
 * FIX: "(socketid = rte_lcore_to_socket_id(lcore) != 0)" assigned the
 * comparison result (0/1) to socketid due to precedence; parenthesized
 * the assignment so socketid holds the actual socket id.
 */
2032 check_lcore_params(void)
2034 uint8_t queue, lcore;
2038 for (i = 0; i < nb_lcore_params; ++i) {
2039 queue = lcore_params[i].queue_id;
2040 if (queue >= MAX_RX_QUEUE_PER_PORT) {
2041 printf("invalid queue number: %hhu\n", queue);
2044 lcore = lcore_params[i].lcore_id;
2045 if (!rte_lcore_is_enabled(lcore)) {
2046 printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
2049 if (((socketid = rte_lcore_to_socket_id(lcore)) != 0) &&
2051 printf("warning: lcore %hhu is on socket %d with numa off \n",
/*
 * Verify every port referenced by --config is both enabled in the
 * port mask and actually present (portid < nb_ports).  Error returns
 * and braces are in elided lines.
 */
2059 check_port_config(const unsigned nb_ports)
2064 for (i = 0; i < nb_lcore_params; ++i) {
2065 portid = lcore_params[i].port_id;
2066 if ((enabled_port_mask & (1 << portid)) == 0) {
2067 printf("port %u is not enabled in port mask\n", portid);
2070 if (portid >= nb_ports) {
2071 printf("port %u is not present on the board\n", portid);
/*
 * Return the number of RX queues configured for `port`: one more than
 * the highest queue_id referencing that port in lcore_params
 * (queue's declaration/initializer is in elided lines).
 */
2079 get_port_n_rx_queues(const uint8_t port)
2084 for (i = 0; i < nb_lcore_params; ++i) {
2085 if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
2086 queue = lcore_params[i].queue_id;
/* max queue_id + 1 == queue count. */
2088 return (uint8_t)(++queue);
/*
 * Distribute the (port, queue) pairs from lcore_params into each
 * lcore's rx_queue_list, rejecting any lcore assigned more than
 * MAX_RX_QUEUE_PER_LCORE queues.  Return statements are elided.
 */
2092 init_lcore_rx_queues(void)
2094 uint16_t i, nb_rx_queue;
2097 for (i = 0; i < nb_lcore_params; ++i) {
2098 lcore = lcore_params[i].lcore_id;
2099 nb_rx_queue = lcore_conf[lcore].n_rx_queue;
2100 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
2101 printf("error: too many queues (%u) for lcore: %u\n",
2102 (unsigned)nb_rx_queue + 1, (unsigned)lcore);
2105 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
2106 lcore_params[i].port_id;
2107 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
2108 lcore_params[i].queue_id;
2109 lcore_conf[lcore].n_rx_queue++;
/* Print command-line usage for the application (single printf). */
2117 print_usage(const char *prgname)
2119 printf ("%s [EAL options] -- -p PORTMASK -P"
2120 " [--config (port,queue,lcore)[,(port,queue,lcore]]"
2121 " [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
2122 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
2123 " -P : enable promiscuous mode\n"
2124 " --config (port,queue,lcore): rx queues configuration\n"
2125 " --eth-dest=X,MM:MM:MM:MM:MM:MM: optional, ethernet destination for port X\n"
2126 " --no-numa: optional, disable numa awareness\n"
2127 " --no-hw-csum: optional, disable hw ip checksum\n"
2128 " --ipv6: optional, specify it if running ipv6 packets\n"
2129 " --enable-jumbo: enable jumbo frame"
2130 " which max packet len is PKTLEN in decimal (64-9600)\n"
2131 " --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n",
/* Parse --max-pkt-len as a decimal integer; the error-return and the
 * final `return len;` are in elided lines. */
2135 static int parse_max_pkt_len(const char *pktlen)
2140 /* parse decimal string */
2141 len = strtoul(pktlen, &end, 10);
2142 if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
/*
 * Load per-port link IPs from a cfgfile: [linkip] num_ports, ip_type,
 * then port0..portN entries parsed as IPv4 strings or IPv6 addresses.
 * NOTE(review): neither rte_cfgfile_load() nor rte_cfgfile_get_entry()
 * results are NULL-checked before use (atoi(NULL) would crash) — the
 * checks may exist in elided lines; verify.
 */
2152 parse_link_ip(const char *file_name)
2155 struct rte_cfgfile *file;
2158 file = rte_cfgfile_load(file_name, 0);
2159 entry = rte_cfgfile_get_entry(file, "linkip", "num_ports");
2160 numports = (uint32_t)atoi(entry);
2161 if (numports <= 0 || numports > 32)
2162 rte_panic("numports is not valid\n");
2163 entry = rte_cfgfile_get_entry(file, "linkip", "ip_type");
2164 type = (uint32_t)atoi(entry);
2165 for (i = 0;i < numports; i++) {
2166 sprintf(buf, "port%d", i);
2167 entry = rte_cfgfile_get_entry(file, "linkip", buf);
/* IPv4 entries are kept as strings; IPv6 is parsed into link_ipv6[]. */
2171 ipv4[i] = strdup(entry);
2173 my_inet_pton_ipv6(AF_INET6, entry, &link_ipv6[i][0]);
/* Parse -p PORTMASK as hexadecimal; error-return and `return pm;`
 * are in elided lines. */
2178 parse_portmask(const char *portmask)
2183 /* parse hexadecimal string */
2184 pm = strtoul(portmask, &end, 16);
2185 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
2194 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
/* Parse --hash-entry-num as hexadecimal; error-return and the value
 * return are in elided lines. */
2196 parse_hash_entry_number(const char *hash_entry_num)
2199 unsigned long hash_en;
2200 /* parse hexadecimal string */
2201 hash_en = strtoul(hash_entry_num, &end, 16);
2202 if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
/*
 * Parse --config "(port,queue,lcore)[,(port,queue,lcore)...]" into
 * lcore_params_array, capped at MAX_LCORE_PARAMS entries.  The field
 * enum (FLD_PORT/FLD_QUEUE/FLD_LCORE), buffer `s`, and error returns
 * are in elided lines.
 */
2213 parse_config(const char *q_arg)
2216 const char *p, *p0 = q_arg;
2224 unsigned long int_fld[_NUM_FLD];
2225 char *str_fld[_NUM_FLD];
2229 nb_lcore_params = 0;
/* Each tuple is delimited by '(' ... ')'. */
2231 while ((p = strchr(p0,'(')) != NULL) {
2233 if((p0 = strchr(p,')')) == NULL)
2237 if(size >= sizeof(s))
2240 snprintf(s, sizeof(s), "%.*s", size, p);
2241 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
2243 for (i = 0; i < _NUM_FLD; i++){
2245 int_fld[i] = strtoul(str_fld[i], &end, 0);
2246 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
2249 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
2250 printf("exceeded max number of lcore params: %hu\n",
2254 lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
2255 lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
2256 lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
2259 lcore_params = lcore_params_array;
/*
 * Parse --eth-dest=PORT,MM:MM:MM:MM:MM:MM - store the peer MAC for
 * PORT in dest_eth_addr[] and mirror it into the val_eth[] table used
 * by the SSE header-rewrite path.  Exits the process on bad input.
 */
2264 parse_eth_dest(const char *optarg)
2268 uint8_t c, *dest, peer_addr[6];
2271 portid = strtoul(optarg, &port_end, 10);
2272 if (errno != 0 || port_end == optarg || *port_end++ != ',')
2273 rte_exit(EXIT_FAILURE,
2274 "Invalid eth-dest: %s", optarg);
2275 if (portid >= RTE_MAX_ETHPORTS)
2276 rte_exit(EXIT_FAILURE,
2277 "eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
2278 portid, RTE_MAX_ETHPORTS);
/* port_end now points just past the ',' at the MAC string. */
2280 if (cmdline_parse_etheraddr(NULL, port_end,
2281 &peer_addr, sizeof(peer_addr)) < 0)
2282 rte_exit(EXIT_FAILURE,
2283 "Invalid ethernet address: %s\n",
2285 dest = (uint8_t *)&dest_eth_addr[portid];
2286 for (c = 0; c < 6; c++)
2287 dest[c] = peer_addr[c];
/* Keep val_eth (the blend source for processx4_step3) in sync. */
2288 *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
2291 #define CMD_LINE_OPT_CONFIG "config"
2292 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
2293 #define CMD_LINE_OPT_NO_NUMA "no-numa"
2294 #define CMD_LINE_OPT_NO_HW_CSUM "no-hw-csum"
2295 #define CMD_LINE_OPT_IPV6 "ipv6"
2296 #define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
2297 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
2299 /* Parse the argument given in the command line of the application */
/*
 * Parse application (non-EAL) command-line arguments: -s (link-ip
 * cfgfile), -p (portmask), -P (promiscuous), and the long options
 * declared in lgopts.  case labels, braces, and some returns are in
 * elided lines; returns the new optind position to the caller.
 */
2301 parse_args(int argc, char **argv)
2306 char *prgname = argv[0];
2307 static struct option lgopts[] = {
2308 {CMD_LINE_OPT_CONFIG, 1, 0, 0},
2309 {CMD_LINE_OPT_ETH_DEST, 1, 0, 0},
2310 {CMD_LINE_OPT_NO_NUMA, 0, 0, 0},
2311 {CMD_LINE_OPT_NO_HW_CSUM, 0, 0, 0},
2312 {CMD_LINE_OPT_IPV6, 0, 0, 0},
2313 {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0},
2314 {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, 0},
2320 while ((opt = getopt_long(argc, argvopt, "s:p:P",
2321 lgopts, &option_index)) != EOF) {
/* -s: per-port link IP configuration file. */
2325 parse_link_ip(optarg);
/* -p: hexadecimal port mask. */
2330 enabled_port_mask = parse_portmask(optarg);
2331 if (enabled_port_mask == 0) {
2332 printf("invalid portmask\n");
2333 print_usage(prgname);
/* -P: promiscuous mode. */
2338 printf("Promiscuous mode selected\n");
/* Long options are dispatched by comparing lgopts[option_index].name. */
2344 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_CONFIG,
2345 sizeof (CMD_LINE_OPT_CONFIG))) {
2346 ret = parse_config(optarg);
2348 printf("invalid config\n");
2349 print_usage(prgname);
2354 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ETH_DEST,
2355 sizeof(CMD_LINE_OPT_ETH_DEST))) {
2356 parse_eth_dest(optarg);
2359 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_NUMA,
2360 sizeof(CMD_LINE_OPT_NO_NUMA))) {
2361 printf("numa is disabled \n");
2365 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_HW_CSUM,
2366 sizeof(CMD_LINE_OPT_NO_HW_CSUM))) {
/* NOTE(review): message text looks wrong for this option (csum, not numa). */
2367 printf("numa is hw ip checksum \n");
2368 port_conf.rxmode.hw_ip_checksum = 0;
2369 rx_conf.rx_free_thresh = 30;
2373 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2374 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_IPV6,
2375 sizeof(CMD_LINE_OPT_IPV6))) {
2376 printf("ipv6 is specified \n");
2381 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ENABLE_JUMBO,
2382 sizeof (CMD_LINE_OPT_ENABLE_JUMBO))) {
2383 struct option lenopts = {"max-pkt-len", required_argument, 0, 0};
2385 printf("jumbo frame is enabled - disabling simple TX path\n");
2386 port_conf.rxmode.jumbo_frame = 1;
2388 /* if no max-pkt-len set, use the default value ETHER_MAX_LEN */
2389 if (0 == getopt_long(argc, argvopt, "", &lenopts, &option_index)) {
2390 ret = parse_max_pkt_len(optarg);
2391 if ((ret < 64) || (ret > MAX_JUMBO_PKT_LEN)){
2392 printf("invalid packet length\n");
2393 print_usage(prgname);
2396 port_conf.rxmode.max_rx_pkt_len = ret;
2398 printf("set jumbo frame max packet length to %u\n",
2399 (unsigned int)port_conf.rxmode.max_rx_pkt_len);
2401 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2402 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_HASH_ENTRY_NUM,
2403 sizeof(CMD_LINE_OPT_HASH_ENTRY_NUM))) {
2404 ret = parse_hash_entry_number(optarg);
2405 if ((ret > 0) && (ret <= UDP_Replay_HASH_ENTRIES)) {
2406 hash_entry_number = ret;
2408 printf("invalid hash entry number\n");
2409 print_usage(prgname);
2417 print_usage(prgname);
/* Restore argv so EAL-style callers see a sane argv[optind-1]. */
2423 argv[optind-1] = prgname;
2426 optind = 0; /* reset getopt lib */
2430 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
/* Convert a host-order IPv4 5-tuple into the big-endian packed layout
 * (union ipv4_5tuple_host) used as the exact-match hash key. */
2432 static void convert_ipv4_5tuple(struct ipv4_5tuple* key1,
2433 union ipv4_5tuple_host* key2)
2435 key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst);
2436 key2->ip_src = rte_cpu_to_be_32(key1->ip_src);
2437 key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
2438 key2->port_src = rte_cpu_to_be_16(key1->port_src);
2439 key2->proto = key1->proto;
/* Convert an IPv6 5-tuple into the packed hash-key layout: addresses
 * are copied byte-for-byte, ports converted to big-endian. */
2445 static void convert_ipv6_5tuple(struct ipv6_5tuple* key1,
2446 union ipv6_5tuple_host* key2)
2449 for (i = 0; i < 16; i++)
2451 key2->ip_dst[i] = key1->ip_dst[i];
2452 key2->ip_src[i] = key1->ip_src[i];
2454 key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
2455 key2->port_src = rte_cpu_to_be_16(key1->port_src);
2456 key2->proto = key1->proto;
2463 #define BYTE_VALUE_MAX 256
2464 #define ALL_32_BITS 0xffffffff
2465 #define BIT_8_TO_15 0x0000ff00
/* Insert the small static ipv4_udp_replay_route_array into hash `h`;
 * the returned slot index maps to the route's output interface.  Also
 * initializes mask0 used by the SSE key-extraction code. */
2467 populate_ipv4_few_flow_into_table(const struct rte_hash* h)
2471 uint32_t array_len = sizeof(ipv4_udp_replay_route_array)/sizeof(ipv4_udp_replay_route_array[0]);
/* mask0 keeps the protocol byte (bits 8-15) of the first key word. */
2473 mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
2474 for (i = 0; i < array_len; i++) {
2475 struct ipv4_udp_replay_route  entry;
2476 union ipv4_5tuple_host newkey;
2477 entry = ipv4_udp_replay_route_array[i];
2478 convert_ipv4_5tuple(&entry.key, &newkey);
2479 ret = rte_hash_add_key (h,(void *) &newkey);
2481 rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
2482 " to the udp_replay hash.\n", i);
2484 ipv4_udp_replay_out_if[ret] = entry.if_out;
2486 printf("Hash: Adding 0x%" PRIx32 " keys\n", array_len);
2489 #define BIT_16_TO_23 0x00ff0000
2491 populate_ipv6_few_flow_into_table(const struct rte_hash* h)
2495 uint32_t array_len = sizeof(ipv6_udp_replay_route_array)/sizeof(ipv6_udp_replay_route_array[0]);
2497 mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
2498 mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
2499 for (i = 0; i < array_len; i++) {
2500 struct ipv6_udp_replay_route entry;
2501 union ipv6_5tuple_host newkey;
2502 entry = ipv6_udp_replay_route_array[i];
2503 convert_ipv6_5tuple(&entry.key, &newkey);
2504 ret = rte_hash_add_key (h, (void *) &newkey);
2506 rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
2507 " to the udp_replay hash.\n", i);
2509 ipv6_udp_replay_out_if[ret] = entry.if_out;
2511 printf("Hash: Adding 0x%" PRIx32 "keys\n", array_len);
2514 #define NUMBER_PORT_USED 4
2516 populate_ipv4_many_flow_into_table(const struct rte_hash* h,
2517 unsigned int nr_flow)
2520 mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
2521 for (i = 0; i < nr_flow; i++) {
2522 struct ipv4_udp_replay_route entry;
2523 union ipv4_5tuple_host newkey;
2524 uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
2525 uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
2526 uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
2527 /* Create the ipv4 exact match flow */
2528 memset(&entry, 0, sizeof(entry));
2529 switch (i & (NUMBER_PORT_USED -1)) {
2531 entry = ipv4_udp_replay_route_array[0];
2532 entry.key.ip_dst = IPv4(101,c,b,a);
2535 entry = ipv4_udp_replay_route_array[1];
2536 entry.key.ip_dst = IPv4(201,c,b,a);
2539 entry = ipv4_udp_replay_route_array[2];
2540 entry.key.ip_dst = IPv4(111,c,b,a);
2543 entry = ipv4_udp_replay_route_array[3];
2544 entry.key.ip_dst = IPv4(211,c,b,a);
2547 convert_ipv4_5tuple(&entry.key, &newkey);
2548 int32_t ret = rte_hash_add_key(h,(void *) &newkey);
2550 rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
2552 ipv4_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
2555 printf("Hash: Adding 0x%x keys\n", nr_flow);
2559 populate_ipv6_many_flow_into_table(const struct rte_hash* h,
2560 unsigned int nr_flow)
2563 mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
2564 mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
2565 for (i = 0; i < nr_flow; i++) {
2566 struct ipv6_udp_replay_route entry;
2567 union ipv6_5tuple_host newkey;
2568 uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
2569 uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
2570 uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
2571 /* Create the ipv6 exact match flow */
2572 memset(&entry, 0, sizeof(entry));
2573 switch (i & (NUMBER_PORT_USED - 1)) {
2574 case 0: entry = ipv6_udp_replay_route_array[0]; break;
2575 case 1: entry = ipv6_udp_replay_route_array[1]; break;
2576 case 2: entry = ipv6_udp_replay_route_array[2]; break;
2577 case 3: entry = ipv6_udp_replay_route_array[3]; break;
2579 entry.key.ip_dst[13] = c;
2580 entry.key.ip_dst[14] = b;
2581 entry.key.ip_dst[15] = a;
2582 convert_ipv6_5tuple(&entry.key, &newkey);
2583 int32_t ret = rte_hash_add_key(h,(void *) &newkey);
2585 rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
2587 ipv6_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
2590 printf("Hash: Adding 0x%x keys\n", nr_flow);
2595 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
2597 setup_lpm(int socketid)
2599 struct rte_lpm6_config config;
2604 /* create the LPM table */
2605 snprintf(s, sizeof(s), "IPV4_UDP_Replay_LPM_%d", socketid);
2606 ipv4_udp_replay_lookup_struct[socketid] = rte_lpm_create(s, socketid,
2607 IPV4_UDP_Replay_LPM_MAX_RULES, 0);
2608 if (ipv4_udp_replay_lookup_struct[socketid] == NULL)
2609 rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
2610 " on socket %d\n", socketid);
2612 /* populate the LPM table */
2613 for (i = 0; i < IPV4_UDP_Replay_NUM_ROUTES; i++) {
2615 /* skip unused ports */
2616 if ((1 << ipv4_udp_replay_route_array[i].if_out &
2617 enabled_port_mask) == 0)
2620 ret = rte_lpm_add(ipv4_udp_replay_lookup_struct[socketid],
2621 ipv4_udp_replay_route_array[i].ip,
2622 ipv4_udp_replay_route_array[i].depth,
2623 ipv4_udp_replay_route_array[i].if_out);
2626 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
2627 "udp_replay LPM table on socket %d\n",
2631 printf("LPM: Adding route 0x%08x / %d (%d)\n",
2632 (unsigned)ipv4_udp_replay_route_array[i].ip,
2633 ipv4_udp_replay_route_array[i].depth,
2634 ipv4_udp_replay_route_array[i].if_out);
2637 /* create the LPM6 table */
2638 snprintf(s, sizeof(s), "IPV6_UDP_Replay_LPM_%d", socketid);
2640 config.max_rules = IPV6_UDP_Replay_LPM_MAX_RULES;
2641 config.number_tbl8s = IPV6_UDP_Replay_LPM_NUMBER_TBL8S;
2643 ipv6_udp_replay_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
2645 if (ipv6_udp_replay_lookup_struct[socketid] == NULL)
2646 rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
2647 " on socket %d\n", socketid);
2649 /* populate the LPM table */
2650 for (i = 0; i < IPV6_UDP_Replay_NUM_ROUTES; i++) {
2652 /* skip unused ports */
2653 if ((1 << ipv6_udp_replay_route_array[i].if_out &
2654 enabled_port_mask) == 0)
2657 ret = rte_lpm6_add(ipv6_udp_replay_lookup_struct[socketid],
2658 ipv6_udp_replay_route_array[i].ip,
2659 ipv6_udp_replay_route_array[i].depth,
2660 ipv6_udp_replay_route_array[i].if_out);
2663 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
2664 "udp_replay LPM table on socket %d\n",
2668 printf("LPM: Adding route %s / %d (%d)\n",
2670 ipv6_udp_replay_route_array[i].depth,
2671 ipv6_udp_replay_route_array[i].if_out);
2681 /* Check the link status of all ports in up to 9s, and print them finally */
2683 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
2685 #define CHECK_INTERVAL 100 /* 100ms */
2686 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2687 uint8_t portid, count, all_ports_up, print_flag = 0;
2688 struct rte_eth_link link;
2690 printf("\nChecking link status");
2692 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2694 for (portid = 0; portid < port_num; portid++) {
2695 if ((port_mask & (1 << portid)) == 0)
2697 memset(&link, 0, sizeof(link));
2698 rte_eth_link_get_nowait(portid, &link);
2699 /* print link status if flag set */
2700 if (print_flag == 1) {
2701 if (link.link_status)
2702 printf("Port %d Link Up - speed %u "
2703 "Mbps - %s\n", (uint8_t)portid,
2704 (unsigned)link.link_speed,
2705 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2706 ("full-duplex") : ("half-duplex\n"));
2708 printf("Port %d Link Down\n",
2712 /* clear all_ports_up flag if any link down */
2713 if (link.link_status == 0) {
2718 /* after finally printing all link status, get out */
2719 if (print_flag == 1)
2722 if (all_ports_up == 0) {
2725 rte_delay_ms(CHECK_INTERVAL);
2728 /* set the print_flag if all ports up or timeout */
2729 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * main() — program entry point.
 *
 * Visible flow: initialize the DPDK EAL, record the timer lcore, parse
 * application arguments, validate lcore/port configuration, allocate and
 * seed pipeline params for lib_arp, build a per-port port_config_t table,
 * set up every enabled port via the interface manager (ifm_port_setup),
 * wait for link-up, launch main_loop on every lcore, then hand control to
 * the interactive "Replay>" command line.
 *
 * NOTE(review): this copy of the file is missing interleaved lines
 * (declarations such as `ret`, braces, and some error checks); the
 * comments below annotate only the statements that are present.
 */
2737 main(int argc, char **argv)
2742 uint32_t n_tx_queue;
2743 uint8_t portid, nb_rx_queue;
2746 struct pipeline_params *params;
/* Initialize the Environment Abstraction Layer; consumes EAL argv. */
2749 ret = rte_eal_init(argc, argv);
2751 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
/* The lcore running main() also drives the statistics timer. */
2754 timer_lcore = rte_lcore_id();
2755 /* parse application arguments (after the EAL ones) */
2756 ret = parse_args(argc, argv);
2758 rte_exit(EXIT_FAILURE, "Invalid UDP_Replay parameters\n");
2760 if (check_lcore_params() < 0)
2761 rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
2763 ret = init_lcore_rx_queues();
2765 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
/* NOTE(review): rte_malloc result is used by memcpy without a NULL
 * check — on allocation failure this dereferences NULL; confirm and
 * add a check upstream. */
2767 params = rte_malloc(NULL, sizeof(*params), RTE_CACHE_LINE_SIZE);
2768 memcpy(params, &def_pipeline_params, sizeof(def_pipeline_params));
2769 lib_arp_init(params, NULL);
2771 nb_ports = rte_eth_dev_count();
2772 num_ports = nb_ports;
/* Clamp to the maximum port count DPDK supports. */
2774 if (nb_ports > RTE_MAX_ETHPORTS)
2775 nb_ports = RTE_MAX_ETHPORTS;
2777 if (check_port_config(nb_ports) < 0)
2778 rte_exit(EXIT_FAILURE, "check_port_config failed\n");
2781 *Configuring port_config_t structure for interface manager initialization
/* One cache-line-rounded port_config_t slot per possible port. */
2783 size = RTE_CACHE_LINE_ROUNDUP(sizeof(port_config_t));
2784 port_config = rte_zmalloc(NULL, (RTE_MAX_ETHPORTS * size), RTE_CACHE_LINE_SIZE);
2785 if (port_config == NULL)
2786 rte_panic("port_config is NULL: Memory Allocation failure\n");
2787 /* initialize all ports */
2788 for (portid = 0; portid < nb_ports; portid++) {
2789 /* skip ports that are not enabled */
2790 if ((enabled_port_mask & (1 << portid)) == 0) {
2791 printf("\nSkipping disabled port %d\n", portid);
2797 printf("Initializing port %d ... ", portid );
/* TX queue count mirrors RX, capped at MAX_TX_QUEUE_PER_PORT. */
2800 nb_rx_queue = get_port_n_rx_queues(portid);
2801 n_tx_queue = nb_rx_queue;
2802 if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
2803 n_tx_queue = MAX_TX_QUEUE_PER_PORT;
/* Fill the interface-manager configuration for this port. */
2805 port_config[portid].port_id = portid;
2806 port_config[portid].nrx_queue = nb_rx_queue;
2807 port_config[portid].ntx_queue = n_tx_queue;
2808 port_config[portid].state = 1;
2809 port_config[portid].promisc = promiscuous_on;
2810 port_config[portid].mempool.pool_size = MEMPOOL_SIZE;
2811 port_config[portid].mempool.buffer_size = BUFFER_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
2812 port_config[portid].mempool.cache_size = CACHE_SIZE;
2813 port_config[portid].mempool.cpu_socket_id = rte_socket_id();
/* Copy the file-level default ethdev/queue configs into this port. */
2814 memcpy (&port_config[portid].port_conf, &port_conf, sizeof(struct rte_eth_conf));
2815 memcpy (&port_config[portid].rx_conf, &rx_conf, sizeof(struct rte_eth_rxconf));
2816 memcpy (&port_config[portid].tx_conf, &tx_conf, sizeof(struct rte_eth_txconf));
2818 /* Enable TCP and UDP HW Checksum , when required */
2819 //port_config[portid].tx_conf.txq_flags &=
2820 // ~(ETH_TXQ_FLAGS_NOXSUMTCP|ETH_TXQ_FLAGS_NOXSUMUDP);
2822 if (ifm_port_setup (portid, &port_config[portid]))
2823 rte_panic ("Port Setup Failed: %"PRIu32"\n", portid);
2826 check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
2831 populate_lpm_routes();
2832 convert_ipstr_to_numeric();
2833 /* launch per-lcore init on every lcore */
2834 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
/* Interactive command line; blocks until the user quits. */
2835 cl = cmdline_stdin_new(main_ctx, "Replay>");
2837 rte_panic("Cannot create cmdline instance\n");
2838 cmdline_interact(cl);
2839 cmdline_stdin_exit(cl);
2841 rte_exit(0, "Bye!\n");
/* NOTE(review): rte_exit() above does not return, so this join loop
 * appears unreachable as written — confirm intended order. */
2842 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
2843 if (rte_eal_wait_lcore(lcore_id) < 0)
2849 /**********************************************************/
/*
 * "UDP_Replay clear stats" cmdline command: token structs, handler,
 * per-keyword token parsers, and the command instance.
 */
2851 struct cmd_obj_clear_result {
2852 cmdline_fixed_string_t clear;
2853 cmdline_fixed_string_t udp_replay;
2854 cmdline_fixed_string_t stats;

/* Handler invoked when all three tokens match.
 * NOTE(review): the handler body is not visible in this copy. */
2857 static void cmd_clear_udp_replay_stats_parsed(
2858 __rte_unused void *parsed_result,
2859 __rte_unused struct cmdline *cl,
2860 __attribute__((unused)) void *data)

/* Fixed-string token parsers, one per keyword. */
2866 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_udp_replay_string =
2867 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, udp_replay, "UDP_Replay");
2868 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_clear_string =
2869 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, clear, "clear");
2870 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_stats_string =
2871 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, stats, "stats");

/* Command instance registered in main_ctx[]. */
2873 cmdline_parse_inst_t cmd_clear_udp_replay_stats = {
2874 .f = cmd_clear_udp_replay_stats_parsed, /* function to call */
2875 .data = NULL, /* 2nd arg of func */
2876 .help_str = "clears UDP_Replay stats for rx/tx",
2877 .tokens = { /* token list, NULL terminated */
2878 (void *)&cmd_clear_udp_replay_stats_udp_replay_string,
2879 (void *)&cmd_clear_udp_replay_stats_clear_string,
2880 (void *)&cmd_clear_udp_replay_stats_stats_string,
2884 /**********************************************************/
/*
 * "UDP_Replay stats" cmdline command: token struct, handler, token
 * parsers, and the command instance.
 */
2885 struct cmd_obj_add_result {
2886 cmdline_fixed_string_t action;
2887 cmdline_fixed_string_t name;

/* Handler invoked when both tokens match.
 * NOTE(review): the handler body is not visible in this copy. */
2890 static void cmd_udp_replay_stats_parsed(
2891 __rte_unused void *parsed_result,
2892 __rte_unused struct cmdline *cl,
2893 __attribute__((unused)) void *data)

/* Fixed-string token parsers: "UDP_Replay" then "stats". */
2898 cmdline_parse_token_string_t cmd_udp_replay_stats_udp_replay_string =
2899 TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, action, "UDP_Replay");
2900 cmdline_parse_token_string_t cmd_udp_replay_stats_stats_string =
2901 TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, name, "stats");

/* Command instance registered in main_ctx[]. */
2903 cmdline_parse_inst_t cmd_udp_replay_stats = {
2904 .f = cmd_udp_replay_stats_parsed, /* function to call */
2905 .data = NULL, /* 2nd arg of func */
2906 .help_str = "UDP_Replay stats for rx/tx",
2907 .tokens = { /* token list, NULL terminated */
2908 (void *)&cmd_udp_replay_stats_udp_replay_string,
2909 (void *)&cmd_udp_replay_stats_stats_string,
/*
 * "quit" cmdline command: token struct, handler, token parser and the
 * command instance.
 */
2914 struct cmd_quit_result {
2915 cmdline_fixed_string_t quit;

/* Quit handler parameters.
 * NOTE(review): the handler's name line and body are not visible in
 * this copy of the file. */
2920 __rte_unused void *parsed_result,
2922 __rte_unused void *data)

2927 static cmdline_parse_token_string_t cmd_quit_quit =
2928 TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");

/* Command instance registered in main_ctx[]. */
2930 static cmdline_parse_inst_t cmd_quit = {
2931 .f = cmd_quit_parsed,
2935 (void *) &cmd_quit_quit,
2940 /**********************************************************/
2941 /****** CONTEXT (list of instruction) */
/* Command-line context: the set of commands the "Replay>" prompt
 * accepts. Passed to cmdline_stdin_new() in main(). */
2942 cmdline_parse_ctx_t main_ctx[] = {
2943 (cmdline_parse_inst_t *)&cmd_udp_replay_stats,
2944 (cmdline_parse_inst_t *)&cmd_clear_udp_replay_stats,
2945 (cmdline_parse_inst_t *)&cmd_quit,