2 // Copyright (c) 2016-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
18 Correlated traffic VNF :
19 ------------------------
21 2. Modify received packet
22 a. exchange src mac and destination mac
23 b. exchange src ip and destination IP for both IPv4 and IPv6 cases
24 c. exchange UDP src port and UDP destination port
25 d. change the len of the response according to the IMIX definition (
26 option to make traffic more realistic to emulate some IoT payloads)
27 3. send modified packet to the port where it was received.
29 Such VNF does not need LPM and routing table implementations.
30 As the packet modification is very minimal, and there is no memory access because the packet is stored in the L3 cache, the
31 performance of the solution should be sufficient for testing the UDP NAT performance.
37 #include <sys/types.h>
39 #include <sys/queue.h>
44 #include <rte_common.h>
46 #include <rte_byteorder.h>
48 #include <rte_memory.h>
49 #include <rte_memcpy.h>
50 #include <rte_memzone.h>
52 #include <rte_per_lcore.h>
53 #include <rte_launch.h>
54 #include <rte_atomic.h>
55 #include <rte_cycles.h>
56 #include <rte_prefetch.h>
57 #include <rte_lcore.h>
58 #include <rte_per_lcore.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_interrupts.h>
62 #include <rte_random.h>
63 #include <rte_debug.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
67 #include <rte_mempool.h>
72 #include <rte_string_fns.h>
73 #include <rte_version.h>
75 #include <cmdline_parse.h>
76 #include <cmdline_parse_etheraddr.h>
77 #include <cmdline_rdline.h>
78 #include <cmdline_socket.h>
80 #include <cmdline_parse_num.h>
81 #include <cmdline_parse_string.h>
82 #include <cmdline_parse_ipaddr.h>
83 #include <rte_errno.h>
84 #include <rte_cfgfile.h>
86 #include "parse_obj_list.h"
90 #include "interface.h"
92 #include "l3fwd_common.h"
93 #include "l3fwd_lpm4.h"
94 #include "l3fwd_lpm6.h"
95 #include "lib_icmpv6.h"
97 #include "vnf_common.h"
101 #define APP_LOOKUP_EXACT_MATCH 0
102 #define APP_LOOKUP_LPM 1
103 #define DO_RFC_1812_CHECKS
105 #ifndef APP_LOOKUP_METHOD
106 #define APP_LOOKUP_METHOD APP_LOOKUP_EXACT_MATCH
111 #include <netinet/in.h>
115 * When set to zero, simple forwarding path is enabled.
116 * When set to one, optimized forwarding path is enabled.
117 * Note that LPM optimisation path uses SSE4.1 instructions.
119 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && !defined(__SSE4_1__))
120 #define ENABLE_MULTI_BUFFER_OPTIMIZE 0
122 #define ENABLE_MULTI_BUFFER_OPTIMIZE 1
125 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
126 #include <rte_hash.h>
127 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
129 #include <rte_lpm6.h>
131 #error "APP_LOOKUP_METHOD set to incorrect value"
135 #define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
136 "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
137 #define IPv6_BYTES(addr) \
138 addr[0], addr[1], addr[2], addr[3], \
139 addr[4], addr[5], addr[6], addr[7], \
140 addr[8], addr[9], addr[10], addr[11],\
141 addr[12], addr[13],addr[14], addr[15]
145 #define RTE_LOGTYPE_UDP_Replay RTE_LOGTYPE_USER1
147 #define MAX_JUMBO_PKT_LEN 9600
149 #define IPV6_ADDR_LEN 16
151 #define MEMPOOL_CACHE_SIZE 256
154 * This expression is used to calculate the number of mbufs needed depending on user input, taking
155 * into account memory for rx and tx hardware rings, cache per lcore and mtable per port per lcore.
156 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum value of 8192
159 #define NB_MBUF RTE_MAX ( \
160 (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
161 nb_ports*nb_lcores*MAX_PKT_BURST + \
162 nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
163 nb_lcores*MEMPOOL_CACHE_SIZE), \
166 #define MAX_PKT_BURST 32
167 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
170 * Try to avoid TX buffering if we have at least MAX_TX_BURST packets to send.
172 #define MAX_TX_BURST (MAX_PKT_BURST / 2)
176 /* Configure how many packets ahead to prefetch when reading packets */
177 #define PREFETCH_OFFSET 3
179 /* Used to mark destination port as 'invalid'. */
180 #define BAD_PORT ((uint16_t)-1)
185 * Configurable number of RX/TX ring descriptors
187 #define RTE_TEST_RX_DESC_DEFAULT 128
188 #define RTE_TEST_TX_DESC_DEFAULT 512
/* Per-port packet counters; tx_pkt_count[port] is incremented in send_burst()
 * after a successful rte_eth_tx_burst().  rcv_pkt_count is presumably indexed
 * by port id as well -- the increment site is not visible here, confirm. */
189 static uint64_t rcv_pkt_count[32] = {0};
190 static uint64_t tx_pkt_count[32] = {0};
/* When non-zero, received ARP/ICMPv4 and ICMPv6 packets are intercepted and
 * answered (see check_arpicmp() / check_arpicmpv6()). */
191 static uint32_t arp_support;
194 struct sockaddr_in ipaddr1, ipaddr2;
195 /* ethernet addresses of ports */
196 static uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
198 static __m128i val_eth[RTE_MAX_ETHPORTS];
200 cmdline_parse_ctx_t main_ctx[];
202 uint32_t timer_lcore;
203 uint32_t exit_loop = 1;
204 port_config_t *port_config;
206 #define MEMPOOL_SIZE 32 * 1024
207 #define BUFFER_SIZE 2048
208 #define CACHE_SIZE 256
209 /* replace first 12B of the ethernet header. */
210 #define MASK_ETH 0x3f
212 #define IP_TYPE_IPV4 0
213 #define IP_TYPE_IPV6 1
215 const char* ipv4[MAX_IP];
216 uint8_t link_ipv6[MAX_IP][16];
217 uint32_t type, numports;
218 /* mask of enabled ports */
219 static uint32_t enabled_port_mask = 0;
220 static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */
221 static int numa_on = 1; /**< NUMA is enabled by default. */
222 static int csum_on = 1; /**< Hardware checksum offload is enabled by default. */
223 struct pipeline_params def_pipeline_params = {
232 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
233 static int ipv6 = 0; /**< ipv6 is false by default. */
236 void convert_ipstr_to_numeric(void);
238 int print_l4stats(void);
239 int clear_stats(void);
243 struct rte_mbuf *m_table[MAX_PKT_BURST];
246 struct lcore_rx_queue {
249 } __rte_cache_aligned;
251 #define MAX_RX_QUEUE_PER_LCORE 16
252 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
253 #define MAX_RX_QUEUE_PER_PORT 128
255 #define MAX_LCORE_PARAMS 1024
256 struct lcore_params {
260 } __rte_cache_aligned;
262 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
263 static struct lcore_params lcore_params_array_default[] = {
275 static struct lcore_params * lcore_params = lcore_params_array_default;
276 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
277 sizeof(lcore_params_array_default[0]);
279 static struct rte_eth_conf port_conf = {
281 .mq_mode = ETH_MQ_RX_RSS,
282 .max_rx_pkt_len = ETHER_MAX_LEN,
284 .header_split = 0, /**< Header Split disabled */
285 .hw_ip_checksum = 1, /**< IP checksum offload enabled */
286 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
287 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
288 .hw_strip_crc = 0, /**< CRC stripped by hardware */
293 .rss_hf = ETH_RSS_IP,
297 .mq_mode = ETH_MQ_TX_NONE,
301 /* empty vmdq configuration structure. Filled in programmatically */
302 static struct rte_eth_rxconf rx_conf = {
308 .rx_free_thresh = 64,
310 .rx_deferred_start = 0,
312 static struct rte_eth_txconf tx_conf = {
320 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
321 ETH_TXQ_FLAGS_NOOFFLOADS,
322 .tx_deferred_start = 0,
325 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
327 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
328 #include <rte_hash_crc.h>
329 #define DEFAULT_HASH_FUNC rte_hash_crc
331 #include <rte_jhash.h>
332 #define DEFAULT_HASH_FUNC rte_jhash
341 } __attribute__((__packed__));
343 union ipv4_5tuple_host {
356 #define XMM_NUM_IN_IPV6_5TUPLE 3
359 uint8_t ip_dst[IPV6_ADDR_LEN];
360 uint8_t ip_src[IPV6_ADDR_LEN];
364 } __attribute__((__packed__));
366 union ipv6_5tuple_host {
371 uint8_t ip_src[IPV6_ADDR_LEN];
372 uint8_t ip_dst[IPV6_ADDR_LEN];
377 __m128i xmm[XMM_NUM_IN_IPV6_5TUPLE];
380 struct ipv4_udp_replay_route {
381 struct ipv4_5tuple key;
385 struct ipv6_udp_replay_route {
386 struct ipv6_5tuple key;
390 static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
391 {{IPv4(101,0,0,0), IPv4(100,10,0,1), 101, 11, IPPROTO_TCP}, 0},
392 {{IPv4(201,0,0,0), IPv4(200,20,0,1), 102, 12, IPPROTO_TCP}, 1},
393 {{IPv4(111,0,0,0), IPv4(100,30,0,1), 101, 11, IPPROTO_TCP}, 2},
394 {{IPv4(211,0,0,0), IPv4(200,40,0,1), 102, 12, IPPROTO_TCP}, 3},
397 static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
399 {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
400 {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
401 101, 11, IPPROTO_TCP}, 0},
404 {0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
405 {0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
406 102, 12, IPPROTO_TCP}, 1},
409 {0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
410 {0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
411 101, 11, IPPROTO_TCP}, 2},
414 {0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
415 {0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
416 102, 12, IPPROTO_TCP}, 3},
419 typedef struct rte_hash lookup_struct_t;
421 #ifdef RTE_ARCH_X86_64
422 /* default to 4 million hash entries (approx) */
423 #define UDP_Replay_HASH_ENTRIES 1024*1024*4
425 /* 32-bit has less address-space for hugepage memory, limit to 1M entries */
426 #define UDP_Replay_HASH_ENTRIES 1024*1024*1
428 #define HASH_ENTRY_NUMBER_DEFAULT 4
430 static uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
432 app_link_up_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
437 app_link_down_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
442 void convert_ipstr_to_numeric(void)
445 for (i = 0; i < numports; i++)
447 if (type == IP_TYPE_IPV4) {
448 memset(&ipaddr1, '\0', sizeof(struct sockaddr_in));
449 ipaddr1.sin_addr.s_addr = inet_addr(ipv4[i]);
450 ifm_add_ipv4_port(i, ipaddr1.sin_addr.s_addr, 24);
451 } else if (type == IP_TYPE_IPV6) {
452 ifm_add_ipv6_port(i, &link_ipv6[i][0], 128);
457 static inline uint32_t
458 ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
461 const union ipv4_5tuple_host *k;
467 p = (const uint32_t *)&k->port_src;
469 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
470 init_val = rte_hash_crc_4byte(t, init_val);
471 init_val = rte_hash_crc_4byte(k->ip_src, init_val);
472 init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
473 init_val = rte_hash_crc_4byte(*p, init_val);
474 #else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
475 init_val = rte_jhash_1word(t, init_val);
476 init_val = rte_jhash_1word(k->ip_src, init_val);
477 init_val = rte_jhash_1word(k->ip_dst, init_val);
478 init_val = rte_jhash_1word(*p, init_val);
479 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
/*
 * Inspect the ethertype (read at offset 12 of the ethernet header via the
 * mbuf metadata area) and, for ARP frames or IPv4 frames whose IP protocol
 * field is ICMP, hand the packet to the common ARP/ICMP handler for the
 * ingress port.
 * NOTE(review): some interior lines of this function are not visible in this
 * view (declaration of 'protocol', return paths); comments cover only the
 * visible logic.
 */
483 static inline int check_arpicmp(struct rte_mbuf *pkt)
485 uint8_t in_port_id = pkt->port;
486 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
487 uint16_t *eth_proto =
488 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
490 uint32_t prot_offset =
491 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST;
492 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset);
493 if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_ARP) ||
494 ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV4)
495 && (*protocol == IP_PROTOCOL_ICMP))) {
496 process_arpicmp_pkt(pkt, ifm_get_port(in_port_id));
/*
 * For IPv6 frames whose next-header field is ICMPv6, hand the packet to the
 * ICMPv6 handler (neighbour discovery / echo processing) for the ingress
 * port.  The ethertype is read from the mbuf metadata area; the IPv6 header
 * is located directly after the ethernet header in the packet data.
 * NOTE(review): the return paths of this function are not visible in this
 * view; comments cover only the visible logic.
 */
502 static inline int check_arpicmpv6(struct rte_mbuf *pkt)
504 struct ether_hdr *eth_h;
505 struct ipv6_hdr *ipv6_h;
506 uint8_t in_port_id = pkt->port;
507 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
508 uint16_t *eth_proto =
509 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
510 eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
511 ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
512 if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV6)
513 && (ipv6_h->proto == ICMPV6_PROTOCOL_ID)) {
514 process_icmpv6_pkt(pkt, ifm_get_port(in_port_id));
520 static inline uint32_t
521 ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len, uint32_t init_val)
523 const union ipv6_5tuple_host *k;
526 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
527 const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
528 const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
529 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
533 p = (const uint32_t *)&k->port_src;
535 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
536 ip_src0 = (const uint32_t *) k->ip_src;
537 ip_src1 = (const uint32_t *)(k->ip_src+4);
538 ip_src2 = (const uint32_t *)(k->ip_src+8);
539 ip_src3 = (const uint32_t *)(k->ip_src+12);
540 ip_dst0 = (const uint32_t *) k->ip_dst;
541 ip_dst1 = (const uint32_t *)(k->ip_dst+4);
542 ip_dst2 = (const uint32_t *)(k->ip_dst+8);
543 ip_dst3 = (const uint32_t *)(k->ip_dst+12);
544 init_val = rte_hash_crc_4byte(t, init_val);
545 init_val = rte_hash_crc_4byte(*ip_src0, init_val);
546 init_val = rte_hash_crc_4byte(*ip_src1, init_val);
547 init_val = rte_hash_crc_4byte(*ip_src2, init_val);
548 init_val = rte_hash_crc_4byte(*ip_src3, init_val);
549 init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
550 init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
551 init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
552 init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
553 init_val = rte_hash_crc_4byte(*p, init_val);
554 #else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
555 init_val = rte_jhash_1word(t, init_val);
556 init_val = rte_jhash(k->ip_src, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
557 init_val = rte_jhash(k->ip_dst, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
558 init_val = rte_jhash_1word(*p, init_val);
559 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
563 #define IPV4_UDP_Replay_NUM_ROUTES \
564 (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
566 #define IPV6_UDP_Replay_NUM_ROUTES \
567 (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
569 static uint8_t ipv4_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
570 static uint8_t ipv6_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
574 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
575 struct ipv4_udp_replay_route {
581 struct ipv6_udp_replay_route {
587 static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
588 {IPv4(1,1,1,0), 24, 0},
589 {IPv4(2,1,1,0), 24, 1},
590 {IPv4(3,1,1,0), 24, 2},
591 {IPv4(4,1,1,0), 24, 3},
592 {IPv4(5,1,1,0), 24, 4},
593 {IPv4(6,1,1,0), 24, 5},
594 {IPv4(7,1,1,0), 24, 6},
595 {IPv4(8,1,1,0), 24, 7},
598 static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
599 {{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
600 {{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
601 {{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
602 {{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
603 {{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
604 {{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
605 {{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
606 {{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
609 #define IPV4_UDP_Replay_NUM_ROUTES \
610 (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
611 #define IPV6_UDP_Replay_NUM_ROUTES \
612 (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
614 #define IPV4_UDP_Replay_LPM_MAX_RULES 1024
615 #define IPV6_UDP_Replay_LPM_MAX_RULES 1024
616 #define IPV6_UDP_Replay_LPM_NUMBER_TBL8S (1 << 16)
618 typedef struct rte_lpm lookup_struct_t;
619 typedef struct rte_lpm6 lookup6_struct_t;
620 static lookup_struct_t *ipv4_udp_replay_lookup_struct[NB_SOCKETS];
621 static lookup6_struct_t *ipv6_udp_replay_lookup_struct[NB_SOCKETS];
626 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
627 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
628 struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
629 lookup_struct_t * ipv4_lookup_struct;
630 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
631 lookup6_struct_t * ipv6_lookup_struct;
633 lookup_struct_t * ipv6_lookup_struct;
635 } __rte_cache_aligned;
637 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
639 /* Send burst of packets on an output interface */
641 send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
643 struct rte_mbuf **m_table;
647 queueid = qconf->tx_queue_id[port];
648 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
650 ret = rte_eth_tx_burst(port, queueid, m_table, n);
651 if (unlikely(ret < n)) {
653 rte_pktmbuf_free(m_table[ret]);
657 tx_pkt_count[port] += ret;
661 /* Enqueue a single packet, and send burst if queue is filled */
663 send_single_packet(struct rte_mbuf *m, uint8_t port)
667 struct lcore_conf *qconf;
669 lcore_id = rte_lcore_id();
671 qconf = &lcore_conf[lcore_id];
672 len = qconf->tx_mbufs[port].len;
673 qconf->tx_mbufs[port].m_table[len] = m;
676 /* enough pkts to be sent */
677 if (unlikely(len == MAX_PKT_BURST)) {
678 send_burst(qconf, MAX_PKT_BURST, port);
682 qconf->tx_mbufs[port].len = len;
686 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
687 static inline __attribute__((always_inline)) void
688 send_packetsx4(struct lcore_conf *qconf, uint8_t port,
689 struct rte_mbuf *m[], uint32_t num)
693 len = qconf->tx_mbufs[port].len;
696 * If TX buffer for that queue is empty, and we have enough packets,
697 * then send them straightway.
699 if (num >= MAX_TX_BURST && len == 0) {
700 n = rte_eth_tx_burst(port, qconf->tx_queue_id[port], m, num);
701 if (unlikely(n < num)) {
703 rte_pktmbuf_free(m[n]);
710 * Put packets into TX buffer for that queue.
714 n = (n > MAX_PKT_BURST) ? MAX_PKT_BURST - len : num;
717 switch (n % FWDSTEP) {
720 qconf->tx_mbufs[port].m_table[len + j] = m[j];
723 qconf->tx_mbufs[port].m_table[len + j] = m[j];
726 qconf->tx_mbufs[port].m_table[len + j] = m[j];
729 qconf->tx_mbufs[port].m_table[len + j] = m[j];
736 /* enough pkts to be sent */
737 if (unlikely(len == MAX_PKT_BURST)) {
739 send_burst(qconf, MAX_PKT_BURST, port);
741 /* copy rest of the packets into the TX buffer. */
744 switch (len % FWDSTEP) {
747 qconf->tx_mbufs[port].m_table[j] = m[n + j];
750 qconf->tx_mbufs[port].m_table[j] = m[n + j];
753 qconf->tx_mbufs[port].m_table[j] = m[n + j];
756 qconf->tx_mbufs[port].m_table[j] = m[n + j];
762 qconf->tx_mbufs[port].len = len;
764 #endif /* APP_LOOKUP_LPM */
766 #ifdef DO_RFC_1812_CHECKS
768 is_valid_pkt_ipv4(struct ipv4_hdr *pkt, uint32_t link_len)
770 /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
772 * 1. The packet length reported by the Link Layer must be large
773 * enough to hold the minimum length legal IP datagram (20 bytes).
775 if (link_len < sizeof(struct ipv4_hdr))
778 /* 2. The IP checksum must be correct. */
779 /* this is checked in H/W */
782 * 3. The IP version number must be 4. If the version number is not 4
783 * then the packet may be another version of IP, such as IPng or
786 if (((pkt->version_ihl) >> 4) != 4)
789 * 4. The IP header length field must be large enough to hold the
790 * minimum length legal IP datagram (20 bytes = 5 words).
792 if ((pkt->version_ihl & 0xf) < 5)
796 * 5. The IP total length field must be large enough to hold the IP
797 * datagram header, whose length is specified in the IP header length
800 if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
807 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
809 static __m128i mask0;
810 static __m128i mask1;
811 static __m128i mask2;
812 static inline uint8_t
813 get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
816 union ipv4_5tuple_host key;
818 ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
819 __m128i data = _mm_loadu_si128((__m128i*)(ipv4_hdr));
820 /* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */
821 key.xmm = _mm_and_si128(data, mask0);
822 /* Find destination port */
823 ret = rte_hash_lookup(ipv4_udp_replay_lookup_struct, (const void *)&key);
824 return (uint8_t)((ret < 0)? portid : ipv4_udp_replay_out_if[ret]);
827 static inline uint8_t
828 get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup_struct_t * ipv6_udp_replay_lookup_struct)
831 union ipv6_5tuple_host key;
833 ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
834 __m128i data0 = _mm_loadu_si128((__m128i*)(ipv6_hdr));
835 __m128i data1 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)));
836 __m128i data2 = _mm_loadu_si128((__m128i*)(((uint8_t*)ipv6_hdr)+sizeof(__m128i)+sizeof(__m128i)));
837 /* Get part of 5 tuple: src IP address lower 96 bits and protocol */
838 key.xmm[0] = _mm_and_si128(data0, mask1);
839 /* Get part of 5 tuple: dst IP address lower 96 bits and src IP address higher 32 bits */
841 /* Get part of 5 tuple: dst port and src port and dst IP address higher 32 bits */
842 key.xmm[2] = _mm_and_si128(data2, mask2);
844 /* Find destination port */
845 ret = rte_hash_lookup(ipv6_udp_replay_lookup_struct, (const void *)&key);
846 return (uint8_t)((ret < 0)? portid : ipv6_udp_replay_out_if[ret]);
850 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
852 static inline uint8_t
853 get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
857 return (uint8_t) ((rte_lpm_lookup(ipv4_udp_replay_lookup_struct,
858 rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
859 &next_hop) == 0) ? next_hop : portid);
862 static inline uint8_t
863 get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup6_struct_t * ipv6_udp_replay_lookup_struct)
866 return (uint8_t) ((rte_lpm6_lookup(ipv6_udp_replay_lookup_struct,
867 ((struct ipv6_hdr*)ipv6_hdr)->dst_addr, &next_hop) == 0)?
872 static inline void udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid,
873 struct lcore_conf *qconf) __attribute__((unused));
875 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
876 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
878 #define MASK_ALL_PKTS 0xff
879 #define EXCLUDE_1ST_PKT 0xfe
880 #define EXCLUDE_2ND_PKT 0xfd
881 #define EXCLUDE_3RD_PKT 0xfb
882 #define EXCLUDE_4TH_PKT 0xf7
883 #define EXCLUDE_5TH_PKT 0xef
884 #define EXCLUDE_6TH_PKT 0xdf
885 #define EXCLUDE_7TH_PKT 0xbf
886 #define EXCLUDE_8TH_PKT 0x7f
889 simple_ipv4_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
891 struct ether_hdr *eth_hdr[8];
892 struct ether_hdr tmp;
893 struct ipv4_hdr *ipv4_hdr[8];
894 struct udp_hdr *udp_hdr[8];
896 l2_phy_interface_t *port = ifm_get_port(portid);
898 printf("port may be un initialized\n");
901 if (unlikely(arp_support)) {
912 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
913 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
914 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
915 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
916 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
917 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
918 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
919 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
922 memset(&tmp,0,sizeof (struct ether_hdr));
927 ether_addr_copy(ð_hdr[i]->s_addr, &tmp.s_addr);
928 ether_addr_copy(ð_hdr[i]->d_addr, ð_hdr[i]->s_addr);
929 ether_addr_copy(&tmp.s_addr, ð_hdr[i]->d_addr);
932 /* Handle IPv4 headers.*/
933 ipv4_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv4_hdr *,
934 sizeof(struct ether_hdr));
935 ipv4_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv4_hdr *,
936 sizeof(struct ether_hdr));
937 ipv4_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv4_hdr *,
938 sizeof(struct ether_hdr));
939 ipv4_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv4_hdr *,
940 sizeof(struct ether_hdr));
941 ipv4_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv4_hdr *,
942 sizeof(struct ether_hdr));
943 ipv4_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv4_hdr *,
944 sizeof(struct ether_hdr));
945 ipv4_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv4_hdr *,
946 sizeof(struct ether_hdr));
947 ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv4_hdr *,
948 sizeof(struct ether_hdr));
949 struct ipv4_hdr temp_ipv4;
950 temp_ipv4.dst_addr = ipv4_hdr[0]->dst_addr;
951 ipv4_hdr[0]->dst_addr = ipv4_hdr[0]->src_addr;
952 ipv4_hdr[0]->src_addr = temp_ipv4.dst_addr;
953 temp_ipv4.dst_addr = ipv4_hdr[1]->dst_addr;
954 ipv4_hdr[1]->dst_addr = ipv4_hdr[1]->src_addr;
955 ipv4_hdr[1]->src_addr = temp_ipv4.dst_addr;
956 temp_ipv4.dst_addr = ipv4_hdr[2]->dst_addr;
957 ipv4_hdr[2]->dst_addr = ipv4_hdr[2]->src_addr;
958 ipv4_hdr[2]->src_addr = temp_ipv4.dst_addr;
959 temp_ipv4.dst_addr = ipv4_hdr[3]->dst_addr;
960 ipv4_hdr[3]->dst_addr = ipv4_hdr[3]->src_addr;
961 ipv4_hdr[3]->src_addr = temp_ipv4.dst_addr;
962 temp_ipv4.dst_addr = ipv4_hdr[4]->dst_addr;
963 ipv4_hdr[4]->dst_addr = ipv4_hdr[4]->src_addr;
964 ipv4_hdr[4]->src_addr = temp_ipv4.dst_addr;
965 temp_ipv4.dst_addr = ipv4_hdr[5]->dst_addr;
966 ipv4_hdr[5]->dst_addr = ipv4_hdr[5]->src_addr;
967 ipv4_hdr[5]->src_addr = temp_ipv4.dst_addr;
968 temp_ipv4.dst_addr = ipv4_hdr[6]->dst_addr;
969 ipv4_hdr[6]->dst_addr = ipv4_hdr[6]->src_addr;
970 ipv4_hdr[6]->src_addr = temp_ipv4.dst_addr;
971 temp_ipv4.dst_addr = ipv4_hdr[7]->dst_addr;
972 ipv4_hdr[7]->dst_addr = ipv4_hdr[7]->src_addr;
973 ipv4_hdr[7]->src_addr = temp_ipv4.dst_addr;
975 /* Handle UDP headers.*/
976 udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
977 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
979 udp_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct udp_hdr *,
980 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
981 udp_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct udp_hdr *,
982 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
983 udp_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct udp_hdr *,
984 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
985 udp_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct udp_hdr *,
986 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
987 udp_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct udp_hdr *,
988 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
989 udp_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct udp_hdr *,
990 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
991 udp_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct udp_hdr *,
992 sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr));
993 /*1) memcpy or assignment.*/
995 struct udp_hdr temp_udp;
996 temp_udp.dst_port = udp_hdr[0]->dst_port;
997 udp_hdr[0]->dst_port = udp_hdr[0]->src_port;
998 udp_hdr[0]->src_port = temp_udp.dst_port;
999 temp_udp.dst_port = udp_hdr[1]->dst_port;
1000 udp_hdr[1]->dst_port = udp_hdr[1]->src_port;
1001 udp_hdr[1]->src_port = temp_udp.dst_port;
1002 temp_udp.dst_port = udp_hdr[2]->dst_port;
1003 udp_hdr[2]->dst_port = udp_hdr[2]->src_port;
1004 udp_hdr[2]->src_port = temp_udp.dst_port;
1005 temp_udp.dst_port = udp_hdr[3]->dst_port;
1006 udp_hdr[3]->dst_port = udp_hdr[3]->src_port;
1007 udp_hdr[3]->src_port = temp_udp.dst_port;
1008 temp_udp.dst_port = udp_hdr[4]->dst_port;
1009 udp_hdr[4]->dst_port = udp_hdr[4]->src_port;
1010 udp_hdr[4]->src_port = temp_udp.dst_port;
1011 temp_udp.dst_port = udp_hdr[5]->dst_port;
1012 udp_hdr[5]->dst_port = udp_hdr[5]->src_port;
1013 udp_hdr[5]->src_port = temp_udp.dst_port;
1014 temp_udp.dst_port = udp_hdr[6]->dst_port;
1015 udp_hdr[6]->dst_port = udp_hdr[6]->src_port;
1016 udp_hdr[6]->src_port = temp_udp.dst_port;
1017 temp_udp.dst_port = udp_hdr[7]->dst_port;
1018 udp_hdr[7]->dst_port = udp_hdr[7]->src_port;
1019 udp_hdr[7]->src_port = temp_udp.dst_port;
1020 #ifdef DO_RFC_1812_CHECKS
1021 /* Check to make sure the packet is valid (RFC1812) */
1022 uint8_t valid_mask = MASK_ALL_PKTS;
1023 if (is_valid_pkt_ipv4(ipv4_hdr[0], m[0]->pkt_len) < 0) {
1024 rte_pktmbuf_free(m[0]);
1025 valid_mask &= EXCLUDE_1ST_PKT;
1027 if (is_valid_pkt_ipv4(ipv4_hdr[1], m[1]->pkt_len) < 0) {
1028 rte_pktmbuf_free(m[1]);
1029 valid_mask &= EXCLUDE_2ND_PKT;
1031 if (is_valid_pkt_ipv4(ipv4_hdr[2], m[2]->pkt_len) < 0) {
1032 rte_pktmbuf_free(m[2]);
1033 valid_mask &= EXCLUDE_3RD_PKT;
1035 if (is_valid_pkt_ipv4(ipv4_hdr[3], m[3]->pkt_len) < 0) {
1036 rte_pktmbuf_free(m[3]);
1037 valid_mask &= EXCLUDE_4TH_PKT;
1039 if (is_valid_pkt_ipv4(ipv4_hdr[4], m[4]->pkt_len) < 0) {
1040 rte_pktmbuf_free(m[4]);
1041 valid_mask &= EXCLUDE_5TH_PKT;
1043 if (is_valid_pkt_ipv4(ipv4_hdr[5], m[5]->pkt_len) < 0) {
1044 rte_pktmbuf_free(m[5]);
1045 valid_mask &= EXCLUDE_6TH_PKT;
1047 if (is_valid_pkt_ipv4(ipv4_hdr[6], m[6]->pkt_len) < 0) {
1048 rte_pktmbuf_free(m[6]);
1049 valid_mask &= EXCLUDE_7TH_PKT;
1051 if (is_valid_pkt_ipv4(ipv4_hdr[7], m[7]->pkt_len) < 0) {
1052 rte_pktmbuf_free(m[7]);
1053 valid_mask &= EXCLUDE_8TH_PKT;
1055 if (unlikely(valid_mask != MASK_ALL_PKTS)) {
1056 if (valid_mask == 0){
1060 for (i = 0; i < 8; i++) {
1061 if ((0x1 << i) & valid_mask) {
1062 udp_replay_simple_replay(m[i], portid, qconf);
1068 #endif // End of #ifdef DO_RFC_1812_CHECKS
1070 #ifdef DO_RFC_1812_CHECKS
1071 /* Update time to live and header checksum */
1072 --(ipv4_hdr[0]->time_to_live);
1073 --(ipv4_hdr[1]->time_to_live);
1074 --(ipv4_hdr[2]->time_to_live);
1075 --(ipv4_hdr[3]->time_to_live);
1076 ++(ipv4_hdr[0]->hdr_checksum);
1077 ++(ipv4_hdr[1]->hdr_checksum);
1078 ++(ipv4_hdr[2]->hdr_checksum);
1079 ++(ipv4_hdr[3]->hdr_checksum);
1080 --(ipv4_hdr[4]->time_to_live);
1081 --(ipv4_hdr[5]->time_to_live);
1082 --(ipv4_hdr[6]->time_to_live);
1083 --(ipv4_hdr[7]->time_to_live);
1084 ++(ipv4_hdr[4]->hdr_checksum);
1085 ++(ipv4_hdr[5]->hdr_checksum);
1086 ++(ipv4_hdr[6]->hdr_checksum);
1087 ++(ipv4_hdr[7]->hdr_checksum);
1090 send_single_packet(m[0],portid );
1091 send_single_packet(m[1],portid );
1092 send_single_packet(m[2],portid );
1093 send_single_packet(m[3],portid);
1094 send_single_packet(m[4],portid);
1095 send_single_packet(m[5],portid);
1096 send_single_packet(m[6],portid);
1097 send_single_packet(m[7],portid);
1101 static inline void get_ipv6_5tuple(struct rte_mbuf* m0, __m128i mask0, __m128i mask1,
1102 union ipv6_5tuple_host * key)
1104 __m128i tmpdata0 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)));
1105 __m128i tmpdata1 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i)));
1106 __m128i tmpdata2 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i) + sizeof(__m128i)));
1107 key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
1108 key->xmm[1] = tmpdata1;
1109 key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
1114 simple_ipv6_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
1116 struct ether_hdr *eth_hdr[8],tmp;
1118 __attribute__((unused)) struct ipv6_hdr *ipv6_hdr[8], temp_ipv6;
1120 union ipv6_5tuple_host key[8];
1121 struct udp_hdr *udp_hdr[8];
1122 l2_phy_interface_t *port = ifm_get_port(portid);
1124 printf("port may be un initialized\n");
1128 if (unlikely(arp_support)) {
1129 check_arpicmpv6(m[0]);
1130 check_arpicmpv6(m[1]);
1131 check_arpicmpv6(m[2]);
1132 check_arpicmpv6(m[3]);
1133 check_arpicmpv6(m[4]);
1134 check_arpicmpv6(m[5]);
1135 check_arpicmpv6(m[6]);
1136 check_arpicmpv6(m[7]);
1140 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
1141 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
1142 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
1143 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
1144 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
1145 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
1146 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
1147 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
1149 memset(&tmp,0,sizeof (struct ether_hdr));
1153 ether_addr_copy(ð_hdr[i]->s_addr, &tmp.s_addr);
1154 ether_addr_copy(ð_hdr[i]->d_addr, ð_hdr[i]->s_addr);
1155 ether_addr_copy(&tmp.s_addr, ð_hdr[i]->d_addr);
1157 /* Handle IPv6 headers.*/
1158 ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv6_hdr *,
1159 sizeof(struct ether_hdr));
1160 ipv6_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv6_hdr *,
1161 sizeof(struct ether_hdr));
1162 ipv6_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv6_hdr *,
1163 sizeof(struct ether_hdr));
1164 ipv6_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv6_hdr *,
1165 sizeof(struct ether_hdr));
1166 ipv6_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv6_hdr *,
1167 sizeof(struct ether_hdr));
1168 ipv6_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv6_hdr *,
1169 sizeof(struct ether_hdr));
1170 ipv6_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv6_hdr *,
1171 sizeof(struct ether_hdr));
1172 ipv6_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv6_hdr *,
1173 sizeof(struct ether_hdr));
1176 memcpy(temp_ipv6.dst_addr,ipv6_hdr[i]->dst_addr,16);
1177 memcpy(ipv6_hdr[i]->dst_addr,ipv6_hdr[i]->src_addr,16);
1178 memcpy(ipv6_hdr[i]->src_addr,temp_ipv6.dst_addr,16);
1181 /* Handle UDP headers.*/
1182 udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
1183 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1185 udp_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct udp_hdr *,
1186 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1187 udp_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct udp_hdr *,
1188 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1189 udp_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct udp_hdr *,
1190 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1191 udp_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct udp_hdr *,
1192 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1193 udp_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct udp_hdr *,
1194 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1195 udp_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct udp_hdr *,
1196 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1197 udp_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct udp_hdr *,
1198 sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr));
1199 /*1) memcpy or assignment.*/
1201 struct udp_hdr temp_udp;
1204 temp_udp.dst_port = udp_hdr[i]->dst_port;
1205 udp_hdr[i]->dst_port = udp_hdr[i]->src_port;
1206 udp_hdr[i]->src_port = temp_udp.dst_port;
1208 const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
1209 &key[4], &key[5], &key[6], &key[7]};
1210 #if RTE_VERSION < 0x100b0000
1211 rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
1213 rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
1215 send_single_packet(m[0],portid);
1216 send_single_packet(m[1],portid);
1217 send_single_packet(m[2],portid);
1218 send_single_packet(m[3],portid);
1219 send_single_packet(m[4],portid);
1220 send_single_packet(m[5],portid);
1221 send_single_packet(m[6],portid);
1222 send_single_packet(m[7],portid);
1225 #endif /* APP_LOOKUP_METHOD */
/*
 * Single-packet replay path: swap MACs, then per EtherType swap IPv4 or
 * IPv6 src/dst address and UDP ports, and send the packet back out the
 * port it arrived on. Non-IPv4/IPv6 frames are freed.
 * NOTE(review): interior lines (early returns after the diagnostics,
 * braces) are missing from this view; code left byte-identical.
 */
1227 static inline __attribute__((always_inline)) void
1228 udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf)
1230 struct ether_hdr *eth_hdr,tmp;
1231 struct ipv4_hdr *ipv4_hdr,temp_ipv4;
1232 struct udp_hdr *udp_hdr,temp_udp;
1233 l2_phy_interface_t *port = ifm_get_port(portid);
1236 printf("port may be un initialized\n");
1240 printf("Null packet received\n");
/* Let the ARP/ICMP handler consume control traffic when enabled. */
1243 if (unlikely(arp_support)) {
1244 if (!check_arpicmp(m))
1248 printf("qconf configuration is NULL\n");
/* Swap L2 source/destination MAC via the scratch header `tmp`. */
1249 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
1250 ether_addr_copy(&eth_hdr->s_addr, &tmp.s_addr);
1251 ether_addr_copy(&eth_hdr->d_addr, &eth_hdr->s_addr);
1252 ether_addr_copy(&tmp.s_addr, &eth_hdr->d_addr);
1253 struct ether_hdr *eth_h = rte_pktmbuf_mtod(m, struct ether_hdr *);
1255 if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
1256 /* Handle IPv4 headers.*/
1257 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
1258 sizeof(struct ether_hdr));
/* Swap IPv4 src/dst address (a swap is byte-order neutral). */
1259 temp_ipv4.dst_addr = ipv4_hdr->dst_addr;
1260 ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
1261 ipv4_hdr->src_addr = temp_ipv4.dst_addr;
1262 #ifdef DO_RFC_1812_CHECKS
1263 /* Check to make sure the packet is valid (RFC1812) */
1264 if (is_valid_pkt_ipv4(ipv4_hdr, m->pkt_len) < 0) {
1265 rte_pktmbuf_free(m);
1271 #ifdef DO_RFC_1812_CHECKS
1272 /* Update time to live and header checksum */
/* Incremental RFC1812 checksum update: TTL-1 is compensated by +1 on
 * the checksum field (same trick as DPDK's l3fwd sample). */
1273 --(ipv4_hdr->time_to_live);
1274 ++(ipv4_hdr->hdr_checksum);
1276 /* Handle UDP headers.*/
1277 udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
1278 (sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr)));
1280 /*Swapping Src and Dst Port*/
1281 temp_udp.dst_port = udp_hdr->dst_port;
1282 udp_hdr->dst_port = udp_hdr->src_port;
1283 udp_hdr->src_port = temp_udp.dst_port;
1285 send_single_packet(m, portid);
1286 } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
1287 /* Handle IPv6 headers.*/
1288 struct ipv6_hdr *ipv6_hdr,temp_ipv6;
1290 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
1291 sizeof(struct ether_hdr));
1293 /*Swapping of Src and Dst IP address*/
1294 memcpy(temp_ipv6.dst_addr,ipv6_hdr->dst_addr,16);
1295 memcpy(ipv6_hdr->dst_addr,ipv6_hdr->src_addr,16);
1296 memcpy(ipv6_hdr->src_addr,temp_ipv6.dst_addr,16);
1298 /* Handle UDP headers.*/
1299 udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
1300 (sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr)));
1301 /*Swapping Src and Dst Port*/
1302 temp_udp.dst_port = udp_hdr->dst_port;
1303 udp_hdr->dst_port = udp_hdr->src_port;
1304 udp_hdr->src_port = temp_udp.dst_port;
1305 send_single_packet(m, portid);
1307 /* Free the mbuf that contains non-IPV4/IPV6 packet */
1308 rte_pktmbuf_free(m);
1311 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1312 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1313 #ifdef DO_RFC_1812_CHECKS
/* version_ihl byte: 0x45 = IPv4 with minimum 20-byte header. */
1315 #define IPV4_MIN_VER_IHL 0x45
1316 #define IPV4_MAX_VER_IHL 0x4f
1317 #define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)
1319 /* Minimum value of IPV4 total length (20B) in network byte order. */
1320 #define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
1323 * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
1324 * - The IP version number must be 4.
1325 * - The IP header length field must be large enough to hold the
1326 * minimum length legal IP datagram (20 bytes = 5 words).
1327 * - The IP total length field must be large enough to hold the IP
1328 * datagram header, whose length is specified in the IP header length
1330 * If we encounter invalid IPV4 packet, then set destination port for it
1331 * to BAD_PORT value.
1333 static inline __attribute__((always_inline)) void
1334 rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
1338 if (RTE_ETH_IS_IPV4_HDR(ptype)) {
1339 ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;
/* Incremental checksum compensation for the TTL decrement. */
1341 ipv4_hdr->time_to_live--;
1342 ipv4_hdr->hdr_checksum++;
/* NOTE(review): `(uint8_t)ipv4_hdr->total_length == 0 &&` looks odd —
 * the l3fwd original uses this to test the low byte of the BE length
 * before comparing against IPV4_MIN_LEN_BE; confirm intent. The missing
 * lines presumably set *dp = BAD_PORT when the check fails. */
1344 if (ihl > IPV4_MAX_VER_IHL_DIFF ||
1345 ((uint8_t)ipv4_hdr->total_length == 0 &&
1346 ipv4_hdr->total_length < IPV4_MIN_LEN_BE)) {
1353 #define rfc1812_process(mb, dp) do { } while (0)
1354 #endif /* DO_RFC_1812_CHECKS */
1355 #endif /* APP_LOOKUP_LPM && ENABLE_MULTI_BUFFER_OPTIMIZE */
1358 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1359 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
/*
 * Resolve the output port for a packet via LPM (IPv4) or LPM6 (IPv6).
 * Return paths are in lines missing from this view — presumably the
 * incoming portid on lookup failure; confirm against the full file.
 */
1361 static inline __attribute__((always_inline)) uint16_t
1362 get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
1363 uint32_t dst_ipv4, uint8_t portid)
1366 struct ipv6_hdr *ipv6_hdr;
1367 struct ether_hdr *eth_hdr;
/* NOTE(review): `m` does not match the visible parameter name `pkt` —
 * likely a copy/paste slip; should presumably be rte_pktmbuf_mtod(pkt, ...).
 * Confirm whether `m` is declared in the missing lines before changing. */
1368 struct ether_hdr *eth_h = rte_pktmbuf_mtod(m, struct ether_hdr *);
1370 if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
1371 if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
1374 } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
1375 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1376 ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
1377 if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
1378 ipv6_hdr->dst_addr, &next_hop) != 0)
/*
 * Per-packet LPM path: look up the destination port for one packet and
 * rewrite its Ethernet header from val_eth[] via a 16-byte SSE blend,
 * applying RFC1812 checks on the IPv4 header.
 * NOTE(review): locals (te, ve, dp, dst_ipv4 declarations) and some
 * statements are in lines missing from this view.
 */
1388 process_packet(struct lcore_conf *qconf, struct rte_mbuf *pkt,
1389 uint16_t *dst_port, uint8_t portid)
1391 struct ether_hdr *eth_hdr;
1392 struct ipv4_hdr *ipv4_hdr;
1397 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
1398 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1401 dst_ipv4 = ipv4_hdr->dst_addr;
1402 dst_ipv4 = rte_be_to_cpu_32(dst_ipv4);
1404 /*Changing the dp to incoming port*/
1405 dp = get_dst_port(qconf, pkt, dst_ipv4, portid);
1408 te = _mm_loadu_si128((__m128i *)eth_hdr);
1412 rfc1812_process(ipv4_hdr, dst_port, pkt->packet_type);
/* Blend new dst/src MAC (first 12 bytes) into the loaded header. */
1414 te = _mm_blend_epi16(te, ve, MASK_ETH);
1415 _mm_storeu_si128((__m128i *)eth_hdr, te);
1417 /* Wont be using the following fucntion*/
1417 /* Wont be using the following fucntion*/
1420 * Read packet_type and destination IPV4 addresses from 4 mbufs.
1423 processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
1425 uint32_t *ipv4_flag)
1427 struct ipv4_hdr *ipv4_hdr;
1428 struct ether_hdr *eth_hdr;
1429 uint32_t x0, x1, x2, x3;
1431 eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
1432 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1433 x0 = ipv4_hdr->dst_addr;
1434 ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
1436 eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
1437 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1438 x1 = ipv4_hdr->dst_addr;
1439 ipv4_flag[0] &= pkt[1]->packet_type;
1441 eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
1442 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1443 x2 = ipv4_hdr->dst_addr;
1444 ipv4_flag[0] &= pkt[2]->packet_type;
1446 eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
1447 ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
1448 x3 = ipv4_hdr->dst_addr;
1449 ipv4_flag[0] &= pkt[3]->packet_type;
1451 dip[0] = _mm_set_epi32(x3, x2, x1, x0);
1455 * Lookup into LPM for destination port.
1456 * If lookup fails, use incoming port (portid) as destination port.
/* Fast path does a vectorized rte_lpm_lookupx4 when all four packets are
 * IPv4; otherwise falls back to four scalar get_dst_port() calls. The
 * dip/ipv4_flag/portid parameter lines and the `dst` union declaration
 * are missing from this view. */
1459 processx4_step2(const struct lcore_conf *qconf,
1463 struct rte_mbuf *pkt[FWDSTEP],
1464 uint16_t dprt[FWDSTEP])
/* Shuffle mask converts each 32-bit lane from big- to little-endian. */
1467 const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
1468 4, 5, 6, 7, 0, 1, 2, 3);
1470 /* Byte swap 4 IPV4 addresses. */
1471 dip = _mm_shuffle_epi8(dip, bswap_mask);
1473 /* if all 4 packets are IPV4. */
1474 if (likely(ipv4_flag)) {
1475 rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid);
1478 dprt[0] = get_dst_port(qconf, pkt[0], dst.u32[0], portid);
1479 dprt[1] = get_dst_port(qconf, pkt[1], dst.u32[1], portid);
1480 dprt[2] = get_dst_port(qconf, pkt[2], dst.u32[2], portid);
1481 dprt[3] = get_dst_port(qconf, pkt[3], dst.u32[3], portid);
1486 * Update source and destination MAC addresses in the ethernet header.
1487 * Perform RFC1812 checks and updates for IPV4 packets.
1490 processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
1492 __m128i te[FWDSTEP];
1493 __m128i ve[FWDSTEP];
1494 __m128i *p[FWDSTEP];
1496 p[0] = rte_pktmbuf_mtod(pkt[0], __m128i *);
1497 p[1] = rte_pktmbuf_mtod(pkt[1], __m128i *);
1498 p[2] = rte_pktmbuf_mtod(pkt[2], __m128i *);
1499 p[3] = rte_pktmbuf_mtod(pkt[3], __m128i *);
/* val_eth[] holds the precomputed dst/src MAC pair per output port. */
1501 ve[0] = val_eth[dst_port[0]];
1502 te[0] = _mm_loadu_si128(p[0]);
1504 ve[1] = val_eth[dst_port[1]];
1505 te[1] = _mm_loadu_si128(p[1]);
1507 ve[2] = val_eth[dst_port[2]];
1508 te[2] = _mm_loadu_si128(p[2]);
1510 ve[3] = val_eth[dst_port[3]];
1511 te[3] = _mm_loadu_si128(p[3]);
1513 /* Update first 12 bytes, keep rest bytes intact. */
1514 te[0] = _mm_blend_epi16(te[0], ve[0], MASK_ETH);
1515 te[1] = _mm_blend_epi16(te[1], ve[1], MASK_ETH);
1516 te[2] = _mm_blend_epi16(te[2], ve[2], MASK_ETH);
1517 te[3] = _mm_blend_epi16(te[3], ve[3], MASK_ETH);
1519 _mm_storeu_si128(p[0], te[0]);
1520 _mm_storeu_si128(p[1], te[1]);
1521 _mm_storeu_si128(p[2], te[2]);
1522 _mm_storeu_si128(p[3], te[3]);
/* rfc1812_process may overwrite dst_port[i] with BAD_PORT on failure. */
1524 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
1525 &dst_port[0], pkt[0]->packet_type);
1526 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
1527 &dst_port[1], pkt[1]->packet_type);
1528 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
1529 &dst_port[2], pkt[2]->packet_type);
1530 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
1531 &dst_port[3], pkt[3]->packet_type);
1535 * We group consecutive packets with the same destionation port into one burst.
1536 * To avoid extra latency this is done together with some other packet
1537 * processing, but after we made a final decision about packet's destination.
1538 * To do this we maintain:
1539 * pnum - array of number of consecutive packets with the same dest port for
1540 * each packet in the input burst.
1541 * lp - pointer to the last updated element in the pnum.
1542 * dlp - dest port value lp corresponds to.
1545 #define GRPSZ (1 << FWDSTEP)
1546 #define GRPMSK (GRPSZ - 1)
/* NOTE(review): the macro body is fragmented in this view — the
 * same-port increment branch and the do/while(0) tail are in missing
 * lines. Visible part: on a port change, start a new group at pn[idx]. */
1548 #define GROUP_PORT_STEP(dlp, dcp, lp, pn, idx) do { \
1549 if (likely((dlp) == (dcp)[(idx)])) { \
1552 (dlp) = (dcp)[idx]; \
1553 (lp) = (pn) + (idx); \
1566 static inline uint16_t *
1567 port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
1569 static const struct {
1570 uint64_t pnum; /* prebuild 4 values for pnum[]. */
1571 int32_t idx; /* index for new last updated elemnet. */
1572 uint16_t lpv; /* add value to the last updated element. */
1575 /* 0: a != b, b != c, c != d, d != e */
1576 .pnum = UINT64_C(0x0001000100010001),
1581 /* 1: a == b, b != c, c != d, d != e */
1582 .pnum = UINT64_C(0x0001000100010002),
1587 /* 2: a != b, b == c, c != d, d != e */
1588 .pnum = UINT64_C(0x0001000100020001),
1593 /* 3: a == b, b == c, c != d, d != e */
1594 .pnum = UINT64_C(0x0001000100020003),
1599 /* 4: a != b, b != c, c == d, d != e */
1600 .pnum = UINT64_C(0x0001000200010001),
1605 /* 5: a == b, b != c, c == d, d != e */
1606 .pnum = UINT64_C(0x0001000200010002),
1611 /* 6: a != b, b == c, c == d, d != e */
1612 .pnum = UINT64_C(0x0001000200030001),
1617 /* 7: a == b, b == c, c == d, d != e */
1618 .pnum = UINT64_C(0x0001000200030004),
1623 /* 8: a != b, b != c, c != d, d == e */
1624 .pnum = UINT64_C(0x0002000100010001),
1629 /* 9: a == b, b != c, c != d, d == e */
1630 .pnum = UINT64_C(0x0002000100010002),
1635 /* 0xa: a != b, b == c, c != d, d == e */
1636 .pnum = UINT64_C(0x0002000100020001),
1641 /* 0xb: a == b, b == c, c != d, d == e */
1642 .pnum = UINT64_C(0x0002000100020003),
1647 /* 0xc: a != b, b != c, c == d, d == e */
1648 .pnum = UINT64_C(0x0002000300010001),
1653 /* 0xd: a == b, b != c, c == d, d == e */
1654 .pnum = UINT64_C(0x0002000300010002),
1659 /* 0xe: a != b, b == c, c == d, d == e */
1660 .pnum = UINT64_C(0x0002000300040001),
1665 /* 0xf: a == b, b == c, c == d, d == e */
1666 .pnum = UINT64_C(0x0002000300040005),
1673 uint16_t u16[FWDSTEP + 1];
1675 } *pnum = (void *)pn;
1679 dp1 = _mm_cmpeq_epi16(dp1, dp2);
1680 dp1 = _mm_unpacklo_epi16(dp1, dp1);
1681 v = _mm_movemask_ps((__m128)dp1);
1683 /* update last port counter. */
1684 lp[0] += gptbl[v].lpv;
1686 /* if dest port value has changed. */
1688 lp = pnum->u16 + gptbl[v].idx;
1690 pnum->u64 = gptbl[v].pnum;
1696 #endif /* APP_LOOKUP_METHOD */
1698 /* main processing loop */
1700 main_loop(__attribute__((unused)) void *dummy)
1702 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1704 uint64_t prev_tsc, diff_tsc, cur_tsc;
1706 uint8_t portid, queueid;
1707 struct lcore_conf *qconf;
1708 l2_phy_interface_t *port;
1709 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
1710 US_PER_S * BURST_TX_DRAIN_US;
1712 #if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
1713 (ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
1717 uint16_t dst_port[MAX_PKT_BURST];
1718 __m128i dip[MAX_PKT_BURST / FWDSTEP];
1719 uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
1720 uint16_t pnum[MAX_PKT_BURST + 1];
1725 lcore_id = rte_lcore_id();
1726 qconf = &lcore_conf[lcore_id];
1728 if (qconf->n_rx_queue == 0) {
1729 RTE_LOG(INFO, UDP_Replay, "lcore %u has nothing to do\n", lcore_id);
1733 RTE_LOG(INFO, UDP_Replay, "entering main loop on lcore %u\n", lcore_id);
1735 for (i = 0; i < qconf->n_rx_queue; i++) {
1737 portid = qconf->rx_queue_list[i].port_id;
1738 queueid = qconf->rx_queue_list[i].queue_id;
1739 RTE_LOG(INFO, UDP_Replay, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
1745 cur_tsc = rte_rdtsc();
1748 * TX burst queue drain
1750 diff_tsc = cur_tsc - prev_tsc;
1751 if (unlikely(diff_tsc > drain_tsc)) {
1754 * This could be optimized (use queueid instead of
1755 * portid), but it is not called so often
1757 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1758 if (qconf->tx_mbufs[portid].len == 0)
1761 qconf->tx_mbufs[portid].len,
1763 qconf->tx_mbufs[portid].len = 0;
1770 * Read packet from RX queues
1772 for (i = 0; i < qconf->n_rx_queue; ++i) {
1773 portid = qconf->rx_queue_list[i].port_id;
1774 queueid = qconf->rx_queue_list[i].queue_id;
1775 port = ifm_get_port(portid);
1777 nb_rx = port->retrieve_bulk_pkts(portid,
1778 queueid, pkts_burst);
1779 port->n_rxpkts += nb_rx;
1781 printf("port may be un initialized\n");
1785 rcv_pkt_count[portid] += nb_rx;
1789 #if (ENABLE_MULTI_BUFFER_OPTIMIZE == 1)
1790 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
1793 * Send nb_rx - nb_rx%8 packets
1796 int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
1797 for (j = 0; j < n; j += 8) {
1798 struct ether_hdr *eth_h0 =
1799 rte_pktmbuf_mtod(pkts_burst[j], struct ether_hdr *);
1800 struct ether_hdr *eth_h1 =
1801 rte_pktmbuf_mtod(pkts_burst[j+1], struct ether_hdr *);
1802 struct ether_hdr *eth_h2 =
1803 rte_pktmbuf_mtod(pkts_burst[j+2], struct ether_hdr *);
1804 struct ether_hdr *eth_h3 =
1805 rte_pktmbuf_mtod(pkts_burst[j+3], struct ether_hdr *);
1806 struct ether_hdr *eth_h4 =
1807 rte_pktmbuf_mtod(pkts_burst[j+4], struct ether_hdr *);
1808 struct ether_hdr *eth_h5 =
1809 rte_pktmbuf_mtod(pkts_burst[j+5], struct ether_hdr *);
1810 struct ether_hdr *eth_h6 =
1811 rte_pktmbuf_mtod(pkts_burst[j+6], struct ether_hdr *);
1812 struct ether_hdr *eth_h7 =
1813 rte_pktmbuf_mtod(pkts_burst[j+7], struct ether_hdr *);
1815 uint16_t ether_type;
1816 ether_type = (rte_cpu_to_be_16(eth_h0->ether_type) &
1817 rte_cpu_to_be_16(eth_h1->ether_type) &
1818 rte_cpu_to_be_16(eth_h2->ether_type) &
1819 rte_cpu_to_be_16(eth_h3->ether_type) &
1820 rte_cpu_to_be_16(eth_h4->ether_type) &
1821 rte_cpu_to_be_16(eth_h5->ether_type) &
1822 rte_cpu_to_be_16(eth_h6->ether_type) &
1823 rte_cpu_to_be_16(eth_h7->ether_type));
1825 if (ether_type == ETHER_TYPE_IPv4) {
1826 simple_ipv4_replay_8pkts(
1827 &pkts_burst[j], portid, qconf);
1828 } else if (ether_type == ETHER_TYPE_IPv6) {
1829 simple_ipv6_replay_8pkts(&pkts_burst[j],
1832 udp_replay_simple_replay(pkts_burst[j],
1834 udp_replay_simple_replay(pkts_burst[j+1],
1836 udp_replay_simple_replay(pkts_burst[j+2],
1838 udp_replay_simple_replay(pkts_burst[j+3],
1840 udp_replay_simple_replay(pkts_burst[j+4],
1842 udp_replay_simple_replay(pkts_burst[j+5],
1844 udp_replay_simple_replay(pkts_burst[j+6],
1846 udp_replay_simple_replay(pkts_burst[j+7],
1851 for (; j < nb_rx ; j++) {
1852 udp_replay_simple_replay(pkts_burst[j],
1856 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
1858 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
1859 for (j = 0; j != k; j += FWDSTEP) {
1860 processx4_step1(&pkts_burst[j],
1862 &ipv4_flag[j / FWDSTEP]);
1865 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
1866 for (j = 0; j != k; j += FWDSTEP) {
1867 processx4_step2(qconf, dip[j / FWDSTEP],
1868 ipv4_flag[j / FWDSTEP], portid,
1869 &pkts_burst[j], &dst_port[j]);
1873 * Finish packet processing and group consecutive
1874 * packets with the same destination port.
1876 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
1883 processx4_step3(pkts_burst, dst_port);
1885 /* dp1: <d[0], d[1], d[2], d[3], ... > */
1886 dp1 = _mm_loadu_si128((__m128i *)dst_port);
1888 for (j = FWDSTEP; j != k; j += FWDSTEP) {
1889 processx4_step3(&pkts_burst[j],
1894 * <d[j-3], d[j-2], d[j-1], d[j], ... >
1896 dp2 = _mm_loadu_si128((__m128i *)
1897 &dst_port[j - FWDSTEP + 1]);
1898 lp = port_groupx4(&pnum[j - FWDSTEP],
1903 * <d[j], d[j+1], d[j+2], d[j+3], ... >
1905 dp1 = _mm_srli_si128(dp2,
1907 sizeof(dst_port[0]));
1911 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
1913 dp2 = _mm_shufflelo_epi16(dp1, 0xf9);
1914 lp = port_groupx4(&pnum[j - FWDSTEP], lp,
1918 * remove values added by the last repeated
1922 dlp = dst_port[j - 1];
1924 /* set dlp and lp to the never used values. */
1926 lp = pnum + MAX_PKT_BURST;
1929 /* Process up to last 3 packets one by one. */
1930 switch (nb_rx % FWDSTEP) {
1932 process_packet(qconf, pkts_burst[j],
1933 dst_port + j, portid);
1934 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
1937 process_packet(qconf, pkts_burst[j],
1938 dst_port + j, portid);
1939 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
1942 process_packet(qconf, pkts_burst[j],
1943 dst_port + j, portid);
1944 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
1949 * Send packets out, through destination port.
1950 * Consecuteve pacekts with the same destination port
1951 * are already grouped together.
1952 * If destination port for the packet equals BAD_PORT,
1953 * then free the packet without sending it out.
1955 for (j = 0; j < nb_rx; j += k) {
1963 if (likely(pn != BAD_PORT)) {
1964 send_packetsx4(qconf, pn,
1967 for (m = j; m != j + k; m++)
1968 rte_pktmbuf_free(pkts_burst[m]);
1972 #endif /* APP_LOOKUP_METHOD */
1973 #else /* ENABLE_MULTI_BUFFER_OPTIMIZE == 0 */
1975 /* Prefetch first packets */
1976 for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
1977 rte_prefetch0(rte_pktmbuf_mtod(
1978 pkts_burst[j], void *));
1981 /* Prefetch and forward already prefetched packets */
1982 for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
1983 rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
1984 j + PREFETCH_OFFSET], void *));
1985 udp_replay_simple_replay(pkts_burst[j], portid,
1989 /* Forward remaining prefetched packets */
1990 for (; j < nb_rx; j++) {
1991 udp_replay_simple_replay(pkts_burst[j], portid,
1994 #endif /* ENABLE_MULTI_BUFFER_OPTIMIZE */
2007 printf ("UDP_Replay stats:\n");
2008 printf ("--------------\n");
2009 printf (" Port Rx Packet Tx Packet Rx Pkt Drop Tx Pkt Drop arp_pkts\n");
2010 for (i = 0; i < nb_lcore_params; ++i) {
2011 portid = lcore_params[i].port_id;
2012 printf ("%5u%15lu%15lu%17d%17d%14u",portid, rcv_pkt_count[portid], tx_pkt_count[portid],j,j, arp_pkts);
2024 for (i = 0; i < 32; i++) {
2025 rcv_pkt_count[i] = 0;
2026 tx_pkt_count[i] = 0;
/*
 * Validate the lcore_params[] table: queue id in range, lcore enabled in
 * the coremask, and warn when an lcore sits on a non-zero NUMA socket
 * with NUMA support off. Return statements are in lines missing here.
 */
2033 check_lcore_params(void)
2035 uint8_t queue, lcore;
2039 for (i = 0; i < nb_lcore_params; ++i) {
2040 queue = lcore_params[i].queue_id;
2041 if (queue >= MAX_RX_QUEUE_PER_PORT) {
2042 printf("invalid queue number: %hhu\n", queue);
2045 lcore = lcore_params[i].lcore_id;
2046 if (!rte_lcore_is_enabled(lcore)) {
2047 printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
/* NOTE(review): precedence bug — `!=` binds tighter than `=`, so
 * socketid is assigned 0/1 (the comparison result), not the socket id.
 * Likely intended: ((socketid = rte_lcore_to_socket_id(lcore)) != 0).
 * The %d in the warning below then prints the boolean, not the socket. */
2050 if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
2052 printf("warning: lcore %hhu is on socket %d with numa off \n",
/*
 * Verify every port referenced by lcore_params[] is both enabled in the
 * port mask and actually present (portid < nb_ports). Return statements
 * are in lines missing from this view.
 */
2060 check_port_config(const unsigned nb_ports)
2065 for (i = 0; i < nb_lcore_params; ++i) {
2066 portid = lcore_params[i].port_id;
2067 if ((enabled_port_mask & (1 << portid)) == 0) {
2068 printf("port %u is not enabled in port mask\n", portid);
2071 if (portid >= nb_ports) {
2072 printf("port %u is not present on the board\n", portid);
/*
 * Number of RX queues configured for `port` = highest queue_id referenced
 * in lcore_params[] for that port, plus one. `queue`'s declaration and
 * initial value are in lines missing from this view (presumably -1/0).
 */
2080 get_port_n_rx_queues(const uint8_t port)
2085 for (i = 0; i < nb_lcore_params; ++i) {
2086 if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
2087 queue = lcore_params[i].queue_id;
2089 return (uint8_t)(++queue);
/*
 * Distribute lcore_params[] entries into each lcore's rx_queue_list,
 * rejecting any lcore assigned more than MAX_RX_QUEUE_PER_LCORE queues.
 */
2093 init_lcore_rx_queues(void)
2095 uint16_t i, nb_rx_queue;
2098 for (i = 0; i < nb_lcore_params; ++i) {
2099 lcore = lcore_params[i].lcore_id;
2100 nb_rx_queue = lcore_conf[lcore].n_rx_queue;
2101 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
2102 printf("error: too many queues (%u) for lcore: %u\n",
2103 (unsigned)nb_rx_queue + 1, (unsigned)lcore);
2106 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
2107 lcore_params[i].port_id;
2108 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
2109 lcore_params[i].queue_id;
2110 lcore_conf[lcore].n_rx_queue++;
/* Print the command-line usage/help text for this application. */
2118 print_usage(const char *prgname)
2120 printf ("%s [EAL options] -- -p PORTMASK -P"
2121 " [--config (port,queue,lcore)[,(port,queue,lcore]]"
2122 " [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
2123 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
2124 " -P : enable promiscuous mode\n"
2125 " -v version: display app version\n"
2126 " --config (port,queue,lcore): rx queues configuration\n"
2127 " --eth-dest=X,MM:MM:MM:MM:MM:MM: optional, ethernet destination for port X\n"
2128 " --no-numa: optional, disable numa awareness\n"
2129 " --no-hw-csum: optional, disable hw ip checksum\n"
2130 " --ipv6: optional, specify it if running ipv6 packets\n"
2131 " --enable-jumbo: enable jumbo frame"
2132 " which max packet len is PKTLEN in decimal (64-9600)\n"
2133 " --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n",
/*
 * Parse a decimal packet-length string with strtoul; the visible check
 * rejects empty input or trailing garbage. Return paths are in lines
 * missing from this view.
 */
2137 static int parse_max_pkt_len(const char *pktlen)
2142 /* parse decimal string */
2143 len = strtoul(pktlen, &end, 10);
2144 if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
/*
 * Load per-port link IPs from the [linkip] section of a cfgfile:
 * num_ports, ip_type, then one "portN" entry per port (stored as an
 * IPv4 string or parsed into link_ipv6[] for IPv6).
 * NOTE(review): rte_cfgfile_load() and rte_cfgfile_get_entry() results
 * are passed to atoi()/strdup() without a NULL check in the visible
 * lines — a missing file or key would crash; confirm and guard.
 */
2154 parse_link_ip(const char *file_name)
2157 struct rte_cfgfile *file;
2160 file = rte_cfgfile_load(file_name, 0);
2161 entry = rte_cfgfile_get_entry(file, "linkip", "num_ports");
2162 numports = (uint32_t)atoi(entry);
2163 if (numports <= 0 || numports > 32)
2164 rte_panic("numports is not valid\n");
2165 entry = rte_cfgfile_get_entry(file, "linkip", "ip_type");
2166 type = (uint32_t)atoi(entry);
2167 for (i = 0;i < numports; i++) {
2168 sprintf(buf, "port%d", i);
2169 entry = rte_cfgfile_get_entry(file, "linkip", buf);
2173 ipv4[i] = strdup(entry);
2175 my_inet_pton_ipv6(AF_INET6, entry, &link_ipv6[i][0]);
/*
 * Parse the hexadecimal port mask (-p). Rejects empty input or trailing
 * garbage; return paths are in lines missing from this view.
 */
2180 parse_portmask(const char *portmask)
2185 /* parse hexadecimal string */
2186 pm = strtoul(portmask, &end, 16);
2187 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
2196 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
/*
 * Parse --hash-entry-num as a hexadecimal count. Rejects empty input or
 * trailing garbage; return paths are in lines missing from this view.
 */
2198 parse_hash_entry_number(const char *hash_entry_num)
2201 unsigned long hash_en;
2202 /* parse hexadecimal string */
2203 hash_en = strtoul(hash_entry_num, &end, 16);
2204 if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
/*
 * Parse --config "(port,queue,lcore)[,(port,queue,lcore)]..." into
 * lcore_params_array[], resetting nb_lcore_params first. Each tuple is
 * extracted between '(' and ')', split on ',', and range-checked to 255.
 * Error-return lines are missing from this view.
 */
2215 parse_config(const char *q_arg)
2218 const char *p, *p0 = q_arg;
2226 unsigned long int_fld[_NUM_FLD];
2227 char *str_fld[_NUM_FLD];
2231 nb_lcore_params = 0;
2233 while ((p = strchr(p0,'(')) != NULL) {
2235 if((p0 = strchr(p,')')) == NULL)
2239 if(size >= sizeof(s))
2242 snprintf(s, sizeof(s), "%.*s", size, p);
2243 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
2245 for (i = 0; i < _NUM_FLD; i++){
2247 int_fld[i] = strtoul(str_fld[i], &end, 0);
2248 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
2251 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
2252 printf("exceeded max number of lcore params: %hu\n",
2256 lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
2257 lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
2258 lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
2261 lcore_params = lcore_params_array;
/*
 * Parse --eth-dest=PORT,MM:MM:MM:MM:MM:MM — store the peer MAC for PORT
 * in dest_eth_addr[] and refresh the val_eth[] blend template used by
 * the TX fast path.
 */
2266 parse_eth_dest(const char *optarg)
2270 uint8_t c, *dest, peer_addr[6];
2273 portid = strtoul(optarg, &port_end, 10);
2274 if (errno != 0 || port_end == optarg || *port_end++ != ',')
2275 rte_exit(EXIT_FAILURE,
2276 "Invalid eth-dest: %s", optarg);
2277 if (portid >= RTE_MAX_ETHPORTS)
2278 rte_exit(EXIT_FAILURE,
2279 "eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
2280 portid, RTE_MAX_ETHPORTS);
2282 if (cmdline_parse_etheraddr(NULL, port_end,
2283 &peer_addr, sizeof(peer_addr)) < 0)
2284 rte_exit(EXIT_FAILURE,
2285 "Invalid ethernet address: %s\n",
2287 dest = (uint8_t *)&dest_eth_addr[portid];
2288 for (c = 0; c < 6; c++)
2289 dest[c] = peer_addr[c];
/* Keep the 64-bit MAC template in sync for the SSE header rewrite. */
2290 *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
/* Long-option name strings shared by lgopts[] and the strncmp dispatch
 * in parse_args(). */
2293 #define CMD_LINE_OPT_CONFIG "config"
2294 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
2295 #define CMD_LINE_OPT_NO_NUMA "no-numa"
2296 #define CMD_LINE_OPT_NO_HW_CSUM "no-hw-csum"
2297 #define CMD_LINE_OPT_IPV6 "ipv6"
2298 #define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
2299 #define CMD_LINE_OPT_VERSION "version"
2300 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
2302 /* Parse the argument given in the command line of the application */
/*
 * getopt_long loop over -v/-s/-p/-P plus the CMD_LINE_OPT_* long
 * options; populates global configuration (enabled_port_mask, port_conf,
 * lcore params, hash_entry_number, ...). Many case labels, breaks and
 * early returns are in lines missing from this view.
 */
2304 parse_args(int argc, char **argv)
2308 int option_index, v_present = 0;
2309 char *prgname = argv[0];
2310 static struct option lgopts[] = {
2311 {CMD_LINE_OPT_CONFIG, 1, 0, 0},
2312 {CMD_LINE_OPT_ETH_DEST, 1, 0, 0},
2313 {CMD_LINE_OPT_NO_NUMA, 0, 0, 0},
2314 {CMD_LINE_OPT_NO_HW_CSUM, 0, 0, 0},
2315 {CMD_LINE_OPT_IPV6, 0, 0, 0},
2316 {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0},
2317 {CMD_LINE_OPT_VERSION, 0, 0, 0},
2318 {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, 0},
2324 while ((opt = getopt_long(argc, argvopt, "v:s:p:P",
2325 lgopts, &option_index)) != EOF) {
/* -s: per-port link IP configuration file. */
2329 parse_link_ip(optarg);
2334 enabled_port_mask = parse_portmask(optarg);
2335 if (enabled_port_mask == 0) {
2336 printf("invalid portmask\n");
2337 print_usage(prgname);
2342 printf("Promiscuous mode selected\n");
/* case 0: long options, dispatched by name below. */
2348 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_CONFIG,
2349 sizeof (CMD_LINE_OPT_CONFIG))) {
2350 ret = parse_config(optarg);
2352 printf("invalid config\n");
2353 print_usage(prgname);
2358 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ETH_DEST,
2359 sizeof(CMD_LINE_OPT_ETH_DEST))) {
2360 parse_eth_dest(optarg);
2363 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_NUMA,
2364 sizeof(CMD_LINE_OPT_NO_NUMA))) {
2365 printf("numa is disabled \n");
2369 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_HW_CSUM,
2370 sizeof(CMD_LINE_OPT_NO_HW_CSUM))) {
2371 printf("numa is hw ip checksum \n");
2372 port_conf.rxmode.hw_ip_checksum = 0;
2373 rx_conf.rx_free_thresh = 30;
2377 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2378 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_IPV6,
2379 sizeof(CMD_LINE_OPT_IPV6))) {
2380 printf("ipv6 is specified \n");
/* --version may appear only once; v_present guards duplicates. */
2385 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_VERSION,
2386 sizeof (CMD_LINE_OPT_VERSION))) {
2388 rte_panic("Error: VERSION is provided more than once\n");
2390 printf("Version: %s\n", VERSION_STR);
2394 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ENABLE_JUMBO,
2395 sizeof (CMD_LINE_OPT_ENABLE_JUMBO))) {
2396 struct option lenopts = {"max-pkt-len", required_argument, 0, 0};
2398 printf("jumbo frame is enabled - disabling simple TX path\n");
2399 port_conf.rxmode.jumbo_frame = 1;
2401 /* if no max-pkt-len set, use the default value ETHER_MAX_LEN */
2402 if (0 == getopt_long(argc, argvopt, "", &lenopts, &option_index)) {
2403 ret = parse_max_pkt_len(optarg);
2404 if ((ret < 64) || (ret > MAX_JUMBO_PKT_LEN)){
2405 printf("invalid packet length\n");
2406 print_usage(prgname);
2409 port_conf.rxmode.max_rx_pkt_len = ret;
2411 printf("set jumbo frame max packet length to %u\n",
2412 (unsigned int)port_conf.rxmode.max_rx_pkt_len);
2414 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
2415 if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_HASH_ENTRY_NUM,
2416 sizeof(CMD_LINE_OPT_HASH_ENTRY_NUM))) {
2417 ret = parse_hash_entry_number(optarg);
2418 if ((ret > 0) && (ret <= UDP_Replay_HASH_ENTRIES)) {
2419 hash_entry_number = ret;
2421 printf("invalid hash entry number\n");
2422 print_usage(prgname);
2430 print_usage(prgname);
/* Restore argv[optind-1] so EAL-style re-parsing sees the program name. */
2436 argv[optind-1] = prgname;
2439 optind = 0; /* reset getopt lib */
2443 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
/* Convert a host-order ipv4_5tuple route key into the packed big-endian
 * ipv4_5tuple_host layout used by the exact-match hash. */
2445 static void convert_ipv4_5tuple(struct ipv4_5tuple* key1,
2446 union ipv4_5tuple_host* key2)
2448 key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst);
2449 key2->ip_src = rte_cpu_to_be_32(key1->ip_src);
2450 key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
2451 key2->port_src = rte_cpu_to_be_16(key1->port_src);
2452 key2->proto = key1->proto;
/* Convert an ipv6_5tuple route key into the packed ipv6_5tuple_host hash
 * layout: addresses copied byte-for-byte, ports converted to big-endian. */
2458 static void convert_ipv6_5tuple(struct ipv6_5tuple* key1,
2459 union ipv6_5tuple_host* key2)
2462 for (i = 0; i < 16; i++)
2464 key2->ip_dst[i] = key1->ip_dst[i];
2465 key2->ip_src[i] = key1->ip_src[i];
2467 key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
2468 key2->port_src = rte_cpu_to_be_16(key1->port_src);
2469 key2->proto = key1->proto;
2476 #define BYTE_VALUE_MAX 256
2477 #define ALL_32_BITS 0xffffffff
2478 #define BIT_8_TO_15 0x0000ff00
2480 populate_ipv4_few_flow_into_table(const struct rte_hash* h)
2484 uint32_t array_len = sizeof(ipv4_udp_replay_route_array)/sizeof(ipv4_udp_replay_route_array[0]);
2486 mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
2487 for (i = 0; i < array_len; i++) {
2488 struct ipv4_udp_replay_route entry;
2489 union ipv4_5tuple_host newkey;
2490 entry = ipv4_udp_replay_route_array[i];
2491 convert_ipv4_5tuple(&entry.key, &newkey);
2492 ret = rte_hash_add_key (h,(void *) &newkey);
2494 rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
2495 " to the udp_replay hash.\n", i);
2497 ipv4_udp_replay_out_if[ret] = entry.if_out;
2499 printf("Hash: Adding 0x%" PRIx32 " keys\n", array_len);
2502 #define BIT_16_TO_23 0x00ff0000
2504 populate_ipv6_few_flow_into_table(const struct rte_hash* h)
2508 uint32_t array_len = sizeof(ipv6_udp_replay_route_array)/sizeof(ipv6_udp_replay_route_array[0]);
2510 mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
2511 mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
2512 for (i = 0; i < array_len; i++) {
2513 struct ipv6_udp_replay_route entry;
2514 union ipv6_5tuple_host newkey;
2515 entry = ipv6_udp_replay_route_array[i];
2516 convert_ipv6_5tuple(&entry.key, &newkey);
2517 ret = rte_hash_add_key (h, (void *) &newkey);
2519 rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
2520 " to the udp_replay hash.\n", i);
2522 ipv6_udp_replay_out_if[ret] = entry.if_out;
2524 printf("Hash: Adding 0x%" PRIx32 "keys\n", array_len);
2527 #define NUMBER_PORT_USED 4
2529 populate_ipv4_many_flow_into_table(const struct rte_hash* h,
2530 unsigned int nr_flow)
2533 mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
2534 for (i = 0; i < nr_flow; i++) {
2535 struct ipv4_udp_replay_route entry;
2536 union ipv4_5tuple_host newkey;
2537 uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
2538 uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
2539 uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
2540 /* Create the ipv4 exact match flow */
2541 memset(&entry, 0, sizeof(entry));
2542 switch (i & (NUMBER_PORT_USED -1)) {
2544 entry = ipv4_udp_replay_route_array[0];
2545 entry.key.ip_dst = IPv4(101,c,b,a);
2548 entry = ipv4_udp_replay_route_array[1];
2549 entry.key.ip_dst = IPv4(201,c,b,a);
2552 entry = ipv4_udp_replay_route_array[2];
2553 entry.key.ip_dst = IPv4(111,c,b,a);
2556 entry = ipv4_udp_replay_route_array[3];
2557 entry.key.ip_dst = IPv4(211,c,b,a);
2560 convert_ipv4_5tuple(&entry.key, &newkey);
2561 int32_t ret = rte_hash_add_key(h,(void *) &newkey);
2563 rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
2565 ipv4_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
2568 printf("Hash: Adding 0x%x keys\n", nr_flow);
2572 populate_ipv6_many_flow_into_table(const struct rte_hash* h,
2573 unsigned int nr_flow)
2576 mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
2577 mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
2578 for (i = 0; i < nr_flow; i++) {
2579 struct ipv6_udp_replay_route entry;
2580 union ipv6_5tuple_host newkey;
2581 uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
2582 uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
2583 uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
2584 /* Create the ipv6 exact match flow */
2585 memset(&entry, 0, sizeof(entry));
2586 switch (i & (NUMBER_PORT_USED - 1)) {
2587 case 0: entry = ipv6_udp_replay_route_array[0]; break;
2588 case 1: entry = ipv6_udp_replay_route_array[1]; break;
2589 case 2: entry = ipv6_udp_replay_route_array[2]; break;
2590 case 3: entry = ipv6_udp_replay_route_array[3]; break;
2592 entry.key.ip_dst[13] = c;
2593 entry.key.ip_dst[14] = b;
2594 entry.key.ip_dst[15] = a;
2595 convert_ipv6_5tuple(&entry.key, &newkey);
2596 int32_t ret = rte_hash_add_key(h,(void *) &newkey);
2598 rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
2600 ipv6_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
2603 printf("Hash: Adding 0x%x keys\n", nr_flow);
2608 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
2610 setup_lpm(int socketid)
2612 struct rte_lpm6_config config;
2617 /* create the LPM table */
2618 snprintf(s, sizeof(s), "IPV4_UDP_Replay_LPM_%d", socketid);
2619 ipv4_udp_replay_lookup_struct[socketid] = rte_lpm_create(s, socketid,
2620 IPV4_UDP_Replay_LPM_MAX_RULES, 0);
2621 if (ipv4_udp_replay_lookup_struct[socketid] == NULL)
2622 rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
2623 " on socket %d\n", socketid);
2625 /* populate the LPM table */
2626 for (i = 0; i < IPV4_UDP_Replay_NUM_ROUTES; i++) {
2628 /* skip unused ports */
2629 if ((1 << ipv4_udp_replay_route_array[i].if_out &
2630 enabled_port_mask) == 0)
2633 ret = rte_lpm_add(ipv4_udp_replay_lookup_struct[socketid],
2634 ipv4_udp_replay_route_array[i].ip,
2635 ipv4_udp_replay_route_array[i].depth,
2636 ipv4_udp_replay_route_array[i].if_out);
2639 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
2640 "udp_replay LPM table on socket %d\n",
2644 printf("LPM: Adding route 0x%08x / %d (%d)\n",
2645 (unsigned)ipv4_udp_replay_route_array[i].ip,
2646 ipv4_udp_replay_route_array[i].depth,
2647 ipv4_udp_replay_route_array[i].if_out);
2650 /* create the LPM6 table */
2651 snprintf(s, sizeof(s), "IPV6_UDP_Replay_LPM_%d", socketid);
2653 config.max_rules = IPV6_UDP_Replay_LPM_MAX_RULES;
2654 config.number_tbl8s = IPV6_UDP_Replay_LPM_NUMBER_TBL8S;
2656 ipv6_udp_replay_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
2658 if (ipv6_udp_replay_lookup_struct[socketid] == NULL)
2659 rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
2660 " on socket %d\n", socketid);
2662 /* populate the LPM table */
2663 for (i = 0; i < IPV6_UDP_Replay_NUM_ROUTES; i++) {
2665 /* skip unused ports */
2666 if ((1 << ipv6_udp_replay_route_array[i].if_out &
2667 enabled_port_mask) == 0)
2670 ret = rte_lpm6_add(ipv6_udp_replay_lookup_struct[socketid],
2671 ipv6_udp_replay_route_array[i].ip,
2672 ipv6_udp_replay_route_array[i].depth,
2673 ipv6_udp_replay_route_array[i].if_out);
2676 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
2677 "udp_replay LPM table on socket %d\n",
2681 printf("LPM: Adding route %s / %d (%d)\n",
2683 ipv6_udp_replay_route_array[i].depth,
2684 ipv6_udp_replay_route_array[i].if_out);
2694 /* Check the link status of all ports in up to 9s, and print them finally */
2696 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
2698 #define CHECK_INTERVAL 100 /* 100ms */
2699 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2700 uint8_t portid, count, all_ports_up, print_flag = 0;
2701 struct rte_eth_link link;
2703 printf("\nChecking link status");
2705 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2707 for (portid = 0; portid < port_num; portid++) {
2708 if ((port_mask & (1 << portid)) == 0)
2710 memset(&link, 0, sizeof(link));
2711 rte_eth_link_get_nowait(portid, &link);
2712 /* print link status if flag set */
2713 if (print_flag == 1) {
2714 if (link.link_status)
2715 printf("Port %d Link Up - speed %u "
2716 "Mbps - %s\n", (uint8_t)portid,
2717 (unsigned)link.link_speed,
2718 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2719 ("full-duplex") : ("half-duplex\n"));
2721 printf("Port %d Link Down\n",
2725 /* clear all_ports_up flag if any link down */
2726 if (link.link_status == 0) {
2731 /* after finally printing all link status, get out */
2732 if (print_flag == 1)
2735 if (all_ports_up == 0) {
2738 rte_delay_ms(CHECK_INTERVAL);
2741 /* set the print_flag if all ports up or timeout */
2742 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Application entry point: parses command-line arguments, initializes
 * the DPDK EAL, the ARP library and per-port configuration, sets up
 * every enabled port through the interface manager, launches the
 * per-lcore packet-processing loop and then serves the interactive
 * command line until "quit".
 *
 * NOTE(review): this extraction is missing several original lines
 * (declarations, braces and error-check branches); the visible lines
 * are preserved verbatim below.
 */
2750 main(int argc, char **argv)
2755 uint32_t n_tx_queue;
2756 uint8_t portid, nb_rx_queue;
2759 struct pipeline_params *params;
2761 /* parse application arguments (after the EAL ones) */
2762 ret = parse_args(argc, argv);
2764 rte_exit(EXIT_FAILURE, "Invalid UDP_Replay parameters\n");
2767 ret = rte_eal_init(argc, argv);
2769 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2772 timer_lcore = rte_lcore_id();
2774 if (check_lcore_params() < 0)
2775 rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
2777 ret = init_lcore_rx_queues();
2779 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
/* Hand a copy of the default pipeline parameters to the ARP library. */
2781 params = rte_malloc(NULL, sizeof(*params), RTE_CACHE_LINE_SIZE);
2782 memcpy(params, &def_pipeline_params, sizeof(def_pipeline_params));
2783 lib_arp_init(params, NULL);
/* NOTE(review): rte_malloc result is used unchecked — confirm OOM policy. */
2785 nb_ports = rte_eth_dev_count();
2786 num_ports = nb_ports;
2788 if (nb_ports > RTE_MAX_ETHPORTS)
2789 nb_ports = RTE_MAX_ETHPORTS;
2791 if (check_port_config(nb_ports) < 0)
2792 rte_exit(EXIT_FAILURE, "check_port_config failed\n");
2795 *Configuring port_config_t structure for interface manager initialization
2797 size = RTE_CACHE_LINE_ROUNDUP(sizeof(port_config_t));
2798 port_config = rte_zmalloc(NULL, (RTE_MAX_ETHPORTS * size), RTE_CACHE_LINE_SIZE);
2799 if (port_config == NULL)
2800 rte_panic("port_config is NULL: Memory Allocation failure\n");
2801 /* initialize all ports */
2802 for (portid = 0; portid < nb_ports; portid++) {
2803 /* skip ports that are not enabled */
2804 if ((enabled_port_mask & (1 << portid)) == 0) {
2805 printf("\nSkipping disabled port %d\n", portid);
2811 printf("Initializing port %d ... ", portid );
/* tx queue count mirrors rx queue count, capped at the per-port max */
2814 nb_rx_queue = get_port_n_rx_queues(portid);
2815 n_tx_queue = nb_rx_queue;
2816 if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
2817 n_tx_queue = MAX_TX_QUEUE_PER_PORT;
/* Fill the interface-manager port descriptor for this port. */
2819 port_config[portid].port_id = portid;
2820 port_config[portid].nrx_queue = nb_rx_queue;
2821 port_config[portid].ntx_queue = n_tx_queue;
2822 port_config[portid].state = 1;
2823 port_config[portid].promisc = promiscuous_on;
2824 port_config[portid].mempool.pool_size = MEMPOOL_SIZE;
2825 port_config[portid].mempool.buffer_size = BUFFER_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
2826 port_config[portid].mempool.cache_size = CACHE_SIZE;
2827 port_config[portid].mempool.cpu_socket_id = rte_socket_id();
2828 memcpy (&port_config[portid].port_conf, &port_conf, sizeof(struct rte_eth_conf));
2829 memcpy (&port_config[portid].rx_conf, &rx_conf, sizeof(struct rte_eth_rxconf));
2830 memcpy (&port_config[portid].tx_conf, &tx_conf, sizeof(struct rte_eth_txconf));
2832 /* Enable TCP and UDP HW Checksum , when required */
2833 //port_config[portid].tx_conf.txq_flags &=
2834 // ~(ETH_TXQ_FLAGS_NOXSUMTCP|ETH_TXQ_FLAGS_NOXSUMUDP);
2836 if (ifm_port_setup (portid, &port_config[portid]))
2837 rte_panic ("Port Setup Failed: %"PRIu32"\n", portid);
2840 check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
2845 populate_lpm_routes();
2846 convert_ipstr_to_numeric();
2847 /* launch per-lcore init on every lcore */
2848 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
/* Interactive command line; blocks until the user quits. */
2849 cl = cmdline_stdin_new(main_ctx, "Replay>");
2851 rte_panic("Cannot create cmdline instance\n");
2852 cmdline_interact(cl);
2853 cmdline_stdin_exit(cl);
/* NOTE(review): rte_exit() here means the lcore-wait loop below is
 * unreachable — confirm whether this ordering is intentional. */
2855 rte_exit(0, "Bye!\n");
2856 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
2857 if (rte_eal_wait_lcore(lcore_id) < 0)
2863 /**********************************************************/
2865 struct cmd_obj_clear_result {
2866 cmdline_fixed_string_t clear;
2867 cmdline_fixed_string_t udp_replay;
2868 cmdline_fixed_string_t stats;
/*
 * Callback for the "UDP_Replay clear stats" command.
 * NOTE(review): the function body is not visible in this extraction;
 * presumably it resets the rx/tx statistics counters (see the inst's
 * help_str) — confirm against the full source.
 */
2871 static void cmd_clear_udp_replay_stats_parsed(
2872 __rte_unused void *parsed_result,
2873 __rte_unused struct cmdline *cl,
2874 __attribute__((unused)) void *data)
2880 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_udp_replay_string =
2881 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, udp_replay, "UDP_Replay");
2882 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_clear_string =
2883 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, clear, "clear");
2884 cmdline_parse_token_string_t cmd_clear_udp_replay_stats_stats_string =
2885 TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, stats, "stats");
2887 cmdline_parse_inst_t cmd_clear_udp_replay_stats = {
2888 .f = cmd_clear_udp_replay_stats_parsed, /* function to call */
2889 .data = NULL, /* 2nd arg of func */
2890 .help_str = "clears UDP_Replay stats for rx/tx",
2891 .tokens = { /* token list, NULL terminated */
2892 (void *)&cmd_clear_udp_replay_stats_udp_replay_string,
2893 (void *)&cmd_clear_udp_replay_stats_clear_string,
2894 (void *)&cmd_clear_udp_replay_stats_stats_string,
2898 /**********************************************************/
2899 struct cmd_obj_add_result {
2900 cmdline_fixed_string_t action;
2901 cmdline_fixed_string_t name;
/*
 * Callback for the "UDP_Replay stats" command.
 * NOTE(review): the function body is not visible in this extraction;
 * presumably it prints the rx/tx statistics (see the inst's help_str)
 * — confirm against the full source.
 */
2904 static void cmd_udp_replay_stats_parsed(
2905 __rte_unused void *parsed_result,
2906 __rte_unused struct cmdline *cl,
2907 __attribute__((unused)) void *data)
2912 cmdline_parse_token_string_t cmd_udp_replay_stats_udp_replay_string =
2913 TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, action, "UDP_Replay");
2914 cmdline_parse_token_string_t cmd_udp_replay_stats_stats_string =
2915 TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, name, "stats");
2917 cmdline_parse_inst_t cmd_udp_replay_stats = {
2918 .f = cmd_udp_replay_stats_parsed, /* function to call */
2919 .data = NULL, /* 2nd arg of func */
2920 .help_str = "UDP_Replay stats for rx/tx",
2921 .tokens = { /* token list, NULL terminated */
2922 (void *)&cmd_udp_replay_stats_udp_replay_string,
2923 (void *)&cmd_udp_replay_stats_stats_string,
2928 struct cmd_quit_result {
2929 cmdline_fixed_string_t quit;
2934 __rte_unused void *parsed_result,
2936 __rte_unused void *data)
/* Token matcher and command descriptor for "quit".
 * NOTE(review): the .data/.help_str/.tokens terminator lines are not
 * visible in this extraction; confirm against the full source. */
2941 static cmdline_parse_token_string_t cmd_quit_quit =
2942 TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
2944 static cmdline_parse_inst_t cmd_quit = {
2945 .f = cmd_quit_parsed,
2949 (void *) &cmd_quit_quit,
2954 /**********************************************************/
2955 /****** CONTEXT (list of instruction) */
2956 cmdline_parse_ctx_t main_ctx[] = {
2957 (cmdline_parse_inst_t *)&cmd_udp_replay_stats,
2958 (cmdline_parse_inst_t *)&cmd_clear_udp_replay_stats,
2959 (cmdline_parse_inst_t *)&cmd_quit,