/*
-// Copyright (c) 2017 Intel Corporation
+// Copyright (c) 2016-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// limitations under the License.
*/
-/* Changes for Correlated traffic VNF
-1. Receive UDP packet
-2. Modify received packet
- a.exchange src mac and destination mac
- b.exchange src ip and destination IP for both IPv4 and IPv6 cases
- c.exchange UDP src port and UDP destination port
- d.change the len of the response according to the IMIX definition (
- option to make traffic more realistic to emulate some IoT payloads)
-3. send modified packet to the port where it was received.
+/*
+Correlated traffic VNF :
+------------------------
+1. Receive UDP packet
+2. Modify received packet
+ a. exchange src mac and destination mac
+ b. exchange src ip and destination IP for both IPv4 and IPv6 cases
+ c. exchange UDP src port and UDP destination port
+ d. change the len of the response according to the IMIX definition (
+ option to make traffic more realistic to emulate some IoT payloads)
+3. send modified packet to the port where it was received.
Such VNF does not need LPM and routing table implementations.
As the packet modification is very minimal and there is no memory access as the packet is stored in L3 cache the
performance of the solution should be sufficient for testing the UDP NAT performance.
*/
+
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
+#include <rte_version.h>
#include <cmdline_parse.h>
#include <cmdline_parse_etheraddr.h>
#include "lib_icmpv6.h"
#include "app.h"
#include "vnf_common.h"
+#include "gateway.h"
#define IN6ADDRSZ 16
#define INADDRSZ 4
#define APP_LOOKUP_EXACT_MATCH 0
#define APP_LOOKUP_METHOD APP_LOOKUP_EXACT_MATCH
#endif
#endif
-/*raji*/
#include <stdio.h>
#include <netinet/in.h>
#include <termios.h>
+
/*
* When set to zero, simple forwaring path is eanbled.
* When set to one, optimized forwarding path is enabled.
#endif
-#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1
+#define RTE_LOGTYPE_UDP_Replay RTE_LOGTYPE_USER1
#define MAX_JUMBO_PKT_LEN 9600
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint64_t rcv_pkt_count[32] = {0};
static uint64_t tx_pkt_count[32] = {0};
+static uint32_t arp_support;
+
unsigned num_ports;
struct sockaddr_in ipaddr1, ipaddr2;
-
/* ethernet addresses of ports */
static uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
+
static __m128i val_eth[RTE_MAX_ETHPORTS];
cmdline_parse_ctx_t main_ctx[];
uint32_t timer_lcore;
uint32_t exit_loop = 1;
-
port_config_t *port_config;
#define MEMPOOL_SIZE 32 * 1024
#define BUFFER_SIZE 2048
#define CACHE_SIZE 256
-
-
/* replace first 12B of the ethernet header. */
#define MASK_ETH 0x3f
#define IP_TYPE_IPV4 0
#define IP_TYPE_IPV6 1
#define MAX_IP 32
-
const char* ipv4[MAX_IP];
uint8_t link_ipv6[MAX_IP][16];
uint32_t type, numports;
-
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */
.rx_drop_en = 0,
.rx_deferred_start = 0,
};
-
static struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = 36,
__m128i xmm[XMM_NUM_IN_IPV6_5TUPLE];
};
-struct ipv4_l3fwd_route {
+struct ipv4_udp_replay_route {
struct ipv4_5tuple key;
uint8_t if_out;
};
-struct ipv6_l3fwd_route {
+struct ipv6_udp_replay_route {
struct ipv6_5tuple key;
uint8_t if_out;
};
-static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
+static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
{{IPv4(101,0,0,0), IPv4(100,10,0,1), 101, 11, IPPROTO_TCP}, 0},
{{IPv4(201,0,0,0), IPv4(200,20,0,1), 102, 12, IPPROTO_TCP}, 1},
{{IPv4(111,0,0,0), IPv4(100,30,0,1), 101, 11, IPPROTO_TCP}, 2},
{{IPv4(211,0,0,0), IPv4(200,40,0,1), 102, 12, IPPROTO_TCP}, 3},
};
-static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
+static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
{{
{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
#ifdef RTE_ARCH_X86_64
/* default to 4 million hash entries (approx) */
-#define L3FWD_HASH_ENTRIES 1024*1024*4
+#define UDP_Replay_HASH_ENTRIES 1024*1024*4
#else
/* 32-bit has less address-space for hugepage memory, limit to 1M entries */
-#define L3FWD_HASH_ENTRIES 1024*1024*1
+#define UDP_Replay_HASH_ENTRIES 1024*1024*1
#endif
#define HASH_ENTRY_NUMBER_DEFAULT 4
static uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
-
void
app_link_up_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
{
cp->state = 1;
}
-
void
app_link_down_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
{
static const char digits[] = "0123456789";
int saw_digit, octets, ch;
unsigned char tmp[INADDRSZ], *tp;
-
saw_digit = 0;
octets = 0;
*(tp = tmp) = 0;
while ((ch = *src++) != '\0') {
const char *pch;
-
if ((pch = strchr(digits, ch)) != NULL) {
unsigned int new = *tp * 10 + (pch - digits);
-
if (new > 255)
return 0;
if (!saw_digit) {
}
if (octets < 4)
return 0;
-
memcpy(dst, tmp, INADDRSZ);
return 1;
}
int ch = 0, saw_xdigit = 0, count_xdigit = 0;
unsigned int val = 0;
unsigned dbloct_count = 0;
-
memset((tp = tmp), '\0', IN6ADDRSZ);
endp = tp + IN6ADDRSZ;
colonp = NULL;
- /* Leading :: requires some special handling. */
if (*src == ':')
if (*++src != ':')
return 0;
curtok = src;
saw_xdigit = count_xdigit = 0;
val = 0;
-
while ((ch = *src++) != '\0') {
const char *pch;
-
if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
pch = strchr((xdigits = xdigits_u), ch);
if (pch != NULL) {
dbloct_count++;
}
if (colonp != NULL) {
- /* if we already have 8 double octets, having a colon means error */
if (dbloct_count == 8)
return 0;
-
- /*
- * Since some memmove()'s erroneously fail to handle
- * overlapping regions, we'll do the shift by hand.
- */
const int n = tp - colonp;
int i;
-
for (i = 1; i <= n; i++) {
endp[-i] = colonp[n - i];
colonp[n - i] = 0;
memcpy(dst, tmp, IN6ADDRSZ);
return 1;
}
-
static int my_inet_pton_ipv6(int af, const char *src, void *dst)
{
switch (af) {
return -1;
}
}
-
void convert_ipstr_to_numeric(void)
{
uint32_t i;
-
for (i = 0; i < numports; i++)
{
if (type == IP_TYPE_IPV4) {
- memset(&ipaddr1, '\0', sizeof(struct sockaddr_in));
+ memset(&ipaddr1, '\0', sizeof(struct sockaddr_in));
ipaddr1.sin_addr.s_addr = inet_addr(ipv4[i]);
ifm_add_ipv4_port(i, ipaddr1.sin_addr.s_addr, 24);
} else if (type == IP_TYPE_IPV6) {
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
return (init_val);
}
-static uint64_t arp_pkts[32] = {0};
-
+static int arp_pkts;
static inline int check_arpicmp(struct rte_mbuf *pkt)
{
-
uint8_t in_port_id = pkt->port;
uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
-
uint16_t *eth_proto =
RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
-
uint8_t *protocol;
uint32_t prot_offset =
MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST;
-
protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset);
-
if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_ARP) ||
((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV4)
&& (*protocol == IP_PROTOCOL_ICMP))) {
process_arpicmp_pkt(pkt, ifm_get_port(in_port_id));
- arp_pkts[in_port_id]++;
+ arp_pkts++;
return 0;
}
-
return 1;
-
}
-
static inline int check_arpicmpv6(struct rte_mbuf *pkt)
{
-
struct ether_hdr *eth_h;
struct ipv6_hdr *ipv6_h;
-
uint8_t in_port_id = pkt->port;
uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
-
uint16_t *eth_proto =
RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
-
eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
-
if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV6)
&& (ipv6_h->proto == ICMPV6_PROTOCOL_ID)) {
process_icmpv6_pkt(pkt, ifm_get_port(in_port_id));
return 0;
}
-
return 1;
-
}
static inline uint32_t
return (init_val);
}
-#define IPV4_L3FWD_NUM_ROUTES \
- (sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))
+#define IPV4_UDP_Replay_NUM_ROUTES \
+ (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
-#define IPV6_L3FWD_NUM_ROUTES \
- (sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))
+#define IPV6_UDP_Replay_NUM_ROUTES \
+ (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
-static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
-static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+static uint8_t ipv4_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
+static uint8_t ipv6_udp_replay_out_if[UDP_Replay_HASH_ENTRIES] __rte_cache_aligned;
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
-struct ipv4_l3fwd_route {
+struct ipv4_udp_replay_route {
uint32_t ip;
uint8_t depth;
uint8_t if_out;
};
-struct ipv6_l3fwd_route {
+struct ipv6_udp_replay_route {
uint8_t ip[16];
uint8_t depth;
uint8_t if_out;
};
-static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
+static struct ipv4_udp_replay_route ipv4_udp_replay_route_array[] = {
{IPv4(1,1,1,0), 24, 0},
{IPv4(2,1,1,0), 24, 1},
{IPv4(3,1,1,0), 24, 2},
{IPv4(8,1,1,0), 24, 7},
};
-static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
+static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = {
{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
{{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
};
-#define IPV4_L3FWD_NUM_ROUTES \
- (sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))
-#define IPV6_L3FWD_NUM_ROUTES \
- (sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))
+#define IPV4_UDP_Replay_NUM_ROUTES \
+ (sizeof(ipv4_udp_replay_route_array) / sizeof(ipv4_udp_replay_route_array[0]))
+#define IPV6_UDP_Replay_NUM_ROUTES \
+ (sizeof(ipv6_udp_replay_route_array) / sizeof(ipv6_udp_replay_route_array[0]))
-#define IPV4_L3FWD_LPM_MAX_RULES 1024
-#define IPV6_L3FWD_LPM_MAX_RULES 1024
-#define IPV6_L3FWD_LPM_NUMBER_TBL8S (1 << 16)
+#define IPV4_UDP_Replay_LPM_MAX_RULES 1024
+#define IPV6_UDP_Replay_LPM_MAX_RULES 1024
+#define IPV6_UDP_Replay_LPM_NUMBER_TBL8S (1 << 16)
typedef struct rte_lpm lookup_struct_t;
typedef struct rte_lpm6 lookup6_struct_t;
-static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
-static lookup6_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];
+static lookup_struct_t *ipv4_udp_replay_lookup_struct[NB_SOCKETS];
+static lookup6_struct_t *ipv6_udp_replay_lookup_struct[NB_SOCKETS];
#endif
struct lcore_conf {
static __m128i mask1;
static __m128i mask2;
static inline uint8_t
-get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_l3fwd_lookup_struct)
+get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
{
int ret = 0;
union ipv4_5tuple_host key;
/* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */
key.xmm = _mm_and_si128(data, mask0);
/* Find destination port */
- ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : ipv4_l3fwd_out_if[ret]);
+ ret = rte_hash_lookup(ipv4_udp_replay_lookup_struct, (const void *)&key);
+ return (uint8_t)((ret < 0)? portid : ipv4_udp_replay_out_if[ret]);
}
static inline uint8_t
-get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup_struct_t * ipv6_l3fwd_lookup_struct)
+get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup_struct_t * ipv6_udp_replay_lookup_struct)
{
int ret = 0;
union ipv6_5tuple_host key;
key.xmm[2] = _mm_and_si128(data2, mask2);
/* Find destination port */
- ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : ipv6_l3fwd_out_if[ret]);
+ ret = rte_hash_lookup(ipv6_udp_replay_lookup_struct, (const void *)&key);
+ return (uint8_t)((ret < 0)? portid : ipv6_udp_replay_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static inline uint8_t
-get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_l3fwd_lookup_struct)
+get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t * ipv4_udp_replay_lookup_struct)
{
uint8_t next_hop;
- return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
+ return (uint8_t) ((rte_lpm_lookup(ipv4_udp_replay_lookup_struct,
rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
&next_hop) == 0) ? next_hop : portid);
}
static inline uint8_t
-get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup6_struct_t * ipv6_l3fwd_lookup_struct)
+get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup6_struct_t * ipv6_udp_replay_lookup_struct)
{
uint8_t next_hop;
- return (uint8_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
+ return (uint8_t) ((rte_lpm6_lookup(ipv6_udp_replay_lookup_struct,
((struct ipv6_hdr*)ipv6_hdr)->dst_addr, &next_hop) == 0)?
next_hop : portid);
}
#endif
-static inline void l3fwd_simple_replay(struct rte_mbuf *m, uint8_t portid,
+static inline void udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid,
struct lcore_conf *qconf) __attribute__((unused));
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
struct ether_hdr tmp;
struct ipv4_hdr *ipv4_hdr[8];
struct udp_hdr *udp_hdr[8];
- int i, a[8];
+ int i;
l2_phy_interface_t *port = ifm_get_port(portid);
-
if (port == NULL) {
printf("port may be un initialized\n");
return;
}
-
- for ( i = 0; i < 8; i++)
- {
- if (m[i])
- a[i] = check_arpicmp(m[i]);
- else {
- printf("null packet received\n");
- return;
- }
- }
+ if (unlikely(arp_support)) {
+ check_arpicmp(m[0]);
+ check_arpicmp(m[1]);
+ check_arpicmp(m[2]);
+ check_arpicmp(m[3]);
+ check_arpicmp(m[4]);
+ check_arpicmp(m[5]);
+ check_arpicmp(m[6]);
+ check_arpicmp(m[7]);
+ }
eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
for(i=0;i<8;i++)
{
+
ether_addr_copy(ð_hdr[i]->s_addr, &tmp.s_addr);
ether_addr_copy(ð_hdr[i]->d_addr, ð_hdr[i]->s_addr);
ether_addr_copy(&tmp.s_addr, ð_hdr[i]->d_addr);
ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv4_hdr *,
sizeof(struct ether_hdr));
struct ipv4_hdr temp_ipv4;
- for(i=0;i<8;i++)
- {
- temp_ipv4.dst_addr = ipv4_hdr[i]->dst_addr;
- ipv4_hdr[i]->dst_addr = ipv4_hdr[i]->src_addr;
- ipv4_hdr[i]->src_addr = temp_ipv4.dst_addr;
- }
+ temp_ipv4.dst_addr = ipv4_hdr[0]->dst_addr;
+ ipv4_hdr[0]->dst_addr = ipv4_hdr[0]->src_addr;
+ ipv4_hdr[0]->src_addr = temp_ipv4.dst_addr;
+ temp_ipv4.dst_addr = ipv4_hdr[1]->dst_addr;
+ ipv4_hdr[1]->dst_addr = ipv4_hdr[1]->src_addr;
+ ipv4_hdr[1]->src_addr = temp_ipv4.dst_addr;
+ temp_ipv4.dst_addr = ipv4_hdr[2]->dst_addr;
+ ipv4_hdr[2]->dst_addr = ipv4_hdr[2]->src_addr;
+ ipv4_hdr[2]->src_addr = temp_ipv4.dst_addr;
+ temp_ipv4.dst_addr = ipv4_hdr[3]->dst_addr;
+ ipv4_hdr[3]->dst_addr = ipv4_hdr[3]->src_addr;
+ ipv4_hdr[3]->src_addr = temp_ipv4.dst_addr;
+ temp_ipv4.dst_addr = ipv4_hdr[4]->dst_addr;
+ ipv4_hdr[4]->dst_addr = ipv4_hdr[4]->src_addr;
+ ipv4_hdr[4]->src_addr = temp_ipv4.dst_addr;
+ temp_ipv4.dst_addr = ipv4_hdr[5]->dst_addr;
+ ipv4_hdr[5]->dst_addr = ipv4_hdr[5]->src_addr;
+ ipv4_hdr[5]->src_addr = temp_ipv4.dst_addr;
+ temp_ipv4.dst_addr = ipv4_hdr[6]->dst_addr;
+ ipv4_hdr[6]->dst_addr = ipv4_hdr[6]->src_addr;
+ ipv4_hdr[6]->src_addr = temp_ipv4.dst_addr;
+ temp_ipv4.dst_addr = ipv4_hdr[7]->dst_addr;
+ ipv4_hdr[7]->dst_addr = ipv4_hdr[7]->src_addr;
+ ipv4_hdr[7]->src_addr = temp_ipv4.dst_addr;
/* Handle UDP headers.*/
udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *,
/*1) memcpy or assignment.*/
struct udp_hdr temp_udp;
- for(i=0;i<8;i++)
- {
- temp_udp.dst_port = udp_hdr[i]->dst_port;
- udp_hdr[i]->dst_port = udp_hdr[i]->src_port;
- udp_hdr[i]->src_port = temp_udp.dst_port;
- }
+ temp_udp.dst_port = udp_hdr[0]->dst_port;
+ udp_hdr[0]->dst_port = udp_hdr[0]->src_port;
+ udp_hdr[0]->src_port = temp_udp.dst_port;
+ temp_udp.dst_port = udp_hdr[1]->dst_port;
+ udp_hdr[1]->dst_port = udp_hdr[1]->src_port;
+ udp_hdr[1]->src_port = temp_udp.dst_port;
+ temp_udp.dst_port = udp_hdr[2]->dst_port;
+ udp_hdr[2]->dst_port = udp_hdr[2]->src_port;
+ udp_hdr[2]->src_port = temp_udp.dst_port;
+ temp_udp.dst_port = udp_hdr[3]->dst_port;
+ udp_hdr[3]->dst_port = udp_hdr[3]->src_port;
+ udp_hdr[3]->src_port = temp_udp.dst_port;
+ temp_udp.dst_port = udp_hdr[4]->dst_port;
+ udp_hdr[4]->dst_port = udp_hdr[4]->src_port;
+ udp_hdr[4]->src_port = temp_udp.dst_port;
+ temp_udp.dst_port = udp_hdr[5]->dst_port;
+ udp_hdr[5]->dst_port = udp_hdr[5]->src_port;
+ udp_hdr[5]->src_port = temp_udp.dst_port;
+ temp_udp.dst_port = udp_hdr[6]->dst_port;
+ udp_hdr[6]->dst_port = udp_hdr[6]->src_port;
+ udp_hdr[6]->src_port = temp_udp.dst_port;
+ temp_udp.dst_port = udp_hdr[7]->dst_port;
+ udp_hdr[7]->dst_port = udp_hdr[7]->src_port;
+ udp_hdr[7]->src_port = temp_udp.dst_port;
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
uint8_t valid_mask = MASK_ALL_PKTS;
uint8_t i = 0;
for (i = 0; i < 8; i++) {
if ((0x1 << i) & valid_mask) {
- l3fwd_simple_replay(m[i], portid, qconf);
+ udp_replay_simple_replay(m[i], portid, qconf);
}
}
return;
++(ipv4_hdr[7]->hdr_checksum);
#endif
- for ( i = 0; i < 8; i++)
- {
- /* if not already processed as a arp/icmp pkt */
- if (a[i]) {
- port->transmit_single_pkt(port, m[i]);
- tx_pkt_count[(uint64_t)port]++;
- }
- }
+ send_single_packet(m[0],portid );
+ send_single_packet(m[1],portid );
+ send_single_packet(m[2],portid );
+ send_single_packet(m[3],portid);
+ send_single_packet(m[4],portid);
+ send_single_packet(m[5],portid);
+ send_single_packet(m[6],portid);
+ send_single_packet(m[7],portid);
}
simple_ipv6_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
{
struct ether_hdr *eth_hdr[8],tmp;
- int i, a[8];
-
+ int i;
__attribute__((unused)) struct ipv6_hdr *ipv6_hdr[8], temp_ipv6;
int32_t ret[8];
union ipv6_5tuple_host key[8];
struct udp_hdr *udp_hdr[8];
l2_phy_interface_t *port = ifm_get_port(portid);
-
if (port == NULL) {
printf("port may be un initialized\n");
return;
}
- for ( i = 0; i < 8; i++)
- {
- a[i] = check_arpicmpv6(m[i]);
- }
+ if (unlikely(arp_support)) {
+ check_arpicmpv6(m[0]);
+ check_arpicmpv6(m[1]);
+ check_arpicmpv6(m[2]);
+ check_arpicmpv6(m[3]);
+ check_arpicmpv6(m[4]);
+ check_arpicmpv6(m[5]);
+ check_arpicmpv6(m[6]);
+ check_arpicmpv6(m[7]);
+ }
+
eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
for(i=0;i<8;i++)
{
-
- ether_addr_copy(ð_hdr[i]->s_addr, &tmp.s_addr);
- ether_addr_copy(ð_hdr[i]->d_addr, ð_hdr[i]->s_addr);
- ether_addr_copy(&tmp.s_addr, ð_hdr[i]->d_addr);
+ ether_addr_copy(ð_hdr[i]->s_addr, &tmp.s_addr);
+ ether_addr_copy(ð_hdr[i]->d_addr, ð_hdr[i]->s_addr);
+ ether_addr_copy(&tmp.s_addr, ð_hdr[i]->d_addr);
}
/* Handle IPv6 headers.*/
ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv6_hdr *,
sizeof(struct ether_hdr));
for(i=0;i<8;i++)
{
- memcpy(temp_ipv6.dst_addr,ipv6_hdr[i]->dst_addr,16);
- memcpy(ipv6_hdr[i]->dst_addr,ipv6_hdr[i]->src_addr,16);
- memcpy(ipv6_hdr[i]->src_addr,temp_ipv6.dst_addr,16);
+ memcpy(temp_ipv6.dst_addr,ipv6_hdr[i]->dst_addr,16);
+ memcpy(ipv6_hdr[i]->dst_addr,ipv6_hdr[i]->src_addr,16);
+ memcpy(ipv6_hdr[i]->src_addr,temp_ipv6.dst_addr,16);
}
/* Handle UDP headers.*/
struct udp_hdr temp_udp;
for(i=0;i<8;i++)
{
- temp_udp.dst_port = udp_hdr[i]->dst_port;
- udp_hdr[i]->dst_port = udp_hdr[i]->src_port;
- udp_hdr[i]->src_port = temp_udp.dst_port;
+ temp_udp.dst_port = udp_hdr[i]->dst_port;
+ udp_hdr[i]->dst_port = udp_hdr[i]->src_port;
+ udp_hdr[i]->src_port = temp_udp.dst_port;
}
const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
&key[4], &key[5], &key[6], &key[7]};
+#if RTE_VERSION < 0x100b0000
rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
-
- for ( i = 0; i < 8; i++)
- {
- /* if not already processed as a arp/icmp pkt */
- if (a[i]){
- port->transmit_single_pkt(port, m[i]);
- tx_pkt_count[(uint64_t)portid]++;
- }
- }
+#else
+ rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
+#endif
+ send_single_packet(m[0],portid);
+ send_single_packet(m[1],portid);
+ send_single_packet(m[2],portid);
+ send_single_packet(m[3],portid);
+ send_single_packet(m[4],portid);
+ send_single_packet(m[5],portid);
+ send_single_packet(m[6],portid);
+ send_single_packet(m[7],portid);
}
#endif /* APP_LOOKUP_METHOD */
static inline __attribute__((always_inline)) void
-l3fwd_simple_replay(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf)
+udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf)
{
struct ether_hdr *eth_hdr,tmp;
struct ipv4_hdr *ipv4_hdr,temp_ipv4;
printf("port may be un initialized\n");
return;
}
-
if (m == NULL) {
printf("Null packet received\n");
return;
}
-
- /* arp packet already processed return back */
+ if (unlikely(arp_support)) {
if (!check_arpicmp(m))
return;
-
+ }
if (qconf == NULL)
printf("qconf configuration is NULL\n");
-
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
ether_addr_copy(ð_hdr->s_addr, &tmp.s_addr);
ether_addr_copy(ð_hdr->d_addr, ð_hdr->s_addr);
/* Handle IPv4 headers.*/
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
sizeof(struct ether_hdr));
- temp_ipv4.dst_addr = ipv4_hdr->dst_addr;
- ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
- ipv4_hdr->src_addr = temp_ipv4.dst_addr;
-
+ temp_ipv4.dst_addr = ipv4_hdr->dst_addr;
+ ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
+ ipv4_hdr->src_addr = temp_ipv4.dst_addr;
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
if (is_valid_pkt_ipv4(ipv4_hdr, m->pkt_len) < 0) {
- printf("not of type 1812\n");
rte_pktmbuf_free(m);
return;
}
#endif
+
#ifdef DO_RFC_1812_CHECKS
/* Update time to live and header checksum */
--(ipv4_hdr->time_to_live);
++(ipv4_hdr->hdr_checksum);
#endif
- /* Handle UDP headers.*/
- udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
+ /* Handle UDP headers.*/
+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
(sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr)));
- /*Swapping Src and Dst Port*/
- temp_udp.dst_port = udp_hdr->dst_port;
- udp_hdr->dst_port = udp_hdr->src_port;
- udp_hdr->src_port = temp_udp.dst_port;
-
- if (m) {
- port->transmit_single_pkt(port, m);
- tx_pkt_count[portid]++;
- }
+
+ /*Swapping Src and Dst Port*/
+ temp_udp.dst_port = udp_hdr->dst_port;
+ udp_hdr->dst_port = udp_hdr->src_port;
+ udp_hdr->src_port = temp_udp.dst_port;
+
+ send_single_packet(m, portid);
} else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
/* Handle IPv6 headers.*/
struct ipv6_hdr *ipv6_hdr,temp_ipv6;
- /* Handle IPv4 headers.*/
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
sizeof(struct ether_hdr));
uint8_t next_hop;
struct ipv6_hdr *ipv6_hdr;
struct ether_hdr *eth_hdr;
+ struct ether_hdr *eth_h = rte_pktmbuf_mtod(m, struct ether_hdr *);
- if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
+ if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv4) {
if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
&next_hop) != 0)
next_hop = portid;
- } else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
+ } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) {
eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
static int
main_loop(__attribute__((unused)) void *dummy)
{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned lcore_id;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ int i, j, nb_rx;
uint8_t portid, queueid;
struct lcore_conf *qconf;
-
l2_phy_interface_t *port;
- struct rte_mbuf *pkts_burst[IFM_BURST_SIZE];
- uint32_t nb_tx = 0, nb_rx, j, i;
- const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /US_PER_S * BURST_TX_DRAIN_US;
- uint64_t prev_tsc = 0, cur_tsc, diff_tsc;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+ US_PER_S * BURST_TX_DRAIN_US;
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
qconf = &lcore_conf[lcore_id];
if (qconf->n_rx_queue == 0) {
- RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
+ RTE_LOG(INFO, UDP_Replay, "lcore %u has nothing to do\n", lcore_id);
return 0;
}
- RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);
+ RTE_LOG(INFO, UDP_Replay, "entering main loop on lcore %u\n", lcore_id);
+
for (i = 0; i < qconf->n_rx_queue; i++) {
+
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
- portid, queueid);
+ RTE_LOG(INFO, UDP_Replay, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
+ portid, queueid);
}
while (exit_loop) {
* This could be optimized (use queueid instead of
* portid), but it is not called so often
*/
- for (portid = 0; portid < num_ports; portid++) {
- port = ifm_get_port(portid);
- if (!port)
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ if (qconf->tx_mbufs[portid].len == 0)
continue;
-
- if(port->tx_buf_len > 0){
- RTE_SET_USED(nb_tx);
- port->tx_buf_len = 0;
- }
- prev_tsc = cur_tsc;
+ send_burst(qconf,
+ qconf->tx_mbufs[portid].len,
+ portid);
+ qconf->tx_mbufs[portid].len = 0;
}
prev_tsc = cur_tsc;
}
-
/*
* Read packet from RX queues
*/
printf("port may be un initialized\n");
return 0;
}
-
if(nb_rx)
- rcv_pkt_count[portid] += nb_rx;
-
+ rcv_pkt_count[portid] += nb_rx;
if (nb_rx == 0)
continue;
* Send nb_rx - nb_rx%8 packets
* in groups of 8.
*/
- uint32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
+ int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
for (j = 0; j < n; j += 8) {
- uint32_t pkt_type =
- pkts_burst[j]->packet_type &
- pkts_burst[j+1]->packet_type &
- pkts_burst[j+2]->packet_type &
- pkts_burst[j+3]->packet_type &
- pkts_burst[j+4]->packet_type &
- pkts_burst[j+5]->packet_type &
- pkts_burst[j+6]->packet_type &
- pkts_burst[j+7]->packet_type;
- if (pkt_type & RTE_PTYPE_L3_IPV4) {
+ struct ether_hdr *eth_h0 =
+ rte_pktmbuf_mtod(pkts_burst[j], struct ether_hdr *);
+ struct ether_hdr *eth_h1 =
+ rte_pktmbuf_mtod(pkts_burst[j+1], struct ether_hdr *);
+ struct ether_hdr *eth_h2 =
+ rte_pktmbuf_mtod(pkts_burst[j+2], struct ether_hdr *);
+ struct ether_hdr *eth_h3 =
+ rte_pktmbuf_mtod(pkts_burst[j+3], struct ether_hdr *);
+ struct ether_hdr *eth_h4 =
+ rte_pktmbuf_mtod(pkts_burst[j+4], struct ether_hdr *);
+ struct ether_hdr *eth_h5 =
+ rte_pktmbuf_mtod(pkts_burst[j+5], struct ether_hdr *);
+ struct ether_hdr *eth_h6 =
+ rte_pktmbuf_mtod(pkts_burst[j+6], struct ether_hdr *);
+ struct ether_hdr *eth_h7 =
+ rte_pktmbuf_mtod(pkts_burst[j+7], struct ether_hdr *);
+
+ uint16_t ether_type;
+ ether_type = (rte_cpu_to_be_16(eth_h0->ether_type) &
+ rte_cpu_to_be_16(eth_h1->ether_type) &
+ rte_cpu_to_be_16(eth_h2->ether_type) &
+ rte_cpu_to_be_16(eth_h3->ether_type) &
+ rte_cpu_to_be_16(eth_h4->ether_type) &
+ rte_cpu_to_be_16(eth_h5->ether_type) &
+ rte_cpu_to_be_16(eth_h6->ether_type) &
+ rte_cpu_to_be_16(eth_h7->ether_type));
+
+ if (ether_type == ETHER_TYPE_IPv4) {
simple_ipv4_replay_8pkts(
&pkts_burst[j], portid, qconf);
- } else if (pkt_type &
- RTE_PTYPE_L3_IPV6) {
+ } else if (ether_type == ETHER_TYPE_IPv6) {
simple_ipv6_replay_8pkts(&pkts_burst[j],
portid, qconf);
} else {
- for (i = j; i < j + 8; i++) {
- l3fwd_simple_replay(pkts_burst[i],
+ udp_replay_simple_replay(pkts_burst[j],
+ portid, qconf);
+ udp_replay_simple_replay(pkts_burst[j+1],
+ portid, qconf);
+ udp_replay_simple_replay(pkts_burst[j+2],
+ portid, qconf);
+ udp_replay_simple_replay(pkts_burst[j+3],
+ portid, qconf);
+ udp_replay_simple_replay(pkts_burst[j+4],
+ portid, qconf);
+ udp_replay_simple_replay(pkts_burst[j+5],
+ portid, qconf);
+ udp_replay_simple_replay(pkts_burst[j+6],
+ portid, qconf);
+ udp_replay_simple_replay(pkts_burst[j+7],
portid, qconf);
- }
}
}
+
for (; j < nb_rx ; j++) {
- l3fwd_simple_replay(pkts_burst[j],
+ udp_replay_simple_replay(pkts_burst[j],
portid, qconf);
}
}
for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
j + PREFETCH_OFFSET], void *));
- l3fwd_simple_replay(pkts_burst[j], portid,
+ udp_replay_simple_replay(pkts_burst[j], portid,
qconf);
}
/* Forward remaining prefetched packets */
for (; j < nb_rx; j++) {
- l3fwd_simple_replay(pkts_burst[j], portid,
+ udp_replay_simple_replay(pkts_burst[j], portid,
qconf);
}
#endif /* ENABLE_MULTI_BUFFER_OPTIMIZE */
}
-
}
- return 0;
}
/* display usage */
print_l4stats(void)
{
unsigned portid;
- uint16_t i;
+ uint16_t i, j=0;
printf ("\n");
printf ("UDP_Replay stats:\n");
printf ("--------------\n");
- printf (" Port Rx Packet Tx Packet Rx Pkt Drop Tx Pkt Drop arp_pkts\n");
+ printf (" Port Rx Packet Tx Packet Rx Pkt Drop Tx Pkt Drop arp_pkts\n");
for (i = 0; i < nb_lcore_params; ++i) {
portid = lcore_params[i].port_id;
- printf (" %u %"PRId64" %"PRId64" 0 0 %"PRId64"",portid, rcv_pkt_count[(uint64_t)portid], tx_pkt_count[(uint64_t)portid], arp_pkts[(uint64_t)portid]);
+ printf ("%5u%15lu%15lu%17d%17d%14u",portid, rcv_pkt_count[portid], tx_pkt_count[portid],j,j, arp_pkts);
printf ("\n");
}
printf ("\n");
static int
parse_link_ip(const char *file_name)
{
-
uint32_t i, type;
struct rte_cfgfile *file;
const char *entry;
char buf[256];
-
file = rte_cfgfile_load(file_name, 0);
-
entry = rte_cfgfile_get_entry(file, "linkip", "num_ports");
numports = (uint32_t)atoi(entry);
if (numports <= 0 || numports > 32)
rte_panic("numports is not valid\n");
-
entry = rte_cfgfile_get_entry(file, "linkip", "ip_type");
type = (uint32_t)atoi(entry);
-
for (i = 0;i < numports; i++) {
sprintf(buf, "port%d", i);
entry = rte_cfgfile_get_entry(file, "linkip", buf);
-
if (entry == NULL)
continue;
-
if (!type)
ipv4[i] = strdup(entry);
else if (type)
my_inet_pton_ipv6(AF_INET6, entry, &link_ipv6[i][0]);
}
-
return 0;
}
-
static int
parse_portmask(const char *portmask)
{
switch (opt) {
case 's':
parse_link_ip(optarg);
+ arp_support = 1;
break;
/* portmask */
case 'p':
if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_HASH_ENTRY_NUM,
sizeof(CMD_LINE_OPT_HASH_ENTRY_NUM))) {
ret = parse_hash_entry_number(optarg);
- if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
+ if ((ret > 0) && (ret <= UDP_Replay_HASH_ENTRIES)) {
hash_entry_number = ret;
} else {
printf("invalid hash entry number\n");
{
uint32_t i;
int32_t ret;
- uint32_t array_len = sizeof(ipv4_l3fwd_route_array)/sizeof(ipv4_l3fwd_route_array[0]);
+ uint32_t array_len = sizeof(ipv4_udp_replay_route_array)/sizeof(ipv4_udp_replay_route_array[0]);
mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
for (i = 0; i < array_len; i++) {
- struct ipv4_l3fwd_route entry;
+ struct ipv4_udp_replay_route entry;
union ipv4_5tuple_host newkey;
- entry = ipv4_l3fwd_route_array[i];
+ entry = ipv4_udp_replay_route_array[i];
convert_ipv4_5tuple(&entry.key, &newkey);
ret = rte_hash_add_key (h,(void *) &newkey);
if (ret < 0) {
rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
- " to the l3fwd hash.\n", i);
+ " to the udp_replay hash.\n", i);
}
- ipv4_l3fwd_out_if[ret] = entry.if_out;
+ ipv4_udp_replay_out_if[ret] = entry.if_out;
}
printf("Hash: Adding 0x%" PRIx32 " keys\n", array_len);
}
{
uint32_t i;
int32_t ret;
- uint32_t array_len = sizeof(ipv6_l3fwd_route_array)/sizeof(ipv6_l3fwd_route_array[0]);
+ uint32_t array_len = sizeof(ipv6_udp_replay_route_array)/sizeof(ipv6_udp_replay_route_array[0]);
mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
for (i = 0; i < array_len; i++) {
- struct ipv6_l3fwd_route entry;
+ struct ipv6_udp_replay_route entry;
union ipv6_5tuple_host newkey;
- entry = ipv6_l3fwd_route_array[i];
+ entry = ipv6_udp_replay_route_array[i];
convert_ipv6_5tuple(&entry.key, &newkey);
ret = rte_hash_add_key (h, (void *) &newkey);
if (ret < 0) {
rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
- " to the l3fwd hash.\n", i);
+ " to the udp_replay hash.\n", i);
}
- ipv6_l3fwd_out_if[ret] = entry.if_out;
+ ipv6_udp_replay_out_if[ret] = entry.if_out;
}
printf("Hash: Adding 0x%" PRIx32 "keys\n", array_len);
}
+/* Synthetic-flow population: generate nr_flow IPv4 exact-match entries by
+   cloning one of the four static routes per (i mod NUMBER_PORT_USED) and
+   varying the destination address bytes a/b/c.
+   NOTE(review): the byte `c` and the rte_hash_add_key() call that sets `ret`
+   are in lines elided from this hunk — confirm against the full file. */
unsigned i;
mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
for (i = 0; i < nr_flow; i++) {
- struct ipv4_l3fwd_route entry;
+ struct ipv4_udp_replay_route entry;
union ipv4_5tuple_host newkey;
uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
memset(&entry, 0, sizeof(entry));
switch (i & (NUMBER_PORT_USED -1)) {
case 0:
- entry = ipv4_l3fwd_route_array[0];
+ entry = ipv4_udp_replay_route_array[0];
entry.key.ip_dst = IPv4(101,c,b,a);
break;
case 1:
- entry = ipv4_l3fwd_route_array[1];
+ entry = ipv4_udp_replay_route_array[1];
entry.key.ip_dst = IPv4(201,c,b,a);
break;
case 2:
- entry = ipv4_l3fwd_route_array[2];
+ entry = ipv4_udp_replay_route_array[2];
entry.key.ip_dst = IPv4(111,c,b,a);
break;
case 3:
- entry = ipv4_l3fwd_route_array[3];
+ entry = ipv4_udp_replay_route_array[3];
entry.key.ip_dst = IPv4(211,c,b,a);
break;
};
if (ret < 0) {
rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
}
- ipv4_l3fwd_out_if[ret] = (uint8_t) entry.if_out;
+ ipv4_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
}
printf("Hash: Adding 0x%x keys\n", nr_flow);
+/* IPv6 counterpart of the synthetic-flow population above.
+   NOTE(review): `c`, `ret` assignment (rte_hash_add_key) and the write to
+   ip_dst[15] appear to live in lines elided from this hunk — confirm. */
mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
for (i = 0; i < nr_flow; i++) {
- struct ipv6_l3fwd_route entry;
+ struct ipv6_udp_replay_route entry;
union ipv6_5tuple_host newkey;
uint8_t a = (uint8_t) ((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
/* Create the ipv6 exact match flow */
memset(&entry, 0, sizeof(entry));
switch (i & (NUMBER_PORT_USED - 1)) {
- case 0: entry = ipv6_l3fwd_route_array[0]; break;
- case 1: entry = ipv6_l3fwd_route_array[1]; break;
- case 2: entry = ipv6_l3fwd_route_array[2]; break;
- case 3: entry = ipv6_l3fwd_route_array[3]; break;
+ case 0: entry = ipv6_udp_replay_route_array[0]; break;
+ case 1: entry = ipv6_udp_replay_route_array[1]; break;
+ case 2: entry = ipv6_udp_replay_route_array[2]; break;
+ case 3: entry = ipv6_udp_replay_route_array[3]; break;
};
entry.key.ip_dst[13] = c;
entry.key.ip_dst[14] = b;
if (ret < 0) {
rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
}
- ipv6_l3fwd_out_if[ret] = (uint8_t) entry.if_out;
+ ipv6_udp_replay_out_if[ret] = (uint8_t) entry.if_out;
}
printf("Hash: Adding 0x%x keys\n", nr_flow);
+/* LPM lookup-method variant: create the per-socket IPv4 LPM table and load
+   the static routes into it.  Pure identifier/message rename in this hunk
+   (l3fwd -> udp_replay); logic is unchanged. */
char s[64];
/* create the LPM table */
- snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
- ipv4_l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
- IPV4_L3FWD_LPM_MAX_RULES, 0);
- if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
- rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
+ snprintf(s, sizeof(s), "IPV4_UDP_Replay_LPM_%d", socketid);
+ ipv4_udp_replay_lookup_struct[socketid] = rte_lpm_create(s, socketid,
+ IPV4_UDP_Replay_LPM_MAX_RULES, 0);
+ if (ipv4_udp_replay_lookup_struct[socketid] == NULL)
+ rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
" on socket %d\n", socketid);
/* populate the LPM table */
- for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
+ for (i = 0; i < IPV4_UDP_Replay_NUM_ROUTES; i++) {
/* skip unused ports */
- if ((1 << ipv4_l3fwd_route_array[i].if_out &
+ if ((1 << ipv4_udp_replay_route_array[i].if_out &
enabled_port_mask) == 0)
continue;
- ret = rte_lpm_add(ipv4_l3fwd_lookup_struct[socketid],
- ipv4_l3fwd_route_array[i].ip,
- ipv4_l3fwd_route_array[i].depth,
- ipv4_l3fwd_route_array[i].if_out);
+ ret = rte_lpm_add(ipv4_udp_replay_lookup_struct[socketid],
+ ipv4_udp_replay_route_array[i].ip,
+ ipv4_udp_replay_route_array[i].depth,
+ ipv4_udp_replay_route_array[i].if_out);
if (ret < 0) {
rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
- "l3fwd LPM table on socket %d\n",
+ "udp_replay LPM table on socket %d\n",
i, socketid);
}
printf("LPM: Adding route 0x%08x / %d (%d)\n",
- (unsigned)ipv4_l3fwd_route_array[i].ip,
- ipv4_l3fwd_route_array[i].depth,
- ipv4_l3fwd_route_array[i].if_out);
+ (unsigned)ipv4_udp_replay_route_array[i].ip,
+ ipv4_udp_replay_route_array[i].depth,
+ ipv4_udp_replay_route_array[i].if_out);
}
+/* Per-socket IPv6 LPM6 table creation/population; identifier/message rename
+   only.  NOTE(review): `config` (presumably struct rte_lpm6_config) is
+   declared in lines elided from this hunk — confirm against the full file. */
/* create the LPM6 table */
- snprintf(s, sizeof(s), "IPV6_L3FWD_LPM_%d", socketid);
+ snprintf(s, sizeof(s), "IPV6_UDP_Replay_LPM_%d", socketid);
- config.max_rules = IPV6_L3FWD_LPM_MAX_RULES;
- config.number_tbl8s = IPV6_L3FWD_LPM_NUMBER_TBL8S;
+ config.max_rules = IPV6_UDP_Replay_LPM_MAX_RULES;
+ config.number_tbl8s = IPV6_UDP_Replay_LPM_NUMBER_TBL8S;
config.flags = 0;
- ipv6_l3fwd_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
+ ipv6_udp_replay_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
&config);
- if (ipv6_l3fwd_lookup_struct[socketid] == NULL)
- rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
+ if (ipv6_udp_replay_lookup_struct[socketid] == NULL)
+ rte_exit(EXIT_FAILURE, "Unable to create the udp_replay LPM table"
" on socket %d\n", socketid);
/* populate the LPM table */
- for (i = 0; i < IPV6_L3FWD_NUM_ROUTES; i++) {
+ for (i = 0; i < IPV6_UDP_Replay_NUM_ROUTES; i++) {
/* skip unused ports */
- if ((1 << ipv6_l3fwd_route_array[i].if_out &
+ if ((1 << ipv6_udp_replay_route_array[i].if_out &
enabled_port_mask) == 0)
continue;
- ret = rte_lpm6_add(ipv6_l3fwd_lookup_struct[socketid],
- ipv6_l3fwd_route_array[i].ip,
- ipv6_l3fwd_route_array[i].depth,
- ipv6_l3fwd_route_array[i].if_out);
+ ret = rte_lpm6_add(ipv6_udp_replay_lookup_struct[socketid],
+ ipv6_udp_replay_route_array[i].ip,
+ ipv6_udp_replay_route_array[i].depth,
+ ipv6_udp_replay_route_array[i].if_out);
if (ret < 0) {
rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
- "l3fwd LPM table on socket %d\n",
+ "udp_replay LPM table on socket %d\n",
i, socketid);
}
printf("LPM: Adding route %s / %d (%d)\n",
"IPV6",
- ipv6_l3fwd_route_array[i].depth,
- ipv6_l3fwd_route_array[i].if_out);
+ ipv6_udp_replay_route_array[i].depth,
+ ipv6_udp_replay_route_array[i].if_out);
}
}
#endif
+
+
+
+
+
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+/* main() fragment (entry/body lines elided in this hunk): EAL init, arg
+   parsing, interface-manager + gateway setup, per-port queue config, lcore
+   launch, then the interactive "Replay>" cmdline. */
int ret;
unsigned nb_ports;
unsigned lcore_id;
- uint32_t n_tx_queue, nb_lcores;
+ uint32_t n_tx_queue;
uint8_t portid, nb_rx_queue;
- struct cmdline *cl;
+ struct cmdline *cl;
uint32_t size;
struct pipeline_params *params;
rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
argc -= ret;
argv += ret;
-
timer_lcore = rte_lcore_id();
-
/* parse application arguments (after the EAL ones) */
ret = parse_args(argc, argv);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
+ rte_exit(EXIT_FAILURE, "Invalid UDP_Replay parameters\n");
if (check_lcore_params() < 0)
rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
params = rte_malloc(NULL, sizeof(*params), RTE_CACHE_LINE_SIZE);
-
+/* NOTE(review): rte_malloc() result `params` is dereferenced by memcpy below
+   without a NULL check (assuming no elided check) — confirm. */
memcpy(params, &def_pipeline_params, sizeof(def_pipeline_params));
lib_arp_init(params, NULL);
-
- /* configure the interface manager */
ifm_init();
-
nb_ports = rte_eth_dev_count();
num_ports = nb_ports;
+/* New gateway module (gateway.h added in this patch) is initialised with the
+   pre-clamp port count. */
+ gw_init(num_ports);
if (nb_ports > RTE_MAX_ETHPORTS)
nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
- nb_lcores = rte_lcore_count();
-
/*
*Configuring port_config_t structure for interface manager initialization
*/
port_config = rte_zmalloc(NULL, (RTE_MAX_ETHPORTS * size), RTE_CACHE_LINE_SIZE);
if (port_config == NULL)
rte_panic("port_config is NULL: Memory Allocation failure\n");
-
/* initialize all ports */
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
fflush(stdout);
nb_rx_queue = get_port_n_rx_queues(portid);
+/* Behaviour change in this patch: TX queue count now mirrors the per-port RX
+   queue count instead of the lcore count. */
- n_tx_queue = nb_lcores;
+ n_tx_queue = nb_rx_queue;
if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
n_tx_queue = MAX_TX_QUEUE_PER_PORT;
memcpy (&port_config[portid].tx_conf, &tx_conf, sizeof(struct rte_eth_txconf));
/* Enable TCP and UDP HW Checksum , when required */
- if (csum_on) {
- port_config[portid].tx_conf.txq_flags &=
- ~(ETH_TXQ_FLAGS_NOXSUMTCP|ETH_TXQ_FLAGS_NOXSUMUDP);
- }
+ //port_config[portid].tx_conf.txq_flags &=
+ // ~(ETH_TXQ_FLAGS_NOXSUMTCP|ETH_TXQ_FLAGS_NOXSUMUDP);
if (ifm_port_setup (portid, &port_config[portid]))
+/* NOTE(review): HW-checksum config above is now commented out and the panic
+   message no longer mentions --no-hw-csum; if that option still exists in
+   parse_args/usage text elsewhere, it is now dead/stale — confirm. */
- rte_panic("Port Setup Failed: %"PRIu32"\n. Try running by disabling checksum with (--no-hw-csum)", portid);
+ rte_panic ("Port Setup Failed: %"PRIu32"\n", portid);
}
check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
create_arp_table();
create_nd_table();
populate_lpm_routes();
-
convert_ipstr_to_numeric();
-
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+/* NOTE(review): prompt changed from "Replay> " to "Replay>" (trailing space
+   dropped) — confirm intentional. */
- cl = cmdline_stdin_new(main_ctx, "Replay> ");
+ cl = cmdline_stdin_new(main_ctx, "Replay>");
if (cl == NULL)
rte_panic("Cannot create cmdline instance\n");
cmdline_interact(cl);
cmdline_stdin_exit(cl);
exit_loop = 0;
- rte_exit(0, "Bye!\n");
+ rte_exit(0, "Bye!\n");
+/* NOTE(review): rte_exit() terminates the process; if no lines are elided
+   between it and this loop, the slave-lcore wait below is unreachable dead
+   code — confirm intended. */
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
return 0;
}
-
/**********************************************************/
+/* "UDP_Replay clear stats" CLI command: result struct, handler (its opening
+   brace is elided in this hunk), string tokens and the parse instruction.
+   Pure rename from the l3fwd identifiers; token order clear/stats and the
+   handler's call to clear_stats() are unchanged. */
struct cmd_obj_clear_result {
cmdline_fixed_string_t clear;
- cmdline_fixed_string_t l3fwd;
+ cmdline_fixed_string_t udp_replay;
cmdline_fixed_string_t stats;
};
-static void cmd_clear_l3fwd_stats_parsed(
+static void cmd_clear_udp_replay_stats_parsed(
__rte_unused void *parsed_result,
__rte_unused struct cmdline *cl,
__attribute__((unused)) void *data)
clear_stats();
}
-cmdline_parse_token_string_t cmd_clear_l3fwd_stats_l3fwd_string =
- TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, l3fwd, "UDP_Replay");
-cmdline_parse_token_string_t cmd_clear_l3fwd_stats_clear_string =
+cmdline_parse_token_string_t cmd_clear_udp_replay_stats_udp_replay_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, udp_replay, "UDP_Replay");
+cmdline_parse_token_string_t cmd_clear_udp_replay_stats_clear_string =
TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, clear, "clear");
-cmdline_parse_token_string_t cmd_clear_l3fwd_stats_stats_string =
+cmdline_parse_token_string_t cmd_clear_udp_replay_stats_stats_string =
TOKEN_STRING_INITIALIZER(struct cmd_obj_clear_result, stats, "stats");
-cmdline_parse_inst_t cmd_clear_l3fwd_stats = {
- .f = cmd_clear_l3fwd_stats_parsed, /* function to call */
+cmdline_parse_inst_t cmd_clear_udp_replay_stats = {
+ .f = cmd_clear_udp_replay_stats_parsed, /* function to call */
.data = NULL, /* 2nd arg of func */
.help_str = "clears UDP_Replay stats for rx/tx",
.tokens = { /* token list, NULL terminated */
- (void *)&cmd_clear_l3fwd_stats_l3fwd_string,
- (void *)&cmd_clear_l3fwd_stats_clear_string,
- (void *)&cmd_clear_l3fwd_stats_stats_string,
+ (void *)&cmd_clear_udp_replay_stats_udp_replay_string,
+ (void *)&cmd_clear_udp_replay_stats_clear_string,
+ (void *)&cmd_clear_udp_replay_stats_stats_string,
NULL,
},
};
+/* "UDP_Replay stats" CLI command: tail of cmd_obj_add_result (struct opening
+   elided in this hunk), handler calling print_l4stats(), tokens and parse
+   instruction.  Rename plus removal of commented-out debug printf code. */
cmdline_fixed_string_t name;
};
-static void cmd_l3fwd_stats_parsed(
+static void cmd_udp_replay_stats_parsed(
__rte_unused void *parsed_result,
__rte_unused struct cmdline *cl,
__attribute__((unused)) void *data)
{
-
- /*printf("\n Rx value : Tx Value : \n");*/
print_l4stats();
- /*cmdline_printf(cl, "Object %s added, ip=%s\n",
- o->name, ip_str);*/
}
-cmdline_parse_token_string_t cmd_l3fwd_stats_l3fwd_string =
+cmdline_parse_token_string_t cmd_udp_replay_stats_udp_replay_string =
TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, action, "UDP_Replay");
-cmdline_parse_token_string_t cmd_l3fwd_stats_stats_string =
+cmdline_parse_token_string_t cmd_udp_replay_stats_stats_string =
TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, name, "stats");
-cmdline_parse_inst_t cmd_l3fwd_stats = {
- .f = cmd_l3fwd_stats_parsed, /* function to call */
+cmdline_parse_inst_t cmd_udp_replay_stats = {
+ .f = cmd_udp_replay_stats_parsed, /* function to call */
.data = NULL, /* 2nd arg of func */
.help_str = "UDP_Replay stats for rx/tx",
.tokens = { /* token list, NULL terminated */
- (void *)&cmd_l3fwd_stats_l3fwd_string,
- (void *)&cmd_l3fwd_stats_stats_string,
+ (void *)&cmd_udp_replay_stats_udp_replay_string,
+ (void *)&cmd_udp_replay_stats_stats_string,
NULL,
},
};
/**********************************************************/
/****** CONTEXT (list of instruction) */
-
+/* CLI context: the two renamed commands plus cmd_quit (defined outside this
+   hunk); NULL-terminated as required by librte_cmdline. */
cmdline_parse_ctx_t main_ctx[] = {
- (cmdline_parse_inst_t *)&cmd_l3fwd_stats,
- (cmdline_parse_inst_t *)&cmd_clear_l3fwd_stats,
+ (cmdline_parse_inst_t *)&cmd_udp_replay_stats,
+ (cmdline_parse_inst_t *)&cmd_clear_udp_replay_stats,
(cmdline_parse_inst_t *)&cmd_quit,
NULL,
};