Support for DPDK 18.05 and DPDK 18.08 39/64339/1
author Xavier Simonart <xavier.simonart@intel.com>
Sat, 27 Oct 2018 14:26:26 +0000 (16:26 +0200)
committer Deepak S <deepak.s@linux.intel.com>
Wed, 31 Oct 2018 23:08:34 +0000 (04:38 +0530)
Improve DPDK 18.05 support introduced by 3e532aca.
Support for DPDK 18.08.

Change-Id: Ide712ee94254b506a0ad88c95a7e01b789f99d48
Signed-off-by: Xavier Simonart <xavier.simonart@intel.com>
22 files changed:
VNFs/DPPD-PROX/README
VNFs/DPPD-PROX/commands.c
VNFs/DPPD-PROX/defaults.c
VNFs/DPPD-PROX/display_ports.c
VNFs/DPPD-PROX/handle_cgnat.c
VNFs/DPPD-PROX/handle_esp.c
VNFs/DPPD-PROX/handle_gen.c
VNFs/DPPD-PROX/handle_gre_decap_encap.c
VNFs/DPPD-PROX/handle_ipv6_tunnel.c
VNFs/DPPD-PROX/handle_nat.c
VNFs/DPPD-PROX/handle_police.c
VNFs/DPPD-PROX/handle_qinq_decap4.c
VNFs/DPPD-PROX/handle_qinq_encap4.c
VNFs/DPPD-PROX/main.c
VNFs/DPPD-PROX/prox_args.c
VNFs/DPPD-PROX/prox_cksum.c
VNFs/DPPD-PROX/prox_compat.h
VNFs/DPPD-PROX/prox_port_cfg.c
VNFs/DPPD-PROX/prox_port_cfg.h
VNFs/DPPD-PROX/run.c
VNFs/DPPD-PROX/stats_port.c
VNFs/DPPD-PROX/task_base.h

index 7527479..229ee24 100644 (file)
@@ -24,13 +24,15 @@ finer grained network functions like QoS, Routing, load-balancing...
 
 Compiling and running this application
 --------------------------------------
-This application supports DPDK 16.04, 16.11, 16.11.1, 17.02, 17.05 17.08 and 17.11.
+This application supports DPDK 16.04, 16.11, 16.11.1, 17.02, 17.05, 17.08,
+17.11, 18.02, 18.05 and 18.08.
+
 The following commands assume that the following variables have been set:
 
 export RTE_SDK=/path/to/dpdk
 export RTE_TARGET=x86_64-native-linuxapp-gcc
 
-IPSec is only supported in PROX with DPDK 17.02 and DPDK 17.05
+IPSec is only supported in PROX starting from DPDK 17.02
 It will only be compiled if CONFIG_RTE_LIBRTE_PMD_AESNI_MB is
 set in DPDK .config. This also requires AESNI_MULTI_BUFFER_LIB_PATH to point to
 the multi-buffer library which can be downloaded from 
index 6c715c2..22d158a 100644 (file)
@@ -18,6 +18,9 @@
 #include <rte_table_hash.h>
 #include <rte_version.h>
 #include <rte_malloc.h>
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+#include <rte_eal_memconfig.h>
+#endif
 
 #include "prox_malloc.h"
 #include "display.h"
@@ -259,6 +262,93 @@ void cmd_mem_stats(void)
        }
 }
 
+static void get_hp_sz_string(char *sz_str, uint64_t hp_sz)
+{
+       switch (hp_sz >> 20) {
+       case 0:
+               strcpy(sz_str, " 0 ");
+               break;
+       case 2:
+               strcpy(sz_str, "2MB");
+               break;
+       case 1024:
+               strcpy(sz_str, "1GB");
+               break;
+       default:
+               strcpy(sz_str, "??");
+       }
+}
+
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+// Print all segments, 1 by 1
+// Unused for now, keep for reference
+static int print_all_segments(const struct rte_memseg_list *memseg_list, const struct rte_memseg *memseg, void *arg)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       int memseg_list_idx, memseg_idx;
+       int n = (*(int *)arg)++;
+
+       memseg_list_idx = memseg_list - mcfg->memsegs;
+       if ((memseg_list_idx < 0) || (memseg_list_idx >= RTE_MAX_MEMSEG_LISTS)) {
+               plog_err("Invalid memseg_list_idx = %d; memseg_list = %p, mcfg->memsegs = %p\n", memseg_list_idx, memseg_list, mcfg->memsegs);
+               return -1;
+       }
+       memseg_idx = rte_fbarray_find_idx(&memseg_list->memseg_arr, memseg);
+       if (memseg_idx < 0) {
+               plog_err("Invalid memseg_idx = %d; memseg_list = %p, memseg = %p\n", memseg_idx, memseg_list, memseg);
+               return -1;
+       }
+
+       char sz_str[5];
+       get_hp_sz_string(sz_str, memseg->hugepage_sz);
+       plog_info("Segment %d (sock %d): [%i-%i] [%#lx-%#lx] at %p using %zu pages of %s\n",
+               n,
+               memseg->socket_id,
+               memseg_list_idx,
+               memseg_idx,
+               memseg->iova,
+               memseg->iova+memseg->len,
+               memseg->addr,
+               memseg->len/memseg->hugepage_sz, sz_str);
+
+        return 0;
+}
+
+// Print memory segments
+// Contiguous segments are shown as 1 big segment
+static int print_segments(const struct rte_memseg_list *memseg_list, const struct rte_memseg *memseg, size_t len, void *arg)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       int memseg_list_idx, memseg_idx;
+       static int n = 0;
+
+       memseg_list_idx = memseg_list - mcfg->memsegs;
+       if ((memseg_list_idx < 0) || (memseg_list_idx >= RTE_MAX_MEMSEG_LISTS)) {
+               plog_err("Invalid memseg_list_idx = %d; memseg_list = %p, mcfg->memsegs = %p\n", memseg_list_idx, memseg_list, mcfg->memsegs);
+               return -1;
+       }
+       memseg_idx = rte_fbarray_find_idx(&memseg_list->memseg_arr, memseg);
+       if (memseg_idx < 0) {
+               plog_err("Invalid memseg_idx = %d; memseg_list = %p, memseg = %p\n", memseg_idx, memseg_list, memseg);
+               return -1;
+       }
+
+       char sz_str[5];
+       get_hp_sz_string(sz_str, memseg->hugepage_sz);
+       plog_info("Segment %d (sock %d): [%i-%i] [%#lx-%#lx] at %p using %zu pages of %s\n",
+               n++,
+               memseg->socket_id,
+               memseg_list_idx,
+               memseg_idx,
+               memseg->iova,
+               memseg->iova+len,
+               memseg->addr,
+               memseg->hugepage_sz?len/memseg->hugepage_sz:0, sz_str);
+
+        return 0;
+}
+
+#endif
 void cmd_mem_layout(void)
 {
 #if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
@@ -269,17 +359,8 @@ void cmd_mem_layout(void)
                if (memseg[i].addr == NULL)
                        break;
 
-               const char *sz_str;
-               switch (memseg[i].hugepage_sz >> 20) {
-               case 2:
-                       sz_str = "2MB";
-                       break;
-               case 1024:
-                       sz_str = "1GB";
-                       break;
-               default:
-                       sz_str = "??";
-               }
+               char sz_str[5];
+               get_hp_sz_string(sz_str, memseg[i].hugepage_sz);
 
                plog_info("Segment %u: [%#lx-%#lx] at %p using %zu pages of %s\n",
                          i,
@@ -289,8 +370,9 @@ void cmd_mem_layout(void)
                          memseg[i].len/memseg[i].hugepage_sz, sz_str);
        }
 #else
-       plog_info("Memory layout: command not supported in this DPDK version\n");
-       // TODO DPDK1805
+       int segment_number = 0;
+       //rte_memseg_walk(print_all_segments, &segment_number);
+       rte_memseg_contig_walk(print_segments, &segment_number);
 #endif
 }
 
index a6be0d7..700b63e 100644 (file)
@@ -27,6 +27,7 @@
 #include "etypes.h"
 #include "toeplitz.h"
 #include "handle_master.h"
+#include "prox_compat.h"
 
 #define TEN_GIGABIT     1250000000
 #define QUEUE_SIZES     128
 
 static const struct rte_eth_conf default_port_conf = {
        .rxmode = {
-               .split_hdr_size = 0,
-               .header_split   = 0, /* Header Split disabled */
-               .hw_ip_checksum = 0, /* IP checksum offload disabled */
-               .hw_vlan_filter = 0, /* VLAN filtering disabled */
-               .hw_vlan_strip = 0, /* VLAN filtering disabled */
-               .jumbo_frame    = 0, /* Jumbo frame support disabled */
-               .hw_strip_crc   = 1, /* CRC stripped by hardware --- always set to 1 in VF */
-               .hw_vlan_extend = 0,
                .mq_mode        = 0,
                .max_rx_pkt_len = PROX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
        },
@@ -184,5 +177,7 @@ void set_port_defaults(void)
                prox_port_cfg[i].rx_ring[0] = '\0';
                prox_port_cfg[i].tx_ring[0] = '\0';
                prox_port_cfg[i].mtu = PROX_MTU;
+               prox_port_cfg[i].requested_rx_offload = DEV_RX_OFFLOAD_CRC_STRIP;
+               prox_port_cfg[i].requested_tx_offload = DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM;
        }
 }
index b1027f9..e7ffb1e 100644 (file)
@@ -22,6 +22,7 @@
 #include "stats_port.h"
 #include "prox_globals.h"
 #include "prox_port_cfg.h"
+#include "prox_compat.h"
 
 static struct display_page display_page_ports;
 static struct display_column *nb_col;
@@ -179,7 +180,7 @@ static void display_ports_draw_per_sec_stats(void)
                struct percent rx_percent;
                struct percent tx_percent;
                if (strcmp(prox_port_cfg[port_id].short_name, "i40e") == 0) {
-                       if (prox_port_cfg[port_id].port_conf.rxmode.hw_strip_crc == 1) {
+                       if (prox_port_cfg[port_id].requested_rx_offload & DEV_RX_OFFLOAD_CRC_STRIP) {
                                rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 24 * (last->rx_tot - prev->rx_tot), delta_t);
                                tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 24 * (last->tx_tot - prev->tx_tot), delta_t);
                        } else {
@@ -187,7 +188,7 @@ static void display_ports_draw_per_sec_stats(void)
                                tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 20 * (last->tx_tot - prev->tx_tot), delta_t);
                        }
                } else {
-                       if (prox_port_cfg[port_id].port_conf.rxmode.hw_strip_crc == 1) {
+                       if (prox_port_cfg[port_id].requested_rx_offload & DEV_RX_OFFLOAD_CRC_STRIP) {
                                rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 24 * (last->rx_tot - prev->rx_tot), delta_t);
                                tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 24 * (last->tx_tot - prev->tx_tot), delta_t);
                        } else {
index d79a6d5..84ad546 100644 (file)
@@ -961,7 +961,7 @@ static void init_task_nat(struct task_base *tbase, struct task_args *targ)
 
        struct prox_port_cfg *port = find_reachable_port(targ);
        if (port) {
-               task->offload_crc = port->capabilities.tx_offload_cksum;
+               task->offload_crc = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
        }
 }
 
index 0039c9a..0d359c6 100644 (file)
@@ -37,6 +37,7 @@
 #include <rte_cryptodev_pmd.h>
 #include <rte_bus_vdev.h>
 #include "prox_port_cfg.h"
+#include "prox_compat.h"
 
 typedef unsigned int u32;
 typedef unsigned char u8;
@@ -147,8 +148,11 @@ static uint8_t get_cdev_id(void)
                ++cdev_id;
                return cdev_id1;
        }
-
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,0)
        int ret = rte_vdev_init(name, "max_nb_queue_pairs=8,max_nb_sessions=1024,socket_id=0");
+#else
+       int ret = rte_vdev_init(name, "max_nb_queue_pairs=8,socket_id=0");
+#endif
        PROX_PANIC(ret != 0, "Failed rte_vdev_init\n");
 
        return cdev_id++;
@@ -176,8 +180,8 @@ static void init_task_esp_enc(struct task_base *tbase, struct task_args *targ)
        cdev_conf.socket_id = rte_socket_id();
        rte_cryptodev_configure(task->cdev_id, &cdev_conf);
 
-       unsigned int session_size = rte_cryptodev_get_private_session_size(task->cdev_id);
-       plog_info("rte_cryptodev_get_private_session_size=%d\n", session_size);
+       unsigned int session_size = rte_cryptodev_sym_get_private_session_size(task->cdev_id);
+       plog_info("rte_cryptodev_sym_get_private_session_size=%u\n", session_size);
        sprintf(name, "core_%03u_session_pool", lcore_id);
        task->session_pool = rte_mempool_create(name,
                                MAX_SESSIONS,
@@ -277,8 +281,8 @@ static void init_task_esp_dec(struct task_base *tbase, struct task_args *targ)
        cdev_conf.socket_id = rte_socket_id();
        rte_cryptodev_configure(task->cdev_id, &cdev_conf);
 
-       unsigned int session_size = rte_cryptodev_get_private_session_size(task->cdev_id);
-       plog_info("rte_cryptodev_get_private_session_size=%d\n", session_size);
+       unsigned int session_size = rte_cryptodev_sym_get_private_session_size(task->cdev_id);
+       plog_info("rte_cryptodev_sym_get_private_session_size=%u\n", session_size);
        sprintf(name, "core_%03u_session_pool", lcore_id);
        task->session_pool = rte_mempool_create(name,
                                MAX_SESSIONS,
index ffdbb1b..c3be11a 100644 (file)
@@ -1246,7 +1246,7 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
        struct prox_port_cfg *port = find_reachable_port(targ);
        // TODO: check that all reachable ports have the same mtu...
        if (port) {
-               task->cksum_offload = port->capabilities.tx_offload_cksum;
+               task->cksum_offload = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
                task->port = port;
                task->max_frame_size = port->mtu + ETHER_HDR_LEN + 2 * PROX_VLAN_TAG_SIZE;
        } else {
index 41f6dd3..02ba4c3 100644 (file)
@@ -156,7 +156,7 @@ static void init_task_gre_encap(struct task_base *tbase, struct task_args *targ)
 
        struct port_cfg *port = find_reachable_task_sending_to_port(targ);
        if (port) {
-               task->offload_crc = port->capabilities.tx_offload_cksum;
+               task->offload_crc = port->requested_tx_offload & TX_OFFLOAD_CKSUM;
        }
 
 #ifdef GRE_TP
index 13570b1..cf56069 100644 (file)
@@ -167,7 +167,7 @@ static void init_task_ipv6_tun_base(struct task_ipv6_tun_base* tun_base, struct
 
        struct prox_port_cfg *port = find_reachable_port(targ);
        if (port) {
-               tun_base->offload_crc = port->capabilities.tx_offload_cksum;
+               tun_base->offload_crc = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
        }
 }
 
index dff53d6..8e6789a 100644 (file)
@@ -171,7 +171,7 @@ static void init_task_nat(struct task_base *tbase, struct task_args *targ)
        PROX_PANIC(ret != 0, "Failed to load NAT table from lua:\n%s\n", get_lua_to_errors());
        struct prox_port_cfg *port = find_reachable_port(targ);
        if (port) {
-               task->offload_crc = port->capabilities.tx_offload_cksum;
+               task->offload_crc = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
        }
 
 }
index 564cf4b..c897bc1 100644 (file)
@@ -44,7 +44,12 @@ struct task_police {
                struct rte_meter_srtcm *sr_flows;
                struct rte_meter_trtcm *tr_flows;
        };
-
+       union {
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+               struct rte_meter_srtcm_profile sr_profile;
+               struct rte_meter_trtcm_profile tr_profile;
+#endif
+       };
        uint16_t           *user_table;
        enum police_action police_act[3][3];
        uint16_t overhead;
@@ -58,10 +63,11 @@ static uint8_t handle_police(struct task_police *task, struct rte_mbuf *mbuf, ui
        enum rte_meter_color in_color = e_RTE_METER_GREEN;
        enum rte_meter_color out_color;
        uint32_t pkt_len = rte_pktmbuf_pkt_len(mbuf) + task->overhead;
+
 #if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
        out_color = rte_meter_srtcm_color_aware_check(&task->sr_flows[user], tsc, pkt_len, in_color);
 #else
-       out_color = 0;  // TODO DPDK1805
+       out_color = rte_meter_srtcm_color_aware_check(&task->sr_flows[user], &task->sr_profile, tsc, pkt_len, in_color);
 #endif
        return task->police_act[in_color][out_color] == ACT_DROP? OUT_DISCARD : 0;
 }
@@ -74,7 +80,7 @@ static uint8_t handle_police_tr(struct task_police *task, struct rte_mbuf *mbuf,
 #if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
        out_color = rte_meter_trtcm_color_aware_check(&task->tr_flows[user], tsc, pkt_len, in_color);
 #else
-       out_color = 0;// TODO DPDK1805
+       out_color = rte_meter_trtcm_color_aware_check(&task->tr_flows[user], &task->tr_profile, tsc, pkt_len, in_color);
 #endif
 
        if (task->runtime_flags  & TASK_MARK) {
@@ -198,10 +204,6 @@ static void init_task_police(struct task_base *tbase, struct task_args *targ)
        struct task_police *task = (struct task_police *)tbase;
        const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
 
-#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
-       plog_warn("mode police might not be supported in this prox/dpdk version\n"); // TODO DPDK1805
-#endif
-
        task->overhead = targ->overhead;
        task->runtime_flags = targ->runtime_flags;
 
@@ -225,14 +227,16 @@ static void init_task_police(struct task_base *tbase, struct task_args *targ)
                        .cbs = targ->cbs,
                        .ebs = targ->ebs,
                };
-
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+               PROX_PANIC(rte_meter_srtcm_profile_config(&task->sr_profile, &params) != 0, "Failed to rte_meter_srtcm_profile_config\n");
                for (uint32_t i = 0; i < targ->n_flows; ++i) {
-#if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
-                       rte_meter_srtcm_config(&task->sr_flows[i], &params);
+                       PROX_PANIC(rte_meter_srtcm_config(&task->sr_flows[i], &task->sr_profile) != 0, "Failed to rte_meter_srtcm_config\n");
+               }
 #else
-       // TODO DPDK1805
-#endif
+               for (uint32_t i = 0; i < targ->n_flows; ++i) {
+                       rte_meter_srtcm_config(&task->sr_flows[i], &params);
                }
+#endif
        }
        else {
                task->tr_flows = prox_zmalloc(targ->n_flows * sizeof(*task->tr_flows), socket_id);
@@ -248,14 +252,17 @@ static void init_task_police(struct task_base *tbase, struct task_args *targ)
                        .cir = targ->cir,
                        .cbs = targ->cbs,
                };
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+               PROX_PANIC(rte_meter_trtcm_profile_config(&task->tr_profile, &params) != 0, "Failed to rte_meter_trtcm_profile_config\n");
+               for (uint32_t i = 0; i < targ->n_flows; ++i) {
+                       PROX_PANIC(rte_meter_trtcm_config(&task->tr_flows[i], &task->tr_profile) != 0, "Failed to rte_meter_trtcm_config\n");
+               }
+#else
 
                for (uint32_t i = 0; i < targ->n_flows; ++i) {
-#if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
                        rte_meter_trtcm_config(&task->tr_flows[i], &params);
-#else
-       // TODO DPDK1805
-#endif
                }
+#endif
        }
 
        for (uint32_t i = 0; i < 3; ++i) {
index c171580..767c0d1 100644 (file)
@@ -148,7 +148,7 @@ static void init_task_qinq_decap4(struct task_base *tbase, struct task_args *tar
 
        struct prox_port_cfg *port = find_reachable_port(targ);
        if (port) {
-               task->offload_crc = port->capabilities.tx_offload_cksum;
+               task->offload_crc = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
        }
 
        // By default, calling this function 1K times per second => 64K ARP per second max
index 0b31660..44e4345 100644 (file)
@@ -152,7 +152,7 @@ static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *tar
 
        struct prox_port_cfg *port = find_reachable_port(targ);
        if (port) {
-               task->offload_crc = port->capabilities.tx_offload_cksum;
+               task->offload_crc = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
        }
 
        /* TODO: check if it is not necessary to limit reverse mapping
index ec1ecb5..499a1ab 100644 (file)
@@ -224,6 +224,21 @@ static int chain_flag_state(struct task_args *targ, uint64_t flag, int is_set)
        return 0;
 }
 
+static int chain_flag_always_set(struct task_args *targ, uint64_t flag)
+{
+       return (!chain_flag_state(targ, flag, 0));
+}
+
+static int chain_flag_never_set(struct task_args *targ, uint64_t flag)
+{
+       return (!chain_flag_state(targ, flag, 1));
+}
+
+static int chain_flag_sometimes_set(struct task_args *targ, uint64_t flag)
+{
+       return (chain_flag_state(targ, flag, 1));
+}
+
 static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
 {
        uint8_t if_port;
@@ -247,21 +262,19 @@ static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
                        prox_port_cfg[if_port].n_txq = 1;
                        targ->tx_port_queue[i].queue = 0;
                }
-               /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
-                  the tasks up to the task transmitting to the port
-                  use refcnt. */
-               if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT, 1)) {
-                       prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
-               }
-
                /* By default OFFLOAD is enabled, but if the whole
                   chain has NOOFFLOADS set all the way until the
                   first task that receives from a port, it will be
                   disabled for the destination port. */
-               if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS, 0)) {
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
+               if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
                        prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
                }
-
+#else
+               if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
+                       prox_port_cfg[if_port].requested_tx_offload &= ~(DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
+               }
+#endif
        }
 }
 
@@ -301,35 +314,73 @@ static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
        }
 }
 
-static void configure_multi_segments(void)
+static void configure_if_queues(void)
 {
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;
-       uint8_t if_port;
+       uint8_t socket;
 
        while (core_targ_next(&lconf, &targ, 0) == 0) {
-               for (uint8_t i = 0; i < targ->nb_txports; ++i) {
-                       if_port = targ->tx_port_queue[i].port;
-                       // Multi segment is disabled for most tasks. It is only enabled for tasks requiring big packets.
-                       // We can only enable "no multi segment" if no such task exists in the chain of tasks.
-                       if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS, 1)) {
-                               prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
-                       }
-               }
+               socket = rte_lcore_to_socket_id(lconf->id);
+
+               configure_if_rx_queues(targ, socket);
+               configure_if_tx_queues(targ, socket);
        }
 }
 
-static void configure_if_queues(void)
+static void configure_tx_queue_flags(void)
 {
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;
        uint8_t socket;
+       uint8_t if_port;
 
-       while (core_targ_next(&lconf, &targ, 0) == 0) {
-               socket = rte_lcore_to_socket_id(lconf->id);
+        while (core_targ_next(&lconf, &targ, 0) == 0) {
+                socket = rte_lcore_to_socket_id(lconf->id);
+                for (uint8_t i = 0; i < targ->nb_txports; ++i) {
+                        if_port = targ->tx_port_queue[i].port;
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
+                        /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
+                        the tasks up to the task transmitting to the port
+                        use refcnt. */
+                        if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
+                                prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
+                        }
+#else
+                        /* Set the DEV_TX_OFFLOAD_MBUF_FAST_FREE flag if none of
+                        the tasks up to the task transmitting to the port
+                        use refcnt and per-queue all mbufs comes from the same mempool. */
+                        if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
+                                if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL))
+                                        prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+                        }
+#endif
+                }
+       }
+}
 
-               configure_if_rx_queues(targ, socket);
-               configure_if_tx_queues(targ, socket);
+static void configure_multi_segments(void)
+{
+       struct lcore_cfg *lconf = NULL;
+       struct task_args *targ;
+       uint8_t if_port;
+
+       while (core_targ_next(&lconf, &targ, 0) == 0) {
+               for (uint8_t i = 0; i < targ->nb_txports; ++i) {
+                       if_port = targ->tx_port_queue[i].port;
+                       // Multi segment is disabled for most tasks. It is only enabled for tasks requiring big packets.
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
+                       // We can only enable "no multi segment" if no such task exists in the chain of tasks.
+                       if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
+                               prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+                       }
+#else
+                       // We enable "multi segment" if at least one task requires it in the chain of tasks.
+                       if (chain_flag_sometimes_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
+                               prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MULTI_SEGS;
+                       }
+#endif
+               }
        }
 }
 
@@ -509,6 +560,8 @@ static struct rte_ring *init_ring_between_tasks(struct lcore_cfg *lconf, struct
                PROX_ASSERT(dtarg->nb_rxrings < MAX_RINGS_PER_TASK);
                dtarg->rx_rings[dtarg->nb_rxrings] = ring;
                ++dtarg->nb_rxrings;
+               if (dtarg->nb_rxrings > 1)
+                       dtarg->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL;
        }
        dtarg->nb_slave_threads = starg->core_task_set[idx].n_elems;
        dtarg->lb_friend_core = lconf->id;
@@ -914,19 +967,16 @@ static void init_lcores(void)
        plog_info("=== Initializing queue numbers on cores ===\n");
        configure_if_queues();
 
-       configure_multi_segments();
-
        plog_info("=== Initializing rings on cores ===\n");
        init_rings();
 
+       configure_multi_segments();
+       configure_tx_queue_flags();
+
        plog_info("=== Checking configuration consistency ===\n");
        check_cfg_consistent();
 
        plog_all_rings();
-
-       setup_all_task_structs_early_init();
-       plog_info("=== Initializing tasks ===\n");
-       setup_all_task_structs();
 }
 
 static int setup_prox(int argc, char **argv)
@@ -954,6 +1004,10 @@ static int setup_prox(int argc, char **argv)
        plog_info("=== Initializing ports ===\n");
        init_port_all();
 
+       setup_all_task_structs_early_init();
+       plog_info("=== Initializing tasks ===\n");
+       setup_all_task_structs();
+
        if (prox_cfg.logbuf_size) {
                prox_cfg.logbuf = prox_zmalloc(prox_cfg.logbuf_size, rte_socket_id());
                PROX_PANIC(prox_cfg.logbuf == NULL, "Failed to allocate memory for logbuf with size = %d\n", prox_cfg.logbuf_size);
index fb88a65..c09c563 100644 (file)
@@ -539,12 +539,25 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
                }
                cfg->lsc_val = val;
        }
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+       else if (STR_EQ(str, "disable tx offload")) {
+               uint32_t val;
+               if (parse_int(&val, pkey)) {
+                       return -1;
+               }
+               if (val)
+                       cfg->disabled_tx_offload = val;
+       }
+#endif
        else if (STR_EQ(str, "strip crc")) {
                uint32_t val;
                if (parse_bool(&val, pkey)) {
                        return -1;
                }
-               cfg->port_conf.rxmode.hw_strip_crc = val;
+               if (val)
+                       cfg->requested_rx_offload |= DEV_RX_OFFLOAD_CRC_STRIP;
+               else
+                       cfg->requested_rx_offload &= ~DEV_RX_OFFLOAD_CRC_STRIP;
        }
        else if (STR_EQ(str, "mtu size")) {
                uint32_t val;
@@ -558,7 +571,7 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
                        // the max_rx_pkt_len for a non jumbo frame is 1518
                        cfg->port_conf.rxmode.max_rx_pkt_len = cfg->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
                        if (cfg->port_conf.rxmode.max_rx_pkt_len > ETHER_MAX_LEN) {
-                               cfg->port_conf.rxmode.jumbo_frame = 1;
+                               cfg->requested_rx_offload |= DEV_RX_OFFLOAD_JUMBO_FRAME;
                        }
                }
        }
index 9a05097..10dc3a8 100644 (file)
@@ -81,13 +81,13 @@ static void prox_write_tcp_pseudo_hdr(struct tcp_hdr *tcp, uint16_t len, uint32_
 
 void prox_ip_udp_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *pip, uint16_t l2_len, uint16_t l3_len, int cksum_offload)
 {
-       prox_ip_cksum(mbuf, pip, l2_len, l3_len, cksum_offload & IPV4_CKSUM);
+       prox_ip_cksum(mbuf, pip, l2_len, l3_len, cksum_offload & DEV_TX_OFFLOAD_IPV4_CKSUM);
 
        uint32_t l4_len = rte_bswap16(pip->total_length) - l3_len;
        if (pip->next_proto_id == IPPROTO_UDP) {
                struct udp_hdr *udp = (struct udp_hdr *)(((uint8_t*)pip) + l3_len);
 #ifndef SOFT_CRC
-               if (cksum_offload & UDP_CKSUM) {
+               if (cksum_offload & DEV_TX_OFFLOAD_UDP_CKSUM) {
                        mbuf->ol_flags |= PKT_TX_UDP_CKSUM;
                        prox_write_udp_pseudo_hdr(udp, l4_len, pip->src_addr, pip->dst_addr);
                } else
@@ -96,7 +96,7 @@ void prox_ip_udp_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *pip, uint16_t l2_
        } else if (pip->next_proto_id == IPPROTO_TCP) {
                struct tcp_hdr *tcp = (struct tcp_hdr *)(((uint8_t*)pip) + l3_len);
 #ifndef SOFT_CRC
-               if (cksum_offload & UDP_CKSUM) {
+               if (cksum_offload & DEV_TX_OFFLOAD_TCP_CKSUM) {
                        prox_write_tcp_pseudo_hdr(tcp, l4_len, pip->src_addr, pip->dst_addr);
                        mbuf->ol_flags |= PKT_TX_UDP_CKSUM;
                } else
index ee61ee4..3ad0414 100644 (file)
@@ -117,3 +117,14 @@ static void *prox_rte_table_create(struct prox_rte_table_params *params, int soc
 #define prox_rte_table_key8_stats       rte_table_hash_key8_ext_ops.f_stats
 
 #endif
+
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,0)
+#define rte_cryptodev_sym_get_private_session_size rte_cryptodev_get_private_session_size
+#endif
+
+#ifndef DEV_RX_OFFLOAD_CRC_STRIP
+#define DEV_RX_OFFLOAD_CRC_STRIP 0x00001000
+#endif
+#ifndef DEV_RX_OFFLOAD_JUMBO_FRAME
+#define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
+#endif
index 7b763c5..a71d0cc 100644 (file)
@@ -40,6 +40,7 @@
 #include "defines.h"
 #include "prox_cksum.h"
 #include "stats_irq.h"
+#include "prox_compat.h"
 
 struct prox_port_cfg prox_port_cfg[PROX_MAX_PORTS];
 rte_atomic32_t lsc;
@@ -123,6 +124,42 @@ void prox_pktmbuf_reinit(void *arg, void *start, __attribute__((unused)) void *e
        prox_pktmbuf_init(init_args->mp, init_args->lconf, obj, idx);
 }
 
+#define CONFIGURE_TX_OFFLOAD(flag)                                           \
+        if (port_cfg->requested_tx_offload & flag)                              {\
+               if (port_cfg->disabled_tx_offload & flag)                       {\
+                       plog_info("\t\t%s disabled by configuration\n", #flag);\
+                        port_cfg->requested_tx_offload &= ~flag;\
+                } else if (port_cfg->dev_info.tx_offload_capa & flag) {\
+                        port_cfg->port_conf.txmode.offloads |= flag;\
+                        plog_info("\t\t%s enabled on port\n", #flag);\
+                } else if (port_cfg->dev_info.tx_queue_offload_capa & flag) {\
+                        port_cfg->tx_conf.offloads |= flag;\
+                        plog_info("\t\t%s enabled on queue\n", #flag);\
+                } else {\
+                        port_cfg->requested_tx_offload &= ~flag;\
+                        plog_info("\t\t%s disabled as neither port nor queue supports it\n", #flag);\
+                }\
+        } else {\
+                plog_info("\t\t%s disabled\n", #flag);\
+        }\
+
+#define CONFIGURE_RX_OFFLOAD(flag)                                           \
+        if (port_cfg->requested_rx_offload & flag)                              {\
+                if (port_cfg->dev_info.rx_offload_capa & flag) {\
+                        port_cfg->port_conf.rxmode.offloads |= flag;\
+                        plog_info("\t\t%s enabled on port\n", #flag);\
+                } else if (port_cfg->dev_info.rx_queue_offload_capa & flag) {\
+                        port_cfg->rx_conf.offloads |= flag;\
+                        plog_info("\t\t%s enabled on queue\n", #flag);\
+                } else {\
+                        port_cfg->requested_rx_offload &= ~flag;\
+                        plog_info("\t\t%s disabled as neither port nor queue supports it\n", #flag);\
+                }\
+        } else {\
+                plog_info("\t\t%s disabled\n", #flag);\
+        }\
+
+
 /* initialize rte devices and check the number of available ports */
 void init_rte_dev(int use_dummy_devices)
 {
@@ -181,6 +218,7 @@ void init_rte_dev(int use_dummy_devices)
                struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
                port_cfg->socket = -1;
 
+               memcpy(&port_cfg->dev_info, &dev_info, sizeof(struct rte_eth_dev_info));
                port_cfg->max_txq = dev_info.max_tx_queues;
                port_cfg->max_rxq = dev_info.max_rx_queues;
                port_cfg->max_rx_pkt_len = dev_info.max_rx_pktlen;
@@ -226,11 +264,14 @@ void init_rte_dev(int use_dummy_devices)
                        fclose(numa_node_fd);
                }
 
-               if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
-                       port_cfg->capabilities.tx_offload_cksum |= IPV4_CKSUM;
+               // In DPDK 18.08 vmxnet3 reports it supports IPV4 checksum, but packets do not go through when IPv4 cksum is enabled
+               if ((!strcmp(port_cfg->short_name, "vmxnet3")) && (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
+                       plog_info("\t\tDisabling IPV4 cksum on vmxnet3\n");
+                       port_cfg->disabled_tx_offload |= DEV_TX_OFFLOAD_IPV4_CKSUM;
                }
-               if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
-                       port_cfg->capabilities.tx_offload_cksum |= UDP_CKSUM;
+               if ((!strcmp(port_cfg->short_name, "vmxnet3")) && (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+                       plog_info("\t\tDisabling UDP cksum on vmxnet3\n");
+                       port_cfg->disabled_tx_offload |= DEV_TX_OFFLOAD_UDP_CKSUM;
                }
        }
 }
@@ -278,6 +319,9 @@ static void init_port(struct prox_port_cfg *port_cfg)
        plog_info("\t\tPort name is set to %s\n", port_cfg->name);
        plog_info("\t\tPort max RX/TX queue is %u/%u\n", port_cfg->max_rxq, port_cfg->max_txq);
        plog_info("\t\tPort driver is %s\n", port_cfg->driver_name);
+#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,0)
+       plog_info("\t\tSupported speed mask = 0x%x\n", port_cfg->dev_info.speed_capa);
+#endif
 
        PROX_PANIC(port_cfg->n_rxq == 0 && port_cfg->n_txq == 0,
                   "\t\t port %u is enabled but no RX or TX queues have been configured", port_id);
@@ -328,20 +372,157 @@ static void init_port(struct prox_port_cfg *port_cfg)
 #endif
        }
 
-       if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT)
-               plog_info("\t\tEnabling No refcnt on port %d\n", port_id);
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+       plog_info("\t\tRX offload capa = 0x%lx = ", port_cfg->dev_info.rx_offload_capa);
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP)
+               plog_info("VLAN STRIP | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM)
+               plog_info("IPV4 CKSUM | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM)
+               plog_info("UDP CKSUM | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM)
+               plog_info("TCP CKSUM | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+               plog_info("TCP LRO | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP)
+               plog_info("QINQ STRIP | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
+               plog_info("OUTER_IPV4_CKSUM | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_MACSEC_STRIP)
+               plog_info("MACSEC STRIP | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_HEADER_SPLIT)
+               plog_info("HEADER SPLIT | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_FILTER)
+               plog_info("VLAN FILTER | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_EXTEND)
+               plog_info("VLAN EXTEND | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME)
+               plog_info("JUMBO FRAME | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CRC_STRIP)
+               plog_info("CRC STRIP | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER)
+               plog_info("SCATTER | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)
+               plog_info("TIMESTAMP | ");
+       if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
+               plog_info("SECURITY ");
+       plog_info("\n");
+
+       plog_info("\t\tTX offload capa = 0x%lx = ", port_cfg->dev_info.tx_offload_capa);
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT)
+               plog_info("VLAN INSERT | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
+               plog_info("IPV4 CKSUM | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM)
+               plog_info("UDP CKSUM | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM)
+               plog_info("TCP CKSUM | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM)
+               plog_info("SCTP CKSUM | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
+               plog_info("TCP TSO | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO)
+               plog_info("UDP TSO | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+               plog_info("OUTER IPV4 CKSUM | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT)
+               plog_info("QINQ INSERT | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
+               plog_info("VXLAN TNL TSO | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO)
+               plog_info("GRE TNL TSO | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO)
+               plog_info("IPIP TNL TSO | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+               plog_info("GENEVE TNL TSO | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT)
+               plog_info("MACSEC INSERT | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MT_LOCKFREE)
+               plog_info("MT LOCKFREE | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MULTI_SEGS)
+               plog_info("MULTI SEG | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
+               plog_info("SECURITY | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO)
+               plog_info("UDP TNL TSO | ");
+       if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO)
+               plog_info("IP TNL TSO | ");
+       plog_info("\n");
+
+       plog_info("\t\trx_queue_offload_capa = 0x%lx\n", port_cfg->dev_info.rx_queue_offload_capa);
+       plog_info("\t\ttx_queue_offload_capa = 0x%lx\n", port_cfg->dev_info.tx_queue_offload_capa);
+       plog_info("\t\tflow_type_rss_offloads = 0x%lx\n", port_cfg->dev_info.flow_type_rss_offloads);
+       plog_info("\t\tdefault RX port conf: burst_size = %d, ring_size = %d, nb_queues = %d\n", port_cfg->dev_info.default_rxportconf.burst_size, port_cfg->dev_info.default_rxportconf.ring_size, port_cfg->dev_info.default_rxportconf.nb_queues);
+       plog_info("\t\tdefault TX port conf: burst_size = %d, ring_size = %d, nb_queues = %d\n", port_cfg->dev_info.default_txportconf.burst_size, port_cfg->dev_info.default_txportconf.ring_size, port_cfg->dev_info.default_txportconf.nb_queues);
+#endif
+
+       // rxmode such as hw src strip
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+       CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_CRC_STRIP);
+       CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_JUMBO_FRAME);
+#else
+       if (port_cfg->requested_rx_offload & DEV_RX_OFFLOAD_CRC_STRIP) {
+               port_cfg->port_conf.rxmode.hw_strip_crc = 1;
+       }
+       if (port_cfg->requested_rx_offload & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+               port_cfg->port_conf.rxmode.jumbo_frame = 1;
+       }
+#endif
+
+       // IPV4, UDP, SCTP Checksums
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+       CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_IPV4_CKSUM);
+       CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_UDP_CKSUM);
+#else
+       if ((port_cfg->dev_info.tx_offload_capa & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM)) == 0) {
+               port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
+               plog_info("\t\tDisabling TX offloads as pmd reports that it does not support them\n");
+       }
+       if (!strcmp(port_cfg->short_name, "vmxnet3")) {
+               port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
+               plog_info("\t\tDisabling SCTP offload on port %d as vmxnet3 does not support them\n", port_id);
+       }
+#endif
+       // Multi Segments
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+       CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_MULTI_SEGS);
+       //if (port_cfg->requested_tx_offload & DEV_TX_OFFLOAD_MULTI_SEGS) {
+               //if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MULTI_SEGS) {
+                       //port_cfg->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+                       //plog_info("\t\tMULTI SEGS TX offloads enabled on port)\n");
+               //} else if (port_cfg->dev_info.tx_queue_offload_capa & DEV_TX_OFFLOAD_MULTI_SEGS) {
+                       //port_cfg->tx_conf.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+                       //plog_info("\t\tMULTI SEGS TX offloads enabled on queue)\n");
+               //} else {
+                       //port_cfg->requested_tx_offload &= ~DEV_TX_OFFLOAD_MULTI_SEGS;
+                       //plog_info("\t\tMULTI SEGS TX offloads disabled) as neither port or queue supports it\n");
+               //}
+       //} else
+               //plog_info("\t\tMULTI SEGS TX offloads disabled)\n");
+#else
+       if (!strcmp(port_cfg->short_name, "vmxnet3")) {
+               port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+               plog_info("\t\tDisabling TX multsegs on port %d as vmxnet3 does not support them\n", port_id);
+       } else if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS)
+               plog_info("\t\tDisabling TX multsegs on port %d\n", port_id);
        else
-               plog_info("\t\tRefcnt enabled on port %d\n", port_id);
+               plog_info("\t\tEnabling TX multsegs on port %d\n", port_id);
 
        if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS)
                plog_info("\t\tEnabling No TX offloads on port %d\n", port_id);
        else
                plog_info("\t\tTX offloads enabled on port %d\n", port_id);
+#endif
 
-       if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS)
-               plog_info("\t\tEnabling No TX MultiSegs on port %d\n", port_id);
+       // Refcount
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+       CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+#else
+       if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT)
+               plog_info("\t\tEnabling No refcnt on port %d\n", port_id);
        else
-               plog_info("\t\tTX Multi segments enabled on port %d\n", port_id);
+               plog_info("\t\tRefcnt enabled on port %d\n", port_id);
+#endif
 
        plog_info("\t\tConfiguring port %u... with %u RX queues and %u TX queues\n",
                  port_id, port_cfg->n_rxq, port_cfg->n_txq);
@@ -384,16 +565,6 @@ static void init_port(struct prox_port_cfg *port_cfg)
 
        plog_info("\t\tMAC address set to "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
 
-       if (port_cfg->capabilities.tx_offload_cksum == 0) {
-               port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
-               plog_info("\t\tDisabling TX offloads as pmd reports that it does not support them)\n");
-       }
-
-       if (!strcmp(port_cfg->short_name, "vmxnet3")) {
-               port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
-               plog_info("\t\tDisabling multsegs on port %d as vmxnet3 does not support them\n", port_id);
-       }
-
        /* initialize TX queues first */
        for (uint16_t queue_id = 0; queue_id < port_cfg->n_txq; ++queue_id) {
                plog_info("\t\tSetting up TX queue %u on socket %u with %u desc\n",
index 0c804c6..696beb7 100644 (file)
@@ -61,6 +61,10 @@ struct prox_port_cfg {
        struct rte_eth_conf port_conf;
        struct rte_eth_rxconf rx_conf;
        struct rte_eth_txconf tx_conf;
+       uint64_t requested_rx_offload;
+       uint64_t requested_tx_offload;
+       uint64_t disabled_tx_offload;
+       struct rte_eth_dev_info dev_info;
        struct {
                int tx_offload_cksum;
        } capabilities;
index 3abdb81..6ffd76b 100644 (file)
@@ -80,6 +80,7 @@ static void update_link_states(void)
                rte_eth_link_get_nowait(portid, &link);
                port_cfg->link_up = link.link_status;
                port_cfg->link_speed = link.link_speed;
+               plog_info("Link speed now %d Mbps\n", port_cfg->link_speed);
        }
 }
 
index b5e70dc..7cc36fc 100644 (file)
@@ -28,6 +28,7 @@
 #include "stats_port.h"
 #include "prox_port_cfg.h"
 #include "rw_reg.h"
+#include "prox_compat.h"
 
 #if defined(PROX_STATS) && defined(PROX_HW_DIRECT_STATS)
 
@@ -285,7 +286,7 @@ static void nic_read_stats(uint8_t port_id)
                           dropped by the nic". Note that in case CRC
                           is stripped on ixgbe, the CRC bytes are not
                           counted. */
-                       if (prox_port_cfg[port_id].port_conf.rxmode.hw_strip_crc == 1)
+                       if (prox_port_cfg[port_id].requested_rx_offload & DEV_RX_OFFLOAD_CRC_STRIP)
                                stats->rx_bytes = eth_stat.ibytes +
                                        (24 * eth_stat.ipackets - 20 * (eth_stat.ierrors + eth_stat.imissed));
                        else
index 1327a6c..b4a3337 100644 (file)
@@ -54,6 +54,7 @@
 #define TASK_FEATURE_LUT_QINQ_HASH             0x4000
 #define TASK_FEATURE_RX_ALL                    0x8000
 #define TASK_MULTIPLE_MAC                      0x10000
+#define TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL        0x20000
 
 #define FLAG_TX_FLUSH                  0x01
 #define FLAG_NEVER_FLUSH               0x02