Support for DPDK 18.05 and DPDK 18.08
[samplevnf.git] / VNFs / DPPD-PROX / main.c
index 1c4dced..499a1ab 100644 (file)
@@ -94,13 +94,15 @@ static void check_mixed_normal_pipeline(void)
                int all_thread_nop = 1;
                int generic = 0;
                int pipeline = 0;
+               int l3 = 0;
                for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                        struct task_args *targ = &lconf->targs[task_id];
-                       all_thread_nop = all_thread_nop &&
+                       l3 = !strcmp("l3", targ->sub_mode_str);
+                       all_thread_nop = all_thread_nop && !l3 &&
                                targ->task_init->thread_x == thread_nop;
 
                        pipeline = pipeline || targ->task_init->thread_x == thread_pipeline;
-                       generic = generic || targ->task_init->thread_x == thread_generic;
+                       generic = generic || targ->task_init->thread_x == thread_generic || l3;
                }
                PROX_PANIC(generic && pipeline, "Can't run both pipeline and normal thread on same core\n");
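
A compact way to read this check: a core counts as "generic" when any of its tasks uses the generic thread function or runs the "l3" sub-mode, and as "pipeline" when any task uses the pipeline thread function; mixing the two on one core is fatal. A minimal standalone sketch of that rule, with all names hypothetical (not PROX code):

    #include <assert.h>
    #include <string.h>

    enum thread_kind { THREAD_NOP, THREAD_GENERIC, THREAD_PIPELINE };

    struct fake_task { enum thread_kind kind; const char *sub_mode; };

    /* Returns 1 if the task mix on a core is valid, 0 if generic and pipeline clash. */
    static int core_mix_is_valid(const struct fake_task *tasks, int n)
    {
        int generic = 0, pipeline = 0;
        for (int i = 0; i < n; i++) {
            int l3 = !strcmp("l3", tasks[i].sub_mode);  /* strcmp() == 0 on match */
            pipeline = pipeline || tasks[i].kind == THREAD_PIPELINE;
            generic = generic || tasks[i].kind == THREAD_GENERIC || l3;
        }
        return !(generic && pipeline);
    }

    int main(void)
    {
        struct fake_task ok[]  = { { THREAD_NOP, "l3" }, { THREAD_GENERIC, "" } };
        struct fake_task bad[] = { { THREAD_NOP, "l3" }, { THREAD_PIPELINE, "" } };
        assert(core_mix_is_valid(ok, 2));
        assert(!core_mix_is_valid(bad, 2));  /* l3 forces generic, clashing with pipeline */
        return 0;
    }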
 
@@ -127,8 +129,8 @@ static void check_zero_rx(void)
 
 static void check_missing_rx(void)
 {
-       struct lcore_cfg *lconf = NULL, *rx_lconf = NULL;
-       struct task_args *targ, *rx_targ = NULL;
+       struct lcore_cfg *lconf = NULL, *rx_lconf = NULL, *tx_lconf = NULL;
+       struct task_args *targ, *rx_targ = NULL, *tx_targ = NULL;
        struct prox_port_cfg *port;
        uint8_t port_id, rx_port_id, ok;
 
@@ -143,19 +145,38 @@ static void check_missing_rx(void)
 
        lconf = NULL;
        while (core_targ_next(&lconf, &targ, 0) == 0) {
-               if (strcmp(targ->task_init->sub_mode_str, "l3") != 0)
+               if (strcmp(targ->sub_mode_str, "l3") != 0)
                        continue;
-               port = find_reachable_port(targ);
-               if (port == NULL)
+
+               PROX_PANIC((targ->nb_rxports == 0) && (targ->nb_txports == 0), "L3 task must have an RX or a TX port\n");
+               // If a task in L3 sub_mode receives from a port, check that there is at least
+               // one core/task transmitting to that port in L3 sub_mode
+               for (uint8_t i = 0; i < targ->nb_rxports; ++i) {
+                       rx_port_id = targ->rx_port_queue[i].port;
+                       ok = 0;
+                       tx_lconf = NULL;
+                       while (core_targ_next(&tx_lconf, &tx_targ, 0) == 0) {
+                               if ((port_id = tx_targ->tx_port_queue[0].port) == OUT_DISCARD)
+                                       continue;
+                               if ((rx_port_id == port_id) && (tx_targ->flags & TASK_ARG_L3)){
+                                       ok = 1;
+                                       break;
+                               }
+                       }
+                       PROX_PANIC(ok == 0, "RX L3 sub mode for port %d on core %d task %d, but no core/task transmitting on that port\n", rx_port_id, lconf->id, targ->id);
+               }
+
+               // If a task in L3 sub_mode transmits to a port, check that there is at least
+               // one core/task receiving from that port in L3 sub_mode.
+               if ((port_id = targ->tx_port_queue[0].port) == OUT_DISCARD)
                        continue;
-                       port_id = port - prox_port_cfg;
                rx_lconf = NULL;
                ok = 0;
                plog_info("\tCore %d task %d transmitting to port %d in L3 mode\n", lconf->id, targ->id, port_id);
                while (core_targ_next(&rx_lconf, &rx_targ, 0) == 0) {
                        for (uint8_t i = 0; i < rx_targ->nb_rxports; ++i) {
                                rx_port_id = rx_targ->rx_port_queue[i].port;
-                               if ((rx_port_id == port_id) && (rx_targ->task_init->flag_features & TASK_FEATURE_L3)){
+                               if ((rx_port_id == port_id) && (rx_targ->flags & TASK_ARG_L3)){
                                        ok = 1;
                                        break;
                                }
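
The two loops above enforce a symmetric pairing: every port an L3 task receives on must have some L3 task transmitting to it, and vice versa. A self-contained sketch of the first direction, with hypothetical names and one RX/TX port per task for brevity:

    #include <assert.h>

    #define OUT_DISCARD 255

    struct fake_targ { int rx_port; int tx_port; int is_l3; };

    /* Returns 1 if every L3 RX port also has an L3 task transmitting to it. */
    static int l3_rx_has_l3_tx(const struct fake_targ *t, int n)
    {
        for (int i = 0; i < n; i++) {
            if (!t[i].is_l3 || t[i].rx_port == OUT_DISCARD)
                continue;
            int ok = 0;
            for (int j = 0; j < n; j++) {
                if (t[j].is_l3 && t[j].tx_port == t[i].rx_port) {
                    ok = 1;
                    break;
                }
            }
            if (!ok)
                return 0;  /* PROX would PROX_PANIC() here */
        }
        return 1;
    }

    int main(void)
    {
        struct fake_targ paired[] = { { 0, OUT_DISCARD, 1 }, { OUT_DISCARD, 0, 1 } };
        struct fake_targ orphan[] = { { 0, OUT_DISCARD, 1 } };  /* RX on port 0, no L3 TX */
        assert(l3_rx_has_l3_tx(paired, 2));
        assert(!l3_rx_has_l3_tx(orphan, 1));
        return 0;
    }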
@@ -203,6 +224,21 @@ static int chain_flag_state(struct task_args *targ, uint64_t flag, int is_set)
        return 0;
 }
 
+static int chain_flag_always_set(struct task_args *targ, uint64_t flag)
+{
+       return (!chain_flag_state(targ, flag, 0));
+}
+
+static int chain_flag_never_set(struct task_args *targ, uint64_t flag)
+{
+       return (!chain_flag_state(targ, flag, 1));
+}
+
+static int chain_flag_sometimes_set(struct task_args *targ, uint64_t flag)
+{
+       return (chain_flag_state(targ, flag, 1));
+}
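
These three wrappers read as quantifiers over the chain of tasks feeding a port: chain_flag_state(targ, flag, x) is true when some task in the chain has the flag in state x, so "always" means no task has it cleared, "never" means no task has it set, and "sometimes" means at least one task has it set. A standalone model of the semantics (hypothetical names; a chain is reduced to an array of flag words):

    #include <assert.h>
    #include <stdint.h>

    /* True when some task in the chain has the flag in state is_set. */
    static int chain_flag_state(const uint64_t *chain, int n, uint64_t flag, int is_set)
    {
        for (int i = 0; i < n; i++)
            if (((chain[i] & flag) != 0) == is_set)
                return 1;
        return 0;
    }

    static int always_set(const uint64_t *c, int n, uint64_t f)    { return !chain_flag_state(c, n, f, 0); }
    static int never_set(const uint64_t *c, int n, uint64_t f)     { return !chain_flag_state(c, n, f, 1); }
    static int sometimes_set(const uint64_t *c, int n, uint64_t f) { return chain_flag_state(c, n, f, 1); }

    int main(void)
    {
        uint64_t mixed[] = { 0x1, 0x0 };  /* one task sets flag 0x1, the other does not */
        assert(!always_set(mixed, 2, 0x1));
        assert(!never_set(mixed, 2, 0x1));
        assert(sometimes_set(mixed, 2, 0x1));
        /* "sometimes set" is exactly the negation of "never set" */
        assert(sometimes_set(mixed, 2, 0x2) == !never_set(mixed, 2, 0x2));
        return 0;
    }

This complement relation is what lets the multi-segment logic below invert cleanly between DPDK APIs: pre-18.08 sets a "no multi-seg" queue flag when the feature is never needed, while 18.08+ sets the DEV_TX_OFFLOAD_MULTI_SEGS offload when it is sometimes needed.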
+
 static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
 {
        uint8_t if_port;
@@ -226,44 +262,25 @@ static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
                        prox_port_cfg[if_port].n_txq = 1;
                        targ->tx_port_queue[i].queue = 0;
                }
-               /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
-                  the tasks up to the task transmitting to the port
-                  does not use refcnt. */
-               if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT, 1)) {
-                       prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
-                       plog_info("\t\tEnabling No refcnt on port %d\n", if_port);
-               }
-               else {
-                       plog_info("\t\tRefcnt used on port %d\n", if_port);
-               }
-
                /* By default OFFLOAD is enabled, but if the whole
                   chain has NOOFFLOADS set all the way until the
                   first task that receives from a port, it will be
                   disabled for the destination port. */
-               if (chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS, 1)) {
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
+               if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
                        prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
-                       plog_info("\t\tDisabling TX offloads on port %d\n", if_port);
-               } else {
-                       plog_info("\t\tEnabling TX offloads on port %d\n", if_port);
                }
-
-               /* By default NOMULTSEGS is disabled, as drivers/NIC might split packets on RX
-                  It should only be enabled when we know for sure that the RX does not split packets.
-                  Set the ETH_TXQ_FLAGS_NOMULTSEGS flag if none of the tasks up to the task
-                  transmitting to the port does not use multsegs. */
-               if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, 0)) {
-                       prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
-                       plog_info("\t\tEnabling No MultiSegs on port %d\n", if_port);
-               }
-               else {
-                       plog_info("\t\tMultiSegs used on port %d\n", if_port);
+#else
+               if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
+                       prox_port_cfg[if_port].requested_tx_offload &= ~(DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
                }
+#endif
        }
 }
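
For context on the gate used above: pre-18.08 DPDK expresses per-queue TX behaviour through txq_flags bits in struct rte_eth_txconf, while 18.08+ uses offload bits (DEV_TX_OFFLOAD_*) that PROX accumulates in requested_tx_offload before configuring the port. A minimal sketch of the same pattern, assuming a DPDK 17.x/18.x build environment:

    #include <rte_version.h>
    #include <rte_ethdev.h>

    /* Disable TX checksum offloads using whichever API this DPDK version provides. */
    static void disable_cksum_offloads(struct rte_eth_conf *conf, struct rte_eth_txconf *txconf)
    {
    #if RTE_VERSION < RTE_VERSION_NUM(18, 8, 0, 1)
        txconf->txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;  /* old per-queue flag bits */
        (void)conf;
    #else
        conf->txmode.offloads &= ~(DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
        (void)txconf;
    #endif
    }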
 
 static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
 {
+       struct prox_port_cfg *port;
        for (int i = 0; i < targ->nb_rxports; i++) {
                uint8_t if_port = targ->rx_port_queue[i].port;
 
@@ -271,18 +288,26 @@ static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
                        return;
                }
 
-               PROX_PANIC(!prox_port_cfg[if_port].active, "Port %u not used, aborting...\n", if_port);
+               port = &prox_port_cfg[if_port];
+               PROX_PANIC(!port->active, "Port %u not used, aborting...\n", if_port);
 
-               if(prox_port_cfg[if_port].rx_ring[0] != '\0') {
-                       prox_port_cfg[if_port].n_rxq = 0;
+               if(port->rx_ring[0] != '\0') {
+                       port->n_rxq = 0;
                }
 
-               targ->rx_port_queue[i].queue = prox_port_cfg[if_port].n_rxq;
-               prox_port_cfg[if_port].pool[targ->rx_port_queue[i].queue] = targ->pool;
-               prox_port_cfg[if_port].pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1;
-               prox_port_cfg[if_port].n_rxq++;
+               // If the mbuf size of the RX task is not big enough, we might receive multiple segments.
+               // This is usually the case when setting a large MTU, i.e. when enabling jumbo frames.
+               // If the packets then get transmitted, multi-segment support must be enabled on the TX port.
+               uint16_t max_frame_size = port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
+               if (max_frame_size + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > targ->mbuf_size) {
+                       targ->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTSEGS;
+               }
+               targ->rx_port_queue[i].queue = port->n_rxq;
+               port->pool[targ->rx_port_queue[i].queue] = targ->pool;
+               port->pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1;
+               port->n_rxq++;
 
-               int dsocket = prox_port_cfg[if_port].socket;
+               int dsocket = port->socket;
                if (dsocket != -1 && dsocket != socket) {
                        plog_warn("RX core on socket %d while device on socket %d\n", socket, dsocket);
                }
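
The arithmetic behind the new multi-segment test, as a worked standalone example. The constants are assumptions for illustration: 14-byte Ethernet header, 4-byte CRC, two 4-byte VLAN tags, and 128 bytes each for sizeof(struct rte_mbuf) and RTE_PKTMBUF_HEADROOM, as on common builds:

    #include <stdint.h>
    #include <stdio.h>

    #define ETHER_HDR_LEN       14
    #define ETHER_CRC_LEN        4
    #define PROX_VLAN_TAG_SIZE   4    /* assumption: matches PROX's definition */
    #define MBUF_STRUCT_SIZE   128    /* assumption: typical sizeof(struct rte_mbuf) */
    #define PKTMBUF_HEADROOM   128    /* assumption: default RTE_PKTMBUF_HEADROOM */

    /* 1 if a single mbuf of mbuf_size cannot hold a max-size frame at this MTU. */
    static int needs_multi_seg(uint16_t mtu, uint32_t mbuf_size)
    {
        uint32_t max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
        return max_frame_size + MBUF_STRUCT_SIZE + PKTMBUF_HEADROOM > mbuf_size;
    }

    int main(void)
    {
        /* MTU 1500: frame 1526, and 1526 + 256 = 1782 fits in a 2048-byte mbuf. */
        printf("mtu 1500, mbuf 2048: %d\n", needs_multi_seg(1500, 2048));  /* 0 */
        /* MTU 9000: frame 9026, and 9282 > 2048, so RX will chain segments. */
        printf("mtu 9000, mbuf 2048: %d\n", needs_multi_seg(9000, 2048));  /* 1 */
        return 0;
    }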
@@ -298,8 +323,64 @@ static void configure_if_queues(void)
        while (core_targ_next(&lconf, &targ, 0) == 0) {
                socket = rte_lcore_to_socket_id(lconf->id);
 
-               configure_if_tx_queues(targ, socket);
                configure_if_rx_queues(targ, socket);
+               configure_if_tx_queues(targ, socket);
+       }
+}
+
+static void configure_tx_queue_flags(void)
+{
+       struct lcore_cfg *lconf = NULL;
+       struct task_args *targ;
+       uint8_t if_port;
+
+       while (core_targ_next(&lconf, &targ, 0) == 0) {
+               for (uint8_t i = 0; i < targ->nb_txports; ++i) {
+                       if_port = targ->tx_port_queue[i].port;
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
+                       /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
+                       the tasks up to the task transmitting to the port
+                       use refcnt. */
+                       if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
+                               prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
+                       }
+#else
+                       /* Set the DEV_TX_OFFLOAD_MBUF_FAST_FREE flag if none of
+                       the tasks up to the task transmitting to the port
+                       use refcnt and, per queue, all mbufs come from the same mempool. */
+                       if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
+                               if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL))
+                                       prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+                       }
+#endif
+               }
+       }
+}
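
DEV_TX_OFFLOAD_MBUF_FAST_FREE (new in DPDK 18.05) lets the PMD free transmitted mbufs without checking reference counts, which is only safe when no task in the chain holds extra references and all mbufs on a queue come from a single mempool; hence the two chain_flag_never_set() checks above. A minimal sketch of requesting it at port-configuration time, assuming DPDK >= 18.05; the port id and queue counts are placeholders:

    #include <rte_ethdev.h>

    /* Request fast-free on a port if the device reports support for it. */
    static int request_fast_free(uint16_t port_id, struct rte_eth_conf *conf)
    {
        struct rte_eth_dev_info dev_info;

        rte_eth_dev_info_get(port_id, &dev_info);
        if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
            conf->txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
        return rte_eth_dev_configure(port_id, 1 /* rxq */, 1 /* txq */, conf);
    }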
+
+static void configure_multi_segments(void)
+{
+       struct lcore_cfg *lconf = NULL;
+       struct task_args *targ;
+       uint8_t if_port;
+
+       while (core_targ_next(&lconf, &targ, 0) == 0) {
+               for (uint8_t i = 0; i < targ->nb_txports; ++i) {
+                       if_port = targ->tx_port_queue[i].port;
+                       // Multi segment is disabled for most tasks. It is only enabled for tasks requiring big packets.
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
+                       // We can only enable "no multi segment" if no such task exists in the chain of tasks.
+                       if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
+                               prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+                       }
+#else
+                       // We enable "multi segment" if at least one task requires it in the chain of tasks.
+                       if (chain_flag_sometimes_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
+                               prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MULTI_SEGS;
+                       }
+#endif
+               }
        }
 }
 
@@ -479,6 +560,8 @@ static struct rte_ring *init_ring_between_tasks(struct lcore_cfg *lconf, struct
                PROX_ASSERT(dtarg->nb_rxrings < MAX_RINGS_PER_TASK);
                dtarg->rx_rings[dtarg->nb_rxrings] = ring;
                ++dtarg->nb_rxrings;
+               if (dtarg->nb_rxrings > 1)
+                       dtarg->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL;
        }
        dtarg->nb_slave_threads = starg->core_task_set[idx].n_elems;
        dtarg->lb_friend_core = lconf->id;
@@ -522,7 +605,7 @@ static void init_rings(void)
        lconf = NULL;
        struct prox_port_cfg *port;
        while (core_targ_next(&lconf, &starg, 1) == 0) {
-               if ((starg->task_init) && (starg->task_init->flag_features & TASK_FEATURE_L3)) {
+               if ((starg->task_init) && (starg->flags & TASK_ARG_L3)) {
                        struct core_task ct;
                        ct.core = prox_cfg.master;
                        ct.task = 0;
@@ -541,13 +624,14 @@ static void shuffle_mempool(struct rte_mempool* mempool, uint32_t nb_mbuf)
        struct rte_mbuf** pkts = prox_zmalloc(nb_mbuf * sizeof(*pkts), rte_socket_id());
        uint64_t got = 0;
 
-       while (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0)
+       while ((got < nb_mbuf) && (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0))
                ++got;
 
+       nb_mbuf = got;
        while (got) {
                int idx;
                do {
-                       idx = rand() % nb_mbuf - 1;
+                       idx = rand() % nb_mbuf;
                } while (pkts[idx] == 0);
 
                rte_mempool_put_bulk(mempool, (void**)&pkts[idx], 1);
@@ -557,6 +641,50 @@ static void shuffle_mempool(struct rte_mempool* mempool, uint32_t nb_mbuf)
        prox_free(pkts);
 }
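
The shuffle fix above corrects two bugs: rand() % nb_mbuf - 1 parses as (rand() % nb_mbuf) - 1, so idx could be -1 (out of bounds) and index nb_mbuf - 1 was never drawn; and the drain loop could overrun pkts[] when the pool held more buffers than nb_mbuf. A condensed standalone version of the corrected logic, with the mempool modelled as a plain counter:

    #include <stdlib.h>
    #include <stdio.h>

    enum { POOL_COUNT = 16, ARRAY_SIZE = 8 };  /* pool holds more buffers than the array */

    int main(void)
    {
        int pkts[ARRAY_SIZE] = { 0 };
        unsigned got = 0, nb = ARRAY_SIZE;

        /* Drain phase: the added (got < nb) guard stops before overrunning pkts[],
           even though the "pool" could still hand out more buffers. */
        while (got < nb && got < POOL_COUNT) {
            pkts[got] = 1000 + got;               /* stand-in for rte_mempool_get_bulk() */
            ++got;
        }

        nb = got;                                 /* shuffle only over what was taken */
        while (got) {
            int idx;
            do {
                idx = rand() % nb;                /* fixed: uniform in [0, nb), never -1 */
            } while (pkts[idx] == 0);
            printf("put back %d\n", pkts[idx]);   /* stand-in for rte_mempool_put_bulk() */
            pkts[idx] = 0;
            --got;
        }
        return 0;
    }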
 
+static void set_mbuf_size(struct task_args *targ)
+{
+       /* mbuf size can be set
+        *  - from the config file (highest priority, overriding any other config) - should only be used as a workaround
+        *  - defaulted to MBUF_SIZE.
+        * Except if set explicitly, ensure that the size is big enough for the vmxnet3 driver
+        */
+       if (targ->mbuf_size)
+               return;
+
+       targ->mbuf_size = MBUF_SIZE;
+       struct prox_port_cfg *port;
+       uint16_t max_frame_size = 0, min_buffer_size = 0;
+       int i40e = 0;
+       for (int i = 0; i < targ->nb_rxports; i++) {
+               uint8_t if_port = targ->rx_port_queue[i].port;
+
+               if (if_port == OUT_DISCARD) {
+                       continue;
+               }
+               port = &prox_port_cfg[if_port];
+               if (max_frame_size < port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE)
+                       max_frame_size = port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
+               if (min_buffer_size < port->min_rx_bufsize)
+                       min_buffer_size = port->min_rx_bufsize;
+
+               // Check whether we receive from i40e. This driver has extra mbuf size requirements
+               if (strcmp(port->short_name, "i40e") == 0)
+                       i40e = 1;
+       }
+       if (i40e) {
+               // i40e supports a maximum of 5 descriptors chained
+               uint16_t required_mbuf_size = RTE_ALIGN(max_frame_size / 5, 128) + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
+               if (required_mbuf_size > targ->mbuf_size) {
+                       targ->mbuf_size = required_mbuf_size;
+                       plog_info("\t\tSetting mbuf_size to %u to support frame_size %u\n", targ->mbuf_size, max_frame_size);
+               }
+       }
+       if (min_buffer_size > targ->mbuf_size) {
+               plog_warn("Mbuf size might be too small. This might result in packet segmentation and memory leaks\n");
+       }
+}
+
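
A worked instance of the i40e sizing rule in set_mbuf_size() (RTE_ALIGN rounds up to the given multiple; struct and headroom sizes are the same illustrative assumptions as above): with a 9000-byte MTU the frame is 9026 bytes, 9026 / 5 = 1805 rounds up to 1920, giving 1920 + 128 + 128 = 2176 bytes per mbuf:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(v, a)   (((v) + (a) - 1) / (a) * (a))  /* models RTE_ALIGN */
    #define MBUF_STRUCT_SIZE 128   /* assumption: typical sizeof(struct rte_mbuf) */
    #define PKTMBUF_HEADROOM 128   /* assumption: default RTE_PKTMBUF_HEADROOM */

    /* i40e chains at most 5 descriptors per frame, so one mbuf must hold 1/5 of it. */
    static uint16_t i40e_required_mbuf_size(uint16_t max_frame_size)
    {
        return ALIGN_UP(max_frame_size / 5, 128) + MBUF_STRUCT_SIZE + PKTMBUF_HEADROOM;
    }

    int main(void)
    {
        /* MTU 9000 -> max frame 9026 (14 hdr + 4 crc + 2 * 4 vlan) */
        printf("required mbuf size: %u\n", i40e_required_mbuf_size(9026));  /* 2176 */
        return 0;
    }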
 static void setup_mempools_unique_per_socket(void)
 {
        uint32_t flags = 0;
@@ -574,11 +702,7 @@ static void setup_mempools_unique_per_socket(void)
                uint8_t socket = rte_lcore_to_socket_id(lconf->id);
                PROX_ASSERT(socket < MAX_SOCKETS);
 
-               if (targ->mbuf_size_set_explicitely)
-                       flags = MEMPOOL_F_NO_SPREAD;
-               if ((!targ->mbuf_size_set_explicitely) && (targ->task_init->mbuf_size != 0)) {
-                       targ->mbuf_size = targ->task_init->mbuf_size;
-               }
+               set_mbuf_size(targ);
                if (targ->rx_port_queue[0].port != OUT_DISCARD) {
                        struct prox_port_cfg* port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
                        PROX_ASSERT(targ->nb_mbuf != 0);
@@ -595,10 +719,6 @@ static void setup_mempools_unique_per_socket(void)
                                PROX_PANIC(mbuf_size[socket] != targ->mbuf_size,
                                           "all mbuf_size must have the same size if using a unique mempool per socket\n");
                        }
-                       if ((!targ->mbuf_size_set_explicitely) && (strcmp(port_cfg->short_name, "vmxnet3") == 0)) {
-                               if (mbuf_size[socket] < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
-                                       mbuf_size[socket] = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
-                       }
                }
        }
        for (int i = 0 ; i < MAX_SOCKETS; i++) {
@@ -647,24 +767,7 @@ static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args
        char memzone_name[64];
        char name[64];
 
-       /* mbuf size can be set
-        *  - from config file (highest priority, overwriting any other config) - should only be used as workaround
-        *  - through each 'mode', overwriting the default mbuf_size
-        *  - defaulted to MBUF_SIZE i.e. 1518 Bytes
-        * Except is set expliciteky, ensure that size is big enough for vmxnet3 driver
-        */
-       if (targ->mbuf_size_set_explicitely) {
-               flags = MEMPOOL_F_NO_SPREAD;
-               /* targ->mbuf_size already set */
-       }
-       else if (targ->task_init->mbuf_size != 0) {
-               /* mbuf_size not set through config file but set through mode */
-               targ->mbuf_size = targ->task_init->mbuf_size;
-       }
-       else if (strcmp(port_cfg->short_name, "vmxnet3") == 0) {
-               if (targ->mbuf_size < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
-                       targ->mbuf_size = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
-       }
+       set_mbuf_size(targ);
 
        /* allocate memory pool for packets */
        PROX_ASSERT(targ->nb_mbuf != 0);
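
The mbuf_size chosen by set_mbuf_size() feeds the pool allocation that follows. As a rough sketch of that relationship using the standard DPDK 18.x helper (the pool name, cache size, and the mapping of mbuf_size onto the data room are illustrative assumptions, not PROX's exact allocation code):

    #include <rte_mbuf.h>

    /* mbuf_size above includes sizeof(struct rte_mbuf) and headroom, as in the
       multi-segment test, so the struct size is subtracted for the data room. */
    static struct rte_mempool *create_rx_pool(uint32_t nb_mbuf, uint32_t mbuf_size, int socket)
    {
        return rte_pktmbuf_pool_create("rx_pool_0" /* placeholder name */,
                                       nb_mbuf - 1, 256 /* per-core cache */,
                                       0 /* no private area */,
                                       mbuf_size - sizeof(struct rte_mbuf),
                                       socket);
    }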
@@ -867,14 +970,13 @@ static void init_lcores(void)
        plog_info("=== Initializing rings on cores ===\n");
        init_rings();
 
+       configure_multi_segments();
+       configure_tx_queue_flags();
+
        plog_info("=== Checking configuration consistency ===\n");
        check_cfg_consistent();
 
        plog_all_rings();
-
-       setup_all_task_structs_early_init();
-       plog_info("=== Initializing tasks ===\n");
-       setup_all_task_structs();
 }
 
 static int setup_prox(int argc, char **argv)
@@ -902,6 +1004,10 @@ static int setup_prox(int argc, char **argv)
        plog_info("=== Initializing ports ===\n");
        init_port_all();
 
+       setup_all_task_structs_early_init();
+       plog_info("=== Initializing tasks ===\n");
+       setup_all_task_structs();
+
        if (prox_cfg.logbuf_size) {
                prox_cfg.logbuf = prox_zmalloc(prox_cfg.logbuf_size, rte_socket_id());
                PROX_PANIC(prox_cfg.logbuf == NULL, "Failed to allocate memory for logbuf with size = %d\n", prox_cfg.logbuf_size);