Support packets in flight
diff --git a/VNFs/DPPD-PROX/main.c b/VNFs/DPPD-PROX/main.c
index 1af49b7..61abe6e 100644
--- a/VNFs/DPPD-PROX/main.c
+++ b/VNFs/DPPD-PROX/main.c
@@ -1,5 +1,5 @@
 /*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -54,6 +54,7 @@
 #endif
 
 uint8_t lb_nb_txrings = 0xff;
+extern const char *git_version;
 struct rte_ring *ctrl_rings[RTE_MAX_LCORE*MAX_TASKS_PER_CORE];
 
 static void __attribute__((noreturn)) prox_usage(const char *prgname)
@@ -115,7 +116,7 @@ static void check_mixed_normal_pipeline(void)
        }
 }
 
-static void check_zero_rx(void)
+static void check_no_rx(void)
 {
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;
@@ -155,7 +156,7 @@ static void check_missing_rx(void)
 {
        struct lcore_cfg *lconf = NULL, *rx_lconf = NULL, *tx_lconf = NULL;
        struct task_args *targ, *rx_targ = NULL, *tx_targ = NULL;
-       uint8_t port_id, rx_port_id, ok;
+       uint8_t port_id, rx_port_id, ok, l3, ndp;
 
        while (core_targ_next(&lconf, &targ, 0) == 0) {
                PROX_PANIC((targ->flags & TASK_ARG_RX_RING) && targ->rx_rings[0] == 0 && !targ->tx_opt_ring_task,
@@ -168,12 +169,17 @@ static void check_missing_rx(void)
 
        lconf = NULL;
        while (core_targ_next(&lconf, &targ, 0) == 0) {
-               if (strcmp(targ->sub_mode_str, "l3") != 0)
+               l3 = ndp = 0;
+               if (strcmp(targ->sub_mode_str, "l3") == 0)
+                       l3 = 1;
+               else if (strcmp(targ->sub_mode_str, "ndp") == 0)
+                       ndp = 1;
+               else
                        continue;
 
-               PROX_PANIC((targ->nb_rxports == 0) && (targ->nb_txports == 0), "L3 task must have a RX or a TX port\n");
-               // If the L3 sub_mode receives from a port, check that there is at least one core/task
-               // transmitting to this port in L3 sub_mode
+               PROX_PANIC((targ->nb_rxports == 0) && (targ->nb_txports == 0), "L3/NDP task must have a RX or a TX port\n");
+               // If the L3/NDP sub_mode receives from a port, check that there is at least one core/task
+               // transmitting to this port in L3/NDP sub_mode
                for (uint8_t i = 0; i < targ->nb_rxports; ++i) {
                        rx_port_id = targ->rx_port_queue[i].port;
                        ok = 0;
@@ -181,35 +187,40 @@ static void check_missing_rx(void)
                        while (core_targ_next(&tx_lconf, &tx_targ, 0) == 0) {
                                if ((port_id = tx_targ->tx_port_queue[0].port) == OUT_DISCARD)
                                        continue;
-                               if ((rx_port_id == port_id) && (tx_targ->flags & TASK_ARG_L3)){
+                               if ((rx_port_id == port_id) &&
+                                       ( ((tx_targ->flags & TASK_ARG_L3) && l3) ||
+                                         ((tx_targ->flags & TASK_ARG_NDP) && ndp) ) ) {
                                        ok = 1;
                                        break;
                                }
                        }
-                       PROX_PANIC(ok == 0, "RX L3 sub mode for port %d on core %d task %d, but no core/task transmitting on that port\n", rx_port_id, lconf->id, targ->id);
+                       PROX_PANIC(ok == 0, "RX %s sub mode for port %d on core %d task %d, but no core/task transmitting on that port\n", l3 ? "l3":"ndp", rx_port_id, lconf->id, targ->id);
                }
 
-               // If the L3 sub_mode transmits to a port, check that there is at least one core/task
-               // receiving from that port in L3 sub_mode.
+               // If the L3/NDP sub_mode transmits to a port, check that there is at least one core/task
+               // receiving from that port in L3/NDP sub_mode.
                if ((port_id = targ->tx_port_queue[0].port) == OUT_DISCARD)
                        continue;
                rx_lconf = NULL;
                ok = 0;
-               plog_info("\tCore %d task %d transmitting to port %d in L3 mode\n", lconf->id, targ->id, port_id);
+               plog_info("\tCore %d task %d transmitting to port %d in %s submode\n", lconf->id, targ->id, port_id, l3 ? "l3":"ndp");
                while (core_targ_next(&rx_lconf, &rx_targ, 0) == 0) {
                        for (uint8_t i = 0; i < rx_targ->nb_rxports; ++i) {
                                rx_port_id = rx_targ->rx_port_queue[i].port;
-                               if ((rx_port_id == port_id) && (rx_targ->flags & TASK_ARG_L3)){
+                               if ((rx_port_id == port_id) &&
+                                       ( ((rx_targ->flags & TASK_ARG_L3) && l3) ||
+                                       ((rx_targ->flags & TASK_ARG_NDP) && ndp) ) ){
                                        ok = 1;
                                        break;
                                }
                        }
                        if (ok == 1) {
-                               plog_info("\tCore %d task %d has found core %d task %d receiving from port %d\n", lconf->id, targ->id, rx_lconf->id, rx_targ->id, port_id);
+                               plog_info("\tCore %d task %d has found core %d task %d receiving from port %d in %s submode\n", lconf->id, targ->id, rx_lconf->id, rx_targ->id, port_id,
+                                       ((rx_targ->flags & TASK_ARG_L3) && l3) ? "l3":"ndp");
                                break;
                        }
                }
-               PROX_PANIC(ok == 0, "L3 sub mode for port %d on core %d task %d, but no core/task receiving on that port\n", port_id, lconf->id, targ->id);
+               PROX_PANIC(ok == 0, "%s sub mode for port %d on core %d task %d, but no core/task receiving on that port\n", l3 ? "l3":"ndp", port_id, lconf->id, targ->id);
        }
 }
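
The check above pairs every l3/ndp receiver with a transmitter on the same port, and vice versa. A minimal standalone sketch of that pairing rule; the task_desc struct and helper name are illustrative, not PROX types:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: a reduced task descriptor, not the PROX task_args. */
struct task_desc {
	uint8_t port;   /* port the task receives from or transmits to */
	bool l3;        /* "l3" sub-mode */
	bool ndp;       /* "ndp" sub-mode */
};

/* True if any peer task uses the same port with the same sub-mode. */
static bool has_submode_peer(const struct task_desc *t,
			     const struct task_desc *peers, int n_peers)
{
	for (int i = 0; i < n_peers; i++) {
		if (peers[i].port != t->port)
			continue;
		if ((peers[i].l3 && t->l3) || (peers[i].ndp && t->ndp))
			return true;
	}
	return false;
}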
 
@@ -217,7 +228,7 @@ static void check_cfg_consistent(void)
 {
        check_nb_mbuf();
        check_missing_rx();
-       check_zero_rx();
+       check_no_rx();
        check_mixed_normal_pipeline();
 }
 
@@ -296,7 +307,7 @@ static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
                }
 #else
                if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
-                       prox_port_cfg[if_port].requested_tx_offload &= ~(DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
+                       prox_port_cfg[if_port].requested_tx_offload &= ~(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
                }
 #endif
        }
@@ -305,6 +316,49 @@ static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
 static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
 {
        struct prox_port_cfg *port;
+       uint8_t port_used_counter[PROX_MAX_PORTS] = {0};
+       bool multiple_port_reference = false;
+       uint8_t total_number_of_queues = 0;
+       // Check how many times a port is referenced for this task
+       for (uint8_t i = 0; i < targ->nb_rxports; i++) {
+               uint8_t if_port = targ->rx_port_queue[i].port;
+               port_used_counter[if_port]++;
+               if (port_used_counter[if_port] > 1) {
+                       multiple_port_reference = true;
+                       port = &prox_port_cfg[if_port];
+                       PROX_PANIC((port->all_rx_queues), "Multiple queues defined in rx port, but all_rx_queues also set for port %s\n", port->names[0]);
+               }
+       }
+       // If only referenced once, it is possible that we want to use all queues
+       // Therefore we will check all_rx_queues for that port
+       if (!multiple_port_reference) {
+               for (uint8_t i = 0; i < PROX_MAX_PORTS; i++) {
+                       uint8_t if_port = targ->rx_port_queue[i].port;
+                       if (port_used_counter[if_port]) {
+                               port = &prox_port_cfg[if_port];
+                               if (port->all_rx_queues) {
+                                       port_used_counter[if_port] = port->max_rxq;
+                                       total_number_of_queues += port->max_rxq;
+                                       plog_info("\tall_rx_queues for Port %s: %u rx_queues will be applied\n", port->names[0], port_used_counter[if_port]);
+                               }
+                       }
+               }
+       }
+       if (total_number_of_queues) {
+               PROX_PANIC((total_number_of_queues > PROX_MAX_PORTS), "%u queues using the all_rx_queues. PROX_MAX_PORTS is set to %u\n", total_number_of_queues, PROX_MAX_PORTS);
+               uint8_t index = 0;
+               for (uint8_t i = 0; i < PROX_MAX_PORTS; i++) {
+                       if (port_used_counter[i]) {
+                               for (uint8_t j = 0; j < port_used_counter[i]; j++) {
+                                       targ->rx_port_queue[index].port = i;
+                                       index ++;
+                               }
+                               port = &prox_port_cfg[i];
+                               plog_info("\t\tConfiguring task to use port %s with %u rx_queues\n", port->names[0], port_used_counter[i]);
+                       }
+               }
+               targ->nb_rxports = index;
+       }
        for (int i = 0; i < targ->nb_rxports; i++) {
                uint8_t if_port = targ->rx_port_queue[i].port;
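
The new block lets a task that references a port once receive from every hardware RX queue of that port when all_rx_queues is set. A minimal sketch of the expansion step, assuming a simplified slot array; queue numbers are assigned later in the real code:

#include <stdint.h>

#define N_SLOTS 64  /* illustrative capacity, not PROX_MAX_PORTS */

struct rx_slot { uint8_t port; };

/* Duplicate the single reference to 'port' into one slot per RX queue.
 * Returns the new number of RX entries (really port/queue pairs). */
static int expand_all_rx_queues(struct rx_slot *slots, uint8_t port, uint8_t max_rxq)
{
	int n = 0;
	for (uint8_t q = 0; q < max_rxq && n < N_SLOTS; q++)
		slots[n++].port = port;
	return n;
}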
 
@@ -371,12 +425,12 @@ static void configure_tx_queue_flags(void)
                                 prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
                         }
 #else
-                        /* Set the DEV_TX_OFFLOAD_MBUF_FAST_FREE flag if none of
+                        /* Set the RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE flag if none of
                         the tasks up to the task transmitting to the port
                         use refcnt and per-queue all mbufs comes from the same mempool. */
                         if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
                                 if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL))
-                                        prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+                                        prox_port_cfg[if_port].requested_tx_offload |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
                         }
 #endif
                 }
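
The fast-free offload is only safe when no task in the chain keeps extra mbuf references and each TX queue is fed from a single mempool, which is what the two chain_flag_never_set() calls assert. A hedged sketch of that condition in isolation; the helper name and parameters are made up for illustration, and the macro assumes a DPDK release with the RTE_ETH_ prefix:

#include <stdint.h>
#include <rte_ethdev.h>

/* Request MBUF_FAST_FREE only when it is safe: no refcounted mbufs and a
 * single mempool per queue. 'dev_capa' would come from the tx_offload_capa
 * reported by rte_eth_dev_info_get(). */
static uint64_t maybe_request_fast_free(uint64_t requested, uint64_t dev_capa,
					int uses_refcnt, int multiple_mempools)
{
	if (!uses_refcnt && !multiple_mempools &&
	    (dev_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
		requested |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	return requested;
}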
@@ -401,7 +455,7 @@ static void configure_multi_segments(void)
 #else
                        // We enable "multi segment" if at least one task requires it in the chain of tasks.
                        if (chain_flag_sometimes_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
-                               prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MULTI_SEGS;
+                               prox_port_cfg[if_port].requested_tx_offload |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
                        }
 #endif
                }
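
The DEV_TX_OFFLOAD_* macros replaced in the hunks above are the legacy spellings; newer DPDK releases (the RTE_ETH_ prefix arrived around 21.11) use RTE_ETH_TX_OFFLOAD_*. If the code also had to build against an older DPDK, a shim like the following could bridge the gap; this is a sketch of one option, not what the patch itself does:

#include <rte_ethdev.h>

/* Map the new names back to the legacy ones when building against a DPDK
 * release that predates the RTE_ETH_ prefix. */
#ifndef RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM     DEV_TX_OFFLOAD_IPV4_CKSUM
#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM      DEV_TX_OFFLOAD_UDP_CKSUM
#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE DEV_TX_OFFLOAD_MBUF_FAST_FREE
#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS     DEV_TX_OFFLOAD_MULTI_SEGS
#endif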
@@ -629,7 +683,7 @@ static void init_rings(void)
        lconf = NULL;
        struct prox_port_cfg *port;
        while (core_targ_next(&lconf, &starg, 1) == 0) {
-               if ((starg->task_init) && (starg->flags & TASK_ARG_L3)) {
+               if ((starg->task_init) && (starg->flags & (TASK_ARG_L3|TASK_ARG_NDP))) {
                        struct core_task ct;
                        ct.core = prox_cfg.master;
                        ct.task = 0;
@@ -750,12 +804,12 @@ static void setup_mempools_unique_per_socket(void)
                        sprintf(name, "socket_%u_pool", i);
                        if ((pool[i] = rte_mempool_lookup(name)) == NULL) {
                                pool[i] = rte_mempool_create(name,
-                                                    mbuf_count[i] - 1, mbuf_size[i],
-                                                    nb_cache_mbuf[i],
-                                                    sizeof(struct rte_pktmbuf_pool_private),
-                                                    rte_pktmbuf_pool_init, NULL,
-                                                    prox_pktmbuf_init, NULL,
-                                                    i, flags);
+                                       mbuf_count[i] - 1, mbuf_size[i],
+                                       nb_cache_mbuf[i],
+                                       sizeof(struct rte_pktmbuf_pool_private),
+                                       rte_pktmbuf_pool_init, NULL,
+                                       prox_pktmbuf_init, NULL,
+                                       i, flags);
                                PROX_PANIC(pool[i] == NULL, "\t\tError: cannot create mempool for socket %u\n", i);
                                plog_info("\tMempool %p size = %u * %u cache %u, socket %d\n", pool[i],
                                        mbuf_count[i], mbuf_size[i], nb_cache_mbuf[i], i);
@@ -802,7 +856,7 @@ static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args
                sprintf(name, "core_%u_task_%u_pool", lconf->id, targ->id);
        }
 
-       snprintf(memzone_name, sizeof(memzone_name)-1, "MP_%s", targ->pool_name);
+       snprintf(memzone_name, sizeof(memzone_name), "MP_%.*s", (int)(sizeof(memzone_name)-4), targ->pool_name);
        mz = rte_memzone_lookup(memzone_name);
 
        if (mz != NULL) {
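
The reworked snprintf() caps the pool name with a %.*s precision so the "MP_" prefix, the name, and the terminating NUL always fit the buffer (and compilers that warn on possible format truncation stay quiet). A small standalone illustration with a made-up buffer size and pool name:

#include <stdio.h>

int main(void)
{
	char memzone_name[32];          /* illustrative size */
	const char *pool_name = "a_rather_long_pool_name_from_the_cfg_file";

	/* 3 bytes for "MP_", 1 for the NUL: cap the copied name accordingly. */
	snprintf(memzone_name, sizeof(memzone_name), "MP_%.*s",
		 (int)(sizeof(memzone_name) - 4), pool_name);
	printf("%s\n", memzone_name);
	return 0;
}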
@@ -937,10 +991,10 @@ static void setup_all_task_structs(void)
 
        while(prox_core_next(&lcore_id, 1) == 0) {
                lconf = &lcore_cfg[lcore_id];
-               plog_info("\tInitializing struct for core %d with %d task\n", lcore_id, lconf->n_tasks_all);
+               plog_info("\t*** Initializing core %d (%d task) ***\n", lcore_id, lconf->n_tasks_all);
                for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                        if (!task_is_master(&lconf->targs[task_id])) {
-                               plog_info("\tInitializing struct for core %d task %d\n", lcore_id, task_id);
+                               plog_info("\t\tInitializing struct for core %d task %d\n", lcore_id, task_id);
                                lconf->targs[task_id].tmaster = tmaster;
                                lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
                        }
@@ -1138,7 +1192,7 @@ static void set_term_env(void)
                plog_info("\tncurses version = %d.%d (%s)\n", max_ver, min_ver, ncurses_version);
        }
 
-       if (((max_ver > 6) || ((max_ver == 6) && (min_ver >= 1))) && (strcmp(old_value, "xterm") == 0)) {
+       if ((old_value) && ((max_ver > 6) || ((max_ver == 6) && (min_ver >= 1))) && (strcmp(old_value, "xterm") == 0)) {
                // On recent OSes such as RHEL 8.0, ncurses(6.1)  introduced support
                // for ECMA-48 repeat character control.
                // Some terminal emulators use TERM=xterm but do not support this feature.
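
The added old_value check guards against an unset TERM: assuming old_value is the result of getenv("TERM") earlier in set_term_env() (not shown in this hunk), getenv() returns NULL when the variable is missing and strcmp() would then dereference NULL. The NULL-safe pattern in isolation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *old_value = getenv("TERM");

	/* Check the pointer before strcmp(): getenv() returns NULL if unset. */
	if (old_value && strcmp(old_value, "xterm") == 0)
		printf("TERM is xterm\n");
	else
		printf("TERM unset or not xterm\n");
	return 0;
}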
@@ -1165,6 +1219,7 @@ int main(int argc, char **argv)
        plog_init(prox_cfg.log_name, prox_cfg.log_name_pid);
        plog_info("=== " PROGRAM_NAME " %s ===\n", VERSION_STR());
        plog_info("\tUsing DPDK %s\n", rte_version() + sizeof(RTE_VER_PREFIX));
+       plog_info("\tgit version %s\n", git_version);
        set_term_env();
        read_rdt_info();
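
git_version is only declared extern earlier in this patch; the definition has to come from the build. A hypothetical example of a generated translation unit that would satisfy the symbol, assuming the build runs something like git describe at compile time; the exact mechanism in samplevnf may differ and the value is a placeholder:

/* git_version.c -- hypothetical generated file, value is a placeholder. */
const char *git_version = "v20.05-42-gdeadbee";

/* A build rule could regenerate it with, for example:
 *   printf 'const char *git_version = "%s";\n' \
 *       "$(git describe --always --dirty)" > git_version.c
 */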