#include "thread_generic.h"
#include "thread_pipeline.h"
#include "cqm.h"
+#include "handle_master.h"
#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
int all_thread_nop = 1;
int generic = 0;
int pipeline = 0;
+ int l3 = 0;
for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
struct task_args *targ = &lconf->targs[task_id];
- all_thread_nop = all_thread_nop &&
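+ // "l3" is a sub-mode, not a thread type: such tasks must run on the
+ // generic thread even when their thread_x would otherwise be a no-op.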
+ l3 = !strcmp("l3", targ->sub_mode_str);
+ all_thread_nop = all_thread_nop && !l3 &&
targ->task_init->thread_x == thread_nop;
pipeline = pipeline || targ->task_init->thread_x == thread_pipeline;
- generic = generic || targ->task_init->thread_x == thread_generic;
+ generic = generic || targ->task_init->thread_x == thread_generic || l3;
}
PROX_PANIC(generic && pipeline, "Can't run both pipeline and normal thread on same core\n");
}
}
-static void check_missing_rx(void)
+static void check_zero_rx(void)
{
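+ /* A mode flagged TASK_FEATURE_NO_RX never polls its rx ports;
+ configuring rx_ports on such a task is a configuration error. */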
struct lcore_cfg *lconf = NULL;
struct task_args *targ;
+ while (core_targ_next(&lconf, &targ, 0) == 0) {
+ if (targ->nb_rxports != 0) {
+ PROX_PANIC(task_init_flag_set(targ->task_init, TASK_FEATURE_NO_RX),
+ "\tCore %u task %u: rx_ports configured while mode %s does not use it\n", lconf->id, targ->id, targ->task_init->mode_str);
+ }
+ }
+}
+
+static void check_nb_mbuf(void)
+{
+ struct lcore_cfg *lconf = NULL;
+ struct task_args *targ = NULL;
+ uint8_t port_id;
+ int n_txd = 0, n_rxd = 0;
+
+ while (core_targ_next(&lconf, &targ, 0) == 0) {
+ for (uint8_t i = 0; i < targ->nb_txports; ++i) {
+ port_id = targ->tx_port_queue[i].port;
+ n_txd = prox_port_cfg[port_id].n_txd;
+ }
+ for (uint8_t i = 0; i < targ->nb_rxports; ++i) {
+ port_id = targ->rx_port_queue[i].port;
+ n_rxd = prox_port_cfg[port_id].n_rxd;
+ }
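+ // Rough lower bound: the rx and tx descriptor rings of the ports seen
+ // above, the per-task mbuf cache and one in-flight burst can all hold
+ // mbufs at the same time.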
+ if (targ->nb_mbuf <= n_rxd + n_txd + targ->nb_cache_mbuf + MAX_PKT_BURST) {
+ plog_warn("Core %d, task %d might not have enough mbufs (%d) to support %d txd, %d rxd and %d cache_mbuf\n",
+ lconf->id, targ->id, targ->nb_mbuf, n_txd, n_rxd, targ->nb_cache_mbuf);
+ }
+ }
+}
+
+static void check_missing_rx(void)
+{
+ struct lcore_cfg *lconf = NULL, *rx_lconf = NULL, *tx_lconf = NULL;
+ struct task_args *targ, *rx_targ = NULL, *tx_targ = NULL;
+ uint8_t port_id, rx_port_id, ok;
+
while (core_targ_next(&lconf, &targ, 0) == 0) {
PROX_PANIC((targ->flags & TASK_ARG_RX_RING) && targ->rx_rings[0] == 0 && !targ->tx_opt_ring_task,
"Configuration Error - Core %u task %u Receiving from ring, but nobody xmitting to this ring\n", lconf->id, targ->id);
"\tCore %u task %u: no rx_ports and no rx_rings configured while required by mode %s\n", lconf->id, targ->id, targ->task_init->mode_str);
}
}
+
+ lconf = NULL;
+ while (core_targ_next(&lconf, &targ, 0) == 0) {
+ if (strcmp(targ->sub_mode_str, "l3") != 0)
+ continue;
+
+ PROX_PANIC((targ->nb_rxports == 0) && (targ->nb_txports == 0), "L3 task must have an RX or a TX port\n");
+ // If the L3 sub_mode receives from a port, check that there is at least one core/task
+ // transmitting to this port in L3 sub_mode
+ for (uint8_t i = 0; i < targ->nb_rxports; ++i) {
+ rx_port_id = targ->rx_port_queue[i].port;
+ ok = 0;
+ tx_lconf = NULL;
+ while (core_targ_next(&tx_lconf, &tx_targ, 0) == 0) {
+ if ((port_id = tx_targ->tx_port_queue[0].port) == OUT_DISCARD)
+ continue;
+ if ((rx_port_id == port_id) && (tx_targ->flags & TASK_ARG_L3)){
+ ok = 1;
+ break;
+ }
+ }
+ PROX_PANIC(ok == 0, "RX L3 sub mode for port %d on core %d task %d, but no core/task transmitting on that port\n", rx_port_id, lconf->id, targ->id);
+ }
+
+ // If the L3 sub_mode transmits to a port, check that there is at least one core/task
+ // receiving from that port in L3 sub_mode.
+ if ((port_id = targ->tx_port_queue[0].port) == OUT_DISCARD)
+ continue;
+ rx_lconf = NULL;
+ ok = 0;
+ plog_info("\tCore %d task %d transmitting to port %d in L3 mode\n", lconf->id, targ->id, port_id);
+ while (core_targ_next(&rx_lconf, &rx_targ, 0) == 0) {
+ for (uint8_t i = 0; i < rx_targ->nb_rxports; ++i) {
+ rx_port_id = rx_targ->rx_port_queue[i].port;
+ if ((rx_port_id == port_id) && (rx_targ->flags & TASK_ARG_L3)){
+ ok = 1;
+ break;
+ }
+ }
+ if (ok == 1) {
+ plog_info("\tCore %d task %d has found core %d task %d receiving from port %d\n", lconf->id, targ->id, rx_lconf->id, rx_targ->id, port_id);
+ break;
+ }
+ }
+ PROX_PANIC(ok == 0, "L3 sub mode for port %d on core %d task %d, but no core/task receiving on that port\n", port_id, lconf->id, targ->id);
+ }
}
static void check_cfg_consistent(void)
{
+ check_nb_mbuf();
check_missing_rx();
+ check_zero_rx();
check_mixed_normal_pipeline();
}
return 0;
}
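+
+/* chain_flag_state(targ, flag, state) reports whether some task in the chain
+ feeding targ has the feature flag in the given state; the helpers below
+ express the three quantifiers used when configuring the tx ports. */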
+static int chain_flag_always_set(struct task_args *targ, uint64_t flag)
+{
+ return (!chain_flag_state(targ, flag, 0));
+}
+
+static int chain_flag_never_set(struct task_args *targ, uint64_t flag)
+{
+ return (!chain_flag_state(targ, flag, 1));
+}
+
+static int chain_flag_sometimes_set(struct task_args *targ, uint64_t flag)
+{
+ return (chain_flag_state(targ, flag, 1));
+}
+
static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
{
uint8_t if_port;
prox_port_cfg[if_port].n_txq = 1;
targ->tx_port_queue[i].queue = 0;
}
- /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
- the tasks up to the task transmitting to the port
- does not use refcnt. */
- if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT, 1)) {
- prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
- plog_info("\t\tEnabling No refcnt on port %d\n", if_port);
- }
- else {
- plog_info("\t\tRefcnt used on port %d\n", if_port);
- }
-
/* By default OFFLOAD is enabled, but if the whole
chain has NOOFFLOADS set all the way until the
first task that receives from a port, it will be
disabled for the destination port. */
- if (chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS, 1)) {
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
+ if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
- plog_info("\t\tDisabling TX offloads on port %d\n", if_port);
- } else {
- plog_info("\t\tEnabling TX offloads on port %d\n", if_port);
- }
-
- /* By default NOMULTSEGS is disabled, as drivers/NIC might split packets on RX
- It should only be enabled when we know for sure that the RX does not split packets.
- Set the ETH_TXQ_FLAGS_NOMULTSEGS flag if none of the tasks up to the task
- transmitting to the port does not use multsegs. */
- if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, 0)) {
- prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
- plog_info("\t\tEnabling No MultiSegs on port %d\n", if_port);
}
- else {
- plog_info("\t\tMultiSegs used on port %d\n", if_port);
+#else
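+ /* Starting with DPDK 18.08 the txq_flags are replaced by per-port
+ offload flags; drop the checksum offloads instead. */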
+ if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
+ prox_port_cfg[if_port].requested_tx_offload &= ~(DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
}
+#endif
}
}
static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
{
+ struct prox_port_cfg *port;
for (int i = 0; i < targ->nb_rxports; i++) {
uint8_t if_port = targ->rx_port_queue[i].port;
return;
}
- PROX_PANIC(!prox_port_cfg[if_port].active, "Port %u not used, aborting...\n", if_port);
+ port = &prox_port_cfg[if_port];
+ PROX_PANIC(!port->active, "Port %u not used, aborting...\n", if_port);
- if(prox_port_cfg[if_port].rx_ring[0] != '\0') {
- prox_port_cfg[if_port].n_rxq = 0;
+ if(port->rx_ring[0] != '\0') {
+ port->n_rxq = 0;
}
- targ->rx_port_queue[i].queue = prox_port_cfg[if_port].n_rxq;
- prox_port_cfg[if_port].pool[targ->rx_port_queue[i].queue] = targ->pool;
- prox_port_cfg[if_port].pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1;
- prox_port_cfg[if_port].n_rxq++;
+ // If the mbuf size (of the rx task) is not big enough, we might receive multiple segments.
+ // This is usually the case when setting a big MTU, i.e. enabling jumbo frames.
+ // If such packets get transmitted, multi segments will have to be enabled on the TX port.
+ uint16_t max_frame_size = port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
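+ // e.g. an mtu of 1500 gives 1500 + 14 (eth) + 4 (crc) + 2 * 4 (vlan tags) = 1526 bytes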
+ if (max_frame_size + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > targ->mbuf_size) {
+ targ->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTSEGS;
+ }
+ targ->rx_port_queue[i].queue = port->n_rxq;
+ port->pool[targ->rx_port_queue[i].queue] = targ->pool;
+ port->pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1;
+ port->n_rxq++;
- int dsocket = prox_port_cfg[if_port].socket;
+ int dsocket = port->socket;
if (dsocket != -1 && dsocket != socket) {
plog_warn("RX core on socket %d while device on socket %d\n", socket, dsocket);
}
while (core_targ_next(&lconf, &targ, 0) == 0) {
socket = rte_lcore_to_socket_id(lconf->id);
- configure_if_tx_queues(targ, socket);
configure_if_rx_queues(targ, socket);
+ configure_if_tx_queues(targ, socket);
+ }
+}
+
+static void configure_tx_queue_flags(void)
+{
+ struct lcore_cfg *lconf = NULL;
+ struct task_args *targ;
+ uint8_t socket;
+ uint8_t if_port;
+
+ while (core_targ_next(&lconf, &targ, 0) == 0) {
+ socket = rte_lcore_to_socket_id(lconf->id);
+ for (uint8_t i = 0; i < targ->nb_txports; ++i) {
+ if_port = targ->tx_port_queue[i].port;
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
+ /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
+ the tasks up to the task transmitting to the port
+ use refcnt. */
+ if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
+ prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
+ }
+#else
+ /* Set the DEV_TX_OFFLOAD_MBUF_FAST_FREE flag if none of
+ the tasks up to the task transmitting to the port
+ use refcnt and, per queue, all mbufs come from the same mempool. */
+ if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
+ if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL))
+ prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ }
+#endif
+ }
+ }
+}
+
+static void configure_multi_segments(void)
+{
+ struct lcore_cfg *lconf = NULL;
+ struct task_args *targ;
+ uint8_t if_port;
+
+ while (core_targ_next(&lconf, &targ, 0) == 0) {
+ for (uint8_t i = 0; i < targ->nb_txports; ++i) {
+ if_port = targ->tx_port_queue[i].port;
+ // Multi segment is disabled for most tasks. It is only enabled for tasks requiring big packets.
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
+ // We can only enable "no multi segment" if no such task exists in the chain of tasks.
+ if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
+ prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+ }
+#else
+ // We enable "multi segment" if at least one task requires it in the chain of tasks.
+ if (chain_flag_sometimes_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
+ prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ }
+#endif
+ }
}
}
return retval;
}
-static int task_is_master(struct task_args *targ)
-{
- return !targ->lconf;
-}
-
struct ring_init_stats {
uint32_t n_pkt_rings;
uint32_t n_ctrl_rings;
return lconf->targs[task_id].rx_rings[0];
}
-static void init_ring_between_tasks(struct lcore_cfg *lconf, struct task_args *starg,
+static struct rte_ring *init_ring_between_tasks(struct lcore_cfg *lconf, struct task_args *starg,
const struct core_task ct, uint8_t ring_idx, int idx,
struct ring_init_stats *ris)
{
*dring = ring;
if (lconf->id == prox_cfg.master) {
ctrl_rings[ct.core*MAX_TASKS_PER_CORE + ct.task] = ring;
+ } else if (ct.core == prox_cfg.master) {
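+ // A ring whose destination is the master is also remembered on the
+ // source task, as its ring towards the control plane.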
+ starg->ctrl_plane_ring = ring;
}
- plog_info("\t\tCore %u task %u to -> core %u task %u ctrl_ring %s %p %s\n",
+ plog_info("\t\t\tCore %u task %u to -> core %u task %u ctrl_ring %s %p %s\n",
lconf->id, starg->id, ct.core, ct.task, ct.type == CTRL_TYPE_PKT?
"pkt" : "msg", ring, ring->name);
ris->n_ctrl_rings++;
- return;
+ return ring;
}
dtarg = &lworker->targs[ct.task];
/* If all the following conditions are met, the ring can be
optimized away. */
- if (!task_is_master(starg) && starg->lconf->id == dtarg->lconf->id &&
+ if (!task_is_master(starg) && !task_is_master(dtarg) && starg->lconf->id == dtarg->lconf->id &&
starg->nb_txrings == 1 && idx == 0 && dtarg->task &&
dtarg->tot_rxrings == 1 && starg->task == dtarg->task - 1) {
plog_info("\t\tOptimizing away ring on core %u from task %u to task %u\n",
dtarg->tx_opt_ring_task = starg;
ris->n_opt_rings++;
++dtarg->nb_rxrings;
- return;
+ return NULL;
}
int ring_created = 1;
PROX_ASSERT(dtarg->nb_rxrings < MAX_RINGS_PER_TASK);
dtarg->rx_rings[dtarg->nb_rxrings] = ring;
++dtarg->nb_rxrings;
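+ // Several rx rings means mbufs from several mempools may reach this task,
+ // which rules out DEV_TX_OFFLOAD_MBUF_FAST_FREE on its tx port.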
+ if (dtarg->nb_rxrings > 1)
+ dtarg->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL;
}
dtarg->nb_slave_threads = starg->core_task_set[idx].n_elems;
dtarg->lb_friend_core = lconf->id;
lconf->id, starg->id, ring_idx, ct.core, ct.task, dtarg->nb_rxrings, ring, ring->name,
dtarg->nb_slave_threads);
++ris->n_pkt_rings;
+ return ring;
}
static void init_rings(void)
ris.n_pkt_rings,
ris.n_ctrl_rings,
ris.n_opt_rings);
+
+ lconf = NULL;
+ struct prox_port_cfg *port;
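+ // Wire each L3 task to the master with a pair of ctrl rings, one in each
+ // direction; the master presumably uses them for control traffic such as ARP.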
+ while (core_targ_next(&lconf, &starg, 1) == 0) {
+ if ((starg->task_init) && (starg->flags & TASK_ARG_L3)) {
+ struct core_task ct;
+ ct.core = prox_cfg.master;
+ ct.task = 0;
+ ct.type = CTRL_TYPE_PKT;
+ struct rte_ring *rx_ring = init_ring_between_tasks(lconf, starg, ct, 0, 0, &ris);
+
+ ct.core = lconf->id;
+ ct.task = starg->id;
+ struct rte_ring *tx_ring = init_ring_between_tasks(&lcore_cfg[prox_cfg.master], lcore_cfg[prox_cfg.master].targs, ct, 0, 0, &ris);
+ }
+ }
}
static void shuffle_mempool(struct rte_mempool* mempool, uint32_t nb_mbuf)
struct rte_mbuf** pkts = prox_zmalloc(nb_mbuf * sizeof(*pkts), rte_socket_id());
uint64_t got = 0;
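+ /* Drain the pool (at most nb_mbuf mbufs), then free the mbufs back
+ one by one in random order. */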
- while (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0)
+ while ((got < nb_mbuf) && (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0))
++got;
+ nb_mbuf = got;
while (got) {
int idx;
do {
- idx = rand() % nb_mbuf - 1;
+ idx = rand() % nb_mbuf;
} while (pkts[idx] == 0);
rte_mempool_put_bulk(mempool, (void**)&pkts[idx], 1);
prox_free(pkts);
}
+static void set_mbuf_size(struct task_args *targ)
+{
+ /* mbuf size can be set
+ * - from the config file (highest priority, overriding any other config) - should only be used as a workaround
+ * - defaulted to MBUF_SIZE otherwise.
+ * Unless set explicitly, ensure that the size is big enough for the port driver's requirements
+ */
+ if (targ->mbuf_size)
+ return;
+
+ targ->mbuf_size = MBUF_SIZE;
+ struct prox_port_cfg *port;
+ uint16_t max_frame_size = 0, min_buffer_size = 0;
+ int i40e = 0;
+ for (int i = 0; i < targ->nb_rxports; i++) {
+ uint8_t if_port = targ->rx_port_queue[i].port;
+
+ if (if_port == OUT_DISCARD) {
+ continue;
+ }
+ port = &prox_port_cfg[if_port];
+ if (max_frame_size < port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE)
+ max_frame_size = port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
+ if (min_buffer_size < port->min_rx_bufsize)
+ min_buffer_size = port->min_rx_bufsize;
+
+ // Check whether we receive from i40e. This driver has extra mbuf size requirements
+ if (strcmp(port->short_name, "i40e") == 0)
+ i40e = 1;
+ }
+ if (i40e) {
+ // i40e supports a maximum of 5 descriptors chained
+ uint16_t required_mbuf_size = RTE_ALIGN(max_frame_size / 5, 128) + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
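+ // e.g. an mtu of 9000 gives a 9026 byte frame: 9026 / 5 = 1805, aligned up
+ // to 1920; assuming a 128 byte struct rte_mbuf and the default 128 byte
+ // headroom, this requires 2176 byte mbufs.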
+ if (required_mbuf_size > targ->mbuf_size) {
+ targ->mbuf_size = required_mbuf_size;
+ plog_info("\t\tSetting mbuf_size to %u to support frame_size %u\n", targ->mbuf_size, max_frame_size);
+ }
+ }
+ if (min_buffer_size > targ->mbuf_size) {
+ plog_warn("Mbuf size might be too small. This might result in packet segmentation and memory leak\n");
+ }
+}
+
static void setup_mempools_unique_per_socket(void)
{
uint32_t flags = 0;
uint8_t socket = rte_lcore_to_socket_id(lconf->id);
PROX_ASSERT(socket < MAX_SOCKETS);
- if (targ->mbuf_size_set_explicitely)
- flags = MEMPOOL_F_NO_SPREAD;
- if ((!targ->mbuf_size_set_explicitely) && (targ->task_init->mbuf_size != 0)) {
- targ->mbuf_size = targ->task_init->mbuf_size;
- }
+ set_mbuf_size(targ);
if (targ->rx_port_queue[0].port != OUT_DISCARD) {
struct prox_port_cfg* port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
PROX_ASSERT(targ->nb_mbuf != 0);
PROX_PANIC(mbuf_size[socket] != targ->mbuf_size,
"all mbuf_size must have the same size if using a unique mempool per socket\n");
}
- if ((!targ->mbuf_size_set_explicitely) && (strcmp(port_cfg->short_name, "vmxnet3") == 0)) {
- if (mbuf_size[socket] < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
- mbuf_size[socket] = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
- }
}
}
for (int i = 0 ; i < MAX_SOCKETS; i++) {
char memzone_name[64];
char name[64];
- /* mbuf size can be set
- * - from config file (highest priority, overwriting any other config) - should only be used as workaround
- * - through each 'mode', overwriting the default mbuf_size
- * - defaulted to MBUF_SIZE i.e. 1518 Bytes
- * Except is set expliciteky, ensure that size is big enough for vmxnet3 driver
- */
- if (targ->mbuf_size_set_explicitely) {
- flags = MEMPOOL_F_NO_SPREAD;
- /* targ->mbuf_size already set */
- }
- else if (targ->task_init->mbuf_size != 0) {
- /* mbuf_size not set through config file but set through mode */
- targ->mbuf_size = targ->task_init->mbuf_size;
- }
- else if (strcmp(port_cfg->short_name, "vmxnet3") == 0) {
- if (targ->mbuf_size < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
- targ->mbuf_size = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
- }
+ set_mbuf_size(targ);
/* allocate memory pool for packets */
PROX_ASSERT(targ->nb_mbuf != 0);
struct lcore_cfg *lconf;
uint32_t lcore_id = -1;
- while(prox_core_next(&lcore_id, 0) == 0) {
+ while(prox_core_next(&lcore_id, 1) == 0) {
lconf = &lcore_cfg[lcore_id];
for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
lconf->targs[task_id].lconf = lconf;
{
struct lcore_cfg *lconf;
uint32_t lcore_id = -1;
+ struct task_base *tmaster = NULL;
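+ // First pass: initialize only the master task, so its task_base can be
+ // handed to every other task.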
- while(prox_core_next(&lcore_id, 0) == 0) {
+ while(prox_core_next(&lcore_id, 1) == 0) {
lconf = &lcore_cfg[lcore_id];
for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
- lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
+ if (task_is_master(&lconf->targs[task_id])) {
+ plog_info("\tInitializing MASTER struct for core %d task %d\n", lcore_id, task_id);
+ lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
+ tmaster = lconf->tasks_all[task_id];
+ }
+ }
+ }
+ PROX_PANIC(tmaster == NULL, "Can't initialize master task\n");
+ lcore_id = -1;
+
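+ // Second pass: initialize the remaining tasks, each keeping a pointer
+ // to the master task.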
+ while(prox_core_next(&lcore_id, 1) == 0) {
+ lconf = &lcore_cfg[lcore_id];
+ plog_info("\tInitializing struct for core %d with %d task\n", lcore_id, lconf->n_tasks_all);
+ for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
+ if (!task_is_master(&lconf->targs[task_id])) {
+ plog_info("\tInitializing struct for core %d task %d\n", lcore_id, task_id);
+ lconf->targs[task_id].tmaster = tmaster;
+ lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
+ }
}
}
}
plog_info("=== Initializing rings on cores ===\n");
init_rings();
+ configure_multi_segments();
+ configure_tx_queue_flags();
+
plog_info("=== Checking configuration consistency ===\n");
check_cfg_consistent();
plog_all_rings();
-
- setup_all_task_structs_early_init();
- plog_info("=== Initializing tasks ===\n");
- setup_all_task_structs();
}
static int setup_prox(int argc, char **argv)
plog_info("=== Initializing ports ===\n");
init_port_all();
+ setup_all_task_structs_early_init();
+ plog_info("=== Initializing tasks ===\n");
+ setup_all_task_structs();
+
if (prox_cfg.logbuf_size) {
prox_cfg.logbuf = prox_zmalloc(prox_cfg.logbuf_size, rte_socket_id());
PROX_PANIC(prox_cfg.logbuf == NULL, "Failed to allocate memory for logbuf with size = %d\n", prox_cfg.logbuf_size);
}
plog_init(prox_cfg.log_name, prox_cfg.log_name_pid);
- plog_info("=== " PROGRAM_NAME " " VERSION_STR " ===\n");
+ plog_info("=== " PROGRAM_NAME " %s ===\n", VERSION_STR());
plog_info("\tUsing DPDK %s\n", rte_version() + sizeof(RTE_VER_PREFIX));
read_rdt_info();