// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_table_hash.h>
#include <rte_memzone.h>
#include <rte_errno.h>

#include "prox_malloc.h"
#include "prox_args.h"
#include "prox_assert.h"
#include "prox_shared.h"
#include "prox_port_cfg.h"
#include "hash_utils.h"
#include "handle_lb_net.h"
#include "prox_cksum.h"
#include "thread_nop.h"
#include "thread_generic.h"
#include "thread_pipeline.h"
#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
#endif

uint8_t lb_nb_txrings = 0xff;
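/* Control rings, indexed by destination core and task
   (ct.core * MAX_TASKS_PER_CORE + ct.task). The master core records
   each control ring it creates here so it can later send control
   messages to worker tasks. */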
struct rte_ring *ctrl_rings[RTE_MAX_LCORE*MAX_TASKS_PER_CORE];
static void __attribute__((noreturn)) prox_usage(const char *prgname)
{
	plog_info("\nUsage: %s [-f CONFIG_FILE] [-a|-e] [-m|-s|-i] [-w DEF] [-u] [-t]\n"
		  "\t-f CONFIG_FILE : configuration file to load, ./prox.cfg by default\n"
		  "\t-l LOG_FILE : log file name, ./prox.log by default\n"
		  "\t-p : include PID in log file name if default log file is used\n"
		  "\t-o DISPLAY : set display to use, can be 'curses' (default), 'cli' or 'none'\n"
		  "\t-v verbosity : initial logging verbosity\n"
		  "\t-a : autostart all cores (default)\n"
		  "\t-e : don't autostart\n"
		  "\t-n : create NULL devices instead of using PCI devices, useful together with -i\n"
		  "\t-m : list supported task modes and exit\n"
		  "\t-s : check configuration file syntax and exit\n"
		  "\t-i : check initialization sequence and exit\n"
		  "\t-u : listen on UDS /tmp/prox.sock\n"
		  "\t-t : listen on TCP port 8474\n"
		  "\t-q : pass argument to Lua interpreter, useful to define variables\n"
		  "\t-w : define variable using syntax varname=value\n"
		  "\t     takes precedence over variables defined in CONFIG_FILE\n"
		  "\t-k : log statistics to file \"stats_dump\" in current directory\n"
		  "\t-d : run as daemon, the parent process will block until PROX is initialized\n"
		  "\t-z : ignore CPU topology, implies -i\n"
		  "\t-r : change initial screen refresh rate; if set to a value lower than 0.001 seconds,\n"
		  "\t     screen refreshing will be disabled\n"
		  , prgname);
	exit(EXIT_FAILURE);
}
static void check_mixed_normal_pipeline(void)
{
	struct lcore_cfg *lconf = NULL;
	uint32_t lcore_id = -1;

	while (prox_core_next(&lcore_id, 0) == 0) {
		lconf = &lcore_cfg[lcore_id];

		int all_thread_nop = 1;
		int generic = 0;
		int pipeline = 0;

		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			struct task_args *targ = &lconf->targs[task_id];
			all_thread_nop = all_thread_nop &&
				targ->task_init->thread_x == thread_nop;

			pipeline = pipeline || targ->task_init->thread_x == thread_pipeline;
			generic = generic || targ->task_init->thread_x == thread_generic;
		}
		PROX_PANIC(generic && pipeline, "Can't run both pipeline and normal thread on same core\n");

		if (all_thread_nop)
			lconf->thread_x = thread_nop;
		else
			lconf->thread_x = thread_generic;
	}
}
static void check_missing_rx(void)
{
	struct lcore_cfg *lconf = NULL;
	struct task_args *targ;

	while (core_targ_next(&lconf, &targ, 0) == 0) {
		PROX_PANIC((targ->flags & TASK_ARG_RX_RING) && targ->rx_rings[0] == 0 && !targ->tx_opt_ring_task,
			   "Configuration Error - Core %u task %u receives from a ring, but no task transmits to it\n", lconf->id, targ->id);
		if (targ->nb_rxports == 0 && targ->nb_rxrings == 0) {
			PROX_PANIC(!task_init_flag_set(targ->task_init, TASK_FEATURE_NO_RX),
				   "\tCore %u task %u: no rx_ports and no rx_rings configured while required by mode %s\n", lconf->id, targ->id, targ->task_init->mode_str);
		}
	}
}
static void check_cfg_consistent(void)
{
	check_missing_rx();
	check_mixed_normal_pipeline();
}
static void plog_all_rings(void)
{
	struct lcore_cfg *lconf = NULL;
	struct task_args *targ;

	while (core_targ_next(&lconf, &targ, 0) == 0) {
		for (uint8_t ring_idx = 0; ring_idx < targ->nb_rxrings; ++ring_idx) {
			plog_info("\tCore %u, task %u, rx_ring[%u] %p\n", lconf->id, targ->id, ring_idx, targ->rx_rings[ring_idx]);
		}
	}
}
static int chain_flag_state(struct task_args *targ, uint64_t flag, int is_set)
{
	if (task_init_flag_set(targ->task_init, flag) == is_set)
		return 1;

	int ret;

	for (uint32_t i = 0; i < targ->n_prev_tasks; ++i) {
		ret = chain_flag_state(targ->prev_tasks[i], flag, is_set);
		if (ret)
			return 1;
	}
	return 0;
}
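/* Reserve a TX queue on each port this task transmits to, and derive
   the port's TX queue flags (refcnt, offloads, multi-segment) from
   the features of the whole chain of tasks feeding that queue. */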
static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
{
	uint8_t if_port;

	for (uint8_t i = 0; i < targ->nb_txports; ++i) {
		if_port = targ->tx_port_queue[i].port;

		PROX_PANIC(if_port == OUT_DISCARD, "port misconfigured, exiting\n");

		PROX_PANIC(!prox_port_cfg[if_port].active, "\tPort %u not used, aborting...\n", if_port);

		int dsocket = prox_port_cfg[if_port].socket;
		if (dsocket != -1 && dsocket != socket) {
			plog_warn("TX core on socket %d while device on socket %d\n", socket, dsocket);
		}

		if (prox_port_cfg[if_port].tx_ring[0] == '\0') { // Rings-backed port can use single queue
			targ->tx_port_queue[i].queue = prox_port_cfg[if_port].n_txq;
			prox_port_cfg[if_port].n_txq++;
		}
		else {
			prox_port_cfg[if_port].n_txq = 1;
			targ->tx_port_queue[i].queue = 0;
		}

		/* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
		   the tasks up to the task transmitting to the port
		   uses refcnt. */
		if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT, 1)) {
			prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
			plog_info("\t\tEnabling No refcnt on port %d\n", if_port);
		}
		else {
			plog_info("\t\tRefcnt used on port %d\n", if_port);
		}

		/* By default OFFLOAD is enabled, but if the whole
		   chain has NOOFFLOADS set all the way until the
		   first task that receives from a port, it will be
		   disabled for the destination port. */
		if (chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS, 1)) {
			prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
			plog_info("\t\tDisabling TX offloads on port %d\n", if_port);
		}
		else {
			plog_info("\t\tEnabling TX offloads on port %d\n", if_port);
		}

		/* By default NOMULTSEGS is disabled, as drivers/NICs might split packets on RX.
		   It should only be enabled when we know for sure that the RX does not split packets.
		   Set the ETH_TXQ_FLAGS_NOMULTSEGS flag if none of the tasks up to the task
		   transmitting to the port uses multsegs. */
		if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, 0)) {
			prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
			plog_info("\t\tEnabling No MultiSegs on port %d\n", if_port);
		}
		else {
			plog_info("\t\tMultiSegs used on port %d\n", if_port);
		}
	}
}
static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
{
	for (int i = 0; i < targ->nb_rxports; i++) {
		uint8_t if_port = targ->rx_port_queue[i].port;

		if (if_port == OUT_DISCARD) {
			continue;
		}

		PROX_PANIC(!prox_port_cfg[if_port].active, "Port %u not used, aborting...\n", if_port);

		if (prox_port_cfg[if_port].rx_ring[0] != '\0') {
			prox_port_cfg[if_port].n_rxq = 0;
		}

		targ->rx_port_queue[i].queue = prox_port_cfg[if_port].n_rxq;
		prox_port_cfg[if_port].pool[targ->rx_port_queue[i].queue] = targ->pool;
		prox_port_cfg[if_port].pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1;
		prox_port_cfg[if_port].n_rxq++;

		int dsocket = prox_port_cfg[if_port].socket;
		if (dsocket != -1 && dsocket != socket) {
			plog_warn("RX core on socket %d while device on socket %d\n", socket, dsocket);
		}
	}
}
static void configure_if_queues(void)
{
	struct lcore_cfg *lconf = NULL;
	struct task_args *targ;
	uint8_t socket;

	while (core_targ_next(&lconf, &targ, 0) == 0) {
		socket = rte_lcore_to_socket_id(lconf->id);

		configure_if_tx_queues(targ, socket);
		configure_if_rx_queues(targ, socket);
	}
}
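/* Generate a short unique name for each ring that is created:
   one-character names first ("A", "B", ...), then two-character
   names once the alphabet below is exhausted. DPDK requires ring
   names to be unique. */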
static const char *gen_ring_name(void)
{
	static char retval[] = "XX";
	static const char* ring_names =
		"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
		"abcdefghijklmnopqrstuvwxyz"
		"[\\]^_`!\"#$%&'()*+,-./:;<="
		">?@{|}0123456789";
	static uint32_t counter = 0;
	uint32_t idx = counter++;

	retval[0] = ring_names[idx % strlen(ring_names)];
	idx /= strlen(ring_names);
	retval[1] = idx ? ring_names[(idx - 1) % strlen(ring_names)] : 0;

	return retval;
}
static int task_is_master(struct task_args *targ)
{
	return targ->lconf->id == prox_cfg.master;
}

struct ring_init_stats {
	uint32_t n_pkt_rings;
	uint32_t n_ctrl_rings;
	uint32_t n_opt_rings;
};

static uint32_t ring_init_stats_total(const struct ring_init_stats *ris)
{
	return ris->n_pkt_rings + ris->n_ctrl_rings + ris->n_opt_rings;
}
static uint32_t count_incoming_tasks(uint32_t lcore_worker, uint32_t dest_task)
{
	struct lcore_cfg *lconf = NULL;
	struct task_args *targ;
	struct core_task ct;
	uint32_t ret = 0;

	while (core_targ_next(&lconf, &targ, 0) == 0) {
		for (uint8_t idxx = 0; idxx < MAX_PROTOCOLS; ++idxx) {
			for (uint8_t ridx = 0; ridx < targ->core_task_set[idxx].n_elems; ++ridx) {
				ct = targ->core_task_set[idxx].core_task[ridx];

				if (dest_task == ct.task && lcore_worker == ct.core)
					ret++;
			}
		}
	}
	return ret;
}
static struct rte_ring *get_existing_ring(uint32_t lcore_id, uint32_t task_id)
{
	if (!prox_core_active(lcore_id, 0))
		return NULL;

	struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

	if (task_id >= lconf->n_tasks_all)
		return NULL;

	if (lconf->targs[task_id].nb_rxrings == 0)
		return NULL;

	return lconf->targs[task_id].rx_rings[0];
}
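/* Create (or reuse) the ring connecting a source task to one
   destination task. Control rings, rings that can be optimized away
   and regular packet rings are all handled here, and the
   ring_init_stats counters are updated accordingly. */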
static void init_ring_between_tasks(struct lcore_cfg *lconf, struct task_args *starg,
				    const struct core_task ct, uint8_t ring_idx, int idx,
				    struct ring_init_stats *ris)
{
	uint8_t socket;
	struct rte_ring *ring = NULL;
	struct lcore_cfg *lworker;
	struct task_args *dtarg;

	PROX_ASSERT(prox_core_active(ct.core, 0));
	lworker = &lcore_cfg[ct.core];

	/* socket used is the one that the sending core resides on */
	socket = rte_lcore_to_socket_id(lconf->id);

	plog_info("\t\tCreating ring on socket %u with size %u\n"
		  "\t\t\tsource core, task and socket = %u, %u, %u\n"
		  "\t\t\tdestination core, task and socket = %u, %u, %u\n"
		  "\t\t\tdestination worker id = %u\n",
		  socket, starg->ring_size,
		  lconf->id, starg->id, socket,
		  ct.core, ct.task, rte_lcore_to_socket_id(ct.core),
		  ring_idx);

	if (ct.type) {
		struct rte_ring **dring = NULL;

		if (ct.type == CTRL_TYPE_MSG)
			dring = &lworker->ctrl_rings_m[ct.task];
		else if (ct.type == CTRL_TYPE_PKT) {
			dring = &lworker->ctrl_rings_p[ct.task];
			starg->flags |= TASK_ARG_CTRL_RINGS_P;
		}

		if (*dring == NULL)
			ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ);
		else
			ring = *dring;
		PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core);

		starg->tx_rings[starg->tot_n_txrings_inited] = ring;
		starg->tot_n_txrings_inited++;
		*dring = ring;
		if (lconf->id == prox_cfg.master) {
			ctrl_rings[ct.core*MAX_TASKS_PER_CORE + ct.task] = ring;
		}

		plog_info("\t\tCore %u task %u -> core %u task %u ctrl_ring %s %p %s\n",
			  lconf->id, starg->id, ct.core, ct.task, ct.type == CTRL_TYPE_PKT?
			  "pkt" : "msg", ring, ring->name);
		ris->n_ctrl_rings++;
		return;
	}

	dtarg = &lworker->targs[ct.task];
	lworker->targs[ct.task].worker_thread_id = ring_idx;
	PROX_ASSERT(dtarg->flags & TASK_ARG_RX_RING);
	PROX_ASSERT(ct.task < lworker->n_tasks_all);

	/* If all the following conditions are met, the ring can be
	   optimized away. */
	if (!task_is_master(starg) && starg->lconf->id == dtarg->lconf->id &&
	    starg->nb_txrings == 1 && idx == 0 && dtarg->task &&
	    dtarg->tot_rxrings == 1 && starg->task == dtarg->task - 1) {
		plog_info("\t\tOptimizing away ring on core %u from task %u to task %u\n",
			  dtarg->lconf->id, starg->task, dtarg->task);
		/* No need to set up ws_mbuf. */
		starg->tx_opt_ring = 1;
		/* During init of destination task, the buffer in the
		   source task will be initialized. */
		dtarg->tx_opt_ring_task = starg;
		ris->n_opt_rings++;
		++dtarg->nb_rxrings;
		return;
	}

	int ring_created = 1;
	/* Only create multi-producer rings if configured to do so AND
	   there is more than one task sending to the task */
	if ((prox_cfg.flags & DSF_MP_RINGS && count_incoming_tasks(ct.core, ct.task) > 1)
	    || (prox_cfg.flags & DSF_ENABLE_BYPASS)) {
		ring = get_existing_ring(ct.core, ct.task);
		if (ring) {
			plog_info("\t\tCore %u task %u reusing MP ring %p to core %u task %u\n",
				  lconf->id, starg->id, ring, ct.core, ct.task);
			ring_created = 0;
		}
		else {
			ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ);
			plog_info("\t\tCore %u task %u creating MP ring %p to core %u task %u\n",
				  lconf->id, starg->id, ring, ct.core, ct.task);
		}
	}
	else
		ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SP_ENQ | RING_F_SC_DEQ);

	PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core);

	starg->tx_rings[starg->tot_n_txrings_inited] = ring;
	starg->tot_n_txrings_inited++;

	if (ring_created) {
		PROX_ASSERT(dtarg->nb_rxrings < MAX_RINGS_PER_TASK);
		dtarg->rx_rings[dtarg->nb_rxrings] = ring;
		++dtarg->nb_rxrings;
	}
	dtarg->nb_slave_threads = starg->core_task_set[idx].n_elems;
	dtarg->lb_friend_core = lconf->id;
	dtarg->lb_friend_task = starg->id;
	plog_info("\t\tWorker thread %d has core %d, task %d as a lb friend\n", ct.core, lconf->id, starg->id);
	plog_info("\t\tCore %u task %u tx_ring[%u] -> core %u task %u rx_ring[%u] %p %s %u WT\n",
		  lconf->id, starg->id, ring_idx, ct.core, ct.task, dtarg->nb_rxrings, ring, ring->name,
		  dtarg->nb_slave_threads);
	ris->n_pkt_rings++;
}
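/* For every task, create the rings towards each of its destinations,
   then log a summary of how many rings of each kind were set up. */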
static void init_rings(void)
{
	struct lcore_cfg *lconf = NULL;
	struct task_args *starg;
	struct ring_init_stats ris = {0};

	while (core_targ_next(&lconf, &starg, 1) == 0) {
		plog_info("\t*** Initializing rings on core %u, task %u ***\n", lconf->id, starg->id);
		for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
			for (uint8_t ring_idx = 0; ring_idx < starg->core_task_set[idx].n_elems; ++ring_idx) {
				PROX_ASSERT(ring_idx < MAX_WT_PER_LB);
				PROX_ASSERT(starg->tot_n_txrings_inited < MAX_RINGS_PER_TASK);

				struct core_task ct = starg->core_task_set[idx].core_task[ring_idx];
				init_ring_between_tasks(lconf, starg, ct, ring_idx, idx, &ris);
			}
		}
	}

	plog_info("\tInitialized %d rings:\n"
		  "\t\tNumber of packet rings: %u\n"
		  "\t\tNumber of control rings: %u\n"
		  "\t\tNumber of optimized rings: %u\n",
		  ring_init_stats_total(&ris),
		  ris.n_pkt_rings, ris.n_ctrl_rings, ris.n_opt_rings);
}
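/* Randomize the order of the mbufs inside a mempool (only used with
   the DSF_SHUFFLE flag): drain the pool completely, then put the
   mbufs back one by one in random order, so that allocation order no
   longer follows memory layout. */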
static void shuffle_mempool(struct rte_mempool* mempool, uint32_t nb_mbuf)
{
	struct rte_mbuf** pkts = prox_zmalloc(nb_mbuf * sizeof(*pkts), rte_socket_id());
	uint64_t got = 0;

	while (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0)
		++got;

	while (got) {
		int idx;
		do {
			idx = rand() % nb_mbuf;
		} while (pkts[idx] == 0);

		rte_mempool_put_bulk(mempool, (void**)&pkts[idx], 1);
		pkts[idx] = 0;
		--got;
	}
	prox_free(pkts);
}
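/* With a unique mempool per socket, all RX tasks on a socket share
   one pool; every task on the socket must therefore agree on the
   mbuf size and cache size, which is enforced below. */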
static void setup_mempools_unique_per_socket(void)
{
	uint32_t flags = 0;
	char name[64];
	struct lcore_cfg *lconf = NULL;
	struct task_args *targ;

	struct rte_mempool *pool[MAX_SOCKETS];
	uint32_t mbuf_count[MAX_SOCKETS] = {0};
	uint32_t nb_cache_mbuf[MAX_SOCKETS] = {0};
	uint32_t mbuf_size[MAX_SOCKETS] = {0};

	while (core_targ_next_early(&lconf, &targ, 0) == 0) {
		PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is mode specified for core %d, task %d ?\n", lconf->id, targ->id);

		uint8_t socket = rte_lcore_to_socket_id(lconf->id);
		PROX_ASSERT(socket < MAX_SOCKETS);

		if (targ->mbuf_size_set_explicitely)
			flags = MEMPOOL_F_NO_SPREAD;
		if ((!targ->mbuf_size_set_explicitely) && (targ->task_init->mbuf_size != 0)) {
			targ->mbuf_size = targ->task_init->mbuf_size;
		}
		if (targ->rx_port_queue[0].port != OUT_DISCARD) {
			struct prox_port_cfg* port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
			PROX_ASSERT(targ->nb_mbuf != 0);
			mbuf_count[socket] += targ->nb_mbuf;
			if (nb_cache_mbuf[socket] == 0)
				nb_cache_mbuf[socket] = targ->nb_cache_mbuf;
			else {
				PROX_PANIC(nb_cache_mbuf[socket] != targ->nb_cache_mbuf,
					   "all mbuf_cache must have the same size if using a unique mempool per socket\n");
			}
			if (mbuf_size[socket] == 0)
				mbuf_size[socket] = targ->mbuf_size;
			else {
				PROX_PANIC(mbuf_size[socket] != targ->mbuf_size,
					   "all mbuf_size must have the same size if using a unique mempool per socket\n");
			}
			if ((!targ->mbuf_size_set_explicitely) && (strcmp(port_cfg->short_name, "vmxnet3") == 0)) {
				if (mbuf_size[socket] < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
					mbuf_size[socket] = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
			}
		}
	}
	for (int i = 0 ; i < MAX_SOCKETS; i++) {
		if (mbuf_count[i] != 0) {
			sprintf(name, "socket_%u_pool", i);
			pool[i] = rte_mempool_create(name,
						     mbuf_count[i] - 1, mbuf_size[i],
						     nb_cache_mbuf[i],
						     sizeof(struct rte_pktmbuf_pool_private),
						     rte_pktmbuf_pool_init, NULL,
						     prox_pktmbuf_init, NULL,
						     i, flags);
			PROX_PANIC(pool[i] == NULL, "\t\tError: cannot create mempool for socket %u\n", i);
			plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", pool[i],
				  mbuf_count[i], mbuf_size[i], nb_cache_mbuf[i], i);

			if (prox_cfg.flags & DSF_SHUFFLE) {
				shuffle_mempool(pool[i], mbuf_count[i]);
			}
		}
	}

	lconf = NULL;
	while (core_targ_next_early(&lconf, &targ, 0) == 0) {
		uint8_t socket = rte_lcore_to_socket_id(lconf->id);

		if (targ->rx_port_queue[0].port != OUT_DISCARD) {
			/* use this pool for the interface that the core is receiving from */
			/* If one core receives from multiple ports, all the ports use the same mempool */
			targ->pool = pool[socket];
			/* Set the number of mbuf to that of the unique mempool, so that used/free counts work */
			targ->nb_mbuf = mbuf_count[socket];
			plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
				  targ->nb_mbuf, mbuf_size[socket], targ->nb_cache_mbuf, socket);
		}
	}
}
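/* Create, or look up, the mempool feeding a single RX task. A pool
   name set in the config is resolved through a memzone lookup so
   that several tasks can share the same pool; otherwise a private
   per-core pool is created. */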
static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args *targ)
{
	const uint8_t socket = rte_lcore_to_socket_id(lconf->id);
	struct prox_port_cfg *port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
	const struct rte_memzone *mz;
	struct rte_mempool *mp = NULL;
	uint32_t flags = 0;
	char memzone_name[64];
	char name[64];

	/* mbuf size can be set
	   - from the config file (highest priority, overriding any other setting) - should only be used as workaround
	   - through each 'mode', overriding the default mbuf_size
	   - defaulted to MBUF_SIZE, i.e. 1518 bytes
	   Unless set explicitly, ensure that the size is big enough for the vmxnet3 driver */
	if (targ->mbuf_size_set_explicitely) {
		flags = MEMPOOL_F_NO_SPREAD;
		/* targ->mbuf_size already set */
	}
	else if (targ->task_init->mbuf_size != 0) {
		/* mbuf_size not set through config file but set through mode */
		targ->mbuf_size = targ->task_init->mbuf_size;
	}
	else if (strcmp(port_cfg->short_name, "vmxnet3") == 0) {
		if (targ->mbuf_size < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
			targ->mbuf_size = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
	}

	/* allocate memory pool for packets */
	PROX_ASSERT(targ->nb_mbuf != 0);

	if (targ->pool_name[0] == '\0') {
		sprintf(name, "core_%u_port_%u_pool", lconf->id, targ->id);
	}

	snprintf(memzone_name, sizeof(memzone_name)-1, "MP_%s", targ->pool_name);
	mz = rte_memzone_lookup(memzone_name);

	if (mz != NULL) {
		mp = (struct rte_mempool*)mz->addr;

		targ->nb_mbuf = mp->size;
		targ->pool = mp;
	}

#ifdef RTE_LIBRTE_IVSHMEM_FALSE
	if (mz != NULL && mp != NULL && mp->phys_addr != mz->ioremap_addr) {
		/* Init mbufs with ioremap_addr for dma */
		mp->phys_addr = mz->ioremap_addr;
		mp->elt_pa[0] = mp->phys_addr + (mp->elt_va_start - (uintptr_t)mp);

		struct prox_pktmbuf_reinit_args init_args;
		init_args.mp = mp;
		init_args.lconf = lconf;

		uint32_t elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
		rte_mempool_obj_iter((void*)mp->elt_va_start, mp->size, elt_sz, 1,
				     mp->elt_pa, mp->pg_num, mp->pg_shift, prox_pktmbuf_reinit, &init_args);
	}
#endif

	/* Use this pool for the interface that the core is
	   receiving from; if one core receives from multiple
	   ports, all the ports use the same mempool */
	if (targ->pool == NULL) {
		plog_info("\t\tCreating mempool with name '%s'\n", name);
		targ->pool = rte_mempool_create(name,
						targ->nb_mbuf - 1, targ->mbuf_size,
						targ->nb_cache_mbuf,
						sizeof(struct rte_pktmbuf_pool_private),
						rte_pktmbuf_pool_init, NULL,
						prox_pktmbuf_init, lconf,
						socket, flags);
	}

	PROX_PANIC(targ->pool == NULL,
		   "\t\tError: cannot create mempool for core %u port %u: %s\n", lconf->id, targ->id, rte_strerror(rte_errno));

	plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
		  targ->nb_mbuf, targ->mbuf_size, targ->nb_cache_mbuf, socket);
	if (prox_cfg.flags & DSF_SHUFFLE) {
		shuffle_mempool(targ->pool, targ->nb_mbuf);
	}
}
static void setup_mempools_multiple_per_socket(void)
{
	struct lcore_cfg *lconf = NULL;
	struct task_args *targ;

	while (core_targ_next_early(&lconf, &targ, 0) == 0) {
		PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is mode specified for core %d, task %d ?\n", lconf->id, targ->id);
		if (targ->rx_port_queue[0].port == OUT_DISCARD)
			continue;
		setup_mempool_for_rx_task(lconf, targ);
	}
}

static void setup_mempools(void)
{
	if (prox_cfg.flags & UNIQUE_MEMPOOL_PER_SOCKET)
		setup_mempools_unique_per_socket();
	else
		setup_mempools_multiple_per_socket();
}
static void set_task_lconf(void)
{
	struct lcore_cfg *lconf;
	uint32_t lcore_id = -1;

	while (prox_core_next(&lcore_id, 0) == 0) {
		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			lconf->targs[task_id].lconf = lconf;
		}
	}
}
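/* Give every task a list of back-pointers (prev_tasks) to the tasks
   transmitting to it; chain_flag_state() above relies on this. */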
static void set_dest_threads(void)
{
	struct lcore_cfg *lconf = NULL;
	struct task_args *targ;

	while (core_targ_next(&lconf, &targ, 0) == 0) {
		for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
			for (uint8_t ring_idx = 0; ring_idx < targ->core_task_set[idx].n_elems; ++ring_idx) {
				struct core_task ct = targ->core_task_set[idx].core_task[ring_idx];

				struct task_args *dest_task = core_targ_get(ct.core, ct.task);
				dest_task->prev_tasks[dest_task->n_prev_tasks++] = targ;
			}
		}
	}
}
static void setup_all_task_structs_early_init(void)
{
	struct lcore_cfg *lconf = NULL;
	struct task_args *targ;

	plog_info("\t*** Calling early init on all tasks ***\n");
	while (core_targ_next(&lconf, &targ, 0) == 0) {
		if (targ->task_init->early_init) {
			targ->task_init->early_init(targ);
		}
	}
}

static void setup_all_task_structs(void)
{
	struct lcore_cfg *lconf;
	uint32_t lcore_id = -1;

	while (prox_core_next(&lcore_id, 0) == 0) {
		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
		}
	}
}
static void init_port_activate(void)
{
	struct lcore_cfg *lconf = NULL;
	struct task_args *targ;
	uint8_t port_id = 0;

	while (core_targ_next_early(&lconf, &targ, 0) == 0) {
		for (int i = 0; i < targ->nb_rxports; i++) {
			port_id = targ->rx_port_queue[i].port;
			prox_port_cfg[port_id].active = 1;
		}

		for (int i = 0; i < targ->nb_txports; i++) {
			port_id = targ->tx_port_queue[i].port;
			prox_port_cfg[port_id].active = 1;
		}
	}
}
/* Initialize cores and allocate mempools */
static void init_lcores(void)
{
	struct lcore_cfg *lconf = 0;
	uint32_t lcore_id = -1;

	while (prox_core_next(&lcore_id, 0) == 0) {
		uint8_t socket = rte_lcore_to_socket_id(lcore_id);
		PROX_PANIC(socket + 1 > MAX_SOCKETS, "Can't configure core %u (on socket %u). MAX_SOCKET is set to %d\n", lcore_id, socket, MAX_SOCKETS);
	}

	/* need to allocate mempools as the first thing to use the lowest possible address range */
	plog_info("=== Initializing mempools ===\n");
	setup_mempools();

	lcore_cfg_alloc_hp();

	set_dest_threads();
	set_task_lconf();

	plog_info("=== Initializing port addresses ===\n");
	init_port_addr();

	plog_info("=== Initializing queue numbers on cores ===\n");
	configure_if_queues();

	plog_info("=== Initializing rings on cores ===\n");
	init_rings();

	plog_info("=== Checking configuration consistency ===\n");
	check_cfg_consistent();

	plog_all_rings();

	setup_all_task_structs_early_init();
	plog_info("=== Initializing tasks ===\n");
	setup_all_task_structs();
}
static int setup_prox(int argc, char **argv)
{
	if (prox_read_config_file() != 0 ||
	    prox_setup_rte(argv[0]) != 0) {
		return -1;
	}

	if (prox_cfg.flags & DSF_CHECK_SYNTAX) {
		plog_info("=== Configuration file syntax has been checked ===\n\n");
		exit(EXIT_SUCCESS);
	}

	init_port_activate();
	plog_info("=== Initializing rte devices ===\n");
	if (!(prox_cfg.flags & DSF_USE_DUMMY_DEVICES))
		init_rte_ring_dev();
	init_rte_dev(prox_cfg.flags & DSF_USE_DUMMY_DEVICES);
	plog_info("=== Calibrating TSC overhead ===\n");
	prox_init_tsc_overhead();
	plog_info("\tTSC running at %"PRIu64" Hz\n", rte_get_tsc_hz());

	init_lcores();
	plog_info("=== Initializing ports ===\n");
	init_port_all();

	if (prox_cfg.logbuf_size) {
		prox_cfg.logbuf = prox_zmalloc(prox_cfg.logbuf_size, rte_socket_id());
		PROX_PANIC(prox_cfg.logbuf == NULL, "Failed to allocate memory for logbuf with size = %d\n", prox_cfg.logbuf_size);
	}

	if (prox_cfg.flags & DSF_CHECK_INIT) {
		plog_info("=== Initialization sequence completed ===\n\n");
		exit(EXIT_SUCCESS);
	}

	/* Current way that works to disable DPDK logging */
	FILE *f = fopen("/dev/null", "r");
	rte_openlog_stream(f);
	plog_info("=== PROX started ===\n");

	return 0;
}
static int success = 0;
static void siguser_handler(int signal)
{
	if (signal == SIGUSR1)
		success = 1;
	else
		success = 0;
}
static void sigabrt_handler(__attribute__((unused)) int signum)
{
	/* restore default disposition for SIGABRT and SIGPIPE */
	signal(SIGABRT, SIG_DFL);
	signal(SIGPIPE, SIG_DFL);

	/* ignore further Ctrl-C */
	signal(SIGINT, SIG_IGN);

	/* more drastic exit on tedious termination signal */
	plog_info("Aborting...\n");
	if (lcore_cfg != NULL) {
		uint32_t lcore_id;
		pthread_t thread_id, tid0, tid = pthread_self();
		memset(&tid0, 0, sizeof(tid0));

		/* cancel all threads except current one */
		lcore_id = -1;
		while (prox_core_next(&lcore_id, 1) == 0) {
			thread_id = lcore_cfg[lcore_id].thread_id;
			if (pthread_equal(thread_id, tid0))
				continue;
			if (pthread_equal(thread_id, tid))
				continue;
			pthread_cancel(thread_id);
		}

		/* wait for cancelled threads to terminate */
		lcore_id = -1;
		while (prox_core_next(&lcore_id, 1) == 0) {
			thread_id = lcore_cfg[lcore_id].thread_id;
			if (pthread_equal(thread_id, tid0))
				continue;
			if (pthread_equal(thread_id, tid))
				continue;
			pthread_join(thread_id, NULL);
		}
	}

	/* close ports on termination signal */
	close_ports_atexit();
}
static void sigterm_handler(int signum)
{
	/* abort on second Ctrl-C */
	if (signum == SIGINT)
		signal(SIGINT, sigabrt_handler);

	/* gracefully quit on harmless termination signal */
	/* ports will subsequently get closed at resulting exit */
	exit(EXIT_SUCCESS);
}
int main(int argc, char **argv)
{
	/* set en_US locale to print big numbers with ',' */
	setlocale(LC_NUMERIC, "en_US.utf-8");

	if (prox_parse_args(argc, argv) != 0) {
		prox_usage(argv[0]);
	}

	plog_init(prox_cfg.log_name, prox_cfg.log_name_pid);
	plog_info("=== " PROGRAM_NAME " " VERSION_STR " ===\n");
	plog_info("\tUsing DPDK %s\n", rte_version() + sizeof(RTE_VER_PREFIX));

	if (prox_cfg.flags & DSF_LIST_TASK_MODES) {
		/* list supported task modes and exit */
		tasks_list();
		return EXIT_SUCCESS;
	}

	/* close ports at normal exit */
	atexit(close_ports_atexit);
	/* gracefully quit on harmless termination signals */
	signal(SIGHUP, sigterm_handler);
	signal(SIGINT, sigterm_handler);
	signal(SIGQUIT, sigterm_handler);
	signal(SIGTERM, sigterm_handler);
	signal(SIGUSR1, sigterm_handler);
	signal(SIGUSR2, sigterm_handler);
	/* more drastic exit on tedious termination signals */
	signal(SIGABRT, sigabrt_handler);
	signal(SIGPIPE, sigabrt_handler);

	if (prox_cfg.flags & DSF_DAEMON) {
		signal(SIGUSR1, siguser_handler);
		signal(SIGUSR2, siguser_handler);
		plog_info("=== Running in Daemon mode ===\n");
		plog_info("\tForking child and waiting for setup completion\n");

		pid_t ppid = getpid();
		pid_t pid = fork();
		if (pid < 0) {
			plog_err("Failed to fork process to run in daemon mode\n");
			return EXIT_FAILURE;
		}

		if (pid == 0) {
			/* forked child: set up and run PROX, signaling
			   the parent once setup has succeeded or failed */
			if (setup_prox(argc, argv) != 0) {
				kill(ppid, SIGUSR2);
				return EXIT_FAILURE;
			}
			else {
				kill(ppid, SIGUSR1);
				run(prox_cfg.flags);
				return EXIT_SUCCESS;
			}
		}
		else {
			/* Before exiting the parent, wait until the
			   child process has finished setting up */
			pause();
			if (prox_cfg.logbuf) {
				file_print(prox_cfg.logbuf);
			}
			return success? EXIT_SUCCESS : EXIT_FAILURE;
		}
	}

	if (setup_prox(argc, argv) != 0)
		return EXIT_FAILURE;

	run(prox_cfg.flags);
	return EXIT_SUCCESS;
}