// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdio.h>
#include <string.h>
#include <locale.h>
#include <unistd.h>
#include <signal.h>

#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_table_hash.h>
#include <rte_memzone.h>
#include <rte_errno.h>

#include "prox_malloc.h"
#include "run.h"
#include "log.h"
#include "prox_args.h"
#include "prox_assert.h"
#include "prox_cfg.h"
#include "prox_shared.h"
#include "prox_port_cfg.h"
#include "hash_utils.h"
#include "handle_lb_net.h"
#include "prox_cksum.h"
#include "thread_nop.h"
#include "thread_generic.h"
#include "thread_pipeline.h"
#include "handle_master.h"

#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
#endif
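
/* ctrl_rings is indexed per destination task, i.e.
   ctrl_rings[core * MAX_TASKS_PER_CORE + task]; it carries control
   messages from the master task to any worker task (filled in by
   init_ring_between_tasks() below). lb_nb_txrings starts at 0xff,
   which appears to act as a "not yet configured" sentinel. */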
uint8_t lb_nb_txrings = 0xff;
struct rte_ring *ctrl_rings[RTE_MAX_LCORE*MAX_TASKS_PER_CORE];

static void __attribute__((noreturn)) prox_usage(const char *prgname)
{
    plog_info("\nUsage: %s [-f CONFIG_FILE] [-a|-e] [-m|-s|-i] [-w DEF] [-u] [-t]\n"
        "\t-f CONFIG_FILE : configuration file to load, ./prox.cfg by default\n"
        "\t-l LOG_FILE : log file name, ./prox.log by default\n"
        "\t-p : include PID in log file name if default log file is used\n"
        "\t-o DISPLAY : set display to use, can be 'curses' (default), 'cli' or 'none'\n"
        "\t-v verbosity : initial logging verbosity\n"
        "\t-a : autostart all cores (default)\n"
        "\t-e : don't autostart\n"
        "\t-n : create NULL devices instead of using PCI devices, useful together with -i\n"
        "\t-m : list supported task modes and exit\n"
        "\t-s : check configuration file syntax and exit\n"
        "\t-i : check initialization sequence and exit\n"
        "\t-u : listen on UDS /tmp/prox.sock\n"
        "\t-t : listen on TCP port 8474\n"
        "\t-q : pass argument to Lua interpreter, useful to define variables\n"
        "\t-w : define variable using syntax varname=value\n"
        "\t     takes precedence over variables defined in CONFIG_FILE\n"
        "\t-k : log statistics to file \"stats_dump\" in current directory\n"
        "\t-d : run as daemon, the parent process will block until PROX is initialized\n"
        "\t-z : ignore CPU topology, implies -i\n"
        "\t-r : change initial screen refresh rate; if set to a value lower than 0.001 seconds,\n"
        "\t     screen refreshing will be disabled\n"
        , prgname);
    exit(EXIT_FAILURE);
}
static void check_mixed_normal_pipeline(void)
{
    struct lcore_cfg *lconf = NULL;
    uint32_t lcore_id = -1;

    while (prox_core_next(&lcore_id, 0) == 0) {
        lconf = &lcore_cfg[lcore_id];

        int all_thread_nop = 1;
        int generic = 0;
        int pipeline = 0;
        int l3 = 0;

        for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
            struct task_args *targ = &lconf->targs[task_id];
            l3 = !strcmp("l3", targ->sub_mode_str);
            all_thread_nop = all_thread_nop && !l3 &&
                targ->task_init->thread_x == thread_nop;

            pipeline = pipeline || targ->task_init->thread_x == thread_pipeline;
            generic = generic || targ->task_init->thread_x == thread_generic || l3;
        }
        PROX_PANIC(generic && pipeline, "Can't run both pipeline and normal thread on same core\n");

        if (all_thread_nop)
            lconf->thread_x = thread_nop;
        else
            lconf->thread_x = thread_generic;
    }
}
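
/* A task whose mode never reads packets from a port (TASK_FEATURE_NO_RX)
   must not have rx ports configured; catching this at init time avoids
   silently ignoring part of the configuration. */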
static void check_zero_rx(void)
{
    struct lcore_cfg *lconf = NULL;
    struct task_args *targ;

    while (core_targ_next(&lconf, &targ, 0) == 0) {
        if (targ->nb_rxports != 0) {
            PROX_PANIC(task_init_flag_set(targ->task_init, TASK_FEATURE_NO_RX),
                "\tCore %u task %u: rx_ports configured while mode %s does not use it\n", lconf->id, targ->id, targ->task_init->mode_str);
        }
    }
}
static void check_missing_rx(void)
{
    struct lcore_cfg *lconf = NULL, *rx_lconf = NULL, *tx_lconf = NULL;
    struct task_args *targ, *rx_targ = NULL, *tx_targ = NULL;
    struct prox_port_cfg *port;
    uint8_t port_id, rx_port_id, ok;

    while (core_targ_next(&lconf, &targ, 0) == 0) {
        PROX_PANIC((targ->flags & TASK_ARG_RX_RING) && targ->rx_rings[0] == 0 && !targ->tx_opt_ring_task,
            "Configuration error - core %u task %u receives from a ring, but no task transmits to that ring\n", lconf->id, targ->id);
        if (targ->nb_rxports == 0 && targ->nb_rxrings == 0) {
            PROX_PANIC(!task_init_flag_set(targ->task_init, TASK_FEATURE_NO_RX),
                "\tCore %u task %u: no rx_ports and no rx_rings configured while required by mode %s\n", lconf->id, targ->id, targ->task_init->mode_str);
        }
    }

    lconf = NULL;
    while (core_targ_next(&lconf, &targ, 0) == 0) {
        if (strcmp(targ->sub_mode_str, "l3") != 0)
            continue;

        // If the L3 sub_mode receives from a port, check that there is at least one core/task
        // transmitting to this port in L3 sub_mode
        for (uint8_t i = 0; i < targ->nb_rxports; ++i) {
            rx_port_id = targ->rx_port_queue[i].port;
            ok = 0;
            tx_lconf = NULL;
            while (core_targ_next(&tx_lconf, &tx_targ, 0) == 0) {
                port = find_reachable_port(tx_targ);
                if (port == NULL)
                    continue;
                port_id = port - prox_port_cfg;
                if ((rx_port_id == port_id) && (tx_targ->flags & TASK_ARG_L3)) {
                    ok = 1;
                    break;
                }
            }
            PROX_PANIC(ok == 0, "RX L3 sub mode for port %d on core %d task %d, but no core/task transmitting on that port\n", rx_port_id, lconf->id, targ->id);
        }

        // If the L3 sub_mode transmits to a port, check that there is at least one core/task
        // receiving from that port in L3 sub_mode.
        port = find_reachable_port(targ);
        if (port == NULL)
            continue;

        port_id = port - prox_port_cfg;
        ok = 0;
        plog_info("\tCore %d task %d transmitting to port %d in L3 mode\n", lconf->id, targ->id, port_id);
        rx_lconf = NULL;
        while (core_targ_next(&rx_lconf, &rx_targ, 0) == 0) {
            for (uint8_t i = 0; i < rx_targ->nb_rxports; ++i) {
                rx_port_id = rx_targ->rx_port_queue[i].port;
                if ((rx_port_id == port_id) && (rx_targ->flags & TASK_ARG_L3)) {
                    ok = 1;
                    break;
                }
            }
            if (ok) {
                plog_info("\tCore %d task %d has found core %d task %d receiving from port %d\n", lconf->id, targ->id, rx_lconf->id, rx_targ->id, port_id);
                break;
            }
        }
        PROX_PANIC(ok == 0, "L3 sub mode for port %d on core %d task %d, but no core/task receiving on that port\n", port_id, lconf->id, targ->id);
    }
}

static void check_cfg_consistent(void)
{
    check_missing_rx();
    check_zero_rx();
    check_mixed_normal_pipeline();
}

static void plog_all_rings(void)
{
    struct lcore_cfg *lconf = NULL;
    struct task_args *targ;

    while (core_targ_next(&lconf, &targ, 0) == 0) {
        for (uint8_t ring_idx = 0; ring_idx < targ->nb_rxrings; ++ring_idx) {
            plog_info("\tCore %u, task %u, rx_ring[%u] %p\n", lconf->id, targ->id, ring_idx, targ->rx_rings[ring_idx]);
        }
    }
}
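
/* Walk the task graph upstream (via prev_tasks, filled in by
   set_dest_threads() below) and report whether this task, or any task
   feeding into it, has `flag` in the state `is_set`. For example,
   chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT, 1) returns 1 as
   soon as one task in the receive chain uses mbuf reference counting. */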
static int chain_flag_state(struct task_args *targ, uint64_t flag, int is_set)
{
    if (task_init_flag_set(targ->task_init, flag) == is_set)
        return 1;

    int ret = 0;

    for (uint32_t i = 0; i < targ->n_prev_tasks; ++i) {
        ret = chain_flag_state(targ->prev_tasks[i], flag, is_set);
        if (ret)
            return 1;
    }
    return 0;
}
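
/* Assign a tx queue on each destination port and derive the per-port txq
   flags (refcount, offloads, multi-segment) from the features of the whole
   task chain that transmits to that port. */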
static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
{
    uint8_t if_port;

    for (uint8_t i = 0; i < targ->nb_txports; ++i) {
        if_port = targ->tx_port_queue[i].port;

        PROX_PANIC(if_port == OUT_DISCARD, "port misconfigured, exiting\n");

        PROX_PANIC(!prox_port_cfg[if_port].active, "\tPort %u not used, skipping...\n", if_port);

        int dsocket = prox_port_cfg[if_port].socket;
        if (dsocket != -1 && dsocket != socket) {
            plog_warn("TX core on socket %d while device on socket %d\n", socket, dsocket);
        }

        if (prox_port_cfg[if_port].tx_ring[0] == '\0') {
            targ->tx_port_queue[i].queue = prox_port_cfg[if_port].n_txq;
            prox_port_cfg[if_port].n_txq++;
        }
        else {
            /* a ring-backed port uses a single tx queue */
            prox_port_cfg[if_port].n_txq = 1;
            targ->tx_port_queue[i].queue = 0;
        }

        /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of the
           tasks up to the task transmitting to the port uses
           refcnt. */
        if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT, 1)) {
            prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
            plog_info("\t\tEnabling No refcnt on port %d\n", if_port);
        }
        else {
            plog_info("\t\tRefcnt used on port %d\n", if_port);
        }

        /* By default OFFLOAD is enabled, but if the whole
           chain has NOOFFLOADS set all the way until the
           first task that receives from a port, it will be
           disabled for the destination port. */
        if (chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS, 1)) {
            prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
            plog_info("\t\tDisabling TX offloads on port %d\n", if_port);
        }
        else {
            plog_info("\t\tEnabling TX offloads on port %d\n", if_port);
        }

        /* By default NOMULTSEGS is disabled, as drivers/NICs might split packets on RX.
           It should only be enabled when we know for sure that the RX does not split packets.
           Set the ETH_TXQ_FLAGS_NOMULTSEGS flag if none of the tasks up to the task
           transmitting to the port uses multsegs. */
        if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, 0)) {
            prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
            plog_info("\t\tEnabling No MultiSegs on port %d\n", if_port);
        }
        else {
            plog_info("\t\tMultiSegs used on port %d\n", if_port);
        }
    }
}
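
/* Assign rx queue numbers and attach each rx queue to the mempool of the
   task reading from it; a ring-backed port has its queue count reset so
   that it ends up with a single queue. */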
static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
{
    for (int i = 0; i < targ->nb_rxports; i++) {
        uint8_t if_port = targ->rx_port_queue[i].port;

        if (if_port == OUT_DISCARD) {
            continue;
        }

        PROX_PANIC(!prox_port_cfg[if_port].active, "Port %u not used, aborting...\n", if_port);

        if (prox_port_cfg[if_port].rx_ring[0] != '\0') {
            prox_port_cfg[if_port].n_rxq = 0;
        }

        targ->rx_port_queue[i].queue = prox_port_cfg[if_port].n_rxq;
        prox_port_cfg[if_port].pool[targ->rx_port_queue[i].queue] = targ->pool;
        prox_port_cfg[if_port].pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1;
        prox_port_cfg[if_port].n_rxq++;

        int dsocket = prox_port_cfg[if_port].socket;
        if (dsocket != -1 && dsocket != socket) {
            plog_warn("RX core on socket %d while device on socket %d\n", socket, dsocket);
        }
    }
}

static void configure_if_queues(void)
{
    struct lcore_cfg *lconf = NULL;
    struct task_args *targ;
    uint8_t socket;

    while (core_targ_next(&lconf, &targ, 0) == 0) {
        socket = rte_lcore_to_socket_id(lconf->id);

        configure_if_tx_queues(targ, socket);
        configure_if_rx_queues(targ, socket);
    }
}
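
/* Every rte_ring needs a unique name; generate short ones by counting
   through the printable character set: "A", "B", ... then two-character
   names once the single characters run out. */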
static const char *gen_ring_name(void)
{
    static char retval[] = "XX";
    static const char* ring_names =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        "abcdefghijklmnopqrstuvwxyz"
        "[\\]^_`!\"#$%&'()*+,-./:;<=";
    static uint32_t ring_idx = 0; /* monotonically increasing name counter */
    uint32_t idx = ring_idx++;    /* work on a copy so the counter survives the division below */

    retval[0] = ring_names[idx % strlen(ring_names)];
    idx /= strlen(ring_names);
    retval[1] = idx ? ring_names[(idx - 1) % strlen(ring_names)] : 0;

    return retval;
}

struct ring_init_stats {
    uint32_t n_pkt_rings;
    uint32_t n_ctrl_rings;
    uint32_t n_opt_rings;
};

static uint32_t ring_init_stats_total(const struct ring_init_stats *ris)
{
    return ris->n_pkt_rings + ris->n_ctrl_rings + ris->n_opt_rings;
}

static uint32_t count_incoming_tasks(uint32_t lcore_worker, uint32_t dest_task)
{
    struct lcore_cfg *lconf = NULL;
    struct task_args *targ;
    uint32_t ret = 0;
    struct core_task ct;

    while (core_targ_next(&lconf, &targ, 0) == 0) {
        for (uint8_t idxx = 0; idxx < MAX_PROTOCOLS; ++idxx) {
            for (uint8_t ridx = 0; ridx < targ->core_task_set[idxx].n_elems; ++ridx) {
                ct = targ->core_task_set[idxx].core_task[ridx];

                if (dest_task == ct.task && lcore_worker == ct.core)
                    ret++;
            }
        }
    }
    return ret;
}

static struct rte_ring *get_existing_ring(uint32_t lcore_id, uint32_t task_id)
{
    if (!prox_core_active(lcore_id, 0))
        return NULL;

    struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

    if (task_id >= lconf->n_tasks_all)
        return NULL;

    if (lconf->targs[task_id].nb_rxrings == 0)
        return NULL;

    return lconf->targs[task_id].rx_rings[0];
}
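
/* Create (or reuse) the ring connecting a source task to the destination
   task described by ct. Three cases are handled below: control rings
   (ct.type set), rings between consecutive tasks on the same core (which
   are optimized away entirely), and regular packet rings. */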
static struct rte_ring *init_ring_between_tasks(struct lcore_cfg *lconf, struct task_args *starg,
    const struct core_task ct, uint8_t ring_idx, int idx,
    struct ring_init_stats *ris)
{
    int socket;
    struct rte_ring *ring = NULL;
    struct lcore_cfg *lworker;
    struct task_args *dtarg;

    PROX_ASSERT(prox_core_active(ct.core, 0));
    lworker = &lcore_cfg[ct.core];

    /* socket used is the one that the sending core resides on */
    socket = rte_lcore_to_socket_id(lconf->id);

    plog_info("\t\tCreating ring on socket %u with size %u\n"
        "\t\t\tsource core, task and socket = %u, %u, %u\n"
        "\t\t\tdestination core, task and socket = %u, %u, %u\n"
        "\t\t\tdestination worker id = %u\n",
        socket, starg->ring_size,
        lconf->id, starg->id, socket,
        ct.core, ct.task, rte_lcore_to_socket_id(ct.core),
        ring_idx);

    if (ct.type) {
        struct rte_ring **dring = NULL;

        if (ct.type == CTRL_TYPE_MSG)
            dring = &lworker->ctrl_rings_m[ct.task];
        else if (ct.type == CTRL_TYPE_PKT) {
            dring = &lworker->ctrl_rings_p[ct.task];
            starg->flags |= TASK_ARG_CTRL_RINGS_P;
        }

        if (*dring == NULL)
            ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ);
        else
            ring = *dring;
        PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core);

        starg->tx_rings[starg->tot_n_txrings_inited] = ring;
        starg->tot_n_txrings_inited++;
        *dring = ring;
        if (lconf->id == prox_cfg.master) {
            ctrl_rings[ct.core*MAX_TASKS_PER_CORE + ct.task] = ring;
        } else if (ct.core == prox_cfg.master) {
            starg->ctrl_plane_ring = ring;
        }

        plog_info("\t\tCore %u task %u -> core %u task %u ctrl_ring %s %p %s\n",
            lconf->id, starg->id, ct.core, ct.task, ct.type == CTRL_TYPE_PKT ?
            "pkt" : "msg", ring, ring->name);
        ris->n_ctrl_rings++;
        return ring;
    }

    dtarg = &lworker->targs[ct.task];
    lworker->targs[ct.task].worker_thread_id = ring_idx;
    PROX_ASSERT(dtarg->flags & TASK_ARG_RX_RING);
    PROX_ASSERT(ct.task < lworker->n_tasks_all);

    /* If all the following conditions are met, the ring can be
       optimized away. */
    if (!task_is_master(starg) && !task_is_master(dtarg) && starg->lconf->id == dtarg->lconf->id &&
        starg->nb_txrings == 1 && idx == 0 && dtarg->task &&
        dtarg->tot_rxrings == 1 && starg->task == dtarg->task - 1) {
        plog_info("\t\tOptimizing away ring on core %u from task %u to task %u\n",
            dtarg->lconf->id, starg->task, dtarg->task);
        /* No need to set up ws_mbuf. */
        starg->tx_opt_ring = 1;
        /* During init of destination task, the buffer in the
           source task will be initialized. */
        dtarg->tx_opt_ring_task = starg;
        ris->n_opt_rings++;
        ++dtarg->nb_rxrings;
        return NULL;
    }

    int ring_created = 1;
    /* Only create multi-producer rings if configured to do so AND
       more than one task is sending to the task */
    if ((prox_cfg.flags & DSF_MP_RINGS && count_incoming_tasks(ct.core, ct.task) > 1)
        || (prox_cfg.flags & DSF_ENABLE_BYPASS)) {
        ring = get_existing_ring(ct.core, ct.task);

        if (ring) {
            plog_info("\t\tCore %u task %u reusing MP ring %p of core %u task %u\n",
                lconf->id, starg->id, ring, ct.core, ct.task);
            ring_created = 0;
        }
        else {
            ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ);
            plog_info("\t\tCore %u task %u created MP ring %p to core %u task %u\n",
                lconf->id, starg->id, ring, ct.core, ct.task);
        }
    }
    else
        ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SP_ENQ | RING_F_SC_DEQ);

    PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core);

    starg->tx_rings[starg->tot_n_txrings_inited] = ring;
    starg->tot_n_txrings_inited++;

    PROX_ASSERT(dtarg->nb_rxrings < MAX_RINGS_PER_TASK);
    dtarg->rx_rings[dtarg->nb_rxrings] = ring;
    ++dtarg->nb_rxrings;

    dtarg->nb_slave_threads = starg->core_task_set[idx].n_elems;
    dtarg->lb_friend_core = lconf->id;
    dtarg->lb_friend_task = starg->id;
    plog_info("\t\tWorker thread %d has core %d, task %d as a lb friend\n", ct.core, lconf->id, starg->id);
    plog_info("\t\tCore %u task %u tx_ring[%u] -> core %u task %u rx_ring[%u] %p %s %u WT\n",
        lconf->id, starg->id, ring_idx, ct.core, ct.task, dtarg->nb_rxrings, ring, ring->name,
        dtarg->nb_slave_threads);
    if (ring_created)
        ris->n_pkt_rings++;
    return ring;
}
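
/* Wire up every source/destination task pair listed in the configuration,
   then add the control-plane rings between the master task and each L3
   task. */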
static void init_rings(void)
{
    struct lcore_cfg *lconf = NULL;
    struct task_args *starg;
    struct ring_init_stats ris = {0};

    while (core_targ_next(&lconf, &starg, 1) == 0) {
        plog_info("\t*** Initializing rings on core %u, task %u ***\n", lconf->id, starg->id);
        for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
            for (uint8_t ring_idx = 0; ring_idx < starg->core_task_set[idx].n_elems; ++ring_idx) {
                PROX_ASSERT(ring_idx < MAX_WT_PER_LB);
                PROX_ASSERT(starg->tot_n_txrings_inited < MAX_RINGS_PER_TASK);

                struct core_task ct = starg->core_task_set[idx].core_task[ring_idx];
                init_ring_between_tasks(lconf, starg, ct, ring_idx, idx, &ris);
            }
        }
    }

    plog_info("\tInitialized %d rings:\n"
        "\t\tNumber of packet rings: %u\n"
        "\t\tNumber of control rings: %u\n"
        "\t\tNumber of optimized rings: %u\n",
        ring_init_stats_total(&ris),
        ris.n_pkt_rings, ris.n_ctrl_rings, ris.n_opt_rings);

    lconf = NULL;
    struct prox_port_cfg *port;
    while (core_targ_next(&lconf, &starg, 1) == 0) {
        if ((starg->task_init) && (starg->flags & TASK_ARG_L3)) {
            struct core_task ct;
            ct.core = prox_cfg.master;
            ct.task = 0;
            ct.type = CTRL_TYPE_PKT;
            struct rte_ring *rx_ring = init_ring_between_tasks(lconf, starg, ct, 0, 0, &ris);

            ct.core = lconf->id;
            ct.task = starg->id;
            struct rte_ring *tx_ring = init_ring_between_tasks(&lcore_cfg[prox_cfg.master], lcore_cfg[prox_cfg.master].targs, ct, 0, 0, &ris);
        }
    }
}
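
/* Drain the whole mempool, then put the mbufs back in random order; used
   under the DSF_SHUFFLE flag, presumably so that tests do not run against
   a perfectly address-ordered pool. */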
static void shuffle_mempool(struct rte_mempool* mempool, uint32_t nb_mbuf)
{
    struct rte_mbuf** pkts = prox_zmalloc(nb_mbuf * sizeof(*pkts), rte_socket_id());
    uint64_t got = 0;

    while (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0)
        ++got;

    while (got) {
        int idx;
        do {
            idx = rand() % nb_mbuf;
        } while (pkts[idx] == 0);

        rte_mempool_put_bulk(mempool, (void**)&pkts[idx], 1);
        pkts[idx] = 0;
        --got;
    }
    prox_free(pkts);
}
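
/* With a single mempool per socket, every task on that socket must agree
   on the mbuf size and cache size; the checks below panic on any
   mismatch. */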
static void setup_mempools_unique_per_socket(void)
{
    uint32_t flags = 0;
    char name[64];
    struct lcore_cfg *lconf = NULL;
    struct task_args *targ;

    struct rte_mempool *pool[MAX_SOCKETS];
    uint32_t mbuf_count[MAX_SOCKETS] = {0};
    uint32_t nb_cache_mbuf[MAX_SOCKETS] = {0};
    uint32_t mbuf_size[MAX_SOCKETS] = {0};

    while (core_targ_next_early(&lconf, &targ, 0) == 0) {
        PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is a mode specified for core %d, task %d?\n", lconf->id, targ->id);
        uint8_t socket = rte_lcore_to_socket_id(lconf->id);
        PROX_ASSERT(socket < MAX_SOCKETS);

        if (targ->mbuf_size_set_explicitely)
            flags = MEMPOOL_F_NO_SPREAD;
        if ((!targ->mbuf_size_set_explicitely) && (targ->task_init->mbuf_size != 0))
            targ->mbuf_size = targ->task_init->mbuf_size;

        if (targ->rx_port_queue[0].port != OUT_DISCARD) {
            struct prox_port_cfg* port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
            PROX_ASSERT(targ->nb_mbuf != 0);
            mbuf_count[socket] += targ->nb_mbuf;
            if (nb_cache_mbuf[socket] == 0)
                nb_cache_mbuf[socket] = targ->nb_cache_mbuf;
            else
                PROX_PANIC(nb_cache_mbuf[socket] != targ->nb_cache_mbuf,
                    "all mbuf_cache must have the same size if using a unique mempool per socket\n");
            if (mbuf_size[socket] == 0)
                mbuf_size[socket] = targ->mbuf_size;
            else
                PROX_PANIC(mbuf_size[socket] != targ->mbuf_size,
                    "all mbuf_size must have the same size if using a unique mempool per socket\n");
            if ((!targ->mbuf_size_set_explicitely) && (strcmp(port_cfg->short_name, "vmxnet3") == 0)) {
                if (mbuf_size[socket] < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
                    mbuf_size[socket] = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
            }
        }
    }
    for (int i = 0; i < MAX_SOCKETS; i++) {
        if (mbuf_count[i] != 0) {
            sprintf(name, "socket_%u_pool", i);
            pool[i] = rte_mempool_create(name,
                mbuf_count[i] - 1, mbuf_size[i],
                nb_cache_mbuf[i],
                sizeof(struct rte_pktmbuf_pool_private),
                rte_pktmbuf_pool_init, NULL,
                prox_pktmbuf_init, NULL,
                i, flags);
            PROX_PANIC(pool[i] == NULL, "\t\tError: cannot create mempool for socket %u\n", i);
            plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", pool[i],
                mbuf_count[i], mbuf_size[i], nb_cache_mbuf[i], i);

            if (prox_cfg.flags & DSF_SHUFFLE) {
                shuffle_mempool(pool[i], mbuf_count[i]);
            }
        }
    }

    lconf = NULL;
    while (core_targ_next_early(&lconf, &targ, 0) == 0) {
        uint8_t socket = rte_lcore_to_socket_id(lconf->id);

        if (targ->rx_port_queue[0].port != OUT_DISCARD) {
            /* use this pool for the interface that the core is receiving from */
            /* If one core receives from multiple ports, all the ports use the same mempool */
            targ->pool = pool[socket];
            /* Set the number of mbuf to the number of the unique mempool, so that the used and free work */
            targ->nb_mbuf = mbuf_count[socket];
            plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
                targ->nb_mbuf, mbuf_size[socket], targ->nb_cache_mbuf, socket);
        }
    }
}

static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args *targ)
{
    const uint8_t socket = rte_lcore_to_socket_id(lconf->id);
    struct prox_port_cfg *port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
    const struct rte_memzone *mz;
    struct rte_mempool *mp = NULL;
    uint32_t flags = 0;
    char memzone_name[64];
    char name[64];

    /* mbuf size can be set:
     * - from the config file (highest priority, overriding any other config) - should only be used as a workaround
     * - through each 'mode', overriding the default mbuf_size
     * - defaulted to MBUF_SIZE, i.e. 1518 bytes
     * Unless set explicitly, ensure that the size is big enough for the vmxnet3 driver.
     */
    if (targ->mbuf_size_set_explicitely) {
        flags = MEMPOOL_F_NO_SPREAD;
        /* targ->mbuf_size already set */
    }
    else if (targ->task_init->mbuf_size != 0) {
        /* mbuf_size not set through config file but set through mode */
        targ->mbuf_size = targ->task_init->mbuf_size;
    }
    else if (strcmp(port_cfg->short_name, "vmxnet3") == 0) {
        if (targ->mbuf_size < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
            targ->mbuf_size = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
    }

    /* allocate memory pool for packets */
    PROX_ASSERT(targ->nb_mbuf != 0);

    if (targ->pool_name[0] == '\0') {
        sprintf(name, "core_%u_port_%u_pool", lconf->id, targ->id);
    }

    snprintf(memzone_name, sizeof(memzone_name)-1, "MP_%s", targ->pool_name);
    mz = rte_memzone_lookup(memzone_name);

    if (mz != NULL) {
        mp = (struct rte_mempool*)mz->addr;

        targ->nb_mbuf = mp->size;
        targ->pool = mp;
    }
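
    /* The block below is compiled out: the _FALSE suffix on the guard
       makes the #ifdef never match. It looks like a leftover IVSHMEM
       workaround that re-targeted mbuf physical addresses at the memzone's
       ioremap address. */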
#ifdef RTE_LIBRTE_IVSHMEM_FALSE
    if (mz != NULL && mp != NULL && mp->phys_addr != mz->ioremap_addr) {
        /* Init mbufs with ioremap_addr for dma */
        mp->phys_addr = mz->ioremap_addr;
        mp->elt_pa[0] = mp->phys_addr + (mp->elt_va_start - (uintptr_t)mp);

        struct prox_pktmbuf_reinit_args init_args;
        init_args.mp = mp;
        init_args.lconf = lconf;

        uint32_t elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
        rte_mempool_obj_iter((void*)mp->elt_va_start, mp->size, elt_sz, 1,
            mp->elt_pa, mp->pg_num, mp->pg_shift, prox_pktmbuf_reinit, &init_args);
    }
#endif

    /* Use this pool for the interface that the core is
       receiving from; if one core receives from multiple
       ports, all the ports use the same mempool */
    if (targ->pool == NULL) {
        plog_info("\t\tCreating mempool with name '%s'\n", name);
        targ->pool = rte_mempool_create(name,
            targ->nb_mbuf - 1, targ->mbuf_size,
            targ->nb_cache_mbuf,
            sizeof(struct rte_pktmbuf_pool_private),
            rte_pktmbuf_pool_init, NULL,
            prox_pktmbuf_init, lconf,
            socket, flags);
    }

    PROX_PANIC(targ->pool == NULL,
        "\t\tError: cannot create mempool for core %u port %u: %s\n", lconf->id, targ->id, rte_strerror(rte_errno));

    plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
        targ->nb_mbuf, targ->mbuf_size, targ->nb_cache_mbuf, socket);
    if (prox_cfg.flags & DSF_SHUFFLE) {
        shuffle_mempool(targ->pool, targ->nb_mbuf);
    }
}

static void setup_mempools_multiple_per_socket(void)
{
    struct lcore_cfg *lconf = NULL;
    struct task_args *targ;

    while (core_targ_next_early(&lconf, &targ, 0) == 0) {
        PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is a mode specified for core %d, task %d?\n", lconf->id, targ->id);
        if (targ->rx_port_queue[0].port == OUT_DISCARD)
            continue;
        setup_mempool_for_rx_task(lconf, targ);
    }
}

static void setup_mempools(void)
{
    if (prox_cfg.flags & UNIQUE_MEMPOOL_PER_SOCKET)
        setup_mempools_unique_per_socket();
    else
        setup_mempools_multiple_per_socket();
}

static void set_task_lconf(void)
{
    struct lcore_cfg *lconf;
    uint32_t lcore_id = -1;

    while (prox_core_next(&lcore_id, 1) == 0) {
        lconf = &lcore_cfg[lcore_id];
        for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
            lconf->targs[task_id].lconf = lconf;
        }
    }
}
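
/* Record the reverse edges of the task graph: for every configured
   source -> destination pair, append the source to the destination's
   prev_tasks list. chain_flag_state() above relies on these edges. */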
static void set_dest_threads(void)
{
    struct lcore_cfg *lconf = NULL;
    struct task_args *targ;

    while (core_targ_next(&lconf, &targ, 0) == 0) {
        for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
            for (uint8_t ring_idx = 0; ring_idx < targ->core_task_set[idx].n_elems; ++ring_idx) {
                struct core_task ct = targ->core_task_set[idx].core_task[ring_idx];

                struct task_args *dest_task = core_targ_get(ct.core, ct.task);
                dest_task->prev_tasks[dest_task->n_prev_tasks++] = targ;
            }
        }
    }
}

static void setup_all_task_structs_early_init(void)
{
    struct lcore_cfg *lconf = NULL;
    struct task_args *targ;

    plog_info("\t*** Calling early init on all tasks ***\n");
    while (core_targ_next(&lconf, &targ, 0) == 0) {
        if (targ->task_init->early_init) {
            targ->task_init->early_init(targ);
        }
    }
}
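
/* The master task struct must exist before any other task is set up,
   because every non-master task stores a pointer to it; hence the two
   passes below. */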
static void setup_all_task_structs(void)
{
    struct lcore_cfg *lconf;
    uint32_t lcore_id = -1;
    struct task_base *tmaster = NULL;

    while (prox_core_next(&lcore_id, 1) == 0) {
        lconf = &lcore_cfg[lcore_id];
        for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
            if (task_is_master(&lconf->targs[task_id])) {
                plog_info("\tInitializing MASTER struct for core %d task %d\n", lcore_id, task_id);
                lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
                tmaster = lconf->tasks_all[task_id];
            }
        }
    }
    PROX_PANIC(tmaster == NULL, "Can't initialize master task\n");

    lcore_id = -1;
    while (prox_core_next(&lcore_id, 1) == 0) {
        lconf = &lcore_cfg[lcore_id];
        plog_info("\tInitializing struct for core %d with %d tasks\n", lcore_id, lconf->n_tasks_all);
        for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
            if (!task_is_master(&lconf->targs[task_id])) {
                plog_info("\tInitializing struct for core %d task %d\n", lcore_id, task_id);
                lconf->targs[task_id].tmaster = tmaster;
                lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
            }
        }
    }
}

static void init_port_activate(void)
{
    struct lcore_cfg *lconf = NULL;
    struct task_args *targ;
    uint8_t port_id;

    while (core_targ_next_early(&lconf, &targ, 0) == 0) {
        for (int i = 0; i < targ->nb_rxports; i++) {
            port_id = targ->rx_port_queue[i].port;
            prox_port_cfg[port_id].active = 1;
        }

        for (int i = 0; i < targ->nb_txports; i++) {
            port_id = targ->tx_port_queue[i].port;
            prox_port_cfg[port_id].active = 1;
        }
    }
}

/* Initialize cores and allocate mempools */
static void init_lcores(void)
{
    struct lcore_cfg *lconf = 0;
    uint32_t lcore_id = -1;

    while (prox_core_next(&lcore_id, 0) == 0) {
        uint8_t socket = rte_lcore_to_socket_id(lcore_id);
        PROX_PANIC(socket + 1 > MAX_SOCKETS, "Can't configure core %u (on socket %u). MAX_SOCKETS is set to %d\n", lcore_id, socket, MAX_SOCKETS);
    }

    /* need to allocate mempools as the first thing to use the lowest possible address range */
    plog_info("=== Initializing mempools ===\n");
    setup_mempools();

    lcore_cfg_alloc_hp();

    set_dest_threads();
    set_task_lconf();

    plog_info("=== Initializing port addresses ===\n");
    init_port_addr();

    plog_info("=== Initializing queue numbers on cores ===\n");
    configure_if_queues();

    plog_info("=== Initializing rings on cores ===\n");
    init_rings();

    plog_info("=== Checking configuration consistency ===\n");
    check_cfg_consistent();

    plog_all_rings();

    setup_all_task_structs_early_init();
    plog_info("=== Initializing tasks ===\n");
    setup_all_task_structs();
}

static int setup_prox(int argc, char **argv)
{
    if (prox_read_config_file() != 0 ||
        prox_setup_rte(argv[0]) != 0) {
        return -1;
    }

    if (prox_cfg.flags & DSF_CHECK_SYNTAX) {
        plog_info("=== Configuration file syntax has been checked ===\n\n");
        exit(EXIT_SUCCESS);
    }

    init_port_activate();
    plog_info("=== Initializing rte devices ===\n");
    if (!(prox_cfg.flags & DSF_USE_DUMMY_DEVICES))
        init_rte_ring_dev();
    init_rte_dev(prox_cfg.flags & DSF_USE_DUMMY_DEVICES);
    plog_info("=== Calibrating TSC overhead ===\n");
    prox_init_tsc_overhead();
    plog_info("\tTSC running at %"PRIu64" Hz\n", rte_get_tsc_hz());

    init_lcores();
    plog_info("=== Initializing ports ===\n");
    init_port_all();

    if (prox_cfg.logbuf_size) {
        prox_cfg.logbuf = prox_zmalloc(prox_cfg.logbuf_size, rte_socket_id());
        PROX_PANIC(prox_cfg.logbuf == NULL, "Failed to allocate memory for logbuf with size = %d\n", prox_cfg.logbuf_size);
    }

    if (prox_cfg.flags & DSF_CHECK_INIT) {
        plog_info("=== Initialization sequence completed ===\n\n");
        exit(EXIT_SUCCESS);
    }

    /* Disable DPDK logging by redirecting its log stream to /dev/null */
    FILE *f = fopen("/dev/null", "w");
    rte_openlog_stream(f);
    plog_info("=== PROX started ===\n");
    return 0;
}
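
/* In daemon mode, the parent blocks until the child reports the outcome of
   setup: SIGUSR1 on success, SIGUSR2 on failure (see main() below). */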
static int success = 0;
static void siguser_handler(int signal)
{
    success = (signal == SIGUSR1);
}

static void sigabrt_handler(__attribute__((unused)) int signum)
{
    /* restore default disposition for SIGABRT and SIGPIPE */
    signal(SIGABRT, SIG_DFL);
    signal(SIGPIPE, SIG_DFL);

    /* ignore further Ctrl-C */
    signal(SIGINT, SIG_IGN);

    /* more drastic exit on tedious termination signal */
    plog_info("Aborting...\n");
    if (lcore_cfg != NULL) {
        uint32_t lcore_id;
        pthread_t thread_id, tid0, tid = pthread_self();
        memset(&tid0, 0, sizeof(tid0));

        /* cancel all threads except current one */
        lcore_id = -1;
        while (prox_core_next(&lcore_id, 1) == 0) {
            thread_id = lcore_cfg[lcore_id].thread_id;
            if (pthread_equal(thread_id, tid0))
                continue;
            if (pthread_equal(thread_id, tid))
                continue;
            pthread_cancel(thread_id);
        }

        /* wait for cancelled threads to terminate */
        lcore_id = -1;
        while (prox_core_next(&lcore_id, 1) == 0) {
            thread_id = lcore_cfg[lcore_id].thread_id;
            if (pthread_equal(thread_id, tid0))
                continue;
            if (pthread_equal(thread_id, tid))
                continue;
            pthread_join(thread_id, NULL);
        }
    }

    /* close ports on termination signal */
    close_ports_atexit();

    /* terminate with the default SIGABRT disposition restored above */
    abort();
}

static void sigterm_handler(int signum)
{
    /* abort on second Ctrl-C */
    if (signum == SIGINT)
        signal(SIGINT, sigabrt_handler);

    /* gracefully quit on harmless termination signal */
    /* ports will subsequently get closed at the resulting exit */
    exit(EXIT_SUCCESS);
}

int main(int argc, char **argv)
{
    /* set en_US locale to print big numbers with ',' */
    setlocale(LC_NUMERIC, "en_US.utf-8");

    if (prox_parse_args(argc, argv) != 0) {
        prox_usage(argv[0]);
    }

    plog_init(prox_cfg.log_name, prox_cfg.log_name_pid);
    plog_info("=== " PROGRAM_NAME " " VERSION_STR " ===\n");
    plog_info("\tUsing DPDK %s\n", rte_version() + sizeof(RTE_VER_PREFIX));

    if (prox_cfg.flags & DSF_LIST_TASK_MODES) {
        /* list supported task modes and exit */
        tasks_list();
        return EXIT_SUCCESS;
    }

    /* close ports at normal exit */
    atexit(close_ports_atexit);
    /* gracefully quit on harmless termination signals */
    signal(SIGHUP, sigterm_handler);
    signal(SIGINT, sigterm_handler);
    signal(SIGQUIT, sigterm_handler);
    signal(SIGTERM, sigterm_handler);
    signal(SIGUSR1, sigterm_handler);
    signal(SIGUSR2, sigterm_handler);
    /* more drastic exit on tedious termination signals */
    signal(SIGABRT, sigabrt_handler);
    signal(SIGPIPE, sigabrt_handler);

    if (prox_cfg.flags & DSF_DAEMON) {
        signal(SIGUSR1, siguser_handler);
        signal(SIGUSR2, siguser_handler);
        plog_info("=== Running in Daemon mode ===\n");
        plog_info("\tForking child and waiting for setup completion\n");
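
        /* Handshake with the child: the child sends SIGUSR1 to the parent
           once setup_prox() has succeeded, or SIGUSR2 if it failed; the
           parent pauses until one of the two arrives (siguser_handler). */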
        pid_t ppid = getpid();
        pid_t pid = fork();
        if (pid < 0) {
            plog_err("Failed to fork process to run in daemon mode\n");
            return EXIT_FAILURE;
        }

        if (pid == 0) {
            fclose(stdin);
            fclose(stdout);
            fclose(stderr);
            if (setsid() < 0) {
                kill(ppid, SIGUSR2);
                return EXIT_FAILURE;
            }
            if (setup_prox(argc, argv) != 0) {
                kill(ppid, SIGUSR2);
                return EXIT_FAILURE;
            }
            kill(ppid, SIGUSR1);
            run(prox_cfg.flags);
            return EXIT_SUCCESS;
        }

        /* Before exiting the parent, wait until the
           child process has finished setting up */
        pause();
        if (prox_cfg.logbuf) {
            file_print(prox_cfg.logbuf);
        }
        return success ? EXIT_SUCCESS : EXIT_FAILURE;
    }

    if (setup_prox(argc, argv) != 0)
        return EXIT_FAILURE;
    run(prox_cfg.flags);
    return EXIT_SUCCESS;
}