2 // Copyright (c) 2010-2020 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
23 #include <rte_cycles.h>
24 #include <rte_atomic.h>
25 #include <rte_table_hash.h>
26 #include <rte_memzone.h>
27 #include <rte_errno.h>
29 #include "prox_malloc.h"
37 #include "prox_args.h"
38 #include "prox_assert.h"
40 #include "prox_shared.h"
41 #include "prox_port_cfg.h"
43 #include "hash_utils.h"
44 #include "handle_lb_net.h"
45 #include "prox_cksum.h"
46 #include "thread_nop.h"
47 #include "thread_generic.h"
48 #include "thread_pipeline.h"
50 #include "handle_master.h"
52 #if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
53 #define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
56 uint8_t lb_nb_txrings = 0xff;
57 extern const char *git_version;
58 struct rte_ring *ctrl_rings[RTE_MAX_LCORE*MAX_TASKS_PER_CORE];
60 static void __attribute__((noreturn)) prox_usage(const char *prgname)
62 plog_info("\nUsage: %s [-f CONFIG_FILE] [-a|-e] [-m|-s|-i] [-w DEF] [-u] [-t]\n"
63 "\t-f CONFIG_FILE : configuration file to load, ./prox.cfg by default\n"
64 "\t-l LOG_FILE : log file name, ./prox.log by default\n"
65 "\t-p : include PID in log file name if default log file is used\n"
66 "\t-o DISPLAY: Set display to use, can be 'curses' (default), 'cli' or 'none'\n"
67 "\t-v verbosity : initial logging verbosity\n"
68 "\t-a : autostart all cores (by default)\n"
69 "\t-e : don't autostart\n"
70 "\t-n : Create NULL devices instead of using PCI devices, useful together with -i\n"
71 "\t-m : list supported task modes and exit\n"
72 "\t-s : check configuration file syntax and exit\n"
73 "\t-i : check initialization sequence and exit\n"
74 "\t-u : Listen on UDS /tmp/prox.sock\n"
75 "\t-t : Listen on TCP port 8474\n"
76 "\t-q : Pass argument to Lua interpreter, useful to define variables\n"
77 "\t-w : define variable using syntax varname=value\n"
78 "\t takes precedence over variables defined in CONFIG_FILE\n"
79 "\t-k : Log statistics to file \"stats_dump\" in current directory\n"
80 "\t-d : Run as daemon, the parent process will block until PROX is not initialized\n"
81 "\t-z : Ignore CPU topology, implies -i\n"
82 "\t-r : Change initial screen refresh rate. If set to a lower than 0.001 seconds,\n"
83 "\t screen refreshing will be disabled\n"
88 static void check_mixed_normal_pipeline(void)
90 struct lcore_cfg *lconf = NULL;
91 uint32_t lcore_id = -1;
93 while (prox_core_next(&lcore_id, 0) == 0) {
94 lconf = &lcore_cfg[lcore_id];
96 int all_thread_nop = 1;
100 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
101 struct task_args *targ = &lconf->targs[task_id];
102 l3 = !strcmp("l3", targ->sub_mode_str);
103 all_thread_nop = all_thread_nop && !l3 &&
104 targ->task_init->thread_x == thread_nop;
106 pipeline = pipeline || targ->task_init->thread_x == thread_pipeline;
107 generic = generic || targ->task_init->thread_x == thread_generic || l3;
109 PROX_PANIC(generic && pipeline, "Can't run both pipeline and normal thread on same core\n");
112 lconf->thread_x = thread_nop;
114 lconf->thread_x = thread_generic;
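/* check_zero_rx() rejects configurations where a task whose mode sets
   TASK_FEATURE_NO_RX (i.e. never polls for packets) still has RX ports
   configured. */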
119 static void check_zero_rx(void)
121 struct lcore_cfg *lconf = NULL;
122 struct task_args *targ;
124 while (core_targ_next(&lconf, &targ, 0) == 0) {
125 if (targ->nb_rxports != 0) {
126 PROX_PANIC(task_init_flag_set(targ->task_init, TASK_FEATURE_NO_RX),
127 "\tCore %u task %u: rx_ports configured while mode %s does not use it\n", lconf->id, targ->id, targ->task_init->mode_str);
132 static void check_nb_mbuf(void)
134 struct lcore_cfg *lconf = NULL;
135 struct task_args *targ = NULL;
137 int n_txd = 0, n_rxd = 0;
139 while (core_targ_next(&lconf, &targ, 0) == 0) {
140 for (uint8_t i = 0; i < targ->nb_txports; ++i) {
141 port_id = targ->tx_port_queue[i].port;
142 n_txd = prox_port_cfg[port_id].n_txd;
144 for (uint8_t i = 0; i < targ->nb_rxports; ++i) {
145 port_id = targ->rx_port_queue[i].port;
146 n_rxd = prox_port_cfg[port_id].n_rxd;
148 if (targ->nb_mbuf <= n_rxd + n_txd + targ->nb_cache_mbuf + MAX_PKT_BURST) {
149 plog_warn("Core %d, task %d might not have enough mbufs (%d) to support %d txd, %d rxd and %d cache_mbuf\n",
150 lconf->id, targ->id, targ->nb_mbuf, n_txd, n_rxd, targ->nb_cache_mbuf);
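/* check_missing_rx() verifies that every ring that is read from has a
   writer and that tasks without RX ports/rings declare TASK_FEATURE_NO_RX.
   For l3/ndp sub-mode tasks it also checks that each RX port has a peer
   task transmitting on it, and each TX port a peer task receiving from it,
   in the same sub-mode. */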
155 static void check_missing_rx(void)
157 struct lcore_cfg *lconf = NULL, *rx_lconf = NULL, *tx_lconf = NULL;
158 struct task_args *targ, *rx_targ = NULL, *tx_targ = NULL;
159 uint8_t port_id, rx_port_id, ok, l3, ndp;
161 while (core_targ_next(&lconf, &targ, 0) == 0) {
162 PROX_PANIC((targ->flags & TASK_ARG_RX_RING) && targ->rx_rings[0] == 0 && !targ->tx_opt_ring_task,
163 "Configuration Error - Core %u task %u Receiving from ring, but nobody xmitting to this ring\n", lconf->id, targ->id);
164 if (targ->nb_rxports == 0 && targ->nb_rxrings == 0) {
165 PROX_PANIC(!task_init_flag_set(targ->task_init, TASK_FEATURE_NO_RX),
166 "\tCore %u task %u: no rx_ports and no rx_rings configured while required by mode %s\n", lconf->id, targ->id, targ->task_init->mode_str);
171 while (core_targ_next(&lconf, &targ, 0) == 0) {
173 if (strcmp(targ->sub_mode_str, "l3") == 0)
175 else if (strcmp(targ->sub_mode_str, "ndp") == 0)
180 PROX_PANIC((targ->nb_rxports == 0) && (targ->nb_txports == 0), "L3/NDP task must have a RX or a TX port\n");
181 // If the L3/NDP sub_mode receives from a port, check that there is at least one core/task
182 // transmitting to this port in L3/NDP sub_mode
183 for (uint8_t i = 0; i < targ->nb_rxports; ++i) {
184 rx_port_id = targ->rx_port_queue[i].port;
187 while (core_targ_next(&tx_lconf, &tx_targ, 0) == 0) {
188 if ((port_id = tx_targ->tx_port_queue[0].port) == OUT_DISCARD)
190 if ((rx_port_id == port_id) &&
191 ( ((tx_targ->flags & TASK_ARG_L3) && l3) ||
192 ((tx_targ->flags & TASK_ARG_NDP) && ndp) ) ) {
197 PROX_PANIC(ok == 0, "RX %s sub mode for port %d on core %d task %d, but no core/task transmitting on that port\n", l3 ? "l3":"ndp", rx_port_id, lconf->id, targ->id);
200 // If the L3/NDP sub_mode transmits to a port, check that there is at least one core/task
201 // receiving from that port in L3/NDP sub_mode.
202 if ((port_id = targ->tx_port_queue[0].port) == OUT_DISCARD)
206 plog_info("\tCore %d task %d transmitting to port %d in %s submode\n", lconf->id, targ->id, port_id, l3 ? "l3":"ndp");
207 while (core_targ_next(&rx_lconf, &rx_targ, 0) == 0) {
208 for (uint8_t i = 0; i < rx_targ->nb_rxports; ++i) {
209 rx_port_id = rx_targ->rx_port_queue[i].port;
210 if ((rx_port_id == port_id) &&
211 ( ((rx_targ->flags & TASK_ARG_L3) && l3) ||
212 ((rx_targ->flags & TASK_ARG_NDP) && ndp) ) ){
218 plog_info("\tCore %d task %d has found core %d task %d receiving from port %d in %s submode\n", lconf->id, targ->id, rx_lconf->id, rx_targ->id, port_id,
219 ((rx_targ->flags & TASK_ARG_L3) && l3) ? "l3":"ndp");
223 PROX_PANIC(ok == 0, "%s sub mode for port %d on core %d task %d, but no core/task receiving on that port\n", l3 ? "l3":"ndp", port_id, lconf->id, targ->id);
227 static void check_cfg_consistent(void)
232 check_mixed_normal_pipeline();
235 static void plog_all_rings(void)
237 struct lcore_cfg *lconf = NULL;
238 struct task_args *targ;
240 while (core_targ_next(&lconf, &targ, 0) == 0) {
241 for (uint8_t ring_idx = 0; ring_idx < targ->nb_rxrings; ++ring_idx) {
242 plog_info("\tCore %u, task %u, rx_ring[%u] %p\n", lconf->id, targ->id, ring_idx, targ->rx_rings[ring_idx]);
247 static int chain_flag_state(struct task_args *targ, uint64_t flag, int is_set)
249 if (task_init_flag_set(targ->task_init, flag) == is_set)
254 for (uint32_t i = 0; i < targ->n_prev_tasks; ++i) {
255 ret = chain_flag_state(targ->prev_tasks[i], flag, is_set);
262 static int chain_flag_always_set(struct task_args *targ, uint64_t flag)
264 return (!chain_flag_state(targ, flag, 0));
267 static int chain_flag_never_set(struct task_args *targ, uint64_t flag)
269 return (!chain_flag_state(targ, flag, 1));
272 static int chain_flag_sometimes_set(struct task_args *targ, uint64_t flag)
274 return (chain_flag_state(targ, flag, 1));
277 static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
281 for (uint8_t i = 0; i < targ->nb_txports; ++i) {
282 if_port = targ->tx_port_queue[i].port;
284 PROX_PANIC(if_port == OUT_DISCARD, "port misconfigured, exiting\n");
286 PROX_PANIC(!prox_port_cfg[if_port].active, "\tPort %u not used, aborting...\n", if_port);
288 int dsocket = prox_port_cfg[if_port].socket;
289 if (dsocket != -1 && dsocket != socket) {
290 plog_warn("TX core on socket %d while device on socket %d\n", socket, dsocket);
293 if (prox_port_cfg[if_port].tx_ring[0] == '\0') { // A ring-backed port can use a single queue
294 targ->tx_port_queue[i].queue = prox_port_cfg[if_port].n_txq;
295 prox_port_cfg[if_port].n_txq++;
297 prox_port_cfg[if_port].n_txq = 1;
298 targ->tx_port_queue[i].queue = 0;
300 /* By default OFFLOAD is enabled, but if the whole
301 chain has NOOFFLOADS set all the way until the
302 first task that receives from a port, it will be
303 disabled for the destination port. */
304 #if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
305 if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
306 prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
309 if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
310 prox_port_cfg[if_port].requested_tx_offload &= ~(DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
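/* configure_if_rx_queues() assigns an RX queue of each port to the task.
   A port referenced once with all_rx_queues set is expanded so the task
   polls every queue of that port; inactive ports cause a panic; if the
   configured mbuf is too small for the port MTU the task chain is marked
   for multi-segment TX; the task's mempool is recorded per RX queue. */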
316 static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
318 struct prox_port_cfg *port;
319 uint8_t port_used_counter[PROX_MAX_PORTS] = {0};
320 bool multiple_port_reference = false;
321 uint8_t total_number_of_queues = 0;
322 // Check how many times a port is referenced for this task
323 for (uint8_t i = 0; i < targ->nb_rxports; i++) {
324 uint8_t if_port = targ->rx_port_queue[i].port;
325 port_used_counter[if_port]++;
326 if (port_used_counter[if_port] > 1) {
327 multiple_port_reference = true;
328 port = &prox_port_cfg[if_port];
329 PROX_PANIC((port->all_rx_queues), "Multiple queues defined in rx port, but all_rx_queues also set for port %s\n", port->names[0]);
332 // If only referenced once, it is possible that we want to use all queues
333 // Therefore we will check all_rx_queues for that port
334 if (!multiple_port_reference) {
335 for (uint8_t i = 0; i < PROX_MAX_PORTS; i++) {
336 uint8_t if_port = targ->rx_port_queue[i].port;
337 if (port_used_counter[if_port]) {
338 port = &prox_port_cfg[if_port];
339 if (port->all_rx_queues) {
340 port_used_counter[if_port] = port->max_rxq;
341 total_number_of_queues += port->max_rxq;
342 plog_info("\tall_rx_queues for Port %s: %u rx_queues will be applied\n", port->names[0], port_used_counter[if_port]);
347 if (total_number_of_queues) {
348 PROX_PANIC((total_number_of_queues > PROX_MAX_PORTS), "%u queues using the all_rx_queues. PROX_MAX_PORTS is set to %u\n", total_number_of_queues, PROX_MAX_PORTS);
350 for (uint8_t i = 0; i < PROX_MAX_PORTS; i++) {
351 if (port_used_counter[i]) {
352 for (uint8_t j = 0; j < port_used_counter[i]; j++) {
353 targ->rx_port_queue[index].port = i;
356 port = &prox_port_cfg[i];
357 plog_info("\t\tConfiguring task to use port %s with %u rx_queues\n", port->names[0], port_used_counter[i]);
360 targ->nb_rxports = index;
362 for (int i = 0; i < targ->nb_rxports; i++) {
363 uint8_t if_port = targ->rx_port_queue[i].port;
365 if (if_port == OUT_DISCARD) {
369 port = &prox_port_cfg[if_port];
370 PROX_PANIC(!port->active, "Port %u not used, aborting...\n", if_port);
372 if(port->rx_ring[0] != '\0') {
376 // If the mbuf size (of the rx task) is not big enough, we might receive multiple segments
377 // This is usually the case when setting a big mtu size i.e. enabling jumbo frames.
378 // If the packets get transmitted, then multi segments will have to be enabled on the TX port
379 uint16_t max_frame_size = port->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
380 if (max_frame_size + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > targ->mbuf_size) {
381 targ->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTSEGS;
383 targ->rx_port_queue[i].queue = port->n_rxq;
384 port->pool[targ->rx_port_queue[i].queue] = targ->pool;
385 port->pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1;
388 int dsocket = port->socket;
389 if (dsocket != -1 && dsocket != socket) {
390 plog_warn("RX core on socket %d while device on socket %d\n", socket, dsocket);
395 static void configure_if_queues(void)
397 struct lcore_cfg *lconf = NULL;
398 struct task_args *targ;
401 while (core_targ_next(&lconf, &targ, 0) == 0) {
402 socket = rte_lcore_to_socket_id(lconf->id);
404 configure_if_rx_queues(targ, socket);
405 configure_if_tx_queues(targ, socket);
409 static void configure_tx_queue_flags(void)
411 struct lcore_cfg *lconf = NULL;
412 struct task_args *targ;
416 while (core_targ_next(&lconf, &targ, 0) == 0) {
417 socket = rte_lcore_to_socket_id(lconf->id);
418 for (uint8_t i = 0; i < targ->nb_txports; ++i) {
419 if_port = targ->tx_port_queue[i].port;
420 #if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
421 /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
422 the tasks up to the task transmitting to the port
424 if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
425 prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
428 /* Set the DEV_TX_OFFLOAD_MBUF_FAST_FREE flag if none of
429 the tasks up to the task transmitting to the port
430 use refcnt and, per queue, all mbufs come from the same mempool. */
431 if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
432 if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL))
433 prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
440 static void configure_multi_segments(void)
442 struct lcore_cfg *lconf = NULL;
443 struct task_args *targ;
446 while (core_targ_next(&lconf, &targ, 0) == 0) {
447 for (uint8_t i = 0; i < targ->nb_txports; ++i) {
448 if_port = targ->tx_port_queue[i].port;
449 // Multi segment is disabled for most tasks. It is only enabled for tasks requiring big packets.
450 #if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
451 // We can only enable "no multi segment" if no such task exists in the chain of tasks.
452 if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
453 prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
456 // We enable "multi segment" if at least one task requires it in the chain of tasks.
457 if (chain_flag_sometimes_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
458 prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MULTI_SEGS;
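/* gen_ring_name() builds a one- or two-character ring name from a static
   index over the character set below: the first names are single
   characters ("A", "B", ...), later ones use two characters. */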
465 static const char *gen_ring_name(void)
467 static char retval[] = "XX";
468 static const char* ring_names =
469 "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
470 "abcdefghijklmnopqrstuvwxyz"
471 "[\\]^_`!\"#$%&'()*+,-./:;<="
477 retval[0] = ring_names[idx % strlen(ring_names)];
478 idx /= strlen(ring_names);
479 retval[1] = idx ? ring_names[(idx - 1) % strlen(ring_names)] : 0;
486 struct ring_init_stats {
487 uint32_t n_pkt_rings;
488 uint32_t n_ctrl_rings;
489 uint32_t n_opt_rings;
492 static uint32_t ring_init_stats_total(const struct ring_init_stats *ris)
494 return ris->n_pkt_rings + ris->n_ctrl_rings + ris->n_opt_rings;
497 static uint32_t count_incoming_tasks(uint32_t lcore_worker, uint32_t dest_task)
499 struct lcore_cfg *lconf = NULL;
500 struct task_args *targ;
504 while (core_targ_next(&lconf, &targ, 0) == 0) {
505 for (uint8_t idxx = 0; idxx < MAX_PROTOCOLS; ++idxx) {
506 for (uint8_t ridx = 0; ridx < targ->core_task_set[idxx].n_elems; ++ridx) {
507 ct = targ->core_task_set[idxx].core_task[ridx];
509 if (dest_task == ct.task && lcore_worker == ct.core)
517 static struct rte_ring *get_existing_ring(uint32_t lcore_id, uint32_t task_id)
519 if (!prox_core_active(lcore_id, 0))
522 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
524 if (task_id >= lconf->n_tasks_all)
527 if (lconf->targs[task_id].nb_rxrings == 0)
530 return lconf->targs[task_id].rx_rings[0];
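/* init_ring_between_tasks() creates (or reuses) the rte_ring that connects
   a source task to the destination core/task described by ct. Control
   rings (CTRL_TYPE_MSG/CTRL_TYPE_PKT) are stored in the worker's ctrl_rings
   arrays; a data ring between two consecutive tasks on the same core can be
   optimized away; when several tasks feed the same destination a shared
   multi-producer ring can be reused. The ring_init_stats counters are
   updated accordingly. */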
533 static struct rte_ring *init_ring_between_tasks(struct lcore_cfg *lconf, struct task_args *starg,
534 const struct core_task ct, uint8_t ring_idx, int idx,
535 struct ring_init_stats *ris)
538 struct rte_ring *ring = NULL;
539 struct lcore_cfg *lworker;
540 struct task_args *dtarg;
542 PROX_ASSERT(prox_core_active(ct.core, 0));
543 lworker = &lcore_cfg[ct.core];
545 /* socket used is the one that the sending core resides on */
546 socket = rte_lcore_to_socket_id(lconf->id);
548 plog_info("\t\tCreating ring on socket %u with size %u\n"
549 "\t\t\tsource core, task and socket = %u, %u, %u\n"
550 "\t\t\tdestination core, task and socket = %u, %u, %u\n"
551 "\t\t\tdestination worker id = %u\n",
552 socket, starg->ring_size,
553 lconf->id, starg->id, socket,
554 ct.core, ct.task, rte_lcore_to_socket_id(ct.core),
558 struct rte_ring **dring = NULL;
560 if (ct.type == CTRL_TYPE_MSG)
561 dring = &lworker->ctrl_rings_m[ct.task];
562 else if (ct.type == CTRL_TYPE_PKT) {
563 dring = &lworker->ctrl_rings_p[ct.task];
564 starg->flags |= TASK_ARG_CTRL_RINGS_P;
568 ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ);
571 PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core);
573 starg->tx_rings[starg->tot_n_txrings_inited] = ring;
574 starg->tot_n_txrings_inited++;
576 if (lconf->id == prox_cfg.master) {
577 ctrl_rings[ct.core*MAX_TASKS_PER_CORE + ct.task] = ring;
578 } else if (ct.core == prox_cfg.master) {
579 starg->ctrl_plane_ring = ring;
582 plog_info("\t\t\tCore %u task %u to -> core %u task %u ctrl_ring %s %p %s\n",
583 lconf->id, starg->id, ct.core, ct.task, ct.type == CTRL_TYPE_PKT?
584 "pkt" : "msg", ring, ring->name);
589 dtarg = &lworker->targs[ct.task];
590 lworker->targs[ct.task].worker_thread_id = ring_idx;
591 PROX_ASSERT(dtarg->flags & TASK_ARG_RX_RING);
592 PROX_ASSERT(ct.task < lworker->n_tasks_all);
594 /* If all the following conditions are met, the ring can be
596 if (!task_is_master(starg) && !task_is_master(dtarg) && starg->lconf->id == dtarg->lconf->id &&
597 starg->nb_txrings == 1 && idx == 0 && dtarg->task &&
598 dtarg->tot_rxrings == 1 && starg->task == dtarg->task - 1) {
599 plog_info("\t\tOptimizing away ring on core %u from task %u to task %u\n",
600 dtarg->lconf->id, starg->task, dtarg->task);
601 /* No need to set up ws_mbuf. */
602 starg->tx_opt_ring = 1;
603 /* During init of destination task, the buffer in the
604 source task will be initialized. */
605 dtarg->tx_opt_ring_task = starg;
611 int ring_created = 1;
612 /* Only create multi-producer rings if configured to do so AND
613 there is more than one task sending to the task */
614 if ((prox_cfg.flags & DSF_MP_RINGS && count_incoming_tasks(ct.core, ct.task) > 1)
615 || (prox_cfg.flags & DSF_ENABLE_BYPASS)) {
616 ring = get_existing_ring(ct.core, ct.task);
619 plog_info("\t\tCore %u task %u creatign MP ring %p to core %u task %u\n",
620 lconf->id, starg->id, ring, ct.core, ct.task);
624 ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ);
625 plog_info("\t\tCore %u task %u using MP ring %p from core %u task %u\n",
626 lconf->id, starg->id, ring, ct.core, ct.task);
630 ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
632 PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core);
634 starg->tx_rings[starg->tot_n_txrings_inited] = ring;
635 starg->tot_n_txrings_inited++;
638 PROX_ASSERT(dtarg->nb_rxrings < MAX_RINGS_PER_TASK);
639 dtarg->rx_rings[dtarg->nb_rxrings] = ring;
641 if (dtarg->nb_rxrings > 1)
642 dtarg->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL;
644 dtarg->nb_slave_threads = starg->core_task_set[idx].n_elems;
645 dtarg->lb_friend_core = lconf->id;
646 dtarg->lb_friend_task = starg->id;
647 plog_info("\t\tWorker thread %d has core %d, task %d as a lb friend\n", ct.core, lconf->id, starg->id);
648 plog_info("\t\tCore %u task %u tx_ring[%u] -> core %u task %u rx_ring[%u] %p %s %u WT\n",
649 lconf->id, starg->id, ring_idx, ct.core, ct.task, dtarg->nb_rxrings, ring, ring->name,
650 dtarg->nb_slave_threads);
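/* init_rings() walks every core/task pair and instantiates all the rings
   described by its core_task_set, then sets up the control rings between
   the master task and every l3/ndp task (one ring in each direction). */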
655 static void init_rings(void)
657 struct lcore_cfg *lconf = NULL;
658 struct task_args *starg;
659 struct ring_init_stats ris = {0};
661 while (core_targ_next(&lconf, &starg, 1) == 0) {
662 plog_info("\t*** Initializing rings on core %u, task %u ***\n", lconf->id, starg->id);
663 for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
664 for (uint8_t ring_idx = 0; ring_idx < starg->core_task_set[idx].n_elems; ++ring_idx) {
665 PROX_ASSERT(ring_idx < MAX_WT_PER_LB);
666 PROX_ASSERT(starg->tot_n_txrings_inited < MAX_RINGS_PER_TASK);
668 struct core_task ct = starg->core_task_set[idx].core_task[ring_idx];
669 init_ring_between_tasks(lconf, starg, ct, ring_idx, idx, &ris);
674 plog_info("\tInitialized %d rings:\n"
675 "\t\tNumber of packet rings: %u\n"
676 "\t\tNumber of control rings: %u\n"
677 "\t\tNumber of optimized rings: %u\n",
678 ring_init_stats_total(&ris),
684 struct prox_port_cfg *port;
685 while (core_targ_next(&lconf, &starg, 1) == 0) {
686 if ((starg->task_init) && (starg->flags & (TASK_ARG_L3|TASK_ARG_NDP))) {
688 ct.core = prox_cfg.master;
690 ct.type = CTRL_TYPE_PKT;
691 struct rte_ring *rx_ring = init_ring_between_tasks(lconf, starg, ct, 0, 0, &ris);
694 ct.task = starg->id;
695 struct rte_ring *tx_ring = init_ring_between_tasks(&lcore_cfg[prox_cfg.master], lcore_cfg[prox_cfg.master].targs, ct, 0, 0, &ris);
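/* shuffle_mempool() drains up to nb_mbuf buffers from the mempool and puts
   them back in random order, so that the mbufs handed out later are no
   longer laid out sequentially in memory (used with the DSF_SHUFFLE flag). */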
700 static void shuffle_mempool(struct rte_mempool* mempool, uint32_t nb_mbuf)
702 struct rte_mbuf** pkts = prox_zmalloc(nb_mbuf * sizeof(*pkts), rte_socket_id());
705 while ((got < nb_mbuf) && (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0))
712 idx = rand() % nb_mbuf;
713 } while (pkts[idx] == 0);
715 rte_mempool_put_bulk(mempool, (void**)&pkts[idx], 1);
722 static void set_mbuf_size(struct task_args *targ)
724 /* mbuf size can be set
725 * - from config file (highest priority, overwriting any other config) - should only be used as a workaround
726 * - defaulted to MBUF_SIZE.
727 * Except if set explicitly, ensure that size is big enough for the vmxnet3 driver
732 targ->mbuf_size = MBUF_SIZE;
733 struct prox_port_cfg *port;
734 uint16_t max_frame_size = 0, min_buffer_size = 0;
736 for (int i = 0; i < targ->nb_rxports; i++) {
737 uint8_t if_port = targ->rx_port_queue[i].port;
739 if (if_port == OUT_DISCARD) {
742 port = &prox_port_cfg[if_port];
743 if (max_frame_size < port->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE)
744 max_frame_size = port->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
745 if (min_buffer_size < port->min_rx_bufsize)
746 min_buffer_size = port->min_rx_bufsize;
748 // Check whether we receive from i40e. This driver has extra mbuf size requirements
749 if (strcmp(port->short_name, "i40e") == 0)
753 // i40e supports a maximum of 5 descriptors chained
754 uint16_t required_mbuf_size = RTE_ALIGN(max_frame_size / 5, 128) + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
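/* Worked example (values are build-dependent): with a 9000-byte MTU,
   max_frame_size is 9000 + 14 + 4 + 8 = 9026; 9026 / 5 = 1805, aligned up
   to 1920; assuming the common 128-byte struct rte_mbuf and 128-byte
   RTE_PKTMBUF_HEADROOM this gives a required mbuf size of 2176 bytes. */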
755 if (required_mbuf_size > targ->mbuf_size) {
756 targ->mbuf_size = required_mbuf_size;
757 plog_info("\t\tSetting mbuf_size to %u to support frame_size %u\n", targ->mbuf_size, max_frame_size);
760 if (min_buffer_size > targ->mbuf_size) {
761 plog_warn("Mbuf size might be too small. This might result in packet segmentation and memory leak\n");
766 static void setup_mempools_unique_per_socket(void)
770 struct lcore_cfg *lconf = NULL;
771 struct task_args *targ;
773 struct rte_mempool *pool[MAX_SOCKETS];
774 uint32_t mbuf_count[MAX_SOCKETS] = {0};
775 uint32_t nb_cache_mbuf[MAX_SOCKETS] = {0};
776 uint32_t mbuf_size[MAX_SOCKETS] = {0};
778 while (core_targ_next_early(&lconf, &targ, 0) == 0) {
779 PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is mode specified for core %d, task %d ?\n", lconf->id, targ->id);
780 uint8_t socket = rte_lcore_to_socket_id(lconf->id);
781 PROX_ASSERT(socket < MAX_SOCKETS);
784 if (targ->rx_port_queue[0].port != OUT_DISCARD) {
785 struct prox_port_cfg* port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
786 PROX_ASSERT(targ->nb_mbuf != 0);
787 mbuf_count[socket] += targ->nb_mbuf;
788 if (nb_cache_mbuf[socket] == 0)
789 nb_cache_mbuf[socket] = targ->nb_cache_mbuf;
791 PROX_PANIC(nb_cache_mbuf[socket] != targ->nb_cache_mbuf,
792 "all mbuf_cache must have the same size if using a unique mempool per socket\n");
794 if (mbuf_size[socket] == 0)
795 mbuf_size[socket] = targ->mbuf_size;
797 PROX_PANIC(mbuf_size[socket] != targ->mbuf_size,
798 "all mbuf_size must have the same size if using a unique mempool per socket\n");
802 for (int i = 0 ; i < MAX_SOCKETS; i++) {
803 if (mbuf_count[i] != 0) {
804 sprintf(name, "socket_%u_pool", i);
805 if ((pool[i] = rte_mempool_lookup(name)) == NULL) {
806 pool[i] = rte_mempool_create(name,
807 mbuf_count[i] - 1, mbuf_size[i],
809 sizeof(struct rte_pktmbuf_pool_private),
810 rte_pktmbuf_pool_init, NULL,
811 prox_pktmbuf_init, NULL,
813 PROX_PANIC(pool[i] == NULL, "\t\tError: cannot create mempool for socket %u\n", i);
814 plog_info("\tMempool %p size = %u * %u cache %u, socket %d\n", pool[i],
815 mbuf_count[i], mbuf_size[i], nb_cache_mbuf[i], i);
817 if (prox_cfg.flags & DSF_SHUFFLE) {
818 shuffle_mempool(pool[i], mbuf_count[i]);
825 while (core_targ_next_early(&lconf, &targ, 0) == 0) {
826 uint8_t socket = rte_lcore_to_socket_id(lconf->id);
828 if (targ->rx_port_queue[0].port != OUT_DISCARD) {
829 /* use this pool for the interface that the core is receiving from */
830 /* If one core receives from multiple ports, all the ports use the same mempool */
831 targ->pool = pool[socket];
832 /* Set the number of mbuf to the number of the unique mempool, so that the used and free work */
833 targ->nb_mbuf = mbuf_count[socket];
834 plog_info("\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
835 targ->nb_mbuf, mbuf_size[socket], targ->nb_cache_mbuf, socket);
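/* setup_mempool_for_rx_task() creates the packet mempool used by a task's
   RX ports, or attaches to an existing one when a pool_name is configured
   (looked up through the "MP_<pool_name>" memzone), and optionally
   shuffles it. One core receiving from several ports uses a single
   mempool. */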
840 static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args *targ)
842 const uint8_t socket = rte_lcore_to_socket_id(lconf->id);
843 struct prox_port_cfg *port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
844 const struct rte_memzone *mz;
845 struct rte_mempool *mp = NULL;
847 char memzone_name[64];
852 /* allocate memory pool for packets */
853 PROX_ASSERT(targ->nb_mbuf != 0);
855 if (targ->pool_name[0] == '\0') {
856 sprintf(name, "core_%u_task_%u_pool", lconf->id, targ->id);
859 snprintf(memzone_name, sizeof(memzone_name)-1, "MP_%s", targ->pool_name);
860 mz = rte_memzone_lookup(memzone_name);
863 mp = (struct rte_mempool*)mz->addr;
865 targ->nb_mbuf = mp->size;
869 #ifdef RTE_LIBRTE_IVSHMEM_FALSE
870 if (mz != NULL && mp != NULL && mp->phys_addr != mz->ioremap_addr) {
871 /* Init mbufs with ioremap_addr for dma */
872 mp->phys_addr = mz->ioremap_addr;
873 mp->elt_pa[0] = mp->phys_addr + (mp->elt_va_start - (uintptr_t)mp);
875 struct prox_pktmbuf_reinit_args init_args;
877 init_args.lconf = lconf;
879 uint32_t elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
880 rte_mempool_obj_iter((void*)mp->elt_va_start, mp->size, elt_sz, 1,
881 mp->elt_pa, mp->pg_num, mp->pg_shift, prox_pktmbuf_reinit, &init_args);
885 /* Use this pool for the interface that the core is
886 receiving from. If one core receives from multiple
887 ports, all the ports use the same mempool. */
888 if (targ->pool == NULL) {
889 plog_info("\tCreating mempool with name '%s' on socket %d\n", name, socket);
890 targ->pool = rte_mempool_create(name,
891 targ->nb_mbuf - 1, targ->mbuf_size,
893 sizeof(struct rte_pktmbuf_pool_private),
894 rte_pktmbuf_pool_init, NULL,
895 prox_pktmbuf_init, lconf,
899 PROX_PANIC(targ->pool == NULL,
900 "\tError: cannot create mempool for core %u port %u: %s\n", lconf->id, targ->id, rte_strerror(rte_errno));
902 plog_info("\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
903 targ->nb_mbuf, targ->mbuf_size, targ->nb_cache_mbuf, socket);
904 if (prox_cfg.flags & DSF_SHUFFLE) {
905 shuffle_mempool(targ->pool, targ->nb_mbuf);
909 static void setup_mempools_multiple_per_socket(void)
911 struct lcore_cfg *lconf = NULL;
912 struct task_args *targ;
914 while (core_targ_next_early(&lconf, &targ, 0) == 0) {
915 PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is mode specified for core %d, task %d ?\n", lconf->id, targ->id);
916 if (targ->rx_port_queue[0].port == OUT_DISCARD)
918 setup_mempool_for_rx_task(lconf, targ);
922 static void setup_mempools(void)
924 if (prox_cfg.flags & UNIQUE_MEMPOOL_PER_SOCKET)
925 setup_mempools_unique_per_socket();
927 setup_mempools_multiple_per_socket();
930 static void set_task_lconf(void)
932 struct lcore_cfg *lconf;
933 uint32_t lcore_id = -1;
935 while(prox_core_next(&lcore_id, 1) == 0) {
936 lconf = &lcore_cfg[lcore_id];
937 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
938 lconf->targs[task_id].lconf = lconf;
943 static void set_dest_threads(void)
945 struct lcore_cfg *lconf = NULL;
946 struct task_args *targ;
948 while (core_targ_next(&lconf, &targ, 0) == 0) {
949 for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
950 for (uint8_t ring_idx = 0; ring_idx < targ->core_task_set[idx].n_elems; ++ring_idx) {
951 struct core_task ct = targ->core_task_set[idx].core_task[ring_idx];
953 struct task_args *dest_task = core_targ_get(ct.core, ct.task);
954 dest_task->prev_tasks[dest_task->n_prev_tasks++] = targ;
960 static void setup_all_task_structs_early_init(void)
962 struct lcore_cfg *lconf = NULL;
963 struct task_args *targ;
965 plog_info("\t*** Calling early init on all tasks ***\n");
966 while (core_targ_next(&lconf, &targ, 0) == 0) {
967 if (targ->task_init->early_init) {
968 targ->task_init->early_init(targ);
973 static void setup_all_task_structs(void)
975 struct lcore_cfg *lconf;
976 uint32_t lcore_id = -1;
977 struct task_base *tmaster = NULL;
979 while(prox_core_next(&lcore_id, 1) == 0) {
980 lconf = &lcore_cfg[lcore_id];
981 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
982 if (task_is_master(&lconf->targs[task_id])) {
983 plog_info("\tInitializing MASTER struct for core %d task %d\n", lcore_id, task_id);
984 lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
985 tmaster = lconf->tasks_all[task_id];
989 PROX_PANIC(tmaster == NULL, "Can't initialize master task\n");
992 while(prox_core_next(&lcore_id, 1) == 0) {
993 lconf = &lcore_cfg[lcore_id];
994 plog_info("\t*** Initializing core %d (%d task) ***\n", lcore_id, lconf->n_tasks_all);
995 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
996 if (!task_is_master(&lconf->targs[task_id])) {
997 plog_info("\t\tInitializing struct for core %d task %d\n", lcore_id, task_id);
998 lconf->targs[task_id].tmaster = tmaster;
999 lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
1005 static void init_port_activate(void)
1007 struct lcore_cfg *lconf = NULL;
1008 struct task_args *targ;
1009 uint8_t port_id = 0;
1011 while (core_targ_next_early(&lconf, &targ, 0) == 0) {
1012 for (int i = 0; i < targ->nb_rxports; i++) {
1013 port_id = targ->rx_port_queue[i].port;
1014 prox_port_cfg[port_id].active = 1;
1017 for (int i = 0; i < targ->nb_txports; i++) {
1018 port_id = targ->tx_port_queue[i].port;
1019 prox_port_cfg[port_id].active = 1;
1024 /* Initialize cores and allocate mempools */
1025 static void init_lcores(void)
1027 struct lcore_cfg *lconf = 0;
1028 uint32_t lcore_id = -1;
1030 while(prox_core_next(&lcore_id, 0) == 0) {
1031 uint8_t socket = rte_lcore_to_socket_id(lcore_id);
1032 PROX_PANIC(socket + 1 > MAX_SOCKETS, "Can't configure core %u (on socket %u). MAX_SOCKETS is set to %d\n", lcore_id, socket, MAX_SOCKETS);
1035 /* need to allocate mempools as the first thing to use the lowest possible address range */
1036 plog_info("=== Initializing mempools ===\n");
1039 lcore_cfg_alloc_hp();
1044 plog_info("=== Initializing port addresses ===\n");
1047 plog_info("=== Initializing queue numbers on cores ===\n");
1048 configure_if_queues();
1050 plog_info("=== Initializing rings on cores ===\n");
1053 configure_multi_segments();
1054 configure_tx_queue_flags();
1056 plog_info("=== Checking configuration consistency ===\n");
1057 check_cfg_consistent();
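/* setup_prox() runs the whole initialization sequence: read the
   configuration file, bring up the DPDK EAL, activate and initialize the
   ports (or null/ring devices with -n), calibrate the TSC, set up cores,
   mempools and rings, and finally build the task structures. With -s or
   -i it stops after the corresponding stage. */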
1062 static int setup_prox(int argc, char **argv)
1064 if (prox_read_config_file() != 0 ||
1065 prox_setup_rte(argv[0]) != 0) {
1069 if (prox_cfg.flags & DSF_CHECK_SYNTAX) {
1070 plog_info("=== Configuration file syntax has been checked ===\n\n");
1074 init_port_activate();
1075 plog_info("=== Initializing rte devices ===\n");
1076 if (!(prox_cfg.flags & DSF_USE_DUMMY_DEVICES))
1077 init_rte_ring_dev();
1078 init_rte_dev(prox_cfg.flags & DSF_USE_DUMMY_DEVICES);
1079 plog_info("=== Calibrating TSC overhead ===\n");
1081 plog_info("\tTSC running at %"PRIu64" Hz\n", rte_get_tsc_hz());
1084 plog_info("=== Initializing ports ===\n");
1087 setup_all_task_structs_early_init();
1088 plog_info("=== Initializing tasks ===\n");
1089 setup_all_task_structs();
1091 if (prox_cfg.logbuf_size) {
1092 prox_cfg.logbuf = prox_zmalloc(prox_cfg.logbuf_size, rte_socket_id());
1093 PROX_PANIC(prox_cfg.logbuf == NULL, "Failed to allocate memory for logbuf with size = %d\n", prox_cfg.logbuf_size);
1096 if (prox_cfg.flags & DSF_CHECK_INIT) {
1097 plog_info("=== Initialization sequence completed ===\n\n");
1101 /* Current way that works to disable DPDK logging */
1102 FILE *f = fopen("/dev/null", "r");
1103 rte_openlog_stream(f);
1104 plog_info("=== PROX started ===\n");
1108 static int success = 0;
1109 static void siguser_handler(int signal)
1111 if (signal == SIGUSR1)
1117 static void sigabrt_handler(__attribute__((unused)) int signum)
1119 /* restore default disposition for SIGABRT and SIGPIPE */
1120 signal(SIGABRT, SIG_DFL);
1121 signal(SIGPIPE, SIG_DFL);
1123 /* ignore further Ctrl-C */
1124 signal(SIGINT, SIG_IGN);
1126 /* more drastic exit on tedious termination signal */
1127 plog_info("Aborting...\n");
1128 if (lcore_cfg != NULL) {
1130 pthread_t thread_id, tid0, tid = pthread_self();
1131 memset(&tid0, 0, sizeof(tid0));
1133 /* cancel all threads except current one */
1135 while (prox_core_next(&lcore_id, 1) == 0) {
1136 thread_id = lcore_cfg[lcore_id].thread_id;
1137 if (pthread_equal(thread_id, tid0))
1139 if (pthread_equal(thread_id, tid))
1141 pthread_cancel(thread_id);
1144 /* wait for cancelled threads to terminate */
1146 while (prox_core_next(&lcore_id, 1) == 0) {
1147 thread_id = lcore_cfg[lcore_id].thread_id;
1148 if (pthread_equal(thread_id, tid0))
1150 if (pthread_equal(thread_id, tid))
1152 pthread_join(thread_id, NULL);
1159 /* close ports on termination signal */
1160 close_ports_atexit();
1166 static void sigterm_handler(int signum)
1168 /* abort on second Ctrl-C */
1169 if (signum == SIGINT)
1170 signal(SIGINT, sigabrt_handler);
1172 /* gracefully quit on harmless termination signal */
1173 /* ports will subsequently get closed at resulting exit */
1177 static void set_term_env(void)
1179 static const char var[] = "TERM";
1180 static char str[] = "TERM=putty";
1181 char *old_value, *new_value;
1182 int max_ver = 0, min_ver = 0, n;
1184 old_value = getenv(var);
1186 const char *ncurses_version = curses_version();
1187 n = sscanf(ncurses_version, "ncurses %d.%d", &max_ver, &min_ver);
1189 plog_info("\tUnable to extract ncurses version from %s. TERM left unchanged to %s\n", ncurses_version, old_value);
1192 plog_info("\tncurses version = %d.%d (%s)\n", max_ver, min_ver, ncurses_version);
1195 if (((max_ver > 6) || ((max_ver == 6) && (min_ver >= 1))) && (strcmp(old_value, "xterm") == 0)) {
1196 // On recent OSes such as RHEL 8.0, ncurses(6.1) introduced support
1197 // for ECMA-48 repeat character control.
1198 // Some terminal emulators use TERM=xterm but do not support this feature.
1199 // In this case, printing a repeated character sequence such as "22000000 Hz" might
1200 // display as "220 Hz".
1201 // Other emulators, such as tmux, use TERM=screen, and do not exhibit the issue.
1202 plog_info("\tChanged TERM from %s ", old_value);
1204 new_value = getenv(var);
1205 plog_info("to %s\n", new_value);
1207 plog_info("\tTERM left unchanged to %s\n", old_value);
1211 int main(int argc, char **argv)
1213 /* set en_US locale to print big numbers with ',' */
1214 setlocale(LC_NUMERIC, "en_US.utf-8");
1216 if (prox_parse_args(argc, argv) != 0){
1217 prox_usage(argv[0]);
1219 plog_init(prox_cfg.log_name, prox_cfg.log_name_pid);
1220 plog_info("=== " PROGRAM_NAME " %s ===\n", VERSION_STR());
1221 plog_info("\tUsing DPDK %s\n", rte_version() + sizeof(RTE_VER_PREFIX));
1222 plog_info("\tgit version %s\n", git_version);
1226 if (prox_cfg.flags & DSF_LIST_TASK_MODES) {
1227 /* list supported task modes and exit */
1229 return EXIT_SUCCESS;
1232 /* close ports at normal exit */
1233 atexit(close_ports_atexit);
1234 /* gracefully quit on harmless termination signals */
1235 signal(SIGHUP, sigterm_handler);
1236 signal(SIGINT, sigterm_handler);
1237 signal(SIGQUIT, sigterm_handler);
1238 signal(SIGTERM, sigterm_handler);
1239 signal(SIGUSR1, sigterm_handler);
1240 signal(SIGUSR2, sigterm_handler);
1241 /* more drastic exit on tedious termination signals */
1242 signal(SIGABRT, sigabrt_handler);
1243 signal(SIGPIPE, sigabrt_handler);
1245 if (prox_cfg.flags & DSF_DAEMON) {
1246 signal(SIGUSR1, siguser_handler);
1247 signal(SIGUSR2, siguser_handler);
1248 plog_info("=== Running in Daemon mode ===\n");
1249 plog_info("\tForking child and waiting for setup completion\n");
1251 pid_t ppid = getpid();
1254 plog_err("Failed to fork process to run in daemon mode\n");
1255 return EXIT_FAILURE;
1263 kill(ppid, SIGUSR2);
1264 return EXIT_FAILURE;
1266 if (setup_prox(argc, argv) != 0) {
1267 kill(ppid, SIGUSR2);
1268 return EXIT_FAILURE;
1271 kill(ppid, SIGUSR1);
1272 run(prox_cfg.flags);
1273 return EXIT_SUCCESS;
1277 /* Before exiting the parent, wait until the
1278 child process has finished setting up */
1280 if (prox_cfg.logbuf) {
1281 file_print(prox_cfg.logbuf);
1283 return success? EXIT_SUCCESS : EXIT_FAILURE;
1287 if (setup_prox(argc, argv) != 0)
1288 return EXIT_FAILURE;
1289 run(prox_cfg.flags);
1291 return EXIT_SUCCESS;