// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
20 #include <rte_sched.h>
21 #include <rte_string_fns.h>
22 #include <rte_version.h>
24 #include "prox_malloc.h"
27 #include "prox_args.h"
28 #include "prox_assert.h"
33 #include "parse_utils.h"
34 #include "prox_port_cfg.h"
39 #include "prox_ipv6.h"
40 #include "prox_compat.h"
41 #include "ip_subnet.h"
/* Limits for the argv vector handed to rte_eal_init() and for a single
 * argument copied out of the config file. */
#define MAX_RTE_ARGV 64
#define MAX_ARG_LEN  256

/* String equality shorthand used by every per-section parser below. */
#define STR_EQ(s1, s2)	(!strcmp((s1), (s2)))
54 /* configuration files support */
55 static int get_rte_cfg(unsigned sindex, char *str, void *data);
56 static int get_global_cfg(unsigned sindex, char *str, void *data);
57 static int get_port_cfg(unsigned sindex, char *str, void *data);
58 static int get_defaults_cfg(unsigned sindex, char *str, void *data);
59 static int get_cache_set_cfg(unsigned sindex, char *str, void *data);
60 static int get_var_cfg(unsigned sindex, char *str, void *data);
61 static int get_lua_cfg(unsigned sindex, char *str, void *data);
62 static int get_core_cfg(unsigned sindex, char *str, void *data);
64 static const char *cfg_file = DEFAULT_CONFIG_FILE;
65 static struct rte_cfg rte_cfg;
66 struct prox_cache_set_cfg prox_cache_set_cfg[PROX_MAX_CACHE_SET];
68 static char format_err_str[1024];
69 static const char *err_str = "Unknown error";
71 static struct cfg_section eal_default_cfg = {
72 .name = "eal options",
73 .parser = get_rte_cfg,
80 static struct cfg_section port_cfg = {
82 .parser = get_port_cfg,
83 .data = &prox_port_cfg,
89 static struct cfg_section var_cfg = {
91 .parser = get_var_cfg,
98 static struct cfg_section cache_set_cfg = {
99 .name = "cache set #",
100 .parser = get_cache_set_cfg,
101 .data = &prox_cache_set_cfg,
107 static struct cfg_section defaults_cfg = {
109 .parser = get_defaults_cfg,
116 static struct cfg_section settings_cfg = {
118 .parser = get_global_cfg,
125 static struct cfg_section lua_cfg = {
127 .parser = get_lua_cfg,
134 static struct cfg_section core_cfg = {
136 .parser = get_core_cfg,
137 .data = lcore_cfg_init,
143 struct deferred_port {
144 struct task_args *targ;
149 static struct deferred_port deferred_port[PROX_MAX_PORTS];
150 static int n_deferred_ports = 0;
152 static void set_errf(const char *format, ...)
155 va_start(ap, format);
156 vsnprintf(format_err_str, sizeof(format_err_str), format, ap);
158 err_str = format_err_str;
161 /* [eal options] parser */
162 static int get_rte_cfg(__attribute__((unused))unsigned sindex, char *str, void *data)
164 struct rte_cfg *pconfig = (struct rte_cfg *)data;
166 if (str == NULL || pconfig == NULL) {
170 char *pkey = get_cfg_key(str);
172 set_errf("Missing key after option");
176 if (STR_EQ(str, "-m")) {
177 return parse_int(&pconfig->memory, pkey);
179 if (STR_EQ(str, "-n")) {
180 if (parse_int(&pconfig->force_nchannel, pkey)) {
183 if (pconfig->force_nchannel == 0) {
184 set_errf("Invalid number of memory channels");
189 if (STR_EQ(str, "-r")) {
190 if (parse_int(&pconfig->force_nrank, pkey)) {
193 if (pconfig->force_nrank == 0 || pconfig->force_nrank > 16) {
194 set_errf("Invalid number of memory ranks");
200 if (STR_EQ(str, "no-pci")) {
201 return parse_bool(&pconfig->no_pci, pkey);
203 if (STR_EQ(str, "no-hpet")) {
204 return parse_bool(&pconfig->no_hpet, pkey);
206 if (STR_EQ(str, "no-shconf")) {
207 return parse_bool(&pconfig->no_shconf, pkey);
209 if (STR_EQ(str, "no-huge")) {
210 return parse_bool(&pconfig->no_hugetlbfs, pkey);
212 if (STR_EQ(str, "no-output")) {
213 return parse_bool(&pconfig->no_output, pkey);
216 if (STR_EQ(str, "huge-dir")) {
217 if (pconfig->hugedir) {
218 free(pconfig->hugedir);
220 pconfig->hugedir = strdup(pkey);
224 if (STR_EQ(str, "eal")) {
225 char eal[MAX_STR_LEN_PROC];
230 if (parse_str(eal, pkey, sizeof(eal)))
233 strip_spaces(&pkey, 1);
235 pconfig->eal = strdup(pkey);
239 set_errf("Option '%s' is not known", str);
243 struct cfg_depr global_cfg_depr[] = {
244 {"virtualization", "This is now set automatically if needed"},
245 {"qinq_tag", "This option is deprecated"},
246 {"wait on quit", "This is now set automatically if needed"},
250 const char *get_cfg_dir(void)
252 static char dir[PATH_MAX];
253 size_t end = strlen(cfg_file) - 1;
254 while (end > 0 && cfg_file[end] != '/')
257 strncpy(dir, cfg_file, end);
261 static int get_lua_cfg(__attribute__((unused)) unsigned sindex, __attribute__((unused)) char *str, __attribute__((unused)) void *data)
265 if (NULL == getcwd(cwd, sizeof(cwd))) {
266 set_errf("Failed to get current directory while loading Lua file\n");
269 status = chdir(get_cfg_dir());
271 set_errf("Failed to change directory to '%s' while loading Lua file\n", get_cfg_dir());
275 struct lua_State *l = prox_lua();
278 prox_strncpy(str_cpy, str, sizeof(str_cpy));
279 uint32_t len = strlen(str_cpy);
280 str_cpy[len++] = '\n';
283 status = luaL_loadstring(l, str_cpy);
285 set_errf("Lua error: '%s'\n", lua_tostring(l, -1));
290 status = lua_pcall(l, 0, LUA_MULTRET, 0);
292 set_errf("Lua error: '%s'\n", lua_tostring(l, -1));
299 set_errf("Failed to restore current directory to '%s' while loading Lua file\n", cwd);
306 /* [global] parser */
307 static int get_global_cfg(__attribute__((unused))unsigned sindex, char *str, void *data)
309 struct prox_cfg *pset = (struct prox_cfg *)data;
311 if (str == NULL || pset == NULL) {
315 char *pkey = get_cfg_key(str);
317 set_errf("Missing key after option");
321 for (uint32_t i = 0; i < RTE_DIM(global_cfg_depr); ++i) {
322 if (STR_EQ(str, global_cfg_depr[i].opt)) {
323 set_errf("Option '%s' is deprecated%s%s",
324 global_cfg_depr[i].opt, strlen(global_cfg_depr[i].info)? ": ": "", global_cfg_depr[i].info);
329 if (STR_EQ(str, "name")) {
330 return parse_str(pset->name, pkey, sizeof(pset->name));
333 if (STR_EQ(str, "start time")) {
334 return parse_int(&pset->start_time, pkey);
337 if (STR_EQ(str, "duration time")) {
338 return parse_int(&pset->duration_time, pkey);
341 if (STR_EQ(str, "shuffle")) {
342 return parse_flag(&pset->flags, DSF_SHUFFLE, pkey);
344 if (STR_EQ(str, "disable cmt")) {
345 return parse_flag(&pset->flags, DSF_DISABLE_CMT, pkey);
347 if (STR_EQ(str, "mp rings")) {
348 return parse_flag(&pset->flags, DSF_MP_RINGS, pkey);
350 if (STR_EQ(str, "enable bypass")) {
351 return parse_flag(&pset->flags, DSF_ENABLE_BYPASS, pkey);
353 if (STR_EQ(str, "poll timeout")) {
354 return parse_int(&pset->poll_timeout, pkey);
356 if (STR_EQ(str, "heartbeat timeout")) {
357 return parse_int(&pset->heartbeat_timeout, pkey);
360 if (STR_EQ(str, "cpe table map")) {
361 /* The config defined ports through 0, 1, 2 ... which
362 need to be associated with ports. This is done
363 through defining it using "cpe table map=" */
364 return parse_port_name_list((uint32_t*)pset->cpe_table_ports, NULL, PROX_MAX_PORTS, pkey);
367 if (STR_EQ(str, "pre cmd")) {
371 if (STR_EQ(str, "unique mempool per socket")) {
372 return parse_flag(&pset->flags, UNIQUE_MEMPOOL_PER_SOCKET, pkey);
375 if (STR_EQ(str, "log buffer size")) {
376 if (parse_kmg(&pset->logbuf_size, pkey)) {
379 plog_info("Logging to buffer with size = %d\n", pset->logbuf_size);
383 set_errf("Option '%s' is not known", str);
/* [variable] parser.
 * Each line defines a config-file variable ("$name=value"); simply hand
 * the name/value pair to the variable table. */
static int get_var_cfg(__attribute__((unused)) unsigned sindex, char *str, __attribute__((unused)) void *data)
{
	return add_var(str, get_cfg_key(str), 0);
}
393 /* [defaults] parser */
394 static int get_defaults_cfg(__attribute__((unused)) unsigned sindex, char *str, __attribute__((unused)) void *data)
399 pkey = get_cfg_key(str);
401 set_errf("Missing key after option");
405 if (STR_EQ(str, "mempool size")) {
407 if (parse_kmg(&val, pkey)) {
411 for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
412 struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id];
413 cur_lcore_cfg_init->id = lcore_id;
414 for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) {
415 struct task_args *targ = &cur_lcore_cfg_init->targs[task_id];
423 if (STR_EQ(str, "qinq tag")) {
424 for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
425 struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id];
426 cur_lcore_cfg_init->id = lcore_id;
427 for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) {
428 struct task_args *targ = &cur_lcore_cfg_init->targs[task_id];
429 parse_int(&targ->qinq_tag, pkey);
434 if (STR_EQ(str, "memcache size")) {
436 if (parse_kmg(&val, pkey)) {
440 for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
441 struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id];
442 cur_lcore_cfg_init->id = lcore_id;
443 for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) {
444 struct task_args *targ = &cur_lcore_cfg_init->targs[task_id];
445 targ->nb_cache_mbuf = val;
451 set_errf("Option '%s' is not known", str);
455 /* [cache set] parser */
456 static int get_cache_set_cfg(unsigned sindex, char *str, void *data)
458 struct prox_cache_set_cfg *cfg = (struct prox_cache_set_cfg *)data;
460 uint8_t cur_if = sindex & ~CFG_INDEXED;
462 if (cur_if >= PROX_MAX_CACHE_SET) {
463 set_errf("Cache set ID is too high (max allowed %d)", PROX_MAX_CACHE_SET - 1 );
467 cfg = &prox_cache_set_cfg[cur_if];
469 if (str == NULL || data == NULL) {
473 char *pkey = get_cfg_key(str);
476 set_errf("Missing key after option");
480 if (STR_EQ(str, "mask")) {
482 int err = parse_int(&val, pkey);
488 plog_info("\tCache set %d has mask %x\n", cur_if, cfg->mask);
495 static int get_port_cfg(unsigned sindex, char *str, void *data)
497 struct prox_port_cfg *cfg = (struct prox_port_cfg *)data;
499 uint8_t cur_if = sindex & ~CFG_INDEXED;
501 if (cur_if >= PROX_MAX_PORTS) {
502 set_errf("Port ID is too high (max allowed %d)", PROX_MAX_PORTS - 1 );
506 cfg = &prox_port_cfg[cur_if];
508 if (str == NULL || data == NULL) {
512 char *pkey = get_cfg_key(str);
515 set_errf("Missing key after option");
519 if (STR_EQ(str, "mac")) {
520 if (STR_EQ(pkey, "hardware")) {
521 cfg->type = PROX_PORT_MAC_HW;
523 else if (STR_EQ(pkey, "random")) {
524 cfg->type = PROX_PORT_MAC_RAND;
527 cfg->type = PROX_PORT_MAC_SET;
528 if (parse_mac(&cfg->eth_addr, pkey)) {
533 else if (STR_EQ(str, "name")) {
535 prox_strncpy(cfg->names[0], pkey, MAX_NAME_SIZE);
536 PROX_ASSERT(cur_if < PROX_MAX_PORTS);
537 return add_port_name(cur_if, pkey);
539 else if (STR_EQ(str, "rx desc")) {
540 return parse_int(&cfg->n_rxd, pkey);
542 else if (STR_EQ(str, "tx desc")) {
543 return parse_int(&cfg->n_txd, pkey);
545 else if (STR_EQ(str, "ipv6 mask length")) {
546 return parse_int(&cfg->v6_mask_length, pkey);
548 else if (STR_EQ(str, "all_rx_queues")) {
550 if (parse_bool(&val, pkey)) {
553 cfg->all_rx_queues = val;
555 else if (STR_EQ(str, "promiscuous")) {
557 if (parse_bool(&val, pkey)) {
560 cfg->promiscuous = val;
562 else if (STR_EQ(str, "multicast")) {
564 if (cfg->nb_mc_addr >= NB_MCAST_ADDR) {
565 plog_err("too many multicast addresses\n");
568 if (parse_mac(&cfg->mc_addr[cfg->nb_mc_addr], pkey)) {
573 else if (STR_EQ(str, "lsc")) {
574 cfg->lsc_set_explicitely = 1;
576 if (parse_bool(&val, pkey)) {
581 else if (STR_EQ(str, "local ipv4")) {
582 if (parse_ip_set(cfg->ip_addr, pkey, PROX_MAX_VLAN_TAGS) != 0) {
583 cfg->ip_addr[0].ip = 24;
584 return parse_ip(&cfg->ip_addr[0].ip, pkey);
588 else if (STR_EQ(str, "virtual")) {
590 if (parse_bool(&val, pkey)) {
595 else if (STR_EQ(str, "vdev")) {
596 prox_strncpy(cfg->vdev, pkey, MAX_NAME_SIZE);
598 #if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
599 else if (STR_EQ(str, "disable tx offload")) {
601 if (parse_int(&val, pkey)) {
605 cfg->disabled_tx_offload = val;
608 else if (STR_EQ(str, "strip crc")) {
610 if (parse_bool(&val, pkey)) {
613 #if defined(RTE_ETH_RX_OFFLOAD_CRC_STRIP)
615 cfg->requested_rx_offload |= RTE_ETH_RX_OFFLOAD_CRC_STRIP;
617 cfg->requested_rx_offload &= ~RTE_ETH_RX_OFFLOAD_CRC_STRIP;
619 #if defined (RTE_ETH_RX_OFFLOAD_KEEP_CRC)
621 cfg->requested_rx_offload &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
623 cfg->requested_rx_offload |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
628 else if (STR_EQ(str, "vlan tag")) {
629 return parse_int_set(cfg->vlan_tags, pkey, sizeof(cfg->vlan_tags) / sizeof(cfg->vlan_tags[0]));
631 else if (STR_EQ(str, "vlan")) {
632 #if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
634 if (parse_bool(&val, pkey)) {
638 cfg->requested_rx_offload |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
639 cfg->requested_tx_offload |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
641 cfg->requested_rx_offload &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
642 cfg->requested_tx_offload &= ~RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
645 plog_warn("vlan option not supported : update DPDK at least to 18.08 to support this option\n");
648 else if (STR_EQ(str, "mtu size")) {
650 if (parse_int(&val, pkey)) {
655 // A frame of 1526 bytes (1500 bytes mtu, 14 bytes hdr, 4 bytes crc and 8 bytes vlan)
656 // should not be considered as a jumbo frame. However rte_ethdev.c considers that
657 // the max_rx_pkt_len for a non jumbo frame is 1518
658 #if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
659 cfg->port_conf.rxmode.max_rx_pkt_len = cfg->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN;
660 if (cfg->port_conf.rxmode.max_rx_pkt_len > PROX_RTE_ETHER_MAX_LEN)
662 cfg->port_conf.rxmode.mtu = cfg->mtu;
663 if (cfg->port_conf.rxmode.mtu > PROX_MTU)
665 cfg->requested_rx_offload |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
669 else if (STR_EQ(str, "rss")) {
671 if (parse_bool(&val, pkey)) {
675 cfg->port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
676 cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IPV4;
679 else if (STR_EQ(str, "rx_ring")) {
680 parse_str(cfg->rx_ring, pkey, sizeof(cfg->rx_ring));
682 else if (STR_EQ(str, "tx_ring")) {
683 parse_str(cfg->tx_ring, pkey, sizeof(cfg->tx_ring));
689 static enum police_action str_to_color(const char *str)
691 if (STR_EQ(str, "green"))
693 if (STR_EQ(str, "yellow"))
695 if (STR_EQ(str, "red"))
697 if (STR_EQ(str, "drop"))
702 struct cfg_depr task_cfg_depr[] = {
706 struct cfg_depr core_cfg_depr[] = {
709 {"network side", ""},
713 static int get_core_cfg(unsigned sindex, char *str, void *data)
716 struct lcore_cfg *lconf = (struct lcore_cfg *)data;
718 if (str == NULL || lconf == NULL || !(sindex & CFG_INDEXED)) {
722 pkey = get_cfg_key(str);
724 set_errf("Missing key after option");
728 uint32_t ncore = sindex & ~CFG_INDEXED;
729 if (ncore >= RTE_MAX_LCORE) {
730 set_errf("Core index too high (max allowed %d)", RTE_MAX_LCORE - 1);
734 lconf = &lconf[ncore];
736 for (uint32_t i = 0; i < RTE_DIM(core_cfg_depr); ++i) {
737 if (STR_EQ(str, core_cfg_depr[i].opt)) {
738 set_errf("Option '%s' is deprecated%s%s",
739 core_cfg_depr[i].opt, strlen(core_cfg_depr[i].info)? ": ": "", core_cfg_depr[i].info);
745 lcore_to_socket_core_ht(ncore, buff, sizeof(buff));
747 if (STR_EQ(str, "task")) {
750 if (parse_int(&val, pkey)) {
753 if (val >= MAX_TASKS_PER_CORE) {
754 set_errf("Too many tasks for core (max allowed %d)", MAX_TASKS_PER_CORE - 1);
757 if (val != lconf->n_tasks_all) {
758 set_errf("Task ID skipped or defined twice");
762 lconf->active_task = val;
764 lconf->targs[lconf->active_task].task = lconf->active_task;
766 if (lconf->n_tasks_all < lconf->active_task + 1) {
767 lconf->n_tasks_all = lconf->active_task + 1;
772 struct task_args *targ = &lconf->targs[lconf->active_task];
773 if (STR_EQ(str, "tx ports from routing table")) {
774 uint32_t vals[PROX_MAX_PORTS];
776 if (!(targ->task_init->flag_features & TASK_FEATURE_ROUTING)) {
777 set_errf("tx port form route not supported mode %s", targ->task_init->mode_str);
781 if (parse_port_name_list(vals, &n_if, PROX_MAX_PORTS, pkey)) {
785 for (uint8_t i = 0; i < n_if; ++i) {
786 targ->tx_port_queue[i].port = vals[i];
789 targ->runtime_flags |= TASK_ROUTING;
792 if (STR_EQ(str, "tx ports from cpe table")) {
793 uint32_t vals[PROX_MAX_PORTS];
797 char* mapping_str = strstr(pkey, " remap=");
799 if (mapping_str != NULL) {
801 mapping_str += strlen(" remap=");
802 n_remap = parse_remap(targ->mapping, mapping_str);
805 if (parse_port_name_list(vals, &ret, PROX_MAX_PORTS, pkey)) {
809 if (n_remap != -1 && ret != (uint32_t)n_remap) {
810 set_errf("Expected %d remap elements but had %d", n_remap, ret);
814 for (uint8_t i = 0; i < ret; ++i) {
815 targ->tx_port_queue[i].port = vals[i];
817 /* default mapping this case is port0 -> port0 */
819 targ->mapping[vals[i]] = i;
823 targ->nb_txports = ret;
827 if (STR_EQ(str, "tx cores from routing table")) {
828 if (!(targ->task_init->flag_features & TASK_FEATURE_ROUTING)) {
829 set_errf("tx port form route not supported mode %s", targ->task_init->mode_str);
833 struct core_task_set *cts = &targ->core_task_set[0];
835 if (parse_task_set(cts, pkey))
838 if (cts->n_elems > MAX_WT_PER_LB) {
839 set_errf("Maximum worker threads allowed is %u but have %u", MAX_WT_PER_LB, cts->n_elems);
843 targ->nb_worker_threads = cts->n_elems;
844 targ->nb_txrings = cts->n_elems;
846 if (targ->nb_txrings > MAX_RINGS_PER_TASK) {
847 set_errf("Maximum allowed TX rings is %u but have %u", MAX_RINGS_PER_TASK, targ->nb_txrings);
851 targ->runtime_flags |= TASK_ROUTING;
854 if (STR_EQ(str, "tx cores from cpe table")) {
855 struct core_task_set *core_task_set = &targ->core_task_set[0];
859 mapping_str = strstr(pkey, " remap=");
860 if (mapping_str == NULL) {
861 set_errf("There is no default mapping for tx cores from cpe table. Please specify it through remap=");
865 mapping_str += strlen(" remap=");
866 ret = parse_remap(targ->mapping, mapping_str);
871 struct core_task_set *cts = &targ->core_task_set[0];
873 if (parse_task_set(cts, pkey))
875 if (cts->n_elems > MAX_RINGS_PER_TASK) {
876 set_errf("Maximum cores to route to is %u\n", MAX_RINGS_PER_TASK);
880 targ->nb_txrings = cts->n_elems;
882 if (ret != targ->nb_txrings) {
883 set_errf("Expecting same number of remaps as cores\n", str);
889 if (STR_EQ(str, "delay ms")) {
890 if (targ->delay_us) {
891 set_errf("delay ms and delay us are mutually exclusive\n", str);
895 int rc = parse_int(&delay_ms, pkey);
896 targ->delay_us = delay_ms * 1000;
899 if (STR_EQ(str, "delay us")) {
900 if (targ->delay_us) {
901 set_errf("delay ms and delay us are mutually exclusive\n", str);
904 return parse_int(&targ->delay_us, pkey);
906 if (STR_EQ(str, "random delay us")) {
907 return parse_int(&targ->random_delay_us, pkey);
909 if (STR_EQ(str, "cpe table timeout ms")) {
910 return parse_int(&targ->cpe_table_timeout_ms, pkey);
912 if (STR_EQ(str, "ctrl path polling frequency")) {
913 int rc = parse_int(&targ->ctrl_freq, pkey);
915 if (targ->ctrl_freq == 0) {
916 set_errf("ctrl frequency must be non null.");
923 if (STR_EQ(str, "handle arp")) {
924 return parse_flag(&targ->runtime_flags, TASK_CTRL_HANDLE_ARP, pkey);
926 if (STR_EQ(str, "fast path handle arp")) {
927 return parse_flag(&targ->runtime_flags, TASK_FP_HANDLE_ARP, pkey);
930 if (STR_EQ(str, "do not forward geneve")) {
931 return parse_flag(&targ->runtime_flags, TASK_DO_NOT_FWD_GENEVE, pkey);
933 /* Using tx port name, only a _single_ port can be assigned to a task. */
934 if (STR_EQ(str, "tx port")) {
935 if (targ->nb_txports > 0) {
936 set_errf("Only one tx port can be defined per task. Use a LB task or routing instead.");
941 uint32_t ports[PROX_MAX_PORTS];
943 if(parse_port_name_list(ports, &n_if, PROX_MAX_PORTS, pkey)) {
944 // Port name not found, but could be a virtual device of a secondary process
945 // As DPDK not started yet, we can only check the config file to see whether we are a secondary process
947 (strstr(rte_cfg.eal, "secondary") || strstr(rte_cfg.eal, "auto")) &&
948 (n_deferred_ports < PROX_MAX_PORTS)) {
949 prox_strncpy(deferred_port[n_deferred_ports].name, pkey, sizeof(deferred_port[n_deferred_ports].name));
950 deferred_port[n_deferred_ports].is_rx_port = 0;
951 deferred_port[n_deferred_ports++].targ = targ;
957 PROX_ASSERT(n_if-1 < PROX_MAX_PORTS);
959 for (uint8_t i = 0; i < n_if; ++i) {
960 targ->tx_port_queue[i].port = ports[i];
965 targ->nb_worker_threads = targ->nb_txports;
970 if (STR_EQ(str, "rx ring")) {
972 int err = parse_bool(&val, pkey);
973 if (!err && val && targ->rx_port_queue[0].port != OUT_DISCARD) {
974 set_errf("Can't read both from internal ring and external port from the same task. Use multiple tasks instead.");
978 return parse_flag(&targ->flags, TASK_ARG_RX_RING, pkey);
980 if (STR_EQ(str, "private")) {
981 return parse_bool(&targ->use_src, pkey);
983 if (STR_EQ(str, "use src ip")) {
984 return parse_bool(&targ->use_src, pkey);
986 if (STR_EQ(str, "nat table")) {
987 return parse_str(targ->nat_table, pkey, sizeof(targ->nat_table));
989 if (STR_EQ(str, "rules")) {
990 return parse_str(targ->rules, pkey, sizeof(targ->rules));
992 if (STR_EQ(str, "route table")) {
993 return parse_str(targ->route_table, pkey, sizeof(targ->route_table));
995 if (STR_EQ(str, "dscp")) {
996 return parse_str(targ->dscp, pkey, sizeof(targ->dscp));
998 if (STR_EQ(str, "tun_bindings")) {
999 return parse_str(targ->tun_bindings, pkey, sizeof(targ->tun_bindings));
1001 if (STR_EQ(str, "cpe table")) {
1002 return parse_str(targ->cpe_table_name, pkey, sizeof(targ->cpe_table_name));
1004 if (STR_EQ(str, "user table")) {
1005 return parse_str(targ->user_table, pkey, sizeof(targ->user_table));
1007 if (STR_EQ(str, "streams")) {
1008 return parse_str(targ->streams, pkey, sizeof(targ->streams));
1010 if (STR_EQ(str, "Unsollicited NA")) {
1011 return parse_flag(&targ->flags, TASK_ARG_SEND_NA_AT_STARTUP, pkey);
1013 if (STR_EQ(str, "local lpm")) {
1014 return parse_flag(&targ->flags, TASK_ARG_LOCAL_LPM, pkey);
1016 if (STR_EQ(str, "drop")) {
1017 return parse_flag(&targ->flags, TASK_ARG_DROP, pkey);
1019 if (STR_EQ(str, "loop")) {
1020 parse_flag(&targ->loop, 1, pkey);
1021 return parse_flag(&targ->loop, 1, pkey);
1023 if (STR_EQ(str, "qinq")) {
1024 return parse_flag(&targ->flags, TASK_ARG_QINQ_ACL, pkey);
1026 if (STR_EQ(str, "bps")) {
1027 return parse_u64(&targ->rate_bps, pkey);
1029 if (STR_EQ(str, "random")) {
1030 return parse_str(targ->rand_str[targ->n_rand_str++], pkey, sizeof(targ->rand_str[0]));
1032 if (STR_EQ(str, "range")) {
1033 int rc = parse_range(&targ->range[targ->n_ranges].min, &targ->range[targ->n_ranges].max, pkey);
1037 if (STR_EQ(str, "range_offset")) {
1038 if (targ->n_ranges == 0) {
1039 set_errf("No range defined previously (use range=...)");
1042 return parse_int(&targ->range[targ->n_ranges - 1].offset, pkey);
1044 if (STR_EQ(str, "rand_offset")) {
1045 if (targ->n_rand_str == 0) {
1046 set_errf("No random defined previously (use random=...)");
1050 return parse_int(&targ->rand_offset[targ->n_rand_str - 1], pkey);
1052 if (STR_EQ(str, "keep src mac")) {
1053 return parse_flag(&targ->flags, DSF_KEEP_SRC_MAC, pkey);
1055 if (STR_EQ(str, "pcap file")) {
1056 return parse_str(targ->pcap_file, pkey, sizeof(targ->pcap_file));
1058 if (STR_EQ(str, "imix")) {
1059 char pkey2[MAX_CFG_STRING_LEN], *ptr;
1060 if (parse_str(pkey2, pkey, sizeof(pkey2)) != 0) {
1061 set_errf("Error while parsing imix, too long\n");
1064 const size_t pkey_len = strlen(pkey2);
1065 targ->imix_nb_pkts = 0;
1067 while (targ->imix_nb_pkts < MAX_IMIX_PKTS) {
1068 if (parse_int(&targ->imix_pkt_sizes[targ->imix_nb_pkts], ptr) != 0)
1070 targ->imix_nb_pkts++;
1071 if ((ptr = strchr(ptr, ',')) == NULL)
1074 if (targ->imix_nb_pkts == MAX_IMIX_PKTS) {
1075 set_errf("Too many packet sizes specified");
1079 plog_info("%d IMIX packets:", targ->imix_nb_pkts);
1080 for (size_t i = 0; i < targ->imix_nb_pkts; ++i) {
1081 plog_info("%d ", targ->imix_pkt_sizes[i]);
1086 if (STR_EQ(str, "pkt inline")) {
1087 char pkey2[MAX_CFG_STRING_LEN];
1088 if (parse_str(pkey2, pkey, sizeof(pkey2)) != 0) {
1089 set_errf("Error while parsing pkt line, too long\n");
1093 const size_t pkey_len = strlen(pkey2);
1096 for (size_t i = 0; i < pkey_len; ++i) {
1097 if (pkey2[i] == ' ')
1100 if (i + 1 == pkey_len) {
1101 set_errf("Incomplete byte at character %z", i);
1107 if (pkey2[i] >= '0' && pkey2[i] <= '9') {
1108 byte = (pkey2[i] - '0') << 4;
1110 else if (pkey2[i] >= 'a' && pkey2[i] <= 'f') {
1111 byte = (pkey2[i] - 'a' + 10) << 4;
1113 else if (pkey2[i] >= 'A' && pkey2[i] <= 'F') {
1114 byte = (pkey2[i] - 'A' + 10) << 4;
1117 set_errf("Invalid character in pkt inline at byte %d (%c)", i, pkey2[i]);
1121 if (pkey2[i + 1] >= '0' && pkey2[i + 1] <= '9') {
1122 byte |= (pkey2[i + 1] - '0');
1124 else if (pkey2[i + 1] >= 'a' && pkey2[i + 1] <= 'f') {
1125 byte |= (pkey2[i + 1] - 'a' + 10);
1127 else if (pkey2[i + 1] >= 'A' && pkey2[i + 1] <= 'F') {
1128 byte |= (pkey2[i + 1] - 'A' + 10);
1131 set_errf("Invalid character in pkt inline at byte %d (%c)", i, pkey2[i + 1]);
1134 if (targ->pkt_size == sizeof(targ->pkt_inline)) {
1135 set_errf("Inline packet definition can't be longer than %u", sizeof(targ->pkt_inline));
1139 targ->pkt_inline[targ->pkt_size++] = byte;
1145 if (STR_EQ(str, "accuracy limit nsec")) {
1146 return parse_int(&targ->accuracy_limit_nsec, pkey);
1148 if (STR_EQ(str, "latency bucket size")) {
1149 return parse_int(&targ->bucket_size, pkey);
1151 if (STR_EQ(str, "latency buffer size")) {
1152 return parse_int(&targ->latency_buffer_size, pkey);
1154 if (STR_EQ(str, "loss buffer size")) {
1155 return parse_int(&targ->loss_buffer_size, pkey);
1157 if (STR_EQ(str, "accuracy pos")) {
1158 return parse_int(&targ->accur_pos, pkey);
1160 if (STR_EQ(str, "signature")) {
1161 return parse_int(&targ->sig, pkey);
1163 if (STR_EQ(str, "signature pos")) {
1164 return parse_int(&targ->sig_pos, pkey);
1166 if (STR_EQ(str, "lat pos")) {
1167 targ->lat_enabled = 1;
1168 return parse_int(&targ->lat_pos, pkey);
1170 if (STR_EQ(str, "packet id pos")) {
1171 return parse_int(&targ->packet_id_pos, pkey);
1173 if (STR_EQ(str, "flow id pos")) {
1174 return parse_int(&targ->flow_id_pos, pkey);
1176 if (STR_EQ(str, "packet id in flow pos")) {
1177 return parse_int(&targ->packet_id_in_flow_pos, pkey);
1179 if (STR_EQ(str, "flow count")) {
1180 return parse_int(&targ->flow_count, pkey);
1182 if (STR_EQ(str, "probability")) { // old - use "probability no drop" instead
1184 int rc = parse_float(&probability, pkey);
1185 if (probability == 0) {
1186 set_errf("Probability must be != 0\n");
1188 } else if (probability > 100.0) {
1189 set_errf("Probability must be < 100\n");
1192 targ->probability_no_drop = probability * 10000;
1195 if (STR_EQ(str, "proba no drop")) {
1197 int rc = parse_float(&probability, pkey);
1198 if (probability == 0) {
1199 set_errf("probability no drop must be != 0\n");
1201 } else if (probability > 100.0) {
1202 set_errf("Probability must be < 100\n");
1205 targ->probability_no_drop = probability * 10000;
1208 if (STR_EQ(str, "proba delay")) {
1210 int rc = parse_float(&probability, pkey);
1211 if (probability > 100.0) {
1212 set_errf("Probability must be < 100\n");
1215 targ->probability_delay = probability * 10000;
1218 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1219 if (STR_EQ(str, "proba duplicate")) {
1221 int rc = parse_float(&probability, pkey);
1222 if (probability > 100.0) {
1223 set_errf("probability duplicate must be < 100\n");
1226 targ->probability_duplicate = probability * 10000;
1230 if (STR_EQ(str, "concur conn")) {
1231 return parse_int(&targ->n_concur_conn, pkey);
1233 if (STR_EQ(str, "max setup rate")) {
1234 return parse_int(&targ->max_setup_rate, pkey);
1236 if (STR_EQ(str, "pkt size")) {
1237 return parse_int(&targ->pkt_size, pkey);
1239 if (STR_EQ(str, "min bulk size")) {
1240 return parse_int(&targ->min_bulk_size, pkey);
1242 if (STR_EQ(str, "max bulk size")) {
1243 return parse_int(&targ->max_bulk_size, pkey);
1245 if (STR_EQ(str, "rx port")) {
1246 if (targ->flags & TASK_ARG_RX_RING) {
1247 set_errf("Can't read both from internal ring and external port from the same task. Use multiple tasks instead.");
1250 uint32_t vals[PROX_MAX_PORTS];
1253 if (parse_port_name_list(vals, &n_if, PROX_MAX_PORTS, pkey)) {
1254 // Port name not found, but could be a virtual device of a secondary process
1255 // As DPDK not started yet, we can only check the config file to see whether we are a secondary process
1257 (strstr(rte_cfg.eal, "secondary") || strstr(rte_cfg.eal, "auto")) &&
1258 (n_deferred_ports < PROX_MAX_PORTS)) {
1259 prox_strncpy(deferred_port[n_deferred_ports].name, pkey, sizeof(deferred_port[n_deferred_ports].name));
1260 deferred_port[n_deferred_ports].is_rx_port = 1;
1261 deferred_port[n_deferred_ports++].targ = targ;
1267 for (uint8_t i = 0; i < n_if; ++i) {
1268 PROX_ASSERT(vals[i] < PROX_MAX_PORTS);
1269 targ->rx_port_queue[i].port = vals[i];
1275 if (STR_EQ(str, "mode")) {
1276 /* Check deprecated task modes */
1278 int ret = parse_str(mode, pkey, sizeof(mode));
1282 for (uint32_t i = 0; i < RTE_DIM(task_cfg_depr); ++i) {
1283 if (STR_EQ(mode, task_cfg_depr[i].opt)) {
1284 set_errf("Task mode '%s' is deprecated%s%s",
1285 task_cfg_depr[i].opt, strlen(task_cfg_depr[i].info)? ": ": "", task_cfg_depr[i].info);
1290 /* master is a special mode that is always needed (cannot be turned off) */
1291 if (STR_EQ(mode, "master")) {
1292 prox_cfg.master = ncore;
1293 targ->mode = MASTER;
1294 if (lconf->n_tasks_all > 1 || targ->task != 0) {
1295 set_errf("Master core can only have one task\n");
1298 // Initialize number of tasks to 1 for master, even if no task specified
1299 lconf->n_tasks_all = 1;
1300 lconf->active_task = 0;
1301 lconf->targs[lconf->active_task].task = 0;
1302 struct task_init* task_init = to_task_init(mode, "");
1304 targ->mode = task_init->mode;
1306 targ->task_init = task_init;
1310 struct task_init* task_init = to_task_init(mode, "");
1312 targ->mode = task_init->mode;
1315 set_errf("Task mode '%s' is invalid", mode);
1319 targ->task_init = task_init;
1322 if (STR_EQ(str, "users")) {
1323 return parse_int(&targ->n_flows, pkey);
1326 if (STR_EQ(str, "mark")) {
1327 return parse_flag(&targ->runtime_flags, TASK_MARK, pkey);
1330 if (STR_EQ(str, "mark green")) {
1331 return parse_int(&targ->marking[0], pkey);
1334 if (STR_EQ(str, "mark yellow")) {
1335 return parse_int(&targ->marking[1], pkey);
1338 if (STR_EQ(str, "mark red")) {
1339 return parse_int(&targ->marking[2], pkey);
1342 if (STR_EQ(str, "tx cores")) {
1343 uint8_t dest_task = 0;
1344 /* if user did not specify, dest_port is left at default (first type) */
1345 uint8_t dest_proto = 0;
1346 uint8_t ctrl = CTRL_TYPE_DP;
1347 char *task_str = strstr(pkey, "proto=");
1349 task_str += strlen("proto=");
1351 if (STR_EQ(task_str, "ipv4")) {
1354 else if (STR_EQ(task_str, "arp")) {
1357 else if (STR_EQ(task_str, "ipv6")) {
1361 set_errf("proto needs to be either ipv4, arp or ipv6");
1367 task_str = strstr(pkey, "task=");
1373 task_str += strlen("task=");
1374 char *task_str_end = strstr(task_str, " ");
1378 if (0 == strlen(task_str)) {
1379 set_errf("Invalid task= syntax");
1383 switch (task_str[strlen(task_str) - 1]) {
1385 ctrl = CTRL_TYPE_PKT;
1388 ctrl = CTRL_TYPE_MSG;
1394 if (task_str[strlen(task_str) -1] < '0' ||
1395 task_str[strlen(task_str) -1] > '9') {
1396 set_errf("Unknown ring type %c.\n",
1397 task_str[strlen(task_str) - 1]);
1402 dest_task = atoi(task_str);
1403 if (dest_task >= MAX_TASKS_PER_CORE) {
1404 set_errf("Destination task too high (max allowed %d)", MAX_TASKS_PER_CORE - 1);
1412 struct core_task_set *cts = &targ->core_task_set[dest_proto];
1414 if (parse_task_set(cts, pkey))
1417 if (cts->n_elems > MAX_WT_PER_LB) {
1418 set_errf("Too many worker threads (max allowed %d)", MAX_WT_PER_LB - 1);
1422 targ->nb_worker_threads = cts->n_elems;
1423 targ->nb_txrings += cts->n_elems;
1427 if (STR_EQ(str, "tx crc")) {
1428 return parse_flag(&targ->runtime_flags, TASK_TX_CRC, pkey);
1430 if (STR_EQ(str, "ring size")) {
1431 return parse_int(&targ->ring_size, pkey);
1433 if (STR_EQ(str, "mempool size")) {
1434 return parse_kmg(&targ->nb_mbuf, pkey);
1437 else if (STR_EQ(str, "mbuf size")) {
1438 return parse_int(&targ->mbuf_size, pkey);
1440 if (STR_EQ(str, "memcache size")) {
1441 return parse_kmg(&targ->nb_cache_mbuf, pkey);
1444 if (STR_EQ(str, "byte offset")) {
1445 return parse_int(&targ->byte_offset, pkey);
1448 if (STR_EQ(str, "realtime scheduling")) {
1449 return parse_flag(&lconf->flags, LCONF_FLAG_SCHED_RR, pkey);
1451 if (STR_EQ(str, "name")) {
1452 return parse_str(lconf->name, pkey, sizeof(lconf->name));
1454 /* MPLS configuration */
1455 if (STR_EQ(str, "untag mpls")) {
1456 return parse_flag(&targ->runtime_flags, TASK_MPLS_TAGGING, pkey);
1459 if (STR_EQ(str, "add mpls")) {
1460 return parse_flag(&targ->runtime_flags, TASK_MPLS_TAGGING, pkey);
1463 if (STR_EQ(str, "ether type")) {
1464 return parse_int(&targ->etype, pkey);
1467 if (STR_EQ(str, "cache set")) {
1468 return parse_int(&lconf->cache_set, pkey);
1471 if (STR_EQ(str, "sub mode")) {
1472 const char* mode_str = targ->task_init->mode_str;
1473 const char *sub_mode_str = pkey;
1475 targ->task_init = to_task_init(mode_str, sub_mode_str);
1476 if (!targ->task_init) {
1477 if ((strcmp(sub_mode_str, "l3") != 0) && (strcmp(sub_mode_str, "ndp") != 0)) {
1478 set_errf("sub mode %s not supported for mode %s", sub_mode_str, mode_str);
1481 targ->task_init = to_task_init(mode_str, "");
1482 if (!targ->task_init) {
1483 set_errf("sub mode %s not supported for mode %s", sub_mode_str, mode_str);
1487 if (strcmp(sub_mode_str, "l3") == 0) {
1488 prox_cfg.flags |= DSF_L3_ENABLED;
1489 targ->flags |= TASK_ARG_L3;
1490 strcpy(targ->sub_mode_str, "l3");
1491 } else if (strcmp(sub_mode_str, "ndp") == 0) {
1492 prox_cfg.flags |= DSF_NDP_ENABLED;
1493 targ->flags |= TASK_ARG_NDP;
1494 strcpy(targ->sub_mode_str, "ndp");
1496 strcpy(targ->sub_mode_str, targ->task_init->sub_mode_str);
1501 if (STR_EQ(str, "mempool name")) {
1502 return parse_str(targ->pool_name, pkey, sizeof(targ->pool_name));
1504 if (STR_EQ(str, "dpi engine")) {
1505 return parse_str(targ->dpi_engine_path, pkey, sizeof(targ->dpi_engine_path));
1507 if (STR_EQ(str, "dpi engine arg")) {
1508 return parse_str(targ->dpi_engine_args[targ->n_dpi_engine_args++], pkey,
1509 sizeof(targ->dpi_engine_args[0]));
1511 if (STR_EQ(str, "dst mac")) { /* destination MAC address to be used for packets */
1512 if (parse_mac(&targ->edaddr, pkey)) {
1513 if (STR_EQ(pkey, "no")) {
1514 targ->flags |= TASK_ARG_DO_NOT_SET_DST_MAC;
1517 if (STR_EQ(pkey, "packet") == 0)
1522 targ->flags |= TASK_ARG_DST_MAC_SET;
1525 if (STR_EQ(str, "src mac")) {
1526 if (parse_mac(&targ->esaddr, pkey)) {
1527 if (STR_EQ(pkey, "no")) {
1528 targ->flags |= TASK_ARG_DO_NOT_SET_SRC_MAC;
1531 else if (STR_EQ(pkey, "packet"))
1533 else if (STR_EQ(pkey, "hw")) {
1534 targ->flags |= TASK_ARG_HW_SRC_MAC;
1540 targ->flags |= TASK_ARG_SRC_MAC_SET;
1543 if (STR_EQ(str, "igmp ipv4")) { /* IGMP Group */
1544 return parse_ip(&targ->igmp_address, pkey);
1546 if (STR_EQ(str, "gateway ipv4")) { /* Gateway IP address used when generating */
1547 if ((targ->flags & TASK_ARG_L3) == 0)
1548 plog_warn("gateway ipv4 configured but L3 sub mode not enabled\n");
1549 if (targ->local_ipv4)
1550 targ->local_prefix = 32;
1551 return parse_ip(&targ->gateway_ipv4, pkey);
1553 if (STR_EQ(str, "ipv6 router")) { /* we simulate an IPV6 router */
1554 int rc = parse_flag(&targ->ipv6_router, 1, pkey);
1555 if (!rc && targ->ipv6_router) {
1556 plog_info("\tipv6 router configured => NDP enabled\n");
1557 prox_cfg.flags |= DSF_NDP_ENABLED;
1558 targ->flags |= TASK_ARG_NDP;
1559 strcpy(targ->sub_mode_str, "ndp");
1563 if (STR_EQ(str, "gateway ipv6")) { /* Gateway IP address used when generating */
1564 if ((targ->flags & TASK_ARG_NDP) == 0)
1565 plog_warn("gateway ipv6 configured but NDP sub mode not enabled\n");
1566 return parse_ip6(&targ->gateway_ipv6, pkey);
1568 if (STR_EQ(str, "local ipv4")) { /* source IP address to be used for packets */
1569 struct ip4_subnet cidr;
1570 if (parse_ip4_and_prefix(&cidr, pkey) != 0) {
1571 if (targ->gateway_ipv4)
1572 targ->local_prefix = 32;
1574 targ->local_prefix = 0;
1575 return parse_ip(&targ->local_ipv4, pkey);
1577 targ->local_ipv4 = cidr.ip;
1578 targ->local_prefix = cidr.prefix;
1582 if (STR_EQ(str, "remote ipv4")) { /* source IP address to be used for packets */
1583 return parse_ip(&targ->remote_ipv4, pkey);
1585 if (STR_EQ(str, "global ipv6")) {
1586 if (parse_ip6(&targ->global_ipv6, pkey) == 0) {
1587 plog_info("\tglobal ipv6 configured => NDP enabled\n");
1588 targ->flags |= TASK_ARG_NDP;
1589 prox_cfg.flags |= DSF_NDP_ENABLED;
1590 strcpy(targ->sub_mode_str, "ndp");
1592 plog_err("Unable to parse content of local ipv6: %s\n", pkey);
1597 if (STR_EQ(str, "local ipv6")) { /* source IPv6 address to be used for packets */
1598 if (parse_ip6(&targ->local_ipv6, pkey) == 0) {
1599 plog_info("\tlocal ipv6 configured => NDP enabled\n");
1600 targ->flags |= TASK_ARG_NDP;
1601 prox_cfg.flags |= DSF_NDP_ENABLED;
1602 strcpy(targ->sub_mode_str, "ndp");
1604 plog_err("Unable to parse content of local ipv6: %s\n", pkey);
1609 if (STR_EQ(str, "router prefix")) {
1610 if (parse_ip6(&targ->router_prefix, pkey) == 0) {
1611 plog_info("\trouter prefix set to "IPv6_BYTES_FMT" (%s)\n", IPv6_BYTES(targ->router_prefix.bytes), IP6_Canonical(&targ->router_prefix));
1613 plog_err("Unable to parse content of router prefix: %s\n", pkey);
1618 if (STR_EQ(str, "arp timeout"))
1619 return parse_int(&targ->reachable_timeout, pkey);
1620 if (STR_EQ(str, "arp update time"))
1621 return parse_int(&targ->arp_ndp_retransmit_timeout, pkey);
1622 if (STR_EQ(str, "number of packets"))
1623 return parse_int(&targ->n_pkts, pkey);
1624 if (STR_EQ(str, "store size"))
1625 return parse_int(&targ->store_max, pkey);
1626 if (STR_EQ(str, "pipes")) {
1628 int err = parse_int(&val, pkey);
1631 if (!val || !rte_is_power_of_2(val)) {
1632 set_errf("Number of pipes has to be power of 2 and not zero");
1636 targ->qos_conf.port_params.n_pipes_per_subport = val;
1639 if (STR_EQ(str, "queue size")) {
1641 int err = parse_int(&val, pkey);
1645 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1646 targ->qos_conf.subport_params[0].qsize[0] = val;
1647 targ->qos_conf.subport_params[0].qsize[1] = val;
1648 targ->qos_conf.subport_params[0].qsize[2] = val;
1649 targ->qos_conf.subport_params[0].qsize[3] = val;
1651 targ->qos_conf.port_params.qsize[0] = val;
1652 targ->qos_conf.port_params.qsize[1] = val;
1653 targ->qos_conf.port_params.qsize[2] = val;
1654 targ->qos_conf.port_params.qsize[3] = val;
1658 if (STR_EQ(str, "subport tb rate")) {
1659 #if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
1660 return parse_u64(&targ->qos_conf.port_params.subport_profiles->tb_rate, pkey);
1662 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1663 return parse_u64(&targ->qos_conf.subport_params[0].tb_rate, pkey);
1665 return parse_int(&targ->qos_conf.subport_params[0].tb_rate, pkey);
1669 if (STR_EQ(str, "subport tb size")) {
1670 #if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
1671 return parse_u64(&targ->qos_conf.port_params.subport_profiles->tb_size, pkey);
1673 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1674 return parse_u64(&targ->qos_conf.subport_params[0].tb_size, pkey);
1676 return parse_int(&targ->qos_conf.subport_params[0].tb_size, pkey);
1680 if (STR_EQ(str, "subport tc 0 rate")) {
1681 #if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
1682 return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_rate[0], pkey);
1684 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1685 return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[0], pkey);
1687 return parse_int(&targ->qos_conf.subport_params[0].tc_rate[0], pkey);
1691 if (STR_EQ(str, "subport tc 1 rate")) {
1692 #if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
1693 return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_rate[1], pkey);
1695 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1696 return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[1], pkey);
1698 return parse_int(&targ->qos_conf.subport_params[0].tc_rate[1], pkey);
1702 if (STR_EQ(str, "subport tc 2 rate")) {
1703 #if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
1704 return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_rate[2], pkey);
1706 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1707 return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[2], pkey);
1709 return parse_int(&targ->qos_conf.subport_params[0].tc_rate[2], pkey);
1713 if (STR_EQ(str, "subport tc 3 rate")) {
1714 #if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
1715 return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_rate[3], pkey);
1717 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1718 return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[3], pkey);
1720 return parse_int(&targ->qos_conf.subport_params[0].tc_rate[3], pkey);
1725 if (STR_EQ(str, "subport tc rate")) {
1727 int err = parse_int(&val, pkey);
1732 #if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
1733 targ->qos_conf.port_params.subport_profiles->tc_rate[0] = val;
1734 targ->qos_conf.port_params.subport_profiles->tc_rate[1] = val;
1735 targ->qos_conf.port_params.subport_profiles->tc_rate[2] = val;
1736 targ->qos_conf.port_params.subport_profiles->tc_rate[3] = val;
1738 targ->qos_conf.subport_params[0].tc_rate[0] = val;
1739 targ->qos_conf.subport_params[0].tc_rate[1] = val;
1740 targ->qos_conf.subport_params[0].tc_rate[2] = val;
1741 targ->qos_conf.subport_params[0].tc_rate[3] = val;
1746 if (STR_EQ(str, "subport tc period")) {
1747 #if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
1748 return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_period, pkey);
1750 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1751 return parse_u64(&targ->qos_conf.subport_params[0].tc_period, pkey);
1753 return parse_int(&targ->qos_conf.subport_params[0].tc_period, pkey);
1757 if (STR_EQ(str, "pipe tb rate")) {
1758 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1759 return parse_u64(&targ->qos_conf.pipe_params[0].tb_rate, pkey);
1761 return parse_int(&targ->qos_conf.pipe_params[0].tb_rate, pkey);
1764 if (STR_EQ(str, "pipe tb size")) {
1765 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1766 return parse_u64(&targ->qos_conf.pipe_params[0].tb_size, pkey);
1768 return parse_int(&targ->qos_conf.pipe_params[0].tb_size, pkey);
1771 if (STR_EQ(str, "pipe tc rate")) {
1773 int err = parse_int(&val, pkey);
1778 targ->qos_conf.pipe_params[0].tc_rate[0] = val;
1779 targ->qos_conf.pipe_params[0].tc_rate[1] = val;
1780 targ->qos_conf.pipe_params[0].tc_rate[2] = val;
1781 targ->qos_conf.pipe_params[0].tc_rate[3] = val;
1784 if (STR_EQ(str, "pipe tc 0 rate")) {
1785 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1786 return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[0], pkey);
1788 return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[0], pkey);
1791 if (STR_EQ(str, "pipe tc 1 rate")) {
1792 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1793 return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[1], pkey);
1795 return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[1], pkey);
1798 if (STR_EQ(str, "pipe tc 2 rate")) {
1799 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1800 return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[2], pkey);
1802 return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[2], pkey);
1805 if (STR_EQ(str, "pipe tc 3 rate")) {
1806 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1807 return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[3], pkey);
1809 return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[3], pkey);
1812 if (STR_EQ(str, "pipe tc period")) {
1813 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
1814 return parse_u64(&targ->qos_conf.pipe_params[0].tc_period, pkey);
1816 return parse_int(&targ->qos_conf.pipe_params[0].tc_period, pkey);
1819 if (STR_EQ(str, "police action")) {
1820 char *in = strstr(pkey, " io=");
1822 set_errf("Need to specify io colors using io=in_color,out_color\n");
1826 in += strlen(" io=");
1828 char *out = strstr(in, ",");
1830 set_errf("Output color not specified\n");
1835 enum police_action in_color = str_to_color(in);
1836 enum police_action out_color = str_to_color(out);
1838 if (in_color == ACT_INVALID) {
1839 set_errf("Invalid input color %s. Expected green, yellow or red", in);
1842 if (out_color == ACT_INVALID) {
1843 set_errf("Invalid output color %s. Expected green, yellow or red", out);
1846 enum police_action action = str_to_color(pkey);
1847 if (action == ACT_INVALID) {
1848 set_errf("Error action %s. Expected green, yellow, red or drop", pkey);
1851 targ->police_act[in_color][out_color] = action;
1855 if (STR_EQ(str, "qinq tag")) {
1856 return parse_int(&targ->qinq_tag, pkey);
1858 if (STR_EQ(str, "cir")) {
1859 return parse_int(&targ->cir, pkey);
1861 if (STR_EQ(str, "cbs")) {
1862 return parse_int(&targ->cbs, pkey);
1864 if (STR_EQ(str, "pir")) {
1865 return parse_int(&targ->pir, pkey);
1867 if (STR_EQ(str, "pbs")) {
1868 return parse_int(&targ->pbs, pkey);
1870 if (STR_EQ(str, "ebs")) {
1871 return parse_int(&targ->ebs, pkey);
1873 uint32_t queue_id = 0;
1874 if (sscanf(str, "queue %d weight", &queue_id) == 1) {
1876 int err = parse_int(&val, pkey);
1880 if (queue_id >= RTE_SCHED_BE_QUEUES_PER_PIPE) {
1881 set_errf("queue_id must be < %d", RTE_SCHED_BE_QUEUES_PER_PIPE);
1884 targ->qos_conf.pipe_params[0].wrr_weights[queue_id] = val;
1887 if (STR_EQ(str, "classify")) {
1888 if (!(targ->task_init->flag_features & TASK_FEATURE_CLASSIFY)) {
1889 set_errf("Classify is not supported in '%s' mode", targ->task_init->mode_str);
1893 return parse_flag(&targ->runtime_flags, TASK_CLASSIFY, pkey);
1895 if (STR_EQ(str, "flow table size")) {
1896 return parse_int(&targ->flow_table_size, pkey);
1899 if (STR_EQ(str, "tbf rate")) {
1900 return parse_int(&targ->tb_rate, pkey);
1902 if (STR_EQ(str, "tbf size")) {
1903 return parse_int(&targ->tb_size, pkey);
1906 if (STR_EQ(str, "max rules")) {
1907 return parse_int(&targ->n_max_rules, pkey);
1910 if (STR_EQ(str, "tunnel hop limit")) {
1912 int err = parse_int(&val, pkey);
1916 targ->tunnel_hop_limit = val;
1920 if (STR_EQ(str, "lookup port mask")) {
1922 int err = parse_int(&val, pkey);
1926 targ->lookup_port_mask = val;
1930 if (STR_EQ(str, "irq debug")) {
1931 parse_int(&targ->irq_debug, pkey);
1935 if (STR_EQ(str, "multiplier")) {
1936 parse_int(&targ->multiplier, pkey);
1940 if (STR_EQ(str, "mirror size")) {
1941 parse_int(&targ->mirror_size, pkey);
1945 set_errf("Option '%s' is not known", str);
1946 /* fail on unknown keys */
/*
 * Return non-zero when 'in' is a plain decimal number: digits with at
 * most one '.' separator (e.g. "10", "0.5"). Used to validate the -r
 * update-interval command-line option.
 *
 * Fixes vs. the previous version: strlen() is hoisted out of the loop
 * condition (it is loop-invariant), and the degenerate inputs "" and
 * "." are rejected instead of being accepted as numbers.
 */
static int str_is_number(const char *in)
{
	int dot_once = 0;
	size_t len = strlen(in);

	if (len == 0)
		return 0;	/* empty string is not a number */

	for (size_t i = 0; i < len; ++i) {
		if (!dot_once && in[i] == '.') {
			dot_once = 1;
			continue;
		}
		if (in[i] < '0' || in[i] > '9')
			return 0;
	}
	/* a lone "." consumed the dot above but matched no digit */
	return !(len == 1 && dot_once);
}
1967 /* Command-line parameter parsing (runs before DPDK EAL argument parsing). */
/*
 * Parse PROX's own command-line options via getopt(). Sets defaults
 * (autostart + wait-on-quit, curses UI), logs the full command line,
 * then handles each option; at the end getopt state is reset so DPDK's
 * rte_eal_init() can parse its own arguments. NOTE: this listing is
 * elided - case labels and some statements between the numbered lines
 * are not shown here.
 */
1968 int prox_parse_args(int argc, char **argv)
1974 /* Default settings */
1975 prox_cfg.flags |= DSF_AUTOSTART | DSF_WAIT_ON_QUIT;
1976 prox_cfg.ui = PROX_UI_CURSES;
/* Echo the invocation for the log */
1978 plog_info("\tCommand line:");
1979 for (i = 0; i < argc; ++i) {
1980 plog_info(" %s", argv[i]);
1984 while ((opt = getopt(argc, argv, "f:dnzpo:tkuar:emsiw:l:v:q:")) != EOF) {
1987 /* path to config file */
/* Derive prox_cfg.name from the basename of the config file path */
1990 for (size_t i = 0; i < strlen(cfg_file); ++i) {
1991 if (cfg_file[i] == '/') {
1996 prox_strncpy(prox_cfg.name, cfg_file + offset, MAX_NAME_SIZE);
1999 plog_set_lvl(atoi(optarg));
2002 prox_cfg.log_name_pid = 0;
2003 prox_strncpy(prox_cfg.log_name, optarg, MAX_NAME_SIZE);
2006 prox_cfg.log_name_pid = 1;
2009 prox_cfg.use_stats_logger = 1;
/* Daemon mode implies no interactive UI */
2012 prox_cfg.flags |= DSF_DAEMON;
2013 prox_cfg.ui = PROX_UI_NONE;
2016 prox_cfg.flags |= DSF_USE_DUMMY_CPU_TOPO;
2017 prox_cfg.flags |= DSF_CHECK_INIT;
2020 prox_cfg.flags |= DSF_USE_DUMMY_DEVICES;
/* Stats update interval: numeric string of at most 11 chars */
2023 if (!str_is_number(optarg) || strlen(optarg) > 11)
2025 prox_strncpy(prox_cfg.update_interval_str, optarg, sizeof(prox_cfg.update_interval_str));
/* UI selection is ignored/invalid when running as a daemon */
2028 if (prox_cfg.flags & DSF_DAEMON)
2031 if (!strcmp(optarg, "curses")) {
2032 prox_cfg.ui = PROX_UI_CURSES;
2034 else if (!strcmp(optarg, "cli")) {
2035 prox_cfg.ui = PROX_UI_CLI;
2037 else if (!strcmp(optarg, "none")) {
2038 prox_cfg.ui = PROX_UI_NONE;
2041 plog_err("Invalid local UI '%s', local UI can be 'curses', 'cli' or 'none'.", optarg);
/* Execute a Lua chunk supplied on the command line */
2046 if (luaL_loadstring(prox_lua(), optarg)) {
2047 set_errf("Lua error: '%s'\n", lua_tostring(prox_lua(), -1));
2051 if (lua_pcall(prox_lua(), 0, LUA_MULTRET, 0)) {
2052 set_errf("Lua error: '%s'\n", lua_tostring(prox_lua(), -1));
2058 /* autostart all cores */
2059 prox_cfg.flags |= DSF_AUTOSTART;
2062 /* don't autostart */
2063 prox_cfg.flags &= ~DSF_AUTOSTART;
2066 prox_cfg.flags |= DSF_LISTEN_TCP;
2069 prox_cfg.flags |= DSF_LISTEN_UDS;
2072 /* list supported task modes and exit */
2073 prox_cfg.flags |= DSF_LIST_TASK_MODES;
2076 /* check configuration file syntax and exit */
2077 prox_cfg.flags |= DSF_CHECK_SYNTAX;
2080 /* check initialization sequence and exit */
2081 prox_cfg.flags |= DSF_CHECK_INIT;
/* "name=value" variable definitions for the config file parser;
 * the "=" split requires at least 3 chars (e.g. "a=b") */
2086 if (strlen(tmp) >= 3 &&
2087 (tmp2 = strchr(tmp, '='))) {
2090 prox_strncpy(tmp3 + 1, tmp, 63);
2091 plog_info("\tAdding variable: %s = %s\n", tmp3, tmp2 + 1);
2092 ret = add_var(tmp3, tmp2 + 1, 1);
2094 plog_err("\tFailed to add variable, too many variables defines\n");
2097 else if(ret == -3) {
2098 plog_err("\tFailed to add variable, already defined\n");
2105 plog_err("\tUnknown option\n");
2110 /* reset getopt lib for DPDK */
/*
 * Sanity-check the parsed configuration. Uses a local RETURN_IF macro
 * (body elided in this listing) that presumably sets the error string
 * and returns non-zero when its condition holds - TODO confirm against
 * the full source.
 */
2116 static int check_cfg(void)
2119 #define RETURN_IF(cond, err) \
/* Memory channel count and a master core are both mandatory */
2125 RETURN_IF(rte_cfg.force_nchannel == 0, "\tError: number of memory channels not specified in [eal options] section\n");
2126 RETURN_IF(prox_cfg.master >= RTE_MAX_LCORE, "\tError: No master core specified (one core needs to have mode=master)\n");
/*
 * Count, for every destination task, how many rings it will receive
 * from, and validate that each "tx cores" destination is an active
 * core, an enabled task, one that expects ring input, and not
 * oversubscribed. Returns non-zero (with err_str set via set_errf) on
 * configuration errors. Some closing braces/returns are elided in
 * this listing.
 */
2133 static int calc_tot_rxrings(void)
2135 struct lcore_cfg *slconf, *dlconf;
2136 struct task_args *starg, *dtarg;
2139 struct core_task ct;
/* Pass 1: reset every destination task's ring counter */
2142 while (core_targ_next_early(&dlconf, &dtarg, 1) == 0) {
2143 dtarg->tot_rxrings = 0;
/* Pass 2: walk every source task's core_task_set entries */
2147 while (core_targ_next_early(&slconf, &starg, 1) == 0) {
2148 for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
2149 for (uint8_t ring_idx = 0; ring_idx < starg->core_task_set[idx].n_elems; ++ring_idx) {
2150 ct = starg->core_task_set[idx].core_task[ring_idx];
2151 if (!prox_core_active(ct.core, 0)) {
2152 set_errf("Core %u is disabled but Core %u task %u is sending to it\n",
2153 ct.core, slconf->id, starg->id);
2157 dlconf = &lcore_cfg_init[ct.core];
2159 if (ct.task >= dlconf->n_tasks_all) {
2160 set_errf("Core %u task %u not enabled\n", ct.core, ct.task);
2164 dtarg = &dlconf->targs[ct.task];
2166 /* Control rings are not relevant at this point. */
2170 if (!(dtarg->flags & TASK_ARG_RX_RING)) {
2171 set_errf("Core %u task %u is not expecting to receive through a ring\n",
/* Account for this ring and enforce the per-task limit */
2176 dtarg->tot_rxrings++;
2177 if (dtarg->tot_rxrings > MAX_RINGS_PER_TASK) {
2178 set_errf("Core %u task %u is receiving from too many tasks",
/*
 * Mark as active every lcore that has at least one task configured and
 * whose first task is not the master task (the master core is tracked
 * separately in prox_cfg.master).
 */
2189 static void prox_set_core_mask(void)
2191 struct lcore_cfg *lconf;
2194 for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
2195 lconf = &lcore_cfg_init[lcore_id];
2196 if (lconf->n_tasks_all > 0 && lconf->targs[0].mode != MASTER) {
2197 prox_core_set_active(lcore_id);
/*
 * Return non-zero when any configured task runs in no-drop mode
 * (i.e. its TASK_ARG_DROP flag is cleared). The caller uses this to
 * clear DSF_WAIT_ON_QUIT, since no-drop tasks must not be left
 * blocking on full rings at shutdown. Return statements are elided in
 * this listing.
 */
2202 static int is_using_no_drop(void)
2205 struct lcore_cfg *lconf;
2206 struct task_args *targs;
/* Iterate over all active cores, then over each core's tasks */
2209 while(prox_core_next(&lcore_id, 1) == 0) {
2210 lconf = &lcore_cfg_init[lcore_id];
2211 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
2212 targs = &lconf->targs[task_id];
2213 if (!(targs->flags & TASK_ARG_DROP))
/*
 * Load and parse the PROX configuration file: apply defaults, open the
 * file, run every registered [section] parser, then derive the core
 * mask and total rx-ring counts. Returns non-zero on any parse or
 * validation error (the section table initializer and some returns are
 * elided in this listing).
 */
2220 int prox_read_config_file(void)
2222 set_global_defaults(&prox_cfg);
2223 set_task_defaults(&prox_cfg, lcore_cfg_init);
2224 set_port_defaults();
2225 plog_info("=== Parsing configuration file '%s' ===\n", cfg_file);
2226 struct cfg_file *pcfg = cfg_open(cfg_file);
2231 struct cfg_section* config_sections[] = {
/* Parse each section in order; a trailing '#' in the name marks a
 * repeatable section family (e.g. [core #]) */
2243 for (struct cfg_section** section = config_sections; *section != NULL; ++section) {
2244 const char* name = (*section)->name;
2245 size_t len = strlen(name);
2246 plog_info("\t*** Reading [%s] section%s ***\n", name, name[len - 1] == '#'? "s": "");
2247 cfg_parse(pcfg, *section);
2249 if ((*section)->error) {
2250 plog_err("At line %u, section [%s], entry %u: '%s'\n\t%s\n"
2251 , pcfg->err_line, pcfg->err_section, pcfg->err_entry + 1, pcfg->cur_line,
2252 strlen(get_parse_err())? get_parse_err() : err_str);
2253 cfg_close(pcfg); /* cannot close before printing error, print uses internal buffer */
/* Post-parse fixups and validation */
2260 prox_set_core_mask();
/* no-drop tasks must not block on quit waiting for rings to drain */
2262 if (is_using_no_drop()) {
2263 prox_cfg.flags &= ~DSF_WAIT_ON_QUIT;
2266 if (calc_tot_rxrings()) {
2267 plog_err("Error in configuration: %s\n", err_str);
/*
 * Application-usage hook installed before rte_eal_init(); invoked by
 * DPDK when EAL initialization fails, so the failure is reported
 * through PROX's own logger instead of EAL's usage output.
 */
2274 static void failed_rte_eal_init(__attribute__((unused))const char *prog_name)
2276 plog_err("\tError in rte_eal_init()\n");
2279 int prox_setup_rte(const char *prog_name)
2281 char *rte_argv[MAX_RTE_ARGV];
2282 char rte_arg[MAX_RTE_ARGV][MAX_ARG_LEN];
2283 char tmp[PROX_CM_STR_LEN];
2284 /* create mask of used cores */
2285 plog_info("=== Setting up RTE EAL ===\n");
2287 if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO) {
2288 plog_info("Using dummy cpu topology\n");
2289 snprintf(tmp, sizeof(tmp), "0x1");
2291 prox_core_to_hex(tmp, sizeof(tmp), 0);
2292 plog_info("\tWorker threads core mask is %s\n", tmp);
2293 prox_core_to_hex(tmp, sizeof(tmp), 1);
2294 plog_info("\tWith master core index %u, full core mask is %s\n", prox_cfg.master, tmp);
2297 /* fake command line parameters for rte_eal_init() */
2299 rte_argv[argc] = strdup(prog_name);
2300 sprintf(rte_arg[++argc], "-c%s", tmp);
2301 rte_argv[argc] = rte_arg[argc];
2302 #if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
2303 uint32_t master_core = prox_cfg.master;
2304 if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO)
2306 #if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
2307 sprintf(rte_arg[++argc], "--master-lcore=%u", master_core);
2309 sprintf(rte_arg[++argc], "--main-lcore=%u", master_core);
2311 rte_argv[argc] = rte_arg[argc];
2313 /* For old DPDK versions, the master core had to be the first
2315 uint32_t first_core = -1;
2317 if (prox_core_next(&first_core, 1) == -1) {
2318 plog_err("Can't core ID of first core in use\n");
2321 if (first_core != prox_cfg.master) {
2322 plog_err("The master core needs to be the first core (master core = %u, first core = %u).\n", first_core, prox_cfg.master);
2327 if (rte_cfg.memory) {
2328 sprintf(rte_arg[++argc], "-m%u", rte_cfg.memory);
2329 rte_argv[argc] = rte_arg[argc];
2332 if (rte_cfg.force_nchannel) {
2333 sprintf(rte_arg[++argc], "-n%u", rte_cfg.force_nchannel);
2334 rte_argv[argc] = rte_arg[argc];
2337 if (rte_cfg.force_nrank) {
2338 sprintf(rte_arg[++argc], "-r%u", rte_cfg.force_nrank);
2339 rte_argv[argc] = rte_arg[argc];
2342 if (rte_cfg.no_hugetlbfs) {
2343 strcpy(rte_arg[++argc], "--no-huge");
2344 rte_argv[argc] = rte_arg[argc];
2347 if (rte_cfg.no_pci) {
2348 strcpy(rte_arg[++argc], "--no-pci");
2349 rte_argv[argc] = rte_arg[argc];
2352 if (rte_cfg.no_hpet) {
2353 strcpy(rte_arg[++argc], "--no-hpet");
2354 rte_argv[argc] = rte_arg[argc];
2357 if (rte_cfg.no_shconf) {
2358 strcpy(rte_arg[++argc], "--no-shconf");
2359 rte_argv[argc] = rte_arg[argc];
2362 if (rte_cfg.eal != NULL) {
2363 char *ptr = rte_cfg.eal;
2365 while (ptr != NULL) {
2366 while (isspace(*ptr))
2369 ptr = strchr(ptr, ' ');
2373 prox_strncpy(rte_arg[++argc], ptr2, MAX_ARG_LEN);
2374 rte_argv[argc] = rte_arg[argc];
2378 if (rte_cfg.hugedir != NULL) {
2379 strcpy(rte_arg[++argc], "--huge-dir");
2380 rte_argv[argc] = rte_arg[argc];
2381 rte_argv[++argc] = rte_cfg.hugedir;
2384 if (rte_cfg.no_output) {
2385 rte_log_set_global_level(0);
2388 plog_info("\tEAL command line:");
2389 if (argc >= MAX_RTE_ARGV) {
2390 plog_err("too many arguments for EAL\n");
2394 for (int h = 0; h <= argc; ++h) {
2395 plog_info(" %s", rte_argv[h]);
2399 rte_set_application_usage_hook(failed_rte_eal_init);
2400 if (rte_eal_init(++argc, rte_argv) < 0) {
2401 plog_err("\tError in rte_eal_init()\n");
2404 plog_info("\tEAL Initialized\n");
2406 if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO)
2409 /* check if all active cores are in enabled in DPDK */
2410 for (uint32_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
2411 if (lcore_id == prox_cfg.master) {
2412 if (!rte_lcore_is_enabled(lcore_id))
2415 else if (rte_lcore_is_enabled(lcore_id) != prox_core_active(lcore_id, 0)) {
2416 plog_err("\tFailed to enable lcore %u\n", lcore_id);
2419 else if (lcore_cfg_init[lcore_id].n_tasks_all != 0 && !rte_lcore_is_enabled(lcore_id)) {
2420 plog_err("\tFailed to enable lcore %u\n", lcore_id);
2425 for (int i = 0; i < n_deferred_ports; i++) {
2426 if (prox_rte_eth_dev_get_port_by_name(deferred_port[i].name, &port_id) != 0) {
2427 plog_err("Did not find port name %s used while reading %s\n", deferred_port[i].name, deferred_port[i].is_rx_port ? "rx port" : "tx_port");
2430 plog_info("\tport %s is port id %d\n", deferred_port[i].name, port_id);
2431 if (deferred_port[i].is_rx_port) {
2432 deferred_port[i].targ->rx_port_queue[0].port = port_id;
2433 deferred_port[i].targ->nb_rxports = 1;
2435 deferred_port[i].targ->tx_port_queue[0].port = port_id;
2436 deferred_port[i].targ->nb_txports = 1;