// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <rte_sched.h>
#include <rte_string_fns.h>
#include <rte_version.h>

#include "prox_malloc.h"
#include "prox_args.h"
#include "prox_assert.h"
#include "parse_utils.h"
#include "prox_port_cfg.h"
#include "prox_ipv6.h"
#include "prox_compat.h"
#include "ip_subnet.h"

#define MAX_RTE_ARGV 64
#define MAX_ARG_LEN  64

#define STR_EQ(s1, s2)	(!strcmp((s1), (s2)))
/* configuration file support */
static int get_rte_cfg(unsigned sindex, char *str, void *data);
static int get_global_cfg(unsigned sindex, char *str, void *data);
static int get_port_cfg(unsigned sindex, char *str, void *data);
static int get_defaults_cfg(unsigned sindex, char *str, void *data);
static int get_cache_set_cfg(unsigned sindex, char *str, void *data);
static int get_var_cfg(unsigned sindex, char *str, void *data);
static int get_lua_cfg(unsigned sindex, char *str, void *data);
static int get_core_cfg(unsigned sindex, char *str, void *data);
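/* Each [section] of the config file is handled by one of the parsers
 * declared above. The cfg_file layer invokes the parser once per
 * "key=value" line: sindex carries the section index (OR'ed with
 * CFG_INDEXED for numbered sections such as [core 1]), str holds the
 * key (get_cfg_key() returns the value part), and data is the
 * section's .data pointer. Parsers return 0 on success and non-zero
 * after calling set_errf(). Illustrative config skeleton (section
 * names per the cfg_section definitions below, contents hypothetical):
 *
 *   [eal options]
 *   [port 0]
 *   [defaults]
 *   [global]
 *   [core 0]
 */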
static const char *cfg_file = DEFAULT_CONFIG_FILE;
static struct rte_cfg rte_cfg;
struct prox_cache_set_cfg prox_cache_set_cfg[PROX_MAX_CACHE_SET];

static char format_err_str[1024];
static const char *err_str = "Unknown error";
static struct cfg_section eal_default_cfg = {
	.name   = "eal options",
	.parser = get_rte_cfg,
	.data   = &rte_cfg,
};

static struct cfg_section port_cfg = {
	.name   = "port #",
	.parser = get_port_cfg,
	.data   = &prox_port_cfg,
};

static struct cfg_section var_cfg = {
	.name   = "variables",
	.parser = get_var_cfg,
};

static struct cfg_section cache_set_cfg = {
	.name   = "cache set #",
	.parser = get_cache_set_cfg,
	.data   = &prox_cache_set_cfg,
};

static struct cfg_section defaults_cfg = {
	.name   = "defaults",
	.parser = get_defaults_cfg,
};

static struct cfg_section settings_cfg = {
	.name   = "global",
	.parser = get_global_cfg,
	.data   = &prox_cfg,
};

static struct cfg_section lua_cfg = {
	.name   = "lua",
	.parser = get_lua_cfg,
};

static struct cfg_section core_cfg = {
	.name   = "core #",
	.parser = get_core_cfg,
	.data   = lcore_cfg_init,
};
struct deferred_port {
	struct task_args *targ;
	char name[256];
	uint8_t is_rx_port;
};

static struct deferred_port deferred_port[PROX_MAX_PORTS];
static int n_deferred_ports = 0;
static void set_errf(const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	vsnprintf(format_err_str, sizeof(format_err_str), format, ap);
	va_end(ap);
	err_str = format_err_str;
}
/* [eal options] parser */
static int get_rte_cfg(__attribute__((unused))unsigned sindex, char *str, void *data)
{
	struct rte_cfg *pconfig = (struct rte_cfg *)data;

	if (str == NULL || pconfig == NULL) {
		return -1;
	}

	char *pkey = get_cfg_key(str);
	if (pkey == NULL) {
		set_errf("Missing key after option");
		return -1;
	}

	if (STR_EQ(str, "-m")) {
		return parse_int(&pconfig->memory, pkey);
	}
	if (STR_EQ(str, "-n")) {
		if (parse_int(&pconfig->force_nchannel, pkey)) {
			return -1;
		}
		if (pconfig->force_nchannel == 0) {
			set_errf("Invalid number of memory channels");
			return -1;
		}
		return 0;
	}
	if (STR_EQ(str, "-r")) {
		if (parse_int(&pconfig->force_nrank, pkey)) {
			return -1;
		}
		if (pconfig->force_nrank == 0 || pconfig->force_nrank > 16) {
			set_errf("Invalid number of memory ranks");
			return -1;
		}
		return 0;
	}

	if (STR_EQ(str, "no-pci")) {
		return parse_bool(&pconfig->no_pci, pkey);
	}
	if (STR_EQ(str, "no-hpet")) {
		return parse_bool(&pconfig->no_hpet, pkey);
	}
	if (STR_EQ(str, "no-shconf")) {
		return parse_bool(&pconfig->no_shconf, pkey);
	}
	if (STR_EQ(str, "no-huge")) {
		return parse_bool(&pconfig->no_hugetlbfs, pkey);
	}
	if (STR_EQ(str, "no-output")) {
		return parse_bool(&pconfig->no_output, pkey);
	}

	if (STR_EQ(str, "huge-dir")) {
		if (pconfig->hugedir) {
			free(pconfig->hugedir);
		}
		pconfig->hugedir = strdup(pkey);
		return 0;
	}

	if (STR_EQ(str, "eal")) {
		char eal[MAX_STR_LEN_PROC];
		if (parse_str(eal, pkey, sizeof(eal)))
			return -1;
		pkey = eal;
		strip_spaces(&pkey, 1);
		pconfig->eal = strdup(pkey);
		return 0;
	}

	set_errf("Option '%s' is not known", str);
	return -1;
}
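/* Illustrative [eal options] section matching the keys parsed above
 * (values are hypothetical):
 *
 *   [eal options]
 *   -n=4            ; force 4 memory channels
 *   no-output=no    ; keep DPDK output
 *   eal=--proc-type=auto
 */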
struct cfg_depr global_cfg_depr[] = {
	{"virtualization", "This is now set automatically if needed"},
	{"qinq_tag", "This option is deprecated"},
	{"wait on quit", "This is now set automatically if needed"},
};
const char *get_cfg_dir(void)
{
	static char dir[PATH_MAX];
	size_t end = strlen(cfg_file) - 1;
	while (end > 0 && cfg_file[end] != '/')
		end--;

	strncpy(dir, cfg_file, end);
	return dir;
}
static int get_lua_cfg(__attribute__((unused)) unsigned sindex, __attribute__((unused)) char *str, __attribute__((unused)) void *data)
{
	int status;
	char cwd[1024];

	if (NULL == getcwd(cwd, sizeof(cwd))) {
		set_errf("Failed to get current directory while loading Lua file\n");
		return -1;
	}
	status = chdir(get_cfg_dir());
	if (status) {
		set_errf("Failed to change directory to '%s' while loading Lua file\n", get_cfg_dir());
		return -1;
	}

	struct lua_State *l = prox_lua();

	char str_cpy[1024];
	prox_strncpy(str_cpy, str, sizeof(str_cpy));
	uint32_t len = strlen(str_cpy);
	str_cpy[len++] = '\n';
	str_cpy[len] = 0;

	status = luaL_loadstring(l, str_cpy);
	if (status) {
		set_errf("Lua error: '%s'\n", lua_tostring(l, -1));
		return -1;
	}

	status = lua_pcall(l, 0, LUA_MULTRET, 0);
	if (status) {
		set_errf("Lua error: '%s'\n", lua_tostring(l, -1));
		return -1;
	}

	status = chdir(cwd);
	if (status) {
		set_errf("Failed to restore current directory to '%s' while loading Lua file\n", cwd);
		return -1;
	}

	return 0;
}
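/* Illustrative [lua] section: each line is executed as Lua code with
 * the working directory set to the config file's directory (file name
 * hypothetical):
 *
 *   [lua]
 *   dofile("parameters.lua")
 */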
/* [global] parser */
static int get_global_cfg(__attribute__((unused))unsigned sindex, char *str, void *data)
{
	struct prox_cfg *pset = (struct prox_cfg *)data;

	if (str == NULL || pset == NULL) {
		return -1;
	}

	char *pkey = get_cfg_key(str);
	if (pkey == NULL) {
		set_errf("Missing key after option");
		return -1;
	}

	for (uint32_t i = 0; i < RTE_DIM(global_cfg_depr); ++i) {
		if (STR_EQ(str, global_cfg_depr[i].opt)) {
			set_errf("Option '%s' is deprecated%s%s",
				 global_cfg_depr[i].opt, strlen(global_cfg_depr[i].info)? ": ": "", global_cfg_depr[i].info);
			return -1;
		}
	}

	if (STR_EQ(str, "name")) {
		return parse_str(pset->name, pkey, sizeof(pset->name));
	}
	if (STR_EQ(str, "start time")) {
		return parse_int(&pset->start_time, pkey);
	}
	if (STR_EQ(str, "duration time")) {
		return parse_int(&pset->duration_time, pkey);
	}
	if (STR_EQ(str, "shuffle")) {
		return parse_flag(&pset->flags, DSF_SHUFFLE, pkey);
	}
	if (STR_EQ(str, "disable cmt")) {
		return parse_flag(&pset->flags, DSF_DISABLE_CMT, pkey);
	}
	if (STR_EQ(str, "mp rings")) {
		return parse_flag(&pset->flags, DSF_MP_RINGS, pkey);
	}
	if (STR_EQ(str, "enable bypass")) {
		return parse_flag(&pset->flags, DSF_ENABLE_BYPASS, pkey);
	}
	if (STR_EQ(str, "poll timeout")) {
		return parse_int(&pset->poll_timeout, pkey);
	}
	if (STR_EQ(str, "heartbeat timeout")) {
		return parse_int(&pset->heartbeat_timeout, pkey);
	}

	if (STR_EQ(str, "cpe table map")) {
		/* The config refers to ports as 0, 1, 2, ...; the mapping
		   to actual ports is declared here via "cpe table map=". */
		return parse_port_name_list((uint32_t*)pset->cpe_table_ports, NULL, PROX_MAX_PORTS, pkey);
	}

	if (STR_EQ(str, "pre cmd")) {
		return system(pkey);
	}

	if (STR_EQ(str, "unique mempool per socket")) {
		return parse_flag(&pset->flags, UNIQUE_MEMPOOL_PER_SOCKET, pkey);
	}

	if (STR_EQ(str, "log buffer size")) {
		if (parse_kmg(&pset->logbuf_size, pkey)) {
			return -1;
		}
		plog_info("Logging to buffer with size = %d\n", pset->logbuf_size);
		return 0;
	}

	set_errf("Option '%s' is not known", str);
	return -1;
}
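/* Illustrative [global] section (keys per the parser above, values
 * hypothetical):
 *
 *   [global]
 *   name=my_test
 *   start time=5
 *   duration time=120
 *   mp rings=yes
 */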
/* [variables] parser */
static int get_var_cfg(__attribute__((unused)) unsigned sindex, char *str, __attribute__((unused)) void *data)
{
	return add_var(str, get_cfg_key(str), 0);
}
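/* Illustrative [variables] section: each line registers a $variable
 * via add_var() for later substitution in the file (names/values
 * hypothetical):
 *
 *   [variables]
 *   $mcore=0
 *   $wt=1-4
 */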
/* [defaults] parser */
static int get_defaults_cfg(__attribute__((unused)) unsigned sindex, char *str, __attribute__((unused)) void *data)
{
	uint32_t val;
	char *pkey;

	pkey = get_cfg_key(str);
	if (pkey == NULL) {
		set_errf("Missing key after option");
		return -1;
	}

	if (STR_EQ(str, "mempool size")) {
		if (parse_kmg(&val, pkey)) {
			return -1;
		}

		for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
			struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id];
			cur_lcore_cfg_init->id = lcore_id;
			for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) {
				struct task_args *targ = &cur_lcore_cfg_init->targs[task_id];
				targ->nb_mbuf = val;
			}
		}
		return 0;
	}

	if (STR_EQ(str, "qinq tag")) {
		for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
			struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id];
			cur_lcore_cfg_init->id = lcore_id;
			for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) {
				struct task_args *targ = &cur_lcore_cfg_init->targs[task_id];
				parse_int(&targ->qinq_tag, pkey);
			}
		}
		return 0;
	}

	if (STR_EQ(str, "memcache size")) {
		if (parse_kmg(&val, pkey)) {
			return -1;
		}

		for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
			struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id];
			cur_lcore_cfg_init->id = lcore_id;
			for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) {
				struct task_args *targ = &cur_lcore_cfg_init->targs[task_id];
				targ->nb_cache_mbuf = val;
			}
		}
		return 0;
	}

	set_errf("Option '%s' is not known", str);
	return -1;
}
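/* Illustrative [defaults] section: values here seed every task of
 * every lcore before the per-core sections are parsed (values
 * hypothetical):
 *
 *   [defaults]
 *   mempool size=4K
 *   memcache size=512
 */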
/* [cache set] parser */
static int get_cache_set_cfg(unsigned sindex, char *str, void *data)
{
	struct prox_cache_set_cfg *cfg = (struct prox_cache_set_cfg *)data;

	uint8_t cur_if = sindex & ~CFG_INDEXED;

	if (cur_if >= PROX_MAX_CACHE_SET) {
		set_errf("Cache set ID is too high (max allowed %d)", PROX_MAX_CACHE_SET - 1);
		return -1;
	}

	cfg = &prox_cache_set_cfg[cur_if];

	if (str == NULL || data == NULL) {
		return -1;
	}

	char *pkey = get_cfg_key(str);
	if (pkey == NULL) {
		set_errf("Missing key after option");
		return -1;
	}

	if (STR_EQ(str, "mask")) {
		uint32_t val;
		int err = parse_int(&val, pkey);
		if (err) {
			return -1;
		}
		cfg->mask = val;
		plog_info("\tCache set %d has mask %x\n", cur_if, cfg->mask);
	}
	return 0;
}
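/* Illustrative [cache set 0] section: the mask selects which cache
 * ways cores bound to this set may use (value hypothetical):
 *
 *   [cache set 0]
 *   mask=0x0f
 */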
static int get_port_cfg(unsigned sindex, char *str, void *data)
{
	struct prox_port_cfg *cfg = (struct prox_port_cfg *)data;

	uint8_t cur_if = sindex & ~CFG_INDEXED;

	if (cur_if >= PROX_MAX_PORTS) {
		set_errf("Port ID is too high (max allowed %d)", PROX_MAX_PORTS - 1);
		return -1;
	}

	cfg = &prox_port_cfg[cur_if];

	if (str == NULL || data == NULL) {
		return -1;
	}

	char *pkey = get_cfg_key(str);
	if (pkey == NULL) {
		set_errf("Missing key after option");
		return -1;
	}

	if (STR_EQ(str, "mac")) {
		if (STR_EQ(pkey, "hardware")) {
			cfg->type = PROX_PORT_MAC_HW;
		}
		else if (STR_EQ(pkey, "random")) {
			cfg->type = PROX_PORT_MAC_RAND;
		}
		else {
			cfg->type = PROX_PORT_MAC_SET;
			if (parse_mac(&cfg->eth_addr, pkey)) {
				return -1;
			}
		}
	}
	else if (STR_EQ(str, "name")) {
		prox_strncpy(cfg->names[0], pkey, MAX_NAME_SIZE);
		PROX_ASSERT(cur_if < PROX_MAX_PORTS);
		return add_port_name(cur_if, pkey);
	}
	else if (STR_EQ(str, "rx desc")) {
		return parse_int(&cfg->n_rxd, pkey);
	}
	else if (STR_EQ(str, "tx desc")) {
		return parse_int(&cfg->n_txd, pkey);
	}
	else if (STR_EQ(str, "all_rx_queues")) {
		uint32_t val;
		if (parse_bool(&val, pkey)) {
			return -1;
		}
		cfg->all_rx_queues = val;
	}
	else if (STR_EQ(str, "promiscuous")) {
		uint32_t val;
		if (parse_bool(&val, pkey)) {
			return -1;
		}
		cfg->promiscuous = val;
	}
	else if (STR_EQ(str, "multicast")) {
		if (cfg->nb_mc_addr >= NB_MCAST_ADDR) {
			plog_err("too many multicast addresses\n");
			return -1;
		}
		if (parse_mac(&cfg->mc_addr[cfg->nb_mc_addr], pkey)) {
			return -1;
		}
		cfg->nb_mc_addr++;
	}
	else if (STR_EQ(str, "lsc")) {
		cfg->lsc_set_explicitely = 1;
		uint32_t val;
		if (parse_bool(&val, pkey)) {
			return -1;
		}
		cfg->lsc_val = val;
	}
	else if (STR_EQ(str, "local ipv4")) {
		if (parse_ip_set(cfg->ip_addr, pkey, PROX_MAX_VLAN_TAGS) != 0) {
			cfg->ip_addr[0].prefix = 24;
			return parse_ip(&cfg->ip_addr[0].ip, pkey);
		}
		return 0;
	}
	else if (STR_EQ(str, "virtual")) {
		uint32_t val;
		if (parse_bool(&val, pkey)) {
			return -1;
		}
		cfg->virtual = val;
	}
	else if (STR_EQ(str, "vdev")) {
		prox_strncpy(cfg->vdev, pkey, MAX_NAME_SIZE);
	}
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
	else if (STR_EQ(str, "disable tx offload")) {
		uint32_t val;
		if (parse_int(&val, pkey)) {
			return -1;
		}
		if (val)
			cfg->disabled_tx_offload = val;
	}
#endif
	else if (STR_EQ(str, "strip crc")) {
		uint32_t val;
		if (parse_bool(&val, pkey)) {
			return -1;
		}
#if defined(DEV_RX_OFFLOAD_CRC_STRIP)
		if (val)
			cfg->requested_rx_offload |= DEV_RX_OFFLOAD_CRC_STRIP;
		else
			cfg->requested_rx_offload &= ~DEV_RX_OFFLOAD_CRC_STRIP;
#else
#if defined (DEV_RX_OFFLOAD_KEEP_CRC)
		if (val)
			cfg->requested_rx_offload &= ~DEV_RX_OFFLOAD_KEEP_CRC;
		else
			cfg->requested_rx_offload |= DEV_RX_OFFLOAD_KEEP_CRC;
#endif
#endif
	}
	else if (STR_EQ(str, "vlan tag")) {
		return parse_int_set(cfg->vlan_tags, pkey, sizeof(cfg->vlan_tags) / sizeof(cfg->vlan_tags[0]));
	}
	else if (STR_EQ(str, "vlan")) {
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
		uint32_t val;
		if (parse_bool(&val, pkey)) {
			return -1;
		}
		if (val) {
			cfg->requested_rx_offload |= DEV_RX_OFFLOAD_VLAN_STRIP;
			cfg->requested_tx_offload |= DEV_TX_OFFLOAD_VLAN_INSERT;
		} else {
			cfg->requested_rx_offload &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
			cfg->requested_tx_offload &= ~DEV_TX_OFFLOAD_VLAN_INSERT;
		}
#else
		plog_warn("vlan option not supported: update DPDK to at least 18.08 to support this option\n");
#endif
	}
	else if (STR_EQ(str, "mtu size")) {
		uint32_t val;
		if (parse_int(&val, pkey)) {
			return -1;
		}
		if (val) {
			cfg->mtu = val;
			/* A frame of 1526 bytes (1500-byte MTU, 14-byte header,
			   4-byte CRC and 8 bytes of VLAN tags) should not be
			   considered a jumbo frame, yet rte_ethdev.c caps
			   max_rx_pkt_len for non-jumbo frames at 1518. */
			cfg->port_conf.rxmode.max_rx_pkt_len = cfg->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN;
			if (cfg->port_conf.rxmode.max_rx_pkt_len > PROX_RTE_ETHER_MAX_LEN) {
				cfg->requested_rx_offload |= DEV_RX_OFFLOAD_JUMBO_FRAME;
			}
		}
	}
	else if (STR_EQ(str, "rss")) {
		uint32_t val;
		if (parse_bool(&val, pkey)) {
			return -1;
		}
		if (val) {
			cfg->port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4;
		}
	}
	else if (STR_EQ(str, "rx_ring")) {
		parse_str(cfg->rx_ring, pkey, sizeof(cfg->rx_ring));
	}
	else if (STR_EQ(str, "tx_ring")) {
		parse_str(cfg->tx_ring, pkey, sizeof(cfg->tx_ring));
	}

	return 0;
}
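/* Illustrative [port 0] section combining keys handled above (values
 * hypothetical):
 *
 *   [port 0]
 *   name=if0
 *   mac=hardware
 *   rx desc=2048
 *   tx desc=2048
 *   promiscuous=yes
 */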
static enum police_action str_to_color(const char *str)
{
	if (STR_EQ(str, "green"))
		return ACT_GREEN;
	if (STR_EQ(str, "yellow"))
		return ACT_YELLOW;
	if (STR_EQ(str, "red"))
		return ACT_RED;
	if (STR_EQ(str, "drop"))
		return ACT_DROP;
	return ACT_INVALID;
}
struct cfg_depr task_cfg_depr[] = {
};

struct cfg_depr core_cfg_depr[] = {
	{"network side", ""},
};
static int get_core_cfg(unsigned sindex, char *str, void *data)
{
	char *pkey;
	struct lcore_cfg *lconf = (struct lcore_cfg *)data;

	if (str == NULL || lconf == NULL || !(sindex & CFG_INDEXED)) {
		return -1;
	}

	pkey = get_cfg_key(str);
	if (pkey == NULL) {
		set_errf("Missing key after option");
		return -1;
	}

	uint32_t ncore = sindex & ~CFG_INDEXED;
	if (ncore >= RTE_MAX_LCORE) {
		set_errf("Core index too high (max allowed %d)", RTE_MAX_LCORE - 1);
		return -1;
	}

	lconf = &lconf[ncore];

	for (uint32_t i = 0; i < RTE_DIM(core_cfg_depr); ++i) {
		if (STR_EQ(str, core_cfg_depr[i].opt)) {
			set_errf("Option '%s' is deprecated%s%s",
				 core_cfg_depr[i].opt, strlen(core_cfg_depr[i].info)? ": ": "", core_cfg_depr[i].info);
			return -1;
		}
	}

	char buff[1024];
	lcore_to_socket_core_ht(ncore, buff, sizeof(buff));
	if (STR_EQ(str, "task")) {
		uint32_t val;
		if (parse_int(&val, pkey)) {
			return -1;
		}
		if (val >= MAX_TASKS_PER_CORE) {
			set_errf("Too many tasks for core (max allowed %d)", MAX_TASKS_PER_CORE - 1);
			return -1;
		}
		if (val != lconf->n_tasks_all) {
			set_errf("Task ID skipped or defined twice");
			return -1;
		}

		lconf->active_task = val;
		lconf->targs[lconf->active_task].task = lconf->active_task;

		if (lconf->n_tasks_all < lconf->active_task + 1) {
			lconf->n_tasks_all = lconf->active_task + 1;
		}
		return 0;
	}

	struct task_args *targ = &lconf->targs[lconf->active_task];
	if (STR_EQ(str, "tx ports from routing table")) {
		uint32_t vals[PROX_MAX_PORTS];
		uint32_t n_if;
		if (!(targ->task_init->flag_features & TASK_FEATURE_ROUTING)) {
			set_errf("tx ports from routing table not supported in mode %s", targ->task_init->mode_str);
			return -1;
		}

		if (parse_port_name_list(vals, &n_if, PROX_MAX_PORTS, pkey)) {
			return -1;
		}

		for (uint8_t i = 0; i < n_if; ++i) {
			targ->tx_port_queue[i].port = vals[i];
			targ->nb_txports++;
		}
		targ->runtime_flags |= TASK_ROUTING;
		return 0;
	}
	if (STR_EQ(str, "tx ports from cpe table")) {
		uint32_t vals[PROX_MAX_PORTS];
		uint32_t ret;
		int n_remap = -1;
		char* mapping_str = strstr(pkey, " remap=");

		if (mapping_str != NULL) {
			*mapping_str = 0;
			mapping_str += strlen(" remap=");
			n_remap = parse_remap(targ->mapping, mapping_str);
		}

		if (parse_port_name_list(vals, &ret, PROX_MAX_PORTS, pkey)) {
			return -1;
		}

		if (n_remap != -1 && ret != (uint32_t)n_remap) {
			set_errf("Expected %d remap elements but had %d", n_remap, ret);
			return -1;
		}

		for (uint8_t i = 0; i < ret; ++i) {
			targ->tx_port_queue[i].port = vals[i];

			/* default mapping in this case is port0 -> port0 */
			if (n_remap == -1) {
				targ->mapping[vals[i]] = i;
			}
		}

		targ->nb_txports = ret;

		return 0;
	}
	if (STR_EQ(str, "tx cores from routing table")) {
		if (!(targ->task_init->flag_features & TASK_FEATURE_ROUTING)) {
			set_errf("tx cores from routing table not supported in mode %s", targ->task_init->mode_str);
			return -1;
		}

		struct core_task_set *cts = &targ->core_task_set[0];

		if (parse_task_set(cts, pkey))
			return -1;

		if (cts->n_elems > MAX_WT_PER_LB) {
			set_errf("Maximum worker threads allowed is %u but have %u", MAX_WT_PER_LB, cts->n_elems);
			return -1;
		}

		targ->nb_worker_threads = cts->n_elems;
		targ->nb_txrings = cts->n_elems;

		if (targ->nb_txrings > MAX_RINGS_PER_TASK) {
			set_errf("Maximum allowed TX rings is %u but have %u", MAX_RINGS_PER_TASK, targ->nb_txrings);
			return -1;
		}

		targ->runtime_flags |= TASK_ROUTING;
		return 0;
	}
	if (STR_EQ(str, "tx cores from cpe table")) {
		struct core_task_set *core_task_set = &targ->core_task_set[0];
		int ret;
		char *mapping_str;

		mapping_str = strstr(pkey, " remap=");
		if (mapping_str == NULL) {
			set_errf("There is no default mapping for tx cores from cpe table. Please specify it through remap=");
			return -1;
		}
		*mapping_str = 0;
		mapping_str += strlen(" remap=");
		ret = parse_remap(targ->mapping, mapping_str);

		struct core_task_set *cts = &targ->core_task_set[0];

		if (parse_task_set(cts, pkey))
			return -1;
		if (cts->n_elems > MAX_RINGS_PER_TASK) {
			set_errf("Maximum cores to route to is %u\n", MAX_RINGS_PER_TASK);
			return -1;
		}

		targ->nb_txrings = cts->n_elems;

		if (ret != (int)targ->nb_txrings) {
			set_errf("Expecting same number of remaps as cores\n");
			return -1;
		}
		return 0;
	}
	if (STR_EQ(str, "delay ms")) {
		if (targ->delay_us) {
			set_errf("delay ms and delay us are mutually exclusive\n");
			return -1;
		}
		uint32_t delay_ms;
		int rc = parse_int(&delay_ms, pkey);
		targ->delay_us = delay_ms * 1000;
		return rc;
	}
	if (STR_EQ(str, "delay us")) {
		if (targ->delay_us) {
			set_errf("delay ms and delay us are mutually exclusive\n");
			return -1;
		}
		return parse_int(&targ->delay_us, pkey);
	}
	if (STR_EQ(str, "random delay us")) {
		return parse_int(&targ->random_delay_us, pkey);
	}
	if (STR_EQ(str, "cpe table timeout ms")) {
		return parse_int(&targ->cpe_table_timeout_ms, pkey);
	}
	if (STR_EQ(str, "ctrl path polling frequency")) {
		int rc = parse_int(&targ->ctrl_freq, pkey);
		if (rc == 0) {
			if (targ->ctrl_freq == 0) {
				set_errf("ctrl path polling frequency must be non-zero.");
				return -1;
			}
		}
		return rc;
	}
	if (STR_EQ(str, "handle arp")) {
		return parse_flag(&targ->runtime_flags, TASK_CTRL_HANDLE_ARP, pkey);
	}
	if (STR_EQ(str, "fast path handle arp")) {
		return parse_flag(&targ->runtime_flags, TASK_FP_HANDLE_ARP, pkey);
	}
	/* Using tx port name, only a _single_ port can be assigned to a task. */
	if (STR_EQ(str, "tx port")) {
		if (targ->nb_txports > 0) {
			set_errf("Only one tx port can be defined per task. Use a LB task or routing instead.");
			return -1;
		}

		uint32_t n_if = 0;
		uint32_t ports[PROX_MAX_PORTS];

		if (parse_port_name_list(ports, &n_if, PROX_MAX_PORTS, pkey)) {
			/* Port name not found, but it could be a virtual device
			   of a secondary process. As DPDK has not been started
			   yet, all we can do is check the config file to see
			   whether we are a secondary process. */
			if (rte_cfg.eal &&
			    (strstr(rte_cfg.eal, "secondary") || strstr(rte_cfg.eal, "auto")) &&
			    (n_deferred_ports < PROX_MAX_PORTS)) {
				prox_strncpy(deferred_port[n_deferred_ports].name, pkey, sizeof(deferred_port[n_deferred_ports].name));
				deferred_port[n_deferred_ports].is_rx_port = 0;
				deferred_port[n_deferred_ports++].targ = targ;
				return 0;
			} else
				return -1;
		}

		PROX_ASSERT(n_if-1 < PROX_MAX_PORTS);

		for (uint8_t i = 0; i < n_if; ++i) {
			targ->tx_port_queue[i].port = ports[i];
			targ->nb_txports++;
		}

		if (targ->nb_txports > 1) {
			targ->nb_worker_threads = targ->nb_txports;
		}
		return 0;
	}
	if (STR_EQ(str, "rx ring")) {
		uint32_t val;
		int err = parse_bool(&val, pkey);
		if (!err && val && targ->rx_port_queue[0].port != OUT_DISCARD) {
			set_errf("Can't read both from internal ring and external port from the same task. Use multiple tasks instead.");
			return -1;
		}

		return parse_flag(&targ->flags, TASK_ARG_RX_RING, pkey);
	}
	if (STR_EQ(str, "private")) {
		return parse_bool(&targ->use_src, pkey);
	}
	if (STR_EQ(str, "use src ip")) {
		return parse_bool(&targ->use_src, pkey);
	}
	if (STR_EQ(str, "nat table")) {
		return parse_str(targ->nat_table, pkey, sizeof(targ->nat_table));
	}
	if (STR_EQ(str, "rules")) {
		return parse_str(targ->rules, pkey, sizeof(targ->rules));
	}
	if (STR_EQ(str, "route table")) {
		return parse_str(targ->route_table, pkey, sizeof(targ->route_table));
	}
	if (STR_EQ(str, "dscp")) {
		return parse_str(targ->dscp, pkey, sizeof(targ->dscp));
	}
	if (STR_EQ(str, "tun_bindings")) {
		return parse_str(targ->tun_bindings, pkey, sizeof(targ->tun_bindings));
	}
	if (STR_EQ(str, "cpe table")) {
		return parse_str(targ->cpe_table_name, pkey, sizeof(targ->cpe_table_name));
	}
	if (STR_EQ(str, "user table")) {
		return parse_str(targ->user_table, pkey, sizeof(targ->user_table));
	}
	if (STR_EQ(str, "streams")) {
		return parse_str(targ->streams, pkey, sizeof(targ->streams));
	}
	if (STR_EQ(str, "Unsollicited NA")) {
		return parse_flag(&targ->flags, TASK_ARG_SEND_NA_AT_STARTUP, pkey);
	}
	if (STR_EQ(str, "local lpm")) {
		return parse_flag(&targ->flags, TASK_ARG_LOCAL_LPM, pkey);
	}
	if (STR_EQ(str, "drop")) {
		return parse_flag(&targ->flags, TASK_ARG_DROP, pkey);
	}
	if (STR_EQ(str, "loop")) {
		return parse_flag(&targ->loop, 1, pkey);
	}
	if (STR_EQ(str, "qinq")) {
		return parse_flag(&targ->flags, TASK_ARG_QINQ_ACL, pkey);
	}
	if (STR_EQ(str, "bps")) {
		return parse_u64(&targ->rate_bps, pkey);
	}
	if (STR_EQ(str, "random")) {
		return parse_str(targ->rand_str[targ->n_rand_str++], pkey, sizeof(targ->rand_str[0]));
	}
	if (STR_EQ(str, "rand_offset")) {
		if (targ->n_rand_str == 0) {
			set_errf("No random defined previously (use random=...)");
			return -1;
		}

		return parse_int(&targ->rand_offset[targ->n_rand_str - 1], pkey);
	}
	if (STR_EQ(str, "keep src mac")) {
		return parse_flag(&targ->flags, DSF_KEEP_SRC_MAC, pkey);
	}
	if (STR_EQ(str, "pcap file")) {
		return parse_str(targ->pcap_file, pkey, sizeof(targ->pcap_file));
	}
	if (STR_EQ(str, "imix")) {
		char pkey2[MAX_CFG_STRING_LEN], *ptr;
		if (parse_str(pkey2, pkey, sizeof(pkey2)) != 0) {
			set_errf("Error while parsing imix: value too long\n");
			return -1;
		}
		targ->imix_nb_pkts = 0;
		ptr = pkey2;
		while (targ->imix_nb_pkts < MAX_IMIX_PKTS) {
			if (parse_int(&targ->imix_pkt_sizes[targ->imix_nb_pkts], ptr) != 0)
				break;
			targ->imix_nb_pkts++;
			if ((ptr = strchr(ptr, ',')) == NULL)
				break;
			ptr++;
		}
		if (targ->imix_nb_pkts == MAX_IMIX_PKTS) {
			set_errf("Too many packet sizes specified");
			return -1;
		}

		plog_info("%d IMIX packets:", targ->imix_nb_pkts);
		for (size_t i = 0; i < targ->imix_nb_pkts; ++i) {
			plog_info("%d ", targ->imix_pkt_sizes[i]);
		}
		plog_info("\n");
		return 0;
	}
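/* Illustrative use: "imix=64,570,1518" fills imix_pkt_sizes[] with
 * three sizes (up to MAX_IMIX_PKTS comma-separated values; the sizes
 * shown are hypothetical). */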
	if (STR_EQ(str, "pkt inline")) {
		char pkey2[MAX_CFG_STRING_LEN];
		if (parse_str(pkey2, pkey, sizeof(pkey2)) != 0) {
			set_errf("Error while parsing pkt inline: value too long\n");
			return -1;
		}

		const size_t pkey_len = strlen(pkey2);
		targ->pkt_size = 0;

		for (size_t i = 0; i < pkey_len; ++i) {
			if (pkey2[i] == ' ')
				continue;

			if (i + 1 == pkey_len) {
				set_errf("Incomplete byte at character %zu", i);
				return -1;
			}

			uint8_t byte = 0;

			if (pkey2[i] >= '0' && pkey2[i] <= '9') {
				byte = (pkey2[i] - '0') << 4;
			}
			else if (pkey2[i] >= 'a' && pkey2[i] <= 'f') {
				byte = (pkey2[i] - 'a' + 10) << 4;
			}
			else if (pkey2[i] >= 'A' && pkey2[i] <= 'F') {
				byte = (pkey2[i] - 'A' + 10) << 4;
			}
			else {
				set_errf("Invalid character in pkt inline at byte %zu (%c)", i, pkey2[i]);
				return -1;
			}

			if (pkey2[i + 1] >= '0' && pkey2[i + 1] <= '9') {
				byte |= (pkey2[i + 1] - '0');
			}
			else if (pkey2[i + 1] >= 'a' && pkey2[i + 1] <= 'f') {
				byte |= (pkey2[i + 1] - 'a' + 10);
			}
			else if (pkey2[i + 1] >= 'A' && pkey2[i + 1] <= 'F') {
				byte |= (pkey2[i + 1] - 'A' + 10);
			}
			else {
				set_errf("Invalid character in pkt inline at byte %zu (%c)", i, pkey2[i + 1]);
				return -1;
			}

			if (targ->pkt_size == sizeof(targ->pkt_inline)) {
				set_errf("Inline packet definition can't be longer than %zu", sizeof(targ->pkt_inline));
				return -1;
			}

			targ->pkt_inline[targ->pkt_size++] = byte;
			++i;
		}

		return 0;
	}
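/* Illustrative use: the value is a hex byte dump with optional
 * spaces, e.g. "pkt inline=00 22 33 44 55 66 00 77 88 99 aa bb 08 00"
 * (bytes hypothetical); each hex digit pair becomes one byte of
 * pkt_inline[]. */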
	if (STR_EQ(str, "accuracy limit nsec")) {
		return parse_int(&targ->accuracy_limit_nsec, pkey);
	}
	if (STR_EQ(str, "latency bucket size")) {
		return parse_int(&targ->bucket_size, pkey);
	}
	if (STR_EQ(str, "latency buffer size")) {
		return parse_int(&targ->latency_buffer_size, pkey);
	}
	if (STR_EQ(str, "accuracy pos")) {
		return parse_int(&targ->accur_pos, pkey);
	}
	if (STR_EQ(str, "signature")) {
		return parse_int(&targ->sig, pkey);
	}
	if (STR_EQ(str, "signature pos")) {
		return parse_int(&targ->sig_pos, pkey);
	}
	if (STR_EQ(str, "lat pos")) {
		targ->lat_enabled = 1;
		return parse_int(&targ->lat_pos, pkey);
	}
	if (STR_EQ(str, "packet id pos")) {
		return parse_int(&targ->packet_id_pos, pkey);
	}
	if (STR_EQ(str, "probability")) { // deprecated - use "probability no drop" instead
		float probability;
		int rc = parse_float(&probability, pkey);
		if (probability == 0) {
			set_errf("Probability must be != 0\n");
			return -1;
		} else if (probability > 100.0) {
			set_errf("Probability must be <= 100\n");
			return -1;
		}
		targ->probability_no_drop = probability * 10000;
		return rc;
	}
	if (STR_EQ(str, "proba no drop")) {
		float probability;
		int rc = parse_float(&probability, pkey);
		if (probability == 0) {
			set_errf("probability no drop must be != 0\n");
			return -1;
		} else if (probability > 100.0) {
			set_errf("Probability must be <= 100\n");
			return -1;
		}
		targ->probability_no_drop = probability * 10000;
		return rc;
	}
	if (STR_EQ(str, "proba delay")) {
		float probability;
		int rc = parse_float(&probability, pkey);
		if (probability > 100.0) {
			set_errf("Probability must be <= 100\n");
			return -1;
		}
		targ->probability_delay = probability * 10000;
		return rc;
	}
#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
	if (STR_EQ(str, "proba duplicate")) {
		float probability;
		int rc = parse_float(&probability, pkey);
		if (probability > 100.0) {
			set_errf("probability duplicate must be <= 100\n");
			return -1;
		}
		targ->probability_duplicate = probability * 10000;
		return rc;
	}
#endif
	if (STR_EQ(str, "concur conn")) {
		return parse_int(&targ->n_concur_conn, pkey);
	}
	if (STR_EQ(str, "max setup rate")) {
		return parse_int(&targ->max_setup_rate, pkey);
	}
	if (STR_EQ(str, "pkt size")) {
		return parse_int(&targ->pkt_size, pkey);
	}
	if (STR_EQ(str, "min bulk size")) {
		return parse_int(&targ->min_bulk_size, pkey);
	}
	if (STR_EQ(str, "max bulk size")) {
		return parse_int(&targ->max_bulk_size, pkey);
	}
	if (STR_EQ(str, "rx port")) {
		if (targ->flags & TASK_ARG_RX_RING) {
			set_errf("Can't read both from internal ring and external port from the same task. Use multiple tasks instead.");
			return -1;
		}
		uint32_t vals[PROX_MAX_PORTS];
		uint32_t n_if;

		if (parse_port_name_list(vals, &n_if, PROX_MAX_PORTS, pkey)) {
			/* Port name not found, but it could be a virtual device
			   of a secondary process. As DPDK has not been started
			   yet, all we can do is check the config file to see
			   whether we are a secondary process. */
			if (rte_cfg.eal &&
			    (strstr(rte_cfg.eal, "secondary") || strstr(rte_cfg.eal, "auto")) &&
			    (n_deferred_ports < PROX_MAX_PORTS)) {
				prox_strncpy(deferred_port[n_deferred_ports].name, pkey, sizeof(deferred_port[n_deferred_ports].name));
				deferred_port[n_deferred_ports].is_rx_port = 1;
				deferred_port[n_deferred_ports++].targ = targ;
				return 0;
			} else
				return -1;
		}

		for (uint8_t i = 0; i < n_if; ++i) {
			PROX_ASSERT(vals[i] < PROX_MAX_PORTS);
			targ->rx_port_queue[i].port = vals[i];
			targ->nb_rxports++;
		}
		return 0;
	}
	if (STR_EQ(str, "mode")) {
		/* Check deprecated task modes */
		char mode[255];
		int ret = parse_str(mode, pkey, sizeof(mode));
		if (ret)
			return ret;

		for (uint32_t i = 0; i < RTE_DIM(task_cfg_depr); ++i) {
			if (STR_EQ(mode, task_cfg_depr[i].opt)) {
				set_errf("Task mode '%s' is deprecated%s%s",
					 task_cfg_depr[i].opt, strlen(task_cfg_depr[i].info)? ": ": "", task_cfg_depr[i].info);
				return -1;
			}
		}

		/* master is a special mode that is always needed (cannot be turned off) */
		if (STR_EQ(mode, "master")) {
			prox_cfg.master = ncore;
			targ->mode = MASTER;
			if (lconf->n_tasks_all > 1 || targ->task != 0) {
				set_errf("Master core can only have one task\n");
				return -1;
			}
			// Initialize the number of tasks to 1 for the master,
			// even if no task is specified.
			lconf->n_tasks_all = 1;
			lconf->active_task = 0;
			lconf->targs[lconf->active_task].task = 0;
			struct task_init* task_init = to_task_init(mode, "");
			if (task_init) {
				targ->mode = task_init->mode;
			}
			targ->task_init = task_init;
			return 0;
		}

		struct task_init* task_init = to_task_init(mode, "");
		if (task_init) {
			targ->mode = task_init->mode;
		}
		else {
			set_errf("Task mode '%s' is invalid", mode);
			return -1;
		}
		targ->task_init = task_init;
		return 0;
	}
	if (STR_EQ(str, "users")) {
		return parse_int(&targ->n_flows, pkey);
	}

	if (STR_EQ(str, "mark")) {
		return parse_flag(&targ->runtime_flags, TASK_MARK, pkey);
	}

	if (STR_EQ(str, "mark green")) {
		return parse_int(&targ->marking[0], pkey);
	}

	if (STR_EQ(str, "mark yellow")) {
		return parse_int(&targ->marking[1], pkey);
	}

	if (STR_EQ(str, "mark red")) {
		return parse_int(&targ->marking[2], pkey);
	}
	if (STR_EQ(str, "tx cores")) {
		uint8_t dest_task = 0;
		/* if the user did not specify it, dest_proto is left at its
		   default (first protocol) */
		uint8_t dest_proto = 0;
		uint8_t ctrl = CTRL_TYPE_DP;
		char *task_str = strstr(pkey, "proto=");
		if (task_str) {
			task_str += strlen("proto=");

			if (STR_EQ(task_str, "ipv4")) {
				dest_proto = IPV4;
			}
			else if (STR_EQ(task_str, "arp")) {
				dest_proto = ARP;
			}
			else if (STR_EQ(task_str, "ipv6")) {
				dest_proto = IPV6;
			}
			else {
				set_errf("proto needs to be either ipv4, arp or ipv6");
				return -1;
			}
		}

		task_str = strstr(pkey, "task=");

		if (task_str) {
			task_str += strlen("task=");
			char *task_str_end = strstr(task_str, " ");
			if (task_str_end) {
				*task_str_end = 0;
			}
			if (0 == strlen(task_str)) {
				set_errf("Invalid task= syntax");
				return -1;
			}

			switch (task_str[strlen(task_str) - 1]) {
			case 'p':
				ctrl = CTRL_TYPE_PKT;
				break;
			case 'm':
				ctrl = CTRL_TYPE_MSG;
				break;
			default:
				if (task_str[strlen(task_str) -1] < '0' ||
				    task_str[strlen(task_str) -1] > '9') {
					set_errf("Unknown ring type %c.\n",
						 task_str[strlen(task_str) - 1]);
					return -1;
				}
			}

			dest_task = atoi(task_str);
			if (dest_task >= MAX_TASKS_PER_CORE) {
				set_errf("Destination task too high (max allowed %d)", MAX_TASKS_PER_CORE - 1);
				return -1;
			}
		}

		struct core_task_set *cts = &targ->core_task_set[dest_proto];

		if (parse_task_set(cts, pkey))
			return -1;

		if (cts->n_elems > MAX_WT_PER_LB) {
			set_errf("Too many worker threads (max allowed %d)", MAX_WT_PER_LB - 1);
			return -1;
		}

		targ->nb_worker_threads = cts->n_elems;
		targ->nb_txrings += cts->n_elems;

		return 0;
	}
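/* Illustrative "tx cores" value: "tx cores=(4-6)t0 proto=ipv4" sets
 * up rings towards task 0 of cores 4..6 for IPv4 traffic; an optional
 * "task=" key with a ring-type suffix selects a control ring instead
 * (core/task numbers hypothetical, syntax per the parsing above). */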
	if (STR_EQ(str, "tx crc")) {
		return parse_flag(&targ->runtime_flags, TASK_TX_CRC, pkey);
	}
	if (STR_EQ(str, "ring size")) {
		return parse_int(&targ->ring_size, pkey);
	}
	if (STR_EQ(str, "mempool size")) {
		return parse_kmg(&targ->nb_mbuf, pkey);
	}

	else if (STR_EQ(str, "mbuf size")) {
		return parse_int(&targ->mbuf_size, pkey);
	}
	if (STR_EQ(str, "memcache size")) {
		return parse_kmg(&targ->nb_cache_mbuf, pkey);
	}

	if (STR_EQ(str, "byte offset")) {
		return parse_int(&targ->byte_offset, pkey);
	}
	if (STR_EQ(str, "realtime scheduling")) {
		return parse_flag(&lconf->flags, LCONF_FLAG_SCHED_RR, pkey);
	}
	if (STR_EQ(str, "name")) {
		return parse_str(lconf->name, pkey, sizeof(lconf->name));
	}
	/* MPLS configuration */
	if (STR_EQ(str, "untag mpls")) {
		return parse_flag(&targ->runtime_flags, TASK_MPLS_TAGGING, pkey);
	}

	if (STR_EQ(str, "add mpls")) {
		return parse_flag(&targ->runtime_flags, TASK_MPLS_TAGGING, pkey);
	}

	if (STR_EQ(str, "ether type")) {
		return parse_int(&targ->etype, pkey);
	}

	if (STR_EQ(str, "cache set")) {
		return parse_int(&lconf->cache_set, pkey);
	}
	if (STR_EQ(str, "sub mode")) {
		const char* mode_str = targ->task_init->mode_str;
		const char *sub_mode_str = pkey;

		targ->task_init = to_task_init(mode_str, sub_mode_str);
		if (!targ->task_init) {
			if ((strcmp(sub_mode_str, "l3") != 0) && (strcmp(sub_mode_str, "ndp") != 0)) {
				set_errf("sub mode %s not supported for mode %s", sub_mode_str, mode_str);
				return -1;
			}
			targ->task_init = to_task_init(mode_str, "");
			if (!targ->task_init) {
				set_errf("sub mode %s not supported for mode %s", sub_mode_str, mode_str);
				return -1;
			}
		}
		if (strcmp(sub_mode_str, "l3") == 0) {
			prox_cfg.flags |= DSF_L3_ENABLED;
			targ->flags |= TASK_ARG_L3;
			strcpy(targ->sub_mode_str, "l3");
		} else if (strcmp(sub_mode_str, "ndp") == 0) {
			prox_cfg.flags |= DSF_NDP_ENABLED;
			targ->flags |= TASK_ARG_NDP;
			strcpy(targ->sub_mode_str, "ndp");
		} else {
			strcpy(targ->sub_mode_str, targ->task_init->sub_mode_str);
		}
		return 0;
	}
	if (STR_EQ(str, "mempool name")) {
		return parse_str(targ->pool_name, pkey, sizeof(targ->pool_name));
	}
	if (STR_EQ(str, "dpi engine")) {
		return parse_str(targ->dpi_engine_path, pkey, sizeof(targ->dpi_engine_path));
	}
	if (STR_EQ(str, "dpi engine arg")) {
		return parse_str(targ->dpi_engine_args[targ->n_dpi_engine_args++], pkey,
				 sizeof(targ->dpi_engine_args[0]));
	}
	if (STR_EQ(str, "dst mac")) { /* destination MAC address to be used for packets */
		if (parse_mac(&targ->edaddr, pkey)) {
			if (STR_EQ(pkey, "no")) {
				targ->flags |= TASK_ARG_DO_NOT_SET_DST_MAC;
				return 0;
			}
			if (!STR_EQ(pkey, "packet"))
				return -1;
			return 0;
		}
		targ->flags |= TASK_ARG_DST_MAC_SET;
		return 0;
	}
	if (STR_EQ(str, "src mac")) {
		if (parse_mac(&targ->esaddr, pkey)) {
			if (STR_EQ(pkey, "no")) {
				targ->flags |= TASK_ARG_DO_NOT_SET_SRC_MAC;
				return 0;
			}
			else if (STR_EQ(pkey, "packet"))
				return 0;
			else if (STR_EQ(pkey, "hw")) {
				targ->flags |= TASK_ARG_HW_SRC_MAC;
				return 0;
			} else {
				return -1;
			}
		}
		targ->flags |= TASK_ARG_SRC_MAC_SET;
		return 0;
	}
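/* Illustrative MAC settings: "dst mac=00:01:02:03:04:05" pins the
 * destination MAC, "dst mac=packet" keeps the one already in the
 * packet, "dst mac=no" disables rewriting, and "src mac=hw" takes the
 * port's own MAC (address hypothetical). */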
	if (STR_EQ(str, "igmp ipv4")) { /* IGMP group */
		return parse_ip(&targ->igmp_address, pkey);
	}
	if (STR_EQ(str, "gateway ipv4")) { /* Gateway IP address used when generating */
		if ((targ->flags & TASK_ARG_L3) == 0)
			plog_warn("gateway ipv4 configured but L3 sub mode not enabled\n");
		if (targ->local_ipv4)
			targ->local_prefix = 32;
		return parse_ip(&targ->gateway_ipv4, pkey);
	}
	if (STR_EQ(str, "ipv6 router")) { /* act as an IPv6 router */
		int rc = parse_flag(&targ->ipv6_router, 1, pkey);
		if (!rc && targ->ipv6_router) {
			plog_info("\tipv6 router configured => NDP enabled\n");
			prox_cfg.flags |= DSF_NDP_ENABLED;
			targ->flags |= TASK_ARG_NDP;
			strcpy(targ->sub_mode_str, "ndp");
		}
		return 0;
	}
	if (STR_EQ(str, "gateway ipv6")) { /* Gateway IP address used when generating */
		if ((targ->flags & TASK_ARG_NDP) == 0)
			plog_warn("gateway ipv6 configured but NDP sub mode not enabled\n");
		return parse_ip6(&targ->gateway_ipv6, pkey);
	}
	if (STR_EQ(str, "local ipv4")) { /* source IP address to be used for packets */
		struct ip4_subnet cidr;
		if (parse_ip4_and_prefix(&cidr, pkey) != 0) {
			if (targ->gateway_ipv4)
				targ->local_prefix = 32;
			else
				targ->local_prefix = 0;
			return parse_ip(&targ->local_ipv4, pkey);
		} else {
			targ->local_ipv4 = cidr.ip;
			targ->local_prefix = cidr.prefix;
			return 0;
		}
	}
	if (STR_EQ(str, "remote ipv4")) { /* destination IP address to be used for packets */
		return parse_ip(&targ->remote_ipv4, pkey);
	}
	if (STR_EQ(str, "global ipv6")) {
		if (parse_ip6(&targ->global_ipv6, pkey) == 0) {
			plog_info("\tglobal ipv6 configured => NDP enabled\n");
			targ->flags |= TASK_ARG_NDP;
			prox_cfg.flags |= DSF_NDP_ENABLED;
			strcpy(targ->sub_mode_str, "ndp");
		} else {
			plog_err("Unable to parse content of global ipv6: %s\n", pkey);
			return -1;
		}
		return 0;
	}
	if (STR_EQ(str, "local ipv6")) { /* source IPv6 address to be used for packets */
		if (parse_ip6(&targ->local_ipv6, pkey) == 0) {
			plog_info("\tlocal ipv6 configured => NDP enabled\n");
			targ->flags |= TASK_ARG_NDP;
			prox_cfg.flags |= DSF_NDP_ENABLED;
			strcpy(targ->sub_mode_str, "ndp");
		} else {
			plog_err("Unable to parse content of local ipv6: %s\n", pkey);
			return -1;
		}
		return 0;
	}
	if (STR_EQ(str, "router prefix")) {
		if (parse_ip6(&targ->router_prefix, pkey) == 0) {
			plog_info("\trouter prefix set to "IPv6_BYTES_FMT" (%s)\n", IPv6_BYTES(targ->router_prefix.bytes), IP6_Canonical(&targ->router_prefix));
		} else {
			plog_err("Unable to parse content of router prefix: %s\n", pkey);
			return -1;
		}
		return 0;
	}
	if (STR_EQ(str, "arp timeout"))
		return parse_int(&targ->reachable_timeout, pkey);
	if (STR_EQ(str, "arp update time"))
		return parse_int(&targ->arp_ndp_retransmit_timeout, pkey);
	if (STR_EQ(str, "number of packets"))
		return parse_int(&targ->n_pkts, pkey);
	if (STR_EQ(str, "pipes")) {
		uint32_t val;
		int err = parse_int(&val, pkey);
		if (err)
			return -1;
		if (!val || !rte_is_power_of_2(val)) {
			set_errf("Number of pipes has to be a power of 2 and non-zero");
			return -1;
		}

		targ->qos_conf.port_params.n_pipes_per_subport = val;
		return 0;
	}
	if (STR_EQ(str, "queue size")) {
		uint32_t val;
		int err = parse_int(&val, pkey);
		if (err)
			return -1;
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		targ->qos_conf.subport_params[0].qsize[0] = val;
		targ->qos_conf.subport_params[0].qsize[1] = val;
		targ->qos_conf.subport_params[0].qsize[2] = val;
		targ->qos_conf.subport_params[0].qsize[3] = val;
#else
		targ->qos_conf.port_params.qsize[0] = val;
		targ->qos_conf.port_params.qsize[1] = val;
		targ->qos_conf.port_params.qsize[2] = val;
		targ->qos_conf.port_params.qsize[3] = val;
#endif
		return 0;
	}
	if (STR_EQ(str, "subport tb rate")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.subport_params[0].tb_rate, pkey);
#else
		return parse_int(&targ->qos_conf.subport_params[0].tb_rate, pkey);
#endif
	}
	if (STR_EQ(str, "subport tb size")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.subport_params[0].tb_size, pkey);
#else
		return parse_int(&targ->qos_conf.subport_params[0].tb_size, pkey);
#endif
	}
	if (STR_EQ(str, "subport tc 0 rate")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[0], pkey);
#else
		return parse_int(&targ->qos_conf.subport_params[0].tc_rate[0], pkey);
#endif
	}
	if (STR_EQ(str, "subport tc 1 rate")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[1], pkey);
#else
		return parse_int(&targ->qos_conf.subport_params[0].tc_rate[1], pkey);
#endif
	}
	if (STR_EQ(str, "subport tc 2 rate")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[2], pkey);
#else
		return parse_int(&targ->qos_conf.subport_params[0].tc_rate[2], pkey);
#endif
	}
	if (STR_EQ(str, "subport tc 3 rate")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[3], pkey);
#else
		return parse_int(&targ->qos_conf.subport_params[0].tc_rate[3], pkey);
#endif
	}

	if (STR_EQ(str, "subport tc rate")) {
		uint32_t val;
		int err = parse_int(&val, pkey);
		if (err)
			return -1;

		targ->qos_conf.subport_params[0].tc_rate[0] = val;
		targ->qos_conf.subport_params[0].tc_rate[1] = val;
		targ->qos_conf.subport_params[0].tc_rate[2] = val;
		targ->qos_conf.subport_params[0].tc_rate[3] = val;

		return 0;
	}

	if (STR_EQ(str, "subport tc period")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.subport_params[0].tc_period, pkey);
#else
		return parse_int(&targ->qos_conf.subport_params[0].tc_period, pkey);
#endif
	}
	if (STR_EQ(str, "pipe tb rate")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.pipe_params[0].tb_rate, pkey);
#else
		return parse_int(&targ->qos_conf.pipe_params[0].tb_rate, pkey);
#endif
	}
	if (STR_EQ(str, "pipe tb size")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.pipe_params[0].tb_size, pkey);
#else
		return parse_int(&targ->qos_conf.pipe_params[0].tb_size, pkey);
#endif
	}
	if (STR_EQ(str, "pipe tc rate")) {
		uint32_t val;
		int err = parse_int(&val, pkey);
		if (err)
			return -1;

		targ->qos_conf.pipe_params[0].tc_rate[0] = val;
		targ->qos_conf.pipe_params[0].tc_rate[1] = val;
		targ->qos_conf.pipe_params[0].tc_rate[2] = val;
		targ->qos_conf.pipe_params[0].tc_rate[3] = val;
		return 0;
	}
	if (STR_EQ(str, "pipe tc 0 rate")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[0], pkey);
#else
		return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[0], pkey);
#endif
	}
	if (STR_EQ(str, "pipe tc 1 rate")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[1], pkey);
#else
		return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[1], pkey);
#endif
	}
	if (STR_EQ(str, "pipe tc 2 rate")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[2], pkey);
#else
		return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[2], pkey);
#endif
	}
	if (STR_EQ(str, "pipe tc 3 rate")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[3], pkey);
#else
		return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[3], pkey);
#endif
	}
	if (STR_EQ(str, "pipe tc period")) {
#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
		return parse_u64(&targ->qos_conf.pipe_params[0].tc_period, pkey);
#else
		return parse_int(&targ->qos_conf.pipe_params[0].tc_period, pkey);
#endif
	}
	if (STR_EQ(str, "police action")) {
		char *in = strstr(pkey, " io=");
		if (in == NULL) {
			set_errf("Need to specify io colors using io=in_color,out_color\n");
			return -1;
		}
		*in = 0;
		in += strlen(" io=");

		char *out = strstr(in, ",");
		if (out == NULL) {
			set_errf("Output color not specified\n");
			return -1;
		}
		*out = 0;
		out++;

		enum police_action in_color = str_to_color(in);
		enum police_action out_color = str_to_color(out);

		if (in_color == ACT_INVALID) {
			set_errf("Invalid input color %s. Expected green, yellow or red", in);
			return -1;
		}
		if (out_color == ACT_INVALID) {
			set_errf("Invalid output color %s. Expected green, yellow or red", out);
			return -1;
		}
		enum police_action action = str_to_color(pkey);
		if (action == ACT_INVALID) {
			set_errf("Invalid action %s. Expected green, yellow, red or drop", pkey);
			return -1;
		}
		targ->police_act[in_color][out_color] = action;

		return 0;
	}
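/* Illustrative policer rule: "police action=drop io=green,red" means
 * packets metered in as green but remarked red are dropped, i.e.
 * police_act[green][red] = drop (colors per str_to_color() above). */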
	if (STR_EQ(str, "qinq tag")) {
		return parse_int(&targ->qinq_tag, pkey);
	}
	if (STR_EQ(str, "cir")) {
		return parse_int(&targ->cir, pkey);
	}
	if (STR_EQ(str, "cbs")) {
		return parse_int(&targ->cbs, pkey);
	}
	if (STR_EQ(str, "pir")) {
		return parse_int(&targ->pir, pkey);
	}
	if (STR_EQ(str, "pbs")) {
		return parse_int(&targ->pbs, pkey);
	}
	if (STR_EQ(str, "ebs")) {
		return parse_int(&targ->ebs, pkey);
	}
	uint32_t queue_id = 0;
	if (sscanf(str, "queue %u weight", &queue_id) == 1) {
		uint32_t val;
		int err = parse_int(&val, pkey);
		if (err) {
			return -1;
		}
		if (queue_id >= RTE_SCHED_BE_QUEUES_PER_PIPE) {
			set_errf("queue_id must be < %d", RTE_SCHED_BE_QUEUES_PER_PIPE);
			return -1;
		}
		targ->qos_conf.pipe_params[0].wrr_weights[queue_id] = val;
		return 0;
	}
	if (STR_EQ(str, "classify")) {
		if (!(targ->task_init->flag_features & TASK_FEATURE_CLASSIFY)) {
			set_errf("Classify is not supported in '%s' mode", targ->task_init->mode_str);
			return -1;
		}

		return parse_flag(&targ->runtime_flags, TASK_CLASSIFY, pkey);
	}
	if (STR_EQ(str, "flow table size")) {
		return parse_int(&targ->flow_table_size, pkey);
	}

	if (STR_EQ(str, "tbf rate")) {
		return parse_int(&targ->tb_rate, pkey);
	}
	if (STR_EQ(str, "tbf size")) {
		return parse_int(&targ->tb_size, pkey);
	}

	if (STR_EQ(str, "max rules")) {
		return parse_int(&targ->n_max_rules, pkey);
	}

	if (STR_EQ(str, "tunnel hop limit")) {
		uint32_t val;
		int err = parse_int(&val, pkey);
		if (err) {
			return -1;
		}
		targ->tunnel_hop_limit = val;
		return 0;
	}

	if (STR_EQ(str, "lookup port mask")) {
		uint32_t val;
		int err = parse_int(&val, pkey);
		if (err) {
			return -1;
		}
		targ->lookup_port_mask = val;
		return 0;
	}

	if (STR_EQ(str, "irq debug")) {
		parse_int(&targ->irq_debug, pkey);
		return 0;
	}
	/* fail on unknown keys */
	set_errf("Option '%s' is not known", str);
	return -1;
}
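/* Illustrative [core] section tying the options above together
 * (core/port numbers and values hypothetical):
 *
 *   [core 1]
 *   name=gen0
 *   task=0
 *   mode=gen
 *   tx port=if0
 *   bps=1250000000
 *   pkt size=64
 */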
static int str_is_number(const char *in)
{
	int dot_once = 0;

	for (size_t i = 0; i < strlen(in); ++i) {
		if (!dot_once && in[i] == '.') {
			dot_once = 1;
			continue;
		}
		if (in[i] < '0' || in[i] > '9')
			return 0;
	}
	return 1;
}
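/* Illustrative invocation (option letters per the getopt string in
 * prox_parse_args() below; file and variable names hypothetical):
 *
 *   ./prox -f my_test.cfg -w mcore=0 -v 2
 *
 * -f selects the config file, -w adds a $variable, -v sets the log
 * level.
 */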
/* command line parameters parsing procedure */
int prox_parse_args(int argc, char **argv)
{
	int i, opt, ret;
	char *tmp, *tmp2;
	char tmp3[64] = "$";

	/* Default settings */
	prox_cfg.flags |= DSF_AUTOSTART | DSF_WAIT_ON_QUIT;
	prox_cfg.ui = PROX_UI_CURSES;

	plog_info("\tCommand line:");
	for (i = 0; i < argc; ++i) {
		plog_info(" %s", argv[i]);
	}
	plog_info("\n");

	while ((opt = getopt(argc, argv, "f:dnzpo:tkuar:emsiw:l:v:q:")) != EOF) {
		switch (opt) {
		case 'f':
			/* path to config file */
			cfg_file = optarg;
			{
				size_t offset = 0;
				for (size_t i = 0; i < strlen(cfg_file); ++i) {
					if (cfg_file[i] == '/') {
						offset = i + 1;
					}
				}
				prox_strncpy(prox_cfg.name, cfg_file + offset, MAX_NAME_SIZE);
			}
			break;
		case 'v':
			plog_set_lvl(atoi(optarg));
			break;
		case 'l':
			prox_cfg.log_name_pid = 0;
			prox_strncpy(prox_cfg.log_name, optarg, MAX_NAME_SIZE);
			break;
		case 'p':
			prox_cfg.log_name_pid = 1;
			break;
		case 'k':
			prox_cfg.use_stats_logger = 1;
			break;
		case 'd':
			prox_cfg.flags |= DSF_DAEMON;
			prox_cfg.ui = PROX_UI_NONE;
			break;
		case 'z':
			prox_cfg.flags |= DSF_USE_DUMMY_CPU_TOPO;
			prox_cfg.flags |= DSF_CHECK_INIT;
			break;
		case 'n':
			prox_cfg.flags |= DSF_USE_DUMMY_DEVICES;
			break;
		case 'r':
			if (!str_is_number(optarg) || strlen(optarg) > 11)
				return -1;
			prox_strncpy(prox_cfg.update_interval_str, optarg, sizeof(prox_cfg.update_interval_str));
			break;
		case 'o':
			if (prox_cfg.flags & DSF_DAEMON)
				break;
			if (!strcmp(optarg, "curses")) {
				prox_cfg.ui = PROX_UI_CURSES;
			}
			else if (!strcmp(optarg, "cli")) {
				prox_cfg.ui = PROX_UI_CLI;
			}
			else if (!strcmp(optarg, "none")) {
				prox_cfg.ui = PROX_UI_NONE;
			}
			else {
				plog_err("Invalid local UI '%s', local UI can be 'curses', 'cli' or 'none'.", optarg);
				return -1;
			}
			break;
		case 'q':
			if (luaL_loadstring(prox_lua(), optarg)) {
				set_errf("Lua error: '%s'\n", lua_tostring(prox_lua(), -1));
				return -1;
			}
			if (lua_pcall(prox_lua(), 0, LUA_MULTRET, 0)) {
				set_errf("Lua error: '%s'\n", lua_tostring(prox_lua(), -1));
				return -1;
			}
			break;
		case 'a':
			/* autostart all cores */
			prox_cfg.flags |= DSF_AUTOSTART;
			break;
		case 'e':
			/* don't autostart */
			prox_cfg.flags &= ~DSF_AUTOSTART;
			break;
		case 't':
			prox_cfg.flags |= DSF_LISTEN_TCP;
			break;
		case 'u':
			prox_cfg.flags |= DSF_LISTEN_UDS;
			break;
		case 'm':
			/* list supported task modes and exit */
			prox_cfg.flags |= DSF_LIST_TASK_MODES;
			break;
		case 's':
			/* check configuration file syntax and exit */
			prox_cfg.flags |= DSF_CHECK_SYNTAX;
			break;
		case 'i':
			/* check initialization sequence and exit */
			prox_cfg.flags |= DSF_CHECK_INIT;
			break;
		case 'w':
			tmp = optarg;
			if (strlen(tmp) >= 3 &&
			    (tmp2 = strchr(tmp, '='))) {
				*tmp2 = 0;
				prox_strncpy(tmp3 + 1, tmp, 63);
				plog_info("\tAdding variable: %s = %s\n", tmp3, tmp2 + 1);
				ret = add_var(tmp3, tmp2 + 1, 1);
				if (ret == -2) {
					plog_err("\tFailed to add variable, too many variables defined\n");
					return -1;
				}
				else if (ret == -3) {
					plog_err("\tFailed to add variable, already defined\n");
					return -1;
				}
			}
			break;
		default:
			plog_err("\tUnknown option\n");
			return -1;
		}
	}
	/* reset getopt lib for DPDK */
	optind = 0;

	return 0;
}
static int check_cfg(void)
{
#define RETURN_IF(cond, err)	\
	if (cond) {		\
		set_errf(err);	\
		return -1;	\
	};

	RETURN_IF(rte_cfg.force_nchannel == 0, "\tError: number of memory channels not specified in [eal options] section\n");
	RETURN_IF(prox_cfg.master >= RTE_MAX_LCORE, "\tError: No master core specified (one core needs to have mode=master)\n");

#undef RETURN_IF

	return 0;
}
static int calc_tot_rxrings(void)
{
	struct lcore_cfg *slconf, *dlconf;
	struct task_args *starg, *dtarg;
	struct core_task ct;

	dlconf = NULL;
	while (core_targ_next_early(&dlconf, &dtarg, 1) == 0) {
		dtarg->tot_rxrings = 0;
	}

	slconf = NULL;
	while (core_targ_next_early(&slconf, &starg, 1) == 0) {
		for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
			for (uint8_t ring_idx = 0; ring_idx < starg->core_task_set[idx].n_elems; ++ring_idx) {
				ct = starg->core_task_set[idx].core_task[ring_idx];
				if (!prox_core_active(ct.core, 0)) {
					set_errf("Core %u is disabled but core %u task %u is sending to it\n",
						 ct.core, slconf->id, starg->id);
					return -1;
				}

				dlconf = &lcore_cfg_init[ct.core];

				if (ct.task >= dlconf->n_tasks_all) {
					set_errf("Core %u task %u not enabled\n", ct.core, ct.task);
					return -1;
				}

				dtarg = &dlconf->targs[ct.task];

				/* Control rings are not relevant at this point. */
				if (ct.type)
					continue;

				if (!(dtarg->flags & TASK_ARG_RX_RING)) {
					set_errf("Core %u task %u is not expecting to receive through a ring\n",
						 ct.core, ct.task);
					return -1;
				}

				dtarg->tot_rxrings++;
				if (dtarg->tot_rxrings > MAX_RINGS_PER_TASK) {
					set_errf("Core %u task %u is receiving from too many tasks",
						 ct.core, ct.task);
					return -1;
				}
			}
		}
	}

	return 0;
}
static void prox_set_core_mask(void)
{
	struct lcore_cfg *lconf;

	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		lconf = &lcore_cfg_init[lcore_id];
		if (lconf->n_tasks_all > 0 && lconf->targs[0].mode != MASTER) {
			prox_core_set_active(lcore_id);
		}
	}
}
static int is_using_no_drop(void)
{
	uint32_t lcore_id;
	struct lcore_cfg *lconf;
	struct task_args *targs;

	lcore_id = -1;
	while (prox_core_next(&lcore_id, 1) == 0) {
		lconf = &lcore_cfg_init[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			targs = &lconf->targs[task_id];
			if (!(targs->flags & TASK_ARG_DROP))
				return 1;
		}
	}
	return 0;
}
int prox_read_config_file(void)
{
	set_global_defaults(&prox_cfg);
	set_task_defaults(&prox_cfg, lcore_cfg_init);
	set_port_defaults();
	plog_info("=== Parsing configuration file '%s' ===\n", cfg_file);
	struct cfg_file *pcfg = cfg_open(cfg_file);
	if (pcfg == NULL) {
		return -1;
	}

	struct cfg_section* config_sections[] = {
		&lua_cfg,
		&var_cfg,
		&eal_default_cfg,
		&cache_set_cfg,
		&port_cfg,
		&defaults_cfg,
		&settings_cfg,
		&core_cfg,
		NULL
	};

	for (struct cfg_section** section = config_sections; *section != NULL; ++section) {
		const char* name = (*section)->name;
		size_t len = strlen(name);
		plog_info("\t*** Reading [%s] section%s ***\n", name, name[len - 1] == '#'? "s": "");
		cfg_parse(pcfg, *section);

		if ((*section)->error) {
			plog_err("At line %u, section [%s], entry %u: '%s'\n\t%s\n"
				 , pcfg->err_line, pcfg->err_section, pcfg->err_entry + 1, pcfg->cur_line,
				 strlen(get_parse_err())? get_parse_err() : err_str);
			cfg_close(pcfg); /* cannot close before printing the error; print uses the internal buffer */
			return -1;
		}
	}
	cfg_close(pcfg);

	prox_set_core_mask();

	if (is_using_no_drop()) {
		prox_cfg.flags &= ~DSF_WAIT_ON_QUIT;
	}

	if (calc_tot_rxrings()) {
		plog_err("Error in configuration: %s\n", err_str);
		return -1;
	}

	return check_cfg();
}
static void failed_rte_eal_init(__attribute__((unused))const char *prog_name)
{
	plog_err("\tError in rte_eal_init()\n");
}
int prox_setup_rte(const char *prog_name)
{
	char *rte_argv[MAX_RTE_ARGV];
	char rte_arg[MAX_RTE_ARGV][MAX_ARG_LEN];
	char tmp[PROX_CM_STR_LEN];

	/* create mask of used cores */
	plog_info("=== Setting up RTE EAL ===\n");

	if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO) {
		plog_info("Using dummy cpu topology\n");
		snprintf(tmp, sizeof(tmp), "0x1");
	} else {
		prox_core_to_hex(tmp, sizeof(tmp), 0);
		plog_info("\tWorker threads core mask is %s\n", tmp);
		prox_core_to_hex(tmp, sizeof(tmp), 1);
		plog_info("\tWith master core index %u, full core mask is %s\n", prox_cfg.master, tmp);
	}

	/* fake command line parameters for rte_eal_init() */
	int argc = 0;
	rte_argv[argc] = strdup(prog_name);
	sprintf(rte_arg[++argc], "-c%s", tmp);
	rte_argv[argc] = rte_arg[argc];
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
	if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO)
		sprintf(rte_arg[++argc], "--master-lcore=%u", 0);
	else
		sprintf(rte_arg[++argc], "--master-lcore=%u", prox_cfg.master);
	rte_argv[argc] = rte_arg[argc];
#else
	/* For old DPDK versions, the master core had to be the first
	   core. */
	uint32_t first_core = -1;

	if (prox_core_next(&first_core, 1) == -1) {
		plog_err("Can't find the ID of the first core in use\n");
		return -1;
	}
	if (first_core != prox_cfg.master) {
		plog_err("The master core needs to be the first core (master core = %u, first core = %u).\n", first_core, prox_cfg.master);
		return -1;
	}
#endif

	if (rte_cfg.memory) {
		sprintf(rte_arg[++argc], "-m%u", rte_cfg.memory);
		rte_argv[argc] = rte_arg[argc];
	}

	if (rte_cfg.force_nchannel) {
		sprintf(rte_arg[++argc], "-n%u", rte_cfg.force_nchannel);
		rte_argv[argc] = rte_arg[argc];
	}

	if (rte_cfg.force_nrank) {
		sprintf(rte_arg[++argc], "-r%u", rte_cfg.force_nrank);
		rte_argv[argc] = rte_arg[argc];
	}

	if (rte_cfg.no_hugetlbfs) {
		strcpy(rte_arg[++argc], "--no-huge");
		rte_argv[argc] = rte_arg[argc];
	}

	if (rte_cfg.no_pci) {
		strcpy(rte_arg[++argc], "--no-pci");
		rte_argv[argc] = rte_arg[argc];
	}

	if (rte_cfg.no_hpet) {
		strcpy(rte_arg[++argc], "--no-hpet");
		rte_argv[argc] = rte_arg[argc];
	}

	if (rte_cfg.no_shconf) {
		strcpy(rte_arg[++argc], "--no-shconf");
		rte_argv[argc] = rte_arg[argc];
	}

	if (rte_cfg.eal != NULL) {
		char *ptr = rte_cfg.eal;
		char *ptr2;
		while (ptr != NULL) {
			while (isspace(*ptr))
				ptr++;
			ptr2 = ptr;
			ptr = strchr(ptr, ' ');
			if (ptr) {
				*ptr++ = '\0';
			}
			strcpy(rte_arg[++argc], ptr2);
			rte_argv[argc] = rte_arg[argc];
		}
	}

	if (rte_cfg.hugedir != NULL) {
		strcpy(rte_arg[++argc], "--huge-dir");
		rte_argv[argc] = rte_arg[argc];
		rte_argv[++argc] = rte_cfg.hugedir;
	}

	if (rte_cfg.no_output) {
		rte_log_set_global_level(0);
	}

	plog_info("\tEAL command line:");
	if (argc >= MAX_RTE_ARGV) {
		plog_err("too many arguments for EAL\n");
		return -1;
	}

	for (int h = 0; h <= argc; ++h) {
		plog_info(" %s", rte_argv[h]);
	}
	plog_info("\n");

	rte_set_application_usage_hook(failed_rte_eal_init);
	if (rte_eal_init(++argc, rte_argv) < 0) {
		plog_err("\tError in rte_eal_init()\n");
		return -1;
	}
	plog_info("\tEAL Initialized\n");

	if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO)
		return 0;

	/* check that all active cores are enabled in DPDK */
	for (uint32_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (lcore_id == prox_cfg.master) {
			if (!rte_lcore_is_enabled(lcore_id))
				return -1;
		}
		else if (rte_lcore_is_enabled(lcore_id) != prox_core_active(lcore_id, 0)) {
			plog_err("\tFailed to enable lcore %u\n", lcore_id);
			return -1;
		}
		else if (lcore_cfg_init[lcore_id].n_tasks_all != 0 && !rte_lcore_is_enabled(lcore_id)) {
			plog_err("\tFailed to enable lcore %u\n", lcore_id);
			return -1;
		}
	}

	uint16_t port_id;
	for (int i = 0; i < n_deferred_ports; i++) {
		if (prox_rte_eth_dev_get_port_by_name(deferred_port[i].name, &port_id) != 0) {
			plog_err("Did not find port name %s used while reading %s\n", deferred_port[i].name, deferred_port[i].is_rx_port ? "rx port" : "tx port");
			return -1;
		}
		plog_info("\tport %s is port id %d\n", deferred_port[i].name, port_id);
		if (deferred_port[i].is_rx_port) {
			deferred_port[i].targ->rx_port_queue[0].port = port_id;
			deferred_port[i].targ->nb_rxports = 1;
		} else {
			deferred_port[i].targ->tx_port_queue[0].port = port_id;
			deferred_port[i].targ->nb_txports = 1;
		}
	}

	return 0;
}