2 // Copyright (c) 2010-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
20 #include <rte_sched.h>
21 #include <rte_string_fns.h>
22 #include <rte_version.h>
24 #include "prox_malloc.h"
27 #include "prox_args.h"
28 #include "prox_assert.h"
33 #include "parse_utils.h"
34 #include "prox_port_cfg.h"
38 #include "prox_compat.h"
40 #define MAX_RTE_ARGV 64
41 #define MAX_ARG_LEN 64
49 #define STR_EQ(s1, s2) (!strcmp((s1), (s2)))
51 /* configuration files support */
52 static int get_rte_cfg(unsigned sindex, char *str, void *data);
53 static int get_global_cfg(unsigned sindex, char *str, void *data);
54 static int get_port_cfg(unsigned sindex, char *str, void *data);
55 static int get_defaults_cfg(unsigned sindex, char *str, void *data);
56 static int get_cache_set_cfg(unsigned sindex, char *str, void *data);
57 static int get_var_cfg(unsigned sindex, char *str, void *data);
58 static int get_lua_cfg(unsigned sindex, char *str, void *data);
59 static int get_core_cfg(unsigned sindex, char *str, void *data);
// NOTE(review): every line below carries an embedded original-file line
// number from an extraction pass; several initializers are missing fields
// and closing braces in this view. Code is preserved byte-identical.

// Path of the config file being parsed (default unless overridden).
61 static const char *cfg_file = DEFAULT_CONFIG_FILE;
// Parsed [eal options] results, filled in by get_rte_cfg().
62 static struct rte_cfg rte_cfg;
// Per-"cache set" configuration table, filled in by get_cache_set_cfg().
63 struct prox_cache_set_cfg prox_cache_set_cfg[PROX_MAX_CACHE_SET];

// Scratch buffer for formatted parse errors; err_str points either at a
// static default message or at format_err_str after set_errf() runs.
65 static char format_err_str[1024];
66 static const char *err_str = "Unknown error";

// Section descriptors: each binds a config-file section name to its
// parser callback and (optionally) the data structure it populates.
68 static struct cfg_section eal_default_cfg = {
69 .name = "eal options",
70 .parser = get_rte_cfg,
77 static struct cfg_section port_cfg = {
79 .parser = get_port_cfg,
80 .data = &prox_port_cfg,
86 static struct cfg_section var_cfg = {
88 .parser = get_var_cfg,
95 static struct cfg_section cache_set_cfg = {
96 .name = "cache set #",
97 .parser = get_cache_set_cfg,
98 .data = &prox_cache_set_cfg,
104 static struct cfg_section defaults_cfg = {
106 .parser = get_defaults_cfg,
113 static struct cfg_section settings_cfg = {
115 .parser = get_global_cfg,
122 static struct cfg_section lua_cfg = {
124 .parser = get_lua_cfg,
131 static struct cfg_section core_cfg = {
133 .parser = get_core_cfg,
134 .data = lcore_cfg_init,
// Format an error message into the static buffer and make err_str point
// at it, so callers can report the most recent parse error.
// Not thread-safe: format_err_str/err_str are shared file-scope state.
140 static void set_errf(const char *format, ...)
143 va_start(ap, format);
// vsnprintf bounds the write to the buffer and NUL-terminates it.
144 vsnprintf(format_err_str, sizeof(format_err_str), format, ap);
146 err_str = format_err_str;
149 /* [eal options] parser */
// [eal options] section parser. 'str' is one "key=value" line; 'data'
// is the struct rte_cfg to populate. Returns 0 on success, non-zero on
// error (with err_str set via set_errf()).
150 static int get_rte_cfg(__attribute__((unused))unsigned sindex, char *str, void *data)
152 struct rte_cfg *pconfig = (struct rte_cfg *)data;

// Defensive NULL checks on both the input line and the target struct.
154 if (str == NULL || pconfig == NULL) {

// get_cfg_key() splits the line at '='; pkey is the value part.
158 char *pkey = get_cfg_key(str);
160 set_errf("Missing key after option");

// "-m": memory in MB passed through to the EAL.
164 if (STR_EQ(str, "-m")) {
165 return parse_int(&pconfig->memory, pkey);
// "-n": number of memory channels; zero is rejected.
167 if (STR_EQ(str, "-n")) {
168 if (parse_int(&pconfig->force_nchannel, pkey)) {
171 if (pconfig->force_nchannel == 0) {
172 set_errf("Invalid number of memory channels");
// "-r": number of memory ranks; valid range is 1..16.
177 if (STR_EQ(str, "-r")) {
178 if (parse_int(&pconfig->force_nrank, pkey)) {
181 if (pconfig->force_nrank == 0 || pconfig->force_nrank > 16) {
182 set_errf("Invalid number of memory ranks");

// Boolean EAL toggles, parsed directly into the config flags.
188 if (STR_EQ(str, "no-pci")) {
189 return parse_bool(&pconfig->no_pci, pkey);
191 if (STR_EQ(str, "no-hpet")) {
192 return parse_bool(&pconfig->no_hpet, pkey);
194 if (STR_EQ(str, "no-shconf")) {
195 return parse_bool(&pconfig->no_shconf, pkey);
197 if (STR_EQ(str, "no-huge")) {
198 return parse_bool(&pconfig->no_hugetlbfs, pkey);
200 if (STR_EQ(str, "no-output")) {
201 return parse_bool(&pconfig->no_output, pkey);

// "huge-dir": replace any previous strdup'd value to avoid a leak.
204 if (STR_EQ(str, "huge-dir")) {
205 if (pconfig->hugedir) {
206 free(pconfig->hugedir);
// NOTE(review): strdup() result is not visibly checked for NULL here.
208 pconfig->hugedir = strdup(pkey);

// "eal": free-form extra EAL arguments, stored as a duplicated string.
212 if (STR_EQ(str, "eal")) {
213 char eal[MAX_STR_LEN_PROC];
218 if (parse_str(eal, pkey, sizeof(eal)))
221 strip_spaces(&pkey, 1);
223 pconfig->eal = strdup(pkey);

227 set_errf("Option '%s' is not known", str);
// Deprecated [global] options: matching keys are rejected with the
// accompanying explanation (see the loop in get_global_cfg()).
231 struct cfg_depr global_cfg_depr[] = {
232 {"virtualization", "This is now set automatically if needed"},
233 {"qinq_tag", "This option is deprecated"},
234 {"wait on quit", "This is now set automatically if needed"},
// Return the directory component of cfg_file (used e.g. to resolve Lua
// files relative to the config file). Result lives in a static buffer.
238 const char *get_cfg_dir(void)
240 static char dir[PATH_MAX];
// NOTE(review): if cfg_file were empty, strlen()-1 would wrap (size_t
// underflow) — presumably cfg_file is always non-empty here; confirm.
241 size_t end = strlen(cfg_file) - 1;
// Scan backwards to the last '/' separator.
242 while (end > 0 && cfg_file[end] != '/')
// NOTE(review): strncpy with length 'end' does not itself guarantee NUL
// termination; dir is static (zero-filled initially) — TODO confirm
// termination is handled on the lines not visible in this view.
245 strncpy(dir, cfg_file, end);
// [lua] section parser: executes one line of Lua ('str') in the shared
// prox_lua() state, temporarily chdir'ing to the config-file directory
// so relative paths in the Lua code resolve against the config location.
249 static int get_lua_cfg(__attribute__((unused)) unsigned sindex, __attribute__((unused)) char *str, __attribute__((unused)) void *data)
// Remember the current directory so it can be restored afterwards.
253 if (NULL == getcwd(cwd, sizeof(cwd))) {
254 set_errf("Failed to get current directory while loading Lua file\n");
257 status = chdir(get_cfg_dir());
259 set_errf("Failed to change directory to '%s' while loading Lua file\n", get_cfg_dir());
263 struct lua_State *l = prox_lua();

// Copy the line and append a trailing newline before handing it to Lua.
266 prox_strncpy(str_cpy, str, sizeof(str_cpy));
267 uint32_t len = strlen(str_cpy);
268 str_cpy[len++] = '\n';

// Compile the chunk, then run it; on either failure report Lua's own
// error message (top of the Lua stack).
271 status = luaL_loadstring(l, str_cpy);
273 set_errf("Lua error: '%s'\n", lua_tostring(l, -1));
278 status = lua_pcall(l, 0, LUA_MULTRET, 0);
280 set_errf("Lua error: '%s'\n", lua_tostring(l, -1));

// Restore the working directory saved above.
287 set_errf("Failed to restore current directory to '%s' while loading Lua file\n", cwd);
294 /* [global] parser */
// [global] section parser: one "key=value" line into struct prox_cfg.
// Returns 0 on success; non-zero with err_str set on error.
295 static int get_global_cfg(__attribute__((unused))unsigned sindex, char *str, void *data)
297 struct prox_cfg *pset = (struct prox_cfg *)data;

299 if (str == NULL || pset == NULL) {

303 char *pkey = get_cfg_key(str);
305 set_errf("Missing key after option");

// Reject options listed in the deprecation table with an explanation.
309 for (uint32_t i = 0; i < RTE_DIM(global_cfg_depr); ++i) {
310 if (STR_EQ(str, global_cfg_depr[i].opt)) {
311 set_errf("Option '%s' is deprecated%s%s",
312 global_cfg_depr[i].opt, strlen(global_cfg_depr[i].info)? ": ": "", global_cfg_depr[i].info);

// Simple scalar settings.
317 if (STR_EQ(str, "name")) {
318 return parse_str(pset->name, pkey, sizeof(pset->name));

321 if (STR_EQ(str, "start time")) {
322 return parse_int(&pset->start_time, pkey);

325 if (STR_EQ(str, "duration time")) {
326 return parse_int(&pset->duration_time, pkey);

// Boolean flags OR'd into pset->flags.
329 if (STR_EQ(str, "shuffle")) {
330 return parse_flag(&pset->flags, DSF_SHUFFLE, pkey);
332 if (STR_EQ(str, "disable cmt")) {
333 return parse_flag(&pset->flags, DSF_DISABLE_CMT, pkey);
335 if (STR_EQ(str, "mp rings")) {
336 return parse_flag(&pset->flags, DSF_MP_RINGS, pkey);
338 if (STR_EQ(str, "enable bypass")) {
339 return parse_flag(&pset->flags, DSF_ENABLE_BYPASS, pkey);
341 if (STR_EQ(str, "heartbeat timeout")) {
342 return parse_int(&pset->heartbeat_timeout, pkey);

345 if (STR_EQ(str, "cpe table map")) {
346 /* The config defined ports through 0, 1, 2 ... which
347 need to be associated with ports. This is done
348 through defining it using "cpe table map=" */
349 return parse_port_name_list((uint32_t*)pset->cpe_table_ports, NULL, PROX_MAX_PORTS, pkey);

352 if (STR_EQ(str, "pre cmd")) {

356 if (STR_EQ(str, "unique mempool per socket")) {
357 return parse_flag(&pset->flags, UNIQUE_MEMPOOL_PER_SOCKET, pkey);

// "log buffer size" takes k/m/g suffixes via parse_kmg().
360 if (STR_EQ(str, "log buffer size")) {
361 if (parse_kmg(&pset->logbuf_size, pkey)) {
364 plog_info("Logging to buffer with size = %d\n", pset->logbuf_size);

368 set_errf("Option '%s' is not known", str);
372 /* [variable] parser */
// [variables] section parser: each line defines a substitution variable;
// the whole line up to '=' is the name, get_cfg_key() yields the value.
373 static int get_var_cfg(__attribute__((unused)) unsigned sindex, char *str, __attribute__((unused)) void *data)
375 return add_var(str, get_cfg_key(str), 0);
378 /* [defaults] parser */
// [defaults] section parser: applies a default value to every task of
// every lcore in lcore_cfg_init (RTE_MAX_LCORE x MAX_TASKS_PER_CORE).
379 static int get_defaults_cfg(__attribute__((unused)) unsigned sindex, char *str, __attribute__((unused)) void *data)
384 pkey = get_cfg_key(str);
386 set_errf("Missing key after option");

// Default mempool size (k/m/g suffixes allowed), fanned out per-task.
390 if (STR_EQ(str, "mempool size")) {
392 if (parse_kmg(&val, pkey)) {

396 for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
397 struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id];
398 cur_lcore_cfg_init->id = lcore_id;
399 for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) {
400 struct task_args *targ = &cur_lcore_cfg_init->targs[task_id];

// Default QinQ tag applied to every task.
408 if (STR_EQ(str, "qinq tag")) {
409 for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
410 struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id];
411 cur_lcore_cfg_init->id = lcore_id;
412 for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) {
413 struct task_args *targ = &cur_lcore_cfg_init->targs[task_id];
414 parse_int(&targ->qinq_tag, pkey);

// Default per-task mbuf cache size (k/m/g suffixes allowed).
419 if (STR_EQ(str, "memcache size")) {
421 if (parse_kmg(&val, pkey)) {

425 for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
426 struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id];
427 cur_lcore_cfg_init->id = lcore_id;
428 for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) {
429 struct task_args *targ = &cur_lcore_cfg_init->targs[task_id];
430 targ->nb_cache_mbuf = val;

436 set_errf("Option '%s' is not known", str);
440 /* [cache set] parser */
// [cache set #N] section parser; sindex carries the section index OR'd
// with CFG_INDEXED, so masking recovers the cache-set number.
441 static int get_cache_set_cfg(unsigned sindex, char *str, void *data)
443 struct prox_cache_set_cfg *cfg = (struct prox_cache_set_cfg *)data;

445 uint8_t cur_if = sindex & ~CFG_INDEXED;

// Bounds-check the section index before indexing the global table.
447 if (cur_if >= PROX_MAX_CACHE_SET) {
448 set_errf("Cache set ID is too high (max allowed %d)", PROX_MAX_CACHE_SET - 1 );

// Re-point cfg at the entry selected by the section index.
452 cfg = &prox_cache_set_cfg[cur_if];

454 if (str == NULL || data == NULL) {

458 char *pkey = get_cfg_key(str);
461 set_errf("Missing key after option");

// "mask": CAT/cache-way bitmask for this cache set.
465 if (STR_EQ(str, "mask")) {
467 int err = parse_int(&val, pkey);
473 plog_info("\tCache set %d has mask %x\n", cur_if, cfg->mask);
// [port N] section parser; sindex carries the port index OR'd with
// CFG_INDEXED. Populates prox_port_cfg[N] one "key=value" line at a time.
480 static int get_port_cfg(unsigned sindex, char *str, void *data)
482 struct prox_port_cfg *cfg = (struct prox_port_cfg *)data;

484 uint8_t cur_if = sindex & ~CFG_INDEXED;

486 if (cur_if >= PROX_MAX_PORTS) {
487 set_errf("Port ID is too high (max allowed %d)", PROX_MAX_PORTS - 1 );

// Re-point cfg at the entry selected by the section index.
491 cfg = &prox_port_cfg[cur_if];

493 if (str == NULL || data == NULL) {

497 char *pkey = get_cfg_key(str);
500 set_errf("Missing key after option");

// "mac": hardware (read from NIC), random, or an explicit address.
504 if (STR_EQ(str, "mac")) {
505 if (STR_EQ(pkey, "hardware")) {
506 cfg->type = PROX_PORT_MAC_HW;
508 else if (STR_EQ(pkey, "random")) {
509 cfg->type = PROX_PORT_MAC_RAND;
512 cfg->type = PROX_PORT_MAC_SET;
513 if (parse_mac(&cfg->eth_addr, pkey)) {
// "name": register a symbolic name for this port index.
518 else if (STR_EQ(str, "name")) {
520 prox_strncpy(cfg->name, pkey, MAX_NAME_SIZE);
521 PROX_ASSERT(cur_if < PROX_MAX_PORTS);
522 return add_port_name(cur_if, pkey);
// Descriptor ring sizes.
524 else if (STR_EQ(str, "rx desc")) {
525 return parse_int(&cfg->n_rxd, pkey);
527 else if (STR_EQ(str, "tx desc")) {
528 return parse_int(&cfg->n_txd, pkey);
530 else if (STR_EQ(str, "promiscuous")) {
532 if (parse_bool(&val, pkey)) {
535 cfg->promiscuous = val;
// "multicast": append one address, up to NB_MCAST_ADDR entries.
537 else if (STR_EQ(str, "multicast")) {
539 if (cfg->nb_mc_addr >= NB_MCAST_ADDR) {
540 plog_err("too many multicast addresses\n");
543 if (parse_mac(&cfg->mc_addr[cfg->nb_mc_addr], pkey)) {
// "lsc": link status change interrupt; remember it was set explicitly.
548 else if (STR_EQ(str, "lsc")) {
549 cfg->lsc_set_explicitely = 1;
551 if (parse_bool(&val, pkey)) {
// Offload-related options only exist from DPDK 18.08 onwards.
556 #if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
557 else if (STR_EQ(str, "disable tx offload")) {
559 if (parse_int(&val, pkey)) {
563 cfg->disabled_tx_offload = val;
// "strip crc": expressed via whichever CRC offload flag this DPDK
// version defines (CRC_STRIP pre-18.11, KEEP_CRC after — inverted sense).
566 else if (STR_EQ(str, "strip crc")) {
568 if (parse_bool(&val, pkey)) {
571 #if defined(DEV_RX_OFFLOAD_CRC_STRIP)
573 cfg->requested_rx_offload |= DEV_RX_OFFLOAD_CRC_STRIP;
575 cfg->requested_rx_offload &= ~DEV_RX_OFFLOAD_CRC_STRIP;
577 #if defined (DEV_RX_OFFLOAD_KEEP_CRC)
579 cfg->requested_rx_offload &= ~DEV_RX_OFFLOAD_KEEP_CRC;
582 cfg->requested_rx_offload |= DEV_RX_OFFLOAD_KEEP_CRC;
// "vlan": enable/disable VLAN strip (rx) and insert (tx) together.
586 else if (STR_EQ(str, "vlan")) {
587 #if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
589 if (parse_bool(&val, pkey)) {
593 cfg->requested_rx_offload |= DEV_RX_OFFLOAD_VLAN_STRIP;
594 cfg->requested_tx_offload |= DEV_TX_OFFLOAD_VLAN_INSERT;
596 cfg->requested_rx_offload &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
597 cfg->requested_tx_offload &= ~DEV_TX_OFFLOAD_VLAN_INSERT;
600 plog_warn("vlan option not supported : update DPDK at least to 18.08 to support this option\n");
// "mtu size": derive max_rx_pkt_len and enable jumbo frames if needed.
603 else if (STR_EQ(str, "mtu size")) {
605 if (parse_int(&val, pkey)) {
610 // A frame of 1526 bytes (1500 bytes mtu, 14 bytes hdr, 4 bytes crc and 8 bytes vlan)
611 // should not be considered as a jumbo frame. However rte_ethdev.c considers that
612 // the max_rx_pkt_len for a non jumbo frame is 1518
613 cfg->port_conf.rxmode.max_rx_pkt_len = cfg->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN;
614 if (cfg->port_conf.rxmode.max_rx_pkt_len > PROX_RTE_ETHER_MAX_LEN) {
615 cfg->requested_rx_offload |= DEV_RX_OFFLOAD_JUMBO_FRAME;
// "rss": enable RSS on IPv4.
620 else if (STR_EQ(str, "rss")) {
622 if (parse_bool(&val, pkey)) {
626 cfg->port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
627 cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4;
// Named software rings replacing the physical rx/tx path.
630 else if (STR_EQ(str, "rx_ring")) {
631 parse_str(cfg->rx_ring, pkey, sizeof(cfg->rx_ring));
633 else if (STR_EQ(str, "tx_ring")) {
634 parse_str(cfg->tx_ring, pkey, sizeof(cfg->tx_ring));
// Map a policer color/action keyword from the config file to its
// enum police_action value (return statements not visible in this view).
640 static enum police_action str_to_color(const char *str)
642 if (STR_EQ(str, "green"))
644 if (STR_EQ(str, "yellow"))
646 if (STR_EQ(str, "red"))
648 if (STR_EQ(str, "drop"))
// Deprecated task-mode names and [core] options; matching entries are
// rejected with the paired explanation string in get_core_cfg().
653 struct cfg_depr task_cfg_depr[] = {

657 struct cfg_depr core_cfg_depr[] = {
660 {"network side", ""},
664 static int get_core_cfg(unsigned sindex, char *str, void *data)
667 struct lcore_cfg *lconf = (struct lcore_cfg *)data;
669 if (str == NULL || lconf == NULL || !(sindex & CFG_INDEXED)) {
673 pkey = get_cfg_key(str);
675 set_errf("Missing key after option");
679 uint32_t ncore = sindex & ~CFG_INDEXED;
680 if (ncore >= RTE_MAX_LCORE) {
681 set_errf("Core index too high (max allowed %d)", RTE_MAX_LCORE - 1);
685 lconf = &lconf[ncore];
687 for (uint32_t i = 0; i < RTE_DIM(core_cfg_depr); ++i) {
688 if (STR_EQ(str, core_cfg_depr[i].opt)) {
689 set_errf("Option '%s' is deprecated%s%s",
690 core_cfg_depr[i].opt, strlen(core_cfg_depr[i].info)? ": ": "", core_cfg_depr[i].info);
696 lcore_to_socket_core_ht(ncore, buff, sizeof(buff));
698 if (STR_EQ(str, "task")) {
701 if (parse_int(&val, pkey)) {
704 if (val >= MAX_TASKS_PER_CORE) {
705 set_errf("Too many tasks for core (max allowed %d)", MAX_TASKS_PER_CORE - 1);
708 if (val != lconf->n_tasks_all) {
709 set_errf("Task ID skipped or defined twice");
713 lconf->active_task = val;
715 lconf->targs[lconf->active_task].task = lconf->active_task;
717 if (lconf->n_tasks_all < lconf->active_task + 1) {
718 lconf->n_tasks_all = lconf->active_task + 1;
723 struct task_args *targ = &lconf->targs[lconf->active_task];
724 if (STR_EQ(str, "tx ports from routing table")) {
725 uint32_t vals[PROX_MAX_PORTS];
727 if (!(targ->task_init->flag_features & TASK_FEATURE_ROUTING)) {
728 set_errf("tx port form route not supported mode %s", targ->task_init->mode_str);
732 if (parse_port_name_list(vals, &n_if, PROX_MAX_PORTS, pkey)) {
736 for (uint8_t i = 0; i < n_if; ++i) {
737 targ->tx_port_queue[i].port = vals[i];
740 targ->runtime_flags |= TASK_ROUTING;
743 if (STR_EQ(str, "tx ports from cpe table")) {
744 uint32_t vals[PROX_MAX_PORTS];
748 char* mapping_str = strstr(pkey, " remap=");
750 if (mapping_str != NULL) {
752 mapping_str += strlen(" remap=");
753 n_remap = parse_remap(targ->mapping, mapping_str);
756 if (parse_port_name_list(vals, &ret, PROX_MAX_PORTS, pkey)) {
760 if (n_remap != -1 && ret != (uint32_t)n_remap) {
761 set_errf("Expected %d remap elements but had %d", n_remap, ret);
765 for (uint8_t i = 0; i < ret; ++i) {
766 targ->tx_port_queue[i].port = vals[i];
768 /* default mapping this case is port0 -> port0 */
770 targ->mapping[vals[i]] = i;
774 targ->nb_txports = ret;
778 if (STR_EQ(str, "tx cores from routing table")) {
779 if (!(targ->task_init->flag_features & TASK_FEATURE_ROUTING)) {
780 set_errf("tx port form route not supported mode %s", targ->task_init->mode_str);
784 struct core_task_set *cts = &targ->core_task_set[0];
786 if (parse_task_set(cts, pkey))
789 if (cts->n_elems > MAX_WT_PER_LB) {
790 set_errf("Maximum worker threads allowed is %u but have %u", MAX_WT_PER_LB, cts->n_elems);
794 targ->nb_worker_threads = cts->n_elems;
795 targ->nb_txrings = cts->n_elems;
797 if (targ->nb_txrings > MAX_RINGS_PER_TASK) {
798 set_errf("Maximum allowed TX rings is %u but have %u", MAX_RINGS_PER_TASK, targ->nb_txrings);
802 targ->runtime_flags |= TASK_ROUTING;
805 if (STR_EQ(str, "tx cores from cpe table")) {
806 struct core_task_set *core_task_set = &targ->core_task_set[0];
810 mapping_str = strstr(pkey, " remap=");
811 if (mapping_str == NULL) {
812 set_errf("There is no default mapping for tx cores from cpe table. Please specify it through remap=");
816 mapping_str += strlen(" remap=");
817 ret = parse_remap(targ->mapping, mapping_str);
822 struct core_task_set *cts = &targ->core_task_set[0];
824 if (parse_task_set(cts, pkey))
826 if (cts->n_elems > MAX_RINGS_PER_TASK) {
827 set_errf("Maximum cores to route to is %u\n", MAX_RINGS_PER_TASK);
831 targ->nb_txrings = cts->n_elems;
833 if (ret != targ->nb_txrings) {
834 set_errf("Expecting same number of remaps as cores\n", str);
840 if (STR_EQ(str, "delay ms")) {
841 if (targ->delay_us) {
842 set_errf("delay ms and delay us are mutually exclusive\n", str);
846 int rc = parse_int(&delay_ms, pkey);
847 targ->delay_us = delay_ms * 1000;
850 if (STR_EQ(str, "delay us")) {
851 if (targ->delay_us) {
852 set_errf("delay ms and delay us are mutually exclusive\n", str);
855 return parse_int(&targ->delay_us, pkey);
857 if (STR_EQ(str, "random delay us")) {
858 return parse_int(&targ->random_delay_us, pkey);
860 if (STR_EQ(str, "cpe table timeout ms")) {
861 return parse_int(&targ->cpe_table_timeout_ms, pkey);
863 if (STR_EQ(str, "ctrl path polling frequency")) {
864 int rc = parse_int(&targ->ctrl_freq, pkey);
866 if (targ->ctrl_freq == 0) {
867 set_errf("ctrl frequency must be non null.");
874 if (STR_EQ(str, "handle arp")) {
875 return parse_flag(&targ->runtime_flags, TASK_CTRL_HANDLE_ARP, pkey);
877 if (STR_EQ(str, "fast path handle arp")) {
878 return parse_flag(&targ->runtime_flags, TASK_FP_HANDLE_ARP, pkey);
881 /* Using tx port name, only a _single_ port can be assigned to a task. */
882 if (STR_EQ(str, "tx port")) {
883 if (targ->nb_txports > 0) {
884 set_errf("Only one tx port can be defined per task. Use a LB task or routing instead.");
889 uint32_t ports[PROX_MAX_PORTS];
891 if(parse_port_name_list(ports, &n_if, PROX_MAX_PORTS, pkey)) {
895 PROX_ASSERT(n_if-1 < PROX_MAX_PORTS);
897 for (uint8_t i = 0; i < n_if; ++i) {
898 targ->tx_port_queue[i].port = ports[i];
903 targ->nb_worker_threads = targ->nb_txports;
908 if (STR_EQ(str, "rx ring")) {
910 int err = parse_bool(&val, pkey);
911 if (!err && val && targ->rx_port_queue[0].port != OUT_DISCARD) {
912 set_errf("Can't read both from internal ring and external port from the same task. Use multiple tasks instead.");
916 return parse_flag(&targ->flags, TASK_ARG_RX_RING, pkey);
918 if (STR_EQ(str, "private")) {
919 return parse_bool(&targ->use_src, pkey);
921 if (STR_EQ(str, "use src ip")) {
922 return parse_bool(&targ->use_src, pkey);
924 if (STR_EQ(str, "nat table")) {
925 return parse_str(targ->nat_table, pkey, sizeof(targ->nat_table));
927 if (STR_EQ(str, "rules")) {
928 return parse_str(targ->rules, pkey, sizeof(targ->rules));
930 if (STR_EQ(str, "route table")) {
931 return parse_str(targ->route_table, pkey, sizeof(targ->route_table));
933 if (STR_EQ(str, "dscp")) {
934 return parse_str(targ->dscp, pkey, sizeof(targ->dscp));
936 if (STR_EQ(str, "tun_bindings")) {
937 return parse_str(targ->tun_bindings, pkey, sizeof(targ->tun_bindings));
939 if (STR_EQ(str, "cpe table")) {
940 return parse_str(targ->cpe_table_name, pkey, sizeof(targ->cpe_table_name));
942 if (STR_EQ(str, "user table")) {
943 return parse_str(targ->user_table, pkey, sizeof(targ->user_table));
945 if (STR_EQ(str, "streams")) {
946 return parse_str(targ->streams, pkey, sizeof(targ->streams));
948 if (STR_EQ(str, "local lpm")) {
949 return parse_flag(&targ->flags, TASK_ARG_LOCAL_LPM, pkey);
951 if (STR_EQ(str, "drop")) {
952 return parse_flag(&targ->flags, TASK_ARG_DROP, pkey);
954 if (STR_EQ(str, "loop")) {
955 parse_flag(&targ->loop, 1, pkey);
956 return parse_flag(&targ->loop, 1, pkey);
958 if (STR_EQ(str, "qinq")) {
959 return parse_flag(&targ->flags, TASK_ARG_QINQ_ACL, pkey);
961 if (STR_EQ(str, "bps")) {
962 return parse_u64(&targ->rate_bps, pkey);
964 if (STR_EQ(str, "random")) {
965 return parse_str(targ->rand_str[targ->n_rand_str++], pkey, sizeof(targ->rand_str[0]));
967 if (STR_EQ(str, "rand_offset")) {
968 if (targ->n_rand_str == 0) {
969 set_errf("No random defined previously (use random=...)");
973 return parse_int(&targ->rand_offset[targ->n_rand_str - 1], pkey);
975 if (STR_EQ(str, "keep src mac")) {
976 return parse_flag(&targ->flags, DSF_KEEP_SRC_MAC, pkey);
978 if (STR_EQ(str, "pcap file")) {
979 return parse_str(targ->pcap_file, pkey, sizeof(targ->pcap_file));
981 if (STR_EQ(str, "pkt inline")) {
982 char pkey2[MAX_CFG_STRING_LEN];
983 if (parse_str(pkey2, pkey, sizeof(pkey2)) != 0) {
984 set_errf("Error while parsing pkt line, too long\n");
988 const size_t pkey_len = strlen(pkey2);
991 for (size_t i = 0; i < pkey_len; ++i) {
995 if (i + 1 == pkey_len) {
996 set_errf("Incomplete byte at character %z", i);
1002 if (pkey2[i] >= '0' && pkey2[i] <= '9') {
1003 byte = (pkey2[i] - '0') << 4;
1005 else if (pkey2[i] >= 'a' && pkey2[i] <= 'f') {
1006 byte = (pkey2[i] - 'a' + 10) << 4;
1008 else if (pkey2[i] >= 'A' && pkey2[i] <= 'F') {
1009 byte = (pkey2[i] - 'A' + 10) << 4;
1012 set_errf("Invalid character in pkt inline at byte %d (%c)", i, pkey2[i]);
1016 if (pkey2[i + 1] >= '0' && pkey2[i + 1] <= '9') {
1017 byte |= (pkey2[i + 1] - '0');
1019 else if (pkey2[i + 1] >= 'a' && pkey2[i + 1] <= 'f') {
1020 byte |= (pkey2[i + 1] - 'a' + 10);
1022 else if (pkey2[i + 1] >= 'A' && pkey2[i + 1] <= 'F') {
1023 byte |= (pkey2[i + 1] - 'A' + 10);
1026 set_errf("Invalid character in pkt inline at byte %d (%c)", i, pkey2[i + 1]);
1029 if (targ->pkt_size == sizeof(targ->pkt_inline)) {
1030 set_errf("Inline packet definition can't be longer than %u", sizeof(targ->pkt_inline));
1034 targ->pkt_inline[targ->pkt_size++] = byte;
1040 if (STR_EQ(str, "accuracy limit nsec")) {
1041 return parse_int(&targ->accuracy_limit_nsec, pkey);
1043 if (STR_EQ(str, "latency bucket size")) {
1044 return parse_int(&targ->bucket_size, pkey);
1046 if (STR_EQ(str, "latency buffer size")) {
1047 return parse_int(&targ->latency_buffer_size, pkey);
1049 if (STR_EQ(str, "accuracy pos")) {
1050 return parse_int(&targ->accur_pos, pkey);
1052 if (STR_EQ(str, "signature")) {
1053 return parse_int(&targ->sig, pkey);
1055 if (STR_EQ(str, "signature pos")) {
1056 return parse_int(&targ->sig_pos, pkey);
1058 if (STR_EQ(str, "lat pos")) {
1059 targ->lat_enabled = 1;
1060 return parse_int(&targ->lat_pos, pkey);
1062 if (STR_EQ(str, "packet id pos")) {
1063 return parse_int(&targ->packet_id_pos, pkey);
1065 if (STR_EQ(str, "probability")) {
1067 int rc = parse_float(&probability, pkey);
1068 if (probability == 0) {
1069 set_errf("Probability must be != 0\n");
1071 } else if (probability > 100.0) {
1072 set_errf("Probability must be < 100\n");
1075 targ->probability = probability * 10000;
1078 if (STR_EQ(str, "concur conn")) {
1079 return parse_int(&targ->n_concur_conn, pkey);
1081 if (STR_EQ(str, "max setup rate")) {
1082 return parse_int(&targ->max_setup_rate, pkey);
1084 if (STR_EQ(str, "pkt size")) {
1085 return parse_int(&targ->pkt_size, pkey);
1087 if (STR_EQ(str, "min bulk size")) {
1088 return parse_int(&targ->min_bulk_size, pkey);
1090 if (STR_EQ(str, "max bulk size")) {
1091 return parse_int(&targ->max_bulk_size, pkey);
1093 if (STR_EQ(str, "rx port")) {
1094 if (targ->flags & TASK_ARG_RX_RING) {
1095 set_errf("Can't read both from internal ring and external port from the same task. Use multiple tasks instead.");
1098 uint32_t vals[PROX_MAX_PORTS];
1101 if (parse_port_name_list(vals, &n_if, PROX_MAX_PORTS, pkey)) {
1105 for (uint8_t i = 0; i < n_if; ++i) {
1106 PROX_ASSERT(vals[i] < PROX_MAX_PORTS);
1107 targ->rx_port_queue[i].port = vals[i];
1113 if (STR_EQ(str, "mode")) {
1114 /* Check deprecated task modes */
1116 int ret = parse_str(mode, pkey, sizeof(mode));
1120 for (uint32_t i = 0; i < RTE_DIM(task_cfg_depr); ++i) {
1121 if (STR_EQ(mode, task_cfg_depr[i].opt)) {
1122 set_errf("Task mode '%s' is deprecated%s%s",
1123 task_cfg_depr[i].opt, strlen(task_cfg_depr[i].info)? ": ": "", task_cfg_depr[i].info);
1128 /* master is a special mode that is always needed (cannot be turned off) */
1129 if (STR_EQ(mode, "master")) {
1130 prox_cfg.master = ncore;
1131 targ->mode = MASTER;
1132 if (lconf->n_tasks_all > 1 || targ->task != 0) {
1133 set_errf("Master core can only have one task\n");
1136 // Initialize number of tasks to 1 for master, even if no task specified
1137 lconf->n_tasks_all = 1;
1138 lconf->active_task = 0;
1139 lconf->targs[lconf->active_task].task = 0;
1140 struct task_init* task_init = to_task_init(mode, "");
1142 targ->mode = task_init->mode;
1144 targ->task_init = task_init;
1148 struct task_init* task_init = to_task_init(mode, "");
1150 targ->mode = task_init->mode;
1153 set_errf("Task mode '%s' is invalid", mode);
1157 targ->task_init = task_init;
1160 if (STR_EQ(str, "users")) {
1161 return parse_int(&targ->n_flows, pkey);
1164 if (STR_EQ(str, "mark")) {
1165 return parse_flag(&targ->runtime_flags, TASK_MARK, pkey);
1168 if (STR_EQ(str, "mark green")) {
1169 return parse_int(&targ->marking[0], pkey);
1172 if (STR_EQ(str, "mark yellow")) {
1173 return parse_int(&targ->marking[1], pkey);
1176 if (STR_EQ(str, "mark red")) {
1177 return parse_int(&targ->marking[2], pkey);
1180 if (STR_EQ(str, "tx cores")) {
1181 uint8_t dest_task = 0;
1182 /* if user did not specify, dest_port is left at default (first type) */
1183 uint8_t dest_proto = 0;
1184 uint8_t ctrl = CTRL_TYPE_DP;
1185 char *task_str = strstr(pkey, "proto=");
1187 task_str += strlen("proto=");
1189 if (STR_EQ(task_str, "ipv4")) {
1192 else if (STR_EQ(task_str, "arp")) {
1195 else if (STR_EQ(task_str, "ipv6")) {
1199 set_errf("proto needs to be either ipv4, arp or ipv6");
1205 task_str = strstr(pkey, "task=");
1211 task_str += strlen("task=");
1212 char *task_str_end = strstr(task_str, " ");
1216 if (0 == strlen(task_str)) {
1217 set_errf("Invalid task= syntax");
1221 switch (task_str[strlen(task_str) - 1]) {
1223 ctrl = CTRL_TYPE_PKT;
1226 ctrl = CTRL_TYPE_MSG;
1232 if (task_str[strlen(task_str) -1] < '0' ||
1233 task_str[strlen(task_str) -1] > '9') {
1234 set_errf("Unknown ring type %c.\n",
1235 task_str[strlen(task_str) - 1]);
1240 dest_task = atoi(task_str);
1241 if (dest_task >= MAX_TASKS_PER_CORE) {
1242 set_errf("Destination task too high (max allowed %d)", MAX_TASKS_PER_CORE - 1);
1250 struct core_task_set *cts = &targ->core_task_set[dest_proto];
1252 if (parse_task_set(cts, pkey))
1255 if (cts->n_elems > MAX_WT_PER_LB) {
1256 set_errf("Too many worker threads (max allowed %d)", MAX_WT_PER_LB - 1);
1260 targ->nb_worker_threads = cts->n_elems;
1261 targ->nb_txrings += cts->n_elems;
1265 if (STR_EQ(str, "tx crc")) {
1266 return parse_flag(&targ->runtime_flags, TASK_TX_CRC, pkey);
1268 if (STR_EQ(str, "ring size")) {
1269 return parse_int(&targ->ring_size, pkey);
1271 if (STR_EQ(str, "mempool size")) {
1272 return parse_kmg(&targ->nb_mbuf, pkey);
1275 else if (STR_EQ(str, "mbuf size")) {
1276 return parse_int(&targ->mbuf_size, pkey);
1278 if (STR_EQ(str, "memcache size")) {
1279 return parse_kmg(&targ->nb_cache_mbuf, pkey);
1282 if (STR_EQ(str, "byte offset")) {
1283 return parse_int(&targ->byte_offset, pkey);
1286 if (STR_EQ(str, "realtime scheduling")) {
1287 return parse_flag(&lconf->flags, LCONF_FLAG_SCHED_RR, pkey);
1289 if (STR_EQ(str, "name")) {
1290 return parse_str(lconf->name, pkey, sizeof(lconf->name));
1292 /* MPLS configuration */
1293 if (STR_EQ(str, "untag mpls")) {
1294 return parse_flag(&targ->runtime_flags, TASK_MPLS_TAGGING, pkey);
1297 if (STR_EQ(str, "add mpls")) {
1298 return parse_flag(&targ->runtime_flags, TASK_MPLS_TAGGING, pkey);
1301 if (STR_EQ(str, "ether type")) {
1302 return parse_int(&targ->etype, pkey);
1305 if (STR_EQ(str, "cache set")) {
1306 return parse_int(&lconf->cache_set, pkey);
1309 if (STR_EQ(str, "sub mode")) {
1310 const char* mode_str = targ->task_init->mode_str;
1311 const char *sub_mode_str = pkey;
1313 targ->task_init = to_task_init(mode_str, sub_mode_str);
1314 if (!targ->task_init) {
1315 if (strcmp(sub_mode_str, "l3") != 0) {
1316 set_errf("sub mode %s not supported for mode %s", sub_mode_str, mode_str);
1319 targ->task_init = to_task_init(mode_str, "");
1320 if (!targ->task_init) {
1321 set_errf("sub mode %s not supported for mode %s", sub_mode_str, mode_str);
1325 if (strcmp(sub_mode_str, "l3") == 0) {
1326 prox_cfg.flags |= DSF_CTRL_PLANE_ENABLED;
1327 targ->flags |= TASK_ARG_L3;
1328 strcpy(targ->sub_mode_str, "l3");
1330 strcpy(targ->sub_mode_str, targ->task_init->sub_mode_str);
1335 if (STR_EQ(str, "mempool name")) {
1336 return parse_str(targ->pool_name, pkey, sizeof(targ->pool_name));
1338 if (STR_EQ(str, "dpi engine")) {
1339 return parse_str(targ->dpi_engine_path, pkey, sizeof(targ->dpi_engine_path));
1341 if (STR_EQ(str, "dpi engine arg")) {
1342 return parse_str(targ->dpi_engine_args[targ->n_dpi_engine_args++], pkey,
1343 sizeof(targ->dpi_engine_args[0]));
1345 if (STR_EQ(str, "dst mac")) { /* destination MAC address to be used for packets */
1346 if (parse_mac(&targ->edaddr, pkey)) {
1347 if (STR_EQ(pkey, "no")) {
1348 targ->flags |= TASK_ARG_DO_NOT_SET_DST_MAC;
1351 if (STR_EQ(pkey, "packet") == 0)
1356 targ->flags |= TASK_ARG_DST_MAC_SET;
1359 if (STR_EQ(str, "src mac")) {
1360 if (parse_mac(&targ->esaddr, pkey)) {
1361 if (STR_EQ(pkey, "no")) {
1362 targ->flags |= TASK_ARG_DO_NOT_SET_SRC_MAC;
1365 else if (STR_EQ(pkey, "packet"))
1367 else if (STR_EQ(pkey, "hw")) {
1368 targ->flags |= TASK_ARG_HW_SRC_MAC;
1374 targ->flags |= TASK_ARG_SRC_MAC_SET;
1377 if (STR_EQ(str, "igmp ipv4")) { /* IGMP Group */
1378 return parse_ip(&targ->igmp_address, pkey);
1380 if (STR_EQ(str, "gateway ipv4")) { /* Gateway IP address used when generating */
1381 if ((targ->flags & TASK_ARG_L3) == 0)
1382 plog_warn("gateway ipv4 configured but L3 sub mode not enabled\n");
1383 return parse_ip(&targ->gateway_ipv4, pkey);
1385 if (STR_EQ(str, "local ipv4")) { /* source IP address to be used for packets */
1386 return parse_ip(&targ->local_ipv4, pkey);
1388 if (STR_EQ(str, "remote ipv4")) { /* source IP address to be used for packets */
1389 return parse_ip(&targ->remote_ipv4, pkey);
1391 if (STR_EQ(str, "local ipv6")) { /* source IPv6 address to be used for packets */
1392 return parse_ip6(&targ->local_ipv6, pkey);
1394 if (STR_EQ(str, "arp timeout"))
1395 return parse_int(&targ->arp_timeout, pkey);
1396 if (STR_EQ(str, "arp update time"))
1397 return parse_int(&targ->arp_update_time, pkey);
1398 if (STR_EQ(str, "number of packets"))
1399 return parse_int(&targ->n_pkts, pkey);
1400 if (STR_EQ(str, "pipes")) {
1402 int err = parse_int(&val, pkey);
1405 if (!val || !rte_is_power_of_2(val)) {
1406 set_errf("Number of pipes has to be power of 2 and not zero");
1410 targ->qos_conf.port_params.n_pipes_per_subport = val;
1413 if (STR_EQ(str, "queue size")) {
1415 int err = parse_int(&val, pkey);
1419 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1420 targ->qos_conf.subport_params[0].qsize[0] = val;
1421 targ->qos_conf.subport_params[0].qsize[1] = val;
1422 targ->qos_conf.subport_params[0].qsize[2] = val;
1423 targ->qos_conf.subport_params[0].qsize[3] = val;
1425 targ->qos_conf.port_params.qsize[0] = val;
1426 targ->qos_conf.port_params.qsize[1] = val;
1427 targ->qos_conf.port_params.qsize[2] = val;
1428 targ->qos_conf.port_params.qsize[3] = val;
1432 if (STR_EQ(str, "subport tb rate")) {
1433 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1434 return parse_u64(&targ->qos_conf.subport_params[0].tb_rate, pkey);
1436 return parse_int(&targ->qos_conf.subport_params[0].tb_rate, pkey);
1439 if (STR_EQ(str, "subport tb size")) {
1440 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1441 return parse_u64(&targ->qos_conf.subport_params[0].tb_size, pkey);
1443 return parse_int(&targ->qos_conf.subport_params[0].tb_size, pkey);
1446 if (STR_EQ(str, "subport tc 0 rate")) {
1447 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1448 return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[0], pkey);
1450 return parse_int(&targ->qos_conf.subport_params[0].tc_rate[0], pkey);
1453 if (STR_EQ(str, "subport tc 1 rate")) {
1454 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1455 return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[1], pkey);
1457 return parse_int(&targ->qos_conf.subport_params[0].tc_rate[1], pkey);
1460 if (STR_EQ(str, "subport tc 2 rate")) {
1461 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1462 return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[2], pkey);
1464 return parse_int(&targ->qos_conf.subport_params[0].tc_rate[2], pkey);
1467 if (STR_EQ(str, "subport tc 3 rate")) {
1468 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1469 return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[3], pkey);
1471 return parse_int(&targ->qos_conf.subport_params[0].tc_rate[3], pkey);
1475 if (STR_EQ(str, "subport tc rate")) {
1477 int err = parse_int(&val, pkey);
1482 targ->qos_conf.subport_params[0].tc_rate[0] = val;
1483 targ->qos_conf.subport_params[0].tc_rate[1] = val;
1484 targ->qos_conf.subport_params[0].tc_rate[2] = val;
1485 targ->qos_conf.subport_params[0].tc_rate[3] = val;
1489 if (STR_EQ(str, "subport tc period")) {
1490 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1491 return parse_u64(&targ->qos_conf.subport_params[0].tc_period, pkey);
1493 return parse_int(&targ->qos_conf.subport_params[0].tc_period, pkey);
1496 if (STR_EQ(str, "pipe tb rate")) {
1497 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1498 return parse_u64(&targ->qos_conf.pipe_params[0].tb_rate, pkey);
1500 return parse_int(&targ->qos_conf.pipe_params[0].tb_rate, pkey);
1503 if (STR_EQ(str, "pipe tb size")) {
1504 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1505 return parse_u64(&targ->qos_conf.pipe_params[0].tb_size, pkey);
1507 return parse_int(&targ->qos_conf.pipe_params[0].tb_size, pkey);
1510 if (STR_EQ(str, "pipe tc rate")) {
1512 int err = parse_int(&val, pkey);
1517 targ->qos_conf.pipe_params[0].tc_rate[0] = val;
1518 targ->qos_conf.pipe_params[0].tc_rate[1] = val;
1519 targ->qos_conf.pipe_params[0].tc_rate[2] = val;
1520 targ->qos_conf.pipe_params[0].tc_rate[3] = val;
1523 if (STR_EQ(str, "pipe tc 0 rate")) {
1524 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1525 return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[0], pkey);
1527 return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[0], pkey);
1530 if (STR_EQ(str, "pipe tc 1 rate")) {
1531 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1532 return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[1], pkey);
1534 return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[1], pkey);
1537 if (STR_EQ(str, "pipe tc 2 rate")) {
1538 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1539 return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[2], pkey);
1541 return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[2], pkey);
1544 if (STR_EQ(str, "pipe tc 3 rate")) {
1545 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1546 return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[3], pkey);
1548 return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[3], pkey);
1551 if (STR_EQ(str, "pipe tc period")) {
1552 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
1553 return parse_u64(&targ->qos_conf.pipe_params[0].tc_period, pkey);
1555 return parse_int(&targ->qos_conf.pipe_params[0].tc_period, pkey);
1558 if (STR_EQ(str, "police action")) {
1559 char *in = strstr(pkey, " io=");
1561 set_errf("Need to specify io colors using io=in_color,out_color\n");
1565 in += strlen(" io=");
1567 char *out = strstr(in, ",");
1569 set_errf("Output color not specified\n");
1574 enum police_action in_color = str_to_color(in);
1575 enum police_action out_color = str_to_color(out);
1577 if (in_color == ACT_INVALID) {
1578 set_errf("Invalid input color %s. Expected green, yellow or red", in);
1581 if (out_color == ACT_INVALID) {
1582 set_errf("Invalid output color %s. Expected green, yellow or red", out);
1585 enum police_action action = str_to_color(pkey);
1586 if (action == ACT_INVALID) {
1587 set_errf("Error action %s. Expected green, yellow, red or drop", pkey);
1590 targ->police_act[in_color][out_color] = action;
1594 if (STR_EQ(str, "qinq tag")) {
1595 return parse_int(&targ->qinq_tag, pkey);
1597 if (STR_EQ(str, "cir")) {
1598 return parse_int(&targ->cir, pkey);
1600 if (STR_EQ(str, "cbs")) {
1601 return parse_int(&targ->cbs, pkey);
1603 if (STR_EQ(str, "pir")) {
1604 return parse_int(&targ->pir, pkey);
1606 if (STR_EQ(str, "pbs")) {
1607 return parse_int(&targ->pbs, pkey);
1609 if (STR_EQ(str, "ebs")) {
1610 return parse_int(&targ->ebs, pkey);
1612 uint32_t queue_id = 0;
1613 if (sscanf(str, "queue %d weight", &queue_id) == 1) {
1615 int err = parse_int(&val, pkey);
1619 if (queue_id >= RTE_SCHED_BE_QUEUES_PER_PIPE) {
1620 set_errf("queue_id must be < %d", RTE_SCHED_BE_QUEUES_PER_PIPE);
1623 targ->qos_conf.pipe_params[0].wrr_weights[queue_id] = val;
1626 if (STR_EQ(str, "classify")) {
1627 if (!(targ->task_init->flag_features & TASK_FEATURE_CLASSIFY)) {
1628 set_errf("Classify is not supported in '%s' mode", targ->task_init->mode_str);
1632 return parse_flag(&targ->runtime_flags, TASK_CLASSIFY, pkey);
1634 if (STR_EQ(str, "flow table size")) {
1635 return parse_int(&targ->flow_table_size, pkey);
1638 if (STR_EQ(str, "tbf rate")) {
1639 return parse_int(&targ->tb_rate, pkey);
1641 if (STR_EQ(str, "tbf size")) {
1642 return parse_int(&targ->tb_size, pkey);
1645 if (STR_EQ(str, "max rules")) {
1646 return parse_int(&targ->n_max_rules, pkey);
1649 if (STR_EQ(str, "tunnel hop limit")) {
1651 int err = parse_int(&val, pkey);
1655 targ->tunnel_hop_limit = val;
1659 if (STR_EQ(str, "lookup port mask")) {
1661 int err = parse_int(&val, pkey);
1665 targ->lookup_port_mask = val;
1669 if (STR_EQ(str, "irq debug")) {
1670 parse_int(&targ->irq_debug, pkey);
1674 set_errf("Option '%s' is not known", str);
1675 /* fail on unknown keys */
1679 static int str_is_number(const char *in)
// Validates that 'in' is a plain decimal number, tolerating at most one '.'
// (used to vet the -r update-interval argument in prox_parse_args).
// NOTE(review): the dot_once update and the return statements are on elided
// lines -- confirm exact accept/reject values against the full source.
1683 	for (size_t i = 0; i < strlen(in); ++i) {
1684 		if (!dot_once && in[i] == '.') {
// Any character that is not a digit (and not the single permitted dot)
// disqualifies the string.
1689 		if (in[i] < '0' || in[i] > '9')
1696 /* Parse PROX command-line options with getopt(); fills prox_cfg and
1697    returns non-zero on invalid input. (Original comment: "command line
1698    parameters parsing procedure".) */
1697 int prox_parse_args(int argc, char **argv)
1703 	/* Default settings */
1704 	prox_cfg.flags |= DSF_AUTOSTART | DSF_WAIT_ON_QUIT;
1705 	prox_cfg.ui = PROX_UI_CURSES;
	// Echo the full command line into the log for post-mortem reference.
1707 	plog_info("\tCommand line:");
1708 	for (i = 0; i < argc; ++i) {
1709 		plog_info(" %s", argv[i]);
	// Option string: "f:dnzpo:tkuar:emsiw:l:v:q:". The case labels are on
	// elided lines; per-option comments below are inferred -- verify.
1713 	while ((opt = getopt(argc, argv, "f:dnzpo:tkuar:emsiw:l:v:q:")) != EOF) {
1716 			/* path to config file */
			// Derive prox_cfg.name from the basename of the config path
			// (offset is advanced past the last '/').
1719 			for (size_t i = 0; i < strlen(cfg_file); ++i) {
1720 				if (cfg_file[i] == '/') {
1725 			prox_strncpy(prox_cfg.name, cfg_file + offset, MAX_NAME_SIZE);
1728 			plog_set_lvl(atoi(optarg));
1731 			prox_cfg.log_name_pid = 0;
1732 			prox_strncpy(prox_cfg.log_name, optarg, MAX_NAME_SIZE);
1735 			prox_cfg.log_name_pid = 1;
1738 			prox_cfg.use_stats_logger = 1;
			// Daemon mode implies no interactive UI.
1741 			prox_cfg.flags |= DSF_DAEMON;
1742 			prox_cfg.ui = PROX_UI_NONE;
1745 			prox_cfg.flags |= DSF_USE_DUMMY_CPU_TOPO;
1746 			prox_cfg.flags |= DSF_CHECK_INIT;
1749 			prox_cfg.flags |= DSF_USE_DUMMY_DEVICES;
			// Update interval must be numeric (str_is_number) and short
			// enough to fit the destination buffer.
1752 			if (!str_is_number(optarg) || strlen(optarg) > 11)
1754 			prox_strncpy(prox_cfg.update_interval_str, optarg, sizeof(prox_cfg.update_interval_str));
			// A local UI makes no sense when daemonized.
1757 			if (prox_cfg.flags & DSF_DAEMON)
1760 			if (!strcmp(optarg, "curses")) {
1761 				prox_cfg.ui = PROX_UI_CURSES;
1763 			else if (!strcmp(optarg, "cli")) {
1764 				prox_cfg.ui = PROX_UI_CLI;
1766 			else if (!strcmp(optarg, "none")) {
1767 				prox_cfg.ui = PROX_UI_NONE;
1770 				plog_err("Invalid local UI '%s', local UI can be 'curses', 'cli' or 'none'.", optarg);
			// Run an inline Lua chunk against the embedded interpreter;
			// both load and pcall failures surface through set_errf().
1775 			if (luaL_loadstring(prox_lua(), optarg)) {
1776 				set_errf("Lua error: '%s'\n", lua_tostring(prox_lua(), -1));
1780 			if (lua_pcall(prox_lua(), 0, LUA_MULTRET, 0)) {
1781 				set_errf("Lua error: '%s'\n", lua_tostring(prox_lua(), -1));
1787 			/* autostart all cores */
1788 			prox_cfg.flags |= DSF_AUTOSTART;
1791 			/* don't autostart */
1792 			prox_cfg.flags &= ~DSF_AUTOSTART;
1795 			prox_cfg.flags |= DSF_LISTEN_TCP;
1798 			prox_cfg.flags |= DSF_LISTEN_UDS;
1801 			/* list supported task modes and exit */
1802 			prox_cfg.flags |= DSF_LIST_TASK_MODES;
1805 			/* check configuration file syntax and exit */
1806 			prox_cfg.flags |= DSF_CHECK_SYNTAX;
1809 			/* check initialization sequence and exit */
1810 			prox_cfg.flags |= DSF_CHECK_INIT;
			// "name=value" variable definition: needs at least 3 chars and
			// an '=' separator; add_var() rejects overflow (-2?) and
			// duplicates (-3) -- exact first error code is on elided lines.
1815 			if (strlen(tmp) >= 3 &&
1816 			    (tmp2 = strchr(tmp, '='))) {
1819 				prox_strncpy(tmp3 + 1, tmp, 63);
1820 				plog_info("\tAdding variable: %s = %s\n", tmp3, tmp2 + 1);
1821 				ret = add_var(tmp3, tmp2 + 1, 1);
1823 					plog_err("\tFailed to add variable, too many variables defines\n");
1826 				else if(ret == -3) {
1827 					plog_err("\tFailed to add variable, already defined\n");
1834 			plog_err("\tUnknown option\n");
1839 	/* reset getopt lib for DPDK */
1845 static int check_cfg(void)
// Post-parse sanity checks on the loaded configuration. RETURN_IF
// presumably records 'err' (cf. err_str) and returns non-zero from
// check_cfg when 'cond' holds -- macro body is on elided lines, confirm.
// (No comment may follow the #define line: it ends in a backslash.)
1848 #define RETURN_IF(cond, err)	\
1854 	RETURN_IF(rte_cfg.force_nchannel == 0, "\tError: number of memory channels not specified in [eal options] section\n");
1855 	RETURN_IF(prox_cfg.master >= RTE_MAX_LCORE, "\tError: No master core specified (one core needs to have mode=master)\n");
1862 static int calc_tot_rxrings(void)
// Walk every source task's destination list and count, per destination
// task, how many rx rings it will receive from (dtarg->tot_rxrings),
// validating the core/task wiring along the way. Errors go through
// set_errf(); a non-zero return is presumed on those paths (elided).
1864 	struct lcore_cfg *slconf, *dlconf;
1865 	struct task_args *starg, *dtarg;
1868 	struct core_task ct;
	// First pass: reset the ring counter on every task.
1871 	while (core_targ_next_early(&dlconf, &dtarg, 1) == 0) {
1872 		dtarg->tot_rxrings = 0;
	// Second pass: for each source task, visit every (core, task)
	// destination recorded in its per-protocol core_task_set.
1876 	while (core_targ_next_early(&slconf, &starg, 1) == 0) {
1877 		for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
1878 			for (uint8_t ring_idx = 0; ring_idx < starg->core_task_set[idx].n_elems; ++ring_idx) {
1879 				ct = starg->core_task_set[idx].core_task[ring_idx];
				// Destination core must be enabled...
1880 				if (!prox_core_active(ct.core, 0)) {
1881 					set_errf("Core %u is disabled but Core %u task %u is sending to it\n",
1882 						 ct.core, slconf->id, starg->id);
1886 				dlconf = &lcore_cfg_init[ct.core];
				// ...and the destination task index must exist on it.
1888 				if (ct.task >= dlconf->n_tasks_all) {
1889 					set_errf("Core %u task %u not enabled\n", ct.core, ct.task);
1893 				dtarg = &dlconf->targs[ct.task];
1895 				/* Control rings are not relevant at this point. */
				// The receiver must actually be configured for ring rx.
1899 				if (!(dtarg->flags & TASK_ARG_RX_RING)) {
1900 					set_errf("Core %u task %u is not expecting to receive through a ring\n",
				// Tally and bound the per-task ring count.
1905 				dtarg->tot_rxrings++;
1906 				if (dtarg->tot_rxrings > MAX_RINGS_PER_TASK) {
1907 					set_errf("Core %u task %u is receiving from too many tasks",
1918 static void prox_set_core_mask(void)
// Mark as active every lcore that has at least one task configured and
// whose first task is not the master task (the master core is tracked
// separately via prox_cfg.master).
1920 	struct lcore_cfg *lconf;
1923 	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
1924 		lconf = &lcore_cfg_init[lcore_id];
1925 		if (lconf->n_tasks_all > 0 && lconf->targs[0].mode != MASTER) {
1926 			prox_core_set_active(lcore_id);
1931 static int is_using_no_drop(void)
// Scan all active cores/tasks for one configured without TASK_ARG_DROP,
// i.e. running in no-drop mode. NOTE(review): the return statements are on
// elided lines -- presumably returns non-zero when such a task is found
// (caller clears DSF_WAIT_ON_QUIT in that case); confirm in full source.
1934 	struct lcore_cfg *lconf;
1935 	struct task_args *targs;
1938 	while(prox_core_next(&lcore_id, 1) == 0) {
1939 		lconf = &lcore_cfg_init[lcore_id];
1940 		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
1941 			targs = &lconf->targs[task_id];
1942 			if (!(targs->flags & TASK_ARG_DROP))
1949 int prox_read_config_file(void)
// Load and parse the PROX configuration file (cfg_file): install defaults,
// run every registered section parser, then derive the active core mask
// and per-task rx-ring counts. Non-zero returns on the error paths are
// presumed (the return statements themselves are on elided lines).
1951 	set_global_defaults(&prox_cfg);
1952 	set_task_defaults(&prox_cfg, lcore_cfg_init);
1953 	set_port_defaults();
1954 	plog_info("=== Parsing configuration file '%s' ===\n", cfg_file);
1955 	struct cfg_file *pcfg = cfg_open(cfg_file);
	// NULL-terminated list of section parsers (eal options, port, core, ...).
1960 	struct cfg_section* config_sections[] = {
1972 	for (struct cfg_section** section = config_sections; *section != NULL; ++section) {
1973 		const char* name = (*section)->name;
1974 		size_t len = strlen(name);
		// A trailing '#' in the section name marks a numbered/repeated
		// section family, hence the plural in the log line.
1975 		plog_info("\t*** Reading [%s] section%s ***\n", name, name[len - 1] == '#'? "s": "");
1976 		cfg_parse(pcfg, *section);
1978 		if ((*section)->error) {
1979 			plog_err("At line %u, section [%s], entry %u: '%s'\n\t%s\n"
1980 				, pcfg->err_line, pcfg->err_section, pcfg->err_entry + 1, pcfg->cur_line,
1981 				 strlen(get_parse_err())? get_parse_err() : err_str);
1982 			cfg_close(pcfg); /* cannot close before printing error, print uses internal buffer */
1989 	prox_set_core_mask();
	// In no-drop mode, don't linger at quit waiting for queues to drain.
1991 	if (is_using_no_drop()) {
1992 		prox_cfg.flags &= ~DSF_WAIT_ON_QUIT;
1995 	if (calc_tot_rxrings()) {
1996 		plog_err("Error in configuration: %s\n", err_str);
2003 static void failed_rte_eal_init(__attribute__((unused))const char *prog_name)
// Application-usage hook installed before rte_eal_init() (see
// rte_set_application_usage_hook below): logs the EAL init failure
// instead of printing DPDK's usage text.
2005 	plog_err("\tError in rte_eal_init()\n");
2008 int prox_setup_rte(const char *prog_name)
2010 char *rte_argv[MAX_RTE_ARGV];
2011 char rte_arg[MAX_RTE_ARGV][MAX_ARG_LEN];
2012 char tmp[PROX_CM_STR_LEN];
2013 /* create mask of used cores */
2014 plog_info("=== Setting up RTE EAL ===\n");
2016 if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO) {
2017 plog_info("Using dummy cpu topology\n");
2018 snprintf(tmp, sizeof(tmp), "0x1");
2020 prox_core_to_hex(tmp, sizeof(tmp), 0);
2021 plog_info("\tWorker threads core mask is %s\n", tmp);
2022 prox_core_to_hex(tmp, sizeof(tmp), 1);
2023 plog_info("\tWith master core index %u, full core mask is %s\n", prox_cfg.master, tmp);
2026 /* fake command line parameters for rte_eal_init() */
2028 rte_argv[argc] = strdup(prog_name);
2029 sprintf(rte_arg[++argc], "-c%s", tmp);
2030 rte_argv[argc] = rte_arg[argc];
2031 #if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
2032 if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO)
2033 sprintf(rte_arg[++argc], "--master-lcore=%u", 0);
2035 sprintf(rte_arg[++argc], "--master-lcore=%u", prox_cfg.master);
2036 rte_argv[argc] = rte_arg[argc];
2038 /* For old DPDK versions, the master core had to be the first
2040 uint32_t first_core = -1;
2042 if (prox_core_next(&first_core, 1) == -1) {
2043 plog_err("Can't core ID of first core in use\n");
2046 if (first_core != prox_cfg.master) {
2047 plog_err("The master core needs to be the first core (master core = %u, first core = %u).\n", first_core, prox_cfg.master);
2052 if (rte_cfg.memory) {
2053 sprintf(rte_arg[++argc], "-m%u", rte_cfg.memory);
2054 rte_argv[argc] = rte_arg[argc];
2057 if (rte_cfg.force_nchannel) {
2058 sprintf(rte_arg[++argc], "-n%u", rte_cfg.force_nchannel);
2059 rte_argv[argc] = rte_arg[argc];
2062 if (rte_cfg.force_nrank) {
2063 sprintf(rte_arg[++argc], "-r%u", rte_cfg.force_nrank);
2064 rte_argv[argc] = rte_arg[argc];
2067 if (rte_cfg.no_hugetlbfs) {
2068 strcpy(rte_arg[++argc], "--no-huge");
2069 rte_argv[argc] = rte_arg[argc];
2072 if (rte_cfg.no_pci) {
2073 strcpy(rte_arg[++argc], "--no-pci");
2074 rte_argv[argc] = rte_arg[argc];
2077 if (rte_cfg.no_hpet) {
2078 strcpy(rte_arg[++argc], "--no-hpet");
2079 rte_argv[argc] = rte_arg[argc];
2082 if (rte_cfg.no_shconf) {
2083 strcpy(rte_arg[++argc], "--no-shconf");
2084 rte_argv[argc] = rte_arg[argc];
2087 if (rte_cfg.eal != NULL) {
2088 char *ptr = rte_cfg.eal;
2090 while (ptr != NULL) {
2091 while (isspace(*ptr))
2094 ptr = strchr(ptr, ' ');
2098 strcpy(rte_arg[++argc], ptr2);
2099 rte_argv[argc] = rte_arg[argc];
2103 if (rte_cfg.hugedir != NULL) {
2104 strcpy(rte_arg[++argc], "--huge-dir");
2105 rte_argv[argc] = rte_arg[argc];
2106 rte_argv[++argc] = rte_cfg.hugedir;
2109 if (rte_cfg.no_output) {
2110 rte_log_set_global_level(0);
2113 plog_info("\tEAL command line:");
2114 if (argc >= MAX_RTE_ARGV) {
2115 plog_err("too many arguments for EAL\n");
2119 for (int h = 0; h <= argc; ++h) {
2120 plog_info(" %s", rte_argv[h]);
2124 rte_set_application_usage_hook(failed_rte_eal_init);
2125 if (rte_eal_init(++argc, rte_argv) < 0) {
2126 plog_err("\tError in rte_eal_init()\n");
2129 plog_info("\tEAL Initialized\n");
2131 if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO)
2134 /* check if all active cores are in enabled in DPDK */
2135 for (uint32_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
2136 if (lcore_id == prox_cfg.master) {
2137 if (!rte_lcore_is_enabled(lcore_id))
2140 else if (rte_lcore_is_enabled(lcore_id) != prox_core_active(lcore_id, 0)) {
2141 plog_err("\tFailed to enable lcore %u\n", lcore_id);
2144 else if (lcore_cfg_init[lcore_id].n_tasks_all != 0 && !rte_lcore_is_enabled(lcore_id)) {
2145 plog_err("\tFailed to enable lcore %u\n", lcore_id);