2 // Copyright (c) 2010-2020 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
18 #include <rte_table_hash.h>
19 #include <rte_version.h>
20 #include <rte_malloc.h>
21 #if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
22 #include <rte_eal_memconfig.h>
25 #include "prox_malloc.h"
31 #include "hash_utils.h"
33 #include "prox_port_cfg.h"
35 #include "handle_qos.h"
36 #include "handle_qinq_encap4.h"
41 #include "stats_core.h"
43 void start_core_all(int task_id)
45 uint32_t cores[RTE_MAX_LCORE];
50 prox_core_to_str(tmp, sizeof(tmp), 0);
51 plog_info("Starting cores: %s\n", tmp);
54 while (prox_core_next(&lcore_id, 0) == 0) {
55 cores[cnt++] = lcore_id;
57 start_cores(cores, cnt, task_id);
60 void stop_core_all(int task_id)
62 uint32_t cores[RTE_MAX_LCORE];
67 prox_core_to_str(tmp, sizeof(tmp), 0);
68 plog_info("Stopping cores: %s\n", tmp);
71 while (prox_core_next(&lcore_id, 0) == 0) {
72 cores[cnt++] = lcore_id;
75 stop_cores(cores, cnt, task_id);
78 static void warn_inactive_cores(uint32_t *cores, int count, const char *prefix)
80 for (int i = 0; i < count; ++i) {
81 if (!prox_core_active(cores[i], 0)) {
82 plog_warn("%s %u: core is not active\n", prefix, cores[i]);
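// Wait until the worker core has consumed the pending mailbox request.
// Logs an error and gives up if the command is not handled within the timeout;
// a task running in NO_DROP mode may legitimately take longer.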
87 static inline int wait_command_handled(struct lcore_cfg *lconf)
89 uint64_t t1 = rte_rdtsc(), t2;
92 if (lconf->msg.type == LCONF_MSG_STOP)
95 while (lconf_is_req(lconf)) {
97 if (t2 - t1 > max_time * rte_get_tsc_hz()) {
98 // Failed to handle command ...
99 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
100 struct task_args *targs = &lconf->targs[task_id];
101 if (!(targs->flags & TASK_ARG_DROP)) {
102 plogx_err("Failed to handle command - task is in NO_DROP and might be stuck...\n");
106 plogx_err("Failed to handle command\n");
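// Start L3/NDP handling for a task that transmits on at least one port
// (skipped for the master task).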
113 static inline void start_l3(struct task_args *targ)
115 if (!task_is_master(targ)) {
116 if ((targ->nb_txports != 0)) {
117 if (targ->flags & (TASK_ARG_L3|TASK_ARG_NDP))
118 task_start_l3(targ->tbase, targ);
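// Post an LCONF_MSG_START to every listed core to start task_id (or all of its
// tasks), launching the lcore thread with rte_eal_remote_launch() if it was not
// running yet.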
123 void start_cores(uint32_t *cores, int count, int task_id)
125 int n_started_cores = 0;
126 uint32_t started_cores[RTE_MAX_LCORE];
127 struct task_args *targ;
129 warn_inactive_cores(cores, count, "Can't start core");
131 for (int i = 0; i < count; ++i) {
132 struct lcore_cfg *lconf = &lcore_cfg[cores[i]];
134 if (lconf->n_tasks_run != lconf->n_tasks_all) {
136 for (uint8_t tid = 0; tid < lconf->n_tasks_all; ++tid) {
137 targ = &lconf->targs[tid];
140 } else if (task_id < lconf->n_tasks_all) {
141 targ = &lconf->targs[task_id];
144 plog_warn("Invalid task id %d on core %u\n", task_id, cores[i]);
147 if (wait_command_handled(lconf) == -1) return;
148 lconf->msg.type = LCONF_MSG_START;
149 lconf->msg.task_id = task_id;
150 lconf_set_req(lconf);
152 plog_info("Starting core %u (all tasks)\n", cores[i]);
154 plog_info("Starting core %u task %u\n", cores[i], task_id);
155 started_cores[n_started_cores++] = cores[i];
156 lconf->flags |= LCONF_FLAG_RUNNING;
157 rte_eal_remote_launch(lconf_run, NULL, cores[i]);
160 plog_warn("Core %u is already running all its tasks\n", cores[i]);
164 /* This function is blocking, so detect when each core has
165 consumed the message. */
166 for (int i = 0; i < n_started_cores; ++i) {
167 struct lcore_cfg *lconf = &lcore_cfg[started_cores[i]];
168 plog_info("Waiting for core %u to start...", started_cores[i]);
169 if (wait_command_handled(lconf) == -1) return;
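// Post an LCONF_MSG_STOP for task_id to every listed core; once a core has no
// running task left, wait for its lcore thread to return.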
174 void stop_cores(uint32_t *cores, int count, int task_id)
176 int n_stopped_cores = 0;
177 uint32_t stopped_cores[RTE_MAX_LCORE];
180 warn_inactive_cores(cores, count, "Can't stop core");
182 for (int i = 0; i < count; ++i) {
183 struct lcore_cfg *lconf = &lcore_cfg[cores[i]];
184 if (task_id >= lconf->n_tasks_all) {
185 plog_warn("Trying to stop invalid task id %d on core %u\n", task_id, cores[i]);
188 if (lconf->n_tasks_run) {
189 if (wait_command_handled(lconf) == -1) return;
191 lconf->msg.type = LCONF_MSG_STOP;
192 lconf->msg.task_id = task_id;
193 lconf_set_req(lconf);
194 stopped_cores[n_stopped_cores++] = cores[i];
198 for (int i = 0; i < n_stopped_cores; ++i) {
199 c = stopped_cores[i];
200 struct lcore_cfg *lconf = &lcore_cfg[c];
201 if (wait_command_handled(lconf) == -1) return;
203 if (lconf->n_tasks_run == 0) {
204 plog_info("All tasks stopped on core %u, waiting for core to stop...", c);
205 rte_eal_wait_lcore(c);
207 lconf->flags &= ~LCONF_FLAG_RUNNING;
210 plog_info("Stopped task %u on core %u\n", task_id, c);
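// Convert a byte count into a value/fraction/unit triple for pretty-printing.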
221 static struct size_unit to_size_unit(uint64_t bytes)
223 struct size_unit ret;
225 if (bytes > 1 << 30) {
226 ret.val = bytes >> 30;
227 ret.frac = ((bytes - (ret.val << 30)) * 1000) / (1 << 30);
228 strcpy(ret.unit, "GB");
230 else if (bytes > 1 << 20) {
231 ret.val = bytes >> 20;
232 ret.frac = ((bytes - (ret.val << 20)) * 1000) / (1 << 20);
233 strcpy(ret.unit, "MB");
235 else if (bytes > 1 << 10) {
236 ret.val = bytes >> 10;
237 ret.frac = (bytes - (ret.val << 10)) * 1000 / (1 << 10);
238 strcpy(ret.unit, "KB");
243 strcpy(ret.unit, "B");
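// Add a multicast MAC address to the port's filter list and program it on the
// NIC; fails if NB_MCAST_ADDR addresses are already configured or the address
// is already present.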
249 static int add_multicast_addr(uint8_t port_id, prox_rte_ether_addr *addr)
254 struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
256 if (port_cfg->nb_mc_addr >= NB_MCAST_ADDR) {
257 plog_err("Already reached maximum number (%d) of mcast addr on port %u\n", NB_MCAST_ADDR, port_id);
260 for (i = 0; i < port_cfg->nb_mc_addr; i++) {
261 if (prox_rte_is_same_ether_addr(addr, &port_cfg->mc_addr[i])) {
262 plog_info("multicast address already added to port\n");
267 prox_rte_ether_addr_copy(addr, &port_cfg->mc_addr[port_cfg->nb_mc_addr]);
268 if ((rc = rte_eth_dev_set_mc_addr_list(port_id, port_cfg->mc_addr, port_cfg->nb_mc_addr + 1)) != 0) {
269 plog_err("rte_eth_dev_set_mc_addr_list returns %d on port %u\n", rc, port_id);
273 port_cfg->nb_mc_addr++;
274 plog_info("rte_eth_dev_set_mc_addr_list(%d addr) on port %u\n", port_cfg->nb_mc_addr, port_id);
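// Remove a multicast MAC address from the port's filter list: the last entry is
// moved into the freed slot, then the shortened list is reprogrammed on the NIC.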
278 static int del_multicast_addr(uint8_t port_id, prox_rte_ether_addr *addr)
283 struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
285 for (i = 0; i < port_cfg->nb_mc_addr; i++) {
286 if (prox_rte_is_same_ether_addr(addr, &port_cfg->mc_addr[i])) {
287 // Copy last address to the slot to be deleted
288 prox_rte_ether_addr_copy(&port_cfg->mc_addr[port_cfg->nb_mc_addr-1], &port_cfg->mc_addr[i]);
290 if ((rc = rte_eth_dev_set_mc_addr_list(port_id, port_cfg->mc_addr, port_cfg->nb_mc_addr - 1)) != 0) {
291 plog_err("rte_eth_dev_set_mc_addr_list returns %d on port %u\n", rc, port_id);
292 // If the call failed, restore the state we were in before calling the function...
293 prox_rte_ether_addr_copy(addr, &port_cfg->mc_addr[i]);
296 port_cfg->nb_mc_addr--;
297 plog_info("rte_eth_dev_set_mc_addr_list(%d addr) on port %u\n", port_cfg->nb_mc_addr, port_id);
301 plog_err("multicast address not found on port %u\n", port_id);
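// Print the rte_malloc heap statistics of every socket that has memory allocated.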
304 void cmd_mem_stats(void)
306 struct rte_malloc_socket_stats sock_stats;
310 for (uint32_t i = 0; i < RTE_MAX_NUMA_NODES; ++i) {
311 if (rte_malloc_get_socket_stats(i, &sock_stats) < 0 || sock_stats.heap_totalsz_bytes == 0)
314 plogx_info("Socket %u memory stats:\n", i);
315 su = to_size_unit(sock_stats.heap_totalsz_bytes);
316 plogx_info("\tHeap_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
317 su = to_size_unit(sock_stats.heap_freesz_bytes);
318 plogx_info("\tFree_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
319 su = to_size_unit(sock_stats.heap_allocsz_bytes);
320 plogx_info("\tAlloc_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
321 su = to_size_unit(sock_stats.greatest_free_size);
322 plogx_info("\tGreatest_free_size: %zu %s\n", su.val, su.unit);
323 plogx_info("\tAlloc_count: %u\n", sock_stats.alloc_count);
324 plogx_info("\tFree_count: %u\n", sock_stats.free_count);
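// Render a hugepage size as a short human-readable string ("2MB", "1GB", ...).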
328 static void get_hp_sz_string(char *sz_str, uint64_t hp_sz)
330 switch (hp_sz >> 20) {
332 strcpy(sz_str, " 0 ");
335 strcpy(sz_str, "2MB");
338 strcpy(sz_str, "1GB");
341 strcpy(sz_str, "??");
345 #if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
346 // Print all segments, 1 by 1
347 // Unused for now, keep for reference
348 static int print_all_segments(const struct rte_memseg_list *memseg_list, const struct rte_memseg *memseg, void *arg)
350 int memseg_list_idx = 0, memseg_idx;
351 int n = (*(int *)arg)++;
353 #if RTE_VERSION < RTE_VERSION_NUM(19,8,0,0)
354 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
355 memseg_list_idx = memseg_list - mcfg->memsegs;
356 if ((memseg_list_idx < 0) || (memseg_list_idx >= RTE_MAX_MEMSEG_LISTS)) {
357 plog_err("Invalid memseg_list_idx = %d; memseg_list = %p, mcfg->memsegs = %p\n", memseg_list_idx, memseg_list, mcfg->memsegs);
361 memseg_idx = rte_fbarray_find_idx(&memseg_list->memseg_arr, memseg);
362 if (memseg_idx < 0) {
363 plog_err("Invalid memseg_idx = %d; memseg_list = %p, memseg = %p\n", memseg_idx, memseg_list, memseg);
368 get_hp_sz_string(sz_str, memseg->hugepage_sz);
369 plog_info("Segment %u (sock %d): [%i-%i] [%#lx-%#lx] at %p using %zu pages of %s\n",
375 memseg->iova+memseg->len,
377 memseg->len/memseg->hugepage_sz, sz_str);
382 // Print memory segments
383 // Contiguous segments are shown as 1 big segment
384 static int print_segments(const struct rte_memseg_list *memseg_list, const struct rte_memseg *memseg, size_t len, void *arg)
386 int memseg_list_idx = 0, memseg_idx;
389 #if RTE_VERSION < RTE_VERSION_NUM(19,8,0,0)
390 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
391 memseg_list_idx = memseg_list - mcfg->memsegs;
392 if ((memseg_list_idx < 0) || (memseg_list_idx >= RTE_MAX_MEMSEG_LISTS)) {
393 plog_err("Invalid memseg_list_idx = %d; memseg_list = %p, mcfg->memsegs = %p\n", memseg_list_idx, memseg_list, mcfg->memsegs);
397 memseg_idx = rte_fbarray_find_idx(&memseg_list->memseg_arr, memseg);
398 if (memseg_idx < 0) {
399 plog_err("Invalid memseg_idx = %d; memseg_list = %p, memseg = %p\n", memseg_idx, memseg_list, memseg);
404 get_hp_sz_string(sz_str, memseg->hugepage_sz);
405 plog_info("Segment %u (sock %d): [%i-%i] [%#lx-%#lx] at %p using %zu pages of %s\n",
413 memseg->hugepage_sz?len/memseg->hugepage_sz:0, sz_str);
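// Print the hugepage memory layout: DPDK older than 18.05 exposes a flat memseg
// array, newer releases are walked with rte_memseg_contig_walk().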
420 void cmd_mem_layout(void)
422 #if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
423 const struct rte_memseg* memseg = rte_eal_get_physmem_layout();
425 plog_info("Memory layout:\n");
426 for (uint32_t i = 0; i < RTE_MAX_MEMSEG; i++) {
427 if (memseg[i].addr == NULL)
431 get_hp_sz_string(sz_str, memseg[i].hugepage_sz);
433 plog_info("Segment %u: [%#lx-%#lx] at %p using %zu pages of %s\n",
436 memseg[i].phys_addr + memseg[i].len,
438 memseg[i].len/memseg[i].hugepage_sz, sz_str);
441 int segment_number = 0;
442 //rte_memseg_walk(print_all_segments, &segment_number);
443 rte_memseg_contig_walk(print_segments, &segment_number);
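// Ask a task to dump the next nb_packets packets it receives and/or transmits
// by posting an LCONF_MSG_DUMP* request to its core.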
447 void cmd_dump(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets, struct input *input, int rx, int tx)
449 plog_info("dump %u %u %u\n", lcore_id, task_id, nb_packets);
450 if (lcore_id >= RTE_MAX_LCORE) {
451 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
453 else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
454 plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
457 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
459 lconf->tasks_all[task_id]->aux->task_rt_dump.input = input;
461 if (wait_command_handled(lconf) == -1) return;
463 lconf->msg.type = LCONF_MSG_DUMP;
465 lconf->msg.type = LCONF_MSG_DUMP_RX;
467 lconf->msg.type = LCONF_MSG_DUMP_TX;
470 lconf->msg.task_id = task_id;
471 lconf->msg.val = nb_packets;
472 lconf_set_req(lconf);
475 if (lconf->n_tasks_run == 0) {
476 lconf_do_flags(lconf);
481 void cmd_trace(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets)
483 plog_info("trace %u %u %u\n", lcore_id, task_id, nb_packets);
484 if (lcore_id >= RTE_MAX_LCORE) {
485 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
487 else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
488 plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
491 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
493 if (wait_command_handled(lconf) == -1) return;
495 lconf->msg.type = LCONF_MSG_TRACE;
496 lconf->msg.task_id = task_id;
497 lconf->msg.val = nb_packets;
498 lconf_set_req(lconf);
500 if (lconf->n_tasks_run == 0) {
501 lconf_do_flags(lconf);
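// The cmd_*_bw_* and cmd_*_distr_* commands below all follow the same pattern:
// validate the core, post the matching LCONF_MSG_* request to its mailbox and,
// when the core has no task running, apply the change directly via lconf_do_flags().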
506 void cmd_rx_bw_start(uint32_t lcore_id)
508 if (lcore_id >= RTE_MAX_LCORE) {
509 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
510 } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_BW_ACTIVE) {
511 plog_warn("rx bandwidth already running on core %u\n", lcore_id);
514 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
516 if (wait_command_handled(lconf) == -1) return;
517 lconf->msg.type = LCONF_MSG_RX_BW_START;
518 lconf_set_req(lconf);
520 if (lconf->n_tasks_run == 0) {
521 lconf_do_flags(lconf);
526 void cmd_tx_bw_start(uint32_t lcore_id)
528 if (lcore_id >= RTE_MAX_LCORE) {
529 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
530 } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_BW_ACTIVE) {
531 plog_warn("tx bandwidth already running on core %u\n", lcore_id);
534 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
536 if (wait_command_handled(lconf) == -1) return;
537 lconf->msg.type = LCONF_MSG_TX_BW_START;
538 lconf_set_req(lconf);
540 if (lconf->n_tasks_run == 0) {
541 lconf_do_flags(lconf);
546 void cmd_rx_bw_stop(uint32_t lcore_id)
548 if (lcore_id >= RTE_MAX_LCORE) {
549 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
550 } else if (!(lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_BW_ACTIVE)) {
551 plog_warn("rx bandwidth not running on core %u\n", lcore_id);
554 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
556 if (wait_command_handled(lconf) == -1) return;
557 lconf->msg.type = LCONF_MSG_RX_BW_STOP;
558 lconf_set_req(lconf);
560 if (lconf->n_tasks_run == 0) {
561 lconf_do_flags(lconf);
566 void cmd_tx_bw_stop(uint32_t lcore_id)
568 if (lcore_id >= RTE_MAX_LCORE) {
569 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
570 } else if (!(lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_BW_ACTIVE)) {
571 plog_warn("tx bandwidth not running on core %u\n", lcore_id);
574 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
576 if (wait_command_handled(lconf) == -1) return;
577 lconf->msg.type = LCONF_MSG_TX_BW_STOP;
578 lconf_set_req(lconf);
580 if (lconf->n_tasks_run == 0) {
581 lconf_do_flags(lconf);
585 void cmd_rx_distr_start(uint32_t lcore_id)
587 if (lcore_id >= RTE_MAX_LCORE) {
588 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
589 } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_DISTR_ACTIVE) {
590 plog_warn("rx distribution already running on core %u\n", lcore_id);
592 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
594 if (wait_command_handled(lconf) == -1) return;
595 lconf->msg.type = LCONF_MSG_RX_DISTR_START;
596 lconf_set_req(lconf);
598 if (lconf->n_tasks_run == 0) {
599 lconf_do_flags(lconf);
604 void cmd_tx_distr_start(uint32_t lcore_id)
606 if (lcore_id >= RTE_MAX_LCORE) {
607 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
608 } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_DISTR_ACTIVE) {
609 plog_warn("tx distribution already running on core %u\n", lcore_id);
611 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
613 if (wait_command_handled(lconf) == -1) return;
614 lconf->msg.type = LCONF_MSG_TX_DISTR_START;
615 lconf_set_req(lconf);
617 if (lconf->n_tasks_run == 0) {
618 lconf_do_flags(lconf);
623 void cmd_rx_distr_stop(uint32_t lcore_id)
625 if (lcore_id >= RTE_MAX_LCORE) {
626 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
627 } else if ((lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_DISTR_ACTIVE) == 0) {
628 plog_warn("rx distribution not running on core %u\n", lcore_id);
630 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
632 if (wait_command_handled(lconf) == -1) return;
633 lconf->msg.type = LCONF_MSG_RX_DISTR_STOP;
634 lconf_set_req(lconf);
636 if (lconf->n_tasks_run == 0) {
637 lconf_do_flags(lconf);
642 void cmd_tx_distr_stop(uint32_t lcore_id)
644 if (lcore_id >= RTE_MAX_LCORE) {
645 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
646 } else if ((lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_DISTR_ACTIVE) == 0) {
647 plog_warn("tx distribution not running on core %u\n", lcore_id);
649 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
651 if (wait_command_handled(lconf) == -1) return;
652 lconf->msg.type = LCONF_MSG_TX_DISTR_STOP;
653 lconf_set_req(lconf);
655 if (lconf->n_tasks_run == 0) {
656 lconf_do_flags(lconf);
661 void cmd_rx_distr_rst(uint32_t lcore_id)
663 if (lcore_id >= RTE_MAX_LCORE) {
664 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
666 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
668 if (wait_command_handled(lconf) == -1) return;
669 lconf->msg.type = LCONF_MSG_RX_DISTR_RESET;
670 lconf_set_req(lconf);
672 if (lconf->n_tasks_run == 0) {
673 lconf_do_flags(lconf);
678 void cmd_tx_distr_rst(uint32_t lcore_id)
680 if (lcore_id >= RTE_MAX_LCORE) {
681 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
683 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
685 if (wait_command_handled(lconf) == -1) return;
686 lconf->msg.type = LCONF_MSG_TX_DISTR_RESET;
687 lconf_set_req(lconf);
689 if (lconf->n_tasks_run == 0) {
690 lconf_do_flags(lconf);
695 void cmd_rx_distr_show(uint32_t lcore_id)
697 if (lcore_id >= RTE_MAX_LCORE) {
698 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
700 for (uint32_t i = 0; i < lcore_cfg[lcore_id].n_tasks_all; ++i) {
701 struct task_base *t = lcore_cfg[lcore_id].tasks_all[i];
702 plog_info("t[%u]: ", i);
703 for (uint32_t j = 0; j < sizeof(t->aux->rx_bucket)/sizeof(t->aux->rx_bucket[0]); ++j) {
704 plog_info("%u ", t->aux->rx_bucket[j]);
710 void cmd_tx_distr_show(uint32_t lcore_id)
712 if (lcore_id >= RTE_MAX_LCORE) {
713 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
715 for (uint32_t i = 0; i < lcore_cfg[lcore_id].n_tasks_all; ++i) {
716 struct task_base *t = lcore_cfg[lcore_id].tasks_all[i];
717 uint64_t tot = 0, avg = 0;
718 for (uint32_t j = 0; j < sizeof(t->aux->tx_bucket)/sizeof(t->aux->tx_bucket[0]); ++j) {
719 tot += t->aux->tx_bucket[j];
720 avg += j * t->aux->tx_bucket[j];
725 plog_info("t[%u]: %lu: ", i, avg);
726 for (uint32_t j = 0; j < sizeof(t->aux->tx_bucket)/sizeof(t->aux->tx_bucket[0]); ++j) {
727 plog_info("%u ", t->aux->tx_bucket[j]);
734 void cmd_ringinfo_all(void)
736 struct lcore_cfg *lconf;
737 uint32_t lcore_id = -1;
739 while(prox_core_next(&lcore_id, 0) == 0) {
740 lconf = &lcore_cfg[lcore_id];
741 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
742 cmd_ringinfo(lcore_id, task_id);
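// Print flags, size and occupancy of every rx ring of one task.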
747 void cmd_ringinfo(uint8_t lcore_id, uint8_t task_id)
749 struct lcore_cfg *lconf;
750 struct rte_ring *ring;
751 struct task_args* targ;
754 if (!prox_core_active(lcore_id, 0)) {
755 plog_info("lcore %u is not active\n", lcore_id);
758 lconf = &lcore_cfg[lcore_id];
759 if (task_id >= lconf->n_tasks_all) {
760 plog_warn("Invalid task index %u: lcore %u has %u tasks\n", task_id, lcore_id, lconf->n_tasks_all);
764 targ = &lconf->targs[task_id];
765 plog_info("Core %u task %u: %u rings\n", lcore_id, task_id, targ->nb_rxrings);
766 for (uint8_t i = 0; i < targ->nb_rxrings; ++i) {
767 ring = targ->rx_rings[i];
768 #if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
769 count = ring->prod.mask + 1;
771 count = ring->mask + 1;
773 plog_info("\tRing %u:\n", i);
774 plog_info("\t\tFlags: %s,%s\n", ring->flags & RING_F_SP_ENQ? "sp":"mp", ring->flags & RING_F_SC_DEQ? "sc":"mc");
775 plog_info("\t\tMemory size: %zu bytes\n", rte_ring_get_memsize(count));
776 plog_info("\t\tOccupied: %u/%u\n", rte_ring_count(ring), count);
780 void cmd_port_up(uint8_t port_id)
784 if (!port_is_active(port_id)) {
788 if ((err = rte_eth_dev_set_link_up(port_id)) == 0) {
789 plog_info("Bringing port %d up\n", port_id);
792 plog_warn("Failed to bring port %d up with error %d\n", port_id, err);
796 void cmd_port_down(uint8_t port_id)
800 if (!port_is_active(port_id)) {
804 if ((err = rte_eth_dev_set_link_down(port_id)) == 0) {
805 plog_info("Bringing port %d down\n", port_id);
808 plog_warn("Failed to bring port %d down with error %d\n", port_id, err);
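// Print the extended statistics (xstats) of a port; the xstats API changed
// across DPDK releases, hence the version-dependent code paths below.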
812 void cmd_xstats(uint8_t port_id)
814 #if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0)
816 struct rte_eth_xstat *eth_xstat = NULL; // id and value
817 struct rte_eth_xstat_name *eth_xstat_name = NULL; // only names
818 struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
821 n_xstats = rte_eth_xstats_get(port_id, NULL, 0);
822 eth_xstat_name = prox_zmalloc(n_xstats * sizeof(*eth_xstat_name), port_cfg->socket);
823 PROX_ASSERT(eth_xstat_name);
824 rc = rte_eth_xstats_get_names(port_id, eth_xstat_name, n_xstats);
825 if ((rc < 0) || (rc > n_xstats)) {
827 plog_warn("Failed to get xstats_names on port %d with error %d\n", port_id, rc);
828 } else if (rc > n_xstats) {
829 plog_warn("Failed to get xstats_names on port %d: too many xstats (%d)\n", port_id, rc);
833 eth_xstat = prox_zmalloc(n_xstats * sizeof(*eth_xstat), port_cfg->socket);
834 PROX_ASSERT(eth_xstat);
835 rc = rte_eth_xstats_get(port_id, eth_xstat, n_xstats);
836 if ((rc < 0) || (rc > n_xstats)) {
838 plog_warn("Failed to get xstats on port %d with error %d\n", port_id, rc);
839 } else if (rc > n_xstats) {
840 plog_warn("Failed to get xstats on port %d: too many xstats (%d)\n", port_id, rc);
843 for (int i=0;i<rc;i++) {
844 plog_info("%s: %ld\n", eth_xstat_name[i].name, eth_xstat[i].value);
848 prox_free(eth_xstat_name);
850 prox_free(eth_xstat);
852 #if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
854 struct rte_eth_xstats *eth_xstats;
855 struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
858 n_xstats = rte_eth_xstats_get(port_id, NULL, 0);
859 eth_xstats = prox_zmalloc(n_xstats * sizeof(*eth_xstats), port_cfg->socket);
860 PROX_ASSERT(eth_xstats);
861 rc = rte_eth_xstats_get(port_id, eth_xstats, n_xstats);
862 if ((rc < 0) || (rc > n_xstats)) {
864 plog_warn("Failed to get xstats on port %d with error %d\n", port_id, rc);
865 } else if (rc > n_xstats) {
866 plog_warn("Failed to get xstats on port %d: too many xstats (%d)\n", port_id, rc);
869 for (int i=0;i<rc;i++) {
870 plog_info("%s: %ld\n", eth_xstats[i].name, eth_xstats[i].value);
874 prox_free(eth_xstats);
876 plog_warn("Failed to get xstats, xstats are not supported in this version of dpdk\n");
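// Write a description of one port, or a one-line summary of every active port,
// into dst, never writing more than max_len bytes.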
881 void cmd_portinfo(int port_id, char *dst, size_t max_len)
883 char *end = dst + max_len;
887 uint8_t max_port_idx = prox_last_port_active() + 1;
889 for (uint8_t port_id = 0; port_id < max_port_idx; ++port_id) {
890 if (!prox_port_cfg[port_id].active) {
893 struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
895 dst += snprintf(dst, end - dst,
896 "%2d:%10s; "MAC_BYTES_FMT"; %s\n",
899 MAC_BYTES(port_cfg->eth_addr.addr_bytes),
905 if (!port_is_active(port_id)) {
909 struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
911 dst += snprintf(dst, end - dst, "Port info for port %u\n", port_id);
912 dst += snprintf(dst, end - dst, "\tName: %s\n", port_cfg->names[0]);
913 dst += snprintf(dst, end - dst, "\tDriver: %s\n", port_cfg->driver_name);
914 dst += snprintf(dst, end - dst, "\tMac address: "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
915 dst += snprintf(dst, end - dst, "\tLink speed: %u Mbps\n", port_cfg->link_speed);
916 dst += snprintf(dst, end - dst, "\tLink max speed: %u Mbps\n", port_cfg->max_link_speed);
917 dst += snprintf(dst, end - dst, "\tLink status: %s\n", port_cfg->link_up? "up" : "down");
918 dst += snprintf(dst, end - dst, "\tSocket: %u\n", port_cfg->socket);
919 dst += snprintf(dst, end - dst, "\tPCI address: %s\n", port_cfg->pci_addr);
920 dst += snprintf(dst, end - dst, "\tPromiscuous: %s\n", port_cfg->promiscuous? "yes" : "no");
921 for (unsigned int i = 0; i < port_cfg->nb_mc_addr; i++) {
922 dst += snprintf(dst, end - dst, "\tmcast address: "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->mc_addr[i].addr_bytes));
924 dst += snprintf(dst, end - dst, "\tNumber of RX/TX descriptors: %u/%u\n", port_cfg->n_rxd, port_cfg->n_txd);
925 dst += snprintf(dst, end - dst, "\tNumber of RX/TX queues: %u/%u (max: %u/%u)\n", port_cfg->n_rxq, port_cfg->n_txq, port_cfg->max_rxq, port_cfg->max_txq);
926 dst += snprintf(dst, end - dst, "\tMemory pools:\n");
928 for (uint8_t i = 0; i < 32; ++i) {
929 if (port_cfg->pool[i]) {
930 dst += snprintf(dst, end - dst, "\t\tname: %s (%p)\n",
931 port_cfg->pool[i]->name, port_cfg->pool[i]);
936 void cmd_read_reg(uint8_t port_id, unsigned int id)
938 unsigned int val, rc;
939 if (!port_is_active(port_id)) {
942 rc = read_reg(port_id, id, &val);
944 plog_warn("Failed to read register %d on port %d\n", id, port_id);
947 plog_info("Register 0x%08X : %08X \n", id, val);
951 void cmd_reset_port(uint8_t portid)
954 if (!prox_port_cfg[portid].active) {
955 plog_info("port not active \n");
958 rte_eth_dev_stop(portid);
959 rc = rte_eth_dev_start(portid);
961 plog_warn("Failed to restart port %d\n", portid);
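// val == 1 adds a multicast MAC address on the port, val == 0 removes it;
// allmulticast mode is turned on before the first address is added and turned
// off again when the last one is removed (or when the add fails).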
965 void cmd_multicast(uint8_t port_id, unsigned int val, prox_rte_ether_addr *mac)
967 if (!port_is_active(port_id)) {
970 struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
972 if (port_cfg->nb_mc_addr == 0) {
973 rte_eth_allmulticast_enable(port_id);
975 if (add_multicast_addr(port_id, mac) != 0) {
976 if (port_cfg->nb_mc_addr == 0)
977 rte_eth_allmulticast_disable(port_id);
979 } else if (val == 0) {
980 if (del_multicast_addr(port_id, mac) == 0) {
981 if (port_cfg->nb_mc_addr == 0) {
982 rte_eth_allmulticast_disable(port_id);
986 plog_err("Unexpected value in cmd_multicast on port %d\n", port_id);
990 void cmd_write_reg(uint8_t port_id, unsigned int id, unsigned int val)
992 if (!port_is_active(port_id)) {
996 plog_info("writing 0x%08X %08X\n", id, val);
997 write_reg(port_id, id, val);
1000 void cmd_set_vlan_offload(uint8_t port_id, unsigned int val)
1002 if (!port_is_active(port_id)) {
1006 plog_info("setting vlan offload to %d\n", val);
1007 if (val & ~(RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD | RTE_ETH_VLAN_EXTEND_OFFLOAD)) {
1008 plog_info("wrong vlan offload value\n");
1010 int ret = rte_eth_dev_set_vlan_offload(port_id, val);
1011 plog_info("rte_eth_dev_set_vlan_offload returned %d\n", ret);
1014 void cmd_set_vlan_filter(uint8_t port_id, unsigned int id, unsigned int val)
1016 if (!port_is_active(port_id)) {
1020 plog_info("setting vlan filter for vlan %d to %d\n", id, val);
1021 int ret = rte_eth_dev_vlan_filter(port_id, id, val);
1022 plog_info("rte_eth_dev_vlan_filter returned %d\n", ret);
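// Print mode-specific task information: the number of mbufs buffered by a QoS
// task and, when built with ENABLE_EXTRA_USER_STATISTICS, the per-user packet
// counts of a qinq_encap4 task.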
1025 void cmd_thread_info(uint8_t lcore_id, uint8_t task_id)
1027 plog_info("thread_info %u %u \n", lcore_id, task_id);
1028 if (lcore_id >= RTE_MAX_LCORE) {
1029 plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
1031 if (!prox_core_active(lcore_id, 0)) {
1032 plog_warn("lcore %u is not active\n", lcore_id);
1035 if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
1036 plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
1039 if (strcmp(lcore_cfg[lcore_id].targs[task_id].task_init->mode_str, "qos") == 0) {
1040 struct task_base *task;
1042 task = lcore_cfg[lcore_id].tasks_all[task_id];
1043 plog_info("core %d, task %d: %d mbufs stored in QoS\n", lcore_id, task_id,
1044 task_qos_n_pkts_buffered(task));
1046 #ifdef ENABLE_EXTRA_USER_STATISTICS
1048 else if (lcore_cfg[lcore_id].targs[task_id].mode == QINQ_ENCAP4) {
1049 struct task_qinq_encap4 *task;
1050 task = (struct task_qinq_encap4 *)(lcore_cfg[lcore_id].tasks_all[task_id]);
1051 for (int i=0;i<task->n_users;i++) {
1052 if (task->stats_per_user[i])
1053 plog_info("User %d: %d packets\n", i, task->stats_per_user[i]);
1058 // Only QoS thread info so far
1059 plog_err("core %d, task %d: not a qos core (%p)\n", lcore_id, task_id, lcore_cfg[lcore_id].thread_x);
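// List the rx/tx ports, queues and rings of every configured task.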
1063 void cmd_rx_tx_info(void)
1065 uint32_t lcore_id = -1;
1066 while(prox_core_next(&lcore_id, 0) == 0) {
1067 for (uint8_t task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; ++task_id) {
1068 struct task_args *targ = &lcore_cfg[lcore_id].targs[task_id];
1070 plog_info("Core %u:", lcore_id);
1071 if (targ->rx_port_queue[0].port != OUT_DISCARD) {
1072 for (int i = 0; i < targ->nb_rxports; i++) {
1073 plog_info(" RX port %u (queue %u)", targ->rx_port_queue[i].port, targ->rx_port_queue[i].queue);
1077 for (uint8_t j = 0; j < targ->nb_rxrings; ++j) {
1078 plog_info(" RX ring[%u,%u] %p", task_id, j, targ->rx_rings[j]);
1082 for (uint8_t j = 0; j < targ->nb_txports; ++j) {
1083 plog_info(" TX port %u (queue %u)", targ->tx_port_queue[j].port,
1084 targ->tx_port_queue[j].queue);
1087 for (uint8_t j = 0; j < targ->nb_txrings; ++j) {
1088 plog_info(" TX ring %p", targ->tx_rings[j]);
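// CAT/CQM helpers: read or program the cache class of service and way mask
// associated with an lcore.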
1095 void cmd_get_cache_class(uint32_t lcore_id, uint32_t *set)
1097 uint64_t tmp_rmid = 0;
1098 cqm_assoc_read(lcore_id, &tmp_rmid);
1099 *set = (uint32_t)(tmp_rmid >> 32);
1102 void cmd_get_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t *val)
1104 cat_get_class_mask(lcore_id, set, val);
1107 void cmd_set_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t val)
1109 cat_set_class_mask(lcore_id, set, val);
1110 lcore_cfg[lcore_id].cache_set = set;
1112 while(prox_core_next(&id, 0) == 0) {
1113 if ((lcore_cfg[id].cache_set == set) && (rte_lcore_to_socket_id(id) == rte_lcore_to_socket_id(lcore_id))) {
1114 plog_info("Updating mask for core %d to %d\n", id, val);
1115 stats_update_cache_mask(id, val);
1120 void cmd_set_cache_class(uint32_t lcore_id, uint32_t set)
1122 uint64_t tmp_rmid = 0;
1124 cqm_assoc_read(lcore_id, &tmp_rmid);
1125 cqm_assoc(lcore_id, (tmp_rmid & 0xffffffff) | ((set * 1L) << 32));
1126 cat_get_class_mask(lcore_id, set, &val);
1127 stats_update_cache_mask(lcore_id, val);
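// Reset cache allocation state: clear every core's class association, restore
// a full way mask and reset the CAT configuration once per socket.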
1130 void cmd_cache_reset(void)
1132 uint8_t sockets[MAX_SOCKETS] = {0};
1133 uint8_t cores[MAX_SOCKETS] = {0};
1134 uint32_t mask = (1 << cat_get_num_ways()) - 1;
1135 uint32_t lcore_id = -1, socket_id;
1136 while(prox_core_next(&lcore_id, 0) == 0) {
1137 cqm_assoc(lcore_id, 0);
1138 socket_id = rte_lcore_to_socket_id(lcore_id);
1139 if (socket_id < MAX_SOCKETS) {
1140 sockets[socket_id] = 1;
1141 cores[socket_id] = lcore_id;
1143 stats_update_cache_mask(lcore_id, mask);
1144 plog_info("Setting core %d to cache mask %x\n", lcore_id, mask);
1145 lcore_cfg[lcore_id].cache_set = 0;
1147 for (uint32_t s = 0; s < MAX_SOCKETS; s++) {
1149 cat_reset_cache(cores[s]);
1151 stats_lcore_assoc_rmid();
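// Bypass a task that has a single tx ring: the tx rings of the tasks feeding it
// are rewired to point directly at its tx ring.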
1154 int bypass_task(uint32_t lcore_id, uint32_t task_id)
1156 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
1157 struct task_args *targ, *starg, *dtarg;
1158 struct rte_ring *ring = NULL;
1160 if (task_id >= lconf->n_tasks_all)
1163 targ = &lconf->targs[task_id];
1164 if (targ->nb_txrings == 1) {
1165 plog_info("Task has %d receive rings and 1 transmit ring and can be bypassed, %d preceding tasks\n", targ->nb_rxrings, targ->n_prev_tasks);
1167 for (unsigned int i = 0; i < targ->n_prev_tasks; i++) {
1168 starg = targ->prev_tasks[i];
1169 for (unsigned int j = 0; j < starg->nb_txrings; j++) {
1170 for (unsigned int k = 0; k < targ->nb_rxrings; k++) {
1171 if (starg->tx_rings[j] == targ->rx_rings[k]) {
1172 plog_info("bypassing ring %p and connecting it to %p\n", starg->tx_rings[j], targ->tx_rings[0]);
1173 starg->tx_rings[j] = targ->tx_rings[0];
1174 struct task_base *tbase = starg->tbase;
1175 tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
1181 plog_info("Task has %d receive rings and %d transmit rings and cannot be bypassed\n", targ->nb_rxrings, targ->nb_txrings);
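// Reverse a bypass: point the tx rings of the preceding tasks back at this
// task's rx rings.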
1188 int reconnect_task(uint32_t lcore_id, uint32_t task_id)
1190 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
1191 struct task_args *targ, *starg, *dtarg = NULL;
1192 struct rte_ring *ring = NULL;
1194 if (task_id >= lconf->n_tasks_all)
1197 targ = &lconf->targs[task_id];
1198 if (targ->nb_txrings == 1) {
1200 for (unsigned int i = 0; i < targ->n_prev_tasks; i++) {
1201 starg = targ->prev_tasks[i];
1202 for (unsigned int j = 0; j < starg->nb_txrings; j++) {
1203 if (starg->tx_rings[j] == targ->tx_rings[0]) {
1204 if (targ->n_prev_tasks == targ->nb_rxrings) {
1205 starg->tx_rings[j] = targ->rx_rings[i];
1206 struct task_base *tbase = starg->tbase;
1207 tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
1208 plog_info("Task has %d receive rings and 1 transmit ring and can be reconnected, %d preceding tasks\n", targ->nb_rxrings, targ->n_prev_tasks);
1209 } else if (targ->nb_rxrings == 1) {
1210 starg->tx_rings[j] = targ->rx_rings[0];
1211 struct task_base *tbase = starg->tbase;
1212 tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
1213 plog_info("Task has %d receive rings and 1 transmit ring and ring %p can be reconnected, %d preceding tasks\n", targ->nb_rxrings, starg->tx_rings[j], targ->n_prev_tasks);
1215 plog_err("Unexpected configuration: %d preceding tasks, %d rx rings\n", targ->n_prev_tasks, targ->nb_rxrings);
1221 plog_info("Task has %d receive rings and %d transmit rings and cannot be reconnected\n", targ->nb_rxrings, targ->nb_txrings);