// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <rte_table_hash.h>
#include <rte_version.h>
#include <rte_malloc.h>

#include "prox_malloc.h"
#include "hash_utils.h"
#include "prox_port_cfg.h"
#include "handle_qos.h"
#include "handle_qinq_encap4.h"
#include "stats_core.h"
void start_core_all(int task_id)
{
	uint32_t cores[RTE_MAX_LCORE];
	uint32_t lcore_id = -1;
	char tmp[256];
	int cnt = 0;

	prox_core_to_str(tmp, sizeof(tmp), 0);
	plog_info("Starting cores: %s\n", tmp);

	while (prox_core_next(&lcore_id, 0) == 0) {
		cores[cnt++] = lcore_id;
	}
	start_cores(cores, cnt, task_id);
}
void stop_core_all(int task_id)
{
	uint32_t cores[RTE_MAX_LCORE];
	uint32_t lcore_id = -1;
	char tmp[256];
	int cnt = 0;

	prox_core_to_str(tmp, sizeof(tmp), 0);
	plog_info("Stopping cores: %s\n", tmp);

	while (prox_core_next(&lcore_id, 0) == 0) {
		cores[cnt++] = lcore_id;
	}
	stop_cores(cores, cnt, task_id);
}
static void warn_inactive_cores(uint32_t *cores, int count, const char *prefix)
{
	for (int i = 0; i < count; ++i) {
		if (!prox_core_active(cores[i], 0)) {
			plog_warn("%s %u: core is not active\n", prefix, cores[i]);
		}
	}
}
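/* Commands are handed to a worker lcore through a per-lcore message
 * slot: the sender fills lconf->msg and raises a request flag that the
 * worker clears once it has consumed the message. Spin on that flag
 * here, giving up after roughly 5 seconds (measured in TSC cycles),
 * since a NO_DROP task blocked on a full ring may never service it. */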
static inline int wait_command_handled(struct lcore_cfg *lconf)
{
	uint64_t t1 = rte_rdtsc(), t2;

	while (lconf_is_req(lconf)) {
		t2 = rte_rdtsc();
		if (t2 - t1 > 5 * rte_get_tsc_hz()) {
			// Failed to handle command ...
			for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
				struct task_args *targs = &lconf->targs[task_id];
				if (!(targs->flags & TASK_ARG_DROP)) {
					plogx_err("Failed to handle command - task is in NO_DROP and might be stuck...\n");
					return -1;
				}
			}
			plogx_err("Failed to handle command\n");
			return -1;
		}
	}
	return 0;
}
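/* Tasks with the L3 feature handle their own L3 traffic (e.g. ARP), and
 * that handling must be started before the task begins forwarding. The
 * master task and tasks with no TX rings or TX ports are skipped. */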
static inline void start_l3(struct task_args *targ)
{
	if (!task_is_master(targ)) {
		if ((targ->nb_txrings != 0) || (targ->nb_txports != 0)) {
			if (targ->task_init->flag_features & TASK_FEATURE_L3)
				task_start_l3(targ->tbase, targ);
		}
	}
}
void start_cores(uint32_t *cores, int count, int task_id)
{
	int n_started_cores = 0;
	uint32_t started_cores[RTE_MAX_LCORE];
	struct task_args *targ;

	warn_inactive_cores(cores, count, "Can't start core");

	for (int i = 0; i < count; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[cores[i]];

		if (lconf->n_tasks_run != lconf->n_tasks_all) {
			if (task_id == -1) {
				for (uint8_t tid = 0; tid < lconf->n_tasks_all; ++tid) {
					targ = &lconf->targs[tid];
					start_l3(targ);
				}
			} else {
				targ = &lconf->targs[task_id];
				start_l3(targ);
			}
			lconf->msg.type = LCONF_MSG_START;
			lconf->msg.task_id = task_id;
			lconf_set_req(lconf);
			if (task_id == -1)
				plog_info("Starting core %u (all tasks)\n", cores[i]);
			else
				plog_info("Starting core %u task %u\n", cores[i], task_id);
			started_cores[n_started_cores++] = cores[i];
			lconf->flags |= LCONF_FLAG_RUNNING;
			rte_eal_remote_launch(lconf_run, NULL, cores[i]);
		}
		else {
			plog_warn("Core %u is already running all its tasks\n", cores[i]);
		}
	}

	/* This function is blocking, so detect when each core has
	   consumed the message. */
	for (int i = 0; i < n_started_cores; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[started_cores[i]];
		plog_info("Waiting for core %u to start...", started_cores[i]);
		if (wait_command_handled(lconf) == -1) return;
		plog_info(" OK\n");
	}
}
void stop_cores(uint32_t *cores, int count, int task_id)
{
	int n_stopped_cores = 0;
	uint32_t stopped_cores[RTE_MAX_LCORE];
	uint32_t c;

	warn_inactive_cores(cores, count, "Can't stop core");

	for (int i = 0; i < count; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[cores[i]];
		if (lconf->n_tasks_run) {
			if (wait_command_handled(lconf) == -1) return;

			lconf->msg.type = LCONF_MSG_STOP;
			lconf->msg.task_id = task_id;
			lconf_set_req(lconf);
			stopped_cores[n_stopped_cores++] = cores[i];
		}
	}

	for (int i = 0; i < n_stopped_cores; ++i) {
		c = stopped_cores[i];
		struct lcore_cfg *lconf = &lcore_cfg[c];
		if (wait_command_handled(lconf) == -1) return;

		if (lconf->n_tasks_run == 0) {
			plog_info("All tasks stopped on core %u, waiting for core to stop...", c);
			rte_eal_wait_lcore(c);
			plog_info(" OK\n");
			lconf->flags &= ~LCONF_FLAG_RUNNING;
		}
		else {
			plog_info("Stopped task %u on core %u\n", task_id, c);
		}
	}
}
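/* Split a byte count into an integer part, a fractional part expressed
 * in thousandths, and a unit string, for human-readable printing. */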
struct size_unit {
	uint64_t val;
	uint64_t frac;
	char unit[8];
};

static struct size_unit to_size_unit(uint64_t bytes)
{
	struct size_unit ret;

	if (bytes > (1 << 30)) {
		ret.val = bytes >> 30;
		ret.frac = ((bytes - (ret.val << 30)) * 1000) / (1 << 30);
		strcpy(ret.unit, "GB");
	}
	else if (bytes > (1 << 20)) {
		ret.val = bytes >> 20;
		ret.frac = ((bytes - (ret.val << 20)) * 1000) / (1 << 20);
		strcpy(ret.unit, "MB");
	}
	else if (bytes > (1 << 10)) {
		ret.val = bytes >> 10;
		ret.frac = ((bytes - (ret.val << 10)) * 1000) / (1 << 10);
		strcpy(ret.unit, "KB");
	}
	else {
		ret.val = bytes;
		ret.frac = 0;
		strcpy(ret.unit, "B");
	}

	return ret;
}
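/* Print heap usage as reported by the DPDK allocator, one block per
 * NUMA socket; sockets without a heap are skipped. */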
void cmd_mem_stats(void)
{
	struct rte_malloc_socket_stats sock_stats;
	struct size_unit su;

	for (uint32_t i = 0; i < RTE_MAX_NUMA_NODES; ++i) {
		if (rte_malloc_get_socket_stats(i, &sock_stats) < 0 || sock_stats.heap_totalsz_bytes == 0)
			continue;

		plogx_info("Socket %u memory stats:\n", i);
		su = to_size_unit(sock_stats.heap_totalsz_bytes);
		plogx_info("\tHeap_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
		su = to_size_unit(sock_stats.heap_freesz_bytes);
		plogx_info("\tFree_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
		su = to_size_unit(sock_stats.heap_allocsz_bytes);
		plogx_info("\tAlloc_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
		su = to_size_unit(sock_stats.greatest_free_size);
		plogx_info("\tGreatest_free_size: %zu %s\n", su.val, su.unit);
		plogx_info("\tAlloc_count: %u\n", sock_stats.alloc_count);
		plogx_info("\tFree_count: %u\n", sock_stats.free_count);
	}
}
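/* Walk the physical memory segment table and print each segment's
 * physical address range, virtual address and hugepage count. */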
void cmd_mem_layout(void)
{
	const struct rte_memseg* memseg = rte_eal_get_physmem_layout();

	plog_info("Memory layout:\n");
	for (uint32_t i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (memseg[i].addr == NULL)
			break;

		const char *sz_str;
		switch (memseg[i].hugepage_sz >> 20) {
		case 2:
			sz_str = "2MB";
			break;
		case 1024:
			sz_str = "1GB";
			break;
		default:
			sz_str = "??";
		}

		plog_info("Segment %u: [%#lx-%#lx] at %p using %zu pages of %s\n",
			  i,
			  memseg[i].phys_addr,
			  memseg[i].phys_addr + memseg[i].len,
			  memseg[i].addr,
			  memseg[i].len/memseg[i].hugepage_sz, sz_str);
	}
}
void cmd_dump(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets, struct input *input, int rx, int tx)
{
	plog_info("dump %u %u %u\n", lcore_id, task_id, nb_packets);
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	}
	else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
		plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
	}
	else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		lconf->tasks_all[task_id]->aux->task_rt_dump.input = input;

		if (wait_command_handled(lconf) == -1) return;

		if (rx && tx)
			lconf->msg.type = LCONF_MSG_DUMP;
		else if (rx)
			lconf->msg.type = LCONF_MSG_DUMP_RX;
		else if (tx)
			lconf->msg.type = LCONF_MSG_DUMP_TX;

		if (rx || tx) {
			lconf->msg.task_id = task_id;
			lconf->msg.val = nb_packets;
			lconf_set_req(lconf);
		}

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_trace(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets)
{
	plog_info("trace %u %u %u\n", lcore_id, task_id, nb_packets);
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	}
	else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
		plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
	}
	else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;

		lconf->msg.type = LCONF_MSG_TRACE;
		lconf->msg.task_id = task_id;
		lconf->msg.val = nb_packets;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_rx_bw_start(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_BW_ACTIVE) {
		plog_warn("rx bandwidth already running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_RX_BW_START;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_tx_bw_start(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_BW_ACTIVE) {
		plog_warn("tx bandwidth already running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_TX_BW_START;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_rx_bw_stop(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if (!(lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_BW_ACTIVE)) {
		plog_warn("rx bandwidth not running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_RX_BW_STOP;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_tx_bw_stop(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if (!(lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_BW_ACTIVE)) {
		plog_warn("tx bandwidth not running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_TX_BW_STOP;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_rx_distr_start(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_DISTR_ACTIVE) {
		plog_warn("rx distribution already running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_RX_DISTR_START;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_tx_distr_start(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_DISTR_ACTIVE) {
		plog_warn("tx distribution already running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_TX_DISTR_START;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_rx_distr_stop(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if ((lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_DISTR_ACTIVE) == 0) {
		plog_warn("rx distribution not running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_RX_DISTR_STOP;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_tx_distr_stop(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if ((lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_DISTR_ACTIVE) == 0) {
		plog_warn("tx distribution not running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_TX_DISTR_STOP;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_rx_distr_rst(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_RX_DISTR_RESET;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_tx_distr_rst(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_TX_DISTR_RESET;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_rx_distr_show(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else {
		for (uint32_t i = 0; i < lcore_cfg[lcore_id].n_tasks_all; ++i) {
			struct task_base *t = lcore_cfg[lcore_id].tasks_all[i];
			plog_info("t[%u]: ", i);
			for (uint32_t j = 0; j < sizeof(t->aux->rx_bucket)/sizeof(t->aux->rx_bucket[0]); ++j) {
				plog_info("%u ", t->aux->rx_bucket[j]);
			}
			plog_info("\n");
		}
	}
}
void cmd_tx_distr_show(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else {
		for (uint32_t i = 0; i < lcore_cfg[lcore_id].n_tasks_all; ++i) {
			struct task_base *t = lcore_cfg[lcore_id].tasks_all[i];
			uint64_t tot = 0, avg = 0;
			for (uint32_t j = 0; j < sizeof(t->aux->tx_bucket)/sizeof(t->aux->tx_bucket[0]); ++j) {
				tot += t->aux->tx_bucket[j];
				avg += j * t->aux->tx_bucket[j];
			}
			if (tot) {
				avg = avg / tot;
			}
			plog_info("t[%u]: %lu: ", i, avg);
			for (uint32_t j = 0; j < sizeof(t->aux->tx_bucket)/sizeof(t->aux->tx_bucket[0]); ++j) {
				plog_info("%u ", t->aux->tx_bucket[j]);
			}
			plog_info("\n");
		}
	}
}
void cmd_ringinfo_all(void)
{
	struct lcore_cfg *lconf;
	uint32_t lcore_id = -1;

	while (prox_core_next(&lcore_id, 0) == 0) {
		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			cmd_ringinfo(lcore_id, task_id);
		}
	}
}
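/* Print the flags, memory footprint and occupancy of every RX ring of
 * one task. The ring size mask moved from ring->prod.mask to
 * ring->mask in DPDK 17.05, hence the version check below. */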
void cmd_ringinfo(uint8_t lcore_id, uint8_t task_id)
{
	struct lcore_cfg *lconf;
	struct rte_ring *ring;
	struct task_args* targ;
	uint32_t count;

	if (!prox_core_active(lcore_id, 0)) {
		plog_info("lcore %u is not active\n", lcore_id);
		return;
	}
	lconf = &lcore_cfg[lcore_id];
	if (task_id >= lconf->n_tasks_all) {
		plog_warn("Invalid task index %u: lcore %u has %u tasks\n", task_id, lcore_id, lconf->n_tasks_all);
		return;
	}

	targ = &lconf->targs[task_id];
	plog_info("Core %u task %u: %u rings\n", lcore_id, task_id, targ->nb_rxrings);
	for (uint8_t i = 0; i < targ->nb_rxrings; ++i) {
		ring = targ->rx_rings[i];
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
		count = ring->prod.mask + 1;
#else
		count = ring->mask + 1;
#endif
		plog_info("\tRing %u:\n", i);
		plog_info("\t\tFlags: %s,%s\n", ring->flags & RING_F_SP_ENQ ? "sp" : "mp", ring->flags & RING_F_SC_DEQ ? "sc" : "mc");
		plog_info("\t\tMemory size: %zu bytes\n", rte_ring_get_memsize(count));
		plog_info("\t\tOccupied: %u/%u\n", rte_ring_count(ring), count);
	}
}
void cmd_port_up(uint8_t port_id)
{
	int err;

	if (!port_is_active(port_id)) {
		return;
	}

	if ((err = rte_eth_dev_set_link_up(port_id)) == 0) {
		plog_info("Bringing port %d up\n", port_id);
	}
	else {
		plog_warn("Failed to bring port %d up with error %d\n", port_id, err);
	}
}
void cmd_port_down(uint8_t port_id)
{
	int err;

	if (!port_is_active(port_id)) {
		return;
	}

	if ((err = rte_eth_dev_set_link_down(port_id)) == 0) {
		plog_info("Bringing port %d down\n", port_id);
	}
	else {
		plog_warn("Failed to bring port %d down with error %d\n", port_id, err);
	}
}
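/* Extended statistics, with three code paths depending on the DPDK
 * version: from 16.07 on, xstats names and values are fetched through
 * separate calls; from 2.1 on, a single combined rte_eth_xstats array
 * is used; older versions do not support xstats at all. */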
void cmd_xstats(uint8_t port_id)
{
#if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0)
	int n_xstats;
	struct rte_eth_xstat *eth_xstat = NULL;	// id and value
	struct rte_eth_xstat_name *eth_xstat_name = NULL;	// only names
	struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
	int rc;

	n_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	eth_xstat_name = prox_zmalloc(n_xstats * sizeof(*eth_xstat_name), port_cfg->socket);
	PROX_ASSERT(eth_xstat_name);
	rc = rte_eth_xstats_get_names(port_id, eth_xstat_name, n_xstats);
	if ((rc < 0) || (rc > n_xstats)) {
		if (rc < 0) {
			plog_warn("Failed to get xstats_names on port %d with error %d\n", port_id, rc);
		} else {
			plog_warn("Failed to get xstats_names on port %d: too many xstats (%d)\n", port_id, rc);
		}
	}

	eth_xstat = prox_zmalloc(n_xstats * sizeof(*eth_xstat), port_cfg->socket);
	PROX_ASSERT(eth_xstat);
	rc = rte_eth_xstats_get(port_id, eth_xstat, n_xstats);
	if ((rc < 0) || (rc > n_xstats)) {
		if (rc < 0) {
			plog_warn("Failed to get xstats on port %d with error %d\n", port_id, rc);
		} else {
			plog_warn("Failed to get xstats on port %d: too many xstats (%d)\n", port_id, rc);
		}
	} else {
		for (int i = 0; i < rc; i++) {
			plog_info("%s: %ld\n", eth_xstat_name[i].name, eth_xstat[i].value);
		}
	}
	if (eth_xstat_name)
		prox_free(eth_xstat_name);
	if (eth_xstat)
		prox_free(eth_xstat);
#else
#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
	int n_xstats;
	struct rte_eth_xstats *eth_xstats;
	struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
	int rc;

	n_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	eth_xstats = prox_zmalloc(n_xstats * sizeof(*eth_xstats), port_cfg->socket);
	PROX_ASSERT(eth_xstats);
	rc = rte_eth_xstats_get(port_id, eth_xstats, n_xstats);
	if ((rc < 0) || (rc > n_xstats)) {
		if (rc < 0) {
			plog_warn("Failed to get xstats on port %d with error %d\n", port_id, rc);
		} else {
			plog_warn("Failed to get xstats on port %d: too many xstats (%d)\n", port_id, rc);
		}
	} else {
		for (int i = 0; i < rc; i++) {
			plog_info("%s: %ld\n", eth_xstats[i].name, eth_xstats[i].value);
		}
	}
	if (eth_xstats)
		prox_free(eth_xstats);
#else
	plog_warn("Failed to get xstats, xstats are not supported in this version of dpdk\n");
#endif
#endif
}
void cmd_portinfo(int port_id, char *dst, size_t max_len)
{
	char *end = dst + max_len;

	*dst = 0;
	if (port_id == -1) {
		uint8_t max_port_idx = prox_last_port_active() + 1;

		for (uint8_t port_id = 0; port_id < max_port_idx; ++port_id) {
			if (!prox_port_cfg[port_id].active) {
				continue;
			}
			struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];

			dst += snprintf(dst, end - dst,
					"%2d:%10s; "MAC_BYTES_FMT"; %s\n",
					port_id,
					port_cfg->name,
					MAC_BYTES(port_cfg->eth_addr.addr_bytes),
					port_cfg->pci_addr);
		}
		return;
	}

	if (!port_is_active(port_id)) {
		return;
	}

	struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];

	dst += snprintf(dst, end - dst, "Port info for port %u\n", port_id);
	dst += snprintf(dst, end - dst, "\tName: %s\n", port_cfg->name);
	dst += snprintf(dst, end - dst, "\tDriver: %s\n", port_cfg->driver_name);
	dst += snprintf(dst, end - dst, "\tMac address: "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
	dst += snprintf(dst, end - dst, "\tLink speed: %u Mbps\n", port_cfg->link_speed);
	dst += snprintf(dst, end - dst, "\tLink status: %s\n", port_cfg->link_up ? "up" : "down");
	dst += snprintf(dst, end - dst, "\tSocket: %u\n", port_cfg->socket);
	dst += snprintf(dst, end - dst, "\tPCI address: %s\n", port_cfg->pci_addr);
	dst += snprintf(dst, end - dst, "\tPromiscuous: %s\n", port_cfg->promiscuous ? "yes" : "no");
	dst += snprintf(dst, end - dst, "\tNumber of RX/TX descriptors: %u/%u\n", port_cfg->n_rxd, port_cfg->n_txd);
	dst += snprintf(dst, end - dst, "\tNumber of RX/TX queues: %u/%u (max: %u/%u)\n", port_cfg->n_rxq, port_cfg->n_txq, port_cfg->max_rxq, port_cfg->max_txq);
	dst += snprintf(dst, end - dst, "\tMemory pools:\n");

	for (uint8_t i = 0; i < 32; ++i) {
		if (port_cfg->pool[i]) {
			dst += snprintf(dst, end - dst, "\t\tname: %s (%p)\n",
					port_cfg->pool[i]->name, port_cfg->pool[i]);
		}
	}
}
void cmd_read_reg(uint8_t port_id, unsigned int id)
{
	unsigned int val, rc;

	if (!port_is_active(port_id)) {
		return;
	}
	rc = read_reg(port_id, id, &val);
	if (rc) {
		plog_warn("Failed to read register %d on port %d\n", id, port_id);
	}
	else {
		plog_info("Register 0x%08X: %08X\n", id, val);
	}
}
void cmd_reset_port(uint8_t portid)
{
	unsigned int rc;

	if (!prox_port_cfg[portid].active) {
		plog_info("port not active\n");
		return;
	}
	rte_eth_dev_stop(portid);
	rc = rte_eth_dev_start(portid);
	if (rc) {
		plog_warn("Failed to restart port %d\n", portid);
	}
}
void cmd_write_reg(uint8_t port_id, unsigned int id, unsigned int val)
{
	if (!port_is_active(port_id)) {
		return;
	}

	plog_info("writing 0x%08X %08X\n", id, val);
	write_reg(port_id, id, val);
}
void cmd_set_vlan_offload(uint8_t port_id, unsigned int val)
{
	if (!port_is_active(port_id)) {
		return;
	}

	plog_info("setting vlan offload to %d\n", val);
	if (val & ~(ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD | ETH_VLAN_EXTEND_OFFLOAD)) {
		plog_info("wrong vlan offload value\n");
	}
	int ret = rte_eth_dev_set_vlan_offload(port_id, val);
	plog_info("rte_eth_dev_set_vlan_offload returned %d\n", ret);
}
void cmd_set_vlan_filter(uint8_t port_id, unsigned int id, unsigned int val)
{
	if (!port_is_active(port_id)) {
		return;
	}

	plog_info("setting vlan filter for vlan %d to %d\n", id, val);
	int ret = rte_eth_dev_vlan_filter(port_id, id, val);
	plog_info("rte_eth_dev_vlan_filter returned %d\n", ret);
}
void cmd_thread_info(uint8_t lcore_id, uint8_t task_id)
{
	plog_info("thread_info %u %u\n", lcore_id, task_id);
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
		return;
	}
	if (!prox_core_active(lcore_id, 0)) {
		plog_warn("lcore %u is not active\n", lcore_id);
		return;
	}
	if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
		plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
		return;
	}
	if (strcmp(lcore_cfg[lcore_id].targs[task_id].task_init->mode_str, "qos") == 0) {
		struct task_base *task;

		task = lcore_cfg[lcore_id].tasks_all[task_id];
		plog_info("core %d, task %d: %d mbufs stored in QoS\n", lcore_id, task_id,
			  task_qos_n_pkts_buffered(task));

#ifdef ENABLE_EXTRA_USER_STATISTICS
	}
	else if (lcore_cfg[lcore_id].targs[task_id].mode == QINQ_ENCAP4) {
		struct task_qinq_encap4 *task;

		task = (struct task_qinq_encap4 *)(lcore_cfg[lcore_id].tasks_all[task_id]);
		for (int i = 0; i < task->n_users; i++) {
			if (task->stats_per_user[i])
				plog_info("User %d: %d packets\n", i, task->stats_per_user[i]);
		}
#endif
	}
	else {
		// Only QoS thread info so far
		plog_err("core %d, task %d: not a qos core (%p)\n", lcore_id, task_id, lcore_cfg[lcore_id].thread_x);
	}
}
void cmd_rx_tx_info(void)
{
	uint32_t lcore_id = -1;

	while (prox_core_next(&lcore_id, 0) == 0) {
		for (uint8_t task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; ++task_id) {
			struct task_args *targ = &lcore_cfg[lcore_id].targs[task_id];

			plog_info("Core %u:", lcore_id);
			if (targ->rx_port_queue[0].port != OUT_DISCARD) {
				for (int i = 0; i < targ->nb_rxports; i++) {
					plog_info(" RX port %u (queue %u)", targ->rx_port_queue[i].port, targ->rx_port_queue[i].queue);
				}
			}
			else {
				for (uint8_t j = 0; j < targ->nb_rxrings; ++j) {
					plog_info(" RX ring[%u,%u] %p", task_id, j, targ->rx_rings[j]);
				}
			}

			for (uint8_t j = 0; j < targ->nb_txports; ++j) {
				plog_info(" TX port %u (queue %u)", targ->tx_port_queue[j].port,
					  targ->tx_port_queue[j].queue);
			}

			for (uint8_t j = 0; j < targ->nb_txrings; ++j) {
				plog_info(" TX ring %p", targ->tx_rings[j]);
			}

			plog_info("\n");
		}
	}
}
void cmd_get_cache_class(uint32_t lcore_id, uint32_t *set)
{
	uint64_t tmp_rmid = 0;

	cqm_assoc_read(lcore_id, &tmp_rmid);
	*set = (uint32_t)(tmp_rmid >> 32);
}
void cmd_get_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t *val)
{
	cat_get_class_mask(lcore_id, set, val);
}
void cmd_set_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t val)
{
	cat_set_class_mask(lcore_id, set, val);
	lcore_cfg[lcore_id].cache_set = set;

	uint32_t id = -1;
	while (prox_core_next(&id, 0) == 0) {
		if ((lcore_cfg[id].cache_set == set) && (rte_lcore_to_socket_id(id) == rte_lcore_to_socket_id(lcore_id))) {
			plog_info("Updating mask for core %d to %d\n", id, set);
			stats_update_cache_mask(id, val);
		}
	}
}
void cmd_set_cache_class(uint32_t lcore_id, uint32_t set)
{
	uint64_t tmp_rmid = 0;
	uint32_t val = 0;

	cqm_assoc_read(lcore_id, &tmp_rmid);
	cqm_assoc(lcore_id, (tmp_rmid & 0xffffffff) | ((set * 1L) << 32));
	cat_get_class_mask(lcore_id, set, &val);
	stats_update_cache_mask(lcore_id, val);
}
void cmd_cache_reset(void)
{
	uint8_t sockets[MAX_SOCKETS] = {0};
	uint8_t cores[MAX_SOCKETS] = {0};
	uint32_t mask = (1 << cat_get_num_ways()) - 1;
	uint32_t lcore_id = -1, socket_id;

	while (prox_core_next(&lcore_id, 0) == 0) {
		cqm_assoc(lcore_id, 0);
		socket_id = rte_lcore_to_socket_id(lcore_id);
		if (socket_id < MAX_SOCKETS) {
			sockets[socket_id] = 1;
			cores[socket_id] = lcore_id;
		}
		stats_update_cache_mask(lcore_id, mask);
		plog_info("Setting core %d to cache mask %x\n", lcore_id, mask);
		lcore_cfg[lcore_id].cache_set = 0;
	}
	for (uint32_t s = 0; s < MAX_SOCKETS; s++) {
		if (sockets[s])
			cat_reset_cache(cores[s]);
	}
	stats_lcore_assoc_rmid();
}
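/* Short-circuit a task out of the datapath: every predecessor TX ring
 * that feeds one of this task's RX rings is repointed to the task's
 * single TX ring, so packets flow directly to the downstream task. */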
int bypass_task(uint32_t lcore_id, uint32_t task_id)
{
	struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
	struct task_args *targ, *starg;

	if (task_id >= lconf->n_tasks_all)
		return -1;

	targ = &lconf->targs[task_id];
	if (targ->nb_txrings == 1) {
		plog_info("Task has %d receive and 1 transmit ring and can be bypassed, %d precedent tasks\n", targ->nb_rxrings, targ->n_prev_tasks);
		// Find the predecessor rings feeding this task and repoint them
		for (unsigned int i = 0; i < targ->n_prev_tasks; i++) {
			starg = targ->prev_tasks[i];
			for (unsigned int j = 0; j < starg->nb_txrings; j++) {
				for (unsigned int k = 0; k < targ->nb_rxrings; k++) {
					if (starg->tx_rings[j] == targ->rx_rings[k]) {
						plog_info("bypassing ring %p and connecting it to %p\n", starg->tx_rings[j], targ->tx_rings[0]);
						starg->tx_rings[j] = targ->tx_rings[0];
						struct task_base *tbase = starg->tbase;
						tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
					}
				}
			}
		}
	} else {
		plog_info("Task has %d receive and %d transmit rings and cannot be bypassed\n", targ->nb_rxrings, targ->nb_txrings);
		return -1;
	}

	return 0;
}
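/* Undo bypass_task: predecessor TX rings currently pointing at this
 * task's TX ring are repointed back to the task's own RX rings. */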
int reconnect_task(uint32_t lcore_id, uint32_t task_id)
{
	struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
	struct task_args *targ, *starg;

	if (task_id >= lconf->n_tasks_all)
		return -1;

	targ = &lconf->targs[task_id];
	if (targ->nb_txrings == 1) {
		// Find the predecessor rings that were bypassed and restore them
		for (unsigned int i = 0; i < targ->n_prev_tasks; i++) {
			starg = targ->prev_tasks[i];
			for (unsigned int j = 0; j < starg->nb_txrings; j++) {
				if (starg->tx_rings[j] == targ->tx_rings[0]) {
					if (targ->n_prev_tasks == targ->nb_rxrings) {
						starg->tx_rings[j] = targ->rx_rings[i];
						struct task_base *tbase = starg->tbase;
						tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
						plog_info("Task has %d receive and 1 transmit ring and can be reconnected, %d precedent tasks\n", targ->nb_rxrings, targ->n_prev_tasks);
					} else if (targ->nb_rxrings == 1) {
						starg->tx_rings[j] = targ->rx_rings[0];
						struct task_base *tbase = starg->tbase;
						tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
						plog_info("Task has %d receive and 1 transmit ring and ring %p can be reconnected, %d precedent tasks\n", targ->nb_rxrings, starg->tx_rings[j], targ->n_prev_tasks);
					} else {
						plog_err("Unexpected configuration: %d precedent tasks, %d rx rings\n", targ->n_prev_tasks, targ->nb_rxrings);
					}
				}
			}
		}
	} else {
		plog_info("Task has %d receive and %d transmit rings and cannot be reconnected\n", targ->nb_rxrings, targ->nb_txrings);
		return -1;
	}

	return 0;
}