// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <rte_table_hash.h>
#include <rte_version.h>
#include <rte_malloc.h>

#include "prox_malloc.h"
#include "hash_utils.h"
#include "prox_port_cfg.h"
#include "handle_qos.h"
#include "handle_qinq_encap4.h"
#include "stats_core.h"
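/* Start or stop a given task (task_id == -1 meaning all tasks) on all
 * active cores: collect the active lcore ids and delegate to
 * start_cores()/stop_cores(). */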
void start_core_all(int task_id)
{
	uint32_t cores[RTE_MAX_LCORE];
	uint32_t lcore_id = -1;
	char tmp[256];
	int cnt = 0;

	prox_core_to_str(tmp, sizeof(tmp), 0);
	plog_info("Starting cores: %s\n", tmp);

	while (prox_core_next(&lcore_id, 0) == 0) {
		cores[cnt++] = lcore_id;
	}
	start_cores(cores, cnt, task_id);
}
void stop_core_all(int task_id)
{
	uint32_t cores[RTE_MAX_LCORE];
	uint32_t lcore_id = -1;
	char tmp[256];
	int cnt = 0;

	prox_core_to_str(tmp, sizeof(tmp), 0);
	plog_info("Stopping cores: %s\n", tmp);

	while (prox_core_next(&lcore_id, 0) == 0) {
		cores[cnt++] = lcore_id;
	}
	stop_cores(cores, cnt, task_id);
}
static void warn_inactive_cores(uint32_t *cores, int count, const char *prefix)
{
	for (int i = 0; i < count; ++i) {
		if (!prox_core_active(cores[i], 0)) {
			plog_warn("%s %u: core is not active\n", prefix, cores[i]);
		}
	}
}
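/* Spin until the target core has consumed the pending message. Gives
 * up after 5 seconds (measured in TSC cycles), which can happen when a
 * task running in NO_DROP mode is blocked on a full ring. Returns 0 on
 * success and -1 on timeout. */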
static inline int wait_command_handled(struct lcore_cfg *lconf)
{
	uint64_t t1 = rte_rdtsc(), t2;

	while (lconf_is_req(lconf)) {
		t2 = rte_rdtsc();
		if (t2 - t1 > 5 * rte_get_tsc_hz()) {
			// Failed to handle command ...
			for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
				struct task_args *targs = &lconf->targs[task_id];
				if (!(targs->flags & TASK_ARG_DROP)) {
					plogx_err("Failed to handle command - task is in NO_DROP and might be stuck...\n");
					return -1;
				}
			}
			plogx_err("Failed to handle command\n");
			return -1;
		}
	}
	return 0;
}
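/* Post a LCONF_MSG_START to each listed core that is not already
 * running all of its tasks, launch the lcore, then wait for every
 * started core to acknowledge the message. */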
void start_cores(uint32_t *cores, int count, int task_id)
{
	int n_started_cores = 0;
	uint32_t started_cores[RTE_MAX_LCORE];

	warn_inactive_cores(cores, count, "Can't start core");

	for (int i = 0; i < count; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[cores[i]];

		if (lconf->n_tasks_run != lconf->n_tasks_all) {
			lconf->msg.type = LCONF_MSG_START;
			lconf->msg.task_id = task_id;
			lconf_set_req(lconf);
			if (task_id == -1)
				plog_info("Starting core %u (all tasks)\n", cores[i]);
			else
				plog_info("Starting core %u task %u\n", cores[i], task_id);
			started_cores[n_started_cores++] = cores[i];
			lconf->flags |= LCONF_FLAG_RUNNING;
			rte_eal_remote_launch(lconf_run, NULL, cores[i]);
		}
		else {
			plog_warn("Core %u is already running all its tasks\n", cores[i]);
		}
	}

	/* This function is blocking, so detect when each core has
	   consumed the message. */
	for (int i = 0; i < n_started_cores; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[started_cores[i]];
		plog_info("Waiting for core %u to start...", started_cores[i]);
		if (wait_command_handled(lconf) == -1) return;
		plog_info(" OK\n");
	}
}
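/* Post a LCONF_MSG_STOP to each running core, then wait until the
 * message is handled; a core that has stopped all of its tasks is
 * joined with rte_eal_wait_lcore(). */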
void stop_cores(uint32_t *cores, int count, int task_id)
{
	int n_stopped_cores = 0;
	uint32_t stopped_cores[RTE_MAX_LCORE];
	uint32_t c;

	warn_inactive_cores(cores, count, "Can't stop core");

	for (int i = 0; i < count; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[cores[i]];
		if (lconf->n_tasks_run) {
			if (wait_command_handled(lconf) == -1) return;

			lconf->msg.type = LCONF_MSG_STOP;
			lconf->msg.task_id = task_id;
			lconf_set_req(lconf);
			stopped_cores[n_stopped_cores++] = cores[i];
		}
	}

	for (int i = 0; i < n_stopped_cores; ++i) {
		c = stopped_cores[i];
		struct lcore_cfg *lconf = &lcore_cfg[c];
		if (wait_command_handled(lconf) == -1) return;

		if (lconf->n_tasks_run == 0) {
			plog_info("All tasks stopped on core %u, waiting for core to stop...", c);
			rte_eal_wait_lcore(c);
			plog_info(" OK\n");
			lconf->flags &= ~LCONF_FLAG_RUNNING;
		}
		else {
			plog_info("Stopped task %u on core %u\n", task_id, c);
		}
	}
}
struct size_unit {
	uint64_t val;
	uint64_t frac;
	char unit[8];
};

static struct size_unit to_size_unit(uint64_t bytes)
{
	struct size_unit ret;

	if (bytes > 1 << 30) {
		ret.val = bytes >> 30;
		ret.frac = ((bytes - (ret.val << 30)) * 1000) / (1 << 30);
		strcpy(ret.unit, "GB");
	}
	else if (bytes > 1 << 20) {
		ret.val = bytes >> 20;
		ret.frac = ((bytes - (ret.val << 20)) * 1000) / (1 << 20);
		strcpy(ret.unit, "MB");
	}
	else if (bytes > 1 << 10) {
		ret.val = bytes >> 10;
		ret.frac = ((bytes - (ret.val << 10)) * 1000) / (1 << 10);
		strcpy(ret.unit, "KB");
	}
	else {
		ret.val = bytes;
		ret.frac = 0;
		strcpy(ret.unit, "B");
	}

	return ret;
}
void cmd_mem_stats(void)
{
	struct rte_malloc_socket_stats sock_stats;
	struct size_unit su;

	for (uint32_t i = 0; i < RTE_MAX_NUMA_NODES; ++i) {
		if (rte_malloc_get_socket_stats(i, &sock_stats) < 0 || sock_stats.heap_totalsz_bytes == 0)
			continue;

		plogx_info("Socket %u memory stats:\n", i);
		su = to_size_unit(sock_stats.heap_totalsz_bytes);
		plogx_info("\tHeap_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
		su = to_size_unit(sock_stats.heap_freesz_bytes);
		plogx_info("\tFree_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
		su = to_size_unit(sock_stats.heap_allocsz_bytes);
		plogx_info("\tAlloc_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
		su = to_size_unit(sock_stats.greatest_free_size);
		plogx_info("\tGreatest_free_size: %zu %s\n", su.val, su.unit);
		plogx_info("\tAlloc_count: %u\n", sock_stats.alloc_count);
		plogx_info("\tFree_count: %u\n", sock_stats.free_count);
	}
}
void cmd_mem_layout(void)
{
	const struct rte_memseg* memseg = rte_eal_get_physmem_layout();

	plog_info("Memory layout:\n");
	for (uint32_t i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (memseg[i].addr == NULL)
			break;

		const char *sz_str;
		switch (memseg[i].hugepage_sz >> 20) {
		case 0:
			sz_str = "4KB";
			break;
		case 2:
			sz_str = "2MB";
			break;
		case 1024:
			sz_str = "1GB";
			break;
		default:
			sz_str = "??";
		}

		plog_info("Segment %u: [%#lx-%#lx] at %p using %zu pages of %s\n",
			  i,
			  memseg[i].phys_addr,
			  memseg[i].phys_addr + memseg[i].len,
			  memseg[i].addr,
			  memseg[i].len/memseg[i].hugepage_sz, sz_str);
	}
}
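/* Ask a task to dump the next nb_packets packets it handles; the rx
 * and tx flags select which direction is dumped (both may be set).
 * cmd_trace() below works the same way but requests a packet trace. */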
void cmd_dump(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets, struct input *input, int rx, int tx)
{
	plog_info("dump %u %u %u\n", lcore_id, task_id, nb_packets);
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	}
	else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
		plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
	}
	else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		lconf->tasks_all[task_id]->aux->task_rt_dump.input = input;

		if (wait_command_handled(lconf) == -1) return;

		if (rx && tx)
			lconf->msg.type = LCONF_MSG_DUMP;
		else if (rx)
			lconf->msg.type = LCONF_MSG_DUMP_RX;
		else
			lconf->msg.type = LCONF_MSG_DUMP_TX;

		lconf->msg.task_id = task_id;
		lconf->msg.val = nb_packets;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_trace(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets)
{
	plog_info("trace %u %u %u\n", lcore_id, task_id, nb_packets);
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	}
	else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
		plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
	}
	else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;

		lconf->msg.type = LCONF_MSG_TRACE;
		lconf->msg.task_id = task_id;
		lconf->msg.val = nb_packets;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
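/* The cmd_{rx,tx}_{bw,distr}_* helpers below all follow the same
 * pattern: validate the core id, check the relevant LCONF flag, post
 * the matching LCONF_MSG_* message and, if no task is currently
 * running on the core, apply the flags directly via lconf_do_flags(). */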
void cmd_rx_bw_start(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_BW_ACTIVE) {
		plog_warn("rx bandwidth already running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_RX_BW_START;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_tx_bw_start(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_BW_ACTIVE) {
		plog_warn("tx bandwidth already running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_TX_BW_START;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_rx_bw_stop(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if (!(lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_BW_ACTIVE)) {
		plog_warn("rx bandwidth not running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_RX_BW_STOP;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_tx_bw_stop(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if (!(lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_BW_ACTIVE)) {
		plog_warn("tx bandwidth not running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_TX_BW_STOP;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_rx_distr_start(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_DISTR_ACTIVE) {
		plog_warn("rx distribution already running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_RX_DISTR_START;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_tx_distr_start(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_DISTR_ACTIVE) {
		plog_warn("tx distribution already running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_TX_DISTR_START;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_rx_distr_stop(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if ((lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_DISTR_ACTIVE) == 0) {
		plog_warn("rx distribution not running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_RX_DISTR_STOP;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_tx_distr_stop(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else if ((lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_DISTR_ACTIVE) == 0) {
		plog_warn("tx distribution not running on core %u\n", lcore_id);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_TX_DISTR_STOP;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_rx_distr_rst(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_RX_DISTR_RESET;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_tx_distr_rst(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else {
		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

		if (wait_command_handled(lconf) == -1) return;
		lconf->msg.type = LCONF_MSG_TX_DISTR_RESET;
		lconf_set_req(lconf);

		if (lconf->n_tasks_run == 0) {
			lconf_do_flags(lconf);
		}
	}
}
void cmd_rx_distr_show(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else {
		for (uint32_t i = 0; i < lcore_cfg[lcore_id].n_tasks_all; ++i) {
			struct task_base *t = lcore_cfg[lcore_id].tasks_all[i];
			plog_info("t[%u]: ", i);
			for (uint32_t j = 0; j < sizeof(t->aux->rx_bucket)/sizeof(t->aux->rx_bucket[0]); ++j) {
				plog_info("%u ", t->aux->rx_bucket[j]);
			}
			plog_info("\n");
		}
	}
}
void cmd_tx_distr_show(uint32_t lcore_id)
{
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	} else {
		for (uint32_t i = 0; i < lcore_cfg[lcore_id].n_tasks_all; ++i) {
			struct task_base *t = lcore_cfg[lcore_id].tasks_all[i];
			uint64_t tot = 0, avg = 0;
			for (uint32_t j = 0; j < sizeof(t->aux->tx_bucket)/sizeof(t->aux->tx_bucket[0]); ++j) {
				tot += t->aux->tx_bucket[j];
				avg += j * t->aux->tx_bucket[j];
			}
			if (tot) {
				avg = avg / tot;
			}
			plog_info("t[%u]: %lu: ", i, avg);
			for (uint32_t j = 0; j < sizeof(t->aux->tx_bucket)/sizeof(t->aux->tx_bucket[0]); ++j) {
				plog_info("%u ", t->aux->tx_bucket[j]);
			}
			plog_info("\n");
		}
	}
}
void cmd_ringinfo_all(void)
{
	struct lcore_cfg *lconf;
	uint32_t lcore_id = -1;

	while (prox_core_next(&lcore_id, 0) == 0) {
		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			cmd_ringinfo(lcore_id, task_id);
		}
	}
}
void cmd_ringinfo(uint8_t lcore_id, uint8_t task_id)
{
	struct lcore_cfg *lconf;
	struct rte_ring *ring;
	struct task_args* targ;
	uint32_t count;

	if (!prox_core_active(lcore_id, 0)) {
		plog_info("lcore %u is not active\n", lcore_id);
		return;
	}
	lconf = &lcore_cfg[lcore_id];
	if (task_id >= lconf->n_tasks_all) {
		plog_warn("Invalid task index %u: lcore %u has %u tasks\n", task_id, lcore_id, lconf->n_tasks_all);
		return;
	}

	targ = &lconf->targs[task_id];
	plog_info("Core %u task %u: %u rings\n", lcore_id, task_id, targ->nb_rxrings);
	for (uint8_t i = 0; i < targ->nb_rxrings; ++i) {
		ring = targ->rx_rings[i];
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
		count = ring->prod.mask + 1;
#else
		count = ring->mask + 1;
#endif
		plog_info("\tRing %u:\n", i);
		plog_info("\t\tFlags: %s,%s\n", ring->flags & RING_F_SP_ENQ? "sp":"mp", ring->flags & RING_F_SC_DEQ? "sc":"mc");
		plog_info("\t\tMemory size: %zu bytes\n", rte_ring_get_memsize(count));
		plog_info("\t\tOccupied: %u/%u\n", rte_ring_count(ring), count);
	}
}
void cmd_port_up(uint8_t port_id)
{
	int err;

	if (!port_is_active(port_id)) {
		return;
	}

	if ((err = rte_eth_dev_set_link_up(port_id)) == 0) {
		plog_info("Bringing port %d up\n", port_id);
	}
	else {
		plog_warn("Failed to bring port %d up with error %d\n", port_id, err);
	}
}
void cmd_port_down(uint8_t port_id)
{
	int err;

	if (!port_is_active(port_id)) {
		return;
	}

	if ((err = rte_eth_dev_set_link_down(port_id)) == 0) {
		plog_info("Bringing port %d down\n", port_id);
	}
	else {
		plog_warn("Failed to bring port %d down with error %d\n", port_id, err);
	}
}
void cmd_xstats(uint8_t port_id)
{
#if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0)
	int n_xstats;
	struct rte_eth_xstat *eth_xstat = NULL; // id and value
	struct rte_eth_xstat_name *eth_xstat_name = NULL; // only names
	struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
	int rc;

	n_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	eth_xstat_name = prox_zmalloc(n_xstats * sizeof(*eth_xstat_name), port_cfg->socket);
	PROX_ASSERT(eth_xstat_name);
	rc = rte_eth_xstats_get_names(port_id, eth_xstat_name, n_xstats);
	if ((rc < 0) || (rc > n_xstats)) {
		if (rc < 0) {
			plog_warn("Failed to get xstats_names on port %d with error %d\n", port_id, rc);
		} else if (rc > n_xstats) {
			plog_warn("Failed to get xstats_names on port %d: too many xstats (%d)\n", port_id, rc);
		}
	}

	eth_xstat = prox_zmalloc(n_xstats * sizeof(*eth_xstat), port_cfg->socket);
	PROX_ASSERT(eth_xstat);
	rc = rte_eth_xstats_get(port_id, eth_xstat, n_xstats);
	if ((rc < 0) || (rc > n_xstats)) {
		if (rc < 0) {
			plog_warn("Failed to get xstats on port %d with error %d\n", port_id, rc);
		} else if (rc > n_xstats) {
			plog_warn("Failed to get xstats on port %d: too many xstats (%d)\n", port_id, rc);
		}
	} else {
		for (int i = 0; i < rc; i++) {
			plog_info("%s: %ld\n", eth_xstat_name[i].name, eth_xstat[i].value);
		}
	}
	if (eth_xstat_name)
		prox_free(eth_xstat_name);
	if (eth_xstat)
		prox_free(eth_xstat);
#else
#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
	int n_xstats;
	struct rte_eth_xstats *eth_xstats;
	struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
	int rc;

	n_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	eth_xstats = prox_zmalloc(n_xstats * sizeof(*eth_xstats), port_cfg->socket);
	PROX_ASSERT(eth_xstats);
	rc = rte_eth_xstats_get(port_id, eth_xstats, n_xstats);
	if ((rc < 0) || (rc > n_xstats)) {
		if (rc < 0) {
			plog_warn("Failed to get xstats on port %d with error %d\n", port_id, rc);
		} else if (rc > n_xstats) {
			plog_warn("Failed to get xstats on port %d: too many xstats (%d)\n", port_id, rc);
		}
	} else {
		for (int i = 0; i < rc; i++) {
			plog_info("%s: %ld\n", eth_xstats[i].name, eth_xstats[i].value);
		}
	}
	if (eth_xstats)
		prox_free(eth_xstats);
#else
	plog_warn("Failed to get xstats, xstats are not supported in this version of dpdk\n");
#endif
#endif
}
void cmd_portinfo(int port_id, char *dst, size_t max_len)
{
	char *end = dst + max_len;

	*dst = 0;
	if (port_id == -1) {
		uint8_t max_port_idx = prox_last_port_active() + 1;

		for (uint8_t port_id = 0; port_id < max_port_idx; ++port_id) {
			if (!prox_port_cfg[port_id].active) {
				continue;
			}
			struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];

			dst += snprintf(dst, end - dst,
					"%2d:%10s; "MAC_BYTES_FMT"; %s\n",
					port_id,
					port_cfg->name,
					MAC_BYTES(port_cfg->eth_addr.addr_bytes),
					port_cfg->pci_addr);
		}
		return;
	}

	if (!port_is_active(port_id)) {
		return;
	}

	struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];

	dst += snprintf(dst, end - dst, "Port info for port %u\n", port_id);
	dst += snprintf(dst, end - dst, "\tName: %s\n", port_cfg->name);
	dst += snprintf(dst, end - dst, "\tDriver: %s\n", port_cfg->driver_name);
	dst += snprintf(dst, end - dst, "\tMac address: "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
	dst += snprintf(dst, end - dst, "\tLink speed: %u Mbps\n", port_cfg->link_speed);
	dst += snprintf(dst, end - dst, "\tLink status: %s\n", port_cfg->link_up? "up" : "down");
	dst += snprintf(dst, end - dst, "\tSocket: %u\n", port_cfg->socket);
	dst += snprintf(dst, end - dst, "\tPCI address: %s\n", port_cfg->pci_addr);
	dst += snprintf(dst, end - dst, "\tPromiscuous: %s\n", port_cfg->promiscuous? "yes" : "no");
	dst += snprintf(dst, end - dst, "\tNumber of RX/TX descriptors: %u/%u\n", port_cfg->n_rxd, port_cfg->n_txd);
	dst += snprintf(dst, end - dst, "\tNumber of RX/TX queues: %u/%u (max: %u/%u)\n", port_cfg->n_rxq, port_cfg->n_txq, port_cfg->max_rxq, port_cfg->max_txq);
	dst += snprintf(dst, end - dst, "\tMemory pools:\n");

	for (uint8_t i = 0; i < 32; ++i) {
		if (port_cfg->pool[i]) {
			dst += snprintf(dst, end - dst, "\t\tname: %s (%p)\n",
					port_cfg->pool[i]->name, port_cfg->pool[i]);
		}
	}
}
void cmd_read_reg(uint8_t port_id, unsigned int id)
{
	unsigned int val, rc;

	if (!port_is_active(port_id)) {
		return;
	}
	rc = read_reg(port_id, id, &val);
	if (rc) {
		plog_warn("Failed to read register %d on port %d\n", id, port_id);
	}
	else {
		plog_info("Register 0x%08X: %08X\n", id, val);
	}
}
void cmd_reset_port(uint8_t portid)
{
	int rc;

	if (!prox_port_cfg[portid].active) {
		plog_info("port not active\n");
		return;
	}
	rte_eth_dev_stop(portid);
	rc = rte_eth_dev_start(portid);
	if (rc) {
		plog_warn("Failed to restart port %d\n", portid);
	}
}
void cmd_write_reg(uint8_t port_id, unsigned int id, unsigned int val)
{
	if (!port_is_active(port_id)) {
		return;
	}

	plog_info("writing 0x%08X %08X\n", id, val);
	write_reg(port_id, id, val);
}
void cmd_set_vlan_offload(uint8_t port_id, unsigned int val)
{
	if (!port_is_active(port_id)) {
		return;
	}

	plog_info("setting vlan offload to %d\n", val);
	if (val & ~(ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD | ETH_VLAN_EXTEND_OFFLOAD)) {
		plog_info("wrong vlan offload value\n");
		return;
	}
	int ret = rte_eth_dev_set_vlan_offload(port_id, val);
	plog_info("rte_eth_dev_set_vlan_offload returned %d\n", ret);
}
void cmd_set_vlan_filter(uint8_t port_id, unsigned int id, unsigned int val)
{
	if (!port_is_active(port_id)) {
		return;
	}

	plog_info("setting vlan filter for vlan %d to %d\n", id, val);
	int ret = rte_eth_dev_vlan_filter(port_id, id, val);
	plog_info("rte_eth_dev_vlan_filter returned %d\n", ret);
}
void cmd_thread_info(uint8_t lcore_id, uint8_t task_id)
{
	plog_info("thread_info %u %u\n", lcore_id, task_id);
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
		return;
	}
	if (!prox_core_active(lcore_id, 0)) {
		plog_warn("lcore %u is not active\n", lcore_id);
		return;
	}
	if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
		plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
		return;
	}
	if (strcmp(lcore_cfg[lcore_id].targs[task_id].task_init->mode_str, "qos") == 0) {
		struct task_base *task;

		task = lcore_cfg[lcore_id].tasks_all[task_id];
		plog_info("core %d, task %d: %d mbufs stored in QoS\n", lcore_id, task_id,
			  task_qos_n_pkts_buffered(task));
	}
#ifdef ENABLE_EXTRA_USER_STATISTICS
	else if (lcore_cfg[lcore_id].targs[task_id].mode == QINQ_ENCAP4) {
		struct task_qinq_encap4 *task;
		task = (struct task_qinq_encap4 *)(lcore_cfg[lcore_id].tasks_all[task_id]);
		for (int i = 0; i < task->n_users; i++) {
			if (task->stats_per_user[i])
				plog_info("User %d: %d packets\n", i, task->stats_per_user[i]);
		}
	}
#endif
	else {
		// Only QoS thread info so far
		plog_err("core %d, task %d: not a qos core (%p)\n", lcore_id, task_id, lcore_cfg[lcore_id].thread_x);
	}
}
void cmd_rx_tx_info(void)
{
	uint32_t lcore_id = -1;
	while (prox_core_next(&lcore_id, 0) == 0) {
		for (uint8_t task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; ++task_id) {
			struct task_args *targ = &lcore_cfg[lcore_id].targs[task_id];

			plog_info("Core %u:", lcore_id);
			if (targ->rx_port_queue[0].port != OUT_DISCARD) {
				for (int i = 0; i < targ->nb_rxports; i++) {
					plog_info(" RX port %u (queue %u)", targ->rx_port_queue[i].port, targ->rx_port_queue[i].queue);
				}
			}
			else {
				for (uint8_t j = 0; j < targ->nb_rxrings; ++j) {
					plog_info(" RX ring[%u,%u] %p", task_id, j, targ->rx_rings[j]);
				}
			}

			for (uint8_t j = 0; j < targ->nb_txports; ++j) {
				plog_info(" TX port %u (queue %u)", targ->tx_port_queue[j].port,
					  targ->tx_port_queue[j].queue);
			}

			for (uint8_t j = 0; j < targ->nb_txrings; ++j) {
				plog_info(" TX ring %p", targ->tx_rings[j]);
			}

			plog_info("\n");
		}
	}
}
void cmd_get_cache_class(uint32_t lcore_id, uint32_t *set)
{
	uint64_t tmp_rmid = 0;
	cqm_assoc_read(lcore_id, &tmp_rmid);
	*set = (uint32_t)(tmp_rmid >> 32);
}
void cmd_get_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t *val)
{
	cat_get_class_mask(lcore_id, set, val);
}
void cmd_set_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t val)
{
	cat_set_class_mask(lcore_id, set, val);
	lcore_cfg[lcore_id].cache_set = set;
	uint32_t id = -1;
	while (prox_core_next(&id, 0) == 0) {
		if ((lcore_cfg[id].cache_set == set) && (rte_lcore_to_socket_id(id) == rte_lcore_to_socket_id(lcore_id))) {
			plog_info("Updating mask for core %d to %d\n", id, set);
			stats_update_cache_mask(id, val);
		}
	}
}
void cmd_set_cache_class(uint32_t lcore_id, uint32_t set)
{
	uint64_t tmp_rmid = 0;
	uint32_t val = 0;

	cqm_assoc_read(lcore_id, &tmp_rmid);
	cqm_assoc(lcore_id, (tmp_rmid & 0xffffffff) | ((uint64_t)set << 32));
	cat_get_class_mask(lcore_id, set, &val);
	stats_update_cache_mask(lcore_id, val);
}
void cmd_cache_reset(void)
{
	uint8_t sockets[MAX_SOCKETS] = {0};
	uint8_t cores[MAX_SOCKETS] = {0};
	uint32_t mask = (1 << cat_get_num_ways()) - 1;
	uint32_t lcore_id = -1, socket_id;

	while (prox_core_next(&lcore_id, 0) == 0) {
		cqm_assoc(lcore_id, 0);
		socket_id = rte_lcore_to_socket_id(lcore_id);
		if (socket_id < MAX_SOCKETS) {
			sockets[socket_id] = 1;
			cores[socket_id] = lcore_id;
		}
		stats_update_cache_mask(lcore_id, mask);
		plog_info("Setting core %d to cache mask %x\n", lcore_id, mask);
		lcore_cfg[lcore_id].cache_set = 0;
	}
	for (uint32_t s = 0; s < MAX_SOCKETS; s++) {
		if (sockets[s])
			cat_reset_cache(cores[s]);
	}
	stats_lcore_assoc_rmid();
}
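/* Rewire the predecessors of a task so their TX rings point directly
 * at the task's single TX ring, effectively taking the task out of
 * the pipeline. Only possible when the task has exactly one TX ring. */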
int bypass_task(uint32_t lcore_id, uint32_t task_id)
{
	struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
	struct task_args *targ, *starg;

	if (task_id >= lconf->n_tasks_all)
		return -1;

	targ = &lconf->targs[task_id];
	if (targ->nb_txrings == 1) {
		plog_info("Task has %d receive and 1 transmit ring and can be bypassed, %d precedent tasks\n", targ->nb_rxrings, targ->n_prev_tasks);
		// Find the predecessor rings feeding this task and reconnect them
		for (unsigned int i = 0; i < targ->n_prev_tasks; i++) {
			starg = targ->prev_tasks[i];
			for (unsigned int j = 0; j < starg->nb_txrings; j++) {
				for (unsigned int k = 0; k < targ->nb_rxrings; k++) {
					if (starg->tx_rings[j] == targ->rx_rings[k]) {
						plog_info("bypassing ring %p and connecting it to %p\n", starg->tx_rings[j], targ->tx_rings[0]);
						starg->tx_rings[j] = targ->tx_rings[0];
						struct task_base *tbase = starg->tbase;
						tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
					}
				}
			}
		}
	} else {
		plog_info("Task has %d receive and %d transmit rings and cannot be bypassed\n", targ->nb_rxrings, targ->nb_txrings);
		return -1;
	}

	return 0;
}
int reconnect_task(uint32_t lcore_id, uint32_t task_id)
{
	struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
	struct task_args *targ, *starg;

	if (task_id >= lconf->n_tasks_all)
		return -1;

	targ = &lconf->targs[task_id];
	if (targ->nb_txrings == 1) {
		for (unsigned int i = 0; i < targ->n_prev_tasks; i++) {
			starg = targ->prev_tasks[i];
			for (unsigned int j = 0; j < starg->nb_txrings; j++) {
				if (starg->tx_rings[j] == targ->tx_rings[0]) {
					if (targ->n_prev_tasks == targ->nb_rxrings) {
						starg->tx_rings[j] = targ->rx_rings[i];
						struct task_base *tbase = starg->tbase;
						tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
						plog_info("Task has %d receive and 1 transmit ring and can be reconnected, %d precedent tasks\n", targ->nb_rxrings, targ->n_prev_tasks);
					} else if (targ->nb_rxrings == 1) {
						starg->tx_rings[j] = targ->rx_rings[0];
						struct task_base *tbase = starg->tbase;
						tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
						plog_info("Task has %d receive and 1 transmit ring and ring %p can be reconnected, %d precedent tasks\n", targ->nb_rxrings, starg->tx_rings[j], targ->n_prev_tasks);
					} else {
						plog_err("Unexpected configuration: %d precedent tasks, %d rx rings\n", targ->n_prev_tasks, targ->nb_rxrings);
						return -1;
					}
				}
			}
		}
	} else {
		plog_info("Task has %d receive and %d transmit rings and cannot be reconnected\n", targ->nb_rxrings, targ->nb_txrings);
		return -1;
	}

	return 0;
}