VNFs/DPPD-PROX/commands.c

/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <string.h>
#include <rte_table_hash.h>
#include <rte_version.h>
#include <rte_malloc.h>

#include "prox_malloc.h"
#include "display.h"
#include "commands.h"
#include "log.h"
#include "run.h"
#include "lconf.h"
#include "hash_utils.h"
#include "prox_cfg.h"
#include "prox_port_cfg.h"
#include "defines.h"
#include "handle_qos.h"
#include "handle_qinq_encap4.h"
#include "quit.h"
#include "input.h"
#include "rw_reg.h"
#include "cqm.h"
#include "stats_core.h"

void start_core_all(int task_id)
{
        uint32_t cores[RTE_MAX_LCORE];
        uint32_t lcore_id;
        char tmp[256];
        int cnt = 0;

        prox_core_to_str(tmp, sizeof(tmp), 0);
        plog_info("Starting cores: %s\n", tmp);

        lcore_id = -1;
        while (prox_core_next(&lcore_id, 0) == 0) {
                cores[cnt++] = lcore_id;
        }
        start_cores(cores, cnt, task_id);
}

void stop_core_all(int task_id)
{
        uint32_t cores[RTE_MAX_LCORE];
        uint32_t lcore_id;
        char tmp[256];
        int cnt = 0;

        prox_core_to_str(tmp, sizeof(tmp), 0);
        plog_info("Stopping cores: %s\n", tmp);

        lcore_id = -1;
        while (prox_core_next(&lcore_id, 0) == 0) {
                cores[cnt++] = lcore_id;
        }

        stop_cores(cores, cnt, task_id);
}

static void warn_inactive_cores(uint32_t *cores, int count, const char *prefix)
{
        for (int i = 0; i < count; ++i) {
                if (!prox_core_active(cores[i], 0)) {
                        plog_warn("%s %u: core is not active\n", prefix, cores[i]);
                }
        }
}

static inline int wait_command_handled(struct lcore_cfg *lconf)
{
        uint64_t t1 = rte_rdtsc(), t2;
        while (lconf_is_req(lconf)) {
                t2 = rte_rdtsc();
                if (t2 - t1 > 5 * rte_get_tsc_hz()) {
                        // Failed to handle command ...
                        for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                                struct task_args *targs = &lconf->targs[task_id];
                                if (!(targs->flags & TASK_ARG_DROP)) {
                                        plogx_err("Failed to handle command - task is in NO_DROP mode and might be stuck...\n");
                                        return -1;
                                }
                        }
                        plogx_err("Failed to handle command\n");
                        return -1;
                }
        }
        return 0;
}
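
/*
 * Note on the control-plane handshake used throughout this file: a command
 * is delivered to a worker core by filling in lconf->msg and raising the
 * request flag with lconf_set_req(); the worker clears the flag once it has
 * consumed the message. wait_command_handled() polls that flag and gives up
 * after five seconds (measured in TSC cycles). A hedged sketch of the
 * calling pattern, assuming a hypothetical message type LCONF_MSG_EXAMPLE:
 *
 *        struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
 *        if (wait_command_handled(lconf) == -1)  // wait out any earlier command
 *                return;
 *        lconf->msg.type = LCONF_MSG_EXAMPLE;    // describe the new command
 *        lconf_set_req(lconf);                   // hand it to the worker
 *        if (lconf->n_tasks_run == 0)            // idle cores poll nothing,
 *                lconf_do_flags(lconf);          // so handle the message inline
 */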

static inline void start_l3(struct task_args *targ)
{
        if (!task_is_master(targ)) {
                if ((targ->nb_txrings != 0) || (targ->nb_txports != 0)) {
                        if (targ->task_init->flag_features & TASK_FEATURE_L3)
                                task_start_l3(targ->tbase, targ);
                }
        }
}

void start_cores(uint32_t *cores, int count, int task_id)
{
        int n_started_cores = 0;
        uint32_t started_cores[RTE_MAX_LCORE];
        struct task_args *targ;

        warn_inactive_cores(cores, count, "Can't start core");

        for (int i = 0; i < count; ++i) {
                struct lcore_cfg *lconf = &lcore_cfg[cores[i]];

                if (lconf->n_tasks_run != lconf->n_tasks_all) {
                        if (task_id == -1) {
                                for (uint8_t tid = 0; tid < lconf->n_tasks_all; ++tid) {
                                        targ = &lconf->targs[tid];
                                        start_l3(targ);
                                }
                        } else {
                                targ = &lconf->targs[task_id];
                                start_l3(targ);
                        }
                        lconf->msg.type = LCONF_MSG_START;
                        lconf->msg.task_id = task_id;
                        lconf_set_req(lconf);
                        if (task_id == -1)
                                plog_info("Starting core %u (all tasks)\n", cores[i]);
                        else
                                plog_info("Starting core %u task %u\n", cores[i], task_id);
                        started_cores[n_started_cores++] = cores[i];
                        lconf->flags |= LCONF_FLAG_RUNNING;
                        rte_eal_remote_launch(lconf_run, NULL, cores[i]);
                }
                else {
                        plog_warn("Core %u is already running all its tasks\n", cores[i]);
                }
        }

        /* This function is blocking, so detect when each core has
           consumed the message. */
        for (int i = 0; i < n_started_cores; ++i) {
                struct lcore_cfg *lconf = &lcore_cfg[started_cores[i]];
                plog_info("Waiting for core %u to start...", started_cores[i]);
                if (wait_command_handled(lconf) == -1) return;
                plog_info(" OK\n");
        }
}
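
/*
 * start_cores() and stop_cores() both work in two phases: first post the
 * message to every targeted core, then wait for each core to acknowledge.
 * Posting everything before waiting lets the cores start (or drain and
 * stop) in parallel instead of one after the other.
 */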

void stop_cores(uint32_t *cores, int count, int task_id)
{
        int n_stopped_cores = 0;
        uint32_t stopped_cores[RTE_MAX_LCORE];
        uint32_t c;

        warn_inactive_cores(cores, count, "Can't stop core");

        for (int i = 0; i < count; ++i) {
                struct lcore_cfg *lconf = &lcore_cfg[cores[i]];
                if (lconf->n_tasks_run) {
                        if (wait_command_handled(lconf) == -1) return;

                        lconf->msg.type = LCONF_MSG_STOP;
                        lconf->msg.task_id = task_id;
                        lconf_set_req(lconf);
                        stopped_cores[n_stopped_cores++] = cores[i];
                }
        }

        for (int i = 0; i < n_stopped_cores; ++i) {
                c = stopped_cores[i];
                struct lcore_cfg *lconf = &lcore_cfg[c];
                if (wait_command_handled(lconf) == -1) return;

                if (lconf->n_tasks_run == 0) {
                        plog_info("All tasks stopped on core %u, waiting for core to stop...", c);
                        rte_eal_wait_lcore(c);
                        plog_info(" OK\n");
                        lconf->flags &= ~LCONF_FLAG_RUNNING;
                }
                else {
                        plog_info("Stopped task %u on core %u\n", task_id, c);
                }
        }
}

struct size_unit {
        uint64_t val;
        uint64_t frac;
        char     unit[8];
};

static struct size_unit to_size_unit(uint64_t bytes)
{
        struct size_unit ret;

        if (bytes > 1 << 30) {
                ret.val = bytes >> 30;
                ret.frac = ((bytes - (ret.val << 30)) * 1000) / (1 << 30);
                strcpy(ret.unit, "GB");
        }
        else if (bytes > 1 << 20) {
                ret.val = bytes >> 20;
                ret.frac = ((bytes - (ret.val << 20)) * 1000) / (1 << 20);
                strcpy(ret.unit, "MB");
        }
        else if (bytes > 1 << 10) {
                ret.val = bytes >> 10;
                ret.frac = (bytes - (ret.val << 10)) * 1000 / (1 << 10);
                strcpy(ret.unit, "KB");
        }
        else {
                ret.val = bytes;
                ret.frac = 0;
                strcpy(ret.unit, "B");
        }

        return ret;
}
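
/*
 * Worked example: to_size_unit(3670016) (3.5 MB) takes the "MB" branch:
 * val = 3670016 >> 20 = 3, and frac = ((3670016 - (3 << 20)) * 1000) /
 * (1 << 20) = (524288 * 1000) / 1048576 = 500, so the value prints as
 * "3.500 MB". frac is a fixed three-digit fraction, which is why the
 * callers below format it with a %03 field width.
 */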

void cmd_mem_stats(void)
{
        struct rte_malloc_socket_stats sock_stats;
        struct size_unit su;

        for (uint32_t i = 0; i < RTE_MAX_NUMA_NODES; ++i) {
                if (rte_malloc_get_socket_stats(i, &sock_stats) < 0 || sock_stats.heap_totalsz_bytes == 0)
                        continue;

                plogx_info("Socket %u memory stats:\n", i);
                su = to_size_unit(sock_stats.heap_totalsz_bytes);
                plogx_info("\tHeap_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
                su = to_size_unit(sock_stats.heap_freesz_bytes);
                plogx_info("\tFree_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
                su = to_size_unit(sock_stats.heap_allocsz_bytes);
                plogx_info("\tAlloc_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
                su = to_size_unit(sock_stats.greatest_free_size);
                plogx_info("\tGreatest_free_size: %zu %s\n", su.val, su.unit);
                plogx_info("\tAlloc_count: %u\n", sock_stats.alloc_count);
                plogx_info("\tFree_count: %u\n", sock_stats.free_count);
        }
}

void cmd_mem_layout(void)
{
        const struct rte_memseg* memseg = rte_eal_get_physmem_layout();

        plog_info("Memory layout:\n");
        for (uint32_t i = 0; i < RTE_MAX_MEMSEG; i++) {
                if (memseg[i].addr == NULL)
                        break;

                const char *sz_str;
                switch (memseg[i].hugepage_sz >> 20) {
                case 2:
                        sz_str = "2MB";
                        break;
                case 1024:
                        sz_str = "1GB";
                        break;
                default:
                        sz_str = "??";
                }

                plog_info("Segment %u: [%#lx-%#lx] at %p using %zu pages of %s\n",
                          i,
                          memseg[i].phys_addr,
                          memseg[i].phys_addr + memseg[i].len,
                          memseg[i].addr,
                          memseg[i].len/memseg[i].hugepage_sz, sz_str);
        }
}
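
/*
 * hugepage_sz >> 20 expresses the page size in MB (2 for 2 MB pages, 1024
 * for 1 GB pages), and memseg[i].len / memseg[i].hugepage_sz is the page
 * count, since a memseg is a physically contiguous run of equal-size pages.
 */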

void cmd_dump(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets, struct input *input, int rx, int tx)
{
        plog_info("dump %u %u %u\n", lcore_id, task_id, nb_packets);
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        }
        else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
                plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
        }
        else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                lconf->tasks_all[task_id]->aux->task_rt_dump.input = input;

                if (wait_command_handled(lconf) == -1) return;
                if (rx && tx)
                        lconf->msg.type = LCONF_MSG_DUMP;
                else if (rx)
                        lconf->msg.type = LCONF_MSG_DUMP_RX;
                else if (tx)
                        lconf->msg.type = LCONF_MSG_DUMP_TX;

                if (rx || tx) {
                        lconf->msg.task_id = task_id;
                        lconf->msg.val  = nb_packets;
                        lconf_set_req(lconf);
                }

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_trace(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets)
{
        plog_info("trace %u %u %u\n", lcore_id, task_id, nb_packets);
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        }
        else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
                plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
        }
        else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;

                lconf->msg.type = LCONF_MSG_TRACE;
                lconf->msg.task_id = task_id;
                lconf->msg.val  = nb_packets;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}
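
/*
 * cmd_dump() asks a task to print the next nb_packets it receives and/or
 * transmits, while cmd_trace() follows those packets through the handler so
 * the output shows them both before and after processing; see the
 * LCONF_MSG_* handling in lconf.c for the exact semantics.
 */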

void cmd_rx_bw_start(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_BW_ACTIVE) {
                plog_warn("rx bandwidth already running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_RX_BW_START;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_tx_bw_start(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_BW_ACTIVE) {
                plog_warn("tx bandwidth already running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_TX_BW_START;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_rx_bw_stop(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if (!(lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_BW_ACTIVE)) {
                plog_warn("rx bandwidth not running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_RX_BW_STOP;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_tx_bw_stop(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if (!(lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_BW_ACTIVE)) {
                plog_warn("tx bandwidth not running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_TX_BW_STOP;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}
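
/*
 * The rx/tx bandwidth and distribution commands in this block differ only
 * in the flag they test and the message they post. A hedged refactoring
 * sketch (not part of the current code; the message type is shown as a
 * plain int for simplicity) that would fold the common handshake into one
 * helper:
 *
 *        static void send_lconf_msg(uint32_t lcore_id, int type)
 *        {
 *                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
 *
 *                if (wait_command_handled(lconf) == -1)
 *                        return;
 *                lconf->msg.type = type;
 *                lconf_set_req(lconf);
 *                if (lconf->n_tasks_run == 0)
 *                        lconf_do_flags(lconf);
 *        }
 */
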
void cmd_rx_distr_start(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_DISTR_ACTIVE) {
                plog_warn("rx distribution already running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_RX_DISTR_START;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_tx_distr_start(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_DISTR_ACTIVE) {
                plog_warn("tx distribution already running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_TX_DISTR_START;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_rx_distr_stop(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if ((lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_DISTR_ACTIVE) == 0) {
                plog_warn("rx distribution not running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_RX_DISTR_STOP;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_tx_distr_stop(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if ((lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_DISTR_ACTIVE) == 0) {
                plog_warn("tx distribution not running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_TX_DISTR_STOP;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_rx_distr_rst(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_RX_DISTR_RESET;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_tx_distr_rst(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_TX_DISTR_RESET;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_rx_distr_show(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else {
                for (uint32_t i = 0; i < lcore_cfg[lcore_id].n_tasks_all; ++i) {
                        struct task_base *t = lcore_cfg[lcore_id].tasks_all[i];
                        plog_info("t[%u]: ", i);
                        for (uint32_t j = 0; j < sizeof(t->aux->rx_bucket)/sizeof(t->aux->rx_bucket[0]); ++j) {
                                plog_info("%u ", t->aux->rx_bucket[j]);
                        }
                        plog_info("\n");
                }
        }
}

void cmd_tx_distr_show(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else {
                for (uint32_t i = 0; i < lcore_cfg[lcore_id].n_tasks_all; ++i) {
                        struct task_base *t = lcore_cfg[lcore_id].tasks_all[i];
                        uint64_t tot = 0, avg = 0;
                        for (uint32_t j = 0; j < sizeof(t->aux->tx_bucket)/sizeof(t->aux->tx_bucket[0]); ++j) {
                                tot += t->aux->tx_bucket[j];
                                avg += j * t->aux->tx_bucket[j];
                        }
                        if (tot) {
                                avg = avg / tot;
                        }
                        plog_info("t[%u]: %lu: ", i, avg);
                        for (uint32_t j = 0; j < sizeof(t->aux->tx_bucket)/sizeof(t->aux->tx_bucket[0]); ++j) {
                                plog_info("%u ", t->aux->tx_bucket[j]);
                        }
                        plog_info("\n");
                }
        }
}

void cmd_ringinfo_all(void)
{
        struct lcore_cfg *lconf;
        uint32_t lcore_id = -1;

        while (prox_core_next(&lcore_id, 0) == 0) {
                lconf = &lcore_cfg[lcore_id];
                for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                        cmd_ringinfo(lcore_id, task_id);
                }
        }
}

void cmd_ringinfo(uint8_t lcore_id, uint8_t task_id)
{
        struct lcore_cfg *lconf;
        struct rte_ring *ring;
        struct task_args* targ;
        uint32_t count;

        if (!prox_core_active(lcore_id, 0)) {
                plog_info("lcore %u is not active\n", lcore_id);
                return;
        }
        lconf = &lcore_cfg[lcore_id];
        if (task_id >= lconf->n_tasks_all) {
                plog_warn("Invalid task index %u: lcore %u has %u tasks\n", task_id, lcore_id, lconf->n_tasks_all);
                return;
        }

        targ = &lconf->targs[task_id];
        plog_info("Core %u task %u: %u rings\n", lcore_id, task_id, targ->nb_rxrings);
        for (uint8_t i = 0; i < targ->nb_rxrings; ++i) {
                ring = targ->rx_rings[i];
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
                count = ring->prod.mask + 1;
#else
                count = ring->mask + 1;
#endif
                plog_info("\tRing %u:\n", i);
                plog_info("\t\tFlags: %s,%s\n", ring->flags & RING_F_SP_ENQ ? "sp" : "mp", ring->flags & RING_F_SC_DEQ ? "sc" : "mc");
                plog_info("\t\tMemory size: %zu bytes\n", rte_ring_get_memsize(count));
                plog_info("\t\tOccupied: %u/%u\n", rte_ring_count(ring), count);
        }
}
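
/*
 * rte_ring sizes are powers of two, so mask + 1 recovers the ring size from
 * the index mask. DPDK 17.05 moved the mask from the per-head prod/cons
 * structures up into struct rte_ring itself, hence the version guard above.
 */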

void cmd_port_up(uint8_t port_id)
{
        int err;

        if (!port_is_active(port_id)) {
                return;
        }

        if ((err = rte_eth_dev_set_link_up(port_id)) == 0) {
                plog_info("Bringing port %d up\n", port_id);
        }
        else {
                plog_warn("Failed to bring port %d up with error %d\n", port_id, err);
        }
}

void cmd_port_down(uint8_t port_id)
{
        int err;

        if (!port_is_active(port_id)) {
                return;
        }

        if ((err = rte_eth_dev_set_link_down(port_id)) == 0) {
                plog_info("Bringing port %d down\n", port_id);
        }
        else {
                plog_warn("Failed to bring port %d down with error %d\n", port_id, err);
        }
}

void cmd_xstats(uint8_t port_id)
{
#if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0)
        int n_xstats;
        struct rte_eth_xstat *eth_xstat = NULL; // id and value
        struct rte_eth_xstat_name *eth_xstat_name = NULL; // only names
        struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
        int rc;

        n_xstats = rte_eth_xstats_get(port_id, NULL, 0);
        eth_xstat_name = prox_zmalloc(n_xstats * sizeof(*eth_xstat_name), port_cfg->socket);
        PROX_ASSERT(eth_xstat_name);
        rc = rte_eth_xstats_get_names(port_id, eth_xstat_name, n_xstats);
        if ((rc < 0) || (rc > n_xstats)) {
                if (rc < 0) {
                        plog_warn("Failed to get xstats_names on port %d with error %d\n", port_id, rc);
                } else if (rc > n_xstats) {
                        plog_warn("Failed to get xstats_names on port %d: too many xstats (%d)\n", port_id, rc);
                }
        }

        eth_xstat = prox_zmalloc(n_xstats * sizeof(*eth_xstat), port_cfg->socket);
        PROX_ASSERT(eth_xstat);
        rc = rte_eth_xstats_get(port_id, eth_xstat, n_xstats);
        if ((rc < 0) || (rc > n_xstats)) {
                if (rc < 0) {
                        plog_warn("Failed to get xstats on port %d with error %d\n", port_id, rc);
                } else if (rc > n_xstats) {
                        plog_warn("Failed to get xstats on port %d: too many xstats (%d)\n", port_id, rc);
                }
        } else {
                for (int i = 0; i < rc; i++) {
                        plog_info("%s: %ld\n", eth_xstat_name[i].name, eth_xstat[i].value);
                }
        }
        if (eth_xstat_name)
                prox_free(eth_xstat_name);
        if (eth_xstat)
                prox_free(eth_xstat);
#else
#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
        int n_xstats;
        struct rte_eth_xstats *eth_xstats;
        struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
        int rc;

        n_xstats = rte_eth_xstats_get(port_id, NULL, 0);
        eth_xstats = prox_zmalloc(n_xstats * sizeof(*eth_xstats), port_cfg->socket);
        PROX_ASSERT(eth_xstats);
        rc = rte_eth_xstats_get(port_id, eth_xstats, n_xstats);
        if ((rc < 0) || (rc > n_xstats)) {
                if (rc < 0) {
                        plog_warn("Failed to get xstats on port %d with error %d\n", port_id, rc);
                } else if (rc > n_xstats) {
                        plog_warn("Failed to get xstats on port %d: too many xstats (%d)\n", port_id, rc);
                }
        } else {
                for (int i = 0; i < rc; i++) {
                        plog_info("%s: %ld\n", eth_xstats[i].name, eth_xstats[i].value);
                }
        }
        if (eth_xstats)
                prox_free(eth_xstats);
#else
        plog_warn("Failed to get xstats, xstats are not supported in this version of dpdk\n");
#endif
#endif
}
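
/*
 * Calling rte_eth_xstats_get(port_id, NULL, 0) returns the number of
 * extended statistics without copying any, which is what sizes the buffers
 * above. DPDK 16.07 split the old combined rte_eth_xstats (name + value)
 * into rte_eth_xstat values plus a separate rte_eth_xstat_name table,
 * which is why the two code paths differ.
 */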

void cmd_portinfo(int port_id, char *dst, size_t max_len)
{
        char *end = dst + max_len;

        *dst = 0;
        if (port_id == -1) {
                uint8_t max_port_idx = prox_last_port_active() + 1;

                for (uint8_t port_id = 0; port_id < max_port_idx; ++port_id) {
                        if (!prox_port_cfg[port_id].active) {
                                continue;
                        }
                        struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];

                        dst += snprintf(dst, end - dst,
                                        "%2d:%10s; "MAC_BYTES_FMT"; %s\n",
                                        port_id,
                                        port_cfg->name,
                                        MAC_BYTES(port_cfg->eth_addr.addr_bytes),
                                        port_cfg->pci_addr);
                }
                return;
        }

        if (!port_is_active(port_id)) {
                return;
        }

        struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];

        dst += snprintf(dst, end - dst, "Port info for port %u\n", port_id);
        dst += snprintf(dst, end - dst, "\tName: %s\n", port_cfg->name);
        dst += snprintf(dst, end - dst, "\tDriver: %s\n", port_cfg->driver_name);
        dst += snprintf(dst, end - dst, "\tMac address: "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
        dst += snprintf(dst, end - dst, "\tLink speed: %u Mbps\n", port_cfg->link_speed);
        dst += snprintf(dst, end - dst, "\tLink status: %s\n", port_cfg->link_up ? "up" : "down");
        dst += snprintf(dst, end - dst, "\tSocket: %u\n", port_cfg->socket);
        dst += snprintf(dst, end - dst, "\tPCI address: %s\n", port_cfg->pci_addr);
        dst += snprintf(dst, end - dst, "\tPromiscuous: %s\n", port_cfg->promiscuous ? "yes" : "no");
        dst += snprintf(dst, end - dst, "\tNumber of RX/TX descriptors: %u/%u\n", port_cfg->n_rxd, port_cfg->n_txd);
        dst += snprintf(dst, end - dst, "\tNumber of RX/TX queues: %u/%u (max: %u/%u)\n", port_cfg->n_rxq, port_cfg->n_txq, port_cfg->max_rxq, port_cfg->max_txq);
        dst += snprintf(dst, end - dst, "\tMemory pools:\n");

        for (uint8_t i = 0; i < 32; ++i) {
                if (port_cfg->pool[i]) {
                        dst += snprintf(dst, end - dst, "\t\tname: %s (%p)\n",
                                        port_cfg->pool[i]->name, port_cfg->pool[i]);
                }
        }
}
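
/*
 * Caveat on the chained snprintf pattern above: snprintf returns the number
 * of characters that *would* have been written, so once the buffer fills up
 * dst can advance past end and the size expression end - dst underflows to
 * a huge value. That is safe only while max_len comfortably exceeds the
 * output; a robust version would clamp dst to end after each call.
 */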

void cmd_read_reg(uint8_t port_id, unsigned int id)
{
        unsigned int val, rc;
        if (!port_is_active(port_id)) {
                return;
        }
        rc = read_reg(port_id, id, &val);
        if (rc) {
                plog_warn("Failed to read register %d on port %d\n", id, port_id);
        }
        else {
                plog_info("Register 0x%08X: %08X\n", id, val);
        }
}

void cmd_reset_port(uint8_t portid)
{
        unsigned int rc;
        if (!prox_port_cfg[portid].active) {
                plog_info("port not active\n");
                return;
        }
        rte_eth_dev_stop(portid);
        rc = rte_eth_dev_start(portid);
        if (rc) {
                plog_warn("Failed to restart port %d\n", portid);
        }
}

void cmd_write_reg(uint8_t port_id, unsigned int id, unsigned int val)
{
        if (!port_is_active(port_id)) {
                return;
        }

        plog_info("writing 0x%08X %08X\n", id, val);
        write_reg(port_id, id, val);
}

void cmd_set_vlan_offload(uint8_t port_id, unsigned int val)
{
        if (!port_is_active(port_id)) {
                return;
        }

        plog_info("setting vlan offload to %d\n", val);
        if (val & ~(ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD | ETH_VLAN_EXTEND_OFFLOAD)) {
                plog_info("wrong vlan offload value\n");
        }
        int ret = rte_eth_dev_set_vlan_offload(port_id, val);
        plog_info("rte_eth_dev_set_vlan_offload returned %d\n", ret);
}

void cmd_set_vlan_filter(uint8_t port_id, unsigned int id, unsigned int val)
{
        if (!port_is_active(port_id)) {
                return;
        }

        plog_info("setting vlan filter for vlan %d to %d\n", id, val);
        int ret = rte_eth_dev_vlan_filter(port_id, id, val);
        plog_info("rte_eth_dev_vlan_filter returned %d\n", ret);
}

void cmd_thread_info(uint8_t lcore_id, uint8_t task_id)
{
        plog_info("thread_info %u %u\n", lcore_id, task_id);
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
                return;
        }
        if (!prox_core_active(lcore_id, 0)) {
                plog_warn("lcore %u is not active\n", lcore_id);
                return;
        }
        if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
                plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
                return;
        }
        if (strcmp(lcore_cfg[lcore_id].targs[task_id].task_init->mode_str, "qos") == 0) {
                struct task_base *task;

                task = lcore_cfg[lcore_id].tasks_all[task_id];
                plog_info("core %d, task %d: %d mbufs stored in QoS\n", lcore_id, task_id,
                          task_qos_n_pkts_buffered(task));

#ifdef ENABLE_EXTRA_USER_STATISTICS
        }
        else if (lcore_cfg[lcore_id].targs[task_id].mode == QINQ_ENCAP4) {
                struct task_qinq_encap4 *task;
                task = (struct task_qinq_encap4 *)(lcore_cfg[lcore_id].tasks_all[task_id]);
                for (int i = 0; i < task->n_users; i++) {
                        if (task->stats_per_user[i])
                                plog_info("User %d: %d packets\n", i, task->stats_per_user[i]);
                }
#endif
        }
        else {
                // Only QoS thread info so far
                plog_err("core %d, task %d: not a qos core (%p)\n", lcore_id, task_id, lcore_cfg[lcore_id].thread_x);
        }
}

void cmd_rx_tx_info(void)
{
        uint32_t lcore_id = -1;
        while (prox_core_next(&lcore_id, 0) == 0) {
                for (uint8_t task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; ++task_id) {
                        struct task_args *targ = &lcore_cfg[lcore_id].targs[task_id];

                        plog_info("Core %u:", lcore_id);
                        if (targ->rx_port_queue[0].port != OUT_DISCARD) {
                                for (int i = 0; i < targ->nb_rxports; i++) {
                                        plog_info(" RX port %u (queue %u)", targ->rx_port_queue[i].port, targ->rx_port_queue[i].queue);
                                }
                        }
                        else {
                                for (uint8_t j = 0; j < targ->nb_rxrings; ++j) {
                                        plog_info(" RX ring[%u,%u] %p", task_id, j, targ->rx_rings[j]);
                                }
                        }
                        plog_info(" ==>");
                        for (uint8_t j = 0; j < targ->nb_txports; ++j) {
                                plog_info(" TX port %u (queue %u)", targ->tx_port_queue[j].port,
                                          targ->tx_port_queue[j].queue);
                        }

                        for (uint8_t j = 0; j < targ->nb_txrings; ++j) {
                                plog_info(" TX ring %p", targ->tx_rings[j]);
                        }

                        plog_info("\n");
                }
        }
}

void cmd_get_cache_class(uint32_t lcore_id, uint32_t *set)
{
        uint64_t tmp_rmid = 0;
        cqm_assoc_read(lcore_id, &tmp_rmid);
        *set = (uint32_t)(tmp_rmid >> 32);
}

void cmd_get_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t *val)
{
        cat_get_class_mask(lcore_id, set, val);
}

void cmd_set_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t val)
{
        cat_set_class_mask(lcore_id, set, val);
        lcore_cfg[lcore_id].cache_set = set;
        uint32_t id = -1;
        while (prox_core_next(&id, 0) == 0) {
                if ((lcore_cfg[id].cache_set == set) && (rte_lcore_to_socket_id(id) == rte_lcore_to_socket_id(lcore_id))) {
                        plog_info("Updating mask for core %d to %d\n", id, set);
                        stats_update_cache_mask(id, val);
                }
        }
}

void cmd_set_cache_class(uint32_t lcore_id, uint32_t set)
{
        uint64_t tmp_rmid = 0;
        uint32_t val = 0;
        cqm_assoc_read(lcore_id, &tmp_rmid);
        cqm_assoc(lcore_id, (tmp_rmid & 0xffffffff) | ((uint64_t)set << 32));
        cat_get_class_mask(lcore_id, set, &val);
        stats_update_cache_mask(lcore_id, val);
}
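
/*
 * Background for the cache commands: on CPUs with Intel RDT, the per-core
 * IA32_PQR_ASSOC MSR carries the monitoring RMID in its low 32 bits and the
 * class of service (CLOS) for cache allocation in the upper 32 bits, which
 * is why cmd_set_cache_class() preserves tmp_rmid & 0xffffffff and shifts
 * the new class up by 32. See the cqm_* and cat_* helpers declared in
 * cqm.h for the MSR access itself.
 */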

void cmd_cache_reset(void)
{
        uint8_t sockets[MAX_SOCKETS] = {0};
        uint8_t cores[MAX_SOCKETS] = {0};
        uint32_t mask = (1 << cat_get_num_ways()) - 1;
        uint32_t lcore_id = -1, socket_id;
        while (prox_core_next(&lcore_id, 0) == 0) {
                cqm_assoc(lcore_id, 0);
                socket_id = rte_lcore_to_socket_id(lcore_id);
                if (socket_id < MAX_SOCKETS) {
                        sockets[socket_id] = 1;
                        cores[socket_id] = lcore_id;
                }
                stats_update_cache_mask(lcore_id, mask);
                plog_info("Setting core %d to cache mask %x\n", lcore_id, mask);
                lcore_cfg[lcore_id].cache_set = 0;
        }
        for (uint32_t s = 0; s < MAX_SOCKETS; s++) {
                if (sockets[s])
                        cat_reset_cache(cores[s]);
        }
        stats_lcore_assoc_rmid();
}

int bypass_task(uint32_t lcore_id, uint32_t task_id)
{
        struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
        struct task_args *targ, *starg;

        if (task_id >= lconf->n_tasks_all)
                return -1;

        targ = &lconf->targs[task_id];
        if (targ->nb_txrings == 1) {
                plog_info("Task has %d receive rings and 1 transmit ring and can be bypassed, %d preceding tasks\n", targ->nb_rxrings, targ->n_prev_tasks);
                // Find the source task(s) and rewire their transmit rings
                for (unsigned int i = 0; i < targ->n_prev_tasks; i++) {
                        starg = targ->prev_tasks[i];
                        for (unsigned int j = 0; j < starg->nb_txrings; j++) {
                                for (unsigned int k = 0; k < targ->nb_rxrings; k++) {
                                        if (starg->tx_rings[j] == targ->rx_rings[k]) {
                                                plog_info("bypassing ring %p and connecting it to %p\n", starg->tx_rings[j], targ->tx_rings[0]);
                                                starg->tx_rings[j] = targ->tx_rings[0];
                                                struct task_base *tbase = starg->tbase;
                                                tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
                                        }
                                }
                        }
                }
        } else {
                plog_info("Task has %d receive and %d transmit rings and cannot be bypassed\n", targ->nb_rxrings, targ->nb_txrings);
                return -1;
        }

        return 0;
}
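
/*
 * Rewiring sketch for bypass_task(), with task B being bypassed:
 *
 *        before:  A --ring1--> B --ring2--> C
 *        after:   A --ring2--> C            (B no longer sees traffic)
 *
 * Only B's predecessors are touched: every tx ring of theirs that fed one
 * of B's rx rings is repointed at B's single tx ring. reconnect_task()
 * below restores the original wiring by matching on that same tx ring.
 */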

int reconnect_task(uint32_t lcore_id, uint32_t task_id)
{
        struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
        struct task_args *targ, *starg;

        if (task_id >= lconf->n_tasks_all)
                return -1;

        targ = &lconf->targs[task_id];
        if (targ->nb_txrings == 1) {
                // Find the source task(s) and restore their transmit rings
                for (unsigned int i = 0; i < targ->n_prev_tasks; i++) {
                        starg = targ->prev_tasks[i];
                        for (unsigned int j = 0; j < starg->nb_txrings; j++) {
                                if (starg->tx_rings[j] == targ->tx_rings[0]) {
                                        if (targ->n_prev_tasks == targ->nb_rxrings) {
                                                starg->tx_rings[j] = targ->rx_rings[i];
                                                struct task_base *tbase = starg->tbase;
                                                tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
                                                plog_info("Task has %d receive rings and 1 transmit ring and can be reconnected, %d preceding tasks\n", targ->nb_rxrings, targ->n_prev_tasks);
                                        } else if (targ->nb_rxrings == 1) {
                                                starg->tx_rings[j] = targ->rx_rings[0];
                                                struct task_base *tbase = starg->tbase;
                                                tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
                                                plog_info("Task has %d receive rings and 1 transmit ring and ring %p can be reconnected, %d preceding tasks\n", targ->nb_rxrings, starg->tx_rings[j], targ->n_prev_tasks);
                                        } else {
                                                plog_err("Unexpected configuration: %d preceding tasks, %d rx rings\n", targ->n_prev_tasks, targ->nb_rxrings);
                                        }
                                }
                        }
                }
        } else {
                plog_info("Task has %d receive and %d transmit rings and cannot be reconnected\n", targ->nb_rxrings, targ->nb_txrings);
                return -1;
        }

        return 0;
}
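
/*
 * bypass_task() and reconnect_task() are intended as a pair: bypass removes
 * a single-tx-ring task from the pipeline without tearing anything down
 * (e.g. while that task's core is stopped), and reconnect puts it back.
 * Both refuse tasks with more than one tx ring, where the rewiring would
 * be ambiguous.
 */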