Initial support for DPDK 18.05
[samplevnf.git] / VNFs / DPPD-PROX / commands.c
/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <string.h>
#include <rte_table_hash.h>
#include <rte_version.h>
#include <rte_malloc.h>

#include "prox_malloc.h"
#include "display.h"
#include "commands.h"
#include "log.h"
#include "run.h"
#include "lconf.h"
#include "hash_utils.h"
#include "prox_cfg.h"
#include "prox_port_cfg.h"
#include "defines.h"
#include "handle_qos.h"
#include "handle_qinq_encap4.h"
#include "quit.h"
#include "input.h"
#include "rw_reg.h"
#include "cqm.h"
#include "stats_core.h"

void start_core_all(int task_id)
{
        uint32_t cores[RTE_MAX_LCORE];
        uint32_t lcore_id;
        char tmp[256];
        int cnt = 0;

        prox_core_to_str(tmp, sizeof(tmp), 0);
        plog_info("Starting cores: %s\n", tmp);

        lcore_id = -1;
        while (prox_core_next(&lcore_id, 0) == 0) {
                cores[cnt++] = lcore_id;
        }
        start_cores(cores, cnt, task_id);
}

void stop_core_all(int task_id)
{
        uint32_t cores[RTE_MAX_LCORE];
        uint32_t lcore_id;
        char tmp[256];
        int cnt = 0;

        prox_core_to_str(tmp, sizeof(tmp), 0);
        plog_info("Stopping cores: %s\n", tmp);

        lcore_id = -1;
        while (prox_core_next(&lcore_id, 0) == 0) {
                cores[cnt++] = lcore_id;
        }

        stop_cores(cores, cnt, task_id);
}

static void warn_inactive_cores(uint32_t *cores, int count, const char *prefix)
{
        for (int i = 0; i < count; ++i) {
                if (!prox_core_active(cores[i], 0)) {
                        plog_warn("%s %u: core is not active\n", prefix, cores[i]);
                }
        }
}

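/* Busy-wait until the target lcore has consumed the pending message.
 * The timeout is measured with the TSC: 5 seconds for most commands,
 * 30 seconds for LCONF_MSG_STOP, since a no-drop task may legitimately
 * need longer to stop. Returns 0 on success, -1 on timeout. */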
static inline int wait_command_handled(struct lcore_cfg *lconf)
{
        uint64_t t1 = rte_rdtsc(), t2;
        int max_time = 5;

        if (lconf->msg.type == LCONF_MSG_STOP)
                max_time = 30;

        while (lconf_is_req(lconf)) {
                t2 = rte_rdtsc();
                if (t2 - t1 > max_time * rte_get_tsc_hz()) {
                        // Failed to handle command ...
                        for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                                struct task_args *targs = &lconf->targs[task_id];
                                if (!(targs->flags & TASK_ARG_DROP)) {
                                        plogx_err("Failed to handle command - task is in NO_DROP and might be stuck...\n");
                                        return -1;
                                }
                        }
                        plogx_err("Failed to handle command\n");
                        return -1;
                }
        }
        return 0;
}

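/* Start the L3 sub-mode (ARP handling) for a task if it was configured
 * with TASK_ARG_L3. The master core and tasks without any TX path
 * (no TX rings and no TX ports) are skipped. */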
static inline void start_l3(struct task_args *targ)
{
        if (!task_is_master(targ)) {
                if ((targ->nb_txrings != 0) || (targ->nb_txports != 0)) {
                        if (targ->flags & TASK_ARG_L3)
                                task_start_l3(targ->tbase, targ);
                }
        }
}

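/* Start the given tasks: post an LCONF_MSG_START message to each core
 * (task_id == -1 means all tasks on that core), hand the core to
 * lconf_run() through rte_eal_remote_launch(), then wait for every
 * core to acknowledge the message. */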
void start_cores(uint32_t *cores, int count, int task_id)
{
        int n_started_cores = 0;
        uint32_t started_cores[RTE_MAX_LCORE];
        struct task_args *targ;

        warn_inactive_cores(cores, count, "Can't start core");

        for (int i = 0; i < count; ++i) {
                struct lcore_cfg *lconf = &lcore_cfg[cores[i]];

                if (lconf->n_tasks_run != lconf->n_tasks_all) {
                        if (task_id == -1) {
                                for (uint8_t tid = 0; tid < lconf->n_tasks_all; ++tid) {
                                        targ = &lconf->targs[tid];
                                        start_l3(targ);
                                }
                        } else {
                                targ = &lconf->targs[task_id];
                                start_l3(targ);
                        }
                        lconf->msg.type = LCONF_MSG_START;
                        lconf->msg.task_id = task_id;
                        lconf_set_req(lconf);
                        if (task_id == -1)
                                plog_info("Starting core %u (all tasks)\n", cores[i]);
                        else
                                plog_info("Starting core %u task %u\n", cores[i], task_id);
                        started_cores[n_started_cores++] = cores[i];
                        lconf->flags |= LCONF_FLAG_RUNNING;
                        rte_eal_remote_launch(lconf_run, NULL, cores[i]);
                }
                else {
                        plog_warn("Core %u is already running all its tasks\n", cores[i]);
                }
        }

        /* This function is blocking, so detect when each core has
           consumed the message. */
        for (int i = 0; i < n_started_cores; ++i) {
                struct lcore_cfg *lconf = &lcore_cfg[started_cores[i]];
                plog_info("Waiting for core %u to start...", started_cores[i]);
                if (wait_command_handled(lconf) == -1) return;
                plog_info(" OK\n");
        }
}

void stop_cores(uint32_t *cores, int count, int task_id)
{
        int n_stopped_cores = 0;
        uint32_t stopped_cores[RTE_MAX_LCORE];
        uint32_t c;

        warn_inactive_cores(cores, count, "Can't stop core");

        for (int i = 0; i < count; ++i) {
                struct lcore_cfg *lconf = &lcore_cfg[cores[i]];
                if (lconf->n_tasks_run) {
                        if (wait_command_handled(lconf) == -1) return;

                        lconf->msg.type = LCONF_MSG_STOP;
                        lconf->msg.task_id = task_id;
                        lconf_set_req(lconf);
                        stopped_cores[n_stopped_cores++] = cores[i];
                }
        }

        for (int i = 0; i < n_stopped_cores; ++i) {
                c = stopped_cores[i];
                struct lcore_cfg *lconf = &lcore_cfg[c];
                if (wait_command_handled(lconf) == -1) return;

                if (lconf->n_tasks_run == 0) {
                        plog_info("All tasks stopped on core %u, waiting for core to stop...", c);
                        rte_eal_wait_lcore(c);
                        plog_info(" OK\n");
                        lconf->flags &= ~LCONF_FLAG_RUNNING;
                }
                else {
                        plog_info("Stopped task %u on core %u\n", task_id, c);
                }
        }
}

struct size_unit {
        uint64_t val;
        uint64_t frac;
        char     unit[8];
};

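/* Convert a byte count into a human-readable value with a binary unit
 * and a fractional part expressed in thousandths, e.g.
 * to_size_unit(1536) yields { .val = 1, .frac = 500, .unit = "KB" }. */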
static struct size_unit to_size_unit(uint64_t bytes)
{
        struct size_unit ret;

        if (bytes > 1 << 30) {
                ret.val = bytes >> 30;
                ret.frac = ((bytes - (ret.val << 30)) * 1000) / (1 << 30);
                strcpy(ret.unit, "GB");
        }
        else if (bytes > 1 << 20) {
                ret.val = bytes >> 20;
                ret.frac = ((bytes - (ret.val << 20)) * 1000) / (1 << 20);
                strcpy(ret.unit, "MB");
        }
        else if (bytes > 1 << 10) {
                ret.val = bytes >> 10;
                ret.frac = (bytes - (ret.val << 10)) * 1000 / (1 << 10);
                strcpy(ret.unit, "KB");
        }
        else {
                ret.val = bytes;
                ret.frac = 0;
                strcpy(ret.unit, "B");
        }

        return ret;
}

void cmd_mem_stats(void)
{
        struct rte_malloc_socket_stats sock_stats;
        struct size_unit su;

        for (uint32_t i = 0; i < RTE_MAX_NUMA_NODES; ++i) {
                if (rte_malloc_get_socket_stats(i, &sock_stats) < 0 || sock_stats.heap_totalsz_bytes == 0)
                        continue;

                plogx_info("Socket %u memory stats:\n", i);
                su = to_size_unit(sock_stats.heap_totalsz_bytes);
                plogx_info("\tHeap_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
                su = to_size_unit(sock_stats.heap_freesz_bytes);
                plogx_info("\tFree_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
                su = to_size_unit(sock_stats.heap_allocsz_bytes);
                plogx_info("\tAlloc_size: %zu.%03zu %s\n", su.val, su.frac, su.unit);
                su = to_size_unit(sock_stats.greatest_free_size);
                plogx_info("\tGreatest_free_size: %zu %s\n", su.val, su.unit);
                plogx_info("\tAlloc_count: %u\n", sock_stats.alloc_count);
                plogx_info("\tFree_count: %u\n", sock_stats.free_count);
        }
}

void cmd_mem_layout(void)
{
#if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
        const struct rte_memseg* memseg = rte_eal_get_physmem_layout();

        plog_info("Memory layout:\n");
        for (uint32_t i = 0; i < RTE_MAX_MEMSEG; i++) {
                if (memseg[i].addr == NULL)
                        break;

                const char *sz_str;
                switch (memseg[i].hugepage_sz >> 20) {
                case 2:
                        sz_str = "2MB";
                        break;
                case 1024:
                        sz_str = "1GB";
                        break;
                default:
                        sz_str = "??";
                }

                plog_info("Segment %u: [%#lx-%#lx] at %p using %zu pages of %s\n",
                          i,
                          memseg[i].phys_addr,
                          memseg[i].phys_addr + memseg[i].len,
                          memseg[i].addr,
                          memseg[i].len/memseg[i].hugepage_sz, sz_str);
        }
#else
        plog_info("Memory layout: command not supported in this DPDK version\n");
        // TODO DPDK1805
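        /* DPDK 18.05 replaced the static memseg array with dynamically
         * managed memseg lists, so rte_eal_get_physmem_layout() is gone.
         * A possible replacement would iterate the segments with
         * rte_memseg_walk(); left as a sketch only, since the walk
         * callback signature changed again in later releases:
         *
         *   static int print_memseg(const struct rte_memseg *ms, void *arg)
         *   {
         *           uint32_t *n = arg;
         *           plog_info("Segment %u: [%#lx-%#lx] at %p (page size %zu)\n",
         *                     (*n)++, ms->iova, ms->iova + ms->len,
         *                     ms->addr, ms->hugepage_sz);
         *           return 0;
         *   }
         *   ...
         *   uint32_t n = 0;
         *   rte_memseg_walk(print_memseg, &n);
         */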
#endif
}

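/* Packet dump/trace commands: post an LCONF_MSG_DUMP*/
/* or LCONF_MSG_TRACE request to the target task so that the datapath
 * core itself logs the next nb_packets packets it receives and/or
 * transmits. */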
void cmd_dump(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets, struct input *input, int rx, int tx)
{
        plog_info("dump %u %u %u\n", lcore_id, task_id, nb_packets);
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        }
        else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
                plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
        }
        else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                lconf->tasks_all[task_id]->aux->task_rt_dump.input = input;

                if (wait_command_handled(lconf) == -1) return;
                if (rx && tx)
                        lconf->msg.type = LCONF_MSG_DUMP;
                else if (rx)
                        lconf->msg.type = LCONF_MSG_DUMP_RX;
                else if (tx)
                        lconf->msg.type = LCONF_MSG_DUMP_TX;

                if (rx || tx) {
                        lconf->msg.task_id = task_id;
                        lconf->msg.val  = nb_packets;
                        lconf_set_req(lconf);
                }

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_trace(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets)
{
        plog_info("trace %u %u %u\n", lcore_id, task_id, nb_packets);
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        }
        else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
                plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
        }
        else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;

                lconf->msg.type = LCONF_MSG_TRACE;
                lconf->msg.task_id = task_id;
                lconf->msg.val  = nb_packets;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_rx_bw_start(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_BW_ACTIVE) {
                plog_warn("rx bandwidth already running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_RX_BW_START;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_tx_bw_start(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_BW_ACTIVE) {
                plog_warn("tx bandwidth already running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_TX_BW_START;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_rx_bw_stop(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if (!(lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_BW_ACTIVE)) {
                plog_warn("rx bandwidth not running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_RX_BW_STOP;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_tx_bw_stop(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if (!(lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_BW_ACTIVE)) {
                plog_warn("tx bandwidth not running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_TX_BW_STOP;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_rx_distr_start(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_DISTR_ACTIVE) {
                plog_warn("rx distribution already running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_RX_DISTR_START;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_tx_distr_start(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_DISTR_ACTIVE) {
                plog_warn("tx distribution already running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_TX_DISTR_START;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_rx_distr_stop(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if ((lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_DISTR_ACTIVE) == 0) {
                plog_warn("rx distribution not running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_RX_DISTR_STOP;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_tx_distr_stop(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else if ((lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_DISTR_ACTIVE) == 0) {
                plog_warn("tx distribution not running on core %u\n", lcore_id);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_TX_DISTR_STOP;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_rx_distr_rst(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_RX_DISTR_RESET;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_tx_distr_rst(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (wait_command_handled(lconf) == -1) return;
                lconf->msg.type = LCONF_MSG_TX_DISTR_RESET;
                lconf_set_req(lconf);

                if (lconf->n_tasks_run == 0) {
                        lconf_do_flags(lconf);
                }
        }
}

void cmd_rx_distr_show(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else {
                for (uint32_t i = 0; i < lcore_cfg[lcore_id].n_tasks_all; ++i) {
                        struct task_base *t = lcore_cfg[lcore_id].tasks_all[i];
                        plog_info("t[%u]: ", i);
                        for (uint32_t j = 0; j < sizeof(t->aux->rx_bucket)/sizeof(t->aux->rx_bucket[0]); ++j) {
                                plog_info("%u ", t->aux->rx_bucket[j]);
                        }
                        plog_info("\n");
                }
        }
}

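/* Print the TX burst-size histogram of each task on the core, prefixed
 * by the weighted average burst size (sum of j * bucket[j] over the
 * total number of bursts). */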
void cmd_tx_distr_show(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
        } else {
                for (uint32_t i = 0; i < lcore_cfg[lcore_id].n_tasks_all; ++i) {
                        struct task_base *t = lcore_cfg[lcore_id].tasks_all[i];
                        uint64_t tot = 0, avg = 0;
                        for (uint32_t j = 0; j < sizeof(t->aux->tx_bucket)/sizeof(t->aux->tx_bucket[0]); ++j) {
                                tot += t->aux->tx_bucket[j];
                                avg += j * t->aux->tx_bucket[j];
                        }
                        if (tot) {
                                avg = avg / tot;
                        }
                        plog_info("t[%u]: %lu: ", i, avg);
                        for (uint32_t j = 0; j < sizeof(t->aux->tx_bucket)/sizeof(t->aux->tx_bucket[0]); ++j) {
                                plog_info("%u ", t->aux->tx_bucket[j]);
                        }
                        plog_info("\n");
                }
        }
}

void cmd_ringinfo_all(void)
{
        struct lcore_cfg *lconf;
        uint32_t lcore_id = -1;

        while (prox_core_next(&lcore_id, 0) == 0) {
                lconf = &lcore_cfg[lcore_id];
                for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                        cmd_ringinfo(lcore_id, task_id);
                }
        }
}

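/* Print size, flags and occupancy of every RX ring of a task. The ring
 * size is derived from the mask, which the DPDK 17.05 ring rework moved
 * from the producer/consumer structures up into struct rte_ring itself,
 * hence the version check below. */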
void cmd_ringinfo(uint8_t lcore_id, uint8_t task_id)
{
        struct lcore_cfg *lconf;
        struct rte_ring *ring;
        struct task_args* targ;
        uint32_t count;

        if (!prox_core_active(lcore_id, 0)) {
                plog_info("lcore %u is not active\n", lcore_id);
                return;
        }
        lconf = &lcore_cfg[lcore_id];
        if (task_id >= lconf->n_tasks_all) {
                plog_warn("Invalid task index %u: lcore %u has %u tasks\n", task_id, lcore_id, lconf->n_tasks_all);
                return;
        }

        targ = &lconf->targs[task_id];
        plog_info("Core %u task %u: %u rings\n", lcore_id, task_id, targ->nb_rxrings);
        for (uint8_t i = 0; i < targ->nb_rxrings; ++i) {
                ring = targ->rx_rings[i];
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
                count = ring->prod.mask + 1;
#else
                count = ring->mask + 1;
#endif
                plog_info("\tRing %u:\n", i);
                plog_info("\t\tFlags: %s,%s\n", ring->flags & RING_F_SP_ENQ? "sp":"mp", ring->flags & RING_F_SC_DEQ? "sc":"mc");
                plog_info("\t\tMemory size: %zu bytes\n", rte_ring_get_memsize(count));
                plog_info("\t\tOccupied: %u/%u\n", rte_ring_count(ring), count);
        }
}

void cmd_port_up(uint8_t port_id)
{
        int err;

        if (!port_is_active(port_id)) {
                return;
        }

        if ((err = rte_eth_dev_set_link_up(port_id)) == 0) {
                plog_info("Bringing port %d up\n", port_id);
        }
        else {
                plog_warn("Failed to bring port %d up with error %d\n", port_id, err);
        }
}

void cmd_port_down(uint8_t port_id)
{
        int err;

        if (!port_is_active(port_id)) {
                return;
        }

        if ((err = rte_eth_dev_set_link_down(port_id)) == 0) {
                plog_info("Bringing port %d down\n", port_id);
        }
        else {
                plog_warn("Failed to bring port %d down with error %d\n", port_id, err);
        }
}

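/* Print the extended NIC statistics. Three API generations are handled:
 * DPDK >= 16.07 exposes ids/values (rte_eth_xstat) and names
 * (rte_eth_xstat_name) through separate calls, DPDK 2.1 to 16.04 used a
 * single combined rte_eth_xstats array, and older releases had no
 * xstats API at all. */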
void cmd_xstats(uint8_t port_id)
{
#if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0)
        int n_xstats;
        struct rte_eth_xstat *eth_xstat = NULL; // id and value
        struct rte_eth_xstat_name *eth_xstat_name = NULL;       // only names
        struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
        int rc;

        n_xstats = rte_eth_xstats_get(port_id, NULL, 0);
        eth_xstat_name = prox_zmalloc(n_xstats * sizeof(*eth_xstat_name), port_cfg->socket);
        PROX_ASSERT(eth_xstat_name);
        rc = rte_eth_xstats_get_names(port_id, eth_xstat_name, n_xstats);
        if ((rc < 0) || (rc > n_xstats)) {
                if (rc < 0) {
                        plog_warn("Failed to get xstats_names on port %d with error %d\n", port_id, rc);
                } else if (rc > n_xstats) {
                        plog_warn("Failed to get xstats_names on port %d: too many xstats (%d)\n", port_id, rc);
                }
        }

        eth_xstat = prox_zmalloc(n_xstats * sizeof(*eth_xstat), port_cfg->socket);
        PROX_ASSERT(eth_xstat);
        rc = rte_eth_xstats_get(port_id, eth_xstat, n_xstats);
        if ((rc < 0) || (rc > n_xstats)) {
                if (rc < 0) {
                        plog_warn("Failed to get xstats on port %d with error %d\n", port_id, rc);
                } else if (rc > n_xstats) {
                        plog_warn("Failed to get xstats on port %d: too many xstats (%d)\n", port_id, rc);
                }
        } else {
                for (int i = 0; i < rc; i++) {
                        plog_info("%s: %ld\n", eth_xstat_name[i].name, eth_xstat[i].value);
                }
        }
        if (eth_xstat_name)
                prox_free(eth_xstat_name);
        if (eth_xstat)
                prox_free(eth_xstat);
#else
#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
        int n_xstats;
        struct rte_eth_xstats *eth_xstats;
        struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
        int rc;

        n_xstats = rte_eth_xstats_get(port_id, NULL, 0);
        eth_xstats = prox_zmalloc(n_xstats * sizeof(*eth_xstats), port_cfg->socket);
        PROX_ASSERT(eth_xstats);
        rc = rte_eth_xstats_get(port_id, eth_xstats, n_xstats);
        if ((rc < 0) || (rc > n_xstats)) {
                if (rc < 0) {
                        plog_warn("Failed to get xstats on port %d with error %d\n", port_id, rc);
                } else if (rc > n_xstats) {
                        plog_warn("Failed to get xstats on port %d: too many xstats (%d)\n", port_id, rc);
                }
        } else {
                for (int i = 0; i < rc; i++) {
                        plog_info("%s: %ld\n", eth_xstats[i].name, eth_xstats[i].value);
                }
        }
        if (eth_xstats)
                prox_free(eth_xstats);
#else
        plog_warn("Failed to get xstats, xstats are not supported in this version of dpdk\n");
#endif
#endif
}

void cmd_portinfo(int port_id, char *dst, size_t max_len)
{
        char *end = dst + max_len;

        *dst = 0;
        if (port_id == -1) {
                uint8_t max_port_idx = prox_last_port_active() + 1;

                for (uint8_t port_id = 0; port_id < max_port_idx; ++port_id) {
                        if (!prox_port_cfg[port_id].active) {
                                continue;
                        }
                        struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];

                        dst += snprintf(dst, end - dst,
                                        "%2d:%10s; "MAC_BYTES_FMT"; %s\n",
                                        port_id,
                                        port_cfg->name,
                                        MAC_BYTES(port_cfg->eth_addr.addr_bytes),
                                        port_cfg->pci_addr);
                }
                return;
        }

        if (!port_is_active(port_id)) {
                return;
        }

        struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];

        dst += snprintf(dst, end - dst, "Port info for port %u\n", port_id);
        dst += snprintf(dst, end - dst, "\tName: %s\n", port_cfg->name);
        dst += snprintf(dst, end - dst, "\tDriver: %s\n", port_cfg->driver_name);
        dst += snprintf(dst, end - dst, "\tMac address: "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
        dst += snprintf(dst, end - dst, "\tLink speed: %u Mbps\n", port_cfg->link_speed);
        dst += snprintf(dst, end - dst, "\tLink status: %s\n", port_cfg->link_up? "up" : "down");
        dst += snprintf(dst, end - dst, "\tSocket: %u\n", port_cfg->socket);
        dst += snprintf(dst, end - dst, "\tPCI address: %s\n", port_cfg->pci_addr);
        dst += snprintf(dst, end - dst, "\tPromiscuous: %s\n", port_cfg->promiscuous? "yes" : "no");
        dst += snprintf(dst, end - dst, "\tNumber of RX/TX descriptors: %u/%u\n", port_cfg->n_rxd, port_cfg->n_txd);
        dst += snprintf(dst, end - dst, "\tNumber of RX/TX queues: %u/%u (max: %u/%u)\n", port_cfg->n_rxq, port_cfg->n_txq, port_cfg->max_rxq, port_cfg->max_txq);
        dst += snprintf(dst, end - dst, "\tMemory pools:\n");

        for (uint8_t i = 0; i < 32; ++i) {
                if (port_cfg->pool[i]) {
                        dst += snprintf(dst, end - dst, "\t\tname: %s (%p)\n",
                                        port_cfg->pool[i]->name, port_cfg->pool[i]);
                }
        }
}

void cmd_read_reg(uint8_t port_id, unsigned int id)
{
        unsigned int val, rc;
        if (!port_is_active(port_id)) {
                return;
        }
        rc = read_reg(port_id, id, &val);
        if (rc) {
                plog_warn("Failed to read register %d on port %d\n", id, port_id);
        }
        else {
                plog_info("Register 0x%08X: %08X\n", id, val);
        }
}

void cmd_reset_port(uint8_t portid)
{
        int rc;
        if (!prox_port_cfg[portid].active) {
                plog_info("port not active\n");
                return;
        }
        rte_eth_dev_stop(portid);
        rc = rte_eth_dev_start(portid);
        if (rc) {
                plog_warn("Failed to restart port %d\n", portid);
        }
}

void cmd_write_reg(uint8_t port_id, unsigned int id, unsigned int val)
{
        if (!port_is_active(port_id)) {
                return;
        }

        plog_info("writing 0x%08X %08X\n", id, val);
        write_reg(port_id, id, val);
}

void cmd_set_vlan_offload(uint8_t port_id, unsigned int val)
{
        if (!port_is_active(port_id)) {
                return;
        }

        plog_info("setting vlan offload to %d\n", val);
        if (val & ~(ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD | ETH_VLAN_EXTEND_OFFLOAD)) {
                plog_info("wrong vlan offload value\n");
                return;
        }
        int ret = rte_eth_dev_set_vlan_offload(port_id, val);
        plog_info("rte_eth_dev_set_vlan_offload return %d\n", ret);
}

void cmd_set_vlan_filter(uint8_t port_id, unsigned int id, unsigned int val)
{
        if (!port_is_active(port_id)) {
                return;
        }

        plog_info("setting vlan filter for vlan %d to %d\n", id, val);
        int ret = rte_eth_dev_vlan_filter(port_id, id, val);
        plog_info("rte_eth_dev_vlan_filter return %d\n", ret);
}

void cmd_thread_info(uint8_t lcore_id, uint8_t task_id)
{
        plog_info("thread_info %u %u\n", lcore_id, task_id);
        if (lcore_id >= RTE_MAX_LCORE) {
                plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
                return;
        }
        if (!prox_core_active(lcore_id, 0)) {
                plog_warn("lcore %u is not active\n", lcore_id);
                return;
        }
        if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
                plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1);
                return;
        }
        if (strcmp(lcore_cfg[lcore_id].targs[task_id].task_init->mode_str, "qos") == 0) {
                struct task_base *task;

                task = lcore_cfg[lcore_id].tasks_all[task_id];
                plog_info("core %d, task %d: %d mbufs stored in QoS\n", lcore_id, task_id,
                          task_qos_n_pkts_buffered(task));

#ifdef ENABLE_EXTRA_USER_STATISTICS
        }
        else if (lcore_cfg[lcore_id].targs[task_id].mode == QINQ_ENCAP4) {
                struct task_qinq_encap4 *task;
                task = (struct task_qinq_encap4 *)(lcore_cfg[lcore_id].tasks_all[task_id]);
                for (int i = 0; i < task->n_users; i++) {
                        if (task->stats_per_user[i])
                                plog_info("User %d: %d packets\n", i, task->stats_per_user[i]);
                }
#endif
        }
        else {
                // Only QoS thread info so far
                plog_err("core %d, task %d: not a qos core (%p)\n", lcore_id, task_id, lcore_cfg[lcore_id].thread_x);
        }
}

void cmd_rx_tx_info(void)
{
        uint32_t lcore_id = -1;

        while (prox_core_next(&lcore_id, 0) == 0) {
                for (uint8_t task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; ++task_id) {
                        struct task_args *targ = &lcore_cfg[lcore_id].targs[task_id];

                        plog_info("Core %u:", lcore_id);
                        if (targ->rx_port_queue[0].port != OUT_DISCARD) {
                                for (int i = 0; i < targ->nb_rxports; i++) {
                                        plog_info(" RX port %u (queue %u)", targ->rx_port_queue[i].port, targ->rx_port_queue[i].queue);
                                }
                        }
                        else {
                                for (uint8_t j = 0; j < targ->nb_rxrings; ++j) {
                                        plog_info(" RX ring[%u,%u] %p", task_id, j, targ->rx_rings[j]);
                                }
                        }
                        plog_info(" ==>");
                        for (uint8_t j = 0; j < targ->nb_txports; ++j) {
                                plog_info(" TX port %u (queue %u)", targ->tx_port_queue[j].port,
                                          targ->tx_port_queue[j].queue);
                        }

                        for (uint8_t j = 0; j < targ->nb_txrings; ++j) {
                                plog_info(" TX ring %p", targ->tx_rings[j]);
                        }

                        plog_info("\n");
                }
        }
}

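/* Cache monitoring/allocation commands. These wrap Intel RDT: CQM
 * monitoring association (RMIDs, read through cqm_assoc_read()) and
 * CAT cache allocation classes of service with their way masks. */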
void cmd_get_cache_class(uint32_t lcore_id, uint32_t *set)
{
        uint64_t tmp_rmid = 0;
        cqm_assoc_read(lcore_id, &tmp_rmid);
        *set = (uint32_t)(tmp_rmid >> 32);
}

void cmd_get_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t *val)
{
        cat_get_class_mask(lcore_id, set, val);
}

void cmd_set_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t val)
{
        cat_set_class_mask(lcore_id, set, val);
        lcore_cfg[lcore_id].cache_set = set;
        uint32_t id = -1;
        while (prox_core_next(&id, 0) == 0) {
                if ((lcore_cfg[id].cache_set == set) && (rte_lcore_to_socket_id(id) == rte_lcore_to_socket_id(lcore_id))) {
                        plog_info("Updating mask for core %d to %x\n", id, val);
                        stats_update_cache_mask(id, val);
                }
        }
}

void cmd_set_cache_class(uint32_t lcore_id, uint32_t set)
{
        uint64_t tmp_rmid = 0;
        uint32_t val = 0;
        cqm_assoc_read(lcore_id, &tmp_rmid);
        cqm_assoc(lcore_id, (tmp_rmid & 0xffffffff) | ((uint64_t)set << 32));
        cat_get_class_mask(lcore_id, set, &val);
        stats_update_cache_mask(lcore_id, val);
}

void cmd_cache_reset(void)
{
        uint8_t sockets[MAX_SOCKETS] = {0};
        uint8_t cores[MAX_SOCKETS] = {0};
        uint32_t mask = (1 << cat_get_num_ways()) - 1;
        uint32_t lcore_id = -1, socket_id;

        while (prox_core_next(&lcore_id, 0) == 0) {
                cqm_assoc(lcore_id, 0);
                socket_id = rte_lcore_to_socket_id(lcore_id);
                if (socket_id < MAX_SOCKETS) {
                        sockets[socket_id] = 1;
                        cores[socket_id] = lcore_id;
                }
                stats_update_cache_mask(lcore_id, mask);
                plog_info("Setting core %d to cache mask %x\n", lcore_id, mask);
                lcore_cfg[lcore_id].cache_set = 0;
        }
        for (uint32_t s = 0; s < MAX_SOCKETS; s++) {
                if (sockets[s])
                        cat_reset_cache(cores[s]);
        }
        stats_lcore_assoc_rmid();
}

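/* Remove a task from the pipeline, presumably while it is stopped:
 * every predecessor TX ring that fed one of this task's RX rings is
 * rewired to point directly at the task's single TX ring, so traffic
 * flows around the task. Only tasks with exactly one TX ring can be
 * bypassed. */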
int bypass_task(uint32_t lcore_id, uint32_t task_id)
{
        struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
        struct task_args *targ, *starg;

        if (task_id >= lconf->n_tasks_all)
                return -1;

        targ = &lconf->targs[task_id];
        if (targ->nb_txrings == 1) {
                plog_info("Task has %d receive rings and 1 transmit ring and can be bypassed, %d preceding tasks\n", targ->nb_rxrings, targ->n_prev_tasks);
                // Find source task
                for (unsigned int i = 0; i < targ->n_prev_tasks; i++) {
                        starg = targ->prev_tasks[i];
                        for (unsigned int j = 0; j < starg->nb_txrings; j++) {
                                for (unsigned int k = 0; k < targ->nb_rxrings; k++) {
                                        if (starg->tx_rings[j] == targ->rx_rings[k]) {
                                                plog_info("bypassing ring %p and connecting it to %p\n", starg->tx_rings[j], targ->tx_rings[0]);
                                                starg->tx_rings[j] = targ->tx_rings[0];
                                                struct task_base *tbase = starg->tbase;
                                                tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
                                        }
                                }
                        }
                }
        } else {
                plog_info("Task has %d receive and %d transmit rings and cannot be bypassed\n", targ->nb_rxrings, targ->nb_txrings);
                return -1;
        }

        return 0;
}

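/* Undo bypass_task(): predecessor TX rings that were redirected to this
 * task's TX ring are pointed back at the task's RX rings, re-inserting
 * the task into the pipeline. */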
int reconnect_task(uint32_t lcore_id, uint32_t task_id)
{
        struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
        struct task_args *targ, *starg;

        if (task_id >= lconf->n_tasks_all)
                return -1;

        targ = &lconf->targs[task_id];
        if (targ->nb_txrings == 1) {
                // Find source task
                for (unsigned int i = 0; i < targ->n_prev_tasks; i++) {
                        starg = targ->prev_tasks[i];
                        for (unsigned int j = 0; j < starg->nb_txrings; j++) {
                                if (starg->tx_rings[j] == targ->tx_rings[0]) {
                                        if (targ->n_prev_tasks == targ->nb_rxrings) {
                                                starg->tx_rings[j] = targ->rx_rings[i];
                                                struct task_base *tbase = starg->tbase;
                                                tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
                                                plog_info("Task has %d receive rings and 1 transmit ring and can be reconnected, %d preceding tasks\n", targ->nb_rxrings, targ->n_prev_tasks);
                                        } else if (targ->nb_rxrings == 1) {
                                                starg->tx_rings[j] = targ->rx_rings[0];
                                                struct task_base *tbase = starg->tbase;
                                                tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j];
                                                plog_info("Task has %d receive rings and 1 transmit ring and ring %p can be reconnected, %d preceding tasks\n", targ->nb_rxrings, starg->tx_rings[j], targ->n_prev_tasks);
                                        } else {
                                                plog_err("Unexpected configuration: %d preceding tasks, %d rx rings\n", targ->n_prev_tasks, targ->nb_rxrings);
                                        }
                                }
                        }
                }
        } else {
                plog_info("Task has %d receive and %d transmit rings and cannot be reconnected\n", targ->nb_rxrings, targ->nb_txrings);
                return -1;
        }

        return 0;
}