// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
21 #include <rte_cycles.h>
22 #include <rte_ethdev.h>
23 #include <rte_ether.h>
26 #include <rte_malloc.h>
30 #include "pipeline_common_fe.h"
31 #include "pipeline_master.h"
32 #include "pipeline_passthrough.h"
33 #include "thread_fe.h"
34 #include "pipeline_vfw.h"
35 #include "pipeline_loadb.h"
36 #include "pipeline_txrx.h"
37 #include "pipeline_arpicmp.h"
38 #include "interface.h"
39 #include "l3fwd_common.h"
40 #include "l3fwd_lpm4.h"
41 #include "l3fwd_lpm6.h"
43 #include "vnf_define.h"
44 #define APP_NAME_SIZE 32
45 port_config_t *port_config;
48 app_init_core_map(struct app_params *app)
50 APP_LOG(app, HIGH, "Initializing CPU core map ...");
51 app->core_map = cpu_core_map_init(4, 32, 4, 0);
53 if (app->core_map == NULL)
54 rte_panic("Cannot create CPU core map\n");
56 if (app->log_level >= APP_LOG_LEVEL_LOW)
57 cpu_core_map_print(app->core_map);
60 /* Core Mask String in Hex Representation */
61 #define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1)
64 app_init_core_mask(struct app_params *app)
66 char core_mask_str[APP_CORE_MASK_STRING_SIZE];
69 for (i = 0; i < app->n_pipelines; i++) {
70 struct app_pipeline_params *p = &app->pipeline_params[i];
73 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
79 rte_panic("Cannot create CPU core mask\n");
81 app_core_enable_in_core_mask(app, lcore_id);
84 app_core_build_core_mask_string(app, core_mask_str);
85 APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str);
90 app_init_eal(struct app_params *app)
93 char core_mask_str[APP_CORE_MASK_STRING_SIZE];
94 struct app_eal_params *p = &app->eal_params;
99 app->eal_argv[n_args++] = strdup(app->app_name);
101 app_core_build_core_mask_string(app, core_mask_str);
102 snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str);
103 app->eal_argv[n_args++] = strdup(buffer);
106 snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
107 app->eal_argv[n_args++] = strdup(buffer);
110 if (p->master_lcore_present) {
113 "--master-lcore=%" PRIu32,
115 app->eal_argv[n_args++] = strdup(buffer);
118 snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
119 app->eal_argv[n_args++] = strdup(buffer);
121 if (p->memory_present) {
122 snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
123 app->eal_argv[n_args++] = strdup(buffer);
126 if (p->ranks_present) {
127 snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
128 app->eal_argv[n_args++] = strdup(buffer);
131 for (i = 0; i < APP_MAX_LINKS; i++) {
132 if (p->pci_blacklist[i] == NULL)
137 "--pci-blacklist=%s",
138 p->pci_blacklist[i]);
139 app->eal_argv[n_args++] = strdup(buffer);
142 if (app->port_mask != 0)
143 for (i = 0; i < APP_MAX_LINKS; i++) {
144 if (p->pci_whitelist[i] == NULL)
149 "--pci-whitelist=%s",
150 p->pci_whitelist[i]);
151 app->eal_argv[n_args++] = strdup(buffer);
154 for (i = 0; i < app->n_links; i++) {
155 char *pci_bdf = app->link_params[i].pci_bdf;
159 "--pci-whitelist=%s",
161 app->eal_argv[n_args++] = strdup(buffer);
164 for (i = 0; i < APP_MAX_LINKS; i++) {
165 if (p->vdev[i] == NULL)
172 app->eal_argv[n_args++] = strdup(buffer);
175 if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
176 snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
177 app->eal_argv[n_args++] = strdup(buffer);
185 app->eal_argv[n_args++] = strdup(buffer);
189 snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
190 app->eal_argv[n_args++] = strdup(buffer);
193 if (p->log_level_present) {
196 "--log-level=%" PRIu32,
198 app->eal_argv[n_args++] = strdup(buffer);
201 if ((p->version_present) && p->version) {
202 snprintf(buffer, sizeof(buffer), "-v");
203 app->eal_argv[n_args++] = strdup(buffer);
206 if ((p->help_present) && p->help) {
207 snprintf(buffer, sizeof(buffer), "--help");
208 app->eal_argv[n_args++] = strdup(buffer);
211 if ((p->no_huge_present) && p->no_huge) {
212 snprintf(buffer, sizeof(buffer), "--no-huge");
213 app->eal_argv[n_args++] = strdup(buffer);
216 if ((p->no_pci_present) && p->no_pci) {
217 snprintf(buffer, sizeof(buffer), "--no-pci");
218 app->eal_argv[n_args++] = strdup(buffer);
221 if ((p->no_hpet_present) && p->no_hpet) {
222 snprintf(buffer, sizeof(buffer), "--no-hpet");
223 app->eal_argv[n_args++] = strdup(buffer);
226 if ((p->no_shconf_present) && p->no_shconf) {
227 snprintf(buffer, sizeof(buffer), "--no-shconf");
228 app->eal_argv[n_args++] = strdup(buffer);
232 snprintf(buffer, sizeof(buffer), "-d=%s", p->add_driver);
233 app->eal_argv[n_args++] = strdup(buffer);
241 app->eal_argv[n_args++] = strdup(buffer);
245 snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
246 app->eal_argv[n_args++] = strdup(buffer);
249 if (p->file_prefix) {
254 app->eal_argv[n_args++] = strdup(buffer);
257 if (p->base_virtaddr) {
260 "--base-virtaddr=%s",
262 app->eal_argv[n_args++] = strdup(buffer);
265 if ((p->create_uio_dev_present) && p->create_uio_dev) {
266 snprintf(buffer, sizeof(buffer), "--create-uio-dev");
267 app->eal_argv[n_args++] = strdup(buffer);
275 app->eal_argv[n_args++] = strdup(buffer);
278 if ((p->xen_dom0_present) && (p->xen_dom0)) {
279 snprintf(buffer, sizeof(buffer), "--xen-dom0");
280 app->eal_argv[n_args++] = strdup(buffer);
283 snprintf(buffer, sizeof(buffer), "--");
284 app->eal_argv[n_args++] = strdup(buffer);
286 app->eal_argc = n_args;
288 APP_LOG(app, HIGH, "Initializing EAL ...");
289 if (app->log_level >= APP_LOG_LEVEL_LOW) {
292 fprintf(stdout, "[APP] EAL arguments: \"");
293 for (i = 1; i < app->eal_argc; i++)
294 fprintf(stdout, "%s ", app->eal_argv[i]);
295 fprintf(stdout, "\"\n");
298 status = rte_eal_init(app->eal_argc, app->eal_argv);
300 rte_panic("EAL init error\n");
303 app_link_filter_arp_add(struct app_link_params *link)
305 struct rte_eth_ethertype_filter filter = {
306 .ether_type = ETHER_TYPE_ARP,
308 .queue = link->arp_q,
311 return rte_eth_dev_filter_ctrl(link->pmd_id,
312 RTE_ETH_FILTER_ETHERTYPE,
318 app_link_filter_tcp_syn_add(struct app_link_params *link)
320 struct rte_eth_syn_filter filter = {
322 .queue = link->tcp_syn_q,
325 return rte_eth_dev_filter_ctrl(link->pmd_id,
332 app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
334 struct rte_eth_ntuple_filter filter = {
335 .flags = RTE_5TUPLE_FLAGS,
336 .dst_ip = rte_bswap32(l2->ip),
337 .dst_ip_mask = UINT32_MAX, /* Enable */
339 .src_ip_mask = 0, /* Disable */
341 .dst_port_mask = 0, /* Disable */
343 .src_port_mask = 0, /* Disable */
345 .proto_mask = 0, /* Disable */
347 .priority = 1, /* Lowest */
348 .queue = l1->ip_local_q,
351 return rte_eth_dev_filter_ctrl(l1->pmd_id,
352 RTE_ETH_FILTER_NTUPLE,
358 app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
360 struct rte_eth_ntuple_filter filter = {
361 .flags = RTE_5TUPLE_FLAGS,
362 .dst_ip = rte_bswap32(l2->ip),
363 .dst_ip_mask = UINT32_MAX, /* Enable */
365 .src_ip_mask = 0, /* Disable */
367 .dst_port_mask = 0, /* Disable */
369 .src_port_mask = 0, /* Disable */
371 .proto_mask = 0, /* Disable */
373 .priority = 1, /* Lowest */
374 .queue = l1->ip_local_q,
377 return rte_eth_dev_filter_ctrl(l1->pmd_id,
378 RTE_ETH_FILTER_NTUPLE,
379 RTE_ETH_FILTER_DELETE,
384 app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
386 struct rte_eth_ntuple_filter filter = {
387 .flags = RTE_5TUPLE_FLAGS,
388 .dst_ip = rte_bswap32(l2->ip),
389 .dst_ip_mask = UINT32_MAX, /* Enable */
391 .src_ip_mask = 0, /* Disable */
393 .dst_port_mask = 0, /* Disable */
395 .src_port_mask = 0, /* Disable */
396 .proto = IPPROTO_TCP,
397 .proto_mask = UINT8_MAX, /* Enable */
399 .priority = 2, /* Higher priority than IP */
400 .queue = l1->tcp_local_q,
403 return rte_eth_dev_filter_ctrl(l1->pmd_id,
404 RTE_ETH_FILTER_NTUPLE,
410 app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
412 struct rte_eth_ntuple_filter filter = {
413 .flags = RTE_5TUPLE_FLAGS,
414 .dst_ip = rte_bswap32(l2->ip),
415 .dst_ip_mask = UINT32_MAX, /* Enable */
417 .src_ip_mask = 0, /* Disable */
419 .dst_port_mask = 0, /* Disable */
421 .src_port_mask = 0, /* Disable */
422 .proto = IPPROTO_TCP,
423 .proto_mask = UINT8_MAX, /* Enable */
425 .priority = 2, /* Higher priority than IP */
426 .queue = l1->tcp_local_q,
429 return rte_eth_dev_filter_ctrl(l1->pmd_id,
430 RTE_ETH_FILTER_NTUPLE,
431 RTE_ETH_FILTER_DELETE,
436 app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
438 struct rte_eth_ntuple_filter filter = {
439 .flags = RTE_5TUPLE_FLAGS,
440 .dst_ip = rte_bswap32(l2->ip),
441 .dst_ip_mask = UINT32_MAX, /* Enable */
443 .src_ip_mask = 0, /* Disable */
445 .dst_port_mask = 0, /* Disable */
447 .src_port_mask = 0, /* Disable */
448 .proto = IPPROTO_UDP,
449 .proto_mask = UINT8_MAX, /* Enable */
451 .priority = 2, /* Higher priority than IP */
452 .queue = l1->udp_local_q,
455 return rte_eth_dev_filter_ctrl(l1->pmd_id,
456 RTE_ETH_FILTER_NTUPLE,
462 app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
464 struct rte_eth_ntuple_filter filter = {
465 .flags = RTE_5TUPLE_FLAGS,
466 .dst_ip = rte_bswap32(l2->ip),
467 .dst_ip_mask = UINT32_MAX, /* Enable */
469 .src_ip_mask = 0, /* Disable */
471 .dst_port_mask = 0, /* Disable */
473 .src_port_mask = 0, /* Disable */
474 .proto = IPPROTO_UDP,
475 .proto_mask = UINT8_MAX, /* Enable */
477 .priority = 2, /* Higher priority than IP */
478 .queue = l1->udp_local_q,
481 return rte_eth_dev_filter_ctrl(l1->pmd_id,
482 RTE_ETH_FILTER_NTUPLE,
483 RTE_ETH_FILTER_DELETE,
488 app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
490 struct rte_eth_ntuple_filter filter = {
491 .flags = RTE_5TUPLE_FLAGS,
492 .dst_ip = rte_bswap32(l2->ip),
493 .dst_ip_mask = UINT32_MAX, /* Enable */
495 .src_ip_mask = 0, /* Disable */
497 .dst_port_mask = 0, /* Disable */
499 .src_port_mask = 0, /* Disable */
500 .proto = IPPROTO_SCTP,
501 .proto_mask = UINT8_MAX, /* Enable */
503 .priority = 2, /* Higher priority than IP */
504 .queue = l1->sctp_local_q,
507 return rte_eth_dev_filter_ctrl(l1->pmd_id,
508 RTE_ETH_FILTER_NTUPLE,
514 app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
516 struct rte_eth_ntuple_filter filter = {
517 .flags = RTE_5TUPLE_FLAGS,
518 .dst_ip = rte_bswap32(l2->ip),
519 .dst_ip_mask = UINT32_MAX, /* Enable */
521 .src_ip_mask = 0, /* Disable */
523 .dst_port_mask = 0, /* Disable */
525 .src_port_mask = 0, /* Disable */
526 .proto = IPPROTO_SCTP,
527 .proto_mask = UINT8_MAX, /* Enable */
529 .priority = 2, /* Higher priority than IP */
530 .queue = l1->sctp_local_q,
533 return rte_eth_dev_filter_ctrl(l1->pmd_id,
534 RTE_ETH_FILTER_NTUPLE,
535 RTE_ETH_FILTER_DELETE,
540 app_link_is_virtual(struct app_link_params *p)
542 uint32_t pmd_id = p->pmd_id;
543 struct rte_eth_dev *dev = &rte_eth_devices[pmd_id];
545 if (dev->dev_type == RTE_ETH_DEV_VIRTUAL)
553 app_link_up_internal(__rte_unused struct app_params *app,
554 struct app_link_params *cp)
556 if(app == NULL || cp == NULL)
557 printf("NULL Pointers");
559 if (app_link_is_virtual(cp)) {
564 ifm_update_linkstatus(cp->pmd_id, IFM_ETH_LINK_UP);
566 /* Mark link as UP */
571 app_link_down_internal(__rte_unused struct app_params *app,
572 struct app_link_params *cp)
574 if(app == NULL || cp == NULL)
575 printf("NULL Pointers");
577 if (app_link_is_virtual(cp)) {
582 ifm_update_linkstatus(cp->pmd_id, IFM_ETH_LINK_DOWN);
583 /* Mark link as DOWN */
589 app_check_link(struct app_params *app)
591 uint32_t all_links_up, i;
595 for (i = 0; i < app->n_links; i++) {
596 struct app_link_params *p = &app->link_params[i];
597 struct rte_eth_link link_params;
599 memset(&link_params, 0, sizeof(link_params));
600 rte_eth_link_get(p->pmd_id, &link_params);
602 APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
605 link_params.link_speed / 1000,
606 link_params.link_status ? "UP" : "DOWN");
608 if (link_params.link_status == ETH_LINK_DOWN)
612 if (all_links_up == 0)
613 rte_panic("Some links are DOWN\n");
617 is_any_swq_frag_or_ras(struct app_params *app)
621 for (i = 0; i < app->n_pktq_swq; i++) {
622 struct app_pktq_swq_params *p = &app->swq_params[i];
624 if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
625 (p->ipv4_ras == 1) || (p->ipv6_ras == 1))
633 app_init_link_frag_ras(struct app_params *app)
637 if (is_any_swq_frag_or_ras(app)) {
638 for (i = 0; i < app->n_pktq_hwq_out; i++) {
639 struct app_pktq_hwq_out_params *p_txq =
640 &app->hwq_out_params[i];
642 p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
648 app_get_cpu_socket_id(uint32_t pmd_id)
650 int status = rte_eth_dev_socket_id(pmd_id);
652 return (status != SOCKET_ID_ANY) ? status : 0;
655 struct rte_eth_rxmode rx_mode = {
656 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
658 .header_split = 0, /**< Header Split disabled. */
659 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
660 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
661 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
662 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
663 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
664 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
666 struct rte_fdir_conf fdir_conf = {
667 .mode = RTE_FDIR_MODE_NONE,
668 .pballoc = RTE_FDIR_PBALLOC_64K,
669 .status = RTE_FDIR_REPORT_STATUS,
671 .vlan_tci_mask = 0x0,
673 .src_ip = 0xFFFFFFFF,
674 .dst_ip = 0xFFFFFFFF,
677 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
678 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
680 .src_port_mask = 0xFFFF,
681 .dst_port_mask = 0xFFFF,
682 .mac_addr_byte_mask = 0xFF,
683 .tunnel_type_mask = 1,
684 .tunnel_id_mask = 0xFFFFFFFF,
690 app_init_link(struct app_params *app)
694 app_init_link_frag_ras(app);
696 /* Configuring port_config_t structure for interface
697 * manager initialization
699 size = RTE_CACHE_LINE_ROUNDUP(sizeof(port_config_t));
700 port_config = rte_zmalloc(NULL, (app->n_links * size),
701 RTE_CACHE_LINE_SIZE);
702 if (port_config == NULL)
703 rte_panic("port_config is NULL: Memory Allocation failure\n");
705 for (i = 0; i < app->n_links; i++) {
706 struct app_link_params *p_link = &app->link_params[i];
707 uint32_t link_id, n_hwq_in, n_hwq_out;
710 status = sscanf(p_link->name, "LINK%" PRIu32, &link_id);
712 rte_panic("%s (%" PRId32 "): "
713 "init error (%" PRId32 ")\n",
714 p_link->name, link_id, status);
716 n_hwq_in = app_link_get_n_rxq(app, p_link);
717 n_hwq_out = app_link_get_n_txq(app, p_link);
719 printf("\n\nn_hwq_in %d\n", n_hwq_in);
720 struct rte_eth_conf *My_local_conf = &p_link->conf;
722 My_local_conf->rxmode = rx_mode;
723 My_local_conf->fdir_conf = fdir_conf;
724 My_local_conf->rxmode.mq_mode = ETH_MQ_RX_RSS;
725 My_local_conf->rx_adv_conf.rss_conf.rss_key = NULL;
726 My_local_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP
727 | ETH_RSS_UDP | ETH_RSS_TCP;
728 } else {/* disable-rss */
729 My_local_conf->rx_adv_conf.rss_conf.rss_hf = 0;
730 /* pkt-filter-mode is perfect */
731 My_local_conf->fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
734 /* Set the hardware CRC stripping to avoid double stripping
736 p_link->conf.rxmode.hw_strip_crc = 1;
738 APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
739 "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
745 port_config[i].port_id = p_link->pmd_id;
746 port_config[i].nrx_queue = n_hwq_in;
747 port_config[i].ntx_queue = n_hwq_out;
748 port_config[i].state = 1;
749 port_config[i].promisc = p_link->promisc;
750 port_config[i].mempool.pool_size =
751 app->mempool_params[0].pool_size;
752 port_config[i].mempool.buffer_size =
753 app->mempool_params[0].buffer_size;
754 port_config[i].mempool.cache_size =
755 app->mempool_params[0].cache_size;
756 port_config[i].mempool.cpu_socket_id =
757 app->mempool_params[0].cpu_socket_id;
758 memcpy(&port_config[i].port_conf, &p_link->conf,
759 sizeof(struct rte_eth_conf));
760 memcpy(&port_config[i].rx_conf, &app->hwq_in_params[0].conf,
761 sizeof(struct rte_eth_rxconf));
762 memcpy(&port_config[i].tx_conf, &app->hwq_out_params[0].conf,
763 sizeof(struct rte_eth_txconf));
765 if (app->header_csum_req) {
766 /* Enable TCP and UDP HW Checksum */
767 port_config[i].tx_conf.txq_flags &=
768 ~(ETH_TXQ_FLAGS_NOXSUMTCP |
769 ETH_TXQ_FLAGS_NOXSUMUDP);
772 if (ifm_port_setup(p_link->pmd_id, &port_config[i]))
773 rte_panic("Port Setup Failed: %s - %" PRIu32
774 "\n", p_link->name, p_link->pmd_id);
776 app_link_up_internal(app, p_link);
783 app_init_swq(struct app_params *app)
787 for (i = 0; i < app->n_pktq_swq; i++) {
788 struct app_pktq_swq_params *p = &app->swq_params[i];
789 unsigned int flags = 0;
791 if (app_swq_get_readers(app, p) == 1)
792 flags |= RING_F_SC_DEQ;
793 if (app_swq_get_writers(app, p) == 1)
794 flags |= RING_F_SP_ENQ;
796 APP_LOG(app, HIGH, "Initializing %s...", p->name);
797 app->swq[i] = rte_ring_create(
803 if (app->swq[i] == NULL)
804 rte_panic("%s init error\n", p->name);
809 app_init_tm(struct app_params *app)
813 for (i = 0; i < app->n_pktq_tm; i++) {
814 struct app_pktq_tm_params *p_tm = &app->tm_params[i];
815 struct app_link_params *p_link;
816 struct rte_eth_link link_eth_params;
817 struct rte_sched_port *sched;
818 uint32_t n_subports, subport_id;
821 p_link = app_get_link_for_tm(app, p_tm);
823 rte_eth_link_get(p_link->pmd_id, &link_eth_params);
826 p_tm->sched_port_params.name = p_tm->name;
827 p_tm->sched_port_params.socket =
828 app_get_cpu_socket_id(p_link->pmd_id);
829 p_tm->sched_port_params.rate =
830 (uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;
832 APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
833 sched = rte_sched_port_config(&p_tm->sched_port_params);
835 rte_panic("%s init error\n", p_tm->name);
839 n_subports = p_tm->sched_port_params.n_subports_per_port;
840 for (subport_id = 0; subport_id < n_subports; subport_id++) {
841 uint32_t n_pipes_per_subport, pipe_id;
843 status = rte_sched_subport_config(sched,
845 &p_tm->sched_subport_params[subport_id]);
847 rte_panic("%s subport %" PRIu32
848 " init error (%" PRId32 ")\n",
849 p_tm->name, subport_id, status);
852 n_pipes_per_subport =
853 p_tm->sched_port_params.n_pipes_per_subport;
855 pipe_id < n_pipes_per_subport;
857 int profile_id = p_tm->sched_pipe_to_profile[
858 subport_id * APP_MAX_SCHED_PIPES +
861 if (profile_id == -1)
864 status = rte_sched_pipe_config(sched,
869 rte_panic("%s subport %" PRIu32
871 " (profile %" PRId32 ") "
872 "init error (% " PRId32 ")\n",
873 p_tm->name, subport_id, pipe_id,
881 app_init_msgq(struct app_params *app)
885 for (i = 0; i < app->n_msgq; i++) {
886 struct app_msgq_params *p = &app->msgq_params[i];
888 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
889 app->msgq[i] = rte_ring_create(
893 RING_F_SP_ENQ | RING_F_SC_DEQ);
895 if (app->msgq[i] == NULL)
896 rte_panic("%s init error\n", p->name);
900 static void app_pipeline_params_get(struct app_params *app,
901 struct app_pipeline_params *p_in,
902 struct pipeline_params *p_out)
907 snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
909 p_out->socket_id = (int) p_in->socket_id;
911 p_out->log_level = app->log_level;
914 p_out->n_ports_in = p_in->n_pktq_in;
915 for (i = 0; i < p_in->n_pktq_in; i++) {
916 struct app_pktq_in_params *in = &p_in->pktq_in[i];
917 struct pipeline_port_in_params *out = &p_out->port_in[i];
920 case APP_PKTQ_IN_HWQ:
922 struct app_pktq_hwq_in_params *p_hwq_in =
923 &app->hwq_in_params[in->id];
924 struct app_link_params *p_link =
925 app_get_link_for_rxq(app, p_hwq_in);
926 uint32_t rxq_link_id, rxq_queue_id;
929 sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
933 rte_panic("%s (%" PRId32 "): "
934 "init error (%" PRId32 ")\n",
935 p_hwq_in->name, rxq_link_id, status);
937 out->type = PIPELINE_PORT_IN_ETHDEV_READER;
938 out->params.ethdev.port_id = p_link->pmd_id;
939 out->params.ethdev.queue_id = rxq_queue_id;
940 out->burst_size = p_hwq_in->burst;
943 case APP_PKTQ_IN_SWQ:
945 struct app_pktq_swq_params *swq_params =
946 &app->swq_params[in->id];
948 if ((swq_params->ipv4_frag == 0) &&
949 (swq_params->ipv6_frag == 0)) {
950 if (app_swq_get_readers(app,
953 PIPELINE_PORT_IN_RING_READER;
954 out->params.ring.ring =
957 app->swq_params[in->id].
960 out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
961 out->params.ring_multi.ring = app->swq[in->id];
962 out->burst_size = swq_params->burst_read;
965 if (swq_params->ipv4_frag == 1) {
966 struct rte_port_ring_reader_ipv4_frag_params
968 &out->params.ring_ipv4_frag;
971 PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
972 params->ring = app->swq[in->id];
973 params->mtu = swq_params->mtu;
974 params->metadata_size =
975 swq_params->metadata_size;
976 params->pool_direct =
978 [swq_params->mempool_direct_id];
979 params->pool_indirect =
981 [swq_params->mempool_indirect_id];
982 out->burst_size = swq_params->burst_read;
984 struct rte_port_ring_reader_ipv6_frag_params
986 &out->params.ring_ipv6_frag;
989 PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
990 params->ring = app->swq[in->id];
991 params->mtu = swq_params->mtu;
992 params->metadata_size =
993 swq_params->metadata_size;
994 params->pool_direct =
996 [swq_params->mempool_direct_id];
997 params->pool_indirect =
999 [swq_params->mempool_indirect_id];
1000 out->burst_size = swq_params->burst_read;
1005 case APP_PKTQ_IN_TM:
1006 out->type = PIPELINE_PORT_IN_SCHED_READER;
1007 out->params.sched.sched = app->tm[in->id];
1008 out->burst_size = app->tm_params[in->id].burst_read;
1010 case APP_PKTQ_IN_SOURCE:
1011 mempool_id = app->source_params[in->id].mempool_id;
1012 out->type = PIPELINE_PORT_IN_SOURCE;
1013 out->params.source.mempool = app->mempool[mempool_id];
1014 out->burst_size = app->source_params[in->id].burst;
1017 if (app->source_params[in->id].file_name
1019 out->params.source.file_name = strdup(
1020 app->source_params[in->id].
1022 if (out->params.source.file_name == NULL) {
1024 n_bytes_per_pkt = 0;
1027 out->params.source.n_bytes_per_pkt =
1028 app->source_params[in->id].
1040 p_out->n_ports_out = p_in->n_pktq_out;
1041 for (i = 0; i < p_in->n_pktq_out; i++) {
1042 struct app_pktq_out_params *in = &p_in->pktq_out[i];
1043 struct pipeline_port_out_params *out = &p_out->port_out[i];
1046 case APP_PKTQ_OUT_HWQ:
1048 struct app_pktq_hwq_out_params *p_hwq_out =
1049 &app->hwq_out_params[in->id];
1050 struct app_link_params *p_link =
1051 app_get_link_for_txq(app, p_hwq_out);
1052 uint32_t txq_link_id, txq_queue_id;
1055 sscanf(p_hwq_out->name,
1056 "TXQ%" SCNu32 ".%" SCNu32,
1060 rte_panic("%s (%" PRId32 "): "
1061 "init error (%" PRId32 ")\n",
1062 p_hwq_out->name, txq_link_id, status);
1064 if (p_hwq_out->dropless == 0) {
1065 struct rte_port_ethdev_writer_params *params =
1066 &out->params.ethdev;
1068 out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
1069 params->port_id = p_link->pmd_id;
1070 params->queue_id = txq_queue_id;
1071 params->tx_burst_sz =
1072 app->hwq_out_params[in->id].burst;
1074 struct rte_port_ethdev_writer_nodrop_params
1075 *params = &out->params.ethdev_nodrop;
1078 PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
1079 params->port_id = p_link->pmd_id;
1080 params->queue_id = txq_queue_id;
1081 params->tx_burst_sz = p_hwq_out->burst;
1082 params->n_retries = p_hwq_out->n_retries;
1086 case APP_PKTQ_OUT_SWQ:
1088 struct app_pktq_swq_params *swq_params =
1089 &app->swq_params[in->id];
1091 if ((swq_params->ipv4_ras == 0) &&
1092 (swq_params->ipv6_ras == 0)) {
1093 if (app_swq_get_writers(app, swq_params) == 1) {
1094 if (app->swq_params[in->id].dropless == 0) {
1095 struct rte_port_ring_writer_params
1096 *params = &out->params.ring;
1098 out->type = PIPELINE_PORT_OUT_RING_WRITER;
1099 params->ring = app->swq[in->id];
1100 params->tx_burst_sz =
1101 app->swq_params[in->id].burst_write;
1103 struct rte_port_ring_writer_nodrop_params
1104 *params = &out->params.ring_nodrop;
1107 PIPELINE_PORT_OUT_RING_WRITER_NODROP;
1108 params->ring = app->swq[in->id];
1109 params->tx_burst_sz =
1110 app->swq_params[in->id].burst_write;
1112 app->swq_params[in->id].n_retries;
1115 if (swq_params->dropless == 0) {
1116 struct rte_port_ring_multi_writer_params
1118 &out->params.ring_multi;
1121 PIPELINE_PORT_OUT_RING_MULTI_WRITER;
1122 params->ring = app->swq[in->id];
1123 params->tx_burst_sz = swq_params->burst_write;
1125 struct rte_port_ring_multi_writer_nodrop_params
1127 &out->params.ring_multi_nodrop;
1130 PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;
1132 params->ring = app->swq[in->id];
1133 params->tx_burst_sz = swq_params->burst_write;
1134 params->n_retries = swq_params->n_retries;
1138 if (swq_params->ipv4_ras == 1) {
1139 struct rte_port_ring_writer_ipv4_ras_params
1141 &out->params.ring_ipv4_ras;
1144 PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
1145 params->ring = app->swq[in->id];
1146 params->tx_burst_sz = swq_params->burst_write;
1148 struct rte_port_ring_writer_ipv6_ras_params
1150 &out->params.ring_ipv6_ras;
1153 PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
1154 params->ring = app->swq[in->id];
1155 params->tx_burst_sz = swq_params->burst_write;
1160 case APP_PKTQ_OUT_TM: {
1161 struct rte_port_sched_writer_params *params =
1164 out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
1165 params->sched = app->tm[in->id];
1166 params->tx_burst_sz =
1167 app->tm_params[in->id].burst_write;
1170 case APP_PKTQ_OUT_SINK:
1171 out->type = PIPELINE_PORT_OUT_SINK;
1172 if (app->sink_params[in->id].file_name != NULL) {
1173 out->params.sink.file_name = strdup(
1174 app->sink_params[in->id].
1176 if (out->params.sink.file_name == NULL) {
1177 out->params.sink.max_n_pkts = 0;
1180 out->params.sink.max_n_pkts =
1181 app->sink_params[in->id].
1184 out->params.sink.file_name = NULL;
1185 out->params.sink.max_n_pkts = 0;
1194 p_out->n_msgq = p_in->n_msgq_in;
1196 for (i = 0; i < p_in->n_msgq_in; i++)
1197 p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];
1199 for (i = 0; i < p_in->n_msgq_out; i++)
1200 p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];
1203 p_out->n_args = p_in->n_args;
1204 for (i = 0; i < p_in->n_args; i++) {
1205 p_out->args_name[i] = p_in->args_name[i];
1206 p_out->args_value[i] = p_in->args_value[i];
1211 app_init_pipelines(struct app_params *app)
1215 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1216 struct app_pipeline_params *params =
1217 &app->pipeline_params[p_id];
1218 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1219 struct pipeline_type *ptype;
1220 struct pipeline_params pp;
1222 APP_LOG(app, HIGH, "Initializing %s ...", params->name);
1224 ptype = app_pipeline_type_find(app, params->type);
1226 rte_panic("Init error: Unknown pipeline type \"%s\"\n",
1229 app_pipeline_params_get(app, params, &pp);
1233 if (ptype->be_ops->f_init) {
1234 data->be = ptype->be_ops->f_init(&pp, (void *) app);
1236 if (data->be == NULL)
1237 rte_panic("Pipeline instance \"%s\" back-end "
1238 "init error\n", params->name);
1243 if (ptype->fe_ops->f_init) {
1244 data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
1246 if (data->fe == NULL)
1247 rte_panic("Pipeline instance \"%s\" front-end "
1248 "init error\n", params->name);
1251 data->ptype = ptype;
1253 data->timer_period = (rte_get_tsc_hz() *
1254 params->timer_period) / 100;
1259 app_init_threads(struct app_params *app)
1261 uint64_t time = rte_get_tsc_cycles();
1264 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1265 struct app_pipeline_params *params =
1266 &app->pipeline_params[p_id];
1267 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1268 struct pipeline_type *ptype;
1269 struct app_thread_data *t;
1270 struct app_thread_pipeline_data *p;
1273 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1276 params->hyper_th_id);
1279 rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
1282 (params->hyper_th_id) ? "h" : "");
1284 t = &app->thread_data[lcore_id];
1286 t->timer_period = (rte_get_tsc_hz() *
1287 APP_THREAD_TIMER_PERIOD) / DIV_CONV_HZ_SEC;
1288 t->thread_req_deadline = time + t->timer_period;
1290 t->headroom_cycles = 0;
1291 t->headroom_time = rte_get_tsc_cycles();
1292 t->headroom_ratio = 0.0;
1294 t->msgq_in = app_thread_msgq_in_get(app,
1297 params->hyper_th_id);
1298 if (t->msgq_in == NULL)
1299 rte_panic("Init error: Cannot find MSGQ_IN "
1300 "for thread %" PRId32, lcore_id);
1302 t->msgq_out = app_thread_msgq_out_get(app,
1305 params->hyper_th_id);
1306 if (t->msgq_out == NULL)
1307 rte_panic("Init error: Cannot find MSGQ_OUT "
1308 "for thread %" PRId32, lcore_id);
1310 ptype = app_pipeline_type_find(app, params->type);
1312 rte_panic("Init error: Unknown pipeline "
1313 "type \"%s\"\n", params->type);
1315 p = (ptype->be_ops->f_run == NULL) ?
1316 &t->regular[t->n_regular] :
1317 &t->custom[t->n_custom];
1319 p->pipeline_id = p_id;
1321 p->f_run = ptype->be_ops->f_run;
1322 p->f_timer = ptype->be_ops->f_timer;
1323 p->timer_period = data->timer_period;
1324 p->deadline = time + data->timer_period;
1328 if (ptype->be_ops->f_run == NULL)
1335 int app_init(struct app_params *app)
1337 app_init_core_map(app);
1338 app_init_core_mask(app);
1342 /*app_init_mempool(app);*/
1348 app_pipeline_common_cmd_push(app);
1349 app_pipeline_thread_cmd_push(app);
1350 app_pipeline_type_register(app, &pipeline_master);
1351 app_pipeline_type_register(app, &pipeline_passthrough);
1352 app_pipeline_type_register(app, &pipeline_vfw);
1353 app_pipeline_type_register(app, &pipeline_loadb);
1354 app_pipeline_type_register(app, &pipeline_txrx);
1355 app_pipeline_type_register(app, &pipeline_arpicmp);
1357 app_init_pipelines(app);
1358 app_init_threads(app);
1363 populate_lpm_routes();
1364 print_interface_details();
1370 app_pipeline_type_cmd_push(struct app_params *app,
1371 struct pipeline_type *ptype)
1373 cmdline_parse_ctx_t *cmds;
1376 /* Check input arguments */
1377 if ((app == NULL) ||
1381 n_cmds = pipeline_type_cmds_count(ptype);
1385 cmds = ptype->fe_ops->cmds;
1387 /* Check for available slots in the application commands array */
1388 if (n_cmds > APP_MAX_CMDS - app->n_cmds)
1391 /* Push pipeline commands into the application */
1392 memcpy(&app->cmds[app->n_cmds],
1394 n_cmds * sizeof(cmdline_parse_ctx_t));
1396 for (i = 0; i < n_cmds; i++)
1397 app->cmds[app->n_cmds + i]->data = app;
1399 app->n_cmds += n_cmds;
1400 app->cmds[app->n_cmds] = NULL;
1406 app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
1410 /* Check input arguments */
1411 if ((app == NULL) ||
1413 (ptype->name == NULL) ||
1414 (strlen(ptype->name) == 0) ||
1415 (ptype->be_ops->f_init == NULL) ||
1416 (ptype->be_ops->f_timer == NULL))
1419 /* Check for duplicate entry */
1420 for (i = 0; i < app->n_pipeline_types; i++)
1421 if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)
1424 /* Check for resource availability */
1425 n_cmds = pipeline_type_cmds_count(ptype);
1426 if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
1427 (n_cmds > APP_MAX_CMDS - app->n_cmds))
1430 /* Copy pipeline type */
1431 memcpy(&app->pipeline_type[app->n_pipeline_types++],
1433 sizeof(struct pipeline_type));
1435 /* Copy CLI commands */
1437 app_pipeline_type_cmd_push(app, ptype);
1443 pipeline_type *app_pipeline_type_find(struct app_params *app, char *name)
1447 for (i = 0; i < app->n_pipeline_types; i++)
1448 if (strcmp(app->pipeline_type[i].name, name) == 0)
1449 return &app->pipeline_type[i];