// Copyright (c) 2017 Intel Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_version.h>

#include "pipeline_common_fe.h"
#include "pipeline_master.h"
#include "pipeline_passthrough.h"
#include "thread_fe.h"
#include "pipeline_vfw.h"
#include "pipeline_loadb.h"
#include "pipeline_txrx.h"
#include "pipeline_arpicmp.h"
#include "interface.h"
#include "l3fwd_common.h"
#include "l3fwd_lpm4.h"
#include "l3fwd_lpm6.h"
#include "vnf_define.h"

#define APP_NAME_SIZE 32

port_config_t *port_config;
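
/*
 * Build the CPU core map used to translate the (socket, core, hyper-thread)
 * placement from the configuration into EAL lcore ids. The map below is
 * sized for up to 4 sockets, 32 cores per socket and 4 hyper-threads per
 * core.
 */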
app_init_core_map(struct app_params *app)
	APP_LOG(app, HIGH, "Initializing CPU core map ...");
	app->core_map = cpu_core_map_init(4, 32, 4, 0);

	if (app->core_map == NULL)
		rte_panic("Cannot create CPU core map\n");

	if (app->log_level >= APP_LOG_LEVEL_LOW)
		cpu_core_map_print(app->core_map);
/* Core mask string in hex representation */
#define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1)
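
/*
 * Derive the application core mask from the lcores assigned to the
 * configured pipelines; the resulting hex string is later passed to the
 * EAL as the -c argument.
 */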
app_init_core_mask(struct app_params *app)
	char core_mask_str[APP_CORE_MASK_STRING_SIZE];

	for (i = 0; i < app->n_pipelines; i++) {
		struct app_pipeline_params *p = &app->pipeline_params[i];

		lcore_id = cpu_core_map_get_lcore_id(app->core_map,
			rte_panic("Cannot create CPU core mask\n");

		app_core_enable_in_core_mask(app, lcore_id);

	app_core_build_core_mask_string(app, core_mask_str);
	APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str);
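
/*
 * Translate the [EAL] section of the configuration into an argv vector and
 * call rte_eal_init(). Every optional parameter follows the same pattern:
 * format the option into a scratch buffer, bound-check n_args against
 * APP_EAL_ARGC, then strdup() the buffer into app->eal_argv[].
 *
 * For illustration only (sketch, not part of this file), a configuration
 * such as:
 *
 *     [EAL]
 *     n = 4
 *     no_huge = 1
 *
 * would roughly expand to "<app_name> -c<core_mask> -n4 --no-huge --".
 */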
app_init_eal(struct app_params *app)
	char core_mask_str[APP_CORE_MASK_STRING_SIZE];
	struct app_eal_params *p = &app->eal_params;

	if (unlikely(n_args >= APP_EAL_ARGC))
	app->eal_argv[n_args++] = strdup(app->app_name);

	app_core_build_core_mask_string(app, core_mask_str);
	snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str);
	if (unlikely(n_args >= APP_EAL_ARGC))
	app->eal_argv[n_args++] = strdup(buffer);

	snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
	if (unlikely(n_args >= APP_EAL_ARGC))
	app->eal_argv[n_args++] = strdup(buffer);

	if (p->master_lcore_present) {
			"--master-lcore=%" PRIu32,
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
	if (unlikely(n_args >= APP_EAL_ARGC))
	app->eal_argv[n_args++] = strdup(buffer);

	if (p->memory_present) {
		snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if (p->ranks_present) {
		snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	for (i = 0; i < APP_MAX_LINKS; i++) {
		if (p->pci_blacklist[i] == NULL)

			"--pci-blacklist=%s",
			p->pci_blacklist[i]);
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if (app->port_mask != 0)
		for (i = 0; i < APP_MAX_LINKS; i++) {
			if (p->pci_whitelist[i] == NULL)

				"--pci-whitelist=%s",
				p->pci_whitelist[i]);
			if (unlikely(n_args >= APP_EAL_ARGC))
			app->eal_argv[n_args++] = strdup(buffer);

	for (i = 0; i < app->n_links; i++) {
		char *pci_bdf = app->link_params[i].pci_bdf;

			"--pci-whitelist=%s",
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	for (i = 0; i < APP_MAX_LINKS; i++) {
		if (p->vdev[i] == NULL)

		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
		snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if (unlikely(n_args >= APP_EAL_ARGC))
	app->eal_argv[n_args++] = strdup(buffer);

	snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
	if (unlikely(n_args >= APP_EAL_ARGC))
	app->eal_argv[n_args++] = strdup(buffer);

	if (p->log_level_present) {
			"--log-level=%" PRIu32,
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if ((p->version_present) && p->version) {
		snprintf(buffer, sizeof(buffer), "-v");
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if ((p->help_present) && p->help) {
		snprintf(buffer, sizeof(buffer), "--help");
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if ((p->no_huge_present) && p->no_huge) {
		snprintf(buffer, sizeof(buffer), "--no-huge");
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if ((p->no_pci_present) && p->no_pci) {
		snprintf(buffer, sizeof(buffer), "--no-pci");
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if ((p->no_hpet_present) && p->no_hpet) {
		snprintf(buffer, sizeof(buffer), "--no-hpet");
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if ((p->no_shconf_present) && p->no_shconf) {
		snprintf(buffer, sizeof(buffer), "--no-shconf");
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);
		snprintf(buffer, sizeof(buffer), "-d%s", p->add_driver);
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);
	if (unlikely(n_args >= APP_EAL_ARGC))
	app->eal_argv[n_args++] = strdup(buffer);

	snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
	if (unlikely(n_args >= APP_EAL_ARGC))
	app->eal_argv[n_args++] = strdup(buffer);

	if (p->file_prefix) {
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if (p->base_virtaddr) {
			"--base-virtaddr=%s",
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if ((p->create_uio_dev_present) && p->create_uio_dev) {
		snprintf(buffer, sizeof(buffer), "--create-uio-dev");
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	if (unlikely(n_args >= APP_EAL_ARGC))
	app->eal_argv[n_args++] = strdup(buffer);

	if ((p->xen_dom0_present) && (p->xen_dom0)) {
		snprintf(buffer, sizeof(buffer), "--xen-dom0");
		if (unlikely(n_args >= APP_EAL_ARGC))
		app->eal_argv[n_args++] = strdup(buffer);

	snprintf(buffer, sizeof(buffer), "--");
	if (unlikely(n_args >= APP_EAL_ARGC))
	app->eal_argv[n_args++] = strdup(buffer);

	app->eal_argc = n_args;
	APP_LOG(app, HIGH, "Initializing EAL ...");
	if (app->log_level >= APP_LOG_LEVEL_LOW) {
		fprintf(stdout, "[APP] EAL arguments: \"");
		for (i = 1; i < app->eal_argc; i++)
			fprintf(stdout, "%s ", app->eal_argv[i]);
		fprintf(stdout, "\"\n");

	status = rte_eal_init(app->eal_argc, app->eal_argv);
	if (status < 0)
		rte_panic("EAL init error\n");
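
/*
 * Per-link NIC filter helpers. Each helper fills in an ethertype, SYN or
 * ntuple (5-tuple) filter that steers one class of ingress traffic for a
 * link (ARP, TCP SYN, local IP/TCP/UDP/SCTP) to a dedicated RX queue and
 * programs it through rte_eth_dev_filter_ctrl(). The *_del variants pass
 * RTE_ETH_FILTER_DELETE to remove the same filter again.
 */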
app_link_filter_arp_add(struct app_link_params *link)
	struct rte_eth_ethertype_filter filter = {
		.ether_type = ETHER_TYPE_ARP,
		.queue = link->arp_q,

	return rte_eth_dev_filter_ctrl(link->pmd_id,
		RTE_ETH_FILTER_ETHERTYPE,

app_link_filter_tcp_syn_add(struct app_link_params *link)
	struct rte_eth_syn_filter filter = {
		.queue = link->tcp_syn_q,

	return rte_eth_dev_filter_ctrl(link->pmd_id,
app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip_mask = 0, /* Disable */
		.dst_port_mask = 0, /* Disable */
		.src_port_mask = 0, /* Disable */
		.proto_mask = 0, /* Disable */
		.priority = 1, /* Lowest */
		.queue = l1->ip_local_q,

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,

app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip_mask = 0, /* Disable */
		.dst_port_mask = 0, /* Disable */
		.src_port_mask = 0, /* Disable */
		.proto_mask = 0, /* Disable */
		.priority = 1, /* Lowest */
		.queue = l1->ip_local_q,

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip_mask = 0, /* Disable */
		.dst_port_mask = 0, /* Disable */
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_TCP,
		.proto_mask = UINT8_MAX, /* Enable */
		.priority = 2, /* Higher priority than IP */
		.queue = l1->tcp_local_q,

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,

app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip_mask = 0, /* Disable */
		.dst_port_mask = 0, /* Disable */
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_TCP,
		.proto_mask = UINT8_MAX, /* Enable */
		.priority = 2, /* Higher priority than IP */
		.queue = l1->tcp_local_q,

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip_mask = 0, /* Disable */
		.dst_port_mask = 0, /* Disable */
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_UDP,
		.proto_mask = UINT8_MAX, /* Enable */
		.priority = 2, /* Higher priority than IP */
		.queue = l1->udp_local_q,

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,

app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip_mask = 0, /* Disable */
		.dst_port_mask = 0, /* Disable */
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_UDP,
		.proto_mask = UINT8_MAX, /* Enable */
		.priority = 2, /* Higher priority than IP */
		.queue = l1->udp_local_q,

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip_mask = 0, /* Disable */
		.dst_port_mask = 0, /* Disable */
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_SCTP,
		.proto_mask = UINT8_MAX, /* Enable */
		.priority = 2, /* Higher priority than IP */
		.queue = l1->sctp_local_q,

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,

app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip_mask = 0, /* Disable */
		.dst_port_mask = 0, /* Disable */
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_SCTP,
		.proto_mask = UINT8_MAX, /* Enable */
		.priority = 2, /* Higher priority than IP */
		.queue = l1->sctp_local_q,

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
/* The dev_type field of struct rte_eth_dev is removed in DPDK 16.11 and onwards */
#if RTE_VERSION < 0x100b0000
app_link_is_virtual(struct app_link_params *p)
	uint32_t pmd_id = p->pmd_id;
	struct rte_eth_dev *dev = &rte_eth_devices[pmd_id];

	if (dev->dev_type == RTE_ETH_DEV_VIRTUAL)
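
/*
 * Administrative link up/down helpers. Virtual devices are special-cased on
 * older DPDK releases (see app_link_is_virtual() above); in both cases the
 * interface manager is notified of the new state through
 * ifm_update_linkstatus().
 */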
app_link_up_internal(__rte_unused struct app_params *app,
	struct app_link_params *cp)
	if (app == NULL || cp == NULL)
		rte_panic("NULL pointer argument\n");

#if RTE_VERSION < 0x100b0000
	if (app_link_is_virtual(cp)) {

	ifm_update_linkstatus(cp->pmd_id, IFM_ETH_LINK_UP);

	/* Mark link as UP */

app_link_down_internal(__rte_unused struct app_params *app,
	struct app_link_params *cp)
	if (app == NULL || cp == NULL)
		rte_panic("NULL pointer argument\n");

#if RTE_VERSION < 0x100b0000
	if (app_link_is_virtual(cp)) {

	ifm_update_linkstatus(cp->pmd_id, IFM_ETH_LINK_DOWN);
	/* Mark link as DOWN */
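
/*
 * Poll every configured link and panic if any of them reports DOWN, so the
 * application never starts forwarding on a dead port.
 */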
app_check_link(struct app_params *app)
	uint32_t all_links_up, i;

	for (i = 0; i < app->n_links; i++) {
		struct app_link_params *p = &app->link_params[i];
		struct rte_eth_link link_params;

		memset(&link_params, 0, sizeof(link_params));
		rte_eth_link_get(p->pmd_id, &link_params);

		APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
			link_params.link_speed / 1000,
			link_params.link_status ? "UP" : "DOWN");

		if (link_params.link_status == ETH_LINK_DOWN)

	if (all_links_up == 0)
		rte_panic("Some links are DOWN\n");
is_any_swq_frag_or_ras(struct app_params *app)
	for (i = 0; i < app->n_pktq_swq; i++) {
		struct app_pktq_swq_params *p = &app->swq_params[i];

		if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
			(p->ipv4_ras == 1) || (p->ipv6_ras == 1))
app_init_link_frag_ras(struct app_params *app)
	if (is_any_swq_frag_or_ras(app)) {
		for (i = 0; i < app->n_pktq_hwq_out; i++) {
			struct app_pktq_hwq_out_params *p_txq =
				&app->hwq_out_params[i];

			p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
app_get_cpu_socket_id(uint32_t pmd_id)
	int status = rte_eth_dev_socket_id(pmd_id);

	return (status != SOCKET_ID_ANY) ? status : 0;
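
/*
 * Default per-port RX mode and flow director configuration applied to every
 * link in app_init_link(); individual fields (RSS, FDIR mode, CRC strip)
 * are overridden there on a per-link basis.
 */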
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.header_split = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
		.vlan_tci_mask = 0x0,
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
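
/*
 * Configure every physical link: allocate one port_config_t per link, copy
 * the port/RX/TX queue configuration and mempool parameters into it, enable
 * RSS and perfect-match flow director, and hand the port to the interface
 * manager via ifm_port_setup(). If setup fails with offloads enabled, it is
 * retried once with ETH_TXQ_FLAGS_NOOFFLOADS before giving up.
 */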
app_init_link(struct app_params *app)
	app_init_link_frag_ras(app);

	/* Configure the port_config_t structure for interface
	 * manager initialization.
	 */
	size = RTE_CACHE_LINE_ROUNDUP(sizeof(port_config_t));
	port_config = rte_zmalloc(NULL, (app->n_links * size),
		RTE_CACHE_LINE_SIZE);
	if (port_config == NULL)
		rte_panic("port_config allocation failure: num_links %u, size %u\n",
			app->n_links, app->n_links * size);
	for (i = 0; i < app->n_links; i++) {
		struct app_link_params *p_link = &app->link_params[i];
		uint32_t link_id, n_hwq_in, n_hwq_out;

		status = sscanf(p_link->name, "LINK%" PRIu32, &link_id);
			rte_panic("%s (%" PRId32 "): "
				"init error (%" PRId32 ")\n",
				p_link->name, link_id, status);

		n_hwq_in = app_link_get_n_rxq(app, p_link);
		n_hwq_out = app_link_get_n_txq(app, p_link);

		printf("\n\nn_hwq_in %u\n", n_hwq_in);
		struct rte_eth_conf *My_local_conf = &p_link->conf;

		My_local_conf->rxmode = rx_mode;
		My_local_conf->fdir_conf = fdir_conf;
		My_local_conf->rxmode.mq_mode = ETH_MQ_RX_RSS;
		My_local_conf->rx_adv_conf.rss_conf.rss_key = NULL;
		My_local_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP
			| ETH_RSS_UDP | ETH_RSS_TCP;
		/* pkt-filter-mode is perfect */
		My_local_conf->fdir_conf.mode = RTE_FDIR_MODE_PERFECT;

		My_local_conf->rx_adv_conf.rss_conf.rss_hf = 0;

		/* Set hardware CRC stripping to avoid double stripping. */
		p_link->conf.rxmode.hw_strip_crc = 1;

		APP_LOG(app, HIGH, "Initializing %s (%" PRIu32 ") "
			"(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",

		port_config[i].port_id = p_link->pmd_id;
		port_config[i].nrx_queue = n_hwq_in;
		port_config[i].ntx_queue = n_hwq_out;
		port_config[i].state = 1;
		port_config[i].promisc = p_link->promisc;
		port_config[i].mempool.pool_size =
			app->mempool_params[0].pool_size;
		port_config[i].mempool.buffer_size =
			app->mempool_params[0].buffer_size;
		port_config[i].mempool.cache_size =
			app->mempool_params[0].cache_size;
		port_config[i].mempool.cpu_socket_id =
			app->mempool_params[0].cpu_socket_id;
		memcpy(&port_config[i].port_conf, &p_link->conf,
			sizeof(struct rte_eth_conf));
		memcpy(&port_config[i].rx_conf, &app->hwq_in_params[0].conf,
			sizeof(struct rte_eth_rxconf));
		memcpy(&port_config[i].tx_conf, &app->hwq_out_params[0].conf,
			sizeof(struct rte_eth_txconf));

		if (app->header_csum_req) {
			/* Enable TCP and UDP HW checksum */
			port_config[i].tx_conf.txq_flags &=
				~(ETH_TXQ_FLAGS_NOXSUMTCP |
				ETH_TXQ_FLAGS_NOXSUMUDP);

		if (ifm_port_setup(p_link->pmd_id, &port_config[i])) {
			printf("Failed to configure port %s - %" PRIu32
				".\n", p_link->name, p_link->pmd_id);
			printf("Trying again with offloads disabled...\n");
			port_config[i].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
			if (ifm_port_setup(p_link->pmd_id, &port_config[i]))
				rte_panic("Port setup failed: %s - %" PRIu32
					"\n", p_link->name, p_link->pmd_id);

		app_link_up_internal(app, p_link);
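
/*
 * Create one rte_ring per software queue (SWQ). Rings with a single reader
 * or writer are flagged RING_F_SC_DEQ / RING_F_SP_ENQ so the lockless
 * single-consumer/single-producer fast path can be used.
 */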
app_init_swq(struct app_params *app)
	for (i = 0; i < app->n_pktq_swq; i++) {
		struct app_pktq_swq_params *p = &app->swq_params[i];
		unsigned int flags = 0;

		if (app_swq_get_readers(app, p) == 1)
			flags |= RING_F_SC_DEQ;
		if (app_swq_get_writers(app, p) == 1)
			flags |= RING_F_SP_ENQ;

		APP_LOG(app, HIGH, "Initializing %s...", p->name);
		app->swq[i] = rte_ring_create(

		if (app->swq[i] == NULL)
			rte_panic("%s init error\n", p->name);
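
/*
 * Configure the hierarchical traffic manager (rte_sched) attached to a
 * link: the port rate is derived from the negotiated link speed (Mbps
 * converted to bytes/s), then each subport and each pipe with a valid
 * profile is configured in turn.
 */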
app_init_tm(struct app_params *app)
	for (i = 0; i < app->n_pktq_tm; i++) {
		struct app_pktq_tm_params *p_tm = &app->tm_params[i];
		struct app_link_params *p_link;
		struct rte_eth_link link_eth_params;
		struct rte_sched_port *sched;
		uint32_t n_subports, subport_id;

		p_link = app_get_link_for_tm(app, p_tm);
		rte_eth_link_get(p_link->pmd_id, &link_eth_params);

		p_tm->sched_port_params.name = p_tm->name;
		p_tm->sched_port_params.socket =
			app_get_cpu_socket_id(p_link->pmd_id);
		p_tm->sched_port_params.rate =
			(uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;

		APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
		sched = rte_sched_port_config(&p_tm->sched_port_params);
			rte_panic("%s init error\n", p_tm->name);

		n_subports = p_tm->sched_port_params.n_subports_per_port;
		for (subport_id = 0; subport_id < n_subports; subport_id++) {
			uint32_t n_pipes_per_subport, pipe_id;

			status = rte_sched_subport_config(sched,
				&p_tm->sched_subport_params[subport_id]);
				rte_panic("%s subport %" PRIu32
					" init error (%" PRId32 ")\n",
					p_tm->name, subport_id, status);

			n_pipes_per_subport =
				p_tm->sched_port_params.n_pipes_per_subport;
				pipe_id < n_pipes_per_subport;
				int profile_id = p_tm->sched_pipe_to_profile[
					subport_id * APP_MAX_SCHED_PIPES +

				if (profile_id == -1)

				status = rte_sched_pipe_config(sched,
					rte_panic("%s subport %" PRIu32
						" (profile %" PRId32 ") "
						"init error (%" PRId32 ")\n",
						p_tm->name, subport_id, pipe_id,
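
/*
 * Create the message queues (one rte_ring per MSGQ) used by the pipelines
 * and threads to exchange request/response messages; they are always
 * single-producer/single-consumer.
 */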
app_init_msgq(struct app_params *app)
	for (i = 0; i < app->n_msgq; i++) {
		struct app_msgq_params *p = &app->msgq_params[i];

		APP_LOG(app, HIGH, "Initializing %s ...", p->name);
		app->msgq[i] = rte_ring_create(
			RING_F_SP_ENQ | RING_F_SC_DEQ);

		if (app->msgq[i] == NULL)
			rte_panic("%s init error\n", p->name);
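
/*
 * Translate the application-level pipeline description (app_pipeline_params)
 * into the generic pipeline_params consumed by the pipeline back-ends:
 * every input/output packet queue is mapped to the matching librte_port
 * reader/writer type (ethdev, ring, frag/ras ring, sched, source/sink),
 * and message queues and CLI arguments are passed through unchanged.
 */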
static void app_pipeline_params_get(struct app_params *app,
	struct app_pipeline_params *p_in,
	struct pipeline_params *p_out)
	snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);

	p_out->socket_id = (int) p_in->socket_id;

	p_out->log_level = app->log_level;

	p_out->n_ports_in = p_in->n_pktq_in;
	for (i = 0; i < p_in->n_pktq_in; i++) {
		struct app_pktq_in_params *in = &p_in->pktq_in[i];
		struct pipeline_port_in_params *out = &p_out->port_in[i];

		case APP_PKTQ_IN_HWQ:
			struct app_pktq_hwq_in_params *p_hwq_in =
				&app->hwq_in_params[in->id];
			struct app_link_params *p_link =
				app_get_link_for_rxq(app, p_hwq_in);
			uint32_t rxq_link_id, rxq_queue_id;

			sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
				rte_panic("%s (%" PRId32 "): "
					"init error (%" PRId32 ")\n",
					p_hwq_in->name, rxq_link_id, status);

			out->type = PIPELINE_PORT_IN_ETHDEV_READER;
			out->params.ethdev.port_id = p_link->pmd_id;
			out->params.ethdev.queue_id = rxq_queue_id;
			out->burst_size = p_hwq_in->burst;

		case APP_PKTQ_IN_SWQ:
			struct app_pktq_swq_params *swq_params =
				&app->swq_params[in->id];

			if ((swq_params->ipv4_frag == 0) &&
				(swq_params->ipv6_frag == 0)) {
				if (app_swq_get_readers(app,
						PIPELINE_PORT_IN_RING_READER;
					out->params.ring.ring =
						app->swq_params[in->id].
					out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
					out->params.ring_multi.ring = app->swq[in->id];
					out->burst_size = swq_params->burst_read;

			if (swq_params->ipv4_frag == 1) {
				struct rte_port_ring_reader_ipv4_frag_params
					&out->params.ring_ipv4_frag;

					PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
				params->ring = app->swq[in->id];
				params->mtu = swq_params->mtu;
				params->metadata_size =
					swq_params->metadata_size;
				params->pool_direct =
					[swq_params->mempool_direct_id];
				params->pool_indirect =
					[swq_params->mempool_indirect_id];
				out->burst_size = swq_params->burst_read;

				struct rte_port_ring_reader_ipv6_frag_params
					&out->params.ring_ipv6_frag;

					PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
				params->ring = app->swq[in->id];
				params->mtu = swq_params->mtu;
				params->metadata_size =
					swq_params->metadata_size;
				params->pool_direct =
					[swq_params->mempool_direct_id];
				params->pool_indirect =
					[swq_params->mempool_indirect_id];
				out->burst_size = swq_params->burst_read;

		case APP_PKTQ_IN_TM:
			out->type = PIPELINE_PORT_IN_SCHED_READER;
			out->params.sched.sched = app->tm[in->id];
			out->burst_size = app->tm_params[in->id].burst_read;

		case APP_PKTQ_IN_SOURCE:
			mempool_id = app->source_params[in->id].mempool_id;
			out->type = PIPELINE_PORT_IN_SOURCE;
			out->params.source.mempool = app->mempool[mempool_id];
			out->burst_size = app->source_params[in->id].burst;

			if (app->source_params[in->id].file_name
				out->params.source.file_name = strdup(
					app->source_params[in->id].
				if (out->params.source.file_name == NULL) {
					n_bytes_per_pkt = 0;

			out->params.source.n_bytes_per_pkt =
				app->source_params[in->id].
	p_out->n_ports_out = p_in->n_pktq_out;
	for (i = 0; i < p_in->n_pktq_out; i++) {
		struct app_pktq_out_params *in = &p_in->pktq_out[i];
		struct pipeline_port_out_params *out = &p_out->port_out[i];

		case APP_PKTQ_OUT_HWQ:
			struct app_pktq_hwq_out_params *p_hwq_out =
				&app->hwq_out_params[in->id];
			struct app_link_params *p_link =
				app_get_link_for_txq(app, p_hwq_out);
			uint32_t txq_link_id, txq_queue_id;

			sscanf(p_hwq_out->name,
				"TXQ%" SCNu32 ".%" SCNu32,
				rte_panic("%s (%" PRId32 "): "
					"init error (%" PRId32 ")\n",
					p_hwq_out->name, txq_link_id, status);

			if (p_hwq_out->dropless == 0) {
				struct rte_port_ethdev_writer_params *params =
					&out->params.ethdev;

				out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
				params->port_id = p_link->pmd_id;
				params->queue_id = txq_queue_id;
				params->tx_burst_sz =
					app->hwq_out_params[in->id].burst;

				struct rte_port_ethdev_writer_nodrop_params
					*params = &out->params.ethdev_nodrop;

					PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
				params->port_id = p_link->pmd_id;
				params->queue_id = txq_queue_id;
				params->tx_burst_sz = p_hwq_out->burst;
				params->n_retries = p_hwq_out->n_retries;

		case APP_PKTQ_OUT_SWQ:
			struct app_pktq_swq_params *swq_params =
				&app->swq_params[in->id];

			if ((swq_params->ipv4_ras == 0) &&
				(swq_params->ipv6_ras == 0)) {
				if (app_swq_get_writers(app, swq_params) == 1) {
					if (app->swq_params[in->id].dropless == 0) {
						struct rte_port_ring_writer_params
							*params = &out->params.ring;

						out->type = PIPELINE_PORT_OUT_RING_WRITER;
						params->ring = app->swq[in->id];
						params->tx_burst_sz =
							app->swq_params[in->id].burst_write;

						struct rte_port_ring_writer_nodrop_params
							*params = &out->params.ring_nodrop;

							PIPELINE_PORT_OUT_RING_WRITER_NODROP;
						params->ring = app->swq[in->id];
						params->tx_burst_sz =
							app->swq_params[in->id].burst_write;
							app->swq_params[in->id].n_retries;

				if (swq_params->dropless == 0) {
					struct rte_port_ring_multi_writer_params
						&out->params.ring_multi;

						PIPELINE_PORT_OUT_RING_MULTI_WRITER;
					params->ring = app->swq[in->id];
					params->tx_burst_sz = swq_params->burst_write;

					struct rte_port_ring_multi_writer_nodrop_params
						&out->params.ring_multi_nodrop;

						PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;

					params->ring = app->swq[in->id];
					params->tx_burst_sz = swq_params->burst_write;
					params->n_retries = swq_params->n_retries;

			if (swq_params->ipv4_ras == 1) {
				struct rte_port_ring_writer_ipv4_ras_params
					&out->params.ring_ipv4_ras;

					PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
				params->ring = app->swq[in->id];
				params->tx_burst_sz = swq_params->burst_write;

				struct rte_port_ring_writer_ipv6_ras_params
					&out->params.ring_ipv6_ras;

					PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
				params->ring = app->swq[in->id];
				params->tx_burst_sz = swq_params->burst_write;

		case APP_PKTQ_OUT_TM: {
			struct rte_port_sched_writer_params *params =

			out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
			params->sched = app->tm[in->id];
			params->tx_burst_sz =
				app->tm_params[in->id].burst_write;

		case APP_PKTQ_OUT_SINK:
			out->type = PIPELINE_PORT_OUT_SINK;
			if (app->sink_params[in->id].file_name != NULL) {
				out->params.sink.file_name = strdup(
					app->sink_params[in->id].
				if (out->params.sink.file_name == NULL) {
					out->params.sink.max_n_pkts = 0;

				out->params.sink.max_n_pkts =
					app->sink_params[in->id].

				out->params.sink.file_name = NULL;
				out->params.sink.max_n_pkts = 0;

	p_out->n_msgq = p_in->n_msgq_in;

	for (i = 0; i < p_in->n_msgq_in; i++)
		p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];

	for (i = 0; i < p_in->n_msgq_out; i++)
		p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];

	p_out->n_args = p_in->n_args;
	for (i = 0; i < p_in->n_args; i++) {
		p_out->args_name[i] = p_in->args_name[i];
		p_out->args_value[i] = p_in->args_value[i];
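
/*
 * Instantiate every configured pipeline: look up its registered type, build
 * its pipeline_params and call the back-end and front-end init handlers.
 * The per-pipeline timer period is converted from its configured value into
 * TSC cycles.
 */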
app_init_pipelines(struct app_params *app)
	for (p_id = 0; p_id < app->n_pipelines; p_id++) {
		struct app_pipeline_params *params =
			&app->pipeline_params[p_id];
		struct app_pipeline_data *data = &app->pipeline_data[p_id];
		struct pipeline_type *ptype;
		struct pipeline_params pp;

		APP_LOG(app, HIGH, "Initializing %s ...", params->name);

		ptype = app_pipeline_type_find(app, params->type);
			rte_panic("Init error: Unknown pipeline type \"%s\"\n",

		app_pipeline_params_get(app, params, &pp);

		if (ptype->be_ops->f_init) {
			data->be = ptype->be_ops->f_init(&pp, (void *) app);

			if (data->be == NULL)
				rte_panic("Pipeline instance \"%s\" back-end "
					"init error\n", params->name);

		if (ptype->fe_ops->f_init) {
			data->fe = ptype->fe_ops->f_init(&pp, (void *) app);

			if (data->fe == NULL)
				rte_panic("Pipeline instance \"%s\" front-end "
					"init error\n", params->name);

		data->ptype = ptype;

		data->timer_period = (rte_get_tsc_hz() *
			params->timer_period) / 100;
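
/*
 * Bind each pipeline instance to the lcore derived from its
 * (socket, core, hyper-thread) placement and register it with that thread,
 * either on the regular list (run by the common thread loop) or on the
 * custom list (pipelines providing their own f_run handler).
 */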
app_init_threads(struct app_params *app)
	uint64_t time = rte_get_tsc_cycles();

	for (p_id = 0; p_id < app->n_pipelines; p_id++) {
		struct app_pipeline_params *params =
			&app->pipeline_params[p_id];
		struct app_pipeline_data *data = &app->pipeline_data[p_id];
		struct pipeline_type *ptype;
		struct app_thread_data *t;
		struct app_thread_pipeline_data *p;

		lcore_id = cpu_core_map_get_lcore_id(app->core_map,
			params->hyper_th_id);

			rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
				(params->hyper_th_id) ? "h" : "");

		t = &app->thread_data[lcore_id];

		t->timer_period = (rte_get_tsc_hz() *
			APP_THREAD_TIMER_PERIOD) / DIV_CONV_HZ_SEC;
		t->thread_req_deadline = time + t->timer_period;

		t->headroom_cycles = 0;
		t->headroom_time = rte_get_tsc_cycles();
		t->headroom_ratio = 0.0;

		t->msgq_in = app_thread_msgq_in_get(app,
			params->hyper_th_id);
		if (t->msgq_in == NULL)
			rte_panic("Init error: Cannot find MSGQ_IN "
				"for thread %" PRId32, lcore_id);

		t->msgq_out = app_thread_msgq_out_get(app,
			params->hyper_th_id);
		if (t->msgq_out == NULL)
			rte_panic("Init error: Cannot find MSGQ_OUT "
				"for thread %" PRId32, lcore_id);

		ptype = app_pipeline_type_find(app, params->type);
			rte_panic("Init error: Unknown pipeline "
				"type \"%s\"\n", params->type);

		p = (ptype->be_ops->f_run == NULL) ?
			&t->regular[t->n_regular] :
			&t->custom[t->n_custom];

		p->pipeline_id = p_id;
		p->f_run = ptype->be_ops->f_run;
		p->f_timer = ptype->be_ops->f_timer;
		p->timer_period = data->timer_period;
		p->deadline = time + data->timer_period;

		if (ptype->be_ops->f_run == NULL)
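
/*
 * Top-level initialization entry point, called once from the application
 * main() after the configuration has been parsed, e.g. (sketch only; the
 * surrounding symbol names may differ):
 *
 *     app_config_parse(&app, config_file);
 *     app_init(&app);
 *     // then launch the pipeline threads on all lcores
 *
 * It builds the core map and mask, registers the built-in pipeline types
 * (master, passthrough, VFW, LoadB, TXRX, ARP/ICMP) and creates the
 * per-pipeline and per-thread state.
 */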
int app_init(struct app_params *app)
	app_init_core_map(app);
	app_init_core_mask(app);

	/* app_init_mempool(app); */

	app_pipeline_common_cmd_push(app);
	app_pipeline_thread_cmd_push(app);

	app_pipeline_type_register(app, &pipeline_master);
	app_pipeline_type_register(app, &pipeline_passthrough);
	app_pipeline_type_register(app, &pipeline_vfw);
	app_pipeline_type_register(app, &pipeline_loadb);
	app_pipeline_type_register(app, &pipeline_txrx);
	app_pipeline_type_register(app, &pipeline_arpicmp);

	app_init_pipelines(app);
	app_init_threads(app);

#ifdef L3_STACK_SUPPORT
	populate_lpm_routes();
	print_interface_details();
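
/*
 * Pipeline type registry. app_pipeline_type_cmd_push() appends a type's CLI
 * commands to the application command array, app_pipeline_type_register()
 * validates a type, rejects duplicates and copies it into
 * app->pipeline_type[], and app_pipeline_type_find() resolves a type by
 * name when pipelines and threads are initialized.
 */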
app_pipeline_type_cmd_push(struct app_params *app,
	struct pipeline_type *ptype)
	cmdline_parse_ctx_t *cmds;

	/* Check input arguments */
	if ((app == NULL) ||

	n_cmds = pipeline_type_cmds_count(ptype);
	cmds = ptype->fe_ops->cmds;

	/* Check for available slots in the application commands array */
	if (n_cmds > APP_MAX_CMDS - app->n_cmds)

	/* Push pipeline commands into the application */
	memcpy(&app->cmds[app->n_cmds],
		n_cmds * sizeof(cmdline_parse_ctx_t));

	for (i = 0; i < n_cmds; i++)
		app->cmds[app->n_cmds + i]->data = app;

	app->n_cmds += n_cmds;
	app->cmds[app->n_cmds] = NULL;
app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
	/* Check input arguments */
	if ((app == NULL) ||
		(ptype->name == NULL) ||
		(strlen(ptype->name) == 0) ||
		(ptype->be_ops->f_init == NULL) ||
		(ptype->be_ops->f_timer == NULL))

	/* Check for duplicate entry */
	for (i = 0; i < app->n_pipeline_types; i++)
		if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)

	/* Check for resource availability */
	n_cmds = pipeline_type_cmds_count(ptype);
	if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
		(n_cmds > APP_MAX_CMDS - app->n_cmds))

	/* Copy pipeline type */
	memcpy(&app->pipeline_type[app->n_pipeline_types++],
		sizeof(struct pipeline_type));

	/* Copy CLI commands */
	app_pipeline_type_cmd_push(app, ptype);
struct pipeline_type *app_pipeline_type_find(struct app_params *app, char *name)
	for (i = 0; i < app->n_pipeline_types; i++)
		if (strcmp(app->pipeline_type[i].name, name) == 0)
			return &app->pipeline_type[i];