X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=VNFs%2FDPPD-PROX%2Fprox_port_cfg.c;h=fc4971f1e2584c220318039ad3b9ca3afe47a0d6;hb=refs%2Fchanges%2F13%2F66713%2F1;hp=269b1c637e30ee243627b5add15000dc7359cb01;hpb=17acce6d8d07f10f42e479e7fff7a10efc9ad4ae;p=samplevnf.git

diff --git a/VNFs/DPPD-PROX/prox_port_cfg.c b/VNFs/DPPD-PROX/prox_port_cfg.c
index 269b1c63..fc4971f1 100644
--- a/VNFs/DPPD-PROX/prox_port_cfg.c
+++ b/VNFs/DPPD-PROX/prox_port_cfg.c
@@ -40,6 +40,7 @@
 #include "defines.h"
 #include "prox_cksum.h"
 #include "stats_irq.h"
+#include "prox_compat.h"
 
 struct prox_port_cfg prox_port_cfg[PROX_MAX_PORTS];
 rte_atomic32_t lsc;
@@ -123,12 +124,49 @@ void prox_pktmbuf_reinit(void *arg, void *start, __attribute__((unused)) void *e
 	prox_pktmbuf_init(init_args->mp, init_args->lconf, obj, idx);
 }
 
+#define CONFIGURE_TX_OFFLOAD(flag) \
+	if (port_cfg->requested_tx_offload & flag) {\
+		if (port_cfg->disabled_tx_offload & flag) {\
+			plog_info("\t\t%s disabled by configuration\n", #flag);\
+			port_cfg->requested_tx_offload &= ~flag;\
+		} else if (port_cfg->dev_info.tx_offload_capa & flag) {\
+			port_cfg->port_conf.txmode.offloads |= flag;\
+			plog_info("\t\t%s enabled on port\n", #flag);\
+		} else if (port_cfg->dev_info.tx_queue_offload_capa & flag) {\
+			port_cfg->tx_conf.offloads |= flag;\
+			plog_info("\t\t%s enabled on queue\n", #flag);\
+		} else {\
+			port_cfg->requested_tx_offload &= ~flag;\
+			plog_info("\t\t%s disabled as neither port nor queue supports it\n", #flag);\
+		}\
+	} else {\
+		plog_info("\t\t%s disabled\n", #flag);\
+	}\
+
+#define CONFIGURE_RX_OFFLOAD(flag) \
+	if (port_cfg->requested_rx_offload & flag) {\
+		if (port_cfg->dev_info.rx_offload_capa & flag) {\
+			port_cfg->port_conf.rxmode.offloads |= flag;\
+			plog_info("\t\t%s enabled on port\n", #flag);\
+		} else if (port_cfg->dev_info.rx_queue_offload_capa & flag) {\
+			port_cfg->rx_conf.offloads |= flag;\
+			plog_info("\t\t%s enabled on queue\n", #flag);\
+		} else {\
+			port_cfg->requested_rx_offload &= ~flag;\
+			plog_info("\t\t%s disabled as neither port nor queue supports it\n", #flag);\
+		}\
+	} else {\
+		plog_info("\t\t%s disabled\n", #flag);\
+	}\
+
+
 /* initialize rte devices and check the number of available ports */
 void init_rte_dev(int use_dummy_devices)
 {
 	uint8_t nb_ports, port_id_max;
 	int port_id_last;
 	struct rte_eth_dev_info dev_info;
+	const struct rte_pci_device *pci_dev;
 
 	nb_ports = rte_eth_dev_count(); /* get available ports configuration */
@@ -180,14 +218,12 @@ void init_rte_dev(int use_dummy_devices)
 		struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
 		port_cfg->socket = -1;
 
+		memcpy(&port_cfg->dev_info, &dev_info, sizeof(struct rte_eth_dev_info));
 		port_cfg->max_txq = dev_info.max_tx_queues;
 		port_cfg->max_rxq = dev_info.max_rx_queues;
+		port_cfg->max_rx_pkt_len = dev_info.max_rx_pktlen;
+		port_cfg->min_rx_bufsize = dev_info.min_rx_bufsize;
-		if (!dev_info.pci_dev)
-			continue;
-
-		snprintf(port_cfg->pci_addr, sizeof(port_cfg->pci_addr),
-			"%04x:%02x:%02x.%1x", dev_info.pci_dev->addr.domain, dev_info.pci_dev->addr.bus, dev_info.pci_dev->addr.devid, dev_info.pci_dev->addr.function);
 		strncpy(port_cfg->driver_name, dev_info.driver_name, sizeof(port_cfg->driver_name));
 		plog_info("\tPort %u : driver='%s' tx_queues=%d rx_queues=%d\n", port_id, !strcmp(port_cfg->driver_name, "")? "null" : port_cfg->driver_name, port_cfg->max_txq, port_cfg->max_rxq);
@@ -203,6 +239,18 @@ void init_rte_dev(int use_dummy_devices)
 			*ptr = '\x0';
 		}
 
+#if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
+		pci_dev = dev_info.pci_dev;
+#else
+		if (!dev_info.device)
+			continue;
+		pci_dev = RTE_DEV_TO_PCI(dev_info.device);
+#endif
+		if (!pci_dev)
+			continue;
+
+		snprintf(port_cfg->pci_addr, sizeof(port_cfg->pci_addr),
+			"%04x:%02x:%02x.%1x", pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
 		/* Try to find the device's numa node */
 		char buf[1024];
 		snprintf(buf, sizeof(buf), "/sys/bus/pci/devices/%s/numa_node", port_cfg->pci_addr);
@@ -218,11 +266,14 @@ void init_rte_dev(int use_dummy_devices)
 			fclose(numa_node_fd);
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
-			port_cfg->capabilities.tx_offload_cksum |= IPV4_CKSUM;
+		// In DPDK 18.08 vmxnet3 reports it supports IPV4 checksum, but packets do not go through when IPv4 cksum is enabled
+		if ((!strcmp(port_cfg->short_name, "vmxnet3")) && (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
+			plog_info("\t\tDisabling IPV4 cksum on vmxnet3\n");
+			port_cfg->disabled_tx_offload |= DEV_TX_OFFLOAD_IPV4_CKSUM;
 		}
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
-			port_cfg->capabilities.tx_offload_cksum |= UDP_CKSUM;
+		if ((!strcmp(port_cfg->short_name, "vmxnet3")) && (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+			plog_info("\t\tDisabling UDP cksum on vmxnet3\n");
+			port_cfg->disabled_tx_offload |= DEV_TX_OFFLOAD_UDP_CKSUM;
 		}
 	}
 }
@@ -258,6 +309,93 @@ uint8_t init_rte_ring_dev(void)
 	return nb_ring_dev;
 }
 
+static void print_port_capa(struct prox_port_cfg *port_cfg)
+{
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+	plog_info("\t\tRX offload capa = 0x%lx = ", port_cfg->dev_info.rx_offload_capa);
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP)
+		plog_info("VLAN STRIP | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM)
+		plog_info("IPV4 CKSUM | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM)
+		plog_info("UDP CKSUM | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM)
+		plog_info("TCP CKSUM | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+		plog_info("TCP LRO | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP)
+		plog_info("QINQ STRIP | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
+		plog_info("OUTER_IPV4_CKSUM | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_MACSEC_STRIP)
+		plog_info("MACSEC STRIP | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_HEADER_SPLIT)
+		plog_info("HEADER SPLIT | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_FILTER)
+		plog_info("VLAN FILTER | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_EXTEND)
+		plog_info("VLAN EXTEND | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME)
+		plog_info("JUMBO FRAME | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CRC_STRIP)
+		plog_info("CRC STRIP | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER)
+		plog_info("SCATTER | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)
+		plog_info("TIMESTAMP | ");
+	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
+		plog_info("SECURITY ");
+	plog_info("\n");
+
+	plog_info("\t\tTX offload capa = 0x%lx = ", port_cfg->dev_info.tx_offload_capa);
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT)
+		plog_info("VLAN INSERT | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
+		plog_info("IPV4 CKSUM | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM)
+		plog_info("UDP CKSUM | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM)
+		plog_info("TCP CKSUM | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM)
+		plog_info("SCTP CKSUM | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
+		plog_info("TCP TSO | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO)
+		plog_info("UDP TSO | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		plog_info("OUTER IPV4 CKSUM | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT)
+		plog_info("QINQ INSERT | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
+		plog_info("VXLAN TNL TSO | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO)
+		plog_info("GRE TNL TSO | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO)
+		plog_info("IPIP TNL TSO | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+		plog_info("GENEVE TNL TSO | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT)
+		plog_info("MACSEC INSERT | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MT_LOCKFREE)
+		plog_info("MT LOCKFREE | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MULTI_SEGS)
+		plog_info("MULTI SEG | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
+		plog_info("SECURITY | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO)
+		plog_info("UDP TNL TSO | ");
+	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO)
+		plog_info("IP TNL TSO | ");
+	plog_info("\n");
+
+	plog_info("\t\trx_queue_offload_capa = 0x%lx\n", port_cfg->dev_info.rx_queue_offload_capa);
+	plog_info("\t\ttx_queue_offload_capa = 0x%lx\n", port_cfg->dev_info.tx_queue_offload_capa);
+	plog_info("\t\tflow_type_rss_offloads = 0x%lx\n", port_cfg->dev_info.flow_type_rss_offloads);
+	plog_info("\t\tdefault RX port conf: burst_size = %d, ring_size = %d, nb_queues = %d\n", port_cfg->dev_info.default_rxportconf.burst_size, port_cfg->dev_info.default_rxportconf.ring_size, port_cfg->dev_info.default_rxportconf.nb_queues);
+	plog_info("\t\tdefault TX port conf: burst_size = %d, ring_size = %d, nb_queues = %d\n", port_cfg->dev_info.default_txportconf.burst_size, port_cfg->dev_info.default_txportconf.ring_size, port_cfg->dev_info.default_txportconf.nb_queues);
+#endif
+}
+
 static void init_port(struct prox_port_cfg *port_cfg)
 {
 	static char dummy_pool_name[] = "0_dummy";
@@ -270,6 +408,9 @@ static void init_port(struct prox_port_cfg *port_cfg)
 	plog_info("\t\tPort name is set to %s\n", port_cfg->name);
 	plog_info("\t\tPort max RX/TX queue is %u/%u\n", port_cfg->max_rxq, port_cfg->max_txq);
 	plog_info("\t\tPort driver is %s\n", port_cfg->driver_name);
+#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,0)
+	plog_info("\t\tSupported speed mask = 0x%x\n", port_cfg->dev_info.speed_capa);
+#endif
 
 	PROX_PANIC(port_cfg->n_rxq == 0 && port_cfg->n_txq == 0,
 		   "\t\t port %u is enabled but no RX or TX queues have been configured", port_id);
@@ -278,10 +419,7 @@ static void init_port(struct prox_port_cfg *port_cfg)
 		/* not receiving on this port */
 		plog_info("\t\tPort %u had no RX queues, setting to 1\n", port_id);
 		port_cfg->n_rxq = 1;
-		uint32_t mbuf_size = MBUF_SIZE;
-		if (strcmp(port_cfg->short_name, "vmxnet3") == 0) {
-			mbuf_size = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
-		}
+		uint32_t mbuf_size = TX_MBUF_SIZE;
 		plog_info("\t\tAllocating dummy memory pool on socket %u with %u elements of size %u\n",
 			  port_cfg->socket, port_cfg->n_rxd, mbuf_size);
 		port_cfg->pool[0] = rte_mempool_create(dummy_pool_name, port_cfg->n_rxd, mbuf_size,
@@ -295,9 +433,14 @@ static void init_port(struct prox_port_cfg *port_cfg)
 		dummy_pool_name[0]++;
 	} else {
 		// Most pmd should now support setting mtu
+		if (port_cfg->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN > port_cfg->max_rx_pkt_len) {
+			plog_info("\t\tMTU is too big for the port, reducing MTU from %d to %d\n", port_cfg->mtu, port_cfg->max_rx_pkt_len);
+			port_cfg->mtu = port_cfg->max_rx_pkt_len;
+		}
 		plog_info("\t\tSetting MTU size to %u for port %u ...\n", port_cfg->mtu, port_id);
 		ret = rte_eth_dev_set_mtu(port_id, port_cfg->mtu);
-		PROX_PANIC(ret < 0, "\n\t\t\trte_eth_dev_set_mtu() failed on port %u: error %d\n", port_id, ret);
+		if (ret)
+			plog_err("\t\t\trte_eth_dev_set_mtu() failed on port %u: error %d\n", port_id, ret);
 
 		if (port_cfg->n_txq == 0) {
 			/* not sending on this port */
@@ -306,18 +449,83 @@ static void init_port(struct prox_port_cfg *port_cfg)
 		}
 	}
 
+	print_port_capa(port_cfg);
+
 	if (port_cfg->n_rxq > 1)  {
 		// Enable RSS if multiple receive queues
 		port_cfg->port_conf.rxmode.mq_mode                   |= ETH_MQ_RX_RSS;
 		port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key      = toeplitz_init_key;
 		port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key_len  = TOEPLITZ_KEY_LEN;
 #if RTE_VERSION >= RTE_VERSION_NUM(2,0,0,0)
-		port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4|ETH_RSS_NONFRAG_IPV4_UDP;
+		port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP|ETH_RSS_UDP;
 #else
 		port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4|ETH_RSS_NONF_IPV4_UDP;
 #endif
 	}
 
+	// Make sure that the requested RSS offload is supported by the PMD
+#if RTE_VERSION >= RTE_VERSION_NUM(2,0,0,0)
+	port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf &= port_cfg->dev_info.flow_type_rss_offloads;
+#endif
+	plog_info("\t\tEnabling RSS rss_hf = 0x%lx (requested 0x%llx)\n", port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf, ETH_RSS_IP|ETH_RSS_UDP);
+
+	// rxmode such as hw crc strip
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+	CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_CRC_STRIP);
+	CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_JUMBO_FRAME);
+	CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_VLAN_STRIP);
+#else
+	if (port_cfg->requested_rx_offload & DEV_RX_OFFLOAD_CRC_STRIP) {
+		port_cfg->port_conf.rxmode.hw_strip_crc = 1;
+	}
+	if (port_cfg->requested_rx_offload & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+		port_cfg->port_conf.rxmode.jumbo_frame = 1;
+	}
+#endif
+
+	// IPV4, UDP, SCTP Checksums
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+	CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_IPV4_CKSUM);
+	CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_UDP_CKSUM);
+	CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_VLAN_INSERT);
+#else
+	if ((port_cfg->dev_info.tx_offload_capa & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM)) == 0) {
+		port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
+		plog_info("\t\tDisabling TX offloads as pmd reports that it does not support them\n");
+	}
+	if (!strcmp(port_cfg->short_name, "vmxnet3")) {
+		port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
+		plog_info("\t\tDisabling SCTP offload on port %d as vmxnet3 does not support it\n", port_id);
+	}
+#endif
+	// Multi Segments
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+	CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_MULTI_SEGS);
+#else
+	if (!strcmp(port_cfg->short_name, "vmxnet3")) {
+		port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+		plog_info("\t\tDisabling TX multsegs on port %d as vmxnet3 does not support them\n", port_id);
+	} else if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS)
+		plog_info("\t\tDisabling TX multsegs on port %d\n", port_id);
+	else
+		plog_info("\t\tEnabling TX multsegs on port %d\n", port_id);
+
+	if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS)
+		plog_info("\t\tEnabling No TX offloads on port %d\n", port_id);
+	else
+		plog_info("\t\tTX offloads enabled on port %d\n", port_id);
+#endif
+
+	// Refcount
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+	CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+#else
+	if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT)
+		plog_info("\t\tEnabling No refcnt on port %d\n", port_id);
+	else
+		plog_info("\t\tRefcnt enabled on port %d\n", port_id);
+#endif
+
 
 	plog_info("\t\tConfiguring port %u... with %u RX queues and %u TX queues\n",
 		  port_id, port_cfg->n_rxq, port_cfg->n_txq);
@@ -359,36 +567,26 @@ static void init_port(struct prox_port_cfg *port_cfg)
 	plog_info("\t\tMAC address set to "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
 
+	/* initialize TX queues first */
+	for (uint16_t queue_id = 0; queue_id < port_cfg->n_txq; ++queue_id) {
+		plog_info("\t\tSetting up TX queue %u on socket %u with %u desc\n",
+			  queue_id, port_cfg->socket, port_cfg->n_txd);
+		ret = rte_eth_tx_queue_setup(port_id, queue_id, port_cfg->n_txd,
+					     port_cfg->socket, &port_cfg->tx_conf);
+		PROX_PANIC(ret < 0, "\t\t\trte_eth_tx_queue_setup() failed on port %u: error %d\n", port_id, ret);
+	}
+
 	/* initialize RX queues */
 	for (uint16_t queue_id = 0; queue_id < port_cfg->n_rxq; ++queue_id) {
 		plog_info("\t\tSetting up RX queue %u on port %u on socket %u with %u desc (pool 0x%p)\n",
 			  queue_id, port_id, port_cfg->socket, port_cfg->n_rxd, port_cfg->pool[queue_id]);
-
 		ret = rte_eth_rx_queue_setup(port_id, queue_id, port_cfg->n_rxd, port_cfg->socket, &port_cfg->rx_conf, port_cfg->pool[queue_id]);
-
 		PROX_PANIC(ret < 0, "\t\t\trte_eth_rx_queue_setup() failed on port %u: error %s (%d)\n", port_id, strerror(-ret), ret);
 	}
 
-	if (port_cfg->capabilities.tx_offload_cksum == 0) {
-		port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
-		plog_info("\t\tDisabling TX offloads as pmd reports that it does not support them)\n");
-	}
-
-	if (!strcmp(port_cfg->short_name, "vmxnet3")) {
-		port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
-		plog_info("\t\tDisabling multsegs on port %d as vmxnet3 does not support them\n", port_id);
-	}
-	/* initialize one TX queue per logical core on each port */
-	for (uint16_t queue_id = 0; queue_id < port_cfg->n_txq; ++queue_id) {
-		plog_info("\t\tSetting up TX queue %u on socket %u with %u desc\n",
-			  queue_id, port_cfg->socket, port_cfg->n_txd);
-		ret = rte_eth_tx_queue_setup(port_id, queue_id, port_cfg->n_txd,
-					     port_cfg->socket, &port_cfg->tx_conf);
-		PROX_PANIC(ret < 0, "\t\t\trte_eth_tx_queue_setup() failed on port %u: error %d\n", port_id, ret);
-	}
 
 	plog_info("\t\tStarting up port %u ...", port_id);
 	ret = rte_eth_dev_start(port_id);
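
The CONFIGURE_TX_OFFLOAD and CONFIGURE_RX_OFFLOAD macros introduced above replace the old per-capability bookkeeping (capabilities.tx_offload_cksum, txq_flags) with a fixed decision order: a requested offload is first checked against the configuration deny-list, then enabled at port level if the PMD advertises it there, then at queue level, and otherwise dropped from the requested mask. The standalone sketch below mirrors that decision order without any DPDK dependency; the struct and function names are hypothetical and are not part of the patch.

/*
 * Minimal sketch of the port-then-queue offload fallback used by the
 * CONFIGURE_TX_OFFLOAD macro. All names here (offload_caps,
 * configure_tx_offload) are illustrative, not DPDK or PROX APIs.
 */
#include <stdint.h>
#include <stdio.h>

struct offload_caps {
	uint64_t disabled;       /* offloads force-disabled by configuration */
	uint64_t port_capa;      /* offloads the PMD supports per port */
	uint64_t queue_capa;     /* offloads the PMD supports per queue */
	uint64_t port_offloads;  /* resulting port-level offload mask */
	uint64_t queue_offloads; /* resulting queue-level offload mask */
};

static int configure_tx_offload(struct offload_caps *c, uint64_t flag, const char *name)
{
	if (c->disabled & flag) {
		printf("%s disabled by configuration\n", name);
		return 0;
	}
	if (c->port_capa & flag) {
		c->port_offloads |= flag;   /* preferred: enable on the port */
		printf("%s enabled on port\n", name);
		return 1;
	}
	if (c->queue_capa & flag) {
		c->queue_offloads |= flag;  /* fallback: enable per queue */
		printf("%s enabled on queue\n", name);
		return 1;
	}
	printf("%s not supported by port or queue\n", name);
	return 0;
}

int main(void)
{
	struct offload_caps caps = { .port_capa = 0x2, .queue_capa = 0x4 };
	configure_tx_offload(&caps, 0x2, "IPV4_CKSUM"); /* lands in the port mask */
	configure_tx_offload(&caps, 0x4, "UDP_CKSUM");  /* falls back to the queue mask */
	configure_tx_offload(&caps, 0x8, "SCTP_CKSUM"); /* unsupported, dropped */
	return 0;
}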