2 // Copyright (c) 2010-2020 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
#include <rte_version.h>
#include <rte_eth_ring.h>
#if (RTE_VERSION >= RTE_VERSION_NUM(17,11,0,0))
#include <rte_bus_vdev.h>
#if (RTE_VERSION > RTE_VERSION_NUM(17,5,0,2))
#if (RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0))
#include <rte_eth_null.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "prox_port_cfg.h"
#include "prox_globals.h"
#include "prox_cksum.h"
#include "stats_irq.h"
#include "prox_compat.h"
#include "rte_ethdev.h"
/* Global per-port configuration table, indexed by DPDK port id.
 * Filled in by the config parser and by init_rte_dev()/init_port() below. */
struct prox_port_cfg prox_port_cfg[PROX_MAX_PORTS];
54 int prox_nb_active_ports(void)
57 for (uint32_t i = 0; i < PROX_MAX_PORTS; ++i) {
58 ret += prox_port_cfg[i].active;
63 int prox_last_port_active(void)
66 for (uint32_t i = 0; i < PROX_MAX_PORTS; ++i) {
67 if (prox_port_cfg[i].active) {
/* Link-state-change callback registered with rte_eth_dev_callback_register().
 * The DPDK callback prototype changed over releases (port id width, return
 * value, ret_param argument), hence the RTE_VERSION gating. On an LSC event
 * it bumps the global 'lsc' counter (declared elsewhere in this file). */
#if RTE_VERSION >= RTE_VERSION_NUM(17,11,0,0)
static int lsc_cb(__attribute__((unused)) uint16_t port_id, enum rte_eth_event_type type, __attribute__((unused)) void *param,
	__attribute__((unused)) void *ret_param)
#if RTE_VERSION >= RTE_VERSION_NUM(17,8,0,1)
static int lsc_cb(__attribute__((unused)) uint8_t port_id, enum rte_eth_event_type type, __attribute__((unused)) void *param,
	__attribute__((unused)) void *ret_param)
static void lsc_cb(__attribute__((unused)) uint8_t port_id, enum rte_eth_event_type type, __attribute__((unused)) void *param)
	/* Ignore every event except link state change. */
	if (RTE_ETH_EVENT_INTR_LSC != type) {
#if RTE_VERSION >= RTE_VERSION_NUM(17,8,0,1)
	/* Atomically record the event; consumed by the stats/display code. */
	rte_atomic32_inc(&lsc);
#if RTE_VERSION >= RTE_VERSION_NUM(17,8,0,1)
/* Arguments bundled for prox_pktmbuf_reinit() when walking a mempool. */
struct prox_pktmbuf_reinit_args {
	struct rte_mempool *mp;   /* pool owning the mbufs being re-initialized */
	struct lcore_cfg *lconf;  /* forwarded as opaque arg to prox_pktmbuf_init() */
/* standard mbuf initialization procedure */
/* Mempool element constructor: pre-programs the L2/L3 header lengths used by
 * TX checksum offload, then delegates to DPDK's stock rte_pktmbuf_init(). */
void prox_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *_m, unsigned i)
	struct rte_mbuf *mbuf = _m;

#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
	/* DPDK >= 1.8 packs l2_len/l3_len into the single tx_offload field. */
	mbuf->tx_offload = CALC_TX_OL(sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr));
	/* Pre-1.8 layout kept the lengths in pkt.vlan_macip bitfields. */
	mbuf->pkt.vlan_macip.f.l2_len = sizeof(prox_rte_ether_hdr);
	mbuf->pkt.vlan_macip.f.l3_len = sizeof(prox_rte_ipv4_hdr);

	rte_pktmbuf_init(mp, opaque_arg, mbuf, i);
/* Re-run mbuf initialization on one mempool element; 'arg' is a
 * struct prox_pktmbuf_reinit_args. Used when a pool is recycled. */
void prox_pktmbuf_reinit(void *arg, void *start, __attribute__((unused)) void *end, uint32_t idx)
	struct prox_pktmbuf_reinit_args *init_args = arg;

	/* Skip the mempool object header to reach the mbuf itself. */
	obj += init_args->mp->header_size;
	m = (struct rte_mbuf*)obj;

	prox_pktmbuf_init(init_args->mp, init_args->lconf, obj, idx);
/* Enable a requested TX offload 'flag': first honour an explicit disable from
 * the configuration, else enable it port-wide if the PMD supports it, else
 * per-queue, else clear it from requested_tx_offload and log the reason.
 * Expects 'port_cfg' in scope (used from init_port()). */
#define CONFIGURE_TX_OFFLOAD(flag) \
	if (port_cfg->requested_tx_offload & flag) {\
		if (port_cfg->disabled_tx_offload & flag) {\
			plog_info("\t\t%s disabled by configuration\n", #flag);\
			port_cfg->requested_tx_offload &= ~flag;\
		} else if (port_cfg->dev_info.tx_offload_capa & flag) {\
			port_cfg->port_conf.txmode.offloads |= flag;\
			plog_info("\t\t%s enabled on port\n", #flag);\
		} else if (port_cfg->dev_info.tx_queue_offload_capa & flag) {\
			port_cfg->tx_conf.offloads |= flag;\
			plog_info("\t\t%s enabled on queue\n", #flag);\
			port_cfg->requested_tx_offload &= ~flag;\
			plog_info("\t\t%s disabled as neither port or queue supports it\n", #flag);\
		plog_info("\t\t%s disabled\n", #flag);\
/* RX counterpart of CONFIGURE_TX_OFFLOAD: enable 'flag' port-wide if the PMD
 * supports it, else per-queue, else drop it from requested_rx_offload.
 * Expects 'port_cfg' in scope (used from init_port()). */
#define CONFIGURE_RX_OFFLOAD(flag) \
	if (port_cfg->requested_rx_offload & flag) {\
		if (port_cfg->dev_info.rx_offload_capa & flag) {\
			port_cfg->port_conf.rxmode.offloads |= flag;\
			plog_info("\t\t%s enabled on port\n", #flag);\
		} else if (port_cfg->dev_info.rx_queue_offload_capa & flag) {\
			port_cfg->rx_conf.offloads |= flag;\
			plog_info("\t\t%s enabled on queue\n", #flag);\
			port_cfg->requested_rx_offload &= ~flag;\
			plog_info("\t\t%s disabled as neither port or queue supports it\n", #flag);\
		plog_info("\t\t%s disabled\n", #flag);\
/* Build the network-byte-order IPv4 netmask for a CIDR prefix length.
 * prefix == 0 yields 0.0.0.0; prefix == 32 yields 255.255.255.255. */
static inline uint32_t get_netmask(uint8_t prefix)
{
	if (prefix == 0)
		return (~((uint32_t) -1));
	/* Use an unsigned literal: for prefix == 1 the old '1 << 31' shifted
	 * into the sign bit of a signed int, which is undefined behavior. */
	return rte_cpu_to_be_32(~((1u << (32 - prefix)) - 1));
}
176 static void set_ip_address(char *devname, uint32_t *ip, uint8_t prefix)
179 struct sockaddr_in in_addr;
181 uint32_t netmask = get_netmask(prefix);
182 plog_info("Setting netmask to %x\n", netmask);
184 fd = socket(AF_INET, SOCK_DGRAM, 0);
186 memset(&ifreq, 0, sizeof(struct ifreq));
187 memset(&in_addr, 0, sizeof(struct sockaddr_in));
189 in_addr.sin_family = AF_INET;
190 in_addr.sin_addr = *(struct in_addr *)ip;
192 strncpy(ifreq.ifr_name, devname, IFNAMSIZ);
193 ifreq.ifr_addr = *(struct sockaddr *)&in_addr;
194 rc = ioctl(fd, SIOCSIFADDR, &ifreq);
195 PROX_PANIC(rc < 0, "Failed to set IP address %x on device %s: error = %d (%s)\n", *ip, devname, errno, strerror(errno));
197 in_addr.sin_addr = *(struct in_addr *)&netmask;
198 ifreq.ifr_netmask = *(struct sockaddr *)&in_addr;
199 rc = ioctl(fd, SIOCSIFNETMASK, &ifreq);
200 PROX_PANIC(rc < 0, "Failed to set netmask %x (prefix %d) on device %s: error = %d (%s)\n", netmask, prefix, devname, errno, strerror(errno));
/* initialize rte devices and check the number of available ports */
/* Pass 1 creates tap/vdev companion ports for active ports that request one
 * and mirrors their IP/MAC/vlan settings; pass 2 enumerates the DPDK ports,
 * caches each device's limits/capabilities in prox_port_cfg, resolves the
 * PCI address and NUMA node, and works around known PMD quirks. */
void init_rte_dev(int use_dummy_devices)
	uint8_t nb_ports, port_id_max;
	int port_id_last, rc = 0;
	struct rte_eth_dev_info dev_info;
	const struct rte_pci_device *pci_dev;

	/* Pass 1: create vdev (net_tap) companions. */
	for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) {
		if (!prox_port_cfg[port_id].active) {
		struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
		if (port_cfg->vdev[0]) {
			char name[MAX_NAME_SIZE], tap[MAX_NAME_SIZE];
			snprintf(tap, MAX_NAME_SIZE, "net_tap%d", port_id);
#if (RTE_VERSION > RTE_VERSION_NUM(17,5,0,1))
			snprintf(name, MAX_NAME_SIZE, "iface=%s", port_cfg->vdev);
			rc = rte_vdev_init(tap, name);
			PROX_PANIC(1, "vdev not supported in DPDK < 17.05\n");
			PROX_PANIC(rc != 0, "Unable to create device %s %s\n", "net tap", port_cfg->vdev);
			/* The newly created vdev gets the last available port id. */
			int vdev_port_id = prox_rte_eth_dev_count_avail() - 1;
			PROX_PANIC(vdev_port_id >= PROX_MAX_PORTS, "Too many port defined %d >= %d\n", vdev_port_id, PROX_MAX_PORTS);
			plog_info("\tCreating device %s, port %d\n", port_cfg->vdev, vdev_port_id);
			prox_port_cfg[vdev_port_id].is_vdev = 1;
			prox_port_cfg[vdev_port_id].active = 1;
			prox_port_cfg[vdev_port_id].dpdk_mapping = port_id;
			prox_port_cfg[vdev_port_id].n_txq = 1;

			if (prox_port_cfg[port_id].vlan_tag) {
				/* Create a kernel vlan sub-interface on top of the tap. */
				snprintf(prox_port_cfg[vdev_port_id].name, MAX_NAME_SIZE, "%s_%d", port_cfg->vdev, prox_port_cfg[port_id].vlan_tag);
				sprintf(command, "ip link add link %s name %s type vlan id %d", port_cfg->vdev, prox_port_cfg[vdev_port_id].name, prox_port_cfg[port_id].vlan_tag);
				plog_info("Running %s\n", command);
				plog_info("Using vlan tag %d - added device %s\n", prox_port_cfg[port_id].vlan_tag, prox_port_cfg[vdev_port_id].name);
				strncpy(prox_port_cfg[vdev_port_id].name, port_cfg->vdev, MAX_NAME_SIZE);
			prox_port_cfg[port_id].dpdk_mapping = vdev_port_id;
			prox_port_cfg[vdev_port_id].ip = rte_be_to_cpu_32(prox_port_cfg[port_id].ip);
			prox_port_cfg[vdev_port_id].prefix = prox_port_cfg[port_id].prefix;
			prox_port_cfg[vdev_port_id].type = prox_port_cfg[port_id].type;
			if (prox_port_cfg[vdev_port_id].type == PROX_PORT_MAC_HW) {
				// If DPDK port MAC set to HW, then make sure the vdev has the same MAC as DPDK port
				prox_port_cfg[vdev_port_id].type = PROX_PORT_MAC_SET;
				rte_eth_macaddr_get(port_id, &prox_port_cfg[vdev_port_id].eth_addr);
				plog_info("\tDPDK port %d MAC address pre-configured to MAC from port %d: "MAC_BYTES_FMT"\n",
					vdev_port_id, port_id, MAC_BYTES(prox_port_cfg[vdev_port_id].eth_addr.addr_bytes));
				memcpy(&prox_port_cfg[vdev_port_id].eth_addr, &prox_port_cfg[port_id].eth_addr, sizeof(prox_port_cfg[port_id].eth_addr));
	nb_ports = prox_rte_eth_dev_count_avail();
	/* get available ports configuration */
	PROX_PANIC(use_dummy_devices && nb_ports, "Can't use dummy devices while there are also real ports\n");

	if (use_dummy_devices) {
#if (RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0))
		/* One null/ring device per configured port index. */
		nb_ports = prox_last_port_active() + 1;
		plog_info("Creating %u dummy devices\n", nb_ports);

		char port_name[32] = "0dummy_dev";
		for (uint32_t i = 0; i < nb_ports; ++i) {
#if (RTE_VERSION > RTE_VERSION_NUM(17,5,0,1))
			rte_vdev_init(port_name, "size=64,copy=0");
			eth_dev_null_create(port_name, 0, PROX_RTE_ETHER_MIN_LEN, 0);
		PROX_PANIC(use_dummy_devices, "Can't use dummy devices\n");
	else if (prox_last_port_active() != -1) {
		PROX_PANIC(nb_ports == 0, "\tError: DPDK could not find any port\n");
		plog_info("\tDPDK has found %u ports\n", nb_ports);

	if (nb_ports > PROX_MAX_PORTS) {
		plog_warn("\tWarning: I can deal with at most %u ports."
			" Please update PROX_MAX_PORTS and recompile.\n", PROX_MAX_PORTS);

		nb_ports = PROX_MAX_PORTS;

#if (RTE_VERSION >= RTE_VERSION_NUM(17,5,0,0))
	/* Port ids may be sparse on newer DPDK; walk the valid ones. */
	RTE_ETH_FOREACH_DEV(id) {
		rte_eth_dev_get_name_by_port(id, name);
		plog_info("\tFound DPDK port id %u %s\n", id, name);
		if (id >= PROX_MAX_PORTS) {
			plog_warn("\tWarning: I can deal with at most %u ports."
				" Please update PROX_MAX_PORTS and recompile.\n", PROX_MAX_PORTS);
			prox_port_cfg[id].available = 1;
			if (id > port_id_max)
	port_id_max = nb_ports - 1;

	port_id_last = prox_last_port_active();
	PROX_PANIC(port_id_last > port_id_max,
		"\tError: invalid port(s) specified, last port index active: %d (max index is %d)\n",
		port_id_last, port_id_max);

	/* Assign ports to PROX interfaces & Read max RX/TX queues per port */
#if (RTE_VERSION >= RTE_VERSION_NUM(17,5,0,0))
	for (uint8_t port_id = 0; port_id <= port_id_last; ++port_id) {
	for (uint8_t port_id = 0; port_id <= nb_ports; ++port_id) {
		/* skip ports that are not enabled */
		if (!prox_port_cfg[port_id].active) {
#if (RTE_VERSION >= RTE_VERSION_NUM(17,5,0,0))
		} else if (prox_port_cfg[port_id].available == 0) {
			PROX_PANIC(1, "port %u enabled but not available\n", port_id);
		plog_info("\tGetting info for rte dev %u\n", port_id);
		rte_eth_dev_info_get(port_id, &dev_info);
		struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
		port_cfg->socket = -1;

		/* Cache device limits for queue/descriptor validation in init_port(). */
		memcpy(&port_cfg->dev_info, &dev_info, sizeof(struct rte_eth_dev_info));
		port_cfg->max_txq = dev_info.max_tx_queues;
		port_cfg->max_rxq = dev_info.max_rx_queues;
		port_cfg->max_rx_pkt_len = dev_info.max_rx_pktlen;
		port_cfg->min_rx_bufsize = dev_info.min_rx_bufsize;
		port_cfg->min_tx_desc = dev_info.tx_desc_lim.nb_min;
		port_cfg->max_tx_desc = dev_info.tx_desc_lim.nb_max;
		port_cfg->min_rx_desc = dev_info.rx_desc_lim.nb_min;
		port_cfg->max_rx_desc = dev_info.rx_desc_lim.nb_max;

		prox_strncpy(port_cfg->driver_name, dev_info.driver_name, sizeof(port_cfg->driver_name));
		plog_info("\tPort %u : driver='%s' tx_queues=%d rx_queues=%d\n", port_id, !strcmp(port_cfg->driver_name, "")? "null" : port_cfg->driver_name, port_cfg->max_txq, port_cfg->max_rxq);
		plog_info("\tPort %u : %d<=nb_tx_desc<=%d %d<=nb_rx_desc<=%d\n", port_id, port_cfg->min_tx_desc, port_cfg->max_tx_desc, port_cfg->min_rx_desc, port_cfg->max_rx_desc);

		/* Derive short_name by stripping the "rte_"/"net_" prefix
		 * (and "_pmd" suffix below); used for per-PMD quirk checks. */
		if (strncmp(port_cfg->driver_name, "rte_", 4) == 0) {
			prox_strncpy(port_cfg->short_name, prox_port_cfg[port_id].driver_name + 4, sizeof(port_cfg->short_name));
		} else if (strncmp(port_cfg->driver_name, "net_", 4) == 0) {
			prox_strncpy(port_cfg->short_name, prox_port_cfg[port_id].driver_name + 4, sizeof(port_cfg->short_name));
			prox_strncpy(port_cfg->short_name, prox_port_cfg[port_id].driver_name, sizeof(port_cfg->short_name));

		if ((ptr = strstr(port_cfg->short_name, "_pmd")) != NULL) {

#if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
		pci_dev = dev_info.pci_dev;
		if (!dev_info.device)
		pci_dev = RTE_DEV_TO_PCI(dev_info.device);

		snprintf(port_cfg->pci_addr, sizeof(port_cfg->pci_addr),
			"%04x:%02x:%02x.%1x", pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
		/* Try to find the device's numa node */
		snprintf(buf, sizeof(buf), "/sys/bus/pci/devices/%s/numa_node", port_cfg->pci_addr);
		FILE* numa_node_fd = fopen(buf, "r");
		if (fgets(buf, sizeof(buf), numa_node_fd) == NULL) {
			plog_warn("Failed to read numa_node for device %s\n", port_cfg->pci_addr);
		port_cfg->socket = strtol(buf, 0, 0);
		if (port_cfg->socket == -1) {
			plog_warn("System did not report numa_node for device %s\n", port_cfg->pci_addr);
		fclose(numa_node_fd);

		// In DPDK 18.08 vmxnet3 reports it supports IPV4 checksum, but packets does not go through when IPv4 cksum is enabled
		if ((!strcmp(port_cfg->short_name, "vmxnet3")) && (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
			plog_info("\t\tDisabling IPV4 cksum on vmxnet3\n");
			port_cfg->disabled_tx_offload |= DEV_TX_OFFLOAD_IPV4_CKSUM;
		if ((!strcmp(port_cfg->short_name, "vmxnet3")) && (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM)) {
			plog_info("\t\tDisabling UDP cksum on vmxnet3\n");
			port_cfg->disabled_tx_offload |= DEV_TX_OFFLOAD_UDP_CKSUM;
		// Some OVS versions reports that they support UDP offload and no IPv4 offload, but fails when UDP offload is enabled
		if ((!strcmp(port_cfg->short_name, "virtio")) &&
			((port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) &&
			(port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM)) {
			plog_info("\t\tDisabling UDP cksum on virtio\n");
			port_cfg->disabled_tx_offload |= DEV_TX_OFFLOAD_UDP_CKSUM;
/* Create rte ring-backed devices */
/* For every active port configured with an rx_ring name, look up the rx/tx
 * rte_rings (they must already exist) and wrap them in an eth_dev via
 * rte_eth_from_rings(). Returns the number of ring-backed devices created
 * (counter visible below; increment/return lines outside this view). */
uint8_t init_rte_ring_dev(void)
	uint8_t nb_ring_dev = 0;

	for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) {
		/* skip ports that are not enabled */
		if (!prox_port_cfg[port_id].active) {
		struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
		if (port_cfg->rx_ring[0] != '\0') {
			plog_info("\tRing-backed port %u: rx='%s' tx='%s'\n", port_id, port_cfg->rx_ring, port_cfg->tx_ring);

			struct rte_ring* rx_ring = rte_ring_lookup(port_cfg->rx_ring);
			PROX_PANIC(rx_ring == NULL, "Ring %s not found for port %d!\n", port_cfg->rx_ring, port_id);
			struct rte_ring* tx_ring = rte_ring_lookup(port_cfg->tx_ring);
			PROX_PANIC(tx_ring == NULL, "Ring %s not found for port %d!\n", port_cfg->tx_ring, port_id);

			int ret = rte_eth_from_rings(port_cfg->name, &rx_ring, 1, &tx_ring, 1, rte_socket_id());
			PROX_PANIC(ret != 0, "Failed to create eth_dev from rings for port %d\n", port_id);

			port_cfg->port_conf.intr_conf.lsc = 0; /* Link state interrupt not supported for ring-backed ports */
/* Pretty-print a port's capabilities (driver, queue limits, speed mask and
 * decoded RX/TX offload capability bits) to the PROX log. Read-only. */
static void print_port_capa(struct prox_port_cfg *port_cfg)
	/* Recover the port index from the entry's position in the global table. */
	port_id = port_cfg - prox_port_cfg;
	plog_info("\t*** Initializing port %u ***\n", port_id);
	plog_info("\t\tPort name is set to %s\n", port_cfg->name);
	plog_info("\t\tPort max RX/TX queue is %u/%u\n", port_cfg->max_rxq, port_cfg->max_txq);
	plog_info("\t\tPort driver is %s\n", port_cfg->driver_name);
#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,0)
	plog_info("\t\tSupported speed mask = 0x%x\n", port_cfg->dev_info.speed_capa);
	if (port_cfg->max_link_speed != UINT32_MAX) {
		plog_info("\t\tHighest link speed capa = %d Mbps\n", port_cfg->max_link_speed);
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
	/* Decode RX offload capability bits one by one. */
	plog_info("\t\tRX offload capa = 0x%lx = ", port_cfg->dev_info.rx_offload_capa);
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP)
		plog_info("VLAN STRIP | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM)
		plog_info("IPV4 CKSUM | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM)
		plog_info("UDP CKSUM | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM)
		plog_info("TCP CKSUM | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
		plog_info("TCP LRO | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP)
		plog_info("QINQ STRIP | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
		plog_info("OUTER_IPV4_CKSUM | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_MACSEC_STRIP)
		plog_info("MACSEC STRIP | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_HEADER_SPLIT)
		plog_info("HEADER SPLIT | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_FILTER)
		plog_info("VLAN FILTER | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_EXTEND)
		plog_info("VLAN EXTEND | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME)
		plog_info("JUMBO FRAME | ");
	/* CRC_STRIP was replaced by KEEP_CRC in DPDK 18.11; probe both. */
#if defined(DEV_RX_OFFLOAD_CRC_STRIP)
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CRC_STRIP)
		plog_info("CRC STRIP | ");
#if defined(DEV_RX_OFFLOAD_KEEP_CRC)
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC)
		plog_info("KEEP CRC | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER)
		plog_info("SCATTER | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)
		plog_info("TIMESTAMP | ");
	if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
		plog_info("SECURITY ");

	/* Decode TX offload capability bits. */
	plog_info("\t\tTX offload capa = 0x%lx = ", port_cfg->dev_info.tx_offload_capa);
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT)
		plog_info("VLAN INSERT | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
		plog_info("IPV4 CKSUM | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM)
		plog_info("UDP CKSUM | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM)
		plog_info("TCP CKSUM | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM)
		plog_info("SCTP CKSUM | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
		/* NOTE(review): "TCP TS0" looks like a typo for "TCP TSO". */
		plog_info("TCP TS0 | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO)
		plog_info("UDP TSO | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
		plog_info("OUTER IPV4 CKSUM | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT)
		plog_info("QINQ INSERT | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
		/* NOTE(review): label says "VLAN" but the flag is VXLAN TNL TSO. */
		plog_info("VLAN TNL TSO | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO)
		plog_info("GRE TNL TSO | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO)
		plog_info("IPIP TNL TSO | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
		plog_info("GENEVE TNL TSO | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT)
		plog_info("MACSEC INSERT | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MT_LOCKFREE)
		plog_info("MT LOCKFREE | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MULTI_SEGS)
		plog_info("MULTI SEG | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
		plog_info("SECURITY | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO)
		plog_info("UDP TNL TSO | ");
	if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO)
		plog_info("IP TNL TSO | ");

	plog_info("\t\trx_queue_offload_capa = 0x%lx\n", port_cfg->dev_info.rx_queue_offload_capa);
	plog_info("\t\ttx_queue_offload_capa = 0x%lx\n", port_cfg->dev_info.tx_queue_offload_capa);
	plog_info("\t\tflow_type_rss_offloads = 0x%lx\n", port_cfg->dev_info.flow_type_rss_offloads);
	plog_info("\t\tdefault RX port conf: burst_size = %d, ring_size = %d, nb_queues = %d\n", port_cfg->dev_info.default_rxportconf.burst_size, port_cfg->dev_info.default_rxportconf.ring_size, port_cfg->dev_info.default_rxportconf.nb_queues);
	plog_info("\t\tdefault TX port conf: burst_size = %d, ring_size = %d, nb_queues = %d\n", port_cfg->dev_info.default_txportconf.burst_size, port_cfg->dev_info.default_txportconf.ring_size, port_cfg->dev_info.default_txportconf.nb_queues);
/* Derive port_cfg->max_link_speed (in Mbps) from the highest bit set in the
 * PMD's speed_capa mask; UINT32_MAX means "unknown". Skipped for virtio and
 * vmxnet3, whose reported capability masks are not trustworthy. */
static void get_max_link_speed(struct prox_port_cfg *port_cfg)
	port_cfg->max_link_speed = UINT32_MAX;

#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,0)
	// virtio and vmxnet3 reports fake max_link_speed
	if (strcmp(port_cfg->short_name, "vmxnet3") && strcmp(port_cfg->short_name, "virtio")) {
		// Get link_speed from highest capability from the port
		// This will be used by gen and lat for extrapolation purposes
		// The negotiated link_speed (as reported by rte_eth_link_get
		// or rte_eth_link_get_nowait) might be reported too late
		// and might result in wrong exrapolation, and hence should not be used
		// for extrapolation purposes
		if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_100G)
			port_cfg->max_link_speed = ETH_SPEED_NUM_100G;
		else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_56G)
			port_cfg->max_link_speed = ETH_SPEED_NUM_56G;
		else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_50G)
			port_cfg->max_link_speed = ETH_SPEED_NUM_50G;
		else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_40G)
			port_cfg->max_link_speed = ETH_SPEED_NUM_40G;
		else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_25G)
			port_cfg->max_link_speed = ETH_SPEED_NUM_25G;
		else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_20G)
			port_cfg->max_link_speed = ETH_SPEED_NUM_20G;
		else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_10G)
			port_cfg->max_link_speed = ETH_SPEED_NUM_10G;
		else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_5G)
			port_cfg->max_link_speed = ETH_SPEED_NUM_5G;
		else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_2_5G)
			port_cfg->max_link_speed = ETH_SPEED_NUM_2_5G;
		else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_1G)
			port_cfg->max_link_speed = ETH_SPEED_NUM_1G;
		/* Half- and full-duplex 100M/10M map to the same numeric speed. */
		else if (port_cfg->dev_info.speed_capa & (ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M))
			port_cfg->max_link_speed = ETH_SPEED_NUM_100M;
		else if (port_cfg->dev_info.speed_capa & (ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M))
			port_cfg->max_link_speed = ETH_SPEED_NUM_10M;
/* Bring one port from parsed configuration to running state: validate queue
 * counts, set MTU, configure RSS and RX/TX offloads (version-gated), clamp
 * descriptor counts to device limits, rte_eth_dev_configure(), set up all
 * TX then RX queues, start the port, then apply IP (vdev), link query,
 * promiscuous, per-queue stats mapping and multicast settings.
 * The order of these DPDK calls is significant; code kept verbatim. */
static void init_port(struct prox_port_cfg *port_cfg)
	/* Per-call pool name; first char is incremented to keep names unique. */
	static char dummy_pool_name[] = "0_dummy";
	struct rte_eth_link link;

	get_max_link_speed(port_cfg);
	print_port_capa(port_cfg);
	port_id = port_cfg - prox_port_cfg;
	PROX_PANIC(port_cfg->n_rxq == 0 && port_cfg->n_txq == 0,
		"\t\t port %u is enabled but no RX or TX queues have been configured", port_id);

	if (port_cfg->n_rxq == 0) {
		/* not receiving on this port */
		plog_info("\t\tPort %u had no RX queues, setting to 1\n", port_id);
		/* A TX-only port still needs a (dummy) pool for its mandatory RX queue. */
		uint32_t mbuf_size = TX_MBUF_SIZE;
		if (mbuf_size < port_cfg->min_rx_bufsize + RTE_PKTMBUF_HEADROOM + sizeof(struct rte_mbuf))
			mbuf_size = port_cfg->min_rx_bufsize + RTE_PKTMBUF_HEADROOM + sizeof(struct rte_mbuf);

		plog_info("\t\tAllocating dummy memory pool on socket %u with %u elements of size %u\n",
			port_cfg->socket, port_cfg->n_rxd, mbuf_size);
		port_cfg->pool[0] = rte_mempool_create(dummy_pool_name, port_cfg->n_rxd, mbuf_size,
			sizeof(struct rte_pktmbuf_pool_private),
			rte_pktmbuf_pool_init, NULL,
			prox_pktmbuf_init, 0,
			port_cfg->socket, 0);
		PROX_PANIC(port_cfg->pool[0] == NULL, "Failed to allocate dummy memory pool on socket %u with %u elements\n",
			port_cfg->socket, port_cfg->n_rxd);
		dummy_pool_name[0]++;

	// Most pmd should now support setting mtu
	if (port_cfg->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN > port_cfg->max_rx_pkt_len) {
		plog_info("\t\tMTU is too big for the port, reducing MTU from %d to %d\n", port_cfg->mtu, port_cfg->max_rx_pkt_len);
		port_cfg->mtu = port_cfg->max_rx_pkt_len;
	plog_info("\t\tSetting MTU size to %u for port %u ...\n", port_cfg->mtu, port_id);
	ret = rte_eth_dev_set_mtu(port_id, port_cfg->mtu);
		plog_err("\t\t\trte_eth_dev_set_mtu() failed on port %u: error %d\n", port_id, ret);

	if (port_cfg->n_txq == 0) {
		/* not sending on this port */
		plog_info("\t\tPort %u had no TX queues, setting to 1\n", port_id);

	if (port_cfg->n_rxq > 1) {
		// Enable RSS if multiple receive queues
		/* virtio does not support RSS; skipped here and logged below. */
		if (strcmp(port_cfg->short_name, "virtio")) {
			port_cfg->port_conf.rxmode.mq_mode |= ETH_MQ_RX_RSS;
			port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key = toeplitz_init_key;
			port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key_len = TOEPLITZ_KEY_LEN;
#if RTE_VERSION >= RTE_VERSION_NUM(2,0,0,0)
			port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP|ETH_RSS_UDP;
			port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4|ETH_RSS_NONF_IPV4_UDP;

	// Make sure that the requested RSS offload is supported by the PMD
#if RTE_VERSION >= RTE_VERSION_NUM(2,0,0,0)
	port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf &= port_cfg->dev_info.flow_type_rss_offloads;
	if (strcmp(port_cfg->short_name, "virtio")) {
		plog_info("\t\t Enabling RSS rss_hf = 0x%lx (requested 0x%llx, supported 0x%lx)\n", port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf, ETH_RSS_IP|ETH_RSS_UDP, port_cfg->dev_info.flow_type_rss_offloads);
		plog_info("\t\t Not enabling RSS on virtio port");

	// rxmode such as hw src strip
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
	/* DPDK >= 18.08: per-flag offload API (see CONFIGURE_RX_OFFLOAD). */
#if defined (DEV_RX_OFFLOAD_CRC_STRIP)
	CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_CRC_STRIP);
#if defined (DEV_RX_OFFLOAD_KEEP_CRC)
	CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_KEEP_CRC);
	CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_JUMBO_FRAME);
	CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_VLAN_STRIP);
	/* Pre-18.08: legacy rxmode bitfields. */
	if (port_cfg->requested_rx_offload & DEV_RX_OFFLOAD_CRC_STRIP) {
		port_cfg->port_conf.rxmode.hw_strip_crc = 1;
	if (port_cfg->requested_rx_offload & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		port_cfg->port_conf.rxmode.jumbo_frame = 1;

	// IPV4, UDP, SCTP Checksums
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
	CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_IPV4_CKSUM);
	CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_UDP_CKSUM);
	CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_VLAN_INSERT);
	/* Pre-18.08: txq_flags based offload control. */
	if ((port_cfg->dev_info.tx_offload_capa & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM)) == 0) {
		port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
		plog_info("\t\tDisabling TX offloads as pmd reports that it does not support them)\n");

	if (!strcmp(port_cfg->short_name, "vmxnet3")) {
		port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
		plog_info("\t\tDisabling SCTP offload on port %d as vmxnet3 does not support them\n", port_id);

#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
	CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_MULTI_SEGS);
	if (!strcmp(port_cfg->short_name, "vmxnet3")) {
		port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
		plog_info("\t\tDisabling TX multsegs on port %d as vmxnet3 does not support them\n", port_id);
	} else if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS)
		plog_info("\t\tDisabling TX multsegs on port %d\n", port_id);
		plog_info("\t\tEnabling TX multsegs on port %d\n", port_id);

	if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS)
		plog_info("\t\tEnabling No TX offloads on port %d\n", port_id);
		plog_info("\t\tTX offloads enabled on port %d\n", port_id);

#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
	CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_MBUF_FAST_FREE);
	if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT)
		plog_info("\t\tEnabling No refcnt on port %d\n", port_id);
		plog_info("\t\tRefcnt enabled on port %d\n", port_id);

	plog_info("\t\tConfiguring port %u... with %u RX queues and %u TX queues\n",
		port_id, port_cfg->n_rxq, port_cfg->n_txq);

	PROX_PANIC(port_cfg->n_rxq > port_cfg->max_rxq, "\t\t\tToo many RX queues (configuring %u, max is %u)\n", port_cfg->n_rxq, port_cfg->max_rxq);
	PROX_PANIC(port_cfg->n_txq > port_cfg->max_txq, "\t\t\tToo many TX queues (configuring %u, max is %u)\n", port_cfg->n_txq, port_cfg->max_txq);

	/* These PMDs do not support link state interrupts; force LSC off. */
	if (!strcmp(port_cfg->short_name, "ixgbe_vf") ||
		!strcmp(port_cfg->short_name, "virtio") ||
#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
		!strcmp(port_cfg->short_name, "i40e") ||
		!strcmp(port_cfg->short_name, "i40e_vf") ||
		!strcmp(port_cfg->short_name, "avp") || /* Wind River */
		!strcmp(port_cfg->driver_name, "") || /* NULL device */
		!strcmp(port_cfg->short_name, "vmxnet3")) {
		port_cfg->port_conf.intr_conf.lsc = 0;
		plog_info("\t\tDisabling link state interrupt for vmxnet3/VF/virtio (unsupported)\n");

	/* An explicit lsc setting in the config file wins over the default. */
	if (port_cfg->lsc_set_explicitely) {
		port_cfg->port_conf.intr_conf.lsc = port_cfg->lsc_val;
		plog_info("\t\tOverriding link state interrupt configuration to '%s'\n", port_cfg->lsc_val? "enabled" : "disabled");

	/* Clamp descriptor counts to the device limits cached in init_rte_dev(). */
	if (port_cfg->n_txd < port_cfg->min_tx_desc) {
		plog_info("\t\tNumber of TX descriptors is set to %d (minimum required for %s\n", port_cfg->min_tx_desc, port_cfg->short_name);
		port_cfg->n_txd = port_cfg->min_tx_desc;

	if (port_cfg->n_rxd < port_cfg->min_rx_desc) {
		plog_info("\t\tNumber of RX descriptors is set to %d (minimum required for %s\n", port_cfg->min_rx_desc, port_cfg->short_name);
		port_cfg->n_rxd = port_cfg->min_rx_desc;

	if (port_cfg->n_txd > port_cfg->max_tx_desc) {
		plog_info("\t\tNumber of TX descriptors is set to %d (maximum required for %s\n", port_cfg->max_tx_desc, port_cfg->short_name);
		port_cfg->n_txd = port_cfg->max_tx_desc;

	if (port_cfg->n_rxd > port_cfg->max_rx_desc) {
		plog_info("\t\tNumber of RX descriptors is set to %d (maximum required for %s\n", port_cfg->max_rx_desc, port_cfg->short_name);
		port_cfg->n_rxd = port_cfg->max_rx_desc;

	ret = rte_eth_dev_configure(port_id, port_cfg->n_rxq,
		port_cfg->n_txq, &port_cfg->port_conf);
	PROX_PANIC(ret < 0, "\t\t\trte_eth_dev_configure() failed on port %u: %s (%d)\n", port_id, strerror(-ret), ret);

	if (port_cfg->port_conf.intr_conf.lsc) {
		rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC, lsc_cb, NULL);

	plog_info("\t\tMAC address set to "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));

	/* initialize TX queues first */
	for (uint16_t queue_id = 0; queue_id < port_cfg->n_txq; ++queue_id) {
		plog_info("\t\tSetting up TX queue %u on socket %u with %u desc\n",
			queue_id, port_cfg->socket, port_cfg->n_txd);
		ret = rte_eth_tx_queue_setup(port_id, queue_id, port_cfg->n_txd,
			port_cfg->socket, &port_cfg->tx_conf);
		PROX_PANIC(ret < 0, "\t\t\trte_eth_tx_queue_setup() failed on port %u: error %d\n", port_id, ret);

	/* initialize RX queues */
	for (uint16_t queue_id = 0; queue_id < port_cfg->n_rxq; ++queue_id) {
		plog_info("\t\tSetting up RX queue %u on port %u on socket %u with %u desc (pool 0x%p)\n",
			queue_id, port_id, port_cfg->socket,
			port_cfg->n_rxd, port_cfg->pool[queue_id]);
		ret = rte_eth_rx_queue_setup(port_id, queue_id,
			port_cfg->socket, &port_cfg->rx_conf,
			port_cfg->pool[queue_id]);
		PROX_PANIC(ret < 0, "\t\t\trte_eth_rx_queue_setup() failed on port %u: error %s (%d)\n", port_id, strerror(-ret), ret);

	plog_info("\t\tStarting up port %u ...", port_id);
	ret = rte_eth_dev_start(port_id);

	PROX_PANIC(ret < 0, "\n\t\t\trte_eth_dev_start() failed on port %u: error %d\n", port_id, ret);
	plog_info(" done: ");

	/* tap/vdev ports get their kernel-side IP configured here. */
	if ((prox_port_cfg[port_id].ip) && (prox_port_cfg[port_id].is_vdev)) {
		set_ip_address(prox_port_cfg[port_id].name, &prox_port_cfg[port_id].ip, prox_port_cfg[port_id].prefix);

	/* Getting link status can be done without waiting if Link
	State Interrupt is enabled since in that case, if the link
	is recognized as being down, an interrupt will notify that */
	if (port_cfg->port_conf.intr_conf.lsc)
		rte_eth_link_get_nowait(port_id, &link);
		rte_eth_link_get(port_id, &link);

	port_cfg->link_up = link.link_status;
	port_cfg->link_speed = link.link_speed;

	if (link.link_status) {
		plog_info("Link Up - speed %'u Mbps - %s\n",
			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
			"full-duplex" : "half-duplex");
		plog_info("Link Down\n");

	if (port_cfg->promiscuous) {
		rte_eth_promiscuous_enable(port_id);
		plog_info("\t\tport %u in promiscuous mode\n", port_id);

	/* Per-queue stats mapping is not supported by these PMDs. */
	if (strcmp(port_cfg->short_name, "ixgbe_vf") &&
		strcmp(port_cfg->short_name, "i40e") &&
		strcmp(port_cfg->short_name, "i40e_vf") &&
		strcmp(port_cfg->short_name, "vmxnet3")) {
		for (uint8_t i = 0; i < port_cfg->n_rxq; ++i) {
			ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, i, i);
				plog_info("\t\trte_eth_dev_set_rx_queue_stats_mapping() failed: error %d\n", ret);
		for (uint8_t i = 0; i < port_cfg->n_txq; ++i) {
			ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, i, i);
				plog_info("\t\trte_eth_dev_set_tx_queue_stats_mapping() failed: error %d\n", ret);

	if (port_cfg->nb_mc_addr) {
		rte_eth_allmulticast_enable(port_id);
		/* Roll back allmulticast if the mc address list is rejected. */
		if ((ret = rte_eth_dev_set_mc_addr_list(port_id, port_cfg->mc_addr, port_cfg->nb_mc_addr)) != 0) {
			plog_err("\t\trte_eth_dev_set_mc_addr_list returns %d on port %u\n", ret, port_id);
			port_cfg->nb_mc_addr = 0;
			rte_eth_allmulticast_disable(port_id);
			plog_info("\t\tport %u NOT in multicast mode as failed to add mcast address\n", port_id);
			plog_info("\t\trte_eth_dev_set_mc_addr_list(%d addr) on port %u\n", port_cfg->nb_mc_addr, port_id);
			plog_info("\t\tport %u in multicast mode\n", port_id);
866 void init_port_all(void)
868 enum rte_proc_type_t proc_type;
869 proc_type = rte_eal_process_type();
870 if (proc_type == RTE_PROC_SECONDARY) {
871 plog_info("\tSkipping port initialization as secondary process\n");
874 uint8_t max_port_idx = prox_last_port_active() + 1;
876 for (uint8_t portid = 0; portid < max_port_idx; ++portid) {
877 if (!prox_port_cfg[portid].active) {
880 init_port(&prox_port_cfg[portid]);
884 void close_ports_atexit(void)
886 uint8_t max_port_idx = prox_last_port_active() + 1;
888 for (uint8_t portid = 0; portid < max_port_idx; ++portid) {
889 if (!prox_port_cfg[portid].active) {
892 plog_info("Closing port %u\n", portid);
893 rte_eth_dev_close(portid);
896 if (lcore_cfg == NULL)
899 struct lcore_cfg *lconf = NULL;
900 struct task_args *targ;
901 while (core_targ_next(&lconf, &targ, 0) == 0) {
903 rte_mempool_free(targ->pool);
904 plog_info("freeing pool %p\n", targ->pool);
910 void init_port_addr(void)
912 struct prox_port_cfg *port_cfg;
913 enum rte_proc_type_t proc_type;
916 for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) {
917 if (!prox_port_cfg[port_id].active) {
920 port_cfg = &prox_port_cfg[port_id];
922 switch (port_cfg->type) {
923 case PROX_PORT_MAC_HW:
924 rte_eth_macaddr_get(port_id, &port_cfg->eth_addr);
926 case PROX_PORT_MAC_RAND:
927 prox_rte_eth_random_addr(port_cfg->eth_addr.addr_bytes);
929 case PROX_PORT_MAC_SET:
930 proc_type = rte_eal_process_type();
931 if (proc_type == RTE_PROC_SECONDARY) {
932 plog_warn("\tport %u: unable to change port mac address as secondary process\n", port_id);
933 } else if ((rc = rte_eth_dev_default_mac_addr_set(port_id, &port_cfg->eth_addr)) != 0)
934 plog_warn("\tport %u: failed to set mac address. Error = %d\n", port_id, rc);
936 plog_info("Setting MAC to "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
942 int port_is_active(uint8_t port_id)
944 if (port_id > PROX_MAX_PORTS) {
945 plog_info("requested port is higher than highest supported port ID (%u)\n", PROX_MAX_PORTS);
949 struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
950 if (!port_cfg->active) {
951 plog_info("Port %u is not active\n", port_id);