2 // Copyright (c) 2010-2020 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
17 #include <rte_lcore.h>
19 #include <rte_hash_crc.h>
22 #include "task_base.h"
27 #include "handle_master.h"
28 #include "prox_port_cfg.h"
29 #include "packet_utils.h"
30 #include "prox_shared.h"
32 #include "hash_entry_types.h"
33 #include "prox_compat.h"
/* Parse the L2 header(s) of @pkt (up to @len bytes) and, for IPv4 packets,
 * store the destination address in *ip_dst (kept in network byte order --
 * the caller byte-swaps before the LPM lookup).
 * Stacked 802.1ad/802.1Q VLAN tags are skipped; MPLS and other ether types
 * are not resolved to an IP (see warning below).
 * NOTE(review): this view of the file is missing lines; callers treat a
 * non-zero return as "no IP found / non-IP packet" -- confirm against the
 * full source. */
static inline int find_ip(struct ether_hdr_arp *pkt, uint16_t len, uint32_t *ip_dst)
	prox_rte_vlan_hdr *vlan_hdr;
	prox_rte_ether_hdr *eth_hdr = (prox_rte_ether_hdr*)pkt;
	prox_rte_ipv4_hdr *ip;
	uint16_t ether_type = eth_hdr->ether_type;
	uint16_t l2_len = sizeof(prox_rte_ether_hdr);

	// Walk over any stacked VLAN headers (outer 802.1ad + inner 802.1Q),
	// bounds-checked against the packet length.
	while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(prox_rte_vlan_hdr) < len)) {
		vlan_hdr = (prox_rte_vlan_hdr *)((uint8_t *)pkt + l2_len);
		ether_type = vlan_hdr->eth_proto;
	// In case of MPLS, next hop MAC is based on MPLS, not destination IP
		plog_warn("Unsupported packet type %x - CRC might be wrong\n", ether_type);

	// Non-zero l2_len marks a recognized IPv4 ether type; make sure the
	// IPv4 header fits within the packet before dereferencing it.
	if (l2_len && (l2_len + sizeof(prox_rte_ipv4_hdr) <= len)) {
		prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr *)((uint8_t *)pkt + l2_len);
		// TODO: implement LPM => replace ip_dst by next hop IP DST
		*ip_dst = ip->dst_addr;
/* This implementation could be improved: instead of checking, each time we send a packet, whether we also need
   to send an ARP, we should only check whether the MAC is valid.
   We should check arp_update_time in the master process. This would also require the generating task to clear its ARP ring
   to avoid sending many ARPs when restarting after a long stop.
   We could also check for arp_timeout in the master so that the dataplane only has to check whether the MAC is available,
   but this would require either thread safety, or the exchange of information between the master and the generating core.
/* Register *ip_dst in @ip_hash and set up the matching @entries slot so the
 * caller can send an ARP request for it.
 * On success, *time is pointed at the entry's arp_update_time so the caller
 * can re-arm the refresh timer once the ARP is actually sent.
 * @nh is the next-hop index associated with this IP (the visible call sites
 * pass MAX_HOP_INDEX for directly-reachable destinations); presumably stored
 * into the entry in lines not visible in this view -- confirm.
 * When the hash is full, no ARP is sent: the reply could not be matched to
 * an entry anyway. */
static inline int add_key_and_send_arp(struct rte_hash *ip_hash, uint32_t *ip_dst, struct arp_table *entries, uint64_t tsc, uint64_t hz, uint32_t arp_update_time, prox_next_hop_index_type nh, uint64_t **time)
	int ret = rte_hash_add_key(ip_hash, (const void *)ip_dst);
	if (unlikely(ret < 0)) {
		// No reason to send ARP, as reply would be anyhow ignored
		plogx_err("Unable to add ip "IPv4_BYTES_FMT" in mac_hash\n", IP4(*ip_dst));
	// ret is the hash slot index for this IP; mirror it in the ARP table
	entries[ret].ip = *ip_dst;
	*time = &entries[ret].arp_update_time;
/* Decide what to do with an mbuf given the state of an ARP table @entry:
 * - MAC known and no refresh due: copy the MAC into the packet, send the mbuf;
 * - refresh due (tsc > arp_update_time): point *time at the entry's timer and
 *   request an ARP; if the MAC is still valid, also send the mbuf;
 * - MAC unknown and an ARP was sent recently: drop the mbuf and wait.
 * Returns one of the SEND_x / DROP codes used by the callers (some return
 * statements fall on lines not visible in this view). */
static inline int update_mac_and_send_mbuf(struct arp_table *entry, prox_rte_ether_addr *mac, uint64_t tsc, uint64_t hz, uint32_t arp_update_time, uint64_t **time)
	// Fast path: MAC known, and neither the refresh timer (arp_update_time)
	// nor the validity timer (arp_timeout) has expired.
	if (likely((tsc < entry->arp_update_time) && (tsc < entry->arp_timeout))) {
		memcpy(mac, &entry->mac, sizeof(prox_rte_ether_addr));
	} else if (tsc > entry->arp_update_time) {
		// long time since we have sent an arp, send arp
		*time = &entry->arp_update_time;
		if (tsc < entry->arp_timeout){
			// MAC is valid in the table => send also the mbuf
			memcpy(mac, &entry->mac, sizeof(prox_rte_ether_addr));
			return SEND_MBUF_AND_ARP;
		// MAC still unknown, or timed out => only send ARP
	// MAC is unknown and we already sent an ARP recently, drop mbuf and wait for ARP reply
/* Resolve and write the destination MAC of @mbuf for L3 mode, and decide
 * whether the caller should send the mbuf, send an ARP request, both, or
 * drop the packet while waiting for an ARP reply.
 * Three configurations are handled:
 *  1. an LPM routing table (l3->ipv4_lpm) with per-next-hop ARP entries;
 *  2. no table but a gateway: every packet uses the gateway MAC (l3->gw);
 *  3. neither: per-destination entries, searched linearly while fewer than
 *     4 IPs are tracked, then migrated into a hash table.
 * *ip_dst and *time are outputs: the destination IP found in the packet and
 * the arp_update_time slot the caller must re-arm when it sends the ARP.
 * NOTE(review): this view of the file is missing lines; the branch
 * structure annotated below is partially reconstructed. */
int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_dst, uint64_t **time, uint64_t tsc)
	const uint64_t hz = rte_get_tsc_hz();
	struct ether_hdr_arp *packet = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
	prox_rte_ether_addr *mac = &packet->ether_hdr.d_addr;
	prox_next_hop_index_type next_hop_index;
	// static => rate-limiting state for the "no route" log, shared by all calls
	static uint64_t last_tsc = 0, n_no_route = 0;

	struct l3_base *l3 = &(tbase->l3);

	// First find the next hop
	// A routing table was configured
	// If a gw (gateway_ipv4) is also specified, it is used as default gw only i.e. lowest priority (shortest prefix)
	// This is implemented automatically through lpm
	uint16_t len = rte_pktmbuf_pkt_len(mbuf);
	if (find_ip(packet, len, ip_dst) != 0) {
		// Unable to find IP address => non IP packet => send it as is
	// *ip_dst is network byte order; rte_lpm_lookup expects host order
	if (unlikely(rte_lpm_lookup(l3->ipv4_lpm, rte_bswap32(*ip_dst), &next_hop_index) != 0)) {
		// Prevent printing too many messages
		if (tsc > last_tsc + rte_get_tsc_hz()) {
			plog_err("No route to IP "IPv4_BYTES_FMT" (%ld times)\n", IP4(*ip_dst), n_no_route);
	struct arp_table *entry = &l3->next_hops[next_hop_index];
		return update_mac_and_send_mbuf(entry, mac, tsc, hz, l3->arp_update_time, time);
	// no next ip: this is a local route
	// Find IP in lookup table. Send ARP if not found
	int ret = rte_hash_lookup(l3->ip_hash, (const void *)ip_dst);
	if (unlikely(ret < 0)) {
		// IP not found, try to send an ARP
		return add_key_and_send_arp(l3->ip_hash, ip_dst, l3->arp_table, tsc, hz, l3->arp_update_time, MAX_HOP_INDEX, time);
	return update_mac_and_send_mbuf(&l3->arp_table[ret], mac, tsc, hz, l3->arp_update_time, time);

	// No Routing table specified: only a local ip and maybe a gateway
	// Old default behavior: if a gw is specified, ALL packets go to this gateway (even those we could send w/o the gw
	if (likely((l3->flags & FLAG_DST_MAC_KNOWN) && (tsc < l3->gw.arp_update_time) && (tsc < l3->gw.arp_timeout))) {
		// Gateway MAC known and fresh => just write it into the packet
		memcpy(mac, &l3->gw.mac, sizeof(prox_rte_ether_addr));
	} else if (tsc > l3->gw.arp_update_time) {
		// long time since we have successfully sent an arp, send arp
		// If sending ARP failed (ring full) then arp_update_time is not updated to avoid having to wait 1 sec to send ARP REQ again
		*time = &l3->gw.arp_update_time;
		if ((l3->flags & FLAG_DST_MAC_KNOWN) && (tsc < l3->gw.arp_timeout)){
			// MAC is valid in the table => send also the mbuf
			memcpy(mac, &l3->gw.mac, sizeof(prox_rte_ether_addr));
			return SEND_MBUF_AND_ARP;
		// MAC still unknown, or timed out => only send ARP
	// MAC is unknown and we already sent an ARP recently, drop mbuf and wait for ARP reply

	// Last configuration: no routing table and no gateway
	uint16_t len = rte_pktmbuf_pkt_len(mbuf);
	if (find_ip(packet, len, ip_dst) != 0) {
		// Unable to find IP address => non IP packet => send it as is
	// With fewer than 4 tracked destinations, a linear scan beats the hash
	if (likely(l3->n_pkts < 4)) {
		for (unsigned int idx = 0; idx < l3->n_pkts; idx++) {
			if (*ip_dst == l3->optimized_arp_table[idx].ip) {
				// IP address already in table
				return update_mac_and_send_mbuf(&l3->optimized_arp_table[idx], mac, tsc, hz, l3->arp_update_time, time);
		// IP address not found in table
		l3->optimized_arp_table[l3->n_pkts].ip = *ip_dst;
		*time = &l3->optimized_arp_table[l3->n_pkts].arp_update_time;

		if (l3->n_pkts < 4) {
		// We have too many IP addresses to search linearly; lets use hash table instead => copy all entries in hash table
		for (uint32_t idx = 0; idx < l3->n_pkts; idx++) {
			uint32_t ip = l3->optimized_arp_table[idx].ip;
			int ret = rte_hash_add_key(l3->ip_hash, (const void *)&ip);
				// This should not happen as few entries so far.
				// If it happens, we still send the ARP as easier:
				// If the ARP corresponds to this error, the ARP reply will be ignored
				// If ARP does not correspond to this error/ip, then ARP reply will be handled.
				plogx_err("Unable add ip "IPv4_BYTES_FMT" in mac_hash (already %d entries)\n", IP4(ip), idx);
			memcpy(&l3->arp_table[ret], &l3->optimized_arp_table[idx], sizeof(struct arp_table));

	// Find IP in lookup table. Send ARP if not found
	int ret = rte_hash_lookup(l3->ip_hash, (const void *)ip_dst);
	if (unlikely(ret < 0)) {
		// IP not found, try to send an ARP
		// NOTE(review): ret is negative here, so &l3->arp_table[ret] points
		// before the table; the parallel LPM branch above passes
		// l3->arp_table instead -- looks like a bug, confirm against the
		// full source.
		return add_key_and_send_arp(l3->ip_hash, ip_dst, &l3->arp_table[ret], tsc, hz, l3->arp_update_time, MAX_HOP_INDEX, time);
	return update_mac_and_send_mbuf(&l3->arp_table[ret], mac, tsc, hz, l3->arp_update_time, time);
/* One-time L3 initialisation for a task: create the ip -> ARP-entry hash
 * table, allocate the ARP table, hook up the control-plane packet handler,
 * and read gateway / ARP timer settings from the task arguments. */
void task_init_l3(struct task_base *tbase, struct task_args *targ)
	static char hash_name[30];
	// 4x head-room over MAX_ARP_ENTRIES keeps the hash table sparse
	uint32_t n_entries = MAX_ARP_ENTRIES * 4;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	// Hash table name must be unique per core/task pair
	sprintf(hash_name, "A%03d_%03d_mac_table", targ->lconf->id, targ->id);

	struct rte_hash_parameters hash_params = {
		.entries = n_entries,
		.key_len = sizeof(uint32_t),	// key is the IPv4 address
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
	tbase->l3.ip_hash = rte_hash_create(&hash_params);
	PROX_PANIC(tbase->l3.ip_hash == NULL, "Failed to set up ip hash table\n");

	tbase->l3.arp_table = (struct arp_table *)prox_zmalloc(n_entries * sizeof(struct arp_table), socket_id);
	PROX_PANIC(tbase->l3.arp_table == NULL, "Failed to allocate memory for %u entries in arp table\n", n_entries);
	// NOTE(review): prints sizeof(struct l3_base) but the entries allocated
	// above are struct arp_table -- the reported entry size looks wrong.
	plog_info("\tarp table, with %d entries of size %ld\n", n_entries, sizeof(struct l3_base));

	targ->lconf->ctrl_func_p[targ->task] = handle_ctrl_plane_pkts;
	targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq);
	tbase->l3.gw.ip = rte_cpu_to_be_32(targ->gateway_ipv4);
	tbase->flags |= TASK_L3;
	tbase->l3.core_id = targ->lconf->id;
	tbase->l3.task_id = targ->id;
	tbase->l3.tmaster = targ->tmaster;
	tbase->l3.seed = (uint)rte_rdtsc();
	// Fall back to defaults when the timers are left unconfigured (0)
	if (targ->arp_timeout != 0)
		tbase->l3.arp_timeout = targ->arp_timeout;
		tbase->l3.arp_timeout = DEFAULT_ARP_TIMEOUT;
	if (targ->arp_update_time != 0)
		tbase->l3.arp_update_time = targ->arp_update_time;
		tbase->l3.arp_update_time = DEFAULT_ARP_UPDATE_TIME;
/* Per-run L3 start: locate the reachable port, register the local IP with
 * the master task, optionally load an LPM routing table (+ next-hop list)
 * from the Lua configuration, and create the ARP mbuf pool.
 * NOTE(review): lines are missing from this view; nesting below is
 * partially reconstructed. */
void task_start_l3(struct task_base *tbase, struct task_args *targ)
	const int NB_ARP_MBUF = 1024;
	const int ARP_MBUF_SIZE = 2048;
	const int NB_CACHE_ARP_MBUF = 256;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	struct prox_port_cfg *port = find_reachable_port(targ);
	if (port && (tbase->l3.arp_pool == NULL)) {
		static char name[] = "arp0_pool";
		tbase->l3.reachable_port_id = port - prox_port_cfg;
		if (targ->local_ipv4) {
			// targ->local_ipv4 is big-endian; keep the local copy in host order
			tbase->local_ipv4 = rte_be_to_cpu_32(targ->local_ipv4);
			register_ip_to_ctrl_plane(tbase->l3.tmaster, tbase->local_ipv4, tbase->l3.reachable_port_id, targ->lconf->id, targ->id);
		if (strcmp(targ->route_table, "") != 0) {
			// NOTE(review): the panic message reads "will route table" --
			// presumably meant "when a route table is specified".
			PROX_PANIC(tbase->local_ipv4 == 0, "missing local_ipv4 will route table is specified in L3 mode\n");

			// LPM might be modified runtime => do not share with other cores
			ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm);
			PROX_PANIC(ret, "Failed to load IPv4 LPM:\n%s\n", get_lua_to_errors());

			tbase->l3.ipv4_lpm = lpm->rte_lpm;
			tbase->l3.next_hops = prox_zmalloc(sizeof(*tbase->l3.next_hops) * MAX_HOP_INDEX, socket_id);
			PROX_PANIC(tbase->l3.next_hops == NULL, "Could not allocate memory for next hop\n");

			for (uint32_t i = 0; i < MAX_HOP_INDEX; i++) {
				if (!lpm->next_hops[i].ip_dst)
				tbase->l3.next_hops[i].ip = rte_bswap32(lpm->next_hops[i].ip_dst);
				int tx_port = lpm->next_hops[i].mac_port.out_idx;
				// gen only supports one port right now .... hence port = 0
				if ((tx_port > targ->nb_txports - 1) && (tx_port > targ->nb_txrings - 1)) {
					PROX_PANIC(1, "Routing Table contains port %d but only %d tx port/ %d ring:\n", tx_port, targ->nb_txports, targ->nb_txrings);
			plog_info("Using routing table %s in l3 mode, with %d gateways\n", targ->route_table, tbase->l3.nb_gws);

			// Last but one "next_hop_index" is not a gateway but direct routes
			tbase->l3.next_hops[tbase->l3.nb_gws].ip = 0;
			// NOTE(review): targ->local_ipv4 appears to be big-endian here,
			// while the LPM lookup in write_dst_mac byte-swaps to host
			// order -- verify the byte order passed to rte_lpm_add.
			ret = rte_lpm_add(tbase->l3.ipv4_lpm, targ->local_ipv4, targ->local_prefix, tbase->l3.nb_gws++);
			PROX_PANIC(ret, "Failed to add local_ipv4 "IPv4_BYTES_FMT"/%d to lpm\n", IP4(tbase->local_ipv4), targ->local_prefix);
			// Last "next_hop_index" is default gw
			tbase->l3.next_hops[tbase->l3.nb_gws].ip = rte_bswap32(targ->gateway_ipv4);
			if (targ->gateway_ipv4) {
				// /0 prefix => matches everything => lowest-priority default route
				ret = rte_lpm_add(tbase->l3.ipv4_lpm, targ->gateway_ipv4, 0, tbase->l3.nb_gws++);
				PROX_PANIC(ret, "Failed to add gateway_ipv4 "IPv4_BYTES_FMT"/%d to lpm\n", IP4(tbase->l3.gw.ip), 0);
		master_init_vdev(tbase->l3.tmaster, tbase->l3.reachable_port_id, targ->lconf->id, targ->id);

		// Pool for ARP / control-plane packets generated by this task
		struct rte_mempool *ret = rte_mempool_create(name, NB_ARP_MBUF, ARP_MBUF_SIZE, NB_CACHE_ARP_MBUF,
			sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0,
		PROX_PANIC(ret == NULL, "Failed to allocate ARP memory pool on socket %u with %u elements\n",
			rte_socket_id(), NB_ARP_MBUF);
		plog_info("\t\tMempool %p (%s) size = %u * %u cache %u, socket %d\n", ret, name, NB_ARP_MBUF,
			ARP_MBUF_SIZE, NB_CACHE_ARP_MBUF, rte_socket_id());
		tbase->l3.arp_pool = ret;
/* Runtime update of the gateway IP; invalidates the known-MAC flag so the
 * gateway MAC is re-resolved via ARP.
 * NOTE(review): clears FLAG_DST_MAC_KNOWN on tbase->flags, while the rest
 * of this file sets/tests it on l3->flags -- verify this clears the right
 * field. */
void task_set_gateway_ip(struct task_base *tbase, uint32_t ip)
	tbase->l3.gw.ip = ip;
	tbase->flags &= ~FLAG_DST_MAC_KNOWN;
/* Runtime update of the task's local IPv4 address.
 * NOTE(review): presumably host byte order, matching the rte_be_to_cpu_32
 * conversion in task_start_l3 -- confirm against callers. */
void task_set_local_ip(struct task_base *tbase, uint32_t ip)
	tbase->local_ipv4 = ip;
/* Force an ARP refresh for @ip: zero the matching entry's arp_update_time
 * so the next packet to that destination triggers a new ARP request
 * (arp_timeout is untouched, so regular traffic keeps flowing meanwhile).
 * Candidate locations checked: the hash-backed arp_table, the gateway,
 * then the small linear table (n_pkts < 4).
 * NOTE(review): lines are missing from this view; the exact branch
 * conditions are partially reconstructed. */
static void reset_arp_update_time(struct l3_base *l3, uint32_t ip)
	plogx_dbg("MAC entry for IP "IPv4_BYTES_FMT" timeout in kernel\n", IP4(ip));

	int ret = rte_hash_lookup(l3->ip_hash, (const void *)&ip);
	l3->arp_table[ret].arp_update_time = 0;
	} else if (ip == l3->gw.ip) {
		l3->gw.arp_update_time = 0;
	} else if (l3->n_pkts < 4) {
		// Few destinations tracked => the entry lives in the linear table
		for (idx = 0; idx < l3->n_pkts; idx++) {
			uint32_t ip_dst = l3->optimized_arp_table[idx].ip;
		if (idx < l3->n_pkts) {
			// Loop exited early => entry found at idx
			l3->optimized_arp_table[idx].arp_update_time = 0;
	int ret = rte_hash_lookup(l3->ip_hash, (const void *)&ip);
	l3->arp_table[ret].arp_update_time = 0;
/* Return the next_hops[] index for @gw_ip, appending a new entry when the
 * gateway is not yet known. Returns MAX_HOP_INDEX when the table is full
 * (callers treat values >= MAX_HOP_INDEX as an error). */
static prox_next_hop_index_type get_nh_index(struct task_base *tbase, uint32_t gw_ip)
	// Check if gateway already exists
	for (prox_next_hop_index_type i = 0; i < tbase->l3.nb_gws; i++) {
		if (tbase->l3.next_hops[i].ip == gw_ip) {
	// Not found: append it if there is room left in the table
	if (tbase->l3.nb_gws < MAX_HOP_INDEX) {
		tbase->l3.next_hops[tbase->l3.nb_gws].ip = gw_ip;
		return tbase->l3.nb_gws - 1;
	return MAX_HOP_INDEX;
409 void handle_ctrl_plane_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
412 const uint64_t hz = rte_get_tsc_hz();
413 uint32_t ip, ip_dst, idx, gateway_ip, prefix;
414 prox_next_hop_index_type gateway_index;
415 int j, ret, modified_route;
417 prox_rte_ether_hdr *hdr;
418 struct ether_hdr_arp *hdr_arp;
419 struct l3_base *l3 = &tbase->l3;
420 uint64_t tsc= rte_rdtsc();
421 uint64_t arp_timeout = l3->arp_timeout * hz / 1000;
423 prox_rte_ipv4_hdr *pip;
424 prox_rte_udp_hdr *udp_hdr;
426 for (j = 0; j < n_pkts; ++j) {
429 for (j = 0; j < n_pkts; ++j) {
430 PREFETCH0(rte_pktmbuf_mtod(mbufs[j], void *));
433 for (j = 0; j < n_pkts; ++j) {
436 out[0] = OUT_HANDLED;
437 command = mbufs[j]->udata64 & 0xFFFF;
438 plogx_dbg("\tReceived %s mbuf %p\n", actions_string[command], mbufs[j]);
440 case ROUTE_ADD_FROM_CTRL:
441 ip = ctrl_ring_get_ip(mbufs[j]);
442 gateway_ip = ctrl_ring_get_gateway_ip(mbufs[j]);
443 prefix = ctrl_ring_get_prefix(mbufs[j]);
444 gateway_index = get_nh_index(tbase, gateway_ip);
445 if (gateway_index >= MAX_HOP_INDEX) {
446 plog_err("Unable to find or define gateway index - too many\n");
449 modified_route = rte_lpm_is_rule_present(tbase->l3.ipv4_lpm, rte_bswap32(ip), prefix, &nh);
450 ret = rte_lpm_add(tbase->l3.ipv4_lpm, rte_bswap32(ip), prefix, gateway_index);
452 plog_err("Failed to add route to "IPv4_BYTES_FMT"/%d using "IPv4_BYTES_FMT"(index = %d)\n", IP4(ip), prefix, IP4(gateway_ip), gateway_index);
453 } else if (modified_route)
454 plogx_dbg("Modified route to "IPv4_BYTES_FMT"/%d using "IPv4_BYTES_FMT"(index = %d) (was using "IPv4_BYTES_FMT"(index = %d)\n", IP4(ip), prefix, IP4(gateway_ip), gateway_index, IP4(tbase->l3.next_hops[nh].ip), nh);
456 plogx_dbg("Added new route to "IPv4_BYTES_FMT"/%d using "IPv4_BYTES_FMT"(index = %d)\n", IP4(ip), prefix, IP4(gateway_ip), gateway_index);
460 case ROUTE_DEL_FROM_CTRL:
461 ip = ctrl_ring_get_ip(mbufs[j]);
462 prefix = ctrl_ring_get_prefix(mbufs[j]);
464 ret = rte_lpm_is_rule_present(tbase->l3.ipv4_lpm, rte_bswap32(ip), prefix, &nh);
466 ret = rte_lpm_delete(tbase->l3.ipv4_lpm, rte_bswap32(ip), prefix);
468 plog_err("Failed to add rule\n");
470 plog_info("Deleting route to "IPv4_BYTES_FMT"/%d\n", IP4(ip), prefix);
474 case UPDATE_FROM_CTRL:
475 hdr_arp = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr_arp *);
476 ip = (mbufs[j]->udata64 >> 32) & 0xFFFFFFFF;
478 if (prox_rte_is_zero_ether_addr(&hdr_arp->arp.data.sha)) {
479 // MAC timeout or deleted from kernel table => reset update_time
480 // This will cause us to send new ARP request
481 // However, as arp_timeout not touched, we should continue sending our regular IP packets
482 reset_arp_update_time(l3, ip);
485 plogx_dbg("\tUpdating MAC entry for IP "IPv4_BYTES_FMT" with MAC "MAC_BYTES_FMT"\n",
486 IP4(ip), MAC_BYTES(hdr_arp->arp.data.sha.addr_bytes));
490 struct arp_table *entry;
491 ret = rte_hash_add_key(l3->ip_hash, (const void *)&ip);
493 plogx_info("Unable add ip "IPv4_BYTES_FMT" in mac_hash\n", IP4(ip));
494 } else if ((nh = l3->arp_table[ret].nh) != MAX_HOP_INDEX) {
495 entry = &l3->next_hops[nh];
496 memcpy(&entry->mac, &(hdr_arp->arp.data.sha), sizeof(prox_rte_ether_addr));
497 entry->arp_timeout = tsc + arp_timeout;
498 update_arp_update_time(l3, &entry->arp_update_time, l3->arp_update_time);
500 memcpy(&l3->arp_table[ret].mac, &(hdr_arp->arp.data.sha), sizeof(prox_rte_ether_addr));
501 l3->arp_table[ret].arp_timeout = tsc + arp_timeout;
502 update_arp_update_time(l3, &l3->arp_table[ret].arp_update_time, l3->arp_update_time);
505 else if (ip == l3->gw.ip) {
506 // MAC address of the gateway
507 memcpy(&l3->gw.mac, &hdr_arp->arp.data.sha, 6);
508 l3->flags |= FLAG_DST_MAC_KNOWN;
509 l3->gw.arp_timeout = tsc + arp_timeout;
510 update_arp_update_time(l3, &l3->gw.arp_update_time, l3->arp_update_time);
511 } else if (l3->n_pkts < 4) {
// Few packets tracked - should be faster to loop through them than using a hash table
513 for (idx = 0; idx < l3->n_pkts; idx++) {
514 ip_dst = l3->optimized_arp_table[idx].ip;
518 if (idx < l3->n_pkts) {
519 memcpy(&l3->optimized_arp_table[idx].mac, &(hdr_arp->arp.data.sha), sizeof(prox_rte_ether_addr));
520 l3->optimized_arp_table[idx].arp_timeout = tsc + arp_timeout;
521 update_arp_update_time(l3, &l3->optimized_arp_table[idx].arp_update_time, l3->arp_update_time);
524 ret = rte_hash_add_key(l3->ip_hash, (const void *)&ip);
526 plogx_info("Unable add ip "IPv4_BYTES_FMT" in mac_hash\n", IP4(ip));
528 memcpy(&l3->arp_table[ret].mac, &(hdr_arp->arp.data.sha), sizeof(prox_rte_ether_addr));
529 l3->arp_table[ret].arp_timeout = tsc + arp_timeout;
530 update_arp_update_time(l3, &l3->arp_table[ret].arp_update_time, l3->arp_update_time);
535 case ARP_REPLY_FROM_CTRL:
536 case ARP_REQ_FROM_CTRL:
538 // tx_ctrlplane_pkt does not drop packets
539 plogx_dbg("\tForwarding (ARP) packet from master\n");
540 tbase->aux->tx_ctrlplane_pkt(tbase, &mbufs[j], 1, out);
541 TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
545 // tx_ctrlplane_pkt does not drop packets
546 plogx_dbg("\tForwarding (PING) packet from master\n");
547 tbase->aux->tx_ctrlplane_pkt(tbase, &mbufs[j], 1, out);
548 TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
551 // Drop Pseudo packets sent to generate ARP requests
552 // There are other IPv4 packets sent from TAP which we cannot delete e.g. BGP packets
554 hdr = rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *);
555 if (hdr->ether_type == ETYPE_IPv4) {
556 pip = (prox_rte_ipv4_hdr *)(hdr + 1);
557 } else if (hdr->ether_type == ETYPE_VLAN) {
558 prox_rte_vlan_hdr *vlan = (prox_rte_vlan_hdr *)(hdr + 1);
559 vlan = (prox_rte_vlan_hdr *)(hdr + 1);
560 if (vlan->eth_proto == ETYPE_IPv4) {
561 pip = (prox_rte_ipv4_hdr *)(vlan + 1);
564 if (pip && (pip->next_proto_id == IPPROTO_UDP)) {
565 udp_hdr = (prox_rte_udp_hdr *)(pip + 1);
566 if ((udp_hdr->dst_port == rte_cpu_to_be_16(PROX_PSEUDO_PKT_PORT)) &&
567 (udp_hdr->src_port == rte_cpu_to_be_16(PROX_PSEUDO_PKT_PORT)) &&
568 (rte_be_to_cpu_16(udp_hdr->dgram_len) == 8)) {
569 plogx_dbg("Dropping PROX packet\n");
575 uint16_t src_port = 0, dst_port = 0, len = 0;
577 src_port = udp_hdr->src_port;
578 dst_port = udp_hdr->dst_port;
579 len = rte_be_to_cpu_16(udp_hdr->dgram_len);
581 plogx_dbg("tForwarding TAP packet from master. Type = %x, pip=%p, udp = %p, udp = {src = %x, dst = %x, len = %d}\n", hdr->ether_type, pip, udp_hdr, src_port, dst_port,len );
583 // tx_ctrlplane_pkt does not drop packets
584 tbase->aux->tx_ctrlplane_pkt(tbase, &mbufs[j], 1, out);
585 TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);