2 // Copyright (c) 2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
19 * Pipeline VFW BE Implementation.
21 * Implementation of Pipeline VFW Back End (BE).
22 * Responsible for packet processing.
36 #include <rte_common.h>
37 #include <rte_malloc.h>
38 #include <rte_ether.h>
39 #include <rte_ethdev.h>
43 #include <rte_byteorder.h>
45 #include <rte_table_lpm.h>
46 #include <rte_table_hash.h>
47 #include <rte_table_array.h>
48 #include <rte_table_acl.h>
49 #include <rte_table_stub.h>
50 #include <rte_timer.h>
51 #include <rte_cycles.h>
52 #include <rte_pipeline.h>
53 #include <rte_spinlock.h>
54 #include <rte_prefetch.h>
55 #include "pipeline_actions_common.h"
56 #include "hash_func.h"
57 #include "pipeline_vfw.h"
58 #include "pipeline_vfw_be.h"
59 #include "rte_cnxn_tracking.h"
60 #include "pipeline_arpicmp_be.h"
61 #include "vnf_common.h"
62 #include "vnf_define.h"
65 #include "lib_icmpv6.h"
66 #include "pipeline_common_fe.h"
/* Global runtime switch: when non-zero, packets failing the filter checks are
 * actually dropped; when zero the filter runs but all packets pass through
 * (see rte_vfw_ipv4/ipv6_packet_filter_and_process return paths). */
70 uint8_t firewall_flag = 1;
/* Global runtime switch enabling the connection-tracking library
 * (rte_cnxn_tracking). NOTE(review): no user visible in this chunk — verify
 * against the rest of the file. */
72 uint8_t cnxn_tracking_is_active = 1;
74 * A structure defining the VFW pipeline input port per thread data.
/* Per-input-port argument block handed to VFW in-port handlers:
 * the owning pipeline plus the per-thread connection tracker. */
76 struct vfw_ports_in_args {
77 struct pipeline *pipe;              /* back-pointer to owning pipeline */
78 struct rte_ct_cnxn_tracker *cnxn_tracker; /* per-thread cnxn tracker state */
79 } __rte_cache_aligned;
81 * A structure defining the VFW pipeline per thread data.
/* NOTE(review): the opening "struct pipeline_vfw {" line (and some members)
 * are missing from this capture of the file — the fields below are the
 * visible interior of that per-thread pipeline structure. */
85 pipeline_msg_req_handler custom_handlers[PIPELINE_VFW_MSG_REQS];
87 struct rte_ct_cnxn_tracker *cnxn_tracker;  /* per-thread connection tracker */
88 struct rte_VFW_counter_block *counters;    /* stats block for this instance */
89 struct rte_mbuf *pkt_buffer[PKT_BUFFER_SIZE];
90 struct lib_acl *plib_acl;                  /* ACL rules for this pipeline */
91 /* timestamp retrieved during in-port computations */
/* Mapping from pipeline port index to physical link / out-port ids. */
95 uint8_t links_map[PIPELINE_MAX_PORT_IN];
96 uint8_t outport_id[PIPELINE_MAX_PORT_IN];
97 /* Local ARP & ND Tables */
98 struct lib_arp_route_table_entry
99 local_lib_arp_route_table[MAX_ARP_RT_ENTRY];
100 uint8_t local_lib_arp_route_ent_cnt;
101 struct lib_nd_route_table_entry
102 local_lib_nd_route_table[MAX_ND_RT_ENTRY];
103 uint8_t local_lib_nd_route_ent_cnt;
105 } __rte_cache_aligned;
107 * A structure defining the mbuf meta data for VFW.
/* Stored in the mbuf headroom at META_DATA_OFFSET; read by the pipeline
 * output stage (RTE_PIPELINE_ACTION_PORT_META). */
109 struct mbuf_tcp_meta_data {
110 /* output port stored for RTE_PIPELINE_ACTION_PORT_META */
111 uint32_t output_port;
112 struct rte_mbuf *next; /* next pointer for chained buffers */
113 } __rte_cache_aligned;
115 #define DONT_CARE_TCP_PACKET 0
116 #define IS_NOT_TCP_PACKET 0
117 #define IS_TCP_PACKET 1
/* Byte offsets into the mbuf: metadata lives at META_DATA_OFFSET, packet
 * data begins after the (assumed 128-byte) headroom. */
119 #define META_DATA_OFFSET 128
121 #define RTE_PKTMBUF_HEADROOM 128 /* where is this defined ? */
122 #define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
123 #define ETH_HDR_SIZE 14
/* IP_START + 9 = offset of the IPv4 protocol field (no IP options assumed). */
124 #define PROTOCOL_START (IP_START + 9)
126 #define TCP_START (IP_START + 20)
127 #define RTE_LB_PORT_OFFSET 204 /* TODO: Need definition in LB header */
128 #define TCP_START_IPV6 (IP_START + 40)
129 #define PROTOCOL_START_IPV6 (IP_START + 6)
130 #define IP_HDR_DSCP_OFST 1
132 #define TCP_PROTOCOL 6
133 #define UDP_PROTOCOL 17
/* Dispositions for packets queued while awaiting ARP resolution. */
135 #define DELETE_BUFFERED_PACKETS 0
136 #define FORWARD_BUFFERED_PACKETS 1
140 #define IPv4_HEADER_SIZE 20
141 #define IPv6_HEADER_SIZE 40
143 #define IP_VERSION_4 4
144 #define IP_VERSION_6 6
/* IPv6 fixed-header field offsets (40-byte base header, no extensions). */
147 #define IP_HDR_SIZE_IPV6 40
148 #define IP_HDR_DSCP_OFST_IPV6 0
149 #define IP_HDR_LENGTH_OFST_IPV6 4
150 #define IP_HDR_PROTOCOL_OFST_IPV6 6
151 #define IP_HDR_DST_ADR_OFST_IPV6 24
152 #define MAX_NUM_LOCAL_MAC_ADDRESS 16
153 /** The counter table for VFW pipeline per thread data.*/
/* NOTE(review): the alignment attribute / terminating line of this array
 * declaration is missing from this capture. */
154 struct rte_VFW_counter_block rte_vfw_counter_table[MAX_VFW_INSTANCES]
/* Highest counter-block slot handed out so far; -1 means none in use yet. */
156 int rte_VFW_hi_counter_block_in_use = -1;
158 /* a spin lock used during vfw initialization only */
159 rte_spinlock_t rte_VFW_init_lock = RTE_SPINLOCK_INITIALIZER;
/* Double-buffered ACL action tables: one active, one standby, swapped on
 * rule updates so the datapath never reads a half-written table. */
162 struct pipeline_action_key *action_array_a;
163 struct pipeline_action_key *action_array_b;
164 struct pipeline_action_key *action_array_active;
165 struct pipeline_action_key *action_array_standby;
166 uint32_t action_array_size;
167 struct action_counter_block
168 action_counter_table[MAX_VFW_INSTANCES][action_array_max]
171 * Pipeline table strategy for firewall. Unfortunately, there does not seem to
172 * be any use for the built-in table lookup of ip_pipeline for the firewall.
173 * The main table requirement of the firewall is the hash table to maintain
174 * connection info, but that is implemented separately in the connection
175 * tracking library. So a "dummy" table lookup will be performed.
176 * TODO: look into "stub" table and see if that can be used
177 * to avoid useless table lookup
179 /***** ARP local cache *****/
/* Per-out-port validity flags for the cached local MACs below;
 * read by local_dest_mac_present(). */
181 uint8_t link_hw_laddr_valid[MAX_NUM_LOCAL_MAC_ADDRESS] = {
182 0, 0, 0, 0, 0, 0, 0, 0,
183 0, 0, 0, 0, 0, 0, 0, 0
/* Locally cached destination MAC per out-port, all-zero until resolved. */
186 static struct ether_addr link_hw_laddr[MAX_NUM_LOCAL_MAC_ADDRESS] = {
187 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
188 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
189 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
190 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
191 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
192 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
193 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
194 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
195 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
196 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
197 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
198 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
199 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
200 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
201 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
202 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }
/* Bitmask of packets parked in the ARP queue awaiting resolution;
 * set by the pkt*_work_vfw_arp_* handlers below. */
205 uint64_t arp_pkts_mask;
207 /* Start TSC measurement */
208 /* Prefetch counters and pipe before this function */
/* Records the TSC at pipeline entry and accumulates the time spent outside
 * the pipeline (entry minus previous exit) into external_time_sum.
 * exit_timestamp == 0 means the previous batch was skipped (see
 * end_tsc_measure), so no external interval is added. */
209 static inline void start_tsc_measure(struct pipeline_vfw *vfw_pipe) {
210 vfw_pipe->counters->entry_timestamp = rte_get_tsc_cycles();
211 if (likely(vfw_pipe->counters->exit_timestamp))
212 vfw_pipe->counters->external_time_sum +=
213 vfw_pipe->counters->entry_timestamp -
214 vfw_pipe->counters->exit_timestamp;
217 /* End TSC measurement */
/* Records the TSC at pipeline exit and accumulates the in-pipeline interval
 * into internal_time_sum. Batches of <= 1 packet are excluded from timing
 * (exit_timestamp reset to 0) because tiny batches skew the averages.
 * NOTE(review): the n_pkts parameter line is missing from this capture. */
218 static inline void end_tsc_measure(
219 struct pipeline_vfw *vfw_pipe,
222 if (likely(n_pkts > 1)) {
223 vfw_pipe->counters->exit_timestamp = rte_get_tsc_cycles();
224 vfw_pipe->counters->internal_time_sum +=
225 vfw_pipe->counters->exit_timestamp -
226 vfw_pipe->counters->entry_timestamp;
227 vfw_pipe->counters->time_measurements++;
229 /* small counts skew results, ignore */
230 vfw_pipe->counters->exit_timestamp = 0;
234 //static struct ether_addr *get_local_link_hw_addr(uint8_t out_port)
236 // return &link_hw_laddr[out_port];
/* Returns non-zero when a locally cached destination MAC exists for the
 * given out-port (see link_hw_laddr_valid above). */
239 static uint8_t local_dest_mac_present(uint8_t out_port)
241 return link_hw_laddr_valid[out_port];
/* Looks up the next hop for an IPv4 destination in this thread's local copy
 * of the ARP route table (longest-match is NOT implied; first mask match
 * wins). On a hit, writes the out-port and next-hop IP through the out
 * parameters. NOTE(review): several parameter and return lines are missing
 * from this capture — presumably (ip, *port, *nhip) with a found/not-found
 * return; confirm against the full source. */
244 static uint32_t local_get_nh_ipv4(
248 struct pipeline_vfw *vfw_pipe)
252 for (i = 0; i < vfw_pipe->local_lib_arp_route_ent_cnt; i++) {
253 if (((vfw_pipe->local_lib_arp_route_table[i].ip &
254 vfw_pipe->local_lib_arp_route_table[i].mask) ==
255 (ip & vfw_pipe->local_lib_arp_route_table[i].mask))) {
256 *port = vfw_pipe->local_lib_arp_route_table[i].port;
258 *nhip = vfw_pipe->local_lib_arp_route_table[i].nh;
/* Copies the global ARP route entry for dest_if into this thread's local
 * table so later lookups avoid touching shared state. Appends at
 * local_lib_arp_route_ent_cnt and bumps the count.
 * NOTE(review): no bounds check on the local table is visible here —
 * confirm MAX_ARP_RT_ENTRY cannot be exceeded. */
265 static void do_local_nh_ipv4_cache(uint32_t dest_if,
266 struct pipeline_vfw *vfw_pipe)
269 /* Search for the entry and do local copy */
272 for (i = 0; i < MAX_ARP_RT_ENTRY; i++) {
273 if (lib_arp_route_table[i].port == dest_if) {
275 struct lib_arp_route_table_entry *lentry =
277 local_lib_arp_route_table[vfw_pipe->
278 local_lib_arp_route_ent_cnt];
280 lentry->ip = lib_arp_route_table[i].ip;
281 lentry->mask = lib_arp_route_table[i].mask;
282 lentry->port = lib_arp_route_table[i].port;
283 lentry->nh = lib_arp_route_table[i].nh;
285 vfw_pipe->local_lib_arp_route_ent_cnt++;
/* IPv6 analogue of local_get_nh_ipv4: walks the local ND route table,
 * expands each entry's prefix length to a byte mask, and compares the masked
 * destination against the masked route address (byte by byte, with
 * depthflags/depthflags1 counting matching mask bytes on each side).
 * On a hit, writes the out-port and copies the 16-byte next-hop address.
 * NOTE(review): parameter lines (ip, port, nhipv6) are missing from this
 * capture — confirm the exact signature against the full source. */
291 static uint32_t local_get_nh_ipv6(
295 struct pipeline_vfw *vfw_pipe)
297 uint8_t netmask_ipv6[IPV6_ADD_SIZE], netip_nd[IPV6_ADD_SIZE],
298 netip_in[IPV6_ADD_SIZE];
299 uint8_t i = 0, j = 0, k = 0, l = 0, depthflags = 0, depthflags1 = 0;
300 memset(netmask_ipv6, 0, sizeof(netmask_ipv6));
301 memset(netip_nd, 0, sizeof(netip_nd));
302 memset(netip_in, 0, sizeof(netip_in));
304 for (i = 0; i < vfw_pipe->local_lib_nd_route_ent_cnt; i++) {
/* Expand this route's prefix length into netmask_ipv6. */
306 convert_prefixlen_to_netmask_ipv6(
307 vfw_pipe->local_lib_nd_route_table[i].depth,
/* Mask the route's own address. */
310 for (k = 0; k < IPV6_ADD_SIZE; k++)
311 if (vfw_pipe->local_lib_nd_route_table[i].ipv6[k] &
314 netip_nd[k] = vfw_pipe->
315 local_lib_nd_route_table[i].ipv6[k];
/* Mask the destination address being looked up. */
318 for (l = 0; l < IPV6_ADD_SIZE; l++)
319 if (ip[l] & netmask_ipv6[l]) {
325 if ((depthflags == depthflags1) && (memcmp(netip_nd, netip_in,
326 sizeof(netip_nd)) == 0)) {
328 *port = vfw_pipe->local_lib_nd_route_table[i].port;
330 for (j = 0; j < IPV6_ADD_SIZE; j++)
332 local_lib_nd_route_table[i].nhipv6[j];
/* Copies the global ND route entry for dest_if into this thread's local
 * table (address, next hop, depth, port) and bumps the local count —
 * IPv6 analogue of do_local_nh_ipv4_cache. */
342 static void do_local_nh_ipv6_cache(uint32_t dest_if,
343 struct pipeline_vfw *vfw_pipe)
345 /* Search for the entry and do local copy */
348 for (i = 0; i < MAX_ND_RT_ENTRY; i++) {
350 if (lib_nd_route_table[i].port == dest_if) {
352 struct lib_nd_route_table_entry *lentry = &vfw_pipe->
353 local_lib_nd_route_table[vfw_pipe->
354 local_lib_nd_route_ent_cnt];
/* Copy the 16-byte IPv6 address and next hop byte by byte. */
356 for (l = 0; l < IPV6_ADD_SIZE; l++) {
358 lib_nd_route_table[i].ipv6[l];
360 lib_nd_route_table[i].nhipv6[l];
362 lentry->depth = lib_nd_route_table[i].depth;
363 lentry->port = lib_nd_route_table[i].port;
365 vfw_pipe->local_lib_nd_route_ent_cnt++;
371 * Print packet for debugging.
374 * A pointer to the packet.
/* Hex-dumps the VFW metadata area and then the first IP_HDR_SIZE_IPV6 bytes
 * starting at IP_START (covers the IP header plus the start of TCP/UDP).
 * Debug-only; marked __rte_unused so it can stay compiled out of hot paths. */
377 static __rte_unused void print_pkt(struct rte_mbuf *pkt)
380 int size = (int)sizeof(struct mbuf_tcp_meta_data);
381 uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, META_DATA_OFFSET);
383 printf("Meta-data:\n");
384 for (i = 0; i < size; i++) {
385 printf("%02x ", rd[i]);
/* Newline every few bytes for readability (TWO_BYTE_PRINT stride). */
386 if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
390 printf("IP and TCP/UDP headers:\n");
391 rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, IP_START);
392 for (i = 0; i < IP_HDR_SIZE_IPV6; i++) {
393 printf("%02x ", rd[i]);
394 if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
400 /* TODO: are the protocol numbers defined somewhere with meaningful names? */
/* (Answer to TODO: yes — IPPROTO_ICMP/IPPROTO_TCP/IPPROTO_UDP/IPPROTO_FRAGMENT
 * in <netinet/in.h>; these local copies avoid that include dependency.) */
401 #define IP_ICMP_PROTOCOL 1
402 #define IP_TCP_PROTOCOL 6
403 #define IP_UDP_PROTOCOL 17
404 #define IPv6_FRAGMENT_HEADER 44
407 * Return ethernet header structure from packet.
410 * A pointer to the packet.
/* Points into the mbuf at the fixed ETHERNET_START offset — valid only under
 * this file's fixed-layout assumptions (no VLAN/offset variation). */
413 static inline struct ether_hdr *rte_vfw_get_ether_addr(struct rte_mbuf *pkt)
415 return (struct ether_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
420 * Return IPV4 header structure from packet.
423 * A pointer to the packet.
/* Points into the mbuf at the fixed IP_START offset; assumes no IP options. */
427 static inline struct ipv4_hdr *rte_vfw_get_IPv4_hdr_addr(
428 struct rte_mbuf *pkt)
430 return (struct ipv4_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
/* Returns non-zero when the byte at IP_START carries the IPv4 version
 * nibble (extraction of the nibble is on a line missing from this capture). */
433 static inline int rte_vfw_is_IPv4(struct rte_mbuf *pkt)
435 /* NOTE: Only supporting IP headers with no options,
436 * so header is fixed size */
437 uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
440 return ip_type == IPv4_HDR_VERSION;
/* Returns non-zero when the byte at IP_START carries the IPv6 version
 * nibble — companion to rte_vfw_is_IPv4 above. */
443 static inline int rte_vfw_is_IPv6(struct rte_mbuf *pkt)
445 /* NOTE: Only supporting IP headers with no options,
446 * so header is fixed size */
447 uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
450 return ip_type == IPv6_HDR_VERSION;
/* Increments a drop counter, but only while the firewall is enforcing
 * (firewall_flag set) — drops are not counted in pass-through mode.
 * NOTE(review): the increment statement itself is on a line missing from
 * this capture. */
453 static inline void rte_vfw_incr_drop_ctr(uint64_t *counter)
455 if (likely(firewall_flag))
/* Diverts control-plane packets (ARP, ICMP addressed to this link, and
 * ICMPv6 to the link address or its solicited-node multicast address) to the
 * last pipeline out-port, which feeds the arpicmp handler. Returns a flag
 * the callers use to decide whether the packet continues through the
 * firewall (0 => diverted/consumed here). Several case labels and returns
 * are on lines missing from this capture. */
459 static uint8_t check_arp_icmp(
460 struct rte_mbuf *pkt,
461 struct pipeline_vfw *vfw_pipe)
463 struct ether_hdr *ehdr;
464 struct app_link_params *link;
/* ff02::1:ffxx:xxxx prefix used for IPv6 neighbour discovery. */
465 uint8_t solicited_node_multicast_addr[IPV6_ADD_SIZE] = {
466 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
467 0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00};
469 /* ARP outport number */
470 uint16_t out_port = vfw_pipe->pipe.n_ports_out - 1;
471 struct ipv4_hdr *ipv4_h;
472 struct ipv6_hdr *ipv6_h;
473 link = &myApp->link_params[pkt->port];
475 ehdr = rte_vfw_get_ether_addr(pkt);
476 switch (rte_be_to_cpu_16(ehdr->ether_type)) {
/* ARP frames: always hand off to the arpicmp port. */
479 rte_pipeline_port_out_packet_insert(
484 vfw_pipe->counters->arpicmpPktCount++;
/* IPv4: divert ICMP addressed to this link's own IP. */
488 ipv4_h = (struct ipv4_hdr *)
489 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
490 if ((ipv4_h->next_proto_id == IP_PROTOCOL_ICMP) &&
492 rte_be_to_cpu_32(ipv4_h->dst_addr)) {
493 if (is_phy_port_privte(pkt->port)) {
494 rte_pipeline_port_out_packet_insert(
499 vfw_pipe->counters->arpicmpPktCount++;
/* IPv6: divert ICMPv6 to the link address or solicited-node multicast. */
506 ipv6_h = (struct ipv6_hdr *)
507 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
509 if (ipv6_h->proto == ICMPV6_PROTOCOL_ID) {
510 if (!memcmp(ipv6_h->dst_addr, link->ipv6, IPV6_ADD_SIZE)
511 || !memcmp(ipv6_h->dst_addr,
512 solicited_node_multicast_addr,
513 IPV6_ADD_CMP_MULTI)) {
515 rte_pipeline_port_out_packet_insert(
520 vfw_pipe->counters->arpicmpPktCount++;
/* Anything else with an unrecognized ether_type is counted as unsupported. */
524 pkts_drop_unsupported_type++;
537 * Performs basic VFW ipv4 packet filtering.
539 * A pointer to the packets.
543 * A pointer to VFW pipeline.
/* Walks pkts_mask bit by bit, prefetching the NEXT packet's IP header while
 * validating the current one. A packet is removed from the returned valid
 * mask if it is: diverted by check_arp_icmp, shorter than its IP
 * total_length claims, an IPv4 fragment, TTL <= 1, or not TCP/UDP/ICMP.
 * Returns the surviving-packet bitmask when firewall_flag is set (the
 * pass-through return for firewall_flag==0 is on a missing line). */
547 rte_vfw_ipv4_packet_filter_and_process(struct rte_mbuf **pkts,
549 struct pipeline_vfw *vfw_pipe)
553 * Make use of cache prefetch. At beginning of loop, want to prefetch
554 * mbuf data for next iteration (not current one).
555 * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
556 * is 20 bytes (extensions not supported), while the IPv6 header is 40
557 * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
558 * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
559 * need two pre-fetches.
562 uint8_t pos, next_pos = 0;
563 uint64_t pkt_mask; /* bitmask representing a single packet */
564 struct rte_mbuf *pkt;
565 struct rte_mbuf *next_pkt = NULL;
566 struct ipv4_hdr *ihdr4;
567 void *next_iphdr = NULL;
569 if (unlikely(pkts_mask == 0))
/* Position of the first packet = lowest set bit. */
571 pos = (uint8_t) __builtin_ctzll(pkts_mask);
572 pkt_mask = 1LLU << pos; /* bitmask representing only this packet */
575 uint64_t bytes_processed = 0;
576 /* bitmap of packets left to process */
577 uint64_t pkts_to_process = pkts_mask;
578 /* bitmap of valid packets to return */
579 uint64_t valid_packets = pkts_mask;
582 /* prefetch counters, updated below. Most likely counters to update
584 rte_prefetch0(&vfw_pipe->counters);
586 do { /* always execute at least once */
588 /* remove this packet from remaining list */
589 uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
591 if (likely(next_pkts_to_process)) {
592 /* another packet to process after this, prefetch it */
595 (uint8_t) __builtin_ctzll(next_pkts_to_process);
596 next_pkt = pkts[next_pos];
597 next_iphdr = RTE_MBUF_METADATA_UINT32_PTR(next_pkt,
599 rte_prefetch0(next_iphdr);
603 /* remove this packet from remaining list */
604 pkts_to_process &= ~pkt_mask;
/* Control-plane packets are consumed by check_arp_icmp and dropped
 * from the valid mask here. */
607 if (!check_arp_icmp(pkt, vfw_pipe)) {
608 /* make next packet data the current */
609 pkts_to_process = next_pkts_to_process;
613 pkt_mask = 1LLU << pos;
614 valid_packets &= ~pkt_mask;
619 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
621 bytes_processed += packet_length;
623 ihdr4 = (struct ipv4_hdr *)
624 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
626 /* verify that packet size according to mbuf is at least
627 * as large as the size according to the IP header.
630 uint32_t ip_length = rte_bswap16(ihdr4->total_length);
633 (ip_length > (packet_length - ETH_HDR_SIZE))) {
635 vfw_pipe->counters->pkts_drop_bad_size++;
639 * IPv4 fragmented if: MF (more fragments) or Fragment
640 * Offset are non-zero. Header in Intel order, so flip
641 * constant to compensate. Note that IPv6 uses a header
642 * extension for identifying fragments.
645 int fragmented = (ihdr4->fragment_offset & 0xff3f) != 0;
646 uint8_t ttl = ihdr4->time_to_live;
648 if (unlikely(fragmented)) {
650 vfw_pipe->counters->pkts_drop_fragmented++;
653 if (unlikely(ttl <= 1)) {
655 * about to decrement to zero (or is somehow
656 * already zero), so discard
659 vfw_pipe->counters->pkts_drop_ttl++;
663 * Dropping the packets other than TCP AND UDP.
666 uint8_t proto = ihdr4->next_proto_id;
668 if (unlikely(!(proto == IP_TCP_PROTOCOL ||
669 proto == IP_UDP_PROTOCOL ||
670 proto == IP_ICMP_PROTOCOL))) {
673 pkts_drop_unsupported_type++;
676 if (unlikely(discard)) {
677 valid_packets &= ~pkt_mask;
680 /* make next packet data the current */
681 pkts_to_process = next_pkts_to_process;
685 pkt_mask = 1LLU << pos;
687 } while (pkts_to_process);
689 /* finalize counters, etc. */
690 vfw_pipe->counters->bytes_processed += bytes_processed;
692 if (likely(firewall_flag))
693 return valid_packets;
698 * Performs basic VFW IPV6 packet filtering.
700 * A pointer to the packets.
704 * A pointer to VFW pipeline.
/* IPv6 twin of rte_vfw_ipv4_packet_filter_and_process. Same mask-walking /
 * next-packet-prefetch structure; drops packets that fail check_arp_icmp,
 * are shorter than payload_len + 40 claims, use the fragment extension
 * header, have hop_limits <= 1, or are not TCP/UDP/ICMP. Returns the
 * surviving-packet bitmask when firewall_flag is set. */
707 rte_vfw_ipv6_packet_filter_and_process(struct rte_mbuf **pkts,
709 struct pipeline_vfw *vfw_pipe)
713 * Make use of cache prefetch. At beginning of loop, want to prefetch
714 * mbuf data for next iteration (not current one).
715 * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
716 * is 20 bytes (extensions not supported), while the IPv6 header is 40
717 * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
718 * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
719 * need two pre-fetches.
722 uint8_t pos, next_pos = 0;
723 uint64_t pkt_mask; /* bitmask representing a single packet */
724 struct rte_mbuf *pkt;
725 struct rte_mbuf *next_pkt = NULL;
726 struct ipv6_hdr *ihdr6;
727 void *next_iphdr = NULL;
729 if (unlikely(pkts_mask == 0))
731 pos = (uint8_t) __builtin_ctzll(pkts_mask);
732 pkt_mask = 1LLU << pos; /* bitmask representing only this packet */
735 uint64_t bytes_processed = 0;
736 /* bitmap of packets left to process */
737 uint64_t pkts_to_process = pkts_mask;
738 /* bitmap of valid packets to return */
739 uint64_t valid_packets = pkts_mask;
741 /* prefetch counters, updated below. Most likely counters to update
743 rte_prefetch0(&vfw_pipe->counters);
745 do { /* always execute at least once */
747 /* remove this packet from remaining list */
748 uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
750 if (likely(next_pkts_to_process)) {
751 /* another packet to process after this, prefetch it */
754 (uint8_t) __builtin_ctzll(next_pkts_to_process);
755 next_pkt = pkts[next_pos];
757 RTE_MBUF_METADATA_UINT32_PTR(next_pkt, IP_START);
758 rte_prefetch0(next_iphdr);
762 /* remove this packet from remaining list */
763 pkts_to_process &= ~pkt_mask;
766 if (!check_arp_icmp(pkt, vfw_pipe)) {
767 /* make next packet data the current */
768 pkts_to_process = next_pkts_to_process;
772 pkt_mask = 1LLU << pos;
773 valid_packets &= ~pkt_mask;
778 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
780 bytes_processed += packet_length;
782 ihdr6 = (struct ipv6_hdr *)
783 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
786 * verify that packet size according to mbuf is at least
787 * as large as the size according to the IP header.
788 * For IPv6, note that size includes header extensions
789 * but not the base header size
793 rte_bswap16(ihdr6->payload_len) + IPv6_HEADER_SIZE;
796 (ip_length > (packet_length - ETH_HDR_SIZE))) {
798 vfw_pipe->counters->pkts_drop_bad_size++;
802 * Dropping the packets other than TCP AND UDP.
805 uint8_t proto = ihdr6->proto;
807 if (unlikely(!(proto == IP_TCP_PROTOCOL ||
808 proto == IP_UDP_PROTOCOL ||
809 proto == IP_ICMP_PROTOCOL))) {
/* Fragments arrive via the extension header in IPv6, so they
 * surface here as an unsupported next-header value. */
811 if (proto == IPv6_FRAGMENT_HEADER)
813 pkts_drop_fragmented++;
816 pkts_drop_unsupported_type++;
820 * Behave like a router, and decrement the TTL of an
821 * IP packet. If this causes the TTL to become zero,
822 * the packet will be discarded. Unlike a router,
823 * no ICMP code 11 (Time * Exceeded) message will be
824 * sent back to the packet originator.
827 if (unlikely(ihdr6->hop_limits <= 1)) {
829 * about to decrement to zero (or is somehow
830 * already zero), so discard
833 vfw_pipe->counters->pkts_drop_ttl++;
836 if (unlikely(discard))
837 valid_packets &= ~pkt_mask;
841 /* make next packet data the current */
842 pkts_to_process = next_pkts_to_process;
846 pkt_mask = 1LLU << pos;
848 } while (pkts_to_process);
850 /* finalize counters, etc. */
851 vfw_pipe->counters->bytes_processed += bytes_processed;
853 if (likely(firewall_flag))
854 return valid_packets;
860 * exchange the mac address so source becomes destination and vice versa.
863 * A pointer to the ethernet header.
/* Used on synproxy reply packets that must be sent back out the port they
 * arrived on: swaps d_addr and s_addr in place via a stack temporary. */
866 static inline void rte_sp_exchange_mac_addresses(struct ether_hdr *ehdr)
868 struct ether_addr saved_copy;
870 ether_addr_copy(&ehdr->d_addr, &saved_copy);
871 ether_addr_copy(&ehdr->s_addr, &ehdr->d_addr);
872 ether_addr_copy(&saved_copy, &ehdr->s_addr);
877 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
878 * To support synproxy, some (altered) packets may need to be sent back where
879 * they came from. The ip header has already been adjusted, but the ethernet
880 * header has not, so this must be performed here.
881 * Return an updated pkts_mask, since arp may drop some packets
884 * A pointer to the packet array.
886 * Packet num to start processing
889 * @param synproxy_reply_mask
890 * Reply Packet mask for Synproxy
892 * A pointer to VFW pipeline.
/* Unrolled-by-4 ARP resolution stage for IPv4: resolves the out-port (local
 * route cache first, then the global prv<->pub port helpers, caching misses
 * locally), writes the out-port into the mbuf metadata, and fills in the
 * ethernet addresses — from the local MAC cache when valid, otherwise via
 * get_dest_mac_addr_port(). Unresolved packets are queued on the ARP queue
 * (arp_pkts_mask) or dropped, clearing their bit in *pkts_mask. */
895 pkt4_work_vfw_arp_ipv4_packets(struct rte_mbuf **pkts,
898 uint64_t synproxy_reply_mask,
899 struct pipeline_vfw *vfw_pipe)
904 struct mbuf_tcp_meta_data *meta_data_addr;
905 struct ether_hdr *ehdr;
906 struct rte_mbuf *pkt;
908 for (i = 0; i < 4; i++) {
909 uint32_t dest_if = INVALID_DESTIF;
910 /* bitmask representing only this packet */
911 uint64_t pkt_mask = 1LLU << (pkt_num + i);
/* Skip slots already dropped by earlier stages. */
915 if(!(*pkts_mask & pkt_mask))
918 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
920 meta_data_addr = (struct mbuf_tcp_meta_data *)
921 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
922 ehdr = rte_vfw_get_ether_addr(pkt);
925 struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
926 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
929 uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
/* Synproxy reply: send it back where it came from. */
931 rte_sp_exchange_mac_addresses(ehdr);
933 ret = local_get_nh_ipv4(dest_address, &dest_if,
936 rte_sp_exchange_mac_addresses(ehdr);
937 if (is_phy_port_privte(phy_port)) {
939 dest_if = get_pub_to_prv_port(
942 if (dest_if == INVALID_DESTIF) {
943 *pkts_mask &= ~pkt_mask;
945 pkts_drop_without_arp_entry++;
/* Remember the resolved route in the thread-local cache. */
947 do_local_nh_ipv4_cache(
953 dest_if = get_prv_to_pub_port(
956 if (dest_if == INVALID_DESTIF) {
957 *pkts_mask &= ~pkt_mask;
959 pkts_drop_without_arp_entry++;
961 do_local_nh_ipv4_cache(dest_if,
965 } else if (is_phy_port_privte(phy_port)) {
967 dest_if = get_prv_to_pub_port(&dest_address,
969 if (dest_if == INVALID_DESTIF) {
970 *pkts_mask &= ~pkt_mask;
972 pkts_drop_without_arp_entry++;
974 do_local_nh_ipv4_cache(dest_if, vfw_pipe);
979 dest_if = get_pub_to_prv_port(&dest_address,
981 if (dest_if == INVALID_DESTIF) {
982 *pkts_mask &= ~pkt_mask;
984 pkts_drop_without_arp_entry++;
986 do_local_nh_ipv4_cache(dest_if, vfw_pipe);
/* Out-port resolved: stash it for RTE_PIPELINE_ACTION_PORT_META. */
990 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
991 if (local_dest_mac_present(dest_if)) {
992 ether_addr_copy(get_local_link_hw_addr(dest_if),
994 ether_addr_copy(get_link_hw_addr(dest_if),
/* No local MAC cached: consult the shared ARP table. */
998 struct arp_entry_data *ret_arp_data = NULL;
999 ret_arp_data = get_dest_mac_addr_port(dest_address,
1000 &dest_if, &ehdr->d_addr);
1001 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
1003 if (arp_cache_dest_mac_present(dest_if)) {
1005 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
1006 arp_data_ptr[dest_if]->n_last_update = time(NULL);
/* Entry just resolved: flush packets queued while it was pending. */
1008 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
1010 printf("sending buffered packets\n");
1011 arp_send_buffered_pkts(ret_arp_data,
1012 &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
1017 if (unlikely(ret_arp_data == NULL)) {
1019 printf("%s: NHIP Not Found, nhip:%x , "
1020 "outport_id: %d\n", __func__, nhip,
1021 vfw_pipe->outport_id[dest_if]);
1024 vfw_pipe->counters->
1025 pkts_drop_without_arp_entry++;
/* ARP still resolving: queue the packet unless the queue is full. */
1028 if (ret_arp_data->status == INCOMPLETE ||
1029 ret_arp_data->status == PROBE) {
1030 if (ret_arp_data->num_pkts >= NUM_DESC) {
1031 /* ICMP req sent, drop packet by
1032 * changing the mask */
1033 vfw_pipe->counters->
1034 pkts_drop_without_arp_entry++;
1037 arp_pkts_mask |= pkt_mask;
1038 arp_queue_unresolved_packet(ret_arp_data, pkt);
1048 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1049 * To support synproxy, some (altered) packets may need to be sent back where
1050 * they came from. The ip header has already been adjusted, but the ethernet
1051 * header has not, so this must be performed here.
1052 * Return an updated pkts_mask, since arp may drop some packets
1055 * A pointer to the packet.
1057 * Packet number to process
1059 * Packet mask pointer
1060 * @param synproxy_reply_mask
1061 * Reply Packet mask for Synproxy
1063 * A pointer to VFW pipeline.
/* Single-packet variant of pkt4_work_vfw_arp_ipv4_packets (same resolution
 * logic, one mbuf at pkt_num instead of an unrolled group of four). Keep the
 * two in sync when changing either. */
1066 pkt_work_vfw_arp_ipv4_packets(struct rte_mbuf *pkts,
1068 uint64_t *pkts_mask,
1069 uint64_t synproxy_reply_mask,
1070 struct pipeline_vfw *vfw_pipe)
1073 uint32_t dest_if = INVALID_DESTIF;
1075 struct mbuf_tcp_meta_data *meta_data_addr;
1076 struct ether_hdr *ehdr;
1077 struct rte_mbuf *pkt;
1078 uint64_t pkt_mask = 1LLU << pkt_num;
/* Only touch the packet if earlier stages left it valid. */
1082 if(*pkts_mask & pkt_mask) {
1084 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1086 meta_data_addr = (struct mbuf_tcp_meta_data *)
1087 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1088 ehdr = rte_vfw_get_ether_addr(pkt);
1091 struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
1092 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1095 uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
1097 rte_sp_exchange_mac_addresses(ehdr);
1100 ret = local_get_nh_ipv4(dest_address, &dest_if,
1103 rte_sp_exchange_mac_addresses(ehdr);
1104 if (is_phy_port_privte(phy_port)) {
1106 dest_if = get_pub_to_prv_port(
1109 if (dest_if == INVALID_DESTIF) {
1110 *pkts_mask &= ~pkt_mask;
1111 vfw_pipe->counters->
1112 pkts_drop_without_arp_entry++;
1114 do_local_nh_ipv4_cache(
1120 dest_if = get_prv_to_pub_port(
1123 if (dest_if == INVALID_DESTIF) {
1124 *pkts_mask &= ~pkt_mask;
1125 vfw_pipe->counters->
1126 pkts_drop_without_arp_entry++;
1128 do_local_nh_ipv4_cache(dest_if,
1132 } else if (is_phy_port_privte(phy_port)) {
1134 dest_if = get_prv_to_pub_port(&dest_address,
1136 if (dest_if == INVALID_DESTIF) {
1137 *pkts_mask &= ~pkt_mask;
1138 vfw_pipe->counters->
1139 pkts_drop_without_arp_entry++;
1141 do_local_nh_ipv4_cache(dest_if, vfw_pipe);
1146 dest_if = get_pub_to_prv_port(&dest_address,
1148 if (dest_if == INVALID_DESTIF) {
1149 *pkts_mask &= ~pkt_mask;
1150 vfw_pipe->counters->
1151 pkts_drop_without_arp_entry++;
1153 do_local_nh_ipv4_cache(dest_if, vfw_pipe);
/* Out-port resolved: stash it for RTE_PIPELINE_ACTION_PORT_META. */
1157 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
1158 if (local_dest_mac_present(dest_if)) {
1159 ether_addr_copy(get_local_link_hw_addr(dest_if),
1161 ether_addr_copy(get_link_hw_addr(dest_if),
/* No local MAC cached: consult the shared ARP table. */
1165 struct arp_entry_data *ret_arp_data = NULL;
1166 ret_arp_data = get_dest_mac_addr_port(dest_address,
1167 &dest_if, &ehdr->d_addr);
1168 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
1169 if (arp_cache_dest_mac_present(dest_if)) {
1171 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
1172 arp_data_ptr[dest_if]->n_last_update = time(NULL);
1174 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
1176 printf("sending buffered packets\n");
1177 arp_send_buffered_pkts(ret_arp_data,
1178 &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
1183 if (unlikely(ret_arp_data == NULL)) {
1185 printf("%s: NHIP Not Found, nhip:%x , "
1186 "outport_id: %d\n", __func__, nhip,
1187 vfw_pipe->outport_id[dest_if]);
1189 vfw_pipe->counters->
1190 pkts_drop_without_arp_entry++;
/* ARP still resolving: queue the packet unless the queue is full. */
1193 if (ret_arp_data->status == INCOMPLETE ||
1194 ret_arp_data->status == PROBE) {
1195 if (ret_arp_data->num_pkts >= NUM_DESC) {
1196 /* ICMP req sent, drop packet by
1197 * changing the mask */
1198 vfw_pipe->counters->
1199 pkts_drop_without_arp_entry++;
1202 arp_pkts_mask |= pkt_mask;
1203 arp_queue_unresolved_packet(ret_arp_data, pkt);
1214 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1215 * To support synproxy, some (altered) packets may need to be sent back where
1216 * they came from. The ip header has already been adjusted, but the ethernet
1217 * header has not, so this must be performed here.
1218 * Return an updated pkts_mask, since arp may drop some packets
1221 * A pointer to the packets array.
1223 * Packet number to start processing.
1225 * Packet mask pointer
1226 * @param synproxy_reply_mask
1227 * Reply Packet mask for Synproxy
1229 * A pointer to VFW pipeline.
/* IPv6/ND twin of pkt4_work_vfw_arp_ipv4_packets (unrolled by 4): resolves
 * the out-port via the local ND cache or the prv<->pub helpers, then fills
 * the ethernet header from get_dest_mac_address_ipv6_port(). Packets whose
 * neighbour is unknown are dropped (buffering noted as not yet implemented
 * below), clearing their bit in *pkts_mask. */
1233 pkt4_work_vfw_arp_ipv6_packets(struct rte_mbuf **pkts,
1235 uint64_t *pkts_mask,
1236 uint64_t synproxy_reply_mask,
1237 struct pipeline_vfw *vfw_pipe)
1239 uint8_t nh_ipv6[IPV6_ADD_SIZE];
1241 struct ether_addr hw_addr;
1242 struct mbuf_tcp_meta_data *meta_data_addr;
1243 struct ether_hdr *ehdr;
1244 struct rte_mbuf *pkt;
1248 for (i = 0; i < 4; i++) {
1249 uint32_t dest_if = INVALID_DESTIF;
1250 /* bitmask representing only this packet */
1251 uint64_t pkt_mask = 1LLU << (pkt_num + i);
/* Skip slots already dropped by earlier stages. */
1255 if(!(*pkts_mask & pkt_mask))
1257 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1259 phy_port = pkt->port;
1260 meta_data_addr = (struct mbuf_tcp_meta_data *)
1261 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1262 ehdr = rte_vfw_get_ether_addr(pkt);
1264 struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
1265 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1267 uint8_t nhip[IPV6_ADD_SIZE];
1268 uint8_t dest_address[IPV6_ADD_SIZE];
1270 memset(nhip, 0, IPV6_ADD_SIZE);
1272 rte_mov16(dest_address, ihdr->dst_addr);
1273 ret = local_get_nh_ipv6(&dest_address[0], &dest_if,
1274 &nhip[0], vfw_pipe);
/* Synproxy reply: send it back where it came from. */
1276 rte_sp_exchange_mac_addresses(ehdr);
1277 if (is_phy_port_privte(phy_port)) {
1279 dest_if = get_pub_to_prv_port(
1283 if (dest_if == INVALID_DESTIF) {
1284 *pkts_mask &= ~pkt_mask;
1285 vfw_pipe->counters->
1286 pkts_drop_without_arp_entry++;
1288 do_local_nh_ipv6_cache(dest_if,
1294 dest_if = get_prv_to_pub_port(
1298 if (dest_if == INVALID_DESTIF) {
1299 *pkts_mask &= ~pkt_mask;
1300 vfw_pipe->counters->
1301 pkts_drop_without_arp_entry++;
1303 do_local_nh_ipv6_cache(dest_if,
1308 } else if (is_phy_port_privte(phy_port)) {
1310 dest_if = get_prv_to_pub_port((uint32_t *)
1311 &dest_address[0], IP_VERSION_6);
1312 if (dest_if == INVALID_DESTIF) {
1313 *pkts_mask &= ~pkt_mask;
1314 vfw_pipe->counters->
1315 pkts_drop_without_arp_entry++;
1317 do_local_nh_ipv6_cache(dest_if, vfw_pipe);
1322 dest_if = get_pub_to_prv_port((uint32_t *)
1323 &dest_address[0], IP_VERSION_6);
1324 if (dest_if == INVALID_DESTIF) {
1325 *pkts_mask &= ~pkt_mask;
1326 vfw_pipe->counters->
1327 pkts_drop_without_arp_entry++;
1330 do_local_nh_ipv6_cache(dest_if, vfw_pipe);
/* Out-port resolved: stash it for RTE_PIPELINE_ACTION_PORT_META. */
1335 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
1337 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
1338 if (get_dest_mac_address_ipv6_port(
/* Neighbour known: rewrite both ethernet addresses. */
1343 ether_addr_copy(&hw_addr, &ehdr->d_addr);
1344 ether_addr_copy(get_link_hw_addr(dest_if),
1347 if (vfw_debug >= DEBUG_LEVEL_4) {
1348 char buf[HW_ADDR_SIZE];
1350 ether_format_addr(buf, sizeof(buf),
1352 printf("MAC found for dest_if %d: %s, ",
1354 ether_format_addr(buf, sizeof(buf),
1356 printf("new eth hdr src: %s, ", buf);
1357 ether_format_addr(buf, sizeof(buf),
1359 printf("new eth hdr dst: %s\n", buf);
/* Neighbour unknown: drop (no ND-pending queue yet, see note). */
1363 printf("deleting ipv6\n");
1364 *pkts_mask &= ~pkt_mask;
1365 /*Next Neighbor is not yet implemented
1367 vfw_pipe->counters->
1368 pkts_drop_without_arp_entry++;
1376 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1377 * To support synproxy, some (altered) packets may need to be sent back where
1378 * they came from. The ip header has already been adjusted, but the ethernet
1379 * header has not, so this must be performed here.
1380 * Return an updated pkts_mask, since arp may drop some packets
1383 * A pointer to the packets.
1385 * Packet number to process.
1387 * Packet mask pointer
1388 * @param synproxy_reply_mask
1389 * Reply Packet mask for Synproxy
1391 * A pointer to VFW pipeline.
/* NOTE(review): this extract skips source lines (the embedded numbering is
 * non-contiguous), so some statements, braces and call arguments are not
 * visible here. Comments below describe only what the visible lines show.
 */
1395 pkt_work_vfw_arp_ipv6_packets(struct rte_mbuf *pkts,
1397 uint64_t *pkts_mask,
1398 uint64_t synproxy_reply_mask,
1399 struct pipeline_vfw *vfw_pipe)
1401 uint8_t nh_ipv6[IPV6_ADD_SIZE];
1403 struct ether_addr hw_addr;
1404 struct mbuf_tcp_meta_data *meta_data_addr;
1405 struct ether_hdr *ehdr;
1406 struct rte_mbuf *pkt;
1409 uint32_t dest_if = INVALID_DESTIF;
1410 /* bitmask representing only this packet */
1411 uint64_t pkt_mask = 1LLU << pkt_num;
/* Process this packet only if its bit is still set in the keep mask. */
1415 if(*pkts_mask & pkt_mask) {
1417 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1419 phy_port = pkt->port;
1420 meta_data_addr = (struct mbuf_tcp_meta_data *)
1421 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1422 ehdr = rte_vfw_get_ether_addr(pkt);
1424 struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
1425 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1427 uint8_t nhip[IPV6_ADD_SIZE];
1428 uint8_t dest_address[IPV6_ADD_SIZE];
1430 memset(nhip, 0, IPV6_ADD_SIZE);
1432 rte_mov16(dest_address, ihdr->dst_addr);
/* Consult the pipeline-local IPv6 next-hop cache first. */
1433 ret = local_get_nh_ipv6(&dest_address[0], &dest_if,
1434 &nhip[0], vfw_pipe);
/* Synproxy reply path: swap src/dst MACs so the frame is sent back
 * out the interface it arrived on. */
1436 rte_sp_exchange_mac_addresses(ehdr);
/* Pick the destination interface from the receiving port's
 * direction (private vs public). A failed lookup clears this
 * packet's mask bit and bumps the drop counter; a successful
 * one is recorded via do_local_nh_ipv6_cache(). */
1437 if (is_phy_port_privte(phy_port)) {
1439 dest_if = get_pub_to_prv_port(
1443 if (dest_if == INVALID_DESTIF) {
1444 *pkts_mask &= ~pkt_mask;
1445 vfw_pipe->counters->
1446 pkts_drop_without_arp_entry++;
1448 do_local_nh_ipv6_cache(dest_if,
1454 dest_if = get_prv_to_pub_port(
1458 if (dest_if == INVALID_DESTIF) {
1459 *pkts_mask &= ~pkt_mask;
1460 vfw_pipe->counters->
1461 pkts_drop_without_arp_entry++;
1463 do_local_nh_ipv6_cache(dest_if,
1468 } else if (is_phy_port_privte(phy_port)) {
1470 dest_if = get_prv_to_pub_port((uint32_t *)
1471 &dest_address[0], IP_VERSION_6);
1472 if (dest_if == INVALID_DESTIF) {
1473 *pkts_mask &= ~pkt_mask;
1474 vfw_pipe->counters->
1475 pkts_drop_without_arp_entry++;
1477 do_local_nh_ipv6_cache(dest_if, vfw_pipe);
1482 dest_if = get_pub_to_prv_port((uint32_t *)
1483 &dest_address[0], IP_VERSION_6);
1484 if (dest_if == INVALID_DESTIF) {
1485 *pkts_mask &= ~pkt_mask;
1486 vfw_pipe->counters->
1487 pkts_drop_without_arp_entry++;
1490 do_local_nh_ipv6_cache(dest_if, vfw_pipe);
/* Steer the packet to the output port of the resolved interface. */
1495 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
1497 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
/* MAC resolution: on success rewrite the ethernet header
 * (dst = resolved hw_addr, src = local link MAC). */
1498 if (get_dest_mac_address_ipv6_port(
1503 ether_addr_copy(&hw_addr, &ehdr->d_addr);
1504 ether_addr_copy(get_link_hw_addr(dest_if),
1507 if (vfw_debug >= DEBUG_LEVEL_4) {
1508 char buf[HW_ADDR_SIZE];
1510 ether_format_addr(buf, sizeof(buf),
1512 printf("MAC found for dest_if %d: %s, ",
1514 ether_format_addr(buf, sizeof(buf),
1516 printf("new eth hdr src: %s, ", buf);
1517 ether_format_addr(buf, sizeof(buf),
1519 printf("new eth hdr dst: %s\n", buf);
/* Resolution failed: drop the packet by clearing its mask bit. */
1523 printf("deleting ipv6\n");
1524 *pkts_mask &= ~pkt_mask;
1525 /*Next Neighbor is not yet implemented
1527 vfw_pipe->counters->
1528 pkts_drop_without_arp_entry++;
1538 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1539 * To support synproxy, some (altered) packets may need to be sent back where
1540 * they came from. The ip header has already been adjusted, but the ethernet
1541 * header has not, so this must be performed here.
1542 * Return an updated pkts_mask, since arp may drop some packets
1545 * A pointer to the packet.
1548 * @param synproxy_reply_mask
1549 * Reply Packet mask for Synproxy
1551 * A pointer to VFW pipeline.
/* NOTE(review): this extract skips source lines (the embedded numbering is
 * non-contiguous), so some statements, braces and call arguments are not
 * visible here. Comments below describe only what the visible lines show.
 */
1554 rte_vfw_arp_ipv4_packets(struct rte_mbuf **pkts,
1556 uint64_t synproxy_reply_mask,
1557 struct pipeline_vfw *vfw_pipe)
1559 uint64_t pkts_to_arp = pkts_mask;
1562 uint32_t dest_if = INVALID_DESTIF;
/* Iterate over every set bit of the working mask, lowest first. */
1563 for (; pkts_to_arp;) {
1564 struct ether_addr hw_addr;
1565 struct mbuf_tcp_meta_data *meta_data_addr;
1566 struct ether_hdr *ehdr;
1567 struct rte_mbuf *pkt;
1570 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1571 /* bitmask representing only this packet */
1572 uint64_t pkt_mask = 1LLU << pos;
1573 /* remove this packet from remaining list */
1574 pkts_to_arp &= ~pkt_mask;
1576 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1578 phy_port = pkt->port;
1579 meta_data_addr = (struct mbuf_tcp_meta_data *)
1580 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1581 ehdr = rte_vfw_get_ether_addr(pkt);
1584 struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
1585 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
/* Destination IPv4 address in host byte order. */
1588 uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
/* Synproxy reply path: swap src/dst MACs to send the frame back. */
1590 rte_sp_exchange_mac_addresses(ehdr);
/* Consult the pipeline-local IPv4 next-hop cache first. */
1592 ret = local_get_nh_ipv4(dest_address, &dest_if,
1595 rte_sp_exchange_mac_addresses(ehdr);
/* Pick the destination interface from the receiving port's
 * direction (private vs public); failed lookups drop the packet
 * and count it, successful ones feed do_local_nh_ipv4_cache(). */
1596 if (is_phy_port_privte(phy_port)) {
1598 dest_if = get_pub_to_prv_port(
1601 if (dest_if == INVALID_DESTIF) {
1602 pkts_mask &= ~pkt_mask;
1603 vfw_pipe->counters->
1604 pkts_drop_without_arp_entry++;
1606 do_local_nh_ipv4_cache(
1612 dest_if = get_prv_to_pub_port(
1615 if (dest_if == INVALID_DESTIF) {
1616 pkts_mask &= ~pkt_mask;
1617 vfw_pipe->counters->
1618 pkts_drop_without_arp_entry++;
1620 do_local_nh_ipv4_cache(dest_if,
1624 } else if (is_phy_port_privte(phy_port)) {
1626 dest_if = get_prv_to_pub_port(&dest_address,
1628 if (dest_if == INVALID_DESTIF) {
1629 pkts_mask &= ~pkt_mask;
1630 vfw_pipe->counters->
1631 pkts_drop_without_arp_entry++;
1633 do_local_nh_ipv4_cache(dest_if, vfw_pipe);
1638 dest_if = get_pub_to_prv_port(&dest_address,
1640 if (dest_if == INVALID_DESTIF) {
1641 pkts_mask &= ~pkt_mask;
1642 vfw_pipe->counters->
1643 pkts_drop_without_arp_entry++;
1645 do_local_nh_ipv4_cache(dest_if, vfw_pipe);
/* Steer the packet to the output port of the resolved interface. */
1649 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
/* Fast path: a locally cached destination MAC avoids the ARP table. */
1650 if (local_dest_mac_present(dest_if)) {
1651 ether_addr_copy(get_local_link_hw_addr(dest_if),
1653 ether_addr_copy(get_link_hw_addr(dest_if),
/* Slow path: look up (or trigger resolution of) the ARP entry. */
1657 struct arp_entry_data *ret_arp_data = NULL;
1658 ret_arp_data = get_dest_mac_addr_port(dest_address,
1659 &dest_if, &ehdr->d_addr);
1660 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
1661 if (arp_cache_dest_mac_present(dest_if)) {
1663 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
1664 arp_data_ptr[dest_if]->n_last_update = time(NULL);
/* Entry just became usable: flush any packets queued on it. */
1666 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
1668 printf("sending buffered packets\n");
1669 p_nat->naptedPktCount += ret_arp_data->num_pkts;
1670 arp_send_buffered_pkts(ret_arp_data,
1671 &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
/* No ARP entry at all: count and (per visible lines) drop. */
1676 if (unlikely(ret_arp_data == NULL)) {
1678 printf("%s: NHIP Not Found, nhip:%x , "
1679 "outport_id: %d\n", __func__, nhip,
1680 vfw_pipe->outport_id[dest_if]);
1683 vfw_pipe->counters->
1684 pkts_drop_without_arp_entry++;
/* Entry still resolving: either drop (queue full) or park the
 * packet on the entry and hijack it via arp_pkts_mask. */
1687 if (ret_arp_data->status == INCOMPLETE ||
1688 ret_arp_data->status == PROBE) {
1689 if (ret_arp_data->num_pkts >= NUM_DESC) {
1690 /* ICMP req sent, drop packet by
1691 * changing the mask */
1692 vfw_pipe->counters->
1693 pkts_drop_without_arp_entry++;
1696 arp_pkts_mask |= pkt_mask;
1697 arp_queue_unresolved_packet(ret_arp_data, pkt);
1708 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1709 * To support synproxy, some (altered) packets may need to be sent back where
1710 * they came from. The ip header has already been adjusted, but the ethernet
1711 * header has not, so this must be performed here.
1712 * Return an updated pkts_mask, since arp may drop some packets
1715 * A pointer to the packet.
1718 * @param synproxy_reply_mask
1719 * Reply Packet mask for Synproxy
1721 * A pointer to VFW pipeline.
/* NOTE(review): this extract skips source lines (the embedded numbering is
 * non-contiguous), so some statements, braces and call arguments are not
 * visible here. Comments below describe only what the visible lines show.
 * This is the burst (mask-driven loop) variant of the per-packet IPv6
 * helper above.
 */
1725 rte_vfw_arp_ipv6_packets(struct rte_mbuf **pkts,
1727 uint64_t synproxy_reply_mask,
1728 struct pipeline_vfw *vfw_pipe)
1730 uint64_t pkts_to_arp = pkts_mask;
1731 uint8_t nh_ipv6[IPV6_ADD_SIZE];
1733 uint32_t dest_if = INVALID_DESTIF;
/* Iterate over every set bit of the working mask, lowest first. */
1735 for (; pkts_to_arp;) {
1736 struct ether_addr hw_addr;
1737 struct mbuf_tcp_meta_data *meta_data_addr;
1738 struct ether_hdr *ehdr;
1739 struct rte_mbuf *pkt;
1742 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1743 /* bitmask representing only this packet */
1744 uint64_t pkt_mask = 1LLU << pos;
1745 /* remove this packet from remaining list */
1746 pkts_to_arp &= ~pkt_mask;
1748 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1750 phy_port = pkt->port;
1751 meta_data_addr = (struct mbuf_tcp_meta_data *)
1752 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1753 ehdr = rte_vfw_get_ether_addr(pkt);
1755 struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
1756 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1758 uint8_t nhip[IPV6_ADD_SIZE];
1759 uint8_t dest_address[IPV6_ADD_SIZE];
1761 memset(nhip, 0, IPV6_ADD_SIZE);
1763 rte_mov16(dest_address, ihdr->dst_addr);
/* Consult the pipeline-local IPv6 next-hop cache first. */
1764 ret = local_get_nh_ipv6(&dest_address[0], &dest_if,
1765 &nhip[0], vfw_pipe);
/* Synproxy reply path: swap src/dst MACs to send the frame back. */
1767 rte_sp_exchange_mac_addresses(ehdr);
/* Pick the destination interface from the receiving port's
 * direction (private vs public); failed lookups drop the packet
 * and count it, successful ones feed do_local_nh_ipv6_cache(). */
1768 if (is_phy_port_privte(phy_port)) {
1770 dest_if = get_pub_to_prv_port(
1774 if (dest_if == INVALID_DESTIF) {
1775 pkts_mask &= ~pkt_mask;
1776 vfw_pipe->counters->
1777 pkts_drop_without_arp_entry++;
1779 do_local_nh_ipv6_cache(dest_if,
1785 dest_if = get_prv_to_pub_port(
1789 if (dest_if == INVALID_DESTIF) {
1790 pkts_mask &= ~pkt_mask;
1791 vfw_pipe->counters->
1792 pkts_drop_without_arp_entry++;
1794 do_local_nh_ipv6_cache(dest_if,
1799 } else if (is_phy_port_privte(phy_port)) {
1801 dest_if = get_prv_to_pub_port((uint32_t *)
1802 &dest_address[0], IP_VERSION_6);
1803 if (dest_if == INVALID_DESTIF) {
1804 pkts_mask &= ~pkt_mask;
1805 vfw_pipe->counters->
1806 pkts_drop_without_arp_entry++;
1808 do_local_nh_ipv6_cache(dest_if, vfw_pipe);
1813 dest_if = get_pub_to_prv_port((uint32_t *)
1814 &dest_address[0], IP_VERSION_6);
1815 if (dest_if == INVALID_DESTIF) {
1816 pkts_mask &= ~pkt_mask;
1817 vfw_pipe->counters->
1818 pkts_drop_without_arp_entry++;
1821 do_local_nh_ipv6_cache(dest_if, vfw_pipe);
/* Steer the packet to the output port of the resolved interface. */
1826 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
1828 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
/* MAC resolution: on success rewrite the ethernet header
 * (dst = resolved hw_addr, src = local link MAC). */
1829 if (get_dest_mac_address_ipv6_port(
1834 ether_addr_copy(&hw_addr, &ehdr->d_addr);
1835 ether_addr_copy(get_link_hw_addr(dest_if),
1838 if (vfw_debug >= DEBUG_LEVEL_4) {
1839 char buf[HW_ADDR_SIZE];
1841 ether_format_addr(buf, sizeof(buf),
1843 printf("MAC found for dest_if %d: %s, ",
1845 ether_format_addr(buf, sizeof(buf),
1847 printf("new eth hdr src: %s, ", buf);
1848 ether_format_addr(buf, sizeof(buf),
1850 printf("new eth hdr dst: %s\n", buf);
/* Resolution failed: drop the packet by clearing its mask bit. */
1854 printf("deleting ipv6\n");
1855 pkts_mask &= ~pkt_mask;
1856 /*Next Neighbor is not yet implemented
1858 vfw_pipe->counters->
1859 pkts_drop_without_arp_entry++;
1870 * Packets processing for connection tracking.
1873 * A pointer to the pipeline.
1875 * A pointer to the connection tracker.
1877 * A pointer to a burst of packets.
1878 * @param packet_mask_in
1879 * Input packets Mask.
/* Re-runs the connection-tracker lookup (with synproxy) over packets that
 * were previously buffered. By this point no packet should need hijacking
 * or a synproxy reply, so non-zero masks from the helper are reported as
 * severe errors.
 */
1883 vfw_process_buffered_pkts(__rte_unused struct pipeline_vfw *vfw_pipe,
1884 struct rte_ct_cnxn_tracker *ct,
1885 struct rte_mbuf **pkts, uint64_t packet_mask_in)
1887 uint64_t keep_mask = packet_mask_in;
1888 struct rte_synproxy_helper sp_helper; /* for synproxy */
1891 rte_ct_cnxn_tracker_batch_lookup_with_synproxy(ct, pkts, keep_mask,
/* Buffered packets must never be hijacked or answered again. */
1894 if (unlikely(sp_helper.hijack_mask))
1895 printf("buffered hijack pkts severe error\n");
1897 if (unlikely(sp_helper.reply_pkt_mask))
1898 printf("buffered reply pkts severe error\n");
1904 * Free Packets from mbuf.
1907 * A pointer to the connection tracker to increment drop counter.
1910 * Packet to be freed.
/* Drops one packet: bumps the connection tracker's drop counter, then
 * returns the mbuf to its pool. */
1913 vfw_pktmbuf_free(struct rte_ct_cnxn_tracker *ct, struct rte_mbuf *pkt)
1915 ct->counters->pkts_drop++;
1916 rte_pktmbuf_free(pkt);
/* Walks the first num_pkts entries of pkts: entries whose bit is set in
 * pkts_mask are injected into their recorded output port; entries whose
 * bit is clear are freed (and counted as drops via vfw_pktmbuf_free). */
1920 vfw_output_or_delete_buffered_packets(struct rte_ct_cnxn_tracker *ct,
1921 struct rte_pipeline *p,
1922 struct rte_mbuf **pkts,
1923 int num_pkts, uint64_t pkts_mask)
1926 struct mbuf_tcp_meta_data *meta_data_addr;
1927 uint64_t pkt_mask = 1;
1929 /* any clear bits in low-order num_pkts bit of
1930 * pkt_mask must be discarded */
1932 for (i = 0; i < num_pkts; i++) {
1933 struct rte_mbuf *pkt = pkts[i];
1935 if (pkts_mask & pkt_mask) {
1936 printf("vfw_output_or_delete_buffered_packets\n");
/* Output port was stored in the packet's metadata earlier. */
1937 meta_data_addr = (struct mbuf_tcp_meta_data *)
1938 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1939 rte_pipeline_port_out_packet_insert(
1940 p, meta_data_addr->output_port, pkt);
1943 vfw_pktmbuf_free(ct, pkt);
/* Advance to the next packet's bit. */
1946 pkt_mask = pkt_mask << 1;
1951 * Packets buffered for synproxy.
1954 * A pointer to the pipeline.
1956 * A pointer to the vfw pipeline.
1958 * A pointer to the connection tracker.
1959 * @param forward_pkts
1960 * Packet forwarded by synproxy.
/* NOTE(review): this extract skips source lines (the embedded numbering is
 * non-contiguous); comments describe only what the visible lines show.
 * Drains the connection tracker's buffered-synproxy packet list, batching
 * packets into vfw_pipe->pkt_buffer (up to PKT_BUFFER_SIZE) and pushing
 * each full batch — plus the final partial batch — through
 * vfw_process_buffered_pkts() and vfw_output_or_delete_buffered_packets().
 */
1964 vfw_handle_buffered_packets(struct rte_pipeline *p,
1965 struct pipeline_vfw *vfw_pipe,
1966 struct rte_ct_cnxn_tracker *ct, int forward_pkts)
1968 struct rte_mbuf *pkt_list = rte_ct_get_buffered_synproxy_packets(ct);
1970 if (likely(pkt_list == NULL)) /* only during proxy setup is != NULL */
1974 uint64_t keep_mask = 0;
1975 struct rte_mbuf **pkts = vfw_pipe->pkt_buffer;
1976 struct rte_mbuf *pkt;
/* The buffered packets are chained through the metadata "next" field. */
1978 while (pkt_list != NULL) {
1979 struct mbuf_tcp_meta_data *meta_data =
1980 (struct mbuf_tcp_meta_data *)
1981 RTE_MBUF_METADATA_UINT32_PTR(pkt_list, META_DATA_OFFSET);
1983 /* detach head of list and advance list */
1985 pkt_list = meta_data->next;
1989 pkts[pkt_count++] = pkt;
1991 if (pkt_count == PKT_BUFFER_SIZE) {
1992 /* need to send out packets */
1993 /* currently 0, set all bits */
1994 keep_mask = ~keep_mask;
1997 vfw_process_buffered_pkts(vfw_pipe,
2000 vfw_output_or_delete_buffered_packets(
2010 vfw_pktmbuf_free(ct, pkt);
/* Flush the final, possibly partial, batch. */
2014 if (pkt_count != 0) {
2015 /* need to send out packets */
2016 keep_mask = RTE_LEN2MASK(pkt_count, uint64_t);
2019 vfw_process_buffered_pkts(vfw_pipe, ct, pkts,
2022 vfw_output_or_delete_buffered_packets(ct, p, pkts, pkt_count,
2030 * The pipeline port-in action is used to do all the firewall and
2031 * connection tracking work for IPV4 packets.
2034 * A pointer to the pipeline.
2036 * A pointer to a burst of packets.
2038 * Number of packets to process.
2040 * A pointer to pipeline specific data.
2043 * 0 on success, negative on error.
/* NOTE(review): this extract skips source lines (the embedded numbering is
 * non-contiguous); comments describe only what the visible lines show. */
2047 vfw_port_in_action_ipv4(struct rte_pipeline *p,
2048 struct rte_mbuf **pkts,
2049 __rte_unused uint32_t n_pkts, __rte_unused void *arg)
2051 struct vfw_ports_in_args *port_in_args =
2052 (struct vfw_ports_in_args *)arg;
2053 struct pipeline_vfw *vfw_pipe =
2054 (struct pipeline_vfw *)port_in_args->pipe;
2055 struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
/* Start the per-burst latency measurement window. */
2057 start_tsc_measure(vfw_pipe);
2059 uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
2060 uint64_t pkts_drop_mask;
2061 uint64_t hijack_mask = 0;
2063 uint64_t synproxy_reply_mask = 0; /* for synproxy */
2064 uint64_t keep_mask = packet_mask_in;
2066 uint64_t conntrack_mask = 0, connexist_mask = 0;
2067 struct rte_CT_helper ct_helper;
2071 * This routine uses a bit mask to represent which packets in the
2072 * "pkts" table are considered valid. Any table entry which exists
2073 * and is considered valid has the corresponding bit in the mask set.
2074 * Otherwise, it is cleared. Note that the mask is 64 bits,
2075 * but the number of packets in the table may be considerably less.
2076 * Any mask bits which do correspond to actual packets are cleared.
2077 * Various routines are called which may determine that an existing
2078 * packet is somehow invalid. The routine will return an altered bit
2079 * mask, with the bit cleared. At the end of all the checks,
2080 * packets are dropped if their mask bit is a zero
2083 rte_prefetch0(& vfw_pipe->counters);
2086 /* Pre-fetch all rte_mbuf header */
2087 for(j = 0; j < n_pkts; j++)
2088 rte_prefetch0(pkts[j]);
2090 memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
2092 rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
2093 rte_prefetch0(& vfw_pipe->counters->sum_latencies);
2096 if (unlikely(vfw_debug > 1))
2097 printf("Enter in-port action IPV4 with %p packet mask\n",
2098 (void *)packet_mask_in);
2099 vfw_pipe->counters->pkts_received =
2100 vfw_pipe->counters->pkts_received + n_pkts;
2102 if (unlikely(VFW_DEBUG))
2103 printf("vfw_port_in_action_ipv4 pkts_received: %" PRIu64
2105 vfw_pipe->counters->pkts_received, n_pkts);
2107 /* first handle any previously buffered packets now released */
2108 vfw_handle_buffered_packets(p, vfw_pipe, ct,
2109 FORWARD_BUFFERED_PACKETS);
2111 /* now handle any new packets on input ports */
2112 if (likely(firewall_flag)) {
2113 keep_mask = rte_vfw_ipv4_packet_filter_and_process(pkts,
2114 keep_mask, vfw_pipe);
2115 vfw_pipe->counters->pkts_fw_forwarded +=
2116 __builtin_popcountll(keep_mask);
2120 rte_prefetch0((void*)vfw_pipe->plib_acl);
2121 rte_prefetch0((void*)vfw_rule_table_ipv4_active);
2122 #endif /* EN_SWP_ACL */
/* ACL classification: may narrow keep_mask and select which packets
 * need connection tracking (conntrack_mask / connexist_mask). */
2123 keep_mask = lib_acl_ipv4_pkt_work_key(
2124 vfw_pipe->plib_acl, pkts, keep_mask,
2125 &vfw_pipe->counters->pkts_drop_without_rule,
2126 vfw_rule_table_ipv4_active,
2127 action_array_active,
2128 action_counter_table,
2129 &conntrack_mask, &connexist_mask);
2130 vfw_pipe->counters->pkts_acl_forwarded +=
2131 __builtin_popcountll(keep_mask);
2132 if (conntrack_mask > 0) {
2133 keep_mask = conntrack_mask;
2134 ct_helper.no_new_cnxn_mask = connexist_mask;
2135 cnxn_tracking_is_active = 1;
2137 cnxn_tracking_is_active = 0;
2138 #endif /* ACL_ENABLE */
/* Connection tracking may further narrow keep_mask and request
 * synproxy replies / hijacks via ct_helper. */
2140 if (likely(cnxn_tracking_is_active)) {
2141 rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
2142 &keep_mask, &ct_helper, IPv4_HEADER_SIZE);
2143 synproxy_reply_mask = ct_helper.reply_pkt_mask;
2144 hijack_mask = ct_helper.hijack_mask;
2149 for(j = 0; j < (n_pkts & 0x3LLU); j++) {
2150 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2152 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2155 rte_prefetch0((void*)in_port_dir_a);
2156 rte_prefetch0((void*)prv_to_pub_map);
/* ARP/MAC rewrite: packets are handled four at a time with the
 * next group prefetched, then a scalar tail loop. */
2159 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
2160 for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
2161 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2163 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2166 pkt4_work_vfw_arp_ipv4_packets(&pkts[i], i, &keep_mask,
2167 synproxy_reply_mask, vfw_pipe);
2169 for (j = i; j < n_pkts; j++) {
2170 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2172 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2175 for (; i < n_pkts; i++) {
2176 pkt_work_vfw_arp_ipv4_packets(pkts[i], i, &keep_mask,
2177 synproxy_reply_mask, vfw_pipe);
2180 rte_prefetch0((void*)in_port_dir_a);
2181 rte_prefetch0((void*)prv_to_pub_map);
2182 rte_prefetch0((void*) & vfw_pipe->local_lib_arp_route_table);
2183 keep_mask = rte_vfw_arp_ipv4_packets(pkts, keep_mask,
2184 synproxy_reply_mask, vfw_pipe);
2187 if (vfw_debug > 1) {
2188 printf(" Exit in-port action with %p packet mask\n",
2190 if (keep_mask != packet_mask_in)
2191 printf("dropped packets, %p in, %p out\n",
2192 (void *)packet_mask_in,
2196 /* Update mask before returning, so that bad packets are dropped */
/* Packets waiting on ARP resolution are hijacked, not dropped. */
2197 if (arp_pkts_mask) {
2198 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
2201 pkts_drop_mask = packet_mask_in & ~keep_mask;
2203 if (unlikely(pkts_drop_mask != 0)) {
2204 /* printf("drop %p\n", (void *) pkts_drop_mask); */
2205 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
2208 if (unlikely(hijack_mask != 0))
2209 rte_pipeline_ah_packet_hijack(p, hijack_mask);
2211 vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
2212 vfw_pipe->counters->num_pkts_measurements++;
/* Close the per-burst latency measurement window. */
2214 end_tsc_measure(vfw_pipe, n_pkts);
2219 * The pipeline port-in action is used to do all the firewall and
2220 * connection tracking work for IPV6 packet.
2223 * A pointer to the pipeline.
2225 * A pointer to a burst of packets.
2227 * Number of packets to process.
2229 * A pointer to pipeline specific data.
2232 * 0 on success, negative on error.
/* NOTE(review): this extract skips source lines (the embedded numbering is
 * non-contiguous); comments describe only what the visible lines show.
 * IPv6 twin of vfw_port_in_action_ipv4: same flow using the IPv6 filter,
 * ACL table, header size and ND route table. */
2236 vfw_port_in_action_ipv6(struct rte_pipeline *p,
2237 struct rte_mbuf **pkts,
2238 __rte_unused uint32_t n_pkts, __rte_unused void *arg)
2240 struct vfw_ports_in_args *port_in_args =
2241 (struct vfw_ports_in_args *)arg;
2242 struct pipeline_vfw *vfw_pipe =
2243 (struct pipeline_vfw *)port_in_args->pipe;
2244 struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
/* Start the per-burst latency measurement window. */
2246 start_tsc_measure(vfw_pipe);
2248 uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
2249 uint64_t pkts_drop_mask;
2250 uint64_t hijack_mask = 0;
2251 uint64_t synproxy_reply_mask = 0; /* for synproxy */
2252 uint64_t keep_mask = packet_mask_in;
2254 uint64_t conntrack_mask = 0, connexist_mask = 0;
2255 struct rte_CT_helper ct_helper;
2259 * This routine uses a bit mask to represent which packets in the
2260 * "pkts" table are considered valid. Any table entry which exists
2261 * and is considered valid has the corresponding bit in the mask set.
2262 * Otherwise, it is cleared. Note that the mask is 64 bits,
2263 * but the number of packets in the table may be considerably less.
2264 * Any mask bits which do correspond to actual packets are cleared.
2265 * Various routines are called which may determine that an existing
2266 * packet is somehow invalid. The routine will return an altered bit
2267 * mask, with the bit cleared. At the end of all the checks,
2268 * packets are dropped if their mask bit is a zero
2271 rte_prefetch0(& vfw_pipe->counters);
2273 /* Pre-fetch all rte_mbuf header */
2274 for(j = 0; j < n_pkts; j++)
2275 rte_prefetch0(pkts[j]);
2277 memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
2278 rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
2279 rte_prefetch0(& vfw_pipe->counters->sum_latencies);
2282 printf("Enter in-port action with %p packet mask\n",
2283 (void *)packet_mask_in);
2284 vfw_pipe->counters->pkts_received =
2285 vfw_pipe->counters->pkts_received + n_pkts;
2287 printf("vfw_port_in_action pkts_received: %" PRIu64
2289 vfw_pipe->counters->pkts_received, n_pkts);
2291 /* first handle any previously buffered packets now released */
2292 vfw_handle_buffered_packets(p, vfw_pipe, ct,
2293 FORWARD_BUFFERED_PACKETS);
2295 /* now handle any new packets on input ports */
2296 if (likely(firewall_flag)) {
2297 keep_mask = rte_vfw_ipv6_packet_filter_and_process(pkts,
2298 keep_mask, vfw_pipe);
2299 vfw_pipe->counters->pkts_fw_forwarded +=
2300 __builtin_popcountll(keep_mask);
2305 rte_prefetch0((void*)vfw_pipe->plib_acl);
2306 rte_prefetch0((void*)vfw_rule_table_ipv6_active);
2307 #endif /* EN_SWP_ACL */
/* ACL classification: may narrow keep_mask and select which packets
 * need connection tracking (conntrack_mask / connexist_mask). */
2308 keep_mask = lib_acl_ipv6_pkt_work_key(
2309 vfw_pipe->plib_acl, pkts, keep_mask,
2310 &vfw_pipe->counters->pkts_drop_without_rule,
2311 vfw_rule_table_ipv6_active,
2312 action_array_active,
2313 action_counter_table,
2314 &conntrack_mask, &connexist_mask);
2315 vfw_pipe->counters->pkts_acl_forwarded +=
2316 __builtin_popcountll(keep_mask);
2317 if (conntrack_mask > 0) {
2318 keep_mask = conntrack_mask;
2319 ct_helper.no_new_cnxn_mask = connexist_mask;
2320 cnxn_tracking_is_active = 1;
2322 cnxn_tracking_is_active = 0;
2323 #endif /* ACL_ENABLE */
/* Connection tracking may further narrow keep_mask and request
 * synproxy replies / hijacks via ct_helper. */
2324 if (likely(cnxn_tracking_is_active)) {
2325 rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
2326 &keep_mask, &ct_helper, IPv6_HEADER_SIZE);
2327 synproxy_reply_mask = ct_helper.reply_pkt_mask;
2328 hijack_mask = ct_helper.hijack_mask;
2333 for(j = 0; j < (n_pkts & 0x3LLU); j++) {
2334 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2336 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2339 rte_prefetch0((void*)in_port_dir_a);
2340 rte_prefetch0(vfw_pipe->local_lib_nd_route_table);
/* ND/MAC rewrite: packets are handled four at a time with the
 * next group prefetched, then a scalar tail loop. */
2343 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
2344 for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
2345 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2347 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2350 pkt4_work_vfw_arp_ipv6_packets(&pkts[i], i, &keep_mask,
2351 synproxy_reply_mask, vfw_pipe);
2353 for (j = i; j < n_pkts; j++) {
2354 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2356 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2359 for (; i < n_pkts; i++) {
2360 pkt_work_vfw_arp_ipv6_packets(pkts[i], i, &keep_mask,
2361 synproxy_reply_mask, vfw_pipe);
2364 rte_prefetch0((void*)in_port_dir_a);
2365 rte_prefetch0((void*) & vfw_pipe->local_lib_arp_route_table);
2366 keep_mask = rte_vfw_arp_ipv6_packets(pkts, keep_mask,
2367 synproxy_reply_mask, vfw_pipe);
2370 if (vfw_debug > 1) {
2371 printf(" Exit in-port action with %p packet mask\n",
2373 if (keep_mask != packet_mask_in)
2374 printf("dropped packets, %p in, %p out\n",
2375 (void *)packet_mask_in,
2379 /* Update mask before returning, so that bad packets are dropped */
2381 pkts_drop_mask = packet_mask_in & ~keep_mask;
2383 if (unlikely(pkts_drop_mask != 0)) {
2384 /* printf("drop %p\n", (void *) pkts_drop_mask); */
2385 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
2388 if (unlikely(hijack_mask != 0))
2389 rte_pipeline_ah_packet_hijack(p, hijack_mask);
2391 vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
2392 vfw_pipe->counters->num_pkts_measurements++;
/* Close the per-burst latency measurement window. */
2394 end_tsc_measure(vfw_pipe, n_pkts);
2401 * Parse arguments in config file.
2404 * A pointer to the pipeline.
2406 * A pointer to pipeline specific parameters.
2409 * 0 on success, negative on error.
/* NOTE(review): this extract skips source lines (the embedded numbering is
 * non-contiguous); comments describe only what the visible lines show.
 * Each config argument is offered in turn to the ACL parser, then the
 * firewall-specific options ("traffic_type", "n_flows"), and finally the
 * connection-tracker option parser. */
2412 pipeline_vfw_parse_args(struct pipeline_vfw *vfw_pipe,
2413 struct pipeline_params *params)
2419 printf("VFW pipeline_vfw_parse_args params->n_args: %d\n",
2422 for (i = 0; i < params->n_args; i++) {
2423 char *arg_name = params->args_name[i];
2424 char *arg_value = params->args_value[i];
2426 printf("VFW args[%d]: %s %d, %s\n", i, arg_name,
2427 atoi(arg_value), arg_value);
/* Offer the argument to the ACL config parser first. */
2429 status = lib_acl_parse_config(vfw_pipe->plib_acl,
2430 arg_name, arg_value, &vfw_n_rules);
2432 printf("rte_ct_set_configuration_options =%s,%s",
2433 arg_name, arg_value);
2435 } else if (status == 0)
2438 #endif /* traffic_type */
/* "traffic_type" must be IP_VERSION_4 or IP_VERSION_6. */
2439 if (strcmp(arg_name, "traffic_type") == 0) {
2440 int traffic_type = atoi(arg_value);
2442 if (traffic_type == 0 ||
2443 !(traffic_type == IP_VERSION_4 ||
2444 traffic_type == IP_VERSION_6)) {
2445 printf("not IPV4/IPV6");
2449 vfw_pipe->traffic_type = traffic_type;
2455 if (strcmp(arg_name, "n_flows") == 0) {
2456 int n_flows = atoi(arg_value);
2461 /* must be power of 2, round up if not */
2462 if (!rte_is_power_of_2(n_flows))
2463 n_flows = rte_align32pow2(n_flows);
2465 vfw_pipe->n_flows = n_flows;
2469 /* not firewall option, process as cnxn tracking option */
2470 status = rte_ct_set_configuration_options(
2471 vfw_pipe->cnxn_tracker,
2472 arg_name, arg_value);
2474 printf("rte_ct_set_configuration_options =%s,%s",
2475 arg_name, arg_value);
2477 } else if (status == 0)
/* Forward declaration: dispatcher for pipeline-specific (custom) messages. */
2485 static void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p,
/* Dispatch table for the standard IP_PIPELINE message requests, indexed
 * by PIPELINE_MSG_REQ_* id; custom requests fall through to the VFW
 * custom handler declared above. */
2488 static pipeline_msg_req_handler handlers[] = {
2489 [PIPELINE_MSG_REQ_PING] = pipeline_msg_req_ping_handler,
2490 [PIPELINE_MSG_REQ_STATS_PORT_IN] =
2491 pipeline_msg_req_stats_port_in_handler,
2492 [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
2493 pipeline_msg_req_stats_port_out_handler,
2494 [PIPELINE_MSG_REQ_STATS_TABLE] = pipeline_msg_req_stats_table_handler,
2495 [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
2496 pipeline_msg_req_port_in_enable_handler,
2497 [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
2498 pipeline_msg_req_port_in_disable_handler,
2499 [PIPELINE_MSG_REQ_CUSTOM] = pipeline_vfw_msg_req_custom_handler,
/* Forward declaration: handler for the synproxy on/off flag message. */
2502 static void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
/* Dispatch table for VFW-specific custom messages, indexed by
 * PIPELINE_VFW_MSG_REQ_* id. */
2504 static pipeline_msg_req_handler custom_handlers[] = {
2506 [PIPELINE_VFW_MSG_REQ_SYNPROXY_FLAGS] =
2507 pipeline_vfw_msg_req_synproxy_flag_handler
2511 * Create and initialize Pipeline Back End (BE).
2514 * A pointer to the pipeline specific parameters..
2516 * A pointer to pipeline specific data.
2519 * A pointer to the pipeline create, NULL on error.
2522 *pipeline_vfw_init(struct pipeline_params *params, __rte_unused void *arg)
2526 /* Check input arguments */
2527 if ((params == NULL) ||
2528 (params->n_ports_in == 0) || (params->n_ports_out == 0))
2532 printf("num ports in %d / num ports out %d\n",
2533 params->n_ports_in, params->n_ports_out);
2535 /* Create a single pipeline instance and initialize. */
2536 struct pipeline_vfw *pipe_vfw;
2538 size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_vfw));
2539 pipe_vfw = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
2541 if (pipe_vfw == NULL)
2544 struct pipeline *pipe;
2546 pipe = &pipe_vfw->pipe;
2548 strncpy(pipe->name, params->name, sizeof(pipe->name));
2549 pipe->log_level = params->log_level;
2550 pipe_vfw->n_flows = 4096; /* small default value */
2551 pipe_vfw->traffic_type = MIX;
2552 pipe_vfw->pipeline_num = 0xff;
2553 for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
2554 pipe_vfw->links_map[i] = 0xff;
2555 pipe_vfw->outport_id[i] = 0xff;
2557 PLOG(pipe, HIGH, "VFW");
2559 /* Create a firewall instance and initialize. */
2560 pipe_vfw->cnxn_tracker =
2561 rte_zmalloc(NULL, rte_ct_get_cnxn_tracker_size(),
2562 RTE_CACHE_LINE_SIZE);
2564 if (pipe_vfw->cnxn_tracker == NULL)
2567 /* Create a acl instance and initialize. */
2568 pipe_vfw->plib_acl =
2569 rte_zmalloc(NULL, sizeof(struct lib_acl),
2570 RTE_CACHE_LINE_SIZE);
2572 if (pipe_vfw->plib_acl == NULL)
2575 timer_lcore = rte_lcore_id();
2577 * Now allocate a counter block entry. It appears that the
2578 * initialization of all instances is serialized on core 0,
2579 * so no lock is necessary.
2581 struct rte_VFW_counter_block *counter_ptr;
2583 if (rte_VFW_hi_counter_block_in_use == MAX_VFW_INSTANCES)
2584 /* error, exceeded table bounds */
2587 rte_VFW_hi_counter_block_in_use++;
2589 &rte_vfw_counter_table[rte_VFW_hi_counter_block_in_use];
2590 strncpy(counter_ptr->name, params->name, sizeof(counter_ptr->name));
2592 pipe_vfw->counters = counter_ptr;
2594 rte_ct_initialize_default_timeouts(pipe_vfw->cnxn_tracker);
2595 /* Parse arguments */
2596 if (pipeline_vfw_parse_args(pipe_vfw, params))
2599 uint16_t pointers_offset =
2600 META_DATA_OFFSET + offsetof(struct mbuf_tcp_meta_data, next);
2602 if (pipe_vfw->n_flows > 0)
2603 rte_ct_initialize_cnxn_tracker_with_synproxy(
2604 pipe_vfw->cnxn_tracker,
2609 pipe_vfw->counters->ct_counters =
2610 rte_ct_get_counter_address(pipe_vfw->cnxn_tracker);
2614 struct rte_pipeline_params pipeline_params = {
2615 .name = params->name,
2616 .socket_id = params->socket_id,
2617 .offset_port_id = META_DATA_OFFSET +
2618 offsetof(struct mbuf_tcp_meta_data, output_port)
2621 pipe->p = rte_pipeline_create(&pipeline_params);
2622 if (pipe->p == NULL) {
2631 * create a different "arg_ah" for each input port.
2632 * They differ only in the recorded port number. Unfortunately,
2633 * IP_PIPELINE does not pass port number in to input port handler
2636 uint32_t in_ports_arg_size =
2637 RTE_CACHE_LINE_ROUNDUP((sizeof(struct vfw_ports_in_args)) *
2638 (params->n_ports_in));
2639 struct vfw_ports_in_args *port_in_args =
2640 (struct vfw_ports_in_args *)
2641 rte_zmalloc(NULL, in_ports_arg_size, RTE_CACHE_LINE_SIZE);
2643 if (port_in_args == NULL)
2646 pipe->n_ports_in = params->n_ports_in;
2647 for (i = 0; i < pipe->n_ports_in; i++) {
2649 /* initialize this instance of port_in_args as necessary */
2650 port_in_args[i].pipe = pipe;
2651 port_in_args[i].cnxn_tracker = pipe_vfw->cnxn_tracker;
2653 struct rte_pipeline_port_in_params port_params = {
2655 pipeline_port_in_params_get_ops(¶ms->port_in
2658 pipeline_port_in_params_convert(¶ms->port_in
2660 .f_action = vfw_port_in_action_ipv4,
2661 .arg_ah = &(port_in_args[i]),
2662 .burst_size = params->port_in[i].burst_size,
2664 if (pipe_vfw->traffic_type == IP_VERSION_6)
2665 port_params.f_action = vfw_port_in_action_ipv6;
2666 int status = rte_pipeline_port_in_create(pipe->p, &port_params,
2667 &pipe->port_in_id[i]);
2670 rte_pipeline_free(pipe->p);
2677 pipe->n_ports_out = params->n_ports_out;
2678 for (i = 0; i < pipe->n_ports_out; i++) {
2679 struct rte_pipeline_port_out_params port_params = {
2680 .ops = pipeline_port_out_params_get_ops(
2681 ¶ms->port_out[i]),
2682 .arg_create = pipeline_port_out_params_convert(
2683 ¶ms->port_out[i]),
2688 int status = rte_pipeline_port_out_create(pipe->p, &port_params,
2689 &pipe->port_out_id[i]);
2692 rte_pipeline_free(pipe->p);
2698 int pipeline_num = 0;
2699 int dont_care = sscanf(params->name, "PIPELINE%d", &pipeline_num);
2702 printf("sscanf unble to read pipeline id\n");
2703 pipe_vfw->pipeline_num = (uint8_t) pipeline_num;
2704 register_pipeline_Qs(pipe_vfw->pipeline_num, pipe);
2705 set_link_map(pipe_vfw->pipeline_num, pipe, pipe_vfw->links_map);
2706 set_outport_id(pipe_vfw->pipeline_num, pipe,
2707 pipe_vfw->outport_id);
2708 printf("pipeline_num=%d\n", pipeline_num);
2710 /*If this is the first VFW thread, create common VFW Rule tables*/
2711 if (rte_VFW_hi_counter_block_in_use == 0) {
2712 vfw_rule_table_ipv4_active =
2713 lib_acl_create_active_standby_table_ipv4(1,
2715 if (vfw_rule_table_ipv4_active == NULL) {
2716 printf("Failed to create active table for IPV4\n");
2717 rte_pipeline_free(pipe->p);
2718 rte_free(pipe_vfw->cnxn_tracker);
2719 rte_free(pipe_vfw->plib_acl);
2723 vfw_rule_table_ipv4_standby =
2724 lib_acl_create_active_standby_table_ipv4(2,
2726 if (vfw_rule_table_ipv4_standby == NULL) {
2727 printf("Failed to create standby table for IPV4\n");
2728 rte_pipeline_free(pipe->p);
2729 rte_free(pipe_vfw->cnxn_tracker);
2730 rte_free(pipe_vfw->plib_acl);
2735 vfw_rule_table_ipv6_active =
2736 lib_acl_create_active_standby_table_ipv6(1,
2739 if (vfw_rule_table_ipv6_active == NULL) {
2740 printf("Failed to create active table for IPV6\n");
2741 rte_pipeline_free(pipe->p);
2742 rte_free(pipe_vfw->cnxn_tracker);
2743 rte_free(pipe_vfw->plib_acl);
2747 vfw_rule_table_ipv6_standby =
2748 lib_acl_create_active_standby_table_ipv6(2,
2750 if (vfw_rule_table_ipv6_standby == NULL) {
2751 printf("Failed to create standby table for IPV6\n");
2752 rte_pipeline_free(pipe->p);
2753 rte_free(pipe_vfw->cnxn_tracker);
2754 rte_free(pipe_vfw->plib_acl);
2766 struct rte_pipeline_table_params table_params = {
2767 .ops = &rte_table_stub_ops,
2769 .f_action_hit = NULL,
2770 .f_action_miss = NULL,
2772 .action_data_size = 0,
2775 int status = rte_pipeline_table_create(pipe->p,
2777 &pipe->table_id[0]);
2780 rte_pipeline_free(pipe->p);
2785 struct rte_pipeline_table_entry default_entry = {
2786 .action = RTE_PIPELINE_ACTION_PORT_META
2789 struct rte_pipeline_table_entry *default_entry_ptr;
2791 status = rte_pipeline_table_default_entry_add(pipe->p,
2794 &default_entry_ptr);
2797 rte_pipeline_free(pipe->p);
2801 for (i = 0; i < pipe->n_ports_in; i++) {
2802 int status = rte_pipeline_port_in_connect_to_table(
2804 pipe->port_in_id[i],
2808 rte_pipeline_free(pipe->p);
2814 /* Enable input ports */
2815 for (i = 0; i < pipe->n_ports_in; i++) {
2817 rte_pipeline_port_in_enable(pipe->p, pipe->port_in_id[i]);
2820 rte_pipeline_free(pipe->p);
2826 /* Check pipeline consistency */
2827 if (rte_pipeline_check(pipe->p) < 0) {
2828 rte_pipeline_free(pipe->p);
2833 /* Message queues */
2834 pipe->n_msgq = params->n_msgq;
2835 for (i = 0; i < pipe->n_msgq; i++)
2836 pipe->msgq_in[i] = params->msgq_in[i];
2838 for (i = 0; i < pipe->n_msgq; i++)
2839 pipe->msgq_out[i] = params->msgq_out[i];
2841 /* Message handlers */
2842 memcpy(pipe->handlers, handlers, sizeof(pipe->handlers));
2843 memcpy(pipe_vfw->custom_handlers, custom_handlers,
2844 sizeof(pipe_vfw->custom_handlers));
2850 * Free resources and delete pipeline.
2853 * A pointer to the pipeline.
2856 * 0 on success, negative on error.
/*
 * BE f_free callback: release the rte_pipeline owned by this pipeline
 * instance.
 * NOTE(review): this chunk has extraction gaps -- interior lines between
 * original lines 2858 and 2867 (opening brace, the body of the argument
 * check, and the return path) are missing here; only the visible fragment
 * is annotated.
 */
2858 static int pipeline_vfw_free(void *pipeline)
2860 struct pipeline *p = (struct pipeline *)pipeline;
2862 /* Check input arguments */
2866 /* Free resources */
2867 rte_pipeline_free(p->p);
2873 * Callback function to map input/output ports.
2876 * A pointer to the pipeline.
2880 * A pointer to the Output port.
2883 * 0 on success, negative on error.
/*
 * BE f_track callback: map an input port to its corresponding output
 * port. Arguments are validated first; the visible branch covers the
 * single-input-port case. Remaining logic (return statements, the
 * multi-port path) falls in extraction gaps and is not shown here.
 */
2886 pipeline_vfw_track(void *pipeline, __rte_unused uint32_t port_in,
2889 struct pipeline *p = (struct pipeline *)pipeline;
2891 /* Check input arguments */
/* port_in is still range-checked despite its __rte_unused marker --
 * the attribute only suppresses unused-argument warnings. */
2892 if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
2895 if (p->n_ports_in == 1) {
2904 * Callback function to process timers.
2907 * A pointer to the pipeline.
2910 * 0 on success, negative on error.
/*
 * BE f_timer callback: periodic housekeeping for this VFW thread.
 * Ordering is deliberate (see comment below): forward the good
 * synproxy-buffered packets first, then expire connection-tracking
 * timers, then drop packets buffered past their timeout.
 */
2912 static int pipeline_vfw_timer(void *pipeline)
2914 struct pipeline_vfw *p = (struct pipeline_vfw *)pipeline;
2917 * handle any good buffered packets released by synproxy before checking
2918 * for packets released by synproxy due to timeout.
2919 * Don't want packets missed
2922 vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
2923 FORWARD_BUFFERED_PACKETS);
/* Service pending FE messages and flush any queued-up output packets. */
2925 pipeline_msg_req_handle(&p->pipe);
2926 rte_pipeline_flush(p->pipe.p);
/* Advance connection-tracking timers on this thread's tracker. */
2928 rte_ct_handle_expired_timers(p->cnxn_tracker);
2930 /* now handle packets released by synproxy due to timeout. */
2931 vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
2932 DELETE_BUFFERED_PACKETS);
2938 * Callback function to process CLI commands from FE.
2941 * A pointer to the pipeline.
2943 * A pointer to command specific data.
2946 * A pointer to message handler on success,
2947 * pipeline_msg_req_invalid_handler on error.
/*
 * Dispatch a custom FE message to this pipeline's per-subtype handler
 * table. Out-of-range subtypes and NULL table slots both fall back to
 * pipeline_msg_req_invalid_handler, so f_handle is never NULL when
 * invoked. Returns whatever the selected handler returns (the response
 * message).
 */
2949 void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p, void *msg)
2951 struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
2952 struct pipeline_custom_msg_req *req = msg;
2953 pipeline_msg_req_handler f_handle;
/* Bounds-check the subtype before indexing custom_handlers[]. */
2955 f_handle = (req->subtype < PIPELINE_VFW_MSG_REQS) ?
2956 pipe_vfw->custom_handlers[req->subtype] :
2957 pipeline_msg_req_invalid_handler;
2959 if (f_handle == NULL)
2960 f_handle = pipeline_msg_req_invalid_handler;
2962 return f_handle(p, req);
2966 * Handler for synproxy ON/OFF CLI command.
2969 * A pointer to the pipeline.
2971 * A pointer to command specific data.
2974 * Response message contains status.
/*
 * Handle the synproxy ON/OFF CLI message: flag 0 disables and flag 1
 * enables synproxy on this thread's connection tracker; any other value
 * is rejected. rsp aliases msg -- the request buffer is reused as the
 * response. The status-setting and return lines fall in extraction gaps
 * and are not visible in this chunk.
 */
2977 void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
2980 struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
2981 struct pipeline_vfw_synproxy_flag_msg_req *req = msg;
2982 struct pipeline_vfw_synproxy_flag_msg_rsp *rsp = msg;
2984 if (req->synproxy_flag == 0) {
2985 rte_ct_disable_synproxy(pipe_vfw->cnxn_tracker);
2987 printf("synproxy turned OFF for %s\n", p->name);
2988 } else if (req->synproxy_flag == 1) {
2989 rte_ct_enable_synproxy(pipe_vfw->cnxn_tracker);
2991 printf("synproxy turned ON for %s\n", p->name);
2993 printf("Invalid synproxy setting\n");
3000 struct pipeline_be_ops pipeline_vfw_be_ops = {
3001 .f_init = pipeline_vfw_init,
3002 .f_free = pipeline_vfw_free,
3004 .f_timer = pipeline_vfw_timer,
3005 .f_track = pipeline_vfw_track,