2 // Copyright (c) 2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
19 * Pipeline VFW BE Implementation.
21 * Implementation of Pipeline VFW Back End (BE).
22 * Responsible for packet processing.
27 //#define EN_SWP_ARP 1
36 #include <rte_common.h>
37 #include <rte_malloc.h>
38 #include <rte_ether.h>
39 #include <rte_ethdev.h>
43 #include <rte_byteorder.h>
45 #include <rte_table_lpm.h>
46 #include <rte_table_hash.h>
47 #include <rte_table_array.h>
48 #include <rte_table_acl.h>
49 #include <rte_table_stub.h>
50 #include <rte_timer.h>
51 #include <rte_cycles.h>
52 #include <rte_pipeline.h>
53 #include <rte_spinlock.h>
54 #include <rte_prefetch.h>
55 #include "pipeline_actions_common.h"
56 #include "hash_func.h"
57 #include "pipeline_vfw.h"
58 #include "pipeline_vfw_be.h"
59 #include "rte_cnxn_tracking.h"
60 #include "pipeline_arpicmp_be.h"
61 #include "vnf_common.h"
62 #include "vnf_define.h"
65 #include "lib_icmpv6.h"
66 #include "pipeline_common_fe.h"
71 uint8_t firewall_flag = 1;
72 uint8_t VFW_DEBUG = 0;
73 uint8_t cnxn_tracking_is_active = 1;
75 * A structure defining the VFW pipeline input port per thread data.
/* Per-input-port context for the VFW pipeline: the owning pipeline and the
 * connection tracker used by the port handlers on that thread. */
77 struct vfw_ports_in_args {
78 struct pipeline *pipe; /* back-pointer to the owning pipeline */
79 struct rte_ct_cnxn_tracker *cnxn_tracker; /* shared connection tracker */
80 } __rte_cache_aligned;
82 * A structure defining the VFW pipeline per thread data.
86 pipeline_msg_req_handler custom_handlers[PIPELINE_VFW_MSG_REQS];
88 struct rte_ct_cnxn_tracker *cnxn_tracker;
89 struct rte_VFW_counter_block *counters;
90 struct rte_mbuf *pkt_buffer[PKT_BUFFER_SIZE];
91 struct lib_acl *plib_acl;
92 /* timestamp retrieved during in-port computations */
96 uint8_t links_map[PIPELINE_MAX_PORT_IN];
97 uint8_t outport_id[PIPELINE_MAX_PORT_IN];
99 } __rte_cache_aligned;
101 * A structure defining the mbuf meta data for VFW.
/* Per-mbuf metadata written at META_DATA_OFFSET into the mbuf headroom.
 * Layout is shared with the rest of the pipeline; do not reorder fields. */
103 struct mbuf_tcp_meta_data {
104 /* output port stored for RTE_PIPELINE_ACTION_PORT_META */
105 uint32_t output_port;
106 struct rte_mbuf *next; /* next pointer for chained buffers */
107 } __rte_cache_aligned;
109 #define DONT_CARE_TCP_PACKET 0
110 #define IS_NOT_TCP_PACKET 0
111 #define IS_TCP_PACKET 1
113 #define META_DATA_OFFSET 128
115 #define RTE_PKTMBUF_HEADROOM 128 /* where is this defined ? */
116 #define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
117 #define ETH_HDR_SIZE 14
118 #define PROTOCOL_START (IP_START + 9)
120 #define TCP_START (IP_START + 20)
121 #define RTE_LB_PORT_OFFSET 204 /* TODO: Need definition in LB header */
122 #define TCP_START_IPV6 (IP_START + 40)
123 #define PROTOCOL_START_IPV6 (IP_START + 6)
124 #define IP_HDR_DSCP_OFST 1
126 #define TCP_PROTOCOL 6
127 #define UDP_PROTOCOL 17
129 #define DELETE_BUFFERED_PACKETS 0
130 #define FORWARD_BUFFERED_PACKETS 1
134 #define IPv4_HEADER_SIZE 20
135 #define IPv6_HEADER_SIZE 40
137 #define IP_VERSION_4 4
138 #define IP_VERSION_6 6
141 #define IP_HDR_SIZE_IPV6 40
142 #define IP_HDR_DSCP_OFST_IPV6 0
143 #define IP_HDR_LENGTH_OFST_IPV6 4
144 #define IP_HDR_PROTOCOL_OFST_IPV6 6
145 #define IP_HDR_DST_ADR_OFST_IPV6 24
146 #define MAX_NUM_LOCAL_MAC_ADDRESS 16
147 /** The counter table for VFW pipeline per thread data.*/
148 struct rte_VFW_counter_block rte_vfw_counter_table[MAX_VFW_INSTANCES]
150 int rte_VFW_hi_counter_block_in_use = -1;
152 /* a spin lock used during vfw initialization only */
153 rte_spinlock_t rte_VFW_init_lock = RTE_SPINLOCK_INITIALIZER;
156 struct pipeline_action_key *action_array_a;
157 struct pipeline_action_key *action_array_b;
158 struct pipeline_action_key *action_array_active;
159 struct pipeline_action_key *action_array_standby;
160 uint32_t action_array_size;
161 struct action_counter_block
162 action_counter_table[MAX_VFW_INSTANCES][action_array_max]
165 * Pipeline table strategy for firewall. Unfortunately, there does not seem to
166 * be any use for the built-in table lookup of ip_pipeline for the firewall.
167 * The main table requirement of the firewall is the hash table to maintain
168 connection info, but that is implemented separately in the connection
169 * tracking library. So a "dummy" table lookup will be performed.
170 * TODO: look into "stub" table and see if that can be used
171 * to avoid useless table lookup
173 uint64_t arp_pkts_mask;
175 /* Start TSC measurement */
176 /* Prefetch counters and pipe before this function */
177 static inline void start_tsc_measure(struct pipeline_vfw *vfw_pipe) {
178 vfw_pipe->counters->entry_timestamp = rte_get_tsc_cycles();
179 if (likely(vfw_pipe->counters->exit_timestamp))
180 vfw_pipe->counters->external_time_sum +=
181 vfw_pipe->counters->entry_timestamp -
182 vfw_pipe->counters->exit_timestamp;
185 /* End TSC measurement */
186 static inline void end_tsc_measure(
187 struct pipeline_vfw *vfw_pipe,
190 if (likely(n_pkts > 1)) {
191 vfw_pipe->counters->exit_timestamp = rte_get_tsc_cycles();
192 vfw_pipe->counters->internal_time_sum +=
193 vfw_pipe->counters->exit_timestamp -
194 vfw_pipe->counters->entry_timestamp;
195 vfw_pipe->counters->time_measurements++;
197 /* small counts skew results, ignore */
198 vfw_pipe->counters->exit_timestamp = 0;
203 * Print packet for debugging.
206 * A pointer to the packet.
209 static __rte_unused void print_pkt(struct rte_mbuf *pkt)
212 int size = (int)sizeof(struct mbuf_tcp_meta_data);
213 uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, META_DATA_OFFSET);
215 printf("Meta-data:\n");
216 for (i = 0; i < size; i++) {
217 printf("%02x ", rd[i]);
218 if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
222 printf("IP and TCP/UDP headers:\n");
223 rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, IP_START);
224 for (i = 0; i < IP_HDR_SIZE_IPV6; i++) {
225 printf("%02x ", rd[i]);
226 if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
232 /* TODO: are the protocol numbers defined somewhere with meaningful names? */
233 #define IP_ICMP_PROTOCOL 1
234 #define IP_TCP_PROTOCOL 6
235 #define IP_UDP_PROTOCOL 17
236 #define IPv6_FRAGMENT_HEADER 44
239 * Return ethernet header structure from packet.
242 * A pointer to the packet.
245 static inline struct ether_hdr *rte_vfw_get_ether_addr(struct rte_mbuf *pkt)
247 return (struct ether_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
252 * Return IPV4 header structure from packet.
255 * A pointer to the packet.
259 static inline struct ipv4_hdr *rte_vfw_get_IPv4_hdr_addr(
260 struct rte_mbuf *pkt)
262 return (struct ipv4_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
265 static inline int rte_vfw_is_IPv4(struct rte_mbuf *pkt)
267 /* NOTE: Only supporting IP headers with no options,
268 * so header is fixed size */
269 uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
272 return ip_type == IPv4_HDR_VERSION;
275 static inline int rte_vfw_is_IPv6(struct rte_mbuf *pkt)
277 /* NOTE: Only supporting IP headers with no options,
278 * so header is fixed size */
279 uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
282 return ip_type == IPv6_HDR_VERSION;
285 static inline void rte_vfw_incr_drop_ctr(uint64_t *counter)
287 if (likely(firewall_flag))
/* Divert ARP / ICMP / ICMPv6 control traffic to the last output port
 * (the ARP-ICMP port) instead of the normal firewall path.
 * NOTE(review): several case labels, the port-insert argument lines and
 * the return statements are missing from this dump -- do not edit logic
 * here without the complete source. */
291 static uint8_t check_arp_icmp(
292 struct rte_mbuf *pkt,
293 struct pipeline_vfw *vfw_pipe)
295 struct ether_hdr *ehdr;
296 struct app_link_params *link;
/* IPv6 solicited-node multicast prefix (ff02::1:ffxx:xxxx) */
297 uint8_t solicited_node_multicast_addr[IPV6_ADD_SIZE] = {
298 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
299 0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00};
301 /* ARP outport number */
302 uint16_t out_port = vfw_pipe->pipe.n_ports_out - 1;
303 struct ipv4_hdr *ipv4_h;
304 struct ipv6_hdr *ipv6_h;
305 link = &myApp->link_params[pkt->port];
307 ehdr = rte_vfw_get_ether_addr(pkt);
/* dispatch on the (big-endian) ethertype */
308 switch (rte_be_to_cpu_16(ehdr->ether_type)) {
311 rte_pipeline_port_out_packet_insert(
316 vfw_pipe->counters->arpicmpPktCount++;
320 ipv4_h = (struct ipv4_hdr *)
321 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
/* ICMP addressed to this link's own IP is handled locally */
322 if ((ipv4_h->next_proto_id == IP_PROTOCOL_ICMP) &&
324 rte_be_to_cpu_32(ipv4_h->dst_addr)) {
325 if (is_phy_port_privte(pkt->port)) {
326 rte_pipeline_port_out_packet_insert(
331 vfw_pipe->counters->arpicmpPktCount++;
338 ipv6_h = (struct ipv6_hdr *)
339 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
/* ICMPv6 destined to our address or our solicited-node group */
341 if (ipv6_h->proto == ICMPV6_PROTOCOL_ID) {
342 if (!memcmp(ipv6_h->dst_addr, link->ipv6, IPV6_ADD_SIZE)
343 || !memcmp(ipv6_h->dst_addr,
344 solicited_node_multicast_addr,
345 IPV6_ADD_CMP_MULTI)) {
347 rte_pipeline_port_out_packet_insert(
352 vfw_pipe->counters->arpicmpPktCount++;
/* anything else of this ethertype counts as unsupported */
356 pkts_drop_unsupported_type++;
369 * Performs basic VFW ipv4 packet filtering.
371 * A pointer to the packets.
375 * A pointer to VFW pipeline.
/* Basic IPv4 sanity filtering for a burst: drops bad-size, fragmented,
 * TTL<=1 and unsupported-protocol packets, and returns the surviving
 * packet bitmask. NOTE(review): the return-type line, several braces and
 * else/continue lines are missing from this dump. */
379 rte_vfw_ipv4_packet_filter_and_process(struct rte_mbuf **pkts,
381 struct pipeline_vfw *vfw_pipe)
385 * Make use of cache prefetch. At beginning of loop, want to prefetch
386 * mbuf data for next iteration (not current one).
387 * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
388 * is 20 bytes (extensions not supported), while the IPv6 header is 40
389 * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
390 * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
391 * need two pre-fetches.
394 uint8_t pos, next_pos = 0;
395 uint64_t pkt_mask; /* bitmask representing a single packet */
396 struct rte_mbuf *pkt;
397 struct rte_mbuf *next_pkt = NULL;
398 struct ipv4_hdr *ihdr4;
399 void *next_iphdr = NULL;
401 if (unlikely(pkts_mask == 0))
/* index of the lowest set bit = first packet to process */
403 pos = (uint8_t) __builtin_ctzll(pkts_mask);
404 pkt_mask = 1LLU << pos; /* bitmask representing only this packet */
407 uint64_t bytes_processed = 0;
408 /* bitmap of packets left to process */
409 uint64_t pkts_to_process = pkts_mask;
410 /* bitmap of valid packets to return */
411 uint64_t valid_packets = pkts_mask;
414 /* prefetch counters, updated below. Most likely counters to update
416 rte_prefetch0(&vfw_pipe->counters);
418 do { /* always execute at least once */
420 /* remove this packet from remaining list */
421 uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
423 if (likely(next_pkts_to_process)) {
424 /* another packet to process after this, prefetch it */
427 (uint8_t) __builtin_ctzll(next_pkts_to_process);
428 next_pkt = pkts[next_pos];
429 next_iphdr = RTE_MBUF_METADATA_UINT32_PTR(next_pkt,
431 rte_prefetch0(next_iphdr);
435 /* remove this packet from remaining list */
436 pkts_to_process &= ~pkt_mask;
/* control traffic (ARP/ICMP) is consumed by check_arp_icmp */
439 if (!check_arp_icmp(pkt, vfw_pipe)) {
440 /* make next packet data the current */
441 pkts_to_process = next_pkts_to_process;
445 pkt_mask = 1LLU << pos;
446 valid_packets &= ~pkt_mask;
451 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
453 bytes_processed += packet_length;
455 ihdr4 = (struct ipv4_hdr *)
456 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
458 /* verify that packet size according to mbuf is at least
459 * as large as the size according to the IP header.
462 uint32_t ip_length = rte_bswap16(ihdr4->total_length);
465 (ip_length > (packet_length - ETH_HDR_SIZE))) {
467 vfw_pipe->counters->pkts_drop_bad_size++;
471 * IPv4 fragmented if: MF (more fragments) or Fragment
472 * Offset are non-zero. Header in Intel order, so flip
473 * constant to compensate. Note that IPv6 uses a header
474 * extension for identifying fragments.
477 int fragmented = (ihdr4->fragment_offset & 0xff3f) != 0;
478 uint8_t ttl = ihdr4->time_to_live;
480 if (unlikely(fragmented)) {
482 vfw_pipe->counters->pkts_drop_fragmented++;
485 if (unlikely(ttl <= 1)) {
487 * about to decrement to zero (or is somehow
488 * already zero), so discard
491 vfw_pipe->counters->pkts_drop_ttl++;
495 * Dropping the packets other than TCP AND UDP.
498 uint8_t proto = ihdr4->next_proto_id;
500 if (unlikely(!(proto == IP_TCP_PROTOCOL ||
501 proto == IP_UDP_PROTOCOL ||
502 proto == IP_ICMP_PROTOCOL))) {
505 pkts_drop_unsupported_type++;
/* any of the checks above sets discard; clear the packet's bit */
508 if (unlikely(discard)) {
509 valid_packets &= ~pkt_mask;
512 /* make next packet data the current */
513 pkts_to_process = next_pkts_to_process;
517 pkt_mask = 1LLU << pos;
519 } while (pkts_to_process);
521 /* finalize counters, etc. */
522 vfw_pipe->counters->bytes_processed += bytes_processed;
/* with the firewall disabled, presumably all packets pass -- the
 * else branch is missing from this dump, confirm */
524 if (likely(firewall_flag))
525 return valid_packets;
530 * Performs basic VFW IPV6 packet filtering.
532 * A pointer to the packets.
536 * A pointer to VFW pipeline.
/* Basic IPv6 sanity filtering for a burst: drops bad-size, fragment-header,
 * hop-limit<=1 and unsupported-protocol packets, and returns the surviving
 * packet bitmask. NOTE(review): return-type line, braces and else branches
 * are missing from this dump. */
539 rte_vfw_ipv6_packet_filter_and_process(struct rte_mbuf **pkts,
541 struct pipeline_vfw *vfw_pipe)
545 * Make use of cache prefetch. At beginning of loop, want to prefetch
546 * mbuf data for next iteration (not current one).
547 * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
548 * is 20 bytes (extensions not supported), while the IPv6 header is 40
549 * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
550 * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
551 * need two pre-fetches.
554 uint8_t pos, next_pos = 0;
555 uint64_t pkt_mask; /* bitmask representing a single packet */
556 struct rte_mbuf *pkt;
557 struct rte_mbuf *next_pkt = NULL;
558 struct ipv6_hdr *ihdr6;
559 void *next_iphdr = NULL;
561 if (unlikely(pkts_mask == 0))
563 pos = (uint8_t) __builtin_ctzll(pkts_mask);
564 pkt_mask = 1LLU << pos; /* bitmask representing only this packet */
567 uint64_t bytes_processed = 0;
568 /* bitmap of packets left to process */
569 uint64_t pkts_to_process = pkts_mask;
570 /* bitmap of valid packets to return */
571 uint64_t valid_packets = pkts_mask;
573 /* prefetch counters, updated below. Most likely counters to update
575 rte_prefetch0(&vfw_pipe->counters);
577 do { /* always execute at least once */
579 /* remove this packet from remaining list */
580 uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
582 if (likely(next_pkts_to_process)) {
583 /* another packet to process after this, prefetch it */
586 (uint8_t) __builtin_ctzll(next_pkts_to_process);
587 next_pkt = pkts[next_pos];
589 RTE_MBUF_METADATA_UINT32_PTR(next_pkt, IP_START);
590 rte_prefetch0(next_iphdr);
594 /* remove this packet from remaining list */
595 pkts_to_process &= ~pkt_mask;
/* control traffic (ARP/ICMPv6) is consumed by check_arp_icmp */
598 if (!check_arp_icmp(pkt, vfw_pipe)) {
599 /* make next packet data the current */
600 pkts_to_process = next_pkts_to_process;
604 pkt_mask = 1LLU << pos;
605 valid_packets &= ~pkt_mask;
610 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
612 bytes_processed += packet_length;
614 ihdr6 = (struct ipv6_hdr *)
615 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
618 * verify that packet size according to mbuf is at least
619 * as large as the size according to the IP header.
620 * For IPv6, note that size includes header extensions
621 * but not the base header size
625 rte_bswap16(ihdr6->payload_len) + IPv6_HEADER_SIZE;
628 (ip_length > (packet_length - ETH_HDR_SIZE))) {
630 vfw_pipe->counters->pkts_drop_bad_size++;
634 * Dropping the packets other than TCP AND UDP.
637 uint8_t proto = ihdr6->proto;
639 if (unlikely(!(proto == IP_TCP_PROTOCOL ||
640 proto == IP_UDP_PROTOCOL ||
641 proto == IP_ICMP_PROTOCOL))) {
/* IPv6 signals fragmentation via a next-header extension */
643 if (proto == IPv6_FRAGMENT_HEADER)
645 pkts_drop_fragmented++;
648 pkts_drop_unsupported_type++;
652 * Behave like a router, and decrement the TTL of an
653 * IP packet. If this causes the TTL to become zero,
654 * the packet will be discarded. Unlike a router,
655 * no ICMP code 11 (Time * Exceeded) message will be
656 * sent back to the packet originator.
659 if (unlikely(ihdr6->hop_limits <= 1)) {
661 * about to decrement to zero (or is somehow
662 * already zero), so discard
665 vfw_pipe->counters->pkts_drop_ttl++;
668 if (unlikely(discard))
669 valid_packets &= ~pkt_mask;
673 /* make next packet data the current */
674 pkts_to_process = next_pkts_to_process;
678 pkt_mask = 1LLU << pos;
680 } while (pkts_to_process);
682 /* finalize counters, etc. */
683 vfw_pipe->counters->bytes_processed += bytes_processed;
/* else branch missing from this dump -- confirm firewall-off path */
685 if (likely(firewall_flag))
686 return valid_packets;
692 * exchange the mac address so source becomes destination and vice versa.
695 * A pointer to the ethernet header.
698 static inline void rte_sp_exchange_mac_addresses(struct ether_hdr *ehdr)
700 struct ether_addr saved_copy;
702 ether_addr_copy(&ehdr->d_addr, &saved_copy);
703 ether_addr_copy(&ehdr->s_addr, &ehdr->d_addr);
704 ether_addr_copy(&saved_copy, &ehdr->s_addr);
709 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
710 * To support synproxy, some (altered) packets may need to be sent back where
711 * they came from. The ip header has already been adjusted, but the ethernet
712 * header has not, so this must be performed here.
713 * Return an updated pkts_mask, since arp may drop some packets
716 * A pointer to the packet array.
718 * Packet num to start processing
721 * @param synproxy_reply_mask
722 * Reply Packet mask for Synproxy
724 * A pointer to VFW pipeline.
/* Resolve next-hop MACs for up to 4 IPv4 packets starting at pkt_num:
 * fills in ethernet addresses and output port, queues packets whose ARP
 * entry is unresolved (via *arp_hijack_mask) and drops the rest.
 * NOTE(review): loop-index declaration, pkt assignment, continue lines and
 * braces are missing from this dump -- do not rework logic from this view. */
727 pkt4_work_vfw_arp_ipv4_packets(struct rte_mbuf **pkts,
730 uint64_t synproxy_reply_mask,
731 struct pipeline_vfw *vfw_pipe)
736 struct mbuf_tcp_meta_data *meta_data_addr;
737 struct ether_hdr *ehdr;
738 struct rte_mbuf *pkt;
740 for (i = 0; i < 4; i++) {
741 uint32_t dest_if = INVALID_DESTIF;
742 /* bitmask representing only this packet */
743 uint64_t pkt_mask = 1LLU << (pkt_num + i);
/* skip slots already cleared from the mask */
747 if(!(*pkts_mask & pkt_mask))
750 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
752 meta_data_addr = (struct mbuf_tcp_meta_data *)
753 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
754 ehdr = rte_vfw_get_ether_addr(pkt);
757 struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
758 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
761 uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
/* synproxy reply: send back where it came from, so swap MACs */
763 rte_sp_exchange_mac_addresses(ehdr);
765 struct arp_entry_data *ret_arp_data = NULL;
766 ret_arp_data = get_dest_mac_addr_port(dest_address,
767 &dest_if, &ehdr->d_addr);
768 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
770 if (arp_cache_dest_mac_present(dest_if)) {
771 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
772 update_nhip_access(dest_if);
/* flush packets buffered while this entry was unresolved */
773 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
774 arp_send_buffered_pkts(ret_arp_data,
775 &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
780 if (unlikely(ret_arp_data == NULL)) {
782 printf("%s: NHIP Not Found, nhip:%x , "
783 "outport_id: %d\n", __func__, nhip,
784 vfw_pipe->outport_id[dest_if]);
788 pkts_drop_without_arp_entry++;
791 if (ret_arp_data->status == INCOMPLETE ||
792 ret_arp_data->status == PROBE) {
/* cap the number of packets buffered per unresolved entry */
793 if (ret_arp_data->num_pkts >= NUM_DESC) {
794 /* ICMP req sent, drop packet by
795 * changing the mask */
797 pkts_drop_without_arp_entry++;
800 //arp_pkts_mask |= pkt_mask;
/* hijack the packet: caller must not forward it */
801 *arp_hijack_mask |= pkt_mask;
802 arp_queue_unresolved_packet(ret_arp_data, pkt);
812 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
813 * To support synproxy, some (altered) packets may need to be sent back where
814 * they came from. The ip header has already been adjusted, but the ethernet
815 * header has not, so this must be performed here.
816 * Return an updated pkts_mask, since arp may drop some packets
819 * A pointer to the packet.
821 * Packet number to process
823 * Packet mask pointer
824 * @param synproxy_reply_mask
825 * Reply Packet mask for Synproxy
827 * A pointer to VFW pipeline.
/* Single-packet variant of pkt4_work_vfw_arp_ipv4_packets: resolve the
 * next-hop MAC for one IPv4 packet, queue it while ARP is unresolved
 * (global arp_pkts_mask) or drop it. NOTE(review): pkt assignment, return
 * lines and braces are missing from this dump. */
830 pkt_work_vfw_arp_ipv4_packets(struct rte_mbuf *pkts,
833 uint64_t synproxy_reply_mask,
834 struct pipeline_vfw *vfw_pipe)
837 uint32_t dest_if = INVALID_DESTIF;
839 struct mbuf_tcp_meta_data *meta_data_addr;
840 struct ether_hdr *ehdr;
841 struct rte_mbuf *pkt;
842 uint64_t pkt_mask = 1LLU << pkt_num;
846 if(*pkts_mask & pkt_mask) {
848 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
850 meta_data_addr = (struct mbuf_tcp_meta_data *)
851 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
852 ehdr = rte_vfw_get_ether_addr(pkt);
855 struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
856 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
859 uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
/* synproxy reply path: swap MACs to return the packet */
861 rte_sp_exchange_mac_addresses(ehdr);
863 struct arp_entry_data *ret_arp_data = NULL;
864 ret_arp_data = get_dest_mac_addr_port(dest_address,
865 &dest_if, &ehdr->d_addr);
866 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
868 if (arp_cache_dest_mac_present(dest_if)) {
870 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
871 update_nhip_access(dest_if);
/* flush packets buffered while this entry was unresolved */
872 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
873 arp_send_buffered_pkts(ret_arp_data,
874 &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
878 if (unlikely(ret_arp_data == NULL)) {
881 printf("%s: NHIP Not Found, nhip:%x , "
882 "outport_id: %d\n", __func__, nhip,
883 vfw_pipe->outport_id[dest_if]);
886 pkts_drop_without_arp_entry++;
889 if (ret_arp_data->status == INCOMPLETE ||
890 ret_arp_data->status == PROBE) {
891 if (ret_arp_data->num_pkts >= NUM_DESC) {
892 /* ICMP req sent, drop packet by
893 * changing the mask */
895 pkts_drop_without_arp_entry++;
/* single-packet path records the hijack in the global mask */
898 arp_pkts_mask |= pkt_mask;
899 arp_queue_unresolved_packet(ret_arp_data, pkt);
910 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
911 * To support synproxy, some (altered) packets may need to be sent back where
912 * they came from. The ip header has already been adjusted, but the ethernet
913 * header has not, so this must be performed here.
914 * Return an updated pkts_mask, since arp may drop some packets
917 * A pointer to the packets array.
919 * Packet number to start processing.
921 * Packet mask pointer
922 * @param synproxy_reply_mask
923 * Reply Packet mask for Synproxy
925 * A pointer to VFW pipeline.
/* Resolve next-hop MACs (ND) for up to 4 IPv6 packets starting at pkt_num:
 * fills in ethernet addresses and output port, queues packets with an
 * unresolved ND entry (global arp_pkts_mask) and drops the rest.
 * NOTE(review): loop-index declaration, several call-argument lines and
 * braces are missing from this dump. */
929 pkt4_work_vfw_arp_ipv6_packets(struct rte_mbuf **pkts,
932 uint64_t synproxy_reply_mask,
933 struct pipeline_vfw *vfw_pipe)
935 uint8_t nh_ipv6[IPV6_ADD_SIZE];
936 struct ether_addr hw_addr;
937 struct mbuf_tcp_meta_data *meta_data_addr;
938 struct ether_hdr *ehdr;
939 struct rte_mbuf *pkt;
942 for (i = 0; i < 4; i++) {
943 uint32_t dest_if = INVALID_DESTIF;
944 /* bitmask representing only this packet */
945 uint64_t pkt_mask = 1LLU << (pkt_num + i);
/* skip slots already cleared from the mask */
949 if(!(*pkts_mask & pkt_mask))
951 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
953 meta_data_addr = (struct mbuf_tcp_meta_data *)
954 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
955 ehdr = rte_vfw_get_ether_addr(pkt);
957 struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
958 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
960 uint8_t nhip[IPV6_ADD_SIZE];
961 uint8_t dest_address[IPV6_ADD_SIZE];
963 memset(nhip, 0, IPV6_ADD_SIZE);
/* synproxy reply path: swap MACs to return the packet */
965 rte_sp_exchange_mac_addresses(ehdr);
967 rte_mov16(dest_address, ihdr->dst_addr);
968 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
969 struct nd_entry_data *ret_nd_data = NULL;
970 ret_nd_data = get_dest_mac_address_ipv6_port(
976 meta_data_addr->output_port = vfw_pipe->
978 if (nd_cache_dest_mac_present(dest_if)) {
979 ether_addr_copy(get_link_hw_addr(dest_if),
981 update_nhip_access(dest_if);
/* flush packets buffered while this ND entry was unresolved */
983 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
984 nd_send_buffered_pkts(ret_nd_data,
985 &ehdr->d_addr, meta_data_addr->output_port);
988 if (unlikely(ret_nd_data == NULL)) {
989 *pkts_mask &= ~pkt_mask;
991 pkts_drop_without_arp_entry++;
994 if (ret_nd_data->status == INCOMPLETE ||
995 ret_nd_data->status == PROBE) {
996 if (ret_nd_data->num_pkts >= NUM_DESC) {
998 *pkts_mask &= ~pkt_mask;
1000 pkts_drop_without_arp_entry++;
/* buffer the packet until ND resolves */
1003 arp_pkts_mask |= pkt_mask;
1004 nd_queue_unresolved_packet(ret_nd_data, pkt);
1015 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1016 * To support synproxy, some (altered) packets may need to be sent back where
1017 * they came from. The ip header has already been adjusted, but the ethernet
1018 * header has not, so this must be performed here.
1019 * Return an updated pkts_mask, since arp may drop some packets
1022 * A pointer to the packets.
1024 * Packet number to process.
1026 * Packet mask pointer
1027 * @param synproxy_reply_mask
1028 * Reply Packet mask for Synproxy
1030 * A pointer to VFW pipeline.
/* Single-packet variant of pkt4_work_vfw_arp_ipv6_packets: ND-resolve the
 * next-hop MAC for one IPv6 packet, buffer while unresolved (global
 * arp_pkts_mask) or drop. NOTE(review): pkt assignment, call-argument
 * lines and braces are missing from this dump. */
1034 pkt_work_vfw_arp_ipv6_packets(struct rte_mbuf *pkts,
1036 uint64_t *pkts_mask,
1037 uint64_t synproxy_reply_mask,
1038 struct pipeline_vfw *vfw_pipe)
1040 uint8_t nh_ipv6[IPV6_ADD_SIZE];
1041 struct ether_addr hw_addr;
1042 struct mbuf_tcp_meta_data *meta_data_addr;
1043 struct ether_hdr *ehdr;
1044 struct rte_mbuf *pkt;
1046 uint32_t dest_if = INVALID_DESTIF;
1047 /* bitmask representing only this packet */
1048 uint64_t pkt_mask = 1LLU << pkt_num;
1052 if(*pkts_mask & pkt_mask) {
1054 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1056 meta_data_addr = (struct mbuf_tcp_meta_data *)
1057 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1058 ehdr = rte_vfw_get_ether_addr(pkt);
1060 struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
1061 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1063 uint8_t nhip[IPV6_ADD_SIZE];
1064 uint8_t dest_address[IPV6_ADD_SIZE];
1066 memset(nhip, 0, IPV6_ADD_SIZE);
/* synproxy reply path: swap MACs to return the packet */
1068 rte_sp_exchange_mac_addresses(ehdr);
1069 rte_mov16(dest_address, ihdr->dst_addr);
1070 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
1071 struct nd_entry_data *ret_nd_data = NULL;
1072 ret_nd_data = get_dest_mac_address_ipv6_port(
1077 meta_data_addr->output_port = vfw_pipe->
1078 outport_id[dest_if];
1079 if (nd_cache_dest_mac_present(dest_if)) {
1080 ether_addr_copy(get_link_hw_addr(dest_if),
1082 update_nhip_access(dest_if);
/* flush packets buffered while this ND entry was unresolved */
1084 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
1085 nd_send_buffered_pkts(ret_nd_data,
1086 &ehdr->d_addr, meta_data_addr->output_port);
1089 if (unlikely(ret_nd_data == NULL)) {
1090 *pkts_mask &= ~pkt_mask;
1091 vfw_pipe->counters->
1092 pkts_drop_without_arp_entry++;
1095 if (ret_nd_data->status == INCOMPLETE ||
1096 ret_nd_data->status == PROBE) {
1097 if (ret_nd_data->num_pkts >= NUM_DESC) {
1099 *pkts_mask &= ~pkt_mask;
1100 vfw_pipe->counters->
1101 pkts_drop_without_arp_entry++;
/* buffer the packet until ND resolves */
1104 arp_pkts_mask |= pkt_mask;
1105 nd_queue_unresolved_packet(ret_nd_data, pkt);
1118 * walk every valid mbuf (denoted by pkts_mask) and forward the packet.
1119 * To support synproxy, some (altered) packets may need to be sent back where
1120 * they came from. The ip header has already been adjusted, but the ethernet
1121 * header has not, so this must be performed here.
1122 * Return an updated pkts_mask and arp_hijack_mask since arp may drop some packets
1125 * A pointer to the packet array.
1127 * Packets mask to be processed
1128 * @param arp_hijack_mask
1129 * Packets to be hijacked for arp buffering
1131 * A pointer to VFW pipeline.
/* Forward every packet in *pkts_mask: in gateway mode, route via
 * gw_get_route_nh_port_ipv4 + ARP resolution (unresolved packets are moved
 * into *arp_hijack_mask); otherwise forward on the static prv<->pub port
 * mapping. NOTE(review): pkt assignment, debug-guard ifs, else/continue
 * lines and braces are missing from this dump. */
1133 static void vfw_fwd_pkts_ipv4(struct rte_mbuf **pkts, uint64_t *pkts_mask,
1134 uint64_t *arp_hijack_mask, struct pipeline_vfw *vfw_pipe)
1136 uint64_t pkts_to_arp = *pkts_mask;
1138 for (; pkts_to_arp;) {
1140 struct mbuf_tcp_meta_data *meta_data_addr;
1141 struct ether_hdr *ehdr;
1142 struct rte_mbuf *pkt;
1143 uint32_t src_phy_port;
1145 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1146 /* bitmask representing only this packet */
1147 uint64_t pkt_mask = 1LLU << pos;
1148 /* remove this packet from remaining list */
1149 pkts_to_arp &= ~pkt_mask;
1153 printf("----------------\n");
1157 meta_data_addr = (struct mbuf_tcp_meta_data *)
1158 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1160 ehdr = (struct ether_hdr *)
1161 RTE_MBUF_METADATA_UINT32_PTR(pkt, ETHERNET_START);
1163 src_phy_port = pkt->port;
1164 uint32_t dst_phy_port = INVALID_DESTIF;
/* default destination: the paired private/public port */
1166 if(is_phy_port_privte(src_phy_port))
1167 dst_phy_port = prv_to_pub_map[src_phy_port];
1169 dst_phy_port = pub_to_prv_map[src_phy_port];
1172 if(likely(is_gateway())){
1173 struct ipv4_hdr *ipv4hdr = (struct ipv4_hdr *)
1174 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1176 /* Gateway Proc Starts */
1178 struct arp_entry_data *ret_arp_data = NULL;
1179 struct ether_addr dst_mac;
1181 uint32_t dst_ip_addr = rte_bswap32(ipv4hdr->dst_addr);
/* route lookup rewrites dst_phy_port and yields next-hop IP */
1183 gw_get_route_nh_port_ipv4(dst_ip_addr, &dst_phy_port, &nhip, dst_phy_port);
1185 ret_arp_data = get_dest_mac_addr_ipv4(nhip, dst_phy_port, &dst_mac);
1187 /* Gateway Proc Ends */
1189 if (likely(arp_cache_dest_mac_present(dst_phy_port))) {
1191 ether_addr_copy(&dst_mac, &ehdr->d_addr);
1192 ether_addr_copy(get_link_hw_addr(dst_phy_port), &ehdr->s_addr);
1194 meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
1196 update_nhip_access(dst_phy_port);
/* flush packets buffered while the entry was unresolved */
1198 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
1200 arp_send_buffered_pkts(ret_arp_data, &ehdr->d_addr,
1201 vfw_pipe->outport_id[dst_phy_port]);
1205 if (unlikely(ret_arp_data == NULL)) {
1207 printf("NHIP Not Found\n");
1210 vfw_pipe->counters->
1211 pkts_drop_without_arp_entry++;
1214 if (ret_arp_data->status == INCOMPLETE ||
1215 ret_arp_data->status == PROBE) {
1216 if (ret_arp_data->num_pkts >= NUM_DESC) {
1217 /* ICMP req sent, drop packet by
1218 * changing the mask */
1219 vfw_pipe->counters->pkts_drop_without_arp_entry++;
/* hijack: packet is buffered, caller must not forward it */
1222 *arp_hijack_mask |= pkt_mask;
1223 arp_queue_unresolved_packet(ret_arp_data, pkt);
1229 /* IP Pkt forwarding based on pub/prv mapping */
1230 meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
1233 printf("IP_PKT_FWD: src_phy_port=%d, dst_phy_port=%d\n",
1234 src_phy_port, dst_phy_port);
1245 * walk every valid mbuf (denoted by pkts_mask) and forward the packet.
1246 * To support synproxy, some (altered) packets may need to be sent back where
1247 * they came from. The ip header has already been adjusted, but the ethernet
1248 * header has not, so this must be performed here.
1249 * Return an updated pkts_mask and arp_hijack_mask since arp may drop some packets
1252 * A pointer to the packet array.
1254 * Packets mask to be processed
1255 * @param arp_hijack_mask
1256 * Packets to be hijacked for arp buffering
1258 * A pointer to VFW pipeline.
/* IPv6 counterpart of vfw_fwd_pkts_ipv4: route + ND-resolve each packet in
 * *pkts_mask (unresolved packets moved into *arp_hijack_mask), or forward
 * on the static prv<->pub port mapping.
 * NOTE(review): pkt assignment, debug-guard ifs and braces are missing
 * from this dump. Also note the inner uint32_t dst_phy_port (orig line
 * 1302) shadows the outer declaration (orig line 1293) -- verify intended
 * scoping against the full source before changing. */
1260 static void vfw_fwd_pkts_ipv6(struct rte_mbuf **pkts, uint64_t *pkts_mask,
1261 uint64_t *arp_hijack_mask, struct pipeline_vfw *vfw_pipe)
1263 uint64_t pkts_to_arp = *pkts_mask;
1265 for (; pkts_to_arp;) {
1267 struct mbuf_tcp_meta_data *meta_data_addr;
1268 struct ether_hdr *ehdr;
1269 struct rte_mbuf *pkt;
1270 uint32_t src_phy_port;
1272 struct nd_entry_data *ret_nd_data = NULL;
1274 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1275 /* bitmask representing only this packet */
1276 uint64_t pkt_mask = 1LLU << pos;
1277 /* remove this packet from remaining list */
1278 pkts_to_arp &= ~pkt_mask;
1282 printf("----------------\n");
1286 meta_data_addr = (struct mbuf_tcp_meta_data *)
1287 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1289 ehdr = (struct ether_hdr *)
1290 RTE_MBUF_METADATA_UINT32_PTR(pkt, ETHERNET_START);
1292 src_phy_port = pkt->port;
1293 uint32_t dst_phy_port = INVALID_DESTIF;
1296 struct ipv6_hdr *ipv6hdr = (struct ipv6_hdr *)
1297 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1299 /* Gateway Proc Starts */
1301 struct ether_addr dst_mac;
/* shadows the outer dst_phy_port -- see header note */
1302 uint32_t dst_phy_port = INVALID_DESTIF;
1303 uint8_t nhipv6[IPV6_ADD_SIZE];
1304 uint8_t dest_ipv6_address[IPV6_ADD_SIZE];
1305 memset(nhipv6, 0, IPV6_ADD_SIZE);
1306 src_phy_port = pkt->port;
1307 rte_mov16(dest_ipv6_address, (uint8_t *)ipv6hdr->dst_addr);
/* route lookup yields the next-hop IPv6 and egress port */
1309 gw_get_nh_port_ipv6(dest_ipv6_address, &dst_phy_port, nhipv6);
1311 ret_nd_data = get_dest_mac_addr_ipv6(nhipv6, dst_phy_port, &dst_mac);
1313 /* Gateway Proc Ends */
1315 if (nd_cache_dest_mac_present(dst_phy_port)) {
1317 ether_addr_copy(&dst_mac, &ehdr->d_addr);
1318 ether_addr_copy(get_link_hw_addr(dst_phy_port), &ehdr->s_addr);
1320 meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
1322 update_nhip_access(dst_phy_port);
/* flush packets buffered while the ND entry was unresolved */
1324 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
1325 nd_send_buffered_pkts(ret_nd_data, &ehdr->d_addr,
1326 vfw_pipe->outport_id[dst_phy_port]);
1330 if (unlikely(ret_nd_data == NULL)) {
1332 printf("NHIP Not Found\n");
1335 vfw_pipe->counters->pkts_drop_without_arp_entry++;
1338 if (ret_nd_data->status == INCOMPLETE ||
1339 ret_nd_data->status == PROBE) {
1340 if (ret_nd_data->num_pkts >= NUM_DESC) {
1341 /* ICMP req sent, drop packet by
1342 * changing the mask */
1343 vfw_pipe->counters->pkts_drop_without_arp_entry++;
/* hijack: packet is buffered, caller must not forward it */
1346 *arp_hijack_mask |= pkt_mask;
1347 nd_queue_unresolved_packet(ret_nd_data, pkt);
1354 /* IP Pkt forwarding based on pub/prv mapping */
1355 if(is_phy_port_privte(src_phy_port))
1356 dst_phy_port = prv_to_pub_map[src_phy_port];
1358 dst_phy_port = pub_to_prv_map[src_phy_port];
1360 meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
1363 printf("IP_PKT_FWD: src_phy_port=%d, dst_phy_port=%d\n",
1364 src_phy_port, dst_phy_port);
1374 * Packets processing for connection tracking.
1377 * A pointer to the pipeline.
1379 * A pointer to the connection tracker.
1381 * A pointer to a burst of packets.
1382 * @param packet_mask_in
1383 * Input packets Mask.
1387 vfw_process_buffered_pkts(__rte_unused struct pipeline_vfw *vfw_pipe,
1388 struct rte_ct_cnxn_tracker *ct,
1389 struct rte_mbuf **pkts, uint64_t packet_mask_in)
1391 uint64_t keep_mask = packet_mask_in;
1392 struct rte_synproxy_helper sp_helper; /* for synproxy */
/* Re-run the previously buffered packets through the connection
 * tracker with synproxy support. */
1395 rte_ct_cnxn_tracker_batch_lookup_with_synproxy(ct, pkts, keep_mask,
/* Buffered packets have already completed synproxy setup, so the
 * tracker should never ask to hijack or reply for them again; a
 * non-zero mask here indicates an internal inconsistency. */
1398 if (unlikely(sp_helper.hijack_mask))
1399 printf("buffered hijack pkts severe error\n");
1401 if (unlikely(sp_helper.reply_pkt_mask))
1402 printf("buffered reply pkts severe error\n");
1408 * Free Packets from mbuf.
1411 * A pointer to the connection tracker to increment drop counter.
1414 * Packet to be freed.
1417 vfw_pktmbuf_free(struct rte_ct_cnxn_tracker *ct, struct rte_mbuf *pkt)
/* Count the drop against the connection tracker, then return the
 * mbuf to its pool. */
1419 ct->counters->pkts_drop++;
1420 rte_pktmbuf_free(pkt);
1424 vfw_output_or_delete_buffered_packets(struct rte_ct_cnxn_tracker *ct,
1425 struct rte_pipeline *p,
1426 struct rte_mbuf **pkts,
1427 int num_pkts, uint64_t pkts_mask)
1430 struct mbuf_tcp_meta_data *meta_data_addr;
1431 uint64_t pkt_mask = 1;
1433 /* any clear bits in low-order num_pkts bit of
1434 * pkt_mask must be discarded */
/* Walk the first num_pkts entries: a packet whose bit is set in
 * pkts_mask is inserted on its recorded output port; a packet whose
 * bit is clear is dropped and freed. */
1436 for (i = 0; i < num_pkts; i++) {
1437 struct rte_mbuf *pkt = pkts[i];
1439 if (pkts_mask & pkt_mask) {
1440 printf("vfw_output_or_delete_buffered_packets\n");
/* output port was stored in the packet metadata earlier */
1441 meta_data_addr = (struct mbuf_tcp_meta_data *)
1442 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1443 rte_pipeline_port_out_packet_insert(
1444 p, meta_data_addr->output_port, pkt);
1447 vfw_pktmbuf_free(ct, pkt);
1450 pkt_mask = pkt_mask << 1;
1455 * Packets buffered for synproxy.
1458 * A pointer to the pipeline.
1460 * A pointer to the vfw pipeline.
1462 * A pointer to the connection tracker.
1463 * @param forward_pkts
1464 * Packet forwarded by synproxy.
1468 vfw_handle_buffered_packets(struct rte_pipeline *p,
1469 struct pipeline_vfw *vfw_pipe,
1470 struct rte_ct_cnxn_tracker *ct, int forward_pkts)
1472 struct rte_mbuf *pkt_list = rte_ct_get_buffered_synproxy_packets(ct);
1474 if (likely(pkt_list == NULL)) /* only during proxy setup is != NULL */
1478 uint64_t keep_mask = 0;
1479 struct rte_mbuf **pkts = vfw_pipe->pkt_buffer;
1480 struct rte_mbuf *pkt;
/* Drain the linked list of packets the tracker buffered during
 * synproxy setup, batching them into the pipeline's pkt_buffer. */
1482 while (pkt_list != NULL) {
1483 struct mbuf_tcp_meta_data *meta_data =
1484 (struct mbuf_tcp_meta_data *)
1485 RTE_MBUF_METADATA_UINT32_PTR(pkt_list, META_DATA_OFFSET);
1487 /* detach head of list and advance list */
1489 pkt_list = meta_data->next;
1493 pkts[pkt_count++] = pkt;
1495 if (pkt_count == PKT_BUFFER_SIZE) {
1496 /* need to send out packets */
1497 /* currently 0, set all bits */
1498 keep_mask = ~keep_mask;
1501 vfw_process_buffered_pkts(vfw_pipe,
1504 vfw_output_or_delete_buffered_packets(
/* presumably the non-forwarding (DELETE_BUFFERED_PACKETS) path:
 * drop the buffered packet -- confirm against full source */
1514 vfw_pktmbuf_free(ct, pkt);
/* flush any partial batch left once the list is drained */
1518 if (pkt_count != 0) {
1519 /* need to send out packets */
1520 keep_mask = RTE_LEN2MASK(pkt_count, uint64_t);
1523 vfw_process_buffered_pkts(vfw_pipe, ct, pkts,
1526 vfw_output_or_delete_buffered_packets(ct, p, pkts, pkt_count,
1534 * The pipeline port-in action is used to do all the firewall and
1535 * connection tracking work for IPV4 packets.
1538 * A pointer to the pipeline.
1540 * A pointer to a burst of packets.
1542 * Number of packets to process.
1544 * A pointer to pipeline specific data.
1547 * 0 on success, negative on error.
/*
 * Input-port action for IPv4 traffic: applies the firewall filter, ACL
 * lookup, connection tracking (with synproxy) and ARP/forwarding
 * resolution to one burst of packets, then hijacks tracker-buffered
 * packets and drops every packet whose keep_mask bit was cleared.
 * Both n_pkts and arg are used below, so they are not tagged
 * __rte_unused.
 */
1551 vfw_port_in_action_ipv4(struct rte_pipeline *p,
1552 struct rte_mbuf **pkts,
1553 uint32_t n_pkts, void *arg)
1555 struct vfw_ports_in_args *port_in_args =
1556 (struct vfw_ports_in_args *)arg;
1557 struct pipeline_vfw *vfw_pipe =
1558 (struct pipeline_vfw *)port_in_args->pipe;
1559 struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
1561 start_tsc_measure(vfw_pipe);
1563 uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
1564 uint64_t pkts_drop_mask;
1565 uint64_t synp_hijack_mask = 0;
1566 uint64_t arp_hijack_mask = 0;
1567 // uint64_t synproxy_reply_mask; /* for synproxy */
1568 uint64_t keep_mask = packet_mask_in;
1570 uint64_t conntrack_mask = 0, connexist_mask = 0;
1571 struct rte_CT_helper ct_helper;
1575 * This routine uses a bit mask to represent which packets in the
1576 * "pkts" table are considered valid. Any table entry which exists
1577 * and is considered valid has the corresponding bit in the mask set.
1578 * Otherwise, it is cleared. Note that the mask is 64 bits,
1579 * but the number of packets in the table may be considerably less.
1580 * Any mask bits which do correspond to actual packets are cleared.
1581 * Various routines are called which may determine that an existing
1582 * packet is somehow invalid. The routine will return an altered bit
1583 * mask, with the bit cleared. At the end of all the checks,
1584 * packets are dropped if their mask bit is a zero
1587 rte_prefetch0(& vfw_pipe->counters);
1590 /* Pre-fetch all rte_mbuf header */
1591 for(j = 0; j < n_pkts; j++)
1592 rte_prefetch0(pkts[j]);
1594 memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
1596 rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
1597 rte_prefetch0(& vfw_pipe->counters->sum_latencies);
1600 if (unlikely(vfw_debug > 1))
1601 printf("Enter in-port action IPV4 with %p packet mask\n",
1602 (void *)packet_mask_in);
1603 vfw_pipe->counters->pkts_received =
1604 vfw_pipe->counters->pkts_received + n_pkts;
1606 if (unlikely(VFW_DEBUG))
1607 printf("vfw_port_in_action_ipv4 pkts_received: %" PRIu64
1609 vfw_pipe->counters->pkts_received, n_pkts);
1611 /* first handle any previously buffered packets now released */
1612 vfw_handle_buffered_packets(p, vfw_pipe, ct,
1613 FORWARD_BUFFERED_PACKETS);
1615 /* now handle any new packets on input ports */
1616 if (likely(firewall_flag)) {
1617 keep_mask = rte_vfw_ipv4_packet_filter_and_process(pkts,
1618 keep_mask, vfw_pipe);
1619 vfw_pipe->counters->pkts_fw_forwarded +=
1620 __builtin_popcountll(keep_mask);
1624 rte_prefetch0((void*)vfw_pipe->plib_acl);
1625 rte_prefetch0((void*)vfw_rule_table_ipv4_active);
1626 #endif /* EN_SWP_ACL */
/* ACL lookup: returns the surviving packet mask and tells us which
 * packets need connection tracking (conntrack_mask). */
1627 keep_mask = lib_acl_ipv4_pkt_work_key(
1628 vfw_pipe->plib_acl, pkts, keep_mask,
1629 &vfw_pipe->counters->pkts_drop_without_rule,
1630 vfw_rule_table_ipv4_active,
1631 action_array_active,
1632 action_counter_table,
1633 &conntrack_mask, &connexist_mask);
1634 vfw_pipe->counters->pkts_acl_forwarded +=
1635 __builtin_popcountll(keep_mask);
1636 if (conntrack_mask > 0) {
1637 keep_mask = conntrack_mask;
1638 ct_helper.no_new_cnxn_mask = connexist_mask;
1639 cnxn_tracking_is_active = 1;
1641 cnxn_tracking_is_active = 0;
1642 #endif /* ACL_ENABLE */
1644 if (likely(cnxn_tracking_is_active)) {
1645 rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
1646 &keep_mask, &ct_helper, IPv4_HEADER_SIZE);
1647 // synproxy_reply_mask = ct_helper.reply_pkt_mask;
1648 synp_hijack_mask = ct_helper.hijack_mask;
/* prefetch metadata for the leading (n_pkts % 4) packets */
1653 for(j = 0; j < (n_pkts & 0x3LLU); j++) {
1654 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1656 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1659 rte_prefetch0((void*)in_port_dir_a);
1660 rte_prefetch0((void*)prv_to_pub_map);
/* process in groups of 4, prefetching the next group's metadata */
1663 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
1664 for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
1665 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1667 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1670 pkt4_work_vfw_arp_ipv4_packets(&pkts[i], i, &keep_mask,
1671 synproxy_reply_mask, vfw_pipe);
1673 for (j = i; j < n_pkts; j++) {
1674 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1676 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
/* handle the trailing packets one at a time */
1679 for (; i < n_pkts; i++) {
1680 pkt_work_vfw_arp_ipv4_packets(pkts[i], i, &keep_mask,
1681 synproxy_reply_mask, vfw_pipe);
1684 rte_prefetch0((void*)in_port_dir_a);
1685 rte_prefetch0((void*)prv_to_pub_map);
1687 vfw_fwd_pkts_ipv4(pkts, &keep_mask, &arp_hijack_mask, vfw_pipe);
1691 if (vfw_debug > 1) {
1692 printf(" Exit in-port action with %p packet mask\n",
1694 if (keep_mask != packet_mask_in)
1695 printf("dropped packets, %p in, %p out\n",
1696 (void *)packet_mask_in,
1700 /* Hijack the Synproxy and ARP buffered packets */
1702 if (unlikely(arp_hijack_mask || synp_hijack_mask)) {
1704 // printf("Pkts hijacked arp = %lX, synp = %lX\n",
1705 // arp_hijack_mask, synp_hijack_mask);
1707 rte_pipeline_ah_packet_hijack(p,(arp_hijack_mask | synp_hijack_mask));
/* drop every packet whose keep bit was cleared along the way */
1710 pkts_drop_mask = packet_mask_in & ~keep_mask;
1712 if (unlikely(pkts_drop_mask != 0)) {
1713 /* printf("drop %p\n", (void *) pkts_drop_mask); */
1714 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1717 vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
1718 vfw_pipe->counters->num_pkts_measurements++;
1720 end_tsc_measure(vfw_pipe, n_pkts);
1725 * The pipeline port-in action is used to do all the firewall and
1726 * connection tracking work for IPV6 packet.
1729 * A pointer to the pipeline.
1731 * A pointer to a burst of packets.
1733 * Number of packets to process.
1735 * A pointer to pipeline specific data.
1738 * 0 on success, negative on error.
/*
 * Input-port action for IPv6 traffic: applies the firewall filter, ACL
 * lookup, connection tracking (with synproxy) and ND/forwarding
 * resolution to one burst of packets, then hijacks tracker-buffered
 * packets and drops every packet whose keep_mask bit was cleared.
 * Both n_pkts and arg are used below, so they are not tagged
 * __rte_unused.
 */
1742 vfw_port_in_action_ipv6(struct rte_pipeline *p,
1743 struct rte_mbuf **pkts,
1744 uint32_t n_pkts, void *arg)
1746 struct vfw_ports_in_args *port_in_args =
1747 (struct vfw_ports_in_args *)arg;
1748 struct pipeline_vfw *vfw_pipe =
1749 (struct pipeline_vfw *)port_in_args->pipe;
1750 struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
1752 start_tsc_measure(vfw_pipe);
1754 uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
1755 uint64_t pkts_drop_mask;
1756 uint64_t synp_hijack_mask = 0;
1757 uint64_t arp_hijack_mask = 0;
1758 // uint64_t hijack_mask = 0;
1759 // uint64_t synproxy_reply_mask = 0; /* for synproxy */
1760 uint64_t keep_mask = packet_mask_in;
1762 uint64_t conntrack_mask = 0, connexist_mask = 0;
1763 struct rte_CT_helper ct_helper;
1767 * This routine uses a bit mask to represent which packets in the
1768 * "pkts" table are considered valid. Any table entry which exists
1769 * and is considered valid has the corresponding bit in the mask set.
1770 * Otherwise, it is cleared. Note that the mask is 64 bits,
1771 * but the number of packets in the table may be considerably less.
1772 * Any mask bits which do correspond to actual packets are cleared.
1773 * Various routines are called which may determine that an existing
1774 * packet is somehow invalid. The routine will return an altered bit
1775 * mask, with the bit cleared. At the end of all the checks,
1776 * packets are dropped if their mask bit is a zero
1779 rte_prefetch0(& vfw_pipe->counters);
1781 /* Pre-fetch all rte_mbuf header */
1782 for(j = 0; j < n_pkts; j++)
1783 rte_prefetch0(pkts[j]);
1785 memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
1786 rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
1787 rte_prefetch0(& vfw_pipe->counters->sum_latencies);
1790 printf("Enter in-port action with %p packet mask\n",
1791 (void *)packet_mask_in);
1792 vfw_pipe->counters->pkts_received =
1793 vfw_pipe->counters->pkts_received + n_pkts;
1795 printf("vfw_port_in_action pkts_received: %" PRIu64
1797 vfw_pipe->counters->pkts_received, n_pkts);
1799 /* first handle any previously buffered packets now released */
1800 vfw_handle_buffered_packets(p, vfw_pipe, ct,
1801 FORWARD_BUFFERED_PACKETS);
1803 /* now handle any new packets on input ports */
1804 if (likely(firewall_flag)) {
1805 keep_mask = rte_vfw_ipv6_packet_filter_and_process(pkts,
1806 keep_mask, vfw_pipe);
1807 vfw_pipe->counters->pkts_fw_forwarded +=
1808 __builtin_popcountll(keep_mask);
1813 rte_prefetch0((void*)vfw_pipe->plib_acl);
1814 rte_prefetch0((void*)vfw_rule_table_ipv6_active);
1815 #endif /* EN_SWP_ACL */
/* ACL lookup: returns the surviving packet mask and tells us which
 * packets need connection tracking (conntrack_mask). */
1816 keep_mask = lib_acl_ipv6_pkt_work_key(
1817 vfw_pipe->plib_acl, pkts, keep_mask,
1818 &vfw_pipe->counters->pkts_drop_without_rule,
1819 vfw_rule_table_ipv6_active,
1820 action_array_active,
1821 action_counter_table,
1822 &conntrack_mask, &connexist_mask);
1823 vfw_pipe->counters->pkts_acl_forwarded +=
1824 __builtin_popcountll(keep_mask);
1825 if (conntrack_mask > 0) {
1826 keep_mask = conntrack_mask;
1827 ct_helper.no_new_cnxn_mask = connexist_mask;
1828 cnxn_tracking_is_active = 1;
1830 cnxn_tracking_is_active = 0;
1831 #endif /* ACL_ENABLE */
1832 if (likely(cnxn_tracking_is_active)) {
1833 rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
1834 &keep_mask, &ct_helper, IPv6_HEADER_SIZE);
1835 // synproxy_reply_mask = ct_helper.reply_pkt_mask;
1836 synp_hijack_mask = ct_helper.hijack_mask;
/* prefetch metadata for the leading (n_pkts % 4) packets */
1841 for(j = 0; j < (n_pkts & 0x3LLU); j++) {
1842 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1844 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1847 rte_prefetch0((void*)in_port_dir_a);
1848 // rte_prefetch0(vfw_pipe->local_lib_nd_route_table);
/* process in groups of 4, prefetching the next group's metadata */
1851 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
1852 for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
1853 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1855 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1858 pkt4_work_vfw_arp_ipv6_packets(&pkts[i], i, &keep_mask,
1859 synproxy_reply_mask, vfw_pipe);
1861 for (j = i; j < n_pkts; j++) {
1862 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1864 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
/* handle the trailing packets one at a time */
1867 for (; i < n_pkts; i++) {
1868 pkt_work_vfw_arp_ipv6_packets(pkts[i], i, &keep_mask,
1869 synproxy_reply_mask, vfw_pipe);
1872 rte_prefetch0((void*)in_port_dir_a);
1874 vfw_fwd_pkts_ipv6(pkts, &keep_mask, &arp_hijack_mask, vfw_pipe);
1878 if (vfw_debug > 1) {
1879 printf(" Exit in-port action with %p packet mask\n",
1881 if (keep_mask != packet_mask_in)
1882 printf("dropped packets, %p in, %p out\n",
1883 (void *)packet_mask_in,
1887 /* Hijack the Synproxy and ARP buffered packets */
1889 if (unlikely(arp_hijack_mask || synp_hijack_mask)) {
1891 // printf("Pkts hijacked arp = %lX, synp = %lX\n",
1892 // arp_hijack_mask, synp_hijack_mask);
1894 rte_pipeline_ah_packet_hijack(p,(arp_hijack_mask | synp_hijack_mask));
1897 /* Update mask before returning, so that bad packets are dropped */
1899 pkts_drop_mask = packet_mask_in & ~keep_mask;
1901 if (unlikely(pkts_drop_mask != 0)) {
1902 /* printf("drop %p\n", (void *) pkts_drop_mask); */
1903 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1906 vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
1907 vfw_pipe->counters->num_pkts_measurements++;
1909 end_tsc_measure(vfw_pipe, n_pkts);
1916 * Parse arguments in config file.
1919 * A pointer to the pipeline.
1921 * A pointer to pipeline specific parameters.
1924 * 0 on success, negative on error.
1927 pipeline_vfw_parse_args(struct pipeline_vfw *vfw_pipe,
1928 struct pipeline_params *params)
1934 printf("VFW pipeline_vfw_parse_args params->n_args: %d\n",
/* Iterate over every name/value pair from the config section. */
1937 for (i = 0; i < params->n_args; i++) {
1938 char *arg_name = params->args_name[i];
1939 char *arg_value = params->args_value[i];
1941 printf("VFW args[%d]: %s %d, %s\n", i, arg_name,
1942 atoi(arg_value), arg_value);
/* first give the ACL library a chance to consume the option */
1944 status = lib_acl_parse_config(vfw_pipe->plib_acl,
1945 arg_name, arg_value, &vfw_n_rules);
1947 printf("rte_ct_set_configuration_options =%s,%s",
1948 arg_name, arg_value);
1950 } else if (status == 0)
1953 #endif /* traffic_type */
/* traffic_type selects IPv4 (IP_VERSION_4) or IPv6 (IP_VERSION_6) */
1954 if (strcmp(arg_name, "traffic_type") == 0) {
1955 int traffic_type = atoi(arg_value);
1957 if (traffic_type == 0 ||
1958 !(traffic_type == IP_VERSION_4 ||
1959 traffic_type == IP_VERSION_6)) {
1960 printf("not IPV4/IPV6");
1964 vfw_pipe->traffic_type = traffic_type;
/* n_flows sizes the connection-tracker table */
1970 if (strcmp(arg_name, "n_flows") == 0) {
1971 int n_flows = atoi(arg_value);
/* reject zero and implausibly large values */
1973 if ((n_flows == 0) || (n_flows > 8000000))
1976 /* must be power of 2, round up if not */
1977 if (!rte_is_power_of_2(n_flows))
1978 n_flows = rte_align32pow2(n_flows);
1980 vfw_pipe->n_flows = n_flows;
1984 /* not firewall option, process as cnxn tracking option */
1985 status = rte_ct_set_configuration_options(
1986 vfw_pipe->cnxn_tracker,
1987 arg_name, arg_value);
1989 printf("rte_ct_set_configuration_options =%s,%s",
1990 arg_name, arg_value);
1992 } else if (status == 0)
/* Forward declaration: dispatcher for VFW-specific (custom) messages. */
2000 static void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p,
/* Table of standard pipeline message handlers, indexed by request type. */
2003 static pipeline_msg_req_handler handlers[] = {
2004 [PIPELINE_MSG_REQ_PING] = pipeline_msg_req_ping_handler,
2005 [PIPELINE_MSG_REQ_STATS_PORT_IN] =
2006 pipeline_msg_req_stats_port_in_handler,
2007 [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
2008 pipeline_msg_req_stats_port_out_handler,
2009 [PIPELINE_MSG_REQ_STATS_TABLE] = pipeline_msg_req_stats_table_handler,
2010 [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
2011 pipeline_msg_req_port_in_enable_handler,
2012 [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
2013 pipeline_msg_req_port_in_disable_handler,
2014 [PIPELINE_MSG_REQ_CUSTOM] = pipeline_vfw_msg_req_custom_handler,
/* Forward declaration: handler for the synproxy ON/OFF custom message. */
2017 static void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
/* Table of VFW custom message handlers, indexed by custom subtype. */
2019 static pipeline_msg_req_handler custom_handlers[] = {
2021 [PIPELINE_VFW_MSG_REQ_SYNPROXY_FLAGS] =
2022 pipeline_vfw_msg_req_synproxy_flag_handler
2026 * Create and initialize Pipeline Back End (BE).
2029 * A pointer to the pipeline specific parameters..
2031 * A pointer to pipeline specific data.
2034 * A pointer to the pipeline create, NULL on error.
/*
 * Allocate and initialize one VFW pipeline instance: connection
 * tracker, ACL library, counter block, rte_pipeline with its in/out
 * ports, stub table and message handlers. Returns the instance
 * pointer, NULL on error. Fixes applied: the mis-encoded "&para;ms"
 * sequences (corruption of "&params") are restored, and the sscanf
 * error message typo is corrected.
 */
2037 *pipeline_vfw_init(struct pipeline_params *params, __rte_unused void *arg)
2041 /* Check input arguments */
2042 if ((params == NULL) ||
2043 (params->n_ports_in == 0) || (params->n_ports_out == 0))
2047 printf("num ports in %d / num ports out %d\n",
2048 params->n_ports_in, params->n_ports_out);
2050 /* Create a single pipeline instance and initialize. */
2051 struct pipeline_vfw *pipe_vfw;
2053 size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_vfw));
2054 pipe_vfw = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
2056 if (pipe_vfw == NULL)
2059 struct pipeline *pipe;
2061 pipe = &pipe_vfw->pipe;
2063 strncpy(pipe->name, params->name, sizeof(pipe->name));
2064 pipe->log_level = params->log_level;
2065 pipe_vfw->n_flows = 4096; /* small default value */
2066 pipe_vfw->traffic_type = IP_VERSION_4;
2067 pipe_vfw->pipeline_num = 0xff;
/* 0xff marks the link/outport maps as "not yet assigned" */
2068 for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
2069 pipe_vfw->links_map[i] = 0xff;
2070 pipe_vfw->outport_id[i] = 0xff;
2072 PLOG(pipe, HIGH, "VFW");
2074 /* Create a firewall instance and initialize. */
2075 pipe_vfw->cnxn_tracker =
2076 rte_zmalloc(NULL, rte_ct_get_cnxn_tracker_size(),
2077 RTE_CACHE_LINE_SIZE);
2079 if (pipe_vfw->cnxn_tracker == NULL)
2082 /* Create a acl instance and initialize. */
2083 pipe_vfw->plib_acl =
2084 rte_zmalloc(NULL, sizeof(struct lib_acl),
2085 RTE_CACHE_LINE_SIZE);
2087 if (pipe_vfw->plib_acl == NULL)
2090 timer_lcore = rte_lcore_id();
2092 * Now allocate a counter block entry. It appears that the
2093 * initialization of all instances is serialized on core 0,
2094 * so no lock is necessary.
2096 struct rte_VFW_counter_block *counter_ptr;
2098 if (rte_VFW_hi_counter_block_in_use == MAX_VFW_INSTANCES)
2099 /* error, exceeded table bounds */
2102 rte_VFW_hi_counter_block_in_use++;
2104 &rte_vfw_counter_table[rte_VFW_hi_counter_block_in_use];
2105 strncpy(counter_ptr->name, params->name, sizeof(counter_ptr->name));
2107 pipe_vfw->counters = counter_ptr;
2109 rte_ct_initialize_default_timeouts(pipe_vfw->cnxn_tracker);
2110 /* Parse arguments */
2111 if (pipeline_vfw_parse_args(pipe_vfw, params))
/* offset of the buffered-packet "next" link inside packet metadata */
2114 uint16_t pointers_offset =
2115 META_DATA_OFFSET + offsetof(struct mbuf_tcp_meta_data, next);
2117 if (pipe_vfw->n_flows > 0)
2118 rte_ct_initialize_cnxn_tracker_with_synproxy(
2119 pipe_vfw->cnxn_tracker,
2124 pipe_vfw->counters->ct_counters =
2125 rte_ct_get_counter_address(pipe_vfw->cnxn_tracker);
/* Create the underlying rte_pipeline; the output port is read from
 * packet metadata (output_port field). */
2129 struct rte_pipeline_params pipeline_params = {
2130 .name = params->name,
2131 .socket_id = params->socket_id,
2132 .offset_port_id = META_DATA_OFFSET +
2133 offsetof(struct mbuf_tcp_meta_data, output_port)
2136 pipe->p = rte_pipeline_create(&pipeline_params);
2137 if (pipe->p == NULL) {
2146 * create a different "arg_ah" for each input port.
2147 * They differ only in the recorded port number. Unfortunately,
2148 * IP_PIPELINE does not pass port number in to input port handler
2151 uint32_t in_ports_arg_size =
2152 RTE_CACHE_LINE_ROUNDUP((sizeof(struct vfw_ports_in_args)) *
2153 (params->n_ports_in));
2154 struct vfw_ports_in_args *port_in_args =
2155 (struct vfw_ports_in_args *)
2156 rte_zmalloc(NULL, in_ports_arg_size, RTE_CACHE_LINE_SIZE);
2158 if (port_in_args == NULL)
2161 pipe->n_ports_in = params->n_ports_in;
2162 for (i = 0; i < pipe->n_ports_in; i++) {
2164 /* initialize this instance of port_in_args as necessary */
2165 port_in_args[i].pipe = pipe;
2166 port_in_args[i].cnxn_tracker = pipe_vfw->cnxn_tracker;
2168 struct rte_pipeline_port_in_params port_params = {
2170 pipeline_port_in_params_get_ops(&params->port_in
2173 pipeline_port_in_params_convert(&params->port_in
2175 .f_action = vfw_port_in_action_ipv4,
2176 .arg_ah = &(port_in_args[i]),
2177 .burst_size = params->port_in[i].burst_size,
/* traffic_type (from config) selects the IPv6 action instead */
2179 if (pipe_vfw->traffic_type == IP_VERSION_6)
2180 port_params.f_action = vfw_port_in_action_ipv6;
2181 int status = rte_pipeline_port_in_create(pipe->p, &port_params,
2182 &pipe->port_in_id[i]);
2185 rte_pipeline_free(pipe->p);
2192 pipe->n_ports_out = params->n_ports_out;
2193 for (i = 0; i < pipe->n_ports_out; i++) {
2194 struct rte_pipeline_port_out_params port_params = {
2195 .ops = pipeline_port_out_params_get_ops(
2196 &params->port_out[i]),
2197 .arg_create = pipeline_port_out_params_convert(
2198 &params->port_out[i]),
2203 int status = rte_pipeline_port_out_create(pipe->p, &port_params,
2204 &pipe->port_out_id[i]);
2207 rte_pipeline_free(pipe->p);
/* derive this instance's number from the section name "PIPELINEn" */
2213 int pipeline_num = 0;
2214 int dont_care = sscanf(params->name, "PIPELINE%d", &pipeline_num);
2217 printf("sscanf unable to read pipeline id\n");
2218 pipe_vfw->pipeline_num = (uint8_t) pipeline_num;
2219 register_pipeline_Qs(pipe_vfw->pipeline_num, pipe);
2220 set_link_map(pipe_vfw->pipeline_num, pipe, pipe_vfw->links_map);
2221 set_outport_id(pipe_vfw->pipeline_num, pipe,
2222 pipe_vfw->outport_id);
2223 printf("pipeline_num=%d\n", pipeline_num);
2225 /*If this is the first VFW thread, create common VFW Rule tables*/
2226 if (rte_VFW_hi_counter_block_in_use == 0) {
2227 vfw_rule_table_ipv4_active =
2228 lib_acl_create_active_standby_table_ipv4(1,
2230 if (vfw_rule_table_ipv4_active == NULL) {
2231 printf("Failed to create active table for IPV4\n");
2232 rte_pipeline_free(pipe->p);
2233 rte_free(pipe_vfw->cnxn_tracker);
2234 rte_free(pipe_vfw->plib_acl);
2238 vfw_rule_table_ipv4_standby =
2239 lib_acl_create_active_standby_table_ipv4(2,
2241 if (vfw_rule_table_ipv4_standby == NULL) {
2242 printf("Failed to create standby table for IPV4\n");
2243 rte_pipeline_free(pipe->p);
2244 rte_free(pipe_vfw->cnxn_tracker);
2245 rte_free(pipe_vfw->plib_acl);
2250 vfw_rule_table_ipv6_active =
2251 lib_acl_create_active_standby_table_ipv6(1,
2254 if (vfw_rule_table_ipv6_active == NULL) {
2255 printf("Failed to create active table for IPV6\n");
2256 rte_pipeline_free(pipe->p);
2257 rte_free(pipe_vfw->cnxn_tracker);
2258 rte_free(pipe_vfw->plib_acl);
2262 vfw_rule_table_ipv6_standby =
2263 lib_acl_create_active_standby_table_ipv6(2,
2265 if (vfw_rule_table_ipv6_standby == NULL) {
2266 printf("Failed to create standby table for IPV6\n");
2267 rte_pipeline_free(pipe->p);
2268 rte_free(pipe_vfw->cnxn_tracker);
2269 rte_free(pipe_vfw->plib_acl);
/* single stub table; all real work happens in the port-in action */
2281 struct rte_pipeline_table_params table_params = {
2282 .ops = &rte_table_stub_ops,
2284 .f_action_hit = NULL,
2285 .f_action_miss = NULL,
2287 .action_data_size = 0,
2290 int status = rte_pipeline_table_create(pipe->p,
2292 &pipe->table_id[0]);
2295 rte_pipeline_free(pipe->p);
/* default entry routes by the port id stored in packet metadata */
2300 struct rte_pipeline_table_entry default_entry = {
2301 .action = RTE_PIPELINE_ACTION_PORT_META
2304 struct rte_pipeline_table_entry *default_entry_ptr;
2306 status = rte_pipeline_table_default_entry_add(pipe->p,
2309 &default_entry_ptr);
2312 rte_pipeline_free(pipe->p);
2316 for (i = 0; i < pipe->n_ports_in; i++) {
2317 int status = rte_pipeline_port_in_connect_to_table(
2319 pipe->port_in_id[i],
2323 rte_pipeline_free(pipe->p);
2329 /* Enable input ports */
2330 for (i = 0; i < pipe->n_ports_in; i++) {
2332 rte_pipeline_port_in_enable(pipe->p, pipe->port_in_id[i]);
2335 rte_pipeline_free(pipe->p);
2341 /* Check pipeline consistency */
2342 if (rte_pipeline_check(pipe->p) < 0) {
2343 rte_pipeline_free(pipe->p);
2348 /* Message queues */
2349 pipe->n_msgq = params->n_msgq;
2350 for (i = 0; i < pipe->n_msgq; i++)
2351 pipe->msgq_in[i] = params->msgq_in[i];
2353 for (i = 0; i < pipe->n_msgq; i++)
2354 pipe->msgq_out[i] = params->msgq_out[i];
2356 /* Message handlers */
2357 memcpy(pipe->handlers, handlers, sizeof(pipe->handlers));
2358 memcpy(pipe_vfw->custom_handlers, custom_handlers,
2359 sizeof(pipe_vfw->custom_handlers));
2365 * Free resources and delete pipeline.
2368 * A pointer to the pipeline.
2371 * 0 on success, negative on error.
2373 static int pipeline_vfw_free(void *pipeline)
/* Release the rte_pipeline owned by this instance. */
2375 struct pipeline *p = (struct pipeline *)pipeline;
2377 /* Check input arguments */
2381 /* Free resources */
2382 rte_pipeline_free(p->p);
2388 * Callback function to map input/output ports.
2391 * A pointer to the pipeline.
2395 * A pointer to the Output port.
2398 * 0 on success, negative on error.
2401 pipeline_vfw_track(void *pipeline, __rte_unused uint32_t port_in,
/* NOTE(review): port_in is tagged __rte_unused but is validated in
 * the argument check below -- the qualifier is misleading. */
2404 struct pipeline *p = (struct pipeline *)pipeline;
2406 /* Check input arguments */
2407 if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
2410 if (p->n_ports_in == 1) {
2419 * Callback function to process timers.
2422 * A pointer to the pipeline.
2425 * 0 on success, negative on error.
2427 static int pipeline_vfw_timer(void *pipeline)
2429 struct pipeline_vfw *p = (struct pipeline_vfw *)pipeline;
2432 * handle any good buffered packets released by synproxy before checking
2433 * for packets released by synproxy due to timeout.
2434 * Don't want packets missed
2437 vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
2438 FORWARD_BUFFERED_PACKETS);
/* service FE messages and flush any queued output */
2440 pipeline_msg_req_handle(&p->pipe);
2441 rte_pipeline_flush(p->pipe.p);
/* expire connection-tracker timers (may release buffered packets) */
2443 rte_ct_handle_expired_timers(p->cnxn_tracker);
2445 /* now handle packets released by synproxy due to timeout. */
2446 vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
2447 DELETE_BUFFERED_PACKETS);
2453 * Callback function to process CLI commands from FE.
2456 * A pointer to the pipeline.
2458 * A pointer to command specific data.
2461 * A pointer to message handler on success,
2462 * pipeline_msg_req_invalid_handler on error.
2464 void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p, void *msg)
2466 struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
2467 struct pipeline_custom_msg_req *req = msg;
2468 pipeline_msg_req_handler f_handle;
/* Select the handler for this custom subtype; out-of-range subtypes
 * and unset table entries fall back to the invalid-request handler. */
2470 f_handle = (req->subtype < PIPELINE_VFW_MSG_REQS) ?
2471 pipe_vfw->custom_handlers[req->subtype] :
2472 pipeline_msg_req_invalid_handler;
2474 if (f_handle == NULL)
2475 f_handle = pipeline_msg_req_invalid_handler;
2477 return f_handle(p, req);
2481 * Handler for synproxy ON/OFF CLI command.
2484 * A pointer to the pipeline.
2486 * A pointer to command specific data.
2489 * Response message contains status.
2492 void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
2495 struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
2496 struct pipeline_vfw_synproxy_flag_msg_req *req = msg;
2497 struct pipeline_vfw_synproxy_flag_msg_rsp *rsp = msg;
/* flag 0 disables synproxy, 1 enables it; anything else is rejected */
2499 if (req->synproxy_flag == 0) {
2500 rte_ct_disable_synproxy(pipe_vfw->cnxn_tracker);
2502 printf("synproxy turned OFF for %s\n", p->name);
2503 } else if (req->synproxy_flag == 1) {
2504 rte_ct_enable_synproxy(pipe_vfw->cnxn_tracker);
2506 printf("synproxy turned ON for %s\n", p->name);
2508 printf("Invalid synproxy setting\n");
2515 struct pipeline_be_ops pipeline_vfw_be_ops = {
2516 .f_init = pipeline_vfw_init,
2517 .f_free = pipeline_vfw_free,
2519 .f_timer = pipeline_vfw_timer,
2520 .f_track = pipeline_vfw_track,