2 // Copyright (c) 2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
19 * Pipeline VFW BE Implementation.
21 * Implementation of Pipeline VFW Back End (BE).
22 * Responsible for packet processing.
36 #include <rte_common.h>
37 #include <rte_malloc.h>
38 #include <rte_ether.h>
39 #include <rte_ethdev.h>
43 #include <rte_byteorder.h>
45 #include <rte_table_lpm.h>
46 #include <rte_table_hash.h>
47 #include <rte_table_array.h>
48 #include <rte_table_acl.h>
49 #include <rte_table_stub.h>
50 #include <rte_timer.h>
51 #include <rte_cycles.h>
52 #include <rte_pipeline.h>
53 #include <rte_spinlock.h>
54 #include <rte_prefetch.h>
55 #include "pipeline_actions_common.h"
56 #include "hash_func.h"
57 #include "pipeline_vfw.h"
58 #include "pipeline_vfw_be.h"
59 #include "rte_cnxn_tracking.h"
60 #include "pipeline_arpicmp_be.h"
61 #include "vnf_common.h"
62 #include "vnf_define.h"
65 #include "lib_icmpv6.h"
66 #include "pipeline_common_fe.h"
/* Global runtime switches for the VFW pipeline. */
70 uint8_t firewall_flag = 1; /* non-zero: filter verdicts are applied (see *_filter_and_process and rte_vfw_incr_drop_ctr) */
71 uint8_t VFW_DEBUG = 0; /* non-zero: debug mode; usage not visible in this chunk -- confirm against full file */
72 uint8_t cnxn_tracking_is_active = 1; /* presumably gates connection tracking -- usage not visible here, verify */
74 * A structure defining the VFW pipeline input port per thread data.
76 struct vfw_ports_in_args {
77 struct pipeline *pipe; /* owning pipeline instance */
78 struct rte_ct_cnxn_tracker *cnxn_tracker; /* per-thread connection-tracking state */
79 } __rte_cache_aligned; /* cache aligned to avoid false sharing between threads */
81 * A structure defining the VFW pipeline per thread data.
/* NOTE(review): the "struct pipeline_vfw {" opening line (and some members)
 * were dropped from this listing; the fields below belong to that
 * per-thread pipeline structure. */
85 pipeline_msg_req_handler custom_handlers[PIPELINE_VFW_MSG_REQS]; /* dispatch table for custom pipeline messages */
87 struct rte_ct_cnxn_tracker *cnxn_tracker; /* connection tracker for this thread */
88 struct rte_VFW_counter_block *counters; /* per-instance statistics block */
89 struct rte_mbuf *pkt_buffer[PKT_BUFFER_SIZE]; /* scratch array for buffered (synproxy) packets */
90 struct lib_acl *plib_acl; /* ACL rule set used for filtering */
91 /* timestamp retrieved during in-port computations */
95 uint8_t links_map[PIPELINE_MAX_PORT_IN]; /* input port -> physical link mapping */
96 uint8_t outport_id[PIPELINE_MAX_PORT_IN]; /* destination interface -> pipeline out-port id */
97 /* Local ARP & ND Tables */
98 struct lib_arp_route_table_entry
99 local_lib_arp_route_table[MAX_ARP_RT_ENTRY];
100 uint8_t local_lib_arp_route_ent_cnt; /* number of valid entries in the local ARP table */
101 struct lib_nd_route_table_entry
102 local_lib_nd_route_table[MAX_ND_RT_ENTRY];
103 uint8_t local_lib_nd_route_ent_cnt; /* number of valid entries in the local ND table */
105 } __rte_cache_aligned;
107 * A structure defining the mbuf meta data for VFW.
/* Overlaid at META_DATA_OFFSET inside each mbuf's metadata area; the
 * pipeline reads output_port when forwarding with PORT_META actions. */
109 struct mbuf_tcp_meta_data {
110 /* output port stored for RTE_PIPELINE_ACTION_PORT_META */
111 uint32_t output_port;
112 struct rte_mbuf *next; /* next pointer for chained buffers */
113 } __rte_cache_aligned;
/* Packet classification flags and byte offsets into the mbuf.
 * Offsets are relative to the mbuf data area; IP_START (defined elsewhere
 * in the project headers) is the start of the L3 header. */
115 #define DONT_CARE_TCP_PACKET 0
116 #define IS_NOT_TCP_PACKET 0
117 #define IS_TCP_PACKET 1
119 #define META_DATA_OFFSET 128 /* start of VFW metadata within the mbuf */
121 #define RTE_PKTMBUF_HEADROOM 128 /* where is this defined ? */
/* NOTE(review): redefining RTE_PKTMBUF_HEADROOM may silently disagree with
 * the DPDK build-time value from rte_config.h -- confirm they match. */
122 #define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
123 #define ETH_HDR_SIZE 14 /* untagged ethernet header length */
124 #define PROTOCOL_START (IP_START + 9) /* IPv4 protocol field offset */
126 #define TCP_START (IP_START + 20) /* L4 header after fixed 20-byte IPv4 header */
127 #define RTE_LB_PORT_OFFSET 204 /* TODO: Need definition in LB header */
128 #define TCP_START_IPV6 (IP_START + 40) /* L4 header after fixed 40-byte IPv6 header */
129 #define PROTOCOL_START_IPV6 (IP_START + 6) /* IPv6 next-header field offset */
130 #define IP_HDR_DSCP_OFST 1
132 #define TCP_PROTOCOL 6
133 #define UDP_PROTOCOL 17
135 #define DELETE_BUFFERED_PACKETS 0
136 #define FORWARD_BUFFERED_PACKETS 1
140 #define IPv4_HEADER_SIZE 20
141 #define IPv6_HEADER_SIZE 40
143 #define IP_VERSION_4 4
144 #define IP_VERSION_6 6
147 #define IP_HDR_SIZE_IPV6 40
148 #define IP_HDR_DSCP_OFST_IPV6 0
149 #define IP_HDR_LENGTH_OFST_IPV6 4
150 #define IP_HDR_PROTOCOL_OFST_IPV6 6
151 #define IP_HDR_DST_ADR_OFST_IPV6 24
152 #define MAX_NUM_LOCAL_MAC_ADDRESS 16
153 /** The counter table for VFW pipeline per thread data.*/
154 struct rte_VFW_counter_block rte_vfw_counter_table[MAX_VFW_INSTANCES]
/* NOTE(review): the terminating "__rte_cache_aligned;" of the declaration
 * above was dropped from this listing. */
156 int rte_VFW_hi_counter_block_in_use = -1; /* highest counter-block index allocated so far */
158 /* a spin lock used during vfw initialization only */
159 rte_spinlock_t rte_VFW_init_lock = RTE_SPINLOCK_INITIALIZER;
/* Double-buffered action tables: one active for the datapath, one standby
 * that the control path edits before swapping. */
162 struct pipeline_action_key *action_array_a;
163 struct pipeline_action_key *action_array_b;
164 struct pipeline_action_key *action_array_active;
165 struct pipeline_action_key *action_array_standby;
166 uint32_t action_array_size; /* allocated size of each action array */
167 struct action_counter_block
168 action_counter_table[MAX_VFW_INSTANCES][action_array_max]
/* NOTE(review): terminator of the declaration above also dropped here. */
171 * Pipeline table strategy for firewall. Unfortunately, there does not seem to
172 * be any use for the built-in table lookup of ip_pipeline for the firewall.
173 * The main table requirement of the firewall is the hash table to maintain
174 * connection info, but that is implemented separately in the connection
175 * tracking library. So a "dummy" table lookup will be performed.
176 * TODO: look into "stub" table and see if that can be used
177 * to avoid useless table lookup
179 uint64_t arp_pkts_mask; /* bitmap of packets queued awaiting ARP/ND resolution */
181 /* Start TSC measurement */
182 /* Prefetch counters and pipe before this function */
183 static inline void start_tsc_measure(struct pipeline_vfw *vfw_pipe) {
184 vfw_pipe->counters->entry_timestamp = rte_get_tsc_cycles();
185 if (likely(vfw_pipe->counters->exit_timestamp))
186 vfw_pipe->counters->external_time_sum +=
187 vfw_pipe->counters->entry_timestamp -
188 vfw_pipe->counters->exit_timestamp;
191 /* End TSC measurement */
/* Closes the interval opened by start_tsc_measure(): accumulates in-pipeline
 * cycles and bumps the sample count. Bursts of <= 1 packet are ignored
 * because they skew the per-burst averages.
 * NOTE(review): the packet-count parameter declaration (n_pkts, used below),
 * the opening brace, and the else branch structure were dropped from this
 * listing -- verify against the full source before editing. */
192 static inline void end_tsc_measure(
193 struct pipeline_vfw *vfw_pipe,
196 if (likely(n_pkts > 1)) {
197 vfw_pipe->counters->exit_timestamp = rte_get_tsc_cycles();
198 vfw_pipe->counters->internal_time_sum +=
199 vfw_pipe->counters->exit_timestamp -
200 vfw_pipe->counters->entry_timestamp; /* cycles spent inside the pipeline */
201 vfw_pipe->counters->time_measurements++;
203 /* small counts skew results, ignore */
204 vfw_pipe->counters->exit_timestamp = 0; /* also suppresses external-time delta next entry */
209 * Print packet for debugging.
/* Hex-dumps the VFW metadata area followed by the L3/L4 headers of a packet
 * to stdout. Marked __rte_unused: debug-only helper, not on the fast path.
 * NOTE(review): the declaration of loop index i, the per-row newline
 * printf calls, and the closing braces were dropped from this listing. */
212 * A pointer to the packet.
215 static __rte_unused void print_pkt(struct rte_mbuf *pkt)
218 int size = (int)sizeof(struct mbuf_tcp_meta_data);
219 uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, META_DATA_OFFSET);
221 printf("Meta-data:\n");
222 for (i = 0; i < size; i++) {
223 printf("%02x ", rd[i]);
224 if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
228 printf("IP and TCP/UDP headers:\n");
229 rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, IP_START);
230 for (i = 0; i < IP_HDR_SIZE_IPV6; i++) { /* dump enough to cover an IPv6 base header */
231 printf("%02x ", rd[i]);
232 if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
238 /* TODO: are the protocol numbers defined somewhere with meaningful names? */
/* IANA-assigned IP protocol numbers. Note IP_TCP_PROTOCOL/IP_UDP_PROTOCOL
 * duplicate TCP_PROTOCOL/UDP_PROTOCOL defined earlier in this file. */
239 #define IP_ICMP_PROTOCOL 1
240 #define IP_TCP_PROTOCOL 6
241 #define IP_UDP_PROTOCOL 17
242 #define IPv6_FRAGMENT_HEADER 44 /* IPv6 fragment extension header "next header" value */
245 * Return ethernet header structure form packet.
248 * A pointer to the packet.
251 static inline struct ether_hdr *rte_vfw_get_ether_addr(struct rte_mbuf *pkt)
253 return (struct ether_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
258 * Return IPV4 header structure form packet.
261 * A pointer to the packet.
265 static inline struct ipv4_hdr *rte_vfw_get_IPv4_hdr_addr(
266 struct rte_mbuf *pkt)
268 return (struct ipv4_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
/* Test whether the packet carries an IPv4 header by inspecting the IP
 * version nibble at IP_START.
 * NOTE(review): the line that shifts the version nibble down (between the
 * byte read and the comparison) was dropped from this listing -- confirm
 * the shift constant against the full source. */
271 static inline int rte_vfw_is_IPv4(struct rte_mbuf *pkt)
273 /* NOTE: Only supporting IP headers with no options,
274 * so header is fixed size */
275 uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
278 return ip_type == IPv4_HDR_VERSION;
/* Test whether the packet carries an IPv6 header by inspecting the IP
 * version nibble at IP_START.
 * NOTE(review): as with rte_vfw_is_IPv4, the version-nibble shift line was
 * dropped from this listing. */
281 static inline int rte_vfw_is_IPv6(struct rte_mbuf *pkt)
283 /* NOTE: Only supporting IP headers with no options,
284 * so header is fixed size */
285 uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
288 return ip_type == IPv6_HDR_VERSION;
291 static inline void rte_vfw_incr_drop_ctr(uint64_t *counter)
293 if (likely(firewall_flag))
/* Divert ARP and ICMP/ICMPv6 control traffic out of the firewall fast path:
 * matching packets are inserted on the last out-port (the arpicmp port) and
 * counted. Callers treat a zero return as "packet consumed here, remove it
 * from the processing mask" (see the filter_and_process functions).
 * NOTE(review): the switch case labels, the out-port insert arguments, and
 * the return statements were dropped from this listing -- verify against the
 * full source before editing. */
297 static uint8_t check_arp_icmp(
298 struct rte_mbuf *pkt,
299 struct pipeline_vfw *vfw_pipe)
301 struct ether_hdr *ehdr;
302 struct app_link_params *link;
303 uint8_t solicited_node_multicast_addr[IPV6_ADD_SIZE] = {
304 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00};
307 /* ARP outport number */
308 uint16_t out_port = vfw_pipe->pipe.n_ports_out - 1;
309 struct ipv4_hdr *ipv4_h;
310 struct ipv6_hdr *ipv6_h;
311 link = &myApp->link_params[pkt->port]; /* link config of the receiving port */
313 ehdr = rte_vfw_get_ether_addr(pkt);
314 switch (rte_be_to_cpu_16(ehdr->ether_type)) {
317 rte_pipeline_port_out_packet_insert(
322 vfw_pipe->counters->arpicmpPktCount++;
326 ipv4_h = (struct ipv4_hdr *)
327 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
328 if ((ipv4_h->next_proto_id == IP_PROTOCOL_ICMP) &&
330 rte_be_to_cpu_32(ipv4_h->dst_addr)) {
331 if (is_phy_port_privte(pkt->port)) {
332 rte_pipeline_port_out_packet_insert(
337 vfw_pipe->counters->arpicmpPktCount++;
344 ipv6_h = (struct ipv6_hdr *)
345 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
347 if (ipv6_h->proto == ICMPV6_PROTOCOL_ID) {
/* divert ICMPv6 addressed to this link or to its solicited-node multicast */
348 if (!memcmp(ipv6_h->dst_addr, link->ipv6, IPV6_ADD_SIZE)
349 || !memcmp(ipv6_h->dst_addr,
350 solicited_node_multicast_addr,
351 IPV6_ADD_CMP_MULTI)) {
353 rte_pipeline_port_out_packet_insert(
358 vfw_pipe->counters->arpicmpPktCount++;
362 pkts_drop_unsupported_type++;
375 * Performs basic VFW ipv4 packet filtering.
/* Walks the burst bitmap, diverting ARP/ICMP via check_arp_icmp() and
 * dropping fragmented, undersized, TTL-expired, and non-TCP/UDP/ICMP
 * packets. Returns the bitmap of packets that survived filtering (or, per
 * the visible tail, the full input when firewall_flag is clear -- the else
 * return line is missing from this listing).
 * NOTE(review): the storage-class/return-type line, the pkts_mask parameter
 * declaration, the `discard` variable declaration, and several
 * `continue`/brace lines were dropped from this listing -- verify against
 * the full source before editing. */
377 * A pointer to the packets.
381 * A pointer to VFW pipeline.
385 rte_vfw_ipv4_packet_filter_and_process(struct rte_mbuf **pkts,
387 struct pipeline_vfw *vfw_pipe)
391 * Make use of cache prefetch. At beginning of loop, want to prefetch
392 * mbuf data for next iteration (not current one).
393 * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
394 * is 20 bytes (extensions not supported), while the IPv6 header is 40
395 * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
396 * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
397 * need two pre-fetches.
400 uint8_t pos, next_pos = 0;
401 uint64_t pkt_mask; /* bitmask representing a single packet */
402 struct rte_mbuf *pkt;
403 struct rte_mbuf *next_pkt = NULL;
404 struct ipv4_hdr *ihdr4;
405 void *next_iphdr = NULL;
407 if (unlikely(pkts_mask == 0))
409 pos = (uint8_t) __builtin_ctzll(pkts_mask); /* lowest set bit = first packet */
410 pkt_mask = 1LLU << pos; /* bitmask representing only this packet */
413 uint64_t bytes_processed = 0;
414 /* bitmap of packets left to process */
415 uint64_t pkts_to_process = pkts_mask;
416 /* bitmap of valid packets to return */
417 uint64_t valid_packets = pkts_mask;
420 /* prefetch counters, updated below. Most likely counters to update
422 rte_prefetch0(&vfw_pipe->counters);
424 do { /* always execute at least once */
426 /* remove this packet from remaining list */
427 uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
429 if (likely(next_pkts_to_process)) {
430 /* another packet to process after this, prefetch it */
433 (uint8_t) __builtin_ctzll(next_pkts_to_process);
434 next_pkt = pkts[next_pos];
435 next_iphdr = RTE_MBUF_METADATA_UINT32_PTR(next_pkt,
437 rte_prefetch0(next_iphdr);
441 /* remove this packet from remaining list */
442 pkts_to_process &= ~pkt_mask;
445 if (!check_arp_icmp(pkt, vfw_pipe)) {
446 /* make next packet data the current */
447 pkts_to_process = next_pkts_to_process;
451 pkt_mask = 1LLU << pos;
452 valid_packets &= ~pkt_mask; /* consumed by ARP/ICMP handling */
457 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
459 bytes_processed += packet_length;
461 ihdr4 = (struct ipv4_hdr *)
462 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
464 /* verify that packet size according to mbuf is at least
465 * as large as the size according to the IP header.
468 uint32_t ip_length = rte_bswap16(ihdr4->total_length);
471 (ip_length > (packet_length - ETH_HDR_SIZE))) {
473 vfw_pipe->counters->pkts_drop_bad_size++;
477 * IPv4 fragmented if: MF (more fragments) or Fragment
478 * Offset are non-zero. Header in Intel order, so flip
479 * constant to compensate. Note that IPv6 uses a header
480 * extension for identifying fragments.
483 int fragmented = (ihdr4->fragment_offset & 0xff3f) != 0;
484 uint8_t ttl = ihdr4->time_to_live;
486 if (unlikely(fragmented)) {
488 vfw_pipe->counters->pkts_drop_fragmented++;
491 if (unlikely(ttl <= 1)) {
493 * about to decrement to zero (or is somehow
494 * already zero), so discard
497 vfw_pipe->counters->pkts_drop_ttl++;
501 * Dropping the packets other than TCP AND UDP.
504 uint8_t proto = ihdr4->next_proto_id;
506 if (unlikely(!(proto == IP_TCP_PROTOCOL ||
507 proto == IP_UDP_PROTOCOL ||
508 proto == IP_ICMP_PROTOCOL))) {
511 pkts_drop_unsupported_type++;
514 if (unlikely(discard)) {
515 valid_packets &= ~pkt_mask;
518 /* make next packet data the current */
519 pkts_to_process = next_pkts_to_process;
523 pkt_mask = 1LLU << pos;
525 } while (pkts_to_process);
527 /* finalize counters, etc. */
528 vfw_pipe->counters->bytes_processed += bytes_processed;
530 if (likely(firewall_flag))
531 return valid_packets;
536 * Performs basic VFW IPV6 packet filtering.
/* IPv6 twin of rte_vfw_ipv4_packet_filter_and_process(): diverts ARP/ICMP,
 * drops undersized, fragmented (IPv6 fragment extension header), hop-limit
 * expired, and non-TCP/UDP/ICMP packets, returning the surviving bitmap.
 * NOTE(review): the storage-class/return-type line, the pkts_mask parameter
 * declaration, the ip_length/discard declarations, and several brace and
 * else lines were dropped from this listing -- verify against the full
 * source before editing. */
538 * A pointer to the packets.
542 * A pointer to VFW pipeline.
545 rte_vfw_ipv6_packet_filter_and_process(struct rte_mbuf **pkts,
547 struct pipeline_vfw *vfw_pipe)
551 * Make use of cache prefetch. At beginning of loop, want to prefetch
552 * mbuf data for next iteration (not current one).
553 * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
554 * is 20 bytes (extensions not supported), while the IPv6 header is 40
555 * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
556 * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
557 * need two pre-fetches.
560 uint8_t pos, next_pos = 0;
561 uint64_t pkt_mask; /* bitmask representing a single packet */
562 struct rte_mbuf *pkt;
563 struct rte_mbuf *next_pkt = NULL;
564 struct ipv6_hdr *ihdr6;
565 void *next_iphdr = NULL;
567 if (unlikely(pkts_mask == 0))
569 pos = (uint8_t) __builtin_ctzll(pkts_mask);
570 pkt_mask = 1LLU << pos; /* bitmask representing only this packet */
573 uint64_t bytes_processed = 0;
574 /* bitmap of packets left to process */
575 uint64_t pkts_to_process = pkts_mask;
576 /* bitmap of valid packets to return */
577 uint64_t valid_packets = pkts_mask;
579 /* prefetch counters, updated below. Most likely counters to update
581 rte_prefetch0(&vfw_pipe->counters);
583 do { /* always execute at least once */
585 /* remove this packet from remaining list */
586 uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
588 if (likely(next_pkts_to_process)) {
589 /* another packet to process after this, prefetch it */
592 (uint8_t) __builtin_ctzll(next_pkts_to_process);
593 next_pkt = pkts[next_pos];
595 RTE_MBUF_METADATA_UINT32_PTR(next_pkt, IP_START);
596 rte_prefetch0(next_iphdr);
600 /* remove this packet from remaining list */
601 pkts_to_process &= ~pkt_mask;
604 if (!check_arp_icmp(pkt, vfw_pipe)) {
605 /* make next packet data the current */
606 pkts_to_process = next_pkts_to_process;
610 pkt_mask = 1LLU << pos;
611 valid_packets &= ~pkt_mask; /* consumed by ARP/ICMP handling */
616 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
618 bytes_processed += packet_length;
620 ihdr6 = (struct ipv6_hdr *)
621 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
624 * verify that packet size according to mbuf is at least
625 * as large as the size according to the IP header.
626 * For IPv6, note that size includes header extensions
627 * but not the base header size
631 rte_bswap16(ihdr6->payload_len) + IPv6_HEADER_SIZE;
634 (ip_length > (packet_length - ETH_HDR_SIZE))) {
636 vfw_pipe->counters->pkts_drop_bad_size++;
640 * Dropping the packets other than TCP AND UDP.
643 uint8_t proto = ihdr6->proto;
645 if (unlikely(!(proto == IP_TCP_PROTOCOL ||
646 proto == IP_UDP_PROTOCOL ||
647 proto == IP_ICMP_PROTOCOL))) {
649 if (proto == IPv6_FRAGMENT_HEADER)
651 pkts_drop_fragmented++;
653 /* anything else is neither TCP/UDP/ICMP nor a fragment */
654 pkts_drop_unsupported_type++;
658 * Behave like a router, and decrement the TTL of an
659 * IP packet. If this causes the TTL to become zero,
660 * the packet will be discarded. Unlike a router,
661 * no ICMP code 11 (Time * Exceeded) message will be
662 * sent back to the packet originator.
665 if (unlikely(ihdr6->hop_limits <= 1)) {
667 * about to decrement to zero (or is somehow
668 * already zero), so discard
671 vfw_pipe->counters->pkts_drop_ttl++;
674 if (unlikely(discard))
675 valid_packets &= ~pkt_mask;
679 /* make next packet data the current */
680 pkts_to_process = next_pkts_to_process;
684 pkt_mask = 1LLU << pos;
686 } while (pkts_to_process);
688 /* finalize counters, etc. */
689 vfw_pipe->counters->bytes_processed += bytes_processed;
691 if (likely(firewall_flag))
692 return valid_packets;
698 * exchange the mac address so source becomes destination and vice versa.
701 * A pointer to the ethernet header.
704 static inline void rte_sp_exchange_mac_addresses(struct ether_hdr *ehdr)
706 struct ether_addr saved_copy;
708 ether_addr_copy(&ehdr->d_addr, &saved_copy);
709 ether_addr_copy(&ehdr->s_addr, &ehdr->d_addr);
710 ether_addr_copy(&saved_copy, &ehdr->s_addr);
715 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
716 * To support synproxy, some (altered) packets may need to be sent back where
717 * they came from. The ip header has already been adjusted, but the ethernet
718 * header has not, so this must be performed here.
719 * Return an updated pkts_mask, since arp may drop some packets
/* 4-packet batch variant of ARP resolution for IPv4: for each of the four
 * packets starting at pkt_num, looks up the next-hop MAC, fills the
 * ethernet header, and either forwards (cache hit), queues on the ARP
 * library (INCOMPLETE/PROBE), or clears the packet from *pkts_mask (drop).
 * NOTE(review): the return type line, the pkt_num/pkts_mask parameter
 * declarations, the `pkt = pkts[pkt_num + i];` assignment, the
 * must_reverse-guarded exchange branch, the continue statements, and
 * closing braces were dropped from this listing -- verify against the full
 * source before editing. */
722 * A pointer to the packet array.
724 * Packet num to start processing
727 * @param synproxy_reply_mask
728 * Reply Packet mask for Synproxy
730 * A pointer to VFW pipeline.
733 pkt4_work_vfw_arp_ipv4_packets(struct rte_mbuf **pkts,
736 uint64_t synproxy_reply_mask,
737 struct pipeline_vfw *vfw_pipe)
742 struct mbuf_tcp_meta_data *meta_data_addr;
743 struct ether_hdr *ehdr;
744 struct rte_mbuf *pkt;
746 for (i = 0; i < 4; i++) {
747 uint32_t dest_if = INVALID_DESTIF;
748 /* bitmask representing only this packet */
749 uint64_t pkt_mask = 1LLU << (pkt_num + i);
753 if(!(*pkts_mask & pkt_mask))
756 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
758 meta_data_addr = (struct mbuf_tcp_meta_data *)
759 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
760 ehdr = rte_vfw_get_ether_addr(pkt);
763 struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
764 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
767 uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
769 rte_sp_exchange_mac_addresses(ehdr); /* synproxy: send back to sender */
771 struct arp_entry_data *ret_arp_data = NULL;
772 ret_arp_data = get_dest_mac_addr_port(dest_address,
773 &dest_if, &ehdr->d_addr);
774 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
776 if (arp_cache_dest_mac_present(dest_if)) {
777 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
778 update_nhip_access(dest_if);
779 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
/* flush packets queued while this entry was unresolved */
780 arp_send_buffered_pkts(ret_arp_data,
781 &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
786 if (unlikely(ret_arp_data == NULL)) {
788 printf("%s: NHIP Not Found, nhip:%x , "
789 "outport_id: %d\n", __func__, nhip,
790 vfw_pipe->outport_id[dest_if]);
794 pkts_drop_without_arp_entry++;
797 if (ret_arp_data->status == INCOMPLETE ||
798 ret_arp_data->status == PROBE) {
799 if (ret_arp_data->num_pkts >= NUM_DESC) {
800 /* ICMP req sent, drop packet by
801 * changing the mask */
802 vfw_pipe->counters->pkts_drop_without_arp_entry++;
805 arp_pkts_mask |= pkt_mask; /* park packet until ARP resolves */
806 arp_queue_unresolved_packet(ret_arp_data, pkt);
816 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
817 * To support synproxy, some (altered) packets may need to be sent back where
818 * they came from. The ip header has already been adjusted, but the ethernet
819 * header has not, so this must be performed here.
820 * Return an updated pkts_mask, since arp may drop some packets
/* Single-packet variant of pkt4_work_vfw_arp_ipv4_packets(): resolves the
 * next-hop MAC for one packet and forwards, queues, or drops it.
 * NOTE(review): the return type line, the pkt_num/pkts_mask parameter
 * declarations, `pkt = pkts;`, the must_reverse-guarded branch, and
 * closing braces were dropped from this listing. */
823 * A pointer to the packet.
825 * Packet number to process
827 * Packet mask pointer
828 * @param synproxy_reply_mask
829 * Reply Packet mask for Synproxy
831 * A pointer to VFW pipeline.
834 pkt_work_vfw_arp_ipv4_packets(struct rte_mbuf *pkts,
837 uint64_t synproxy_reply_mask,
838 struct pipeline_vfw *vfw_pipe)
841 uint32_t dest_if = INVALID_DESTIF;
843 struct mbuf_tcp_meta_data *meta_data_addr;
844 struct ether_hdr *ehdr;
845 struct rte_mbuf *pkt;
846 uint64_t pkt_mask = 1LLU << pkt_num;
850 if(*pkts_mask & pkt_mask) {
852 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
854 meta_data_addr = (struct mbuf_tcp_meta_data *)
855 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
856 ehdr = rte_vfw_get_ether_addr(pkt);
859 struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
860 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
863 uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
865 rte_sp_exchange_mac_addresses(ehdr); /* synproxy: send back to sender */
867 struct arp_entry_data *ret_arp_data = NULL;
868 ret_arp_data = get_dest_mac_addr_port(dest_address,
869 &dest_if, &ehdr->d_addr);
870 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
872 if (arp_cache_dest_mac_present(dest_if)) {
874 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
875 update_nhip_access(dest_if);
876 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
/* flush packets queued while this entry was unresolved */
877 arp_send_buffered_pkts(ret_arp_data,
878 &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
882 if (unlikely(ret_arp_data == NULL)) {
885 printf("%s: NHIP Not Found, nhip:%x , "
886 "outport_id: %d\n", __func__, nhip,
887 vfw_pipe->outport_id[dest_if]);
890 pkts_drop_without_arp_entry++;
893 if (ret_arp_data->status == INCOMPLETE ||
894 ret_arp_data->status == PROBE) {
895 if (ret_arp_data->num_pkts >= NUM_DESC) {
896 /* ICMP req sent, drop packet by
897 * changing the mask */
898 vfw_pipe->counters->pkts_drop_without_arp_entry++;
901 arp_pkts_mask |= pkt_mask; /* park packet until ARP resolves */
902 arp_queue_unresolved_packet(ret_arp_data, pkt);
913 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
914 * To support synproxy, some (altered) packets may need to be sent back where
915 * they came from. The ip header has already been adjusted, but the ethernet
916 * header has not, so this must be performed here.
917 * Return an updated pkts_mask, since arp may drop some packets
/* 4-packet batch variant of ND (IPv6 neighbour) resolution: mirrors
 * pkt4_work_vfw_arp_ipv4_packets() but uses the ND library.
 * NOTE(review): the return type line, pkt_num/pkts_mask parameter
 * declarations, `pkt = pkts[pkt_num + i];`, the must_reverse-guarded
 * branch, the get_dest_mac_address_ipv6_port() argument lines, continue
 * statements, and closing braces were dropped from this listing. */
920 * A pointer to the packets array.
922 * Packet number to start processing.
924 * Packet mask pointer
925 * @param synproxy_reply_mask
926 * Reply Packet mask for Synproxy
928 * A pointer to VFW pipeline.
932 pkt4_work_vfw_arp_ipv6_packets(struct rte_mbuf **pkts,
935 uint64_t synproxy_reply_mask,
936 struct pipeline_vfw *vfw_pipe)
938 uint8_t nh_ipv6[IPV6_ADD_SIZE];
939 struct ether_addr hw_addr;
940 struct mbuf_tcp_meta_data *meta_data_addr;
941 struct ether_hdr *ehdr;
942 struct rte_mbuf *pkt;
945 for (i = 0; i < 4; i++) {
946 uint32_t dest_if = INVALID_DESTIF;
947 /* bitmask representing only this packet */
948 uint64_t pkt_mask = 1LLU << (pkt_num + i);
952 if(!(*pkts_mask & pkt_mask))
954 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
956 meta_data_addr = (struct mbuf_tcp_meta_data *)
957 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
958 ehdr = rte_vfw_get_ether_addr(pkt);
960 struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
961 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
963 uint8_t nhip[IPV6_ADD_SIZE];
964 uint8_t dest_address[IPV6_ADD_SIZE];
966 memset(nhip, 0, IPV6_ADD_SIZE);
968 rte_sp_exchange_mac_addresses(ehdr); /* synproxy: send back to sender */
970 rte_mov16(dest_address, ihdr->dst_addr);
971 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
972 struct nd_entry_data *ret_nd_data = NULL;
973 ret_nd_data = get_dest_mac_address_ipv6_port(
979 meta_data_addr->output_port = vfw_pipe->
981 if (nd_cache_dest_mac_present(dest_if)) {
982 ether_addr_copy(get_link_hw_addr(dest_if),
984 update_nhip_access(dest_if);
986 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
/* flush packets queued while this entry was unresolved */
987 nd_send_buffered_pkts(ret_nd_data,
988 &ehdr->d_addr, meta_data_addr->output_port);
991 if (unlikely(ret_nd_data == NULL)) {
992 *pkts_mask &= ~pkt_mask;
994 pkts_drop_without_arp_entry++;
997 if (ret_nd_data->status == INCOMPLETE ||
998 ret_nd_data->status == PROBE) {
999 if (ret_nd_data->num_pkts >= NUM_DESC) {
1001 *pkts_mask &= ~pkt_mask;
1002 vfw_pipe->counters->pkts_drop_without_arp_entry++;
1005 arp_pkts_mask |= pkt_mask; /* park packet until ND resolves */
1006 nd_queue_unresolved_packet(ret_nd_data, pkt);
1017 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1018 * To support synproxy, some (altered) packets may need to be sent back where
1019 * they came from. The ip header has already been adjusted, but the ethernet
1020 * header has not, so this must be performed here.
1021 * Return an updated pkts_mask, since arp may drop some packets
/* Single-packet variant of pkt4_work_vfw_arp_ipv6_packets(): resolves the
 * IPv6 next-hop MAC via ND for one packet and forwards, queues, or drops.
 * NOTE(review): the return type line, pkt_num parameter declaration,
 * `pkt = pkts;`, the must_reverse-guarded branch, the
 * get_dest_mac_address_ipv6_port() argument lines, and closing braces were
 * dropped from this listing. */
1024 * A pointer to the packets.
1026 * Packet number to process.
1028 * Packet mask pointer
1029 * @param synproxy_reply_mask
1030 * Reply Packet mask for Synproxy
1032 * A pointer to VFW pipeline.
1036 pkt_work_vfw_arp_ipv6_packets(struct rte_mbuf *pkts,
1038 uint64_t *pkts_mask,
1039 uint64_t synproxy_reply_mask,
1040 struct pipeline_vfw *vfw_pipe)
1042 uint8_t nh_ipv6[IPV6_ADD_SIZE];
1043 struct ether_addr hw_addr;
1044 struct mbuf_tcp_meta_data *meta_data_addr;
1045 struct ether_hdr *ehdr;
1046 struct rte_mbuf *pkt;
1048 uint32_t dest_if = INVALID_DESTIF;
1049 /* bitmask representing only this packet */
1050 uint64_t pkt_mask = 1LLU << pkt_num;
1054 if(*pkts_mask & pkt_mask) {
1056 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1058 meta_data_addr = (struct mbuf_tcp_meta_data *)
1059 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1060 ehdr = rte_vfw_get_ether_addr(pkt);
1062 struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
1063 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1065 uint8_t nhip[IPV6_ADD_SIZE];
1066 uint8_t dest_address[IPV6_ADD_SIZE];
1068 memset(nhip, 0, IPV6_ADD_SIZE);
1070 rte_sp_exchange_mac_addresses(ehdr); /* synproxy: send back to sender */
1071 rte_mov16(dest_address, ihdr->dst_addr);
1072 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
1073 struct nd_entry_data *ret_nd_data = NULL;
1074 ret_nd_data = get_dest_mac_address_ipv6_port(
1079 meta_data_addr->output_port = vfw_pipe->
1080 outport_id[dest_if];
1081 if (nd_cache_dest_mac_present(dest_if)) {
1082 ether_addr_copy(get_link_hw_addr(dest_if),
1084 update_nhip_access(dest_if);
1086 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
/* flush packets queued while this entry was unresolved */
1087 nd_send_buffered_pkts(ret_nd_data,
1088 &ehdr->d_addr, meta_data_addr->output_port);
1091 if (unlikely(ret_nd_data == NULL)) {
1092 *pkts_mask &= ~pkt_mask;
1093 vfw_pipe->counters->
1094 pkts_drop_without_arp_entry++;
1097 if (ret_nd_data->status == INCOMPLETE ||
1098 ret_nd_data->status == PROBE) {
1099 if (ret_nd_data->num_pkts >= NUM_DESC) {
1101 *pkts_mask &= ~pkt_mask;
1102 vfw_pipe->counters->pkts_drop_without_arp_entry++;
1105 arp_pkts_mask |= pkt_mask; /* park packet until ND resolves */
1106 nd_queue_unresolved_packet(ret_nd_data, pkt);
1119 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1120 * To support synproxy, some (altered) packets may need to be sent back where
1121 * they came from. The ip header has already been adjusted, but the ethernet
1122 * header has not, so this must be performed here.
1123 * Return an updated pkts_mask, since arp may drop some packets
/* Bitmap-driven variant: iterates all set bits of pkts_mask (instead of a
 * fixed 4-packet window) and applies the same IPv4 ARP resolution as the
 * pkt_work/pkt4_work helpers above.
 * NOTE(review): the return type line, the pkts_mask parameter declaration
 * (passed by value here -- local clears like `pkts_mask &= ~pkt_mask` and
 * the final return are partially missing), `pkt = pkts[pos];`, the
 * must_reverse-guarded branch, continue statements, and closing braces
 * were dropped from this listing. */
1126 * A pointer to the packet.
1129 * @param synproxy_reply_mask
1130 * Reply Packet mask for Synproxy
1132 * A pointer to VFW pipeline.
1135 rte_vfw_arp_ipv4_packets(struct rte_mbuf **pkts,
1137 uint64_t synproxy_reply_mask,
1138 struct pipeline_vfw *vfw_pipe)
1140 uint64_t pkts_to_arp = pkts_mask;
1143 uint32_t dest_if = INVALID_DESTIF;
1144 for (; pkts_to_arp;) {
1145 struct ether_addr hw_addr;
1146 struct mbuf_tcp_meta_data *meta_data_addr;
1147 struct ether_hdr *ehdr;
1148 struct rte_mbuf *pkt;
1151 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1152 /* bitmask representing only this packet */
1153 uint64_t pkt_mask = 1LLU << pos;
1154 /* remove this packet from remaining list */
1155 pkts_to_arp &= ~pkt_mask;
1157 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1159 phy_port = pkt->port;
1160 meta_data_addr = (struct mbuf_tcp_meta_data *)
1161 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1162 ehdr = rte_vfw_get_ether_addr(pkt);
1165 struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
1166 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1169 uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
1171 rte_sp_exchange_mac_addresses(ehdr); /* synproxy: send back to sender */
1172 struct arp_entry_data *ret_arp_data = NULL;
1173 ret_arp_data = get_dest_mac_addr_port(dest_address,
1174 &dest_if, &ehdr->d_addr);
1175 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
1176 if (arp_cache_dest_mac_present(dest_if)) {
1178 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
1179 update_nhip_access(dest_if);
1180 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
/* flush packets queued while this entry was unresolved */
1182 arp_send_buffered_pkts(ret_arp_data,
1183 &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
1188 if (unlikely(ret_arp_data == NULL)) {
1191 printf("%s: NHIP Not Found, nhip:%x , "
1192 "outport_id: %d\n", __func__, nhip,
1193 vfw_pipe->outport_id[dest_if]);
1196 vfw_pipe->counters->
1197 pkts_drop_without_arp_entry++;
1200 if (ret_arp_data->status == INCOMPLETE ||
1201 ret_arp_data->status == PROBE) {
1202 if (ret_arp_data->num_pkts >= NUM_DESC) {
1203 /* ICMP req sent, drop packet by
1204 * changing the mask */
1205 vfw_pipe->counters->pkts_drop_without_arp_entry++;
1208 arp_pkts_mask |= pkt_mask; /* park packet until ARP resolves */
1209 arp_queue_unresolved_packet(ret_arp_data, pkt);
1220 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1221 * To support synproxy, some (altered) packets may need to be sent back where
1222 * they came from. The ip header has already been adjusted, but the ethernet
1223 * header has not, so this must be performed here.
1224 * Return an updated pkts_mask, since arp may drop some packets
/* Bitmap-driven IPv6 variant of rte_vfw_arp_ipv4_packets(): iterates all
 * set bits of pkts_mask and resolves the next-hop MAC via the ND library.
 * NOTE(review): the return type line, the pkts_mask parameter declaration,
 * `pkt = pkts[pos];`, the must_reverse-guarded branch, the
 * get_dest_mac_address_ipv6_port() argument lines, the final return, and
 * closing braces were dropped from this listing. */
1227 * A pointer to the packet.
1230 * @param synproxy_reply_mask
1231 * Reply Packet mask for Synproxy
1233 * A pointer to VFW pipeline.
1237 rte_vfw_arp_ipv6_packets(struct rte_mbuf **pkts,
1239 uint64_t synproxy_reply_mask,
1240 struct pipeline_vfw *vfw_pipe)
1242 uint64_t pkts_to_arp = pkts_mask;
1243 uint8_t nh_ipv6[IPV6_ADD_SIZE];
1245 uint32_t dest_if = INVALID_DESTIF;
1247 for (; pkts_to_arp;) {
1248 struct ether_addr hw_addr;
1249 struct mbuf_tcp_meta_data *meta_data_addr;
1250 struct ether_hdr *ehdr;
1251 struct rte_mbuf *pkt;
1254 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1255 /* bitmask representing only this packet */
1256 uint64_t pkt_mask = 1LLU << pos;
1257 /* remove this packet from remaining list */
1258 pkts_to_arp &= ~pkt_mask;
1260 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1262 phy_port = pkt->port;
1263 meta_data_addr = (struct mbuf_tcp_meta_data *)
1264 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1265 ehdr = rte_vfw_get_ether_addr(pkt);
1267 struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
1268 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1270 uint8_t nhip[IPV6_ADD_SIZE];
1271 uint8_t dest_address[IPV6_ADD_SIZE];
1273 memset(nhip, 0, IPV6_ADD_SIZE);
1275 rte_sp_exchange_mac_addresses(ehdr); /* synproxy: send back to sender */
1277 rte_mov16(dest_address, ihdr->dst_addr);
1278 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
1279 struct nd_entry_data *ret_nd_data = NULL;
1280 ret_nd_data = get_dest_mac_address_ipv6_port(
1286 meta_data_addr->output_port = vfw_pipe->
1287 outport_id[dest_if];
1288 if (nd_cache_dest_mac_present(dest_if)) {
1289 ether_addr_copy(get_link_hw_addr(dest_if),
1291 update_nhip_access(dest_if);
1293 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
/* flush packets queued while this entry was unresolved */
1294 nd_send_buffered_pkts(ret_nd_data,
1295 &ehdr->d_addr, meta_data_addr->output_port);
1299 if (unlikely(ret_nd_data == NULL)) {
1300 pkts_mask &= ~pkt_mask;
1301 vfw_pipe->counters->
1302 pkts_drop_without_arp_entry++;
1305 if (ret_nd_data->status == INCOMPLETE ||
1306 ret_nd_data->status == PROBE) {
1307 if (ret_nd_data->num_pkts >= NUM_DESC) {
1309 pkts_mask &= ~pkt_mask;
1310 vfw_pipe->counters->
1311 pkts_drop_without_arp_entry++;
1314 arp_pkts_mask |= pkt_mask; /* park packet until ND resolves */
1315 nd_queue_unresolved_packet(ret_nd_data, pkt);
1328 * Packets processing for connection tracking.
/* Re-runs previously buffered (synproxy) packets through the connection
 * tracker. Buffered packets should never hijack or generate replies again,
 * so those masks being non-zero is reported as a severe error.
 * NOTE(review): the return type line, the sp_helper argument line of the
 * batch-lookup call, the return of the surviving mask, and closing braces
 * were dropped from this listing. */
1331 * A pointer to the pipeline.
1333 * A pointer to the connection tracker .
1335 * A pointer to a burst of packets.
1336 * @param packet_mask_in
1337 * Input packets Mask.
1341 vfw_process_buffered_pkts(__rte_unused struct pipeline_vfw *vfw_pipe,
1342 struct rte_ct_cnxn_tracker *ct,
1343 struct rte_mbuf **pkts, uint64_t packet_mask_in)
1345 uint64_t keep_mask = packet_mask_in;
1346 struct rte_synproxy_helper sp_helper; /* for synproxy */
1349 rte_ct_cnxn_tracker_batch_lookup_with_synproxy(ct, pkts, keep_mask,
1352 if (unlikely(sp_helper.hijack_mask))
1353 printf("buffered hijack pkts severe error\n");
1355 if (unlikely(sp_helper.reply_pkt_mask))
1356 printf("buffered reply pkts severe error\n");
1362 * Free Packets from mbuf.
1365 * A pointer to the connection tracker to increment drop counter.
1368 * Packet to be freed.
/* Drop helper: account the drop in the tracker's counters, then return
 * the mbuf to its pool. */
1371 vfw_pktmbuf_free(struct rte_ct_cnxn_tracker *ct, struct rte_mbuf *pkt)
1373 ct->counters->pkts_drop++;
1374 rte_pktmbuf_free(pkt);
/*
 * Walk up to num_pkts entries of pkts[]: packets whose bit is set in
 * pkts_mask are inserted into the pipeline output port recorded in their
 * per-mbuf metadata; packets whose bit is clear are dropped (counted and
 * freed via vfw_pktmbuf_free).
 */
1378 vfw_output_or_delete_buffered_packets(struct rte_ct_cnxn_tracker *ct,
1379 struct rte_pipeline *p,
1380 struct rte_mbuf **pkts,
1381 int num_pkts, uint64_t pkts_mask)
1384 struct mbuf_tcp_meta_data *meta_data_addr;
1385 uint64_t pkt_mask = 1;
1387 /* any clear bits in low-order num_pkts bits of
1388 * pkt_mask must be discarded */
1390 for (i = 0; i < num_pkts; i++) {
1391 struct rte_mbuf *pkt = pkts[i];
1393 if (pkts_mask & pkt_mask) {
1394 printf("vfw_output_or_delete_buffered_packets\n");
/* output port was stashed in mbuf metadata by earlier processing */
1395 meta_data_addr = (struct mbuf_tcp_meta_data *)
1396 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1397 rte_pipeline_port_out_packet_insert(
1398 p, meta_data_addr->output_port, pkt);
1401 vfw_pktmbuf_free(ct, pkt);
/* advance to the bit for the next packet */
1404 pkt_mask = pkt_mask << 1;
1409 * Packets buffered for synproxy.
1412 * A pointer to the pipeline.
1414 * A pointer to the vfw pipeline.
1416 * A pointer to the connection tracker.
1417 * @param forward_pkts
1418 * Packet forwarded by synproxy.
/*
 * Drain the connection tracker's list of packets buffered during SYN-proxy
 * setup.  When forward_pkts is set, packets are collected into the per-
 * pipeline staging buffer in chunks of PKT_BUFFER_SIZE, re-run through the
 * tracker, and either output or dropped; otherwise each packet is freed.
 */
1422 vfw_handle_buffered_packets(struct rte_pipeline *p,
1423 struct pipeline_vfw *vfw_pipe,
1424 struct rte_ct_cnxn_tracker *ct, int forward_pkts)
1426 struct rte_mbuf *pkt_list = rte_ct_get_buffered_synproxy_packets(ct);
1428 if (likely(pkt_list == NULL)) /* only during proxy setup is != NULL */
1432 uint64_t keep_mask = 0;
1433 struct rte_mbuf **pkts = vfw_pipe->pkt_buffer;
1434 struct rte_mbuf *pkt;
1436 while (pkt_list != NULL) {
/* the buffered packets are singly linked through the 'next' field of
 * the per-mbuf TCP metadata */
1437 struct mbuf_tcp_meta_data *meta_data =
1438 (struct mbuf_tcp_meta_data *)
1439 RTE_MBUF_METADATA_UINT32_PTR(pkt_list, META_DATA_OFFSET);
1441 /* detach head of list and advance list */
1443 pkt_list = meta_data->next;
1447 pkts[pkt_count++] = pkt;
1449 if (pkt_count == PKT_BUFFER_SIZE) {
1450 /* need to send out packets */
1451 /* currently 0, set all bits */
1452 keep_mask = ~keep_mask;
1455 vfw_process_buffered_pkts(vfw_pipe,
1458 vfw_output_or_delete_buffered_packets(
/* not forwarding: drop and free the buffered packet */
1468 vfw_pktmbuf_free(ct, pkt);
/* flush any partial final chunk */
1472 if (pkt_count != 0) {
1473 /* need to send out packets */
1474 keep_mask = RTE_LEN2MASK(pkt_count, uint64_t);
1477 vfw_process_buffered_pkts(vfw_pipe, ct, pkts,
1480 vfw_output_or_delete_buffered_packets(ct, p, pkts, pkt_count,
1488 * The pipeline port-in action is used to do all the firewall and
1489 * connection tracking work for IPV4 packets.
1492 * A pointer to the pipeline.
1494 * A pointer to a burst of packets.
1496 * Number of packets to process.
1498 * A pointer to pipeline specific data.
1501 * 0 on success, negative on error.
1505 vfw_port_in_action_ipv4(struct rte_pipeline *p,
1506 struct rte_mbuf **pkts,
/* NOTE(review): n_pkts and arg are marked __rte_unused but both are used
 * below; the attribute is stale and only suppresses a warning. */
1507 __rte_unused uint32_t n_pkts, __rte_unused void *arg)
1509 struct vfw_ports_in_args *port_in_args =
1510 (struct vfw_ports_in_args *)arg;
1511 struct pipeline_vfw *vfw_pipe =
1512 (struct pipeline_vfw *)port_in_args->pipe;
1513 struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
1515 start_tsc_measure(vfw_pipe);
1517 uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
1518 uint64_t pkts_drop_mask;
1519 uint64_t hijack_mask = 0;
1521 uint64_t synproxy_reply_mask = 0; /* for synproxy */
1522 uint64_t keep_mask = packet_mask_in;
1524 uint64_t conntrack_mask = 0, connexist_mask = 0;
1525 struct rte_CT_helper ct_helper;
1529 * This routine uses a bit mask to represent which packets in the
1530 * "pkts" table are considered valid. Any table entry which exists
1531 * and is considered valid has the corresponding bit in the mask set.
1532 * Otherwise, it is cleared. Note that the mask is 64 bits,
1533 * but the number of packets in the table may be considerably less.
1534 * Any mask bits which do not correspond to actual packets are cleared.
1535 * Various routines are called which may determine that an existing
1536 * packet is somehow invalid. The routine will return an altered bit
1537 * mask, with the bit cleared. At the end of all the checks,
1538 * packets are dropped if their mask bit is a zero
1541 rte_prefetch0(& vfw_pipe->counters);
1544 /* Pre-fetch all rte_mbuf header */
1545 for(j = 0; j < n_pkts; j++)
1546 rte_prefetch0(pkts[j]);
1548 memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
1550 rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
1551 rte_prefetch0(& vfw_pipe->counters->sum_latencies);
1554 if (unlikely(vfw_debug > 1))
1555 printf("Enter in-port action IPV4 with %p packet mask\n",
1556 (void *)packet_mask_in);
1557 vfw_pipe->counters->pkts_received =
1558 vfw_pipe->counters->pkts_received + n_pkts;
1560 if (unlikely(VFW_DEBUG))
1561 printf("vfw_port_in_action_ipv4 pkts_received: %" PRIu64
1563 vfw_pipe->counters->pkts_received, n_pkts);
1565 /* first handle any previously buffered packets now released */
1566 vfw_handle_buffered_packets(p, vfw_pipe, ct,
1567 FORWARD_BUFFERED_PACKETS);
1569 /* now handle any new packets on input ports */
1570 if (likely(firewall_flag)) {
1571 keep_mask = rte_vfw_ipv4_packet_filter_and_process(pkts,
1572 keep_mask, vfw_pipe);
1573 vfw_pipe->counters->pkts_fw_forwarded +=
1574 __builtin_popcountll(keep_mask);
1578 rte_prefetch0((void*)vfw_pipe->plib_acl);
1579 rte_prefetch0((void*)vfw_rule_table_ipv4_active);
1580 #endif /* EN_SWP_ACL */
/* ACL classification: may clear keep_mask bits and report which packets
 * require connection tracking (conntrack_mask / connexist_mask) */
1581 keep_mask = lib_acl_ipv4_pkt_work_key(
1582 vfw_pipe->plib_acl, pkts, keep_mask,
1583 &vfw_pipe->counters->pkts_drop_without_rule,
1584 vfw_rule_table_ipv4_active,
1585 action_array_active,
1586 action_counter_table,
1587 &conntrack_mask, &connexist_mask);
1588 vfw_pipe->counters->pkts_acl_forwarded +=
1589 __builtin_popcountll(keep_mask);
1590 if (conntrack_mask > 0) {
1591 keep_mask = conntrack_mask;
1592 ct_helper.no_new_cnxn_mask = connexist_mask;
1593 cnxn_tracking_is_active = 1;
1595 cnxn_tracking_is_active = 0;
1596 #endif /* ACL_ENABLE */
1598 if (likely(cnxn_tracking_is_active)) {
1599 rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
1600 &keep_mask, &ct_helper, IPv4_HEADER_SIZE);
1601 synproxy_reply_mask = ct_helper.reply_pkt_mask;
1602 hijack_mask = ct_helper.hijack_mask;
/* prefetch metadata for the leading (n_pkts % 4) packets */
1607 for(j = 0; j < (n_pkts & 0x3LLU); j++) {
1608 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1610 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1613 rte_prefetch0((void*)in_port_dir_a);
1614 rte_prefetch0((void*)prv_to_pub_map);
/* ARP/next-hop resolution, 4 packets at a time with software pipelined
 * prefetch of the following group */
1617 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
1618 for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
1619 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1621 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1624 pkt4_work_vfw_arp_ipv4_packets(&pkts[i], i, &keep_mask,
1625 synproxy_reply_mask, vfw_pipe);
1627 for (j = i; j < n_pkts; j++) {
1628 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1630 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
/* remainder loop for the trailing packets */
1633 for (; i < n_pkts; i++) {
1634 pkt_work_vfw_arp_ipv4_packets(pkts[i], i, &keep_mask,
1635 synproxy_reply_mask, vfw_pipe);
1638 rte_prefetch0((void*)in_port_dir_a);
1639 rte_prefetch0((void*)prv_to_pub_map);
1640 rte_prefetch0((void*) & vfw_pipe->local_lib_arp_route_table);
1641 keep_mask = rte_vfw_arp_ipv4_packets(pkts, keep_mask,
1642 synproxy_reply_mask, vfw_pipe);
1645 if (vfw_debug > 1) {
1646 printf(" Exit in-port action with %p packet mask\n",
1648 if (keep_mask != packet_mask_in)
1649 printf("dropped packets, %p in, %p out\n",
1650 (void *)packet_mask_in,
1654 /* Update mask before returning, so that bad packets are dropped */
1655 if (arp_pkts_mask) {
1656 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
1659 pkts_drop_mask = packet_mask_in & ~keep_mask;
1661 if (unlikely(pkts_drop_mask != 0)) {
1662 /* printf("drop %p\n", (void *) pkts_drop_mask); */
1663 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1666 if (unlikely(hijack_mask != 0))
1667 rte_pipeline_ah_packet_hijack(p, hijack_mask);
1669 vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
1670 vfw_pipe->counters->num_pkts_measurements++;
1672 end_tsc_measure(vfw_pipe, n_pkts);
1677 * The pipeline port-in action is used to do all the firewall and
1678 * connection tracking work for IPV6 packet.
1681 * A pointer to the pipeline.
1683 * A pointer to a burst of packets.
1685 * Number of packets to process.
1687 * A pointer to pipeline specific data.
1690 * 0 on success, negative on error.
1694 vfw_port_in_action_ipv6(struct rte_pipeline *p,
1695 struct rte_mbuf **pkts,
/* NOTE(review): n_pkts and arg are marked __rte_unused but both are used
 * below; the attribute is stale and only suppresses a warning. */
1696 __rte_unused uint32_t n_pkts, __rte_unused void *arg)
1698 struct vfw_ports_in_args *port_in_args =
1699 (struct vfw_ports_in_args *)arg;
1700 struct pipeline_vfw *vfw_pipe =
1701 (struct pipeline_vfw *)port_in_args->pipe;
1702 struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
1704 start_tsc_measure(vfw_pipe);
1706 uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
1707 uint64_t pkts_drop_mask;
1708 uint64_t hijack_mask = 0;
1709 uint64_t synproxy_reply_mask = 0; /* for synproxy */
1710 uint64_t keep_mask = packet_mask_in;
1712 uint64_t conntrack_mask = 0, connexist_mask = 0;
1713 struct rte_CT_helper ct_helper;
1717 * This routine uses a bit mask to represent which packets in the
1718 * "pkts" table are considered valid. Any table entry which exists
1719 * and is considered valid has the corresponding bit in the mask set.
1720 * Otherwise, it is cleared. Note that the mask is 64 bits,
1721 * but the number of packets in the table may be considerably less.
1722 * Any mask bits which do not correspond to actual packets are cleared.
1723 * Various routines are called which may determine that an existing
1724 * packet is somehow invalid. The routine will return an altered bit
1725 * mask, with the bit cleared. At the end of all the checks,
1726 * packets are dropped if their mask bit is a zero
1729 rte_prefetch0(& vfw_pipe->counters);
1731 /* Pre-fetch all rte_mbuf header */
1732 for(j = 0; j < n_pkts; j++)
1733 rte_prefetch0(pkts[j]);
1735 memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
1736 rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
1737 rte_prefetch0(& vfw_pipe->counters->sum_latencies);
1740 printf("Enter in-port action with %p packet mask\n",
1741 (void *)packet_mask_in);
1742 vfw_pipe->counters->pkts_received =
1743 vfw_pipe->counters->pkts_received + n_pkts;
1745 printf("vfw_port_in_action pkts_received: %" PRIu64
1747 vfw_pipe->counters->pkts_received, n_pkts);
1749 /* first handle any previously buffered packets now released */
1750 vfw_handle_buffered_packets(p, vfw_pipe, ct,
1751 FORWARD_BUFFERED_PACKETS);
1753 /* now handle any new packets on input ports */
1754 if (likely(firewall_flag)) {
1755 keep_mask = rte_vfw_ipv6_packet_filter_and_process(pkts,
1756 keep_mask, vfw_pipe);
1757 vfw_pipe->counters->pkts_fw_forwarded +=
1758 __builtin_popcountll(keep_mask);
1763 rte_prefetch0((void*)vfw_pipe->plib_acl);
1764 rte_prefetch0((void*)vfw_rule_table_ipv6_active);
1765 #endif /* EN_SWP_ACL */
/* ACL classification: may clear keep_mask bits and report which packets
 * require connection tracking (conntrack_mask / connexist_mask) */
1766 keep_mask = lib_acl_ipv6_pkt_work_key(
1767 vfw_pipe->plib_acl, pkts, keep_mask,
1768 &vfw_pipe->counters->pkts_drop_without_rule,
1769 vfw_rule_table_ipv6_active,
1770 action_array_active,
1771 action_counter_table,
1772 &conntrack_mask, &connexist_mask);
1773 vfw_pipe->counters->pkts_acl_forwarded +=
1774 __builtin_popcountll(keep_mask);
1775 if (conntrack_mask > 0) {
1776 keep_mask = conntrack_mask;
1777 ct_helper.no_new_cnxn_mask = connexist_mask;
1778 cnxn_tracking_is_active = 1;
1780 cnxn_tracking_is_active = 0;
1781 #endif /* ACL_ENABLE */
1782 if (likely(cnxn_tracking_is_active)) {
1783 rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
1784 &keep_mask, &ct_helper, IPv6_HEADER_SIZE);
1785 synproxy_reply_mask = ct_helper.reply_pkt_mask;
1786 hijack_mask = ct_helper.hijack_mask;
/* prefetch metadata for the leading (n_pkts % 4) packets */
1791 for(j = 0; j < (n_pkts & 0x3LLU); j++) {
1792 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1794 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1797 rte_prefetch0((void*)in_port_dir_a);
1798 rte_prefetch0(vfw_pipe->local_lib_nd_route_table);
/* ND/next-hop resolution, 4 packets at a time with software pipelined
 * prefetch of the following group */
1801 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
1802 for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
1803 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1805 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1808 pkt4_work_vfw_arp_ipv6_packets(&pkts[i], i, &keep_mask,
1809 synproxy_reply_mask, vfw_pipe);
1811 for (j = i; j < n_pkts; j++) {
1812 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1814 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
/* remainder loop for the trailing packets */
1817 for (; i < n_pkts; i++) {
1818 pkt_work_vfw_arp_ipv6_packets(pkts[i], i, &keep_mask,
1819 synproxy_reply_mask, vfw_pipe);
1822 rte_prefetch0((void*)in_port_dir_a);
1823 rte_prefetch0((void*) & vfw_pipe->local_lib_arp_route_table);
1824 keep_mask = rte_vfw_arp_ipv6_packets(pkts, keep_mask,
1825 synproxy_reply_mask, vfw_pipe);
1828 if (vfw_debug > 1) {
1829 printf(" Exit in-port action with %p packet mask\n",
1831 if (keep_mask != packet_mask_in)
1832 printf("dropped packets, %p in, %p out\n",
1833 (void *)packet_mask_in,
1837 /* Update mask before returning, so that bad packets are dropped */
1839 pkts_drop_mask = packet_mask_in & ~keep_mask;
1841 if (unlikely(pkts_drop_mask != 0)) {
1842 /* printf("drop %p\n", (void *) pkts_drop_mask); */
1843 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1846 if (unlikely(hijack_mask != 0))
1847 rte_pipeline_ah_packet_hijack(p, hijack_mask);
1849 vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
1850 vfw_pipe->counters->num_pkts_measurements++;
1852 end_tsc_measure(vfw_pipe, n_pkts);
1859 * Parse arguments in config file.
1862 * A pointer to the pipeline.
1864 * A pointer to pipeline specific parameters.
1867 * 0 on success, negative on error.
/*
 * Each (name, value) pair is offered first to the ACL config parser, then
 * matched against VFW-specific keys ("traffic_type", "n_flows"); anything
 * left over is passed to the connection tracker's option parser.
 * NOTE(review): atoi() gives no error reporting; malformed numeric values
 * silently become 0 — consider strtol if this ever needs hardening.
 */
1870 pipeline_vfw_parse_args(struct pipeline_vfw *vfw_pipe,
1871 struct pipeline_params *params)
1877 printf("VFW pipeline_vfw_parse_args params->n_args: %d\n",
1880 for (i = 0; i < params->n_args; i++) {
1881 char *arg_name = params->args_name[i];
1882 char *arg_value = params->args_value[i];
1884 printf("VFW args[%d]: %s %d, %s\n", i, arg_name,
1885 atoi(arg_value), arg_value);
/* give the ACL library first chance at this option */
1887 status = lib_acl_parse_config(vfw_pipe->plib_acl,
1888 arg_name, arg_value, &vfw_n_rules);
1890 printf("rte_ct_set_configuration_options =%s,%s",
1891 arg_name, arg_value);
1893 } else if (status == 0)
1896 #endif /* traffic_type */
1897 if (strcmp(arg_name, "traffic_type") == 0) {
1898 int traffic_type = atoi(arg_value);
/* only IPv4 or IPv6 are accepted */
1900 if (traffic_type == 0 ||
1901 !(traffic_type == IP_VERSION_4 ||
1902 traffic_type == IP_VERSION_6)) {
1903 printf("not IPV4/IPV6");
1907 vfw_pipe->traffic_type = traffic_type;
1913 if (strcmp(arg_name, "n_flows") == 0) {
1914 int n_flows = atoi(arg_value);
1919 /* must be power of 2, round up if not */
1920 if (!rte_is_power_of_2(n_flows))
1921 n_flows = rte_align32pow2(n_flows);
1923 vfw_pipe->n_flows = n_flows;
1927 /* not firewall option, process as cnxn tracking option */
1928 status = rte_ct_set_configuration_options(
1929 vfw_pipe->cnxn_tracker,
1930 arg_name, arg_value);
1932 printf("rte_ct_set_configuration_options =%s,%s",
1933 arg_name, arg_value);
1935 } else if (status == 0)
/* Forward declaration: dispatcher for PIPELINE_MSG_REQ_CUSTOM messages. */
1943 static void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p,
/* Standard FE->BE message dispatch table, indexed by message type. */
1946 static pipeline_msg_req_handler handlers[] = {
1947 [PIPELINE_MSG_REQ_PING] = pipeline_msg_req_ping_handler,
1948 [PIPELINE_MSG_REQ_STATS_PORT_IN] =
1949 pipeline_msg_req_stats_port_in_handler,
1950 [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
1951 pipeline_msg_req_stats_port_out_handler,
1952 [PIPELINE_MSG_REQ_STATS_TABLE] = pipeline_msg_req_stats_table_handler,
1953 [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
1954 pipeline_msg_req_port_in_enable_handler,
1955 [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
1956 pipeline_msg_req_port_in_disable_handler,
1957 [PIPELINE_MSG_REQ_CUSTOM] = pipeline_vfw_msg_req_custom_handler,
/* Forward declaration: handler for the synproxy ON/OFF CLI command. */
1960 static void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
/* VFW-specific custom message dispatch table, indexed by subtype. */
1962 static pipeline_msg_req_handler custom_handlers[] = {
1964 [PIPELINE_VFW_MSG_REQ_SYNPROXY_FLAGS] =
1965 pipeline_vfw_msg_req_synproxy_flag_handler
1969 * Create and initialize Pipeline Back End (BE).
1972 * A pointer to the pipeline specific parameters.
1974 * A pointer to pipeline specific data.
1977 * A pointer to the created pipeline, NULL on error.
1980 *pipeline_vfw_init(struct pipeline_params *params, __rte_unused void *arg)
1984 /* Check input arguments */
1985 if ((params == NULL) ||
1986 (params->n_ports_in == 0) || (params->n_ports_out == 0))
1990 printf("num ports in %d / num ports out %d\n",
1991 params->n_ports_in, params->n_ports_out);
1993 /* Create a single pipeline instance and initialize. */
1994 struct pipeline_vfw *pipe_vfw;
1996 size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_vfw));
1997 pipe_vfw = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
1999 if (pipe_vfw == NULL)
2002 struct pipeline *pipe;
2004 pipe = &pipe_vfw->pipe;
/* NOTE(review): strncpy does not guarantee NUL-termination if
 * params->name fills the buffer exactly — verify name lengths upstream. */
2006 strncpy(pipe->name, params->name, sizeof(pipe->name));
2007 pipe->log_level = params->log_level;
2008 pipe_vfw->n_flows = 4096; /* small default value */
2009 pipe_vfw->traffic_type = IP_VERSION_4;
2010 pipe_vfw->pipeline_num = 0xff;
2011 for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
2012 pipe_vfw->links_map[i] = 0xff;
2013 pipe_vfw->outport_id[i] = 0xff;
2015 PLOG(pipe, HIGH, "VFW");
2017 /* Create a firewall instance and initialize. */
2018 pipe_vfw->cnxn_tracker =
2019 rte_zmalloc(NULL, rte_ct_get_cnxn_tracker_size(),
2020 RTE_CACHE_LINE_SIZE);
2022 if (pipe_vfw->cnxn_tracker == NULL)
2025 /* Create an acl instance and initialize. */
2026 pipe_vfw->plib_acl =
2027 rte_zmalloc(NULL, sizeof(struct lib_acl),
2028 RTE_CACHE_LINE_SIZE);
2030 if (pipe_vfw->plib_acl == NULL)
2033 timer_lcore = rte_lcore_id();
2035 * Now allocate a counter block entry. It appears that the
2036 * initialization of all instances is serialized on core 0,
2037 * so no lock is necessary.
2039 struct rte_VFW_counter_block *counter_ptr;
2041 if (rte_VFW_hi_counter_block_in_use == MAX_VFW_INSTANCES)
2042 /* error, exceeded table bounds */
/* NOTE(review): index is incremented before use, so slot 0 of
 * rte_vfw_counter_table appears never to be handed out — confirm this is
 * intentional (it also interacts with the "== 0" first-thread check below). */
2045 rte_VFW_hi_counter_block_in_use++;
2047 &rte_vfw_counter_table[rte_VFW_hi_counter_block_in_use];
2048 strncpy(counter_ptr->name, params->name, sizeof(counter_ptr->name));
2050 pipe_vfw->counters = counter_ptr;
2052 rte_ct_initialize_default_timeouts(pipe_vfw->cnxn_tracker);
2053 /* Parse arguments */
2054 if (pipeline_vfw_parse_args(pipe_vfw, params))
/* offset of the metadata 'next' pointer used to chain buffered packets */
2057 uint16_t pointers_offset =
2058 META_DATA_OFFSET + offsetof(struct mbuf_tcp_meta_data, next);
2060 if (pipe_vfw->n_flows > 0)
2061 rte_ct_initialize_cnxn_tracker_with_synproxy(
2062 pipe_vfw->cnxn_tracker,
2067 pipe_vfw->counters->ct_counters =
2068 rte_ct_get_counter_address(pipe_vfw->cnxn_tracker);
/* rte_pipeline reads the output port id directly from mbuf metadata */
2072 struct rte_pipeline_params pipeline_params = {
2073 .name = params->name,
2074 .socket_id = params->socket_id,
2075 .offset_port_id = META_DATA_OFFSET +
2076 offsetof(struct mbuf_tcp_meta_data, output_port)
2079 pipe->p = rte_pipeline_create(&pipeline_params);
2080 if (pipe->p == NULL) {
2089 * create a different "arg_ah" for each input port.
2090 * They differ only in the recorded port number. Unfortunately,
2091 * IP_PIPELINE does not pass port number in to input port handler
2094 uint32_t in_ports_arg_size =
2095 RTE_CACHE_LINE_ROUNDUP((sizeof(struct vfw_ports_in_args)) *
2096 (params->n_ports_in));
2097 struct vfw_ports_in_args *port_in_args =
2098 (struct vfw_ports_in_args *)
2099 rte_zmalloc(NULL, in_ports_arg_size, RTE_CACHE_LINE_SIZE);
2101 if (port_in_args == NULL)
2104 pipe->n_ports_in = params->n_ports_in;
2105 for (i = 0; i < pipe->n_ports_in; i++) {
2107 /* initialize this instance of port_in_args as necessary */
2108 port_in_args[i].pipe = pipe;
2109 port_in_args[i].cnxn_tracker = pipe_vfw->cnxn_tracker;
2111 struct rte_pipeline_port_in_params port_params = {
2113 pipeline_port_in_params_get_ops(&params->port_in
2116 pipeline_port_in_params_convert(&params->port_in
/* default to the IPv4 action; overridden below for IPv6 traffic */
2118 .f_action = vfw_port_in_action_ipv4,
2119 .arg_ah = &(port_in_args[i]),
2120 .burst_size = params->port_in[i].burst_size,
2122 if (pipe_vfw->traffic_type == IP_VERSION_6)
2123 port_params.f_action = vfw_port_in_action_ipv6;
2124 int status = rte_pipeline_port_in_create(pipe->p, &port_params,
2125 &pipe->port_in_id[i]);
2128 rte_pipeline_free(pipe->p);
2135 pipe->n_ports_out = params->n_ports_out;
2136 for (i = 0; i < pipe->n_ports_out; i++) {
2137 struct rte_pipeline_port_out_params port_params = {
2138 .ops = pipeline_port_out_params_get_ops(
2139 &params->port_out[i]),
2140 .arg_create = pipeline_port_out_params_convert(
2141 &params->port_out[i]),
2146 int status = rte_pipeline_port_out_create(pipe->p, &port_params,
2147 &pipe->port_out_id[i]);
2150 rte_pipeline_free(pipe->p);
/* derive this pipeline's numeric id from its "PIPELINE<n>" name */
2156 int pipeline_num = 0;
2157 int dont_care = sscanf(params->name, "PIPELINE%d", &pipeline_num);
2160 printf("sscanf unble to read pipeline id\n");
2161 pipe_vfw->pipeline_num = (uint8_t) pipeline_num;
2162 register_pipeline_Qs(pipe_vfw->pipeline_num, pipe);
2163 set_link_map(pipe_vfw->pipeline_num, pipe, pipe_vfw->links_map);
2164 set_outport_id(pipe_vfw->pipeline_num, pipe,
2165 pipe_vfw->outport_id);
2166 printf("pipeline_num=%d\n", pipeline_num);
2168 /*If this is the first VFW thread, create common VFW Rule tables*/
2169 if (rte_VFW_hi_counter_block_in_use == 0) {
2170 vfw_rule_table_ipv4_active =
2171 lib_acl_create_active_standby_table_ipv4(1,
2173 if (vfw_rule_table_ipv4_active == NULL) {
2174 printf("Failed to create active table for IPV4\n");
2175 rte_pipeline_free(pipe->p);
2176 rte_free(pipe_vfw->cnxn_tracker);
2177 rte_free(pipe_vfw->plib_acl);
2181 vfw_rule_table_ipv4_standby =
2182 lib_acl_create_active_standby_table_ipv4(2,
2184 if (vfw_rule_table_ipv4_standby == NULL) {
2185 printf("Failed to create standby table for IPV4\n");
2186 rte_pipeline_free(pipe->p);
2187 rte_free(pipe_vfw->cnxn_tracker);
2188 rte_free(pipe_vfw->plib_acl);
2193 vfw_rule_table_ipv6_active =
2194 lib_acl_create_active_standby_table_ipv6(1,
2197 if (vfw_rule_table_ipv6_active == NULL) {
2198 printf("Failed to create active table for IPV6\n");
2199 rte_pipeline_free(pipe->p);
2200 rte_free(pipe_vfw->cnxn_tracker);
2201 rte_free(pipe_vfw->plib_acl);
2205 vfw_rule_table_ipv6_standby =
2206 lib_acl_create_active_standby_table_ipv6(2,
2208 if (vfw_rule_table_ipv6_standby == NULL) {
2209 printf("Failed to create standby table for IPV6\n");
2210 rte_pipeline_free(pipe->p);
2211 rte_free(pipe_vfw->cnxn_tracker);
2212 rte_free(pipe_vfw->plib_acl);
/* single stub table: all traffic is steered by the port-in actions,
 * the table only forwards to the port recorded in packet metadata */
2224 struct rte_pipeline_table_params table_params = {
2225 .ops = &rte_table_stub_ops,
2227 .f_action_hit = NULL,
2228 .f_action_miss = NULL,
2230 .action_data_size = 0,
2233 int status = rte_pipeline_table_create(pipe->p,
2235 &pipe->table_id[0]);
2238 rte_pipeline_free(pipe->p);
2243 struct rte_pipeline_table_entry default_entry = {
2244 .action = RTE_PIPELINE_ACTION_PORT_META
2247 struct rte_pipeline_table_entry *default_entry_ptr;
2249 status = rte_pipeline_table_default_entry_add(pipe->p,
2252 &default_entry_ptr);
2255 rte_pipeline_free(pipe->p);
2259 for (i = 0; i < pipe->n_ports_in; i++) {
2260 int status = rte_pipeline_port_in_connect_to_table(
2262 pipe->port_in_id[i],
2266 rte_pipeline_free(pipe->p);
2272 /* Enable input ports */
2273 for (i = 0; i < pipe->n_ports_in; i++) {
2275 rte_pipeline_port_in_enable(pipe->p, pipe->port_in_id[i]);
2278 rte_pipeline_free(pipe->p);
2284 /* Check pipeline consistency */
2285 if (rte_pipeline_check(pipe->p) < 0) {
2286 rte_pipeline_free(pipe->p);
2291 /* Message queues */
2292 pipe->n_msgq = params->n_msgq;
2293 for (i = 0; i < pipe->n_msgq; i++)
2294 pipe->msgq_in[i] = params->msgq_in[i];
2296 for (i = 0; i < pipe->n_msgq; i++)
2297 pipe->msgq_out[i] = params->msgq_out[i];
2299 /* Message handlers */
2300 memcpy(pipe->handlers, handlers, sizeof(pipe->handlers));
2301 memcpy(pipe_vfw->custom_handlers, custom_handlers,
2302 sizeof(pipe_vfw->custom_handlers));
2308 * Free resources and delete pipeline.
2311 * A pointer to the pipeline.
2314 * 0 on success, negative on error.
/* NOTE(review): only the rte_pipeline object is freed here; the tracker,
 * ACL instance and per-port args allocated in init are not — confirm
 * whether that leak is acceptable for this teardown path. */
2316 static int pipeline_vfw_free(void *pipeline)
2318 struct pipeline *p = (struct pipeline *)pipeline;
2320 /* Check input arguments */
2324 /* Free resources */
2325 rte_pipeline_free(p->p);
2331 * Callback function to map input/output ports.
2334 * A pointer to the pipeline.
2338 * A pointer to the Output port.
2341 * 0 on success, negative on error.
/* NOTE(review): port_in is marked __rte_unused but is validated below;
 * the attribute is stale and only suppresses a warning. */
2344 pipeline_vfw_track(void *pipeline, __rte_unused uint32_t port_in,
2347 struct pipeline *p = (struct pipeline *)pipeline;
2349 /* Check input arguments */
2350 if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
/* single-input pipelines have a trivial in->out mapping */
2353 if (p->n_ports_in == 1) {
2362 * Callback function to process timers.
2365 * A pointer to the pipeline.
2368 * 0 on success, negative on error.
2370 static int pipeline_vfw_timer(void *pipeline)
2372 struct pipeline_vfw *p = (struct pipeline_vfw *)pipeline;
2375 * handle any good buffered packets released by synproxy before checking
2376 * for packets released by synproxy due to timeout.
2377 * Don't want packets missed
2380 vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
2381 FORWARD_BUFFERED_PACKETS);
/* service pending FE messages and push out any staged packets */
2383 pipeline_msg_req_handle(&p->pipe);
2384 rte_pipeline_flush(p->pipe.p);
/* expire stale connections; this may release more buffered packets */
2386 rte_ct_handle_expired_timers(p->cnxn_tracker);
2388 /* now handle packets released by synproxy due to timeout. */
2389 vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
2390 DELETE_BUFFERED_PACKETS);
2396 * Callback function to process CLI commands from FE.
2399 * A pointer to the pipeline.
2401 * A pointer to command specific data.
2404 * A pointer to message handler on success,
2405 * pipeline_msg_req_invalid_handler on error.
2407 void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p, void *msg)
2409 struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
2410 struct pipeline_custom_msg_req *req = msg;
2411 pipeline_msg_req_handler f_handle;
/* out-of-range subtypes fall through to the invalid-message handler */
2413 f_handle = (req->subtype < PIPELINE_VFW_MSG_REQS) ?
2414 pipe_vfw->custom_handlers[req->subtype] :
2415 pipeline_msg_req_invalid_handler;
/* table slots with no registered handler are also treated as invalid */
2417 if (f_handle == NULL)
2418 f_handle = pipeline_msg_req_invalid_handler;
2420 return f_handle(p, req);
2424 * Handler for synproxy ON/OFF CLI command.
2427 * A pointer to the pipeline.
2429 * A pointer to command specific data.
2432 * Response message contains status.
/* The response struct aliases the request buffer (msg is reused in place),
 * matching the IP_PIPELINE message-handling convention. */
2435 void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
2438 struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
2439 struct pipeline_vfw_synproxy_flag_msg_req *req = msg;
2440 struct pipeline_vfw_synproxy_flag_msg_rsp *rsp = msg;
2442 if (req->synproxy_flag == 0) {
2443 rte_ct_disable_synproxy(pipe_vfw->cnxn_tracker);
2445 printf("synproxy turned OFF for %s\n", p->name);
2446 } else if (req->synproxy_flag == 1) {
2447 rte_ct_enable_synproxy(pipe_vfw->cnxn_tracker);
2449 printf("synproxy turned ON for %s\n", p->name);
2451 printf("Invalid synproxy setting\n");
2458 struct pipeline_be_ops pipeline_vfw_be_ops = {
2459 .f_init = pipeline_vfw_init,
2460 .f_free = pipeline_vfw_free,
2462 .f_timer = pipeline_vfw_timer,
2463 .f_track = pipeline_vfw_track,