2 // Copyright (c) 2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
19 * Pipeline VFW BE Implementation.
21 * Implementation of Pipeline VFW Back End (BE).
22 * Responsible for packet processing.
36 #include <rte_common.h>
37 #include <rte_malloc.h>
38 #include <rte_ether.h>
39 #include <rte_ethdev.h>
43 #include <rte_byteorder.h>
45 #include <rte_table_lpm.h>
46 #include <rte_table_hash.h>
47 #include <rte_table_array.h>
48 #include <rte_table_acl.h>
49 #include <rte_table_stub.h>
50 #include <rte_timer.h>
51 #include <rte_cycles.h>
52 #include <rte_pipeline.h>
53 #include <rte_spinlock.h>
54 #include <rte_prefetch.h>
55 #include "pipeline_actions_common.h"
56 #include "hash_func.h"
57 #include "pipeline_vfw.h"
58 #include "pipeline_vfw_be.h"
59 #include "rte_cnxn_tracking.h"
60 #include "pipeline_arpicmp_be.h"
61 #include "vnf_common.h"
62 #include "vnf_define.h"
65 #include "lib_icmpv6.h"
66 #include "pipeline_common_fe.h"
/* Global run-time enable for firewall filtering. The filter passes below
 * consult this before returning their computed valid-packet mask. */
70 uint8_t firewall_flag = 1;
/* Global enable for connection tracking (inferred from the name --
 * NOTE(review): its consumers are not visible in this chunk, confirm). */
72 uint8_t cnxn_tracking_is_active = 1;
74 * A structure defining the VFW pipeline input port per thread data.
/* Per-thread input-port argument bundle for the VFW pipeline. */
76 struct vfw_ports_in_args {
77 struct pipeline *pipe; /* owning pipeline instance */
78 struct rte_ct_cnxn_tracker *cnxn_tracker; /* per-thread connection tracker */
79 } __rte_cache_aligned;
81 * A structure defining the VFW pipeline per thread data.
/* NOTE(review): the struct's opening line is elided in this listing;
 * these members form the VFW per-thread pipeline state. */
85 pipeline_msg_req_handler custom_handlers[PIPELINE_VFW_MSG_REQS];
87 struct rte_ct_cnxn_tracker *cnxn_tracker; /* connection-tracking state */
88 struct rte_VFW_counter_block *counters; /* per-instance statistics block */
89 struct rte_mbuf *pkt_buffer[PKT_BUFFER_SIZE]; /* staging area for mbufs */
90 struct lib_acl *plib_acl; /* ACL rule set used by this thread */
91 /* timestamp retrieved during in-port computations */
95 uint8_t links_map[PIPELINE_MAX_PORT_IN];
96 uint8_t outport_id[PIPELINE_MAX_PORT_IN]; /* dest_if -> pipeline out port */
97 /* Local ARP & ND Tables */
98 struct lib_arp_route_table_entry
99 local_lib_arp_route_table[MAX_ARP_RT_ENTRY];
100 uint8_t local_lib_arp_route_ent_cnt; /* entries used in ARP cache above */
101 struct lib_nd_route_table_entry
102 local_lib_nd_route_table[MAX_ND_RT_ENTRY];
103 uint8_t local_lib_nd_route_ent_cnt; /* entries used in ND cache above */
105 } __rte_cache_aligned;
107 * A structure defining the mbuf meta data for VFW.
/* Per-mbuf metadata stored at META_DATA_OFFSET inside the mbuf headroom. */
109 struct mbuf_tcp_meta_data {
110 /* output port stored for RTE_PIPELINE_ACTION_PORT_META */
111 uint32_t output_port;
112 struct rte_mbuf *next; /* next pointer for chained buffers */
113 } __rte_cache_aligned;
/* Packet-classification flags and byte offsets into the mbuf. Offsets are
 * relative to the metadata area / Ethernet start; IP_START itself is defined
 * outside this chunk (presumably ETHERNET_START + ETH_HDR_SIZE -- confirm). */
115 #define DONT_CARE_TCP_PACKET 0
116 #define IS_NOT_TCP_PACKET 0
117 #define IS_TCP_PACKET 1
119 #define META_DATA_OFFSET 128
121 #define RTE_PKTMBUF_HEADROOM 128 /* where is this defined ? */
122 #define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
123 #define ETH_HDR_SIZE 14
124 #define PROTOCOL_START (IP_START + 9)
126 #define TCP_START (IP_START + 20)
127 #define RTE_LB_PORT_OFFSET 204 /* TODO: Need definition in LB header */
128 #define TCP_START_IPV6 (IP_START + 40)
129 #define PROTOCOL_START_IPV6 (IP_START + 6)
130 #define IP_HDR_DSCP_OFST 1
132 #define TCP_PROTOCOL 6
133 #define UDP_PROTOCOL 17
135 #define DELETE_BUFFERED_PACKETS 0
136 #define FORWARD_BUFFERED_PACKETS 1
140 #define IPv4_HEADER_SIZE 20
141 #define IPv6_HEADER_SIZE 40
143 #define IP_VERSION_4 4
144 #define IP_VERSION_6 6
147 #define IP_HDR_SIZE_IPV6 40
148 #define IP_HDR_DSCP_OFST_IPV6 0
149 #define IP_HDR_LENGTH_OFST_IPV6 4
150 #define IP_HDR_PROTOCOL_OFST_IPV6 6
151 #define IP_HDR_DST_ADR_OFST_IPV6 24
152 #define MAX_NUM_LOCAL_MAC_ADDRESS 16
153 /** The counter table for VFW pipeline per thread data.*/
/* Per-instance counter blocks; rte_VFW_hi_counter_block_in_use tracks the
 * highest index handed out (-1 means none allocated yet). */
154 struct rte_VFW_counter_block rte_vfw_counter_table[MAX_VFW_INSTANCES]
156 int rte_VFW_hi_counter_block_in_use = -1;
158 /* a spin lock used during vfw initialization only */
159 rte_spinlock_t rte_VFW_init_lock = RTE_SPINLOCK_INITIALIZER;
/* Double-buffered action tables: one array is active, the other standby,
 * so rules can be swapped without disturbing the datapath. */
162 struct pipeline_action_key *action_array_a;
163 struct pipeline_action_key *action_array_b;
164 struct pipeline_action_key *action_array_active;
165 struct pipeline_action_key *action_array_standby;
166 uint32_t action_array_size;
167 struct action_counter_block
168 action_counter_table[MAX_VFW_INSTANCES][action_array_max]
171 * Pipeline table strategy for firewall. Unfortunately, there does not seem to
172 * be any use for the built-in table lookup of ip_pipeline for the firewall.
173 * The main table requirement of the firewall is the hash table to maintain
174 * connection info, but that is implemented separately in the connection
175 * tracking library. So a "dummy" table lookup will be performed.
176 * TODO: look into "stub" table and see if that can be used
177 * to avoid useless table lookup
179 /***** ARP local cache *****/
/* Local cache of destination MACs, indexed by output interface. The _valid
 * flag is set when rte_vfw_arp_packets resolves a MAC and copies it into
 * link_hw_laddr, letting later packets skip the ARP lookup. */
180 uint8_t link_hw_laddr_valid[MAX_NUM_LOCAL_MAC_ADDRESS] = {
181 0, 0, 0, 0, 0, 0, 0, 0,
182 0, 0, 0, 0, 0, 0, 0, 0
185 static struct ether_addr link_hw_laddr[MAX_NUM_LOCAL_MAC_ADDRESS] = {
186 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
187 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
188 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
189 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
190 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
191 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
192 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
193 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
194 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
195 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
196 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
197 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
198 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
199 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
200 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
201 {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }
204 /* Start TSC measurement */
205 /* Prefetch counters and pipe before this function */
/* Record the TSC at pipeline entry; if a previous exit timestamp exists,
 * accumulate the time spent outside the pipeline into external_time_sum. */
206 static inline void start_tsc_measure(struct pipeline_vfw *vfw_pipe) {
207 vfw_pipe->counters->entry_timestamp = rte_get_tsc_cycles();
208 if (likely(vfw_pipe->counters->exit_timestamp))
209 vfw_pipe->counters->external_time_sum +=
210 vfw_pipe->counters->entry_timestamp -
211 vfw_pipe->counters->exit_timestamp;
214 /* End TSC measurement */
/* Record the TSC at pipeline exit and accumulate internal processing time.
 * Bursts of <= 1 packet are excluded (exit_timestamp reset to 0) so tiny
 * bursts do not skew the per-packet timing averages. */
215 static inline void end_tsc_measure(
216 struct pipeline_vfw *vfw_pipe,
219 if (likely(n_pkts > 1)) {
220 vfw_pipe->counters->exit_timestamp = rte_get_tsc_cycles();
221 vfw_pipe->counters->internal_time_sum +=
222 vfw_pipe->counters->exit_timestamp -
223 vfw_pipe->counters->entry_timestamp;
224 vfw_pipe->counters->time_measurements++;
226 /* small counts skew results, ignore */
227 vfw_pipe->counters->exit_timestamp = 0;
/* Return the cached destination MAC for an output port; caller must have
 * checked local_dest_mac_present() first -- validity is not re-checked here. */
231 static struct ether_addr *get_local_link_hw_addr(uint8_t out_port)
233 return &link_hw_laddr[out_port];
/* Non-zero when a destination MAC for out_port is already cached locally. */
236 static uint8_t local_dest_mac_present(uint8_t out_port)
238 return link_hw_laddr_valid[out_port];
/* Look up the next hop for an IPv4 destination in the thread-local ARP route
 * cache. Linear scan, first matching masked network wins; on a hit, *port and
 * *nhip are filled from the entry. (Return statements are elided in this
 * listing -- presumably non-zero on hit, 0 on miss; confirm.) */
241 static uint32_t local_get_nh_ipv4(
245 struct pipeline_vfw *vfw_pipe)
249 for (i = 0; i < vfw_pipe->local_lib_arp_route_ent_cnt; i++) {
250 if (((vfw_pipe->local_lib_arp_route_table[i].ip &
251 vfw_pipe->local_lib_arp_route_table[i].mask) ==
252 (ip & vfw_pipe->local_lib_arp_route_table[i].mask))) {
253 *port = vfw_pipe->local_lib_arp_route_table[i].port;
255 *nhip = vfw_pipe->local_lib_arp_route_table[i].nh;
/* Copy the global ARP route entry for dest_if into this thread's local route
 * cache, so subsequent lookups avoid touching the shared table.
 * NOTE(review): no visible bounds check against MAX_ARP_RT_ENTRY before
 * incrementing local_lib_arp_route_ent_cnt -- confirm elided lines cover it. */
262 static void do_local_nh_ipv4_cache(uint32_t dest_if,
263 struct pipeline_vfw *vfw_pipe)
266 /* Search for the entry and do local copy */
269 for (i = 0; i < MAX_ARP_RT_ENTRY; i++) {
270 if (lib_arp_route_table[i].port == dest_if) {
272 struct lib_arp_route_table_entry *lentry =
274 local_lib_arp_route_table[vfw_pipe->
275 local_lib_arp_route_ent_cnt];
277 lentry->ip = lib_arp_route_table[i].ip;
278 lentry->mask = lib_arp_route_table[i].mask;
279 lentry->port = lib_arp_route_table[i].port;
280 lentry->nh = lib_arp_route_table[i].nh;
282 vfw_pipe->local_lib_arp_route_ent_cnt++;
/* Look up the next hop for an IPv6 destination in the thread-local ND route
 * cache. For each entry, builds the netmask from the prefix length, masks
 * both the table address and the input address, and compares the masked
 * networks; on a match, *port and the next-hop address are copied out. */
287 static uint32_t local_get_nh_ipv6(
291 struct pipeline_vfw *vfw_pipe)
293 uint8_t netmask_ipv6[IPV6_ADD_SIZE], netip_nd[IPV6_ADD_SIZE],
294 netip_in[IPV6_ADD_SIZE];
295 uint8_t i = 0, j = 0, k = 0, l = 0, depthflags = 0, depthflags1 = 0;
296 memset(netmask_ipv6, 0, sizeof(netmask_ipv6));
297 memset(netip_nd, 0, sizeof(netip_nd));
298 memset(netip_in, 0, sizeof(netip_in));
300 for (i = 0; i < vfw_pipe->local_lib_nd_route_ent_cnt; i++) {
302 convert_prefixlen_to_netmask_ipv6(
303 vfw_pipe->local_lib_nd_route_table[i].depth,
/* Mask the table entry's address byte-by-byte. */
306 for (k = 0; k < IPV6_ADD_SIZE; k++)
307 if (vfw_pipe->local_lib_nd_route_table[i].ipv6[k] &
310 netip_nd[k] = vfw_pipe->
311 local_lib_nd_route_table[i].ipv6[k];
/* Mask the input destination address the same way. */
314 for (l = 0; l < IPV6_ADD_SIZE; l++)
315 if (ip[l] & netmask_ipv6[l]) {
321 if ((depthflags == depthflags1) && (memcmp(netip_nd, netip_in,
322 sizeof(netip_nd)) == 0)) {
324 *port = vfw_pipe->local_lib_nd_route_table[i].port;
326 for (j = 0; j < IPV6_ADD_SIZE; j++)
328 local_lib_nd_route_table[i].nhipv6[j];
/* Copy the global ND route entry for dest_if into this thread's local route
 * cache (IPv6 analogue of do_local_nh_ipv4_cache). */
338 static void do_local_nh_ipv6_cache(uint32_t dest_if,
339 struct pipeline_vfw *vfw_pipe)
341 /* Search for the entry and do local copy */
344 for (i = 0; i < MAX_ND_RT_ENTRY; i++) {
346 if (lib_nd_route_table[i].port == dest_if) {
348 struct lib_nd_route_table_entry *lentry = &vfw_pipe->
349 local_lib_nd_route_table[vfw_pipe->
350 local_lib_nd_route_ent_cnt];
/* Copy the 16-byte destination and next-hop addresses. */
352 for (l = 0; l < IPV6_ADD_SIZE; l++) {
354 lib_nd_route_table[i].ipv6[l];
356 lib_nd_route_table[i].nhipv6[l];
358 lentry->depth = lib_nd_route_table[i].depth;
359 lentry->port = lib_nd_route_table[i].port;
361 vfw_pipe->local_lib_nd_route_ent_cnt++;
367 * Print packet for debugging.
370 * A pointer to the packet.
/* Debug helper: hex-dump the mbuf's metadata area followed by the IP and
 * TCP/UDP header bytes, wrapping lines every few bytes. */
373 static __rte_unused void print_pkt(struct rte_mbuf *pkt)
376 int size = (int)sizeof(struct mbuf_tcp_meta_data);
377 uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, META_DATA_OFFSET);
379 printf("Meta-data:\n");
380 for (i = 0; i < size; i++) {
381 printf("%02x ", rd[i]);
382 if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
386 printf("IP and TCP/UDP headers:\n");
387 rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, IP_START);
388 for (i = 0; i < IP_HDR_SIZE_IPV6; i++) {
389 printf("%02x ", rd[i]);
390 if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
396 /* TODO: are the protocol numbers defined somewhere with meaningful names? */
/* IANA-assigned IP protocol numbers, plus the IPv6 fragment extension
 * header value (44) used to detect fragmented IPv6 packets. */
397 #define IP_ICMP_PROTOCOL 1
398 #define IP_TCP_PROTOCOL 6
399 #define IP_UDP_PROTOCOL 17
400 #define IPv6_FRAGMENT_HEADER 44
403 * Return ethernet header structure from packet.
406 * A pointer to the packet.
/* Return a pointer to the packet's Ethernet header inside the mbuf
 * (offset constant elided in this listing -- presumably ETHERNET_START). */
409 static inline struct ether_hdr *rte_vfw_get_ether_addr(struct rte_mbuf *pkt)
411 return (struct ether_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
416 * Return IPV4 header structure from packet.
419 * A pointer to the packet.
/* Return a pointer to the packet's IPv4 header at IP_START in the mbuf. */
423 static inline struct ipv4_hdr *rte_vfw_get_IPv4_hdr_addr(
424 struct rte_mbuf *pkt)
426 return (struct ipv4_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
/* True when the byte at IP_START carries the IPv4 version nibble
 * (shift/mask of the version field is elided in this listing). */
429 static inline int rte_vfw_is_IPv4(struct rte_mbuf *pkt)
431 /* NOTE: Only supporting IP headers with no options,
432 * so header is fixed size */
433 uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
436 return ip_type == IPv4_HDR_VERSION;
/* True when the byte at IP_START carries the IPv6 version nibble
 * (shift/mask of the version field is elided in this listing). */
439 static inline int rte_vfw_is_IPv6(struct rte_mbuf *pkt)
441 /* NOTE: Only supporting IP headers with no options,
442 * so header is fixed size */
443 uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
446 return ip_type == IPv6_HDR_VERSION;
/* Increment a drop counter, but only while the firewall is enabled
 * (the increment statement itself is elided in this listing). */
449 static inline void rte_vfw_incr_drop_ctr(uint64_t *counter)
451 if (likely(firewall_flag))
/* Divert control-plane packets (ARP, ICMP addressed to this link, ICMPv6 to
 * our address or the solicited-node multicast prefix) to the last output port
 * (n_ports_out - 1), bumping arpicmpPktCount for each diversion. Returns a
 * flag the callers test with "if (!check_arp_icmp(...))" -- the visible code
 * suggests 0 = consumed here, non-zero = continue normal processing, but the
 * return statements are elided in this listing; confirm. */
455 static uint8_t check_arp_icmp(
456 struct rte_mbuf *pkt,
457 struct pipeline_vfw *vfw_pipe)
459 struct ether_hdr *ehdr;
460 struct app_link_params *link;
461 uint8_t solicited_node_multicast_addr[IPV6_ADD_SIZE] = {
462 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00};
465 /* ARP outport number */
466 uint16_t out_port = vfw_pipe->pipe.n_ports_out - 1;
467 struct ipv4_hdr *ipv4_h;
468 struct ipv6_hdr *ipv6_h;
469 link = &myApp->link_params[pkt->port];
471 ehdr = rte_vfw_get_ether_addr(pkt);
472 switch (rte_be_to_cpu_16(ehdr->ether_type)) {
/* ARP: always diverted to the control port. */
475 rte_pipeline_port_out_packet_insert(
480 vfw_pipe->counters->arpicmpPktCount++;
/* IPv4 ICMP destined to this link's own address. */
484 ipv4_h = (struct ipv4_hdr *)
485 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
486 if ((ipv4_h->next_proto_id == IP_PROTOCOL_ICMP) &&
488 rte_be_to_cpu_32(ipv4_h->dst_addr)) {
489 if (is_phy_port_privte(pkt->port)) {
490 rte_pipeline_port_out_packet_insert(
495 vfw_pipe->counters->arpicmpPktCount++;
/* IPv6 ICMPv6 destined to us or solicited-node multicast. */
502 ipv6_h = (struct ipv6_hdr *)
503 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
505 if (ipv6_h->proto == ICMPV6_PROTOCOL_ID) {
506 if (!memcmp(ipv6_h->dst_addr, link->ipv6, IPV6_ADD_SIZE)
507 || !memcmp(ipv6_h->dst_addr,
508 solicited_node_multicast_addr,
509 IPV6_ADD_CMP_MULTI)) {
511 rte_pipeline_port_out_packet_insert(
516 vfw_pipe->counters->arpicmpPktCount++;
520 pkts_drop_unsupported_type++;
534 * Performs basic VFW packet filtering.
536 * A pointer to the packets.
540 * A pointer to VFW pipeline.
/* Dual-stack basic filter pass: walks every packet in pkts_mask (using
 * count-trailing-zeros to find set bits), prefetching the next packet's IP
 * header while validating the current one. Drops packets with a bad
 * size (IP length vs mbuf length mismatch), IPv4 fragments, expired TTL /
 * hop limit, or an unsupported L4 protocol (only TCP, UDP, ICMP pass).
 * Returns the surviving packet bitmask when firewall_flag is set; the
 * disabled-firewall return path is elided in this listing. */
544 rte_vfw_packet_filter_and_process(struct rte_mbuf **pkts,
546 struct pipeline_vfw *vfw_pipe)
550 * Make use of cache prefetch. At beginning of loop, want to prefetch
551 * mbuf data for next iteration (not current one).
552 * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
553 * is 20 bytes (extensions not supported), while the IPv6 header is 40
554 * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
555 * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
556 * need two pre-fetches.
559 uint8_t pos, next_pos = 0;
560 uint64_t pkt_mask; /* bitmask representing a single packet */
561 struct rte_mbuf *pkt;
562 struct rte_mbuf *next_pkt = NULL;
564 void *next_iphdr = NULL;
566 if (unlikely(pkts_mask == 0))
568 pos = (uint8_t) __builtin_ctzll(pkts_mask);
569 pkt_mask = 1LLU << pos; /* bitmask representing only this packet */
571 iphdr = RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
572 rte_prefetch0(iphdr);
574 uint64_t bytes_processed = 0;
575 /* bitmap of packets left to process */
576 uint64_t pkts_to_process = pkts_mask;
577 /* bitmap of valid packets to return */
578 uint64_t valid_packets = pkts_mask;
580 /* prefetch counters, updated below. Most likely counters to update
582 rte_prefetch0(&vfw_pipe->counters);
584 do { /* always execute at least once */
586 /* remove this packet from remaining list */
587 uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
589 if (likely(next_pkts_to_process)) {
590 /* another packet to process after this, prefetch it */
593 (uint8_t) __builtin_ctzll(next_pkts_to_process);
594 next_pkt = pkts[next_pos];
596 RTE_MBUF_METADATA_UINT32_PTR(next_pkt, IP_START);
597 rte_prefetch0(next_iphdr);
601 /* remove this packet from remaining list */
602 pkts_to_process &= ~pkt_mask;
604 if (!check_arp_icmp(pkt, vfw_pipe))
606 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
608 bytes_processed += packet_length;
610 if (rte_vfw_is_IPv4(pkt)) {
611 struct ipv4_hdr *ihdr4 = (struct ipv4_hdr *)iphdr;
613 /* verify that packet size according to mbuf is at least
614 * as large as the size according to the IP header.
617 uint32_t ip_length = rte_bswap16(ihdr4->total_length);
620 (ip_length > (packet_length - ETH_HDR_SIZE))) {
622 vfw_pipe->counters->pkts_drop_bad_size++;
626 * IPv4 fragmented if: MF (more fragments) or Fragment
627 * Offset are non-zero. Header in Intel order, so flip
628 * constant to compensate. Note that IPv6 uses a header
629 * extension for identifying fragments.
632 int fragmented = (ihdr4->fragment_offset & 0xff3f) != 0;
633 uint8_t ttl = ihdr4->time_to_live;
635 if (unlikely(fragmented)) {
637 vfw_pipe->counters->pkts_drop_fragmented++;
641 * Behave like a router, and decrement the TTL of an
642 * IP packet. If this causes the TTL to become zero,
643 * the packet will be discarded. Unlike a router,
644 * no ICMP code 11 (Time * Exceeded) message will be
645 * sent back to the packet originator.
648 if (unlikely(ttl <= 1)) {
650 * about to decrement to zero (or is somehow
651 * already zero), so discard
654 vfw_pipe->counters->pkts_drop_ttl++;
658 * Dropping the packets other than TCP AND UDP.
661 uint8_t proto = ihdr4->next_proto_id;
663 if (unlikely(!(proto == IP_TCP_PROTOCOL ||
664 proto == IP_UDP_PROTOCOL ||
665 proto == IP_ICMP_PROTOCOL))) {
668 pkts_drop_unsupported_type++;
671 if (unlikely(discard)) {
672 valid_packets &= ~pkt_mask;
675 } else if (likely(rte_vfw_is_IPv6(pkt))) {
676 struct ipv6_hdr *ihdr6 = (struct ipv6_hdr *)iphdr;
679 * verify that packet size according to mbuf is at least
680 * as large as the size according to the IP header.
681 * For IPv6, note that size includes header extensions
682 * but not the base header size
686 rte_bswap16(ihdr6->payload_len) + IPv6_HEADER_SIZE;
689 (ip_length > (packet_length - ETH_HDR_SIZE))) {
691 vfw_pipe->counters->pkts_drop_bad_size++;
695 * Dropping the packets other than TCP AND UDP.
698 uint8_t proto = ihdr6->proto;
700 if (unlikely(!(proto == IP_TCP_PROTOCOL ||
701 proto == IP_UDP_PROTOCOL ||
702 proto == IP_ICMP_PROTOCOL))) {
704 if (proto == IPv6_FRAGMENT_HEADER)
706 pkts_drop_fragmented++;
709 pkts_drop_unsupported_type++;
713 * Behave like a router, and decrement the TTL of an
714 * IP packet. If this causes the TTL to become zero,
715 * the packet will be discarded. Unlike a router,
716 * no ICMP code 11 (Time * Exceeded) message will be
717 * sent back to the packet originator.
720 if (unlikely(ihdr6->hop_limits <= 1)) {
722 * about to decrement to zero (or is somehow
723 * already zero), so discard
726 vfw_pipe->counters->pkts_drop_ttl++;
729 if (unlikely(discard))
730 valid_packets &= ~pkt_mask;
735 valid_packets &= ~pkt_mask;
737 /* make next packet data the current */
738 pkts_to_process = next_pkts_to_process;
742 pkt_mask = 1LLU << pos;
744 } while (pkts_to_process);
746 /* finalize counters, etc. */
747 vfw_pipe->counters->bytes_processed += bytes_processed;
749 if (likely(firewall_flag))
750 return valid_packets;
755 * Performs basic VFW ipv4 packet filtering.
757 * A pointer to the packets.
761 * A pointer to VFW pipeline.
/* IPv4-only filter pass: same structure as the dual-stack filter (bitmask
 * walk + next-packet prefetch) but skips the version check. Drops bad-size
 * packets, fragments, TTL <= 1, and unsupported protocols; for surviving
 * packets it decrements TTL and applies the RFC 1141 incremental header
 * checksum update (add 0x100 to the old checksum, fold the carry). Returns
 * the surviving packet bitmask when firewall_flag is set. */
765 rte_vfw_ipv4_packet_filter_and_process(struct rte_mbuf **pkts,
767 struct pipeline_vfw *vfw_pipe)
771 * Make use of cache prefetch. At beginning of loop, want to prefetch
772 * mbuf data for next iteration (not current one).
773 * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
774 * is 20 bytes (extensions not supported), while the IPv6 header is 40
775 * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
776 * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
777 * need two pre-fetches.
780 uint8_t pos, next_pos = 0;
781 uint64_t pkt_mask; /* bitmask representing a single packet */
782 struct rte_mbuf *pkt;
783 struct rte_mbuf *next_pkt = NULL;
784 struct ipv4_hdr *ihdr4;
785 void *next_iphdr = NULL;
787 if (unlikely(pkts_mask == 0))
789 pos = (uint8_t) __builtin_ctzll(pkts_mask);
790 pkt_mask = 1LLU << pos; /* bitmask representing only this packet */
793 uint64_t bytes_processed = 0;
794 /* bitmap of packets left to process */
795 uint64_t pkts_to_process = pkts_mask;
796 /* bitmap of valid packets to return */
797 uint64_t valid_packets = pkts_mask;
800 /* prefetch counters, updated below. Most likely counters to update
802 rte_prefetch0(&vfw_pipe->counters);
804 do { /* always execute at least once */
806 /* remove this packet from remaining list */
807 uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
809 if (likely(next_pkts_to_process)) {
810 /* another packet to process after this, prefetch it */
813 (uint8_t) __builtin_ctzll(next_pkts_to_process);
814 next_pkt = pkts[next_pos];
815 next_iphdr = RTE_MBUF_METADATA_UINT32_PTR(next_pkt,
817 rte_prefetch0(next_iphdr);
821 /* remove this packet from remaining list */
822 pkts_to_process &= ~pkt_mask;
825 if (!check_arp_icmp(pkt, vfw_pipe))
828 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
830 bytes_processed += packet_length;
832 ihdr4 = (struct ipv4_hdr *)
833 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
835 /* verify that packet size according to mbuf is at least
836 * as large as the size according to the IP header.
839 uint32_t ip_length = rte_bswap16(ihdr4->total_length);
842 (ip_length > (packet_length - ETH_HDR_SIZE))) {
844 vfw_pipe->counters->pkts_drop_bad_size++;
848 * IPv4 fragmented if: MF (more fragments) or Fragment
849 * Offset are non-zero. Header in Intel order, so flip
850 * constant to compensate. Note that IPv6 uses a header
851 * extension for identifying fragments.
854 int fragmented = (ihdr4->fragment_offset & 0xff3f) != 0;
855 uint8_t ttl = ihdr4->time_to_live;
857 if (unlikely(fragmented)) {
859 vfw_pipe->counters->pkts_drop_fragmented++;
863 * Behave like a router, and decrement the TTL of an
864 * IP packet. If this causes the TTL to become zero,
865 * the packet will be discarded. Unlike a router,
866 * no ICMP code 11 (Time * Exceeded) message will be
867 * sent back to the packet originator.
870 if (unlikely(ttl <= 1)) {
872 * about to decrement to zero (or is somehow
873 * already zero), so discard
876 vfw_pipe->counters->pkts_drop_ttl++;
880 * Dropping the packets other than TCP AND UDP.
883 uint8_t proto = ihdr4->next_proto_id;
885 if (unlikely(!(proto == IP_TCP_PROTOCOL ||
886 proto == IP_UDP_PROTOCOL ||
887 proto == IP_ICMP_PROTOCOL))) {
890 pkts_drop_unsupported_type++;
893 if (unlikely(discard)) {
894 valid_packets &= ~pkt_mask;
896 ihdr4->time_to_live = ttl - 1;
898 /* update header checksum, from rfc 1141 */
900 uint16_t checksum = rte_bswap16(
901 ihdr4->hdr_checksum);
902 /* increment checksum high byte */
903 sum = checksum + 0x100;
905 checksum = (sum + (sum >> BIT_CARRY));
906 ihdr4->hdr_checksum = rte_bswap16(checksum);
909 /* make next packet data the current */
910 pkts_to_process = next_pkts_to_process;
914 pkt_mask = 1LLU << pos;
916 } while (pkts_to_process);
918 /* finalize counters, etc. */
919 vfw_pipe->counters->bytes_processed += bytes_processed;
921 if (likely(firewall_flag))
922 return valid_packets;
927 * Performs basic VFW IPV6 packet filtering.
929 * A pointer to the packets.
933 * A pointer to VFW pipeline.
/* IPv6-only filter pass: bitmask walk with next-packet prefetch. Drops
 * bad-size packets (payload_len + 40-byte base header vs mbuf length),
 * fragment-extension packets, unsupported protocols, and hop_limits <= 1;
 * surviving packets get hop_limits decremented (no checksum in IPv6).
 * Returns the surviving packet bitmask when firewall_flag is set. */
936 rte_vfw_ipv6_packet_filter_and_process(struct rte_mbuf **pkts,
938 struct pipeline_vfw *vfw_pipe)
942 * Make use of cache prefetch. At beginning of loop, want to prefetch
943 * mbuf data for next iteration (not current one).
944 * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
945 * is 20 bytes (extensions not supported), while the IPv6 header is 40
946 * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
947 * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
948 * need two pre-fetches.
951 uint8_t pos, next_pos = 0;
952 uint64_t pkt_mask; /* bitmask representing a single packet */
953 struct rte_mbuf *pkt;
954 struct rte_mbuf *next_pkt = NULL;
955 struct ipv6_hdr *ihdr6;
956 void *next_iphdr = NULL;
958 if (unlikely(pkts_mask == 0))
960 pos = (uint8_t) __builtin_ctzll(pkts_mask);
961 pkt_mask = 1LLU << pos; /* bitmask representing only this packet */
964 uint64_t bytes_processed = 0;
965 /* bitmap of packets left to process */
966 uint64_t pkts_to_process = pkts_mask;
967 /* bitmap of valid packets to return */
968 uint64_t valid_packets = pkts_mask;
970 /* prefetch counters, updated below. Most likely counters to update
972 rte_prefetch0(&vfw_pipe->counters);
974 do { /* always execute at least once */
976 /* remove this packet from remaining list */
977 uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
979 if (likely(next_pkts_to_process)) {
980 /* another packet to process after this, prefetch it */
983 (uint8_t) __builtin_ctzll(next_pkts_to_process);
984 next_pkt = pkts[next_pos];
986 RTE_MBUF_METADATA_UINT32_PTR(next_pkt, IP_START);
987 rte_prefetch0(next_iphdr);
991 /* remove this packet from remaining list */
992 pkts_to_process &= ~pkt_mask;
995 if (!check_arp_icmp(pkt, vfw_pipe))
998 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
1000 bytes_processed += packet_length;
1002 ihdr6 = (struct ipv6_hdr *)
1003 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1006 * verify that packet size according to mbuf is at least
1007 * as large as the size according to the IP header.
1008 * For IPv6, note that size includes header extensions
1009 * but not the base header size
1012 uint32_t ip_length =
1013 rte_bswap16(ihdr6->payload_len) + IPv6_HEADER_SIZE;
1016 (ip_length > (packet_length - ETH_HDR_SIZE))) {
1018 vfw_pipe->counters->pkts_drop_bad_size++;
1022 * Dropping the packets other than TCP AND UDP.
1025 uint8_t proto = ihdr6->proto;
1027 if (unlikely(!(proto == IP_TCP_PROTOCOL ||
1028 proto == IP_UDP_PROTOCOL ||
1029 proto == IP_ICMP_PROTOCOL))) {
1031 if (proto == IPv6_FRAGMENT_HEADER)
1032 vfw_pipe->counters->
1033 pkts_drop_fragmented++;
1035 vfw_pipe->counters->
1036 pkts_drop_unsupported_type++;
1040 * Behave like a router, and decrement the TTL of an
1041 * IP packet. If this causes the TTL to become zero,
1042 * the packet will be discarded. Unlike a router,
1043 * no ICMP code 11 (Time * Exceeded) message will be
1044 * sent back to the packet originator.
1047 if (unlikely(ihdr6->hop_limits <= 1)) {
1049 * about to decrement to zero (or is somehow
1050 * already zero), so discard
1053 vfw_pipe->counters->pkts_drop_ttl++;
1056 if (unlikely(discard))
1057 valid_packets &= ~pkt_mask;
1059 ihdr6->hop_limits--;
1061 /* make next packet data the current */
1062 pkts_to_process = next_pkts_to_process;
1066 pkt_mask = 1LLU << pos;
1068 } while (pkts_to_process);
1070 /* finalize counters, etc. */
1071 vfw_pipe->counters->bytes_processed += bytes_processed;
1073 if (likely(firewall_flag))
1074 return valid_packets;
1080 * exchange the mac address so source becomes destination and vice versa.
1083 * A pointer to the ethernet header.
/* Swap the Ethernet source and destination MACs in place via a temporary,
 * used when a synproxy reply must be sent back out the ingress port. */
1086 static inline void rte_sp_exchange_mac_addresses(struct ether_hdr *ehdr)
1088 struct ether_addr saved_copy;
1090 ether_addr_copy(&ehdr->d_addr, &saved_copy);
1091 ether_addr_copy(&ehdr->s_addr, &ehdr->d_addr);
1092 ether_addr_copy(&saved_copy, &ehdr->s_addr);
1095 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1096 * To support synproxy, some (altered) packets may need to be sent back where
1097 * they came from. The ip header has already been adjusted, but the ethernet
1098 * header has not, so this must be performed here.
1099 * Return an updated pkts_mask, since arp may drop some packets
1102 * A pointer to the packet.
1105 * @param synproxy_reply_mask
1106 * Reply Packet mask for Synproxy
1108 * A pointer to VFW pipeline.
/* Resolve L2 next-hop for every packet in pkts_mask. For each packet:
 *  - consult the thread-local route cache (local_get_nh_ipv4/ipv6); on a
 *    miss, derive the destination interface from the private/public port
 *    mapping and populate the local cache;
 *  - for synproxy replies (bit set in synproxy_reply_mask) the Ethernet
 *    MACs are swapped so the packet goes back where it came from;
 *  - write the resolved output port into the mbuf metadata, then fill the
 *    Ethernet destination MAC from the local MAC cache or the global ARP/ND
 *    lookup, caching newly found MACs in link_hw_laddr;
 *  - drop (clear mask bit and count) when no route or no MAC is known.
 * Returns the updated pkts_mask (elided return statement at end). */
1112 rte_vfw_arp_packets(struct rte_mbuf **pkts,
1114 uint64_t synproxy_reply_mask,
1115 struct pipeline_vfw *vfw_pipe)
1117 uint64_t pkts_to_arp = pkts_mask;
1119 uint32_t dest_if = INVALID_DESTIF;
1122 for (; pkts_to_arp;) {
1123 struct ether_addr hw_addr;
1124 struct mbuf_tcp_meta_data *meta_data_addr;
1125 struct ether_hdr *ehdr;
1126 struct rte_mbuf *pkt;
1128 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1129 /* bitmask representing only this packet */
1130 uint64_t pkt_mask = 1LLU << pos;
1131 /* remove this packet from remaining list */
1132 pkts_to_arp &= ~pkt_mask;
1134 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1136 phy_port = pkt->port;
1137 meta_data_addr = (struct mbuf_tcp_meta_data *)
1138 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1139 ehdr = rte_vfw_get_ether_addr(pkt);
1141 void *iphdr = RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1143 if (rte_vfw_is_IPv4(pkt)) {
1144 struct ipv4_hdr *ihdr = (struct ipv4_hdr *)iphdr;
1147 uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
1149 ret = local_get_nh_ipv4(dest_address, &dest_if,
/* Local route-cache miss: derive dest_if from port mapping
 * and seed the local cache for next time. */
1152 rte_sp_exchange_mac_addresses(ehdr);
1153 if (is_phy_port_privte(phy_port)) {
1155 dest_if = get_pub_to_prv_port(
1158 if (dest_if == INVALID_DESTIF) {
1159 pkts_mask &= ~pkt_mask;
1160 vfw_pipe->counters->
1161 pkts_drop_without_arp_entry++;
1163 do_local_nh_ipv4_cache(dest_if,
1169 dest_if = get_prv_to_pub_port(
1172 if (dest_if == INVALID_DESTIF) {
1173 pkts_mask &= ~pkt_mask;
1174 vfw_pipe->counters->
1175 pkts_drop_without_arp_entry++;
1177 do_local_nh_ipv4_cache(dest_if,
1182 } else if (is_phy_port_privte(phy_port)) {
1184 dest_if = get_prv_to_pub_port(
1187 if (dest_if == INVALID_DESTIF) {
1188 pkts_mask &= ~pkt_mask;
1189 vfw_pipe->counters->
1190 pkts_drop_without_arp_entry++;
1192 do_local_nh_ipv4_cache(dest_if,
1198 dest_if = get_pub_to_prv_port(
1201 if (dest_if == INVALID_DESTIF) {
1202 pkts_mask &= ~pkt_mask;
1203 vfw_pipe->counters->
1204 pkts_drop_without_arp_entry++;
1206 do_local_nh_ipv4_cache(dest_if,
1211 meta_data_addr->output_port =
1212 vfw_pipe->outport_id[dest_if];
1213 if (local_dest_mac_present(dest_if)) {
1214 ether_addr_copy(get_local_link_hw_addr(dest_if),
1216 ether_addr_copy(get_link_hw_addr(dest_if),
/* MAC cache miss: do the full ARP lookup and cache result. */
1219 ret_mac = get_dest_mac_addr_port(dest_address,
1220 &dest_if, &hw_addr);
1221 if (ret_mac == ARP_FOUND) {
1223 link_hw_laddr_valid[dest_if] = 1;
1224 memcpy(&link_hw_laddr[dest_if], &hw_addr,
1225 sizeof(struct ether_addr));
1227 ether_addr_copy(&hw_addr,
1229 ether_addr_copy(get_link_hw_addr(dest_if),
1232 if (vfw_debug >= DEBUG_LEVEL_4) {
1233 char buf[HW_ADDR_SIZE];
1235 ether_format_addr(buf, sizeof(buf),
1237 printf("MAC found for ip 0x%"PRIx32
1238 ",dest_if %d: %s, ",
1241 ether_format_addr(buf, sizeof(buf),
1243 printf("new eth hdr src: %s, ", buf);
1244 ether_format_addr(buf, sizeof(buf),
1246 printf("new eth hdr dst: %s\n", buf);
1251 if (vfw_debug >= DEBUG_LEVEL_4) {
1252 char buf[HW_ADDR_SIZE];
1254 ether_format_addr(buf, sizeof(buf),
1256 printf("MAC NOT FOUND for ip 0x%"
1262 /* ICMP req sent, drop packet by
1263 * changing the mask */
1264 pkts_mask &= ~pkt_mask;
1266 counters->pkts_drop_without_arp_entry++;
1270 } else if (likely(rte_vfw_is_IPv6(pkt))) {
1271 struct ipv6_hdr *ihdr = (struct ipv6_hdr *)iphdr;
1272 uint8_t dest_addr_ipv6[IPV6_ADD_SIZE];
1274 rte_mov16(dest_addr_ipv6, ihdr->dst_addr);
1276 uint8_t nh_ipv6[IPV6_ADD_SIZE];
1277 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
1278 ret = local_get_nh_ipv6(&dest_addr_ipv6[0], &dest_if,
1279 &nh_ipv6[0], vfw_pipe);
/* Same route-cache-miss handling as the IPv4 path, using
 * the ND tables instead of ARP. */
1281 rte_sp_exchange_mac_addresses(ehdr);
1282 if (is_phy_port_privte(phy_port)) {
1284 dest_if = get_pub_to_prv_port(
1288 if (dest_if == INVALID_DESTIF) {
1289 pkts_mask &= ~pkt_mask;
1290 vfw_pipe->counters->
1291 pkts_drop_without_arp_entry++;
1293 do_local_nh_ipv6_cache(dest_if,
1299 dest_if = get_prv_to_pub_port(
1303 if (dest_if == INVALID_DESTIF) {
1304 pkts_mask &= ~pkt_mask;
1305 vfw_pipe->counters->
1306 pkts_drop_without_arp_entry++;
1308 do_local_nh_ipv6_cache(dest_if,
1315 } else if (is_phy_port_privte(phy_port)) {
1317 dest_if = get_prv_to_pub_port(
1321 if (dest_if == INVALID_DESTIF) {
1322 pkts_mask &= ~pkt_mask;
1323 vfw_pipe->counters->
1324 pkts_drop_without_arp_entry++;
1326 do_local_nh_ipv6_cache(dest_if,
1332 dest_if = get_pub_to_prv_port(
1336 if (dest_if == INVALID_DESTIF) {
1337 pkts_mask &= ~pkt_mask;
1338 vfw_pipe->counters->
1339 pkts_drop_without_arp_entry++;
1341 do_local_nh_ipv6_cache(dest_if,
1346 meta_data_addr->output_port = vfw_pipe->
1347 outport_id[dest_if];
1349 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
1350 if (get_dest_mac_address_ipv6_port(
1355 ether_addr_copy(&hw_addr, &ehdr->d_addr);
1356 ether_addr_copy(get_link_hw_addr(dest_if),
1359 if (vfw_debug >= DEBUG_LEVEL_4) {
1360 char buf[HW_ADDR_SIZE];
1362 ether_format_addr(buf, sizeof(buf),
1364 printf("MAC found for dest_if %d: %s,",
1366 ether_format_addr(buf, sizeof(buf),
1368 printf("new eth hdr src: %s, ", buf);
1369 ether_format_addr(buf, sizeof(buf),
1371 printf("new eth hdr dst: %s\n", buf);
1375 printf("deleting ipv6\n");
1376 pkts_mask &= ~pkt_mask;
1377 /*Next Neighbor is not yet implemented
1379 vfw_pipe->counters->
1380 pkts_drop_without_arp_entry++;
1384 /* neither IPv4 or IPv6, drop quietly */
1385 pkts_mask &= ~pkt_mask;
1393 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1394 * To support synproxy, some (altered) packets may need to be sent back where
1395 * they came from. The ip header has already been adjusted, but the ethernet
1396 * header has not, so this must be performed here.
1397 * Return an updated pkts_mask, since arp may drop some packets
1400 * A pointer to the packet array.
1402 * Packet num to start processing
1405 * @param synproxy_reply_mask
1406 * Reply Packet mask for Synproxy
1408 * A pointer to VFW pipeline.
/*
 * NOTE(review): this listing is a lossy extract (embedded line numbers jump),
 * so some statements, arguments and braces are not visible here.
 *
 * Per-group-of-4 IPv4 next-hop/ARP work: for each of 4 packets starting at
 * pkt_num whose bit is set in *pkts_mask, resolve the destination interface,
 * rewrite the ethernet src/dst MACs, and clear the packet's bit in *pkts_mask
 * when no route/ARP entry exists (the caller drops cleared packets).
 * synproxy_reply_mask marks packets that must be sent back where they came
 * from (MACs swapped instead of looked up).
 */
1411 pkt4_work_vfw_arp_ipv4_packets(struct rte_mbuf **pkts,
1413 uint64_t *pkts_mask,
1414 uint64_t synproxy_reply_mask,
1415 struct pipeline_vfw *vfw_pipe)
1422 struct ether_addr hw_addr;
1423 struct mbuf_tcp_meta_data *meta_data_addr;
1424 struct ether_hdr *ehdr;
1425 struct rte_mbuf *pkt;
/* unrolled group of 4 packets, offsets pkt_num .. pkt_num+3 */
1428 for (i = 0; i < 4; i++) {
1429 uint32_t dest_if = INVALID_DESTIF;
1430 /* bitmask representing only this packet */
1431 uint64_t pkt_mask = 1LLU << (pkt_num + i);
/* skip packets already invalidated by earlier stages */
1435 if(!(*pkts_mask & pkt_mask))
1438 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1440 phy_port = pkt->port;
1441 meta_data_addr = (struct mbuf_tcp_meta_data *)
1442 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1443 ehdr = rte_vfw_get_ether_addr(pkt);
1446 struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
1447 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
/* dst_addr is network byte order; convert to host order for lookups */
1450 uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
/* first try the thread-local next-hop cache */
1452 ret = local_get_nh_ipv4(dest_address, &dest_if,
/* synproxy reply: swap src/dst MACs to return the packet */
1455 rte_sp_exchange_mac_addresses(ehdr);
1456 if (is_phy_port_privte(phy_port)) {
1458 dest_if = get_pub_to_prv_port(
1461 if (dest_if == INVALID_DESTIF) {
1462 *pkts_mask &= ~pkt_mask;
1463 vfw_pipe->counters->
1464 pkts_drop_without_arp_entry++;
/* cache the resolved mapping for subsequent packets */
1466 do_local_nh_ipv4_cache(
1472 dest_if = get_prv_to_pub_port(
1475 if (dest_if == INVALID_DESTIF) {
1476 *pkts_mask &= ~pkt_mask;
1477 vfw_pipe->counters->
1478 pkts_drop_without_arp_entry++;
1480 do_local_nh_ipv4_cache(dest_if,
1484 } else if (is_phy_port_privte(phy_port)) {
1486 dest_if = get_prv_to_pub_port(&dest_address,
1488 if (dest_if == INVALID_DESTIF) {
1489 *pkts_mask &= ~pkt_mask;
1490 vfw_pipe->counters->
1491 pkts_drop_without_arp_entry++;
1493 do_local_nh_ipv4_cache(dest_if, vfw_pipe);
1498 dest_if = get_pub_to_prv_port(&dest_address,
1500 if (dest_if == INVALID_DESTIF) {
1501 *pkts_mask &= ~pkt_mask;
1502 vfw_pipe->counters->
1503 pkts_drop_without_arp_entry++;
1505 do_local_nh_ipv4_cache(dest_if, vfw_pipe);
/* steer the packet to the pipeline out-port for dest_if */
1509 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
/* fast path: destination MAC already in the local per-port cache */
1510 if (local_dest_mac_present(dest_if)) {
1511 ether_addr_copy(get_local_link_hw_addr(dest_if),
1513 ether_addr_copy(get_link_hw_addr(dest_if),
/* slow path: consult the shared ARP table */
1516 ret_mac = get_dest_mac_addr_port(dest_address,
1517 &dest_if, &hw_addr);
1518 if (ret_mac == ARP_FOUND) {
/* populate local cache so later packets hit the fast path */
1520 link_hw_laddr_valid[dest_if] = 1;
1521 memcpy(&link_hw_laddr[dest_if], &hw_addr,
1522 sizeof(struct ether_addr));
1524 ether_addr_copy(&hw_addr, &ehdr->d_addr);
1525 ether_addr_copy(get_link_hw_addr(dest_if),
1528 if (vfw_debug >= DEBUG_LEVEL_4) {
1529 char buf[HW_ADDR_SIZE];
1531 ether_format_addr(buf, sizeof(buf),
1533 printf("MAC found for ip 0x%"
1534 PRIx32", dest_if %d: %s, ",
1537 ether_format_addr(buf, sizeof(buf),
1539 printf("new eth hdr src: %s, ", buf);
1540 ether_format_addr(buf, sizeof(buf),
1542 printf("new eth hdr dst: %s\n", buf);
1547 if (vfw_debug >= DEBUG_LEVEL_4) {
1548 char buf[HW_ADDR_SIZE];
1550 ether_format_addr(buf, sizeof(buf),
1552 printf("MAC NOT FOUND for ip 0x%"
1558 /* ICMP req sent, drop packet by
1559 * changing the mask */
1560 *pkts_mask &= ~pkt_mask;
1562 counters->pkts_drop_without_arp_entry++;
1570 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1571 * To support synproxy, some (altered) packets may need to be sent back where
1572 * they came from. The ip header has already been adjusted, but the ethernet
1573 * header has not, so this must be performed here.
1574 * Return an updated pkts_mask, since arp may drop some packets
1577 * A pointer to the packet.
1579 * Packet number to process
1581 * Packet mask pointer
1582 * @param synproxy_reply_mask
1583 * Reply Packet mask for Synproxy
1585 * A pointer to VFW pipeline.
/*
 * NOTE(review): lossy extract — some lines are missing from this view.
 *
 * Single-packet variant of pkt4_work_vfw_arp_ipv4_packets: resolves the
 * IPv4 next hop for packet pkt_num, rewrites the ethernet MACs, and clears
 * the packet's bit in *pkts_mask when no route/ARP entry exists.
 */
1588 pkt_work_vfw_arp_ipv4_packets(struct rte_mbuf *pkts,
1590 uint64_t *pkts_mask,
1591 uint64_t synproxy_reply_mask,
1592 struct pipeline_vfw *vfw_pipe)
1596 uint32_t dest_if = INVALID_DESTIF;
1599 struct ether_addr hw_addr;
1600 struct mbuf_tcp_meta_data *meta_data_addr;
1601 struct ether_hdr *ehdr;
1602 struct rte_mbuf *pkt;
/* bitmask representing only this packet */
1604 uint64_t pkt_mask = 1LLU << pkt_num;
/* only process if an earlier stage has not invalidated the packet */
1608 if(*pkts_mask & pkt_mask) {
1610 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1612 phy_port = pkt->port;
1613 meta_data_addr = (struct mbuf_tcp_meta_data *)
1614 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1615 ehdr = rte_vfw_get_ether_addr(pkt);
1618 struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
1619 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
/* convert network-order dst_addr to host order for lookups */
1622 uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
/* thread-local next-hop cache probe first */
1624 ret = local_get_nh_ipv4(dest_address, &dest_if,
/* synproxy reply: swap src/dst MACs to return the packet */
1627 rte_sp_exchange_mac_addresses(ehdr);
1628 if (is_phy_port_privte(phy_port)) {
1630 dest_if = get_pub_to_prv_port(
1633 if (dest_if == INVALID_DESTIF) {
1634 *pkts_mask &= ~pkt_mask;
1635 vfw_pipe->counters->
1636 pkts_drop_without_arp_entry++;
1638 do_local_nh_ipv4_cache(
1644 dest_if = get_prv_to_pub_port(
1647 if (dest_if == INVALID_DESTIF) {
1648 *pkts_mask &= ~pkt_mask;
1649 vfw_pipe->counters->
1650 pkts_drop_without_arp_entry++;
1652 do_local_nh_ipv4_cache(dest_if,
1656 } else if (is_phy_port_privte(phy_port)) {
1658 dest_if = get_prv_to_pub_port(&dest_address,
1660 if (dest_if == INVALID_DESTIF) {
1661 *pkts_mask &= ~pkt_mask;
1662 vfw_pipe->counters->
1663 pkts_drop_without_arp_entry++;
1665 do_local_nh_ipv4_cache(dest_if, vfw_pipe);
1670 dest_if = get_pub_to_prv_port(&dest_address,
1672 if (dest_if == INVALID_DESTIF) {
1673 *pkts_mask &= ~pkt_mask;
1674 vfw_pipe->counters->
1675 pkts_drop_without_arp_entry++;
1677 do_local_nh_ipv4_cache(dest_if, vfw_pipe);
/* steer packet to the pipeline out-port for dest_if */
1681 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
/* fast path: MAC already in the local per-port cache */
1682 if (local_dest_mac_present(dest_if)) {
1683 ether_addr_copy(get_local_link_hw_addr(dest_if),
1685 ether_addr_copy(get_link_hw_addr(dest_if),
/* slow path: shared ARP table lookup */
1688 ret_mac = get_dest_mac_addr_port(dest_address,
1689 &dest_if, &hw_addr);
1691 link_hw_laddr_valid[dest_if] = 1;
1692 memcpy(&link_hw_laddr[dest_if], &hw_addr,
1693 sizeof(struct ether_addr));
1695 ether_addr_copy(&hw_addr, &ehdr->d_addr);
1696 ether_addr_copy(get_link_hw_addr(dest_if),
1699 if (vfw_debug >= DEBUG_LEVEL_4) {
1700 char buf[HW_ADDR_SIZE];
1702 ether_format_addr(buf, sizeof(buf),
1704 printf("MAC found for ip 0x%"
1705 PRIx32", dest_if %d: %s, ",
1708 ether_format_addr(buf, sizeof(buf),
1710 printf("new eth hdr src: %s, ", buf);
1711 ether_format_addr(buf, sizeof(buf),
1713 printf("new eth hdr dst: %s\n", buf);
1717 if (vfw_debug >= DEBUG_LEVEL_4) {
1718 char buf[HW_ADDR_SIZE];
1720 ether_format_addr(buf, sizeof(buf),
1722 printf("MAC NOT FOUND for ip 0x%"
1728 /* ICMP req sent, drop packet by
1729 * changing the mask */
1730 *pkts_mask &= ~pkt_mask;
1732 counters->pkts_drop_without_arp_entry++;
1741 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1742 * To support synproxy, some (altered) packets may need to be sent back where
1743 * they came from. The ip header has already been adjusted, but the ethernet
1744 * header has not, so this must be performed here.
1745 * Return an updated pkts_mask, since arp may drop some packets
1748 * A pointer to the packets array.
1750 * Packet number to start processing.
1752 * Packet mask pointer
1753 * @param synproxy_reply_mask
1754 * Reply Packet mask for Synproxy
1756 * A pointer to VFW pipeline.
/*
 * NOTE(review): lossy extract — some lines are missing from this view.
 *
 * Per-group-of-4 IPv6 next-hop work: for each of 4 packets starting at
 * pkt_num whose bit is set in *pkts_mask, resolve the destination interface,
 * rewrite the ethernet MACs via the IPv6 neighbor table, and clear the
 * packet's mask bit on failure (ND not implemented; packet is dropped).
 */
1760 pkt4_work_vfw_arp_ipv6_packets(struct rte_mbuf **pkts,
1762 uint64_t *pkts_mask,
1763 uint64_t synproxy_reply_mask,
1764 struct pipeline_vfw *vfw_pipe)
1766 uint8_t nh_ipv6[IPV6_ADD_SIZE];
1768 struct ether_addr hw_addr;
1769 struct mbuf_tcp_meta_data *meta_data_addr;
1770 struct ether_hdr *ehdr;
1771 struct rte_mbuf *pkt;
/* unrolled group of 4 packets, offsets pkt_num .. pkt_num+3 */
1775 for (i = 0; i < 4; i++) {
1776 uint32_t dest_if = INVALID_DESTIF;
1777 /* bitmask representing only this packet */
1778 uint64_t pkt_mask = 1LLU << (pkt_num + i);
/* skip packets already invalidated by earlier stages */
1782 if(!(*pkts_mask & pkt_mask))
1784 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1786 phy_port = pkt->port;
1787 meta_data_addr = (struct mbuf_tcp_meta_data *)
1788 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1789 ehdr = rte_vfw_get_ether_addr(pkt);
1791 struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
1792 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1794 uint8_t nhip[IPV6_ADD_SIZE];
1795 uint8_t dest_address[IPV6_ADD_SIZE];
1797 memset(nhip, 0, IPV6_ADD_SIZE);
/* copy the 16-byte IPv6 destination out of the header */
1799 rte_mov16(dest_address, ihdr->dst_addr);
/* thread-local IPv6 next-hop cache probe */
1800 ret = local_get_nh_ipv6(&dest_address[0], &dest_if,
1801 &nhip[0], vfw_pipe);
/* synproxy reply: swap src/dst MACs to return the packet */
1803 rte_sp_exchange_mac_addresses(ehdr);
1804 if (is_phy_port_privte(phy_port)) {
1806 dest_if = get_pub_to_prv_port(
1810 if (dest_if == INVALID_DESTIF) {
1811 *pkts_mask &= ~pkt_mask;
1812 vfw_pipe->counters->
1813 pkts_drop_without_arp_entry++;
1815 do_local_nh_ipv6_cache(dest_if,
1821 dest_if = get_prv_to_pub_port(
1825 if (dest_if == INVALID_DESTIF) {
1826 *pkts_mask &= ~pkt_mask;
1827 vfw_pipe->counters->
1828 pkts_drop_without_arp_entry++;
1830 do_local_nh_ipv6_cache(dest_if,
1835 } else if (is_phy_port_privte(phy_port)) {
1837 dest_if = get_prv_to_pub_port((uint32_t *)
1838 &dest_address[0], IP_VERSION_6);
1839 if (dest_if == INVALID_DESTIF) {
1840 *pkts_mask &= ~pkt_mask;
1841 vfw_pipe->counters->
1842 pkts_drop_without_arp_entry++;
1844 do_local_nh_ipv6_cache(dest_if, vfw_pipe);
1849 dest_if = get_pub_to_prv_port((uint32_t *)
1850 &dest_address[0], IP_VERSION_6);
1851 if (dest_if == INVALID_DESTIF) {
1852 *pkts_mask &= ~pkt_mask;
1853 vfw_pipe->counters->
1854 pkts_drop_without_arp_entry++;
1857 do_local_nh_ipv6_cache(dest_if, vfw_pipe);
/* steer the packet to the pipeline out-port for dest_if */
1862 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
1864 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
/* neighbor table lookup; on success rewrite both MACs */
1865 if (get_dest_mac_address_ipv6_port(
1870 ether_addr_copy(&hw_addr, &ehdr->d_addr);
1871 ether_addr_copy(get_link_hw_addr(dest_if),
1874 if (vfw_debug >= DEBUG_LEVEL_4) {
1875 char buf[HW_ADDR_SIZE];
1877 ether_format_addr(buf, sizeof(buf),
1879 printf("MAC found for dest_if %d: %s, ",
1881 ether_format_addr(buf, sizeof(buf),
1883 printf("new eth hdr src: %s, ", buf);
1884 ether_format_addr(buf, sizeof(buf),
1886 printf("new eth hdr dst: %s\n", buf);
1890 printf("deleting ipv6\n");
1891 *pkts_mask &= ~pkt_mask;
1892 /*Next Neighbor is not yet implemented
1894 vfw_pipe->counters->
1895 pkts_drop_without_arp_entry++;
1903 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1904 * To support synproxy, some (altered) packets may need to be sent back where
1905 * they came from. The ip header has already been adjusted, but the ethernet
1906 * header has not, so this must be performed here.
1907 * Return an updated pkts_mask, since arp may drop some packets
1910 * A pointer to the packets.
1912 * Packet number to process.
1914 * Packet mask pointer
1915 * @param synproxy_reply_mask
1916 * Reply Packet mask for Synproxy
1918 * A pointer to VFW pipeline.
/*
 * NOTE(review): lossy extract — some lines are missing from this view.
 *
 * Single-packet variant of pkt4_work_vfw_arp_ipv6_packets: resolves the
 * IPv6 next hop for packet pkt_num, rewrites the ethernet MACs, and clears
 * the packet's bit in *pkts_mask on failure (ND not implemented; dropped).
 */
1922 pkt_work_vfw_arp_ipv6_packets(struct rte_mbuf *pkts,
1924 uint64_t *pkts_mask,
1925 uint64_t synproxy_reply_mask,
1926 struct pipeline_vfw *vfw_pipe)
1928 uint8_t nh_ipv6[IPV6_ADD_SIZE];
1930 struct ether_addr hw_addr;
1931 struct mbuf_tcp_meta_data *meta_data_addr;
1932 struct ether_hdr *ehdr;
1933 struct rte_mbuf *pkt;
1936 uint32_t dest_if = INVALID_DESTIF;
1937 /* bitmask representing only this packet */
1938 uint64_t pkt_mask = 1LLU << pkt_num;
/* only process if an earlier stage has not invalidated the packet */
1942 if(*pkts_mask & pkt_mask) {
1944 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1946 phy_port = pkt->port;
1947 meta_data_addr = (struct mbuf_tcp_meta_data *)
1948 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1949 ehdr = rte_vfw_get_ether_addr(pkt);
1951 struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
1952 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1954 uint8_t nhip[IPV6_ADD_SIZE];
1955 uint8_t dest_address[IPV6_ADD_SIZE];
1957 memset(nhip, 0, IPV6_ADD_SIZE);
/* copy the 16-byte IPv6 destination out of the header */
1959 rte_mov16(dest_address, ihdr->dst_addr);
/* thread-local IPv6 next-hop cache probe */
1960 ret = local_get_nh_ipv6(&dest_address[0], &dest_if,
1961 &nhip[0], vfw_pipe);
/* synproxy reply: swap src/dst MACs to return the packet */
1963 rte_sp_exchange_mac_addresses(ehdr);
1964 if (is_phy_port_privte(phy_port)) {
1966 dest_if = get_pub_to_prv_port(
1970 if (dest_if == INVALID_DESTIF) {
1971 *pkts_mask &= ~pkt_mask;
1972 vfw_pipe->counters->
1973 pkts_drop_without_arp_entry++;
1975 do_local_nh_ipv6_cache(dest_if,
1981 dest_if = get_prv_to_pub_port(
1985 if (dest_if == INVALID_DESTIF) {
1986 *pkts_mask &= ~pkt_mask;
1987 vfw_pipe->counters->
1988 pkts_drop_without_arp_entry++;
1990 do_local_nh_ipv6_cache(dest_if,
1995 } else if (is_phy_port_privte(phy_port)) {
1997 dest_if = get_prv_to_pub_port((uint32_t *)
1998 &dest_address[0], IP_VERSION_6);
1999 if (dest_if == INVALID_DESTIF) {
2000 *pkts_mask &= ~pkt_mask;
2001 vfw_pipe->counters->
2002 pkts_drop_without_arp_entry++;
2004 do_local_nh_ipv6_cache(dest_if, vfw_pipe);
2009 dest_if = get_pub_to_prv_port((uint32_t *)
2010 &dest_address[0], IP_VERSION_6);
2011 if (dest_if == INVALID_DESTIF) {
2012 *pkts_mask &= ~pkt_mask;
2013 vfw_pipe->counters->
2014 pkts_drop_without_arp_entry++;
2017 do_local_nh_ipv6_cache(dest_if, vfw_pipe);
/* steer the packet to the pipeline out-port for dest_if */
2022 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
2024 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
/* neighbor table lookup; on success rewrite both MACs */
2025 if (get_dest_mac_address_ipv6_port(
2030 ether_addr_copy(&hw_addr, &ehdr->d_addr);
2031 ether_addr_copy(get_link_hw_addr(dest_if),
2034 if (vfw_debug >= DEBUG_LEVEL_4) {
2035 char buf[HW_ADDR_SIZE];
2037 ether_format_addr(buf, sizeof(buf),
2039 printf("MAC found for dest_if %d: %s, ",
2041 ether_format_addr(buf, sizeof(buf),
2043 printf("new eth hdr src: %s, ", buf);
2044 ether_format_addr(buf, sizeof(buf),
2046 printf("new eth hdr dst: %s\n", buf);
2050 printf("deleting ipv6\n");
2051 *pkts_mask &= ~pkt_mask;
2052 /*Next Neighbor is not yet implemented
2054 vfw_pipe->counters->
2055 pkts_drop_without_arp_entry++;
2065 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
2066 * To support synproxy, some (altered) packets may need to be sent back where
2067 * they came from. The ip header has already been adjusted, but the ethernet
2068 * header has not, so this must be performed here.
2069 * Return an updated pkts_mask, since arp may drop some packets
2072 * A pointer to the packet.
2075 * @param synproxy_reply_mask
2076 * Reply Packet mask for Synproxy
2078 * A pointer to VFW pipeline.
/*
 * NOTE(review): lossy extract — some lines are missing from this view.
 *
 * Mask-driven (non-unrolled) IPv4 ARP pass: iterates the set bits of
 * pkts_mask with ctzll, resolves next hop and MACs per packet, clears the
 * mask bit when resolution fails, and returns the updated mask.
 */
2081 rte_vfw_arp_ipv4_packets(struct rte_mbuf **pkts,
2083 uint64_t synproxy_reply_mask,
2084 struct pipeline_vfw *vfw_pipe)
2086 uint64_t pkts_to_arp = pkts_mask;
2089 uint32_t dest_if = INVALID_DESTIF;
/* iterate over set bits only; pkts_to_arp is the work list */
2091 for (; pkts_to_arp;) {
2092 struct ether_addr hw_addr;
2093 struct mbuf_tcp_meta_data *meta_data_addr;
2094 struct ether_hdr *ehdr;
2095 struct rte_mbuf *pkt;
/* index of lowest set bit = next packet to process */
2098 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
2099 /* bitmask representing only this packet */
2100 uint64_t pkt_mask = 1LLU << pos;
2101 /* remove this packet from remaining list */
2102 pkts_to_arp &= ~pkt_mask;
2104 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
2106 phy_port = pkt->port;
2107 meta_data_addr = (struct mbuf_tcp_meta_data *)
2108 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
2109 ehdr = rte_vfw_get_ether_addr(pkt);
2112 struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
2113 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
/* convert network-order dst_addr to host order for lookups */
2116 uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
2118 ret = local_get_nh_ipv4(dest_address, &dest_if,
/* synproxy reply: swap src/dst MACs to return the packet */
2121 rte_sp_exchange_mac_addresses(ehdr);
2122 if (is_phy_port_privte(phy_port)) {
2124 dest_if = get_pub_to_prv_port(
2127 if (dest_if == INVALID_DESTIF) {
2128 pkts_mask &= ~pkt_mask;
2129 vfw_pipe->counters->
2130 pkts_drop_without_arp_entry++;
2132 do_local_nh_ipv4_cache(
2138 dest_if = get_prv_to_pub_port(
2141 if (dest_if == INVALID_DESTIF) {
2142 pkts_mask &= ~pkt_mask;
2143 vfw_pipe->counters->
2144 pkts_drop_without_arp_entry++;
2146 do_local_nh_ipv4_cache(dest_if,
2150 } else if (is_phy_port_privte(phy_port)) {
2152 dest_if = get_prv_to_pub_port(&dest_address,
2154 if (dest_if == INVALID_DESTIF) {
2155 pkts_mask &= ~pkt_mask;
2156 vfw_pipe->counters->
2157 pkts_drop_without_arp_entry++;
2159 do_local_nh_ipv4_cache(dest_if, vfw_pipe);
2164 dest_if = get_pub_to_prv_port(&dest_address,
2166 if (dest_if == INVALID_DESTIF) {
2167 pkts_mask &= ~pkt_mask;
2168 vfw_pipe->counters->
2169 pkts_drop_without_arp_entry++;
2171 do_local_nh_ipv4_cache(dest_if, vfw_pipe);
/* steer the packet to the pipeline out-port for dest_if */
2175 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
/* fast path: MAC already in the local per-port cache */
2176 if (local_dest_mac_present(dest_if)) {
2177 ether_addr_copy(get_local_link_hw_addr(dest_if),
2179 ether_addr_copy(get_link_hw_addr(dest_if),
/* slow path: shared ARP table lookup */
2182 ret_mac = get_dest_mac_addr_port(dest_address,
2183 &dest_if, &hw_addr);
2185 link_hw_laddr_valid[dest_if] = 1;
2186 memcpy(&link_hw_laddr[dest_if], &hw_addr,
2187 sizeof(struct ether_addr));
2189 ether_addr_copy(&hw_addr, &ehdr->d_addr);
2190 ether_addr_copy(get_link_hw_addr(dest_if),
2193 if (vfw_debug >= DEBUG_LEVEL_4) {
2194 char buf[HW_ADDR_SIZE];
2196 ether_format_addr(buf, sizeof(buf),
2198 printf("MAC found for ip 0x%"
2199 PRIx32", dest_if %d: %s, ",
2202 ether_format_addr(buf, sizeof(buf),
2204 printf("new eth hdr src: %s, ", buf);
2205 ether_format_addr(buf, sizeof(buf),
2207 printf("new eth hdr dst: %s\n", buf);
/* unresolved: trigger an ARP request for the next hop */
2211 if (unlikely(ret_mac == 0))
2212 request_arp(meta_data_addr->output_port,
2215 if (vfw_debug >= DEBUG_LEVEL_4) {
2216 char buf[HW_ADDR_SIZE];
2218 ether_format_addr(buf, sizeof(buf),
2220 printf("MAC NOT FOUND for ip 0x%"
2226 /* ICMP req sent, drop packet by
2227 * changing the mask */
2228 pkts_mask &= ~pkt_mask;
2230 counters->pkts_drop_without_arp_entry++;
2239 * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
2240 * To support synproxy, some (altered) packets may need to be sent back where
2241 * they came from. The ip header has already been adjusted, but the ethernet
2242 * header has not, so this must be performed here.
2243 * Return an updated pkts_mask, since arp may drop some packets
2246 * A pointer to the packet.
2249 * @param synproxy_reply_mask
2250 * Reply Packet mask for Synproxy
2252 * A pointer to VFW pipeline.
/*
 * NOTE(review): lossy extract — some lines are missing from this view.
 *
 * Mask-driven (non-unrolled) IPv6 neighbor pass: iterates the set bits of
 * pkts_mask with ctzll, resolves next hop and MACs per packet, clears the
 * mask bit when resolution fails, and returns the updated mask.
 */
2256 rte_vfw_arp_ipv6_packets(struct rte_mbuf **pkts,
2258 uint64_t synproxy_reply_mask,
2259 struct pipeline_vfw *vfw_pipe)
2261 uint64_t pkts_to_arp = pkts_mask;
2262 uint8_t nh_ipv6[IPV6_ADD_SIZE];
2264 uint32_t dest_if = INVALID_DESTIF;
/* iterate over set bits only; pkts_to_arp is the work list */
2266 for (; pkts_to_arp;) {
2267 struct ether_addr hw_addr;
2268 struct mbuf_tcp_meta_data *meta_data_addr;
2269 struct ether_hdr *ehdr;
2270 struct rte_mbuf *pkt;
/* index of lowest set bit = next packet to process */
2273 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
2274 /* bitmask representing only this packet */
2275 uint64_t pkt_mask = 1LLU << pos;
2276 /* remove this packet from remaining list */
2277 pkts_to_arp &= ~pkt_mask;
2279 int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
2281 phy_port = pkt->port;
2282 meta_data_addr = (struct mbuf_tcp_meta_data *)
2283 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
2284 ehdr = rte_vfw_get_ether_addr(pkt);
2286 struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
2287 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
2289 uint8_t nhip[IPV6_ADD_SIZE];
2290 uint8_t dest_address[IPV6_ADD_SIZE];
2292 memset(nhip, 0, IPV6_ADD_SIZE);
/* copy the 16-byte IPv6 destination out of the header */
2294 rte_mov16(dest_address, ihdr->dst_addr);
2295 ret = local_get_nh_ipv6(&dest_address[0], &dest_if,
2296 &nhip[0], vfw_pipe);
/* synproxy reply: swap src/dst MACs to return the packet */
2298 rte_sp_exchange_mac_addresses(ehdr);
2299 if (is_phy_port_privte(phy_port)) {
2301 dest_if = get_pub_to_prv_port(
2305 if (dest_if == INVALID_DESTIF) {
2306 pkts_mask &= ~pkt_mask;
2307 vfw_pipe->counters->
2308 pkts_drop_without_arp_entry++;
2310 do_local_nh_ipv6_cache(dest_if,
2316 dest_if = get_prv_to_pub_port(
2320 if (dest_if == INVALID_DESTIF) {
2321 pkts_mask &= ~pkt_mask;
2322 vfw_pipe->counters->
2323 pkts_drop_without_arp_entry++;
2325 do_local_nh_ipv6_cache(dest_if,
2330 } else if (is_phy_port_privte(phy_port)) {
2332 dest_if = get_prv_to_pub_port((uint32_t *)
2333 &dest_address[0], IP_VERSION_6);
2334 if (dest_if == INVALID_DESTIF) {
2335 pkts_mask &= ~pkt_mask;
2336 vfw_pipe->counters->
2337 pkts_drop_without_arp_entry++;
2339 do_local_nh_ipv6_cache(dest_if, vfw_pipe);
2344 dest_if = get_pub_to_prv_port((uint32_t *)
2345 &dest_address[0], IP_VERSION_6);
2346 if (dest_if == INVALID_DESTIF) {
2347 pkts_mask &= ~pkt_mask;
2348 vfw_pipe->counters->
2349 pkts_drop_without_arp_entry++;
2352 do_local_nh_ipv6_cache(dest_if, vfw_pipe);
/* steer the packet to the pipeline out-port for dest_if */
2357 meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
2359 memset(nh_ipv6, 0, IPV6_ADD_SIZE);
/* neighbor table lookup; on success rewrite both MACs */
2360 if (get_dest_mac_address_ipv6_port(
2365 ether_addr_copy(&hw_addr, &ehdr->d_addr);
2366 ether_addr_copy(get_link_hw_addr(dest_if),
2369 if (vfw_debug >= DEBUG_LEVEL_4) {
2370 char buf[HW_ADDR_SIZE];
2372 ether_format_addr(buf, sizeof(buf),
2374 printf("MAC found for dest_if %d: %s, ",
2376 ether_format_addr(buf, sizeof(buf),
2378 printf("new eth hdr src: %s, ", buf);
2379 ether_format_addr(buf, sizeof(buf),
2381 printf("new eth hdr dst: %s\n", buf);
2385 printf("deleting ipv6\n");
2386 pkts_mask &= ~pkt_mask;
2387 /*Next Neighbor is not yet implemented
2389 vfw_pipe->counters->
2390 pkts_drop_without_arp_entry++;
2401 * Packets processing for connection tracking.
2404 * A pointer to the pipeline.
2406 * A pointer to the connetion tracker .
2408 * A pointer to a burst of packets.
2409 * @param packet_mask_in
2410 * Input packets Mask.
/*
 * NOTE(review): lossy extract — some lines are missing from this view.
 *
 * Re-run the connection tracker (synproxy-aware lookup) over packets that
 * were buffered during synproxy setup and are now being released. Buffered
 * packets should never trigger hijack or reply again, so either condition
 * is reported as a severe error.
 */
2414 vfw_process_buffered_pkts(__rte_unused struct pipeline_vfw *vfw_pipe,
2415 struct rte_ct_cnxn_tracker *ct,
2416 struct rte_mbuf **pkts, uint64_t packet_mask_in)
2418 uint64_t keep_mask = packet_mask_in;
2419 struct rte_synproxy_helper sp_helper; /* for synproxy */
2422 rte_ct_cnxn_tracker_batch_lookup_with_synproxy(ct, pkts, keep_mask,
/* buffered packets must not be hijacked or replied-to a second time */
2425 if (unlikely(sp_helper.hijack_mask))
2426 printf("buffered hijack pkts severe error\n");
2428 if (unlikely(sp_helper.reply_pkt_mask))
2429 printf("buffered reply pkts severe error\n");
2435 * Free Packets from mbuf.
2438 * A pointer to the connection tracker to increment drop counter.
2441 * Packet to be free.
/*
 * Drop helper: bump the connection tracker's drop counter, then free the
 * mbuf back to its pool.
 */
2444 vfw_pktmbuf_free(struct rte_ct_cnxn_tracker *ct, struct rte_mbuf *pkt)
2446 ct->counters->pkts_drop++;
2447 rte_pktmbuf_free(pkt);
/*
 * NOTE(review): lossy extract — some lines are missing from this view.
 *
 * Walk the first num_pkts entries of pkts: packets whose bit is set in
 * pkts_mask are inserted into their recorded pipeline out-port; packets
 * whose bit is clear are counted as drops and freed.
 */
2451 vfw_output_or_delete_buffered_packets(struct rte_ct_cnxn_tracker *ct,
2452 struct rte_pipeline *p,
2453 struct rte_mbuf **pkts,
2454 int num_pkts, uint64_t pkts_mask)
2457 struct mbuf_tcp_meta_data *meta_data_addr;
2458 uint64_t pkt_mask = 1;
2460 /* any clear bits in low-order num_pkts bit of
2461 * pkt_mask must be discarded */
2463 for (i = 0; i < num_pkts; i++) {
2464 struct rte_mbuf *pkt = pkts[i];
2466 if (pkts_mask & pkt_mask) {
2467 printf("vfw_output_or_delete_buffered_packets\n");
/* out-port was stored in the mbuf metadata by the ARP stage */
2468 meta_data_addr = (struct mbuf_tcp_meta_data *)
2469 RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
2470 rte_pipeline_port_out_packet_insert(
2471 p, meta_data_addr->output_port, pkt);
/* mask bit clear: count as drop and free */
2474 vfw_pktmbuf_free(ct, pkt);
/* advance to this packet's neighbor bit */
2477 pkt_mask = pkt_mask << 1;
2482 *Packet buffered for synproxy.
2485 * A pointer to the pipeline.
2487 * A pointer to the vfw pipeline.
2489 * A pointer to the connection tracker.
2490 * @param forward_pkts
2491 * Packet forwarded by synproxy.
/*
 * NOTE(review): lossy extract — some lines are missing from this view.
 *
 * Drain the connection tracker's list of packets buffered during synproxy
 * setup. Packets are detached from the linked list (chained through the
 * mbuf metadata 'next' field) into vfw_pipe->pkt_buffer and processed in
 * batches of PKT_BUFFER_SIZE, with a final partial batch at the end.
 */
2495 vfw_handle_buffered_packets(struct rte_pipeline *p,
2496 struct pipeline_vfw *vfw_pipe,
2497 struct rte_ct_cnxn_tracker *ct, int forward_pkts)
2499 struct rte_mbuf *pkt_list = rte_ct_get_buffered_synproxy_packets(ct);
/* common case: nothing buffered; list is non-NULL only during proxy setup */
2501 if (likely(pkt_list == NULL)) /* only during proxy setup is != NULL */
2505 uint64_t keep_mask = 0;
2506 struct rte_mbuf **pkts = vfw_pipe->pkt_buffer;
2507 struct rte_mbuf *pkt;
2509 while (pkt_list != NULL) {
2510 struct mbuf_tcp_meta_data *meta_data =
2511 (struct mbuf_tcp_meta_data *)
2512 RTE_MBUF_METADATA_UINT32_PTR(pkt_list, META_DATA_OFFSET);
2514 /* detach head of list and advance list */
2516 pkt_list = meta_data->next;
2520 pkts[pkt_count++] = pkt;
2522 if (pkt_count == PKT_BUFFER_SIZE) {
2523 /* need to send out packets */
2524 /* currently 0, set all bits */
2525 keep_mask = ~keep_mask;
2528 vfw_process_buffered_pkts(vfw_pipe,
2531 vfw_output_or_delete_buffered_packets(
/* not forwarding: count as drop and free */
2541 vfw_pktmbuf_free(ct, pkt);
/* flush the final partial batch */
2545 if (pkt_count != 0) {
2546 /* need to send out packets */
2547 keep_mask = RTE_LEN2MASK(pkt_count, uint64_t);
2550 vfw_process_buffered_pkts(vfw_pipe, ct, pkts,
2553 vfw_output_or_delete_buffered_packets(ct, p, pkts, pkt_count,
2562 * The pipeline port-in action is used to do all the firewall and
2563 * connection tracking work.
2566 * A pointer to the pipeline.
2568 * A pointer to a burst of packets.
2570 * Number of packets to process.
2572 * A pointer to pipeline specific data.
2575 * 0 on success, negative on error.
/*
 * NOTE(review): lossy extract — some lines are missing from this view.
 *
 * Pipeline port-in action (mixed IPv4/IPv6 path): runs the full receive
 * sequence — release buffered synproxy packets, firewall filter, ACL
 * lookup, connection tracking, then ARP/MAC rewrite — maintaining a 64-bit
 * keep_mask of still-valid packets; anything cleared from the mask is
 * dropped at the end, and hijacked packets are handed back to the pipeline.
 */
2579 vfw_port_in_action(struct rte_pipeline *p,
2580 struct rte_mbuf **pkts,
2581 __rte_unused uint32_t n_pkts, __rte_unused void *arg)
2583 struct vfw_ports_in_args *port_in_args =
2584 (struct vfw_ports_in_args *)arg;
2585 struct pipeline_vfw *vfw_pipe =
2586 (struct pipeline_vfw *)port_in_args->pipe;
2587 struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
/* start per-burst latency measurement (ended below) */
2589 start_tsc_measure(vfw_pipe);
2591 uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
2592 uint64_t pkts_drop_mask;
2593 uint64_t hijack_mask = 0;
2594 uint64_t synproxy_reply_mask = 0; /* for synproxy */
2595 uint64_t keep_mask = packet_mask_in;
2596 struct rte_CT_helper ct_helper;
2598 memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
2602 * This routine uses a bit mask to represent which packets in the
2603 * "pkts" table are considered valid. Any table entry which exists
2604 * and is considered valid has the corresponding bit in the mask set.
2605 * Otherwise, it is cleared. Note that the mask is 64 bits,
2606 * but the number of packets in the table may be considerably less.
2607 * Any mask bits which do correspond to actual packets are cleared.
2608 * Various routines are called which may determine that an existing
2609 * packet is somehow invalid. The routine will return an altered bit
2610 * mask, with the bit cleared. At the end of all the checks,
2611 * packets are dropped if their mask bit is a zero
2615 printf("Enter in-port action with %p packet mask\n",
2616 (void *)packet_mask_in);
2617 vfw_pipe->counters->pkts_received =
2618 vfw_pipe->counters->pkts_received + n_pkts;
2620 printf("vfw_port_in_action pkts_received: %" PRIu64
2622 vfw_pipe->counters->pkts_received, n_pkts);
2624 /* first handle handle any previously buffered packets now released */
2625 vfw_handle_buffered_packets(p, vfw_pipe, ct,
2626 FORWARD_BUFFERED_PACKETS);
2628 /* now handle any new packets on input ports */
2629 if (likely(firewall_flag)) {
2631 rte_vfw_packet_filter_and_process(pkts, keep_mask,
2633 vfw_pipe->counters->pkts_fw_forwarded +=
2634 __builtin_popcountll(keep_mask);
2637 uint64_t conntrack_mask = 0, connexist_mask = 0;
/* ACL stage: may clear mask bits and request conntrack per packet */
2638 keep_mask = lib_acl_pkt_work_key(
2639 vfw_pipe->plib_acl, pkts, keep_mask,
2640 &vfw_pipe->counters->pkts_drop_without_rule,
2641 vfw_rule_table_ipv4_active,
2642 vfw_rule_table_ipv6_active,
2643 action_array_active,
2644 action_counter_table,
2645 &conntrack_mask, &connexist_mask,
2648 vfw_pipe->counters->pkts_acl_forwarded +=
2649 __builtin_popcountll(keep_mask);
/* conntrack only for packets the ACL flagged for tracking */
2650 if (conntrack_mask > 0) {
2651 keep_mask = conntrack_mask;
2652 ct_helper.no_new_cnxn_mask = connexist_mask;
2653 cnxn_tracking_is_active = 1;
2655 cnxn_tracking_is_active = 0;
2657 if (likely(cnxn_tracking_is_active)) {
2658 keep_mask = rte_ct_cnxn_tracker_batch_lookup(ct, pkts,
2659 keep_mask, &ct_helper);
2660 synproxy_reply_mask = ct_helper.reply_pkt_mask;
2661 hijack_mask = ct_helper.hijack_mask;
/* resolve next hop / rewrite MACs for surviving packets */
2667 rte_vfw_arp_packets(pkts, keep_mask, synproxy_reply_mask,
2670 if (vfw_debug > 1) {
2671 printf(" Exit in-port action with %p packet mask\n",
2673 if (keep_mask != packet_mask_in)
2674 printf("dropped packets, %p in, %p out\n",
2675 (void *)packet_mask_in,
2679 /* Update mask before returning, so that bad packets are dropped */
2681 pkts_drop_mask = packet_mask_in & ~keep_mask;
2683 if (unlikely(pkts_drop_mask != 0)) {
2684 /* printf("drop %p\n", (void *) pkts_drop_mask); */
2685 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
2688 if (unlikely(hijack_mask != 0))
2689 rte_pipeline_ah_packet_hijack(p, hijack_mask);
/* per-burst statistics for average batch size / latency */
2691 vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
2692 vfw_pipe->counters->num_pkts_measurements++;
2694 end_tsc_measure(vfw_pipe, n_pkts);
2699 * The pipeline port-in action is used to do all the firewall and
2700 * connection tracking work for IPV4 packets.
2703 * A pointer to the pipeline.
2705 * A pointer to a burst of packets.
2707 * Number of packets to process.
2709 * A pointer to pipeline specific data.
2712 * 0 on success, negative on error.
2716 vfw_port_in_action_ipv4(struct rte_pipeline *p,
2717 struct rte_mbuf **pkts,
2718 __rte_unused uint32_t n_pkts, __rte_unused void *arg)
2720 struct vfw_ports_in_args *port_in_args =
2721 (struct vfw_ports_in_args *)arg;
2722 struct pipeline_vfw *vfw_pipe =
2723 (struct pipeline_vfw *)port_in_args->pipe;
2724 struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
2726 start_tsc_measure(vfw_pipe);
2728 uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
2729 uint64_t pkts_drop_mask;
2730 uint64_t hijack_mask = 0;
2731 uint64_t synproxy_reply_mask = 0; /* for synproxy */
2732 uint64_t keep_mask = packet_mask_in;
2734 uint64_t conntrack_mask = 0, connexist_mask = 0;
2735 struct rte_CT_helper ct_helper;
2739 * This routine uses a bit mask to represent which packets in the
2740 * "pkts" table are considered valid. Any table entry which exists
2741 * and is considered valid has the corresponding bit in the mask set.
2742 * Otherwise, it is cleared. Note that the mask is 64 bits,
2743 * but the number of packets in the table may be considerably less.
2744 * Any mask bits which do correspond to actual packets are cleared.
2745 * Various routines are called which may determine that an existing
2746 * packet is somehow invalid. The routine will return an altered bit
2747 * mask, with the bit cleared. At the end of all the checks,
2748 * packets are dropped if their mask bit is a zero
2751 rte_prefetch0(& vfw_pipe->counters);
2754 /* Pre-fetch all rte_mbuf header */
2755 for(j = 0; j < n_pkts; j++)
2756 rte_prefetch0(pkts[j]);
2758 memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
2760 rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
2761 rte_prefetch0(& vfw_pipe->counters->sum_latencies);
2764 if (unlikely(vfw_debug > 1))
2765 printf("Enter in-port action IPV4 with %p packet mask\n",
2766 (void *)packet_mask_in);
2767 vfw_pipe->counters->pkts_received =
2768 vfw_pipe->counters->pkts_received + n_pkts;
2770 if (unlikely(VFW_DEBUG))
2771 printf("vfw_port_in_action_ipv4 pkts_received: %" PRIu64
2773 vfw_pipe->counters->pkts_received, n_pkts);
2775 /* first handle handle any previously buffered packets now released */
2776 vfw_handle_buffered_packets(p, vfw_pipe, ct,
2777 FORWARD_BUFFERED_PACKETS);
2779 /* now handle any new packets on input ports */
2780 if (likely(firewall_flag)) {
2781 keep_mask = rte_vfw_ipv4_packet_filter_and_process(pkts,
2782 keep_mask, vfw_pipe);
2783 vfw_pipe->counters->pkts_fw_forwarded +=
2784 __builtin_popcountll(keep_mask);
2788 rte_prefetch0((void*)vfw_pipe->plib_acl);
2789 rte_prefetch0((void*)vfw_rule_table_ipv4_active);
2790 #endif /* EN_SWP_ACL */
2791 keep_mask = lib_acl_ipv4_pkt_work_key(
2792 vfw_pipe->plib_acl, pkts, keep_mask,
2793 &vfw_pipe->counters->pkts_drop_without_rule,
2794 vfw_rule_table_ipv4_active,
2795 action_array_active,
2796 action_counter_table,
2797 &conntrack_mask, &connexist_mask);
2798 vfw_pipe->counters->pkts_acl_forwarded +=
2799 __builtin_popcountll(keep_mask);
2800 if (conntrack_mask > 0) {
2801 keep_mask = conntrack_mask;
2802 ct_helper.no_new_cnxn_mask = connexist_mask;
2803 cnxn_tracking_is_active = 1;
2805 cnxn_tracking_is_active = 0;
2806 #endif /* ACL_ENABLE */
2808 if (likely(cnxn_tracking_is_active)) {
2809 rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
2810 &keep_mask, &ct_helper, IPv4_HEADER_SIZE);
2811 synproxy_reply_mask = ct_helper.reply_pkt_mask;
2812 hijack_mask = ct_helper.hijack_mask;
2817 for(j = 0; j < (n_pkts & 0x3LLU); j++) {
2818 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2820 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2823 rte_prefetch0((void*)in_port_dir_a);
2824 rte_prefetch0((void*)prv_to_pub_map);
2827 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
2828 for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
2829 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2831 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2834 pkt4_work_vfw_arp_ipv4_packets(&pkts[i], i, &keep_mask,
2835 synproxy_reply_mask, vfw_pipe);
2837 for (j = i; j < n_pkts; j++) {
2838 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2840 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
2843 for (; i < n_pkts; i++) {
2844 pkt_work_vfw_arp_ipv4_packets(pkts[i], i, &keep_mask,
2845 synproxy_reply_mask, vfw_pipe);
2848 rte_prefetch0((void*)in_port_dir_a);
2849 rte_prefetch0((void*)prv_to_pub_map);
2850 rte_prefetch0((void*) & vfw_pipe->local_lib_arp_route_table);
2851 keep_mask = rte_vfw_arp_ipv4_packets(pkts, keep_mask,
2852 synproxy_reply_mask, vfw_pipe);
2855 if (vfw_debug > 1) {
2856 printf(" Exit in-port action with %p packet mask\n",
2858 if (keep_mask != packet_mask_in)
2859 printf("dropped packets, %p in, %p out\n",
2860 (void *)packet_mask_in,
2864 /* Update mask before returning, so that bad packets are dropped */
2866 pkts_drop_mask = packet_mask_in & ~keep_mask;
2868 if (unlikely(pkts_drop_mask != 0)) {
2869 /* printf("drop %p\n", (void *) pkts_drop_mask); */
2870 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
2873 if (unlikely(hijack_mask != 0))
2874 rte_pipeline_ah_packet_hijack(p, hijack_mask);
2876 vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
2877 vfw_pipe->counters->num_pkts_measurements++;
2879 end_tsc_measure(vfw_pipe, n_pkts);
2884 * The pipeline port-in action is used to do all the firewall and
2885 * connection tracking work for IPV6 packet.
2888 * A pointer to the pipeline.
2890 * A pointer to a burst of packets.
2892 * Number of packets to process.
2894 * A pointer to pipeline specific data.
2897 * 0 on success, negative on error.
2901 vfw_port_in_action_ipv6(struct rte_pipeline *p,
2902 struct rte_mbuf **pkts,
2903 __rte_unused uint32_t n_pkts, __rte_unused void *arg)
2905 struct vfw_ports_in_args *port_in_args =
2906 (struct vfw_ports_in_args *)arg;
2907 struct pipeline_vfw *vfw_pipe =
2908 (struct pipeline_vfw *)port_in_args->pipe;
2909 struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
2911 start_tsc_measure(vfw_pipe);
2913 uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
2914 uint64_t pkts_drop_mask;
2915 uint64_t hijack_mask = 0;
2916 uint64_t synproxy_reply_mask = 0; /* for synproxy */
2917 uint64_t keep_mask = packet_mask_in;
2919 uint64_t conntrack_mask = 0, connexist_mask = 0;
2920 struct rte_CT_helper ct_helper;
2924 * This routine uses a bit mask to represent which packets in the
2925 * "pkts" table are considered valid. Any table entry which exists
2926 * and is considered valid has the corresponding bit in the mask set.
2927 * Otherwise, it is cleared. Note that the mask is 64 bits,
2928 * but the number of packets in the table may be considerably less.
2929 * Any mask bits which do correspond to actual packets are cleared.
2930 * Various routines are called which may determine that an existing
2931 * packet is somehow invalid. The routine will return an altered bit
2932 * mask, with the bit cleared. At the end of all the checks,
2933 * packets are dropped if their mask bit is a zero
2936 rte_prefetch0(& vfw_pipe->counters);
2938 /* Pre-fetch all rte_mbuf header */
2939 for(j = 0; j < n_pkts; j++)
2940 rte_prefetch0(pkts[j]);
2942 memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
2943 rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
2944 rte_prefetch0(& vfw_pipe->counters->sum_latencies);
2947 printf("Enter in-port action with %p packet mask\n",
2948 (void *)packet_mask_in);
2949 vfw_pipe->counters->pkts_received =
2950 vfw_pipe->counters->pkts_received + n_pkts;
2952 printf("vfw_port_in_action pkts_received: %" PRIu64
2954 vfw_pipe->counters->pkts_received, n_pkts);
2956 /* first handle handle any previously buffered packets now released */
2957 vfw_handle_buffered_packets(p, vfw_pipe, ct,
2958 FORWARD_BUFFERED_PACKETS);
2960 /* now handle any new packets on input ports */
2961 if (likely(firewall_flag)) {
2962 keep_mask = rte_vfw_ipv6_packet_filter_and_process(pkts,
2963 keep_mask, vfw_pipe);
2964 vfw_pipe->counters->pkts_fw_forwarded +=
2965 __builtin_popcountll(keep_mask);
2970 rte_prefetch0((void*)vfw_pipe->plib_acl);
2971 rte_prefetch0((void*)vfw_rule_table_ipv6_active);
2972 #endif /* EN_SWP_ACL */
2973 keep_mask = lib_acl_ipv6_pkt_work_key(
2974 vfw_pipe->plib_acl, pkts, keep_mask,
2975 &vfw_pipe->counters->pkts_drop_without_rule,
2976 vfw_rule_table_ipv6_active,
2977 action_array_active,
2978 action_counter_table,
2979 &conntrack_mask, &connexist_mask);
2980 vfw_pipe->counters->pkts_acl_forwarded +=
2981 __builtin_popcountll(keep_mask);
2982 if (conntrack_mask > 0) {
2983 keep_mask = conntrack_mask;
2984 ct_helper.no_new_cnxn_mask = connexist_mask;
2985 cnxn_tracking_is_active = 1;
2987 cnxn_tracking_is_active = 0;
2988 #endif /* ACL_ENABLE */
2989 if (likely(cnxn_tracking_is_active)) {
2990 rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
2991 &keep_mask, &ct_helper, IPv6_HEADER_SIZE);
2992 synproxy_reply_mask = ct_helper.reply_pkt_mask;
2993 hijack_mask = ct_helper.hijack_mask;
2998 for(j = 0; j < (n_pkts & 0x3LLU); j++) {
2999 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
3001 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
3004 rte_prefetch0((void*)in_port_dir_a);
3005 rte_prefetch0(vfw_pipe->local_lib_nd_route_table);
3008 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
3009 for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
3010 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
3012 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
3015 pkt4_work_vfw_arp_ipv6_packets(&pkts[i], i, &keep_mask,
3016 synproxy_reply_mask, vfw_pipe);
3018 for (j = i; j < n_pkts; j++) {
3019 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
3021 rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
3024 for (; i < n_pkts; i++) {
3025 pkt_work_vfw_arp_ipv6_packets(pkts[i], i, &keep_mask,
3026 synproxy_reply_mask, vfw_pipe);
3029 rte_prefetch0((void*)in_port_dir_a);
3030 rte_prefetch0((void*) & vfw_pipe->local_lib_arp_route_table);
3031 keep_mask = rte_vfw_arp_ipv6_packets(pkts, keep_mask,
3032 synproxy_reply_mask, vfw_pipe);
3035 if (vfw_debug > 1) {
3036 printf(" Exit in-port action with %p packet mask\n",
3038 if (keep_mask != packet_mask_in)
3039 printf("dropped packets, %p in, %p out\n",
3040 (void *)packet_mask_in,
3044 /* Update mask before returning, so that bad packets are dropped */
3046 pkts_drop_mask = packet_mask_in & ~keep_mask;
3048 if (unlikely(pkts_drop_mask != 0)) {
3049 /* printf("drop %p\n", (void *) pkts_drop_mask); */
3050 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
3053 if (unlikely(hijack_mask != 0))
3054 rte_pipeline_ah_packet_hijack(p, hijack_mask);
3056 vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
3057 vfw_pipe->counters->num_pkts_measurements++;
3059 end_tsc_measure(vfw_pipe, n_pkts);
3066 * Parse arguments in config file.
3069 * A pointer to the pipeline.
3071 * A pointer to pipeline specific parameters.
3074 * 0 on success, negative on error.
3077 pipeline_vfw_parse_args(struct pipeline_vfw *vfw_pipe,
3078 struct pipeline_params *params)
3084 printf("VFW pipeline_vfw_parse_args params->n_args: %d\n",
3087 for (i = 0; i < params->n_args; i++) {
3088 char *arg_name = params->args_name[i];
3089 char *arg_value = params->args_value[i];
3091 printf("VFW args[%d]: %s %d, %s\n", i, arg_name,
3092 atoi(arg_value), arg_value);
3094 status = lib_acl_parse_config(vfw_pipe->plib_acl,
3095 arg_name, arg_value, &vfw_n_rules);
3097 printf("rte_ct_set_configuration_options =%s,%s",
3098 arg_name, arg_value);
3100 } else if (status == 0)
3103 #endif /* traffic_type */
3104 if (strcmp(arg_name, "traffic_type") == 0) {
3105 int traffic_type = atoi(arg_value);
3107 if (traffic_type == 0 ||
3108 !(traffic_type == IP_VERSION_4 ||
3109 traffic_type == IP_VERSION_6)) {
3110 printf("not IPV4/IPV6");
3114 vfw_pipe->traffic_type = traffic_type;
3120 if (strcmp(arg_name, "n_flows") == 0) {
3121 int n_flows = atoi(arg_value);
3126 /* must be power of 2, round up if not */
3127 if (!rte_is_power_of_2(n_flows))
3128 n_flows = rte_align32pow2(n_flows);
3130 vfw_pipe->n_flows = n_flows;
3134 /* not firewall option, process as cnxn tracking option */
3135 status = rte_ct_set_configuration_options(
3136 vfw_pipe->cnxn_tracker,
3137 arg_name, arg_value);
3139 printf("rte_ct_set_configuration_options =%s,%s",
3140 arg_name, arg_value);
3142 } else if (status == 0)
3150 static void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p,
3153 static pipeline_msg_req_handler handlers[] = {
3154 [PIPELINE_MSG_REQ_PING] = pipeline_msg_req_ping_handler,
3155 [PIPELINE_MSG_REQ_STATS_PORT_IN] =
3156 pipeline_msg_req_stats_port_in_handler,
3157 [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
3158 pipeline_msg_req_stats_port_out_handler,
3159 [PIPELINE_MSG_REQ_STATS_TABLE] = pipeline_msg_req_stats_table_handler,
3160 [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
3161 pipeline_msg_req_port_in_enable_handler,
3162 [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
3163 pipeline_msg_req_port_in_disable_handler,
3164 [PIPELINE_MSG_REQ_CUSTOM] = pipeline_vfw_msg_req_custom_handler,
3167 static void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
3169 static pipeline_msg_req_handler custom_handlers[] = {
3171 [PIPELINE_VFW_MSG_REQ_SYNPROXY_FLAGS] =
3172 pipeline_vfw_msg_req_synproxy_flag_handler
3176 * Create and initialize Pipeline Back End (BE).
3179 * A pointer to the pipeline specific parameters..
3181 * A pointer to pipeline specific data.
3184 * A pointer to the pipeline create, NULL on error.
/*
 * pipeline_vfw_init(): allocates the pipeline object, connection tracker
 * and ACL instance, claims a per-instance counter block, parses config
 * arguments, creates the rte_pipeline with its input/output ports and a
 * stub pass-through table, then wires up message queues and handlers.
 *
 * NOTE(review): the numeric prefix on each line below is an artifact of
 * the extraction this file came from; several original lines (blank
 * lines, braces, return statements) are missing around this block, so
 * the code is kept byte-identical and only comments are added here.
 */
3187 *pipeline_vfw_init(struct pipeline_params *params, __rte_unused void *arg)
3191 /* Check input arguments */
3192 if ((params == NULL) ||
3193 (params->n_ports_in == 0) || (params->n_ports_out == 0))
3197 printf("num ports in %d / num ports out %d\n",
3198 params->n_ports_in, params->n_ports_out);
3200 /* Create a single pipeline instance and initialize. */
3201 struct pipeline_vfw *pipe_vfw;
3203 size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_vfw));
3204 pipe_vfw = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
3206 if (pipe_vfw == NULL)
3209 struct pipeline *pipe;
3211 pipe = &pipe_vfw->pipe;
/* NOTE(review): strncpy may leave pipe->name unterminated when
 * params->name fills the buffer - consider snprintf. */
3213 strncpy(pipe->name, params->name, sizeof(pipe->name));
3214 pipe->log_level = params->log_level;
/* Defaults prior to argument parsing: 4096 flows, mixed traffic type. */
3215 pipe_vfw->n_flows = 4096; /* small default value */
3216 pipe_vfw->traffic_type = MIX;
3217 pipe_vfw->pipeline_num = 0xff;
3218 for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
3219 pipe_vfw->links_map[i] = 0xff;
3220 pipe_vfw->outport_id[i] = 0xff;
3222 PLOG(pipe, HIGH, "VFW");
3224 /* Create a firewall instance and initialize. */
3225 pipe_vfw->cnxn_tracker =
3226 rte_zmalloc(NULL, rte_ct_get_cnxn_tracker_size(),
3227 RTE_CACHE_LINE_SIZE);
3229 if (pipe_vfw->cnxn_tracker == NULL)
3232 /* Create a acl instance and initialize. */
3233 pipe_vfw->plib_acl =
3234 rte_zmalloc(NULL, sizeof(struct lib_acl),
3235 RTE_CACHE_LINE_SIZE);
3237 if (pipe_vfw->plib_acl == NULL)
3240 timer_lcore = rte_lcore_id();
3242 * Now allocate a counter block entry. It appears that the
3243 * initialization of all instances is serialized on core 0,
3244 * so no lock is necessary.
3246 struct rte_VFW_counter_block *counter_ptr;
3248 if (rte_VFW_hi_counter_block_in_use == MAX_VFW_INSTANCES)
3249 /* error, exceeded table bounds */
/* NOTE(review): incrementing before indexing means slot 0 of
 * rte_vfw_counter_table is never used, and the counter is >= 1 from
 * here on (see the "== 0" check further below). */
3252 rte_VFW_hi_counter_block_in_use++;
3254 &rte_vfw_counter_table[rte_VFW_hi_counter_block_in_use];
3255 strncpy(counter_ptr->name, params->name, sizeof(counter_ptr->name));
3257 pipe_vfw->counters = counter_ptr;
3259 rte_ct_initialize_default_timeouts(pipe_vfw->cnxn_tracker);
3260 /* Parse arguments */
3261 if (pipeline_vfw_parse_args(pipe_vfw, params))
/* Offset of the synproxy "next" pointer inside per-mbuf metadata. */
3264 uint16_t pointers_offset =
3265 META_DATA_OFFSET + offsetof(struct mbuf_tcp_meta_data, next);
3267 if (pipe_vfw->n_flows > 0)
3268 rte_ct_initialize_cnxn_tracker_with_synproxy(
3269 pipe_vfw->cnxn_tracker,
3274 pipe_vfw->counters->ct_counters =
3275 rte_ct_get_counter_address(pipe_vfw->cnxn_tracker);
/* Output port id is read from mbuf metadata (PORT_META action below). */
3279 struct rte_pipeline_params pipeline_params = {
3280 .name = params->name,
3281 .socket_id = params->socket_id,
3282 .offset_port_id = META_DATA_OFFSET +
3283 offsetof(struct mbuf_tcp_meta_data, output_port)
3286 pipe->p = rte_pipeline_create(&pipeline_params);
3287 if (pipe->p == NULL) {
3296 * create a different "arg_ah" for each input port.
3297 * They differ only in the recorded port number. Unfortunately,
3298 * IP_PIPELINE does not pass port number in to input port handler
3301 uint32_t in_ports_arg_size =
3302 RTE_CACHE_LINE_ROUNDUP((sizeof(struct vfw_ports_in_args)) *
3303 (params->n_ports_in));
3304 struct vfw_ports_in_args *port_in_args =
3305 (struct vfw_ports_in_args *)
3306 rte_zmalloc(NULL, in_ports_arg_size, RTE_CACHE_LINE_SIZE);
3308 if (port_in_args == NULL)
3311 pipe->n_ports_in = params->n_ports_in;
3312 for (i = 0; i < pipe->n_ports_in; i++) {
3314 /* initialize this instance of port_in_args as necessary */
3315 port_in_args[i].pipe = pipe;
3316 port_in_args[i].cnxn_tracker = pipe_vfw->cnxn_tracker;
3318 struct rte_pipeline_port_in_params port_params = {
3320 pipeline_port_in_params_get_ops(¶ms->port_in
3323 pipeline_port_in_params_convert(¶ms->port_in
/* Default handler; overridden below for pure v4 or v6 deployments. */
3325 .f_action = vfw_port_in_action,
3326 .arg_ah = &(port_in_args[i]),
3327 .burst_size = params->port_in[i].burst_size,
3329 if (pipe_vfw->traffic_type == IP_VERSION_4)
3330 port_params.f_action = vfw_port_in_action_ipv4;
3332 if (pipe_vfw->traffic_type == IP_VERSION_6)
3333 port_params.f_action = vfw_port_in_action_ipv6;
3334 int status = rte_pipeline_port_in_create(pipe->p, &port_params,
3335 &pipe->port_in_id[i]);
3338 rte_pipeline_free(pipe->p);
3345 pipe->n_ports_out = params->n_ports_out;
3346 for (i = 0; i < pipe->n_ports_out; i++) {
3347 struct rte_pipeline_port_out_params port_params = {
3348 .ops = pipeline_port_out_params_get_ops(
3349 ¶ms->port_out[i]),
3350 .arg_create = pipeline_port_out_params_convert(
3351 ¶ms->port_out[i]),
3356 int status = rte_pipeline_port_out_create(pipe->p, &port_params,
3357 &pipe->port_out_id[i]);
3360 rte_pipeline_free(pipe->p);
/* Derive the numeric pipeline id from the "PIPELINEn" name. */
3366 int pipeline_num = 0;
/* NOTE(review): "unble" typo in the runtime string below - left
 * unchanged in this documentation-only edit. */
3367 int dont_care = sscanf(params->name, "PIPELINE%d", &pipeline_num);
3370 printf("sscanf unble to read pipeline id\n");
3371 pipe_vfw->pipeline_num = (uint8_t) pipeline_num;
3372 register_pipeline_Qs(pipe_vfw->pipeline_num, pipe);
3373 set_link_map(pipe_vfw->pipeline_num, pipe, pipe_vfw->links_map);
3374 set_outport_id(pipe_vfw->pipeline_num, pipe,
3375 pipe_vfw->outport_id);
3376 printf("pipeline_num=%d\n", pipeline_num);
3378 /*If this is the first VFW thread, create common VFW Rule tables*/
/* NOTE(review): rte_VFW_hi_counter_block_in_use was unconditionally
 * incremented above, so it is >= 1 here and this "== 0" first-thread
 * check looks unreachable - verify against the full source (lines are
 * missing from this extraction). */
3379 if (rte_VFW_hi_counter_block_in_use == 0) {
3380 vfw_rule_table_ipv4_active =
3381 lib_acl_create_active_standby_table_ipv4(1,
3383 if (vfw_rule_table_ipv4_active == NULL) {
3384 printf("Failed to create active table for IPV4\n");
3385 rte_pipeline_free(pipe->p);
3386 rte_free(pipe_vfw->cnxn_tracker);
3387 rte_free(pipe_vfw->plib_acl);
3391 vfw_rule_table_ipv4_standby =
3392 lib_acl_create_active_standby_table_ipv4(2,
3394 if (vfw_rule_table_ipv4_standby == NULL) {
3395 printf("Failed to create standby table for IPV4\n");
3396 rte_pipeline_free(pipe->p);
3397 rte_free(pipe_vfw->cnxn_tracker);
3398 rte_free(pipe_vfw->plib_acl);
3403 vfw_rule_table_ipv6_active =
3404 lib_acl_create_active_standby_table_ipv6(1,
3407 if (vfw_rule_table_ipv6_active == NULL) {
3408 printf("Failed to create active table for IPV6\n");
3409 rte_pipeline_free(pipe->p);
3410 rte_free(pipe_vfw->cnxn_tracker);
3411 rte_free(pipe_vfw->plib_acl);
3415 vfw_rule_table_ipv6_standby =
3416 lib_acl_create_active_standby_table_ipv6(2,
3418 if (vfw_rule_table_ipv6_standby == NULL) {
3419 printf("Failed to create standby table for IPV6\n");
3420 rte_pipeline_free(pipe->p);
3421 rte_free(pipe_vfw->cnxn_tracker);
3422 rte_free(pipe_vfw->plib_acl);
/* Single stub table: every packet falls through to the default entry. */
3434 struct rte_pipeline_table_params table_params = {
3435 .ops = &rte_table_stub_ops,
3437 .f_action_hit = NULL,
3438 .f_action_miss = NULL,
3440 .action_data_size = 0,
3443 int status = rte_pipeline_table_create(pipe->p,
3445 &pipe->table_id[0]);
3448 rte_pipeline_free(pipe->p);
/* Default entry: forward using the port id stored in mbuf metadata. */
3453 struct rte_pipeline_table_entry default_entry = {
3454 .action = RTE_PIPELINE_ACTION_PORT_META
3457 struct rte_pipeline_table_entry *default_entry_ptr;
3459 status = rte_pipeline_table_default_entry_add(pipe->p,
3462 &default_entry_ptr);
3465 rte_pipeline_free(pipe->p);
/* Connect every input port to the stub table. */
3469 for (i = 0; i < pipe->n_ports_in; i++) {
3470 int status = rte_pipeline_port_in_connect_to_table(
3472 pipe->port_in_id[i],
3476 rte_pipeline_free(pipe->p);
3482 /* Enable input ports */
3483 for (i = 0; i < pipe->n_ports_in; i++) {
3485 rte_pipeline_port_in_enable(pipe->p, pipe->port_in_id[i]);
3488 rte_pipeline_free(pipe->p);
3494 /* Check pipeline consistency */
3495 if (rte_pipeline_check(pipe->p) < 0) {
3496 rte_pipeline_free(pipe->p);
3501 /* Message queues */
3502 pipe->n_msgq = params->n_msgq;
3503 for (i = 0; i < pipe->n_msgq; i++)
3504 pipe->msgq_in[i] = params->msgq_in[i];
3506 for (i = 0; i < pipe->n_msgq; i++)
3507 pipe->msgq_out[i] = params->msgq_out[i];
3509 /* Message handlers */
3510 memcpy(pipe->handlers, handlers, sizeof(pipe->handlers));
3511 memcpy(pipe_vfw->custom_handlers, custom_handlers,
3512 sizeof(pipe_vfw->custom_handlers));
3518 * Free resources and delete pipeline.
3521 * A pointer to the pipeline.
3524 * 0 on success, negative on error.
3526 static int pipeline_vfw_free(void *pipeline)
3528 struct pipeline *p = (struct pipeline *)pipeline;
3530 /* Check input arguments */
3534 /* Free resources */
3535 rte_pipeline_free(p->p);
3541 * Callback function to map input/output ports.
3544 * A pointer to the pipeline.
3548 * A pointer to the Output port.
3551 * 0 on success, negative on error.
3554 pipeline_vfw_track(void *pipeline, __rte_unused uint32_t port_in,
3557 struct pipeline *p = (struct pipeline *)pipeline;
3559 /* Check input arguments */
3560 if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
3563 if (p->n_ports_in == 1) {
3572 * Callback function to process timers.
3575 * A pointer to the pipeline.
3578 * 0 on success, negative on error.
3580 static int pipeline_vfw_timer(void *pipeline)
3582 struct pipeline_vfw *p = (struct pipeline_vfw *)pipeline;
3585 * handle any good buffered packets released by synproxy before checking
3586 * for packets relased by synproxy due to timeout.
3587 * Don't want packets missed
3590 vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
3591 FORWARD_BUFFERED_PACKETS);
3593 pipeline_msg_req_handle(&p->pipe);
3594 rte_pipeline_flush(p->pipe.p);
3596 rte_ct_handle_expired_timers(p->cnxn_tracker);
3598 /* now handle packets released by synproxy due to timeout. */
3599 vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
3600 DELETE_BUFFERED_PACKETS);
3606 * Callback function to process CLI commands from FE.
3609 * A pointer to the pipeline.
3611 * A pointer to command specific data.
3614 * A pointer to message handler on success,
3615 * pipeline_msg_req_invalid_hander on error.
3617 void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p, void *msg)
3619 struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
3620 struct pipeline_custom_msg_req *req = msg;
3621 pipeline_msg_req_handler f_handle;
3623 f_handle = (req->subtype < PIPELINE_VFW_MSG_REQS) ?
3624 pipe_vfw->custom_handlers[req->subtype] :
3625 pipeline_msg_req_invalid_handler;
3627 if (f_handle == NULL)
3628 f_handle = pipeline_msg_req_invalid_handler;
3630 return f_handle(p, req);
3634 * Handler for synproxy ON/OFF CLI command.
3637 * A pointer to the pipeline.
3639 * A pointer to command specific data.
3642 * Response message contains status.
3645 void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
3648 struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
3649 struct pipeline_vfw_synproxy_flag_msg_req *req = msg;
3650 struct pipeline_vfw_synproxy_flag_msg_rsp *rsp = msg;
3652 if (req->synproxy_flag == 0) {
3653 rte_ct_disable_synproxy(pipe_vfw->cnxn_tracker);
3655 printf("synproxy turned OFF for %s\n", p->name);
3656 } else if (req->synproxy_flag == 1) {
3657 rte_ct_enable_synproxy(pipe_vfw->cnxn_tracker);
3659 printf("synproxy turned ON for %s\n", p->name);
3661 printf("Invalid synproxy setting\n");
3668 struct pipeline_be_ops pipeline_vfw_be_ops = {
3669 .f_init = pipeline_vfw_init,
3670 .f_free = pipeline_vfw_free,
3672 .f_timer = pipeline_vfw_timer,
3673 .f_track = pipeline_vfw_track,