/* samplevnf.git: VNFs/vFW/pipeline/pipeline_vfw_be.c */
1 /*
2 // Copyright (c) 2017 Intel Corporation
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 */
16
17 /**
18  * @file
19  * Pipeline VFW BE Implementation.
20  *
21  * Implementation of Pipeline VFW Back End (BE).
22  * Responsible for packet processing.
23  *
24  */
25
26 #define EN_SWP_ACL 1
27 //#define EN_SWP_ARP 1
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <stdint.h>
32 #include <stddef.h>
33 #include <string.h>
34 #include <unistd.h>
35
36 #include <rte_common.h>
37 #include <rte_malloc.h>
38 #include <rte_ether.h>
39 #include <rte_ethdev.h>
40 #include <rte_ip.h>
41 #include <rte_udp.h>
42 #include <rte_icmp.h>
43 #include <rte_byteorder.h>
44
45 #include <rte_table_lpm.h>
46 #include <rte_table_hash.h>
47 #include <rte_table_array.h>
48 #include <rte_table_acl.h>
49 #include <rte_table_stub.h>
50 #include <rte_timer.h>
51 #include <rte_cycles.h>
52 #include <rte_pipeline.h>
53 #include <rte_spinlock.h>
54 #include <rte_prefetch.h>
55 #include "pipeline_actions_common.h"
56 #include "hash_func.h"
57 #include "pipeline_vfw.h"
58 #include "pipeline_vfw_be.h"
59 #include "rte_cnxn_tracking.h"
60 #include "pipeline_arpicmp_be.h"
61 #include "vnf_common.h"
62 #include "vnf_define.h"
63
64 #include "lib_arp.h"
65 #include "lib_icmpv6.h"
66 #include "pipeline_common_fe.h"
67 #include "gateway.h"
68
69 uint32_t timer_lcore;
70
71 uint8_t firewall_flag = 1;
72 uint8_t VFW_DEBUG = 0;
73 uint8_t cnxn_tracking_is_active = 1;
74 /**
75  * Per-thread data for the VFW pipeline input ports.
76  */
77 struct vfw_ports_in_args {
78        struct pipeline *pipe;
79        struct rte_ct_cnxn_tracker *cnxn_tracker;
80 } __rte_cache_aligned;
81 /**
82  * Per-thread data for the VFW pipeline.
83  */
84 struct pipeline_vfw {
85        struct pipeline pipe;
86        pipeline_msg_req_handler custom_handlers[PIPELINE_VFW_MSG_REQS];
87
88        struct rte_ct_cnxn_tracker *cnxn_tracker;
89        struct rte_VFW_counter_block *counters;
90        struct rte_mbuf *pkt_buffer[PKT_BUFFER_SIZE];
91        struct lib_acl *plib_acl;
92        /* timestamp retrieved during in-port computations */
93        uint32_t n_flows;
94        uint8_t pipeline_num;
95        uint8_t traffic_type;
96        uint8_t links_map[PIPELINE_MAX_PORT_IN];
97        uint8_t outport_id[PIPELINE_MAX_PORT_IN];
98
99 } __rte_cache_aligned;
100 /**
101  * A structure defining the mbuf meta data for VFW.
102  */
103 struct mbuf_tcp_meta_data {
104 /* output port stored for RTE_PIPELINE_ACTION_PORT_META */
105        uint32_t output_port;
106        struct rte_mbuf *next;       /* next pointer for chained buffers */
107 } __rte_cache_aligned;
108
109 #define DONT_CARE_TCP_PACKET 0
110 #define IS_NOT_TCP_PACKET 0
111 #define IS_TCP_PACKET 1
112
113 #define META_DATA_OFFSET 128
114
115 #define RTE_PKTMBUF_HEADROOM 128       /* normally provided by rte_config.h; default value (128) repeated here */
116 #define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
117 #define ETH_HDR_SIZE 14
118 #define PROTOCOL_START (IP_START + 9)
119
120 #define TCP_START (IP_START + 20)
121 #define RTE_LB_PORT_OFFSET 204       /* TODO: Need definition in LB header */
122 #define TCP_START_IPV6 (IP_START + 40)
123 #define PROTOCOL_START_IPV6 (IP_START + 6)
124 #define IP_HDR_DSCP_OFST 1
125
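/*
 * Layout note: packet data is expected to begin at ETHERNET_START, i.e.
 * after META_DATA_OFFSET bytes of per-packet metadata plus the mbuf
 * headroom. IP_START is assumed to be provided by a shared VNF header as
 * ETHERNET_START + ETH_HDR_SIZE, so the L3/L4 offsets above are fixed
 * offsets from the start of the mbuf.
 */
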
126 #define TCP_PROTOCOL 6
127 #define UDP_PROTOCOL 17
128
129 #define DELETE_BUFFERED_PACKETS 0
130 #define FORWARD_BUFFERED_PACKETS 1
131 #define DO_ARP 1
132 #define NO_ARP 0
133
134 #define IPv4_HEADER_SIZE 20
135 #define IPv6_HEADER_SIZE 40
136
137 #define IP_VERSION_4 4
138 #define IP_VERSION_6 6
139
140 /* IPv6 */
141 #define IP_HDR_SIZE_IPV6  40
142 #define IP_HDR_DSCP_OFST_IPV6 0
143 #define IP_HDR_LENGTH_OFST_IPV6 4
144 #define IP_HDR_PROTOCOL_OFST_IPV6 6
145 #define IP_HDR_DST_ADR_OFST_IPV6 24
146 #define MAX_NUM_LOCAL_MAC_ADDRESS 16
147 /** Per-instance counter table for the VFW pipeline (one block per thread). */
148 struct rte_VFW_counter_block rte_vfw_counter_table[MAX_VFW_INSTANCES]
149 __rte_cache_aligned;
150 int rte_VFW_hi_counter_block_in_use = -1;
151
152 /* a spin lock used during vfw initialization only */
153 rte_spinlock_t rte_VFW_init_lock = RTE_SPINLOCK_INITIALIZER;
154
155 /* Action Array */
156 struct pipeline_action_key *action_array_a;
157 struct pipeline_action_key *action_array_b;
158 struct pipeline_action_key *action_array_active;
159 struct pipeline_action_key *action_array_standby;
160 uint32_t action_array_size;
161 struct action_counter_block
162 action_counter_table[MAX_VFW_INSTANCES][action_array_max]
163 __rte_cache_aligned;
164 /*
165   * Pipeline table strategy for firewall. Unfortunately, there does not seem to
166   * be any use for the built-in table lookup of ip_pipeline for the firewall.
167   * The main table requirement of the firewall is the hash table to maintain
168   * connection info, but that is implemented separately in the connection
169   * tracking library. So a "dummy" table lookup will be performed.
170   * TODO: look into "stub" table and see if that can be used
171   * to avoid useless table lookup
172   */
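/*
 * Illustrative sketch only (an assumption, not this pipeline's actual
 * setup code): a stub table from librte_table could serve as such a
 * "dummy" table, letting every packet fall through to the default entry
 * without a real lookup, e.g.:
 *
 *     struct rte_pipeline_table_params stub_params = {
 *             .ops = &rte_table_stub_ops,
 *             .arg_create = NULL,
 *             .f_action_hit = NULL,
 *             .f_action_miss = NULL,
 *             .arg_ah = NULL,
 *             .action_data_size = 0,
 *     };
 *     uint32_t table_id;
 *     rte_pipeline_table_create(p, &stub_params, &table_id);
 *
 * Whether that fits here is exactly the open question in the TODO above.
 */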
173 uint64_t arp_pkts_mask;
174
175 /* Start TSC measurement */
176 /* Prefetch counters and pipe before this function */
177 static inline void start_tsc_measure(struct pipeline_vfw *vfw_pipe) {
178        vfw_pipe->counters->entry_timestamp = rte_get_tsc_cycles();
179        if (likely(vfw_pipe->counters->exit_timestamp))
180               vfw_pipe->counters->external_time_sum +=
181                      vfw_pipe->counters->entry_timestamp -
182                      vfw_pipe->counters->exit_timestamp;
183 }
184
185 /* End TSC measurement */
186 static inline void end_tsc_measure(
187        struct pipeline_vfw *vfw_pipe,
188        uint8_t n_pkts)
189 {
190        if (likely(n_pkts > 1)) {
191               vfw_pipe->counters->exit_timestamp = rte_get_tsc_cycles();
192               vfw_pipe->counters->internal_time_sum +=
193                      vfw_pipe->counters->exit_timestamp -
194                      vfw_pipe->counters->entry_timestamp;
195               vfw_pipe->counters->time_measurements++;
196        } else {
197               /* small counts skew results, ignore */
198               vfw_pipe->counters->exit_timestamp = 0;
199        }
200 }
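
/*
 * Illustrative usage sketch (an assumption about how a caller would use
 * these helpers, not code copied from the handlers below): a port-in
 * action handler brackets its burst with the two functions above, e.g.
 *
 *     start_tsc_measure(vfw_pipe);
 *     ... filter / connection-track / route the burst ...
 *     end_tsc_measure(vfw_pipe, n_pkts);
 *
 * so that internal_time_sum accumulates cycles spent inside the handler
 * and external_time_sum accumulates cycles spent between invocations.
 */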
201
202 /**
203  * Print packet for debugging.
204  *
205  * @param pkt
206  *  A pointer to the packet.
207  *
208  */
209 static __rte_unused  void print_pkt(struct rte_mbuf *pkt)
210 {
211        int i;
212        int size = (int)sizeof(struct mbuf_tcp_meta_data);
213        uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, META_DATA_OFFSET);
214
215        printf("Meta-data:\n");
216        for (i = 0; i < size; i++) {
217               printf("%02x ", rd[i]);
218               if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
219                      printf("\n");
220        }
221        printf("\n");
222        printf("IP and TCP/UDP headers:\n");
223        rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, IP_START);
224        for (i = 0; i < IP_HDR_SIZE_IPV6; i++) {
225               printf("%02x ", rd[i]);
226               if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
227                      printf("\n");
228        }
229        printf("\n");
230 }
231
232 /* TODO: replace with the standard IPPROTO_ICMP/IPPROTO_TCP/IPPROTO_UDP names from <netinet/in.h> */
233 #define IP_ICMP_PROTOCOL 1
234 #define IP_TCP_PROTOCOL 6
235 #define IP_UDP_PROTOCOL 17
236 #define IPv6_FRAGMENT_HEADER 44
237
238 /**
239  * Return the ethernet header structure from the packet.
240  *
241  * @param pkt
242  *  A pointer to the packet.
243  *
244  */
245 static inline struct ether_hdr *rte_vfw_get_ether_addr(struct rte_mbuf *pkt)
246 {
247        return (struct ether_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
248                                                         ETHERNET_START);
249 }
250
251 /**
252  * Return the IPv4 header structure from the packet.
253  *
254  * @param pkt
255  *  A pointer to the packet.
256  *
257  */
258
259 static inline struct ipv4_hdr *rte_vfw_get_IPv4_hdr_addr(
260               struct rte_mbuf *pkt)
261 {
262        return (struct ipv4_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
263 }
264
265 static inline int rte_vfw_is_IPv4(struct rte_mbuf *pkt)
266 {
267        /* NOTE: Only supporting IP headers with no options,
268         * so header is fixed size */
269        uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
270               >> VERSION_NO_BYTE;
271
272        return ip_type == IPv4_HDR_VERSION;
273 }
274
275 static inline int rte_vfw_is_IPv6(struct rte_mbuf *pkt)
276 {
277        /* NOTE: Only supporting IP headers with no options,
278         * so header is fixed size */
279        uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
280               >> VERSION_NO_BYTE;
281
282        return ip_type == IPv6_HDR_VERSION;
283 }
284
285 static inline void rte_vfw_incr_drop_ctr(uint64_t *counter)
286 {
287        if (likely(firewall_flag))
288               (*counter)++;
289 }
290
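/**
 * Divert ARP packets, and ICMP/ICMPv6 packets addressed to this link, to
 * the ARP/ICMP output port (the last output port of the pipeline).
 *
 * @param pkt
 *  A pointer to the packet.
 * @param vfw_pipe
 *  A pointer to VFW pipeline.
 *
 * @return
 *  0 if the packet was consumed here (sent to the ARP/ICMP port or counted
 *  as an unsupported-type drop), 1 if normal firewall processing should
 *  continue.
 */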
291 static uint8_t check_arp_icmp(
292               struct rte_mbuf *pkt,
293               struct pipeline_vfw *vfw_pipe)
294 {
295        struct ether_hdr *ehdr;
296        struct app_link_params *link;
297         uint8_t solicited_node_multicast_addr[IPV6_ADD_SIZE] = {
298                 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
299                 0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00};
300
301         /* ARP outport number */
302        uint16_t out_port = vfw_pipe->pipe.n_ports_out - 1;
303        struct ipv4_hdr *ipv4_h;
304        struct ipv6_hdr *ipv6_h;
305        link = &myApp->link_params[pkt->port];
306
307        ehdr = rte_vfw_get_ether_addr(pkt);
308        switch (rte_be_to_cpu_16(ehdr->ether_type)) {
309
310        case ETH_TYPE_ARP:
311               rte_pipeline_port_out_packet_insert(
312                             vfw_pipe->pipe.p,
313                             out_port,
314                             pkt);
315
316               vfw_pipe->counters->arpicmpPktCount++;
317
318               return 0;
319        case ETH_TYPE_IPV4:
320               ipv4_h = (struct ipv4_hdr *)
321                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
322               if ((ipv4_h->next_proto_id == IP_PROTOCOL_ICMP) &&
323                             link->ip ==
324                             rte_be_to_cpu_32(ipv4_h->dst_addr)) {
325                      if (is_phy_port_privte(pkt->port)) {
326                             rte_pipeline_port_out_packet_insert(
327                                           vfw_pipe->pipe.p,
328                                           out_port,
329                                           pkt);
330
331                             vfw_pipe->counters->arpicmpPktCount++;
332                             return 0;
333                      }
334               }
335               break;
336 #ifdef IPV6
337         case ETH_TYPE_IPV6:
338                 ipv6_h = (struct ipv6_hdr *)
339                         RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
340
341                 if (ipv6_h->proto == ICMPV6_PROTOCOL_ID) {
342                         if (!memcmp(ipv6_h->dst_addr, link->ipv6, IPV6_ADD_SIZE)
343                                         || !memcmp(ipv6_h->dst_addr,
344                                                 solicited_node_multicast_addr,
345                                                 IPV6_ADD_CMP_MULTI)) {
346
347                                 rte_pipeline_port_out_packet_insert(
348                                                 vfw_pipe->pipe.p,
349                                                 out_port,
350                                                 pkt);
351
352                                 vfw_pipe->counters->arpicmpPktCount++;
353
354                         } else
355                                 vfw_pipe->counters->
356                                         pkts_drop_unsupported_type++;
357
358                         return 0;
359                 }
360                 break;
361 #endif
362        default:
363               break;
364        }
365        return 1;
366 }
367
368 /**
369  * Performs basic VFW ipv4 packet filtering.
370  * @param pkts
371  *  A pointer to the packets.
372  * @param pkts_mask
373  *  packet mask.
374  * @param vfw_pipe
375  *  A pointer to VFW pipeline.
376  */
377
378 static uint64_t
379 rte_vfw_ipv4_packet_filter_and_process(struct rte_mbuf **pkts,
380                                  uint64_t pkts_mask,
381                                  struct pipeline_vfw *vfw_pipe)
382 {
383
384        /*
385         * Make use of cache prefetch. At beginning of loop, want to prefetch
386         * mbuf data for next iteration (not current one).
387         * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
388         * is 20 bytes (extensions not supported), while the IPv6 header is 40
389         * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
390         * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
391         * need two pre-fetches.
392         */
393
394        uint8_t pos, next_pos = 0;
395        uint64_t pkt_mask;       /* bitmask representing a single packet */
396        struct rte_mbuf *pkt;
397        struct rte_mbuf *next_pkt = NULL;
398        struct ipv4_hdr *ihdr4;
399        void *next_iphdr = NULL;
400
401        if (unlikely(pkts_mask == 0))
402               return pkts_mask;
403        pos = (uint8_t) __builtin_ctzll(pkts_mask);
404        pkt_mask = 1LLU << pos;       /* bitmask representing only this packet */
405        pkt = pkts[pos];
406
407        uint64_t bytes_processed = 0;
408        /* bitmap of packets left to process */
409        uint64_t pkts_to_process = pkts_mask;
410        /* bitmap of valid packets to return */
411        uint64_t valid_packets = pkts_mask;
412
413        rte_prefetch0(pkt);
414        /* prefetch counters, updated below. Most likely counters to update
415         * at beginning */
416        rte_prefetch0(vfw_pipe->counters);
417
418        do {                     /* always execute at least once */
419
420               /* remove this packet from remaining list */
421               uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
422
423               if (likely(next_pkts_to_process)) {
424                      /* another packet to process after this, prefetch it */
425
426                      next_pos =
427                             (uint8_t) __builtin_ctzll(next_pkts_to_process);
428                      next_pkt = pkts[next_pos];
429                      next_iphdr = RTE_MBUF_METADATA_UINT32_PTR(next_pkt,
430                                    IP_START);
431                      rte_prefetch0(next_iphdr);
432               }
433
434               int discard = 0;
435               /* remove this packet from remaining list */
436               pkts_to_process &= ~pkt_mask;
437
438               if (enable_hwlb) {
439                       if (!check_arp_icmp(pkt, vfw_pipe)) {
440                               /* make next packet data the current */
441                               pkts_to_process = next_pkts_to_process;
442                               pos = next_pos;
443                               pkt = next_pkt;
444                               ihdr4 = next_iphdr;
445                               pkt_mask = 1LLU << pos;
446                               valid_packets &= ~pkt_mask;
447                               continue;
448                      }
449               }
450
451               uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
452
453               bytes_processed += packet_length;
454
455               ihdr4 = (struct ipv4_hdr *)
456                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
457
458               /* verify that packet size according to mbuf is at least
459                * as large as the size according to the IP header.
460                */
461
462               uint32_t ip_length = rte_bswap16(ihdr4->total_length);
463
464               if (unlikely
465                             (ip_length > (packet_length - ETH_HDR_SIZE))) {
466                      discard = 1;
467                      vfw_pipe->counters->pkts_drop_bad_size++;
468               }
469
470               /*
471                * IPv4 fragmented if: MF (more fragments) or Fragment
472                * Offset are non-zero. Header in Intel order, so flip
473                * constant to compensate. Note that IPv6 uses a header
474                * extension for identifying fragments.
475                */
476
477               int fragmented = (ihdr4->fragment_offset & 0xff3f) != 0;
478               uint8_t ttl = ihdr4->time_to_live;
479
480               if (unlikely(fragmented)) {
481                      discard = 1;
482                      vfw_pipe->counters->pkts_drop_fragmented++;
483               }
484
485               if (unlikely(ttl <= 1)) {
486                      /*
487                       * about to decrement to zero (or is somehow
488                       * already zero), so discard
489                       */
490                      discard = 1;
491                      vfw_pipe->counters->pkts_drop_ttl++;
492               }
493
494               /*
495                * Drop packets that are not TCP, UDP or ICMP.
496                */
497
498               uint8_t proto = ihdr4->next_proto_id;
499
500               if (unlikely(!(proto == IP_TCP_PROTOCOL ||
501                                           proto == IP_UDP_PROTOCOL ||
502                                           proto == IP_ICMP_PROTOCOL))) {
503                      discard = 1;
504                      vfw_pipe->counters->
505                             pkts_drop_unsupported_type++;
506               }
507
508               if (unlikely(discard)) {
509                      valid_packets &= ~pkt_mask;
510               }
511
512               /* make next packet data the current */
513               pkts_to_process = next_pkts_to_process;
514               pos = next_pos;
515               pkt = next_pkt;
516               ihdr4 = next_iphdr;
517               pkt_mask = 1LLU << pos;
518
519        } while (pkts_to_process);
520
521        /* finalize counters, etc. */
522        vfw_pipe->counters->bytes_processed += bytes_processed;
523
524        if (likely(firewall_flag))
525               return valid_packets;
526        else
527               return pkts_mask;
528 }
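
/*
 * Note on the return value: the bitmap returned above becomes the caller's
 * new pkts_mask. Bits are cleared for packets dropped here (bad size,
 * fragmented, TTL expired, unsupported protocol) and for packets already
 * diverted by check_arp_icmp(); if firewall processing is disabled
 * (firewall_flag == 0), the original mask is returned unchanged.
 */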
529 /**
530  * Performs basic VFW IPV6 packet filtering.
531  * @param pkts
532  *  A pointer to the packets.
533  * @param pkts_mask
534  *  packet mask.
535  * @param vfw_pipe
536  *  A pointer to VFW pipeline.
537  */
538 static uint64_t
539 rte_vfw_ipv6_packet_filter_and_process(struct rte_mbuf **pkts,
540               uint64_t pkts_mask,
541               struct pipeline_vfw *vfw_pipe)
542 {
543
544        /*
545         * Make use of cache prefetch. At beginning of loop, want to prefetch
546         * mbuf data for next iteration (not current one).
547         * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
548         * is 20 bytes (extensions not supported), while the IPv6 header is 40
549         * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
550         * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
551         * need two pre-fetches.
552         */
553
554        uint8_t pos, next_pos = 0;
555        uint64_t pkt_mask;       /* bitmask representing a single packet */
556        struct rte_mbuf *pkt;
557        struct rte_mbuf *next_pkt = NULL;
558        struct ipv6_hdr *ihdr6;
559        void *next_iphdr = NULL;
560
561        if (unlikely(pkts_mask == 0))
562               return pkts_mask;
563        pos = (uint8_t) __builtin_ctzll(pkts_mask);
564        pkt_mask = 1LLU << pos;       /* bitmask representing only this packet */
565        pkt = pkts[pos];
566
567        uint64_t bytes_processed = 0;
568        /* bitmap of packets left to process */
569        uint64_t pkts_to_process = pkts_mask;
570        /* bitmap of valid packets to return */
571        uint64_t valid_packets = pkts_mask;
572
573        /* prefetch counters, updated below. Most likely counters to update
574         * at beginning */
575        rte_prefetch0(vfw_pipe->counters);
576
577        do {                     /* always execute at least once */
578
579               /* remove this packet from remaining list */
580               uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
581
582               if (likely(next_pkts_to_process)) {
583                      /* another packet to process after this, prefetch it */
584
585                      next_pos =
586                          (uint8_t) __builtin_ctzll(next_pkts_to_process);
587                      next_pkt = pkts[next_pos];
588                      next_iphdr =
589                          RTE_MBUF_METADATA_UINT32_PTR(next_pkt, IP_START);
590                      rte_prefetch0(next_iphdr);
591               }
592
593               int discard = 0;
594               /* remove this packet from remaining list */
595               pkts_to_process &= ~pkt_mask;
596
597               if (enable_hwlb) {
598                      if (!check_arp_icmp(pkt, vfw_pipe)) {
599                              /* make next packet data the current */
600                              pkts_to_process = next_pkts_to_process;
601                              pos = next_pos;
602                              pkt = next_pkt;
603                              ihdr6 = next_iphdr;
604                              pkt_mask = 1LLU << pos;
605                              valid_packets &= ~pkt_mask;
606                              continue;
607                      }
608               }
609
610               uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
611
612               bytes_processed += packet_length;
613
614               ihdr6 = (struct ipv6_hdr *)
615                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
616
617               /*
618                * verify that packet size according to mbuf is at least
619                * as large as the size according to the IP header.
620                * For IPv6, note that size includes header extensions
621                * but not the base header size
622                */
623
624               uint32_t ip_length =
625                      rte_bswap16(ihdr6->payload_len) + IPv6_HEADER_SIZE;
626
627               if (unlikely
628                             (ip_length > (packet_length - ETH_HDR_SIZE))) {
629                      discard = 1;
630                      vfw_pipe->counters->pkts_drop_bad_size++;
631               }
632
633               /*
634                * Drop packets that are not TCP, UDP or ICMP.
635                */
636
637               uint8_t proto = ihdr6->proto;
638
639               if (unlikely(!(proto == IP_TCP_PROTOCOL ||
640                                           proto == IP_UDP_PROTOCOL ||
641                                           proto == IP_ICMP_PROTOCOL))) {
642                      discard = 1;
643                      if (proto == IPv6_FRAGMENT_HEADER)
644                             vfw_pipe->counters->
645                                    pkts_drop_fragmented++;
646                      else
647                             vfw_pipe->counters->
648                                    pkts_drop_unsupported_type++;
649               }
650
651               /*
652                * Behave like a router, and decrement the TTL of an
653                * IP packet. If this causes the TTL to become zero,
654                * the packet will be discarded. Unlike a router,
655                * no ICMP "Time Exceeded" message will be
656                * sent back to the packet originator.
657                */
658
659               if (unlikely(ihdr6->hop_limits <= 1)) {
660                      /*
661                       * about to decrement to zero (or is somehow
662                       * already zero), so discard
663                       */
664                      discard = 1;
665                      vfw_pipe->counters->pkts_drop_ttl++;
666               }
667
668               if (unlikely(discard))
669                      valid_packets &= ~pkt_mask;
670               else
671                      ihdr6->hop_limits--;
672
673               /* make next packet data the current */
674               pkts_to_process = next_pkts_to_process;
675               pos = next_pos;
676               pkt = next_pkt;
677               ihdr6 = next_iphdr;
678               pkt_mask = 1LLU << pos;
679
680        } while (pkts_to_process);
681
682        /* finalize counters, etc. */
683        vfw_pipe->counters->bytes_processed += bytes_processed;
684
685        if (likely(firewall_flag))
686               return valid_packets;
687        else
688               return pkts_mask;
689 }
690
691 /**
692  * Exchange the MAC addresses so the source becomes the destination and vice versa.
693  *
694  * @param ehdr
695  *  A pointer to the ethernet header.
696  *
697  */
698 static inline void rte_sp_exchange_mac_addresses(struct ether_hdr *ehdr)
699 {
700        struct ether_addr saved_copy;
701
702        ether_addr_copy(&ehdr->d_addr, &saved_copy);
703        ether_addr_copy(&ehdr->s_addr, &ehdr->d_addr);
704        ether_addr_copy(&saved_copy, &ehdr->s_addr);
705 }
706 #ifdef EN_SWP_ARP
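/*
 * Note: EN_SWP_ARP is commented out at the top of this file, so the
 * pkt4_work/pkt_work ARP and ND handlers in this #ifdef block are compiled
 * out by default; the vfw_fwd_pkts_ipv4()/vfw_fwd_pkts_ipv6() routines in
 * the #else branch below are the active forwarding path.
 */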
707
708 /**
709  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
710  * To support synproxy, some (altered) packets may need to be sent back where
711  * they came from. The ip header has already been adjusted, but the ethernet
712  * header has not, so this must be performed here.
713  * Return an updated pkts_mask, since arp may drop some packets
714  *
715  * @param pkts
716  *  A pointer to the packet array.
717  * @param pkt_num
718  *  Packet num to start processing
719  * @param pkts_mask
720  *  Packet mask
721  * @param synproxy_reply_mask
722  *  Reply Packet mask for Synproxy
723  * @param vfw_pipe
724  *  A pointer to VFW pipeline.
725  */
726 static void
727 pkt4_work_vfw_arp_ipv4_packets(struct rte_mbuf **pkts,
728               uint16_t pkt_num,
729               uint64_t *pkts_mask,
730               uint64_t synproxy_reply_mask,
731               struct pipeline_vfw *vfw_pipe)
732 {
733
734        uint8_t i;
735
736        struct mbuf_tcp_meta_data *meta_data_addr;
737        struct ether_hdr *ehdr;
738        struct rte_mbuf *pkt;
739
740        for (i = 0; i < 4; i++) {
741               uint32_t dest_if = INVALID_DESTIF;
742               /* bitmask representing only this packet */
743               uint64_t pkt_mask = 1LLU << (pkt_num + i);
744
745               pkt = pkts[i];
746
747               if(!(*pkts_mask & pkt_mask))
748                      continue;
749
750               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
751
752               meta_data_addr = (struct mbuf_tcp_meta_data *)
753                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
754               ehdr = rte_vfw_get_ether_addr(pkt);
755
756
757               struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
758                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
759               uint32_t nhip = 0;
760
761               uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
762               if (must_reverse)
763                      rte_sp_exchange_mac_addresses(ehdr);
764
765               struct arp_entry_data *ret_arp_data = NULL;
766               ret_arp_data = get_dest_mac_addr_port(dest_address,
767                             &dest_if, &ehdr->d_addr);
768               meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
769
770               if (arp_cache_dest_mac_present(dest_if)) {
771                      ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
772                      update_nhip_access(dest_if);
773                      if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
774                             arp_send_buffered_pkts(ret_arp_data,
775                                    &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
776
777                      }
778
779               } else {
780                      if (unlikely(ret_arp_data == NULL)) {
781                             if (VFW_DEBUG)
782                                    printf("%s: NHIP Not Found, nhip:%x , "
783                                           "outport_id: %d\n", __func__, nhip,
784                                           vfw_pipe->outport_id[dest_if]);
785
786                             /* Drop the pkt */
787                             vfw_pipe->counters->
788                                    pkts_drop_without_arp_entry++;
789                             continue;
790                      }
791                      if (ret_arp_data->status == INCOMPLETE ||
792                                    ret_arp_data->status == PROBE) {
793                             if (ret_arp_data->num_pkts >= NUM_DESC) {
794                                    /* ICMP req sent, drop packet by
795                                     * changing the mask */
796                                    vfw_pipe->counters->
797                                           pkts_drop_without_arp_entry++;
798                                    continue;
799                             } else {
800                                    /* buffer the packet until ARP resolves */
801                                    arp_pkts_mask |= pkt_mask;
802                                    arp_queue_unresolved_packet(ret_arp_data, pkt);
803                                    continue;
804                             }
805                      }
806               }
807        }
808 }
809
810
811 /**
812  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
813  * To support synproxy, some (altered) packets may need to be sent back where
814  * they came from. The ip header has already been adjusted, but the ethernet
815  * header has not, so this must be performed here.
816  * Return an updated pkts_mask, since arp may drop some packets
817  *
818  * @param pkts
819  *  A pointer to the packet.
820  * @param packet_num
821  *  Packet number to process
822  * @param pkts_mask
823  *  Packet mask pointer
824  * @param synproxy_reply_mask
825  *  Reply Packet mask for Synproxy
826  * @param vfw_pipe
827  *  A pointer to VFW pipeline.
828  */
829 static void
830 pkt_work_vfw_arp_ipv4_packets(struct rte_mbuf *pkts,
831               uint16_t pkt_num,
832               uint64_t *pkts_mask,
833               uint64_t synproxy_reply_mask,
834               struct pipeline_vfw *vfw_pipe)
835 {
836
837        uint32_t dest_if = INVALID_DESTIF;
838
839        struct mbuf_tcp_meta_data *meta_data_addr;
840        struct ether_hdr *ehdr;
841        struct rte_mbuf *pkt;
842        uint64_t pkt_mask = 1LLU << pkt_num;
843
844        pkt = pkts;
845
846        if(*pkts_mask & pkt_mask) {
847
848               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
849
850               meta_data_addr = (struct mbuf_tcp_meta_data *)
851                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
852               ehdr = rte_vfw_get_ether_addr(pkt);
853
854
855               struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
856                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
857               uint32_t nhip = 0;
858
859               uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
860               if (must_reverse)
861                      rte_sp_exchange_mac_addresses(ehdr);
862
863               struct arp_entry_data *ret_arp_data = NULL;
864               ret_arp_data = get_dest_mac_addr_port(dest_address,
865                             &dest_if, &ehdr->d_addr);
866               meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
867
868               if (arp_cache_dest_mac_present(dest_if)) {
869
870                      ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
871                      update_nhip_access(dest_if);
872                      if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
873                             arp_send_buffered_pkts(ret_arp_data,
874                                    &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
875
876                      }
877               } else {
878                      if (unlikely(ret_arp_data == NULL)) {
879
880                             if (VFW_DEBUG)
881                                    printf("%s: NHIP Not Found, nhip:%x , "
882                                           "outport_id: %d\n", __func__, nhip,
883                                           vfw_pipe->outport_id[dest_if]);
884
885                             vfw_pipe->counters->
886                                    pkts_drop_without_arp_entry++;
887                             return;
888                      }
889                      if (ret_arp_data->status == INCOMPLETE ||
890                                    ret_arp_data->status == PROBE) {
891                             if (ret_arp_data->num_pkts >= NUM_DESC) {
892                                    /* ICMP req sent, drop packet by
893                                     * changing the mask */
894                                    vfw_pipe->counters->
895                                           pkts_drop_without_arp_entry++;
896                                    return;
897                             } else {
898                                    arp_pkts_mask |= pkt_mask;
899                                    arp_queue_unresolved_packet(ret_arp_data, pkt);
900                                    return;
901                             }
902                      }
903               }
904
905        }
906 }
907
908
909 /**
910  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
911  * To support synproxy, some (altered) packets may need to be sent back where
912  * they came from. The ip header has already been adjusted, but the ethernet
913  * header has not, so this must be performed here.
914  * Return an updated pkts_mask, since arp may drop some packets
915  *
916  * @param pkts
917  *  A pointer to the packets array.
918  * @param pkt_num
919  *  Packet number to start processing.
920  * @param pkts_mask
921  *  Packet mask pointer
922  * @param synproxy_reply_mask
923  *  Reply Packet mask for Synproxy
924  * @param vfw_pipe
925  *  A pointer to VFW pipeline.
926  */
927
928 static void
929 pkt4_work_vfw_arp_ipv6_packets(struct rte_mbuf **pkts,
930               uint16_t pkt_num,
931               uint64_t *pkts_mask,
932               uint64_t synproxy_reply_mask,
933               struct pipeline_vfw *vfw_pipe)
934 {
935        uint8_t nh_ipv6[IPV6_ADD_SIZE];
936        struct ether_addr hw_addr;
937        struct mbuf_tcp_meta_data *meta_data_addr;
938        struct ether_hdr *ehdr;
939        struct rte_mbuf *pkt;
940        uint8_t i;
941
942        for (i = 0; i < 4; i++) {
943               uint32_t dest_if = INVALID_DESTIF;
944               /* bitmask representing only this packet */
945               uint64_t pkt_mask = 1LLU << (pkt_num + i);
946
947               pkt = pkts[i];
948
949               if(!(*pkts_mask & pkt_mask))
950                      continue;
951               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
952
953               meta_data_addr = (struct mbuf_tcp_meta_data *)
954                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
955               ehdr = rte_vfw_get_ether_addr(pkt);
956
957               struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
958                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
959
960               uint8_t nhip[IPV6_ADD_SIZE];
961               uint8_t dest_address[IPV6_ADD_SIZE];
962
963               memset(nhip, 0, IPV6_ADD_SIZE);
964               if (must_reverse)
965                      rte_sp_exchange_mac_addresses(ehdr);
966
967               rte_mov16(dest_address, ihdr->dst_addr);
968               memset(nh_ipv6, 0, IPV6_ADD_SIZE);
969               struct nd_entry_data *ret_nd_data = NULL;
970               ret_nd_data = get_dest_mac_address_ipv6_port(
971                                    &dest_address[0],
972                                    &dest_if,
973                                    &hw_addr,
974                                    &nh_ipv6[0]);
975
976                 meta_data_addr->output_port = vfw_pipe->
977                                     outport_id[dest_if];
978               if (nd_cache_dest_mac_present(dest_if)) {
979                     ether_addr_copy(get_link_hw_addr(dest_if),
980                                    &ehdr->s_addr);
981                     update_nhip_access(dest_if);
982
983                     if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
984                         nd_send_buffered_pkts(ret_nd_data,
985                                 &ehdr->d_addr, meta_data_addr->output_port);
986                     }
987               } else {
988                     if (unlikely(ret_nd_data == NULL)) {
989                          *pkts_mask &= ~pkt_mask;
990                           vfw_pipe->counters->
991                                 pkts_drop_without_arp_entry++;
992                           continue;
993                     }
994                     if (ret_nd_data->status == INCOMPLETE ||
995                           ret_nd_data->status == PROBE) {
996                           if (ret_nd_data->num_pkts >= NUM_DESC) {
997                                 /* Drop the pkt */
998                                 *pkts_mask &= ~pkt_mask;
999                                 vfw_pipe->counters->
1000                                         pkts_drop_without_arp_entry++;
1001                                 continue;
1002                           } else {
1003                                 arp_pkts_mask |= pkt_mask;
1004                                 nd_queue_unresolved_packet(ret_nd_data, pkt);
1005                                 continue;
1006                           }
1007                     }
1008               }
1009
1010        }
1011 }
1012
1013
1014 /**
1015  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1016  * To support synproxy, some (altered) packets may need to be sent back where
1017  * they came from. The ip header has already been adjusted, but the ethernet
1018  * header has not, so this must be performed here.
1019  * Return an updated pkts_mask, since arp may drop some packets
1020  *
1021  * @param pkts
1022  *  A pointer to the packets.
1023  * @param pkt_num
1024  *  Packet number to process.
1025  * @param pkts_mask
1026  *  Packet mask pointer
1027  * @param synproxy_reply_mask
1028  *  Reply Packet mask for Synproxy
1029  * @param vfw_pipe
1030  *  A pointer to VFW pipeline.
1031  */
1032
1033 static void
1034 pkt_work_vfw_arp_ipv6_packets(struct rte_mbuf *pkts,
1035               uint16_t pkt_num,
1036               uint64_t *pkts_mask,
1037               uint64_t synproxy_reply_mask,
1038               struct pipeline_vfw *vfw_pipe)
1039 {
1040        uint8_t nh_ipv6[IPV6_ADD_SIZE];
1041        struct ether_addr hw_addr;
1042        struct mbuf_tcp_meta_data *meta_data_addr;
1043        struct ether_hdr *ehdr;
1044        struct rte_mbuf *pkt;
1045
1046        uint32_t dest_if = INVALID_DESTIF;
1047        /* bitmask representing only this packet */
1048        uint64_t pkt_mask = 1LLU << pkt_num;
1049
1050        pkt = pkts;
1051
1052        if(*pkts_mask & pkt_mask) {
1053
1054               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1055
1056               meta_data_addr = (struct mbuf_tcp_meta_data *)
1057                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1058               ehdr = rte_vfw_get_ether_addr(pkt);
1059
1060               struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
1061                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1062
1063               uint8_t nhip[IPV6_ADD_SIZE];
1064               uint8_t dest_address[IPV6_ADD_SIZE];
1065
1066               memset(nhip, 0, IPV6_ADD_SIZE);
1067               if (must_reverse)
1068                      rte_sp_exchange_mac_addresses(ehdr);
1069               rte_mov16(dest_address, ihdr->dst_addr);
1070               memset(nh_ipv6, 0, IPV6_ADD_SIZE);
1071               struct nd_entry_data *ret_nd_data = NULL;
1072               ret_nd_data = get_dest_mac_address_ipv6_port(
1073                                    &dest_address[0],
1074                                    &dest_if,
1075                                    &hw_addr,
1076                                    &nh_ipv6[0]);
1077               meta_data_addr->output_port = vfw_pipe->
1078                                     outport_id[dest_if];
1079               if (nd_cache_dest_mac_present(dest_if)) {
1080                      ether_addr_copy(get_link_hw_addr(dest_if),
1081                                    &ehdr->s_addr);
1082                     update_nhip_access(dest_if);
1083
1084                     if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
1085                         nd_send_buffered_pkts(ret_nd_data,
1086                                 &ehdr->d_addr, meta_data_addr->output_port);
1087                      }
1088               } else {
1089                     if (unlikely(ret_nd_data == NULL)) {
1090                         *pkts_mask &= ~pkt_mask;
1091                         vfw_pipe->counters->
1092                                 pkts_drop_without_arp_entry++;
1093                         return;
1094                     }
1095                     if (ret_nd_data->status == INCOMPLETE ||
1096                           ret_nd_data->status == PROBE) {
1097                           if (ret_nd_data->num_pkts >= NUM_DESC) {
1098                                 /* Drop the pkt */
1099                                 *pkts_mask &= ~pkt_mask;
1100                                 vfw_pipe->counters->
1101                                     pkts_drop_without_arp_entry++;
1102                                 return;
1103                           } else {
1104                                 arp_pkts_mask |= pkt_mask;
1105                                 nd_queue_unresolved_packet(ret_nd_data, pkt);
1106                                 return;
1107                           }
1108                     }
1109               }
1110
1111        }
1112
1113 }
1114
1115 #else
1116
1117 /**
1118  * walk every valid mbuf (denoted by pkts_mask) and forward the packet.
1119  * To support synproxy, some (altered) packets may need to be sent back where
1120  * they came from. The ip header has already been adjusted, but the ethernet
1121  * header has not, so this must be performed here.
1122  * Return an updated pkts_mask and arp_hijack_mask since arp may drop some packets
1123  *
1124  * @param pkts
1125  *  A pointer to the packet array.
1126  * @param pkts_mask
1127  *  Packets mask to be processed
1128  * @param arp_hijack_mask
1129  *  Packets to be hijacked for arp buffering
1130  * @param vfw_pipe
1131  *  A pointer to VFW pipeline.
1132  */
1133 static void vfw_fwd_pkts_ipv4(struct rte_mbuf **pkts, uint64_t *pkts_mask,
1134                 uint64_t *arp_hijack_mask, struct pipeline_vfw *vfw_pipe)
1135 {
1136         uint64_t pkts_to_arp = *pkts_mask;
1137
1138         for (; pkts_to_arp;) {
1139
1140                 struct mbuf_tcp_meta_data *meta_data_addr;
1141                 struct ether_hdr *ehdr;
1142                 struct rte_mbuf *pkt;
1143                 uint32_t src_phy_port;
1144
1145                 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1146                 /* bitmask representing only this packet */
1147                 uint64_t pkt_mask = 1LLU << pos;
1148                 /* remove this packet from remaining list */
1149                 pkts_to_arp &= ~pkt_mask;
1150                 pkt = pkts[pos];
1151
1152                 if(VFW_DEBUG) {
1153                         printf("----------------\n");
1154                         print_pkt(pkt);
1155                 }
1156
1157                 meta_data_addr = (struct mbuf_tcp_meta_data *)
1158                         RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1159
1160                 ehdr = (struct ether_hdr *)
1161                         RTE_MBUF_METADATA_UINT32_PTR(pkt, ETHERNET_START);
1162
1163                 src_phy_port = pkt->port;
1164                 uint32_t dst_phy_port = INVALID_DESTIF;
1165
1166                 if(is_phy_port_privte(src_phy_port))
1167                     dst_phy_port = prv_to_pub_map[src_phy_port];
1168                 else
1169                     dst_phy_port = pub_to_prv_map[src_phy_port];
1170
1171
1172                 if(likely(is_gateway())){
1173                         struct ipv4_hdr *ipv4hdr = (struct ipv4_hdr *)
1174                                 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1175
1176                         /* Gateway Proc Starts */
1177
1178                         struct arp_entry_data *ret_arp_data = NULL;
1179                         struct ether_addr dst_mac;
1180                         uint32_t nhip = 0;
1181                         uint32_t dst_ip_addr = rte_bswap32(ipv4hdr->dst_addr);
1182
1183                         gw_get_route_nh_port_ipv4(dst_ip_addr, &dst_phy_port, &nhip, dst_phy_port);
1184
1185                         ret_arp_data = get_dest_mac_addr_ipv4(nhip, dst_phy_port, &dst_mac);
1186
1187                         /* Gateway Proc Ends */
1188
1189                         if (likely(arp_cache_dest_mac_present(dst_phy_port))) {
1190
1191                                 ether_addr_copy(&dst_mac, &ehdr->d_addr);
1192                                 ether_addr_copy(get_link_hw_addr(dst_phy_port), &ehdr->s_addr);
1193
1194                                 meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
1195
1196                                 update_nhip_access(dst_phy_port);
1197
1198                                 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
1199
1200                                         arp_send_buffered_pkts(ret_arp_data, &ehdr->d_addr,
1201                                                         vfw_pipe->outport_id[dst_phy_port]);
1202                                 }
1203
1204                         } else {
1205                                 if (unlikely(ret_arp_data == NULL)) {
1206
1207                                         printf("NHIP Not Found\n");
1208
1209                                         /* Drop the pkt */
1210                                         vfw_pipe->counters->
1211                                                 pkts_drop_without_arp_entry++;
1212                                         continue;
1213                                 }
1214                                 if (ret_arp_data->status == INCOMPLETE ||
1215                                                 ret_arp_data->status == PROBE) {
1216                                         if (ret_arp_data->num_pkts >= NUM_DESC) {
1217                                                 /* ICMP req sent, drop packet by
1218                                                  * changing the mask */
1219                                                 vfw_pipe->counters->pkts_drop_without_arp_entry++;
1220                                                 continue;
1221                                         } else {
1222                                                 *arp_hijack_mask |= pkt_mask;
1223                                                 arp_queue_unresolved_packet(ret_arp_data, pkt);
1224                                                 continue;
1225                                         }
1226                                 }
1227                         }
1228                 } else {
1229                         /* IP Pkt forwarding based on  pub/prv mapping */
1230                         meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
1231
1232                         if(VFW_DEBUG) {
1233                                 printf("IP_PKT_FWD: src_phy_port=%d, dst_phy_port=%d\n",
1234                                                 src_phy_port, dst_phy_port);
1235                         }
1236                 }
1237
1238                 if(VFW_DEBUG)
1239                         print_pkt(pkt);
1240         }
1241
1242 }
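
/*
 * Usage note (an assumption; the caller is not shown here): packets added
 * to *arp_hijack_mask have been queued by arp_queue_unresolved_packet()
 * and are now owned by the ARP buffering code, so the caller is expected
 * to clear those bits from its working mask (e.g.
 * pkts_mask &= ~arp_hijack_mask) before forwarding the burst.
 */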
1243
1244 /**
1245  * walk every valid mbuf (denoted by pkts_mask) and forward the packet.
1246  * To support synproxy, some (altered) packets may need to be sent back where
1247  * they came from. The ip header has already been adjusted, but the ethernet
1248  * header has not, so this must be performed here.
1249  * Return an updated pkts_mask and arp_hijack_mask since arp may drop some packets
1250  *
1251  * @param pkts
1252  *  A pointer to the packet array.
1253  * @param pkts_mask
1254  *  Packets mask to be processed
1255  * @param arp_hijack_mask
1256  *  Packets to be hijacked for arp buffering
1257  * @param vfw_pipe
1258  *  A pointer to VFW pipeline.
1259  */
1260 static void vfw_fwd_pkts_ipv6(struct rte_mbuf **pkts, uint64_t *pkts_mask,
1261                         uint64_t *arp_hijack_mask, struct pipeline_vfw *vfw_pipe)
1262 {
1263         uint64_t pkts_to_arp = *pkts_mask;
1264
1265         for (; pkts_to_arp;) {
1266
1267                 struct mbuf_tcp_meta_data *meta_data_addr;
1268                 struct ether_hdr *ehdr;
1269                 struct rte_mbuf *pkt;
1270                 uint32_t src_phy_port;
1271
1272                 struct nd_entry_data *ret_nd_data = NULL;
1273
1274                 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1275                 /* bitmask representing only this packet */
1276                 uint64_t pkt_mask = 1LLU << pos;
1277                 /* remove this packet from remaining list */
1278                 pkts_to_arp &= ~pkt_mask;
1279                 pkt = pkts[pos];
1280
1281                 if(VFW_DEBUG) {
1282                         printf("----------------\n");
1283                         print_pkt(pkt);
1284                 }
1285
1286                 meta_data_addr = (struct mbuf_tcp_meta_data *)
1287                         RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1288
1289                 ehdr = (struct ether_hdr *)
1290                         RTE_MBUF_METADATA_UINT32_PTR(pkt, ETHERNET_START);
1291
1292                 src_phy_port = pkt->port;
1293                 uint32_t dst_phy_port = INVALID_DESTIF;
1294
1295                 if(is_gateway()){
1296                         struct ipv6_hdr *ipv6hdr = (struct ipv6_hdr *)
1297                                 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1298
1299                         /* Gateway Proc Starts */
1300
1301                         struct ether_addr dst_mac;
1302                         uint32_t dst_phy_port = INVALID_DESTIF;
1303                         uint8_t nhipv6[IPV6_ADD_SIZE];
1304                         uint8_t dest_ipv6_address[IPV6_ADD_SIZE];
1305                         memset(nhipv6, 0, IPV6_ADD_SIZE);
1306                         src_phy_port = pkt->port;
1307                         rte_mov16(dest_ipv6_address, (uint8_t *)ipv6hdr->dst_addr);
1308
1309                         gw_get_nh_port_ipv6(dest_ipv6_address, &dst_phy_port, nhipv6);
1310
1311                         ret_nd_data = get_dest_mac_addr_ipv6(nhipv6, dst_phy_port, &dst_mac);
1312
1313                         /* Gateway Proc Ends */
1314
1315                         if (nd_cache_dest_mac_present(dst_phy_port)) {
1316
1317                                 ether_addr_copy(&dst_mac, &ehdr->d_addr);
1318                                 ether_addr_copy(get_link_hw_addr(dst_phy_port), &ehdr->s_addr);
1319
1320                                 meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
1321
1322                                 update_nhip_access(dst_phy_port);
1323
1324                                 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
1325                                         nd_send_buffered_pkts(ret_nd_data, &ehdr->d_addr,
1326                                                         vfw_pipe->outport_id[dst_phy_port]);
1327                                 }
1328
1329                         } else {
1330                                 if (unlikely(ret_nd_data == NULL)) {
1331
1332                                         printf("NHIP Not Found\n");
1333
1334                                         /* Drop the pkt */
1335                                         vfw_pipe->counters->pkts_drop_without_arp_entry++;
1336                                         continue;
1337                                 }
1338                                 if (ret_nd_data->status == INCOMPLETE ||
1339                                                 ret_nd_data->status == PROBE) {
1340                                         if (ret_nd_data->num_pkts >= NUM_DESC) {
1341                                                 /* ICMP req sent, drop packet by
1342                                                  * changing the mask */
1343                                                 vfw_pipe->counters->pkts_drop_without_arp_entry++;
1344                                                 continue;
1345                                         } else {
1346                                                 *arp_hijack_mask |= pkt_mask;
1347                                                 nd_queue_unresolved_packet(ret_nd_data, pkt);
1348                                                 continue;
1349                                         }
1350                                 }
1351                         }
1352
1353                 } else {
1354                         /* IP Pkt forwarding based on  pub/prv mapping */
1355                         if(is_phy_port_privte(src_phy_port))
1356                                 dst_phy_port = prv_to_pub_map[src_phy_port];
1357                         else
1358                                 dst_phy_port = pub_to_prv_map[src_phy_port];
1359
1360                         meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
1361
1362                         if(VFW_DEBUG) {
1363                                 printf("IP_PKT_FWD: src_phy_port=%d, dst_phy_port=%d\n",
1364                                                 src_phy_port, dst_phy_port);
1365                         }
1366                 }
1367                 if(VFW_DEBUG)
1368                         print_pkt(pkt);
1369         }
1370 }
1371
1372 #endif
1373 /**
1374  * Packets processing for connection tracking.
1375  *
1376  * @param vfw_pipe
1377  *  A pointer to the pipeline.
1378  * @param ct
1379  *  A pointer to the connection tracker.
1380  * @param pkts
1381  *  A pointer to a burst of packets.
1382  * @param packet_mask_in
1383  *  Input packets Mask.
1384  */
1385
1386 static uint64_t
1387 vfw_process_buffered_pkts(__rte_unused struct pipeline_vfw *vfw_pipe,
1388               struct rte_ct_cnxn_tracker *ct,
1389                           struct rte_mbuf **pkts, uint64_t packet_mask_in)
1390 {
1391        uint64_t keep_mask = packet_mask_in;
1392        struct rte_synproxy_helper sp_helper;       /* for synproxy */
1393
1394        keep_mask =
1395            rte_ct_cnxn_tracker_batch_lookup_with_synproxy(ct, pkts, keep_mask,
1396                                                     &sp_helper);
1397
1398        if (unlikely(sp_helper.hijack_mask))
1399               printf("buffered hijack pkts severe error\n");
1400
1401        if (unlikely(sp_helper.reply_pkt_mask))
1402               printf("buffered reply pkts severe error\n");
1403
1404        return keep_mask;
1405 }
1406
1407 /**
1408  * Free a packet mbuf and count it as dropped.
1409  *
1410  * @param ct
1411  *  A pointer to the connection tracker to increment drop counter.
1412  *
1413  * @param pkt
1414  *  Packet to be freed.
1415  */
1416 static inline void
1417 vfw_pktmbuf_free(struct rte_ct_cnxn_tracker *ct, struct rte_mbuf *pkt)
1418 {
1419        ct->counters->pkts_drop++;
1420        rte_pktmbuf_free(pkt);
1421 }
1422
1423 static void
1424 vfw_output_or_delete_buffered_packets(struct rte_ct_cnxn_tracker *ct,
1425                                     struct rte_pipeline *p,
1426                                     struct rte_mbuf **pkts,
1427                                     int num_pkts, uint64_t pkts_mask)
1428 {
1429        int i;
1430        struct mbuf_tcp_meta_data *meta_data_addr;
1431        uint64_t pkt_mask = 1;
1432
1433        /* any packet whose bit is clear in the low-order num_pkts
1434         * bits of pkts_mask must be discarded */
1435
1436        for (i = 0; i < num_pkts; i++) {
1437               struct rte_mbuf *pkt = pkts[i];
1438
1439               if (pkts_mask & pkt_mask) {
1440                      if (unlikely(VFW_DEBUG))
                             printf("vfw_output_or_delete_buffered_packets\n");
1441                      meta_data_addr = (struct mbuf_tcp_meta_data *)
1442                          RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1443                      rte_pipeline_port_out_packet_insert(
1444                                    p, meta_data_addr->output_port, pkt);
1445
1446               } else {
1447                      vfw_pktmbuf_free(ct, pkt);
1448               }
1449
1450               pkt_mask = pkt_mask << 1;
1451        }
1452 }
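/*
 * Kept packets bypass the table lookup stage: rte_pipeline_port_out_packet_insert()
 * places each mbuf directly on the output port recorded earlier in its
 * per-packet metadata (meta_data_addr->output_port).  This is the same field
 * that the stub table's RTE_PIPELINE_ACTION_PORT_META action reads for
 * packets taking the normal pipeline path.
 */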
1453
1454 /**
1455  * Handle packets buffered for synproxy.
1456  *
1457  * @param p
1458  *  A pointer to the pipeline.
1459  * @param vfw_pipe
1460  *  A pointer to the vfw pipeline.
1461  * @param ct
1462  *  A pointer to the connection tracker.
1463  * @param forward_pkts
1464  *  Non-zero to forward packets released by synproxy, zero to delete them.
1465  *
1466  */
1467 static void
1468 vfw_handle_buffered_packets(struct rte_pipeline *p,
1469                             struct pipeline_vfw *vfw_pipe,
1470                             struct rte_ct_cnxn_tracker *ct, int forward_pkts)
1471 {
1472        struct rte_mbuf *pkt_list = rte_ct_get_buffered_synproxy_packets(ct);
1473
1474        if (likely(pkt_list == NULL))       /* non-NULL only during proxy setup */
1475               return;
1476
1477        int pkt_count = 0;
1478        uint64_t keep_mask = 0;
1479        struct rte_mbuf **pkts = vfw_pipe->pkt_buffer;
1480        struct rte_mbuf *pkt;
1481
1482        while (pkt_list != NULL) {
1483               struct mbuf_tcp_meta_data *meta_data =
1484               (struct mbuf_tcp_meta_data *)
1485               RTE_MBUF_METADATA_UINT32_PTR(pkt_list, META_DATA_OFFSET);
1486
1487               /* detach head of list and advance list */
1488               pkt = pkt_list;
1489               pkt_list = meta_data->next;
1490
1491               if (forward_pkts) {
1492
1493                      pkts[pkt_count++] = pkt;
1494
1495                      if (pkt_count == PKT_BUFFER_SIZE) {
1496                             /* need to send out packets */
1497                             /* currently 0, set all bits */
1498                             keep_mask = ~keep_mask;
1499
1500                             keep_mask =
1501                                 vfw_process_buffered_pkts(vfw_pipe,
1502                                                          ct, pkts,
1503                                                          keep_mask);
1504                             vfw_output_or_delete_buffered_packets(
1505                                           ct, p,
1506                                           pkts,
1507                                           PKT_BUFFER_SIZE,
1508                                           keep_mask);
1509                             pkt_count = 0;
1510                             keep_mask = 0;
1511                      }
1512
1513               } else {
1514                      vfw_pktmbuf_free(ct, pkt);
1515               }
1516        }
1517
1518        if (pkt_count != 0) {
1519               /* need to send out packets */
1520               keep_mask = RTE_LEN2MASK(pkt_count, uint64_t);
1521
1522               keep_mask =
1523                      vfw_process_buffered_pkts(vfw_pipe, ct, pkts,
1524                                    keep_mask);
1525
1526               vfw_output_or_delete_buffered_packets(ct, p, pkts, pkt_count,
1527                             keep_mask);
1528
1529               pkt_count = 0;
1530               keep_mask = 0;
1531        }
1532 }
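/*
 * Buffered packets are chained through the "next" pointer kept in each mbuf's
 * metadata (struct mbuf_tcp_meta_data).  The routine above detaches packets
 * one at a time and either frees them or accumulates them in
 * vfw_pipe->pkt_buffer, flushing through connection tracking whenever a full
 * batch of PKT_BUFFER_SIZE packets is collected and once more for the
 * remainder at the end.
 */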
1533 /**
1534  * The pipeline port-in action is used to do all the firewall and
1535  * connection tracking work for IPV4 packets.
1536  *
1537  * @param p
1538  *  A pointer to the pipeline.
1539  * @param pkts
1540  *  A pointer to a burst of packets.
1541  * @param n_pkts
1542  *  Number of packets to process.
1543  * @param arg
1544  *  A pointer to pipeline specific data.
1545  *
1546  * @return
1547  *  0 on success, negative on error.
1548  */
1549
1550 static int
1551 vfw_port_in_action_ipv4(struct rte_pipeline *p,
1552               struct rte_mbuf **pkts,
1553               uint32_t n_pkts, void *arg)
1554 {
1555        struct vfw_ports_in_args *port_in_args =
1556               (struct vfw_ports_in_args *)arg;
1557        struct pipeline_vfw *vfw_pipe =
1558               (struct pipeline_vfw *)port_in_args->pipe;
1559        struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
1560
1561        start_tsc_measure(vfw_pipe);
1562
1563        uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
1564        uint64_t pkts_drop_mask;
1565        uint64_t synp_hijack_mask = 0;
1566        uint64_t arp_hijack_mask = 0;
1567 //       uint64_t synproxy_reply_mask;       /* for synproxy */
1568        uint64_t keep_mask = packet_mask_in;
1569
1570        uint64_t conntrack_mask = 0, connexist_mask = 0;
1571        struct rte_CT_helper ct_helper;
1572        uint8_t j;
1573
1574        /*
1575         * This routine uses a bit mask to represent which packets in the
1576         * "pkts" table are considered valid. Any table entry which exists
1577         * and is considered valid has the corresponding bit in the mask set.
1578         * Otherwise, it is cleared. Note that the mask is 64 bits,
1579         * but the number of packets in the table may be considerably less.
1580         * Any mask bits which do not correspond to actual packets are cleared.
1581         * Various routines are called which may determine that an existing
1582         * packet is somehow invalid. The routine will return an altered bit
1583         * mask, with the bit cleared. At the end of all the checks,
1584         * packets are dropped if their mask bit is a zero
1585         */
1586
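       /*
        * Worked example (illustrative): with n_pkts = 5, packet_mask_in =
        * RTE_LEN2MASK(5, uint64_t) = 0x1F, i.e. bits 0..4 set.  If the ACL
        * stage clears bit 2, keep_mask becomes 0x1B and pkts[2] is dropped
        * at the end via rte_pipeline_ah_packet_drop().
        */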
1587        rte_prefetch0(& vfw_pipe->counters);
1588
1589 #ifdef EN_SWP_ACL
1590        /* Pre-fetch all rte_mbuf header */
1591        for(j = 0; j < n_pkts; j++)
1592               rte_prefetch0(pkts[j]);
1593 #endif
1594        memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
1595 #ifdef EN_SWP_ACL
1596        rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
1597        rte_prefetch0(& vfw_pipe->counters->sum_latencies);
1598 #endif
1599
1600        if (unlikely(vfw_debug > 1))
1601               printf("Enter in-port action IPV4 with %p packet mask\n",
1602                             (void *)packet_mask_in);
1603        vfw_pipe->counters->pkts_received =
1604               vfw_pipe->counters->pkts_received + n_pkts;
1605
1606        if (unlikely(VFW_DEBUG))
1607               printf("vfw_port_in_action_ipv4 pkts_received: %" PRIu64
1608                             " n_pkts: %u\n",
1609                             vfw_pipe->counters->pkts_received, n_pkts);
1610
1611        /* first handle any previously buffered packets now released */
1612        vfw_handle_buffered_packets(p, vfw_pipe, ct,
1613                      FORWARD_BUFFERED_PACKETS);
1614
1615        /* now handle any new packets on input ports */
1616        if (likely(firewall_flag)) {
1617               keep_mask = rte_vfw_ipv4_packet_filter_and_process(pkts,
1618                             keep_mask, vfw_pipe);
1619               vfw_pipe->counters->pkts_fw_forwarded +=
1620                      __builtin_popcountll(keep_mask);
1621        }
1622 #ifdef ACL_ENABLE
1623 #ifdef EN_SWP_ACL
1624        rte_prefetch0((void*)vfw_pipe->plib_acl);
1625        rte_prefetch0((void*)vfw_rule_table_ipv4_active);
1626 #endif /* EN_SWP_ACL */
1627        keep_mask = lib_acl_ipv4_pkt_work_key(
1628                      vfw_pipe->plib_acl, pkts, keep_mask,
1629                      &vfw_pipe->counters->pkts_drop_without_rule,
1630                      vfw_rule_table_ipv4_active,
1631                      action_array_active,
1632                      action_counter_table,
1633                      &conntrack_mask, &connexist_mask);
1634        vfw_pipe->counters->pkts_acl_forwarded +=
1635               __builtin_popcountll(keep_mask);
1636        if (conntrack_mask > 0) {
1637               keep_mask = conntrack_mask;
1638               ct_helper.no_new_cnxn_mask = connexist_mask;
1639               cnxn_tracking_is_active = 1;
1640        } else
1641               cnxn_tracking_is_active = 0;
1642 #endif /* ACL_ENABLE */
1643
1644        if (likely(cnxn_tracking_is_active)) {
1645               rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
1646                             &keep_mask, &ct_helper, IPv4_HEADER_SIZE);
1647 //              synproxy_reply_mask = ct_helper.reply_pkt_mask;
1648               synp_hijack_mask = ct_helper.hijack_mask;
1649
1650        }
1651
1652 #ifdef EN_SWP_ARP
1653        for(j = 0; j < (n_pkts & 0x3LLU); j++) {
1654                rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1655                                    META_DATA_OFFSET));
1656                rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1657                                    ETHERNET_START));
1658        }
1659        rte_prefetch0((void*)in_port_dir_a);
1660        rte_prefetch0((void*)prv_to_pub_map);
1661
1662        uint8_t i;
1663        for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
1664               for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
1665                      rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1666                                           META_DATA_OFFSET));
1667                      rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1668                                           ETHERNET_START));
1669               }
1670               pkt4_work_vfw_arp_ipv4_packets(&pkts[i], i, &keep_mask,
1671                             synproxy_reply_mask, vfw_pipe);
1672        }
1673        for (j = i; j < n_pkts; j++) {
1674               rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1675                                    META_DATA_OFFSET));
1676               rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1677                                    ETHERNET_START));
1678        }
1679        for (; i < n_pkts; i++) {
1680               pkt_work_vfw_arp_ipv4_packets(pkts[i], i, &keep_mask,
1681                             synproxy_reply_mask, vfw_pipe);
1682        }
1683 #else
1684        rte_prefetch0((void*)in_port_dir_a);
1685        rte_prefetch0((void*)prv_to_pub_map);
1686
1687         vfw_fwd_pkts_ipv4(pkts, &keep_mask, &arp_hijack_mask, vfw_pipe);
1688
1689 #endif
1690
1691        if (vfw_debug > 1) {
1692               printf("  Exit in-port action with %p packet mask\n",
1693                             (void *)keep_mask);
1694               if (keep_mask != packet_mask_in)
1695                      printf("dropped packets, %p in, %p out\n",
1696                                    (void *)packet_mask_in,
1697                                    (void *)keep_mask);
1698        }
1699
1700            /* Hijack the Synproxy and ARP buffered packets */
1701
1702        if (unlikely(arp_hijack_mask || synp_hijack_mask)) {
1703
1704 //                printf("Pkts hijacked arp = %lX, synp = %lX\n",
1705 //                                    arp_hijack_mask, synp_hijack_mask);
1706
1707                 rte_pipeline_ah_packet_hijack(p,(arp_hijack_mask | synp_hijack_mask));
1708         }
1709
1710        pkts_drop_mask = packet_mask_in & ~keep_mask;
1711
1712        if (unlikely(pkts_drop_mask != 0)) {
1713               /* printf("drop %p\n", (void *) pkts_drop_mask); */
1714               rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1715        }
1716
1717        vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
1718        vfw_pipe->counters->num_pkts_measurements++;
1719
1720        end_tsc_measure(vfw_pipe, n_pkts);
1721
1722        return 0;
1723 }
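/*
 * Summary of the IPv4 in-port action above: previously buffered synproxy
 * packets are flushed first, then new packets pass through the basic firewall
 * filter, the optional ACL stage, connection tracking (with synproxy), and
 * finally ARP/forwarding resolution.  Packets queued for ARP resolution or
 * held by synproxy are hijacked from the pipeline; any packet whose keep_mask
 * bit was cleared along the way is dropped.
 */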
1724 /**
1725  * The pipeline port-in action is used to do all the firewall and
1726  * connection tracking work for IPV6 packets.
1727  *
1728  * @param p
1729  *  A pointer to the pipeline.
1730  * @param pkts
1731  *  A pointer to a burst of packets.
1732  * @param n_pkts
1733  *  Number of packets to process.
1734  * @param arg
1735  *  A pointer to pipeline specific data.
1736  *
1737  * @return
1738  *  0 on success, negative on error.
1739  */
1740
1741 static int
1742 vfw_port_in_action_ipv6(struct rte_pipeline *p,
1743               struct rte_mbuf **pkts,
1744               uint32_t n_pkts, void *arg)
1745 {
1746        struct vfw_ports_in_args *port_in_args =
1747               (struct vfw_ports_in_args *)arg;
1748        struct pipeline_vfw *vfw_pipe =
1749               (struct pipeline_vfw *)port_in_args->pipe;
1750        struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
1751
1752        start_tsc_measure(vfw_pipe);
1753
1754        uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
1755        uint64_t pkts_drop_mask;
1756        uint64_t synp_hijack_mask = 0;
1757        uint64_t arp_hijack_mask = 0;
1758 //       uint64_t hijack_mask = 0;
1759 //       uint64_t synproxy_reply_mask = 0;       /* for synproxy */
1760        uint64_t keep_mask = packet_mask_in;
1761
1762        uint64_t conntrack_mask = 0, connexist_mask = 0;
1763        struct rte_CT_helper ct_helper;
1764        uint32_t j;
1765
1766        /*
1767         * This routine uses a bit mask to represent which packets in the
1768         * "pkts" table are considered valid. Any table entry which exists
1769         * and is considered valid has the corresponding bit in the mask set.
1770         * Otherwise, it is cleared. Note that the mask is 64 bits,
1771         * but the number of packets in the table may be considerably less.
1772         * Any mask bits which do not correspond to actual packets are cleared.
1773         * Various routines are called which may determine that an existing
1774         * packet is somehow invalid. The routine will return an altered bit
1775         * mask, with the bit cleared. At the end of all the checks,
1776         * packets are dropped if their mask bit is a zero
1777         */
1778
1779        rte_prefetch0(& vfw_pipe->counters);
1780
1781        /* Pre-fetch all rte_mbuf header */
1782        for(j = 0; j < n_pkts; j++)
1783                rte_prefetch0(pkts[j]);
1784
1785        memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
1786        rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
1787        rte_prefetch0(& vfw_pipe->counters->sum_latencies);
1788
1789        if (vfw_debug > 1)
1790               printf("Enter in-port action with %p packet mask\n",
1791                             (void *)packet_mask_in);
1792        vfw_pipe->counters->pkts_received =
1793               vfw_pipe->counters->pkts_received + n_pkts;
1794        if (VFW_DEBUG)
1795               printf("vfw_port_in_action pkts_received: %" PRIu64
1796                             " n_pkts: %u\n",
1797                             vfw_pipe->counters->pkts_received, n_pkts);
1798
1799        /* first handle any previously buffered packets now released */
1800        vfw_handle_buffered_packets(p, vfw_pipe, ct,
1801                      FORWARD_BUFFERED_PACKETS);
1802
1803        /* now handle any new packets on input ports */
1804        if (likely(firewall_flag)) {
1805               keep_mask = rte_vfw_ipv6_packet_filter_and_process(pkts,
1806                             keep_mask, vfw_pipe);
1807               vfw_pipe->counters->pkts_fw_forwarded +=
1808                      __builtin_popcountll(keep_mask);
1809        }
1810 #ifdef ACL_ENABLE
1811
1812 #ifdef EN_SWP_ACL
1813        rte_prefetch0((void*)vfw_pipe->plib_acl);
1814        rte_prefetch0((void*)vfw_rule_table_ipv6_active);
1815 #endif /* EN_SWP_ACL */
1816        keep_mask = lib_acl_ipv6_pkt_work_key(
1817                      vfw_pipe->plib_acl, pkts, keep_mask,
1818                      &vfw_pipe->counters->pkts_drop_without_rule,
1819                      vfw_rule_table_ipv6_active,
1820                      action_array_active,
1821                      action_counter_table,
1822                      &conntrack_mask, &connexist_mask);
1823        vfw_pipe->counters->pkts_acl_forwarded +=
1824               __builtin_popcountll(keep_mask);
1825        if (conntrack_mask > 0) {
1826               keep_mask = conntrack_mask;
1827               ct_helper.no_new_cnxn_mask = connexist_mask;
1828               cnxn_tracking_is_active = 1;
1829        } else
1830               cnxn_tracking_is_active = 0;
1831 #endif /* ACL_ENABLE */
1832        if (likely(cnxn_tracking_is_active)) {
1833               rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
1834                             &keep_mask, &ct_helper, IPv6_HEADER_SIZE);
1835 //              synproxy_reply_mask = ct_helper.reply_pkt_mask;
1836               synp_hijack_mask = ct_helper.hijack_mask;
1837
1838        }
1839
1840 #ifdef EN_SWP_ARP
1841        for(j = 0; j < (n_pkts & 0x3LLU); j++) {
1842                rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1843                                    META_DATA_OFFSET));
1844                rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1845                                    ETHERNET_START));
1846        }
1847        rte_prefetch0((void*)in_port_dir_a);
1848  //      rte_prefetch0(vfw_pipe->local_lib_nd_route_table);
1849        uint32_t i;
1850
1851        for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
1852               for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
1853                      rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1854                                           META_DATA_OFFSET));
1855                      rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1856                                           ETHERNET_START));
1857               }
1858               pkt4_work_vfw_arp_ipv6_packets(&pkts[i], i, &keep_mask,
1859                             synproxy_reply_mask, vfw_pipe);
1860        }
1861        for (j = i; j < n_pkts; j++) {
1862               rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1863                                    META_DATA_OFFSET));
1864               rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1865                                    ETHERNET_START));
1866        }
1867        for (; i < n_pkts; i++) {
1868               pkt_work_vfw_arp_ipv6_packets(pkts[i], i, &keep_mask,
1869                             synproxy_reply_mask, vfw_pipe);
1870        }
1871 #else
1872        rte_prefetch0((void*)in_port_dir_a);
1873
1874         vfw_fwd_pkts_ipv6(pkts, &keep_mask, &arp_hijack_mask, vfw_pipe);
1875
1876 #endif
1877
1878        if (vfw_debug > 1) {
1879               printf("  Exit in-port action with %p packet mask\n",
1880                             (void *)keep_mask);
1881               if (keep_mask != packet_mask_in)
1882                      printf("dropped packets, %p in, %p out\n",
1883                                    (void *)packet_mask_in,
1884                                    (void *)keep_mask);
1885        }
1886
1887         /* Hijack the Synproxy and ARP buffered packets */
1888
1889         if (unlikely(arp_hijack_mask || synp_hijack_mask)) {
1890
1891 //                printf("Pkts hijacked arp = %lX, synp = %lX\n",
1892 //                                    arp_hijack_mask, synp_hijack_mask);
1893
1894                 rte_pipeline_ah_packet_hijack(p,(arp_hijack_mask | synp_hijack_mask));
1895         }
1896
1897        /* Update mask before returning, so that bad packets are dropped */
1898
1899        pkts_drop_mask = packet_mask_in & ~keep_mask;
1900
1901        if (unlikely(pkts_drop_mask != 0)) {
1902               /* printf("drop %p\n", (void *) pkts_drop_mask); */
1903               rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1904        }
1905
1906        vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
1907        vfw_pipe->counters->num_pkts_measurements++;
1908
1909        end_tsc_measure(vfw_pipe, n_pkts);
1910
1911        return 0;
1912 }
1913
1914
1915 /**
1916  * Parse arguments in config file.
1917  *
1918  * @param vfw_pipe
1919  *  A pointer to the pipeline.
1920  * @param params
1921  *  A pointer to pipeline specific parameters.
1922  *
1923  * @return
1924  *  0 on success, negative on error.
1925  */
1926 static int
1927 pipeline_vfw_parse_args(struct pipeline_vfw *vfw_pipe,
1928               struct pipeline_params *params)
1929 {
1930        uint32_t i;
1931        int status;
1932
1933        if (vfw_debug)
1934               printf("VFW pipeline_vfw_parse_args params->n_args: %d\n",
1935                             params->n_args);
1936
1937        for (i = 0; i < params->n_args; i++) {
1938               char *arg_name = params->args_name[i];
1939               char *arg_value = params->args_value[i];
1940
1941               printf("VFW args[%d]: %s %d, %s\n", i, arg_name,
1942                             atoi(arg_value), arg_value);
1943 #ifdef ACL_ENABLE
1944               status = lib_acl_parse_config(vfw_pipe->plib_acl,
1945                                    arg_name, arg_value, &vfw_n_rules);
1946               if (status < 0) {
1947                      printf("lib_acl_parse_config failed: %s, %s\n",
1948                                    arg_name, arg_value);
1949                      return -1;
1950               } else if (status == 0)
1951                      continue;
1952
1953 #endif /* ACL_ENABLE */
                    /* traffic_type */
1954               if (strcmp(arg_name, "traffic_type") == 0) {
1955                      int traffic_type = atoi(arg_value);
1956
1957                      if (traffic_type == 0 ||
1958                                    !(traffic_type == IP_VERSION_4 ||
1959                                           traffic_type == IP_VERSION_6)) {
1960                             printf("traffic_type is not IPv4/IPv6\n");
1961                             return -1;
1962                      }
1963
1964                      vfw_pipe->traffic_type = traffic_type;
1965                      continue;
1966               }
1967
1968
1969               /* n_flows */
1970               if (strcmp(arg_name, "n_flows") == 0) {
1971                      int n_flows = atoi(arg_value);
1972
1973                      if ((n_flows == 0) || (n_flows > 8000000))
1974                             return -1;
1975
1976                      /* must be power of 2, round up if not */
1977                      if (!rte_is_power_of_2(n_flows))
1978                             n_flows = rte_align32pow2(n_flows);
1979
1980                      vfw_pipe->n_flows = n_flows;
1981                      continue;
1982               }
1983
1984               /* not firewall option, process as cnxn tracking option */
1985               status = rte_ct_set_configuration_options(
1986                             vfw_pipe->cnxn_tracker,
1987                             arg_name, arg_value);
1988               if (status < 0) {
1989                      printf("rte_ct_set_configuration_options failed: %s, %s\n",
1990                                    arg_name, arg_value);
1991                      return -1;
1992               } else if (status == 0)
1993                      continue;
1994
1995        }
1996
1997        return 0;
1998 }
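/*
 * Illustrative config fragment (hypothetical values, not taken from the
 * original source) showing the arguments parsed above; keys that are not
 * recognized here fall through to the ACL and connection-tracking option
 * parsers:
 *
 *   [PIPELINE3]
 *   type = VFW
 *   core = 1
 *   pktq_in = RXQ0.0 RXQ1.0
 *   pktq_out = TXQ0.0 TXQ1.0
 *   n_flows = 1000000
 *   traffic_type = 4
 */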
1999
2000 static void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p,
2001                                               void *msg);
2002
2003 static pipeline_msg_req_handler handlers[] = {
2004        [PIPELINE_MSG_REQ_PING] = pipeline_msg_req_ping_handler,
2005        [PIPELINE_MSG_REQ_STATS_PORT_IN] =
2006            pipeline_msg_req_stats_port_in_handler,
2007        [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
2008            pipeline_msg_req_stats_port_out_handler,
2009        [PIPELINE_MSG_REQ_STATS_TABLE] = pipeline_msg_req_stats_table_handler,
2010        [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
2011            pipeline_msg_req_port_in_enable_handler,
2012        [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
2013            pipeline_msg_req_port_in_disable_handler,
2014        [PIPELINE_MSG_REQ_CUSTOM] = pipeline_vfw_msg_req_custom_handler,
2015 };
2016
2017 static void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
2018                                                     void *msg);
2019 static pipeline_msg_req_handler custom_handlers[] = {
2020
2021        [PIPELINE_VFW_MSG_REQ_SYNPROXY_FLAGS] =
2022            pipeline_vfw_msg_req_synproxy_flag_handler
2023 };
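/*
 * Message dispatch: generic requests are routed through handlers[] above,
 * while PIPELINE_MSG_REQ_CUSTOM requests reach
 * pipeline_vfw_msg_req_custom_handler(), which indexes custom_handlers[] by
 * the request subtype (currently only the synproxy ON/OFF flag handler).
 */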
2024
2025 /**
2026  * Create and initialize Pipeline Back End (BE).
2027  *
2028  * @param params
2029  *  A pointer to the pipeline-specific parameters.
2030  * @param arg
2031  *  A pointer to pipeline specific data.
2032  *
2033  * @return
2034  *  A pointer to the created pipeline, NULL on error.
2035  */
2036 static void *
2037 pipeline_vfw_init(struct pipeline_params *params, __rte_unused void *arg)
2038 {
2039        uint32_t size, i;
2040
2041        /* Check input arguments */
2042        if ((params == NULL) ||
2043                      (params->n_ports_in == 0) || (params->n_ports_out == 0))
2044               return NULL;
2045
2046        if (vfw_debug)
2047               printf("num ports in %d / num ports out %d\n",
2048                             params->n_ports_in, params->n_ports_out);
2049
2050        /* Create a single pipeline instance and initialize. */
2051        struct pipeline_vfw *pipe_vfw;
2052
2053        size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_vfw));
2054        pipe_vfw = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
2055
2056        if (pipe_vfw == NULL)
2057               return NULL;
2058
2059        struct pipeline *pipe;
2060
2061        pipe = &pipe_vfw->pipe;
2062
2063        strncpy(pipe->name, params->name, sizeof(pipe->name));
2064        pipe->log_level = params->log_level;
2065        pipe_vfw->n_flows = 4096;       /* small default value */
2066        pipe_vfw->traffic_type = IP_VERSION_4;
2067        pipe_vfw->pipeline_num = 0xff;
2068        for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
2069               pipe_vfw->links_map[i] = 0xff;
2070               pipe_vfw->outport_id[i] = 0xff;
2071        }
2072        PLOG(pipe, HIGH, "VFW");
2073
2074        /* Create a connection tracker instance and initialize. */
2075        pipe_vfw->cnxn_tracker =
2076               rte_zmalloc(NULL, rte_ct_get_cnxn_tracker_size(),
2077                             RTE_CACHE_LINE_SIZE);
2078
2079        if (pipe_vfw->cnxn_tracker == NULL)
2080               return NULL;
2081 #ifdef ACL_ENABLE
2082        /* Create an ACL instance and initialize. */
2083        pipe_vfw->plib_acl =
2084               rte_zmalloc(NULL, sizeof(struct lib_acl),
2085                             RTE_CACHE_LINE_SIZE);
2086
2087        if (pipe_vfw->plib_acl == NULL)
2088               return NULL;
2089 #endif
2090        timer_lcore = rte_lcore_id();
2091        /*
2092         * Now allocate a counter block entry. It appears that the
2093         * initialization of all instances is serialized on core 0,
2094         * so no lock is necessary.
2095         */
2096        struct rte_VFW_counter_block *counter_ptr;
2097
2098        if (rte_VFW_hi_counter_block_in_use == MAX_VFW_INSTANCES)
2099               /* error, exceeded table bounds */
2100               return NULL;
2101
2102        rte_VFW_hi_counter_block_in_use++;
2103        counter_ptr =
2104               &rte_vfw_counter_table[rte_VFW_hi_counter_block_in_use];
2105        strncpy(counter_ptr->name, params->name, sizeof(counter_ptr->name));
2106
2107        pipe_vfw->counters = counter_ptr;
2108
2109        rte_ct_initialize_default_timeouts(pipe_vfw->cnxn_tracker);
2110        /* Parse arguments */
2111        if (pipeline_vfw_parse_args(pipe_vfw, params))
2112               return NULL;
2113
2114        uint16_t pointers_offset =
2115               META_DATA_OFFSET + offsetof(struct mbuf_tcp_meta_data, next);
2116
2117        if (pipe_vfw->n_flows > 0)
2118               rte_ct_initialize_cnxn_tracker_with_synproxy(
2119                             pipe_vfw->cnxn_tracker,
2120                             pipe_vfw->n_flows,
2121                             params->name,
2122                             pointers_offset);
2123
2124        pipe_vfw->counters->ct_counters =
2125               rte_ct_get_counter_address(pipe_vfw->cnxn_tracker);
2126
2127        /* Pipeline */
2128        {
2129               struct rte_pipeline_params pipeline_params = {
2130                      .name = params->name,
2131                      .socket_id = params->socket_id,
2132                      .offset_port_id = META_DATA_OFFSET +
2133                             offsetof(struct mbuf_tcp_meta_data, output_port)
2134               };
2135
2136               pipe->p = rte_pipeline_create(&pipeline_params);
2137               if (pipe->p == NULL) {
2138                      rte_free(pipe_vfw);
2139                      return NULL;
2140               }
2141        }
2142
2143        /* Input ports */
2144
2145        /*
2146         * Create a separate "arg_ah" context for each input port; IP_PIPELINE
2147         * does not pass the port number in to the input port handler, so
2148         * per-port state (the pipeline and its connection tracker) is carried here.
2149         */
2150
2151        uint32_t in_ports_arg_size =
2152               RTE_CACHE_LINE_ROUNDUP((sizeof(struct vfw_ports_in_args)) *
2153                             (params->n_ports_in));
2154        struct vfw_ports_in_args *port_in_args =
2155               (struct vfw_ports_in_args *)
2156               rte_zmalloc(NULL, in_ports_arg_size, RTE_CACHE_LINE_SIZE);
2157
2158        if (port_in_args == NULL)
2159               return NULL;
2160
2161        pipe->n_ports_in = params->n_ports_in;
2162        for (i = 0; i < pipe->n_ports_in; i++) {
2163
2164               /* initialize this instance of port_in_args as necessary */
2165               port_in_args[i].pipe = pipe;
2166               port_in_args[i].cnxn_tracker = pipe_vfw->cnxn_tracker;
2167
2168               struct rte_pipeline_port_in_params port_params = {
2169                      .ops =
2170                             pipeline_port_in_params_get_ops(&params->port_in
2171                                           [i]),
2172                      .arg_create =
2173                             pipeline_port_in_params_convert(&params->port_in
2174                                           [i]),
2175                      .f_action = vfw_port_in_action_ipv4,
2176                      .arg_ah = &(port_in_args[i]),
2177                      .burst_size = params->port_in[i].burst_size,
2178               };
2179                if (pipe_vfw->traffic_type == IP_VERSION_6)
2180                      port_params.f_action = vfw_port_in_action_ipv6;
2181               int status = rte_pipeline_port_in_create(pipe->p, &port_params,
2182                             &pipe->port_in_id[i]);
2183
2184               if (status) {
2185                      rte_pipeline_free(pipe->p);
2186                      rte_free(pipe_vfw);
2187                      return NULL;
2188               }
2189        }
2190
2191        /* Output ports */
2192        pipe->n_ports_out = params->n_ports_out;
2193        for (i = 0; i < pipe->n_ports_out; i++) {
2194               struct rte_pipeline_port_out_params port_params = {
2195                      .ops = pipeline_port_out_params_get_ops(
2196                                    &params->port_out[i]),
2197                      .arg_create = pipeline_port_out_params_convert(
2198                                    &params->port_out[i]),
2199                      .f_action = NULL,
2200                      .arg_ah = NULL,
2201               };
2202
2203               int status = rte_pipeline_port_out_create(pipe->p, &port_params,
2204                             &pipe->port_out_id[i]);
2205
2206               if (status) {
2207                      rte_pipeline_free(pipe->p);
2208                      rte_free(pipe_vfw);
2209                      return NULL;
2210               }
2211        }
2212
2213        int pipeline_num = 0;
2214        int dont_care = sscanf(params->name, "PIPELINE%d", &pipeline_num);
2215
2216        if (dont_care != 1)
2217               printf("sscanf unable to read pipeline id\n");
2218        pipe_vfw->pipeline_num = (uint8_t) pipeline_num;
2219        register_pipeline_Qs(pipe_vfw->pipeline_num, pipe);
2220        set_link_map(pipe_vfw->pipeline_num, pipe, pipe_vfw->links_map);
2221        set_outport_id(pipe_vfw->pipeline_num, pipe,
2222                      pipe_vfw->outport_id);
2223        printf("pipeline_num=%d\n", pipeline_num);
2224 #ifdef ACL_ENABLE
2225        /* If this is the first VFW thread, create the common VFW rule tables */
2226        if (rte_VFW_hi_counter_block_in_use == 0) {
2227               vfw_rule_table_ipv4_active =
2228                      lib_acl_create_active_standby_table_ipv4(1,
2229                                    &vfw_n_rules);
2230               if (vfw_rule_table_ipv4_active == NULL) {
2231                      printf("Failed to create active table for IPV4\n");
2232                      rte_pipeline_free(pipe->p);
2233                      rte_free(pipe_vfw->cnxn_tracker);
2234                      rte_free(pipe_vfw->plib_acl);
2235                      rte_free(pipe_vfw);
2236                      return NULL;
2237               }
2238               vfw_rule_table_ipv4_standby =
2239                      lib_acl_create_active_standby_table_ipv4(2,
2240                                    &vfw_n_rules);
2241               if (vfw_rule_table_ipv4_standby == NULL) {
2242                      printf("Failed to create standby table for IPV4\n");
2243                      rte_pipeline_free(pipe->p);
2244                      rte_free(pipe_vfw->cnxn_tracker);
2245                      rte_free(pipe_vfw->plib_acl);
2246                      rte_free(pipe_vfw);
2247                      return NULL;
2248               }
2249
2250               vfw_rule_table_ipv6_active =
2251                      lib_acl_create_active_standby_table_ipv6(1,
2252                                    &vfw_n_rules);
2253
2254               if (vfw_rule_table_ipv6_active == NULL) {
2255                      printf("Failed to create active table for IPV6\n");
2256                      rte_pipeline_free(pipe->p);
2257                      rte_free(pipe_vfw->cnxn_tracker);
2258                      rte_free(pipe_vfw->plib_acl);
2259                      rte_free(pipe_vfw);
2260                      return NULL;
2261               }
2262               vfw_rule_table_ipv6_standby =
2263                      lib_acl_create_active_standby_table_ipv6(2,
2264                                    &vfw_n_rules);
2265               if (vfw_rule_table_ipv6_standby == NULL) {
2266                      printf("Failed to create standby table for IPV6\n");
2267                      rte_pipeline_free(pipe->p);
2268                      rte_free(pipe_vfw->cnxn_tracker);
2269                      rte_free(pipe_vfw->plib_acl);
2270                      rte_free(pipe_vfw);
2271                      return NULL;
2272               }
2273        }
2274
2275 #endif
2276
2277        /* Tables */
2278
2279        pipe->n_tables = 1;
2280
2281        struct rte_pipeline_table_params table_params = {
2282               .ops = &rte_table_stub_ops,
2283               .arg_create = NULL,
2284               .f_action_hit = NULL,
2285               .f_action_miss = NULL,
2286               .arg_ah = NULL,
2287               .action_data_size = 0,
2288        };
2289
2290        int status = rte_pipeline_table_create(pipe->p,
2291                      &table_params,
2292                      &pipe->table_id[0]);
2293
2294        if (status) {
2295               rte_pipeline_free(pipe->p);
2296               rte_free(pipe);
2297               return NULL;
2298        }
2299
2300        struct rte_pipeline_table_entry default_entry = {
2301               .action = RTE_PIPELINE_ACTION_PORT_META
2302        };
2303
2304        struct rte_pipeline_table_entry *default_entry_ptr;
2305
2306        status = rte_pipeline_table_default_entry_add(pipe->p,
2307                                                 pipe->table_id[0],
2308                                                 &default_entry,
2309                                                 &default_entry_ptr);
2310
2311        if (status) {
2312               rte_pipeline_free(pipe->p);
2313               rte_free(pipe);
2314               return NULL;
2315        }
2316        for (i = 0; i < pipe->n_ports_in; i++) {
2317               int status = rte_pipeline_port_in_connect_to_table(
2318                             pipe->p,
2319                             pipe->port_in_id[i],
2320                             pipe->table_id[0]);
2321
2322               if (status) {
2323                      rte_pipeline_free(pipe->p);
2324                      rte_free(pipe_vfw);
2325                      return NULL;
2326               }
2327        }
2328
2329        /* Enable input ports */
2330        for (i = 0; i < pipe->n_ports_in; i++) {
2331               int status =
2332                   rte_pipeline_port_in_enable(pipe->p, pipe->port_in_id[i]);
2333
2334               if (status) {
2335                      rte_pipeline_free(pipe->p);
2336                      rte_free(pipe_vfw);
2337                      return NULL;
2338               }
2339        }
2340
2341        /* Check pipeline consistency */
2342        if (rte_pipeline_check(pipe->p) < 0) {
2343               rte_pipeline_free(pipe->p);
2344               rte_free(pipe_vfw);
2345               return NULL;
2346        }
2347
2348        /* Message queues */
2349        pipe->n_msgq = params->n_msgq;
2350        for (i = 0; i < pipe->n_msgq; i++)
2351               pipe->msgq_in[i] = params->msgq_in[i];
2352
2353        for (i = 0; i < pipe->n_msgq; i++)
2354               pipe->msgq_out[i] = params->msgq_out[i];
2355
2356        /* Message handlers */
2357        memcpy(pipe->handlers, handlers, sizeof(pipe->handlers));
2358        memcpy(pipe_vfw->custom_handlers, custom_handlers,
2359               sizeof(pipe_vfw->custom_handlers));
2360
2361        return pipe_vfw;
2362 }
2363
2364 /**
2365  * Free resources and delete pipeline.
2366  *
2367  * @param pipeline
2368  *  A pointer to the pipeline.
2369  *
2370  * @return
2371  *  0 on success, negative on error.
2372  */
2373 static int pipeline_vfw_free(void *pipeline)
2374 {
2375        struct pipeline *p = (struct pipeline *)pipeline;
2376
2377        /* Check input arguments */
2378        if (p == NULL)
2379               return -1;
2380
2381        /* Free resources */
2382        rte_pipeline_free(p->p);
2383        rte_free(p);
2384        return 0;
2385 }
2386
2387 /**
2388  * Callback function to map input/output ports.
2389  *
2390  * @param pipeline
2391  *  A pointer to the pipeline.
2392  * @param port_in
2393  *  Input port ID
2394  * @param port_out
2395  *  A pointer to the output port ID.
2396  *
2397  * @return
2398  *  0 on success, negative on error.
2399  */
2400 static int
2401 pipeline_vfw_track(void *pipeline, uint32_t port_in,
2402                     uint32_t *port_out)
2403 {
2404        struct pipeline *p = (struct pipeline *)pipeline;
2405
2406        /* Check input arguments */
2407        if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
2408               return -1;
2409
2410        if (p->n_ports_in == 1) {
2411               *port_out = 0;
2412               return 0;
2413        }
2414
2415        return -1;
2416 }
2417
2418 /**
2419  * Callback function to process timers.
2420  *
2421  * @param pipeline
2422  *  A pointer to the pipeline.
2423  *
2424  * @return
2425  *  0 on success, negative on error.
2426  */
2427 static int pipeline_vfw_timer(void *pipeline)
2428 {
2429        struct pipeline_vfw *p = (struct pipeline_vfw *)pipeline;
2430
2431        /*
2432         * Handle any good buffered packets released by synproxy before checking
2433         * for packets released by synproxy due to timeout,
2434         * so that no packets are missed.
2435         */
2436
2437        vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
2438                                    FORWARD_BUFFERED_PACKETS);
2439
2440        pipeline_msg_req_handle(&p->pipe);
2441        rte_pipeline_flush(p->pipe.p);
2442
2443        rte_ct_handle_expired_timers(p->cnxn_tracker);
2444
2445        /* now handle packets released by synproxy due to timeout. */
2446        vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
2447                                    DELETE_BUFFERED_PACKETS);
2448
2449        return 0;
2450 }
2451
2452 /**
2453  * Callback function to process CLI commands from FE.
2454  *
2455  * @param p
2456  *  A pointer to the pipeline.
2457  * @param msg
2458  *  A pointer to command specific data.
2459  *
2460  * @return
2461  *  A pointer to the message handler on success,
2462  *  pipeline_msg_req_invalid_handler on error.
2463  */
2464 void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p, void *msg)
2465 {
2466        struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
2467        struct pipeline_custom_msg_req *req = msg;
2468        pipeline_msg_req_handler f_handle;
2469
2470        f_handle = (req->subtype < PIPELINE_VFW_MSG_REQS) ?
2471            pipe_vfw->custom_handlers[req->subtype] :
2472            pipeline_msg_req_invalid_handler;
2473
2474        if (f_handle == NULL)
2475               f_handle = pipeline_msg_req_invalid_handler;
2476
2477        return f_handle(p, req);
2478 }
2479
2480 /**
2481  * Handler for synproxy ON/OFF CLI command.
2482  *
2483  * @param p
2484  *  A pointer to the pipeline.
2485  * @param msg
2486  *  A pointer to command specific data.
2487  *
2488  * @return
2489  *  Response message contains status.
2490  */
2491
2492 void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
2493                                               void *msg)
2494 {
2495        struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
2496        struct pipeline_vfw_synproxy_flag_msg_req *req = msg;
2497        struct pipeline_vfw_synproxy_flag_msg_rsp *rsp = msg;
2498
2499        if (req->synproxy_flag == 0) {
2500               rte_ct_disable_synproxy(pipe_vfw->cnxn_tracker);
2501               rsp->status = 0;
2502               printf("synproxy turned OFF for %s\n", p->name);
2503        } else if (req->synproxy_flag == 1) {
2504               rte_ct_enable_synproxy(pipe_vfw->cnxn_tracker);
2505               rsp->status = 0;
2506               printf("synproxy turned ON for %s\n", p->name);
2507        } else {
2508               printf("Invalid synproxy setting\n");
2509               rsp->status = -1;
2510        }
2511
2512        return rsp;
2513 }
2514
2515 struct pipeline_be_ops pipeline_vfw_be_ops = {
2516        .f_init = pipeline_vfw_init,
2517        .f_free = pipeline_vfw_free,
2518        .f_run = NULL,
2519        .f_timer = pipeline_vfw_timer,
2520        .f_track = pipeline_vfw_track,
2521 };
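/*
 * A minimal sketch (assuming the common IP_PIPELINE registration pattern,
 * exact field names may differ) of how the front end is expected to reference
 * these back-end ops when registering the VFW pipeline type:
 *
 *   struct pipeline_type pipeline_vfw = {
 *          .name = "VFW",
 *          .be_ops = &pipeline_vfw_be_ops,
 *          ...
 *   };
 */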