vFW: changes for gateway packet forwarding
[samplevnf.git] / VNFs / vFW / pipeline / pipeline_vfw_be.c
1 /*
2 // Copyright (c) 2017 Intel Corporation
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 */
16
17 /**
18  * @file
19  * Pipeline VFW BE Implementation.
20  *
21  * Implementation of Pipeline VFW Back End (BE).
22  * Responsible for packet processing.
23  *
24  */
25
26 #define EN_SWP_ACL 1
27 //#define EN_SWP_ARP 1
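/*
 * Build-time switches: EN_SWP_ACL enables the software-pipelined ACL path.
 * EN_SWP_ARP (left disabled here) would compile the pipelined pkt_work/pkt4_work
 * ARP/ND handlers below instead of the vfw_fwd_pkts_ipv4()/vfw_fwd_pkts_ipv6()
 * gateway forwarding path.
 */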
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <stdint.h>
32 #include <stddef.h>
33 #include <string.h>
34 #include <unistd.h>
35
36 #include <rte_common.h>
37 #include <rte_malloc.h>
38 #include <rte_ether.h>
39 #include <rte_ethdev.h>
40 #include <rte_ip.h>
41 #include <rte_udp.h>
42 #include <rte_icmp.h>
43 #include <rte_byteorder.h>
44
45 #include <rte_table_lpm.h>
46 #include <rte_table_hash.h>
47 #include <rte_table_array.h>
48 #include <rte_table_acl.h>
49 #include <rte_table_stub.h>
50 #include <rte_timer.h>
51 #include <rte_cycles.h>
52 #include <rte_pipeline.h>
53 #include <rte_spinlock.h>
54 #include <rte_prefetch.h>
55 #include "pipeline_actions_common.h"
56 #include "hash_func.h"
57 #include "pipeline_vfw.h"
58 #include "pipeline_vfw_be.h"
59 #include "rte_cnxn_tracking.h"
60 #include "pipeline_arpicmp_be.h"
61 #include "vnf_common.h"
62 #include "vnf_define.h"
63
64 #include "lib_arp.h"
65 #include "lib_icmpv6.h"
66 #include "pipeline_common_fe.h"
67 #include "gateway.h"
68
69 uint32_t timer_lcore;
70
71 uint8_t firewall_flag = 1;
72 uint8_t VFW_DEBUG = 0;
73 uint8_t cnxn_tracking_is_active = 1;
74 /**
75  * A structure defining the VFW pipeline input port per thread data.
76  */
77 struct vfw_ports_in_args {
78        struct pipeline *pipe;
79        struct rte_ct_cnxn_tracker *cnxn_tracker;
80 } __rte_cache_aligned;
81 /**
82  * A structure defining the VFW pipeline per thread data.
83  */
84 struct pipeline_vfw {
85        struct pipeline pipe;
86        pipeline_msg_req_handler custom_handlers[PIPELINE_VFW_MSG_REQS];
87
88        struct rte_ct_cnxn_tracker *cnxn_tracker;
89        struct rte_VFW_counter_block *counters;
90        struct rte_mbuf *pkt_buffer[PKT_BUFFER_SIZE];
91        struct lib_acl *plib_acl;
92        /* timestamp retrieved during in-port computations */
93        uint32_t n_flows;
94        uint8_t pipeline_num;
95        uint8_t traffic_type;
96        uint8_t links_map[PIPELINE_MAX_PORT_IN];
97        uint8_t outport_id[PIPELINE_MAX_PORT_IN];
98
99 } __rte_cache_aligned;
100 /**
101  * A structure defining the mbuf meta data for VFW.
102  */
103 struct mbuf_tcp_meta_data {
104 /* output port stored for RTE_PIPELINE_ACTION_PORT_META */
105        uint32_t output_port;
106        struct rte_mbuf *next;       /* next pointer for chained buffers */
107 } __rte_cache_aligned;
108
109 #define DONT_CARE_TCP_PACKET 0
110 #define IS_NOT_TCP_PACKET 0
111 #define IS_TCP_PACKET 1
112
113 #define META_DATA_OFFSET 128
114
115 #define RTE_PKTMBUF_HEADROOM 128       /* normally defined by the DPDK build config (rte_config.h); this matches the default */
116 #define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
117 #define ETH_HDR_SIZE 14
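/* IP_START (offset of the IP header in the mbuf metadata space) is provided by a
 * shared VNF header; it is presumably ETHERNET_START + ETH_HDR_SIZE, which the
 * offsets below build on. */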
118 #define PROTOCOL_START (IP_START + 9)
119
120 #define TCP_START (IP_START + 20)
121 #define RTE_LB_PORT_OFFSET 204       /* TODO: Need definition in LB header */
122 #define TCP_START_IPV6 (IP_START + 40)
123 #define PROTOCOL_START_IPV6 (IP_START + 6)
124 #define IP_HDR_DSCP_OFST 1
125
126 #define TCP_PROTOCOL 6
127 #define UDP_PROTOCOL 17
128
129 #define DELETE_BUFFERED_PACKETS 0
130 #define FORWARD_BUFFERED_PACKETS 1
131 #define DO_ARP 1
132 #define NO_ARP 0
133
134 #define IPv4_HEADER_SIZE 20
135 #define IPv6_HEADER_SIZE 40
136
137 #define IP_VERSION_4 4
138 #define IP_VERSION_6 6
139
140 /* IPv6 */
141 #define IP_HDR_SIZE_IPV6  40
142 #define IP_HDR_DSCP_OFST_IPV6 0
143 #define IP_HDR_LENGTH_OFST_IPV6 4
144 #define IP_HDR_PROTOCOL_OFST_IPV6 6
145 #define IP_HDR_DST_ADR_OFST_IPV6 24
146 #define MAX_NUM_LOCAL_MAC_ADDRESS 16
147 /** The counter table for VFW pipeline per thread data.*/
148 struct rte_VFW_counter_block rte_vfw_counter_table[MAX_VFW_INSTANCES]
149 __rte_cache_aligned;
150 int rte_VFW_hi_counter_block_in_use = -1;
151
152 /* a spin lock used during vfw initialization only */
153 rte_spinlock_t rte_VFW_init_lock = RTE_SPINLOCK_INITIALIZER;
154
155 /* Action Array */
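/* Two copies of the action table are kept (a/b); action_array_active is used for
 * lookups while action_array_standby can presumably be rewritten and swapped in,
 * so rule updates do not disturb in-flight classification. */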
156 struct pipeline_action_key *action_array_a;
157 struct pipeline_action_key *action_array_b;
158 struct pipeline_action_key *action_array_active;
159 struct pipeline_action_key *action_array_standby;
160 uint32_t action_array_size;
161 struct action_counter_block
162 action_counter_table[MAX_VFW_INSTANCES][action_array_max]
163 __rte_cache_aligned;
164 /*
165   * Pipeline table strategy for firewall. Unfortunately, there does not seem to
166   * be any use for the built-in table lookup of ip_pipeline for the firewall.
167   * The main table requirement of the firewall is the hash table to maintain
168   * connection info, but that is implemented separately in the connection
169   * tracking library. So a "dummy" table lookup will be performed.
170   * TODO: look into "stub" table and see if that can be used
171   * to avoid useless table lookup
172   */
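/* Bitmask of packets pulled out of the current burst and queued while waiting
 * for ARP/ND resolution; these are hijacked rather than forwarded or dropped. */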
173 uint64_t arp_pkts_mask;
174
175 /* Start TSC measurement */
176 /* Prefetch counters and pipe before this function */
177 static inline void start_tsc_measure(struct pipeline_vfw *vfw_pipe) {
178        vfw_pipe->counters->entry_timestamp = rte_get_tsc_cycles();
179        if (likely(vfw_pipe->counters->exit_timestamp))
180               vfw_pipe->counters->external_time_sum +=
181                      vfw_pipe->counters->entry_timestamp -
182                      vfw_pipe->counters->exit_timestamp;
183 }
184
185 /* End TSC measurement */
186 static inline void end_tsc_measure(
187        struct pipeline_vfw *vfw_pipe,
188        uint8_t n_pkts)
189 {
190        if (likely(n_pkts > 1)) {
191               vfw_pipe->counters->exit_timestamp = rte_get_tsc_cycles();
192               vfw_pipe->counters->internal_time_sum +=
193                      vfw_pipe->counters->exit_timestamp -
194                      vfw_pipe->counters->entry_timestamp;
195               vfw_pipe->counters->time_measurements++;
196        } else {
197               /* small counts skew results, ignore */
198               vfw_pipe->counters->exit_timestamp = 0;
199        }
200 }
201
202 /**
203  * Print packet for debugging.
204  *
205  * @param pkt
206  *  A pointer to the packet.
207  *
208  */
209 static __rte_unused  void print_pkt(struct rte_mbuf *pkt)
210 {
211        int i;
212        int size = (int)sizeof(struct mbuf_tcp_meta_data);
213        uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, META_DATA_OFFSET);
214
215        printf("Meta-data:\n");
216        for (i = 0; i < size; i++) {
217               printf("%02x ", rd[i]);
218               if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
219                      printf("\n");
220        }
221        printf("\n");
222        printf("IP and TCP/UDP headers:\n");
223        rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, IP_START);
224        for (i = 0; i < IP_HDR_SIZE_IPV6; i++) {
225               printf("%02x ", rd[i]);
226               if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
227                      printf("\n");
228        }
229        printf("\n");
230 }
231
232 /* TODO: are the protocol numbers defined somewhere with meaningful names? */
233 #define IP_ICMP_PROTOCOL 1
234 #define IP_TCP_PROTOCOL 6
235 #define IP_UDP_PROTOCOL 17
236 #define IPv6_FRAGMENT_HEADER 44
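/* Note: the IP protocol numbers above correspond to IPPROTO_ICMP, IPPROTO_TCP
 * and IPPROTO_UDP from <netinet/in.h>. */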
237
238 /**
239  * Return ethernet header structure from packet.
240  *
241  * @param pkt
242  *  A pointer to the packet.
243  *
244  */
245 static inline struct ether_hdr *rte_vfw_get_ether_addr(struct rte_mbuf *pkt)
246 {
247        return (struct ether_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
248                                                         ETHERNET_START);
249 }
250
251 /**
252  * Return IPV4 header structure from packet.
253  *
254  * @param pkt
255  *  A pointer to the packet.
256  *
257  */
258
259 static inline struct ipv4_hdr *rte_vfw_get_IPv4_hdr_addr(
260               struct rte_mbuf *pkt)
261 {
262        return (struct ipv4_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
263 }
264
265 static inline int rte_vfw_is_IPv4(struct rte_mbuf *pkt)
266 {
267        /* NOTE: Only supporting IP headers with no options,
268         * so header is fixed size */
269        uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
270               >> VERSION_NO_BYTE;
271
272        return ip_type == IPv4_HDR_VERSION;
273 }
274
275 static inline int rte_vfw_is_IPv6(struct rte_mbuf *pkt)
276 {
277        /* NOTE: Only supporting IP headers with no options,
278         * so header is fixed size */
279        uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
280               >> VERSION_NO_BYTE;
281
282        return ip_type == IPv6_HDR_VERSION;
283 }
284
285 static inline void rte_vfw_incr_drop_ctr(uint64_t *counter)
286 {
287        if (likely(firewall_flag))
288               (*counter)++;
289 }
290
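/**
 * Divert ARP packets, and ICMP/ICMPv6 packets addressed to this link, to the
 * ARP-ICMP handler through the last output port.
 *
 * @param pkt
 *  A pointer to the packet.
 * @param vfw_pipe
 *  A pointer to VFW pipeline.
 * @return 0 if the packet was consumed (diverted or counted as dropped),
 *  1 if normal processing should continue.
 */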
291 static uint8_t check_arp_icmp(
292               struct rte_mbuf *pkt,
293               struct pipeline_vfw *vfw_pipe)
294 {
295        struct ether_hdr *ehdr;
296        struct app_link_params *link;
297         uint8_t solicited_node_multicast_addr[IPV6_ADD_SIZE] = {
298                 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
299                 0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00};
300
301         /* ARP outport number */
302        uint16_t out_port = vfw_pipe->pipe.n_ports_out - 1;
303        struct ipv4_hdr *ipv4_h;
304        struct ipv6_hdr *ipv6_h;
305        link = &myApp->link_params[pkt->port];
306
307        ehdr = rte_vfw_get_ether_addr(pkt);
308        switch (rte_be_to_cpu_16(ehdr->ether_type)) {
309
310        case ETH_TYPE_ARP:
311               rte_pipeline_port_out_packet_insert(
312                             vfw_pipe->pipe.p,
313                             out_port,
314                             pkt);
315
316               vfw_pipe->counters->arpicmpPktCount++;
317
318               return 0;
319        case ETH_TYPE_IPV4:
320               ipv4_h = (struct ipv4_hdr *)
321                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
322               if ((ipv4_h->next_proto_id == IP_PROTOCOL_ICMP) &&
323                             link->ip ==
324                             rte_be_to_cpu_32(ipv4_h->dst_addr)) {
325                      if (is_phy_port_privte(pkt->port)) {
326                             rte_pipeline_port_out_packet_insert(
327                                           vfw_pipe->pipe.p,
328                                           out_port,
329                                           pkt);
330
331                             vfw_pipe->counters->arpicmpPktCount++;
332                             return 0;
333                      }
334               }
335               break;
336 #ifdef IPV6
337         case ETH_TYPE_IPV6:
338                 ipv6_h = (struct ipv6_hdr *)
339                         RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
340
341                 if (ipv6_h->proto == ICMPV6_PROTOCOL_ID) {
342                         if (!memcmp(ipv6_h->dst_addr, link->ipv6, IPV6_ADD_SIZE)
343                                         || !memcmp(ipv6_h->dst_addr,
344                                                 solicited_node_multicast_addr,
345                                                 IPV6_ADD_CMP_MULTI)) {
346
347                                 rte_pipeline_port_out_packet_insert(
348                                                 vfw_pipe->pipe.p,
349                                                 out_port,
350                                                 pkt);
351
352                                 vfw_pipe->counters->arpicmpPktCount++;
353
354                         } else
355                                 vfw_pipe->counters->
356                                         pkts_drop_unsupported_type++;
357
358                         return 0;
359                 }
360                 break;
361 #endif
362        default:
363               break;
364        }
365        return 1;
366 }
367
368 /**
369  * Performs basic VFW ipv4 packet filtering.
370  * @param pkts
371  *  A pointer to the packets.
372  * @param pkts_mask
373  *  packet mask.
374  * @param vfw_pipe
375  *  A pointer to VFW pipeline.
376  */
377
378 static uint64_t
379 rte_vfw_ipv4_packet_filter_and_process(struct rte_mbuf **pkts,
380                                  uint64_t pkts_mask,
381                                  struct pipeline_vfw *vfw_pipe)
382 {
383
384        /*
385         * Make use of cache prefetch. At beginning of loop, want to prefetch
386         * mbuf data for next iteration (not current one).
387         * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
388         * is 20 bytes (extensions not supported), while the IPv6 header is 40
389         * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
390         * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
391         * need two pre-fetches.
392         */
393
394        uint8_t pos, next_pos = 0;
395        uint64_t pkt_mask;       /* bitmask representing a single packet */
396        struct rte_mbuf *pkt;
397        struct rte_mbuf *next_pkt = NULL;
398        struct ipv4_hdr *ihdr4;
399        void *next_iphdr = NULL;
400
401        if (unlikely(pkts_mask == 0))
402               return pkts_mask;
403        pos = (uint8_t) __builtin_ctzll(pkts_mask);
404        pkt_mask = 1LLU << pos;       /* bitmask representing only this packet */
405        pkt = pkts[pos];
406
407        uint64_t bytes_processed = 0;
408        /* bitmap of packets left to process */
409        uint64_t pkts_to_process = pkts_mask;
410        /* bitmap of valid packets to return */
411        uint64_t valid_packets = pkts_mask;
412
413        rte_prefetch0(pkt);
414        /* prefetch counters, updated below. Most likely counters to update
415         * at beginning */
416        rte_prefetch0(&vfw_pipe->counters);
417
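       /* Walk the burst in pkts_mask order: __builtin_ctzll() gives the index of
        * the lowest set bit (the next packet to handle), and the following
        * packet's IP header is prefetched one iteration ahead. */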
418        do {                     /* always execute at least once */
419
420               /* remove this packet from remaining list */
421               uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
422
423               if (likely(next_pkts_to_process)) {
424                      /* another packet to process after this, prefetch it */
425
426                      next_pos =
427                             (uint8_t) __builtin_ctzll(next_pkts_to_process);
428                      next_pkt = pkts[next_pos];
429                      next_iphdr = RTE_MBUF_METADATA_UINT32_PTR(next_pkt,
430                                    IP_START);
431                      rte_prefetch0(next_iphdr);
432               }
433
434               int discard = 0;
435               /* remove this packet from remaining list */
436               pkts_to_process &= ~pkt_mask;
437
438               if (enable_hwlb) {
439                       if (!check_arp_icmp(pkt, vfw_pipe)) {
440                               /* make next packet data the current */
441                               pkts_to_process = next_pkts_to_process;
442                               pos = next_pos;
443                               pkt = next_pkt;
444                               ihdr4 = next_iphdr;
445                               pkt_mask = 1LLU << pos;
446                               valid_packets &= ~pkt_mask;
447                               continue;
448                      }
449               }
450
451               uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
452
453               bytes_processed += packet_length;
454
455               ihdr4 = (struct ipv4_hdr *)
456                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
457
458               /* verify that packet size according to mbuf is at least
459                * as large as the size according to the IP header.
460                */
461
462               uint32_t ip_length = rte_bswap16(ihdr4->total_length);
463
464               if (unlikely
465                             (ip_length > (packet_length - ETH_HDR_SIZE))) {
466                      discard = 1;
467                      vfw_pipe->counters->pkts_drop_bad_size++;
468               }
469
470               /*
471                * IPv4 fragmented if: MF (more fragments) or Fragment
472                * Offset are non-zero. Header in Intel order, so flip
473                * constant to compensate. Note that IPv6 uses a header
474                * extension for identifying fragments.
475                */
476
477               int fragmented = (ihdr4->fragment_offset & 0xff3f) != 0;
478               uint8_t ttl = ihdr4->time_to_live;
479
480               if (unlikely(fragmented)) {
481                      discard = 1;
482                      vfw_pipe->counters->pkts_drop_fragmented++;
483               }
484
485               if (unlikely(ttl <= 1)) {
486                      /*
487                       * about to decrement to zero (or is somehow
488                       * already zero), so discard
489                       */
490                      discard = 1;
491                      vfw_pipe->counters->pkts_drop_ttl++;
492               }
493
494               /*
495                * Dropping the packets other than TCP AND UDP.
496                */
497
498               uint8_t proto = ihdr4->next_proto_id;
499
500               if (unlikely(!(proto == IP_TCP_PROTOCOL ||
501                                           proto == IP_UDP_PROTOCOL ||
502                                           proto == IP_ICMP_PROTOCOL))) {
503                      discard = 1;
504                      vfw_pipe->counters->
505                             pkts_drop_unsupported_type++;
506               }
507
508               if (unlikely(discard)) {
509                      valid_packets &= ~pkt_mask;
510               }
511
512               /* make next packet data the current */
513               pkts_to_process = next_pkts_to_process;
514               pos = next_pos;
515               pkt = next_pkt;
516               ihdr4 = next_iphdr;
517               pkt_mask = 1LLU << pos;
518
519        } while (pkts_to_process);
520
521        /* finalize counters, etc. */
522        vfw_pipe->counters->bytes_processed += bytes_processed;
523
524        if (likely(firewall_flag))
525               return valid_packets;
526        else
527               return pkts_mask;
528 }
529 /**
530  * Performs basic VFW IPV6 packet filtering.
531  * @param pkts
532  *  A pointer to the packets.
533  * @param pkts_mask
534  *  packet mask.
535  * @param vfw_pipe
536  *  A pointer to VFW pipeline.
537  */
538 static uint64_t
539 rte_vfw_ipv6_packet_filter_and_process(struct rte_mbuf **pkts,
540               uint64_t pkts_mask,
541               struct pipeline_vfw *vfw_pipe)
542 {
543
544        /*
545         * Make use of cache prefetch. At beginning of loop, want to prefetch
546         * mbuf data for next iteration (not current one).
547         * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
548         * is 20 bytes (extensions not supported), while the IPv6 header is 40
549         * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
550         * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
551         * need two pre-fetches.
552         */
553
554        uint8_t pos, next_pos = 0;
555        uint64_t pkt_mask;       /* bitmask representing a single packet */
556        struct rte_mbuf *pkt;
557        struct rte_mbuf *next_pkt = NULL;
558        struct ipv6_hdr *ihdr6;
559        void *next_iphdr = NULL;
560
561        if (unlikely(pkts_mask == 0))
562               return pkts_mask;
563        pos = (uint8_t) __builtin_ctzll(pkts_mask);
564        pkt_mask = 1LLU << pos;       /* bitmask representing only this packet */
565        pkt = pkts[pos];
566
567        uint64_t bytes_processed = 0;
568        /* bitmap of packets left to process */
569        uint64_t pkts_to_process = pkts_mask;
570        /* bitmap of valid packets to return */
571        uint64_t valid_packets = pkts_mask;
572
573        /* prefetch counters, updated below. Most likely counters to update
574         * at beginning */
575        rte_prefetch0(&vfw_pipe->counters);
576
577        do {                     /* always execute at least once */
578
579               /* remove this packet from remaining list */
580               uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;
581
582               if (likely(next_pkts_to_process)) {
583                      /* another packet to process after this, prefetch it */
584
585                      next_pos =
586                          (uint8_t) __builtin_ctzll(next_pkts_to_process);
587                      next_pkt = pkts[next_pos];
588                      next_iphdr =
589                          RTE_MBUF_METADATA_UINT32_PTR(next_pkt, IP_START);
590                      rte_prefetch0(next_iphdr);
591               }
592
593               int discard = 0;
594               /* remove this packet from remaining list */
595               pkts_to_process &= ~pkt_mask;
596
597               if (enable_hwlb) {
598                      if (!check_arp_icmp(pkt, vfw_pipe)) {
599                              /* make next packet data the current */
600                              pkts_to_process = next_pkts_to_process;
601                              pos = next_pos;
602                              pkt = next_pkt;
603                              ihdr6 = next_iphdr;
604                              pkt_mask = 1LLU << pos;
605                              valid_packets &= ~pkt_mask;
606                              continue;
607                      }
608               }
609
610               uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
611
612               bytes_processed += packet_length;
613
614               ihdr6 = (struct ipv6_hdr *)
615                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
616
617               /*
618                * verify that packet size according to mbuf is at least
619                * as large as the size according to the IP header.
620                * For IPv6, note that size includes header extensions
621                * but not the base header size
622                */
623
624               uint32_t ip_length =
625                      rte_bswap16(ihdr6->payload_len) + IPv6_HEADER_SIZE;
626
627               if (unlikely
628                             (ip_length > (packet_length - ETH_HDR_SIZE))) {
629                      discard = 1;
630                      vfw_pipe->counters->pkts_drop_bad_size++;
631               }
632
633               /*
634                * Dropping the packets other than TCP AND UDP.
635                */
636
637               uint8_t proto = ihdr6->proto;
638
639               if (unlikely(!(proto == IP_TCP_PROTOCOL ||
640                                           proto == IP_UDP_PROTOCOL ||
641                                           proto == IP_ICMP_PROTOCOL))) {
642                      discard = 1;
643                      if (proto == IPv6_FRAGMENT_HEADER)
644                             vfw_pipe->counters->
645                                    pkts_drop_fragmented++;
646                      else
647                             vfw_pipe->counters->
648                                    pkts_drop_unsupported_type++;
649               }
650
651               /*
652                * Behave like a router, and decrement the TTL of an
653                * IP packet. If this causes the TTL to become zero,
654                * the packet will be discarded. Unlike a router,
655                * no ICMP code 11 (Time * Exceeded) message will be
656                * sent back to the packet originator.
657                */
658
659               if (unlikely(ihdr6->hop_limits <= 1)) {
660                      /*
661                       * about to decrement to zero (or is somehow
662                       * already zero), so discard
663                       */
664                      discard = 1;
665                      vfw_pipe->counters->pkts_drop_ttl++;
666               }
667
668               if (unlikely(discard))
669                      valid_packets &= ~pkt_mask;
670               else
671                      ihdr6->hop_limits--;
672
673               /* make next packet data the current */
674               pkts_to_process = next_pkts_to_process;
675               pos = next_pos;
676               pkt = next_pkt;
677               ihdr6 = next_iphdr;
678               pkt_mask = 1LLU << pos;
679
680        } while (pkts_to_process);
681
682        /* finalize counters, etc. */
683        vfw_pipe->counters->bytes_processed += bytes_processed;
684
685        if (likely(firewall_flag))
686               return valid_packets;
687        else
688               return pkts_mask;
689 }
690
691 /**
692  * Exchange the MAC addresses so the source becomes the destination and vice versa.
693  *
694  * @param ehdr
695  *  A pointer to the ethernet header.
696  *
697  */
698 static inline void rte_sp_exchange_mac_addresses(struct ether_hdr *ehdr)
699 {
700        struct ether_addr saved_copy;
701
702        ether_addr_copy(&ehdr->d_addr, &saved_copy);
703        ether_addr_copy(&ehdr->s_addr, &ehdr->d_addr);
704        ether_addr_copy(&saved_copy, &ehdr->s_addr);
705 }
706 #ifdef EN_SWP_ARP
707
708 /**
709  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
710  * To support synproxy, some (altered) packets may need to be sent back where
711  * they came from. The ip header has already been adjusted, but the ethernet
712  * header has not, so this must be performed here.
713  * Return an updated pkts_mask, since arp may drop some packets
714  *
715  * @param pkts
716  *  A pointer to the packet array.
717  * @param pkt_num
718  *  Packet num to start processing
719  * @param pkts_mask
720  *  Packet mask
721  * @param synproxy_reply_mask
722  *  Reply Packet mask for Synproxy
723  * @param vfw_pipe
724  *  A pointer to VFW pipeline.
725  */
726 static void
727 pkt4_work_vfw_arp_ipv4_packets(struct rte_mbuf **pkts,
728               uint16_t pkt_num,
729               uint64_t *pkts_mask,
730               uint64_t synproxy_reply_mask,
731               struct pipeline_vfw *vfw_pipe)
732 {
733
734        uint8_t i;
735
736        struct mbuf_tcp_meta_data *meta_data_addr;
737        struct ether_hdr *ehdr;
738        struct rte_mbuf *pkt;
739
740        for (i = 0; i < 4; i++) {
741               uint32_t dest_if = INVALID_DESTIF;
742               /* bitmask representing only this packet */
743               uint64_t pkt_mask = 1LLU << (pkt_num + i);
744
745               pkt = pkts[i];
746
747               if(!(*pkts_mask & pkt_mask))
748                      continue;
749
750               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
751
752               meta_data_addr = (struct mbuf_tcp_meta_data *)
753                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
754               ehdr = rte_vfw_get_ether_addr(pkt);
755
756
757               struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
758                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
759               uint32_t nhip = 0;
760
761               uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
762               if (must_reverse)
763                      rte_sp_exchange_mac_addresses(ehdr);
764
765               struct arp_entry_data *ret_arp_data = NULL;
766               ret_arp_data = get_dest_mac_addr_port(dest_address,
767                             &dest_if, &ehdr->d_addr);
768               meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
769
770               if (arp_cache_dest_mac_present(dest_if)) {
771                      ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
772                      update_nhip_access(dest_if);
773                      if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
774                             arp_send_buffered_pkts(ret_arp_data,
775                                    &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
776
777                      }
778
779               } else {
780                      if (unlikely(ret_arp_data == NULL)) {
781                             if (VFW_DEBUG)
782                                    printf("%s: NHIP Not Found, nhip:%x , "
783                                           "outport_id: %d\n", __func__, nhip,
784                                           vfw_pipe->outport_id[dest_if]);
785
786                             /* Drop the pkt */
787                             vfw_pipe->counters->
788                                    pkts_drop_without_arp_entry++;
789                             continue;
790                      }
791                      if (ret_arp_data->status == INCOMPLETE ||
792                                    ret_arp_data->status == PROBE) {
793                             if (ret_arp_data->num_pkts >= NUM_DESC) {
794                                    /* ICMP req sent, drop packet by
795                                     * changing the mask */
796                                    vfw_pipe->counters->
797                                           pkts_drop_without_arp_entry++;
798                                    continue;
799                             } else {
800                                    /* no hijack mask in this SWP variant; use the global arp_pkts_mask */
801                                    arp_pkts_mask |= pkt_mask;
802                                    arp_queue_unresolved_packet(ret_arp_data, pkt);
803                                    continue;
804                             }
805                      }
806               }
807        }
808 }
809
810
811 /**
812  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
813  * To support synproxy, some (altered) packets may need to be sent back where
814  * they came from. The ip header has already been adjusted, but the ethernet
815  * header has not, so this must be performed here.
816  * Return an updated pkts_mask, since arp may drop some packets
817  *
818  * @param pkts
819  *  A pointer to the packet.
820  * @param packet_num
821  *  Packet number to process
822  * @param pkts_mask
823  *  Packet mask pointer
824  * @param synproxy_reply_mask
825  *  Reply Packet mask for Synproxy
826  * @param vfw_pipe
827  *  A pointer to VFW pipeline.
828  */
829 static void
830 pkt_work_vfw_arp_ipv4_packets(struct rte_mbuf *pkts,
831               uint16_t pkt_num,
832               uint64_t *pkts_mask,
833               uint64_t synproxy_reply_mask,
834               struct pipeline_vfw *vfw_pipe)
835 {
836
837        uint32_t dest_if = INVALID_DESTIF;
838
839        struct mbuf_tcp_meta_data *meta_data_addr;
840        struct ether_hdr *ehdr;
841        struct rte_mbuf *pkt;
842        uint64_t pkt_mask = 1LLU << pkt_num;
843
844        pkt = pkts;
845
846        if(*pkts_mask & pkt_mask) {
847
848               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
849
850               meta_data_addr = (struct mbuf_tcp_meta_data *)
851                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
852               ehdr = rte_vfw_get_ether_addr(pkt);
853
854
855               struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
856                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
857               uint32_t nhip = 0;
858
859               uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
860               if (must_reverse)
861                      rte_sp_exchange_mac_addresses(ehdr);
862
863               struct arp_entry_data *ret_arp_data = NULL;
864               ret_arp_data = get_dest_mac_addr_port(dest_address,
865                             &dest_if, &ehdr->d_addr);
866               meta_data_addr->output_port = vfw_pipe->outport_id[dest_if];
867
868               if (arp_cache_dest_mac_present(dest_if)) {
869
870                      ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
871                      update_nhip_access(dest_if);
872                      if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
873                             arp_send_buffered_pkts(ret_arp_data,
874                                    &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
875
876                      }
877               } else {
878                      if (unlikely(ret_arp_data == NULL)) {
879
880                             if (VFW_DEBUG)
881                                    printf("%s: NHIP Not Found, nhip:%x , "
882                                           "outport_id: %d\n", __func__, nhip,
883                                           vfw_pipe->outport_id[dest_if]);
884
885                             vfw_pipe->counters->
886                                    pkts_drop_without_arp_entry++;
887                             return;
888                      }
889                      if (ret_arp_data->status == INCOMPLETE ||
890                                    ret_arp_data->status == PROBE) {
891                             if (ret_arp_data->num_pkts >= NUM_DESC) {
892                                    /* ICMP req sent, drop packet by
893                                     * changing the mask */
894                                    vfw_pipe->counters->
895                                           pkts_drop_without_arp_entry++;
896                                    return;
897                             } else {
898                                    arp_pkts_mask |= pkt_mask;
899                                    arp_queue_unresolved_packet(ret_arp_data, pkt);
900                                    return;
901                             }
902                      }
903               }
904
905        }
906 }
907
908
909 /**
910  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
911  * To support synproxy, some (altered) packets may need to be sent back where
912  * they came from. The ip header has already been adjusted, but the ethernet
913  * header has not, so this must be performed here.
914  * Return an updated pkts_mask, since arp may drop some packets
915  *
916  * @param pkts
917  *  A pointer to the packets array.
918  * @param pkt_num
919  *  Packet number to start processing.
920  * @param pkts_mask
921  *  Packet mask pointer
922  * @param synproxy_reply_mask
923  *  Reply Packet mask for Synproxy
924  * @param vfw_pipe
925  *  A pointer to VFW pipeline.
926  */
927
928 static void
929 pkt4_work_vfw_arp_ipv6_packets(struct rte_mbuf **pkts,
930               uint16_t pkt_num,
931               uint64_t *pkts_mask,
932               uint64_t synproxy_reply_mask,
933               struct pipeline_vfw *vfw_pipe)
934 {
935        uint8_t nh_ipv6[IPV6_ADD_SIZE];
936        struct ether_addr hw_addr;
937        struct mbuf_tcp_meta_data *meta_data_addr;
938        struct ether_hdr *ehdr;
939        struct rte_mbuf *pkt;
940        uint8_t i;
941
942        for (i = 0; i < 4; i++) {
943               uint32_t dest_if = INVALID_DESTIF;
944               /* bitmask representing only this packet */
945               uint64_t pkt_mask = 1LLU << (pkt_num + i);
946
947               pkt = pkts[i];
948
949               if(!(*pkts_mask & pkt_mask))
950                      continue;
951               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
952
953               meta_data_addr = (struct mbuf_tcp_meta_data *)
954                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
955               ehdr = rte_vfw_get_ether_addr(pkt);
956
957               struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
958                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
959
960               uint8_t nhip[IPV6_ADD_SIZE];
961               uint8_t dest_address[IPV6_ADD_SIZE];
962
963               memset(nhip, 0, IPV6_ADD_SIZE);
964               if (must_reverse)
965                      rte_sp_exchange_mac_addresses(ehdr);
966
967               rte_mov16(dest_address, ihdr->dst_addr);
968               memset(nh_ipv6, 0, IPV6_ADD_SIZE);
969               struct nd_entry_data *ret_nd_data = NULL;
970               ret_nd_data = get_dest_mac_address_ipv6_port(
971                                    &dest_address[0],
972                                    &dest_if,
973                                    &hw_addr,
974                                    &nh_ipv6[0]);
975
976                 meta_data_addr->output_port = vfw_pipe->
977                                     outport_id[dest_if];
978               if (nd_cache_dest_mac_present(dest_if)) {
979                     ether_addr_copy(get_link_hw_addr(dest_if),
980                                    &ehdr->s_addr);
981                     update_nhip_access(dest_if);
982
983                     if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
984                         nd_send_buffered_pkts(ret_nd_data,
985                                 &ehdr->d_addr, meta_data_addr->output_port);
986                     }
987               } else {
988                     if (unlikely(ret_nd_data == NULL)) {
989                          *pkts_mask &= ~pkt_mask;
990                           vfw_pipe->counters->
991                                 pkts_drop_without_arp_entry++;
992                           continue;
993                     }
994                     if (ret_nd_data->status == INCOMPLETE ||
995                           ret_nd_data->status == PROBE) {
996                           if (ret_nd_data->num_pkts >= NUM_DESC) {
997                                 /* Drop the pkt */
998                                 *pkts_mask &= ~pkt_mask;
999                                 vfw_pipe->counters->
1000                                         pkts_drop_without_arp_entry++;
1001                                 continue;
1002                           } else {
1003                                 arp_pkts_mask |= pkt_mask;
1004                                 nd_queue_unresolved_packet(ret_nd_data, pkt);
1005                                 continue;
1006                           }
1007                     }
1008               }
1009
1010        }
1011 }
1012
1013
1014 /**
1015  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1016  * To support synproxy, some (altered) packets may need to be sent back where
1017  * they came from. The ip header has already been adjusted, but the ethernet
1018  * header has not, so this must be performed here.
1019  * Return an updated pkts_mask, since arp may drop some packets
1020  *
1021  * @param pkts
1022  *  A pointer to the packets.
1023  * @param pkt_num
1024  *  Packet number to process.
1025  * @param pkts_mask
1026  *  Packet mask pointer
1027  * @param synproxy_reply_mask
1028  *  Reply Packet mask for Synproxy
1029  * @param vfw_pipe
1030  *  A pointer to VFW pipeline.
1031  */
1032
1033 static void
1034 pkt_work_vfw_arp_ipv6_packets(struct rte_mbuf *pkts,
1035               uint16_t pkt_num,
1036               uint64_t *pkts_mask,
1037               uint64_t synproxy_reply_mask,
1038               struct pipeline_vfw *vfw_pipe)
1039 {
1040        uint8_t nh_ipv6[IPV6_ADD_SIZE];
1041        struct ether_addr hw_addr;
1042        struct mbuf_tcp_meta_data *meta_data_addr;
1043        struct ether_hdr *ehdr;
1044        struct rte_mbuf *pkt;
1045
1046        uint32_t dest_if = INVALID_DESTIF;
1047        /* bitmask representing only this packet */
1048        uint64_t pkt_mask = 1LLU << pkt_num;
1049
1050        pkt = pkts;
1051
1052        if(*pkts_mask & pkt_mask) {
1053
1054               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1055
1056               meta_data_addr = (struct mbuf_tcp_meta_data *)
1057                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1058               ehdr = rte_vfw_get_ether_addr(pkt);
1059
1060               struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
1061                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1062
1063               uint8_t nhip[IPV6_ADD_SIZE];
1064               uint8_t dest_address[IPV6_ADD_SIZE];
1065
1066               memset(nhip, 0, IPV6_ADD_SIZE);
1067               if (must_reverse)
1068                      rte_sp_exchange_mac_addresses(ehdr);
1069               rte_mov16(dest_address, ihdr->dst_addr);
1070               memset(nh_ipv6, 0, IPV6_ADD_SIZE);
1071               struct nd_entry_data *ret_nd_data = NULL;
1072               ret_nd_data = get_dest_mac_address_ipv6_port(
1073                                    &dest_address[0],
1074                                    &dest_if,
1075                                    &hw_addr,
1076                                    &nh_ipv6[0]);
1077               meta_data_addr->output_port = vfw_pipe->
1078                                     outport_id[dest_if];
1079               if (nd_cache_dest_mac_present(dest_if)) {
1080                      ether_addr_copy(get_link_hw_addr(dest_if),
1081                                    &ehdr->s_addr);
1082                     update_nhip_access(dest_if);
1083
1084                     if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
1085                         nd_send_buffered_pkts(ret_nd_data,
1086                                 &ehdr->d_addr, meta_data_addr->output_port);
1087                      }
1088               } else {
1089                     if (unlikely(ret_nd_data == NULL)) {
1090                         *pkts_mask &= ~pkt_mask;
1091                         vfw_pipe->counters->
1092                                 pkts_drop_without_arp_entry++;
1093                         return;
1094                     }
1095                     if (ret_nd_data->status == INCOMPLETE ||
1096                           ret_nd_data->status == PROBE) {
1097                           if (ret_nd_data->num_pkts >= NUM_DESC) {
1098                                 /* Drop the pkt */
1099                                 *pkts_mask &= ~pkt_mask;
1100                                 vfw_pipe->counters->
1101                                     pkts_drop_without_arp_entry++;
1102                                 return;
1103                           } else {
1104                                 arp_pkts_mask |= pkt_mask;
1105                                 nd_queue_unresolved_packet(ret_nd_data, pkt);
1106                                 return;
1107                           }
1108                     }
1109               }
1110
1111        }
1112
1113 }
1114
1115 #else
1116
1117 /**
1118  * walk every valid mbuf (denoted by pkts_mask) and forward the packet.
1119  * To support synproxy, some (altered) packets may need to be sent back where
1120  * they came from. The ip header has already been adjusted, but the ethernet
1121  * header has not, so this must be performed here.
1122  * Return an updated pkts_mask and arp_hijack_mask since arp may drop some packets
1123  *
1124  * @param pkts
1125  *  A pointer to the packet array.
1126  * @param pkts_mask
1127  *  Packets mask to be processed
1128  * @param arp_hijack_mask
1129  *  Packets to be hijacked for arp buffering
1130  * @param vfw_pipe
1131  *  A pointer to VFW pipeline.
1132  */
1133 static void vfw_fwd_pkts_ipv4(struct rte_mbuf **pkts, uint64_t *pkts_mask,
1134                 uint64_t *arp_hijack_mask, struct pipeline_vfw *vfw_pipe)
1135 {
1136         uint64_t pkts_to_arp = *pkts_mask;
1137
1138         for (; pkts_to_arp;) {
1139
1140                 struct mbuf_tcp_meta_data *meta_data_addr;
1141                 struct ether_hdr *ehdr;
1142                 struct rte_mbuf *pkt;
1143                 uint32_t src_phy_port;
1144
1145                 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1146                 /* bitmask representing only this packet */
1147                 uint64_t pkt_mask = 1LLU << pos;
1148                 /* remove this packet from remaining list */
1149                 pkts_to_arp &= ~pkt_mask;
1150                 pkt = pkts[pos];
1151
1152                 if(VFW_DEBUG) {
1153                         printf("----------------\n");
1154                         print_pkt(pkt);
1155                 }
1156
1157                 meta_data_addr = (struct mbuf_tcp_meta_data *)
1158                         RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1159
1160                 ehdr = (struct ether_hdr *)
1161                         RTE_MBUF_METADATA_UINT32_PTR(pkt, ETHERNET_START);
1162
1163                 src_phy_port = pkt->port;
1164                 uint32_t dst_phy_port = INVALID_DESTIF;
1165
1166                 if(is_gateway()){
1167                         struct ipv4_hdr *ipv4hdr = (struct ipv4_hdr *)
1168                                 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1169
1170                         /* Gateway Proc Starts */
1171
1172                         struct arp_entry_data *ret_arp_data = NULL;
1173                         struct ether_addr dst_mac;
1174                         uint32_t nhip = 0;
1175                         uint32_t dst_ip_addr = rte_bswap32(ipv4hdr->dst_addr);
1176
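                        /* Route lookup: resolve the next-hop IP and egress port for the
                         * destination, then look up the next hop's MAC in the ARP table. */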
1177                         gw_get_nh_port_ipv4(dst_ip_addr, &dst_phy_port, &nhip);
1178
1179                         ret_arp_data = get_dest_mac_addr_ipv4(nhip, dst_phy_port, &dst_mac);
1180
1181                         /* Gateway Proc Ends */
1182
1183                         if (arp_cache_dest_mac_present(dst_phy_port)) {
1184
1185                                 ether_addr_copy(&dst_mac, &ehdr->d_addr);
1186                                 ether_addr_copy(get_link_hw_addr(dst_phy_port), &ehdr->s_addr);
1187
1188                                 meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
1189
1190                                 update_nhip_access(dst_phy_port);
1191
1192                                 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
1193
1194                                         arp_send_buffered_pkts(ret_arp_data, &ehdr->d_addr,
1195                                                         vfw_pipe->outport_id[dst_phy_port]);
1196                                 }
1197
1198                         } else {
1199                                 if (unlikely(ret_arp_data == NULL)) {
1200
1201                                         printf("NHIP Not Found\n");
1202
1203                                         /* Drop the pkt */
1204                                         vfw_pipe->counters->
1205                                                 pkts_drop_without_arp_entry++;
1206                                         continue;
1207                                 }
1208                                 if (ret_arp_data->status == INCOMPLETE ||
1209                                                 ret_arp_data->status == PROBE) {
1210                                         if (ret_arp_data->num_pkts >= NUM_DESC) {
1211                                                 /* ICMP req sent, drop packet by
1212                                                  * changing the mask */
1213                                                 vfw_pipe->counters->pkts_drop_without_arp_entry++;
1214                                                 continue;
1215                                         } else {
1216                                                 *arp_hijack_mask |= pkt_mask;
1217                                                 arp_queue_unresolved_packet(ret_arp_data, pkt);
1218                                                 continue;
1219                                         }
1220                                 }
1221                         }
1222                 } else {
1223                         /* IP Pkt forwarding based on  pub/prv mapping */
1224                         if(is_phy_port_privte(src_phy_port))
1225                                 dst_phy_port = prv_to_pub_map[src_phy_port];
1226                         else
1227                                 dst_phy_port = pub_to_prv_map[src_phy_port];
1228
1229                         meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
1230
1231                         if(VFW_DEBUG) {
1232                                 printf("IP_PKT_FWD: src_phy_port=%d, dst_phy_port=%d\n",
1233                                                 src_phy_port, dst_phy_port);
1234                         }
1235                 }
1236
1237                 if(VFW_DEBUG)
1238                         print_pkt(pkt);
1239         }
1240
1241 }
1242
1243 /**
1244  * walk every valid mbuf (denoted by pkts_mask) and forward the packet.
1245  * To support synproxy, some (altered) packets may need to be sent back where
1246  * they came from. The ip header has already been adjusted, but the ethernet
1247  * header has not, so this must be performed here.
1248  * Return an updated pkts_mask and arp_hijack_mask since arp may drop some packets
1249  *
1250  * @param pkts
1251  *  A pointer to the packet array.
1252  * @param pkts_mask
1253  *  Packets mask to be processed
1254  * @param arp_hijack_mask
1255  *  Packets to be hijacked for arp buffering
1256  * @param vfw_pipe
1257  *  A pointer to VFW pipeline.
1258  */
1259 static void vfw_fwd_pkts_ipv6(struct rte_mbuf **pkts, uint64_t *pkts_mask,
1260                         uint64_t *arp_hijack_mask, struct pipeline_vfw *vfw_pipe)
1261 {
1262         uint64_t pkts_to_arp = *pkts_mask;
1263
1264         for (; pkts_to_arp;) {
1265
1266                 struct mbuf_tcp_meta_data *meta_data_addr;
1267                 struct ether_hdr *ehdr;
1268                 struct rte_mbuf *pkt;
1269                 uint32_t src_phy_port;
1270
1271                 struct nd_entry_data *ret_nd_data = NULL;
1272
1273                 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1274                 /* bitmask representing only this packet */
1275                 uint64_t pkt_mask = 1LLU << pos;
1276                 /* remove this packet from remaining list */
1277                 pkts_to_arp &= ~pkt_mask;
1278                 pkt = pkts[pos];
1279
1280                 if(VFW_DEBUG) {
1281                         printf("----------------\n");
1282                         print_pkt(pkt);
1283                 }
1284
1285                 meta_data_addr = (struct mbuf_tcp_meta_data *)
1286                         RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1287
1288                 ehdr = (struct ether_hdr *)
1289                         RTE_MBUF_METADATA_UINT32_PTR(pkt, ETHERNET_START);
1290
1291                 src_phy_port = pkt->port;
1292                 uint32_t dst_phy_port = INVALID_DESTIF;
1293
1294                 if(is_gateway()){
1295                         struct ipv6_hdr *ipv6hdr = (struct ipv6_hdr *)
1296                                 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1297
1298                         /* Gateway Proc Starts */
1299
1300                         struct ether_addr dst_mac;
1301                         /* dst_phy_port (declared above) is resolved by gw_get_nh_port_ipv6() */
1302                         uint8_t nhipv6[IPV6_ADD_SIZE];
1303                         uint8_t dest_ipv6_address[IPV6_ADD_SIZE];
1304                         memset(nhipv6, 0, IPV6_ADD_SIZE);
1305                         src_phy_port = pkt->port;
1306                         rte_mov16(dest_ipv6_address, (uint8_t *)ipv6hdr->dst_addr);
1307
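                        /* Route lookup: resolve the IPv6 next hop and egress port, then
                         * look up the next hop's MAC in the ND cache. */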
1308                         gw_get_nh_port_ipv6(dest_ipv6_address, &dst_phy_port, nhipv6);
1309
1310                         ret_nd_data = get_dest_mac_addr_ipv6(nhipv6, dst_phy_port, &dst_mac);
1311
1312                         /* Gateway Proc Ends */
1313
1314                         if (nd_cache_dest_mac_present(dst_phy_port)) {
1315
1316                                 ether_addr_copy(&dst_mac, &ehdr->d_addr);
1317                                 ether_addr_copy(get_link_hw_addr(dst_phy_port), &ehdr->s_addr);
1318
1319                                 meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
1320
1321                                 update_nhip_access(dst_phy_port);
1322
1323                                 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
1324                                         nd_send_buffered_pkts(ret_nd_data, &ehdr->d_addr,
1325                                                         vfw_pipe->outport_id[dst_phy_port]);
1326                                 }
1327
1328                         } else {
1329                                 if (unlikely(ret_nd_data == NULL)) {
1330
1331                                         printf("NHIP Not Found\n");
1332
1333                                         /* Drop the pkt */
1334                                         vfw_pipe->counters->pkts_drop_without_arp_entry++;
1335                                         continue;
1336                                 }
1337                                 if (ret_nd_data->status == INCOMPLETE ||
1338                                                 ret_nd_data->status == PROBE) {
1339                                         if (ret_nd_data->num_pkts >= NUM_DESC) {
1340                                                 /* ICMP req sent, drop packet by
1341                                                  * changing the mask */
1342                                                 vfw_pipe->counters->pkts_drop_without_arp_entry++;
1343                                                 continue;
1344                                         } else {
1345                                                 *arp_hijack_mask |= pkt_mask;
1346                                                 nd_queue_unresolved_packet(ret_nd_data, pkt);
1347                                                 continue;
1348                                         }
1349                                 }
1350                         }
1351
1352                 } else {
1353                         /* IP Pkt forwarding based on  pub/prv mapping */
1354                         if(is_phy_port_privte(src_phy_port))
1355                                 dst_phy_port = prv_to_pub_map[src_phy_port];
1356                         else
1357                                 dst_phy_port = pub_to_prv_map[src_phy_port];
1358
1359                         meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
1360
1361                         if(VFW_DEBUG) {
1362                                 printf("IP_PKT_FWD: src_phy_port=%d, dst_phy_port=%d\n",
1363                                                 src_phy_port, dst_phy_port);
1364                         }
1365                 }
1366                 if(VFW_DEBUG)
1367                         print_pkt(pkt);
1368         }
1369 }
1370
1371 #endif
1372 /**
1373  * Packets processing for connection tracking.
1374  *
1375  * @param vfw_pipe
1376  *  A pointer to the pipeline.
1377  * @param ct
1378  *  A pointer to the connection tracker.
1379  * @param pkts
1380  *  A pointer to a burst of packets.
1381  * @param packet_mask_in
1382  *  Input packets Mask.
1383  */
1384
1385 static uint64_t
1386 vfw_process_buffered_pkts(__rte_unused struct pipeline_vfw *vfw_pipe,
1387               struct rte_ct_cnxn_tracker *ct,
1388                           struct rte_mbuf **pkts, uint64_t packet_mask_in)
1389 {
1390        uint64_t keep_mask = packet_mask_in;
1391        struct rte_synproxy_helper sp_helper;       /* for synproxy */
1392
1393        keep_mask =
1394            rte_ct_cnxn_tracker_batch_lookup_with_synproxy(ct, pkts, keep_mask,
1395                                                     &sp_helper);
1396
1397        if (unlikely(sp_helper.hijack_mask))
1398               printf("buffered hijack pkts severe error\n");
1399
1400        if (unlikely(sp_helper.reply_pkt_mask))
1401               printf("buffered reply pkts severe error\n");
1402
1403        return keep_mask;
1404 }
1405
1406 /**
1407  * Free Packets from mbuf.
1408  *
1409  * @param ct
1410  *  A pointer to the connection tracker to increment drop counter.
1411  *
1412  * @param pkt
1413  *  Packet to be freed.
1414  */
1415 static inline void
1416 vfw_pktmbuf_free(struct rte_ct_cnxn_tracker *ct, struct rte_mbuf *pkt)
1417 {
1418        ct->counters->pkts_drop++;
1419        rte_pktmbuf_free(pkt);
1420 }
1421
1422 static void
1423 vfw_output_or_delete_buffered_packets(struct rte_ct_cnxn_tracker *ct,
1424                                     struct rte_pipeline *p,
1425                                     struct rte_mbuf **pkts,
1426                                     int num_pkts, uint64_t pkts_mask)
1427 {
1428        int i;
1429        struct mbuf_tcp_meta_data *meta_data_addr;
1430        uint64_t pkt_mask = 1;
1431
1432        /* any packet whose bit is clear in the low-order num_pkts
1433         * bits of pkts_mask must be discarded */
1434
1435        for (i = 0; i < num_pkts; i++) {
1436               struct rte_mbuf *pkt = pkts[i];
1437
1438               if (pkts_mask & pkt_mask) {
1439                      printf("vfw_output_or_delete_buffered_packets\n");
1440                      meta_data_addr = (struct mbuf_tcp_meta_data *)
1441                          RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1442                      rte_pipeline_port_out_packet_insert(
1443                                    p, meta_data_addr->output_port, pkt);
1444
1445               } else {
1446                      vfw_pktmbuf_free(ct, pkt);
1447               }
1448
1449               pkt_mask = pkt_mask << 1;
1450        }
1451 }
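/*
 * Worked example (editorial): with num_pkts = 3 and pkts_mask = 0x5
 * (binary 101), the loop above forwards pkts[0] and pkts[2] to their
 * recorded output ports and frees pkts[1], because only bits 0 and 2 of
 * the mask are set.  A mask of RTE_LEN2MASK(3, uint64_t) == 0x7 would
 * keep all three packets.
 */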
1452
1453 /**
1454  * Handle packets that synproxy has buffered and now released.
1455  *
1456  * @param p
1457  *  A pointer to the pipeline.
1458  * @param vfw_pipe
1459  *  A pointer to the vfw pipeline.
1460  * @param ct
1461  *  A pointer to the connection tracker.
1462  * @param forward_pkts
1463  *  Non-zero to forward the released packets, zero to drop them.
1464  *
1465  */
1466 static void
1467 vfw_handle_buffered_packets(struct rte_pipeline *p,
1468                             struct pipeline_vfw *vfw_pipe,
1469                             struct rte_ct_cnxn_tracker *ct, int forward_pkts)
1470 {
1471        struct rte_mbuf *pkt_list = rte_ct_get_buffered_synproxy_packets(ct);
1472
1473        if (likely(pkt_list == NULL))       /* non-NULL only during proxy setup */
1474               return;
1475
1476        int pkt_count = 0;
1477        uint64_t keep_mask = 0;
1478        struct rte_mbuf **pkts = vfw_pipe->pkt_buffer;
1479        struct rte_mbuf *pkt;
1480
1481        while (pkt_list != NULL) {
1482               struct mbuf_tcp_meta_data *meta_data =
1483               (struct mbuf_tcp_meta_data *)
1484               RTE_MBUF_METADATA_UINT32_PTR(pkt_list, META_DATA_OFFSET);
1485
1486               /* detach head of list and advance list */
1487               pkt = pkt_list;
1488               pkt_list = meta_data->next;
1489
1490               if (forward_pkts) {
1491
1492                      pkts[pkt_count++] = pkt;
1493
1494                      if (pkt_count == PKT_BUFFER_SIZE) {
1495                             /* need to send out packets */
1496                             /* currently 0, set all bits */
1497                             keep_mask = ~keep_mask;
1498
1499                             keep_mask =
1500                                 vfw_process_buffered_pkts(vfw_pipe,
1501                                                          ct, pkts,
1502                                                          keep_mask);
1503                             vfw_output_or_delete_buffered_packets(
1504                                           ct, p,
1505                                           pkts,
1506                                           PKT_BUFFER_SIZE,
1507                                           keep_mask);
1508                             pkt_count = 0;
1509                             keep_mask = 0;
1510                      }
1511
1512               } else {
1513                      vfw_pktmbuf_free(ct, pkt);
1514               }
1515        }
1516
1517        if (pkt_count != 0) {
1518               /* need to send out packets */
1519               keep_mask = RTE_LEN2MASK(pkt_count, uint64_t);
1520
1521               keep_mask =
1522                      vfw_process_buffered_pkts(vfw_pipe, ct, pkts,
1523                                    keep_mask);
1524
1525               vfw_output_or_delete_buffered_packets(ct, p, pkts, pkt_count,
1526                             keep_mask);
1527
1528               pkt_count = 0;
1529               keep_mask = 0;
1530        }
1531 }
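/*
 * Worked example (editorial): if synproxy releases 260 buffered packets
 * and PKT_BUFFER_SIZE were 64 (an illustrative value, not taken from this
 * file), the loop above flushes four full batches of 64 packets, each time
 * starting from an all-ones keep_mask (~0), and the trailing block then
 * flushes the remaining 4 packets with keep_mask = RTE_LEN2MASK(4,
 * uint64_t) == 0xF.  Connection tracking may still clear individual bits
 * before each batch is written to the output ports.
 */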
1532 /**
1533  * The pipeline port-in action is used to do all the firewall and
1534  * connection tracking work for IPV4 packets.
1535  *
1536  * @param p
1537  *  A pointer to the pipeline.
1538  * @param pkts
1539  *  A pointer to a burst of packets.
1540  * @param n_pkts
1541  *  Number of packets to process.
1542  * @param arg
1543  *  A pointer to pipeline specific data.
1544  *
1545  * @return
1546  *  0 on success, negative on error.
1547  */
1548
1549 static int
1550 vfw_port_in_action_ipv4(struct rte_pipeline *p,
1551               struct rte_mbuf **pkts,
1552               uint32_t n_pkts, void *arg)
1553 {
1554        struct vfw_ports_in_args *port_in_args =
1555               (struct vfw_ports_in_args *)arg;
1556        struct pipeline_vfw *vfw_pipe =
1557               (struct pipeline_vfw *)port_in_args->pipe;
1558        struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
1559
1560        start_tsc_measure(vfw_pipe);
1561
1562        uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
1563        uint64_t pkts_drop_mask;
1564        uint64_t synp_hijack_mask = 0;
1565        uint64_t arp_hijack_mask = 0;
1566 //       uint64_t synproxy_reply_mask;       /* for synproxy */
1567        uint64_t keep_mask = packet_mask_in;
1568
1569        uint64_t conntrack_mask = 0, connexist_mask = 0;
1570        struct rte_CT_helper ct_helper;
1571        uint8_t j;
1572
1573        /*
1574         * This routine uses a bit mask to represent which packets in the
1575         * "pkts" table are considered valid. Any table entry which exists
1576         * and is considered valid has the corresponding bit in the mask set.
1577         * Otherwise, it is cleared. Note that the mask is 64 bits,
1578         * but the number of packets in the table may be considerably less.
1579         * Any mask bits which do not correspond to actual packets are cleared.
1580         * Various routines are called which may determine that an existing
1581         * packet is somehow invalid. Each such routine returns an altered bit
1582         * mask with the offending bit cleared. At the end of all the checks,
1583         * packets are dropped if their mask bit is zero.
1584         */
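       /*
        * Editorial sketch of the mask convention, assuming a burst of 5 packets:
        *
        *     uint64_t packet_mask_in = RTE_LEN2MASK(5, uint64_t);   // 0x1F
        *     uint64_t keep_mask = packet_mask_in;
        *     // firewall / ACL / conntrack stages clear bits, e.g. keep_mask = 0x1B
        *     uint64_t pkts_drop_mask = packet_mask_in & ~keep_mask; // 0x04
        *     // rte_pipeline_ah_packet_drop(p, pkts_drop_mask) drops pkts[2]
        *
        * The numeric values are illustrative only; the real masks come from the
        * filter, ACL and connection-tracking calls below.
        */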
1585
1586        rte_prefetch0(& vfw_pipe->counters);
1587
1588 #ifdef EN_SWP_ACL
1589        /* Prefetch all rte_mbuf headers */
1590        for(j = 0; j < n_pkts; j++)
1591               rte_prefetch0(pkts[j]);
1592 #endif
1593        memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
1594 #ifdef EN_SWP_ACL
1595        rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
1596        rte_prefetch0(& vfw_pipe->counters->sum_latencies);
1597 #endif
1598
1599        if (unlikely(vfw_debug > 1))
1600               printf("Enter in-port action IPV4 with %p packet mask\n",
1601                             (void *)packet_mask_in);
1602        vfw_pipe->counters->pkts_received =
1603               vfw_pipe->counters->pkts_received + n_pkts;
1604
1605        if (unlikely(VFW_DEBUG))
1606               printf("vfw_port_in_action_ipv4 pkts_received: %" PRIu64
1607                             " n_pkts: %u\n",
1608                             vfw_pipe->counters->pkts_received, n_pkts);
1609
1610        /* first handle any previously buffered packets now released */
1611        vfw_handle_buffered_packets(p, vfw_pipe, ct,
1612                      FORWARD_BUFFERED_PACKETS);
1613
1614        /* now handle any new packets on input ports */
1615        if (likely(firewall_flag)) {
1616               keep_mask = rte_vfw_ipv4_packet_filter_and_process(pkts,
1617                             keep_mask, vfw_pipe);
1618               vfw_pipe->counters->pkts_fw_forwarded +=
1619                      __builtin_popcountll(keep_mask);
1620        }
1621 #ifdef ACL_ENABLE
1622 #ifdef EN_SWP_ACL
1623        rte_prefetch0((void*)vfw_pipe->plib_acl);
1624        rte_prefetch0((void*)vfw_rule_table_ipv4_active);
1625 #endif /* EN_SWP_ACL */
1626        keep_mask = lib_acl_ipv4_pkt_work_key(
1627                      vfw_pipe->plib_acl, pkts, keep_mask,
1628                      &vfw_pipe->counters->pkts_drop_without_rule,
1629                      vfw_rule_table_ipv4_active,
1630                      action_array_active,
1631                      action_counter_table,
1632                      &conntrack_mask, &connexist_mask);
1633        vfw_pipe->counters->pkts_acl_forwarded +=
1634               __builtin_popcountll(keep_mask);
1635        if (conntrack_mask > 0) {
1636               keep_mask = conntrack_mask;
1637               ct_helper.no_new_cnxn_mask = connexist_mask;
1638               cnxn_tracking_is_active = 1;
1639        } else
1640               cnxn_tracking_is_active = 0;
1641 #endif /* ACL_ENABLE */
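       /*
        * Worked example (editorial, values illustrative): suppose the ACL stage
        * returns keep_mask = 0x0F, conntrack_mask = 0x0C and connexist_mask = 0x08.
        * Because conntrack_mask is non-zero, keep_mask is replaced by 0x0C,
        * ct_helper.no_new_cnxn_mask becomes 0x08, and the connection-tracking
        * lookup below runs for this burst; with conntrack_mask == 0 the lookup
        * would be skipped entirely.
        */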
1642
1643        if (likely(cnxn_tracking_is_active)) {
1644               rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
1645                             &keep_mask, &ct_helper, IPv4_HEADER_SIZE);
1646 //              synproxy_reply_mask = ct_helper.reply_pkt_mask;
1647               synp_hijack_mask = ct_helper.hijack_mask;
1648
1649        }
1650
1651 #ifdef EN_SWP_ARP
1652        for(j = 0; j < (n_pkts & 0x3LLU); j++) {
1653                rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1654                                    META_DATA_OFFSET));
1655                rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1656                                    ETHERNET_START));
1657        }
1658        rte_prefetch0((void*)in_port_dir_a);
1659        rte_prefetch0((void*)prv_to_pub_map);
1660
1661        uint8_t i;
1662        for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
1663               for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
1664                      rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1665                                           META_DATA_OFFSET));
1666                      rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1667                                           ETHERNET_START));
1668               }
1669               pkt4_work_vfw_arp_ipv4_packets(&pkts[i], i, &keep_mask,
1670                             synproxy_reply_mask, vfw_pipe);
1671        }
1672        for (j = i; j < n_pkts; j++) {
1673               rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1674                                    META_DATA_OFFSET));
1675               rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1676                                    ETHERNET_START));
1677        }
1678        for (; i < n_pkts; i++) {
1679               pkt_work_vfw_arp_ipv4_packets(pkts[i], i, &keep_mask,
1680                             synproxy_reply_mask, vfw_pipe);
1681        }
1682 #else
1683        rte_prefetch0((void*)in_port_dir_a);
1684        rte_prefetch0((void*)prv_to_pub_map);
1685
1686         vfw_fwd_pkts_ipv4(pkts, &keep_mask, &arp_hijack_mask, vfw_pipe);
1687
1688 #endif
1689
1690        if (vfw_debug > 1) {
1691               printf("  Exit in-port action with %p packet mask\n",
1692                             (void *)keep_mask);
1693               if (keep_mask != packet_mask_in)
1694                      printf("dropped packets, %p in, %p out\n",
1695                                    (void *)packet_mask_in,
1696                                    (void *)keep_mask);
1697        }
1698
1699            /* Hijack the Synproxy and ARP buffered packets */
1700
1701        if (unlikely(arp_hijack_mask || synp_hijack_mask)) {
1702
1703 //                printf("Pkts hijacked arp = %lX, synp = %lX\n",
1704 //                                    arp_hijack_mask, synp_hijack_mask);
1705
1706                 rte_pipeline_ah_packet_hijack(p,(arp_hijack_mask | synp_hijack_mask));
1707         }
1708
1709        pkts_drop_mask = packet_mask_in & ~keep_mask;
1710
1711        if (unlikely(pkts_drop_mask != 0)) {
1712               /* printf("drop %p\n", (void *) pkts_drop_mask); */
1713               rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1714        }
1715
1716        vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
1717        vfw_pipe->counters->num_pkts_measurements++;
1718
1719        end_tsc_measure(vfw_pipe, n_pkts);
1720
1721        return 0;
1722 }
1723 /**
1724  * The pipeline port-in action is used to do all the firewall and
1725  * connection tracking work for IPV6 packets.
1726  *
1727  * @param p
1728  *  A pointer to the pipeline.
1729  * @param pkts
1730  *  A pointer to a burst of packets.
1731  * @param n_pkts
1732  *  Number of packets to process.
1733  * @param arg
1734  *  A pointer to pipeline specific data.
1735  *
1736  * @return
1737  *  0 on success, negative on error.
1738  */
1739
1740 static int
1741 vfw_port_in_action_ipv6(struct rte_pipeline *p,
1742               struct rte_mbuf **pkts,
1743               uint32_t n_pkts, void *arg)
1744 {
1745        struct vfw_ports_in_args *port_in_args =
1746               (struct vfw_ports_in_args *)arg;
1747        struct pipeline_vfw *vfw_pipe =
1748               (struct pipeline_vfw *)port_in_args->pipe;
1749        struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
1750
1751        start_tsc_measure(vfw_pipe);
1752
1753        uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
1754        uint64_t pkts_drop_mask;
1755        uint64_t synp_hijack_mask = 0;
1756        uint64_t arp_hijack_mask = 0;
1757 //       uint64_t hijack_mask = 0;
1758 //       uint64_t synproxy_reply_mask = 0;       /* for synproxy */
1759        uint64_t keep_mask = packet_mask_in;
1760
1761        uint64_t conntrack_mask = 0, connexist_mask = 0;
1762        struct rte_CT_helper ct_helper;
1763        uint32_t j;
1764
1765        /*
1766         * This routine uses a bit mask to represent which packets in the
1767         * "pkts" table are considered valid. Any table entry which exists
1768         * and is considered valid has the corresponding bit in the mask set.
1769         * Otherwise, it is cleared. Note that the mask is 64 bits,
1770         * but the number of packets in the table may be considerably less.
1771         * Any mask bits which do not correspond to actual packets are cleared.
1772         * Various routines are called which may determine that an existing
1773         * packet is somehow invalid. Each such routine returns an altered bit
1774         * mask with the offending bit cleared. At the end of all the checks,
1775         * packets are dropped if their mask bit is zero.
1776         */
1777
1778        rte_prefetch0(& vfw_pipe->counters);
1779
1780        /* Prefetch all rte_mbuf headers */
1781        for(j = 0; j < n_pkts; j++)
1782                rte_prefetch0(pkts[j]);
1783
1784        memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
1785        rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
1786        rte_prefetch0(& vfw_pipe->counters->sum_latencies);
1787
1788        if (vfw_debug > 1)
1789               printf("Enter in-port action with %p packet mask\n",
1790                             (void *)packet_mask_in);
1791        vfw_pipe->counters->pkts_received =
1792               vfw_pipe->counters->pkts_received + n_pkts;
1793        if (VFW_DEBUG)
1794               printf("vfw_port_in_action pkts_received: %" PRIu64
1795                             " n_pkts: %u\n",
1796                             vfw_pipe->counters->pkts_received, n_pkts);
1797
1798        /* first handle any previously buffered packets now released */
1799        vfw_handle_buffered_packets(p, vfw_pipe, ct,
1800                      FORWARD_BUFFERED_PACKETS);
1801
1802        /* now handle any new packets on input ports */
1803        if (likely(firewall_flag)) {
1804               keep_mask = rte_vfw_ipv6_packet_filter_and_process(pkts,
1805                             keep_mask, vfw_pipe);
1806               vfw_pipe->counters->pkts_fw_forwarded +=
1807                      __builtin_popcountll(keep_mask);
1808        }
1809 #ifdef ACL_ENABLE
1810
1811 #ifdef EN_SWP_ACL
1812        rte_prefetch0((void*)vfw_pipe->plib_acl);
1813        rte_prefetch0((void*)vfw_rule_table_ipv6_active);
1814 #endif /* EN_SWP_ACL */
1815        keep_mask = lib_acl_ipv6_pkt_work_key(
1816                      vfw_pipe->plib_acl, pkts, keep_mask,
1817                      &vfw_pipe->counters->pkts_drop_without_rule,
1818                      vfw_rule_table_ipv6_active,
1819                      action_array_active,
1820                      action_counter_table,
1821                      &conntrack_mask, &connexist_mask);
1822        vfw_pipe->counters->pkts_acl_forwarded +=
1823               __builtin_popcountll(keep_mask);
1824        if (conntrack_mask > 0) {
1825               keep_mask = conntrack_mask;
1826               ct_helper.no_new_cnxn_mask = connexist_mask;
1827               cnxn_tracking_is_active = 1;
1828        } else
1829               cnxn_tracking_is_active = 0;
1830 #endif /* ACL_ENABLE */
1831        if (likely(cnxn_tracking_is_active)) {
1832               rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
1833                             &keep_mask, &ct_helper, IPv6_HEADER_SIZE);
1834 //              synproxy_reply_mask = ct_helper.reply_pkt_mask;
1835               synp_hijack_mask = ct_helper.hijack_mask;
1836
1837        }
1838
1839 #ifdef EN_SWP_ARP
1840        for(j = 0; j < (n_pkts & 0x3LLU); j++) {
1841                rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1842                                    META_DATA_OFFSET));
1843                rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1844                                    ETHERNET_START));
1845        }
1846        rte_prefetch0((void*)in_port_dir_a);
1847  //      rte_prefetch0(vfw_pipe->local_lib_nd_route_table);
1848        uint32_t i;
1849
1850        for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
1851               for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
1852                      rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1853                                           META_DATA_OFFSET));
1854                      rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1855                                           ETHERNET_START));
1856               }
1857               pkt4_work_vfw_arp_ipv6_packets(&pkts[i], i, &keep_mask,
1858                             synproxy_reply_mask, vfw_pipe);
1859        }
1860        for (j = i; j < n_pkts; j++) {
1861               rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1862                                    META_DATA_OFFSET));
1863               rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1864                                    ETHERNET_START));
1865        }
1866        for (; i < n_pkts; i++) {
1867               pkt_work_vfw_arp_ipv6_packets(pkts[i], i, &keep_mask,
1868                             synproxy_reply_mask, vfw_pipe);
1869        }
1870 #else
1871        rte_prefetch0((void*)in_port_dir_a);
1872
1873         vfw_fwd_pkts_ipv6(pkts, &keep_mask, &arp_hijack_mask, vfw_pipe);
1874
1875 #endif
1876
1877        if (vfw_debug > 1) {
1878               printf("  Exit in-port action with %p packet mask\n",
1879                             (void *)keep_mask);
1880               if (keep_mask != packet_mask_in)
1881                      printf("dropped packets, %p in, %p out\n",
1882                                    (void *)packet_mask_in,
1883                                    (void *)keep_mask);
1884        }
1885
1886         /* Hijack the Synproxy and ARP buffered packets */
1887
1888         if (unlikely(arp_hijack_mask || synp_hijack_mask)) {
1889
1890 //                printf("Pkts hijacked arp = %lX, synp = %lX\n",
1891 //                                    arp_hijack_mask, synp_hijack_mask);
1892
1893                 rte_pipeline_ah_packet_hijack(p,(arp_hijack_mask | synp_hijack_mask));
1894         }
1895
1896        /* Update mask before returning, so that bad packets are dropped */
1897
1898        pkts_drop_mask = packet_mask_in & ~keep_mask;
1899
1900        if (unlikely(pkts_drop_mask != 0)) {
1901               /* printf("drop %p\n", (void *) pkts_drop_mask); */
1902               rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1903        }
1904
1905        vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
1906        vfw_pipe->counters->num_pkts_measurements++;
1907
1908        end_tsc_measure(vfw_pipe, n_pkts);
1909
1910        return 0;
1911 }
1912
1913
1914 /**
1915  * Parse arguments in config file.
1916  *
1917  * @param vfw_pipe
1918  *  A pointer to the pipeline.
1919  * @param params
1920  *  A pointer to pipeline specific parameters.
1921  *
1922  * @return
1923  *  0 on success, negative on error.
1924  */
1925 static int
1926 pipeline_vfw_parse_args(struct pipeline_vfw *vfw_pipe,
1927               struct pipeline_params *params)
1928 {
1929        uint32_t i;
1930        int status;
1931
1932        if (vfw_debug)
1933               printf("VFW pipeline_vfw_parse_args params->n_args: %d\n",
1934                             params->n_args);
1935
1936        for (i = 0; i < params->n_args; i++) {
1937               char *arg_name = params->args_name[i];
1938               char *arg_value = params->args_value[i];
1939
1940               printf("VFW args[%d]: %s %d, %s\n", i, arg_name,
1941                             atoi(arg_value), arg_value);
1942 #ifdef ACL_ENABLE
1943               status = lib_acl_parse_config(vfw_pipe->plib_acl,
1944                                    arg_name, arg_value, &vfw_n_rules);
1945               if (status < 0) {
1946                      printf("lib_acl_parse_config failed: %s,%s\n",
1947                                    arg_name, arg_value);
1948                      return -1;
1949               } else if (status == 0)
1950                      continue;
1951
1952 #endif /* ACL_ENABLE */
1953               if (strcmp(arg_name, "traffic_type") == 0) {
1954                      int traffic_type = atoi(arg_value);
1955
1956                      if (traffic_type == 0 ||
1957                                    !(traffic_type == IP_VERSION_4 ||
1958                                           traffic_type == IP_VERSION_6)) {
1959                             printf("not IPV4/IPV6");
1960                             return -1;
1961                      }
1962
1963                      vfw_pipe->traffic_type = traffic_type;
1964                      continue;
1965               }
1966
1967
1968               /* n_flows */
1969               if (strcmp(arg_name, "n_flows") == 0) {
1970                      int n_flows = atoi(arg_value);
1971
1972                      if (n_flows == 0)
1973                             return -1;
1974
1975                      /* must be power of 2, round up if not */
1976                      if (!rte_is_power_of_2(n_flows))
1977                             n_flows = rte_align32pow2(n_flows);
1978
1979                      vfw_pipe->n_flows = n_flows;
1980                      continue;
1981               }
1982
1983               /* not firewall option, process as cnxn tracking option */
1984               status = rte_ct_set_configuration_options(
1985                             vfw_pipe->cnxn_tracker,
1986                             arg_name, arg_value);
1987               if (status < 0) {
1988                      printf("rte_ct_set_configuration_options failed: %s,%s\n",
1989                                    arg_name, arg_value);
1990                      return -1;
1991               } else if (status == 0)
1992                      continue;
1993
1994        }
1995
1996        return 0;
1997 }
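/*
 * Editorial sketch (assumed config syntax): the arguments parsed above
 * normally come from the pipeline section of the application config file,
 * for example
 *
 *     traffic_type = 4        ; IPv4 (use 6 for IPv6)
 *     n_flows = 100000        ; rounded up to the next power of two
 *
 * With n_flows = 100000, rte_align32pow2(100000) yields 131072, which is
 * what gets stored in vfw_pipe->n_flows and later passed to
 * rte_ct_initialize_cnxn_tracker_with_synproxy().  Any option not handled
 * here falls through to rte_ct_set_configuration_options().
 */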
1998
1999 static void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p,
2000                                               void *msg);
2001
2002 static pipeline_msg_req_handler handlers[] = {
2003        [PIPELINE_MSG_REQ_PING] = pipeline_msg_req_ping_handler,
2004        [PIPELINE_MSG_REQ_STATS_PORT_IN] =
2005            pipeline_msg_req_stats_port_in_handler,
2006        [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
2007            pipeline_msg_req_stats_port_out_handler,
2008        [PIPELINE_MSG_REQ_STATS_TABLE] = pipeline_msg_req_stats_table_handler,
2009        [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
2010            pipeline_msg_req_port_in_enable_handler,
2011        [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
2012            pipeline_msg_req_port_in_disable_handler,
2013        [PIPELINE_MSG_REQ_CUSTOM] = pipeline_vfw_msg_req_custom_handler,
2014 };
2015
2016 static void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
2017                                                     void *msg);
2018 static pipeline_msg_req_handler custom_handlers[] = {
2019
2020        [PIPELINE_VFW_MSG_REQ_SYNPROXY_FLAGS] =
2021            pipeline_vfw_msg_req_synproxy_flag_handler
2022 };
2023
2024 /**
2025  * Create and initialize Pipeline Back End (BE).
2026  *
2027  * @param params
2028  *  A pointer to the pipeline specific parameters.
2029  * @param arg
2030  *  A pointer to pipeline specific data.
2031  *
2032  * @return
2033  *  A pointer to the created pipeline, or NULL on error.
2034  */
2035 static void
2036 *pipeline_vfw_init(struct pipeline_params *params, __rte_unused void *arg)
2037 {
2038        uint32_t size, i;
2039
2040        /* Check input arguments */
2041        if ((params == NULL) ||
2042                      (params->n_ports_in == 0) || (params->n_ports_out == 0))
2043               return NULL;
2044
2045        if (vfw_debug)
2046               printf("num ports in %d / num ports out %d\n",
2047                             params->n_ports_in, params->n_ports_out);
2048
2049        /* Create a single pipeline instance and initialize. */
2050        struct pipeline_vfw *pipe_vfw;
2051
2052        size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_vfw));
2053        pipe_vfw = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
2054
2055        if (pipe_vfw == NULL)
2056               return NULL;
2057
2058        struct pipeline *pipe;
2059
2060        pipe = &pipe_vfw->pipe;
2061
2062        strncpy(pipe->name, params->name, sizeof(pipe->name));
2063        pipe->log_level = params->log_level;
2064        pipe_vfw->n_flows = 4096;       /* small default value */
2065        pipe_vfw->traffic_type = IP_VERSION_4;
2066        pipe_vfw->pipeline_num = 0xff;
2067        for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
2068               pipe_vfw->links_map[i] = 0xff;
2069               pipe_vfw->outport_id[i] = 0xff;
2070        }
2071        PLOG(pipe, HIGH, "VFW");
2072
2073        /* Create a connection tracker instance and initialize it. */
2074        pipe_vfw->cnxn_tracker =
2075               rte_zmalloc(NULL, rte_ct_get_cnxn_tracker_size(),
2076                             RTE_CACHE_LINE_SIZE);
2077
2078        if (pipe_vfw->cnxn_tracker == NULL)
2079               return NULL;
2080 #ifdef ACL_ENABLE
2081        /* Create an ACL instance and initialize it. */
2082        pipe_vfw->plib_acl =
2083               rte_zmalloc(NULL, sizeof(struct lib_acl),
2084                             RTE_CACHE_LINE_SIZE);
2085
2086        if (pipe_vfw->plib_acl == NULL)
2087               return NULL;
2088 #endif
2089        timer_lcore = rte_lcore_id();
2090        /*
2091         * Now allocate a counter block entry. It appears that the
2092         * initialization of all instances is serialized on core 0,
2093         * so no lock is necessary.
2094         */
2095        struct rte_VFW_counter_block *counter_ptr;
2096
2097        if (rte_VFW_hi_counter_block_in_use == MAX_VFW_INSTANCES)
2098               /* error, exceeded table bounds */
2099               return NULL;
2100
2101        rte_VFW_hi_counter_block_in_use++;
2102        counter_ptr =
2103               &rte_vfw_counter_table[rte_VFW_hi_counter_block_in_use];
2104        strncpy(counter_ptr->name, params->name, sizeof(counter_ptr->name));
2105
2106        pipe_vfw->counters = counter_ptr;
2107
2108        rte_ct_initialize_default_timeouts(pipe_vfw->cnxn_tracker);
2109        /* Parse arguments */
2110        if (pipeline_vfw_parse_args(pipe_vfw, params))
2111               return NULL;
2112
2113        uint16_t pointers_offset =
2114               META_DATA_OFFSET + offsetof(struct mbuf_tcp_meta_data, next);
2115
2116        if (pipe_vfw->n_flows > 0)
2117               rte_ct_initialize_cnxn_tracker_with_synproxy(
2118                             pipe_vfw->cnxn_tracker,
2119                             pipe_vfw->n_flows,
2120                             params->name,
2121                             pointers_offset);
2122
2123        pipe_vfw->counters->ct_counters =
2124               rte_ct_get_counter_address(pipe_vfw->cnxn_tracker);
2125
2126        /* Pipeline */
2127        {
2128               struct rte_pipeline_params pipeline_params = {
2129                      .name = params->name,
2130                      .socket_id = params->socket_id,
2131                      .offset_port_id = META_DATA_OFFSET +
2132                             offsetof(struct mbuf_tcp_meta_data, output_port)
2133               };
2134
2135               pipe->p = rte_pipeline_create(&pipeline_params);
2136               if (pipe->p == NULL) {
2137                      rte_free(pipe_vfw);
2138                      return NULL;
2139               }
2140        }
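       /*
        * Editorial note: the stub table created below uses
        * RTE_PIPELINE_ACTION_PORT_META, so rte_pipeline reads each mbuf's output
        * port from its metadata area at offset_port_id.  The port-in actions
        * above fill that slot, roughly (illustrative only):
        *
        *     struct mbuf_tcp_meta_data *m = (struct mbuf_tcp_meta_data *)
        *            RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
        *     m->output_port = vfw_pipe->outport_id[dst_phy_port];
        *
        * Likewise, pointers_offset (META_DATA_OFFSET + offsetof(struct
        * mbuf_tcp_meta_data, next)) tells the connection tracker where to store
        * the "next" link when it chains buffered synproxy packets through the
        * same metadata area.
        */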
2141
2142        /* Input ports */
2143
2144        /*
2145         * create a different "arg_ah" for each input port, because
2146         * IP_PIPELINE does not pass the port number to the input port
2147         * handler; here each instance carries the pipeline and tracker.
2148         */
2149
2150        uint32_t in_ports_arg_size =
2151               RTE_CACHE_LINE_ROUNDUP((sizeof(struct vfw_ports_in_args)) *
2152                             (params->n_ports_in));
2153        struct vfw_ports_in_args *port_in_args =
2154               (struct vfw_ports_in_args *)
2155               rte_zmalloc(NULL, in_ports_arg_size, RTE_CACHE_LINE_SIZE);
2156
2157        if (port_in_args == NULL)
2158               return NULL;
2159
2160        pipe->n_ports_in = params->n_ports_in;
2161        for (i = 0; i < pipe->n_ports_in; i++) {
2162
2163               /* initialize this instance of port_in_args as necessary */
2164               port_in_args[i].pipe = pipe;
2165               port_in_args[i].cnxn_tracker = pipe_vfw->cnxn_tracker;
2166
2167               struct rte_pipeline_port_in_params port_params = {
2168                      .ops =
2169                             pipeline_port_in_params_get_ops(&params->port_in
2170                                           [i]),
2171                      .arg_create =
2172                             pipeline_port_in_params_convert(&params->port_in
2173                                           [i]),
2174                      .f_action = vfw_port_in_action_ipv4,
2175                      .arg_ah = &(port_in_args[i]),
2176                      .burst_size = params->port_in[i].burst_size,
2177               };
2178                if (pipe_vfw->traffic_type == IP_VERSION_6)
2179                      port_params.f_action = vfw_port_in_action_ipv6;
2180               int status = rte_pipeline_port_in_create(pipe->p, &port_params,
2181                             &pipe->port_in_id[i]);
2182
2183               if (status) {
2184                      rte_pipeline_free(pipe->p);
2185                      rte_free(pipe_vfw);
2186                      return NULL;
2187               }
2188        }
2189
2190        /* Output ports */
2191        pipe->n_ports_out = params->n_ports_out;
2192        for (i = 0; i < pipe->n_ports_out; i++) {
2193               struct rte_pipeline_port_out_params port_params = {
2194                      .ops = pipeline_port_out_params_get_ops(
2195                                    &params->port_out[i]),
2196                      .arg_create = pipeline_port_out_params_convert(
2197                                    &params->port_out[i]),
2198                      .f_action = NULL,
2199                      .arg_ah = NULL,
2200               };
2201
2202               int status = rte_pipeline_port_out_create(pipe->p, &port_params,
2203                             &pipe->port_out_id[i]);
2204
2205               if (status) {
2206                      rte_pipeline_free(pipe->p);
2207                      rte_free(pipe_vfw);
2208                      return NULL;
2209               }
2210        }
2211
2212        int pipeline_num = 0;
2213        int dont_care = sscanf(params->name, "PIPELINE%d", &pipeline_num);
2214
2215        if (dont_care != 1)
2216               printf("sscanf unable to read pipeline id\n");
2217        pipe_vfw->pipeline_num = (uint8_t) pipeline_num;
2218        register_pipeline_Qs(pipe_vfw->pipeline_num, pipe);
2219        set_link_map(pipe_vfw->pipeline_num, pipe, pipe_vfw->links_map);
2220        set_outport_id(pipe_vfw->pipeline_num, pipe,
2221                      pipe_vfw->outport_id);
2222        printf("pipeline_num=%d\n", pipeline_num);
2223 #ifdef ACL_ENABLE
2224        /* If this is the first VFW thread, create the common VFW rule tables */
2225        if (rte_VFW_hi_counter_block_in_use == 0) {
2226               vfw_rule_table_ipv4_active =
2227                      lib_acl_create_active_standby_table_ipv4(1,
2228                                    &vfw_n_rules);
2229               if (vfw_rule_table_ipv4_active == NULL) {
2230                      printf("Failed to create active table for IPV4\n");
2231                      rte_pipeline_free(pipe->p);
2232                      rte_free(pipe_vfw->cnxn_tracker);
2233                      rte_free(pipe_vfw->plib_acl);
2234                      rte_free(pipe_vfw);
2235                      return NULL;
2236               }
2237               vfw_rule_table_ipv4_standby =
2238                      lib_acl_create_active_standby_table_ipv4(2,
2239                                    &vfw_n_rules);
2240               if (vfw_rule_table_ipv4_standby == NULL) {
2241                      printf("Failed to create standby table for IPV4\n");
2242                      rte_pipeline_free(pipe->p);
2243                      rte_free(pipe_vfw->cnxn_tracker);
2244                      rte_free(pipe_vfw->plib_acl);
2245                      rte_free(pipe_vfw);
2246                      return NULL;
2247               }
2248
2249               vfw_rule_table_ipv6_active =
2250                      lib_acl_create_active_standby_table_ipv6(1,
2251                                    &vfw_n_rules);
2252
2253               if (vfw_rule_table_ipv6_active == NULL) {
2254                      printf("Failed to create active table for IPV6\n");
2255                      rte_pipeline_free(pipe->p);
2256                      rte_free(pipe_vfw->cnxn_tracker);
2257                      rte_free(pipe_vfw->plib_acl);
2258                      rte_free(pipe_vfw);
2259                      return NULL;
2260               }
2261               vfw_rule_table_ipv6_standby =
2262                      lib_acl_create_active_standby_table_ipv6(2,
2263                                    &vfw_n_rules);
2264               if (vfw_rule_table_ipv6_standby == NULL) {
2265                      printf("Failed to create standby table for IPV6\n");
2266                      rte_pipeline_free(pipe->p);
2267                      rte_free(pipe_vfw->cnxn_tracker);
2268                      rte_free(pipe_vfw->plib_acl);
2269                      rte_free(pipe_vfw);
2270                      return NULL;
2271               }
2272        }
2273
2274 #endif
2275
2276        /* Tables */
2277
2278        pipe->n_tables = 1;
2279
2280        struct rte_pipeline_table_params table_params = {
2281               .ops = &rte_table_stub_ops,
2282               .arg_create = NULL,
2283               .f_action_hit = NULL,
2284               .f_action_miss = NULL,
2285               .arg_ah = NULL,
2286               .action_data_size = 0,
2287        };
2288
2289        int status = rte_pipeline_table_create(pipe->p,
2290                      &table_params,
2291                      &pipe->table_id[0]);
2292
2293        if (status) {
2294               rte_pipeline_free(pipe->p);
2295               rte_free(pipe_vfw);
2296               return NULL;
2297        }
2298
2299        struct rte_pipeline_table_entry default_entry = {
2300               .action = RTE_PIPELINE_ACTION_PORT_META
2301        };
2302
2303        struct rte_pipeline_table_entry *default_entry_ptr;
2304
2305        status = rte_pipeline_table_default_entry_add(pipe->p,
2306                                                 pipe->table_id[0],
2307                                                 &default_entry,
2308                                                 &default_entry_ptr);
2309
2310        if (status) {
2311               rte_pipeline_free(pipe->p);
2312               rte_free(pipe_vfw);
2313               return NULL;
2314        }
2315        for (i = 0; i < pipe->n_ports_in; i++) {
2316               int status = rte_pipeline_port_in_connect_to_table(
2317                             pipe->p,
2318                             pipe->port_in_id[i],
2319                             pipe->table_id[0]);
2320
2321               if (status) {
2322                      rte_pipeline_free(pipe->p);
2323                      rte_free(pipe_vfw);
2324                      return NULL;
2325               }
2326        }
2327
2328        /* Enable input ports */
2329        for (i = 0; i < pipe->n_ports_in; i++) {
2330               int status =
2331                   rte_pipeline_port_in_enable(pipe->p, pipe->port_in_id[i]);
2332
2333               if (status) {
2334                      rte_pipeline_free(pipe->p);
2335                      rte_free(pipe_vfw);
2336                      return NULL;
2337               }
2338        }
2339
2340        /* Check pipeline consistency */
2341        if (rte_pipeline_check(pipe->p) < 0) {
2342               rte_pipeline_free(pipe->p);
2343               rte_free(pipe_vfw);
2344               return NULL;
2345        }
2346
2347        /* Message queues */
2348        pipe->n_msgq = params->n_msgq;
2349        for (i = 0; i < pipe->n_msgq; i++)
2350               pipe->msgq_in[i] = params->msgq_in[i];
2351
2352        for (i = 0; i < pipe->n_msgq; i++)
2353               pipe->msgq_out[i] = params->msgq_out[i];
2354
2355        /* Message handlers */
2356        memcpy(pipe->handlers, handlers, sizeof(pipe->handlers));
2357        memcpy(pipe_vfw->custom_handlers, custom_handlers,
2358               sizeof(pipe_vfw->custom_handlers));
2359
2360        return pipe_vfw;
2361 }
2362
2363 /**
2364  * Free resources and delete pipeline.
2365  *
2366  * @param pipeline
2367  *  A pointer to the pipeline.
2368  *
2369  * @return
2370  *  0 on success, negative on error.
2371  */
2372 static int pipeline_vfw_free(void *pipeline)
2373 {
2374        struct pipeline *p = (struct pipeline *)pipeline;
2375
2376        /* Check input arguments */
2377        if (p == NULL)
2378               return -1;
2379
2380        /* Free resources */
2381        rte_pipeline_free(p->p);
2382        rte_free(p);
2383        return 0;
2384 }
2385
2386 /**
2387  * Callback function to map input/output ports.
2388  *
2389  * @param pipeline
2390  *  A pointer to the pipeline.
2391  * @param port_in
2392  *  Input port ID
2393  * @param port_out
2394  *  A pointer to the Output port.
2395  *
2396  * @return
2397  *  0 on success, negative on error.
2398  */
2399 static int
2400 pipeline_vfw_track(void *pipeline, uint32_t port_in,
2401                     uint32_t *port_out)
2402 {
2403        struct pipeline *p = (struct pipeline *)pipeline;
2404
2405        /* Check input arguments */
2406        if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
2407               return -1;
2408
2409        if (p->n_ports_in == 1) {
2410               *port_out = 0;
2411               return 0;
2412        }
2413
2414        return -1;
2415 }
2416
2417 /**
2418  * Callback function to process timers.
2419  *
2420  * @param pipeline
2421  *  A pointer to the pipeline.
2422  *
2423  * @return
2424  *  0 on success, negative on error.
2425  */
2426 static int pipeline_vfw_timer(void *pipeline)
2427 {
2428        struct pipeline_vfw *p = (struct pipeline_vfw *)pipeline;
2429
2430        /*
2431         * Handle any good buffered packets released by synproxy before checking
2432         * for packets released by synproxy due to timeout,
2433         * so that no packets are missed.
2434         */
2435
2436        vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
2437                                    FORWARD_BUFFERED_PACKETS);
2438
2439        pipeline_msg_req_handle(&p->pipe);
2440        rte_pipeline_flush(p->pipe.p);
2441
2442        rte_ct_handle_expired_timers(p->cnxn_tracker);
2443
2444        /* now handle packets released by synproxy due to timeout. */
2445        vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
2446                                    DELETE_BUFFERED_PACKETS);
2447
2448        return 0;
2449 }
2450
2451 /**
2452  * Callback function to process CLI commands from FE.
2453  *
2454  * @param p
2455  *  A pointer to the pipeline.
2456  * @param msg
2457  *  A pointer to command specific data.
2458  *
2459  * @return
2460  *  A pointer to the message handler on success,
2461  *  pipeline_msg_req_invalid_handler on error.
2462  */
2463 void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p, void *msg)
2464 {
2465        struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
2466        struct pipeline_custom_msg_req *req = msg;
2467        pipeline_msg_req_handler f_handle;
2468
2469        f_handle = (req->subtype < PIPELINE_VFW_MSG_REQS) ?
2470            pipe_vfw->custom_handlers[req->subtype] :
2471            pipeline_msg_req_invalid_handler;
2472
2473        if (f_handle == NULL)
2474               f_handle = pipeline_msg_req_invalid_handler;
2475
2476        return f_handle(p, req);
2477 }
2478
2479 /**
2480  * Handler for synproxy ON/OFF CLI command.
2481  *
2482  * @param p
2483  *  A pointer to the pipeline.
2484  * @param msg
2485  *  A pointer to command specific data.
2486  *
2487  * @return
2488  *  A pointer to the response message containing the status.
2489  */
2490
2491 void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
2492                                               void *msg)
2493 {
2494        struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
2495        struct pipeline_vfw_synproxy_flag_msg_req *req = msg;
2496        struct pipeline_vfw_synproxy_flag_msg_rsp *rsp = msg;
2497
2498        if (req->synproxy_flag == 0) {
2499               rte_ct_disable_synproxy(pipe_vfw->cnxn_tracker);
2500               rsp->status = 0;
2501               printf("synproxy turned OFF for %s\n", p->name);
2502        } else if (req->synproxy_flag == 1) {
2503               rte_ct_enable_synproxy(pipe_vfw->cnxn_tracker);
2504               rsp->status = 0;
2505               printf("synproxy turned ON for %s\n", p->name);
2506        } else {
2507               printf("Invalid synproxy setting\n");
2508               rsp->status = -1;
2509        }
2510
2511        return rsp;
2512 }
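/*
 * Editorial sketch (not how the FE actually delivers messages; pipe_vfw
 * stands for an existing struct pipeline_vfw instance): the handler above
 * reuses the request buffer as the response, so a minimal direct invocation
 * would look like
 *
 *     struct pipeline_vfw_synproxy_flag_msg_req req = { .synproxy_flag = 1 };
 *     struct pipeline_vfw_synproxy_flag_msg_rsp *rsp =
 *            pipeline_vfw_msg_req_synproxy_flag_handler(&pipe_vfw->pipe, &req);
 *     // rsp->status == 0 on success, -1 for an invalid flag value
 *
 * In the running application the request instead arrives over the pipeline
 * message queues and is dispatched through
 * pipeline_vfw_msg_req_custom_handler().
 */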
2513
2514 struct pipeline_be_ops pipeline_vfw_be_ops = {
2515        .f_init = pipeline_vfw_init,
2516        .f_free = pipeline_vfw_free,
2517        .f_run = NULL,
2518        .f_timer = pipeline_vfw_timer,
2519        .f_track = pipeline_vfw_track,
2520 };