/*
 * [l2l3 stack] implements new nd state machine & nd buffering
 * samplevnf.git / VNFs / vFW / pipeline / pipeline_vfw_be.c
 */
1 /*
2 // Copyright (c) 2017 Intel Corporation
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 */
16
17 /**
18  * @file
19  * Pipeline VFW BE Implementation.
20  *
21  * Implementation of Pipeline VFW Back End (BE).
22  * Responsible for packet processing.
23  *
24  */
25
26 #define EN_SWP_ACL 1
27 #define EN_SWP_ARP 1
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <stdint.h>
32 #include <stddef.h>
33 #include <string.h>
34 #include <unistd.h>
35
36 #include <rte_common.h>
37 #include <rte_malloc.h>
38 #include <rte_ether.h>
39 #include <rte_ethdev.h>
40 #include <rte_ip.h>
41 #include <rte_udp.h>
42 #include <rte_icmp.h>
43 #include <rte_byteorder.h>
44
45 #include <rte_table_lpm.h>
46 #include <rte_table_hash.h>
47 #include <rte_table_array.h>
48 #include <rte_table_acl.h>
49 #include <rte_table_stub.h>
50 #include <rte_timer.h>
51 #include <rte_cycles.h>
52 #include <rte_pipeline.h>
53 #include <rte_spinlock.h>
54 #include <rte_prefetch.h>
55 #include "pipeline_actions_common.h"
56 #include "hash_func.h"
57 #include "pipeline_vfw.h"
58 #include "pipeline_vfw_be.h"
59 #include "rte_cnxn_tracking.h"
60 #include "pipeline_arpicmp_be.h"
61 #include "vnf_common.h"
62 #include "vnf_define.h"
63
64 #include "lib_arp.h"
65 #include "lib_icmpv6.h"
66 #include "pipeline_common_fe.h"
67
68 uint32_t timer_lcore;
69
70 uint8_t firewall_flag = 1;
71 uint8_t VFW_DEBUG = 0;
72 uint8_t cnxn_tracking_is_active = 1;
73 /**
74  * A structure defining the VFW pipeline input port per thread data.
75  */
76 struct vfw_ports_in_args {
77        struct pipeline *pipe;
78        struct rte_ct_cnxn_tracker *cnxn_tracker;
79 } __rte_cache_aligned;
80 /**
81  * A structure defining the VFW pipeline per thread data.
82  */
83 struct pipeline_vfw {
84        struct pipeline pipe;
85        pipeline_msg_req_handler custom_handlers[PIPELINE_VFW_MSG_REQS];
86
87        struct rte_ct_cnxn_tracker *cnxn_tracker;
88        struct rte_VFW_counter_block *counters;
89        struct rte_mbuf *pkt_buffer[PKT_BUFFER_SIZE];
90        struct lib_acl *plib_acl;
91        /* timestamp retrieved during in-port computations */
92        uint32_t n_flows;
93        uint8_t pipeline_num;
94        uint8_t traffic_type;
95        uint8_t links_map[PIPELINE_MAX_PORT_IN];
96        uint8_t outport_id[PIPELINE_MAX_PORT_IN];
97        /* Local ARP & ND Tables */
98        struct lib_arp_route_table_entry
99               local_lib_arp_route_table[MAX_ARP_RT_ENTRY];
100        uint8_t local_lib_arp_route_ent_cnt;
101        struct lib_nd_route_table_entry
102               local_lib_nd_route_table[MAX_ND_RT_ENTRY];
103        uint8_t local_lib_nd_route_ent_cnt;
104
105 } __rte_cache_aligned;
106 /**
107  * A structure defining the mbuf meta data for VFW.
108  */
109 struct mbuf_tcp_meta_data {
110 /* output port stored for RTE_PIPELINE_ACTION_PORT_META */
111        uint32_t output_port;
112        struct rte_mbuf *next;       /* next pointer for chained buffers */
113 } __rte_cache_aligned;
114
115 #define DONT_CARE_TCP_PACKET 0
116 #define IS_NOT_TCP_PACKET 0
117 #define IS_TCP_PACKET 1
118
119 #define META_DATA_OFFSET 128
120
121 #define RTE_PKTMBUF_HEADROOM 128       /* where is this defined ? */
122 #define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
123 #define ETH_HDR_SIZE 14
124 #define PROTOCOL_START (IP_START + 9)
125
126 #define TCP_START (IP_START + 20)
127 #define RTE_LB_PORT_OFFSET 204       /* TODO: Need definition in LB header */
128 #define TCP_START_IPV6 (IP_START + 40)
129 #define PROTOCOL_START_IPV6 (IP_START + 6)
130 #define IP_HDR_DSCP_OFST 1
131
132 #define TCP_PROTOCOL 6
133 #define UDP_PROTOCOL 17
134
135 #define DELETE_BUFFERED_PACKETS 0
136 #define FORWARD_BUFFERED_PACKETS 1
137 #define DO_ARP 1
138 #define NO_ARP 0
139
140 #define IPv4_HEADER_SIZE 20
141 #define IPv6_HEADER_SIZE 40
142
143 #define IP_VERSION_4 4
144 #define IP_VERSION_6 6
145 #define MIX 10
146 /* IPv6 */
147 #define IP_HDR_SIZE_IPV6  40
148 #define IP_HDR_DSCP_OFST_IPV6 0
149 #define IP_HDR_LENGTH_OFST_IPV6 4
150 #define IP_HDR_PROTOCOL_OFST_IPV6 6
151 #define IP_HDR_DST_ADR_OFST_IPV6 24
152 #define MAX_NUM_LOCAL_MAC_ADDRESS 16
153 /** The counter table for VFW pipeline per thread data.*/
154 struct rte_VFW_counter_block rte_vfw_counter_table[MAX_VFW_INSTANCES]
155 __rte_cache_aligned;
156 int rte_VFW_hi_counter_block_in_use = -1;
157
158 /* a spin lock used during vfw initialization only */
159 rte_spinlock_t rte_VFW_init_lock = RTE_SPINLOCK_INITIALIZER;
160
161 /* Action Array */
162 struct pipeline_action_key *action_array_a;
163 struct pipeline_action_key *action_array_b;
164 struct pipeline_action_key *action_array_active;
165 struct pipeline_action_key *action_array_standby;
166 uint32_t action_array_size;
167 struct action_counter_block
168 action_counter_table[MAX_VFW_INSTANCES][action_array_max]
169 __rte_cache_aligned;
170 /*
171   * Pipeline table strategy for firewall. Unfortunately, there does not seem to
172   * be any use for the built-in table lookup of ip_pipeline for the firewall.
173   * The main table requirement of the firewall is the hash table to maintain
  * connection info, but that is implemented separately in the connection
175   * tracking library. So a "dummy" table lookup will be performed.
176   * TODO: look into "stub" table and see if that can be used
177   * to avoid useless table lookup
178   */
179 uint64_t arp_pkts_mask;
180
181 /* Start TSC measurement */
182 /* Prefetch counters and pipe before this function */
183 static inline void start_tsc_measure(struct pipeline_vfw *vfw_pipe) {
184        vfw_pipe->counters->entry_timestamp = rte_get_tsc_cycles();
185        if (likely(vfw_pipe->counters->exit_timestamp))
186               vfw_pipe->counters->external_time_sum +=
187                      vfw_pipe->counters->entry_timestamp -
188                      vfw_pipe->counters->exit_timestamp;
189 }
190
191 /* End TSC measurement */
192 static inline void end_tsc_measure(
193        struct pipeline_vfw *vfw_pipe,
194        uint8_t n_pkts)
195 {
196        if (likely(n_pkts > 1)) {
197               vfw_pipe->counters->exit_timestamp = rte_get_tsc_cycles();
198               vfw_pipe->counters->internal_time_sum +=
199                      vfw_pipe->counters->exit_timestamp -
200                      vfw_pipe->counters->entry_timestamp;
201               vfw_pipe->counters->time_measurements++;
202        } else {
203               /* small counts skew results, ignore */
204               vfw_pipe->counters->exit_timestamp = 0;
205        }
206 }
207
208 /**
209  * Print packet for debugging.
210  *
211  * @param pkt
212  *  A pointer to the packet.
213  *
214  */
215 static __rte_unused  void print_pkt(struct rte_mbuf *pkt)
216 {
217        int i;
218        int size = (int)sizeof(struct mbuf_tcp_meta_data);
219        uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, META_DATA_OFFSET);
220
221        printf("Meta-data:\n");
222        for (i = 0; i < size; i++) {
223               printf("%02x ", rd[i]);
224               if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
225                      printf("\n");
226        }
227        printf("\n");
228        printf("IP and TCP/UDP headers:\n");
229        rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, IP_START);
230        for (i = 0; i < IP_HDR_SIZE_IPV6; i++) {
231               printf("%02x ", rd[i]);
232               if ((i & TWO_BYTE_PRINT) == TWO_BYTE_PRINT)
233                      printf("\n");
234        }
235        printf("\n");
236 }
237
238 /* TODO: are the protocol numbers defined somewhere with meaningful names? */
239 #define IP_ICMP_PROTOCOL 1
240 #define IP_TCP_PROTOCOL 6
241 #define IP_UDP_PROTOCOL 17
242 #define IPv6_FRAGMENT_HEADER 44
243
244 /**
245  * Return ethernet header structure form packet.
246  *
247  * @param pkt
248  *  A pointer to the packet.
249  *
250  */
251 static inline struct ether_hdr *rte_vfw_get_ether_addr(struct rte_mbuf *pkt)
252 {
253        return (struct ether_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
254                                                         ETHERNET_START);
255 }
256
257 /**
258  * Return IPV4 header structure form packet.
259  *
260  * @param pkt
261  *  A pointer to the packet.
262  *
263  */
264
265 static inline struct ipv4_hdr *rte_vfw_get_IPv4_hdr_addr(
266               struct rte_mbuf *pkt)
267 {
268        return (struct ipv4_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
269 }
270
271 static inline int rte_vfw_is_IPv4(struct rte_mbuf *pkt)
272 {
273        /* NOTE: Only supporting IP headers with no options,
274         * so header is fixed size */
275        uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
276               >> VERSION_NO_BYTE;
277
278        return ip_type == IPv4_HDR_VERSION;
279 }
280
281 static inline int rte_vfw_is_IPv6(struct rte_mbuf *pkt)
282 {
283        /* NOTE: Only supporting IP headers with no options,
284         * so header is fixed size */
285        uint8_t ip_type = RTE_MBUF_METADATA_UINT8(pkt, IP_START)
286               >> VERSION_NO_BYTE;
287
288        return ip_type == IPv6_HDR_VERSION;
289 }
290
291 static inline void rte_vfw_incr_drop_ctr(uint64_t *counter)
292 {
293        if (likely(firewall_flag))
294               (*counter)++;
295 }
296
/**
 * Divert ARP packets -- and ICMP/ICMPv6 packets addressed to this link --
 * to the last output port (the ARP/ICMP slow-path port) of the pipeline.
 *
 * @param pkt
 *  A pointer to the packet.
 * @param vfw_pipe
 *  A pointer to the VFW pipeline.
 *
 * @return
 *  0 if the packet was consumed here (inserted on the ARP/ICMP port or
 *  counted as an unsupported-type drop); 1 if the caller should continue
 *  normal fast-path processing of the packet.
 */
static uint8_t check_arp_icmp(
              struct rte_mbuf *pkt,
              struct pipeline_vfw *vfw_pipe)
{
       struct ether_hdr *ehdr;
       struct app_link_params *link;
       /* IPv6 solicited-node multicast prefix; only the leading
        * IPV6_ADD_CMP_MULTI bytes are compared below */
        uint8_t solicited_node_multicast_addr[IPV6_ADD_SIZE] = {
                0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00};

        /* ARP outport number: by convention the last out port */
       uint16_t out_port = vfw_pipe->pipe.n_ports_out - 1;
       struct ipv4_hdr *ipv4_h;
       struct ipv6_hdr *ipv6_h;
       link = &myApp->link_params[pkt->port];

       ehdr = rte_vfw_get_ether_addr(pkt);
       switch (rte_be_to_cpu_16(ehdr->ether_type)) {

       case ETH_TYPE_ARP:
              /* all ARP traffic goes to the slow path unconditionally */
              rte_pipeline_port_out_packet_insert(
                            vfw_pipe->pipe.p,
                            out_port,
                            pkt);

              vfw_pipe->counters->arpicmpPktCount++;

              return 0;
       case ETH_TYPE_IPV4:
              ipv4_h = (struct ipv4_hdr *)
                     RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
              /* ICMP addressed to this link's own IP, arriving on a
               * private physical port, is handled by the slow path */
              if ((ipv4_h->next_proto_id == IP_PROTOCOL_ICMP) &&
                            link->ip ==
                            rte_be_to_cpu_32(ipv4_h->dst_addr)) {
                     if (is_phy_port_privte(pkt->port)) {
                            rte_pipeline_port_out_packet_insert(
                                          vfw_pipe->pipe.p,
                                          out_port,
                                          pkt);

                     vfw_pipe->counters->arpicmpPktCount++;
                            return 0;
                     }
              }
              break;
#ifdef IPV6
        case ETH_TYPE_IPV6:
                ipv6_h = (struct ipv6_hdr *)
                        RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);

                /* ICMPv6 to our address or to the solicited-node
                 * multicast group goes to the slow path; any other
                 * ICMPv6 destination is counted as unsupported */
                if (ipv6_h->proto == ICMPV6_PROTOCOL_ID) {
                        if (!memcmp(ipv6_h->dst_addr, link->ipv6, IPV6_ADD_SIZE)
                                        || !memcmp(ipv6_h->dst_addr,
                                                solicited_node_multicast_addr,
                                                IPV6_ADD_CMP_MULTI)) {

                                rte_pipeline_port_out_packet_insert(
                                                vfw_pipe->pipe.p,
                                                out_port,
                                                pkt);

                                vfw_pipe->counters->arpicmpPktCount++;

                        } else
                                vfw_pipe->counters->
                                        pkts_drop_unsupported_type++;

                        return 0;
                }
                break;
#endif
       default:
              break;
}
       return 1;
}
373
374 /**
375  * Performs basic VFW ipv4 packet filtering.
376  * @param pkts
377  *  A pointer to the packets.
378  * @param pkts_mask
379  *  packet mask.
380  * @param vfw_pipe
381  *  A pointer to VFW pipeline.
382  */
383
static uint64_t
rte_vfw_ipv4_packet_filter_and_process(struct rte_mbuf **pkts,
                                 uint64_t pkts_mask,
                                 struct pipeline_vfw *vfw_pipe)
{

       /*
        * Make use of cache prefetch. At beginning of loop, want to prefetch
        * mbuf data for next iteration (not current one).
        * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
        * is 20 bytes (extensions not supported), while the IPv6 header is 40
        * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
        * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
        * need two pre-fetches.
        */

       uint8_t pos, next_pos = 0;
       uint64_t pkt_mask;       /* bitmask representing a single packet */
       struct rte_mbuf *pkt;
       struct rte_mbuf *next_pkt = NULL;
       struct ipv4_hdr *ihdr4;
       void *next_iphdr = NULL;

       /* empty burst: nothing to filter */
       if (unlikely(pkts_mask == 0))
              return pkts_mask;
       pos = (uint8_t) __builtin_ctzll(pkts_mask);
       pkt_mask = 1LLU << pos;       /* bitmask representing only this packet */
       pkt = pkts[pos];

       uint64_t bytes_processed = 0;
       /* bitmap of packets left to process */
       uint64_t pkts_to_process = pkts_mask;
       /* bitmap of valid packets to return */
       uint64_t valid_packets = pkts_mask;

       rte_prefetch0(pkt);
       /* prefetch counters, updated below. Most likely counters to update
        * at beginning */
       rte_prefetch0(&vfw_pipe->counters);

       do {                     /* always execute at least once */

              /* remove this packet from remaining list */
              uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;

              if (likely(next_pkts_to_process)) {
                     /* another packet to process after this, prefetch it */

                     next_pos =
                            (uint8_t) __builtin_ctzll(next_pkts_to_process);
                     next_pkt = pkts[next_pos];
                     next_iphdr = RTE_MBUF_METADATA_UINT32_PTR(next_pkt,
                                   IP_START);
                     rte_prefetch0(next_iphdr);
              }

              int discard = 0;
              /* remove this packet from remaining list */
              /* NOTE(review): redundant - the "&=" above already cleared
               * pkt_mask from pkts_to_process */
              pkts_to_process &= ~pkt_mask;

              if (enable_hwlb) {
                      /* ARP/ICMP destined to us is consumed by the slow
                       * path; clear it from the valid bitmap and advance */
                      if (!check_arp_icmp(pkt, vfw_pipe)) {
                              /* make next packet data the current */
                              pkts_to_process = next_pkts_to_process;
                              pos = next_pos;
                              pkt = next_pkt;
                              ihdr4 = next_iphdr;
                              pkt_mask = 1LLU << pos;
                              valid_packets &= ~pkt_mask;
                              continue;
                     }
              }

              uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);

              bytes_processed += packet_length;

              ihdr4 = (struct ipv4_hdr *)
                     RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);

              /* verify that packet size according to mbuf is at least
               * as large as the size according to the IP header.
               */

              uint32_t ip_length = rte_bswap16(ihdr4->total_length);

              if (unlikely
                            (ip_length > (packet_length - ETH_HDR_SIZE))) {
                     discard = 1;
                     vfw_pipe->counters->pkts_drop_bad_size++;
              }

              /*
               * IPv4 fragmented if: MF (more fragments) or Fragment
               * Offset are non-zero. Header in Intel order, so flip
               * constant to compensate. Note that IPv6 uses a header
               * extension for identifying fragments.
               */

              int fragmented = (ihdr4->fragment_offset & 0xff3f) != 0;
              uint8_t ttl = ihdr4->time_to_live;

              if (unlikely(fragmented)) {
                     discard = 1;
                     vfw_pipe->counters->pkts_drop_fragmented++;
              }

              /* NOTE(review): unlike the IPv6 path, the TTL is only
               * checked here, never decremented - confirm intended */
              if (unlikely(ttl <= 1)) {
                     /*
                      * about to decrement to zero (or is somehow
                      * already zero), so discard
                      */
                     discard = 1;
                     vfw_pipe->counters->pkts_drop_ttl++;
              }

              /*
               * Dropping the packets other than TCP AND UDP.
               */

              uint8_t proto = ihdr4->next_proto_id;

              if (unlikely(!(proto == IP_TCP_PROTOCOL ||
                                          proto == IP_UDP_PROTOCOL ||
                                          proto == IP_ICMP_PROTOCOL))) {
                     discard = 1;
                     vfw_pipe->counters->
                            pkts_drop_unsupported_type++;
              }

              if (unlikely(discard)) {
                     valid_packets &= ~pkt_mask;
              }

              /* make next packet data the current */
              /* NOTE(review): ihdr4 is overwritten at the top of the next
               * iteration, so the assignment below is a dead store */
              pkts_to_process = next_pkts_to_process;
              pos = next_pos;
              pkt = next_pkt;
              ihdr4 = next_iphdr;
              pkt_mask = 1LLU << pos;

       } while (pkts_to_process);

       /* finalize counters, etc. */
       vfw_pipe->counters->bytes_processed += bytes_processed;

       /* with the firewall disabled, pass the burst through unfiltered */
       if (likely(firewall_flag))
              return valid_packets;
       else
              return pkts_mask;
}
535 /**
536  * Performs basic VFW IPV6 packet filtering.
537  * @param pkts
538  *  A pointer to the packets.
539  * @param pkts_mask
540  *  packet mask.
541  * @param vfw_pipe
542  *  A pointer to VFW pipeline.
543  */
       static uint64_t
rte_vfw_ipv6_packet_filter_and_process(struct rte_mbuf **pkts,
              uint64_t pkts_mask,
              struct pipeline_vfw *vfw_pipe)
{

       /*
        * Make use of cache prefetch. At beginning of loop, want to prefetch
        * mbuf data for next iteration (not current one).
        * Note that ethernet header (14 bytes) is cache aligned. IPv4 header
        * is 20 bytes (extensions not supported), while the IPv6 header is 40
        * bytes. TCP header is 20 bytes, UDP is 8. One cache line prefetch
        * will cover IPv4 and TCP or UDP, but to get IPv6 and TCP,
        * need two pre-fetches.
        */

       uint8_t pos, next_pos = 0;
       uint64_t pkt_mask;       /* bitmask representing a single packet */
       struct rte_mbuf *pkt;
       struct rte_mbuf *next_pkt = NULL;
       struct ipv6_hdr *ihdr6;
       void *next_iphdr = NULL;

       /* empty burst: nothing to filter */
       if (unlikely(pkts_mask == 0))
              return pkts_mask;
       pos = (uint8_t) __builtin_ctzll(pkts_mask);
       pkt_mask = 1LLU << pos;       /* bitmask representing only this packet */
       pkt = pkts[pos];

       uint64_t bytes_processed = 0;
       /* bitmap of packets left to process */
       uint64_t pkts_to_process = pkts_mask;
       /* bitmap of valid packets to return */
       uint64_t valid_packets = pkts_mask;

       /* prefetch counters, updated below. Most likely counters to update
        * at beginning */
       rte_prefetch0(&vfw_pipe->counters);

       do {                     /* always execute at least once */

              /* remove this packet from remaining list */
              uint64_t next_pkts_to_process = pkts_to_process &= ~pkt_mask;

              if (likely(next_pkts_to_process)) {
                     /* another packet to process after this, prefetch it */

                     next_pos =
                         (uint8_t) __builtin_ctzll(next_pkts_to_process);
                     next_pkt = pkts[next_pos];
                     next_iphdr =
                         RTE_MBUF_METADATA_UINT32_PTR(next_pkt, IP_START);
                     rte_prefetch0(next_iphdr);
              }

              int discard = 0;
              /* remove this packet from remaining list */
              /* NOTE(review): redundant - the "&=" above already cleared
               * pkt_mask from pkts_to_process */
              pkts_to_process &= ~pkt_mask;

              if (enable_hwlb) {
                     /* ARP/ICMPv6 destined to us is consumed by the slow
                      * path; clear it from the valid bitmap and advance */
                     if (!check_arp_icmp(pkt, vfw_pipe)) {
                             /* make next packet data the current */
                             pkts_to_process = next_pkts_to_process;
                             pos = next_pos;
                             pkt = next_pkt;
                             ihdr6 = next_iphdr;
                             pkt_mask = 1LLU << pos;
                             valid_packets &= ~pkt_mask;
                             continue;
                     }
              }

              uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);

              bytes_processed += packet_length;

              ihdr6 = (struct ipv6_hdr *)
                     RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);

              /*
               * verify that packet size according to mbuf is at least
               * as large as the size according to the IP header.
               * For IPv6, note that size includes header extensions
               * but not the base header size
               */

              uint32_t ip_length =
                     rte_bswap16(ihdr6->payload_len) + IPv6_HEADER_SIZE;

              if (unlikely
                            (ip_length > (packet_length - ETH_HDR_SIZE))) {
                     discard = 1;
                     vfw_pipe->counters->pkts_drop_bad_size++;
              }

              /*
               * Dropping the packets other than TCP AND UDP.
               */

              uint8_t proto = ihdr6->proto;

              if (unlikely(!(proto == IP_TCP_PROTOCOL ||
                                          proto == IP_UDP_PROTOCOL ||
                                          proto == IP_ICMP_PROTOCOL))) {
                     discard = 1;
                     /* a fragment-extension next-header is counted as a
                      * fragment drop, anything else as unsupported */
                     if (proto == IPv6_FRAGMENT_HEADER)
                            vfw_pipe->counters->
                                   pkts_drop_fragmented++;
                     else
                            vfw_pipe->counters->
                                   pkts_drop_unsupported_type++;
              }

              /*
               * Behave like a router, and decrement the TTL of an
               * IP packet. If this causes the TTL to become zero,
               * the packet will be discarded. Unlike a router,
               * no ICMP code 11 (Time * Exceeded) message will be
               * sent back to the packet originator.
               */

              if (unlikely(ihdr6->hop_limits <= 1)) {
                     /*
                      * about to decrement to zero (or is somehow
                      * already zero), so discard
                      */
                     discard = 1;
                     vfw_pipe->counters->pkts_drop_ttl++;
              }

              if (unlikely(discard))
                     valid_packets &= ~pkt_mask;
              else
                     ihdr6->hop_limits--;

              /* make next packet data the current */
              /* NOTE(review): ihdr6 is overwritten at the top of the next
               * iteration, so the assignment below is a dead store */
              pkts_to_process = next_pkts_to_process;
              pos = next_pos;
              pkt = next_pkt;
              ihdr6 = next_iphdr;
              pkt_mask = 1LLU << pos;

       } while (pkts_to_process);

       /* finalize counters, etc. */
       vfw_pipe->counters->bytes_processed += bytes_processed;

       /* with the firewall disabled, pass the burst through unfiltered */
       if (likely(firewall_flag))
              return valid_packets;
       else
              return pkts_mask;
}
696
697 /**
698  * exchange the mac address so source becomes destination and vice versa.
699  *
700  * @param ehdr
701  *  A pointer to the ethernet header.
702  *
703  */
704 static inline void rte_sp_exchange_mac_addresses(struct ether_hdr *ehdr)
705 {
706        struct ether_addr saved_copy;
707
708        ether_addr_copy(&ehdr->d_addr, &saved_copy);
709        ether_addr_copy(&ehdr->s_addr, &ehdr->d_addr);
710        ether_addr_copy(&saved_copy, &ehdr->s_addr);
711 }
712 #ifdef EN_SWP_ARP
713
714 /**
715  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
716  * To support synproxy, some (altered) packets may need to be sent back where
717  * they came from. The ip header has already been adjusted, but the ethernet
718  * header has not, so this must be performed here.
719  * Return an updated pkts_mask, since arp may drop some packets
720  *
721  * @param pkts
722  *  A pointer to the packet array.
723  * @param pkt_num
724  *  Packet num to start processing
725  * @param pkts_mask
726  *  Packet mask
727  * @param synproxy_reply_mask
728  *  Reply Packet mask for Synproxy
729  * @param vfw_pipe
730  *  A pointer to VFW pipeline.
731  */
static void
pkt4_work_vfw_arp_ipv4_packets(struct rte_mbuf **pkts,
              uint16_t pkt_num,
              uint64_t *pkts_mask,
              uint64_t synproxy_reply_mask,
              struct pipeline_vfw *vfw_pipe)
{

       uint8_t i;

       struct mbuf_tcp_meta_data *meta_data_addr;
       struct ether_hdr *ehdr;
       struct rte_mbuf *pkt;

       /* processes a group of 4 packets starting at bit pkt_num */
       for (i = 0; i < 4; i++) {
              uint32_t dest_if = INVALID_DESTIF;
              /* bitmask representing only this packet */
              uint64_t pkt_mask = 1LLU << (pkt_num + i);

              pkt = pkts[i];

              /* skip packets already removed from the burst */
              if(!(*pkts_mask & pkt_mask))
                     continue;

              /* synproxy replies go back out the way they came in */
              int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);

              meta_data_addr = (struct mbuf_tcp_meta_data *)
                     RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
              ehdr = rte_vfw_get_ether_addr(pkt);


              struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
                     RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
              /* nhip is only used in the debug print below */
              uint32_t nhip = 0;

              uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
              if (must_reverse)
                     rte_sp_exchange_mac_addresses(ehdr);

        /* resolve destination interface and MAC via the ARP layer;
         * also fills in ehdr->d_addr when an entry exists */
        struct arp_entry_data *ret_arp_data = NULL;
        ret_arp_data = get_dest_mac_addr_port(dest_address,
                       &dest_if, &ehdr->d_addr);
        meta_data_addr->output_port =  vfw_pipe->outport_id[dest_if];

        if (arp_cache_dest_mac_present(dest_if)) {
                /* ARP entry resolved: set our own MAC as source and
                 * flush any packets buffered while it was pending */
                ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
                update_nhip_access(dest_if);
                if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
                        arp_send_buffered_pkts(ret_arp_data,
                                 &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);

                            }

                     } else {
                /* no ARP entry at all: count the packet as dropped.
                 * NOTE(review): *pkts_mask is not cleared here -
                 * presumably the caller handles removal; confirm */
                if (unlikely(ret_arp_data == NULL)) {
                        if (VFW_DEBUG)
                        printf("%s: NHIP Not Found, nhip:%x , "
                        "outport_id: %d\n", __func__, nhip,
                        vfw_pipe->outport_id[dest_if]);

                        /* Drop the pkt */
                        vfw_pipe->counters->
                                 pkts_drop_without_arp_entry++;
                        continue;
                            }
                /* entry exists but is unresolved: buffer the packet
                 * until resolution, or drop once the queue is full */
                if (ret_arp_data->status == INCOMPLETE ||
                           ret_arp_data->status == PROBE) {
                                if (ret_arp_data->num_pkts >= NUM_DESC) {
                                        /* ICMP req sent, drop packet by
                                                * changing the mask */
                                        vfw_pipe->counters->pkts_drop_without_arp_entry++;
                                        continue;
                                } else {
                                        /* mark for buffering; caller uses
                                         * arp_pkts_mask to pull these out */
                                        arp_pkts_mask |= pkt_mask;
                                        arp_queue_unresolved_packet(ret_arp_data, pkt);
                                        continue;
                     }
              }
        }
       }
}
813
814
815 /**
816  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
817  * To support synproxy, some (altered) packets may need to be sent back where
818  * they came from. The ip header has already been adjusted, but the ethernet
819  * header has not, so this must be performed here.
820  * Return an updated pkts_mask, since arp may drop some packets
821  *
822  * @param pkts
823  *  A pointer to the packet.
824  * @param packet_num
825  *  Packet number to process
826  * @param pkts_mask
827  *  Packet mask pointer
828  * @param synproxy_reply_mask
829  *  Reply Packet mask for Synproxy
830  * @param vfw_pipe
831  *  A pointer to VFW pipeline.
832  */
833 static void
834 pkt_work_vfw_arp_ipv4_packets(struct rte_mbuf *pkts,
835               uint16_t pkt_num,
836               uint64_t *pkts_mask,
837               uint64_t synproxy_reply_mask,
838               struct pipeline_vfw *vfw_pipe)
839 {
840
841        uint32_t dest_if = INVALID_DESTIF;
842
843        struct mbuf_tcp_meta_data *meta_data_addr;
844        struct ether_hdr *ehdr;
845        struct rte_mbuf *pkt;
846        uint64_t pkt_mask = 1LLU << pkt_num;
847
848        pkt = pkts;
849
850        if(*pkts_mask & pkt_mask) {
851
852               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
853
854               meta_data_addr = (struct mbuf_tcp_meta_data *)
855                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
856               ehdr = rte_vfw_get_ether_addr(pkt);
857
858
859               struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
860                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
861               uint32_t nhip = 0;
862
863               uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
864               if (must_reverse)
865                      rte_sp_exchange_mac_addresses(ehdr);
866
867         struct arp_entry_data *ret_arp_data = NULL;
868                      ret_arp_data = get_dest_mac_addr_port(dest_address,
869                                    &dest_if, &ehdr->d_addr);
870                         meta_data_addr->output_port =  vfw_pipe->outport_id[dest_if];
871
872         if (arp_cache_dest_mac_present(dest_if)) {
873
874                 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
875                 update_nhip_access(dest_if);
876                 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
877                         arp_send_buffered_pkts(ret_arp_data,
878                                  &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
879
880                             }
881                      } else {
882                 if (unlikely(ret_arp_data == NULL)) {
883
884                         if (VFW_DEBUG)
885                         printf("%s: NHIP Not Found, nhip:%x , "
886                         "outport_id: %d\n", __func__, nhip,
887                         vfw_pipe->outport_id[dest_if]);
888
889                         vfw_pipe->counters->
890                                 pkts_drop_without_arp_entry++;
891                         return;
892                             }
893                 if (ret_arp_data->status == INCOMPLETE ||
894                            ret_arp_data->status == PROBE) {
895                                 if (ret_arp_data->num_pkts >= NUM_DESC) {
896                                         /* ICMP req sent, drop packet by
897                                                 * changing the mask */
898                                         vfw_pipe->counters->pkts_drop_without_arp_entry++;
899                                         return;
900                                 } else {
901                                         arp_pkts_mask |= pkt_mask;
902                                         arp_queue_unresolved_packet(ret_arp_data, pkt);
903                                         return;
904                      }
905               }
906         }
907
908        }
909 }
910
911
912 /**
913  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
914  * To support synproxy, some (altered) packets may need to be sent back where
915  * they came from. The ip header has already been adjusted, but the ethernet
916  * header has not, so this must be performed here.
917  * Return an updated pkts_mask, since arp may drop some packets
918  *
919  * @param pkts
920  *  A pointer to the packets array.
921  * @param pkt_num
922  *  Packet number to start processing.
923  * @param pkts_mask
924  *  Packet mask pointer
925  * @param synproxy_reply_mask
926  *  Reply Packet mask for Synproxy
927  * @param vfw_pipe
928  *  A pointer to VFW pipeline.
929  */
930
931 static void
932 pkt4_work_vfw_arp_ipv6_packets(struct rte_mbuf **pkts,
933               uint16_t pkt_num,
934               uint64_t *pkts_mask,
935               uint64_t synproxy_reply_mask,
936               struct pipeline_vfw *vfw_pipe)
937 {
938        uint8_t nh_ipv6[IPV6_ADD_SIZE];
939        struct ether_addr hw_addr;
940        struct mbuf_tcp_meta_data *meta_data_addr;
941        struct ether_hdr *ehdr;
942        struct rte_mbuf *pkt;
943        uint8_t i;
944
945        for (i = 0; i < 4; i++) {
946               uint32_t dest_if = INVALID_DESTIF;
947               /* bitmask representing only this packet */
948               uint64_t pkt_mask = 1LLU << (pkt_num + i);
949
950               pkt = pkts[i];
951
952               if(!(*pkts_mask & pkt_mask))
953                      continue;
954               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
955
956               meta_data_addr = (struct mbuf_tcp_meta_data *)
957                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
958               ehdr = rte_vfw_get_ether_addr(pkt);
959
960               struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
961                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
962
963               uint8_t nhip[IPV6_ADD_SIZE];
964               uint8_t dest_address[IPV6_ADD_SIZE];
965
966               memset(nhip, 0, IPV6_ADD_SIZE);
967               if (must_reverse)
968                      rte_sp_exchange_mac_addresses(ehdr);
969
970               rte_mov16(dest_address, ihdr->dst_addr);
971               memset(nh_ipv6, 0, IPV6_ADD_SIZE);
972               struct nd_entry_data *ret_nd_data = NULL;
973               ret_nd_data = get_dest_mac_address_ipv6_port(
974                                    &dest_address[0],
975                                    &dest_if,
976                                    &hw_addr,
977                                    &nh_ipv6[0]);
978
979                 meta_data_addr->output_port = vfw_pipe->
980                                     outport_id[dest_if];
981               if (nd_cache_dest_mac_present(dest_if)) {
982                     ether_addr_copy(get_link_hw_addr(dest_if),
983                                    &ehdr->s_addr);
984                     update_nhip_access(dest_if);
985
986                     if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
987                         nd_send_buffered_pkts(ret_nd_data,
988                                 &ehdr->d_addr, meta_data_addr->output_port);
989                     }
990               } else {
991                     if (unlikely(ret_nd_data == NULL)) {
992                          *pkts_mask &= ~pkt_mask;
993                           vfw_pipe->counters->
994                                 pkts_drop_without_arp_entry++;
995                           continue;
996                     }
997                     if (ret_nd_data->status == INCOMPLETE ||
998                           ret_nd_data->status == PROBE) {
999                           if (ret_nd_data->num_pkts >= NUM_DESC) {
1000                                 /* Drop the pkt */
1001                                 *pkts_mask &= ~pkt_mask;
1002                                 vfw_pipe->counters->pkts_drop_without_arp_entry++;
1003                                 continue;
1004                           } else {
1005                                 arp_pkts_mask |= pkt_mask;
1006                                 nd_queue_unresolved_packet(ret_nd_data, pkt);
1007                                 continue;
1008                           }
1009                     }
1010               }
1011
1012        }
1013 }
1014
1015
1016 /**
1017  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1018  * To support synproxy, some (altered) packets may need to be sent back where
1019  * they came from. The ip header has already been adjusted, but the ethernet
1020  * header has not, so this must be performed here.
1021  * Return an updated pkts_mask, since arp may drop some packets
1022  *
1023  * @param pkts
1024  *  A pointer to the packets.
1025  * @param pkt_num
1026  *  Packet number to process.
1027  * @param pkts_mask
1028  *  Packet mask pointer
1029  * @param synproxy_reply_mask
1030  *  Reply Packet mask for Synproxy
1031  * @param vfw_pipe
1032  *  A pointer to VFW pipeline.
1033  */
1034
1035 static void
1036 pkt_work_vfw_arp_ipv6_packets(struct rte_mbuf *pkts,
1037               uint16_t pkt_num,
1038               uint64_t *pkts_mask,
1039               uint64_t synproxy_reply_mask,
1040               struct pipeline_vfw *vfw_pipe)
1041 {
1042        uint8_t nh_ipv6[IPV6_ADD_SIZE];
1043        struct ether_addr hw_addr;
1044        struct mbuf_tcp_meta_data *meta_data_addr;
1045        struct ether_hdr *ehdr;
1046        struct rte_mbuf *pkt;
1047
1048        uint32_t dest_if = INVALID_DESTIF;
1049        /* bitmask representing only this packet */
1050        uint64_t pkt_mask = 1LLU << pkt_num;
1051
1052        pkt = pkts;
1053
1054        if(*pkts_mask & pkt_mask) {
1055
1056               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1057
1058               meta_data_addr = (struct mbuf_tcp_meta_data *)
1059                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1060               ehdr = rte_vfw_get_ether_addr(pkt);
1061
1062               struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
1063                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1064
1065               uint8_t nhip[IPV6_ADD_SIZE];
1066               uint8_t dest_address[IPV6_ADD_SIZE];
1067
1068               memset(nhip, 0, IPV6_ADD_SIZE);
1069               if (must_reverse)
1070                      rte_sp_exchange_mac_addresses(ehdr);
1071               rte_mov16(dest_address, ihdr->dst_addr);
1072               memset(nh_ipv6, 0, IPV6_ADD_SIZE);
1073               struct nd_entry_data *ret_nd_data = NULL;
1074               ret_nd_data = get_dest_mac_address_ipv6_port(
1075                                    &dest_address[0],
1076                                    &dest_if,
1077                                    &hw_addr,
1078                                    &nh_ipv6[0]);
1079               meta_data_addr->output_port = vfw_pipe->
1080                                     outport_id[dest_if];
1081               if (nd_cache_dest_mac_present(dest_if)) {
1082                      ether_addr_copy(get_link_hw_addr(dest_if),
1083                                    &ehdr->s_addr);
1084                     update_nhip_access(dest_if);
1085
1086                     if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
1087                         nd_send_buffered_pkts(ret_nd_data,
1088                                 &ehdr->d_addr, meta_data_addr->output_port);
1089                      }
1090               } else {
1091                     if (unlikely(ret_nd_data == NULL)) {
1092                         *pkts_mask &= ~pkt_mask;
1093                         vfw_pipe->counters->
1094                                 pkts_drop_without_arp_entry++;
1095                         return;
1096                     }
1097                     if (ret_nd_data->status == INCOMPLETE ||
1098                           ret_nd_data->status == PROBE) {
1099                           if (ret_nd_data->num_pkts >= NUM_DESC) {
1100                                 /* Drop the pkt */
1101                                 *pkts_mask &= ~pkt_mask;
1102                                 vfw_pipe->counters->pkts_drop_without_arp_entry++;
1103                                 return;
1104                           } else {
1105                                 arp_pkts_mask |= pkt_mask;
1106                                 nd_queue_unresolved_packet(ret_nd_data, pkt);
1107                                 return;
1108                           }
1109                     }
1110               }
1111
1112        }
1113
1114 }
1115
1116 #else
1117
1118 /**
1119  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1120  * To support synproxy, some (altered) packets may need to be sent back where
1121  * they came from. The ip header has already been adjusted, but the ethernet
1122  * header has not, so this must be performed here.
1123  * Return an updated pkts_mask, since arp may drop some packets
1124  *
1125  * @param pkts
1126  *  A pointer to the packet.
1127  * @param pkts_mask
1128  *  Packet mask
1129  * @param synproxy_reply_mask
1130  *  Reply Packet mask for Synproxy
1131  * @param vfw_pipe
1132  *  A pointer to VFW pipeline.
1133  */
1134 static uint64_t
1135 rte_vfw_arp_ipv4_packets(struct rte_mbuf **pkts,
1136               uint64_t pkts_mask,
1137               uint64_t synproxy_reply_mask,
1138               struct pipeline_vfw *vfw_pipe)
1139 {
1140        uint64_t pkts_to_arp = pkts_mask;
1141
1142        uint32_t ret;
1143        uint32_t dest_if = INVALID_DESTIF;
1144        for (; pkts_to_arp;) {
1145               struct ether_addr hw_addr;
1146               struct mbuf_tcp_meta_data *meta_data_addr;
1147               struct ether_hdr *ehdr;
1148               struct rte_mbuf *pkt;
1149               uint16_t phy_port;
1150
1151               uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1152               /* bitmask representing only this packet */
1153               uint64_t pkt_mask = 1LLU << pos;
1154               /* remove this packet from remaining list */
1155               pkts_to_arp &= ~pkt_mask;
1156               pkt = pkts[pos];
1157               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1158
1159               phy_port = pkt->port;
1160               meta_data_addr = (struct mbuf_tcp_meta_data *)
1161                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1162               ehdr = rte_vfw_get_ether_addr(pkt);
1163
1164
1165               struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
1166                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1167               uint32_t nhip = 0;
1168
1169               uint32_t dest_address = rte_bswap32(ihdr->dst_addr);
1170               if (must_reverse)
1171                      rte_sp_exchange_mac_addresses(ehdr);
1172                 struct arp_entry_data *ret_arp_data = NULL;
1173                      ret_arp_data = get_dest_mac_addr_port(dest_address,
1174                                    &dest_if, &ehdr->d_addr);
1175                 meta_data_addr->output_port =  vfw_pipe->outport_id[dest_if];
1176         if (arp_cache_dest_mac_present(dest_if)) {
1177
1178                 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
1179                 update_nhip_access(dest_if);
1180                 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
1181
1182                         arp_send_buffered_pkts(ret_arp_data,
1183                                  &ehdr->d_addr, vfw_pipe->outport_id[dest_if]);
1184
1185                             }
1186
1187                      } else {
1188                 if (unlikely(ret_arp_data == NULL)) {
1189
1190                         if (VFW_DEBUG)
1191                         printf("%s: NHIP Not Found, nhip:%x , "
1192                         "outport_id: %d\n", __func__, nhip,
1193                         vfw_pipe->outport_id[dest_if]);
1194
1195                         /* Drop the pkt */
1196                                    vfw_pipe->counters->
1197                                           pkts_drop_without_arp_entry++;
1198                         continue;
1199                      }
1200                 if (ret_arp_data->status == INCOMPLETE ||
1201                            ret_arp_data->status == PROBE) {
1202                                 if (ret_arp_data->num_pkts >= NUM_DESC) {
1203                                         /* ICMP req sent, drop packet by
1204                                                 * changing the mask */
1205                                         vfw_pipe->counters->pkts_drop_without_arp_entry++;
1206                                         continue;
1207                                 } else {
1208                                         arp_pkts_mask |= pkt_mask;
1209                                         arp_queue_unresolved_packet(ret_arp_data, pkt);
1210                                         continue;
1211               }
1212 }
1213         }
1214
1215        }
1216
1217        return pkts_mask;
1218 }
1219 /**
1220  * walk every valid mbuf (denoted by pkts_mask) and apply arp to the packet.
1221  * To support synproxy, some (altered) packets may need to be sent back where
1222  * they came from. The ip header has already been adjusted, but the ethernet
1223  * header has not, so this must be performed here.
1224  * Return an updated pkts_mask, since arp may drop some packets
1225  *
1226  * @param pkts
1227  *  A pointer to the packet.
1228  * @param pkts_mask
1229  *  Packet mask
1230  * @param synproxy_reply_mask
1231  *  Reply Packet mask for Synproxy
1232  * @param vfw_pipe
1233  *  A pointer to VFW pipeline.
1234  */
1235
1236        static uint64_t
1237 rte_vfw_arp_ipv6_packets(struct rte_mbuf **pkts,
1238               uint64_t pkts_mask,
1239               uint64_t synproxy_reply_mask,
1240               struct pipeline_vfw *vfw_pipe)
1241 {
1242        uint64_t pkts_to_arp = pkts_mask;
1243        uint8_t nh_ipv6[IPV6_ADD_SIZE];
1244        uint32_t ret;
1245        uint32_t dest_if = INVALID_DESTIF;
1246
1247        for (; pkts_to_arp;) {
1248               struct ether_addr hw_addr;
1249               struct mbuf_tcp_meta_data *meta_data_addr;
1250               struct ether_hdr *ehdr;
1251               struct rte_mbuf *pkt;
1252               uint16_t phy_port;
1253
1254               uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_arp);
1255               /* bitmask representing only this packet */
1256               uint64_t pkt_mask = 1LLU << pos;
1257               /* remove this packet from remaining list */
1258               pkts_to_arp &= ~pkt_mask;
1259               pkt = pkts[pos];
1260               int must_reverse = ((synproxy_reply_mask & pkt_mask) != 0);
1261
1262               phy_port = pkt->port;
1263               meta_data_addr = (struct mbuf_tcp_meta_data *)
1264                      RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1265               ehdr = rte_vfw_get_ether_addr(pkt);
1266
1267               struct ipv6_hdr *ihdr = (struct ipv6_hdr *)
1268                      RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1269
1270               uint8_t nhip[IPV6_ADD_SIZE];
1271               uint8_t dest_address[IPV6_ADD_SIZE];
1272
1273               memset(nhip, 0, IPV6_ADD_SIZE);
1274               if (must_reverse)
1275                      rte_sp_exchange_mac_addresses(ehdr);
1276
1277               rte_mov16(dest_address, ihdr->dst_addr);
1278               memset(nh_ipv6, 0, IPV6_ADD_SIZE);
1279               struct nd_entry_data *ret_nd_data = NULL;
1280               ret_nd_data = get_dest_mac_address_ipv6_port(
1281                                    &dest_address[0],
1282                                    &dest_if,
1283                                    &hw_addr,
1284                                    &nh_ipv6[0]);
1285
1286               meta_data_addr->output_port = vfw_pipe->
1287                                     outport_id[dest_if];
1288               if (nd_cache_dest_mac_present(dest_if)) {
1289                      ether_addr_copy(get_link_hw_addr(dest_if),
1290                                    &ehdr->s_addr);
1291                     update_nhip_access(dest_if);
1292
1293                     if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
1294                         nd_send_buffered_pkts(ret_nd_data,
1295                                &ehdr->d_addr, meta_data_addr->output_port);
1296                      }
1297
1298               } else {
1299                     if (unlikely(ret_nd_data == NULL)) {
1300                      pkts_mask &= ~pkt_mask;
1301                           vfw_pipe->counters->
1302                                 pkts_drop_without_arp_entry++;
1303                           continue;
1304                     }
1305                     if (ret_nd_data->status == INCOMPLETE ||
1306                           ret_nd_data->status == PROBE) {
1307                           if (ret_nd_data->num_pkts >= NUM_DESC) {
1308                                 /* Drop the pkt */
1309                                 pkts_mask &= ~pkt_mask;
1310                                 vfw_pipe->counters->
1311                                     pkts_drop_without_arp_entry++;
1312                                 continue;
1313                           } else {
1314                                 arp_pkts_mask |= pkt_mask;
1315                                 nd_queue_unresolved_packet(ret_nd_data, pkt);
1316                                 continue;
1317                           }
1318                     }
1319               }
1320
1321        }
1322
1323        return pkts_mask;
1324 }
1325
1326 #endif
1327 /**
1328  * Packets processing for connection tracking.
1329  *
1330  * @param vfw_pipe
1331  *  A pointer to the pipeline.
1332  * @param ct
1333  *  A pointer to the connetion tracker .
1334  * @param pkts
1335  *  A pointer to a burst of packets.
1336  * @param packet_mask_in
1337  *  Input packets Mask.
1338  */
1339
1340        static  uint64_t
1341 vfw_process_buffered_pkts(__rte_unused struct pipeline_vfw *vfw_pipe,
1342               struct rte_ct_cnxn_tracker *ct,
1343                           struct rte_mbuf **pkts, uint64_t packet_mask_in)
1344 {
1345        uint64_t keep_mask = packet_mask_in;
1346        struct rte_synproxy_helper sp_helper;       /* for synproxy */
1347
1348        keep_mask =
1349            rte_ct_cnxn_tracker_batch_lookup_with_synproxy(ct, pkts, keep_mask,
1350                                                     &sp_helper);
1351
1352        if (unlikely(sp_helper.hijack_mask))
1353               printf("buffered hijack pkts severe error\n");
1354
1355        if (unlikely(sp_helper.reply_pkt_mask))
1356               printf("buffered reply pkts severe error\n");
1357
1358        return keep_mask;
1359 }
1360
1361 /**
1362  * Free Packets from mbuf.
1363  *
1364  * @param ct
1365  *  A pointer to the connection tracker to increment drop counter.
1366  *
1367  * @param pkt
1368  *  Packet to be free.
1369  */
1370 static inline void
1371 vfw_pktmbuf_free(struct rte_ct_cnxn_tracker *ct, struct rte_mbuf *pkt)
1372 {
1373        ct->counters->pkts_drop++;
1374        rte_pktmbuf_free(pkt);
1375 }
1376
1377 static void
1378 vfw_output_or_delete_buffered_packets(struct rte_ct_cnxn_tracker *ct,
1379                                     struct rte_pipeline *p,
1380                                     struct rte_mbuf **pkts,
1381                                     int num_pkts, uint64_t pkts_mask)
1382 {
1383        int i;
1384        struct mbuf_tcp_meta_data *meta_data_addr;
1385        uint64_t pkt_mask = 1;
1386
1387        /* any clear bits in low-order num_pkts bit of
1388         * pkt_mask must be discarded */
1389
1390        for (i = 0; i < num_pkts; i++) {
1391               struct rte_mbuf *pkt = pkts[i];
1392
1393               if (pkts_mask & pkt_mask) {
1394                      printf("vfw_output_or_delete_buffered_packets\n");
1395                      meta_data_addr = (struct mbuf_tcp_meta_data *)
1396                          RTE_MBUF_METADATA_UINT32_PTR(pkt, META_DATA_OFFSET);
1397                      rte_pipeline_port_out_packet_insert(
1398                                    p, meta_data_addr->output_port, pkt);
1399
1400               } else {
1401                      vfw_pktmbuf_free(ct, pkt);
1402               }
1403
1404               pkt_mask = pkt_mask << 1;
1405        }
1406 }
1407
/**
 * Drain the connection tracker's list of packets buffered during synproxy
 * setup.
 *
 * rte_ct_get_buffered_synproxy_packets() returns a linked list of mbufs
 * chained through the per-mbuf meta-data "next" field. When forward_pkts is
 * non-zero, the packets are collected into vfw_pipe->pkt_buffer, re-run
 * through connection tracking in batches of PKT_BUFFER_SIZE, and the
 * survivors are emitted; otherwise every packet is freed.
 *
 * @param p
 *  A pointer to the pipeline.
 * @param vfw_pipe
 *  A pointer to the vfw pipeline.
 * @param ct
 *  A pointer to the connection tracker.
 * @param forward_pkts
 *  Non-zero to forward the buffered packets, zero to free them all.
 *
 */
static void
vfw_handle_buffered_packets(struct rte_pipeline *p,
                            struct pipeline_vfw *vfw_pipe,
                            struct rte_ct_cnxn_tracker *ct, int forward_pkts)
{
       struct rte_mbuf *pkt_list = rte_ct_get_buffered_synproxy_packets(ct);

       /* common case: nothing buffered; list is non-NULL only during
        * proxy setup */
       if (likely(pkt_list == NULL))
              return;

       int pkt_count = 0;
       uint64_t keep_mask = 0;
       struct rte_mbuf **pkts = vfw_pipe->pkt_buffer;
       struct rte_mbuf *pkt;

       while (pkt_list != NULL) {
              struct mbuf_tcp_meta_data *meta_data =
              (struct mbuf_tcp_meta_data *)
              RTE_MBUF_METADATA_UINT32_PTR(pkt_list, META_DATA_OFFSET);

              /* detach head of list and advance list */
              pkt = pkt_list;
              pkt_list = meta_data->next;

              if (forward_pkts) {

                     pkts[pkt_count++] = pkt;

                     if (pkt_count == PKT_BUFFER_SIZE) {
                            /* buffer full: flush a complete batch.
                             * keep_mask is currently 0, so complementing
                             * sets all 64 bits.
                             * NOTE(review): this assumes
                             * PKT_BUFFER_SIZE == 64; if it is smaller,
                             * mask bits beyond the batch are also set --
                             * TODO confirm against pipeline_vfw.h */
                            keep_mask = ~keep_mask;

                            keep_mask =
                                vfw_process_buffered_pkts(vfw_pipe,
                                                         ct, pkts,
                                                         keep_mask);
                            /* forward survivors, free the rest */
                            vfw_output_or_delete_buffered_packets(
                                          ct, p,
                                          pkts,
                                          PKT_BUFFER_SIZE,
                                          keep_mask);
                            pkt_count = 0;
                            keep_mask = 0;
                     }

              } else {
                     vfw_pktmbuf_free(ct, pkt);
              }
       }

       /* flush any remaining partial batch */
       if (pkt_count != 0) {
              /* mask covering only the pkt_count buffered packets */
              keep_mask = RTE_LEN2MASK(pkt_count, uint64_t);

              keep_mask =
                     vfw_process_buffered_pkts(vfw_pipe, ct, pkts,
                                   keep_mask);

              vfw_output_or_delete_buffered_packets(ct, p, pkts, pkt_count,
                            keep_mask);

              pkt_count = 0;
              keep_mask = 0;
       }
}
1487 /**
1488  * The pipeline port-in action is used to do all the firewall and
1489  * connection tracking work for IPV4 packets.
1490  *
1491  * @param p
1492  *  A pointer to the pipeline.
1493   * @param pkts
1494  *  A pointer to a burst of packets.
1495  * @param n_pkts
1496  *  Number of packets to process.
1497  * @param arg
1498  *  A pointer to pipeline specific data.
1499  *
1500  * @return
1501  *  0 on success, negative on error.
1502  */
1503
1504 static int
1505 vfw_port_in_action_ipv4(struct rte_pipeline *p,
1506               struct rte_mbuf **pkts,
1507               __rte_unused uint32_t n_pkts, __rte_unused void *arg)
1508 {
1509        struct vfw_ports_in_args *port_in_args =
1510               (struct vfw_ports_in_args *)arg;
1511        struct pipeline_vfw *vfw_pipe =
1512               (struct pipeline_vfw *)port_in_args->pipe;
1513        struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;
1514
1515        start_tsc_measure(vfw_pipe);
1516
1517        uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
1518        uint64_t pkts_drop_mask;
1519        uint64_t hijack_mask = 0;
1520        arp_pkts_mask = 0;
1521        uint64_t synproxy_reply_mask = 0;       /* for synproxy */
1522        uint64_t keep_mask = packet_mask_in;
1523
1524        uint64_t conntrack_mask = 0, connexist_mask = 0;
1525        struct rte_CT_helper ct_helper;
1526        uint8_t j;
1527
1528        /*
1529         * This routine uses a bit mask to represent which packets in the
1530         * "pkts" table are considered valid. Any table entry which exists
1531         * and is considered valid has the corresponding bit in the mask set.
1532         * Otherwise, it is cleared. Note that the mask is 64 bits,
1533         * but the number of packets in the table may be considerably less.
1534         * Any mask bits which do correspond to actual packets are cleared.
1535         * Various routines are called which may determine that an existing
1536         * packet is somehow invalid. The routine will return an altered bit
1537         * mask, with the bit cleared. At the end of all the checks,
1538         * packets are dropped if their mask bit is a zero
1539         */
1540
1541        rte_prefetch0(& vfw_pipe->counters);
1542
1543 #ifdef EN_SWP_ACL
1544        /* Pre-fetch all rte_mbuf header */
1545        for(j = 0; j < n_pkts; j++)
1546               rte_prefetch0(pkts[j]);
1547 #endif
1548        memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
1549 #ifdef EN_SWP_ACL
1550        rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
1551        rte_prefetch0(& vfw_pipe->counters->sum_latencies);
1552 #endif
1553
1554        if (unlikely(vfw_debug > 1))
1555               printf("Enter in-port action IPV4 with %p packet mask\n",
1556                             (void *)packet_mask_in);
1557        vfw_pipe->counters->pkts_received =
1558               vfw_pipe->counters->pkts_received + n_pkts;
1559
1560        if (unlikely(VFW_DEBUG))
1561               printf("vfw_port_in_action_ipv4 pkts_received: %" PRIu64
1562                             " n_pkts: %u\n",
1563                             vfw_pipe->counters->pkts_received, n_pkts);
1564
1565        /* first handle handle any previously buffered packets now released */
1566        vfw_handle_buffered_packets(p, vfw_pipe, ct,
1567                      FORWARD_BUFFERED_PACKETS);
1568
1569        /* now handle any new packets on input ports */
1570        if (likely(firewall_flag)) {
1571               keep_mask = rte_vfw_ipv4_packet_filter_and_process(pkts,
1572                             keep_mask, vfw_pipe);
1573               vfw_pipe->counters->pkts_fw_forwarded +=
1574                      __builtin_popcountll(keep_mask);
1575        }
1576 #ifdef ACL_ENABLE
1577 #ifdef EN_SWP_ACL
1578        rte_prefetch0((void*)vfw_pipe->plib_acl);
1579        rte_prefetch0((void*)vfw_rule_table_ipv4_active);
1580 #endif /* EN_SWP_ACL */
1581        keep_mask = lib_acl_ipv4_pkt_work_key(
1582                      vfw_pipe->plib_acl, pkts, keep_mask,
1583                      &vfw_pipe->counters->pkts_drop_without_rule,
1584                      vfw_rule_table_ipv4_active,
1585                      action_array_active,
1586                      action_counter_table,
1587                      &conntrack_mask, &connexist_mask);
1588        vfw_pipe->counters->pkts_acl_forwarded +=
1589               __builtin_popcountll(keep_mask);
1590        if (conntrack_mask > 0) {
1591               keep_mask = conntrack_mask;
1592               ct_helper.no_new_cnxn_mask = connexist_mask;
1593               cnxn_tracking_is_active = 1;
1594        } else
1595               cnxn_tracking_is_active = 0;
1596 #endif /* ACL_ENABLE */
1597
1598        if (likely(cnxn_tracking_is_active)) {
1599               rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
1600                             &keep_mask, &ct_helper, IPv4_HEADER_SIZE);
1601               synproxy_reply_mask = ct_helper.reply_pkt_mask;
1602               hijack_mask = ct_helper.hijack_mask;
1603
1604        }
1605
1606 #ifdef EN_SWP_ARP
1607        for(j = 0; j < (n_pkts & 0x3LLU); j++) {
1608                rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1609                                    META_DATA_OFFSET));
1610                rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1611                                    ETHERNET_START));
1612        }
1613        rte_prefetch0((void*)in_port_dir_a);
1614        rte_prefetch0((void*)prv_to_pub_map);
1615
1616        uint8_t i;
1617        for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
1618               for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
1619                      rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1620                                           META_DATA_OFFSET));
1621                      rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1622                                           ETHERNET_START));
1623               }
1624               pkt4_work_vfw_arp_ipv4_packets(&pkts[i], i, &keep_mask,
1625                             synproxy_reply_mask, vfw_pipe);
1626        }
1627        for (j = i; j < n_pkts; j++) {
1628               rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1629                                    META_DATA_OFFSET));
1630               rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
1631                                    ETHERNET_START));
1632        }
1633        for (; i < n_pkts; i++) {
1634               pkt_work_vfw_arp_ipv4_packets(pkts[i], i, &keep_mask,
1635                             synproxy_reply_mask, vfw_pipe);
1636        }
1637 #else
1638        rte_prefetch0((void*)in_port_dir_a);
1639        rte_prefetch0((void*)prv_to_pub_map);
1640        rte_prefetch0((void*) & vfw_pipe->local_lib_arp_route_table);
1641        keep_mask = rte_vfw_arp_ipv4_packets(pkts, keep_mask,
1642                      synproxy_reply_mask, vfw_pipe);
1643 #endif
1644
1645        if (vfw_debug > 1) {
1646               printf("  Exit in-port action with %p packet mask\n",
1647                             (void *)keep_mask);
1648               if (keep_mask != packet_mask_in)
1649                      printf("dropped packets, %p in, %p out\n",
1650                                    (void *)packet_mask_in,
1651                                    (void *)keep_mask);
1652        }
1653
1654        /* Update mask before returning, so that bad packets are dropped */
1655         if (arp_pkts_mask) {
1656                 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
1657         }
1658
1659        pkts_drop_mask = packet_mask_in & ~keep_mask;
1660
1661        if (unlikely(pkts_drop_mask != 0)) {
1662               /* printf("drop %p\n", (void *) pkts_drop_mask); */
1663               rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1664        }
1665
1666        if (unlikely(hijack_mask != 0))
1667               rte_pipeline_ah_packet_hijack(p, hijack_mask);
1668
1669        vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
1670        vfw_pipe->counters->num_pkts_measurements++;
1671
1672        end_tsc_measure(vfw_pipe, n_pkts);
1673
1674        return 0;
1675 }
/**
 * The pipeline port-in action is used to do all the firewall and
 * connection tracking work for IPV6 packets.
 *
 * Processing stages, each of which may clear bits in the keep mask:
 *   1. flush any synproxy-buffered packets now released,
 *   2. basic firewall filtering (rte_vfw_ipv6_packet_filter_and_process),
 *   3. optional ACL lookup (ACL_ENABLE),
 *   4. optional connection tracking (when the ACL marked packets for it),
 *   5. ND/ARP resolution and L2 header rewrite,
 * then drops every packet whose mask bit was cleared and hijacks
 * synproxy-generated replies.
 *
 * @param p
 *  A pointer to the pipeline.
 * @param pkts
 *  A pointer to a burst of packets.
 * @param n_pkts
 *  Number of packets to process.
 * @param arg
 *  A pointer to pipeline specific data (struct vfw_ports_in_args).
 *
 * @return
 *  0 on success, negative on error.
 */

static int
vfw_port_in_action_ipv6(struct rte_pipeline *p,
		struct rte_mbuf **pkts,
		__rte_unused uint32_t n_pkts, __rte_unused void *arg)
{
	/* NOTE(review): n_pkts and arg are marked __rte_unused but are in
	 * fact used below; the attribute is harmless ("possibly unused")
	 * but misleading. */
	struct vfw_ports_in_args *port_in_args =
		(struct vfw_ports_in_args *)arg;
	struct pipeline_vfw *vfw_pipe =
		(struct pipeline_vfw *)port_in_args->pipe;
	struct rte_ct_cnxn_tracker *ct = port_in_args->cnxn_tracker;

	/* start latency measurement for this burst */
	start_tsc_measure(vfw_pipe);

	uint64_t packet_mask_in = RTE_LEN2MASK(n_pkts, uint64_t);
	uint64_t pkts_drop_mask;
	uint64_t hijack_mask = 0;
	uint64_t synproxy_reply_mask = 0;	/* for synproxy */
	uint64_t keep_mask = packet_mask_in;

	uint64_t conntrack_mask = 0, connexist_mask = 0;
	struct rte_CT_helper ct_helper;
	uint32_t j;

	/*
	 * This routine uses a bit mask to represent which packets in the
	 * "pkts" table are considered valid. Any table entry which exists
	 * and is considered valid has the corresponding bit in the mask set.
	 * Otherwise, it is cleared. Note that the mask is 64 bits,
	 * but the number of packets in the table may be considerably less.
	 * Any mask bits which do correspond to actual packets are cleared.
	 * Various routines are called which may determine that an existing
	 * packet is somehow invalid. The routine will return an altered bit
	 * mask, with the bit cleared. At the end of all the checks,
	 * packets are dropped if their mask bit is a zero
	 */

	rte_prefetch0(& vfw_pipe->counters);

	/* Pre-fetch all rte_mbuf header */
	for(j = 0; j < n_pkts; j++)
		rte_prefetch0(pkts[j]);

	/* clear all helper masks before the tracker fills them in */
	memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
	rte_prefetch0(& vfw_pipe->counters->pkts_drop_ttl);
	rte_prefetch0(& vfw_pipe->counters->sum_latencies);

	if (vfw_debug > 1)
		printf("Enter in-port action with %p packet mask\n",
				(void *)packet_mask_in);
	vfw_pipe->counters->pkts_received =
		vfw_pipe->counters->pkts_received + n_pkts;
	if (VFW_DEBUG)
		printf("vfw_port_in_action pkts_received: %" PRIu64
				" n_pkts: %u\n",
				vfw_pipe->counters->pkts_received, n_pkts);

	/* first handle any previously buffered packets now released */
	vfw_handle_buffered_packets(p, vfw_pipe, ct,
			FORWARD_BUFFERED_PACKETS);

	/* now handle any new packets on input ports */
	if (likely(firewall_flag)) {
		keep_mask = rte_vfw_ipv6_packet_filter_and_process(pkts,
				keep_mask, vfw_pipe);
		vfw_pipe->counters->pkts_fw_forwarded +=
			__builtin_popcountll(keep_mask);
	}
#ifdef ACL_ENABLE

#ifdef EN_SWP_ACL
	rte_prefetch0((void*)vfw_pipe->plib_acl);
	rte_prefetch0((void*)vfw_rule_table_ipv6_active);
#endif /* EN_SWP_ACL */
	/* ACL lookup: may clear keep bits and flags packets that need
	 * connection tracking (conntrack_mask) or that must not create a
	 * new connection (connexist_mask). */
	keep_mask = lib_acl_ipv6_pkt_work_key(
			vfw_pipe->plib_acl, pkts, keep_mask,
			&vfw_pipe->counters->pkts_drop_without_rule,
			vfw_rule_table_ipv6_active,
			action_array_active,
			action_counter_table,
			&conntrack_mask, &connexist_mask);
	vfw_pipe->counters->pkts_acl_forwarded +=
		__builtin_popcountll(keep_mask);
	if (conntrack_mask > 0) {
		keep_mask = conntrack_mask;
		ct_helper.no_new_cnxn_mask = connexist_mask;
		cnxn_tracking_is_active = 1;
	} else
		cnxn_tracking_is_active = 0;
#endif /* ACL_ENABLE */
	if (likely(cnxn_tracking_is_active)) {
		rte_ct_cnxn_tracker_batch_lookup_type(ct, pkts,
				&keep_mask, &ct_helper, IPv6_HEADER_SIZE);
		/* packets synproxy answered on our behalf, and packets it
		 * wants routed out a different path */
		synproxy_reply_mask = ct_helper.reply_pkt_mask;
		hijack_mask = ct_helper.hijack_mask;

	}

#ifdef EN_SWP_ARP
	/* Software-pipelined ND/ARP stage: warm the metadata and Ethernet
	 * header cache lines of the first (n_pkts mod 4) packets, then
	 * process groups of 4 while prefetching the next group. */
	for(j = 0; j < (n_pkts & 0x3LLU); j++) {
		rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
				    META_DATA_OFFSET));
		rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
				    ETHERNET_START));
	}
	rte_prefetch0((void*)in_port_dir_a);
	rte_prefetch0(vfw_pipe->local_lib_nd_route_table);
	uint32_t i;

	for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
		/* prefetch the following quad while working on this one */
		for (j = i+4; ((j < n_pkts) && (j < i+8)); j++) {
			rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
					     META_DATA_OFFSET));
			rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
					     ETHERNET_START));
		}
		pkt4_work_vfw_arp_ipv6_packets(&pkts[i], i, &keep_mask,
				synproxy_reply_mask, vfw_pipe);
	}
	/* remaining (< 4) packets: prefetch, then process one at a time */
	for (j = i; j < n_pkts; j++) {
		rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
				    META_DATA_OFFSET));
		rte_prefetch0(RTE_MBUF_METADATA_UINT32_PTR(pkts[j],
				    ETHERNET_START));
	}
	for (; i < n_pkts; i++) {
		pkt_work_vfw_arp_ipv6_packets(pkts[i], i, &keep_mask,
				synproxy_reply_mask, vfw_pipe);
	}
#else
	rte_prefetch0((void*)in_port_dir_a);
	rte_prefetch0((void*) & vfw_pipe->local_lib_arp_route_table);
	keep_mask = rte_vfw_arp_ipv6_packets(pkts, keep_mask,
			synproxy_reply_mask, vfw_pipe);
#endif

	if (vfw_debug > 1) {
		printf("  Exit in-port action with %p packet mask\n",
				(void *)keep_mask);
		if (keep_mask != packet_mask_in)
			printf("dropped packets, %p in, %p out\n",
					(void *)packet_mask_in,
					(void *)keep_mask);
	}

	/* Update mask before returning, so that bad packets are dropped */
	/* NOTE(review): unlike the IPv4 in-port action, there is no
	 * arp_pkts_mask hijack here — confirm ND packets cannot reach this
	 * path, or whether the [l2l3 stack] ND buffering covers it. */

	pkts_drop_mask = packet_mask_in & ~keep_mask;

	if (unlikely(pkts_drop_mask != 0)) {
		/* printf("drop %p\n", (void *) pkts_drop_mask); */
		rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
	}

	if (unlikely(hijack_mask != 0))
		rte_pipeline_ah_packet_hijack(p, hijack_mask);

	vfw_pipe->counters->num_batch_pkts_sum += n_pkts;
	vfw_pipe->counters->num_pkts_measurements++;

	/* close the latency measurement opened by start_tsc_measure() */
	end_tsc_measure(vfw_pipe, n_pkts);

	return 0;
}
1856
1857
1858 /**
1859  * Parse arguments in config file.
1860  *
1861  * @param vfw_pipe
1862  *  A pointer to the pipeline.
1863  * @param params
1864  *  A pointer to pipeline specific parameters.
1865  *
1866  * @return
1867  *  0 on success, negative on error.
1868  */
1869 static int
1870 pipeline_vfw_parse_args(struct pipeline_vfw *vfw_pipe,
1871               struct pipeline_params *params)
1872 {
1873        uint32_t i;
1874        int status;
1875
1876        if (vfw_debug)
1877               printf("VFW pipeline_vfw_parse_args params->n_args: %d\n",
1878                             params->n_args);
1879
1880        for (i = 0; i < params->n_args; i++) {
1881               char *arg_name = params->args_name[i];
1882               char *arg_value = params->args_value[i];
1883
1884               printf("VFW args[%d]: %s %d, %s\n", i, arg_name,
1885                             atoi(arg_value), arg_value);
1886 #ifdef ACL_ENABLE
1887               status = lib_acl_parse_config(vfw_pipe->plib_acl,
1888                                    arg_name, arg_value, &vfw_n_rules);
1889               if (status < 0) {
1890                      printf("rte_ct_set_configuration_options =%s,%s",
1891                                    arg_name, arg_value);
1892                      return -1;
1893               } else if (status == 0)
1894                      continue;
1895
1896 #endif              /* traffic_type */
1897               if (strcmp(arg_name, "traffic_type") == 0) {
1898                      int traffic_type = atoi(arg_value);
1899
1900                      if (traffic_type == 0 ||
1901                                    !(traffic_type == IP_VERSION_4 ||
1902                                           traffic_type == IP_VERSION_6)) {
1903                             printf("not IPV4/IPV6");
1904                             return -1;
1905                      }
1906
1907                      vfw_pipe->traffic_type = traffic_type;
1908                      continue;
1909               }
1910
1911
1912               /* n_flows */
1913               if (strcmp(arg_name, "n_flows") == 0) {
1914                      int n_flows = atoi(arg_value);
1915
1916                      if (n_flows == 0)
1917                             return -1;
1918
1919                      /* must be power of 2, round up if not */
1920                      if (!rte_is_power_of_2(n_flows))
1921                             n_flows = rte_align32pow2(n_flows);
1922
1923                      vfw_pipe->n_flows = n_flows;
1924                      continue;
1925               }
1926
1927               /* not firewall option, process as cnxn tracking option */
1928               status = rte_ct_set_configuration_options(
1929                             vfw_pipe->cnxn_tracker,
1930                             arg_name, arg_value);
1931               if (status < 0) {
1932                      printf("rte_ct_set_configuration_options =%s,%s",
1933                                    arg_name, arg_value);
1934                      return -1;
1935               } else if (status == 0)
1936                      continue;
1937
1938        }
1939
1940        return 0;
1941 }
1942
static void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p,
					      void *msg);

/* Dispatch table for standard FE->BE pipeline messages; CUSTOM requests
 * are forwarded to the VFW-specific custom handler table below. */
static pipeline_msg_req_handler handlers[] = {
	[PIPELINE_MSG_REQ_PING] = pipeline_msg_req_ping_handler,
	[PIPELINE_MSG_REQ_STATS_PORT_IN] =
	    pipeline_msg_req_stats_port_in_handler,
	[PIPELINE_MSG_REQ_STATS_PORT_OUT] =
	    pipeline_msg_req_stats_port_out_handler,
	[PIPELINE_MSG_REQ_STATS_TABLE] = pipeline_msg_req_stats_table_handler,
	[PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
	    pipeline_msg_req_port_in_enable_handler,
	[PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
	    pipeline_msg_req_port_in_disable_handler,
	[PIPELINE_MSG_REQ_CUSTOM] = pipeline_vfw_msg_req_custom_handler,
};

static void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
						    void *msg);

/* Dispatch table for VFW-specific CUSTOM message subtypes. */
static pipeline_msg_req_handler custom_handlers[] = {

	[PIPELINE_VFW_MSG_REQ_SYNPROXY_FLAGS] =
	    pipeline_vfw_msg_req_synproxy_flag_handler
};
1967
1968 /**
1969  * Create and initialize Pipeline Back End (BE).
1970  *
1971  * @param params
1972  *  A pointer to the pipeline specific parameters..
1973  * @param arg
1974  *  A pointer to pipeline specific data.
1975  *
1976  * @return
1977  *  A pointer to the pipeline create, NULL on error.
1978  */
1979 static void
1980 *pipeline_vfw_init(struct pipeline_params *params, __rte_unused void *arg)
1981 {
1982        uint32_t size, i;
1983
1984        /* Check input arguments */
1985        if ((params == NULL) ||
1986                      (params->n_ports_in == 0) || (params->n_ports_out == 0))
1987               return NULL;
1988
1989        if (vfw_debug)
1990               printf("num ports in %d / num ports out %d\n",
1991                             params->n_ports_in, params->n_ports_out);
1992
1993        /* Create a single pipeline instance and initialize. */
1994        struct pipeline_vfw *pipe_vfw;
1995
1996        size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_vfw));
1997        pipe_vfw = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
1998
1999        if (pipe_vfw == NULL)
2000               return NULL;
2001
2002        struct pipeline *pipe;
2003
2004        pipe = &pipe_vfw->pipe;
2005
2006        strncpy(pipe->name, params->name, sizeof(pipe->name));
2007        pipe->log_level = params->log_level;
2008        pipe_vfw->n_flows = 4096;       /* small default value */
2009        pipe_vfw->traffic_type = MIX;
2010        pipe_vfw->pipeline_num = 0xff;
2011        for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
2012               pipe_vfw->links_map[i] = 0xff;
2013               pipe_vfw->outport_id[i] = 0xff;
2014        }
2015        PLOG(pipe, HIGH, "VFW");
2016
2017        /* Create a firewall instance and initialize. */
2018        pipe_vfw->cnxn_tracker =
2019               rte_zmalloc(NULL, rte_ct_get_cnxn_tracker_size(),
2020                             RTE_CACHE_LINE_SIZE);
2021
2022        if (pipe_vfw->cnxn_tracker == NULL)
2023               return NULL;
2024 #ifdef ACL_ENABLE
2025        /* Create a acl instance and initialize. */
2026        pipe_vfw->plib_acl =
2027               rte_zmalloc(NULL, sizeof(struct lib_acl),
2028                             RTE_CACHE_LINE_SIZE);
2029
2030        if (pipe_vfw->plib_acl == NULL)
2031               return NULL;
2032 #endif
2033        timer_lcore = rte_lcore_id();
2034        /*
2035         * Now allocate a counter block entry. It appears that the
2036         * initialization of all instances is serialized on core 0,
2037         * so no lock is necessary.
2038         */
2039        struct rte_VFW_counter_block *counter_ptr;
2040
2041        if (rte_VFW_hi_counter_block_in_use == MAX_VFW_INSTANCES)
2042               /* error, exceeded table bounds */
2043               return NULL;
2044
2045        rte_VFW_hi_counter_block_in_use++;
2046        counter_ptr =
2047               &rte_vfw_counter_table[rte_VFW_hi_counter_block_in_use];
2048        strncpy(counter_ptr->name, params->name, sizeof(counter_ptr->name));
2049
2050        pipe_vfw->counters = counter_ptr;
2051
2052        rte_ct_initialize_default_timeouts(pipe_vfw->cnxn_tracker);
2053        /* Parse arguments */
2054        if (pipeline_vfw_parse_args(pipe_vfw, params))
2055               return NULL;
2056
2057        uint16_t pointers_offset =
2058               META_DATA_OFFSET + offsetof(struct mbuf_tcp_meta_data, next);
2059
2060        if (pipe_vfw->n_flows > 0)
2061               rte_ct_initialize_cnxn_tracker_with_synproxy(
2062                             pipe_vfw->cnxn_tracker,
2063                             pipe_vfw->n_flows,
2064                             params->name,
2065                             pointers_offset);
2066
2067        pipe_vfw->counters->ct_counters =
2068               rte_ct_get_counter_address(pipe_vfw->cnxn_tracker);
2069
2070        /* Pipeline */
2071        {
2072               struct rte_pipeline_params pipeline_params = {
2073                      .name = params->name,
2074                      .socket_id = params->socket_id,
2075                      .offset_port_id = META_DATA_OFFSET +
2076                             offsetof(struct mbuf_tcp_meta_data, output_port)
2077               };
2078
2079               pipe->p = rte_pipeline_create(&pipeline_params);
2080               if (pipe->p == NULL) {
2081                      rte_free(pipe_vfw);
2082                      return NULL;
2083               }
2084        }
2085
2086        /* Input ports */
2087
2088        /*
2089         * create a different "arg_ah" for each input port.
2090         * They differ only in the recorded port number. Unfortunately,
2091         * IP_PIPELINE does not pass port number in to input port handler
2092         */
2093
2094        uint32_t in_ports_arg_size =
2095               RTE_CACHE_LINE_ROUNDUP((sizeof(struct vfw_ports_in_args)) *
2096                             (params->n_ports_in));
2097        struct vfw_ports_in_args *port_in_args =
2098               (struct vfw_ports_in_args *)
2099               rte_zmalloc(NULL, in_ports_arg_size, RTE_CACHE_LINE_SIZE);
2100
2101        if (port_in_args == NULL)
2102               return NULL;
2103
2104        pipe->n_ports_in = params->n_ports_in;
2105        for (i = 0; i < pipe->n_ports_in; i++) {
2106
2107               /* initialize this instance of port_in_args as necessary */
2108               port_in_args[i].pipe = pipe;
2109               port_in_args[i].cnxn_tracker = pipe_vfw->cnxn_tracker;
2110
2111               struct rte_pipeline_port_in_params port_params = {
2112                      .ops =
2113                             pipeline_port_in_params_get_ops(&params->port_in
2114                                           [i]),
2115                      .arg_create =
2116                             pipeline_port_in_params_convert(&params->port_in
2117                                           [i]),
2118                      .f_action = vfw_port_in_action_ipv4,
2119                      .arg_ah = &(port_in_args[i]),
2120                      .burst_size = params->port_in[i].burst_size,
2121               };
2122                if (pipe_vfw->traffic_type == IP_VERSION_6)
2123                      port_params.f_action = vfw_port_in_action_ipv6;
2124               int status = rte_pipeline_port_in_create(pipe->p, &port_params,
2125                             &pipe->port_in_id[i]);
2126
2127               if (status) {
2128                      rte_pipeline_free(pipe->p);
2129                      rte_free(pipe_vfw);
2130                      return NULL;
2131               }
2132        }
2133
2134        /* Output ports */
2135        pipe->n_ports_out = params->n_ports_out;
2136        for (i = 0; i < pipe->n_ports_out; i++) {
2137               struct rte_pipeline_port_out_params port_params = {
2138                      .ops = pipeline_port_out_params_get_ops(
2139                                    &params->port_out[i]),
2140                      .arg_create = pipeline_port_out_params_convert(
2141                                    &params->port_out[i]),
2142                      .f_action = NULL,
2143                      .arg_ah = NULL,
2144               };
2145
2146               int status = rte_pipeline_port_out_create(pipe->p, &port_params,
2147                             &pipe->port_out_id[i]);
2148
2149               if (status) {
2150                      rte_pipeline_free(pipe->p);
2151                      rte_free(pipe_vfw);
2152                      return NULL;
2153               }
2154        }
2155
2156        int pipeline_num = 0;
2157        int dont_care = sscanf(params->name, "PIPELINE%d", &pipeline_num);
2158
2159        if (dont_care < 0)
2160               printf("sscanf unble to read pipeline id\n");
2161        pipe_vfw->pipeline_num = (uint8_t) pipeline_num;
2162        register_pipeline_Qs(pipe_vfw->pipeline_num, pipe);
2163        set_link_map(pipe_vfw->pipeline_num, pipe, pipe_vfw->links_map);
2164        set_outport_id(pipe_vfw->pipeline_num, pipe,
2165                      pipe_vfw->outport_id);
2166        printf("pipeline_num=%d\n", pipeline_num);
2167 #ifdef ACL_ENABLE
2168        /*If this is the first VFW thread, create common VFW Rule tables*/
2169        if (rte_VFW_hi_counter_block_in_use == 0) {
2170               vfw_rule_table_ipv4_active =
2171                      lib_acl_create_active_standby_table_ipv4(1,
2172                                    &vfw_n_rules);
2173               if (vfw_rule_table_ipv4_active == NULL) {
2174                      printf("Failed to create active table for IPV4\n");
2175                      rte_pipeline_free(pipe->p);
2176                      rte_free(pipe_vfw->cnxn_tracker);
2177                      rte_free(pipe_vfw->plib_acl);
2178                      rte_free(pipe_vfw);
2179                      return NULL;
2180               }
2181               vfw_rule_table_ipv4_standby =
2182                      lib_acl_create_active_standby_table_ipv4(2,
2183                                    &vfw_n_rules);
2184               if (vfw_rule_table_ipv4_standby == NULL) {
2185                      printf("Failed to create standby table for IPV4\n");
2186                      rte_pipeline_free(pipe->p);
2187                      rte_free(pipe_vfw->cnxn_tracker);
2188                      rte_free(pipe_vfw->plib_acl);
2189                      rte_free(pipe_vfw);
2190                      return NULL;
2191               }
2192
2193               vfw_rule_table_ipv6_active =
2194                      lib_acl_create_active_standby_table_ipv6(1,
2195                                    &vfw_n_rules);
2196
2197               if (vfw_rule_table_ipv6_active == NULL) {
2198                      printf("Failed to create active table for IPV6\n");
2199                      rte_pipeline_free(pipe->p);
2200                      rte_free(pipe_vfw->cnxn_tracker);
2201                      rte_free(pipe_vfw->plib_acl);
2202                      rte_free(pipe_vfw);
2203                      return NULL;
2204               }
2205               vfw_rule_table_ipv6_standby =
2206                      lib_acl_create_active_standby_table_ipv6(2,
2207                                    &vfw_n_rules);
2208               if (vfw_rule_table_ipv6_standby == NULL) {
2209                      printf("Failed to create standby table for IPV6\n");
2210                      rte_pipeline_free(pipe->p);
2211                      rte_free(pipe_vfw->cnxn_tracker);
2212                      rte_free(pipe_vfw->plib_acl);
2213                      rte_free(pipe_vfw);
2214                      return NULL;
2215               }
2216        }
2217
2218 #endif
2219
2220        /* Tables */
2221
2222        pipe->n_tables = 1;
2223
2224        struct rte_pipeline_table_params table_params = {
2225               .ops = &rte_table_stub_ops,
2226               .arg_create = NULL,
2227               .f_action_hit = NULL,
2228               .f_action_miss = NULL,
2229               .arg_ah = NULL,
2230               .action_data_size = 0,
2231        };
2232
2233        int status = rte_pipeline_table_create(pipe->p,
2234                      &table_params,
2235                      &pipe->table_id[0]);
2236
2237        if (status) {
2238               rte_pipeline_free(pipe->p);
2239               rte_free(pipe);
2240               return NULL;
2241        }
2242
2243        struct rte_pipeline_table_entry default_entry = {
2244               .action = RTE_PIPELINE_ACTION_PORT_META
2245        };
2246
2247        struct rte_pipeline_table_entry *default_entry_ptr;
2248
2249        status = rte_pipeline_table_default_entry_add(pipe->p,
2250                                                 pipe->table_id[0],
2251                                                 &default_entry,
2252                                                 &default_entry_ptr);
2253
2254        if (status) {
2255               rte_pipeline_free(pipe->p);
2256               rte_free(pipe);
2257               return NULL;
2258        }
2259        for (i = 0; i < pipe->n_ports_in; i++) {
2260               int status = rte_pipeline_port_in_connect_to_table(
2261                             pipe->p,
2262                             pipe->port_in_id[i],
2263                             pipe->table_id[0]);
2264
2265               if (status) {
2266                      rte_pipeline_free(pipe->p);
2267                      rte_free(pipe_vfw);
2268                      return NULL;
2269               }
2270        }
2271
2272        /* Enable input ports */
2273        for (i = 0; i < pipe->n_ports_in; i++) {
2274               int status =
2275                   rte_pipeline_port_in_enable(pipe->p, pipe->port_in_id[i]);
2276
2277               if (status) {
2278                      rte_pipeline_free(pipe->p);
2279                      rte_free(pipe_vfw);
2280                      return NULL;
2281               }
2282        }
2283
2284        /* Check pipeline consistency */
2285        if (rte_pipeline_check(pipe->p) < 0) {
2286               rte_pipeline_free(pipe->p);
2287               rte_free(pipe_vfw);
2288               return NULL;
2289        }
2290
2291        /* Message queues */
2292        pipe->n_msgq = params->n_msgq;
2293        for (i = 0; i < pipe->n_msgq; i++)
2294               pipe->msgq_in[i] = params->msgq_in[i];
2295
2296        for (i = 0; i < pipe->n_msgq; i++)
2297               pipe->msgq_out[i] = params->msgq_out[i];
2298
2299        /* Message handlers */
2300        memcpy(pipe->handlers, handlers, sizeof(pipe->handlers));
2301        memcpy(pipe_vfw->custom_handlers, custom_handlers,
2302               sizeof(pipe_vfw->custom_handlers));
2303
2304        return pipe_vfw;
2305 }
2306
2307 /**
2308  * Free resources and delete pipeline.
2309  *
2310  * @param pipeline
2311  *  A pointer to the pipeline.
2312  *
2313  * @return
2314  *  0 on success, negative on error.
2315  */
2316 static int pipeline_vfw_free(void *pipeline)
2317 {
2318        struct pipeline *p = (struct pipeline *)pipeline;
2319
2320        /* Check input arguments */
2321        if (p == NULL)
2322               return -1;
2323
2324        /* Free resources */
2325        rte_pipeline_free(p->p);
2326        rte_free(p);
2327        return 0;
2328 }
2329
2330 /**
2331  * Callback function to map input/output ports.
2332  *
2333  * @param pipeline
2334  *  A pointer to the pipeline.
2335  * @param port_in
2336  *  Input port ID
2337  * @param port_out
2338  *  A pointer to the Output port.
2339  *
2340  * @return
2341  *  0 on success, negative on error.
2342  */
2343 static int
2344 pipeline_vfw_track(void *pipeline, __rte_unused uint32_t port_in,
2345                     uint32_t *port_out)
2346 {
2347        struct pipeline *p = (struct pipeline *)pipeline;
2348
2349        /* Check input arguments */
2350        if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
2351               return -1;
2352
2353        if (p->n_ports_in == 1) {
2354               *port_out = 0;
2355               return 0;
2356        }
2357
2358        return -1;
2359 }
2360
/**
 * Callback function to process timers.
 *
 * Runs on the pipeline's periodic timer tick: drains synproxy-buffered
 * packets, services FE messages, flushes the pipeline, expires
 * connection-tracker timers, then purges packets whose synproxy hold
 * timed out. The two buffered-packet passes deliberately bracket the
 * timer expiry (see inline comments).
 *
 * @param pipeline
 *  A pointer to the pipeline.
 *
 * @return
 *  0 on success, negative on error.
 */
static int pipeline_vfw_timer(void *pipeline)
{
	struct pipeline_vfw *p = (struct pipeline_vfw *)pipeline;

	/*
	 * handle any good buffered packets released by synproxy before checking
	 * for packets released by synproxy due to timeout.
	 * Don't want packets missed
	 */

	vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
				    FORWARD_BUFFERED_PACKETS);

	/* service any pending FE requests, then push queued packets out */
	pipeline_msg_req_handle(&p->pipe);
	rte_pipeline_flush(p->pipe.p);

	rte_ct_handle_expired_timers(p->cnxn_tracker);

	/* now handle packets released by synproxy due to timeout. */
	vfw_handle_buffered_packets(p->pipe.p, p, p->cnxn_tracker,
				    DELETE_BUFFERED_PACKETS);

	return 0;
}
2394
2395 /**
2396  * Callback function to process CLI commands from FE.
2397  *
2398  * @param p
2399  *  A pointer to the pipeline.
2400  * @param msg
2401  *  A pointer to command specific data.
2402  *
2403  * @return
2404  *  A pointer to message handler on success,
2405  *  pipeline_msg_req_invalid_hander on error.
2406  */
2407 void *pipeline_vfw_msg_req_custom_handler(struct pipeline *p, void *msg)
2408 {
2409        struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
2410        struct pipeline_custom_msg_req *req = msg;
2411        pipeline_msg_req_handler f_handle;
2412
2413        f_handle = (req->subtype < PIPELINE_VFW_MSG_REQS) ?
2414            pipe_vfw->custom_handlers[req->subtype] :
2415            pipeline_msg_req_invalid_handler;
2416
2417        if (f_handle == NULL)
2418               f_handle = pipeline_msg_req_invalid_handler;
2419
2420        return f_handle(p, req);
2421 }
2422
2423 /**
2424  * Handler for synproxy ON/OFF CLI command.
2425  *
2426  * @param p
2427  *  A pointer to the pipeline.
2428  * @param msg
2429  *  A pointer to command specific data.
2430  *
2431  * @return
2432  *  Response message contains status.
2433  */
2434
2435 void *pipeline_vfw_msg_req_synproxy_flag_handler(struct pipeline *p,
2436                                               void *msg)
2437 {
2438        struct pipeline_vfw *pipe_vfw = (struct pipeline_vfw *)p;
2439        struct pipeline_vfw_synproxy_flag_msg_req *req = msg;
2440        struct pipeline_vfw_synproxy_flag_msg_rsp *rsp = msg;
2441
2442        if (req->synproxy_flag == 0) {
2443               rte_ct_disable_synproxy(pipe_vfw->cnxn_tracker);
2444               rsp->status = 0;
2445               printf("synproxy turned OFF for %s\n", p->name);
2446        } else if (req->synproxy_flag == 1) {
2447               rte_ct_enable_synproxy(pipe_vfw->cnxn_tracker);
2448               rsp->status = 0;
2449               printf("synproxy turned ON for %s\n", p->name);
2450        } else {
2451               printf("Invalid synproxy setting\n");
2452               rsp->status = -1;
2453        }
2454
2455        return rsp;
2456 }
2457
/* vFW back-end operations table registered with the pipeline framework.
 * f_run is NULL, so the framework's default run loop drives the
 * rte_pipeline instance; f_timer services synproxy buffers and
 * connection-tracker timeouts on each tick. */
struct pipeline_be_ops pipeline_vfw_be_ops = {
	.f_init = pipeline_vfw_init,
	.f_free = pipeline_vfw_free,
	.f_run = NULL,
	.f_timer = pipeline_vfw_timer,
	.f_track = pipeline_vfw_track,
};