2 // Copyright (c) 2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
19 * Pipeline ACL BE Implementation.
21 * Implementation of Pipeline ACL Back End (BE).
22 * Responsible for packet processing.
27 #include <rte_common.h>
28 #include <rte_malloc.h>
29 #include <rte_ether.h>
32 #include <rte_byteorder.h>
33 #include <rte_table_acl.h>
34 #include <rte_table_stub.h>
35 #include "pipeline_arpicmp_be.h"
36 #include "vnf_common.h"
37 #include "pipeline_common_be.h"
38 #include <rte_pipeline.h>
41 #include <rte_timer.h>
42 #include <rte_cycles.h>
44 #include "pipeline_acl.h"
45 #include "pipeline_acl_be.h"
46 #include "rte_cnxn_tracking.h"
47 #include "pipeline_actions_common.h"
49 #include "lib_icmpv6.h"
/* Per-input-port index into the private queue mapping for this ACL pipeline
 * (presumably indexed by pipeline port-in id — TODO confirm against users). */
52 static uint8_t acl_prv_que_port_index[PIPELINE_MAX_PORT_IN];
/* Defined elsewhere: expands an IPv6 prefix length into a byte-wise netmask. */
53 extern void convert_prefixlen_to_netmask_ipv6(uint32_t depth,
54 uint8_t netmask_ipv6[]);
61 * A structure defining the ACL pipeline per thread data.
/* NOTE(review): the struct's opening line is not visible in this extract;
 * the fields below belong to the per-thread ACL pipeline state. */
65 pipeline_msg_req_handler custom_handlers[PIPELINE_ACL_MSG_REQS];
/* ACL rule-field layout used when building the rte_table_acl table. */
68 uint32_t n_rule_fields;
69 struct rte_acl_field_def *field_format;
70 uint32_t field_format_size;
72 /* Connection Tracker */
73 struct rte_ct_cnxn_tracker *cnxn_tracker;
/* Per-instance statistics block and index into the action counter table. */
74 struct rte_ACL_counter_block *counters;
75 int action_counter_index;
76 /* timestamp retrieved during in-port computations */
77 uint64_t in_port_time_stamp;
/* Physical-link and output-port mappings, indexed by port id. */
82 uint8_t links_map[PIPELINE_MAX_PORT_IN];
83 uint8_t port_out_id[PIPELINE_MAX_PORT_IN];
/* Per-burst ACL lookup results (one entry pointer per packet position). */
85 struct acl_table_entry *acl_entries_ipv4[RTE_PORT_IN_BURST_SIZE_MAX];
86 struct acl_table_entry *acl_entries_ipv6[RTE_PORT_IN_BURST_SIZE_MAX];
88 } __rte_cache_aligned;
91 * A structure defining the mbuf meta data for ACL.
93 struct mbuf_acl_meta_data {
94 /* output port stored for RTE_PIPELINE_ACTION_PORT_META */
/* NOTE(review): the field declarations themselves are missing from this
 * extract; only their descriptive comments remain. */
96 /* next hop ip address used by ARP code */
98 } __rte_cache_aligned;
/* Offset (bytes) into the mbuf where pipeline metadata is stored. */
100 #define META_DATA_OFFSET 128
/* Per-instance ACL statistics blocks; next free slot tracked below. */
102 struct rte_ACL_counter_block rte_acl_counter_table[MAX_ACL_INSTANCES]
104 int rte_ACL_hi_counter_block_in_use = -1;
106 /* a spin lock used during acl initialization only */
107 rte_spinlock_t rte_ACL_init_lock = RTE_SPINLOCK_INITIALIZER;
/* Double-buffered action tables: one active for lookup, one standby for
 * updates; the active/standby pointers alias arrays a and b. */
110 struct pipeline_action_key *action_array_a;
111 struct pipeline_action_key *action_array_b;
112 struct pipeline_action_key *action_array_active;
113 struct pipeline_action_key *action_array_standby;
114 uint32_t action_array_size;
/* Per-instance, per-action packet/byte counters. */
116 struct action_counter_block
117 action_counter_table[MAX_ACL_INSTANCES][action_array_max]
120 static void *pipeline_acl_msg_req_custom_handler(struct pipeline *p, void *msg);
/* Dispatch table for standard pipeline control messages. */
122 static pipeline_msg_req_handler handlers[] = {
123 [PIPELINE_MSG_REQ_PING] = pipeline_msg_req_ping_handler,
124 [PIPELINE_MSG_REQ_STATS_PORT_IN] =
125 pipeline_msg_req_stats_port_in_handler,
126 [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
127 pipeline_msg_req_stats_port_out_handler,
128 [PIPELINE_MSG_REQ_STATS_TABLE] = pipeline_msg_req_stats_table_handler,
129 [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
130 pipeline_msg_req_port_in_enable_handler,
131 [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
132 pipeline_msg_req_port_in_disable_handler,
133 [PIPELINE_MSG_REQ_CUSTOM] = pipeline_acl_msg_req_custom_handler,
136 static void *pipeline_acl_msg_req_dbg_handler(struct pipeline *p, void *msg);
/* Dispatch table for ACL-specific custom messages. */
138 static pipeline_msg_req_handler custom_handlers[] = {
139 [PIPELINE_ACL_MSG_REQ_DBG] = pipeline_acl_msg_req_dbg_handler,
/* Bitmask of packets parked for ARP/ND resolution; hijacked from the
 * pipeline in pkt_work_acl_key below. */
141 uint64_t arp_pkts_mask;
/*
 * Intercept ARP and locally-addressed ICMP/ICMPv6 packets from a burst and
 * redirect them to the last output port (the ARP/ICMP handling port) instead
 * of normal ACL forwarding.  Switches on the Ethernet type read from the
 * mbuf headroom; for IPv4/IPv6 it additionally checks the protocol field and
 * that the destination address matches this link's own IP.
 * NOTE(review): several lines of this function (case labels, return
 * statements, closing braces) are missing from this extract, so the exact
 * return semantics cannot be confirmed here; presumably it returns 0 when
 * the packet was consumed (inserted to out_port) and non-zero otherwise —
 * callers treat a 0 return as "remove packet from pkts_mask".
 */
145 static uint8_t check_arp_icmp(struct rte_mbuf *pkt,
146 uint64_t pkt_mask, struct pipeline_acl *p_acl)
148 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
149 struct ipv6_hdr *ipv6_h;
150 uint16_t *eth_proto =
151 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
152 struct app_link_params *link;
154 //uint32_t *port_out_id = RTE_MBUF_METADATA_UINT32_PTR(pk
155 // offsetof(struct mbuf_acl_meta_dat
157 /* ARP outport number */
158 uint16_t out_port = p_acl->p.n_ports_out - 1;
161 uint32_t prot_offset;
/* Link parameters (own IP) for the port this packet arrived on. */
163 link = &myApp->link_params[pkt->port];
165 switch (rte_be_to_cpu_16(*eth_proto)) {
/* ARP: hand the packet straight to the ARP/ICMP output port. */
168 rte_pipeline_port_out_packet_insert(p_acl->p.p, out_port, pkt);
171 * Pkt mask should be changed, and not changing the
174 p_acl->arpPktCount++;
179 /* header room + eth hdr size +
180 * src_aadr offset in ip header
182 uint32_t dst_addr_offset = MBUF_HDR_ROOM +
183 ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
184 uint32_t *dst_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt,
186 prot_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE +
187 IP_HDR_PROTOCOL_OFST;
188 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt,
/* IPv4: ICMP destined to our own address is diverted, not filtered. */
190 if ((*protocol == IP_PROTOCOL_ICMP) &&
191 link->ip == rte_be_to_cpu_32(*dst_addr)) {
193 if (is_phy_port_privte(pkt->port)) {
195 rte_pipeline_port_out_packet_insert
196 (p_acl->p.p, out_port, pkt);
198 * Pkt mask should be changed,
199 * and not changing the drop mask
201 p_acl->arpPktCount++;
/* IPv6: same diversion for ICMPv6 addressed to our own address. */
213 uint32_t dst_addr_offset = MBUF_HDR_ROOM +
214 ETH_HDR_SIZE + IPV6_HDR_DST_ADR_OFST;
215 uint32_t *dst_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt,
218 uint32_t prot_offset_ipv6 = MBUF_HDR_ROOM +
219 ETH_HDR_SIZE + IPV6_HDR_PROTOCOL_OFST;
220 struct ipv6_hdr *ipv6_h;
222 ipv6_h = (struct ipv6_hdr *)MBUF_HDR_ROOM +
224 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt,
/* Only the low 32 bits (dst_addr[3]) of the IPv6 address are compared
 * against link->ip here — TODO confirm this is intentional. */
227 if ((ipv6_h->proto == ICMPV6_PROTOCOL_ID) &&
228 (link->ip == rte_be_to_cpu_32(dst_addr[3]))) {
230 if (is_phy_port_privte(pkt->port)) {
232 rte_pipeline_port_out_packet_insert
233 (p_acl->p.p, out_port, pkt);
235 * Pkt mask should be changed,
236 * and not changing the drop mask
238 p_acl->arpPktCount++;
/* Start of the L2 payload (IP header) inside the mbuf headroom. */
248 #define IP_START (MBUF_HDR_ROOM + ETH_HDR_SIZE)
251 ipv6_h = (struct ipv6_hdr *)
252 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
254 if ((ipv6_h->proto == ICMPV6_PROTOCOL_ID) &&
256 rte_be_to_cpu_32(ipv6_h->dst_addr[3]))) {
258 if (is_phy_port_privte(pkt->port)) {
259 rte_pipeline_port_out_packet_insert(
264 p_acl->arpPktCount++;
279 * Print packet for debugging.
282 * A pointer to the packet.
/* Hex-dumps the first 400 bytes of the mbuf (20 rows of 20 bytes),
 * starting at metadata offset 0. Debug aid only. */
285 void print_pkt_acl(struct rte_mbuf *pkt)
289 printf("Packet Contents:\n");
290 uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
292 for (i = 0; i < 20; i++) {
293 for (j = 0; j < 20; j++)
294 printf("%02x ", rd[(20 * i) + j]);
300 * Main packet processing function.
301 * 64 packet bit mask are used to identify which packets to forward.
302 * Performs the following:
303 * - Burst lookup packets in the IPv4 ACL Rule Table.
304 * - Burst lookup packets in the IPv6 ACL Rule Table.
305 * - Lookup Action Table, perform actions.
306 * - Burst lookup Connection Tracking, if enabled.
307 * - Lookup MAC address.
309 * - Packets with bit mask set are forwarded
312 * A pointer to the pipeline.
314 * A pointer to a burst of packets.
316 * Number of packets to process.
318 * A pointer to pipeline specific data.
321 * 0 on success, negative on error.
/*
 * Port-in action handler for the combined IPv4+IPv6 ACL pipeline.
 * Stages (per burst, tracked via 64-bit packet masks):
 *   1. ACL table lookup (IPv4 and/or IPv6), drop misses.
 *   2. Divert ARP/ICMP via check_arp_icmp(), then apply the matched rule's
 *      actions (count / drop / fwd / nat / dscp / accept / conntrack).
 *   3. Batch connection-tracking lookup for packets that requested it.
 *   4. Next-hop resolution: ARP (IPv4) or ND (IPv6); unresolved packets are
 *      queued and hijacked from the pipeline, resolved ones get dst/src MAC
 *      and an output port written into mbuf metadata.
 * NOTE(review): many lines of this function are missing from this extract
 * (closing braces, else-branches, several statements); comments below only
 * describe what the visible code shows.
 */
324 pkt_work_acl_key(struct rte_pipeline *p,
325 struct rte_mbuf **pkts, uint32_t n_pkts, void *arg)
328 struct pipeline_acl *p_acl = arg;
330 p_acl->counters->pkts_received =
331 p_acl->counters->pkts_received + n_pkts;
333 printf("pkt_work_acl_key pkts_received: %" PRIu64
334 " n_pkts: %u\n", p_acl->counters->pkts_received, n_pkts);
336 uint64_t lookup_hit_mask = 0;
337 uint64_t lookup_hit_mask_ipv4 = 0;
338 uint64_t lookup_hit_mask_ipv6 = 0;
339 uint64_t lookup_miss_mask = 0;
340 uint64_t conntrack_mask = 0;
341 uint64_t connexist_mask = 0;
342 uint32_t dest_address = 0;
345 uint64_t pkts_drop_mask, pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
346 uint64_t keep_mask = pkts_mask;
/* Timestamp the burst entry for the latency measurement at the end. */
350 p_acl->in_port_time_stamp = rte_get_tsc_cycles();
/* Stage 1: ACL lookups. */
352 if (acl_ipv4_enabled) {
354 printf("ACL IPV4 Lookup Mask Before = %p\n",
357 rte_table_acl_ops.f_lookup(acl_rule_table_ipv4_active, pkts,
358 pkts_mask, &lookup_hit_mask_ipv4,
360 p_acl->acl_entries_ipv4);
362 printf("ACL IPV4 Lookup Mask After = %p\n",
363 (void *)lookup_hit_mask_ipv4);
366 if (acl_ipv6_enabled) {
368 printf("ACL IPV6 Lookup Mask Before = %p\n",
371 rte_table_acl_ops.f_lookup(acl_rule_table_ipv6_active, pkts,
372 pkts_mask, &lookup_hit_mask_ipv6,
374 p_acl->acl_entries_ipv6);
376 printf("ACL IPV6 Lookup Mask After = %p\n",
377 (void *)lookup_hit_mask_ipv6);
380 /* Merge lookup results since we process both IPv4 and IPv6 below */
381 lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
383 printf("ACL Lookup Mask After = %p\n", (void *)lookup_hit_mask);
/* Misses are dropped; from here on pkts_mask holds hits only. */
385 lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
386 pkts_mask = lookup_hit_mask;
387 p_acl->counters->pkts_drop += __builtin_popcountll(lookup_miss_mask);
389 printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
390 p_acl->counters->pkts_drop,
391 __builtin_popcountll(lookup_miss_mask));
/* Stage 2: per-packet action processing over the hit mask. */
393 uint64_t pkts_to_process = lookup_hit_mask;
394 /* bitmap of packets left to process for ARP */
396 for (; pkts_to_process;) {
397 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
398 uint64_t pkt_mask = 1LLU << pos;
399 /* bitmask representing only this packet */
401 pkts_to_process &= ~pkt_mask;
402 /* remove this packet from remaining list */
403 struct rte_mbuf *pkt = pkts[pos];
/* ARP/ICMP packets were consumed by check_arp_icmp: mask them out. */
406 if (!check_arp_icmp(pkt, pkt_mask, p_acl)) {
407 pkts_mask &= ~(1LLU << pos);
/* IP version nibble from the first byte after the Ethernet header. */
412 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
413 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
415 if (hdr_chk == IPv4_HDR_VERSION) {
417 struct acl_table_entry *entry =
418 (struct acl_table_entry *)
419 p_acl->acl_entries_ipv4[pos];
420 uint16_t phy_port = entry->head.port_id;
421 uint32_t action_id = entry->action_id;
424 printf("action_id = %u\n", action_id);
426 uint32_t dscp_offset =
427 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DSCP_OFST;
/* Count action: accumulate per-action packet/byte counters. */
429 if (action_array_active[action_id].action_bitmap &
432 [p_acl->action_counter_index]
433 [action_id].packetCount++;
435 [p_acl->action_counter_index]
436 [action_id].byteCount +=
437 rte_pktmbuf_pkt_len(pkt);
439 printf("Action Count Packet Count: %"
440 PRIu64 " Byte Count: %" PRIu64
443 [p_acl->action_counter_index]
444 [action_id].packetCount,
446 [p_acl->action_counter_index]
447 [action_id].byteCount);
450 if (action_array_active[action_id].action_bitmap &
451 acl_action_packet_drop) {
453 /* Drop packet by changing the mask */
455 printf("ACL before drop pkt_mask "
456 " %lu, pkt_num %d\n",
458 pkts_mask &= ~(1LLU << pos);
460 printf("ACL after drop pkt_mask "
463 p_acl->counters->pkts_drop++;
/* FWD action: override the output port from the rule. */
466 if (action_array_active[action_id].action_bitmap &
469 action_array_active[action_id].fwd_port;
470 entry->head.port_id = phy_port;
472 printf("Action FWD Port ID: %u\n",
476 if (action_array_active[action_id].action_bitmap &
479 action_array_active[action_id].nat_port;
480 entry->head.port_id = phy_port;
482 printf("Action NAT Port ID: %u\n",
486 if (action_array_active[action_id].action_bitmap &
489 /* Set DSCP priority */
490 uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
493 action_array_active[action_id].dscp_priority
497 ("Action DSCP DSCP Priority: %u\n",
501 if (action_array_active[action_id].action_bitmap &
502 acl_action_packet_accept) {
504 printf("Action Accept\n");
506 if (action_array_active[action_id].action_bitmap
507 & acl_action_conntrack) {
509 /* Set conntrack bit for this pkt */
510 conntrack_mask |= pkt_mask;
512 printf("ACL Conntrack enabled: "
514 (void *)conntrack_mask,
518 if (action_array_active[action_id].action_bitmap
519 & acl_action_connexist) {
521 /* Set conntrack bit for this pkt */
522 conntrack_mask |= pkt_mask;
524 /* Set connexist bit for this pkt for public -> private */
525 /* Private -> public packet will open the connection */
526 if (action_array_active
527 [action_id].private_public ==
529 connexist_mask |= pkt_mask;
532 printf("ACL Connexist enabled "
533 "conntrack: %p connexist: %p pkt_mask: %p\n",
534 (void *)conntrack_mask,
535 (void *)connexist_mask,
/* Same action processing for IPv6 hits (separate entry table and
 * a 16-bit DSCP/traffic-class update). */
541 if (hdr_chk == IPv6_HDR_VERSION) {
543 struct acl_table_entry *entry =
544 (struct acl_table_entry *)
545 p_acl->acl_entries_ipv6[pos];
546 uint16_t phy_port = entry->head.port_id;
547 uint32_t action_id = entry->action_id;
550 printf("action_id = %u\n", action_id);
552 if (action_array_active[action_id].action_bitmap &
555 [p_acl->action_counter_index]
556 [action_id].packetCount++;
558 [p_acl->action_counter_index]
559 [action_id].byteCount +=
560 rte_pktmbuf_pkt_len(pkt);
562 printf("Action Count Packet Count: %"
563 PRIu64 " Byte Count: %" PRIu64
566 [p_acl->action_counter_index]
567 [action_id].packetCount,
569 [p_acl->action_counter_index]
570 [action_id].byteCount);
573 if (action_array_active[action_id].action_bitmap &
574 acl_action_packet_drop) {
575 /* Drop packet by changing the mask */
577 printf("ACL before drop pkt_mask "
580 pkts_mask &= ~(1LLU << pos);
582 printf("ACL after drop pkt_mask "
585 p_acl->counters->pkts_drop++;
589 if (action_array_active[action_id].action_bitmap &
592 action_array_active[action_id].fwd_port;
593 entry->head.port_id = phy_port;
595 printf("Action FWD Port ID: %u\n",
599 if (action_array_active[action_id].action_bitmap &
602 action_array_active[action_id].nat_port;
603 entry->head.port_id = phy_port;
605 printf("Action NAT Port ID: %u\n",
609 if (action_array_active[action_id].action_bitmap &
612 /* Set DSCP priority */
613 uint32_t dscp_offset =
614 MBUF_HDR_ROOM + ETH_HDR_SIZE +
615 IP_HDR_DSCP_OFST_IPV6;
617 RTE_MBUF_METADATA_UINT16_PTR(pkt,
619 uint16_t dscp_value =
621 (RTE_MBUF_METADATA_UINT16
622 (pkt, dscp_offset)) & 0XF00F);
624 action_array_active[action_id].dscp_priority
626 uint16_t dscp_temp = dscp_store;
628 dscp_temp = dscp_temp << 4;
629 *dscp = rte_bswap16(dscp_temp | dscp_value);
632 ("Action DSCP DSCP Priority: %u\n",
636 if (action_array_active[action_id].action_bitmap &
637 acl_action_packet_accept) {
639 printf("Action Accept\n");
641 if (action_array_active[action_id].action_bitmap
642 & acl_action_conntrack) {
644 /* Set conntrack bit for this pkt */
645 conntrack_mask |= pkt_mask;
647 printf("ACL Conntrack enabled: "
648 " %p pkt_mask: %p\n",
649 (void *)conntrack_mask,
653 if (action_array_active[action_id].action_bitmap
654 & acl_action_connexist) {
656 /* Set conntrack bit for this pkt */
657 conntrack_mask |= pkt_mask;
659 /* Set connexist bit for this pkt for public -> private */
660 /* Private -> public packet will open the connection */
661 if (action_array_active
662 [action_id].private_public ==
664 connexist_mask |= pkt_mask;
667 printf("ACL Connexist enabled "
668 "conntrack: %p connexist: %p pkt_mask: %p\n",
669 (void *)conntrack_mask,
670 (void *)connexist_mask,
/* Stage 3: batch conntrack lookup; packets the tracker rejects are
 * dropped from pkts_mask below. */
677 /* Only call connection tracker if required */
678 if (conntrack_mask > 0) {
681 ("ACL Call Conntrack Before = %p Connexist = %p\n",
682 (void *)conntrack_mask, (void *)connexist_mask);
684 rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control
685 (p_acl->cnxn_tracker, pkts, conntrack_mask, connexist_mask);
687 printf("ACL Call Conntrack After = %p\n",
688 (void *)conntrack_mask);
690 /* Only change pkt mask for pkts that have conntrack enabled */
691 /* Need to loop through packets to check if conntrack enabled */
692 pkts_to_process = pkts_mask;
693 for (; pkts_to_process;) {
694 uint32_t action_id = 0;
696 (uint8_t) __builtin_ctzll(pkts_to_process);
697 uint64_t pkt_mask = 1LLU << pos;
698 /* bitmask representing only this packet */
700 pkts_to_process &= ~pkt_mask;
701 /* remove this packet from remaining list */
702 struct rte_mbuf *pkt = pkts[pos];
704 uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt,
709 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
710 if (hdr_chk == IPv4_HDR_VERSION) {
711 struct acl_table_entry *entry =
712 (struct acl_table_entry *)
713 p_acl->acl_entries_ipv4[pos];
714 action_id = entry->action_id;
716 struct acl_table_entry *entry =
717 (struct acl_table_entry *)
718 p_acl->acl_entries_ipv6[pos];
719 action_id = entry->action_id;
722 if ((action_array_active[action_id].action_bitmap &
723 acl_action_conntrack)
724 || (action_array_active[action_id].action_bitmap &
725 acl_action_connexist)) {
727 if (conntrack_mask & pkt_mask) {
729 printf("ACL Conntrack Accept "
733 /* Drop packet by changing the mask */
735 printf("ACL Conntrack Drop "
738 pkts_mask &= ~pkt_mask;
739 p_acl->counters->pkts_drop++;
/* Stage 4: next-hop (ARP/ND) resolution for surviving packets. */
745 pkts_to_process = pkts_mask;
746 /* bitmap of packets left to process for ARP */
748 for (; pkts_to_process;) {
749 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
750 uint64_t pkt_mask = 1LLU << pos;
751 /* bitmask representing only this packet */
753 pkts_to_process &= ~pkt_mask;
754 /* remove this packet from remaining list */
755 struct rte_mbuf *pkt = pkts[pos];
758 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
759 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
761 if (hdr_chk == IPv4_HDR_VERSION) {
763 struct acl_table_entry *entry =
764 (struct acl_table_entry *)
765 p_acl->acl_entries_ipv4[pos];
766 uint16_t phy_port = pkt->port;
767 uint32_t *port_out_id =
768 RTE_MBUF_METADATA_UINT32_PTR(pkt,
775 ("phy_port = %i, links_map[phy_port] = %i\n",
776 phy_port, p_acl->links_map[phy_port]);
777 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
779 uint32_t dest_if = INVALID_DESTIF;
780 uint32_t src_phy_port = pkt->port;
784 /* Gateway Proc Starts */
785 struct ether_hdr *ehdr = (struct ether_hdr *)
786 RTE_MBUF_METADATA_UINT32_PTR(pkt,
787 META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM);
789 struct ipv4_hdr *ipv4hdr = (struct ipv4_hdr *)
790 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
792 struct arp_entry_data *ret_arp_data = NULL;
793 struct ether_addr dst_mac;
795 uint32_t dst_ip_addr = rte_bswap32(ipv4hdr->dst_addr);
797 gw_get_nh_port_ipv4(dst_ip_addr, &dest_if, &nhip);
799 ret_arp_data = get_dest_mac_addr_ipv4(nhip, dest_if, &dst_mac);
801 /* Gateway Proc Ends */
/* Resolved: rewrite Ethernet MACs and select output port. */
802 if (arp_cache_dest_mac_present(dest_if)) {
804 ether_addr_copy(&dst_mac, &ehdr->d_addr);
805 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
807 *port_out_id = p_acl->port_out_id[dest_if];
809 update_nhip_access(dest_if);
810 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
811 printf("sending buffered packets\n");
812 arp_send_buffered_pkts(ret_arp_data, &ehdr->d_addr,
813 p_acl->port_out_id[dest_if]);
816 p_acl->counters->tpkts_processed++;
817 p_acl->counters->bytes_processed +=
/* No ARP entry at all: drop. */
820 if (unlikely(ret_arp_data == NULL)) {
822 printf("%s: NHIP Not Found, "
823 "outport_id: %d\n", __func__,
824 p_acl->port_out_id[dest_if]);
827 pkts_mask &= ~(1LLU << pos);
829 printf("ACL after drop pkt_mask "
832 p_acl->counters->pkts_drop++;
/* ARP in progress: queue the packet (or drop if the queue is full)
 * and flag it for hijack below. */
836 if (ret_arp_data->status == INCOMPLETE ||
837 ret_arp_data->status == PROBE) {
838 if (ret_arp_data->num_pkts >= NUM_DESC) {
840 pkts_mask &= ~(1LLU << pos);
842 printf("ACL after drop pkt_mask "
845 p_acl->counters->pkts_drop++;
848 arp_pkts_mask |= pkt_mask;
849 arp_queue_unresolved_packet(ret_arp_data,
857 /* IP Pkt forwarding based on pub/prv mapping */
858 if(is_phy_port_privte(src_phy_port))
859 dest_if = prv_to_pub_map[src_phy_port];
861 dest_if = pub_to_prv_map[src_phy_port];
863 *port_out_id = p_acl->port_out_id[dest_if];
866 } /* end of if (hdr_chk == IPv4_HDR_VERSION) */
/* IPv6 mirror of the above, using ND instead of ARP. */
868 if (hdr_chk == IPv6_HDR_VERSION) {
870 struct acl_table_entry *entry =
871 (struct acl_table_entry *)
872 p_acl->acl_entries_ipv6[pos];
873 //uint16_t phy_port = entry->head.port_id;
874 uint16_t phy_port = pkt->port;
875 uint32_t *port_out_id =
876 RTE_MBUF_METADATA_UINT32_PTR(pkt,
882 printf("phy_port = %i, "
883 "links_map[phy_port] = %i\n",
884 phy_port, p_acl->links_map[phy_port]);
886 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
888 uint32_t dest_if = INVALID_DESTIF;
889 uint32_t src_phy_port = pkt->port;
893 /* Gateway Proc Starts */
894 struct ipv6_hdr *ipv6hdr = (struct ipv6_hdr *)
895 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
897 struct ether_hdr *ehdr = (struct ether_hdr *)
898 RTE_MBUF_METADATA_UINT32_PTR(pkt,
899 META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM);
901 struct ether_addr dst_mac;
902 uint8_t nhipv6[IPV6_ADD_SIZE];
903 uint8_t dest_ipv6_address[IPV6_ADD_SIZE];
904 struct nd_entry_data *ret_nd_data = NULL;
906 memset(nhipv6, 0, IPV6_ADD_SIZE);
907 rte_mov16(dest_ipv6_address, (uint8_t *)ipv6hdr->dst_addr);
909 gw_get_nh_port_ipv6(dest_ipv6_address,
912 ret_nd_data = get_dest_mac_addr_ipv6(nhipv6, dest_if, &dst_mac);
914 /* Gateway Proc Ends */
916 if (nd_cache_dest_mac_present(dest_if)) {
918 ether_addr_copy(&dst_mac, &ehdr->d_addr);
919 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
921 *port_out_id = p_acl->port_out_id[dest_if];
923 update_nhip_access(dest_if);
925 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
926 printf("sending buffered packets\n");
927 p_acl->counters->tpkts_processed +=
928 ret_nd_data->num_pkts;
929 nd_send_buffered_pkts(ret_nd_data, &ehdr->d_addr,
930 p_acl->port_out_id[dest_if]);
932 p_acl->counters->tpkts_processed++;
933 p_acl->counters->bytes_processed +=
936 if (unlikely(ret_nd_data == NULL)) {
938 printf("ACL before drop pkt_mask "
939 "%lu, pkt_num %d\n", pkts_mask, pos);
940 pkts_mask &= ~(1LLU << pos);
942 printf("ACL after drop pkt_mask "
943 "%lu, pkt_num %d\n", pkts_mask, pos);
944 p_acl->counters->pkts_drop++;
948 if (ret_nd_data->status == INCOMPLETE ||
949 ret_nd_data->status == PROBE) {
950 if (ret_nd_data->num_pkts >= NUM_DESC) {
953 printf("ACL before drop pkt_mask "
954 "%lu, pkt_num %d\n", pkts_mask, pos);
955 pkts_mask &= ~(1LLU << pos);
957 printf("ACL after drop pkt_mask "
958 "%lu, pkt_num %d\n", pkts_mask, pos);
959 p_acl->counters->pkts_drop++;
962 arp_pkts_mask |= pkt_mask;
963 nd_queue_unresolved_packet(ret_nd_data,
971 /* IP Pkt forwarding based on pub/prv mapping */
972 if(is_phy_port_privte(src_phy_port))
973 dest_if = prv_to_pub_map[src_phy_port];
975 dest_if = pub_to_prv_map[src_phy_port];
977 *port_out_id = p_acl->port_out_id[dest_if];
981 } /* if (hdr_chk == IPv6_HDR_VERSION) */
/* Drop everything removed from pkts_mask; hijack packets parked for
 * ARP/ND resolution so the pipeline does not forward them. */
983 pkts_drop_mask = keep_mask & ~pkts_mask;
984 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
985 keep_mask = pkts_mask;
988 keep_mask &= ~(arp_pkts_mask);
989 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
992 /* don't bother measuring if traffic very low, might skew stats */
993 uint32_t packets_this_iteration = __builtin_popcountll(pkts_mask);
995 if (packets_this_iteration > 1) {
996 uint64_t latency_this_iteration =
997 rte_get_tsc_cycles() - p_acl->in_port_time_stamp;
999 p_acl->counters->sum_latencies += latency_this_iteration;
1000 p_acl->counters->count_latencies++;
1004 printf("Leaving pkt_work_acl_key pkts_mask = %p\n",
1011 * Main packet processing function.
1012 * 64 packet bit mask are used to identify which packets to forward.
1013 * Performs the following:
1014 * - Burst lookup packets in the IPv4 ACL Rule Table.
1015 * - Burst lookup packets in the IPv6 ACL Rule Table.
1016 * - Lookup Action Table, perform actions.
1017 * - Burst lookup Connection Tracking, if enabled.
1018 * - Lookup MAC address.
1020 * - Packets with bit mask set are forwarded
1023 * A pointer to the pipeline.
1025 * A pointer to a burst of packets.
1027 * Number of packets to process.
1029 * A pointer to pipeline specific data.
1032 * 0 on success, negative on error.
1035 pkt_work_acl_ipv4_key(struct rte_pipeline *p,
1036 struct rte_mbuf **pkts, uint32_t n_pkts, void *arg)
1039 struct pipeline_acl *p_acl = arg;
1041 p_acl->counters->pkts_received =
1042 p_acl->counters->pkts_received + n_pkts;
1044 printf("pkt_work_acl_key pkts_received: %" PRIu64
1045 " n_pkts: %u\n", p_acl->counters->pkts_received, n_pkts);
1047 uint64_t lookup_hit_mask = 0;
1048 uint64_t lookup_hit_mask_ipv4 = 0;
1049 uint64_t lookup_hit_mask_ipv6 = 0;
1050 uint64_t lookup_miss_mask = 0;
1051 uint64_t conntrack_mask = 0;
1052 uint64_t connexist_mask = 0;
1053 uint32_t dest_address = 0;
1056 uint64_t pkts_drop_mask, pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
1057 uint64_t keep_mask = pkts_mask;
1061 p_acl->in_port_time_stamp = rte_get_tsc_cycles();
1063 if (acl_ipv4_enabled) {
1065 printf("ACL IPV4 Lookup Mask Before = %p\n",
1068 rte_table_acl_ops.f_lookup(acl_rule_table_ipv4_active, pkts,
1069 pkts_mask, &lookup_hit_mask_ipv4,
1071 p_acl->acl_entries_ipv4);
1073 printf("ACL IPV4 Lookup Mask After = %p\n",
1074 (void *)lookup_hit_mask_ipv4);
1077 /* Merge lookup results since we process both IPv4 and IPv6 below */
1078 lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
1080 printf("ACL Lookup Mask After = %p\n", (void *)lookup_hit_mask);
1082 lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
1083 pkts_mask = lookup_hit_mask;
1084 p_acl->counters->pkts_drop += __builtin_popcountll(lookup_miss_mask);
1086 printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
1087 p_acl->counters->pkts_drop,
1088 __builtin_popcountll(lookup_miss_mask));
1090 uint64_t pkts_to_process = lookup_hit_mask;
1091 /* bitmap of packets left to process for ARP */
1093 for (; pkts_to_process;) {
1094 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
1095 uint64_t pkt_mask = 1LLU << pos;
1096 /* bitmask representing only this packet */
1098 pkts_to_process &= ~pkt_mask;
1099 /* remove this packet from remaining list */
1100 struct rte_mbuf *pkt = pkts[pos];
1103 if (!check_arp_icmp(pkt, pkt_mask, p_acl)) {
1104 pkts_mask &= ~(1LLU << pos);
1109 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
1110 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1112 if (hdr_chk == IPv4_HDR_VERSION) {
1113 struct acl_table_entry *entry =
1114 (struct acl_table_entry *)
1115 p_acl->acl_entries_ipv4[pos];
1116 uint16_t phy_port = entry->head.port_id;
1117 uint32_t action_id = entry->action_id;
1120 printf("action_id = %u\n", action_id);
1122 uint32_t dscp_offset =
1123 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DSCP_OFST;
1125 if (action_array_active[action_id].action_bitmap &
1127 action_counter_table
1128 [p_acl->action_counter_index]
1129 [action_id].packetCount++;
1130 action_counter_table
1131 [p_acl->action_counter_index]
1132 [action_id].byteCount +=
1133 rte_pktmbuf_pkt_len(pkt);
1135 printf("Action Count Packet Count: %"
1136 PRIu64 " Byte Count: %" PRIu64
1138 action_counter_table
1139 [p_acl->action_counter_index]
1140 [action_id].packetCount,
1141 action_counter_table
1142 [p_acl->action_counter_index]
1143 [action_id].byteCount);
1146 if (action_array_active[action_id].action_bitmap &
1147 acl_action_packet_drop) {
1149 /* Drop packet by changing the mask */
1151 printf("ACL before drop pkt_mask "
1152 "%lu, pkt_num %d\n",
1154 pkts_mask &= ~(1LLU << pos);
1156 printf("ACL after drop pkt_mask "
1157 " %lu, pkt_num %d\n",
1159 p_acl->counters->pkts_drop++;
1162 if (action_array_active[action_id].action_bitmap &
1165 action_array_active[action_id].fwd_port;
1166 entry->head.port_id = phy_port;
1168 printf("Action FWD Port ID: %u\n",
1172 if (action_array_active[action_id].action_bitmap &
1175 action_array_active[action_id].nat_port;
1176 entry->head.port_id = phy_port;
1178 printf("Action NAT Port ID: %u\n",
1182 if (action_array_active[action_id].action_bitmap &
1185 /* Set DSCP priority */
1186 uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
1189 action_array_active[action_id].dscp_priority
1193 ("Action DSCP DSCP Priority: %u\n",
1197 if (action_array_active[action_id].action_bitmap &
1198 acl_action_packet_accept) {
1200 printf("Action Accept\n");
1202 if (action_array_active[action_id].action_bitmap
1203 & acl_action_conntrack) {
1205 /* Set conntrack bit for this pkt */
1206 conntrack_mask |= pkt_mask;
1208 printf("ACL Conntrack "
1209 "enabled: %p pkt_mask: %p\n",
1210 (void *)conntrack_mask,
1214 if (action_array_active[action_id].action_bitmap
1215 & acl_action_connexist) {
1217 /* Set conntrack bit for this pkt */
1218 conntrack_mask |= pkt_mask;
1220 /* Set connexist bit for this pkt for public -> private */
1221 /* Private -> public packet will open the connection */
1222 if (action_array_active
1223 [action_id].private_public ==
1225 connexist_mask |= pkt_mask;
1228 printf("ACL Connexist "
1229 "enabled conntrack: %p connexist: %p pkt_mask: %p\n",
1230 (void *)conntrack_mask,
1231 (void *)connexist_mask,
1237 if (hdr_chk == IPv6_HDR_VERSION) {
1239 struct acl_table_entry *entry =
1240 (struct acl_table_entry *)
1241 p_acl->acl_entries_ipv6[pos];
1242 uint16_t phy_port = entry->head.port_id;
1243 uint32_t action_id = entry->action_id;
1246 printf("action_id = %u\n", action_id);
1248 if (action_array_active[action_id].action_bitmap &
1250 action_counter_table
1251 [p_acl->action_counter_index]
1252 [action_id].packetCount++;
1253 action_counter_table
1254 [p_acl->action_counter_index]
1255 [action_id].byteCount +=
1256 rte_pktmbuf_pkt_len(pkt);
1258 printf("Action Count Packet Count: %"
1259 PRIu64 " Byte Count: %" PRIu64
1261 action_counter_table
1262 [p_acl->action_counter_index]
1263 [action_id].packetCount,
1264 action_counter_table
1265 [p_acl->action_counter_index]
1266 [action_id].byteCount);
1269 if (action_array_active[action_id].action_bitmap &
1270 acl_action_packet_drop) {
1271 /* Drop packet by changing the mask */
1274 ("ACL before drop pkt_mask %lu, pkt_num %d\n",
1276 pkts_mask &= ~(1LLU << pos);
1279 ("ACL after drop pkt_mask %lu, pkt_num %d\n",
1281 p_acl->counters->pkts_drop++;
1285 if (action_array_active[action_id].action_bitmap &
1288 action_array_active[action_id].fwd_port;
1289 entry->head.port_id = phy_port;
1291 printf("Action FWD Port ID: %u\n",
1295 if (action_array_active[action_id].action_bitmap &
1298 action_array_active[action_id].nat_port;
1299 entry->head.port_id = phy_port;
1301 printf("Action NAT Port ID: %u\n",
1305 if (action_array_active[action_id].action_bitmap &
1308 /* Set DSCP priority */
1309 uint32_t dscp_offset =
1310 MBUF_HDR_ROOM + ETH_HDR_SIZE +
1311 IP_HDR_DSCP_OFST_IPV6;
1313 RTE_MBUF_METADATA_UINT16_PTR(pkt,
1315 uint16_t dscp_value =
1317 (RTE_MBUF_METADATA_UINT16
1318 (pkt, dscp_offset)) & 0XF00F);
1319 uint8_t dscp_store =
1320 action_array_active[action_id].dscp_priority
1322 uint16_t dscp_temp = dscp_store;
1324 dscp_temp = dscp_temp << 4;
1325 *dscp = rte_bswap16(dscp_temp | dscp_value);
1328 ("Action DSCP DSCP Priority: %u\n",
1332 if (action_array_active[action_id].action_bitmap &
1333 acl_action_packet_accept) {
1335 printf("Action Accept\n");
1337 if (action_array_active[action_id].action_bitmap
1338 & acl_action_conntrack) {
1340 /* Set conntrack bit for this pkt */
1341 conntrack_mask |= pkt_mask;
1343 printf("ACL Conntrack "
1344 "enabled: %p pkt_mask: %p\n",
1345 (void *)conntrack_mask,
1349 if (action_array_active[action_id].action_bitmap
1350 & acl_action_connexist) {
1352 /* Set conntrack bit for this pkt */
1353 conntrack_mask |= pkt_mask;
1355 /* Set connexist bit for this pkt for public -> private */
1356 /* Private -> public packet will open the connection */
1357 if (action_array_active
1358 [action_id].private_public ==
1360 connexist_mask |= pkt_mask;
1363 printf("ACL Connexist enabled "
1364 "conntrack: %p connexist: %p pkt_mask: %p\n",
1365 (void *)conntrack_mask,
1366 (void *)connexist_mask,
1373 /* Only call connection tracker if required */
1374 if (conntrack_mask > 0) {
1377 ("ACL Call Conntrack Before = %p Connexist = %p\n",
1378 (void *)conntrack_mask, (void *)connexist_mask);
1380 rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control
1381 (p_acl->cnxn_tracker, pkts, conntrack_mask, connexist_mask);
1383 printf("ACL Call Conntrack After = %p\n",
1384 (void *)conntrack_mask);
1386 /* Only change pkt mask for pkts that have conntrack enabled */
1387 /* Need to loop through packets to check if conntrack enabled */
1388 pkts_to_process = pkts_mask;
1389 for (; pkts_to_process;) {
1390 uint32_t action_id = 0;
1392 (uint8_t) __builtin_ctzll(pkts_to_process);
1393 uint64_t pkt_mask = 1LLU << pos;
1394 /* bitmask representing only this packet */
1396 pkts_to_process &= ~pkt_mask;
1397 /* remove this packet from remaining list */
1398 struct rte_mbuf *pkt = pkts[pos];
1400 uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt,
1404 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1405 if (hdr_chk == IPv4_HDR_VERSION) {
1406 struct acl_table_entry *entry =
1407 (struct acl_table_entry *)
1408 p_acl->acl_entries_ipv4[pos];
1409 action_id = entry->action_id;
1411 struct acl_table_entry *entry =
1412 (struct acl_table_entry *)
1413 p_acl->acl_entries_ipv6[pos];
1414 action_id = entry->action_id;
1417 if ((action_array_active[action_id].action_bitmap &
1418 acl_action_conntrack)
1419 || (action_array_active[action_id].action_bitmap &
1420 acl_action_connexist)) {
1422 if (conntrack_mask & pkt_mask) {
1424 printf("ACL Conntrack Accept "
1428 /* Drop packet by changing the mask */
1430 printf("ACL Conntrack Drop "
1433 pkts_mask &= ~pkt_mask;
1434 p_acl->counters->pkts_drop++;
1440 pkts_to_process = pkts_mask;
1441 /* bitmap of packets left to process for ARP */
1443 for (; pkts_to_process;) {
1444 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
1445 uint64_t pkt_mask = 1LLU << pos;
1446 /* bitmask representing only this packet */
1448 pkts_to_process &= ~pkt_mask;
1449 /* remove this packet from remaining list */
1450 struct rte_mbuf *pkt = pkts[pos];
1453 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
1454 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1456 if (hdr_chk == IPv4_HDR_VERSION) {
1458 struct acl_table_entry *entry =
1459 (struct acl_table_entry *)
1460 p_acl->acl_entries_ipv4[pos];
1461 //uint16_t phy_port = entry->head.port_id;
1462 uint16_t phy_port = pkt->port;
1463 uint32_t *port_out_id =
1464 RTE_MBUF_METADATA_UINT32_PTR(pkt,
1471 ("phy_port = %i, links_map[phy_port] = %i\n",
1472 phy_port, p_acl->links_map[phy_port]);
1474 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
1476 uint32_t dest_if = INVALID_DESTIF;
1477 uint32_t src_phy_port = pkt->port;
1481 /* Gateway Proc Starts */
1482 struct ether_hdr *ehdr = (struct ether_hdr *)
1483 RTE_MBUF_METADATA_UINT32_PTR(pkt,
1484 META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM);
1486 struct ipv4_hdr *ipv4hdr = (struct ipv4_hdr *)
1487 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1489 struct arp_entry_data *ret_arp_data = NULL;
1490 struct ether_addr dst_mac;
1491 uint32_t dest_if = INVALID_DESTIF;
1493 uint32_t src_phy_port = pkt->port;
1494 uint32_t dst_ip_addr = rte_bswap32(ipv4hdr->dst_addr);
1496 gw_get_nh_port_ipv4(dst_ip_addr, &dest_if, &nhip);
1498 ret_arp_data = get_dest_mac_addr_ipv4(nhip, dest_if, &dst_mac);
1500 /* Gateway Proc Ends */
1501 if (arp_cache_dest_mac_present(dest_if)) {
1503 ether_addr_copy(&dst_mac, &ehdr->d_addr);
1504 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
1506 *port_out_id = p_acl->port_out_id[dest_if];
1508 update_nhip_access(dest_if);
1509 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
1510 printf("sending buffered packets\n");
1511 arp_send_buffered_pkts(ret_arp_data, &ehdr->d_addr,
1512 p_acl->port_out_id[dest_if]);
1514 p_acl->counters->tpkts_processed++;
1515 p_acl->counters->bytes_processed += packet_length;
1517 if (unlikely(ret_arp_data == NULL)) {
1520 printf("%s: NHIP Not Found, "
1521 "outport_id: %d\n", __func__,
1522 p_acl->port_out_id[dest_if]);
1525 pkts_mask &= ~(1LLU << pos);
1527 printf("ACL after drop pkt_mask "
1528 "%lu, pkt_num %d\n",
1530 p_acl->counters->pkts_drop++;
1534 if (ret_arp_data->status == INCOMPLETE ||
1535 ret_arp_data->status == PROBE) {
1536 if (ret_arp_data->num_pkts >= NUM_DESC) {
1538 pkts_mask &= ~(1LLU << pos);
1540 printf("ACL after drop pkt_mask "
1541 "%lu, pkt_num %d\n",
1543 p_acl->counters->pkts_drop++;
1546 arp_pkts_mask |= pkt_mask;
1547 arp_queue_unresolved_packet(ret_arp_data, pkt);
1554 /* IP Pkt forwarding based on pub/prv mapping */
1555 if(is_phy_port_privte(src_phy_port))
1556 dest_if = prv_to_pub_map[src_phy_port];
1558 dest_if = pub_to_prv_map[src_phy_port];
1560 *port_out_id = p_acl->port_out_id[dest_if];
1566 pkts_drop_mask = keep_mask & ~pkts_mask;
1567 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1568 keep_mask = pkts_mask;
1570 if (arp_pkts_mask) {
1571 keep_mask &= ~(arp_pkts_mask);
1572 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
1575 /* don't bother measuring if traffic very low, might skew stats */
1576 uint32_t packets_this_iteration = __builtin_popcountll(pkts_mask);
1578 if (packets_this_iteration > 1) {
1579 uint64_t latency_this_iteration =
1580 rte_get_tsc_cycles() - p_acl->in_port_time_stamp;
1581 p_acl->counters->sum_latencies += latency_this_iteration;
1582 p_acl->counters->count_latencies++;
1585 printf("Leaving pkt_work_acl_key pkts_mask = %p\n",
1592 * Main packet processing function.
1593 * 64 packet bit mask are used to identify which packets to forward.
1594 * Performs the following:
1595 * - Burst lookup packets in the IPv4 ACL Rule Table.
1596 * - Burst lookup packets in the IPv6 ACL Rule Table.
1597 * - Lookup Action Table, perform actions.
1598 * - Burst lookup Connection Tracking, if enabled.
1599 * - Lookup MAC address.
1601 * - Packets with bit mask set are forwarded
1604 * A pointer to the pipeline.
1606 * A pointer to a burst of packets.
1608 * Number of packets to process.
1610 * A pointer to pipeline specific data.
1613 * 0 on success, negative on error.
/*
 * NOTE(review): this extracted block is fragmentary — leftover line-number
 * tokens and gaps from extraction remain.  Comments below describe only what
 * the visible statements demonstrate; confirm against the full source.
 */
1616 pkt_work_acl_ipv6_key(struct rte_pipeline *p,
1617 struct rte_mbuf **pkts, uint32_t n_pkts, void *arg)
1620 struct pipeline_acl *p_acl = arg;
/* Count every packet of the burst as received before any filtering. */
1622 p_acl->counters->pkts_received =
1623 p_acl->counters->pkts_received + n_pkts;
1625 printf("pkt_work_acl_key pkts_received: %" PRIu64
1626 " n_pkts: %u\n", p_acl->counters->pkts_received, n_pkts);
1628 uint64_t lookup_hit_mask = 0;
1629 uint64_t lookup_hit_mask_ipv4 = 0;
1630 uint64_t lookup_hit_mask_ipv6 = 0;
1631 uint64_t lookup_miss_mask = 0;
1632 uint64_t conntrack_mask = 0;
1633 uint64_t connexist_mask = 0;
1634 uint32_t dest_address = 0;
/* pkts_mask starts with one bit set per packet in the burst. */
1637 uint64_t pkts_drop_mask, pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
1638 uint64_t keep_mask = pkts_mask;
/* Timestamp used at the end of the function for latency accounting. */
1642 p_acl->in_port_time_stamp = rte_get_tsc_cycles();
/* Burst lookup of the active IPv6 ACL rule table over the whole burst. */
1644 if (acl_ipv6_enabled) {
1646 printf("ACL IPV6 Lookup Mask Before = %p\n",
1649 rte_table_acl_ops.f_lookup(acl_rule_table_ipv6_active, pkts,
1650 pkts_mask, &lookup_hit_mask_ipv6,
1652 p_acl->acl_entries_ipv6);
1654 printf("ACL IPV6 Lookup Mask After = %p\n",
1655 (void *)lookup_hit_mask_ipv6);
1658 /* Merge lookup results since we process both IPv4 and IPv6 below */
1659 lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
1661 printf("ACL Lookup Mask After = %p\n", (void *)lookup_hit_mask);
/* Packets that missed the ACL tables are removed from the mask and counted
 * as drops. */
1663 lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
1664 pkts_mask = lookup_hit_mask;
1665 p_acl->counters->pkts_drop += __builtin_popcountll(lookup_miss_mask);
1667 printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
1668 p_acl->counters->pkts_drop,
1669 __builtin_popcountll(lookup_miss_mask));
/* Pass 1: walk the hit mask bit by bit and apply each packet's configured
 * ACL action (count/drop/fwd/nat/dscp/accept/conntrack/connexist). */
1671 uint64_t pkts_to_process = lookup_hit_mask;
1672 /* bitmap of packets left to process for ARP */
1674 for (; pkts_to_process;) {
1675 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
1676 uint64_t pkt_mask = 1LLU << pos;
1677 /* bitmask representing only this packet */
1679 pkts_to_process &= ~pkt_mask;
1680 /* remove this packet from remaining list */
1681 struct rte_mbuf *pkt = pkts[pos];
/* ARP/ICMP packets are diverted by the helper; drop them from this mask. */
1684 if (!check_arp_icmp(pkt, pkt_mask, p_acl)) {
1685 pkts_mask &= ~(1LLU << pos);
1689 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
1690 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
/* IPv4 header version: act on the IPv4 ACL entry for this packet. */
1692 if (hdr_chk == IPv4_HDR_VERSION) {
1693 struct acl_table_entry *entry =
1694 (struct acl_table_entry *)
1695 p_acl->acl_entries_ipv4[pos];
1696 uint16_t phy_port = entry->head.port_id;
1697 uint32_t action_id = entry->action_id;
1700 printf("action_id = %u\n", action_id);
1702 uint32_t dscp_offset =
1703 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DSCP_OFST;
/* Per-action packet/byte counters, indexed by this pipeline instance. */
1705 if (action_array_active[action_id].action_bitmap &
1707 action_counter_table
1708 [p_acl->action_counter_index]
1709 [action_id].packetCount++;
1710 action_counter_table
1711 [p_acl->action_counter_index]
1712 [action_id].byteCount +=
1713 rte_pktmbuf_pkt_len(pkt);
1715 printf("Action Count Packet Count: %"
1716 PRIu64 " Byte Count: %" PRIu64
1718 action_counter_table
1719 [p_acl->action_counter_index]
1720 [action_id].packetCount,
1721 action_counter_table
1722 [p_acl->action_counter_index]
1723 [action_id].byteCount);
1726 if (action_array_active[action_id].action_bitmap &
1727 acl_action_packet_drop) {
1729 /* Drop packet by changing the mask */
1732 ("ACL before drop pkt_mask %lu, pkt_num %d\n",
1734 pkts_mask &= ~(1LLU << pos);
1737 ("ACL after drop pkt_mask %lu, pkt_num %d\n",
1739 p_acl->counters->pkts_drop++;
/* FWD/NAT actions override the entry's output port id. */
1742 if (action_array_active[action_id].action_bitmap &
1745 action_array_active[action_id].fwd_port;
1746 entry->head.port_id = phy_port;
1748 printf("Action FWD Port ID: %u\n",
1752 if (action_array_active[action_id].action_bitmap &
1755 action_array_active[action_id].nat_port;
1756 entry->head.port_id = phy_port;
1758 printf("Action NAT Port ID: %u\n",
1762 if (action_array_active[action_id].action_bitmap &
1765 /* Set DSCP priority */
1766 uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
1769 action_array_active[action_id].dscp_priority
1773 ("Action DSCP DSCP Priority: %u\n",
1777 if (action_array_active[action_id].action_bitmap &
1778 acl_action_packet_accept) {
1780 printf("Action Accept\n");
1782 if (action_array_active[action_id].action_bitmap
1783 & acl_action_conntrack) {
1785 /* Set conntrack bit for this pkt */
1786 conntrack_mask |= pkt_mask;
1788 printf("ACL Conntrack enabled: "
1789 " %p pkt_mask: %p\n",
1790 (void *)conntrack_mask,
1794 if (action_array_active[action_id].action_bitmap
1795 & acl_action_connexist) {
1797 /* Set conntrack bit for this pkt */
1798 conntrack_mask |= pkt_mask;
1800 /* Set connexist bit for this pkt for public -> private */
1801 /* Private -> public packet will open the connection */
1802 if (action_array_active
1803 [action_id].private_public ==
1805 connexist_mask |= pkt_mask;
1808 printf("ACL Connexist enabled "
1809 "conntrack: %p connexist: %p pkt_mask: %p\n",
1810 (void *)conntrack_mask,
1811 (void *)connexist_mask,
/* IPv6 header version: same action handling via the IPv6 ACL entry. */
1818 if (hdr_chk == IPv6_HDR_VERSION) {
1820 struct acl_table_entry *entry =
1821 (struct acl_table_entry *)
1822 p_acl->acl_entries_ipv6[pos];
1823 uint16_t phy_port = entry->head.port_id;
1824 uint32_t action_id = entry->action_id;
1827 printf("action_id = %u\n", action_id);
1829 if (action_array_active[action_id].action_bitmap &
1831 action_counter_table
1832 [p_acl->action_counter_index]
1833 [action_id].packetCount++;
1834 action_counter_table
1835 [p_acl->action_counter_index]
1836 [action_id].byteCount +=
1837 rte_pktmbuf_pkt_len(pkt);
1839 printf("Action Count Packet Count: %"
1840 PRIu64 " Byte Count: %" PRIu64
1842 action_counter_table
1843 [p_acl->action_counter_index]
1844 [action_id].packetCount,
1845 action_counter_table
1846 [p_acl->action_counter_index]
1847 [action_id].byteCount);
1850 if (action_array_active[action_id].action_bitmap &
1851 acl_action_packet_drop) {
1852 /* Drop packet by changing the mask */
1854 printf("ACL before drop pkt_mask "
1855 "%lu, pkt_num %d\n",
1857 pkts_mask &= ~(1LLU << pos);
1859 printf("ACL after drop pkt_mask "
1860 "%lu, pkt_num %d\n",
1862 p_acl->counters->pkts_drop++;
1866 if (action_array_active[action_id].action_bitmap &
1869 action_array_active[action_id].fwd_port;
1870 entry->head.port_id = phy_port;
1872 printf("Action FWD Port ID: %u\n",
1876 if (action_array_active[action_id].action_bitmap &
1879 action_array_active[action_id].nat_port;
1880 entry->head.port_id = phy_port;
1882 printf("Action NAT Port ID: %u\n",
1886 if (action_array_active[action_id].action_bitmap &
1889 /* Set DSCP priority */
/* IPv6 traffic class sits at a different byte offset than IPv4 TOS. */
1890 uint32_t dscp_offset =
1891 MBUF_HDR_ROOM + ETH_HDR_SIZE +
1892 IP_HDR_DSCP_OFST_IPV6;
1894 RTE_MBUF_METADATA_UINT16_PTR(pkt,
1896 uint16_t dscp_value =
1898 (RTE_MBUF_METADATA_UINT16
1899 (pkt, dscp_offset)) & 0XF00F);
1900 uint8_t dscp_store =
1901 action_array_active[action_id].dscp_priority
1903 uint16_t dscp_temp = dscp_store;
1905 dscp_temp = dscp_temp << 4;
1906 *dscp = rte_bswap16(dscp_temp | dscp_value);
1909 ("Action DSCP DSCP Priority: %u\n",
1913 if (action_array_active[action_id].action_bitmap &
1914 acl_action_packet_accept) {
1916 printf("Action Accept\n");
1918 if (action_array_active[action_id].action_bitmap
1919 & acl_action_conntrack) {
1921 /* Set conntrack bit for this pkt */
1922 conntrack_mask |= pkt_mask;
1924 printf("ACL Conntrack enabled: "
1925 " %p pkt_mask: %p\n",
1926 (void *)conntrack_mask,
1930 if (action_array_active[action_id].action_bitmap
1931 & acl_action_connexist) {
1933 /* Set conntrack bit for this pkt */
1934 conntrack_mask |= pkt_mask;
1936 /* Set connexist bit for this pkt for public -> private */
1937 /* Private -> public packet will open the connection */
1938 if (action_array_active
1939 [action_id].private_public ==
1941 connexist_mask |= pkt_mask;
1944 printf("ACL Connexist enabled "
1945 "conntrack: %p connexist: %p pkt_mask: %p\n",
1946 (void *)conntrack_mask,
1947 (void *)connexist_mask,
1953 /* Only call connection tracker if required */
1954 if (conntrack_mask > 0) {
1957 ("ACL Call Conntrack Before = %p Connexist = %p\n",
1958 (void *)conntrack_mask, (void *)connexist_mask);
/* Batch lookup updates conntrack state for all flagged packets at once. */
1960 rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control
1961 (p_acl->cnxn_tracker, pkts, conntrack_mask, connexist_mask);
1963 printf("ACL Call Conntrack After = %p\n",
1964 (void *)conntrack_mask);
1966 /* Only change pkt mask for pkts that have conntrack enabled */
1967 /* Need to loop through packets to check if conntrack enabled */
1968 pkts_to_process = pkts_mask;
1969 for (; pkts_to_process;) {
1970 uint32_t action_id = 0;
1972 (uint8_t) __builtin_ctzll(pkts_to_process);
1973 uint64_t pkt_mask = 1LLU << pos;
1974 /* bitmask representing only this packet */
1976 pkts_to_process &= ~pkt_mask;
1977 /* remove this packet from remaining list */
1978 struct rte_mbuf *pkt = pkts[pos];
1980 uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt,
1984 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1985 if (hdr_chk == IPv4_HDR_VERSION) {
1986 struct acl_table_entry *entry =
1987 (struct acl_table_entry *)
1988 p_acl->acl_entries_ipv4[pos];
1989 action_id = entry->action_id;
1991 struct acl_table_entry *entry =
1992 (struct acl_table_entry *)
1993 p_acl->acl_entries_ipv6[pos];
1994 action_id = entry->action_id;
/* Conntrack verdict: packets whose bit was cleared by the tracker are
 * dropped here. */
1997 if ((action_array_active[action_id].action_bitmap &
1998 acl_action_conntrack)
1999 || (action_array_active[action_id].action_bitmap &
2000 acl_action_connexist)) {
2002 if (conntrack_mask & pkt_mask) {
2004 printf("ACL Conntrack Accept "
2008 /* Drop packet by changing the mask */
2011 ("ACL Conntrack Drop packet = %p\n",
2013 pkts_mask &= ~pkt_mask;
2014 p_acl->counters->pkts_drop++;
/* Pass 2: IPv6 next-hop resolution (ND cache) and output-port selection
 * for the surviving packets. */
2020 pkts_to_process = pkts_mask;
2021 /* bitmap of packets left to process for ARP */
2023 for (; pkts_to_process;) {
2024 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
2025 uint64_t pkt_mask = 1LLU << pos;
2026 /* bitmask representing only this packet */
2028 pkts_to_process &= ~pkt_mask;
2029 /* remove this packet from remaining list */
2030 struct rte_mbuf *pkt = pkts[pos];
2033 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
2034 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
2036 if (hdr_chk == IPv6_HDR_VERSION) {
2038 struct acl_table_entry *entry =
2039 (struct acl_table_entry *)
2040 p_acl->acl_entries_ipv6[pos];
2041 //uint16_t phy_port = entry->head.port_id;
2042 uint16_t phy_port = pkt->port;
2043 uint32_t *port_out_id =
2044 RTE_MBUF_METADATA_UINT32_PTR(pkt,
2052 ("phy_port = %i,links_map[phy_port] = %i\n",
2053 phy_port, p_acl->links_map[phy_port]);
2055 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
2057 uint32_t dest_if = INVALID_DESTIF;
2058 uint32_t src_phy_port = pkt->port;
2062 /* Gateway Proc Starts */
2063 struct ipv6_hdr *ipv6hdr = (struct ipv6_hdr *)
2064 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
2066 struct ether_hdr *ehdr = (struct ether_hdr *)
2067 RTE_MBUF_METADATA_UINT32_PTR(pkt,
2068 META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM);
2070 struct ether_addr dst_mac;
2071 uint32_t dest_if = INVALID_DESTIF;
2072 uint8_t nhipv6[IPV6_ADD_SIZE];
2073 uint8_t dest_ipv6_address[IPV6_ADD_SIZE];
2074 uint32_t src_phy_port;
2075 struct nd_entry_data *ret_nd_data = NULL;
2077 memset(nhipv6, 0, IPV6_ADD_SIZE);
2078 src_phy_port = pkt->port;
/* Copy the 128-bit destination address, resolve next hop + MAC. */
2079 rte_mov16(dest_ipv6_address, (uint8_t *)ipv6hdr->dst_addr);
2081 gw_get_nh_port_ipv6(dest_ipv6_address,
2084 ret_nd_data = get_dest_mac_addr_ipv6(nhipv6, dest_if, &dst_mac);
2086 /* Gateway Proc Ends */
/* ND cache hit: rewrite L2 header and send out the resolved interface. */
2088 if (nd_cache_dest_mac_present(dest_if)) {
2090 ether_addr_copy(&dst_mac, &ehdr->d_addr);
2091 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
2093 *port_out_id = p_acl->port_out_id[dest_if];
2095 update_nhip_access(dest_if);
2097 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
2098 printf("sending buffered packets\n");
2099 p_acl->counters->tpkts_processed +=
2100 ret_nd_data->num_pkts;
2101 nd_send_buffered_pkts(ret_nd_data, &ehdr->d_addr,
2102 p_acl->port_out_id[dest_if]);
2104 p_acl->counters->tpkts_processed++;
2105 p_acl->counters->bytes_processed += packet_length;
/* No ND entry at all: drop the packet. */
2107 if (unlikely(ret_nd_data == NULL)) {
2109 printf("ACL before drop pkt_mask "
2110 "%lu, pkt_num %d\n", pkts_mask, pos);
2111 pkts_mask &= ~(1LLU << pos);
2113 printf("ACL after drop pkt_mask "
2114 "%lu, pkt_num %d\n", pkts_mask, pos);
2115 p_acl->counters->pkts_drop++;
/* ND in progress: queue the packet unless the per-entry queue is full. */
2119 if (ret_nd_data->status == INCOMPLETE ||
2120 ret_nd_data->status == PROBE) {
2121 if (ret_nd_data->num_pkts >= NUM_DESC) {
2124 printf("ACL before drop pkt_mask "
2125 "%lu, pkt_num %d\n", pkts_mask, pos);
2126 pkts_mask &= ~(1LLU << pos);
2128 printf("ACL after drop pkt_mask "
2129 "%lu, pkt_num %d\n", pkts_mask, pos);
2130 p_acl->counters->pkts_drop++;
2133 arp_pkts_mask |= pkt_mask;
2134 nd_queue_unresolved_packet(ret_nd_data,
2142 /* IP Pkt forwarding based on pub/prv mapping */
2143 if(is_phy_port_privte(src_phy_port))
2144 dest_if = prv_to_pub_map[src_phy_port];
2146 dest_if = pub_to_prv_map[src_phy_port];
2148 *port_out_id = p_acl->port_out_id[dest_if];
2153 } /* end of for loop */
/* Drop everything removed from pkts_mask; hijack packets queued for ND so
 * the pipeline does not forward them now. */
2155 pkts_drop_mask = keep_mask & ~pkts_mask;
2156 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
2157 keep_mask = pkts_mask;
2159 if (arp_pkts_mask) {
2160 keep_mask &= ~(arp_pkts_mask);
2161 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
2164 /* don't bother measuring if traffic very low, might skew stats */
2165 uint32_t packets_this_iteration = __builtin_popcountll(pkts_mask);
2167 if (packets_this_iteration > 1) {
2168 uint64_t latency_this_iteration =
2169 rte_get_tsc_cycles() - p_acl->in_port_time_stamp;
2170 p_acl->counters->sum_latencies += latency_this_iteration;
2171 p_acl->counters->count_latencies++;
2174 printf("Leaving pkt_work_acl_key pkts_mask = %p\n",
/*
 * ACL field layout for plain IPv4 over Ethernet: protocol byte, 32-bit
 * source/destination addresses, and 16-bit L4 port ranges (offsets computed
 * from the TCP header layout; UDP ports share the same offsets).
 */
2180 static struct rte_acl_field_def field_format_ipv4[] = {
/* Protocol (exact-match bitmask on next_proto_id). */
2183 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2184 .size = sizeof(uint8_t),
2187 .offset = sizeof(struct ether_hdr) +
2188 offsetof(struct ipv4_hdr, next_proto_id),
2191 /* Source IP address (IPv4) */
2193 .type = RTE_ACL_FIELD_TYPE_MASK,
2194 .size = sizeof(uint32_t),
2197 .offset = sizeof(struct ether_hdr) +
2198 offsetof(struct ipv4_hdr, src_addr),
2201 /* Destination IP address (IPv4) */
2203 .type = RTE_ACL_FIELD_TYPE_MASK,
2204 .size = sizeof(uint32_t),
2207 .offset = sizeof(struct ether_hdr) +
2208 offsetof(struct ipv4_hdr, dst_addr),
/* Source Port (matched as a low/high range). */
2213 .type = RTE_ACL_FIELD_TYPE_RANGE,
2214 .size = sizeof(uint16_t),
2217 .offset = sizeof(struct ether_hdr) +
2218 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
2221 /* Destination Port */
2223 .type = RTE_ACL_FIELD_TYPE_RANGE,
2224 .size = sizeof(uint16_t),
2227 .offset = sizeof(struct ether_hdr) +
2228 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
/* VLAN tag length in bytes, used to shift every ACL field offset below. */
2232 #define SIZEOF_VLAN_HDR 4
/*
 * Same five-field IPv4 ACL layout as field_format_ipv4, but for packets
 * carrying a single VLAN tag: every offset is shifted by SIZEOF_VLAN_HDR.
 */
2234 static struct rte_acl_field_def field_format_vlan_ipv4[] = {
/* Protocol. */
2237 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2238 .size = sizeof(uint8_t),
2241 .offset = sizeof(struct ether_hdr) +
2242 SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, next_proto_id),
2245 /* Source IP address (IPv4) */
2247 .type = RTE_ACL_FIELD_TYPE_MASK,
2248 .size = sizeof(uint32_t),
2251 .offset = sizeof(struct ether_hdr) +
2252 SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, src_addr),
2255 /* Destination IP address (IPv4) */
2257 .type = RTE_ACL_FIELD_TYPE_MASK,
2258 .size = sizeof(uint32_t),
2261 .offset = sizeof(struct ether_hdr) +
2262 SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, dst_addr),
/* Source Port (range match). */
2267 .type = RTE_ACL_FIELD_TYPE_RANGE,
2268 .size = sizeof(uint16_t),
2271 .offset = sizeof(struct ether_hdr) +
2273 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
2276 /* Destination Port */
2278 .type = RTE_ACL_FIELD_TYPE_RANGE,
2279 .size = sizeof(uint16_t),
2282 .offset = sizeof(struct ether_hdr) +
2284 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
/* Double-tag (Q-in-Q) header length in bytes, shifting the offsets below. */
2288 #define SIZEOF_QINQ_HEADER 8
/*
 * IPv4 ACL field layout for Q-in-Q (double VLAN tagged) packets: identical
 * fields to field_format_ipv4, offsets shifted by SIZEOF_QINQ_HEADER.
 */
2290 static struct rte_acl_field_def field_format_qinq_ipv4[] = {
/* Protocol. */
2293 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2294 .size = sizeof(uint8_t),
2297 .offset = sizeof(struct ether_hdr) +
2298 SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, next_proto_id),
2301 /* Source IP address (IPv4) */
2303 .type = RTE_ACL_FIELD_TYPE_MASK,
2304 .size = sizeof(uint32_t),
2307 .offset = sizeof(struct ether_hdr) +
2308 SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, src_addr),
2311 /* Destination IP address (IPv4) */
2313 .type = RTE_ACL_FIELD_TYPE_MASK,
2314 .size = sizeof(uint32_t),
2317 .offset = sizeof(struct ether_hdr) +
2318 SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, dst_addr),
/* Source Port (range match). */
2323 .type = RTE_ACL_FIELD_TYPE_RANGE,
2324 .size = sizeof(uint16_t),
2327 .offset = sizeof(struct ether_hdr) +
2328 SIZEOF_QINQ_HEADER +
2329 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
2332 /* Destination Port */
2334 .type = RTE_ACL_FIELD_TYPE_RANGE,
2335 .size = sizeof(uint16_t),
2338 .offset = sizeof(struct ether_hdr) +
2339 SIZEOF_QINQ_HEADER +
2340 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
/*
 * ACL field layout for IPv6 over Ethernet.  Each 128-bit address is
 * expressed as four consecutive 32-bit MASK fields (the ACL library matches
 * at most 32 bits per field), followed by 16-bit L4 port ranges.
 */
2344 static struct rte_acl_field_def field_format_ipv6[] = {
/* Protocol (IPv6 next-header byte). */
2347 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2348 .size = sizeof(uint8_t),
2351 .offset = sizeof(struct ether_hdr) +
2352 offsetof(struct ipv6_hdr, proto),
2355 /* Source IP address (IPv6) */
/* Source address bits 0-31. */
2357 .type = RTE_ACL_FIELD_TYPE_MASK,
2358 .size = sizeof(uint32_t),
2361 .offset = sizeof(struct ether_hdr) +
2362 offsetof(struct ipv6_hdr, src_addr),
/* Source address bits 32-63. */
2366 .type = RTE_ACL_FIELD_TYPE_MASK,
2367 .size = sizeof(uint32_t),
2370 .offset = sizeof(struct ether_hdr) +
2371 offsetof(struct ipv6_hdr, src_addr) + sizeof(uint32_t),
/* Source address bits 64-95. */
2376 .type = RTE_ACL_FIELD_TYPE_MASK,
2377 .size = sizeof(uint32_t),
2380 .offset = sizeof(struct ether_hdr) +
2381 offsetof(struct ipv6_hdr, src_addr) + 2 * sizeof(uint32_t),
/* Source address bits 96-127. */
2386 .type = RTE_ACL_FIELD_TYPE_MASK,
2387 .size = sizeof(uint32_t),
2390 .offset = sizeof(struct ether_hdr) +
2391 offsetof(struct ipv6_hdr, src_addr) + 3 * sizeof(uint32_t),
2395 /* Destination IP address (IPv6) */
/* Destination address bits 0-31. */
2397 .type = RTE_ACL_FIELD_TYPE_MASK,
2398 .size = sizeof(uint32_t),
2401 .offset = sizeof(struct ether_hdr) +
2402 offsetof(struct ipv6_hdr, dst_addr),
/* Destination address bits 32-63. */
2406 .type = RTE_ACL_FIELD_TYPE_MASK,
2407 .size = sizeof(uint32_t),
2410 .offset = sizeof(struct ether_hdr) +
2411 offsetof(struct ipv6_hdr, dst_addr) + sizeof(uint32_t),
/* Destination address bits 64-95. */
2416 .type = RTE_ACL_FIELD_TYPE_MASK,
2417 .size = sizeof(uint32_t),
2420 .offset = sizeof(struct ether_hdr) +
2421 offsetof(struct ipv6_hdr, dst_addr) + 2 * sizeof(uint32_t),
/* Destination address bits 96-127. */
2426 .type = RTE_ACL_FIELD_TYPE_MASK,
2427 .size = sizeof(uint32_t),
2430 .offset = sizeof(struct ether_hdr) +
2431 offsetof(struct ipv6_hdr, dst_addr) + 3 * sizeof(uint32_t),
/* Source Port (range match). */
2437 .type = RTE_ACL_FIELD_TYPE_RANGE,
2438 .size = sizeof(uint16_t),
2441 .offset = sizeof(struct ether_hdr) +
2442 sizeof(struct ipv6_hdr) + offsetof(struct tcp_hdr, src_port),
2445 /* Destination Port */
2447 .type = RTE_ACL_FIELD_TYPE_RANGE,
2448 .size = sizeof(uint16_t),
2451 .offset = sizeof(struct ether_hdr) +
2452 sizeof(struct ipv6_hdr) + offsetof(struct tcp_hdr, dst_port),
2457 * Parse arguments in config file.
2460 * A pointer to the pipeline.
2462 * A pointer to pipeline specific parameters.
2465 * 0 on success, negative on error.
/*
 * Parse the pipeline's name/value config arguments into *p.
 * Recognized keys (from the visible code): n_rules, pkt_type, traffic_type,
 * prv_que_handler, n_flows.  Duplicate-key flags guard each option.
 * NOTE(review): this extracted block is fragmentary (leftover line-number
 * tokens, missing lines); comments describe only the visible statements.
 */
2468 pipeline_acl_parse_args(struct pipeline_acl *p, struct pipeline_params *params)
2470 uint32_t n_rules_present = 0;
2471 uint32_t pkt_type_present = 0;
2473 uint8_t prv_que_handler_present = 0;
2474 uint8_t n_prv_in_port = 0;
/* Defaults: 4K rules and plain-IPv4 ACL field format. */
2477 p->n_rules = 4 * 1024;
2478 acl_n_rules = 4 * 1024;
2479 p->n_rule_fields = RTE_DIM(field_format_ipv4);
2480 p->field_format = field_format_ipv4;
2481 p->field_format_size = sizeof(field_format_ipv4);
/* Scan each name/value argument from the config section. */
2483 for (i = 0; i < params->n_args; i++) {
2484 char *arg_name = params->args_name[i];
2485 char *arg_value = params->args_value[i];
/* n_rules: rule-table capacity (also mirrored into global acl_n_rules). */
2487 if (strcmp(arg_name, "n_rules") == 0) {
2488 if (n_rules_present)
2490 n_rules_present = 1;
2492 p->n_rules = atoi(arg_value);
2493 acl_n_rules = atoi(arg_value);
/* pkt_type: selects which rte_acl_field_def table describes the packets. */
2497 if (strcmp(arg_name, "pkt_type") == 0) {
2498 if (pkt_type_present)
2500 pkt_type_present = 1;
2503 if (strcmp(arg_value, "ipv4") == 0) {
2504 p->n_rule_fields = RTE_DIM(field_format_ipv4);
2505 p->field_format = field_format_ipv4;
2506 p->field_format_size =
2507 sizeof(field_format_ipv4);
2512 if (strcmp(arg_value, "vlan_ipv4") == 0) {
2514 RTE_DIM(field_format_vlan_ipv4);
2515 p->field_format = field_format_vlan_ipv4;
2516 p->field_format_size =
2517 sizeof(field_format_vlan_ipv4);
2522 if (strcmp(arg_value, "qinq_ipv4") == 0) {
2524 RTE_DIM(field_format_qinq_ipv4);
2525 p->field_format = field_format_qinq_ipv4;
2526 p->field_format_size =
2527 sizeof(field_format_qinq_ipv4);
2532 if (strcmp(arg_value, "ipv6") == 0) {
2533 p->n_rule_fields = RTE_DIM(field_format_ipv6);
2534 p->field_format = field_format_ipv6;
2535 p->field_format_size =
2536 sizeof(field_format_ipv6);
/* traffic_type: must be the IPv4 or IPv6 header-version constant. */
2544 if (strcmp(arg_name, "traffic_type") == 0) {
2545 int traffic_type = atoi(arg_value);
2547 if (traffic_type == 0
2548 || !(traffic_type == IPv4_HDR_VERSION
2549 || traffic_type == IPv6_HDR_VERSION)) {
2550 printf("not IPVR4/IPVR6");
2554 p->traffic_type = traffic_type;
/* prv_que_handler: "(p0,p1,...)" — comma-separated private RX port list,
 * tokenized into acl_prv_que_port_index[]. */
2558 if (strcmp(arg_name, "prv_que_handler") == 0) {
2560 if (prv_que_handler_present) {
2561 printf("Duplicate pktq_in_prv ..\n\n");
2564 prv_que_handler_present = 1;
2569 /* get the first token */
2570 token = strtok(arg_value, "(");
2571 token = strtok(token, ")");
2572 token = strtok(token, ",");
2573 printf("***** prv_que_handler *****\n");
2576 printf("string is null\n");
2577 printf("prv_que_handler is invalid\n");
2580 printf("string is :%s\n", token);
2582 while (token != NULL) {
2583 printf(" %s\n", token);
2584 rxport = atoi(token);
2585 acl_prv_que_port_index[n_prv_in_port++] =
2587 token = strtok(NULL, ",");
2590 if (n_prv_in_port == 0) {
2591 printf("VNF common parse err - no prv RX phy port\n");
/* n_flows: connection-tracker flow-table size (must be non-zero). */
2598 if (strcmp(arg_name, "n_flows") == 0) {
2599 p->n_flows = atoi(arg_value);
2600 if (p->n_flows == 0)
2603 continue;/* needed when multiple parms are checked */
2612 * Create and initialize Pipeline Back End (BE).
2615 * A pointer to the pipeline.
2617 * A pointer to pipeline specific data.
2620 * A pointer to the pipeline create, NULL on error.
2622 static void *pipeline_acl_init(struct pipeline_params *params,
2623 __rte_unused void *arg)
2626 struct pipeline_acl *p_acl;
2629 /* Check input arguments */
2630 if ((params == NULL) ||
2631 (params->n_ports_in == 0) || (params->n_ports_out == 0))
2634 /* Memory allocation */
2635 size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_acl));
2636 p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
2637 p_acl = (struct pipeline_acl *)p;
2641 strncpy(p->name, params->name, PIPELINE_NAME_SIZE);
2642 p->log_level = params->log_level;
2644 PLOG(p, HIGH, "ACL");
2647 * p_acl->links_map[0] = 0xff;
2648 * p_acl->links_map[1] = 0xff;]
2650 p_acl->traffic_type = IPv4_HDR_VERSION;
2651 for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
2652 p_acl->links_map[i] = 0xff;
2653 p_acl->port_out_id[i] = 0xff;
2654 acl_prv_que_port_index[i] = 0;
2657 p_acl->pipeline_num = 0xff;
2659 /* if(enable_hwlb || enable_flow_dir) */
2660 // lib_arp_init(params, arg);
2662 p_acl->n_flows = 4096; /* small default value */
2663 /* Create a single firewall instance and initialize. */
2664 p_acl->cnxn_tracker =
2665 rte_zmalloc(NULL, rte_ct_get_cnxn_tracker_size(),
2666 RTE_CACHE_LINE_SIZE);
2668 if (p_acl->cnxn_tracker == NULL)
2672 * Now allocate a counter block entry.It appears that the initialization
2673 * of all instances is serialized on core 0, so no lock is necessary.
2675 struct rte_ACL_counter_block *counter_ptr;
2677 if (rte_ACL_hi_counter_block_in_use == MAX_ACL_INSTANCES) {
2678 /* error, exceeded table bounds */
2682 rte_ACL_hi_counter_block_in_use++;
2683 counter_ptr = &rte_acl_counter_table[rte_ACL_hi_counter_block_in_use];
2684 strncpy(counter_ptr->name, params->name,PIPELINE_NAME_SIZE);
2685 p_acl->action_counter_index = rte_ACL_hi_counter_block_in_use;
2687 p_acl->counters = counter_ptr;
2689 rte_ct_initialize_default_timeouts(p_acl->cnxn_tracker);
2690 p_acl->arpPktCount = 0;
2692 /* Parse arguments */
2693 if (pipeline_acl_parse_args(p_acl, params))
2695 /*n_flows already checked, ignore Klockwork issue */
2696 if (p_acl->n_flows > 0) {
2697 rte_ct_initialize_cnxn_tracker(p_acl->cnxn_tracker,
2698 p_acl->n_flows, params->name);
2699 p_acl->counters->ct_counters =
2700 rte_ct_get_counter_address(p_acl->cnxn_tracker);
2702 printf("ACL invalid p_acl->n_flows: %u\n", p_acl->n_flows);
2708 struct rte_pipeline_params pipeline_params = {
2709 .name = params->name,
2710 .socket_id = params->socket_id,
2711 .offset_port_id = META_DATA_OFFSET +
2712 offsetof(struct mbuf_acl_meta_data, output_port),
2715 p->p = rte_pipeline_create(&pipeline_params);
2723 p->n_ports_in = params->n_ports_in;
2724 for (i = 0; i < p->n_ports_in; i++) {
2725 struct rte_pipeline_port_in_params port_params = {
2727 pipeline_port_in_params_get_ops(¶ms->port_in
2730 pipeline_port_in_params_convert(¶ms->port_in
2732 .f_action = pkt_work_acl_key,
2734 .burst_size = params->port_in[i].burst_size,
2736 if (p_acl->traffic_type == IPv4_HDR_VERSION)
2737 port_params.f_action = pkt_work_acl_ipv4_key;
2739 if (p_acl->traffic_type == IPv6_HDR_VERSION)
2740 port_params.f_action = pkt_work_acl_ipv6_key;
2742 int status = rte_pipeline_port_in_create(p->p,
2747 rte_pipeline_free(p->p);
2754 p->n_ports_out = params->n_ports_out;
2755 for (i = 0; i < p->n_ports_out; i++) {
2756 struct rte_pipeline_port_out_params port_params = {
2758 pipeline_port_out_params_get_ops(¶ms->port_out
2761 pipeline_port_out_params_convert(¶ms->port_out
2767 int status = rte_pipeline_port_out_create(p->p,
2769 &p->port_out_id[i]);
2772 rte_pipeline_free(p->p);
2778 int pipeline_num = 0;
2780 int temp = sscanf(params->name, "PIPELINE%d", &pipeline_num);
2781 p_acl->pipeline_num = (uint8_t) pipeline_num;
2782 /* set_phy_outport_map(p_acl->pipeline_num, p_acl->links_map);*/
2783 register_pipeline_Qs(p_acl->pipeline_num, p);
2784 set_link_map(p_acl->pipeline_num, p, p_acl->links_map);
2785 set_outport_id(p_acl->pipeline_num, p, p_acl->port_out_id);
2787 /* If this is the first ACL thread, create common ACL Rule tables */
2788 if (rte_ACL_hi_counter_block_in_use == 0) {
2790 printf("Create ACL Tables rte_socket_id(): %i\n",
2793 /* Create IPV4 ACL Rule Tables */
2794 struct rte_table_acl_params common_ipv4_table_acl_params = {
2796 .n_rules = acl_n_rules,
2797 .n_rule_fields = RTE_DIM(field_format_ipv4),
2800 memcpy(common_ipv4_table_acl_params.field_format,
2801 field_format_ipv4, sizeof(field_format_ipv4));
2803 uint32_t ipv4_entry_size = sizeof(struct acl_table_entry);
2805 acl_rule_table_ipv4_active =
2806 rte_table_acl_ops.f_create(&common_ipv4_table_acl_params,
2810 if (acl_rule_table_ipv4_active == NULL) {
2812 ("Failed to create common ACL IPV4A Rule table\n");
2813 rte_pipeline_free(p->p);
2818 /* Create second IPV4 Table */
2819 common_ipv4_table_acl_params.name = "ACLIPV4B";
2820 acl_rule_table_ipv4_standby =
2821 rte_table_acl_ops.f_create(&common_ipv4_table_acl_params,
2825 if (acl_rule_table_ipv4_standby == NULL) {
2827 ("Failed to create common ACL IPV4B Rule table\n");
2828 rte_pipeline_free(p->p);
2833 /* Create IPV6 ACL Rule Tables */
2834 struct rte_table_acl_params common_ipv6_table_acl_params = {
2836 .n_rules = acl_n_rules,
2837 .n_rule_fields = RTE_DIM(field_format_ipv6),
2840 memcpy(common_ipv6_table_acl_params.field_format,
2841 field_format_ipv6, sizeof(field_format_ipv6));
2843 uint32_t ipv6_entry_size = sizeof(struct acl_table_entry);
2845 acl_rule_table_ipv6_active =
2846 rte_table_acl_ops.f_create(&common_ipv6_table_acl_params,
2850 if (acl_rule_table_ipv6_active == NULL) {
2852 ("Failed to create common ACL IPV6A Rule table\n");
2853 rte_pipeline_free(p->p);
2858 /* Create second IPV6 table */
2859 common_ipv6_table_acl_params.name = "ACLIPV6B";
2860 acl_rule_table_ipv6_standby =
2861 rte_table_acl_ops.f_create(&common_ipv6_table_acl_params,
2865 if (acl_rule_table_ipv6_standby == NULL) {
2867 ("Failed to create common ACL IPV6B Rule table\n");
2868 rte_pipeline_free(p->p);
2878 struct rte_pipeline_table_params table_params = {
2879 .ops = &rte_table_stub_ops,
2881 .f_action_hit = NULL,
2882 .f_action_miss = NULL,
2884 .action_data_size = 0,
2887 int status = rte_pipeline_table_create(p->p,
2892 rte_pipeline_free(p->p);
2897 struct rte_pipeline_table_entry default_entry = {
2898 .action = RTE_PIPELINE_ACTION_PORT_META
2901 struct rte_pipeline_table_entry *default_entry_ptr;
2903 status = rte_pipeline_table_default_entry_add(p->p,
2906 &default_entry_ptr);
2909 rte_pipeline_free(p->p);
2915 /* Connecting input ports to tables */
2916 for (i = 0; i < p->n_ports_in; i++) {
2917 int status = rte_pipeline_port_in_connect_to_table(p->p,
2924 rte_pipeline_free(p->p);
2930 /* Enable input ports */
2931 for (i = 0; i < p->n_ports_in; i++) {
2932 int status = rte_pipeline_port_in_enable(p->p,
2936 rte_pipeline_free(p->p);
2942 /* Check pipeline consistency */
2943 if (rte_pipeline_check(p->p) < 0) {
2944 rte_pipeline_free(p->p);
2949 /* Message queues */
2950 p->n_msgq = params->n_msgq;
2951 for (i = 0; i < p->n_msgq; i++)
2952 p->msgq_in[i] = params->msgq_in[i];
2953 for (i = 0; i < p->n_msgq; i++)
2954 p->msgq_out[i] = params->msgq_out[i];
2956 /* Message handlers */
2957 memcpy(p->handlers, handlers, sizeof(p->handlers));
2958 memcpy(p_acl->custom_handlers,
2959 custom_handlers, sizeof(p_acl->custom_handlers));
2965 * Free resources and delete pipeline.
2968 * A pointer to the pipeline.
2971 * 0 on success, negative on error.
/*
 * f_free callback of pipeline_acl_be_ops: free resources and delete the
 * ACL pipeline. Per the block comment above: returns 0 on success,
 * negative on error.
 *
 * NOTE(review): this excerpt is elided — the opening brace, the body of
 * the input-argument check, and the closing rte_free()/return lines are
 * not visible here.
 */
2973 static int pipeline_acl_free(void *pipeline)
2975 	struct pipeline *p = (struct pipeline *)pipeline;
2977 	/* Check input arguments */
/* (elided) presumably rejects a NULL 'p' with a negative return — confirm
 * against the full source */
2981 	/* Free resources */
2982 	rte_pipeline_free(p->p);
/* (elided) the struct pipeline allocation itself is presumably released
 * after the DPDK pipeline above — confirm against the full source */
2988 * Callback function to map input/output ports.
2991 * A pointer to the pipeline.
2995 * A pointer to the Output port.
2998 * 0 on success, negative on error.
/*
 * f_track callback of pipeline_acl_be_ops: map an input port to an output
 * port. Per the block comment above: returns 0 on success, negative on
 * error.
 *
 * NOTE(review): the return-type line preceding 3001 and the body tail
 * (the *port_out assignment and returns) are elided in this excerpt.
 */
3001 pipeline_acl_track(void *pipeline,
/* NOTE(review): 'port_in' is tagged __rte_unused but IS read in the
 * validity check on line 3007 below — the annotation looks stale and
 * should probably be dropped; confirm and fix in the full source. */
3002 	__rte_unused uint32_t port_in, uint32_t *port_out)
3004 	struct pipeline *p = (struct pipeline *)pipeline;
3006 	/* Check input arguments */
3007 	if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
/* Single-input-port pipelines have a trivial mapping; the branch body
 * (presumably *port_out = 0) is elided here — confirm. */
3010 	if (p->n_ports_in == 1) {
3019 * Callback function to process timers.
3022 * A pointer to the pipeline.
3025 * 0 on success, negative on error.
/*
 * f_timer callback of pipeline_acl_be_ops: periodic housekeeping for the
 * ACL pipeline. Per the block comment above: returns 0 on success,
 * negative on error (the return statement itself is elided here).
 */
3027 static int pipeline_acl_timer(void *pipeline)
/* Both casts alias the same object — struct pipeline is presumably the
 * first member of struct pipeline_acl, as is the convention for these
 * BE pipelines; confirm against the struct definition. */
3030 	struct pipeline *p = (struct pipeline *)pipeline;
3031 	struct pipeline_acl *p_acl = (struct pipeline_acl *)pipeline;
/* Service any pending FE messages, then push out buffered packets. */
3033 	pipeline_msg_req_handle(p);
3034 	rte_pipeline_flush(p->p);
/* Let the connection tracker expire stale connection entries. */
3036 	rte_ct_handle_expired_timers(p_acl->cnxn_tracker);
3042 * Callback function to process CLI commands from FE.
3045 * A pointer to the pipeline.
3047 * A pointer to command specific data.
3050 * A pointer to message handler on success,
3051 * pipeline_msg_req_invalid_handler on error.
/*
 * Dispatch a custom CLI message from the FE to the handler registered for
 * its subtype. Per the block comment above: returns the handler's response
 * on success, or the result of pipeline_msg_req_invalid_handler on error.
 *
 * NOTE(review): the opening/closing braces are elided in this excerpt.
 */
3053 void *pipeline_acl_msg_req_custom_handler(struct pipeline *p, void *msg)
3055 	struct pipeline_acl *p_acl = (struct pipeline_acl *)p;
3056 	struct pipeline_custom_msg_req *req = msg;
3057 	pipeline_msg_req_handler f_handle;
/* Bounds-check the subtype before indexing custom_handlers[] so a bad
 * subtype cannot read past the table; out-of-range falls back to the
 * invalid-request handler. */
3059 	f_handle = (req->subtype < PIPELINE_ACL_MSG_REQS) ?
3060 	    p_acl->custom_handlers[req->subtype] :
3061 	    pipeline_msg_req_invalid_handler;
/* An unregistered (NULL) slot also falls back to the invalid handler. */
3063 	if (f_handle == NULL)
3064 		f_handle = pipeline_msg_req_invalid_handler;
3066 	return f_handle(p, req);
3070 * Handler for DBG CLI command.
3073 * A pointer to the pipeline.
3075 * A pointer to command specific data.
3078 * A pointer to response message.
3079 * Response message contains status.
/*
 * Handler for the DBG CLI command: turn ACL debug tracing on (1) or
 * off (0). Per the block comment above: returns a response message
 * containing status.
 *
 * NOTE(review): the lines that set the debug flag, fill rsp->status, and
 * return rsp are elided in this excerpt.
 */
3081 void *pipeline_acl_msg_req_dbg_handler(struct pipeline *p, void *msg)
/* req and rsp deliberately alias the same message buffer: the response is
 * written in place over the request. */
3084 	struct pipeline_acl_dbg_msg_req *req = msg;
3085 	struct pipeline_acl_dbg_msg_rsp *rsp = msg;
3087 	if (req->dbg == 0) {
3088 		printf("DBG turned OFF\n");
/* (elided) presumably clears the debug flag and sets rsp->status — confirm */
3091 	} else if (req->dbg == 1) {
3092 		printf("DBG turned ON\n");
/* (elided) presumably sets the debug flag and rsp->status — confirm */
3096 		printf("Invalid DBG setting\n");
/* (elided) any other value is rejected; rsp->status presumably set to an
 * error code — confirm against the full source */
/*
 * Back-end operations vtable exported to the pipeline framework for the
 * ACL pipeline type.
 *
 * NOTE(review): line 3106 (the .f_run member) and the closing brace are
 * elided in this excerpt — .f_run is presumably NULL so the framework's
 * default run loop is used; confirm against the full source.
 */
3103 struct pipeline_be_ops pipeline_acl_be_ops = {
3104 	.f_init = pipeline_acl_init,
3105 	.f_free = pipeline_acl_free,
3107 	.f_timer = pipeline_acl_timer,
3108 	.f_track = pipeline_acl_track,