2 // Copyright (c) 2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
/**
 * @file
 * Pipeline ACL BE Implementation.
 *
 * Implementation of Pipeline ACL Back End (BE).
 * Responsible for packet processing.
 */
27 #include <rte_common.h>
28 #include <rte_malloc.h>
29 #include <rte_ether.h>
32 #include <rte_byteorder.h>
33 #include <rte_table_acl.h>
34 #include <rte_table_stub.h>
35 #include "pipeline_arpicmp_be.h"
36 #include "vnf_common.h"
37 #include "pipeline_common_be.h"
38 #include <rte_pipeline.h>
41 #include <rte_timer.h>
42 #include <rte_cycles.h>
44 #include "pipeline_acl.h"
45 #include "pipeline_acl_be.h"
46 #include "rte_cnxn_tracking.h"
47 #include "pipeline_actions_common.h"
49 #include "lib_icmpv6.h"
52 static uint8_t acl_prv_que_port_index[PIPELINE_MAX_PORT_IN];
53 extern void convert_prefixlen_to_netmask_ipv6(uint32_t depth,
54 uint8_t netmask_ipv6[]);
/* NOTE(review): line-sampled extract - the "struct pipeline_acl {" opener
 * and several members are not visible here; only visible members annotated.
 */
61  * A structure defining the ACL pipeline per thread data.
65 pipeline_msg_req_handler custom_handlers[PIPELINE_ACL_MSG_REQS];
/* ACL rule-field layout used when building the ACL lookup tables */
68 uint32_t n_rule_fields;
69 struct rte_acl_field_def *field_format;
70 uint32_t field_format_size;
72 /* Connection Tracker */
73 struct rte_ct_cnxn_tracker *cnxn_tracker;
/* per-instance counter block plus this thread's row in action_counter_table */
74 struct rte_ACL_counter_block *counters;
75 int action_counter_index;
76 /* timestamp retrieved during in-port computations */
77 uint64_t in_port_time_stamp;
/* physical-link to output-port mappings, indexed by port id */
82 uint8_t links_map[PIPELINE_MAX_PORT_IN];
83 uint8_t port_out_id[PIPELINE_MAX_PORT_IN];
/* per-burst ACL lookup results, one slot per packet position in the burst */
85 struct acl_table_entry *acl_entries_ipv4[RTE_PORT_IN_BURST_SIZE_MAX];
86 struct acl_table_entry *acl_entries_ipv6[RTE_PORT_IN_BURST_SIZE_MAX];
88 } __rte_cache_aligned;
/* NOTE(review): line-sampled extract - the actual field declarations
 * (output port, next-hop IP) are missing here; only their comments remain.
 */
91  * A structure defining the mbuf meta data for ACL.
93 struct mbuf_acl_meta_data {
94 /* output port stored for RTE_PIPELINE_ACTION_PORT_META */
96 /* next hop ip address used by ARP code */
98 } __rte_cache_aligned;
/* byte offset into the mbuf where ACL metadata is stored */
100 #define META_DATA_OFFSET 128
/* per-instance ACL counter blocks; hi-water index of blocks handed out */
102 struct rte_ACL_counter_block rte_acl_counter_table[MAX_ACL_INSTANCES]
104 int rte_ACL_hi_counter_block_in_use = -1;
106 /* a spin lock used during acl initialization only */
107 rte_spinlock_t rte_ACL_init_lock = RTE_SPINLOCK_INITIALIZER;
/* two action-key arrays; active/standby pointers presumably swapped on
 * rule update (A/B scheme inferred from names - TODO confirm) */
110 struct pipeline_action_key *action_array_a;
111 struct pipeline_action_key *action_array_b;
112 struct pipeline_action_key *action_array_active;
113 struct pipeline_action_key *action_array_standby;
114 uint32_t action_array_size;
/* per-instance, per-action packet/byte counters */
116 struct action_counter_block
117 action_counter_table[MAX_ACL_INSTANCES][action_array_max]
120 static void *pipeline_acl_msg_req_custom_handler(struct pipeline *p, void *msg);
/* Message dispatch table, indexed by request type via designated
 * initializers. NOTE(review): the closing "};" lines of both tables are
 * missing in this line-sampled extract.
 */
122 static pipeline_msg_req_handler handlers[] = {
123 [PIPELINE_MSG_REQ_PING] = pipeline_msg_req_ping_handler,
124 [PIPELINE_MSG_REQ_STATS_PORT_IN] =
125 pipeline_msg_req_stats_port_in_handler,
126 [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
127 pipeline_msg_req_stats_port_out_handler,
128 [PIPELINE_MSG_REQ_STATS_TABLE] = pipeline_msg_req_stats_table_handler,
129 [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
130 pipeline_msg_req_port_in_enable_handler,
131 [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
132 pipeline_msg_req_port_in_disable_handler,
133 [PIPELINE_MSG_REQ_CUSTOM] = pipeline_acl_msg_req_custom_handler,
136 static void *pipeline_acl_msg_req_dbg_handler(struct pipeline *p, void *msg);
/* custom (pipeline-specific) request sub-dispatch table */
138 static pipeline_msg_req_handler custom_handlers[] = {
139 [PIPELINE_ACL_MSG_REQ_DBG] = pipeline_acl_msg_req_dbg_handler,
/* bitmap of packets parked for ARP/ND resolution (hijacked, see below) */
141 uint64_t arp_pkts_mask;
/**
 * Intercept ARP and (on private ports) ICMP/ICMPv6 packets addressed to
 * this link, inserting them directly on the last output port (the ARP
 * port) so they bypass the ACL tables.
 *
 * @param pkt      packet under inspection
 * @param pkt_mask bitmask with only this packet's bit set
 * @param p_acl    ACL pipeline instance
 *
 * @return presumably 0 when the packet was consumed here (caller clears
 *         its bit from pkts_mask), non-zero otherwise - TODO confirm; the
 *         return statements are not visible in this line-sampled extract.
 */
145 static uint8_t check_arp_icmp(struct rte_mbuf *pkt,
146 uint64_t pkt_mask, struct pipeline_acl *p_acl)
/* Ethertype sits 12 bytes into the Ethernet header */
148 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
149 struct ipv6_hdr *ipv6_h;
150 uint16_t *eth_proto =
151 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
152 struct app_link_params *link;
154 //uint32_t *port_out_id = RTE_MBUF_METADATA_UINT32_PTR(pk
155 // offsetof(struct mbuf_acl_meta_dat
157 /* ARP outport number */
158 uint16_t out_port = p_acl->p.n_ports_out - 1;
161 uint32_t prot_offset;
/* link parameters of the port the packet arrived on */
163 link = &myApp->link_params[pkt->port];
/* dispatch on Ethertype (big-endian on the wire) */
165 switch (rte_be_to_cpu_16(*eth_proto)) {
/* ARP: divert straight to the ARP output port */
168 rte_pipeline_port_out_packet_insert(p_acl->p.p, out_port, pkt);
171 * Pkt mask should be changed, and not changing the
174 p_acl->arpPktCount++;
/* IPv4 case: divert ICMP addressed to this link's IP (private port) */
179 /* header room + eth hdr size +
180 * src_aadr offset in ip header
182 uint32_t dst_addr_offset = MBUF_HDR_ROOM +
183 ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
184 uint32_t *dst_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt,
186 prot_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE +
187 IP_HDR_PROTOCOL_OFST;
188 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt,
190 if ((*protocol == IP_PROTOCOL_ICMP) &&
191 link->ip == rte_be_to_cpu_32(*dst_addr)) {
193 if (is_phy_port_privte(pkt->port)) {
195 rte_pipeline_port_out_packet_insert
196 (p_acl->p.p, out_port, pkt);
198 * Pkt mask should be changed,
199 * and not changing the drop mask
201 p_acl->arpPktCount++;
/* IPv6 case: divert ICMPv6 addressed to this link (private port).
 * Only the last 32 bits of the destination address are compared. */
213 uint32_t dst_addr_offset = MBUF_HDR_ROOM +
214 ETH_HDR_SIZE + IPV6_HDR_DST_ADR_OFST;
215 uint32_t *dst_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt,
218 uint32_t prot_offset_ipv6 = MBUF_HDR_ROOM +
219 ETH_HDR_SIZE + IPV6_HDR_PROTOCOL_OFST;
220 struct ipv6_hdr *ipv6_h;
/* NOTE(review): this cast adds MBUF_HDR_ROOM to a type-punned pointer;
 * looks suspicious compared to the RTE_MBUF_METADATA_* form used below -
 * verify against the full source. */
222 ipv6_h = (struct ipv6_hdr *)MBUF_HDR_ROOM +
224 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt,
227 if ((ipv6_h->proto == ICMPV6_PROTOCOL_ID) &&
228 (link->ip == rte_be_to_cpu_32(dst_addr[3]))) {
230 if (is_phy_port_privte(pkt->port)) {
232 rte_pipeline_port_out_packet_insert
233 (p_acl->p.p, out_port, pkt);
235 * Pkt mask should be changed,
236 * and not changing the drop mask
238 p_acl->arpPktCount++;
/* start of the IP header inside the mbuf headroom */
248 #define IP_START (MBUF_HDR_ROOM + ETH_HDR_SIZE)
251 ipv6_h = (struct ipv6_hdr *)
252 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
254 if ((ipv6_h->proto == ICMPV6_PROTOCOL_ID) &&
256 rte_be_to_cpu_32(ipv6_h->dst_addr[3]))) {
258 if (is_phy_port_privte(pkt->port)) {
259 rte_pipeline_port_out_packet_insert(
264 p_acl->arpPktCount++;
/**
 * Print packet contents for debugging.
 *
 * @param pkt
 *  A pointer to the packet.
 */
/* Hex-dump the first 400 bytes of the packet buffer, 20 bytes per row.
 * Debug helper only; reads via the mbuf metadata pointer at offset 0.
 */
void print_pkt_acl(struct rte_mbuf *pkt)
{
	int i = 0, j = 0;

	printf("Packet Contents:\n");

	uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);

	/* 20 rows x 20 bytes = 400 bytes dumped */
	for (i = 0; i < 20; i++) {
		for (j = 0; j < 20; j++)
			printf("%02x ", rd[(20 * i) + j]);

		printf("\n");
	}
}
/**
 * Main packet processing function.
 * A 64-bit packet mask is used to identify which packets to forward.
 * Performs the following:
 * - Burst lookup packets in the IPv4 ACL Rule Table.
 * - Burst lookup packets in the IPv6 ACL Rule Table.
 * - Lookup Action Table, perform actions.
 * - Burst lookup Connection Tracking, if enabled.
 * - Lookup MAC address.
 * - Packets with bit mask set are forwarded.
 *
 * @param p
 *  A pointer to the pipeline.
 * @param pkts
 *  A pointer to a burst of packets.
 * @param n_pkts
 *  Number of packets to process.
 * @param arg
 *  A pointer to pipeline specific data.
 *
 * @return
 *  0 on success, negative on error.
 */
/*
 * ACL pipeline input-port action handler: IPv4+IPv6 ACL table lookups,
 * per-rule actions (count/drop/fwd/nat/dscp/accept), optional connection
 * tracking, then L2 resolution (ARP/ND) and output-port selection.
 * Packets whose bit is cleared from pkts_mask are dropped; packets waiting
 * on ARP/ND resolution are hijacked out of the pipeline and queued.
 * NOTE(review): line-sampled extract - statements are missing throughout;
 * code kept byte-identical, comments only.
 */
324 pkt_work_acl_key(struct rte_pipeline *p,
325 struct rte_mbuf **pkts, uint32_t n_pkts, void *arg)
328 struct pipeline_acl *p_acl = arg;
330 p_acl->counters->pkts_received =
331 p_acl->counters->pkts_received + n_pkts;
333 printf("pkt_work_acl_key pkts_received: %" PRIu64
334 " n_pkts: %u\n", p_acl->counters->pkts_received, n_pkts);
336 uint64_t lookup_hit_mask = 0;
337 uint64_t lookup_hit_mask_ipv4 = 0;
338 uint64_t lookup_hit_mask_ipv6 = 0;
339 uint64_t lookup_miss_mask = 0;
340 uint64_t conntrack_mask = 0;
341 uint64_t connexist_mask = 0;
342 uint32_t dest_address = 0;
345 uint64_t pkts_drop_mask, pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
346 uint64_t keep_mask = pkts_mask;
/* timestamp start of burst for the latency accounting at the end */
350 p_acl->in_port_time_stamp = rte_get_tsc_cycles();
/* Stage 1: burst ACL lookups (IPv4 then IPv6), results merged below */
352 if (acl_ipv4_enabled) {
354 printf("ACL IPV4 Lookup Mask Before = %p\n",
357 rte_table_acl_ops.f_lookup(acl_rule_table_ipv4_active, pkts,
358 pkts_mask, &lookup_hit_mask_ipv4,
360 p_acl->acl_entries_ipv4);
362 printf("ACL IPV4 Lookup Mask After = %p\n",
363 (void *)lookup_hit_mask_ipv4);
366 if (acl_ipv6_enabled) {
368 printf("ACL IPV6 Lookup Mask Before = %p\n",
371 rte_table_acl_ops.f_lookup(acl_rule_table_ipv6_active, pkts,
372 pkts_mask, &lookup_hit_mask_ipv6,
374 p_acl->acl_entries_ipv6);
376 printf("ACL IPV6 Lookup Mask After = %p\n",
377 (void *)lookup_hit_mask_ipv6);
380 /* Merge lookup results since we process both IPv4 and IPv6 below */
381 lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
383 printf("ACL Lookup Mask After = %p\n", (void *)lookup_hit_mask);
/* misses are dropped: only hit packets continue */
385 lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
386 pkts_mask = lookup_hit_mask;
387 p_acl->counters->pkts_drop += __builtin_popcountll(lookup_miss_mask);
389 printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
390 p_acl->counters->pkts_drop,
391 __builtin_popcountll(lookup_miss_mask));
/* Stage 2: per-packet action processing over the hit mask */
393 uint64_t pkts_to_process = lookup_hit_mask;
394 /* bitmap of packets left to process for ARP */
396 for (; pkts_to_process;) {
397 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
398 uint64_t pkt_mask = 1LLU << pos;
399 /* bitmask representing only this packet */
401 pkts_to_process &= ~pkt_mask;
402 /* remove this packet from remaining list */
403 struct rte_mbuf *pkt = pkts[pos];
/* ARP/ICMP intercepted packets leave the forwarding mask here */
406 if (!check_arp_icmp(pkt, pkt_mask, p_acl)) {
407 pkts_mask &= ~(1LLU << pos);
/* IP version nibble selects IPv4 vs IPv6 action handling */
412 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
413 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
415 if (hdr_chk == IPv4_HDR_VERSION) {
417 struct acl_table_entry *entry =
418 (struct acl_table_entry *)
419 p_acl->acl_entries_ipv4[pos];
420 uint16_t phy_port = entry->head.port_id;
421 uint32_t action_id = entry->action_id;
424 printf("action_id = %u\n", action_id);
426 uint32_t dscp_offset =
427 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DSCP_OFST;
/* action: count - per-instance, per-action packet/byte counters */
429 if (action_array_active[action_id].action_bitmap &
432 [p_acl->action_counter_index]
433 [action_id].packetCount++;
435 [p_acl->action_counter_index]
436 [action_id].byteCount +=
437 rte_pktmbuf_pkt_len(pkt);
439 printf("Action Count Packet Count: %"
440 PRIu64 " Byte Count: %" PRIu64
443 [p_acl->action_counter_index]
444 [action_id].packetCount,
446 [p_acl->action_counter_index]
447 [action_id].byteCount);
/* action: drop - clear packet's bit from the forwarding mask */
450 if (action_array_active[action_id].action_bitmap &
451 acl_action_packet_drop) {
453 /* Drop packet by changing the mask */
455 printf("ACL before drop pkt_mask "
456 " %lu, pkt_num %d\n",
458 pkts_mask &= ~(1LLU << pos);
460 printf("ACL after drop pkt_mask "
463 p_acl->counters->pkts_drop++;
/* action: fwd - override the entry's output port */
466 if (action_array_active[action_id].action_bitmap &
469 action_array_active[action_id].fwd_port;
470 entry->head.port_id = phy_port;
472 printf("Action FWD Port ID: %u\n",
/* action: nat - override output port with the NAT port */
476 if (action_array_active[action_id].action_bitmap &
479 action_array_active[action_id].nat_port;
480 entry->head.port_id = phy_port;
482 printf("Action NAT Port ID: %u\n",
486 if (action_array_active[action_id].action_bitmap &
489 /* Set DSCP priority */
490 uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
493 action_array_active[action_id].dscp_priority
497 ("Action DSCP DSCP Priority: %u\n",
501 if (action_array_active[action_id].action_bitmap &
502 acl_action_packet_accept) {
504 printf("Action Accept\n");
506 if (action_array_active[action_id].action_bitmap
507 & acl_action_conntrack) {
509 /* Set conntrack bit for this pkt */
510 conntrack_mask |= pkt_mask;
512 printf("ACL Conntrack enabled: "
514 (void *)conntrack_mask,
518 if (action_array_active[action_id].action_bitmap
519 & acl_action_connexist) {
521 /* Set conntrack bit for this pkt */
522 conntrack_mask |= pkt_mask;
524 /* Set connexist bit for this pkt for public -> private */
525 /* Private -> public packet will open the connection */
526 if (action_array_active
527 [action_id].private_public ==
529 connexist_mask |= pkt_mask;
532 printf("ACL Connexist enabled "
533 "conntrack: %p connexist: %p pkt_mask: %p\n",
534 (void *)conntrack_mask,
535 (void *)connexist_mask,
/* same action processing for IPv6 hits */
541 if (hdr_chk == IPv6_HDR_VERSION) {
543 struct acl_table_entry *entry =
544 (struct acl_table_entry *)
545 p_acl->acl_entries_ipv6[pos];
546 uint16_t phy_port = entry->head.port_id;
547 uint32_t action_id = entry->action_id;
550 printf("action_id = %u\n", action_id);
552 if (action_array_active[action_id].action_bitmap &
555 [p_acl->action_counter_index]
556 [action_id].packetCount++;
558 [p_acl->action_counter_index]
559 [action_id].byteCount +=
560 rte_pktmbuf_pkt_len(pkt);
562 printf("Action Count Packet Count: %"
563 PRIu64 " Byte Count: %" PRIu64
566 [p_acl->action_counter_index]
567 [action_id].packetCount,
569 [p_acl->action_counter_index]
570 [action_id].byteCount);
573 if (action_array_active[action_id].action_bitmap &
574 acl_action_packet_drop) {
575 /* Drop packet by changing the mask */
577 printf("ACL before drop pkt_mask "
580 pkts_mask &= ~(1LLU << pos);
582 printf("ACL after drop pkt_mask "
585 p_acl->counters->pkts_drop++;
589 if (action_array_active[action_id].action_bitmap &
592 action_array_active[action_id].fwd_port;
593 entry->head.port_id = phy_port;
595 printf("Action FWD Port ID: %u\n",
599 if (action_array_active[action_id].action_bitmap &
602 action_array_active[action_id].nat_port;
603 entry->head.port_id = phy_port;
605 printf("Action NAT Port ID: %u\n",
609 if (action_array_active[action_id].action_bitmap &
612 /* Set DSCP priority */
/* IPv6 traffic-class bits straddle a byte boundary, hence the
 * 16-bit read-modify-write with byte swapping below */
613 uint32_t dscp_offset =
614 MBUF_HDR_ROOM + ETH_HDR_SIZE +
615 IP_HDR_DSCP_OFST_IPV6;
617 RTE_MBUF_METADATA_UINT16_PTR(pkt,
619 uint16_t dscp_value =
621 (RTE_MBUF_METADATA_UINT16
622 (pkt, dscp_offset)) & 0XF00F);
624 action_array_active[action_id].dscp_priority
626 uint16_t dscp_temp = dscp_store;
628 dscp_temp = dscp_temp << 4;
629 *dscp = rte_bswap16(dscp_temp | dscp_value);
632 ("Action DSCP DSCP Priority: %u\n",
636 if (action_array_active[action_id].action_bitmap &
637 acl_action_packet_accept) {
639 printf("Action Accept\n");
641 if (action_array_active[action_id].action_bitmap
642 & acl_action_conntrack) {
644 /* Set conntrack bit for this pkt */
645 conntrack_mask |= pkt_mask;
647 printf("ACL Conntrack enabled: "
648 " %p pkt_mask: %p\n",
649 (void *)conntrack_mask,
653 if (action_array_active[action_id].action_bitmap
654 & acl_action_connexist) {
656 /* Set conntrack bit for this pkt */
657 conntrack_mask |= pkt_mask;
659 /* Set connexist bit for this pkt for public -> private */
660 /* Private -> public packet will open the connection */
661 if (action_array_active
662 [action_id].private_public ==
664 connexist_mask |= pkt_mask;
667 printf("ACL Connexist enabled "
668 "conntrack: %p connexist: %p pkt_mask: %p\n",
669 (void *)conntrack_mask,
670 (void *)connexist_mask,
677 /* Only call connection tracker if required */
678 if (conntrack_mask > 0) {
681 ("ACL Call Conntrack Before = %p Connexist = %p\n",
682 (void *)conntrack_mask, (void *)connexist_mask);
/* batch lookup clears bits for packets the tracker rejects */
684 rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control
685 (p_acl->cnxn_tracker, pkts, conntrack_mask, connexist_mask);
687 printf("ACL Call Conntrack After = %p\n",
688 (void *)conntrack_mask);
690 /* Only change pkt mask for pkts that have conntrack enabled */
691 /* Need to loop through packets to check if conntrack enabled */
692 pkts_to_process = pkts_mask;
693 for (; pkts_to_process;) {
694 uint32_t action_id = 0;
696 (uint8_t) __builtin_ctzll(pkts_to_process);
697 uint64_t pkt_mask = 1LLU << pos;
698 /* bitmask representing only this packet */
700 pkts_to_process &= ~pkt_mask;
701 /* remove this packet from remaining list */
702 struct rte_mbuf *pkt = pkts[pos];
704 uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt,
709 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
710 if (hdr_chk == IPv4_HDR_VERSION) {
711 struct acl_table_entry *entry =
712 (struct acl_table_entry *)
713 p_acl->acl_entries_ipv4[pos];
714 action_id = entry->action_id;
716 struct acl_table_entry *entry =
717 (struct acl_table_entry *)
718 p_acl->acl_entries_ipv6[pos];
719 action_id = entry->action_id;
722 if ((action_array_active[action_id].action_bitmap &
723 acl_action_conntrack)
724 || (action_array_active[action_id].action_bitmap &
725 acl_action_connexist)) {
727 if (conntrack_mask & pkt_mask) {
729 printf("ACL Conntrack Accept "
733 /* Drop packet by changing the mask */
735 printf("ACL Conntrack Drop "
738 pkts_mask &= ~pkt_mask;
739 p_acl->counters->pkts_drop++;
/* Stage 3: L2 (ARP/ND) resolution and output-port selection */
745 pkts_to_process = pkts_mask;
746 /* bitmap of packets left to process for ARP */
748 for (; pkts_to_process;) {
749 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
750 uint64_t pkt_mask = 1LLU << pos;
751 /* bitmask representing only this packet */
753 pkts_to_process &= ~pkt_mask;
754 /* remove this packet from remaining list */
755 struct rte_mbuf *pkt = pkts[pos];
758 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
759 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
761 if (hdr_chk == IPv4_HDR_VERSION) {
763 struct acl_table_entry *entry =
764 (struct acl_table_entry *)
765 p_acl->acl_entries_ipv4[pos];
766 uint16_t phy_port = pkt->port;
767 uint32_t *port_out_id =
768 RTE_MBUF_METADATA_UINT32_PTR(pkt,
775 ("phy_port = %i, links_map[phy_port] = %i\n",
776 phy_port, p_acl->links_map[phy_port]);
777 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
/* pick the peer physical port from the private<->public maps */
779 uint32_t dest_if = INVALID_DESTIF;
780 uint32_t dst_phy_port = INVALID_DESTIF;
781 uint32_t src_phy_port = pkt->port;
782 if(is_phy_port_privte(src_phy_port))
783 dst_phy_port = prv_to_pub_map[src_phy_port];
785 dst_phy_port = pub_to_prv_map[src_phy_port];
790 /* Gateway Proc Starts */
791 struct ether_hdr *ehdr = (struct ether_hdr *)
792 RTE_MBUF_METADATA_UINT32_PTR(pkt,
793 META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM);
795 struct ipv4_hdr *ipv4hdr = (struct ipv4_hdr *)
796 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
798 struct arp_entry_data *ret_arp_data = NULL;
799 struct ether_addr dst_mac;
801 uint32_t dst_ip_addr = rte_bswap32(ipv4hdr->dst_addr);
/* route lookup -> next-hop IP + dest interface, then ARP lookup */
803 gw_get_route_nh_port_ipv4(dst_ip_addr, &dest_if, &nhip, dst_phy_port);
805 ret_arp_data = get_dest_mac_addr_ipv4(nhip, dest_if, &dst_mac);
807 /* Gateway Proc Ends */
/* resolved: rewrite L2 header and pick the output port */
808 if (arp_cache_dest_mac_present(dest_if)) {
810 ether_addr_copy(&dst_mac, &ehdr->d_addr);
811 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
813 *port_out_id = p_acl->port_out_id[dest_if];
815 update_nhip_access(dest_if);
/* flush any packets buffered while ARP was unresolved */
816 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
817 printf("sending buffered packets\n");
818 arp_send_buffered_pkts(ret_arp_data, &ehdr->d_addr,
819 p_acl->port_out_id[dest_if]);
822 p_acl->counters->tpkts_processed++;
823 p_acl->counters->bytes_processed +=
/* no route/next-hop: drop */
826 if (unlikely(ret_arp_data == NULL)) {
828 printf("%s: NHIP Not Found, "
829 "outport_id: %d\n", __func__,
830 p_acl->port_out_id[dest_if]);
833 pkts_mask &= ~(1LLU << pos);
835 printf("ACL after drop pkt_mask "
838 p_acl->counters->pkts_drop++;
/* ARP pending: queue (up to NUM_DESC) and hijack, else drop */
842 if (ret_arp_data->status == INCOMPLETE ||
843 ret_arp_data->status == PROBE) {
844 if (ret_arp_data->num_pkts >= NUM_DESC) {
846 pkts_mask &= ~(1LLU << pos);
848 printf("ACL after drop pkt_mask "
851 p_acl->counters->pkts_drop++;
854 arp_pkts_mask |= pkt_mask;
855 arp_queue_unresolved_packet(ret_arp_data,
863 /* IP Pkt forwarding based on pub/prv mapping */
864 if(is_phy_port_privte(src_phy_port))
865 dest_if = prv_to_pub_map[src_phy_port];
867 dest_if = pub_to_prv_map[src_phy_port];
869 *port_out_id = p_acl->port_out_id[dest_if];
872 } /* end of if (hdr_chk == IPv4_HDR_VERSION) */
/* IPv6: same flow with ND instead of ARP */
874 if (hdr_chk == IPv6_HDR_VERSION) {
876 struct acl_table_entry *entry =
877 (struct acl_table_entry *)
878 p_acl->acl_entries_ipv6[pos];
879 //uint16_t phy_port = entry->head.port_id;
880 uint16_t phy_port = pkt->port;
881 uint32_t *port_out_id =
882 RTE_MBUF_METADATA_UINT32_PTR(pkt,
888 printf("phy_port = %i, "
889 "links_map[phy_port] = %i\n",
890 phy_port, p_acl->links_map[phy_port]);
892 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
894 uint32_t dest_if = INVALID_DESTIF;
895 uint32_t src_phy_port = pkt->port;
899 /* Gateway Proc Starts */
900 struct ipv6_hdr *ipv6hdr = (struct ipv6_hdr *)
901 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
903 struct ether_hdr *ehdr = (struct ether_hdr *)
904 RTE_MBUF_METADATA_UINT32_PTR(pkt,
905 META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM);
907 struct ether_addr dst_mac;
908 uint8_t nhipv6[IPV6_ADD_SIZE];
909 uint8_t dest_ipv6_address[IPV6_ADD_SIZE];
910 struct nd_entry_data *ret_nd_data = NULL;
912 memset(nhipv6, 0, IPV6_ADD_SIZE);
913 rte_mov16(dest_ipv6_address, (uint8_t *)ipv6hdr->dst_addr);
915 gw_get_nh_port_ipv6(dest_ipv6_address,
918 ret_nd_data = get_dest_mac_addr_ipv6(nhipv6, dest_if, &dst_mac);
920 /* Gateway Proc Ends */
922 if (nd_cache_dest_mac_present(dest_if)) {
924 ether_addr_copy(&dst_mac, &ehdr->d_addr);
925 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
927 *port_out_id = p_acl->port_out_id[dest_if];
929 update_nhip_access(dest_if);
931 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
932 printf("sending buffered packets\n");
933 p_acl->counters->tpkts_processed +=
934 ret_nd_data->num_pkts;
935 nd_send_buffered_pkts(ret_nd_data, &ehdr->d_addr,
936 p_acl->port_out_id[dest_if]);
938 p_acl->counters->tpkts_processed++;
939 p_acl->counters->bytes_processed +=
942 if (unlikely(ret_nd_data == NULL)) {
944 printf("ACL before drop pkt_mask "
945 "%lu, pkt_num %d\n", pkts_mask, pos);
946 pkts_mask &= ~(1LLU << pos);
948 printf("ACL after drop pkt_mask "
949 "%lu, pkt_num %d\n", pkts_mask, pos);
950 p_acl->counters->pkts_drop++;
954 if (ret_nd_data->status == INCOMPLETE ||
955 ret_nd_data->status == PROBE) {
956 if (ret_nd_data->num_pkts >= NUM_DESC) {
959 printf("ACL before drop pkt_mask "
960 "%lu, pkt_num %d\n", pkts_mask, pos);
961 pkts_mask &= ~(1LLU << pos);
963 printf("ACL after drop pkt_mask "
964 "%lu, pkt_num %d\n", pkts_mask, pos);
965 p_acl->counters->pkts_drop++;
968 arp_pkts_mask |= pkt_mask;
969 nd_queue_unresolved_packet(ret_nd_data,
977 /* IP Pkt forwarding based on pub/prv mapping */
978 if(is_phy_port_privte(src_phy_port))
979 dest_if = prv_to_pub_map[src_phy_port];
981 dest_if = pub_to_prv_map[src_phy_port];
983 *port_out_id = p_acl->port_out_id[dest_if];
987 } /* if (hdr_chk == IPv6_HDR_VERSION) */
/* Stage 4: drop cleared packets, hijack ARP/ND-pending ones, and
 * accumulate per-burst latency stats */
989 pkts_drop_mask = keep_mask & ~pkts_mask;
990 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
991 keep_mask = pkts_mask;
994 keep_mask &= ~(arp_pkts_mask);
995 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
998 /* don't bother measuring if traffic very low, might skew stats */
999 uint32_t packets_this_iteration = __builtin_popcountll(pkts_mask);
1001 if (packets_this_iteration > 1) {
1002 uint64_t latency_this_iteration =
1003 rte_get_tsc_cycles() - p_acl->in_port_time_stamp;
1005 p_acl->counters->sum_latencies += latency_this_iteration;
1006 p_acl->counters->count_latencies++;
1010 printf("Leaving pkt_work_acl_key pkts_mask = %p\n",
/**
 * Main packet processing function (IPv4-only variant).
 * A 64-bit packet mask is used to identify which packets to forward.
 * Performs the following:
 * - Burst lookup packets in the IPv4 ACL Rule Table.
 * - Burst lookup packets in the IPv6 ACL Rule Table.
 * - Lookup Action Table, perform actions.
 * - Burst lookup Connection Tracking, if enabled.
 * - Lookup MAC address.
 * - Packets with bit mask set are forwarded.
 *
 * @param p
 *  A pointer to the pipeline.
 * @param pkts
 *  A pointer to a burst of packets.
 * @param n_pkts
 *  Number of packets to process.
 * @param arg
 *  A pointer to pipeline specific data.
 *
 * @return
 *  0 on success, negative on error.
 */
1041 pkt_work_acl_ipv4_key(struct rte_pipeline *p,
1042 struct rte_mbuf **pkts, uint32_t n_pkts, void *arg)
1045 struct pipeline_acl *p_acl = arg;
1047 p_acl->counters->pkts_received =
1048 p_acl->counters->pkts_received + n_pkts;
1050 printf("pkt_work_acl_key pkts_received: %" PRIu64
1051 " n_pkts: %u\n", p_acl->counters->pkts_received, n_pkts);
1053 uint64_t lookup_hit_mask = 0;
1054 uint64_t lookup_hit_mask_ipv4 = 0;
1055 uint64_t lookup_hit_mask_ipv6 = 0;
1056 uint64_t lookup_miss_mask = 0;
1057 uint64_t conntrack_mask = 0;
1058 uint64_t connexist_mask = 0;
1059 uint32_t dest_address = 0;
1062 uint64_t pkts_drop_mask, pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
1063 uint64_t keep_mask = pkts_mask;
1067 p_acl->in_port_time_stamp = rte_get_tsc_cycles();
1069 if (acl_ipv4_enabled) {
1071 printf("ACL IPV4 Lookup Mask Before = %p\n",
1074 rte_table_acl_ops.f_lookup(acl_rule_table_ipv4_active, pkts,
1075 pkts_mask, &lookup_hit_mask_ipv4,
1077 p_acl->acl_entries_ipv4);
1079 printf("ACL IPV4 Lookup Mask After = %p\n",
1080 (void *)lookup_hit_mask_ipv4);
1083 /* Merge lookup results since we process both IPv4 and IPv6 below */
1084 lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
1086 printf("ACL Lookup Mask After = %p\n", (void *)lookup_hit_mask);
1088 lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
1089 pkts_mask = lookup_hit_mask;
1090 p_acl->counters->pkts_drop += __builtin_popcountll(lookup_miss_mask);
1092 printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
1093 p_acl->counters->pkts_drop,
1094 __builtin_popcountll(lookup_miss_mask));
1096 uint64_t pkts_to_process = lookup_hit_mask;
1097 /* bitmap of packets left to process for ARP */
1099 for (; pkts_to_process;) {
1100 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
1101 uint64_t pkt_mask = 1LLU << pos;
1102 /* bitmask representing only this packet */
1104 pkts_to_process &= ~pkt_mask;
1105 /* remove this packet from remaining list */
1106 struct rte_mbuf *pkt = pkts[pos];
1109 if (!check_arp_icmp(pkt, pkt_mask, p_acl)) {
1110 pkts_mask &= ~(1LLU << pos);
1115 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
1116 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1118 if (hdr_chk == IPv4_HDR_VERSION) {
1119 struct acl_table_entry *entry =
1120 (struct acl_table_entry *)
1121 p_acl->acl_entries_ipv4[pos];
1122 uint16_t phy_port = entry->head.port_id;
1123 uint32_t action_id = entry->action_id;
1126 printf("action_id = %u\n", action_id);
1128 uint32_t dscp_offset =
1129 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DSCP_OFST;
1131 if (action_array_active[action_id].action_bitmap &
1133 action_counter_table
1134 [p_acl->action_counter_index]
1135 [action_id].packetCount++;
1136 action_counter_table
1137 [p_acl->action_counter_index]
1138 [action_id].byteCount +=
1139 rte_pktmbuf_pkt_len(pkt);
1141 printf("Action Count Packet Count: %"
1142 PRIu64 " Byte Count: %" PRIu64
1144 action_counter_table
1145 [p_acl->action_counter_index]
1146 [action_id].packetCount,
1147 action_counter_table
1148 [p_acl->action_counter_index]
1149 [action_id].byteCount);
1152 if (action_array_active[action_id].action_bitmap &
1153 acl_action_packet_drop) {
1155 /* Drop packet by changing the mask */
1157 printf("ACL before drop pkt_mask "
1158 "%lu, pkt_num %d\n",
1160 pkts_mask &= ~(1LLU << pos);
1162 printf("ACL after drop pkt_mask "
1163 " %lu, pkt_num %d\n",
1165 p_acl->counters->pkts_drop++;
1168 if (action_array_active[action_id].action_bitmap &
1171 action_array_active[action_id].fwd_port;
1172 entry->head.port_id = phy_port;
1174 printf("Action FWD Port ID: %u\n",
1178 if (action_array_active[action_id].action_bitmap &
1181 action_array_active[action_id].nat_port;
1182 entry->head.port_id = phy_port;
1184 printf("Action NAT Port ID: %u\n",
1188 if (action_array_active[action_id].action_bitmap &
1191 /* Set DSCP priority */
1192 uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
1195 action_array_active[action_id].dscp_priority
1199 ("Action DSCP DSCP Priority: %u\n",
1203 if (action_array_active[action_id].action_bitmap &
1204 acl_action_packet_accept) {
1206 printf("Action Accept\n");
1208 if (action_array_active[action_id].action_bitmap
1209 & acl_action_conntrack) {
1211 /* Set conntrack bit for this pkt */
1212 conntrack_mask |= pkt_mask;
1214 printf("ACL Conntrack "
1215 "enabled: %p pkt_mask: %p\n",
1216 (void *)conntrack_mask,
1220 if (action_array_active[action_id].action_bitmap
1221 & acl_action_connexist) {
1223 /* Set conntrack bit for this pkt */
1224 conntrack_mask |= pkt_mask;
1226 /* Set connexist bit for this pkt for public -> private */
1227 /* Private -> public packet will open the connection */
1228 if (action_array_active
1229 [action_id].private_public ==
1231 connexist_mask |= pkt_mask;
1234 printf("ACL Connexist "
1235 "enabled conntrack: %p connexist: %p pkt_mask: %p\n",
1236 (void *)conntrack_mask,
1237 (void *)connexist_mask,
1243 if (hdr_chk == IPv6_HDR_VERSION) {
1245 struct acl_table_entry *entry =
1246 (struct acl_table_entry *)
1247 p_acl->acl_entries_ipv6[pos];
1248 uint16_t phy_port = entry->head.port_id;
1249 uint32_t action_id = entry->action_id;
1252 printf("action_id = %u\n", action_id);
1254 if (action_array_active[action_id].action_bitmap &
1256 action_counter_table
1257 [p_acl->action_counter_index]
1258 [action_id].packetCount++;
1259 action_counter_table
1260 [p_acl->action_counter_index]
1261 [action_id].byteCount +=
1262 rte_pktmbuf_pkt_len(pkt);
1264 printf("Action Count Packet Count: %"
1265 PRIu64 " Byte Count: %" PRIu64
1267 action_counter_table
1268 [p_acl->action_counter_index]
1269 [action_id].packetCount,
1270 action_counter_table
1271 [p_acl->action_counter_index]
1272 [action_id].byteCount);
1275 if (action_array_active[action_id].action_bitmap &
1276 acl_action_packet_drop) {
1277 /* Drop packet by changing the mask */
1280 ("ACL before drop pkt_mask %lu, pkt_num %d\n",
1282 pkts_mask &= ~(1LLU << pos);
1285 ("ACL after drop pkt_mask %lu, pkt_num %d\n",
1287 p_acl->counters->pkts_drop++;
1291 if (action_array_active[action_id].action_bitmap &
1294 action_array_active[action_id].fwd_port;
1295 entry->head.port_id = phy_port;
1297 printf("Action FWD Port ID: %u\n",
1301 if (action_array_active[action_id].action_bitmap &
1304 action_array_active[action_id].nat_port;
1305 entry->head.port_id = phy_port;
1307 printf("Action NAT Port ID: %u\n",
1311 if (action_array_active[action_id].action_bitmap &
1314 /* Set DSCP priority */
1315 uint32_t dscp_offset =
1316 MBUF_HDR_ROOM + ETH_HDR_SIZE +
1317 IP_HDR_DSCP_OFST_IPV6;
1319 RTE_MBUF_METADATA_UINT16_PTR(pkt,
1321 uint16_t dscp_value =
1323 (RTE_MBUF_METADATA_UINT16
1324 (pkt, dscp_offset)) & 0XF00F);
1325 uint8_t dscp_store =
1326 action_array_active[action_id].dscp_priority
1328 uint16_t dscp_temp = dscp_store;
1330 dscp_temp = dscp_temp << 4;
1331 *dscp = rte_bswap16(dscp_temp | dscp_value);
1334 ("Action DSCP DSCP Priority: %u\n",
1338 if (action_array_active[action_id].action_bitmap &
1339 acl_action_packet_accept) {
1341 printf("Action Accept\n");
1343 if (action_array_active[action_id].action_bitmap
1344 & acl_action_conntrack) {
1346 /* Set conntrack bit for this pkt */
1347 conntrack_mask |= pkt_mask;
1349 printf("ACL Conntrack "
1350 "enabled: %p pkt_mask: %p\n",
1351 (void *)conntrack_mask,
1355 if (action_array_active[action_id].action_bitmap
1356 & acl_action_connexist) {
1358 /* Set conntrack bit for this pkt */
1359 conntrack_mask |= pkt_mask;
1361 /* Set connexist bit for this pkt for public -> private */
1362 /* Private -> public packet will open the connection */
1363 if (action_array_active
1364 [action_id].private_public ==
1366 connexist_mask |= pkt_mask;
1369 printf("ACL Connexist enabled "
1370 "conntrack: %p connexist: %p pkt_mask: %p\n",
1371 (void *)conntrack_mask,
1372 (void *)connexist_mask,
1379 /* Only call connection tracker if required */
1380 if (conntrack_mask > 0) {
1383 ("ACL Call Conntrack Before = %p Connexist = %p\n",
1384 (void *)conntrack_mask, (void *)connexist_mask);
1386 rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control
1387 (p_acl->cnxn_tracker, pkts, conntrack_mask, connexist_mask);
1389 printf("ACL Call Conntrack After = %p\n",
1390 (void *)conntrack_mask);
1392 /* Only change pkt mask for pkts that have conntrack enabled */
1393 /* Need to loop through packets to check if conntrack enabled */
1394 pkts_to_process = pkts_mask;
1395 for (; pkts_to_process;) {
1396 uint32_t action_id = 0;
1398 (uint8_t) __builtin_ctzll(pkts_to_process);
1399 uint64_t pkt_mask = 1LLU << pos;
1400 /* bitmask representing only this packet */
1402 pkts_to_process &= ~pkt_mask;
1403 /* remove this packet from remaining list */
1404 struct rte_mbuf *pkt = pkts[pos];
1406 uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt,
1410 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1411 if (hdr_chk == IPv4_HDR_VERSION) {
1412 struct acl_table_entry *entry =
1413 (struct acl_table_entry *)
1414 p_acl->acl_entries_ipv4[pos];
1415 action_id = entry->action_id;
1417 struct acl_table_entry *entry =
1418 (struct acl_table_entry *)
1419 p_acl->acl_entries_ipv6[pos];
1420 action_id = entry->action_id;
1423 if ((action_array_active[action_id].action_bitmap &
1424 acl_action_conntrack)
1425 || (action_array_active[action_id].action_bitmap &
1426 acl_action_connexist)) {
1428 if (conntrack_mask & pkt_mask) {
1430 printf("ACL Conntrack Accept "
1434 /* Drop packet by changing the mask */
1436 printf("ACL Conntrack Drop "
1439 pkts_mask &= ~pkt_mask;
1440 p_acl->counters->pkts_drop++;
1446 pkts_to_process = pkts_mask;
1447 /* bitmap of packets left to process for ARP */
1449 for (; pkts_to_process;) {
1450 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
1451 uint64_t pkt_mask = 1LLU << pos;
1452 /* bitmask representing only this packet */
1454 pkts_to_process &= ~pkt_mask;
1455 /* remove this packet from remaining list */
1456 struct rte_mbuf *pkt = pkts[pos];
1459 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
1460 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1462 if (hdr_chk == IPv4_HDR_VERSION) {
1464 struct acl_table_entry *entry =
1465 (struct acl_table_entry *)
1466 p_acl->acl_entries_ipv4[pos];
1467 //uint16_t phy_port = entry->head.port_id;
1468 uint16_t phy_port = pkt->port;
1469 uint32_t *port_out_id =
1470 RTE_MBUF_METADATA_UINT32_PTR(pkt,
1477 ("phy_port = %i, links_map[phy_port] = %i\n",
1478 phy_port, p_acl->links_map[phy_port]);
1480 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
1482 uint32_t dest_if = INVALID_DESTIF;
1483 uint32_t src_phy_port = pkt->port;
1484 uint32_t dst_phy_port = INVALID_DESTIF;
1485 if(is_phy_port_privte(src_phy_port))
1486 dst_phy_port = prv_to_pub_map[src_phy_port];
1488 dst_phy_port = pub_to_prv_map[src_phy_port];
1492 /* Gateway Proc Starts */
1493 struct ether_hdr *ehdr = (struct ether_hdr *)
1494 RTE_MBUF_METADATA_UINT32_PTR(pkt,
1495 META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM);
1497 struct ipv4_hdr *ipv4hdr = (struct ipv4_hdr *)
1498 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
1500 struct arp_entry_data *ret_arp_data = NULL;
1501 struct ether_addr dst_mac;
1502 uint32_t dest_if = INVALID_DESTIF;
1504 uint32_t src_phy_port = pkt->port;
1505 uint32_t dst_ip_addr = rte_bswap32(ipv4hdr->dst_addr);
1507 gw_get_route_nh_port_ipv4(dst_ip_addr, &dest_if, &nhip, dst_phy_port);
1509 ret_arp_data = get_dest_mac_addr_ipv4(nhip, dest_if, &dst_mac);
1511 /* Gateway Proc Ends */
1512 if (arp_cache_dest_mac_present(dest_if)) {
1514 ether_addr_copy(&dst_mac, &ehdr->d_addr);
1515 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
1517 *port_out_id = p_acl->port_out_id[dest_if];
1519 update_nhip_access(dest_if);
1520 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
1521 printf("sending buffered packets\n");
1522 arp_send_buffered_pkts(ret_arp_data, &ehdr->d_addr,
1523 p_acl->port_out_id[dest_if]);
1525 p_acl->counters->tpkts_processed++;
1526 p_acl->counters->bytes_processed += packet_length;
1528 if (unlikely(ret_arp_data == NULL)) {
1531 printf("%s: NHIP Not Found, "
1532 "outport_id: %d\n", __func__,
1533 p_acl->port_out_id[dest_if]);
1536 pkts_mask &= ~(1LLU << pos);
1538 printf("ACL after drop pkt_mask "
1539 "%lu, pkt_num %d\n",
1541 p_acl->counters->pkts_drop++;
1545 if (ret_arp_data->status == INCOMPLETE ||
1546 ret_arp_data->status == PROBE) {
1547 if (ret_arp_data->num_pkts >= NUM_DESC) {
1549 pkts_mask &= ~(1LLU << pos);
1551 printf("ACL after drop pkt_mask "
1552 "%lu, pkt_num %d\n",
1554 p_acl->counters->pkts_drop++;
1557 arp_pkts_mask |= pkt_mask;
1558 arp_queue_unresolved_packet(ret_arp_data, pkt);
1565 /* IP Pkt forwarding based on pub/prv mapping */
1566 if(is_phy_port_privte(src_phy_port))
1567 dest_if = prv_to_pub_map[src_phy_port];
1569 dest_if = pub_to_prv_map[src_phy_port];
1571 *port_out_id = p_acl->port_out_id[dest_if];
1577 pkts_drop_mask = keep_mask & ~pkts_mask;
1578 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1579 keep_mask = pkts_mask;
1581 if (arp_pkts_mask) {
1582 keep_mask &= ~(arp_pkts_mask);
1583 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
1586 /* don't bother measuring if traffic very low, might skew stats */
1587 uint32_t packets_this_iteration = __builtin_popcountll(pkts_mask);
1589 if (packets_this_iteration > 1) {
1590 uint64_t latency_this_iteration =
1591 rte_get_tsc_cycles() - p_acl->in_port_time_stamp;
1592 p_acl->counters->sum_latencies += latency_this_iteration;
1593 p_acl->counters->count_latencies++;
1596 printf("Leaving pkt_work_acl_key pkts_mask = %p\n",
1603 * Main packet processing function.
1604 * 64 packet bit mask are used to identify which packets to forward.
1605 * Performs the following:
1606 * - Burst lookup packets in the IPv4 ACL Rule Table.
1607 * - Burst lookup packets in the IPv6 ACL Rule Table.
1608 * - Lookup Action Table, perform actions.
1609 * - Burst lookup Connection Tracking, if enabled.
1610 * - Lookup MAC address.
1612 * - Packets with bit mask set are forwarded
1615 * A pointer to the pipeline.
1617 * A pointer to a burst of packets.
1619 * Number of packets to process.
1621 * A pointer to pipeline specific data.
1624 * 0 on success, negative on error.
/*
 * NOTE(review): this extraction has interior lines elided (the baked-in line
 * numbers jump), so several `if (...bitmap & ...)` conditions are missing the
 * action-bit constant and some printf argument lists are truncated. Comments
 * below describe only what the visible code establishes; anything inferred is
 * hedged. Verify against the full source before relying on this block.
 *
 * Per-burst input-port action handler for the IPv6-configured ACL pipeline:
 * ACL table lookup, per-rule actions (count/drop/fwd/nat/dscp/accept),
 * optional connection tracking, then L2 resolution (ND) and output-port
 * selection. Packets that fail any stage are removed from pkts_mask.
 */
1627 pkt_work_acl_ipv6_key(struct rte_pipeline *p,
1628 struct rte_mbuf **pkts, uint32_t n_pkts, void *arg)
1631 struct pipeline_acl *p_acl = arg;
/* Count every packet the port handed us, before any filtering. */
1633 p_acl->counters->pkts_received =
1634 p_acl->counters->pkts_received + n_pkts;
1636 printf("pkt_work_acl_key pkts_received: %" PRIu64
1637 " n_pkts: %u\n", p_acl->counters->pkts_received, n_pkts);
1639 uint64_t lookup_hit_mask = 0;
1640 uint64_t lookup_hit_mask_ipv4 = 0;
1641 uint64_t lookup_hit_mask_ipv6 = 0;
1642 uint64_t lookup_miss_mask = 0;
1643 uint64_t conntrack_mask = 0;
1644 uint64_t connexist_mask = 0;
/* NOTE(review): dest_address appears unused in the visible lines — confirm
 * against the elided portions before removing. */
1645 uint32_t dest_address = 0;
1648 uint64_t pkts_drop_mask, pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
1649 uint64_t keep_mask = pkts_mask;
/* Timestamp the burst entry; used for the latency accumulation at the end. */
1653 p_acl->in_port_time_stamp = rte_get_tsc_cycles();
1655 if (acl_ipv6_enabled) {
1657 printf("ACL IPV6 Lookup Mask Before = %p\n",
/* Burst lookup of the whole mask against the active IPv6 ACL rule table. */
1660 rte_table_acl_ops.f_lookup(acl_rule_table_ipv6_active, pkts,
1661 pkts_mask, &lookup_hit_mask_ipv6,
1663 p_acl->acl_entries_ipv6);
1665 printf("ACL IPV6 Lookup Mask After = %p\n",
1666 (void *)lookup_hit_mask_ipv6);
1669 /* Merge lookup results since we process both IPv4 and IPv6 below */
1670 lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
1672 printf("ACL Lookup Mask After = %p\n", (void *)lookup_hit_mask);
/* Misses are dropped: only rule hits continue through the pipeline. */
1674 lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
1675 pkts_mask = lookup_hit_mask;
1676 p_acl->counters->pkts_drop += __builtin_popcountll(lookup_miss_mask);
1678 printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
1679 p_acl->counters->pkts_drop,
1680 __builtin_popcountll(lookup_miss_mask));
/* Pass 1: walk every hit packet and apply its matched rule's actions. */
1682 uint64_t pkts_to_process = lookup_hit_mask;
1683 /* bitmap of packets left to process for ARP */
1685 for (; pkts_to_process;) {
1686 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
1687 uint64_t pkt_mask = 1LLU << pos;
1688 /* bitmask representing only this packet */
1690 pkts_to_process &= ~pkt_mask;
1691 /* remove this packet from remaining list */
1692 struct rte_mbuf *pkt = pkts[pos];
/* ARP/ICMP control traffic is handled elsewhere; exclude it from this burst. */
1695 if (!check_arp_icmp(pkt, pkt_mask, p_acl)) {
1696 pkts_mask &= ~(1LLU << pos);
/* Read IP version nibble from the first byte after the Ethernet header. */
1700 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
1701 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1703 if (hdr_chk == IPv4_HDR_VERSION) {
1704 struct acl_table_entry *entry =
1705 (struct acl_table_entry *)
1706 p_acl->acl_entries_ipv4[pos];
1707 uint16_t phy_port = entry->head.port_id;
1708 uint32_t action_id = entry->action_id;
1711 printf("action_id = %u\n", action_id);
1713 uint32_t dscp_offset =
1714 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DSCP_OFST;
/* Action: count — per-thread, per-action packet/byte counters.
 * NOTE(review): the action-bit constant on this condition is elided. */
1716 if (action_array_active[action_id].action_bitmap &
1718 action_counter_table
1719 [p_acl->action_counter_index]
1720 [action_id].packetCount++;
1721 action_counter_table
1722 [p_acl->action_counter_index]
1723 [action_id].byteCount +=
1724 rte_pktmbuf_pkt_len(pkt);
1726 printf("Action Count Packet Count: %"
1727 PRIu64 " Byte Count: %" PRIu64
1729 action_counter_table
1730 [p_acl->action_counter_index]
1731 [action_id].packetCount,
1732 action_counter_table
1733 [p_acl->action_counter_index]
1734 [action_id].byteCount);
/* Action: drop — clear the packet's bit so it is dropped at the end. */
1737 if (action_array_active[action_id].action_bitmap &
1738 acl_action_packet_drop) {
1740 /* Drop packet by changing the mask */
1743 ("ACL before drop pkt_mask %lu, pkt_num %d\n",
1745 pkts_mask &= ~(1LLU << pos);
1748 ("ACL after drop pkt_mask %lu, pkt_num %d\n",
1750 p_acl->counters->pkts_drop++;
/* Action: forward — override the entry's output port with the rule's. */
1753 if (action_array_active[action_id].action_bitmap &
1756 action_array_active[action_id].fwd_port;
1757 entry->head.port_id = phy_port;
1759 printf("Action FWD Port ID: %u\n",
/* Action: NAT — same mechanism as forward, using the rule's NAT port. */
1763 if (action_array_active[action_id].action_bitmap &
1766 action_array_active[action_id].nat_port;
1767 entry->head.port_id = phy_port;
1769 printf("Action NAT Port ID: %u\n",
/* Action: DSCP rewrite for IPv4 (single-byte TOS field). */
1773 if (action_array_active[action_id].action_bitmap &
1776 /* Set DSCP priority */
1777 uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
1780 action_array_active[action_id].dscp_priority
1784 ("Action DSCP DSCP Priority: %u\n",
1788 if (action_array_active[action_id].action_bitmap &
1789 acl_action_packet_accept) {
1791 printf("Action Accept\n");
1793 if (action_array_active[action_id].action_bitmap
1794 & acl_action_conntrack) {
1796 /* Set conntrack bit for this pkt */
1797 conntrack_mask |= pkt_mask;
1799 printf("ACL Conntrack enabled: "
1800 " %p pkt_mask: %p\n",
1801 (void *)conntrack_mask,
1805 if (action_array_active[action_id].action_bitmap
1806 & acl_action_connexist) {
1808 /* Set conntrack bit for this pkt */
1809 conntrack_mask |= pkt_mask;
1811 /* Set connexist bit for this pkt for public -> private */
1812 /* Private -> public packet will open the connection */
1813 if (action_array_active
1814 [action_id].private_public ==
1816 connexist_mask |= pkt_mask;
1819 printf("ACL Connexist enabled "
1820 "conntrack: %p connexist: %p pkt_mask: %p\n",
1821 (void *)conntrack_mask,
1822 (void *)connexist_mask,
/* Same action handling for IPv6-headed packets, using the IPv6 entry array. */
1829 if (hdr_chk == IPv6_HDR_VERSION) {
1831 struct acl_table_entry *entry =
1832 (struct acl_table_entry *)
1833 p_acl->acl_entries_ipv6[pos];
1834 uint16_t phy_port = entry->head.port_id;
1835 uint32_t action_id = entry->action_id;
1838 printf("action_id = %u\n", action_id);
1840 if (action_array_active[action_id].action_bitmap &
1842 action_counter_table
1843 [p_acl->action_counter_index]
1844 [action_id].packetCount++;
1845 action_counter_table
1846 [p_acl->action_counter_index]
1847 [action_id].byteCount +=
1848 rte_pktmbuf_pkt_len(pkt);
1850 printf("Action Count Packet Count: %"
1851 PRIu64 " Byte Count: %" PRIu64
1853 action_counter_table
1854 [p_acl->action_counter_index]
1855 [action_id].packetCount,
1856 action_counter_table
1857 [p_acl->action_counter_index]
1858 [action_id].byteCount);
1861 if (action_array_active[action_id].action_bitmap &
1862 acl_action_packet_drop) {
1863 /* Drop packet by changing the mask */
1865 printf("ACL before drop pkt_mask "
1866 "%lu, pkt_num %d\n",
1868 pkts_mask &= ~(1LLU << pos);
1870 printf("ACL after drop pkt_mask "
1871 "%lu, pkt_num %d\n",
1873 p_acl->counters->pkts_drop++;
1877 if (action_array_active[action_id].action_bitmap &
1880 action_array_active[action_id].fwd_port;
1881 entry->head.port_id = phy_port;
1883 printf("Action FWD Port ID: %u\n",
1887 if (action_array_active[action_id].action_bitmap &
1890 action_array_active[action_id].nat_port;
1891 entry->head.port_id = phy_port;
1893 printf("Action NAT Port ID: %u\n",
/* Action: DSCP rewrite for IPv6 — the traffic-class bits straddle two bytes,
 * so a 16-bit read-modify-write with mask 0xF00F and a byteswap is used. */
1897 if (action_array_active[action_id].action_bitmap &
1900 /* Set DSCP priority */
1901 uint32_t dscp_offset =
1902 MBUF_HDR_ROOM + ETH_HDR_SIZE +
1903 IP_HDR_DSCP_OFST_IPV6;
1905 RTE_MBUF_METADATA_UINT16_PTR(pkt,
1907 uint16_t dscp_value =
1909 (RTE_MBUF_METADATA_UINT16
1910 (pkt, dscp_offset)) & 0XF00F);
1911 uint8_t dscp_store =
1912 action_array_active[action_id].dscp_priority
1914 uint16_t dscp_temp = dscp_store;
1916 dscp_temp = dscp_temp << 4;
1917 *dscp = rte_bswap16(dscp_temp | dscp_value);
1920 ("Action DSCP DSCP Priority: %u\n",
1924 if (action_array_active[action_id].action_bitmap &
1925 acl_action_packet_accept) {
1927 printf("Action Accept\n");
1929 if (action_array_active[action_id].action_bitmap
1930 & acl_action_conntrack) {
1932 /* Set conntrack bit for this pkt */
1933 conntrack_mask |= pkt_mask;
1935 printf("ACL Conntrack enabled: "
1936 " %p pkt_mask: %p\n",
1937 (void *)conntrack_mask,
1941 if (action_array_active[action_id].action_bitmap
1942 & acl_action_connexist) {
1944 /* Set conntrack bit for this pkt */
1945 conntrack_mask |= pkt_mask;
1947 /* Set connexist bit for this pkt for public -> private */
1948 /* Private -> public packet will open the connection */
1949 if (action_array_active
1950 [action_id].private_public ==
1952 connexist_mask |= pkt_mask;
1955 printf("ACL Connexist enabled "
1956 "conntrack: %p connexist: %p pkt_mask: %p\n",
1957 (void *)conntrack_mask,
1958 (void *)connexist_mask,
1964 /* Only call connection tracker if required */
1965 if (conntrack_mask > 0) {
1968 ("ACL Call Conntrack Before = %p Connexist = %p\n",
1969 (void *)conntrack_mask, (void *)connexist_mask);
/* Batch conntrack lookup; the tracker updates conntrack_mask in place so a
 * cleared bit below means the connection rejected the packet. */
1971 rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control
1972 (p_acl->cnxn_tracker, pkts, conntrack_mask, connexist_mask);
1974 printf("ACL Call Conntrack After = %p\n",
1975 (void *)conntrack_mask);
1977 /* Only change pkt mask for pkts that have conntrack enabled */
1978 /* Need to loop through packets to check if conntrack enabled */
/* Pass 2: drop conntrack-enabled packets whose bit the tracker cleared. */
1979 pkts_to_process = pkts_mask;
1980 for (; pkts_to_process;) {
1981 uint32_t action_id = 0;
1983 (uint8_t) __builtin_ctzll(pkts_to_process);
1984 uint64_t pkt_mask = 1LLU << pos;
1985 /* bitmask representing only this packet */
1987 pkts_to_process &= ~pkt_mask;
1988 /* remove this packet from remaining list */
1989 struct rte_mbuf *pkt = pkts[pos];
1991 uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt,
1995 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1996 if (hdr_chk == IPv4_HDR_VERSION) {
1997 struct acl_table_entry *entry =
1998 (struct acl_table_entry *)
1999 p_acl->acl_entries_ipv4[pos];
2000 action_id = entry->action_id;
2002 struct acl_table_entry *entry =
2003 (struct acl_table_entry *)
2004 p_acl->acl_entries_ipv6[pos];
2005 action_id = entry->action_id;
2008 if ((action_array_active[action_id].action_bitmap &
2009 acl_action_conntrack)
2010 || (action_array_active[action_id].action_bitmap &
2011 acl_action_connexist)) {
2013 if (conntrack_mask & pkt_mask) {
2015 printf("ACL Conntrack Accept "
2019 /* Drop packet by changing the mask */
2022 ("ACL Conntrack Drop packet = %p\n",
2024 pkts_mask &= ~pkt_mask;
2025 p_acl->counters->pkts_drop++;
/* Pass 3: L2 resolution (IPv6 ND) and output-port selection for survivors. */
2031 pkts_to_process = pkts_mask;
2032 /* bitmap of packets left to process for ARP */
2034 for (; pkts_to_process;) {
2035 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
2036 uint64_t pkt_mask = 1LLU << pos;
2037 /* bitmask representing only this packet */
2039 pkts_to_process &= ~pkt_mask;
2040 /* remove this packet from remaining list */
2041 struct rte_mbuf *pkt = pkts[pos];
2044 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
2045 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
2047 if (hdr_chk == IPv6_HDR_VERSION) {
2049 struct acl_table_entry *entry =
2050 (struct acl_table_entry *)
2051 p_acl->acl_entries_ipv6[pos];
2052 //uint16_t phy_port = entry->head.port_id;
2053 uint16_t phy_port = pkt->port;
2054 uint32_t *port_out_id =
2055 RTE_MBUF_METADATA_UINT32_PTR(pkt,
2063 ("phy_port = %i,links_map[phy_port] = %i\n",
2064 phy_port, p_acl->links_map[phy_port]);
2066 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
2068 uint32_t dest_if = INVALID_DESTIF;
2069 uint32_t src_phy_port = pkt->port;
2073 /* Gateway Proc Starts */
2074 struct ipv6_hdr *ipv6hdr = (struct ipv6_hdr *)
2075 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
2077 struct ether_hdr *ehdr = (struct ether_hdr *)
2078 RTE_MBUF_METADATA_UINT32_PTR(pkt,
2079 META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM);
/* NOTE(review): dest_if/src_phy_port are re-declared here, shadowing the
 * outer declarations above — likely an artifact of an inner scope whose
 * opening brace is elided; confirm in the full source. */
2081 struct ether_addr dst_mac;
2082 uint32_t dest_if = INVALID_DESTIF;
2083 uint8_t nhipv6[IPV6_ADD_SIZE];
2084 uint8_t dest_ipv6_address[IPV6_ADD_SIZE];
2085 uint32_t src_phy_port;
2086 struct nd_entry_data *ret_nd_data = NULL;
2088 memset(nhipv6, 0, IPV6_ADD_SIZE);
2089 src_phy_port = pkt->port;
2090 rte_mov16(dest_ipv6_address, (uint8_t *)ipv6hdr->dst_addr);
/* Route lookup: destination IPv6 -> next hop + egress interface. */
2092 gw_get_nh_port_ipv6(dest_ipv6_address,
2095 ret_nd_data = get_dest_mac_addr_ipv6(nhipv6, dest_if, &dst_mac);
2097 /* Gateway Proc Ends */
2099 if (nd_cache_dest_mac_present(dest_if)) {
/* ND cache hit: rewrite L2 header and send out the resolved interface. */
2101 ether_addr_copy(&dst_mac, &ehdr->d_addr);
2102 ether_addr_copy(get_link_hw_addr(dest_if), &ehdr->s_addr);
2104 *port_out_id = p_acl->port_out_id[dest_if];
2106 update_nhip_access(dest_if);
2108 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
2109 printf("sending buffered packets\n");
2110 p_acl->counters->tpkts_processed +=
2111 ret_nd_data->num_pkts;
2112 nd_send_buffered_pkts(ret_nd_data, &ehdr->d_addr,
2113 p_acl->port_out_id[dest_if]);
2115 p_acl->counters->tpkts_processed++;
2116 p_acl->counters->bytes_processed += packet_length;
/* No ND entry at all: drop. */
2118 if (unlikely(ret_nd_data == NULL)) {
2120 printf("ACL before drop pkt_mask "
2121 "%lu, pkt_num %d\n", pkts_mask, pos);
2122 pkts_mask &= ~(1LLU << pos);
2124 printf("ACL after drop pkt_mask "
2125 "%lu, pkt_num %d\n", pkts_mask, pos);
2126 p_acl->counters->pkts_drop++;
/* ND in progress: buffer the packet (hijacked below) unless the per-entry
 * queue is already full, in which case drop. */
2130 if (ret_nd_data->status == INCOMPLETE ||
2131 ret_nd_data->status == PROBE) {
2132 if (ret_nd_data->num_pkts >= NUM_DESC) {
2135 printf("ACL before drop pkt_mask "
2136 "%lu, pkt_num %d\n", pkts_mask, pos);
2137 pkts_mask &= ~(1LLU << pos);
2139 printf("ACL after drop pkt_mask "
2140 "%lu, pkt_num %d\n", pkts_mask, pos);
2141 p_acl->counters->pkts_drop++;
2144 arp_pkts_mask |= pkt_mask;
2145 nd_queue_unresolved_packet(ret_nd_data,
2153 /* IP Pkt forwarding based on pub/prv mapping */
2154 if(is_phy_port_privte(src_phy_port))
2155 dest_if = prv_to_pub_map[src_phy_port];
2157 dest_if = pub_to_prv_map[src_phy_port];
2159 *port_out_id = p_acl->port_out_id[dest_if];
2164 } /* end of for loop */
/* Everything removed from pkts_mask since entry is dropped in one call. */
2166 pkts_drop_mask = keep_mask & ~pkts_mask;
2167 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
2168 keep_mask = pkts_mask;
/* Packets queued for ND resolution are hijacked out of the pipeline. */
2170 if (arp_pkts_mask) {
2171 keep_mask &= ~(arp_pkts_mask);
2172 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
2175 /* don't bother measuring if traffic very low, might skew stats */
2176 uint32_t packets_this_iteration = __builtin_popcountll(pkts_mask);
2178 if (packets_this_iteration > 1) {
2179 uint64_t latency_this_iteration =
2180 rte_get_tsc_cycles() - p_acl->in_port_time_stamp;
2181 p_acl->counters->sum_latencies += latency_this_iteration;
2182 p_acl->counters->count_latencies++;
2185 printf("Leaving pkt_work_acl_key pkts_mask = %p\n",
/*
 * ACL field layout for plain (untagged) IPv4 packets: protocol, source and
 * destination address, and TCP/UDP source/destination port ranges, all
 * offset from the end of the Ethernet header.
 */
2191 static struct rte_acl_field_def field_format_ipv4[] = {
/* Protocol (exact-match bitmask on the 1-byte next_proto_id field). */
2194 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2195 .size = sizeof(uint8_t),
2198 .offset = sizeof(struct ether_hdr) +
2199 offsetof(struct ipv4_hdr, next_proto_id),
2202 /* Source IP address (IPv4) */
2204 .type = RTE_ACL_FIELD_TYPE_MASK,
2205 .size = sizeof(uint32_t),
2208 .offset = sizeof(struct ether_hdr) +
2209 offsetof(struct ipv4_hdr, src_addr),
2212 /* Destination IP address (IPv4) */
2214 .type = RTE_ACL_FIELD_TYPE_MASK,
2215 .size = sizeof(uint32_t),
2218 .offset = sizeof(struct ether_hdr) +
2219 offsetof(struct ipv4_hdr, dst_addr),
/* Source Port (range match; offset assumes no IPv4 options — fixed-size
 * ipv4_hdr before the TCP header). */
2224 .type = RTE_ACL_FIELD_TYPE_RANGE,
2225 .size = sizeof(uint16_t),
2228 .offset = sizeof(struct ether_hdr) +
2229 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
2232 /* Destination Port */
2234 .type = RTE_ACL_FIELD_TYPE_RANGE,
2235 .size = sizeof(uint16_t),
2238 .offset = sizeof(struct ether_hdr) +
2239 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
2243 #define SIZEOF_VLAN_HDR 4
/*
 * ACL field layout for single-VLAN-tagged IPv4 packets: identical fields to
 * field_format_ipv4, with every offset shifted by SIZEOF_VLAN_HDR (4 bytes).
 */
2245 static struct rte_acl_field_def field_format_vlan_ipv4[] = {
/* Protocol */
2248 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2249 .size = sizeof(uint8_t),
2252 .offset = sizeof(struct ether_hdr) +
2253 SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, next_proto_id),
2256 /* Source IP address (IPv4) */
2258 .type = RTE_ACL_FIELD_TYPE_MASK,
2259 .size = sizeof(uint32_t),
2262 .offset = sizeof(struct ether_hdr) +
2263 SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, src_addr),
2266 /* Destination IP address (IPv4) */
2268 .type = RTE_ACL_FIELD_TYPE_MASK,
2269 .size = sizeof(uint32_t),
2272 .offset = sizeof(struct ether_hdr) +
2273 SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, dst_addr),
/* Source Port.
 * NOTE(review): unlike the address fields above (and the QinQ table below,
 * which adds SIZEOF_QINQ_HEADER here), the visible port offsets do not show
 * the SIZEOF_VLAN_HDR term — a continuation line may be elided in this
 * extraction; verify against the full source. */
2278 .type = RTE_ACL_FIELD_TYPE_RANGE,
2279 .size = sizeof(uint16_t),
2282 .offset = sizeof(struct ether_hdr) +
2284 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
2287 /* Destination Port */
2289 .type = RTE_ACL_FIELD_TYPE_RANGE,
2290 .size = sizeof(uint16_t),
2293 .offset = sizeof(struct ether_hdr) +
2295 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
2299 #define SIZEOF_QINQ_HEADER 8
/*
 * ACL field layout for QinQ (double-VLAN) IPv4 packets: identical fields to
 * field_format_ipv4, with every offset shifted by SIZEOF_QINQ_HEADER
 * (8 bytes for the two VLAN tags).
 */
2301 static struct rte_acl_field_def field_format_qinq_ipv4[] = {
/* Protocol */
2304 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2305 .size = sizeof(uint8_t),
2308 .offset = sizeof(struct ether_hdr) +
2309 SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, next_proto_id),
2312 /* Source IP address (IPv4) */
2314 .type = RTE_ACL_FIELD_TYPE_MASK,
2315 .size = sizeof(uint32_t),
2318 .offset = sizeof(struct ether_hdr) +
2319 SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, src_addr),
2322 /* Destination IP address (IPv4) */
2324 .type = RTE_ACL_FIELD_TYPE_MASK,
2325 .size = sizeof(uint32_t),
2328 .offset = sizeof(struct ether_hdr) +
2329 SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, dst_addr),
/* Source Port */
2334 .type = RTE_ACL_FIELD_TYPE_RANGE,
2335 .size = sizeof(uint16_t),
2338 .offset = sizeof(struct ether_hdr) +
2339 SIZEOF_QINQ_HEADER +
2340 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
2343 /* Destination Port */
2345 .type = RTE_ACL_FIELD_TYPE_RANGE,
2346 .size = sizeof(uint16_t),
2349 .offset = sizeof(struct ether_hdr) +
2350 SIZEOF_QINQ_HEADER +
2351 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
/*
 * ACL field layout for IPv6 packets. The rte_acl library matches fields of
 * at most 4 bytes, so each 128-bit address is split into four consecutive
 * 32-bit MASK fields.
 */
2355 static struct rte_acl_field_def field_format_ipv6[] = {
/* Protocol (IPv6 next-header byte). */
2358 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2359 .size = sizeof(uint8_t),
2362 .offset = sizeof(struct ether_hdr) +
2363 offsetof(struct ipv6_hdr, proto),
2366 /* Source IP address (IPv6) */
/* Source address word 0 (bits 0-31). */
2368 .type = RTE_ACL_FIELD_TYPE_MASK,
2369 .size = sizeof(uint32_t),
2372 .offset = sizeof(struct ether_hdr) +
2373 offsetof(struct ipv6_hdr, src_addr),
/* Source address word 1 (bits 32-63). */
2377 .type = RTE_ACL_FIELD_TYPE_MASK,
2378 .size = sizeof(uint32_t),
2381 .offset = sizeof(struct ether_hdr) +
2382 offsetof(struct ipv6_hdr, src_addr) + sizeof(uint32_t),
/* Source address word 2 (bits 64-95). */
2387 .type = RTE_ACL_FIELD_TYPE_MASK,
2388 .size = sizeof(uint32_t),
2391 .offset = sizeof(struct ether_hdr) +
2392 offsetof(struct ipv6_hdr, src_addr) + 2 * sizeof(uint32_t),
/* Source address word 3 (bits 96-127). */
2397 .type = RTE_ACL_FIELD_TYPE_MASK,
2398 .size = sizeof(uint32_t),
2401 .offset = sizeof(struct ether_hdr) +
2402 offsetof(struct ipv6_hdr, src_addr) + 3 * sizeof(uint32_t),
2406 /* Destination IP address (IPv6) */
/* Destination address word 0 (bits 0-31). */
2408 .type = RTE_ACL_FIELD_TYPE_MASK,
2409 .size = sizeof(uint32_t),
2412 .offset = sizeof(struct ether_hdr) +
2413 offsetof(struct ipv6_hdr, dst_addr),
/* Destination address word 1 (bits 32-63). */
2417 .type = RTE_ACL_FIELD_TYPE_MASK,
2418 .size = sizeof(uint32_t),
2421 .offset = sizeof(struct ether_hdr) +
2422 offsetof(struct ipv6_hdr, dst_addr) + sizeof(uint32_t),
/* Destination address word 2 (bits 64-95). */
2427 .type = RTE_ACL_FIELD_TYPE_MASK,
2428 .size = sizeof(uint32_t),
2431 .offset = sizeof(struct ether_hdr) +
2432 offsetof(struct ipv6_hdr, dst_addr) + 2 * sizeof(uint32_t),
/* Destination address word 3 (bits 96-127). */
2437 .type = RTE_ACL_FIELD_TYPE_MASK,
2438 .size = sizeof(uint32_t),
2441 .offset = sizeof(struct ether_hdr) +
2442 offsetof(struct ipv6_hdr, dst_addr) + 3 * sizeof(uint32_t),
/* Source Port (range match; offset assumes no IPv6 extension headers). */
2448 .type = RTE_ACL_FIELD_TYPE_RANGE,
2449 .size = sizeof(uint16_t),
2452 .offset = sizeof(struct ether_hdr) +
2453 sizeof(struct ipv6_hdr) + offsetof(struct tcp_hdr, src_port),
2456 /* Destination Port */
2458 .type = RTE_ACL_FIELD_TYPE_RANGE,
2459 .size = sizeof(uint16_t),
2462 .offset = sizeof(struct ether_hdr) +
2463 sizeof(struct ipv6_hdr) + offsetof(struct tcp_hdr, dst_port),
2468 * Parse arguments in config file.
2471 * A pointer to the pipeline.
2473 * A pointer to pipeline specific parameters.
2476 * 0 on success, negative on error.
/*
 * Parse the [PIPELINE] section arguments for the ACL pipeline and populate
 * the pipeline struct (rule count, ACL field format, traffic type, private
 * RX queue ports, flow count). Defaults: 4K rules, plain-IPv4 field format.
 * Returns 0 on success, negative on error (error paths partly elided in
 * this extraction).
 *
 * NOTE(review): interior lines are elided here (duplicate-arg error returns,
 * some closing braces, the `token`/`rxport` declarations) — verify against
 * the full source before editing.
 */
2479 pipeline_acl_parse_args(struct pipeline_acl *p, struct pipeline_params *params)
/* Duplicate-argument guards: each *_present flag is set on first sighting. */
2481 uint32_t n_rules_present = 0;
2482 uint32_t pkt_type_present = 0;
2484 uint8_t prv_que_handler_present = 0;
2485 uint8_t n_prv_in_port = 0;
/* Defaults applied before parsing; also mirrored into the global acl_n_rules. */
2488 p->n_rules = 4 * 1024;
2489 acl_n_rules = 4 * 1024;
2490 p->n_rule_fields = RTE_DIM(field_format_ipv4);
2491 p->field_format = field_format_ipv4;
2492 p->field_format_size = sizeof(field_format_ipv4);
2494 for (i = 0; i < params->n_args; i++) {
2495 char *arg_name = params->args_name[i];
2496 char *arg_value = params->args_value[i];
/* n_rules: size of the ACL rule tables.
 * NOTE(review): atoi() gives 0 on malformed input with no error report;
 * strtol() would be safer if this is ever revisited. */
2498 if (strcmp(arg_name, "n_rules") == 0) {
2499 if (n_rules_present)
2501 n_rules_present = 1;
2503 p->n_rules = atoi(arg_value);
2504 acl_n_rules = atoi(arg_value);
/* pkt_type: selects which rte_acl field-format table to build rules with. */
2508 if (strcmp(arg_name, "pkt_type") == 0) {
2509 if (pkt_type_present)
2511 pkt_type_present = 1;
2514 if (strcmp(arg_value, "ipv4") == 0) {
2515 p->n_rule_fields = RTE_DIM(field_format_ipv4);
2516 p->field_format = field_format_ipv4;
2517 p->field_format_size =
2518 sizeof(field_format_ipv4);
2523 if (strcmp(arg_value, "vlan_ipv4") == 0) {
2525 RTE_DIM(field_format_vlan_ipv4);
2526 p->field_format = field_format_vlan_ipv4;
2527 p->field_format_size =
2528 sizeof(field_format_vlan_ipv4);
2533 if (strcmp(arg_value, "qinq_ipv4") == 0) {
2535 RTE_DIM(field_format_qinq_ipv4);
2536 p->field_format = field_format_qinq_ipv4;
2537 p->field_format_size =
2538 sizeof(field_format_qinq_ipv4);
2543 if (strcmp(arg_value, "ipv6") == 0) {
2544 p->n_rule_fields = RTE_DIM(field_format_ipv6);
2545 p->field_format = field_format_ipv6;
2546 p->field_format_size =
2547 sizeof(field_format_ipv6);
/* traffic_type: must be the IPv4 or IPv6 header-version value.
 * NOTE(review): error message typo "IPVR4/IPVR6" (should read IPv4/IPv6). */
2555 if (strcmp(arg_name, "traffic_type") == 0) {
2556 int traffic_type = atoi(arg_value);
2558 if (traffic_type == 0
2559 || !(traffic_type == IPv4_HDR_VERSION
2560 || traffic_type == IPv6_HDR_VERSION)) {
2561 printf("not IPVR4/IPVR6");
2565 p->traffic_type = traffic_type;
/* prv_que_handler: "(p0,p1,...)" list of private-side RX physical ports,
 * recorded in acl_prv_que_port_index[]. */
2569 if (strcmp(arg_name, "prv_que_handler") == 0) {
2571 if (prv_que_handler_present) {
2572 printf("Duplicate pktq_in_prv ..\n\n");
2575 prv_que_handler_present = 1;
2580 /* get the first token */
/* Strip the surrounding parentheses, then split on commas.
 * NOTE(review): strtok() mutates arg_value and is not reentrant; fine for
 * single-threaded init, but strtok_r() would be the safer idiom. */
2581 token = strtok(arg_value, "(");
2582 token = strtok(token, ")");
2583 token = strtok(token, ",");
2584 printf("***** prv_que_handler *****\n");
2587 printf("string is null\n");
2588 printf("prv_que_handler is invalid\n");
2591 printf("string is :%s\n", token);
2593 while (token != NULL) {
2594 printf(" %s\n", token);
2595 rxport = atoi(token);
2596 acl_prv_que_port_index[n_prv_in_port++] =
2598 token = strtok(NULL, ",");
2601 if (n_prv_in_port == 0) {
2602 printf("VNF common parse err - no prv RX phy port\n");
/* n_flows: connection-tracker capacity; zero is rejected (path elided). */
2609 if (strcmp(arg_name, "n_flows") == 0) {
2610 p->n_flows = atoi(arg_value);
2611 if (p->n_flows == 0)
2614 continue;/* needed when multiple parms are checked */
2623 * Create and initialize Pipeline Back End (BE).
2626 * A pointer to the pipeline.
2628 * A pointer to pipeline specific data.
2631 * A pointer to the pipeline create, NULL on error.
2633 static void *pipeline_acl_init(struct pipeline_params *params,
2634 __rte_unused void *arg)
2637 struct pipeline_acl *p_acl;
2640 /* Check input arguments */
2641 if ((params == NULL) ||
2642 (params->n_ports_in == 0) || (params->n_ports_out == 0))
2645 /* Memory allocation */
2646 size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_acl));
2647 p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
2648 p_acl = (struct pipeline_acl *)p;
2652 strncpy(p->name, params->name, PIPELINE_NAME_SIZE);
2653 p->log_level = params->log_level;
2655 PLOG(p, HIGH, "ACL");
2658 * p_acl->links_map[0] = 0xff;
2659 * p_acl->links_map[1] = 0xff;]
2661 p_acl->traffic_type = IPv4_HDR_VERSION;
2662 for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
2663 p_acl->links_map[i] = 0xff;
2664 p_acl->port_out_id[i] = 0xff;
2665 acl_prv_que_port_index[i] = 0;
2668 p_acl->pipeline_num = 0xff;
2670 /* if(enable_hwlb || enable_flow_dir) */
2671 // lib_arp_init(params, arg);
2673 p_acl->n_flows = 4096; /* small default value */
2674 /* Create a single firewall instance and initialize. */
2675 p_acl->cnxn_tracker =
2676 rte_zmalloc(NULL, rte_ct_get_cnxn_tracker_size(),
2677 RTE_CACHE_LINE_SIZE);
2679 if (p_acl->cnxn_tracker == NULL)
2683 * Now allocate a counter block entry.It appears that the initialization
2684 * of all instances is serialized on core 0, so no lock is necessary.
2686 struct rte_ACL_counter_block *counter_ptr;
2688 if (rte_ACL_hi_counter_block_in_use == MAX_ACL_INSTANCES) {
2689 /* error, exceeded table bounds */
2693 rte_ACL_hi_counter_block_in_use++;
2694 counter_ptr = &rte_acl_counter_table[rte_ACL_hi_counter_block_in_use];
2695 strncpy(counter_ptr->name, params->name,PIPELINE_NAME_SIZE);
2696 p_acl->action_counter_index = rte_ACL_hi_counter_block_in_use;
2698 p_acl->counters = counter_ptr;
2700 rte_ct_initialize_default_timeouts(p_acl->cnxn_tracker);
2701 p_acl->arpPktCount = 0;
2703 /* Parse arguments */
2704 if (pipeline_acl_parse_args(p_acl, params))
2706 /*n_flows already checked, ignore Klockwork issue */
2707 if (p_acl->n_flows > 0) {
2708 rte_ct_initialize_cnxn_tracker(p_acl->cnxn_tracker,
2709 p_acl->n_flows, params->name);
2710 p_acl->counters->ct_counters =
2711 rte_ct_get_counter_address(p_acl->cnxn_tracker);
2713 printf("ACL invalid p_acl->n_flows: %u\n", p_acl->n_flows);
2719 struct rte_pipeline_params pipeline_params = {
2720 .name = params->name,
2721 .socket_id = params->socket_id,
2722 .offset_port_id = META_DATA_OFFSET +
2723 offsetof(struct mbuf_acl_meta_data, output_port),
2726 p->p = rte_pipeline_create(&pipeline_params);
2734 p->n_ports_in = params->n_ports_in;
2735 for (i = 0; i < p->n_ports_in; i++) {
2736 struct rte_pipeline_port_in_params port_params = {
2738 pipeline_port_in_params_get_ops(¶ms->port_in
2741 pipeline_port_in_params_convert(¶ms->port_in
2743 .f_action = pkt_work_acl_key,
2745 .burst_size = params->port_in[i].burst_size,
2747 if (p_acl->traffic_type == IPv4_HDR_VERSION)
2748 port_params.f_action = pkt_work_acl_ipv4_key;
2750 if (p_acl->traffic_type == IPv6_HDR_VERSION)
2751 port_params.f_action = pkt_work_acl_ipv6_key;
2753 int status = rte_pipeline_port_in_create(p->p,
2758 rte_pipeline_free(p->p);
2765 p->n_ports_out = params->n_ports_out;
2766 for (i = 0; i < p->n_ports_out; i++) {
2767 struct rte_pipeline_port_out_params port_params = {
2769 pipeline_port_out_params_get_ops(¶ms->port_out
2772 pipeline_port_out_params_convert(¶ms->port_out
2778 int status = rte_pipeline_port_out_create(p->p,
2780 &p->port_out_id[i]);
2783 rte_pipeline_free(p->p);
2789 int pipeline_num = 0;
2791 int temp = sscanf(params->name, "PIPELINE%d", &pipeline_num);
2792 p_acl->pipeline_num = (uint8_t) pipeline_num;
2793 /* set_phy_outport_map(p_acl->pipeline_num, p_acl->links_map);*/
2794 register_pipeline_Qs(p_acl->pipeline_num, p);
2795 set_link_map(p_acl->pipeline_num, p, p_acl->links_map);
2796 set_outport_id(p_acl->pipeline_num, p, p_acl->port_out_id);
2798 /* If this is the first ACL thread, create common ACL Rule tables */
2799 if (rte_ACL_hi_counter_block_in_use == 0) {
2801 printf("Create ACL Tables rte_socket_id(): %i\n",
2804 /* Create IPV4 ACL Rule Tables */
2805 struct rte_table_acl_params common_ipv4_table_acl_params = {
2807 .n_rules = acl_n_rules,
2808 .n_rule_fields = RTE_DIM(field_format_ipv4),
2811 memcpy(common_ipv4_table_acl_params.field_format,
2812 field_format_ipv4, sizeof(field_format_ipv4));
2814 uint32_t ipv4_entry_size = sizeof(struct acl_table_entry);
2816 acl_rule_table_ipv4_active =
2817 rte_table_acl_ops.f_create(&common_ipv4_table_acl_params,
2821 if (acl_rule_table_ipv4_active == NULL) {
2823 ("Failed to create common ACL IPV4A Rule table\n");
2824 rte_pipeline_free(p->p);
2829 /* Create second IPV4 Table */
2830 common_ipv4_table_acl_params.name = "ACLIPV4B";
2831 acl_rule_table_ipv4_standby =
2832 rte_table_acl_ops.f_create(&common_ipv4_table_acl_params,
2836 if (acl_rule_table_ipv4_standby == NULL) {
2838 ("Failed to create common ACL IPV4B Rule table\n");
2839 rte_pipeline_free(p->p);
2844 /* Create IPV6 ACL Rule Tables */
2845 struct rte_table_acl_params common_ipv6_table_acl_params = {
2847 .n_rules = acl_n_rules,
2848 .n_rule_fields = RTE_DIM(field_format_ipv6),
2851 memcpy(common_ipv6_table_acl_params.field_format,
2852 field_format_ipv6, sizeof(field_format_ipv6));
2854 uint32_t ipv6_entry_size = sizeof(struct acl_table_entry);
2856 acl_rule_table_ipv6_active =
2857 rte_table_acl_ops.f_create(&common_ipv6_table_acl_params,
2861 if (acl_rule_table_ipv6_active == NULL) {
2863 ("Failed to create common ACL IPV6A Rule table\n");
2864 rte_pipeline_free(p->p);
2869 /* Create second IPV6 table */
2870 common_ipv6_table_acl_params.name = "ACLIPV6B";
2871 acl_rule_table_ipv6_standby =
2872 rte_table_acl_ops.f_create(&common_ipv6_table_acl_params,
2876 if (acl_rule_table_ipv6_standby == NULL) {
2878 ("Failed to create common ACL IPV6B Rule table\n");
2879 rte_pipeline_free(p->p);
2889 struct rte_pipeline_table_params table_params = {
2890 .ops = &rte_table_stub_ops,
2892 .f_action_hit = NULL,
2893 .f_action_miss = NULL,
2895 .action_data_size = 0,
2898 int status = rte_pipeline_table_create(p->p,
2903 rte_pipeline_free(p->p);
2908 struct rte_pipeline_table_entry default_entry = {
2909 .action = RTE_PIPELINE_ACTION_PORT_META
2912 struct rte_pipeline_table_entry *default_entry_ptr;
2914 status = rte_pipeline_table_default_entry_add(p->p,
2917 &default_entry_ptr);
2920 rte_pipeline_free(p->p);
2926 /* Connecting input ports to tables */
2927 for (i = 0; i < p->n_ports_in; i++) {
2928 int status = rte_pipeline_port_in_connect_to_table(p->p,
2935 rte_pipeline_free(p->p);
2941 /* Enable input ports */
2942 for (i = 0; i < p->n_ports_in; i++) {
2943 int status = rte_pipeline_port_in_enable(p->p,
2947 rte_pipeline_free(p->p);
2953 /* Check pipeline consistency */
2954 if (rte_pipeline_check(p->p) < 0) {
2955 rte_pipeline_free(p->p);
2960 /* Message queues */
2961 p->n_msgq = params->n_msgq;
2962 for (i = 0; i < p->n_msgq; i++)
2963 p->msgq_in[i] = params->msgq_in[i];
2964 for (i = 0; i < p->n_msgq; i++)
2965 p->msgq_out[i] = params->msgq_out[i];
2967 /* Message handlers */
2968 memcpy(p->handlers, handlers, sizeof(p->handlers));
2969 memcpy(p_acl->custom_handlers,
2970 custom_handlers, sizeof(p_acl->custom_handlers));
2976 * Free resources and delete pipeline.
2979 * A pointer to the pipeline.
2982 * 0 on success, negative on error.
2984 static int pipeline_acl_free(void *pipeline)
/* Recover the generic pipeline object from the opaque BE handle. */
2986 struct pipeline *p = (struct pipeline *)pipeline;
2988 /* Check input arguments */
2992 /* Free resources */
/* Tear down the underlying rte_pipeline instance (ports/tables included). */
2993 rte_pipeline_free(p->p);
2999 * Callback function to map input/output ports.
3002 * A pointer to the pipeline.
3006 * A pointer to the Output port.
3009 * 0 on success, negative on error.
/* Map an input port to its output port for port-tracking queries. */
3012 pipeline_acl_track(void *pipeline,
3013 __rte_unused uint32_t port_in, uint32_t *port_out)
3015 struct pipeline *p = (struct pipeline *)pipeline;
3017 /* Check input arguments */
/* NOTE(review): port_in is marked __rte_unused yet still range-checked here. */
3018 if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
/* Single input port: the mapping is trivial (only one possible route). */
3021 if (p->n_ports_in == 1) {
3030 * Callback function to process timers.
3033 * A pointer to the pipeline.
3036 * 0 on success, negative on error.
/* Periodic callback: drain FE messages, flush the pipeline, expire cnxn timers. */
3038 static int pipeline_acl_timer(void *pipeline)
/* Both casts alias the same object: pipeline_acl embeds struct pipeline first. */
3041 struct pipeline *p = (struct pipeline *)pipeline;
3042 struct pipeline_acl *p_acl = (struct pipeline_acl *)pipeline;
/* Service any pending FE->BE message requests. */
3044 pipeline_msg_req_handle(p);
/* Push out any packets buffered in the pipeline's output ports. */
3045 rte_pipeline_flush(p->p);
/* Age out stale entries in the connection tracker. */
3047 rte_ct_handle_expired_timers(p_acl->cnxn_tracker);
3053 * Callback function to process CLI commands from FE.
3056 * A pointer to the pipeline.
3058 * A pointer to command specific data.
3061 * A pointer to message handler on success,
3062 * pipeline_msg_req_invalid_handler on error.
/* Dispatch a custom FE message to the handler registered for its subtype. */
3064 void *pipeline_acl_msg_req_custom_handler(struct pipeline *p, void *msg)
3066 struct pipeline_acl *p_acl = (struct pipeline_acl *)p;
3067 struct pipeline_custom_msg_req *req = msg;
3068 pipeline_msg_req_handler f_handle;
/* Bounds-check the subtype before indexing the handler table. */
3070 f_handle = (req->subtype < PIPELINE_ACL_MSG_REQS) ?
3071 p_acl->custom_handlers[req->subtype] :
3072 pipeline_msg_req_invalid_handler;
/* An unregistered (NULL) slot also falls back to the invalid handler. */
3074 if (f_handle == NULL)
3075 f_handle = pipeline_msg_req_invalid_handler;
3077 return f_handle(p, req);
3081 * Handler for DBG CLI command.
3084 * A pointer to the pipeline.
3086 * A pointer to command specific data.
3089 * A pointer to response message.
3090 * Response message contains status.
/* Handle the DBG CLI message: toggle debug on (1) / off (0). */
3092 void *pipeline_acl_msg_req_dbg_handler(struct pipeline *p, void *msg)
/* req and rsp alias the same buffer: the response is written in place. */
3095 struct pipeline_acl_dbg_msg_req *req = msg;
3096 struct pipeline_acl_dbg_msg_rsp *rsp = msg;
3098 if (req->dbg == 0) {
3099 printf("DBG turned OFF\n");
3102 } else if (req->dbg == 1) {
3103 printf("DBG turned ON\n");
/* Any value other than 0/1 is rejected. */
3107 printf("Invalid DBG setting\n");
3114 struct pipeline_be_ops pipeline_acl_be_ops = {
3115 .f_init = pipeline_acl_init,
3116 .f_free = pipeline_acl_free,
3118 .f_timer = pipeline_acl_timer,
3119 .f_track = pipeline_acl_track,