2 // Copyright (c) 2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
19 * Pipeline ACL BE Implementation.
21 * Implementation of Pipeline ACL Back End (BE).
22 * Responsible for packet processing.
27 #include <rte_common.h>
28 #include <rte_malloc.h>
29 #include <rte_ether.h>
32 #include <rte_byteorder.h>
33 #include <rte_table_acl.h>
34 #include <rte_table_stub.h>
35 #include "pipeline_arpicmp_be.h"
36 #include "vnf_common.h"
37 #include "pipeline_common_be.h"
38 #include <rte_pipeline.h>
41 #include <rte_timer.h>
42 #include <rte_cycles.h>
44 #include "pipeline_acl.h"
45 #include "pipeline_acl_be.h"
46 #include "rte_cnxn_tracking.h"
47 #include "pipeline_actions_common.h"
49 #include "lib_icmpv6.h"
/* Per-input-port indices of the private-queue ports (file-scope state).
 * NOTE(review): this extract carries stray leading line numbers ("50 ", ...)
 * fused into each code line by the extraction tool; they are preserved
 * byte-for-byte here and must be stripped against the pristine source. */
50 static uint8_t acl_prv_que_port_index[PIPELINE_MAX_PORT_IN];
/* Declared extern; defined in another translation unit. Expands an IPv6
 * prefix length (depth) into a 16-byte netmask written to netmask_ipv6[]. */
51 extern void convert_prefixlen_to_netmask_ipv6(uint32_t depth,
52 uint8_t netmask_ipv6[]);
59 * A structure defining the ACL pipeline per thread data.
63 pipeline_msg_req_handler custom_handlers[PIPELINE_ACL_MSG_REQS];
66 uint32_t n_rule_fields;
67 struct rte_acl_field_def *field_format;
68 uint32_t field_format_size;
70 /* Connection Tracker */
71 struct rte_ct_cnxn_tracker *cnxn_tracker;
72 struct rte_ACL_counter_block *counters;
73 int action_counter_index;
74 /* timestamp retrieved during in-port computations */
75 uint64_t in_port_time_stamp;
80 uint8_t links_map[PIPELINE_MAX_PORT_IN];
81 uint8_t port_out_id[PIPELINE_MAX_PORT_IN];
83 struct acl_table_entry *acl_entries_ipv4[RTE_PORT_IN_BURST_SIZE_MAX];
84 struct acl_table_entry *acl_entries_ipv6[RTE_PORT_IN_BURST_SIZE_MAX];
86 /* Local ARP & ND Tables */
87 struct lib_arp_route_table_entry
88 local_lib_arp_route_table[MAX_ARP_RT_ENTRY];
89 uint8_t local_lib_arp_route_ent_cnt;
90 struct lib_nd_route_table_entry
91 local_lib_nd_route_table[MAX_ND_RT_ENTRY];
92 uint8_t local_lib_nd_route_ent_cnt;
94 } __rte_cache_aligned;
97 * A structure defining the mbuf meta data for ACL.
/* Per-packet metadata the ACL pipeline stores in the mbuf headroom
 * (accessed via the RTE_MBUF_METADATA_* macros at META_DATA_OFFSET). */
99 struct mbuf_acl_meta_data {
100 /* output port stored for RTE_PIPELINE_ACTION_PORT_META */
101 uint32_t output_port;
102 /* next hop ip address used by ARP code */
/* NOTE(review): the next-hop member itself is absent from this extract
 * (embedded numbering jumps 102 -> 104) -- confirm against full source. */
104 } __rte_cache_aligned;
/* Byte offset into the mbuf where ACL metadata begins -- presumably the
 * location of struct mbuf_acl_meta_data in the headroom; TODO confirm. */
106 #define META_DATA_OFFSET 128
/* One counter block per ACL pipeline instance.
 * NOTE(review): the declaration's terminating line (likely
 * "__rte_cache_aligned;") is missing from this extract. */
108 struct rte_ACL_counter_block rte_acl_counter_table[MAX_ACL_INSTANCES]
/* Highest counter-block index handed out so far; -1 means none in use. */
110 int rte_ACL_hi_counter_block_in_use = -1;
112 /* a spin lock used during acl initialization only */
113 rte_spinlock_t rte_ACL_init_lock = RTE_SPINLOCK_INITIALIZER;
/* Action-table storage: two buffers (a/b) with active/standby pointers --
 * appears to be a double-buffering scheme for runtime rule swaps; confirm. */
116 struct pipeline_action_key *action_array_a;
117 struct pipeline_action_key *action_array_b;
118 struct pipeline_action_key *action_array_active;
119 struct pipeline_action_key *action_array_standby;
120 uint32_t action_array_size;
/* Per-instance, per-action packet/byte counters.
 * NOTE(review): terminating line of this declaration also missing here. */
122 struct action_counter_block
123 action_counter_table[MAX_ACL_INSTANCES][action_array_max]
/* Forward declaration for the custom-message dispatcher registered below. */
126 static void *pipeline_acl_msg_req_custom_handler(struct pipeline *p, void *msg);
/* Dispatch table mapping generic pipeline message-request IDs to handlers.
 * NOTE(review): the closing "};" line is missing from this extract. */
128 static pipeline_msg_req_handler handlers[] = {
129 [PIPELINE_MSG_REQ_PING] = pipeline_msg_req_ping_handler,
130 [PIPELINE_MSG_REQ_STATS_PORT_IN] =
131 pipeline_msg_req_stats_port_in_handler,
132 [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
133 pipeline_msg_req_stats_port_out_handler,
134 [PIPELINE_MSG_REQ_STATS_TABLE] = pipeline_msg_req_stats_table_handler,
135 [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
136 pipeline_msg_req_port_in_enable_handler,
137 [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
138 pipeline_msg_req_port_in_disable_handler,
139 [PIPELINE_MSG_REQ_CUSTOM] = pipeline_acl_msg_req_custom_handler,
142 static void *pipeline_acl_msg_req_dbg_handler(struct pipeline *p, void *msg);
/* ACL-specific custom message handlers (currently only the debug request).
 * NOTE(review): closing "};" line missing from this extract as well. */
144 static pipeline_msg_req_handler custom_handlers[] = {
145 [PIPELINE_ACL_MSG_REQ_DBG] = pipeline_acl_msg_req_dbg_handler,
/* Bitmask of packets hijacked for ARP/ND resolution; set in the pkt_work
 * handlers below and consumed via rte_pipeline_ah_packet_hijack(). */
147 uint64_t arp_pkts_mask;
/* Look up the next hop for an IPv4 destination in this pipeline's local
 * copy of the ARP route table: an entry matches when (entry.ip & entry.mask)
 * equals (ip & entry.mask); on a match the entry's port and next-hop IP are
 * written through *port / *nhip.
 * NOTE(review): the extract drops interior lines (return statements, closing
 * braces, and the *port parameter's declaration line) -- the visible text is
 * not the complete function. */
150 uint32_t local_get_nh_ipv4(uint32_t ip,
152 uint32_t *nhip, struct pipeline_acl *p_acl)
156 for (i = 0; i < p_acl->local_lib_arp_route_ent_cnt; i++) {
157 if (((p_acl->local_lib_arp_route_table[i].ip &
158 p_acl->local_lib_arp_route_table[i].mask) ==
159 (ip & p_acl->local_lib_arp_route_table[i].mask))) {
160 *port = p_acl->local_lib_arp_route_table[i].port;
162 *nhip = p_acl->local_lib_arp_route_table[i].nh;
/* IPv6 counterpart of local_get_nh_ipv4(): walks the pipeline's local ND
 * route table, expands each entry's prefix length into a 16-byte netmask,
 * masks both the entry's address and the packet's address byte-by-byte, and
 * on an exact masked match copies out the entry's port and next hop.
 * NOTE(review): many interior lines (flag accumulation, netip_in assignment,
 * returns, closing braces) are missing from this extract. */
169 static uint32_t local_get_nh_ipv6(uint8_t *ip,
171 uint8_t nhip[], struct pipeline_acl *p_acl)
174 uint8_t netmask_ipv6[16],netip_nd[16],netip_in[16];
175 uint8_t k = 0, l = 0, depthflags = 0, depthflags1 = 0;
176 memset (netmask_ipv6, 0, sizeof(netmask_ipv6));
177 memset (netip_nd, 0, sizeof(netip_nd));
178 memset (netip_in, 0, sizeof(netip_in));
180 for (i = 0; i < p_acl->local_lib_nd_route_ent_cnt; i++) {
/* Expand this entry's prefix length into netmask_ipv6[]. */
182 convert_prefixlen_to_netmask_ipv6
183 (p_acl->local_lib_nd_route_table[i].depth, netmask_ipv6);
/* Collect the masked bytes of the table entry's address. */
185 for (k = 0; k < 16; k++)
186 if (p_acl->local_lib_nd_route_table[i].ipv6[k] &
189 netip_nd[k] = p_acl->
190 local_lib_nd_route_table[i].ipv6[k];
/* Collect the masked bytes of the packet's destination address. */
194 for (l = 0; l < 16; l++)
195 if (ip[l] & netmask_ipv6[l]) {
/* Match: same number of masked bytes and identical masked address. */
202 if ((depthflags == depthflags1) && (memcmp(netip_nd, netip_in,
203 sizeof(netip_nd)) == 0)){
204 *port = p_acl->local_lib_nd_route_table[i].port;
206 for (j = 0; j < 16; j++)
208 p_acl->local_lib_nd_route_table[i].
/* Divert ARP and locally-addressed ICMP/ICMPv6 packets out of the fast path:
 * inspects the ethertype at the mbuf's Ethernet header and, for matching
 * control traffic destined to this link's IP, re-inserts the packet on the
 * last output port (out_port) and bumps arpPktCount. Caller uses the return
 * value to drop the packet from its processing mask.
 * NOTE(review): this extract omits the case labels, closing braces, and
 * return statements of the switch -- visible text is not the full function. */
219 static uint8_t check_arp_icmp(struct rte_mbuf *pkt,
220 uint64_t pkt_mask, struct pipeline_acl *p_acl)
/* Ethertype lives 12 bytes into the Ethernet header in the headroom. */
222 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
223 struct ipv6_hdr *ipv6_h;
224 uint16_t *eth_proto =
225 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
226 struct app_link_params *link;
228 //uint32_t *port_out_id = RTE_MBUF_METADATA_UINT32_PTR(pk
229 // offsetof(struct mbuf_acl_meta_dat
231 /* ARP outport number */
232 uint16_t out_port = p_acl->p.n_ports_out - 1;
235 uint32_t prot_offset;
/* Link parameters (including the link's own IP) for the ingress port. */
237 link = &myApp->link_params[pkt->port];
239 switch (rte_be_to_cpu_16(*eth_proto)) {
/* (case label missing in extract) -- ARP: punt to the ARP out port. */
242 rte_pipeline_port_out_packet_insert(p_acl->p.p, out_port, pkt);
245 * Pkt mask should be changed, and not changing the
248 p_acl->arpPktCount++;
253 /* header room + eth hdr size +
254 * src_aadr offset in ip header
256 uint32_t dst_addr_offset = MBUF_HDR_ROOM +
257 ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
258 uint32_t *dst_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt,
260 prot_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE +
261 IP_HDR_PROTOCOL_OFST;
262 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt,
/* ICMP addressed to this link's own IPv4 address. */
264 if ((*protocol == IP_PROTOCOL_ICMP) &&
265 link->ip == rte_be_to_cpu_32(*dst_addr)) {
267 if (is_phy_port_privte(pkt->port)) {
269 rte_pipeline_port_out_packet_insert
270 (p_acl->p.p, out_port, pkt);
272 * Pkt mask should be changed,
273 * and not changing the drop mask
275 p_acl->arpPktCount++;
/* IPv6 branch: same check for ICMPv6 to the link's address. */
287 uint32_t dst_addr_offset = MBUF_HDR_ROOM +
288 ETH_HDR_SIZE + IPV6_HDR_DST_ADR_OFST;
289 uint32_t *dst_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt,
292 uint32_t prot_offset_ipv6 = MBUF_HDR_ROOM +
293 ETH_HDR_SIZE + IPV6_HDR_PROTOCOL_OFST;
294 struct ipv6_hdr *ipv6_h;
/* NOTE(review): this cast adds MBUF_HDR_ROOM to the *type-cast pointer*
 * rather than to a byte pointer -- looks like pointer-arithmetic scaled by
 * sizeof(struct ipv6_hdr); verify against the pristine source. */
296 ipv6_h = (struct ipv6_hdr *)MBUF_HDR_ROOM +
298 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt,
/* Only the last 32-bit word of the IPv6 destination is compared to
 * link->ip here -- TODO confirm this is the intended match. */
301 if ((ipv6_h->proto == ICMPV6_PROTOCOL_ID) &&
302 (link->ip == rte_be_to_cpu_32(dst_addr[3]))) {
304 if (is_phy_port_privte(pkt->port)) {
306 rte_pipeline_port_out_packet_insert
307 (p_acl->p.p, out_port, pkt);
309 * Pkt mask should be changed,
310 * and not changing the drop mask
312 p_acl->arpPktCount++;
/* Start of the L3 header within the mbuf headroom. */
322 #define IP_START (MBUF_HDR_ROOM + ETH_HDR_SIZE)
325 ipv6_h = (struct ipv6_hdr *)
326 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
328 if ((ipv6_h->proto == ICMPV6_PROTOCOL_ID) &&
330 rte_be_to_cpu_32(ipv6_h->dst_addr[3]))) {
332 if (is_phy_port_privte(pkt->port)) {
333 rte_pipeline_port_out_packet_insert(
338 p_acl->arpPktCount++;
353 * Print packet for debugging.
356 * A pointer to the packet.
/* Debug helper: hex-dumps the first 400 bytes (20x20 grid) of the mbuf,
 * starting at metadata offset 0.
 * NOTE(review): extract omits the opening brace, the i/j declarations,
 * the per-row newline, and the closing brace. */
359 void print_pkt_acl(struct rte_mbuf *pkt)
363 printf("Packet Contents:\n");
364 uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
366 for (i = 0; i < 20; i++) {
367 for (j = 0; j < 20; j++)
368 printf("%02x ", rd[(20 * i) + j]);
374 * Main packet processing function.
375 * 64 packet bit mask are used to identify which packets to forward.
376 * Performs the following:
377 * - Burst lookup packets in the IPv4 ACL Rule Table.
378 * - Burst lookup packets in the IPv6 ACL Rule Table.
379 * - Lookup Action Table, perform actions.
380 * - Burst lookup Connection Tracking, if enabled.
381 * - Lookup MAC address.
383 * - Packets with bit mask set are forwarded
386 * A pointer to the pipeline.
388 * A pointer to a burst of packets.
390 * Number of packets to process.
392 * A pointer to pipeline specific data.
395 * 0 on success, negative on error.
398 pkt_work_acl_key(struct rte_pipeline *p,
399 struct rte_mbuf **pkts, uint32_t n_pkts, void *arg)
402 struct pipeline_acl *p_acl = arg;
404 p_acl->counters->pkts_received =
405 p_acl->counters->pkts_received + n_pkts;
407 printf("pkt_work_acl_key pkts_received: %" PRIu64
408 " n_pkts: %u\n", p_acl->counters->pkts_received, n_pkts);
410 uint64_t lookup_hit_mask = 0;
411 uint64_t lookup_hit_mask_ipv4 = 0;
412 uint64_t lookup_hit_mask_ipv6 = 0;
413 uint64_t lookup_miss_mask = 0;
414 uint64_t conntrack_mask = 0;
415 uint64_t connexist_mask = 0;
416 uint32_t dest_address = 0;
420 uint64_t pkts_drop_mask, pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
421 uint64_t keep_mask = pkts_mask;
425 p_acl->in_port_time_stamp = rte_get_tsc_cycles();
427 if (acl_ipv4_enabled) {
429 printf("ACL IPV4 Lookup Mask Before = %p\n",
432 rte_table_acl_ops.f_lookup(acl_rule_table_ipv4_active, pkts,
433 pkts_mask, &lookup_hit_mask_ipv4,
435 p_acl->acl_entries_ipv4);
437 printf("ACL IPV4 Lookup Mask After = %p\n",
438 (void *)lookup_hit_mask_ipv4);
441 if (acl_ipv6_enabled) {
443 printf("ACL IPV6 Lookup Mask Before = %p\n",
446 rte_table_acl_ops.f_lookup(acl_rule_table_ipv6_active, pkts,
447 pkts_mask, &lookup_hit_mask_ipv6,
449 p_acl->acl_entries_ipv6);
451 printf("ACL IPV6 Lookup Mask After = %p\n",
452 (void *)lookup_hit_mask_ipv6);
455 /* Merge lookup results since we process both IPv4 and IPv6 below */
456 lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
458 printf("ACL Lookup Mask After = %p\n", (void *)lookup_hit_mask);
460 lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
461 pkts_mask = lookup_hit_mask;
462 p_acl->counters->pkts_drop += __builtin_popcountll(lookup_miss_mask);
464 printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
465 p_acl->counters->pkts_drop,
466 __builtin_popcountll(lookup_miss_mask));
468 uint64_t pkts_to_process = lookup_hit_mask;
469 /* bitmap of packets left to process for ARP */
471 for (; pkts_to_process;) {
472 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
473 uint64_t pkt_mask = 1LLU << pos;
474 /* bitmask representing only this packet */
476 pkts_to_process &= ~pkt_mask;
477 /* remove this packet from remaining list */
478 struct rte_mbuf *pkt = pkts[pos];
481 if (!check_arp_icmp(pkt, pkt_mask, p_acl)) {
482 pkts_mask &= ~(1LLU << pos);
487 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
488 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
490 if (hdr_chk == IPv4_HDR_VERSION) {
492 struct acl_table_entry *entry =
493 (struct acl_table_entry *)
494 p_acl->acl_entries_ipv4[pos];
495 uint16_t phy_port = entry->head.port_id;
496 uint32_t action_id = entry->action_id;
499 printf("action_id = %u\n", action_id);
501 uint32_t dscp_offset =
502 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DSCP_OFST;
504 if (action_array_active[action_id].action_bitmap &
507 [p_acl->action_counter_index]
508 [action_id].packetCount++;
510 [p_acl->action_counter_index]
511 [action_id].byteCount +=
512 rte_pktmbuf_pkt_len(pkt);
514 printf("Action Count Packet Count: %"
515 PRIu64 " Byte Count: %" PRIu64
518 [p_acl->action_counter_index]
519 [action_id].packetCount,
521 [p_acl->action_counter_index]
522 [action_id].byteCount);
525 if (action_array_active[action_id].action_bitmap &
526 acl_action_packet_drop) {
528 /* Drop packet by changing the mask */
530 printf("ACL before drop pkt_mask "
531 " %lu, pkt_num %d\n",
533 pkts_mask &= ~(1LLU << pos);
535 printf("ACL after drop pkt_mask "
538 p_acl->counters->pkts_drop++;
541 if (action_array_active[action_id].action_bitmap &
544 action_array_active[action_id].fwd_port;
545 entry->head.port_id = phy_port;
547 printf("Action FWD Port ID: %u\n",
551 if (action_array_active[action_id].action_bitmap &
554 action_array_active[action_id].nat_port;
555 entry->head.port_id = phy_port;
557 printf("Action NAT Port ID: %u\n",
561 if (action_array_active[action_id].action_bitmap &
564 /* Set DSCP priority */
565 uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
568 action_array_active[action_id].dscp_priority
572 ("Action DSCP DSCP Priority: %u\n",
576 if (action_array_active[action_id].action_bitmap &
577 acl_action_packet_accept) {
579 printf("Action Accept\n");
581 if (action_array_active[action_id].action_bitmap
582 & acl_action_conntrack) {
584 /* Set conntrack bit for this pkt */
585 conntrack_mask |= pkt_mask;
587 printf("ACL Conntrack enabled: "
589 (void *)conntrack_mask,
593 if (action_array_active[action_id].action_bitmap
594 & acl_action_connexist) {
596 /* Set conntrack bit for this pkt */
597 conntrack_mask |= pkt_mask;
599 /* Set connexist bit for this pkt for public -> private */
600 /* Private -> public packet will open the connection */
601 if (action_array_active
602 [action_id].private_public ==
604 connexist_mask |= pkt_mask;
607 printf("ACL Connexist enabled "
608 "conntrack: %p connexist: %p pkt_mask: %p\n",
609 (void *)conntrack_mask,
610 (void *)connexist_mask,
616 if (hdr_chk == IPv6_HDR_VERSION) {
618 struct acl_table_entry *entry =
619 (struct acl_table_entry *)
620 p_acl->acl_entries_ipv6[pos];
621 uint16_t phy_port = entry->head.port_id;
622 uint32_t action_id = entry->action_id;
625 printf("action_id = %u\n", action_id);
627 if (action_array_active[action_id].action_bitmap &
630 [p_acl->action_counter_index]
631 [action_id].packetCount++;
633 [p_acl->action_counter_index]
634 [action_id].byteCount +=
635 rte_pktmbuf_pkt_len(pkt);
637 printf("Action Count Packet Count: %"
638 PRIu64 " Byte Count: %" PRIu64
641 [p_acl->action_counter_index]
642 [action_id].packetCount,
644 [p_acl->action_counter_index]
645 [action_id].byteCount);
648 if (action_array_active[action_id].action_bitmap &
649 acl_action_packet_drop) {
650 /* Drop packet by changing the mask */
652 printf("ACL before drop pkt_mask "
655 pkts_mask &= ~(1LLU << pos);
657 printf("ACL after drop pkt_mask "
660 p_acl->counters->pkts_drop++;
664 if (action_array_active[action_id].action_bitmap &
667 action_array_active[action_id].fwd_port;
668 entry->head.port_id = phy_port;
670 printf("Action FWD Port ID: %u\n",
674 if (action_array_active[action_id].action_bitmap &
677 action_array_active[action_id].nat_port;
678 entry->head.port_id = phy_port;
680 printf("Action NAT Port ID: %u\n",
684 if (action_array_active[action_id].action_bitmap &
687 /* Set DSCP priority */
688 uint32_t dscp_offset =
689 MBUF_HDR_ROOM + ETH_HDR_SIZE +
690 IP_HDR_DSCP_OFST_IPV6;
692 RTE_MBUF_METADATA_UINT16_PTR(pkt,
694 uint16_t dscp_value =
696 (RTE_MBUF_METADATA_UINT16
697 (pkt, dscp_offset)) & 0XF00F);
699 action_array_active[action_id].dscp_priority
701 uint16_t dscp_temp = dscp_store;
703 dscp_temp = dscp_temp << 4;
704 *dscp = rte_bswap16(dscp_temp | dscp_value);
707 ("Action DSCP DSCP Priority: %u\n",
711 if (action_array_active[action_id].action_bitmap &
712 acl_action_packet_accept) {
714 printf("Action Accept\n");
716 if (action_array_active[action_id].action_bitmap
717 & acl_action_conntrack) {
719 /* Set conntrack bit for this pkt */
720 conntrack_mask |= pkt_mask;
722 printf("ACL Conntrack enabled: "
723 " %p pkt_mask: %p\n",
724 (void *)conntrack_mask,
728 if (action_array_active[action_id].action_bitmap
729 & acl_action_connexist) {
731 /* Set conntrack bit for this pkt */
732 conntrack_mask |= pkt_mask;
734 /* Set connexist bit for this pkt for public -> private */
735 /* Private -> public packet will open the connection */
736 if (action_array_active
737 [action_id].private_public ==
739 connexist_mask |= pkt_mask;
742 printf("ACL Connexist enabled "
743 "conntrack: %p connexist: %p pkt_mask: %p\n",
744 (void *)conntrack_mask,
745 (void *)connexist_mask,
752 /* Only call connection tracker if required */
753 if (conntrack_mask > 0) {
756 ("ACL Call Conntrack Before = %p Connexist = %p\n",
757 (void *)conntrack_mask, (void *)connexist_mask);
759 rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control
760 (p_acl->cnxn_tracker, pkts, conntrack_mask, connexist_mask);
762 printf("ACL Call Conntrack After = %p\n",
763 (void *)conntrack_mask);
765 /* Only change pkt mask for pkts that have conntrack enabled */
766 /* Need to loop through packets to check if conntrack enabled */
767 pkts_to_process = pkts_mask;
768 for (; pkts_to_process;) {
769 uint32_t action_id = 0;
771 (uint8_t) __builtin_ctzll(pkts_to_process);
772 uint64_t pkt_mask = 1LLU << pos;
773 /* bitmask representing only this packet */
775 pkts_to_process &= ~pkt_mask;
776 /* remove this packet from remaining list */
777 struct rte_mbuf *pkt = pkts[pos];
779 uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt,
784 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
785 if (hdr_chk == IPv4_HDR_VERSION) {
786 struct acl_table_entry *entry =
787 (struct acl_table_entry *)
788 p_acl->acl_entries_ipv4[pos];
789 action_id = entry->action_id;
791 struct acl_table_entry *entry =
792 (struct acl_table_entry *)
793 p_acl->acl_entries_ipv6[pos];
794 action_id = entry->action_id;
797 if ((action_array_active[action_id].action_bitmap &
798 acl_action_conntrack)
799 || (action_array_active[action_id].action_bitmap &
800 acl_action_connexist)) {
802 if (conntrack_mask & pkt_mask) {
804 printf("ACL Conntrack Accept "
808 /* Drop packet by changing the mask */
810 printf("ACL Conntrack Drop "
813 pkts_mask &= ~pkt_mask;
814 p_acl->counters->pkts_drop++;
820 pkts_to_process = pkts_mask;
821 /* bitmap of packets left to process for ARP */
823 for (; pkts_to_process;) {
824 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
825 uint64_t pkt_mask = 1LLU << pos;
826 /* bitmask representing only this packet */
828 pkts_to_process &= ~pkt_mask;
829 /* remove this packet from remaining list */
830 struct rte_mbuf *pkt = pkts[pos];
833 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
834 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
836 if (hdr_chk == IPv4_HDR_VERSION) {
838 struct acl_table_entry *entry =
839 (struct acl_table_entry *)
840 p_acl->acl_entries_ipv4[pos];
841 uint16_t phy_port = pkt->port;
842 uint32_t *port_out_id =
843 RTE_MBUF_METADATA_UINT32_PTR(pkt,
850 ("phy_port = %i, links_map[phy_port] = %i\n",
851 phy_port, p_acl->links_map[phy_port]);
853 /* header room + eth hdr size + dst_adr offset in ip header */
854 uint32_t dst_addr_offset =
855 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
857 RTE_MBUF_METADATA_UINT32_PTR(pkt, dst_addr_offset);
859 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
861 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
862 struct ether_addr hw_addr;
863 uint32_t dest_address = rte_bswap32(*dst_addr);
864 uint32_t *nhip = RTE_MBUF_METADATA_UINT32_PTR(pkt,
871 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
873 struct arp_entry_data *ret_arp_data = NULL;
874 ret_arp_data = get_dest_mac_addr_port
875 (dest_address, &dest_if, (struct ether_addr *) eth_dest);
876 *port_out_id = p_acl->port_out_id[dest_if];
877 if (arp_cache_dest_mac_present(dest_if)) {
878 ether_addr_copy(get_link_hw_addr(dest_if),
879 (struct ether_addr *)eth_src);
880 update_nhip_access(dest_if);
881 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
882 printf("sending buffered packets\n");
883 arp_send_buffered_pkts(ret_arp_data,
884 (struct ether_addr *)eth_dest, *port_out_id);
887 p_acl->counters->tpkts_processed++;
888 p_acl->counters->bytes_processed +=
891 if (unlikely(ret_arp_data == NULL)) {
893 printf("%s: NHIP Not Found, "
894 "outport_id: %d\n", __func__,
898 pkts_mask &= ~(1LLU << pos);
900 printf("ACL after drop pkt_mask "
903 p_acl->counters->pkts_drop++;
907 if (ret_arp_data->status == INCOMPLETE ||
908 ret_arp_data->status == PROBE) {
909 if (ret_arp_data->num_pkts >= NUM_DESC) {
911 pkts_mask &= ~(1LLU << pos);
913 printf("ACL after drop pkt_mask "
916 p_acl->counters->pkts_drop++;
919 arp_pkts_mask |= pkt_mask;
920 arp_queue_unresolved_packet(ret_arp_data,
927 } /* end of if (hdr_chk == IPv4_HDR_VERSION) */
929 if (hdr_chk == IPv6_HDR_VERSION) {
931 struct acl_table_entry *entry =
932 (struct acl_table_entry *)
933 p_acl->acl_entries_ipv6[pos];
934 //uint16_t phy_port = entry->head.port_id;
935 uint16_t phy_port = pkt->port;
936 uint32_t *port_out_id =
937 RTE_MBUF_METADATA_UINT32_PTR(pkt,
942 /*if (is_phy_port_privte(phy_port))
943 *port_out_id = ACL_PUB_PORT_ID;
945 *port_out_id = ACL_PRV_PORT_ID;*/
947 /* *port_out_id = p_acl->links_map[phy_port]; */
949 printf("phy_port = %i, "
950 "links_map[phy_port] = %i\n",
951 phy_port, p_acl->links_map[phy_port]);
953 /* header room + eth hdr size + dst_adr offset in ip header */
954 uint32_t dst_addr_offset =
955 MBUF_HDR_ROOM + ETH_HDR_SIZE +
956 IP_HDR_DST_ADR_OFST_IPV6;
958 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
960 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
961 struct ether_addr hw_addr;
962 uint8_t dest_address[16];
966 RTE_MBUF_METADATA_UINT8(pkt,
971 uint8_t *dst_addr[16];
972 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
975 for (i = 0; i < 16; i++) {
977 RTE_MBUF_METADATA_UINT8_PTR(pkt,
981 memcpy(dest_address, *dst_addr, sizeof(dest_address));
982 memset(nhip, 0, sizeof(nhip));
984 struct nd_entry_data *ret_nd_data = NULL;
985 ret_nd_data = get_dest_mac_address_ipv6_port
986 (dest_address, &dest_if, &hw_addr, &nhip[0]);
987 *port_out_id = p_acl->port_out_id[dest_if];
988 if (nd_cache_dest_mac_present(dest_if)) {
989 ether_addr_copy(get_link_hw_addr(dest_if),
990 (struct ether_addr *)eth_src);
991 update_nhip_access(dest_if);
993 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
994 printf("sending buffered packets\n");
995 p_acl->counters->tpkts_processed +=
996 ret_nd_data->num_pkts;
997 nd_send_buffered_pkts(ret_nd_data,
998 (struct ether_addr *)eth_dest, *port_out_id);
1000 p_acl->counters->tpkts_processed++;
1001 p_acl->counters->bytes_processed +=
1004 if (unlikely(ret_nd_data == NULL)) {
1006 printf("ACL before drop pkt_mask "
1007 "%lu, pkt_num %d\n", pkts_mask, pos);
1008 pkts_mask &= ~(1LLU << pos);
1010 printf("ACL after drop pkt_mask "
1011 "%lu, pkt_num %d\n", pkts_mask, pos);
1012 p_acl->counters->pkts_drop++;
1016 if (ret_nd_data->status == INCOMPLETE ||
1017 ret_nd_data->status == PROBE) {
1018 if (ret_nd_data->num_pkts >= NUM_DESC) {
1021 printf("ACL before drop pkt_mask "
1022 "%lu, pkt_num %d\n", pkts_mask, pos);
1023 pkts_mask &= ~(1LLU << pos);
1025 printf("ACL after drop pkt_mask "
1026 "%lu, pkt_num %d\n", pkts_mask, pos);
1027 p_acl->counters->pkts_drop++;
1030 arp_pkts_mask |= pkt_mask;
1031 nd_queue_unresolved_packet(ret_nd_data,
1039 } /* if (hdr_chk == IPv6_HDR_VERSION) */
1043 pkts_drop_mask = keep_mask & ~pkts_mask;
1044 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1045 keep_mask = pkts_mask;
1047 if (arp_pkts_mask) {
1048 keep_mask &= ~(arp_pkts_mask);
1049 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
1052 /* don't bother measuring if traffic very low, might skew stats */
1053 uint32_t packets_this_iteration = __builtin_popcountll(pkts_mask);
1055 if (packets_this_iteration > 1) {
1056 uint64_t latency_this_iteration =
1057 rte_get_tsc_cycles() - p_acl->in_port_time_stamp;
1059 p_acl->counters->sum_latencies += latency_this_iteration;
1060 p_acl->counters->count_latencies++;
1064 printf("Leaving pkt_work_acl_key pkts_mask = %p\n",
1071 * Main packet processing function.
1072 * 64 packet bit mask are used to identify which packets to forward.
1073 * Performs the following:
1074 * - Burst lookup packets in the IPv4 ACL Rule Table.
1075 * - Burst lookup packets in the IPv6 ACL Rule Table.
1076 * - Lookup Action Table, perform actions.
1077 * - Burst lookup Connection Tracking, if enabled.
1078 * - Lookup MAC address.
1080 * - Packets with bit mask set are forwarded
1083 * A pointer to the pipeline.
1085 * A pointer to a burst of packets.
1087 * Number of packets to process.
1089 * A pointer to pipeline specific data.
1092 * 0 on success, negative on error.
1095 pkt_work_acl_ipv4_key(struct rte_pipeline *p,
1096 struct rte_mbuf **pkts, uint32_t n_pkts, void *arg)
1099 struct pipeline_acl *p_acl = arg;
1101 p_acl->counters->pkts_received =
1102 p_acl->counters->pkts_received + n_pkts;
1104 printf("pkt_work_acl_key pkts_received: %" PRIu64
1105 " n_pkts: %u\n", p_acl->counters->pkts_received, n_pkts);
1107 uint64_t lookup_hit_mask = 0;
1108 uint64_t lookup_hit_mask_ipv4 = 0;
1109 uint64_t lookup_hit_mask_ipv6 = 0;
1110 uint64_t lookup_miss_mask = 0;
1111 uint64_t conntrack_mask = 0;
1112 uint64_t connexist_mask = 0;
1113 uint32_t dest_address = 0;
1117 uint64_t pkts_drop_mask, pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
1118 uint64_t keep_mask = pkts_mask;
1122 p_acl->in_port_time_stamp = rte_get_tsc_cycles();
1124 if (acl_ipv4_enabled) {
1126 printf("ACL IPV4 Lookup Mask Before = %p\n",
1129 rte_table_acl_ops.f_lookup(acl_rule_table_ipv4_active, pkts,
1130 pkts_mask, &lookup_hit_mask_ipv4,
1132 p_acl->acl_entries_ipv4);
1134 printf("ACL IPV4 Lookup Mask After = %p\n",
1135 (void *)lookup_hit_mask_ipv4);
1138 /* Merge lookup results since we process both IPv4 and IPv6 below */
1139 lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
1141 printf("ACL Lookup Mask After = %p\n", (void *)lookup_hit_mask);
1143 lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
1144 pkts_mask = lookup_hit_mask;
1145 p_acl->counters->pkts_drop += __builtin_popcountll(lookup_miss_mask);
1147 printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
1148 p_acl->counters->pkts_drop,
1149 __builtin_popcountll(lookup_miss_mask));
1151 uint64_t pkts_to_process = lookup_hit_mask;
1152 /* bitmap of packets left to process for ARP */
1154 for (; pkts_to_process;) {
1155 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
1156 uint64_t pkt_mask = 1LLU << pos;
1157 /* bitmask representing only this packet */
1159 pkts_to_process &= ~pkt_mask;
1160 /* remove this packet from remaining list */
1161 struct rte_mbuf *pkt = pkts[pos];
1164 if (!check_arp_icmp(pkt, pkt_mask, p_acl)) {
1165 pkts_mask &= ~(1LLU << pos);
1170 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
1171 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1173 if (hdr_chk == IPv4_HDR_VERSION) {
1174 struct acl_table_entry *entry =
1175 (struct acl_table_entry *)
1176 p_acl->acl_entries_ipv4[pos];
1177 uint16_t phy_port = entry->head.port_id;
1178 uint32_t action_id = entry->action_id;
1181 printf("action_id = %u\n", action_id);
1183 uint32_t dscp_offset =
1184 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DSCP_OFST;
1186 if (action_array_active[action_id].action_bitmap &
1188 action_counter_table
1189 [p_acl->action_counter_index]
1190 [action_id].packetCount++;
1191 action_counter_table
1192 [p_acl->action_counter_index]
1193 [action_id].byteCount +=
1194 rte_pktmbuf_pkt_len(pkt);
1196 printf("Action Count Packet Count: %"
1197 PRIu64 " Byte Count: %" PRIu64
1199 action_counter_table
1200 [p_acl->action_counter_index]
1201 [action_id].packetCount,
1202 action_counter_table
1203 [p_acl->action_counter_index]
1204 [action_id].byteCount);
1207 if (action_array_active[action_id].action_bitmap &
1208 acl_action_packet_drop) {
1210 /* Drop packet by changing the mask */
1212 printf("ACL before drop pkt_mask "
1213 "%lu, pkt_num %d\n",
1215 pkts_mask &= ~(1LLU << pos);
1217 printf("ACL after drop pkt_mask "
1218 " %lu, pkt_num %d\n",
1220 p_acl->counters->pkts_drop++;
1223 if (action_array_active[action_id].action_bitmap &
1226 action_array_active[action_id].fwd_port;
1227 entry->head.port_id = phy_port;
1229 printf("Action FWD Port ID: %u\n",
1233 if (action_array_active[action_id].action_bitmap &
1236 action_array_active[action_id].nat_port;
1237 entry->head.port_id = phy_port;
1239 printf("Action NAT Port ID: %u\n",
1243 if (action_array_active[action_id].action_bitmap &
1246 /* Set DSCP priority */
1247 uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
1250 action_array_active[action_id].dscp_priority
1254 ("Action DSCP DSCP Priority: %u\n",
1258 if (action_array_active[action_id].action_bitmap &
1259 acl_action_packet_accept) {
1261 printf("Action Accept\n");
1263 if (action_array_active[action_id].action_bitmap
1264 & acl_action_conntrack) {
1266 /* Set conntrack bit for this pkt */
1267 conntrack_mask |= pkt_mask;
1269 printf("ACL Conntrack "
1270 "enabled: %p pkt_mask: %p\n",
1271 (void *)conntrack_mask,
1275 if (action_array_active[action_id].action_bitmap
1276 & acl_action_connexist) {
1278 /* Set conntrack bit for this pkt */
1279 conntrack_mask |= pkt_mask;
1281 /* Set connexist bit for this pkt for public -> private */
1282 /* Private -> public packet will open the connection */
1283 if (action_array_active
1284 [action_id].private_public ==
1286 connexist_mask |= pkt_mask;
1289 printf("ACL Connexist "
1290 "enabled conntrack: %p connexist: %p pkt_mask: %p\n",
1291 (void *)conntrack_mask,
1292 (void *)connexist_mask,
1298 if (hdr_chk == IPv6_HDR_VERSION) {
1300 struct acl_table_entry *entry =
1301 (struct acl_table_entry *)
1302 p_acl->acl_entries_ipv6[pos];
1303 uint16_t phy_port = entry->head.port_id;
1304 uint32_t action_id = entry->action_id;
1307 printf("action_id = %u\n", action_id);
1309 if (action_array_active[action_id].action_bitmap &
1311 action_counter_table
1312 [p_acl->action_counter_index]
1313 [action_id].packetCount++;
1314 action_counter_table
1315 [p_acl->action_counter_index]
1316 [action_id].byteCount +=
1317 rte_pktmbuf_pkt_len(pkt);
1319 printf("Action Count Packet Count: %"
1320 PRIu64 " Byte Count: %" PRIu64
1322 action_counter_table
1323 [p_acl->action_counter_index]
1324 [action_id].packetCount,
1325 action_counter_table
1326 [p_acl->action_counter_index]
1327 [action_id].byteCount);
1330 if (action_array_active[action_id].action_bitmap &
1331 acl_action_packet_drop) {
1332 /* Drop packet by changing the mask */
1335 ("ACL before drop pkt_mask %lu, pkt_num %d\n",
1337 pkts_mask &= ~(1LLU << pos);
1340 ("ACL after drop pkt_mask %lu, pkt_num %d\n",
1342 p_acl->counters->pkts_drop++;
1346 if (action_array_active[action_id].action_bitmap &
1349 action_array_active[action_id].fwd_port;
1350 entry->head.port_id = phy_port;
1352 printf("Action FWD Port ID: %u\n",
1356 if (action_array_active[action_id].action_bitmap &
1359 action_array_active[action_id].nat_port;
1360 entry->head.port_id = phy_port;
1362 printf("Action NAT Port ID: %u\n",
1366 if (action_array_active[action_id].action_bitmap &
1369 /* Set DSCP priority */
1370 uint32_t dscp_offset =
1371 MBUF_HDR_ROOM + ETH_HDR_SIZE +
1372 IP_HDR_DSCP_OFST_IPV6;
1374 RTE_MBUF_METADATA_UINT16_PTR(pkt,
1376 uint16_t dscp_value =
1378 (RTE_MBUF_METADATA_UINT16
1379 (pkt, dscp_offset)) & 0XF00F);
1380 uint8_t dscp_store =
1381 action_array_active[action_id].dscp_priority
1383 uint16_t dscp_temp = dscp_store;
1385 dscp_temp = dscp_temp << 4;
1386 *dscp = rte_bswap16(dscp_temp | dscp_value);
1389 ("Action DSCP DSCP Priority: %u\n",
1393 if (action_array_active[action_id].action_bitmap &
1394 acl_action_packet_accept) {
1396 printf("Action Accept\n");
1398 if (action_array_active[action_id].action_bitmap
1399 & acl_action_conntrack) {
1401 /* Set conntrack bit for this pkt */
1402 conntrack_mask |= pkt_mask;
1404 printf("ACL Conntrack "
1405 "enabled: %p pkt_mask: %p\n",
1406 (void *)conntrack_mask,
1410 if (action_array_active[action_id].action_bitmap
1411 & acl_action_connexist) {
1413 /* Set conntrack bit for this pkt */
1414 conntrack_mask |= pkt_mask;
1416 /* Set connexist bit for this pkt for public -> private */
1417 /* Private -> public packet will open the connection */
1418 if (action_array_active
1419 [action_id].private_public ==
1421 connexist_mask |= pkt_mask;
1424 printf("ACL Connexist enabled "
1425 "conntrack: %p connexist: %p pkt_mask: %p\n",
1426 (void *)conntrack_mask,
1427 (void *)connexist_mask,
1434 /* Only call connection tracker if required */
1435 if (conntrack_mask > 0) {
1438 ("ACL Call Conntrack Before = %p Connexist = %p\n",
1439 (void *)conntrack_mask, (void *)connexist_mask);
1441 rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control
1442 (p_acl->cnxn_tracker, pkts, conntrack_mask, connexist_mask);
1444 printf("ACL Call Conntrack After = %p\n",
1445 (void *)conntrack_mask);
1447 /* Only change pkt mask for pkts that have conntrack enabled */
1448 /* Need to loop through packets to check if conntrack enabled */
1449 pkts_to_process = pkts_mask;
1450 for (; pkts_to_process;) {
1451 uint32_t action_id = 0;
1453 (uint8_t) __builtin_ctzll(pkts_to_process);
1454 uint64_t pkt_mask = 1LLU << pos;
1455 /* bitmask representing only this packet */
1457 pkts_to_process &= ~pkt_mask;
1458 /* remove this packet from remaining list */
1459 struct rte_mbuf *pkt = pkts[pos];
1461 uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt,
1465 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1466 if (hdr_chk == IPv4_HDR_VERSION) {
1467 struct acl_table_entry *entry =
1468 (struct acl_table_entry *)
1469 p_acl->acl_entries_ipv4[pos];
1470 action_id = entry->action_id;
1472 struct acl_table_entry *entry =
1473 (struct acl_table_entry *)
1474 p_acl->acl_entries_ipv6[pos];
1475 action_id = entry->action_id;
1478 if ((action_array_active[action_id].action_bitmap &
1479 acl_action_conntrack)
1480 || (action_array_active[action_id].action_bitmap &
1481 acl_action_connexist)) {
1483 if (conntrack_mask & pkt_mask) {
1485 printf("ACL Conntrack Accept "
1489 /* Drop packet by changing the mask */
1491 printf("ACL Conntrack Drop "
1494 pkts_mask &= ~pkt_mask;
1495 p_acl->counters->pkts_drop++;
1501 pkts_to_process = pkts_mask;
1502 /* bitmap of packets left to process for ARP */
1504 for (; pkts_to_process;) {
1505 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
1506 uint64_t pkt_mask = 1LLU << pos;
1507 /* bitmask representing only this packet */
1509 pkts_to_process &= ~pkt_mask;
1510 /* remove this packet from remaining list */
1511 struct rte_mbuf *pkt = pkts[pos];
1514 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
1515 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1517 if (hdr_chk == IPv4_HDR_VERSION) {
1519 struct acl_table_entry *entry =
1520 (struct acl_table_entry *)
1521 p_acl->acl_entries_ipv4[pos];
1522 //uint16_t phy_port = entry->head.port_id;
1523 uint16_t phy_port = pkt->port;
1524 uint32_t *port_out_id =
1525 RTE_MBUF_METADATA_UINT32_PTR(pkt,
1530 /* *port_out_id = p_acl->links_map[phy_port]; */
1531 /* if (is_phy_port_privte(phy_port))
1532 *port_out_id = ACL_PUB_PORT_ID;
1534 *port_out_id = ACL_PRV_PORT_ID;*/
1537 ("phy_port = %i, links_map[phy_port] = %i\n",
1538 phy_port, p_acl->links_map[phy_port]);
1540 /* header room + eth hdr size + dst_adr offset in ip header */
1541 uint32_t dst_addr_offset =
1542 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
1543 uint32_t *dst_addr =
1544 RTE_MBUF_METADATA_UINT32_PTR(pkt, dst_addr_offset);
1546 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
1548 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
1549 struct ether_addr hw_addr;
1550 uint32_t dest_address = rte_bswap32(*dst_addr);
1551 uint32_t *nhip = RTE_MBUF_METADATA_UINT32_PTR(pkt,
1558 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
1560 dest_address = rte_bswap32(*dst_addr);
1561 struct arp_entry_data *ret_arp_data = NULL;
1562 ret_arp_data = get_dest_mac_addr_port
1563 (dest_address, &dest_if, (struct ether_addr *)eth_dest);
1564 *port_out_id = p_acl->port_out_id[dest_if];
1566 if (arp_cache_dest_mac_present(dest_if)) {
1567 ether_addr_copy(get_link_hw_addr(dest_if),
1568 (struct ether_addr *)eth_src);
1569 update_nhip_access(dest_if);
1570 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
1571 printf("sending buffered packets\n");
1572 arp_send_buffered_pkts(ret_arp_data,
1573 (struct ether_addr *)eth_dest, *port_out_id);
1576 p_acl->counters->tpkts_processed++;
1577 p_acl->counters->bytes_processed +=
1580 if (unlikely(ret_arp_data == NULL)) {
1583 printf("%s: NHIP Not Found, "
1584 "outport_id: %d\n", __func__,
1588 pkts_mask &= ~(1LLU << pos);
1590 printf("ACL after drop pkt_mask "
1591 "%lu, pkt_num %d\n",
1593 p_acl->counters->pkts_drop++;
1597 if (ret_arp_data->status == INCOMPLETE ||
1598 ret_arp_data->status == PROBE) {
1599 if (ret_arp_data->num_pkts >= NUM_DESC) {
1601 pkts_mask &= ~(1LLU << pos);
1603 printf("ACL after drop pkt_mask "
1604 "%lu, pkt_num %d\n",
1606 p_acl->counters->pkts_drop++;
1609 arp_pkts_mask |= pkt_mask;
1610 arp_queue_unresolved_packet(ret_arp_data, pkt);
1617 if (hdr_chk == IPv6_HDR_VERSION) {
1619 struct acl_table_entry *entry =
1620 (struct acl_table_entry *)
1621 p_acl->acl_entries_ipv6[pos];
1622 uint16_t phy_port = entry->head.port_id;
1623 uint32_t *port_out_id =
1624 RTE_MBUF_METADATA_UINT32_PTR(pkt,
1629 if (is_phy_port_privte(phy_port))
1630 *port_out_id = ACL_PUB_PORT_ID;
1632 *port_out_id = ACL_PRV_PORT_ID;
1634 /* *port_out_id = p_acl->links_map[phy_port]; */
1637 ("phy_port = %i, links_map[phy_port] = %i\n",
1638 phy_port, p_acl->links_map[phy_port]);
1640 /* header room + eth hdr size + dst_adr offset in ip header */
1641 uint32_t dst_addr_offset =
1642 MBUF_HDR_ROOM + ETH_HDR_SIZE +
1643 IP_HDR_DST_ADR_OFST_IPV6;
1645 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
1647 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
1648 struct ether_addr hw_addr;
1649 uint8_t dest_address[16];
1653 RTE_MBUF_METADATA_UINT8(pkt,
1658 uint8_t *dst_addr[16];
1659 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
1662 for (i = 0; i < 16; i++) {
1664 RTE_MBUF_METADATA_UINT8_PTR(pkt,
1668 memcpy(dest_address, *dst_addr, sizeof(dest_address));
1669 memset(nhip, 0, sizeof(nhip));
1670 if (is_phy_port_privte(phy_port))
1671 port = ACL_PUB_PORT_ID;
1673 port = ACL_PRV_PORT_ID;
1675 if (get_dest_mac_address_ipv6_port
1676 (dest_address, port, &hw_addr, &nhip[0])) {
1680 ("MAC found for port %d - %02x:%02x:%02x:%02x:%02x:%02x\n",
1681 phy_port, hw_addr.addr_bytes[0],
1682 hw_addr.addr_bytes[1],
1683 hw_addr.addr_bytes[2],
1684 hw_addr.addr_bytes[3],
1685 hw_addr.addr_bytes[4],
1686 hw_addr.addr_bytes[5]);
1688 ("Dest MAC before - %02x:%02x:%02x:%02x:%02x:%02x\n",
1689 eth_dest[0], eth_dest[1],
1690 eth_dest[2], eth_dest[3],
1691 eth_dest[4], eth_dest[5]);
1693 memcpy(eth_dest, &hw_addr,
1694 sizeof(struct ether_addr));
1696 printf("PktP %p, dest_macP %p\n", pkt,
1699 ("Dest MAC after - %02x:%02x:%02x:%02x:%02x:%02x\n",
1700 eth_dest[0], eth_dest[1],
1701 eth_dest[2], eth_dest[3],
1702 eth_dest[4], eth_dest[5]);
1704 if (is_phy_port_privte(phy_port))
1706 get_link_hw_addr(dest_if),
1707 sizeof(struct ether_addr));
1710 get_link_hw_addr(dest_if),
1711 sizeof(struct ether_addr));
1714 * memcpy(eth_src, get_link_hw_addr(p_acl->links_map[phy_port]),
1715 * sizeof(struct ether_addr));
1717 p_acl->counters->tpkts_processed++;
1718 p_acl->counters->bytes_processed +=
1724 /* Drop packet by changing the mask */
1726 printf("ACL before drop pkt_mask "
1727 " %lu, pkt_num %d\n",
1729 pkts_mask &= ~(1LLU << pos);
1731 printf("ACL after drop pkt_mask "
1732 "%lu, pkt_num %d\n",
1734 p_acl->counters->pkts_drop++;
1741 pkts_drop_mask = keep_mask & ~pkts_mask;
1742 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1743 keep_mask = pkts_mask;
1745 if (arp_pkts_mask) {
1746 keep_mask &= ~(arp_pkts_mask);
1747 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
1750 /* don't bother measuring if traffic very low, might skew stats */
1751 uint32_t packets_this_iteration = __builtin_popcountll(pkts_mask);
1753 if (packets_this_iteration > 1) {
1754 uint64_t latency_this_iteration =
1755 rte_get_tsc_cycles() - p_acl->in_port_time_stamp;
1756 p_acl->counters->sum_latencies += latency_this_iteration;
1757 p_acl->counters->count_latencies++;
1760 printf("Leaving pkt_work_acl_key pkts_mask = %p\n",
1767 * Main packet processing function.
1768 * 64 packet bit mask are used to identify which packets to forward.
1769 * Performs the following:
1770 * - Burst lookup packets in the IPv4 ACL Rule Table.
1771 * - Burst lookup packets in the IPv6 ACL Rule Table.
1772 * - Lookup Action Table, perform actions.
1773 * - Burst lookup Connection Tracking, if enabled.
1774 * - Lookup MAC address.
1776 * - Packets with bit mask set are forwarded
1779 * A pointer to the pipeline.
1781 * A pointer to a burst of packets.
1783 * Number of packets to process.
1785 * A pointer to pipeline specific data.
1788 * 0 on success, negative on error.
1791 pkt_work_acl_ipv6_key(struct rte_pipeline *p,
1792 struct rte_mbuf **pkts, uint32_t n_pkts, void *arg)
/* arg is the pipeline-specific data registered at port-in create time. */
1795 struct pipeline_acl *p_acl = arg;
/* Count every packet handed to this in-port action handler. */
1797 p_acl->counters->pkts_received =
1798 p_acl->counters->pkts_received + n_pkts;
1800 printf("pkt_work_acl_key pkts_received: %" PRIu64
1801 " n_pkts: %u\n", p_acl->counters->pkts_received, n_pkts);
1803 uint64_t lookup_hit_mask = 0;
1804 uint64_t lookup_hit_mask_ipv4 = 0;
1805 uint64_t lookup_hit_mask_ipv6 = 0;
1806 uint64_t lookup_miss_mask = 0;
1807 uint64_t conntrack_mask = 0;
1808 uint64_t connexist_mask = 0;
1809 uint32_t dest_address = 0;
/* All n_pkts packets start eligible; keep_mask remembers the original set
 * so the final drop mask can be computed as keep_mask & ~pkts_mask. */
1813 uint64_t pkts_drop_mask, pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
1814 uint64_t keep_mask = pkts_mask;
/* TSC snapshot consumed by the latency accounting at the end of this run. */
1818 p_acl->in_port_time_stamp = rte_get_tsc_cycles();
/* Phase 1: burst-classify the packets against the active IPv6 ACL table. */
1820 if (acl_ipv6_enabled) {
1822 printf("ACL IPV6 Lookup Mask Before = %p\n",
1825 rte_table_acl_ops.f_lookup(acl_rule_table_ipv6_active, pkts,
1826 pkts_mask, &lookup_hit_mask_ipv6,
1828 p_acl->acl_entries_ipv6);
1830 printf("ACL IPV6 Lookup Mask After = %p\n",
1831 (void *)lookup_hit_mask_ipv6);
1834 /* Merge lookup results since we process both IPv4 and IPv6 below */
1835 lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
1837 printf("ACL Lookup Mask After = %p\n", (void *)lookup_hit_mask);
/* Packets that hit no ACL rule are removed from the mask and counted
 * as drops; only rule hits continue to action processing. */
1839 lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
1840 pkts_mask = lookup_hit_mask;
1841 p_acl->counters->pkts_drop += __builtin_popcountll(lookup_miss_mask);
1843 printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
1844 p_acl->counters->pkts_drop,
1845 __builtin_popcountll(lookup_miss_mask));
/* Phase 2: walk each rule-hit packet (lowest set bit first) and apply
 * the actions attached to its matched ACL entry. */
1847 uint64_t pkts_to_process = lookup_hit_mask;
1848 /* bitmap of packets left to process for ARP */
1850 for (; pkts_to_process;) {
1851 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
1852 uint64_t pkt_mask = 1LLU << pos;
1853 /* bitmask representing only this packet */
1855 pkts_to_process &= ~pkt_mask;
1856 /* remove this packet from remaining list */
1857 struct rte_mbuf *pkt = pkts[pos];
/* ARP/ICMP control traffic is diverted out of the fast path here. */
1860 if (!check_arp_icmp(pkt, pkt_mask, p_acl)) {
1861 pkts_mask &= ~(1LLU << pos);
/* Read the IP version nibble from the first byte after the Ethernet
 * header in mbuf metadata space. */
1865 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
1866 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
/* IPv4 packet: apply count/drop/fwd/nat/dscp/accept/conntrack actions. */
1868 if (hdr_chk == IPv4_HDR_VERSION) {
1869 struct acl_table_entry *entry =
1870 (struct acl_table_entry *)
1871 p_acl->acl_entries_ipv4[pos];
1872 uint16_t phy_port = entry->head.port_id;
1873 uint32_t action_id = entry->action_id;
1876 printf("action_id = %u\n", action_id);
1878 uint32_t dscp_offset =
1879 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DSCP_OFST;
/* Per-action packet/byte counters, indexed by this pipeline instance
 * (action_counter_index) and the matched action id. */
1881 if (action_array_active[action_id].action_bitmap &
1883 action_counter_table
1884 [p_acl->action_counter_index]
1885 [action_id].packetCount++;
1886 action_counter_table
1887 [p_acl->action_counter_index]
1888 [action_id].byteCount +=
1889 rte_pktmbuf_pkt_len(pkt);
1891 printf("Action Count Packet Count: %"
1892 PRIu64 " Byte Count: %" PRIu64
1894 action_counter_table
1895 [p_acl->action_counter_index]
1896 [action_id].packetCount,
1897 action_counter_table
1898 [p_acl->action_counter_index]
1899 [action_id].byteCount);
1902 if (action_array_active[action_id].action_bitmap &
1903 acl_action_packet_drop) {
1905 /* Drop packet by changing the mask */
1908 ("ACL before drop pkt_mask %lu, pkt_num %d\n",
1910 pkts_mask &= ~(1LLU << pos);
1913 ("ACL after drop pkt_mask %lu, pkt_num %d\n",
1915 p_acl->counters->pkts_drop++;
/* FWD action: steer the packet by rewriting the entry's out port. */
1918 if (action_array_active[action_id].action_bitmap &
1921 action_array_active[action_id].fwd_port;
1922 entry->head.port_id = phy_port;
1924 printf("Action FWD Port ID: %u\n",
/* NAT action: same port-rewrite mechanism, using nat_port instead. */
1928 if (action_array_active[action_id].action_bitmap &
1931 action_array_active[action_id].nat_port;
1932 entry->head.port_id = phy_port;
1934 printf("Action NAT Port ID: %u\n",
/* DSCP action (IPv4): write the configured priority into the TOS byte. */
1938 if (action_array_active[action_id].action_bitmap &
1941 /* Set DSCP priority */
1942 uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
1945 action_array_active[action_id].dscp_priority
1949 ("Action DSCP DSCP Priority: %u\n",
1953 if (action_array_active[action_id].action_bitmap &
1954 acl_action_packet_accept) {
1956 printf("Action Accept\n");
/* Accept + conntrack: mark this packet for the batched connection
 * tracker lookup done after this loop. */
1958 if (action_array_active[action_id].action_bitmap
1959 & acl_action_conntrack) {
1961 /* Set conntrack bit for this pkt */
1962 conntrack_mask |= pkt_mask;
1964 printf("ACL Conntrack enabled: "
1965 " %p pkt_mask: %p\n",
1966 (void *)conntrack_mask,
1970 if (action_array_active[action_id].action_bitmap
1971 & acl_action_connexist) {
1973 /* Set conntrack bit for this pkt */
1974 conntrack_mask |= pkt_mask;
1976 /* Set connexist bit for this pkt for public -> private */
1977 /* Private -> public packet will open the connection */
1978 if (action_array_active
1979 [action_id].private_public ==
1981 connexist_mask |= pkt_mask;
1984 printf("ACL Connexist enabled "
1985 "conntrack: %p connexist: %p pkt_mask: %p\n",
1986 (void *)conntrack_mask,
1987 (void *)connexist_mask,
/* IPv6 packet: same action handling with IPv6 DSCP/Traffic Class layout. */
1994 if (hdr_chk == IPv6_HDR_VERSION) {
1996 struct acl_table_entry *entry =
1997 (struct acl_table_entry *)
1998 p_acl->acl_entries_ipv6[pos];
1999 uint16_t phy_port = entry->head.port_id;
2000 uint32_t action_id = entry->action_id;
2003 printf("action_id = %u\n", action_id);
2005 if (action_array_active[action_id].action_bitmap &
2007 action_counter_table
2008 [p_acl->action_counter_index]
2009 [action_id].packetCount++;
2010 action_counter_table
2011 [p_acl->action_counter_index]
2012 [action_id].byteCount +=
2013 rte_pktmbuf_pkt_len(pkt);
2015 printf("Action Count Packet Count: %"
2016 PRIu64 " Byte Count: %" PRIu64
2018 action_counter_table
2019 [p_acl->action_counter_index]
2020 [action_id].packetCount,
2021 action_counter_table
2022 [p_acl->action_counter_index]
2023 [action_id].byteCount);
2026 if (action_array_active[action_id].action_bitmap &
2027 acl_action_packet_drop) {
2028 /* Drop packet by changing the mask */
2030 printf("ACL before drop pkt_mask "
2031 "%lu, pkt_num %d\n",
2033 pkts_mask &= ~(1LLU << pos);
2035 printf("ACL after drop pkt_mask "
2036 "%lu, pkt_num %d\n",
2038 p_acl->counters->pkts_drop++;
2042 if (action_array_active[action_id].action_bitmap &
2045 action_array_active[action_id].fwd_port;
2046 entry->head.port_id = phy_port;
2048 printf("Action FWD Port ID: %u\n",
2052 if (action_array_active[action_id].action_bitmap &
2055 action_array_active[action_id].nat_port;
2056 entry->head.port_id = phy_port;
2058 printf("Action NAT Port ID: %u\n",
/* DSCP action (IPv6): the traffic-class bits straddle a 16-bit word,
 * hence the mask/shift/bswap dance below. */
2062 if (action_array_active[action_id].action_bitmap &
2065 /* Set DSCP priority */
2066 uint32_t dscp_offset =
2067 MBUF_HDR_ROOM + ETH_HDR_SIZE +
2068 IP_HDR_DSCP_OFST_IPV6;
2070 RTE_MBUF_METADATA_UINT16_PTR(pkt,
2072 uint16_t dscp_value =
2074 (RTE_MBUF_METADATA_UINT16
2075 (pkt, dscp_offset)) & 0XF00F);
2076 uint8_t dscp_store =
2077 action_array_active[action_id].dscp_priority
2079 uint16_t dscp_temp = dscp_store;
2081 dscp_temp = dscp_temp << 4;
2082 *dscp = rte_bswap16(dscp_temp | dscp_value);
2085 ("Action DSCP DSCP Priority: %u\n",
2089 if (action_array_active[action_id].action_bitmap &
2090 acl_action_packet_accept) {
2092 printf("Action Accept\n");
2094 if (action_array_active[action_id].action_bitmap
2095 & acl_action_conntrack) {
2097 /* Set conntrack bit for this pkt */
2098 conntrack_mask |= pkt_mask;
2100 printf("ACL Conntrack enabled: "
2101 " %p pkt_mask: %p\n",
2102 (void *)conntrack_mask,
2106 if (action_array_active[action_id].action_bitmap
2107 & acl_action_connexist) {
2109 /* Set conntrack bit for this pkt */
2110 conntrack_mask |= pkt_mask;
2112 /* Set connexist bit for this pkt for public -> private */
2113 /* Private -> public packet will open the connection */
2114 if (action_array_active
2115 [action_id].private_public ==
2117 connexist_mask |= pkt_mask;
2120 printf("ACL Connexist enabled "
2121 "conntrack: %p connexist: %p pkt_mask: %p\n",
2122 (void *)conntrack_mask,
2123 (void *)connexist_mask,
2129 /* Only call connection tracker if required */
2130 if (conntrack_mask > 0) {
2133 ("ACL Call Conntrack Before = %p Connexist = %p\n",
2134 (void *)conntrack_mask, (void *)connexist_mask);
/* Batched conntrack lookup; on return conntrack_mask holds the set of
 * packets the tracker accepted. */
2136 rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control
2137 (p_acl->cnxn_tracker, pkts, conntrack_mask, connexist_mask);
2139 printf("ACL Call Conntrack After = %p\n",
2140 (void *)conntrack_mask);
2142 /* Only change pkt mask for pkts that have conntrack enabled */
2143 /* Need to loop through packets to check if conntrack enabled */
/* Phase 3: apply the tracker's verdict — drop any conntrack-enabled
 * packet whose bit the tracker cleared. */
2144 pkts_to_process = pkts_mask;
2145 for (; pkts_to_process;) {
2146 uint32_t action_id = 0;
2148 (uint8_t) __builtin_ctzll(pkts_to_process);
2149 uint64_t pkt_mask = 1LLU << pos;
2150 /* bitmask representing only this packet */
2152 pkts_to_process &= ~pkt_mask;
2153 /* remove this packet from remaining list */
2154 struct rte_mbuf *pkt = pkts[pos];
2156 uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt,
2160 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
2161 if (hdr_chk == IPv4_HDR_VERSION) {
2162 struct acl_table_entry *entry =
2163 (struct acl_table_entry *)
2164 p_acl->acl_entries_ipv4[pos];
2165 action_id = entry->action_id;
2167 struct acl_table_entry *entry =
2168 (struct acl_table_entry *)
2169 p_acl->acl_entries_ipv6[pos];
2170 action_id = entry->action_id;
2173 if ((action_array_active[action_id].action_bitmap &
2174 acl_action_conntrack)
2175 || (action_array_active[action_id].action_bitmap &
2176 acl_action_connexist)) {
2178 if (conntrack_mask & pkt_mask) {
2180 printf("ACL Conntrack Accept "
2184 /* Drop packet by changing the mask */
2187 ("ACL Conntrack Drop packet = %p\n",
2189 pkts_mask &= ~pkt_mask;
2190 p_acl->counters->pkts_drop++;
/* Phase 4: for each surviving packet, resolve the next-hop MAC through
 * the IPv6 ND cache and select the output port. */
2196 pkts_to_process = pkts_mask;
2197 /* bitmap of packets left to process for ARP */
2199 for (; pkts_to_process;) {
2200 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
2201 uint64_t pkt_mask = 1LLU << pos;
2202 /* bitmask representing only this packet */
2204 pkts_to_process &= ~pkt_mask;
2205 /* remove this packet from remaining list */
2206 struct rte_mbuf *pkt = pkts[pos];
2209 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
2210 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
2212 if (hdr_chk == IPv6_HDR_VERSION) {
2214 struct acl_table_entry *entry =
2215 (struct acl_table_entry *)
2216 p_acl->acl_entries_ipv6[pos];
2217 //uint16_t phy_port = entry->head.port_id;
2218 uint16_t phy_port = pkt->port;
2219 uint32_t *port_out_id =
2220 RTE_MBUF_METADATA_UINT32_PTR(pkt,
2225 /* if (is_phy_port_privte(phy_port))
2226 *port_out_id = ACL_PUB_PORT_ID;
2228 *port_out_id = ACL_PRV_PORT_ID;*/
2230 /* *port_out_id = p_acl->links_map[phy_port]; */
2233 ("phy_port = %i,links_map[phy_port] = %i\n",
2234 phy_port, p_acl->links_map[phy_port]);
2236 /* header room + eth hdr size + dst_adr offset in ip header */
2237 uint32_t dst_addr_offset =
2238 MBUF_HDR_ROOM + ETH_HDR_SIZE +
2239 IP_HDR_DST_ADR_OFST_IPV6;
/* eth_dest points at the frame's destination MAC (offset 0), eth_src
 * at the source MAC (offset 6) for in-place rewrite. */
2241 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
2243 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
2244 struct ether_addr hw_addr;
2245 uint8_t dest_address[16];
2249 RTE_MBUF_METADATA_UINT8(pkt,
/* NOTE(review): this declares an array of 16 POINTERS, yet below it is
 * dereferenced as *dst_addr for a 16-byte copy — verify against the
 * full source that this is the intended type. */
2254 uint8_t *dst_addr[16];
2255 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
2258 for (i = 0; i < 16; i++) {
2260 RTE_MBUF_METADATA_UINT8_PTR(pkt,
2264 memcpy(dest_address, *dst_addr, sizeof(dest_address));
2265 memset(nhip, 0, sizeof(nhip));
2266 struct nd_entry_data *ret_nd_data = NULL;
2267 ret_nd_data = get_dest_mac_address_ipv6_port
2268 (dest_address, &dest_if, &hw_addr, &nhip[0]);
2269 *port_out_id = p_acl->port_out_id[dest_if];
/* ND cache hit: rewrite source MAC from the egress link and flush any
 * packets that were buffered while this neighbour was unresolved. */
2271 if (nd_cache_dest_mac_present(dest_if)) {
2272 ether_addr_copy(get_link_hw_addr(dest_if),
2273 (struct ether_addr *)eth_src);
2274 update_nhip_access(dest_if);
2276 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
2277 printf("sending buffered packets\n");
2278 p_acl->counters->tpkts_processed +=
2279 ret_nd_data->num_pkts;
2280 nd_send_buffered_pkts(ret_nd_data,
2281 (struct ether_addr *)eth_dest, *port_out_id);
2283 p_acl->counters->tpkts_processed++;
2284 p_acl->counters->bytes_processed +=
/* No ND entry at all: drop the packet. */
2287 if (unlikely(ret_nd_data == NULL)) {
2289 printf("ACL before drop pkt_mask "
2290 "%lu, pkt_num %d\n", pkts_mask, pos);
2291 pkts_mask &= ~(1LLU << pos);
2293 printf("ACL after drop pkt_mask "
2294 "%lu, pkt_num %d\n", pkts_mask, pos);
2295 p_acl->counters->pkts_drop++;
/* Resolution in progress: either queue the packet until ND completes,
 * or drop it if the per-neighbour queue is already full (>= NUM_DESC). */
2299 if (ret_nd_data->status == INCOMPLETE ||
2300 ret_nd_data->status == PROBE) {
2301 if (ret_nd_data->num_pkts >= NUM_DESC) {
2304 printf("ACL before drop pkt_mask "
2305 "%lu, pkt_num %d\n", pkts_mask, pos);
2306 pkts_mask &= ~(1LLU << pos);
2308 printf("ACL after drop pkt_mask "
2309 "%lu, pkt_num %d\n", pkts_mask, pos);
2310 p_acl->counters->pkts_drop++;
2313 arp_pkts_mask |= pkt_mask;
2314 nd_queue_unresolved_packet(ret_nd_data,
2324 } /* end of for loop */
/* Drop everything removed from pkts_mask; hijack (hold back) packets
 * queued for ND resolution so the pipeline does not forward them. */
2326 pkts_drop_mask = keep_mask & ~pkts_mask;
2327 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
2328 keep_mask = pkts_mask;
2330 if (arp_pkts_mask) {
2331 keep_mask &= ~(arp_pkts_mask);
2332 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
2335 /* don't bother measuring if traffic very low, might skew stats */
2336 uint32_t packets_this_iteration = __builtin_popcountll(pkts_mask);
2338 if (packets_this_iteration > 1) {
2339 uint64_t latency_this_iteration =
2340 rte_get_tsc_cycles() - p_acl->in_port_time_stamp;
2341 p_acl->counters->sum_latencies += latency_this_iteration;
2342 p_acl->counters->count_latencies++;
2345 printf("Leaving pkt_work_acl_key pkts_mask = %p\n",
/*
 * ACL match-field layout for plain (untagged) IPv4 over Ethernet:
 * protocol, source/destination address, source/destination L4 port.
 * Offsets are relative to the start of the Ethernet header.
 */
2351 static struct rte_acl_field_def field_format_ipv4[] = {
/* IP protocol (next_proto_id), matched with a bitmask. */
2354 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2355 .size = sizeof(uint8_t),
2358 .offset = sizeof(struct ether_hdr) +
2359 offsetof(struct ipv4_hdr, next_proto_id),
2362 /* Source IP address (IPv4) */
2364 .type = RTE_ACL_FIELD_TYPE_MASK,
2365 .size = sizeof(uint32_t),
2368 .offset = sizeof(struct ether_hdr) +
2369 offsetof(struct ipv4_hdr, src_addr),
2372 /* Destination IP address (IPv4) */
2374 .type = RTE_ACL_FIELD_TYPE_MASK,
2375 .size = sizeof(uint32_t),
2378 .offset = sizeof(struct ether_hdr) +
2379 offsetof(struct ipv4_hdr, dst_addr),
/* Source port (TCP/UDP share the same first-two-field layout),
 * matched as a low/high range. */
2384 .type = RTE_ACL_FIELD_TYPE_RANGE,
2385 .size = sizeof(uint16_t),
2388 .offset = sizeof(struct ether_hdr) +
2389 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
2392 /* Destination Port */
2394 .type = RTE_ACL_FIELD_TYPE_RANGE,
2395 .size = sizeof(uint16_t),
2398 .offset = sizeof(struct ether_hdr) +
2399 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
2403 #define SIZEOF_VLAN_HDR 4
/*
 * ACL match-field layout for single-tagged (802.1Q) IPv4 frames.
 * Identical to field_format_ipv4 except every offset is shifted by
 * SIZEOF_VLAN_HDR (4 bytes) to skip the VLAN tag.
 */
2405 static struct rte_acl_field_def field_format_vlan_ipv4[] = {
/* IP protocol (next_proto_id), matched with a bitmask. */
2408 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2409 .size = sizeof(uint8_t),
2412 .offset = sizeof(struct ether_hdr) +
2413 SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, next_proto_id),
2416 /* Source IP address (IPv4) */
2418 .type = RTE_ACL_FIELD_TYPE_MASK,
2419 .size = sizeof(uint32_t),
2422 .offset = sizeof(struct ether_hdr) +
2423 SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, src_addr),
2426 /* Destination IP address (IPv4) */
2428 .type = RTE_ACL_FIELD_TYPE_MASK,
2429 .size = sizeof(uint32_t),
2432 .offset = sizeof(struct ether_hdr) +
2433 SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, dst_addr),
/* Source port, matched as a low/high range. */
2438 .type = RTE_ACL_FIELD_TYPE_RANGE,
2439 .size = sizeof(uint16_t),
2442 .offset = sizeof(struct ether_hdr) +
2444 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
2447 /* Destination Port */
2449 .type = RTE_ACL_FIELD_TYPE_RANGE,
2450 .size = sizeof(uint16_t),
2453 .offset = sizeof(struct ether_hdr) +
2455 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
2459 #define SIZEOF_QINQ_HEADER 8
/*
 * ACL match-field layout for double-tagged (QinQ, 802.1ad) IPv4 frames.
 * Same fields as field_format_ipv4 with every offset shifted by
 * SIZEOF_QINQ_HEADER (8 bytes) to skip both VLAN tags.
 */
2461 static struct rte_acl_field_def field_format_qinq_ipv4[] = {
/* IP protocol (next_proto_id), matched with a bitmask. */
2464 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2465 .size = sizeof(uint8_t),
2468 .offset = sizeof(struct ether_hdr) +
2469 SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, next_proto_id),
2472 /* Source IP address (IPv4) */
2474 .type = RTE_ACL_FIELD_TYPE_MASK,
2475 .size = sizeof(uint32_t),
2478 .offset = sizeof(struct ether_hdr) +
2479 SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, src_addr),
2482 /* Destination IP address (IPv4) */
2484 .type = RTE_ACL_FIELD_TYPE_MASK,
2485 .size = sizeof(uint32_t),
2488 .offset = sizeof(struct ether_hdr) +
2489 SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, dst_addr),
/* Source port, matched as a low/high range. */
2494 .type = RTE_ACL_FIELD_TYPE_RANGE,
2495 .size = sizeof(uint16_t),
2498 .offset = sizeof(struct ether_hdr) +
2499 SIZEOF_QINQ_HEADER +
2500 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
2503 /* Destination Port */
2505 .type = RTE_ACL_FIELD_TYPE_RANGE,
2506 .size = sizeof(uint16_t),
2509 .offset = sizeof(struct ether_hdr) +
2510 SIZEOF_QINQ_HEADER +
2511 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
/*
 * ACL match-field layout for IPv6 over Ethernet. The 128-bit source and
 * destination addresses are each split into four consecutive 32-bit
 * mask-matched fields, since rte_acl fields are at most 4 bytes wide.
 */
2515 static struct rte_acl_field_def field_format_ipv6[] = {
/* Next-header / protocol byte, matched with a bitmask. */
2518 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2519 .size = sizeof(uint8_t),
2522 .offset = sizeof(struct ether_hdr) +
2523 offsetof(struct ipv6_hdr, proto),
2526 /* Source IP address (IPv6) */
/* Source address word 0 (most significant 32 bits). */
2528 .type = RTE_ACL_FIELD_TYPE_MASK,
2529 .size = sizeof(uint32_t),
2532 .offset = sizeof(struct ether_hdr) +
2533 offsetof(struct ipv6_hdr, src_addr),
/* Source address word 1. */
2537 .type = RTE_ACL_FIELD_TYPE_MASK,
2538 .size = sizeof(uint32_t),
2541 .offset = sizeof(struct ether_hdr) +
2542 offsetof(struct ipv6_hdr, src_addr) + sizeof(uint32_t),
/* Source address word 2. */
2547 .type = RTE_ACL_FIELD_TYPE_MASK,
2548 .size = sizeof(uint32_t),
2551 .offset = sizeof(struct ether_hdr) +
2552 offsetof(struct ipv6_hdr, src_addr) + 2 * sizeof(uint32_t),
/* Source address word 3 (least significant 32 bits). */
2557 .type = RTE_ACL_FIELD_TYPE_MASK,
2558 .size = sizeof(uint32_t),
2561 .offset = sizeof(struct ether_hdr) +
2562 offsetof(struct ipv6_hdr, src_addr) + 3 * sizeof(uint32_t),
2566 /* Destination IP address (IPv6) */
/* Destination address word 0. */
2568 .type = RTE_ACL_FIELD_TYPE_MASK,
2569 .size = sizeof(uint32_t),
2572 .offset = sizeof(struct ether_hdr) +
2573 offsetof(struct ipv6_hdr, dst_addr),
/* Destination address word 1. */
2577 .type = RTE_ACL_FIELD_TYPE_MASK,
2578 .size = sizeof(uint32_t),
2581 .offset = sizeof(struct ether_hdr) +
2582 offsetof(struct ipv6_hdr, dst_addr) + sizeof(uint32_t),
/* Destination address word 2. */
2587 .type = RTE_ACL_FIELD_TYPE_MASK,
2588 .size = sizeof(uint32_t),
2591 .offset = sizeof(struct ether_hdr) +
2592 offsetof(struct ipv6_hdr, dst_addr) + 2 * sizeof(uint32_t),
/* Destination address word 3. */
2597 .type = RTE_ACL_FIELD_TYPE_MASK,
2598 .size = sizeof(uint32_t),
2601 .offset = sizeof(struct ether_hdr) +
2602 offsetof(struct ipv6_hdr, dst_addr) + 3 * sizeof(uint32_t),
/* Source port, matched as a low/high range (assumes no IPv6 extension
 * headers between the fixed header and the L4 header). */
2608 .type = RTE_ACL_FIELD_TYPE_RANGE,
2609 .size = sizeof(uint16_t),
2612 .offset = sizeof(struct ether_hdr) +
2613 sizeof(struct ipv6_hdr) + offsetof(struct tcp_hdr, src_port),
2616 /* Destination Port */
2618 .type = RTE_ACL_FIELD_TYPE_RANGE,
2619 .size = sizeof(uint16_t),
2622 .offset = sizeof(struct ether_hdr) +
2623 sizeof(struct ipv6_hdr) + offsetof(struct tcp_hdr, dst_port),
2628 * Parse arguments in config file.
2631 * A pointer to the pipeline.
2633 * A pointer to pipeline specific parameters.
2636 * 0 on success, negative on error.
2639 pipeline_acl_parse_args(struct pipeline_acl *p, struct pipeline_params *params)
/* Duplicate-argument guards: each recognized key may appear only once. */
2641 uint32_t n_rules_present = 0;
2642 uint32_t pkt_type_present = 0;
2644 uint8_t prv_que_handler_present = 0;
2645 uint8_t n_prv_in_port = 0;
/* Defaults before parsing: 4K rules, plain-IPv4 field layout. */
2648 p->n_rules = 4 * 1024;
2649 acl_n_rules = 4 * 1024;
2650 p->n_rule_fields = RTE_DIM(field_format_ipv4);
2651 p->field_format = field_format_ipv4;
2652 p->field_format_size = sizeof(field_format_ipv4);
/* Walk the name/value pairs supplied by the pipeline config. */
2654 for (i = 0; i < params->n_args; i++) {
2655 char *arg_name = params->args_name[i];
2656 char *arg_value = params->args_value[i];
/* n_rules: capacity of the ACL rule table (both local and global copy).
 * NOTE(review): atoi() reports no conversion errors — strtol() would
 * detect malformed values. */
2658 if (strcmp(arg_name, "n_rules") == 0) {
2659 if (n_rules_present)
2661 n_rules_present = 1;
2663 p->n_rules = atoi(arg_value);
2664 acl_n_rules = atoi(arg_value);
/* pkt_type: selects which rte_acl_field_def table the classifier uses
 * (untagged / VLAN / QinQ IPv4, or IPv6). */
2668 if (strcmp(arg_name, "pkt_type") == 0) {
2669 if (pkt_type_present)
2671 pkt_type_present = 1;
2674 if (strcmp(arg_value, "ipv4") == 0) {
2675 p->n_rule_fields = RTE_DIM(field_format_ipv4);
2676 p->field_format = field_format_ipv4;
2677 p->field_format_size =
2678 sizeof(field_format_ipv4);
2683 if (strcmp(arg_value, "vlan_ipv4") == 0) {
2685 RTE_DIM(field_format_vlan_ipv4);
2686 p->field_format = field_format_vlan_ipv4;
2687 p->field_format_size =
2688 sizeof(field_format_vlan_ipv4);
2693 if (strcmp(arg_value, "qinq_ipv4") == 0) {
2695 RTE_DIM(field_format_qinq_ipv4);
2696 p->field_format = field_format_qinq_ipv4;
2697 p->field_format_size =
2698 sizeof(field_format_qinq_ipv4);
2703 if (strcmp(arg_value, "ipv6") == 0) {
2704 p->n_rule_fields = RTE_DIM(field_format_ipv6);
2705 p->field_format = field_format_ipv6;
2706 p->field_format_size =
2707 sizeof(field_format_ipv6);
/* traffic_type: must be the IPv4 or IPv6 header-version value; picks
 * which pkt_work handler is installed at pipeline init. */
2715 if (strcmp(arg_name, "traffic_type") == 0) {
2716 int traffic_type = atoi(arg_value);
2718 if (traffic_type == 0
2719 || !(traffic_type == IPv4_HDR_VERSION
2720 || traffic_type == IPv6_HDR_VERSION)) {
2721 printf("not IPVR4/IPVR6");
2725 p->traffic_type = traffic_type;
/* prv_que_handler: comma-separated list of private RX ports, written
 * as "(p0,p1,...)"; indices are stored in acl_prv_que_port_index[]. */
2729 if (strcmp(arg_name, "prv_que_handler") == 0) {
2731 if (prv_que_handler_present) {
2732 printf("Duplicate pktq_in_prv ..\n\n");
2735 prv_que_handler_present = 1;
2740 /* get the first token */
/* Successive strtok() calls strip the surrounding "( )" and split on
 * commas. NOTE(review): strtok() mutates arg_value and keeps static
 * state — fine single-threaded at init, not reentrant. */
2741 token = strtok(arg_value, "(");
2742 token = strtok(token, ")");
2743 token = strtok(token, ",");
2744 printf("***** prv_que_handler *****\n");
2747 printf("string is null\n");
2748 printf("prv_que_handler is invalid\n");
2751 printf("string is :%s\n", token);
2753 while (token != NULL) {
2754 printf(" %s\n", token);
2755 rxport = atoi(token);
2756 acl_prv_que_port_index[n_prv_in_port++] =
2758 token = strtok(NULL, ",");
2761 if (n_prv_in_port == 0) {
2762 printf("VNF common parse err - no prv RX phy port\n");
/* n_flows: sizes the connection tracker; zero is rejected. */
2769 if (strcmp(arg_name, "n_flows") == 0) {
2770 p->n_flows = atoi(arg_value);
2771 if (p->n_flows == 0)
2774 continue;/* needed when multiple parms are checked */
2783 * Create and initialize Pipeline Back End (BE).
2786 * A pointer to the pipeline.
2788 * A pointer to pipeline specific data.
2791 * A pointer to the pipeline create, NULL on error.
/*
 * Create and initialize the ACL Pipeline Back End (BE).
 * Allocates the pipeline object, the connection tracker and a counter
 * block, parses pipeline arguments, then builds the rte_pipeline
 * (input/output ports, stub table, default entry) and — on the first
 * instance — the shared active/standby ACL rule tables.
 * Returns a pointer to the pipeline, NULL on error.
 * NOTE(review): this chunk is a garbled extraction — several original
 * lines (braces, error-return paths, some call arguments) are missing
 * between the numbered lines below.
 */
2793 static void *pipeline_acl_init(struct pipeline_params *params,
2794 __rte_unused void *arg)
2797 struct pipeline_acl *p_acl;
2800 /* Check input arguments */
2801 if ((params == NULL) ||
2802 (params->n_ports_in == 0) || (params->n_ports_out == 0))
2805 /* Memory allocation */
2806 size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_acl));
2807 p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
2808 p_acl = (struct pipeline_acl *)p;
/* NOTE(review): unbounded strcpy — assumes params->name fits p->name; confirm. */
2812 strcpy(p->name, params->name);
2813 p->log_level = params->log_level;
2815 PLOG(p, HIGH, "ACL");
2818 * p_acl->links_map[0] = 0xff;
2819 * p_acl->links_map[1] = 0xff;]
/* Default traffic type is IPv4; parsed arguments may override it below. */
2821 p_acl->traffic_type = IPv4_HDR_VERSION;
2822 for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
2823 p_acl->links_map[i] = 0xff;
2824 p_acl->port_out_id[i] = 0xff;
2825 acl_prv_que_port_index[i] = 0;
2828 p_acl->pipeline_num = 0xff;
2830 /* if(enable_hwlb || enable_flow_dir) */
2831 // lib_arp_init(params, arg);
2833 p_acl->n_flows = 4096; /* small default value */
2834 /* Create a single firewall instance and initialize. */
2835 p_acl->cnxn_tracker =
2836 rte_zmalloc(NULL, rte_ct_get_cnxn_tracker_size(),
2837 RTE_CACHE_LINE_SIZE);
2839 if (p_acl->cnxn_tracker == NULL)
2843 * Now allocate a counter block entry.It appears that the initialization
2844 * of all instances is serialized on core 0, so no lock is necessary.
2846 struct rte_ACL_counter_block *counter_ptr;
2848 if (rte_ACL_hi_counter_block_in_use == MAX_ACL_INSTANCES) {
2849 /* error, exceeded table bounds */
/*
 * NOTE(review): the bounds check above rejects == MAX_ACL_INSTANCES
 * *before* the increment, so the post-increment value used as the table
 * index below can equal MAX_ACL_INSTANCES — verify rte_acl_counter_table
 * has MAX_ACL_INSTANCES + 1 entries, otherwise this is an off-by-one.
 */
2853 rte_ACL_hi_counter_block_in_use++;
2854 counter_ptr = &rte_acl_counter_table[rte_ACL_hi_counter_block_in_use];
2855 strcpy(counter_ptr->name, params->name);
2856 p_acl->action_counter_index = rte_ACL_hi_counter_block_in_use;
2858 p_acl->counters = counter_ptr;
2860 rte_ct_initialize_default_timeouts(p_acl->cnxn_tracker);
2861 p_acl->arpPktCount = 0;
2863 /* Parse arguments */
2864 if (pipeline_acl_parse_args(p_acl, params))
2866 /*n_flows already checked, ignore Klockwork issue */
2867 if (p_acl->n_flows > 0) {
2868 rte_ct_initialize_cnxn_tracker(p_acl->cnxn_tracker,
2869 p_acl->n_flows, params->name);
2870 p_acl->counters->ct_counters =
2871 rte_ct_get_counter_address(p_acl->cnxn_tracker);
2873 printf("ACL invalid p_acl->n_flows: %u\n", p_acl->n_flows);
/* Build the underlying rte_pipeline object for this BE instance. */
2879 struct rte_pipeline_params pipeline_params = {
2880 .name = params->name,
2881 .socket_id = params->socket_id,
2882 .offset_port_id = META_DATA_OFFSET +
2883 offsetof(struct mbuf_acl_meta_data, output_port),
2886 p->p = rte_pipeline_create(&pipeline_params);
/* Input ports: select the packet-work handler for the configured traffic type. */
2894 p->n_ports_in = params->n_ports_in;
2895 for (i = 0; i < p->n_ports_in; i++) {
2896 struct rte_pipeline_port_in_params port_params = {
2898 pipeline_port_in_params_get_ops(&params->port_in
2901 pipeline_port_in_params_convert(&params->port_in
2903 .f_action = pkt_work_acl_key,
2905 .burst_size = params->port_in[i].burst_size,
2907 if (p_acl->traffic_type == IPv4_HDR_VERSION)
2908 port_params.f_action = pkt_work_acl_ipv4_key;
2910 if (p_acl->traffic_type == IPv6_HDR_VERSION)
2911 port_params.f_action = pkt_work_acl_ipv6_key;
2913 int status = rte_pipeline_port_in_create(p->p,
2918 rte_pipeline_free(p->p);
/* Output ports */
2925 p->n_ports_out = params->n_ports_out;
2926 for (i = 0; i < p->n_ports_out; i++) {
2927 struct rte_pipeline_port_out_params port_params = {
2929 pipeline_port_out_params_get_ops(&params->port_out
2932 pipeline_port_out_params_convert(&params->port_out
2938 int status = rte_pipeline_port_out_create(p->p,
2940 &p->port_out_id[i]);
2943 rte_pipeline_free(p->p);
/* Derive the pipeline number from the instance name, e.g. "PIPELINE3". */
2949 int pipeline_num = 0;
/* NOTE(review): sscanf result 'temp' is never checked — name may not match. */
2951 int temp = sscanf(params->name, "PIPELINE%d", &pipeline_num);
2952 p_acl->pipeline_num = (uint8_t) pipeline_num;
2953 /* set_phy_outport_map(p_acl->pipeline_num, p_acl->links_map);*/
2954 register_pipeline_Qs(p_acl->pipeline_num, p);
2955 set_link_map(p_acl->pipeline_num, p, p_acl->links_map);
2956 set_outport_id(p_acl->pipeline_num, p, p_acl->port_out_id);
2958 /* If this is the first ACL thread, create common ACL Rule tables */
/*
 * NOTE(review): rte_ACL_hi_counter_block_in_use was already incremented
 * above for this instance, so == 0 looks unreachable here — confirm the
 * intended "first instance" sentinel value.
 */
2959 if (rte_ACL_hi_counter_block_in_use == 0) {
2961 printf("Create ACL Tables rte_socket_id(): %i\n",
2964 /* Create IPV4 ACL Rule Tables */
2965 struct rte_table_acl_params common_ipv4_table_acl_params = {
2967 .n_rules = acl_n_rules,
2968 .n_rule_fields = RTE_DIM(field_format_ipv4),
2971 memcpy(common_ipv4_table_acl_params.field_format,
2972 field_format_ipv4, sizeof(field_format_ipv4));
2974 uint32_t ipv4_entry_size = sizeof(struct acl_table_entry);
/* Active/standby table pairs allow rule updates without disrupting traffic. */
2976 acl_rule_table_ipv4_active =
2977 rte_table_acl_ops.f_create(&common_ipv4_table_acl_params,
2981 if (acl_rule_table_ipv4_active == NULL) {
2983 ("Failed to create common ACL IPV4A Rule table\n");
2984 rte_pipeline_free(p->p);
2989 /* Create second IPV4 Table */
2990 common_ipv4_table_acl_params.name = "ACLIPV4B";
2991 acl_rule_table_ipv4_standby =
2992 rte_table_acl_ops.f_create(&common_ipv4_table_acl_params,
2996 if (acl_rule_table_ipv4_standby == NULL) {
2998 ("Failed to create common ACL IPV4B Rule table\n");
2999 rte_pipeline_free(p->p);
3004 /* Create IPV6 ACL Rule Tables */
3005 struct rte_table_acl_params common_ipv6_table_acl_params = {
3007 .n_rules = acl_n_rules,
3008 .n_rule_fields = RTE_DIM(field_format_ipv6),
3011 memcpy(common_ipv6_table_acl_params.field_format,
3012 field_format_ipv6, sizeof(field_format_ipv6));
3014 uint32_t ipv6_entry_size = sizeof(struct acl_table_entry);
3016 acl_rule_table_ipv6_active =
3017 rte_table_acl_ops.f_create(&common_ipv6_table_acl_params,
3021 if (acl_rule_table_ipv6_active == NULL) {
3023 ("Failed to create common ACL IPV6A Rule table\n");
3024 rte_pipeline_free(p->p);
3029 /* Create second IPV6 table */
3030 common_ipv6_table_acl_params.name = "ACLIPV6B";
3031 acl_rule_table_ipv6_standby =
3032 rte_table_acl_ops.f_create(&common_ipv6_table_acl_params,
3036 if (acl_rule_table_ipv6_standby == NULL) {
3038 ("Failed to create common ACL IPV6B Rule table\n");
3039 rte_pipeline_free(p->p);
/* Pipeline's own table is a pass-through stub; ACL lookup happens in f_action. */
3049 struct rte_pipeline_table_params table_params = {
3050 .ops = &rte_table_stub_ops,
3052 .f_action_hit = NULL,
3053 .f_action_miss = NULL,
3055 .action_data_size = 0,
3058 int status = rte_pipeline_table_create(p->p,
3063 rte_pipeline_free(p->p);
/* Default (miss) entry: forward using the per-packet metadata output port. */
3068 struct rte_pipeline_table_entry default_entry = {
3069 .action = RTE_PIPELINE_ACTION_PORT_META
3072 struct rte_pipeline_table_entry *default_entry_ptr;
3074 status = rte_pipeline_table_default_entry_add(p->p,
3077 &default_entry_ptr);
3080 rte_pipeline_free(p->p);
3086 /* Connecting input ports to tables */
3087 for (i = 0; i < p->n_ports_in; i++) {
3088 int status = rte_pipeline_port_in_connect_to_table(p->p,
3095 rte_pipeline_free(p->p);
3101 /* Enable input ports */
3102 for (i = 0; i < p->n_ports_in; i++) {
3103 int status = rte_pipeline_port_in_enable(p->p,
3107 rte_pipeline_free(p->p);
3113 /* Check pipeline consistency */
3114 if (rte_pipeline_check(p->p) < 0) {
3115 rte_pipeline_free(p->p);
3120 /* Message queues */
3121 p->n_msgq = params->n_msgq;
3122 for (i = 0; i < p->n_msgq; i++)
3123 p->msgq_in[i] = params->msgq_in[i];
3124 for (i = 0; i < p->n_msgq; i++)
3125 p->msgq_out[i] = params->msgq_out[i];
3127 /* Message handlers */
3128 memcpy(p->handlers, handlers, sizeof(p->handlers));
3129 memcpy(p_acl->custom_handlers,
3130 custom_handlers, sizeof(p_acl->custom_handlers));
3136 * Free resources and delete pipeline.
3139 * A pointer to the pipeline.
3142 * 0 on success, negative on error.
3144 static int pipeline_acl_free(void *pipeline)
3146 struct pipeline *p = (struct pipeline *)pipeline;
3148 /* Check input arguments */
3152 /* Free resources */
3153 rte_pipeline_free(p->p);
3159 * Callback function to map input/output ports.
3162 * A pointer to the pipeline.
3166 * A pointer to the Output port.
3169 * 0 on success, negative on error.
3172 pipeline_acl_track(void *pipeline,
3173 __rte_unused uint32_t port_in, uint32_t *port_out)
3175 struct pipeline *p = (struct pipeline *)pipeline;
3177 /* Check input arguments */
3178 if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
3181 if (p->n_ports_in == 1) {
3190 * Callback function to process timers.
3193 * A pointer to the pipeline.
3196 * 0 on success, negative on error.
3198 static int pipeline_acl_timer(void *pipeline)
3201 struct pipeline *p = (struct pipeline *)pipeline;
3202 struct pipeline_acl *p_acl = (struct pipeline_acl *)pipeline;
3204 pipeline_msg_req_handle(p);
3205 rte_pipeline_flush(p->p);
3207 rte_ct_handle_expired_timers(p_acl->cnxn_tracker);
3213 * Callback function to process CLI commands from FE.
3216 * A pointer to the pipeline.
3218 * A pointer to command specific data.
3221 * A pointer to message handler on success,
3222 * pipeline_msg_req_invalid_hander on error.
3224 void *pipeline_acl_msg_req_custom_handler(struct pipeline *p, void *msg)
3226 struct pipeline_acl *p_acl = (struct pipeline_acl *)p;
3227 struct pipeline_custom_msg_req *req = msg;
3228 pipeline_msg_req_handler f_handle;
3230 f_handle = (req->subtype < PIPELINE_ACL_MSG_REQS) ?
3231 p_acl->custom_handlers[req->subtype] :
3232 pipeline_msg_req_invalid_handler;
3234 if (f_handle == NULL)
3235 f_handle = pipeline_msg_req_invalid_handler;
3237 return f_handle(p, req);
3241 * Handler for DBG CLI command.
3244 * A pointer to the pipeline.
3246 * A pointer to command specific data.
3249 * A pointer to response message.
3250 * Response message contains status.
/*
 * Handler for the DBG CLI command: turns ACL debug output on (1) or
 * off (0); any other value is rejected. The request and response share
 * the same message buffer (rsp aliases req), and the response carries
 * the resulting status.
 * NOTE(review): the debug-flag assignments and rsp->status writes are
 * among the lines missing from this extraction — confirm against the
 * original file before relying on this block.
 */
3252 void *pipeline_acl_msg_req_dbg_handler(struct pipeline *p, void *msg)
3255 struct pipeline_acl_dbg_msg_req *req = msg;
3256 struct pipeline_acl_dbg_msg_rsp *rsp = msg;
3258 if (req->dbg == 0) {
3259 printf("DBG turned OFF\n");
3262 } else if (req->dbg == 1) {
3263 printf("DBG turned ON\n");
3267 printf("Invalid DBG setting\n");
3274 struct pipeline_be_ops pipeline_acl_be_ops = {
3275 .f_init = pipeline_acl_init,
3276 .f_free = pipeline_acl_free,
3278 .f_timer = pipeline_acl_timer,
3279 .f_track = pipeline_acl_track,