2 // Copyright (c) 2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
19 * Pipeline ACL BE Implementation.
21 * Implementation of Pipeline ACL Back End (BE).
22 * Responsible for packet processing.
27 #include <rte_common.h>
28 #include <rte_malloc.h>
29 #include <rte_ether.h>
32 #include <rte_byteorder.h>
33 #include <rte_table_acl.h>
34 #include <rte_table_stub.h>
35 #include "pipeline_arpicmp_be.h"
36 #include "vnf_common.h"
37 #include "pipeline_common_be.h"
38 #include <rte_pipeline.h>
41 #include <rte_timer.h>
42 #include <rte_cycles.h>
44 #include "pipeline_acl.h"
45 #include "pipeline_acl_be.h"
46 #include "rte_cnxn_tracking.h"
47 #include "pipeline_actions_common.h"
49 #include "lib_icmpv6.h"
50 static uint8_t acl_prv_que_port_index[PIPELINE_MAX_PORT_IN];
51 extern void convert_prefixlen_to_netmask_ipv6(uint32_t depth,
52 uint8_t netmask_ipv6[]);
59 * A structure defining the ACL pipeline per thread data.
63 pipeline_msg_req_handler custom_handlers[PIPELINE_ACL_MSG_REQS];
66 uint32_t n_rule_fields;
67 struct rte_acl_field_def *field_format;
68 uint32_t field_format_size;
70 /* Connection Tracker */
71 struct rte_ct_cnxn_tracker *cnxn_tracker;
72 struct rte_ACL_counter_block *counters;
73 int action_counter_index;
74 /* timestamp retrieved during in-port computations */
75 uint64_t in_port_time_stamp;
80 uint8_t links_map[PIPELINE_MAX_PORT_IN];
81 uint8_t port_out_id[PIPELINE_MAX_PORT_IN];
83 struct acl_table_entry *acl_entries_ipv4[RTE_PORT_IN_BURST_SIZE_MAX];
84 struct acl_table_entry *acl_entries_ipv6[RTE_PORT_IN_BURST_SIZE_MAX];
86 /* Local ARP & ND Tables */
87 struct lib_arp_route_table_entry
88 local_lib_arp_route_table[MAX_ARP_RT_ENTRY];
89 uint8_t local_lib_arp_route_ent_cnt;
90 struct lib_nd_route_table_entry
91 local_lib_nd_route_table[MAX_ND_RT_ENTRY];
92 uint8_t local_lib_nd_route_ent_cnt;
94 } __rte_cache_aligned;
97 * A structure defining the mbuf meta data for ACL.
/*
 * NOTE(review): this file appears to be a line-numbered extract with lines
 * dropped — the embedded numbering jumps 102 -> 104 here, so the field that
 * the "next hop ip address" comment documents is missing from this copy.
 * Recover the complete struct from version control before changing it; the
 * META_DATA_OFFSET consumers elsewhere depend on its exact layout.
 */
99 struct mbuf_acl_meta_data {
100 /* output port stored for RTE_PIPELINE_ACTION_PORT_META */
101 uint32_t output_port;
102 /* next hop ip address used by ARP code */
104 } __rte_cache_aligned;
/* Byte offset into the mbuf headroom where ACL metadata is stored. */
106 #define META_DATA_OFFSET 128
/*
 * NOTE(review): the embedded numbering jumps 108 -> 110 and 123 -> 126, so
 * the terminating "__rte_cache_aligned;" of the two counter tables (and any
 * second array dimension initializer) are missing from this extract.
 */
108 struct rte_ACL_counter_block rte_acl_counter_table[MAX_ACL_INSTANCES]
/* Highest counter-block index handed out so far; -1 means none in use. */
110 int rte_ACL_hi_counter_block_in_use = -1;
112 /* a spin lock used during acl initialization only */
113 rte_spinlock_t rte_ACL_init_lock = RTE_SPINLOCK_INITIALIZER;
/* Double-buffered action table: one array active, one standby for updates. */
116 struct pipeline_action_key *action_array_a;
117 struct pipeline_action_key *action_array_b;
118 struct pipeline_action_key *action_array_active;
119 struct pipeline_action_key *action_array_standby;
120 uint32_t action_array_size;
/* Per-instance, per-action packet/byte counters (see truncation note above). */
122 struct action_counter_block
123 action_counter_table[MAX_ACL_INSTANCES][action_array_max]
126 static void *pipeline_acl_msg_req_custom_handler(struct pipeline *p, void *msg);
128 static pipeline_msg_req_handler handlers[] = {
129 [PIPELINE_MSG_REQ_PING] = pipeline_msg_req_ping_handler,
130 [PIPELINE_MSG_REQ_STATS_PORT_IN] =
131 pipeline_msg_req_stats_port_in_handler,
132 [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
133 pipeline_msg_req_stats_port_out_handler,
134 [PIPELINE_MSG_REQ_STATS_TABLE] = pipeline_msg_req_stats_table_handler,
135 [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
136 pipeline_msg_req_port_in_enable_handler,
137 [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
138 pipeline_msg_req_port_in_disable_handler,
139 [PIPELINE_MSG_REQ_CUSTOM] = pipeline_acl_msg_req_custom_handler,
142 static void *pipeline_acl_msg_req_dbg_handler(struct pipeline *p, void *msg);
144 static pipeline_msg_req_handler custom_handlers[] = {
145 [PIPELINE_ACL_MSG_REQ_DBG] = pipeline_acl_msg_req_dbg_handler,
147 uint64_t arp_pkts_mask;
/*
 * NOTE(review): fragment of an IPv4 next-hop lookup over the thread-local
 * ARP route table. The embedded numbering jumps (150 -> 152, 152 -> 156,
 * 160 -> 162, 162 -> 169), so the "port" output parameter declaration, the
 * opening brace/locals, and every return path are missing from this copy.
 * Do not edit without recovering the complete function from version control.
 */
150 uint32_t local_get_nh_ipv4(uint32_t ip,
152 uint32_t *nhip, struct pipeline_acl *p_acl)
/* Linear scan: an entry matches when (ip & entry.mask) == (entry.ip & entry.mask);
 * presumably the first matching entry wins — return statement not visible here. */
156 for (i = 0; i < p_acl->local_lib_arp_route_ent_cnt; i++) {
157 if (((p_acl->local_lib_arp_route_table[i].ip &
158 p_acl->local_lib_arp_route_table[i].mask) ==
159 (ip & p_acl->local_lib_arp_route_table[i].mask))) {
/* On match, report the entry's egress port and next-hop IP to the caller. */
160 *port = p_acl->local_lib_arp_route_table[i].port;
162 *nhip = p_acl->local_lib_arp_route_table[i].nh;
/*
 * NOTE(review): fragment of an IPv6 next-hop lookup over the thread-local
 * ND route table. The embedded numbering jumps (169 -> 171, 178 -> 180,
 * 186 -> 189, 195 -> 202, 206 -> 208, 208 -> 219), so the "port" output
 * parameter, the depthflags accounting, the nhip copy-out and all return
 * paths are missing from this copy. Recover the full function before editing.
 */
169 static uint32_t local_get_nh_ipv6(uint8_t *ip,
171 uint8_t nhip[], struct pipeline_acl *p_acl)
/* Scratch: computed netmask, masked route address, masked input address. */
174 uint8_t netmask_ipv6[16],netip_nd[16],netip_in[16];
175 uint8_t k = 0, l = 0, depthflags = 0, depthflags1 = 0;
176 memset (netmask_ipv6, 0, sizeof(netmask_ipv6));
177 memset (netip_nd, 0, sizeof(netip_nd));
178 memset (netip_in, 0, sizeof(netip_in));
/* Walk every ND route entry; match criteria are applied per entry below. */
180 for (i = 0; i < p_acl->local_lib_nd_route_ent_cnt; i++) {
/* Expand the entry's prefix length into a byte-wise IPv6 netmask. */
182 convert_prefixlen_to_netmask_ipv6
183 (p_acl->local_lib_nd_route_table[i].depth, netmask_ipv6);
/* Collect the route's address bytes covered by the netmask. */
185 for (k = 0; k < 16; k++)
186 if (p_acl->local_lib_nd_route_table[i].ipv6[k] &
189 netip_nd[k] = p_acl->
190 local_lib_nd_route_table[i].ipv6[k];
/* Same masking applied to the input address (body truncated in this copy). */
194 for (l = 0; l < 16; l++)
195 if (ip[l] & netmask_ipv6[l]) {
/* Match: equal masked-byte counts and identical masked address bytes. */
202 if ((depthflags == depthflags1) && (memcmp(netip_nd, netip_in,
203 sizeof(netip_nd)) == 0)){
204 *port = p_acl->local_lib_nd_route_table[i].port;
/* Copy-out of the 16-byte next-hop address begins here (cut off below). */
206 for (j = 0; j < 16; j++)
208 p_acl->local_lib_nd_route_table[i].
/*
 * NOTE(review): fragment of the ARP/ICMP diversion filter. The embedded
 * numbering has large gaps (226 -> 228, 239 -> 242, 248 -> 253, 275 -> 287,
 * 312 -> 322, 338 -> 353), so the switch-case labels (presumably
 * ETHER_TYPE_ARP / ETHER_TYPE_IPv4 / ETHER_TYPE_IPv6 — confirm against the
 * original), most closing braces, and the function's return statements are
 * missing from this copy. Recover the full function before editing.
 * From what is visible: matching ARP/ICMP/ICMPv6 packets addressed to this
 * link are re-inserted on the pipeline's last output port (out_port) and
 * arpPktCount is incremented.
 */
219 static uint8_t check_arp_icmp(struct rte_mbuf *pkt,
220 uint64_t pkt_mask, struct pipeline_acl *p_acl)
/* Ethertype lives 12 bytes into the Ethernet header within mbuf headroom. */
222 uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
223 struct ipv6_hdr *ipv6_h;
224 uint16_t *eth_proto =
225 RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
226 struct app_link_params *link;
228 //uint32_t *port_out_id = RTE_MBUF_METADATA_UINT32_PTR(pk
229 // offsetof(struct mbuf_acl_meta_dat
231 /* ARP outport number */
232 uint16_t out_port = p_acl->p.n_ports_out - 1;
235 uint32_t prot_offset;
/* Per-link config (carries the link's own IP) for the packet's input port. */
237 link = &myApp->link_params[pkt->port];
/* Dispatch on Ethertype (case labels missing from this extract). */
239 switch (rte_be_to_cpu_16(*eth_proto)) {
242 rte_pipeline_port_out_packet_insert(p_acl->p.p, out_port, pkt);
245 * Pkt mask should be changed, and not changing the
248 p_acl->arpPktCount++;
253 /* header room + eth hdr size +
254 * src_aadr offset in ip header
256 uint32_t dst_addr_offset = MBUF_HDR_ROOM +
257 ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
258 uint32_t *dst_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt,
260 prot_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE +
261 IP_HDR_PROTOCOL_OFST;
262 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt,
/* ICMP aimed at this link's own IP: divert to out_port on private ports. */
264 if ((*protocol == IP_PROTOCOL_ICMP) &&
265 link->ip == rte_be_to_cpu_32(*dst_addr)) {
267 if (is_phy_port_privte(pkt->port)) {
269 rte_pipeline_port_out_packet_insert
270 (p_acl->p.p, out_port, pkt);
272 * Pkt mask should be changed,
273 * and not changing the drop mask
275 p_acl->arpPktCount++;
287 uint32_t dst_addr_offset = MBUF_HDR_ROOM +
288 ETH_HDR_SIZE + IPV6_HDR_DST_ADR_OFST;
289 uint32_t *dst_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt,
292 uint32_t prot_offset_ipv6 = MBUF_HDR_ROOM +
293 ETH_HDR_SIZE + IPV6_HDR_PROTOCOL_OFST;
294 struct ipv6_hdr *ipv6_h;
/* NOTE(review): "(struct ipv6_hdr *)MBUF_HDR_ROOM +" looks like pointer
 * arithmetic on a cast constant — likely a pre-existing bug or a line torn
 * in half by the extraction; verify against the original source. */
296 ipv6_h = (struct ipv6_hdr *)MBUF_HDR_ROOM +
298 protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt,
/* ICMPv6 addressed to this link (last 32-bit word compared): divert. */
301 if ((ipv6_h->proto == ICMPV6_PROTOCOL_ID) &&
302 (link->ip == rte_be_to_cpu_32(dst_addr[3]))) {
304 if (is_phy_port_privte(pkt->port)) {
306 rte_pipeline_port_out_packet_insert
307 (p_acl->p.p, out_port, pkt);
309 * Pkt mask should be changed,
310 * and not changing the drop mask
312 p_acl->arpPktCount++;
/* Start of L3 header = headroom + Ethernet header. */
322 #define IP_START (MBUF_HDR_ROOM + ETH_HDR_SIZE)
325 ipv6_h = (struct ipv6_hdr *)
326 RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
328 if ((ipv6_h->proto == ICMPV6_PROTOCOL_ID) &&
330 rte_be_to_cpu_32(ipv6_h->dst_addr[3]))) {
332 if (is_phy_port_privte(pkt->port)) {
333 rte_pipeline_port_out_packet_insert(
338 p_acl->arpPktCount++;
353 * Print packet for debugging.
356 * A pointer to the packet.
/*
 * Debug helper: hex-dump the first 400 bytes of the mbuf (20 rows of 20).
 * NOTE(review): fragment — the embedded numbering jumps (359 -> 363,
 * 368 -> 374), so the opening brace, the declarations of i/j, the per-row
 * newline and the closing braces are missing from this copy.
 */
359 void print_pkt_acl(struct rte_mbuf *pkt)
363 printf("Packet Contents:\n");
/* Raw mbuf bytes starting at offset 0 (includes headroom/metadata). */
364 uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
366 for (i = 0; i < 20; i++) {
367 for (j = 0; j < 20; j++)
368 printf("%02x ", rd[(20 * i) + j]);
374 * Main packet processing function.
375 * 64 packet bit mask are used to identify which packets to forward.
376 * Performs the following:
377 * - Burst lookup packets in the IPv4 ACL Rule Table.
378 * - Burst lookup packets in the IPv6 ACL Rule Table.
379 * - Lookup Action Table, perform actions.
380 * - Burst lookup Connection Tracking, if enabled.
381 * - Lookup MAC address.
383 * - Packets with bit mask set are forwarded
386 * A pointer to the pipeline.
388 * A pointer to a burst of packets.
390 * Number of packets to process.
392 * A pointer to pipeline specific data.
395 * 0 on success, negative on error.
398 pkt_work_acl_key(struct rte_pipeline *p,
399 struct rte_mbuf **pkts, uint32_t n_pkts, void *arg)
402 struct pipeline_acl *p_acl = arg;
404 p_acl->counters->pkts_received =
405 p_acl->counters->pkts_received + n_pkts;
407 printf("pkt_work_acl_key pkts_received: %" PRIu64
408 " n_pkts: %u\n", p_acl->counters->pkts_received, n_pkts);
410 uint64_t lookup_hit_mask = 0;
411 uint64_t lookup_hit_mask_ipv4 = 0;
412 uint64_t lookup_hit_mask_ipv6 = 0;
413 uint64_t lookup_miss_mask = 0;
414 uint64_t conntrack_mask = 0;
415 uint64_t connexist_mask = 0;
416 uint32_t dest_address = 0;
420 uint64_t pkts_drop_mask, pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
421 uint64_t keep_mask = pkts_mask;
425 p_acl->in_port_time_stamp = rte_get_tsc_cycles();
427 if (acl_ipv4_enabled) {
429 printf("ACL IPV4 Lookup Mask Before = %p\n",
432 rte_table_acl_ops.f_lookup(acl_rule_table_ipv4_active, pkts,
433 pkts_mask, &lookup_hit_mask_ipv4,
435 p_acl->acl_entries_ipv4);
437 printf("ACL IPV4 Lookup Mask After = %p\n",
438 (void *)lookup_hit_mask_ipv4);
441 if (acl_ipv6_enabled) {
443 printf("ACL IPV6 Lookup Mask Before = %p\n",
446 rte_table_acl_ops.f_lookup(acl_rule_table_ipv6_active, pkts,
447 pkts_mask, &lookup_hit_mask_ipv6,
449 p_acl->acl_entries_ipv6);
451 printf("ACL IPV6 Lookup Mask After = %p\n",
452 (void *)lookup_hit_mask_ipv6);
455 /* Merge lookup results since we process both IPv4 and IPv6 below */
456 lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
458 printf("ACL Lookup Mask After = %p\n", (void *)lookup_hit_mask);
460 lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
461 pkts_mask = lookup_hit_mask;
462 p_acl->counters->pkts_drop += __builtin_popcountll(lookup_miss_mask);
464 printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
465 p_acl->counters->pkts_drop,
466 __builtin_popcountll(lookup_miss_mask));
468 uint64_t pkts_to_process = lookup_hit_mask;
469 /* bitmap of packets left to process for ARP */
471 for (; pkts_to_process;) {
472 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
473 uint64_t pkt_mask = 1LLU << pos;
474 /* bitmask representing only this packet */
476 pkts_to_process &= ~pkt_mask;
477 /* remove this packet from remaining list */
478 struct rte_mbuf *pkt = pkts[pos];
481 if (!check_arp_icmp(pkt, pkt_mask, p_acl)) {
482 pkts_mask &= ~(1LLU << pos);
487 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
488 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
490 if (hdr_chk == IPv4_HDR_VERSION) {
492 struct acl_table_entry *entry =
493 (struct acl_table_entry *)
494 p_acl->acl_entries_ipv4[pos];
495 uint16_t phy_port = entry->head.port_id;
496 uint32_t action_id = entry->action_id;
499 printf("action_id = %u\n", action_id);
501 uint32_t dscp_offset =
502 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DSCP_OFST;
504 if (action_array_active[action_id].action_bitmap &
507 [p_acl->action_counter_index]
508 [action_id].packetCount++;
510 [p_acl->action_counter_index]
511 [action_id].byteCount +=
512 rte_pktmbuf_pkt_len(pkt);
514 printf("Action Count Packet Count: %"
515 PRIu64 " Byte Count: %" PRIu64
518 [p_acl->action_counter_index]
519 [action_id].packetCount,
521 [p_acl->action_counter_index]
522 [action_id].byteCount);
525 if (action_array_active[action_id].action_bitmap &
526 acl_action_packet_drop) {
528 /* Drop packet by changing the mask */
530 printf("ACL before drop pkt_mask "
531 " %lu, pkt_num %d\n",
533 pkts_mask &= ~(1LLU << pos);
535 printf("ACL after drop pkt_mask "
538 p_acl->counters->pkts_drop++;
541 if (action_array_active[action_id].action_bitmap &
544 action_array_active[action_id].fwd_port;
545 entry->head.port_id = phy_port;
547 printf("Action FWD Port ID: %u\n",
551 if (action_array_active[action_id].action_bitmap &
554 action_array_active[action_id].nat_port;
555 entry->head.port_id = phy_port;
557 printf("Action NAT Port ID: %u\n",
561 if (action_array_active[action_id].action_bitmap &
564 /* Set DSCP priority */
565 uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
568 action_array_active[action_id].dscp_priority
572 ("Action DSCP DSCP Priority: %u\n",
576 if (action_array_active[action_id].action_bitmap &
577 acl_action_packet_accept) {
579 printf("Action Accept\n");
581 if (action_array_active[action_id].action_bitmap
582 & acl_action_conntrack) {
584 /* Set conntrack bit for this pkt */
585 conntrack_mask |= pkt_mask;
587 printf("ACL Conntrack enabled: "
589 (void *)conntrack_mask,
593 if (action_array_active[action_id].action_bitmap
594 & acl_action_connexist) {
596 /* Set conntrack bit for this pkt */
597 conntrack_mask |= pkt_mask;
599 /* Set connexist bit for this pkt for public -> private */
600 /* Private -> public packet will open the connection */
601 if (action_array_active
602 [action_id].private_public ==
604 connexist_mask |= pkt_mask;
607 printf("ACL Connexist enabled "
608 "conntrack: %p connexist: %p pkt_mask: %p\n",
609 (void *)conntrack_mask,
610 (void *)connexist_mask,
616 if (hdr_chk == IPv6_HDR_VERSION) {
618 struct acl_table_entry *entry =
619 (struct acl_table_entry *)
620 p_acl->acl_entries_ipv6[pos];
621 uint16_t phy_port = entry->head.port_id;
622 uint32_t action_id = entry->action_id;
625 printf("action_id = %u\n", action_id);
627 if (action_array_active[action_id].action_bitmap &
630 [p_acl->action_counter_index]
631 [action_id].packetCount++;
633 [p_acl->action_counter_index]
634 [action_id].byteCount +=
635 rte_pktmbuf_pkt_len(pkt);
637 printf("Action Count Packet Count: %"
638 PRIu64 " Byte Count: %" PRIu64
641 [p_acl->action_counter_index]
642 [action_id].packetCount,
644 [p_acl->action_counter_index]
645 [action_id].byteCount);
648 if (action_array_active[action_id].action_bitmap &
649 acl_action_packet_drop) {
650 /* Drop packet by changing the mask */
652 printf("ACL before drop pkt_mask "
655 pkts_mask &= ~(1LLU << pos);
657 printf("ACL after drop pkt_mask "
660 p_acl->counters->pkts_drop++;
664 if (action_array_active[action_id].action_bitmap &
667 action_array_active[action_id].fwd_port;
668 entry->head.port_id = phy_port;
670 printf("Action FWD Port ID: %u\n",
674 if (action_array_active[action_id].action_bitmap &
677 action_array_active[action_id].nat_port;
678 entry->head.port_id = phy_port;
680 printf("Action NAT Port ID: %u\n",
684 if (action_array_active[action_id].action_bitmap &
687 /* Set DSCP priority */
688 uint32_t dscp_offset =
689 MBUF_HDR_ROOM + ETH_HDR_SIZE +
690 IP_HDR_DSCP_OFST_IPV6;
692 RTE_MBUF_METADATA_UINT16_PTR(pkt,
694 uint16_t dscp_value =
696 (RTE_MBUF_METADATA_UINT16
697 (pkt, dscp_offset)) & 0XF00F);
699 action_array_active[action_id].dscp_priority
701 uint16_t dscp_temp = dscp_store;
703 dscp_temp = dscp_temp << 4;
704 *dscp = rte_bswap16(dscp_temp | dscp_value);
707 ("Action DSCP DSCP Priority: %u\n",
711 if (action_array_active[action_id].action_bitmap &
712 acl_action_packet_accept) {
714 printf("Action Accept\n");
716 if (action_array_active[action_id].action_bitmap
717 & acl_action_conntrack) {
719 /* Set conntrack bit for this pkt */
720 conntrack_mask |= pkt_mask;
722 printf("ACL Conntrack enabled: "
723 " %p pkt_mask: %p\n",
724 (void *)conntrack_mask,
728 if (action_array_active[action_id].action_bitmap
729 & acl_action_connexist) {
731 /* Set conntrack bit for this pkt */
732 conntrack_mask |= pkt_mask;
734 /* Set connexist bit for this pkt for public -> private */
735 /* Private -> public packet will open the connection */
736 if (action_array_active
737 [action_id].private_public ==
739 connexist_mask |= pkt_mask;
742 printf("ACL Connexist enabled "
743 "conntrack: %p connexist: %p pkt_mask: %p\n",
744 (void *)conntrack_mask,
745 (void *)connexist_mask,
752 /* Only call connection tracker if required */
753 if (conntrack_mask > 0) {
756 ("ACL Call Conntrack Before = %p Connexist = %p\n",
757 (void *)conntrack_mask, (void *)connexist_mask);
759 rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control
760 (p_acl->cnxn_tracker, pkts, conntrack_mask, connexist_mask);
762 printf("ACL Call Conntrack After = %p\n",
763 (void *)conntrack_mask);
765 /* Only change pkt mask for pkts that have conntrack enabled */
766 /* Need to loop through packets to check if conntrack enabled */
767 pkts_to_process = pkts_mask;
768 for (; pkts_to_process;) {
769 uint32_t action_id = 0;
771 (uint8_t) __builtin_ctzll(pkts_to_process);
772 uint64_t pkt_mask = 1LLU << pos;
773 /* bitmask representing only this packet */
775 pkts_to_process &= ~pkt_mask;
776 /* remove this packet from remaining list */
777 struct rte_mbuf *pkt = pkts[pos];
779 uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt,
784 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
785 if (hdr_chk == IPv4_HDR_VERSION) {
786 struct acl_table_entry *entry =
787 (struct acl_table_entry *)
788 p_acl->acl_entries_ipv4[pos];
789 action_id = entry->action_id;
791 struct acl_table_entry *entry =
792 (struct acl_table_entry *)
793 p_acl->acl_entries_ipv6[pos];
794 action_id = entry->action_id;
797 if ((action_array_active[action_id].action_bitmap &
798 acl_action_conntrack)
799 || (action_array_active[action_id].action_bitmap &
800 acl_action_connexist)) {
802 if (conntrack_mask & pkt_mask) {
804 printf("ACL Conntrack Accept "
808 /* Drop packet by changing the mask */
810 printf("ACL Conntrack Drop "
813 pkts_mask &= ~pkt_mask;
814 p_acl->counters->pkts_drop++;
820 pkts_to_process = pkts_mask;
821 /* bitmap of packets left to process for ARP */
823 for (; pkts_to_process;) {
824 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
825 uint64_t pkt_mask = 1LLU << pos;
826 /* bitmask representing only this packet */
828 pkts_to_process &= ~pkt_mask;
829 /* remove this packet from remaining list */
830 struct rte_mbuf *pkt = pkts[pos];
833 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
834 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
836 if (hdr_chk == IPv4_HDR_VERSION) {
838 struct acl_table_entry *entry =
839 (struct acl_table_entry *)
840 p_acl->acl_entries_ipv4[pos];
841 uint16_t phy_port = pkt->port;
842 uint32_t *port_out_id =
843 RTE_MBUF_METADATA_UINT32_PTR(pkt,
850 ("phy_port = %i, links_map[phy_port] = %i\n",
851 phy_port, p_acl->links_map[phy_port]);
853 /* header room + eth hdr size + dst_adr offset in ip header */
854 uint32_t dst_addr_offset =
855 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
857 RTE_MBUF_METADATA_UINT32_PTR(pkt, dst_addr_offset);
859 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
861 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
862 struct ether_addr hw_addr;
863 uint32_t dest_address = rte_bswap32(*dst_addr);
864 uint32_t *nhip = RTE_MBUF_METADATA_UINT32_PTR(pkt,
871 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
873 struct arp_entry_data *ret_arp_data = NULL;
874 ret_arp_data = get_dest_mac_addr_port
875 (dest_address, &dest_if, (struct ether_addr *) eth_dest);
876 *port_out_id = p_acl->port_out_id[dest_if];
877 if (arp_cache_dest_mac_present(dest_if)) {
878 ether_addr_copy(get_link_hw_addr(dest_if),
879 (struct ether_addr *)eth_src);
880 update_nhip_access(dest_if);
881 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
882 printf("sending buffered packets\n");
883 arp_send_buffered_pkts(ret_arp_data,
884 (struct ether_addr *)eth_dest, *port_out_id);
888 if (unlikely(ret_arp_data == NULL)) {
890 printf("%s: NHIP Not Found, "
891 "outport_id: %d\n", __func__,
895 pkts_mask &= ~(1LLU << pos);
897 printf("ACL after drop pkt_mask "
900 p_acl->counters->pkts_drop++;
904 if (ret_arp_data->status == INCOMPLETE ||
905 ret_arp_data->status == PROBE) {
906 if (ret_arp_data->num_pkts >= NUM_DESC) {
908 pkts_mask &= ~(1LLU << pos);
910 printf("ACL after drop pkt_mask "
913 p_acl->counters->pkts_drop++;
916 arp_pkts_mask |= pkt_mask;
917 arp_queue_unresolved_packet(ret_arp_data,
924 } /* end of if (hdr_chk == IPv4_HDR_VERSION) */
926 if (hdr_chk == IPv6_HDR_VERSION) {
928 struct acl_table_entry *entry =
929 (struct acl_table_entry *)
930 p_acl->acl_entries_ipv6[pos];
931 //uint16_t phy_port = entry->head.port_id;
932 uint16_t phy_port = pkt->port;
933 uint32_t *port_out_id =
934 RTE_MBUF_METADATA_UINT32_PTR(pkt,
939 /*if (is_phy_port_privte(phy_port))
940 *port_out_id = ACL_PUB_PORT_ID;
942 *port_out_id = ACL_PRV_PORT_ID;*/
944 /* *port_out_id = p_acl->links_map[phy_port]; */
946 printf("phy_port = %i, "
947 "links_map[phy_port] = %i\n",
948 phy_port, p_acl->links_map[phy_port]);
950 /* header room + eth hdr size + dst_adr offset in ip header */
951 uint32_t dst_addr_offset =
952 MBUF_HDR_ROOM + ETH_HDR_SIZE +
953 IP_HDR_DST_ADR_OFST_IPV6;
955 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
957 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
958 struct ether_addr hw_addr;
959 uint8_t dest_address[16];
963 RTE_MBUF_METADATA_UINT8(pkt,
968 uint8_t *dst_addr[16];
969 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
972 for (i = 0; i < 16; i++) {
974 RTE_MBUF_METADATA_UINT8_PTR(pkt,
978 memcpy(dest_address, *dst_addr, sizeof(dest_address));
979 memset(nhip, 0, sizeof(nhip));
981 struct nd_entry_data *ret_nd_data = NULL;
982 ret_nd_data = get_dest_mac_address_ipv6_port
983 (dest_address, &dest_if, &hw_addr, &nhip[0]);
984 *port_out_id = p_acl->port_out_id[dest_if];
985 if (nd_cache_dest_mac_present(dest_if)) {
986 ether_addr_copy(get_link_hw_addr(dest_if),
987 (struct ether_addr *)eth_src);
988 update_nhip_access(dest_if);
990 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
991 printf("sending buffered packets\n");
992 p_acl->counters->tpkts_processed +=
993 ret_nd_data->num_pkts;
994 nd_send_buffered_pkts(ret_nd_data,
995 (struct ether_addr *)eth_dest, *port_out_id);
998 if (unlikely(ret_nd_data == NULL)) {
1000 printf("ACL before drop pkt_mask "
1001 "%lu, pkt_num %d\n", pkts_mask, pos);
1002 pkts_mask &= ~(1LLU << pos);
1004 printf("ACL after drop pkt_mask "
1005 "%lu, pkt_num %d\n", pkts_mask, pos);
1006 p_acl->counters->pkts_drop++;
1010 if (ret_nd_data->status == INCOMPLETE ||
1011 ret_nd_data->status == PROBE) {
1012 if (ret_nd_data->num_pkts >= NUM_DESC) {
1015 printf("ACL before drop pkt_mask "
1016 "%lu, pkt_num %d\n", pkts_mask, pos);
1017 pkts_mask &= ~(1LLU << pos);
1019 printf("ACL after drop pkt_mask "
1020 "%lu, pkt_num %d\n", pkts_mask, pos);
1021 p_acl->counters->pkts_drop++;
1024 arp_pkts_mask |= pkt_mask;
1025 nd_queue_unresolved_packet(ret_nd_data,
1033 } /* if (hdr_chk == IPv6_HDR_VERSION) */
1037 pkts_drop_mask = keep_mask & ~pkts_mask;
1038 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1039 keep_mask = pkts_mask;
1041 if (arp_pkts_mask) {
1042 keep_mask &= ~(arp_pkts_mask);
1043 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
1046 /* don't bother measuring if traffic very low, might skew stats */
1047 uint32_t packets_this_iteration = __builtin_popcountll(pkts_mask);
1049 if (packets_this_iteration > 1) {
1050 uint64_t latency_this_iteration =
1051 rte_get_tsc_cycles() - p_acl->in_port_time_stamp;
1053 p_acl->counters->sum_latencies += latency_this_iteration;
1054 p_acl->counters->count_latencies++;
1058 printf("Leaving pkt_work_acl_key pkts_mask = %p\n",
1065 * Main packet processing function.
1066 * 64 packet bit mask are used to identify which packets to forward.
1067 * Performs the following:
1068 * - Burst lookup packets in the IPv4 ACL Rule Table.
1069 * - Burst lookup packets in the IPv6 ACL Rule Table.
1070 * - Lookup Action Table, perform actions.
1071 * - Burst lookup Connection Tracking, if enabled.
1072 * - Lookup MAC address.
1074 * - Packets with bit mask set are forwarded
1077 * A pointer to the pipeline.
1079 * A pointer to a burst of packets.
1081 * Number of packets to process.
1083 * A pointer to pipeline specific data.
1086 * 0 on success, negative on error.
1089 pkt_work_acl_ipv4_key(struct rte_pipeline *p,
1090 struct rte_mbuf **pkts, uint32_t n_pkts, void *arg)
1093 struct pipeline_acl *p_acl = arg;
1095 p_acl->counters->pkts_received =
1096 p_acl->counters->pkts_received + n_pkts;
1098 printf("pkt_work_acl_key pkts_received: %" PRIu64
1099 " n_pkts: %u\n", p_acl->counters->pkts_received, n_pkts);
1101 uint64_t lookup_hit_mask = 0;
1102 uint64_t lookup_hit_mask_ipv4 = 0;
1103 uint64_t lookup_hit_mask_ipv6 = 0;
1104 uint64_t lookup_miss_mask = 0;
1105 uint64_t conntrack_mask = 0;
1106 uint64_t connexist_mask = 0;
1107 uint32_t dest_address = 0;
1111 uint64_t pkts_drop_mask, pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
1112 uint64_t keep_mask = pkts_mask;
1116 p_acl->in_port_time_stamp = rte_get_tsc_cycles();
1118 if (acl_ipv4_enabled) {
1120 printf("ACL IPV4 Lookup Mask Before = %p\n",
1123 rte_table_acl_ops.f_lookup(acl_rule_table_ipv4_active, pkts,
1124 pkts_mask, &lookup_hit_mask_ipv4,
1126 p_acl->acl_entries_ipv4);
1128 printf("ACL IPV4 Lookup Mask After = %p\n",
1129 (void *)lookup_hit_mask_ipv4);
1132 /* Merge lookup results since we process both IPv4 and IPv6 below */
1133 lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
1135 printf("ACL Lookup Mask After = %p\n", (void *)lookup_hit_mask);
1137 lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
1138 pkts_mask = lookup_hit_mask;
1139 p_acl->counters->pkts_drop += __builtin_popcountll(lookup_miss_mask);
1141 printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
1142 p_acl->counters->pkts_drop,
1143 __builtin_popcountll(lookup_miss_mask));
1145 uint64_t pkts_to_process = lookup_hit_mask;
1146 /* bitmap of packets left to process for ARP */
1148 for (; pkts_to_process;) {
1149 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
1150 uint64_t pkt_mask = 1LLU << pos;
1151 /* bitmask representing only this packet */
1153 pkts_to_process &= ~pkt_mask;
1154 /* remove this packet from remaining list */
1155 struct rte_mbuf *pkt = pkts[pos];
1158 if (!check_arp_icmp(pkt, pkt_mask, p_acl)) {
1159 pkts_mask &= ~(1LLU << pos);
1164 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
1165 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1167 if (hdr_chk == IPv4_HDR_VERSION) {
1168 struct acl_table_entry *entry =
1169 (struct acl_table_entry *)
1170 p_acl->acl_entries_ipv4[pos];
1171 uint16_t phy_port = entry->head.port_id;
1172 uint32_t action_id = entry->action_id;
1175 printf("action_id = %u\n", action_id);
1177 uint32_t dscp_offset =
1178 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DSCP_OFST;
1180 if (action_array_active[action_id].action_bitmap &
1182 action_counter_table
1183 [p_acl->action_counter_index]
1184 [action_id].packetCount++;
1185 action_counter_table
1186 [p_acl->action_counter_index]
1187 [action_id].byteCount +=
1188 rte_pktmbuf_pkt_len(pkt);
1190 printf("Action Count Packet Count: %"
1191 PRIu64 " Byte Count: %" PRIu64
1193 action_counter_table
1194 [p_acl->action_counter_index]
1195 [action_id].packetCount,
1196 action_counter_table
1197 [p_acl->action_counter_index]
1198 [action_id].byteCount);
1201 if (action_array_active[action_id].action_bitmap &
1202 acl_action_packet_drop) {
1204 /* Drop packet by changing the mask */
1206 printf("ACL before drop pkt_mask "
1207 "%lu, pkt_num %d\n",
1209 pkts_mask &= ~(1LLU << pos);
1211 printf("ACL after drop pkt_mask "
1212 " %lu, pkt_num %d\n",
1214 p_acl->counters->pkts_drop++;
1217 if (action_array_active[action_id].action_bitmap &
1220 action_array_active[action_id].fwd_port;
1221 entry->head.port_id = phy_port;
1223 printf("Action FWD Port ID: %u\n",
1227 if (action_array_active[action_id].action_bitmap &
1230 action_array_active[action_id].nat_port;
1231 entry->head.port_id = phy_port;
1233 printf("Action NAT Port ID: %u\n",
1237 if (action_array_active[action_id].action_bitmap &
1240 /* Set DSCP priority */
1241 uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
1244 action_array_active[action_id].dscp_priority
1248 ("Action DSCP DSCP Priority: %u\n",
1252 if (action_array_active[action_id].action_bitmap &
1253 acl_action_packet_accept) {
1255 printf("Action Accept\n");
1257 if (action_array_active[action_id].action_bitmap
1258 & acl_action_conntrack) {
1260 /* Set conntrack bit for this pkt */
1261 conntrack_mask |= pkt_mask;
1263 printf("ACL Conntrack "
1264 "enabled: %p pkt_mask: %p\n",
1265 (void *)conntrack_mask,
1269 if (action_array_active[action_id].action_bitmap
1270 & acl_action_connexist) {
1272 /* Set conntrack bit for this pkt */
1273 conntrack_mask |= pkt_mask;
1275 /* Set connexist bit for this pkt for public -> private */
1276 /* Private -> public packet will open the connection */
1277 if (action_array_active
1278 [action_id].private_public ==
1280 connexist_mask |= pkt_mask;
1283 printf("ACL Connexist "
1284 "enabled conntrack: %p connexist: %p pkt_mask: %p\n",
1285 (void *)conntrack_mask,
1286 (void *)connexist_mask,
1292 if (hdr_chk == IPv6_HDR_VERSION) {
1294 struct acl_table_entry *entry =
1295 (struct acl_table_entry *)
1296 p_acl->acl_entries_ipv6[pos];
1297 uint16_t phy_port = entry->head.port_id;
1298 uint32_t action_id = entry->action_id;
1301 printf("action_id = %u\n", action_id);
1303 if (action_array_active[action_id].action_bitmap &
1305 action_counter_table
1306 [p_acl->action_counter_index]
1307 [action_id].packetCount++;
1308 action_counter_table
1309 [p_acl->action_counter_index]
1310 [action_id].byteCount +=
1311 rte_pktmbuf_pkt_len(pkt);
1313 printf("Action Count Packet Count: %"
1314 PRIu64 " Byte Count: %" PRIu64
1316 action_counter_table
1317 [p_acl->action_counter_index]
1318 [action_id].packetCount,
1319 action_counter_table
1320 [p_acl->action_counter_index]
1321 [action_id].byteCount);
1324 if (action_array_active[action_id].action_bitmap &
1325 acl_action_packet_drop) {
1326 /* Drop packet by changing the mask */
1329 ("ACL before drop pkt_mask %lu, pkt_num %d\n",
1331 pkts_mask &= ~(1LLU << pos);
1334 ("ACL after drop pkt_mask %lu, pkt_num %d\n",
1336 p_acl->counters->pkts_drop++;
1340 if (action_array_active[action_id].action_bitmap &
1343 action_array_active[action_id].fwd_port;
1344 entry->head.port_id = phy_port;
1346 printf("Action FWD Port ID: %u\n",
1350 if (action_array_active[action_id].action_bitmap &
1353 action_array_active[action_id].nat_port;
1354 entry->head.port_id = phy_port;
1356 printf("Action NAT Port ID: %u\n",
1360 if (action_array_active[action_id].action_bitmap &
1363 /* Set DSCP priority */
1364 uint32_t dscp_offset =
1365 MBUF_HDR_ROOM + ETH_HDR_SIZE +
1366 IP_HDR_DSCP_OFST_IPV6;
1368 RTE_MBUF_METADATA_UINT16_PTR(pkt,
1370 uint16_t dscp_value =
1372 (RTE_MBUF_METADATA_UINT16
1373 (pkt, dscp_offset)) & 0XF00F);
1374 uint8_t dscp_store =
1375 action_array_active[action_id].dscp_priority
1377 uint16_t dscp_temp = dscp_store;
1379 dscp_temp = dscp_temp << 4;
1380 *dscp = rte_bswap16(dscp_temp | dscp_value);
1383 ("Action DSCP DSCP Priority: %u\n",
1387 if (action_array_active[action_id].action_bitmap &
1388 acl_action_packet_accept) {
1390 printf("Action Accept\n");
1392 if (action_array_active[action_id].action_bitmap
1393 & acl_action_conntrack) {
1395 /* Set conntrack bit for this pkt */
1396 conntrack_mask |= pkt_mask;
1398 printf("ACL Conntrack "
1399 "enabled: %p pkt_mask: %p\n",
1400 (void *)conntrack_mask,
1404 if (action_array_active[action_id].action_bitmap
1405 & acl_action_connexist) {
1407 /* Set conntrack bit for this pkt */
1408 conntrack_mask |= pkt_mask;
1410 /* Set connexist bit for this pkt for public -> private */
1411 /* Private -> public packet will open the connection */
1412 if (action_array_active
1413 [action_id].private_public ==
1415 connexist_mask |= pkt_mask;
1418 printf("ACL Connexist enabled "
1419 "conntrack: %p connexist: %p pkt_mask: %p\n",
1420 (void *)conntrack_mask,
1421 (void *)connexist_mask,
1428 /* Only call connection tracker if required */
1429 if (conntrack_mask > 0) {
1432 ("ACL Call Conntrack Before = %p Connexist = %p\n",
1433 (void *)conntrack_mask, (void *)connexist_mask);
1435 rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control
1436 (p_acl->cnxn_tracker, pkts, conntrack_mask, connexist_mask);
1438 printf("ACL Call Conntrack After = %p\n",
1439 (void *)conntrack_mask);
1441 /* Only change pkt mask for pkts that have conntrack enabled */
1442 /* Need to loop through packets to check if conntrack enabled */
1443 pkts_to_process = pkts_mask;
1444 for (; pkts_to_process;) {
1445 uint32_t action_id = 0;
1447 (uint8_t) __builtin_ctzll(pkts_to_process);
1448 uint64_t pkt_mask = 1LLU << pos;
1449 /* bitmask representing only this packet */
1451 pkts_to_process &= ~pkt_mask;
1452 /* remove this packet from remaining list */
1453 struct rte_mbuf *pkt = pkts[pos];
1455 uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt,
1459 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1460 if (hdr_chk == IPv4_HDR_VERSION) {
1461 struct acl_table_entry *entry =
1462 (struct acl_table_entry *)
1463 p_acl->acl_entries_ipv4[pos];
1464 action_id = entry->action_id;
1466 struct acl_table_entry *entry =
1467 (struct acl_table_entry *)
1468 p_acl->acl_entries_ipv6[pos];
1469 action_id = entry->action_id;
1472 if ((action_array_active[action_id].action_bitmap &
1473 acl_action_conntrack)
1474 || (action_array_active[action_id].action_bitmap &
1475 acl_action_connexist)) {
1477 if (conntrack_mask & pkt_mask) {
1479 printf("ACL Conntrack Accept "
1483 /* Drop packet by changing the mask */
1485 printf("ACL Conntrack Drop "
1488 pkts_mask &= ~pkt_mask;
1489 p_acl->counters->pkts_drop++;
1495 pkts_to_process = pkts_mask;
1496 /* bitmap of packets left to process for ARP */
1498 for (; pkts_to_process;) {
1499 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
1500 uint64_t pkt_mask = 1LLU << pos;
1501 /* bitmask representing only this packet */
1503 pkts_to_process &= ~pkt_mask;
1504 /* remove this packet from remaining list */
1505 struct rte_mbuf *pkt = pkts[pos];
1508 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
1509 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1511 if (hdr_chk == IPv4_HDR_VERSION) {
1513 struct acl_table_entry *entry =
1514 (struct acl_table_entry *)
1515 p_acl->acl_entries_ipv4[pos];
1516 //uint16_t phy_port = entry->head.port_id;
1517 uint16_t phy_port = pkt->port;
1518 uint32_t *port_out_id =
1519 RTE_MBUF_METADATA_UINT32_PTR(pkt,
1524 /* *port_out_id = p_acl->links_map[phy_port]; */
1525 /* if (is_phy_port_privte(phy_port))
1526 *port_out_id = ACL_PUB_PORT_ID;
1528 *port_out_id = ACL_PRV_PORT_ID;*/
1531 ("phy_port = %i, links_map[phy_port] = %i\n",
1532 phy_port, p_acl->links_map[phy_port]);
1534 /* header room + eth hdr size + dst_adr offset in ip header */
1535 uint32_t dst_addr_offset =
1536 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
1537 uint32_t *dst_addr =
1538 RTE_MBUF_METADATA_UINT32_PTR(pkt, dst_addr_offset);
1540 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
1542 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
1543 struct ether_addr hw_addr;
1544 uint32_t dest_address = rte_bswap32(*dst_addr);
1545 uint32_t *nhip = RTE_MBUF_METADATA_UINT32_PTR(pkt,
1552 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
1554 dest_address = rte_bswap32(*dst_addr);
1555 struct arp_entry_data *ret_arp_data = NULL;
1556 ret_arp_data = get_dest_mac_addr_port
1557 (dest_address, &dest_if, (struct ether_addr *)eth_dest);
1558 *port_out_id = p_acl->port_out_id[dest_if];
1560 if (arp_cache_dest_mac_present(dest_if)) {
1561 ether_addr_copy(get_link_hw_addr(dest_if),
1562 (struct ether_addr *)eth_src);
1563 update_nhip_access(dest_if);
1564 if (unlikely(ret_arp_data && ret_arp_data->num_pkts)) {
1565 printf("sending buffered packets\n");
1566 arp_send_buffered_pkts(ret_arp_data,
1567 (struct ether_addr *)eth_dest, *port_out_id);
1571 if (unlikely(ret_arp_data == NULL)) {
1574 printf("%s: NHIP Not Found, "
1575 "outport_id: %d\n", __func__,
1579 pkts_mask &= ~(1LLU << pos);
1581 printf("ACL after drop pkt_mask "
1582 "%lu, pkt_num %d\n",
1584 p_acl->counters->pkts_drop++;
1588 if (ret_arp_data->status == INCOMPLETE ||
1589 ret_arp_data->status == PROBE) {
1590 if (ret_arp_data->num_pkts >= NUM_DESC) {
1592 pkts_mask &= ~(1LLU << pos);
1594 printf("ACL after drop pkt_mask "
1595 "%lu, pkt_num %d\n",
1597 p_acl->counters->pkts_drop++;
1600 arp_pkts_mask |= pkt_mask;
1601 arp_queue_unresolved_packet(ret_arp_data, pkt);
1608 if (hdr_chk == IPv6_HDR_VERSION) {
1610 struct acl_table_entry *entry =
1611 (struct acl_table_entry *)
1612 p_acl->acl_entries_ipv6[pos];
1613 uint16_t phy_port = entry->head.port_id;
1614 uint32_t *port_out_id =
1615 RTE_MBUF_METADATA_UINT32_PTR(pkt,
1620 if (is_phy_port_privte(phy_port))
1621 *port_out_id = ACL_PUB_PORT_ID;
1623 *port_out_id = ACL_PRV_PORT_ID;
1625 /* *port_out_id = p_acl->links_map[phy_port]; */
1628 ("phy_port = %i, links_map[phy_port] = %i\n",
1629 phy_port, p_acl->links_map[phy_port]);
1631 /* header room + eth hdr size + dst_adr offset in ip header */
1632 uint32_t dst_addr_offset =
1633 MBUF_HDR_ROOM + ETH_HDR_SIZE +
1634 IP_HDR_DST_ADR_OFST_IPV6;
1636 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
1638 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
1639 struct ether_addr hw_addr;
1640 uint8_t dest_address[16];
1644 RTE_MBUF_METADATA_UINT8(pkt,
1649 uint8_t *dst_addr[16];
1650 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
1653 for (i = 0; i < 16; i++) {
1655 RTE_MBUF_METADATA_UINT8_PTR(pkt,
1659 memcpy(dest_address, *dst_addr, sizeof(dest_address));
1660 memset(nhip, 0, sizeof(nhip));
1661 if (is_phy_port_privte(phy_port))
1662 port = ACL_PUB_PORT_ID;
1664 port = ACL_PRV_PORT_ID;
1666 if (get_dest_mac_address_ipv6_port
1667 (dest_address, port, &hw_addr, &nhip[0])) {
1671 ("MAC found for port %d - %02x:%02x:%02x:%02x:%02x:%02x\n",
1672 phy_port, hw_addr.addr_bytes[0],
1673 hw_addr.addr_bytes[1],
1674 hw_addr.addr_bytes[2],
1675 hw_addr.addr_bytes[3],
1676 hw_addr.addr_bytes[4],
1677 hw_addr.addr_bytes[5]);
1679 ("Dest MAC before - %02x:%02x:%02x:%02x:%02x:%02x\n",
1680 eth_dest[0], eth_dest[1],
1681 eth_dest[2], eth_dest[3],
1682 eth_dest[4], eth_dest[5]);
1684 memcpy(eth_dest, &hw_addr,
1685 sizeof(struct ether_addr));
1687 printf("PktP %p, dest_macP %p\n", pkt,
1690 ("Dest MAC after - %02x:%02x:%02x:%02x:%02x:%02x\n",
1691 eth_dest[0], eth_dest[1],
1692 eth_dest[2], eth_dest[3],
1693 eth_dest[4], eth_dest[5]);
1695 if (is_phy_port_privte(phy_port))
1697 get_link_hw_addr(dest_if),
1698 sizeof(struct ether_addr));
1701 get_link_hw_addr(dest_if),
1702 sizeof(struct ether_addr));
1705 * memcpy(eth_src, get_link_hw_addr(p_acl->links_map[phy_port]),
1706 * sizeof(struct ether_addr));
1708 p_acl->counters->tpkts_processed++;
1709 p_acl->counters->bytes_processed +=
1715 /* Drop packet by changing the mask */
1717 printf("ACL before drop pkt_mask "
1718 " %lu, pkt_num %d\n",
1720 pkts_mask &= ~(1LLU << pos);
1722 printf("ACL after drop pkt_mask "
1723 "%lu, pkt_num %d\n",
1725 p_acl->counters->pkts_drop++;
1732 pkts_drop_mask = keep_mask & ~pkts_mask;
1733 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
1734 keep_mask = pkts_mask;
1736 if (arp_pkts_mask) {
1737 keep_mask &= ~(arp_pkts_mask);
1738 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
1741 /* don't bother measuring if traffic very low, might skew stats */
1742 uint32_t packets_this_iteration = __builtin_popcountll(pkts_mask);
1744 if (packets_this_iteration > 1) {
1745 uint64_t latency_this_iteration =
1746 rte_get_tsc_cycles() - p_acl->in_port_time_stamp;
1747 p_acl->counters->sum_latencies += latency_this_iteration;
1748 p_acl->counters->count_latencies++;
1751 printf("Leaving pkt_work_acl_key pkts_mask = %p\n",
1758 * Main packet processing function.
1759 * A 64-bit packet mask is used to identify which packets to forward.
1760 * Performs the following:
1761 * - Burst lookup packets in the IPv4 ACL Rule Table.
1762 * - Burst lookup packets in the IPv6 ACL Rule Table.
1763 * - Lookup Action Table, perform actions.
1764 * - Burst lookup Connection Tracking, if enabled.
1765 * - Lookup MAC address.
1767 * - Packets with bit mask set are forwarded
1770 * A pointer to the pipeline.
1772 * A pointer to a burst of packets.
1774 * Number of packets to process.
1776 * A pointer to pipeline specific data.
1779 * 0 on success, negative on error.
1782 pkt_work_acl_ipv6_key(struct rte_pipeline *p,
1783 struct rte_mbuf **pkts, uint32_t n_pkts, void *arg)
1786 struct pipeline_acl *p_acl = arg;
1788 p_acl->counters->pkts_received =
1789 p_acl->counters->pkts_received + n_pkts;
1791 printf("pkt_work_acl_key pkts_received: %" PRIu64
1792 " n_pkts: %u\n", p_acl->counters->pkts_received, n_pkts);
1794 uint64_t lookup_hit_mask = 0;
1795 uint64_t lookup_hit_mask_ipv4 = 0;
1796 uint64_t lookup_hit_mask_ipv6 = 0;
1797 uint64_t lookup_miss_mask = 0;
1798 uint64_t conntrack_mask = 0;
1799 uint64_t connexist_mask = 0;
1800 uint32_t dest_address = 0;
1804 uint64_t pkts_drop_mask, pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
1805 uint64_t keep_mask = pkts_mask;
1809 p_acl->in_port_time_stamp = rte_get_tsc_cycles();
1811 if (acl_ipv6_enabled) {
1813 printf("ACL IPV6 Lookup Mask Before = %p\n",
1816 rte_table_acl_ops.f_lookup(acl_rule_table_ipv6_active, pkts,
1817 pkts_mask, &lookup_hit_mask_ipv6,
1819 p_acl->acl_entries_ipv6);
1821 printf("ACL IPV6 Lookup Mask After = %p\n",
1822 (void *)lookup_hit_mask_ipv6);
1825 /* Merge lookup results since we process both IPv4 and IPv6 below */
1826 lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
1828 printf("ACL Lookup Mask After = %p\n", (void *)lookup_hit_mask);
1830 lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
1831 pkts_mask = lookup_hit_mask;
1832 p_acl->counters->pkts_drop += __builtin_popcountll(lookup_miss_mask);
1834 printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
1835 p_acl->counters->pkts_drop,
1836 __builtin_popcountll(lookup_miss_mask));
1838 uint64_t pkts_to_process = lookup_hit_mask;
1839 /* bitmap of packets left to process for ARP */
1841 for (; pkts_to_process;) {
1842 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
1843 uint64_t pkt_mask = 1LLU << pos;
1844 /* bitmask representing only this packet */
1846 pkts_to_process &= ~pkt_mask;
1847 /* remove this packet from remaining list */
1848 struct rte_mbuf *pkt = pkts[pos];
1851 if (!check_arp_icmp(pkt, pkt_mask, p_acl)) {
1852 pkts_mask &= ~(1LLU << pos);
1856 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
1857 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
1859 if (hdr_chk == IPv4_HDR_VERSION) {
1860 struct acl_table_entry *entry =
1861 (struct acl_table_entry *)
1862 p_acl->acl_entries_ipv4[pos];
1863 uint16_t phy_port = entry->head.port_id;
1864 uint32_t action_id = entry->action_id;
1867 printf("action_id = %u\n", action_id);
1869 uint32_t dscp_offset =
1870 MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DSCP_OFST;
1872 if (action_array_active[action_id].action_bitmap &
1874 action_counter_table
1875 [p_acl->action_counter_index]
1876 [action_id].packetCount++;
1877 action_counter_table
1878 [p_acl->action_counter_index]
1879 [action_id].byteCount +=
1880 rte_pktmbuf_pkt_len(pkt);
1882 printf("Action Count Packet Count: %"
1883 PRIu64 " Byte Count: %" PRIu64
1885 action_counter_table
1886 [p_acl->action_counter_index]
1887 [action_id].packetCount,
1888 action_counter_table
1889 [p_acl->action_counter_index]
1890 [action_id].byteCount);
1893 if (action_array_active[action_id].action_bitmap &
1894 acl_action_packet_drop) {
1896 /* Drop packet by changing the mask */
1899 ("ACL before drop pkt_mask %lu, pkt_num %d\n",
1901 pkts_mask &= ~(1LLU << pos);
1904 ("ACL after drop pkt_mask %lu, pkt_num %d\n",
1906 p_acl->counters->pkts_drop++;
1909 if (action_array_active[action_id].action_bitmap &
1912 action_array_active[action_id].fwd_port;
1913 entry->head.port_id = phy_port;
1915 printf("Action FWD Port ID: %u\n",
1919 if (action_array_active[action_id].action_bitmap &
1922 action_array_active[action_id].nat_port;
1923 entry->head.port_id = phy_port;
1925 printf("Action NAT Port ID: %u\n",
1929 if (action_array_active[action_id].action_bitmap &
1932 /* Set DSCP priority */
1933 uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
1936 action_array_active[action_id].dscp_priority
1940 ("Action DSCP DSCP Priority: %u\n",
1944 if (action_array_active[action_id].action_bitmap &
1945 acl_action_packet_accept) {
1947 printf("Action Accept\n");
1949 if (action_array_active[action_id].action_bitmap
1950 & acl_action_conntrack) {
1952 /* Set conntrack bit for this pkt */
1953 conntrack_mask |= pkt_mask;
1955 printf("ACL Conntrack enabled: "
1956 " %p pkt_mask: %p\n",
1957 (void *)conntrack_mask,
1961 if (action_array_active[action_id].action_bitmap
1962 & acl_action_connexist) {
1964 /* Set conntrack bit for this pkt */
1965 conntrack_mask |= pkt_mask;
1967 /* Set connexist bit for this pkt for public -> private */
1968 /* Private -> public packet will open the connection */
1969 if (action_array_active
1970 [action_id].private_public ==
1972 connexist_mask |= pkt_mask;
1975 printf("ACL Connexist enabled "
1976 "conntrack: %p connexist: %p pkt_mask: %p\n",
1977 (void *)conntrack_mask,
1978 (void *)connexist_mask,
1985 if (hdr_chk == IPv6_HDR_VERSION) {
1987 struct acl_table_entry *entry =
1988 (struct acl_table_entry *)
1989 p_acl->acl_entries_ipv6[pos];
1990 uint16_t phy_port = entry->head.port_id;
1991 uint32_t action_id = entry->action_id;
1994 printf("action_id = %u\n", action_id);
1996 if (action_array_active[action_id].action_bitmap &
1998 action_counter_table
1999 [p_acl->action_counter_index]
2000 [action_id].packetCount++;
2001 action_counter_table
2002 [p_acl->action_counter_index]
2003 [action_id].byteCount +=
2004 rte_pktmbuf_pkt_len(pkt);
2006 printf("Action Count Packet Count: %"
2007 PRIu64 " Byte Count: %" PRIu64
2009 action_counter_table
2010 [p_acl->action_counter_index]
2011 [action_id].packetCount,
2012 action_counter_table
2013 [p_acl->action_counter_index]
2014 [action_id].byteCount);
2017 if (action_array_active[action_id].action_bitmap &
2018 acl_action_packet_drop) {
2019 /* Drop packet by changing the mask */
2021 printf("ACL before drop pkt_mask "
2022 "%lu, pkt_num %d\n",
2024 pkts_mask &= ~(1LLU << pos);
2026 printf("ACL after drop pkt_mask "
2027 "%lu, pkt_num %d\n",
2029 p_acl->counters->pkts_drop++;
2033 if (action_array_active[action_id].action_bitmap &
2036 action_array_active[action_id].fwd_port;
2037 entry->head.port_id = phy_port;
2039 printf("Action FWD Port ID: %u\n",
2043 if (action_array_active[action_id].action_bitmap &
2046 action_array_active[action_id].nat_port;
2047 entry->head.port_id = phy_port;
2049 printf("Action NAT Port ID: %u\n",
2053 if (action_array_active[action_id].action_bitmap &
2056 /* Set DSCP priority */
2057 uint32_t dscp_offset =
2058 MBUF_HDR_ROOM + ETH_HDR_SIZE +
2059 IP_HDR_DSCP_OFST_IPV6;
2061 RTE_MBUF_METADATA_UINT16_PTR(pkt,
2063 uint16_t dscp_value =
2065 (RTE_MBUF_METADATA_UINT16
2066 (pkt, dscp_offset)) & 0XF00F);
2067 uint8_t dscp_store =
2068 action_array_active[action_id].dscp_priority
2070 uint16_t dscp_temp = dscp_store;
2072 dscp_temp = dscp_temp << 4;
2073 *dscp = rte_bswap16(dscp_temp | dscp_value);
2076 ("Action DSCP DSCP Priority: %u\n",
2080 if (action_array_active[action_id].action_bitmap &
2081 acl_action_packet_accept) {
2083 printf("Action Accept\n");
2085 if (action_array_active[action_id].action_bitmap
2086 & acl_action_conntrack) {
2088 /* Set conntrack bit for this pkt */
2089 conntrack_mask |= pkt_mask;
2091 printf("ACL Conntrack enabled: "
2092 " %p pkt_mask: %p\n",
2093 (void *)conntrack_mask,
2097 if (action_array_active[action_id].action_bitmap
2098 & acl_action_connexist) {
2100 /* Set conntrack bit for this pkt */
2101 conntrack_mask |= pkt_mask;
2103 /* Set connexist bit for this pkt for public -> private */
2104 /* Private -> public packet will open the connection */
2105 if (action_array_active
2106 [action_id].private_public ==
2108 connexist_mask |= pkt_mask;
2111 printf("ACL Connexist enabled "
2112 "conntrack: %p connexist: %p pkt_mask: %p\n",
2113 (void *)conntrack_mask,
2114 (void *)connexist_mask,
2120 /* Only call connection tracker if required */
2121 if (conntrack_mask > 0) {
2124 ("ACL Call Conntrack Before = %p Connexist = %p\n",
2125 (void *)conntrack_mask, (void *)connexist_mask);
2127 rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control
2128 (p_acl->cnxn_tracker, pkts, conntrack_mask, connexist_mask);
2130 printf("ACL Call Conntrack After = %p\n",
2131 (void *)conntrack_mask);
2133 /* Only change pkt mask for pkts that have conntrack enabled */
2134 /* Need to loop through packets to check if conntrack enabled */
2135 pkts_to_process = pkts_mask;
2136 for (; pkts_to_process;) {
2137 uint32_t action_id = 0;
2139 (uint8_t) __builtin_ctzll(pkts_to_process);
2140 uint64_t pkt_mask = 1LLU << pos;
2141 /* bitmask representing only this packet */
2143 pkts_to_process &= ~pkt_mask;
2144 /* remove this packet from remaining list */
2145 struct rte_mbuf *pkt = pkts[pos];
2147 uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt,
2151 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
2152 if (hdr_chk == IPv4_HDR_VERSION) {
2153 struct acl_table_entry *entry =
2154 (struct acl_table_entry *)
2155 p_acl->acl_entries_ipv4[pos];
2156 action_id = entry->action_id;
2158 struct acl_table_entry *entry =
2159 (struct acl_table_entry *)
2160 p_acl->acl_entries_ipv6[pos];
2161 action_id = entry->action_id;
2164 if ((action_array_active[action_id].action_bitmap &
2165 acl_action_conntrack)
2166 || (action_array_active[action_id].action_bitmap &
2167 acl_action_connexist)) {
2169 if (conntrack_mask & pkt_mask) {
2171 printf("ACL Conntrack Accept "
2175 /* Drop packet by changing the mask */
2178 ("ACL Conntrack Drop packet = %p\n",
2180 pkts_mask &= ~pkt_mask;
2181 p_acl->counters->pkts_drop++;
2187 pkts_to_process = pkts_mask;
2188 /* bitmap of packets left to process for ARP */
2190 for (; pkts_to_process;) {
2191 uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
2192 uint64_t pkt_mask = 1LLU << pos;
2193 /* bitmask representing only this packet */
2195 pkts_to_process &= ~pkt_mask;
2196 /* remove this packet from remaining list */
2197 struct rte_mbuf *pkt = pkts[pos];
2200 RTE_MBUF_METADATA_UINT8(pkt, MBUF_HDR_ROOM + ETH_HDR_SIZE);
2201 hdr_chk = hdr_chk >> IP_VERSION_CHECK;
2203 if (hdr_chk == IPv6_HDR_VERSION) {
2205 struct acl_table_entry *entry =
2206 (struct acl_table_entry *)
2207 p_acl->acl_entries_ipv6[pos];
2208 //uint16_t phy_port = entry->head.port_id;
2209 uint16_t phy_port = pkt->port;
2210 uint32_t *port_out_id =
2211 RTE_MBUF_METADATA_UINT32_PTR(pkt,
2216 /* if (is_phy_port_privte(phy_port))
2217 *port_out_id = ACL_PUB_PORT_ID;
2219 *port_out_id = ACL_PRV_PORT_ID;*/
2221 /* *port_out_id = p_acl->links_map[phy_port]; */
2224 ("phy_port = %i,links_map[phy_port] = %i\n",
2225 phy_port, p_acl->links_map[phy_port]);
2227 /* header room + eth hdr size + dst_adr offset in ip header */
2228 uint32_t dst_addr_offset =
2229 MBUF_HDR_ROOM + ETH_HDR_SIZE +
2230 IP_HDR_DST_ADR_OFST_IPV6;
2232 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
2234 RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
2235 struct ether_addr hw_addr;
2236 uint8_t dest_address[16];
2240 RTE_MBUF_METADATA_UINT8(pkt,
2245 uint8_t *dst_addr[16];
2246 uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
2249 for (i = 0; i < 16; i++) {
2251 RTE_MBUF_METADATA_UINT8_PTR(pkt,
2255 memcpy(dest_address, *dst_addr, sizeof(dest_address));
2256 memset(nhip, 0, sizeof(nhip));
2257 struct nd_entry_data *ret_nd_data = NULL;
2258 ret_nd_data = get_dest_mac_address_ipv6_port
2259 (dest_address, &dest_if, &hw_addr, &nhip[0]);
2260 *port_out_id = p_acl->port_out_id[dest_if];
2262 if (nd_cache_dest_mac_present(dest_if)) {
2263 ether_addr_copy(get_link_hw_addr(dest_if),
2264 (struct ether_addr *)eth_src);
2265 update_nhip_access(dest_if);
2267 if (unlikely(ret_nd_data && ret_nd_data->num_pkts)) {
2268 printf("sending buffered packets\n");
2269 p_acl->counters->tpkts_processed +=
2270 ret_nd_data->num_pkts;
2271 nd_send_buffered_pkts(ret_nd_data,
2272 (struct ether_addr *)eth_dest, *port_out_id);
2275 if (unlikely(ret_nd_data == NULL)) {
2277 printf("ACL before drop pkt_mask "
2278 "%lu, pkt_num %d\n", pkts_mask, pos);
2279 pkts_mask &= ~(1LLU << pos);
2281 printf("ACL after drop pkt_mask "
2282 "%lu, pkt_num %d\n", pkts_mask, pos);
2283 p_acl->counters->pkts_drop++;
2287 if (ret_nd_data->status == INCOMPLETE ||
2288 ret_nd_data->status == PROBE) {
2289 if (ret_nd_data->num_pkts >= NUM_DESC) {
2292 printf("ACL before drop pkt_mask "
2293 "%lu, pkt_num %d\n", pkts_mask, pos);
2294 pkts_mask &= ~(1LLU << pos);
2296 printf("ACL after drop pkt_mask "
2297 "%lu, pkt_num %d\n", pkts_mask, pos);
2298 p_acl->counters->pkts_drop++;
2301 arp_pkts_mask |= pkt_mask;
2302 nd_queue_unresolved_packet(ret_nd_data,
2312 } /* end of for loop */
2314 pkts_drop_mask = keep_mask & ~pkts_mask;
2315 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
2316 keep_mask = pkts_mask;
2318 if (arp_pkts_mask) {
2319 keep_mask &= ~(arp_pkts_mask);
2320 rte_pipeline_ah_packet_hijack(p, arp_pkts_mask);
2323 /* don't bother measuring if traffic very low, might skew stats */
2324 uint32_t packets_this_iteration = __builtin_popcountll(pkts_mask);
2326 if (packets_this_iteration > 1) {
2327 uint64_t latency_this_iteration =
2328 rte_get_tsc_cycles() - p_acl->in_port_time_stamp;
2329 p_acl->counters->sum_latencies += latency_this_iteration;
2330 p_acl->counters->count_latencies++;
2333 printf("Leaving pkt_work_acl_key pkts_mask = %p\n",
2339 static struct rte_acl_field_def field_format_ipv4[] = {
2342 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2343 .size = sizeof(uint8_t),
2346 .offset = sizeof(struct ether_hdr) +
2347 offsetof(struct ipv4_hdr, next_proto_id),
2350 /* Source IP address (IPv4) */
2352 .type = RTE_ACL_FIELD_TYPE_MASK,
2353 .size = sizeof(uint32_t),
2356 .offset = sizeof(struct ether_hdr) +
2357 offsetof(struct ipv4_hdr, src_addr),
2360 /* Destination IP address (IPv4) */
2362 .type = RTE_ACL_FIELD_TYPE_MASK,
2363 .size = sizeof(uint32_t),
2366 .offset = sizeof(struct ether_hdr) +
2367 offsetof(struct ipv4_hdr, dst_addr),
2372 .type = RTE_ACL_FIELD_TYPE_RANGE,
2373 .size = sizeof(uint16_t),
2376 .offset = sizeof(struct ether_hdr) +
2377 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
2380 /* Destination Port */
2382 .type = RTE_ACL_FIELD_TYPE_RANGE,
2383 .size = sizeof(uint16_t),
2386 .offset = sizeof(struct ether_hdr) +
2387 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
2391 #define SIZEOF_VLAN_HDR 4
2393 static struct rte_acl_field_def field_format_vlan_ipv4[] = {
2396 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2397 .size = sizeof(uint8_t),
2400 .offset = sizeof(struct ether_hdr) +
2401 SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, next_proto_id),
2404 /* Source IP address (IPv4) */
2406 .type = RTE_ACL_FIELD_TYPE_MASK,
2407 .size = sizeof(uint32_t),
2410 .offset = sizeof(struct ether_hdr) +
2411 SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, src_addr),
2414 /* Destination IP address (IPv4) */
2416 .type = RTE_ACL_FIELD_TYPE_MASK,
2417 .size = sizeof(uint32_t),
2420 .offset = sizeof(struct ether_hdr) +
2421 SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, dst_addr),
2426 .type = RTE_ACL_FIELD_TYPE_RANGE,
2427 .size = sizeof(uint16_t),
2430 .offset = sizeof(struct ether_hdr) +
2432 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
2435 /* Destination Port */
2437 .type = RTE_ACL_FIELD_TYPE_RANGE,
2438 .size = sizeof(uint16_t),
2441 .offset = sizeof(struct ether_hdr) +
2443 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
2447 #define SIZEOF_QINQ_HEADER 8
2449 static struct rte_acl_field_def field_format_qinq_ipv4[] = {
2452 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2453 .size = sizeof(uint8_t),
2456 .offset = sizeof(struct ether_hdr) +
2457 SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, next_proto_id),
2460 /* Source IP address (IPv4) */
2462 .type = RTE_ACL_FIELD_TYPE_MASK,
2463 .size = sizeof(uint32_t),
2466 .offset = sizeof(struct ether_hdr) +
2467 SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, src_addr),
2470 /* Destination IP address (IPv4) */
2472 .type = RTE_ACL_FIELD_TYPE_MASK,
2473 .size = sizeof(uint32_t),
2476 .offset = sizeof(struct ether_hdr) +
2477 SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, dst_addr),
2482 .type = RTE_ACL_FIELD_TYPE_RANGE,
2483 .size = sizeof(uint16_t),
2486 .offset = sizeof(struct ether_hdr) +
2487 SIZEOF_QINQ_HEADER +
2488 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
2491 /* Destination Port */
2493 .type = RTE_ACL_FIELD_TYPE_RANGE,
2494 .size = sizeof(uint16_t),
2497 .offset = sizeof(struct ether_hdr) +
2498 SIZEOF_QINQ_HEADER +
2499 sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
2503 static struct rte_acl_field_def field_format_ipv6[] = {
2506 .type = RTE_ACL_FIELD_TYPE_BITMASK,
2507 .size = sizeof(uint8_t),
2510 .offset = sizeof(struct ether_hdr) +
2511 offsetof(struct ipv6_hdr, proto),
2514 /* Source IP address (IPv6) */
2516 .type = RTE_ACL_FIELD_TYPE_MASK,
2517 .size = sizeof(uint32_t),
2520 .offset = sizeof(struct ether_hdr) +
2521 offsetof(struct ipv6_hdr, src_addr),
2525 .type = RTE_ACL_FIELD_TYPE_MASK,
2526 .size = sizeof(uint32_t),
2529 .offset = sizeof(struct ether_hdr) +
2530 offsetof(struct ipv6_hdr, src_addr) + sizeof(uint32_t),
2535 .type = RTE_ACL_FIELD_TYPE_MASK,
2536 .size = sizeof(uint32_t),
2539 .offset = sizeof(struct ether_hdr) +
2540 offsetof(struct ipv6_hdr, src_addr) + 2 * sizeof(uint32_t),
2545 .type = RTE_ACL_FIELD_TYPE_MASK,
2546 .size = sizeof(uint32_t),
2549 .offset = sizeof(struct ether_hdr) +
2550 offsetof(struct ipv6_hdr, src_addr) + 3 * sizeof(uint32_t),
2554 /* Destination IP address (IPv6) */
2556 .type = RTE_ACL_FIELD_TYPE_MASK,
2557 .size = sizeof(uint32_t),
2560 .offset = sizeof(struct ether_hdr) +
2561 offsetof(struct ipv6_hdr, dst_addr),
2565 .type = RTE_ACL_FIELD_TYPE_MASK,
2566 .size = sizeof(uint32_t),
2569 .offset = sizeof(struct ether_hdr) +
2570 offsetof(struct ipv6_hdr, dst_addr) + sizeof(uint32_t),
2575 .type = RTE_ACL_FIELD_TYPE_MASK,
2576 .size = sizeof(uint32_t),
2579 .offset = sizeof(struct ether_hdr) +
2580 offsetof(struct ipv6_hdr, dst_addr) + 2 * sizeof(uint32_t),
2585 .type = RTE_ACL_FIELD_TYPE_MASK,
2586 .size = sizeof(uint32_t),
2589 .offset = sizeof(struct ether_hdr) +
2590 offsetof(struct ipv6_hdr, dst_addr) + 3 * sizeof(uint32_t),
2596 .type = RTE_ACL_FIELD_TYPE_RANGE,
2597 .size = sizeof(uint16_t),
2600 .offset = sizeof(struct ether_hdr) +
2601 sizeof(struct ipv6_hdr) + offsetof(struct tcp_hdr, src_port),
2604 /* Destination Port */
2606 .type = RTE_ACL_FIELD_TYPE_RANGE,
2607 .size = sizeof(uint16_t),
2610 .offset = sizeof(struct ether_hdr) +
2611 sizeof(struct ipv6_hdr) + offsetof(struct tcp_hdr, dst_port),
2616 * Parse arguments in config file.
2619 * A pointer to the pipeline.
2621 * A pointer to pipeline specific parameters.
2624 * 0 on success, negative on error.
2627 pipeline_acl_parse_args(struct pipeline_acl *p, struct pipeline_params *params)
2629 uint32_t n_rules_present = 0;
2630 uint32_t pkt_type_present = 0;
2632 uint8_t prv_que_handler_present = 0;
2633 uint8_t n_prv_in_port = 0;
2636 p->n_rules = 4 * 1024;
2637 acl_n_rules = 4 * 1024;
2638 p->n_rule_fields = RTE_DIM(field_format_ipv4);
2639 p->field_format = field_format_ipv4;
2640 p->field_format_size = sizeof(field_format_ipv4);
2642 for (i = 0; i < params->n_args; i++) {
2643 char *arg_name = params->args_name[i];
2644 char *arg_value = params->args_value[i];
2646 if (strcmp(arg_name, "n_rules") == 0) {
2647 if (n_rules_present)
2649 n_rules_present = 1;
2651 p->n_rules = atoi(arg_value);
2652 acl_n_rules = atoi(arg_value);
2656 if (strcmp(arg_name, "pkt_type") == 0) {
2657 if (pkt_type_present)
2659 pkt_type_present = 1;
2662 if (strcmp(arg_value, "ipv4") == 0) {
2663 p->n_rule_fields = RTE_DIM(field_format_ipv4);
2664 p->field_format = field_format_ipv4;
2665 p->field_format_size =
2666 sizeof(field_format_ipv4);
2671 if (strcmp(arg_value, "vlan_ipv4") == 0) {
2673 RTE_DIM(field_format_vlan_ipv4);
2674 p->field_format = field_format_vlan_ipv4;
2675 p->field_format_size =
2676 sizeof(field_format_vlan_ipv4);
2681 if (strcmp(arg_value, "qinq_ipv4") == 0) {
2683 RTE_DIM(field_format_qinq_ipv4);
2684 p->field_format = field_format_qinq_ipv4;
2685 p->field_format_size =
2686 sizeof(field_format_qinq_ipv4);
2691 if (strcmp(arg_value, "ipv6") == 0) {
2692 p->n_rule_fields = RTE_DIM(field_format_ipv6);
2693 p->field_format = field_format_ipv6;
2694 p->field_format_size =
2695 sizeof(field_format_ipv6);
2703 if (strcmp(arg_name, "traffic_type") == 0) {
2704 int traffic_type = atoi(arg_value);
2706 if (traffic_type == 0
2707 || !(traffic_type == IPv4_HDR_VERSION
2708 || traffic_type == IPv6_HDR_VERSION)) {
2709 printf("not IPVR4/IPVR6");
2713 p->traffic_type = traffic_type;
2717 if (strcmp(arg_name, "prv_que_handler") == 0) {
2719 if (prv_que_handler_present) {
2720 printf("Duplicate pktq_in_prv ..\n\n");
2723 prv_que_handler_present = 1;
2728 /* get the first token */
2729 token = strtok(arg_value, "(");
2730 token = strtok(token, ")");
2731 token = strtok(token, ",");
2732 printf("***** prv_que_handler *****\n");
2735 printf("string is null\n");
2736 printf("prv_que_handler is invalid\n");
2739 printf("string is :%s\n", token);
2741 while (token != NULL) {
2742 printf(" %s\n", token);
2743 rxport = atoi(token);
2744 acl_prv_que_port_index[n_prv_in_port++] =
2746 token = strtok(NULL, ",");
2749 if (n_prv_in_port == 0) {
2750 printf("VNF common parse err - no prv RX phy port\n");
2757 if (strcmp(arg_name, "n_flows") == 0) {
2758 p->n_flows = atoi(arg_value);
2759 if (p->n_flows == 0)
2762 continue;/* needed when multiple parms are checked */
2771 * Create and initialize Pipeline Back End (BE).
2774 * A pointer to the pipeline.
2776 * A pointer to pipeline specific data.
2779 * A pointer to the pipeline create, NULL on error.
/*
 * pipeline_acl_init() - create and initialize the ACL pipeline back end.
 * Allocates the pipeline object and per-thread connection tracker, builds
 * the rte_pipeline (in ports, out ports, stub table, default entry) and,
 * for the first instance, the shared active/standby ACL rule tables.
 * Returns a pointer to the pipeline on success, NULL on error.
 * NOTE(review): this chunk is an extract; error returns and closing braces
 * are elided between the numbered lines below.
 */
2781 static void *pipeline_acl_init(struct pipeline_params *params,
2782 __rte_unused void *arg)
2785 struct pipeline_acl *p_acl;
/* Reject a NULL params struct or a pipeline with no in/out ports. */
2788 /* Check input arguments */
2789 if ((params == NULL) ||
2790 (params->n_ports_in == 0) || (params->n_ports_out == 0))
/* Allocate the pipeline object, cache-line aligned and zero-filled. */
2793 /* Memory allocation */
2794 size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_acl));
2795 p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
2796 p_acl = (struct pipeline_acl *)p;
2800 strcpy(p->name, params->name);
2801 p->log_level = params->log_level;
2803 PLOG(p, HIGH, "ACL");
2806 * p_acl->links_map[0] = 0xff;
2807 * p_acl->links_map[1] = 0xff;
/* Default to mixed traffic; mark every port map slot unused (0xff). */
2809 p_acl->traffic_type = MIX;
2810 for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
2811 p_acl->links_map[i] = 0xff;
2812 p_acl->port_out_id[i] = 0xff;
2813 acl_prv_que_port_index[i] = 0;
2816 p_acl->pipeline_num = 0xff;
2818 /* if(enable_hwlb || enable_flow_dir) */
2819 // lib_arp_init(params, arg);
2821 p_acl->n_flows = 4096; /* small default value */
/* Per-thread connection tracker used for stateful inspection. */
2822 /* Create a single firewall instance and initialize. */
2823 p_acl->cnxn_tracker =
2824 rte_zmalloc(NULL, rte_ct_get_cnxn_tracker_size(),
2825 RTE_CACHE_LINE_SIZE);
2827 if (p_acl->cnxn_tracker == NULL)
2831 * Now allocate a counter block entry. It appears that the initialization
2832 * of all instances is serialized on core 0, so no lock is necessary.
2834 struct rte_ACL_counter_block *counter_ptr;
2836 if (rte_ACL_hi_counter_block_in_use == MAX_ACL_INSTANCES) {
2837 /* error, exceeded table bounds */
/*
 * NOTE(review): the index is incremented BEFORE use, so slot 0 of
 * rte_acl_counter_table is never assigned and the final increment can
 * reach index MAX_ACL_INSTANCES — confirm the table is sized MAX+1.
 */
2841 rte_ACL_hi_counter_block_in_use++;
2842 counter_ptr = &rte_acl_counter_table[rte_ACL_hi_counter_block_in_use];
2843 strcpy(counter_ptr->name, params->name);
2844 p_acl->action_counter_index = rte_ACL_hi_counter_block_in_use;
2846 p_acl->counters = counter_ptr;
2848 rte_ct_initialize_default_timeouts(p_acl->cnxn_tracker);
2849 p_acl->arpPktCount = 0;
/* Parse pipeline-specific config arguments (may override n_flows). */
2851 /* Parse arguments */
2852 if (pipeline_acl_parse_args(p_acl, params))
2854 /*n_flows already checked, ignore Klockwork issue */
2855 if (p_acl->n_flows > 0) {
2856 rte_ct_initialize_cnxn_tracker(p_acl->cnxn_tracker,
2857 p_acl->n_flows, params->name);
2858 p_acl->counters->ct_counters =
2859 rte_ct_get_counter_address(p_acl->cnxn_tracker);
2861 printf("ACL invalid p_acl->n_flows: %u\n", p_acl->n_flows);
/* Create the underlying rte_pipeline; the output port id is read from
 * packet metadata at META_DATA_OFFSET + offsetof(..., output_port). */
2867 struct rte_pipeline_params pipeline_params = {
2868 .name = params->name,
2869 .socket_id = params->socket_id,
2870 .offset_port_id = META_DATA_OFFSET +
2871 offsetof(struct mbuf_acl_meta_data, output_port),
2874 p->p = rte_pipeline_create(&pipeline_params);
/* Input ports: pick the port-in action handler by configured traffic
 * type (IPv4-only, IPv6-only, or the generic mixed handler).
 * NOTE(review): "¶ms" below looks like mojibake for "&params" —
 * preserved as-is; verify against the full source. */
2882 p->n_ports_in = params->n_ports_in;
2883 for (i = 0; i < p->n_ports_in; i++) {
2884 struct rte_pipeline_port_in_params port_params = {
2886 pipeline_port_in_params_get_ops(¶ms->port_in
2889 pipeline_port_in_params_convert(¶ms->port_in
2891 .f_action = pkt_work_acl_key,
2893 .burst_size = params->port_in[i].burst_size,
2895 if (p_acl->traffic_type == IPv4_HDR_VERSION)
2896 port_params.f_action = pkt_work_acl_ipv4_key;
2898 if (p_acl->traffic_type == IPv6_HDR_VERSION)
2899 port_params.f_action = pkt_work_acl_ipv6_key;
2901 int status = rte_pipeline_port_in_create(p->p,
/* On port-creation failure the rte_pipeline is freed (error return
 * elided in this extract). */
2906 rte_pipeline_free(p->p);
/* Output ports. */
2913 p->n_ports_out = params->n_ports_out;
2914 for (i = 0; i < p->n_ports_out; i++) {
2915 struct rte_pipeline_port_out_params port_params = {
2917 pipeline_port_out_params_get_ops(¶ms->port_out
2920 pipeline_port_out_params_convert(¶ms->port_out
2926 int status = rte_pipeline_port_out_create(p->p,
2928 &p->port_out_id[i]);
2931 rte_pipeline_free(p->p);
/* Derive the numeric pipeline id from its "PIPELINE<n>" name, then
 * publish this pipeline's queue/link/out-port maps. */
2937 int pipeline_num = 0;
2939 int temp = sscanf(params->name, "PIPELINE%d", &pipeline_num);
2940 p_acl->pipeline_num = (uint8_t) pipeline_num;
2941 /* set_phy_outport_map(p_acl->pipeline_num, p_acl->links_map);*/
2942 register_pipeline_Qs(p_acl->pipeline_num, p);
2943 set_link_map(p_acl->pipeline_num, p, p_acl->links_map);
2944 set_outport_id(p_acl->pipeline_num, p, p_acl->port_out_id);
/*
 * NOTE(review): rte_ACL_hi_counter_block_in_use was incremented above
 * (line 2841), so this "== 0" test looks unreachable from here —
 * confirm against the full source whether the intended first-instance
 * check should compare against 1.
 */
2946 /* If this is the first ACL thread, create common ACL Rule tables */
2947 if (rte_ACL_hi_counter_block_in_use == 0) {
2949 printf("Create ACL Tables rte_socket_id(): %i\n",
/* Active/standby (A/B) table pairs allow rules to be rebuilt on the
 * standby copy and swapped in; IPv4 pair first, then IPv6 pair. */
2952 /* Create IPV4 ACL Rule Tables */
2953 struct rte_table_acl_params common_ipv4_table_acl_params = {
2955 .n_rules = acl_n_rules,
2956 .n_rule_fields = RTE_DIM(field_format_ipv4),
2959 memcpy(common_ipv4_table_acl_params.field_format,
2960 field_format_ipv4, sizeof(field_format_ipv4));
2962 uint32_t ipv4_entry_size = sizeof(struct acl_table_entry);
2964 acl_rule_table_ipv4_active =
2965 rte_table_acl_ops.f_create(&common_ipv4_table_acl_params,
2969 if (acl_rule_table_ipv4_active == NULL) {
2971 ("Failed to create common ACL IPV4A Rule table\n");
2972 rte_pipeline_free(p->p);
2977 /* Create second IPV4 Table */
2978 common_ipv4_table_acl_params.name = "ACLIPV4B";
2979 acl_rule_table_ipv4_standby =
2980 rte_table_acl_ops.f_create(&common_ipv4_table_acl_params,
2984 if (acl_rule_table_ipv4_standby == NULL) {
2986 ("Failed to create common ACL IPV4B Rule table\n");
2987 rte_pipeline_free(p->p);
2992 /* Create IPV6 ACL Rule Tables */
2993 struct rte_table_acl_params common_ipv6_table_acl_params = {
2995 .n_rules = acl_n_rules,
2996 .n_rule_fields = RTE_DIM(field_format_ipv6),
2999 memcpy(common_ipv6_table_acl_params.field_format,
3000 field_format_ipv6, sizeof(field_format_ipv6));
3002 uint32_t ipv6_entry_size = sizeof(struct acl_table_entry);
3004 acl_rule_table_ipv6_active =
3005 rte_table_acl_ops.f_create(&common_ipv6_table_acl_params,
3009 if (acl_rule_table_ipv6_active == NULL) {
3011 ("Failed to create common ACL IPV6A Rule table\n");
3012 rte_pipeline_free(p->p);
3017 /* Create second IPV6 table */
3018 common_ipv6_table_acl_params.name = "ACLIPV6B";
3019 acl_rule_table_ipv6_standby =
3020 rte_table_acl_ops.f_create(&common_ipv6_table_acl_params,
3024 if (acl_rule_table_ipv6_standby == NULL) {
3026 ("Failed to create common ACL IPV6B Rule table\n");
3027 rte_pipeline_free(p->p);
/* The pipeline's own table is a stub: classification is done in the
 * port-in action handler; the table has no hit/miss actions and no
 * per-entry action data. */
3037 struct rte_pipeline_table_params table_params = {
3038 .ops = &rte_table_stub_ops,
3040 .f_action_hit = NULL,
3041 .f_action_miss = NULL,
3043 .action_data_size = 0,
3046 int status = rte_pipeline_table_create(p->p,
3051 rte_pipeline_free(p->p);
/* Default (miss) entry forwards each packet to the output port id
 * already stored in its metadata (PORT_META action). */
3056 struct rte_pipeline_table_entry default_entry = {
3057 .action = RTE_PIPELINE_ACTION_PORT_META
3060 struct rte_pipeline_table_entry *default_entry_ptr;
3062 status = rte_pipeline_table_default_entry_add(p->p,
3065 &default_entry_ptr);
3068 rte_pipeline_free(p->p);
3074 /* Connecting input ports to tables */
3075 for (i = 0; i < p->n_ports_in; i++) {
3076 int status = rte_pipeline_port_in_connect_to_table(p->p,
3083 rte_pipeline_free(p->p);
3089 /* Enable input ports */
3090 for (i = 0; i < p->n_ports_in; i++) {
3091 int status = rte_pipeline_port_in_enable(p->p,
3095 rte_pipeline_free(p->p);
3101 /* Check pipeline consistency */
3102 if (rte_pipeline_check(p->p) < 0) {
3103 rte_pipeline_free(p->p);
/* Copy message-queue handles and message handler tables from params. */
3108 /* Message queues */
3109 p->n_msgq = params->n_msgq;
3110 for (i = 0; i < p->n_msgq; i++)
3111 p->msgq_in[i] = params->msgq_in[i];
3112 for (i = 0; i < p->n_msgq; i++)
3113 p->msgq_out[i] = params->msgq_out[i];
3115 /* Message handlers */
3116 memcpy(p->handlers, handlers, sizeof(p->handlers));
3117 memcpy(p_acl->custom_handlers,
3118 custom_handlers, sizeof(p_acl->custom_handlers));
3124 * Free resources and delete pipeline.
3127 * A pointer to the pipeline.
3130 * 0 on success, negative on error.
/*
 * pipeline_acl_free() - release the resources owned by this pipeline.
 * Returns 0 on success, negative on error (returns elided in extract).
 */
3132 static int pipeline_acl_free(void *pipeline)
3134 struct pipeline *p = (struct pipeline *)pipeline;
/* NULL-check of the argument (check body elided in this extract). */
3136 /* Check input arguments */
/* Destroy the underlying rte_pipeline. */
3140 /* Free resources */
3141 rte_pipeline_free(p->p);
3147 * Callback function to map input/output ports.
3150 * A pointer to the pipeline.
3154 * A pointer to the Output port.
3157 * 0 on success, negative on error.
/*
 * pipeline_acl_track() - map an input port to its output port for the
 * framework's port-tracking callback.
 * NOTE(review): port_in is tagged __rte_unused yet is read in the
 * bounds check below — the attribute appears stale; confirm and drop.
 */
3160 pipeline_acl_track(void *pipeline,
3161 __rte_unused uint32_t port_in, uint32_t *port_out)
3163 struct pipeline *p = (struct pipeline *)pipeline;
/* Validate pipeline handle, port index and output pointer. */
3165 /* Check input arguments */
3166 if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
/* Single-input pipelines map trivially (body elided in extract). */
3169 if (p->n_ports_in == 1) {
3178 * Callback function to process timers.
3181 * A pointer to the pipeline.
3184 * 0 on success, negative on error.
/*
 * pipeline_acl_timer() - periodic callback: service FE->BE messages,
 * flush buffered packets, and expire connection-tracker timers.
 * Returns 0 on success (return elided in extract).
 */
3186 static int pipeline_acl_timer(void *pipeline)
3189 struct pipeline *p = (struct pipeline *)pipeline;
3190 struct pipeline_acl *p_acl = (struct pipeline_acl *)pipeline;
/* Drain pending message requests, then flush the pipeline. */
3192 pipeline_msg_req_handle(p);
3193 rte_pipeline_flush(p->p);
/* Age out expired connection-tracking entries. */
3195 rte_ct_handle_expired_timers(p_acl->cnxn_tracker);
3201 * Callback function to process CLI commands from FE.
3204 * A pointer to the pipeline.
3206 * A pointer to command specific data.
3209 * A pointer to message handler on success,
3210 * pipeline_msg_req_invalid_handler on error.
/*
 * pipeline_acl_msg_req_custom_handler() - dispatch a custom FE message
 * to the handler registered for its subtype. Unknown subtypes and
 * unregistered (NULL) slots fall back to the invalid-request handler.
 * Returns the handler's response pointer.
 */
3212 void *pipeline_acl_msg_req_custom_handler(struct pipeline *p, void *msg)
3214 struct pipeline_acl *p_acl = (struct pipeline_acl *)p;
3215 struct pipeline_custom_msg_req *req = msg;
3216 pipeline_msg_req_handler f_handle;
/* Bounds-check the subtype before indexing the handler table. */
3218 f_handle = (req->subtype < PIPELINE_ACL_MSG_REQS) ?
3219 p_acl->custom_handlers[req->subtype] :
3220 pipeline_msg_req_invalid_handler;
/* A registered-but-NULL slot is also treated as invalid. */
3222 if (f_handle == NULL)
3223 f_handle = pipeline_msg_req_invalid_handler;
3225 return f_handle(p, req);
3229 * Handler for DBG CLI command.
3232 * A pointer to the pipeline.
3234 * A pointer to command specific data.
3237 * A pointer to response message.
3238 * Response message contains status.
/*
 * pipeline_acl_msg_req_dbg_handler() - handle the DBG CLI message:
 * req->dbg 0 turns debug off, 1 turns it on, anything else is invalid.
 * The response aliases the request buffer (rsp and req share msg);
 * status assignments and returns are elided in this extract.
 */
3240 void *pipeline_acl_msg_req_dbg_handler(struct pipeline *p, void *msg)
3243 struct pipeline_acl_dbg_msg_req *req = msg;
3244 struct pipeline_acl_dbg_msg_rsp *rsp = msg;
3246 if (req->dbg == 0) {
3247 printf("DBG turned OFF\n");
3250 } else if (req->dbg == 1) {
3251 printf("DBG turned ON\n");
3255 printf("Invalid DBG setting\n");
/*
 * Back-end operations vtable for the ACL pipeline, consumed by the
 * pipeline framework (init/free/timer/track entry points).
 * NOTE(review): the .f_run slot is not visible in this extract —
 * confirm in the full source whether it is set or intentionally NULL.
 */
3262 struct pipeline_be_ops pipeline_acl_be_ops = {
3263 .f_init = pipeline_acl_init,
3264 .f_free = pipeline_acl_free,
3266 .f_timer = pipeline_acl_timer,
3267 .f_track = pipeline_acl_track,