// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string.h>

#include <rte_acl.h>
#include <rte_ip.h>
#include <rte_cycles.h>
#include <rte_version.h>

#include "prox_lua.h"
#include "prox_lua_types.h"

#include "log.h"
#include "quit.h"
#include "parse_utils.h"
#include "ip_subnet.h"
#include "handle_acl.h"
#include "acl_field_def.h"
#include "task_init.h"
#include "task_base.h"
#include "lconf.h"
#include "prefetch.h"
#include "etypes.h"
#include "prox_compat.h"
#include "handle_sched.h"
41 struct task_base base;
42 struct rte_acl_ctx *context;
43 const uint8_t *ptuples[64];
49 size_t field_defs_size;
50 uint32_t n_field_defs;
51 struct rte_sched_port *sched_port;
54 static void set_tc(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t tc)
56 struct task_acl *task = (struct task_acl *)tbase;
57 #if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
58 uint32_t subport, pipe, traffic_class, queue;
59 enum prox_rte_color color;
61 prox_rte_sched_port_pkt_read_tree_path(task->sched_port, mbuf, &subport, &pipe, &traffic_class, &queue);
62 color = rte_sched_port_pkt_read_color(mbuf);
64 prox_rte_sched_port_pkt_write(task->sched_port, mbuf, subport, pipe, tc, queue, color);
66 struct rte_sched_port_hierarchy *sched =
67 (struct rte_sched_port_hierarchy *) &mbuf->pkt.hash.sched;
68 sched->traffic_class = tc;
72 static int handle_acl_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
74 struct task_acl *task = (struct task_acl *)tbase;
76 uint8_t out[MAX_PKT_BURST];
79 #ifdef PROX_PREFETCH_OFFSET
80 for (j = 0; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) {
83 for (j = 1; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) {
84 PREFETCH0(rte_pktmbuf_mtod(mbufs[j - 1], void *));
87 for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
88 #ifdef PROX_PREFETCH_OFFSET
89 PREFETCH0(mbufs[j + PREFETCH_OFFSET]);
90 PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *));
92 /* TODO: detect version_ihl != 0x45. Extract relevant
93 fields of that packet and point ptuples[j] to the
94 extracted verion. Note that this is very unlikely. */
95 task->ptuples[j] = rte_pktmbuf_mtod(mbufs[j], uint8_t *);
97 #ifdef PROX_PREFETCH_OFFSET
98 PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
99 for (; j < n_pkts; ++j) {
100 task->ptuples[j] = rte_pktmbuf_mtod(mbufs[j], uint8_t *);
104 rte_acl_classify(task->context, (const uint8_t **)task->ptuples, results, n_pkts, 1);
106 for (uint8_t i = 0; i < n_pkts; ++i) {
107 switch (results[i]) {
111 out[i] = OUT_DISCARD;
115 // __attribute__ ((fallthrough));
117 set_tc(tbase, mbufs[i], 3);
122 return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
125 static void acl_msg(struct task_base *tbase, void **data, uint16_t n_msgs)
127 struct task_acl *task = (struct task_acl *)tbase;
128 struct acl4_rule **new_rules = (struct acl4_rule **)data;
131 for (i = 0; i < n_msgs; ++i) {
132 if (task->n_rules == task->n_max_rules) {
133 plog_err("Failed to add %d rule%s (already at maximum number of rules (%d))",
134 n_msgs - i, (n_msgs - i)? "s" : "", task->n_max_rules);
138 new_rules[i]->data.priority = ++task->n_rules;
139 rte_acl_add_rules(task->context, (struct rte_acl_rule*) new_rules[i], 1);
142 /* No need to rebuild if no rules have been added */
147 struct rte_acl_config acl_build_param;
149 acl_build_param.num_categories = 1;
151 acl_build_param.num_fields = task->n_field_defs;
152 rte_memcpy(&acl_build_param.defs, task->field_defs, task->field_defs_size);
155 PROX_PANIC((ret = rte_acl_build(task->context, &acl_build_param)),
156 "Failed to build ACL trie (%d)\n", ret);
159 static void init_task_acl(struct task_base *tbase, struct task_args *targ)
161 struct task_acl *task = (struct task_acl *)tbase;
162 int use_qinq = targ->flags & TASK_ARG_QINQ_ACL;
165 struct rte_acl_param acl_param;
167 /* Create ACL contexts */
168 snprintf(name, sizeof(name), "acl-%d-%d", targ->lconf->id, targ->task);
171 task->n_field_defs = RTE_DIM(pkt_qinq_ipv4_udp_defs);
172 task->field_defs = pkt_qinq_ipv4_udp_defs;
173 task->field_defs_size = sizeof(pkt_qinq_ipv4_udp_defs);
175 task->n_field_defs = RTE_DIM(pkt_eth_ipv4_udp_defs);
176 task->field_defs = pkt_eth_ipv4_udp_defs;
177 task->field_defs_size = sizeof(pkt_eth_ipv4_udp_defs);
180 acl_param.name = name;
181 acl_param.socket_id = rte_lcore_to_socket_id(targ->lconf->id);
182 acl_param.rule_size = RTE_ACL_RULE_SZ(task->n_field_defs);
183 acl_param.max_rule_num = targ->n_max_rules;
185 task->n_max_rules = targ->n_max_rules;
186 task->context = rte_acl_create(&acl_param);
188 PROX_PANIC(task->context == NULL, "Failed to create ACL context\n");
189 uint32_t free_rules = targ->n_max_rules;
191 PROX_PANIC(!strcmp(targ->rules, ""), "No rule specified for ACL\n");
193 int ret = lua_to_rules(prox_lua(), GLOBAL, targ->rules, task->context, &free_rules, use_qinq, targ->qinq_tag);
194 PROX_PANIC(ret, "Failed to read rules from config:\n%s\n", get_lua_to_errors());
195 task->n_rules = targ->n_max_rules - free_rules;
197 plog_info("Configured %d rules\n", task->n_rules);
200 struct rte_acl_config acl_build_param;
202 acl_build_param.num_categories = 1;
203 #if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
204 acl_build_param.max_size = 0;
207 acl_build_param.num_fields = task->n_field_defs;
208 rte_memcpy(&acl_build_param.defs, task->field_defs, task->field_defs_size);
210 plog_info("Building trie structure\n");
211 PROX_PANIC(rte_acl_build(task->context, &acl_build_param),
212 "Failed to build ACL trie\n");
215 targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq);
216 targ->lconf->ctrl_func_m[targ->task] = acl_msg;
218 // If rate limiting is used tc will be set, sched_port must be initialized, and tc will be used by a following policing or qos task
219 int rc = init_port_sched(&task->sched_port, targ);
221 // ACL can be used to accept/drop packets and/or to set rate limiting. If using rate limiting, then sched_port must be defined
222 // TODO: check whether rate limiting is configured, and, if yes, check that QoS or policing task configures the qos_conf.params.
224 plog_info("Did not find any QoS or Policing task to transmit to => setting tc will not work\n");
227 int str_to_rule(struct acl4_rule *rule, char** fields, int n_rules, int use_qinq)
229 uint32_t svlan, svlan_mask;
230 uint32_t cvlan, cvlan_mask;
232 uint32_t ip_proto, ip_proto_mask;
234 struct ip4_subnet ip_src;
235 struct ip4_subnet ip_dst;
237 uint32_t sport_lo, sport_hi;
238 uint32_t dport_lo, dport_hi;
240 enum acl_action class = ACL_NOT_SET;
243 PROX_PANIC(parse_int_mask(&svlan, &svlan_mask, fields[0]), "Error parsing svlan: %s\n", get_parse_err());
244 PROX_PANIC(parse_int_mask(&cvlan, &cvlan_mask, fields[1]), "Error parsing cvlan: %s\n", get_parse_err());
245 PROX_PANIC(parse_int_mask(&ip_proto, &ip_proto_mask, fields[2]), "Error parsing ip protocol: %s\n", get_parse_err());
246 PROX_PANIC(parse_ip4_cidr(&ip_src, fields[3]), "Error parsing source IP subnet: %s\n", get_parse_err());
247 PROX_PANIC(parse_ip4_cidr(&ip_dst, fields[4]), "Error parsing dest IP subnet: %s\n", get_parse_err());
249 PROX_PANIC(parse_range(&sport_lo, &sport_hi, fields[5]), "Error parsing source port range: %s\n", get_parse_err());
250 PROX_PANIC(parse_range(&dport_lo, &dport_hi, fields[6]), "Error parsing destination port range: %s\n", get_parse_err());
252 PROX_PANIC(parse_str(class_str, fields[7], sizeof(class_str)), "Error parsing action: %s\n", get_parse_err());
254 if (!strcmp(class_str, "drop")) {
257 else if (!strcmp(class_str, "allow")) {
260 else if (!strcmp(class_str, "rate limit")) {
261 class = ACL_RATE_LIMIT;
264 plog_err("unknown class type: %s\n", class_str);
267 rule->data.userdata = class; /* allow, drop or ratelimit */
268 rule->data.category_mask = 1;
269 rule->data.priority = n_rules;
271 /* Configuration for rules is done in little-endian so no bswap is needed here.. */
273 rule->fields[0].value.u8 = ip_proto;
274 rule->fields[0].mask_range.u8 = ip_proto_mask;
275 rule->fields[1].value.u32 = ip_src.ip;
276 rule->fields[1].mask_range.u32 = ip_src.prefix;
278 rule->fields[2].value.u32 = ip_dst.ip;
279 rule->fields[2].mask_range.u32 = ip_dst.prefix;
281 rule->fields[3].value.u16 = sport_lo;
282 rule->fields[3].mask_range.u16 = sport_hi;
284 rule->fields[4].value.u16 = dport_lo;
285 rule->fields[4].mask_range.u16 = dport_hi;
288 rule->fields[5].value.u16 = rte_bswap16(ETYPE_8021ad);
289 rule->fields[5].mask_range.u16 = 0xffff;
291 /* To mask out the TCI and only keep the VID, the mask should be 0x0fff */
292 rule->fields[6].value.u16 = svlan;
293 rule->fields[6].mask_range.u16 = svlan_mask;
295 rule->fields[7].value.u16 = rte_bswap16(ETYPE_VLAN);
296 rule->fields[7].mask_range.u16 = 0xffff;
298 rule->fields[8].value.u16 = cvlan;
299 rule->fields[8].mask_range.u16 = cvlan_mask;
302 /* Reuse first ethertype from vlan to check if packet is IPv4 packet */
303 rule->fields[5].value.u16 = rte_bswap16(ETYPE_IPv4);
304 rule->fields[5].mask_range.u16 = 0xffff;
306 /* Other fields are ignored */
307 rule->fields[6].value.u16 = 0;
308 rule->fields[6].mask_range.u16 = 0;
309 rule->fields[7].value.u16 = 0;
310 rule->fields[7].mask_range.u16 = 0;
311 rule->fields[8].value.u16 = 0;
312 rule->fields[8].mask_range.u16 = 0;
317 static struct task_init task_init_acl = {
319 .init = init_task_acl,
320 .handle = handle_acl_bulk,
321 .size = sizeof(struct task_acl)
324 __attribute__((constructor)) static void reg_task_acl(void)
326 reg_task(&task_init_acl);