2 // Copyright (c) 2010-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
20 #include <rte_cycles.h>
21 #include <rte_version.h>
24 #include "prox_lua_types.h"
28 #include "parse_utils.h"
29 #include "ip_subnet.h"
30 #include "handle_acl.h"
31 #include "acl_field_def.h"
32 #include "task_init.h"
33 #include "task_base.h"
/* Per-task state for the ACL handler.
 * NOTE(review): the enclosing "struct task_acl {" line is not visible in this
 * chunk; fields are documented in place. */
39 struct task_base base; /* Embedded base; handlers cast task_base* <-> task_acl* (see handle_acl_bulk), so this is presumably the first member. */
40 struct rte_acl_ctx *context; /* DPDK ACL context; created in init_task_acl, rebuilt in acl_msg. */
41 const uint8_t *ptuples[64]; /* Per-burst pointers to packet data handed to rte_acl_classify; 64 = max burst handled here. */
47 size_t field_defs_size; /* Byte size of the field-definition table copied into rte_acl_config.defs. */
48 uint32_t n_field_defs; /* Number of entries in the field-definition table (rte_acl_config.num_fields). */
/* Overwrite the QoS traffic class stored in an mbuf's scheduler metadata,
 * preserving subport, pipe, queue and meter color.
 * NOTE(review): the #else/#endif and brace lines of this function are not
 * visible in this chunk; the two branches below are the DPDK >= 1.8 path and
 * the legacy direct-struct path respectively. */
51 static void set_tc(struct rte_mbuf *mbuf, uint32_t tc)
53 #if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
54 uint32_t subport, pipe, traffic_class, queue;
55 enum rte_meter_color color;
/* Read the current sched tree path and color from the mbuf... */
57 rte_sched_port_pkt_read_tree_path(mbuf, &subport, &pipe, &traffic_class, &queue);
58 color = rte_sched_port_pkt_read_color(mbuf);
/* ...then write it back with only the traffic class replaced by 'tc'
 * (the traffic_class value just read is intentionally discarded). */
60 rte_sched_port_pkt_write(mbuf, subport, pipe, tc, queue, color);
/* Legacy (< 1.8.0) path: poke the hierarchy struct inside the mbuf directly. */
62 struct rte_sched_port_hierarchy *sched =
63 (struct rte_sched_port_hierarchy *) &mbuf->pkt.hash.sched;
64 sched->traffic_class = tc;
/* Bulk packet handler: classify each packet in the burst against the ACL trie
 * and route it (out[i]) accordingly before handing the burst to tx_pkt.
 * Matched rule userdata (results[i]) selects the action; non-matching or
 * drop-classified packets get OUT_DISCARD (the full switch body is not
 * visible in this chunk). */
68 static int handle_acl_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
70 struct task_acl *task = (struct task_acl *)tbase;
72 uint8_t out[MAX_PKT_BURST];
/* Warm-up prefetch of the first PROX_PREFETCH_OFFSET mbuf headers and data. */
75 #ifdef PROX_PREFETCH_OFFSET
76 for (j = 0; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) {
79 for (j = 1; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) {
80 PREFETCH0(rte_pktmbuf_mtod(mbufs[j - 1], void *));
/* Main loop: prefetch PREFETCH_OFFSET packets ahead while collecting tuple
 * pointers for classification.
 * NOTE(review): the loop bound uses PREFETCH_OFFSET while the surrounding
 * guards use PROX_PREFETCH_OFFSET — confirm both macros are defined and
 * intentionally distinct; this looks like an inconsistency. */
83 for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
84 #ifdef PROX_PREFETCH_OFFSET
85 PREFETCH0(mbufs[j + PREFETCH_OFFSET]);
86 PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *));
88 /* TODO: detect version_ihl != 0x45. Extract relevant
89 fields of that packet and point ptuples[j] to the
90 extracted version. Note that this is very unlikely. */
91 task->ptuples[j] = rte_pktmbuf_mtod(mbufs[j], uint8_t *);
/* Tail of the burst: no further lookahead possible, just collect pointers. */
93 #ifdef PROX_PREFETCH_OFFSET
94 PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
95 for (; j < n_pkts; ++j) {
96 task->ptuples[j] = rte_pktmbuf_mtod(mbufs[j], uint8_t *);
/* One classification pass for the whole burst, single category. */
100 rte_acl_classify(task->context, (const uint8_t **)task->ptuples, results, n_pkts, 1);
102 for (uint8_t i = 0; i < n_pkts; ++i) {
103 switch (results[i]) {
107 out[i] = OUT_DISCARD;
117 return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
/* Control-plane message handler (registered in init_task_acl as
 * ctrl_func_m): each message is a new acl4_rule to append at runtime.
 * Rules are added until the context is full, then the trie is rebuilt. */
120 static void acl_msg(struct task_base *tbase, void **data, uint16_t n_msgs)
122 struct task_acl *task = (struct task_acl *)tbase;
123 struct acl4_rule **new_rules = (struct acl4_rule **)data;
126 for (i = 0; i < n_msgs; ++i) {
127 if (task->n_rules == task->n_max_rules) {
/* NOTE(review): pluralization is wrong — inside this branch n_msgs - i >= 1
 * always, so "(n_msgs - i)? "s" : """ prints "s" even for exactly one rule
 * ("Failed to add 1 rules"). The condition should be (n_msgs - i) > 1. */
128 plog_err("Failed to add %d rule%s (already at maximum number of rules (%d))",
129 n_msgs - i, (n_msgs - i)? "s" : "", task->n_max_rules);
/* Priority is the 1-based insertion order; n_rules is bumped per added rule. */
133 new_rules[i]->data.priority = ++task->n_rules;
134 rte_acl_add_rules(task->context, (struct rte_acl_rule*) new_rules[i], 1);
137 /* No need to rebuild if no rules have been added */
142 struct rte_acl_config acl_build_param;
/* Rebuild the trie with the same single-category, task-specific field layout
 * used at init time (see init_task_acl). */
144 acl_build_param.num_categories = 1;
146 acl_build_param.num_fields = task->n_field_defs;
147 rte_memcpy(&acl_build_param.defs, task->field_defs, task->field_defs_size);
/* PROX_PANIC fires on non-zero return; ret captures the error code for the message. */
150 PROX_PANIC((ret = rte_acl_build(task->context, &acl_build_param)),
151 "Failed to build ACL trie (%d)\n", ret);
/* Task init: create the ACL context, load rules from the Lua config, build
 * the trie, and register acl_msg as the runtime control handler.
 * Panics (PROX_PANIC) on any failure — this runs at startup only. */
154 static void init_task_acl(struct task_base *tbase, struct task_args *targ)
156 struct task_acl *task = (struct task_acl *)tbase;
157 int use_qinq = targ->flags & TASK_ARG_QINQ_ACL;
160 struct rte_acl_param acl_param;
162 /* Create ACL contexts */
/* Context name is unique per lcore/task pair. */
163 snprintf(name, sizeof(name), "acl-%d-%d", targ->lconf->id, targ->task);
/* Select the field layout: QinQ (svlan/cvlan aware) vs plain eth/IPv4/UDP.
 * NOTE(review): the if/else lines switching on use_qinq are not visible in
 * this chunk — the two assignments below are presumably the two branches. */
166 task->n_field_defs = RTE_DIM(pkt_qinq_ipv4_udp_defs);
167 task->field_defs = pkt_qinq_ipv4_udp_defs;
168 task->field_defs_size = sizeof(pkt_qinq_ipv4_udp_defs);
170 task->n_field_defs = RTE_DIM(pkt_eth_ipv4_udp_defs);
171 task->field_defs = pkt_eth_ipv4_udp_defs;
172 task->field_defs_size = sizeof(pkt_eth_ipv4_udp_defs);
/* Allocate the context on the NUMA node of the owning lcore. */
175 acl_param.name = name;
176 acl_param.socket_id = rte_lcore_to_socket_id(targ->lconf->id);
177 acl_param.rule_size = RTE_ACL_RULE_SZ(task->n_field_defs);
178 acl_param.max_rule_num = targ->n_max_rules;
180 task->n_max_rules = targ->n_max_rules;
181 task->context = rte_acl_create(&acl_param);
183 PROX_PANIC(task->context == NULL, "Failed to create ACL context\n");
/* lua_to_rules decrements free_rules per rule consumed; the difference
 * below gives the number of rules actually loaded. */
184 uint32_t free_rules = targ->n_max_rules;
186 PROX_PANIC(!strcmp(targ->rules, ""), "No rule specified for ACL\n");
188 int ret = lua_to_rules(prox_lua(), GLOBAL, targ->rules, task->context, &free_rules, use_qinq, targ->qinq_tag);
189 PROX_PANIC(ret, "Failed to read rules from config:\n%s\n", get_lua_to_errors());
190 task->n_rules = targ->n_max_rules - free_rules;
192 plog_info("Configured %d rules\n", task->n_rules);
/* Build the trie: single category, field layout chosen above.
 * max_size exists only from DPDK 2.1; 0 = no memory limit. */
195 struct rte_acl_config acl_build_param;
197 acl_build_param.num_categories = 1;
198 #if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
199 acl_build_param.max_size = 0;
202 acl_build_param.num_fields = task->n_field_defs;
203 rte_memcpy(&acl_build_param.defs, task->field_defs, task->field_defs_size);
205 plog_info("Building trie structure\n");
206 PROX_PANIC(rte_acl_build(task->context, &acl_build_param),
207 "Failed to build ACL trie\n");
/* Register acl_msg so rules can be added at runtime via control messages. */
210 targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq);
211 targ->lconf->ctrl_func_m[targ->task] = acl_msg;
/* Parse one textual ACL rule into an acl4_rule.
 * fields[] layout: [0]=svlan(+mask), [1]=cvlan(+mask), [2]=ip proto(+mask),
 * [3]=src IPv4 CIDR, [4]=dst IPv4 CIDR, [5]=src port range,
 * [6]=dst port range, [7]=action ("drop"/"allow"/"rate limit").
 * n_rules becomes the rule priority; use_qinq selects whether fields 5-8 of
 * the ACL tuple carry QinQ tags or a plain IPv4 ethertype check.
 * Panics (PROX_PANIC) on any parse error — config-load path only.
 * Returns 0 on success (return statement not visible in this chunk). */
214 int str_to_rule(struct acl4_rule *rule, char** fields, int n_rules, int use_qinq)
216 uint32_t svlan, svlan_mask;
217 uint32_t cvlan, cvlan_mask;
219 uint32_t ip_proto, ip_proto_mask;
221 struct ip4_subnet ip_src;
222 struct ip4_subnet ip_dst;
224 uint32_t sport_lo, sport_hi;
225 uint32_t dport_lo, dport_hi;
227 enum acl_action class = ACL_NOT_SET;
/* Parse the eight whitespace-split columns; each parser reports via get_parse_err(). */
230 PROX_PANIC(parse_int_mask(&svlan, &svlan_mask, fields[0]), "Error parsing svlan: %s\n", get_parse_err());
231 PROX_PANIC(parse_int_mask(&cvlan, &cvlan_mask, fields[1]), "Error parsing cvlan: %s\n", get_parse_err());
232 PROX_PANIC(parse_int_mask(&ip_proto, &ip_proto_mask, fields[2]), "Error parsing ip protocol: %s\n", get_parse_err());
233 PROX_PANIC(parse_ip4_cidr(&ip_src, fields[3]), "Error parsing source IP subnet: %s\n", get_parse_err());
234 PROX_PANIC(parse_ip4_cidr(&ip_dst, fields[4]), "Error parsing dest IP subnet: %s\n", get_parse_err());
236 PROX_PANIC(parse_range(&sport_lo, &sport_hi, fields[5]), "Error parsing source port range: %s\n", get_parse_err());
237 PROX_PANIC(parse_range(&dport_lo, &dport_hi, fields[6]), "Error parsing destination port range: %s\n", get_parse_err());
239 PROX_PANIC(parse_str(class_str, fields[7], sizeof(class_str)), "Error parsing action: %s\n", get_parse_err());
/* Map the action string onto acl_action; ACL_DROP/ACL_ALLOW assignments are
 * on lines not visible in this chunk. */
241 if (!strcmp(class_str, "drop")) {
244 else if (!strcmp(class_str, "allow")) {
247 else if (!strcmp(class_str, "rate limit")) {
248 class = ACL_RATE_LIMIT;
251 plog_err("unknown class type: %s\n", class_str);
254 rule->data.userdata = class; /* allow, drop or ratelimit */
255 rule->data.category_mask = 1;
256 rule->data.priority = n_rules;
258 /* Configuration for rules is done in little-endian so no bswap is needed here. */
/* Fields 0-4 are the common 5-tuple: proto, src/dst subnet, src/dst port range.
 * CIDR fields use prefix length as mask_range; port fields use lo/hi range. */
260 rule->fields[0].value.u8 = ip_proto;
261 rule->fields[0].mask_range.u8 = ip_proto_mask;
262 rule->fields[1].value.u32 = ip_src.ip;
263 rule->fields[1].mask_range.u32 = ip_src.prefix;
265 rule->fields[2].value.u32 = ip_dst.ip;
266 rule->fields[2].mask_range.u32 = ip_dst.prefix;
268 rule->fields[3].value.u16 = sport_lo;
269 rule->fields[3].mask_range.u16 = sport_hi;
271 rule->fields[4].value.u16 = dport_lo;
272 rule->fields[4].mask_range.u16 = dport_hi;
/* QinQ branch (use_qinq; the if/else lines are not visible in this chunk):
 * match outer 802.1ad ethertype + svlan TCI, then inner 802.1Q + cvlan TCI. */
275 rule->fields[5].value.u16 = rte_bswap16(ETYPE_8021ad);
276 rule->fields[5].mask_range.u16 = 0xffff;
278 /* To mask out the TCI and only keep the VID, the mask should be 0x0fff */
279 rule->fields[6].value.u16 = svlan;
280 rule->fields[6].mask_range.u16 = svlan_mask;
282 rule->fields[7].value.u16 = rte_bswap16(ETYPE_VLAN);
283 rule->fields[7].mask_range.u16 = 0xffff;
285 rule->fields[8].value.u16 = cvlan;
286 rule->fields[8].mask_range.u16 = cvlan_mask;
289 /* Reuse first ethertype from vlan to check if packet is IPv4 packet */
290 rule->fields[5].value.u16 = rte_bswap16(ETYPE_IPv4);
291 rule->fields[5].mask_range.u16 = 0xffff;
293 /* Other fields are ignored */
294 rule->fields[6].value.u16 = 0;
295 rule->fields[6].mask_range.u16 = 0;
296 rule->fields[7].value.u16 = 0;
297 rule->fields[7].mask_range.u16 = 0;
298 rule->fields[8].value.u16 = 0;
299 rule->fields[8].mask_range.u16 = 0;
/* Task descriptor wiring this file's init/handle functions into the PROX
 * task framework (registered by reg_task_acl below). The mode-name member
 * line is not visible in this chunk. */
304 static struct task_init task_init_acl = {
306 .init = init_task_acl,
307 .handle = handle_acl_bulk,
308 .size = sizeof(struct task_acl)
/* Self-registration: runs before main() via the GCC constructor attribute,
 * making the "acl" task mode available without explicit init calls. */
311 __attribute__((constructor)) static void reg_task_acl(void)
313 reg_task(&task_init_acl);