Merge "[l2l3 stack] implements new nd state machine & nd buffering"
[samplevnf.git] / VNFs / DPPD-PROX / handle_acl.c
1 /*
2 // Copyright (c) 2010-2017 Intel Corporation
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //     http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 */
16
17 #include <rte_mbuf.h>
18 #include <rte_acl.h>
19 #include <rte_ip.h>
20 #include <rte_cycles.h>
21 #include <rte_version.h>
22
23 #include "prox_lua.h"
24 #include "prox_lua_types.h"
25
26 #include "log.h"
27 #include "quit.h"
28 #include "parse_utils.h"
29 #include "ip_subnet.h"
30 #include "handle_acl.h"
31 #include "acl_field_def.h"
32 #include "task_init.h"
33 #include "task_base.h"
34 #include "lconf.h"
35 #include "prefetch.h"
36 #include "etypes.h"
37
/* Per-task state for the "acl" mode. */
struct task_acl {
        struct task_base base;          /* must stay first: the framework passes task_base* and handlers cast it to task_acl* */
        struct rte_acl_ctx *context;    /* DPDK ACL context holding the compiled rule trie */
        const uint8_t *ptuples[64];     /* per-burst scratch: packet-data pointers handed to rte_acl_classify() */

        uint32_t       n_rules;         /* rules currently added to the context */
        uint32_t       n_max_rules;     /* capacity requested at init (targ->n_max_rules) */

        void           *field_defs;     /* rte_acl_field_def table: QinQ or plain-Ethernet IPv4/UDP layout */
        size_t         field_defs_size; /* byte size of field_defs, copied into each rte_acl_config before build */
        uint32_t       n_field_defs;    /* entry count of field_defs */
};
50
/* Overwrite the traffic class in the mbuf's QoS scheduler metadata while
 * preserving subport, pipe, queue and color.  Called by handle_acl_bulk()
 * for packets whose ACL verdict is "rate limit" (and, via fallthrough,
 * "allow") so a downstream QoS/scheduler task can treat them differently. */
static void set_tc(struct rte_mbuf *mbuf, uint32_t tc)
{
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
        /* DPDK >= 1.8: read the full scheduler tree path, then write it
         * back with only the traffic class replaced. */
        uint32_t subport, pipe, traffic_class, queue;
        enum rte_meter_color color;

        rte_sched_port_pkt_read_tree_path(mbuf, &subport, &pipe, &traffic_class, &queue);
        color = rte_sched_port_pkt_read_color(mbuf);

        rte_sched_port_pkt_write(mbuf, subport, pipe, tc, queue, color);
#else
        /* Older DPDK: patch the traffic class directly in the per-packet
         * scheduler hierarchy embedded in the mbuf.
         * NOTE(review): assumes the pre-1.8 mbuf layout (pkt.hash.sched). */
        struct rte_sched_port_hierarchy *sched =
                (struct rte_sched_port_hierarchy *) &mbuf->pkt.hash.sched;
        sched->traffic_class = tc;
#endif
}
67
68 static int handle_acl_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
69 {
70         struct task_acl *task = (struct task_acl *)tbase;
71         uint32_t results[64];
72         uint8_t out[MAX_PKT_BURST];
73         uint16_t j;
74
75 #ifdef PROX_PREFETCH_OFFSET
76         for (j = 0; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) {
77                 PREFETCH0(mbufs[j]);
78         }
79         for (j = 1; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) {
80                 PREFETCH0(rte_pktmbuf_mtod(mbufs[j - 1], void *));
81         }
82 #endif
83         for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
84 #ifdef PROX_PREFETCH_OFFSET
85                 PREFETCH0(mbufs[j + PREFETCH_OFFSET]);
86                 PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *));
87 #endif
88                 /* TODO: detect version_ihl != 0x45. Extract relevant
89                    fields of that packet and point ptuples[j] to the
90                    extracted verion. Note that this is very unlikely. */
91                 task->ptuples[j] = rte_pktmbuf_mtod(mbufs[j], uint8_t *);
92         }
93 #ifdef PROX_PREFETCH_OFFSET
94         PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
95         for (; j < n_pkts; ++j) {
96                 task->ptuples[j] = rte_pktmbuf_mtod(mbufs[j], uint8_t *);
97         }
98 #endif
99
100         rte_acl_classify(task->context, (const uint8_t **)task->ptuples, results, n_pkts, 1);
101
102         for (uint8_t i = 0; i < n_pkts; ++i) {
103                 switch (results[i]) {
104                 default:
105                 case ACL_NOT_SET:
106                 case ACL_DROP:
107                         out[i] = OUT_DISCARD;
108                         break;
109                 case ACL_ALLOW:
110                         out[i] = 0;
111                 case ACL_RATE_LIMIT:
112                         set_tc(mbufs[i], 3);
113                         break;
114                 };
115         }
116
117         return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
118 }
119
120 static void acl_msg(struct task_base *tbase, void **data, uint16_t n_msgs)
121 {
122         struct task_acl *task = (struct task_acl *)tbase;
123         struct acl4_rule **new_rules = (struct acl4_rule **)data;
124         uint16_t i;
125
126         for (i = 0; i < n_msgs; ++i) {
127                 if (task->n_rules == task->n_max_rules) {
128                         plog_err("Failed to add %d rule%s (already at maximum number of rules (%d))",
129                                 n_msgs - i, (n_msgs - i)? "s" : "", task->n_max_rules);
130                         break;
131                 }
132
133                 new_rules[i]->data.priority = ++task->n_rules;
134                 rte_acl_add_rules(task->context, (struct rte_acl_rule*) new_rules[i], 1);
135         }
136
137         /* No need to rebuild if no rules have been added */
138         if (!i) {
139                 return ;
140         }
141
142         struct rte_acl_config acl_build_param;
143         /* Perform builds */
144         acl_build_param.num_categories = 1;
145
146         acl_build_param.num_fields = task->n_field_defs;
147         rte_memcpy(&acl_build_param.defs, task->field_defs, task->field_defs_size);
148
149         int ret;
150         PROX_PANIC((ret = rte_acl_build(task->context, &acl_build_param)),
151                    "Failed to build ACL trie (%d)\n", ret);
152 }
153
/* One-time task initialization: pick the field layout (QinQ or plain
 * Ethernet), create the DPDK ACL context, load rules from the Lua config,
 * build the trie, and register acl_msg as the runtime control handler.
 * All failures are fatal (PROX_PANIC). */
static void init_task_acl(struct task_base *tbase, struct task_args *targ)
{
        struct task_acl *task = (struct task_acl *)tbase;
        int use_qinq = targ->flags & TASK_ARG_QINQ_ACL;

        char name[PATH_MAX];
        struct rte_acl_param acl_param;

        /* Create ACL contexts */
        snprintf(name, sizeof(name), "acl-%d-%d", targ->lconf->id, targ->task);

        /* Field definitions decide how rte_acl interprets packet bytes;
         * the QinQ layout adds the two VLAN tag fields. */
        if (use_qinq) {
                task->n_field_defs    = RTE_DIM(pkt_qinq_ipv4_udp_defs);
                task->field_defs      = pkt_qinq_ipv4_udp_defs;
                task->field_defs_size = sizeof(pkt_qinq_ipv4_udp_defs);
        } else {
                task->n_field_defs    = RTE_DIM(pkt_eth_ipv4_udp_defs);
                task->field_defs      = pkt_eth_ipv4_udp_defs;
                task->field_defs_size = sizeof(pkt_eth_ipv4_udp_defs);
        }

        acl_param.name = name;
        /* Allocate the context on the socket of the owning lcore. */
        acl_param.socket_id = rte_lcore_to_socket_id(targ->lconf->id);
        acl_param.rule_size = RTE_ACL_RULE_SZ(task->n_field_defs);
        acl_param.max_rule_num = targ->n_max_rules;

        task->n_max_rules = targ->n_max_rules;
        task->context = rte_acl_create(&acl_param);

        PROX_PANIC(task->context == NULL, "Failed to create ACL context\n");
        /* lua_to_rules decrements free_rules for each rule it loads. */
        uint32_t free_rules = targ->n_max_rules;

        PROX_PANIC(!strcmp(targ->rules, ""), "No rule specified for ACL\n");

        int ret = lua_to_rules(prox_lua(), GLOBAL, targ->rules, task->context, &free_rules, use_qinq, targ->qinq_tag);
        PROX_PANIC(ret, "Failed to read rules from config:\n%s\n", get_lua_to_errors());
        task->n_rules = targ->n_max_rules - free_rules;

        plog_info("Configured %d rules\n", task->n_rules);

        if (task->n_rules) {
                struct rte_acl_config acl_build_param;
                /* Perform builds */
                acl_build_param.num_categories = 1;
#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
                /* 0 = no limit on the internal trie memory footprint. */
                acl_build_param.max_size = 0;
#endif

                acl_build_param.num_fields = task->n_field_defs;
                rte_memcpy(&acl_build_param.defs, task->field_defs, task->field_defs_size);

                plog_info("Building trie structure\n");
                PROX_PANIC(rte_acl_build(task->context, &acl_build_param),
                           "Failed to build ACL trie\n");
        }

        /* Register acl_msg so rules can be added at runtime via the
         * control channel, polled at ctrl_freq. */
        targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq);
        targ->lconf->ctrl_func_m[targ->task] = acl_msg;
}
213
214 int str_to_rule(struct acl4_rule *rule, char** fields, int n_rules, int use_qinq)
215 {
216         uint32_t svlan, svlan_mask;
217         uint32_t cvlan, cvlan_mask;
218
219         uint32_t ip_proto, ip_proto_mask;
220
221         struct ip4_subnet ip_src;
222         struct ip4_subnet ip_dst;
223
224         uint32_t sport_lo, sport_hi;
225         uint32_t dport_lo, dport_hi;
226
227         enum acl_action class = ACL_NOT_SET;
228         char class_str[24];
229
230         PROX_PANIC(parse_int_mask(&svlan, &svlan_mask, fields[0]), "Error parsing svlan: %s\n", get_parse_err());
231         PROX_PANIC(parse_int_mask(&cvlan, &cvlan_mask, fields[1]), "Error parsing cvlan: %s\n", get_parse_err());
232         PROX_PANIC(parse_int_mask(&ip_proto, &ip_proto_mask, fields[2]), "Error parsing ip protocol: %s\n", get_parse_err());
233         PROX_PANIC(parse_ip4_cidr(&ip_src, fields[3]), "Error parsing source IP subnet: %s\n", get_parse_err());
234         PROX_PANIC(parse_ip4_cidr(&ip_dst, fields[4]), "Error parsing dest IP subnet: %s\n", get_parse_err());
235
236         PROX_PANIC(parse_range(&sport_lo, &sport_hi, fields[5]), "Error parsing source port range: %s\n", get_parse_err());
237         PROX_PANIC(parse_range(&dport_lo, &dport_hi, fields[6]), "Error parsing destination port range: %s\n", get_parse_err());
238
239         PROX_PANIC(parse_str(class_str, fields[7], sizeof(class_str)), "Error parsing action: %s\n", get_parse_err());
240
241         if (!strcmp(class_str, "drop")) {
242                 class = ACL_DROP;
243         }
244         else if (!strcmp(class_str, "allow")) {
245                 class = ACL_ALLOW;
246         }
247         else if (!strcmp(class_str, "rate limit")) {
248                 class = ACL_RATE_LIMIT;
249         }
250         else {
251                 plog_err("unknown class type: %s\n", class_str);
252         }
253
254         rule->data.userdata = class; /* allow, drop or ratelimit */
255         rule->data.category_mask = 1;
256         rule->data.priority = n_rules;
257
258         /* Configuration for rules is done in little-endian so no bswap is needed here.. */
259
260         rule->fields[0].value.u8 = ip_proto;
261         rule->fields[0].mask_range.u8 = ip_proto_mask;
262         rule->fields[1].value.u32 = ip_src.ip;
263         rule->fields[1].mask_range.u32 = ip_src.prefix;
264
265         rule->fields[2].value.u32 = ip_dst.ip;
266         rule->fields[2].mask_range.u32 = ip_dst.prefix;
267
268         rule->fields[3].value.u16 = sport_lo;
269         rule->fields[3].mask_range.u16 = sport_hi;
270
271         rule->fields[4].value.u16 = dport_lo;
272         rule->fields[4].mask_range.u16 = dport_hi;
273
274         if (use_qinq) {
275                 rule->fields[5].value.u16 = rte_bswap16(ETYPE_8021ad);
276                 rule->fields[5].mask_range.u16 = 0xffff;
277
278                 /* To mask out the TCI and only keep the VID, the mask should be 0x0fff */
279                 rule->fields[6].value.u16 = svlan;
280                 rule->fields[6].mask_range.u16 = svlan_mask;
281
282                 rule->fields[7].value.u16 = rte_bswap16(ETYPE_VLAN);
283                 rule->fields[7].mask_range.u16 = 0xffff;
284
285                 rule->fields[8].value.u16 = cvlan;
286                 rule->fields[8].mask_range.u16 = cvlan_mask;
287         }
288         else {
289                 /* Reuse first ethertype from vlan to check if packet is IPv4 packet */
290                 rule->fields[5].value.u16 =  rte_bswap16(ETYPE_IPv4);
291                 rule->fields[5].mask_range.u16 = 0xffff;
292
293                 /* Other fields are ignored */
294                 rule->fields[6].value.u16 = 0;
295                 rule->fields[6].mask_range.u16 = 0;
296                 rule->fields[7].value.u16 = 0;
297                 rule->fields[7].mask_range.u16 = 0;
298                 rule->fields[8].value.u16 = 0;
299                 rule->fields[8].mask_range.u16 = 0;
300         }
301         return 0;
302 }
303
/* Task-mode descriptor: binds mode string "acl" in the config file to this
 * module's init and per-burst handler. */
static struct task_init task_init_acl = {
        .mode_str = "acl",
        .init = init_task_acl,
        .handle = handle_acl_bulk,
        .size = sizeof(struct task_acl)
};
310
/* Registered at load time (GCC constructor) so the "acl" mode is available
 * before any configuration is parsed. */
__attribute__((constructor)) static void reg_task_acl(void)
{
        reg_task(&task_init_acl);
}