// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
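
/*
 * Flow matching ("fm") task: classifies IPv4/UDP and IPv4/TCP packets,
 * tracks flows in an expiring key-value store shared per core, and hands
 * the L4 payload of each packet to a DPI engine that is loaded at runtime
 * as a shared object.
 */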
#include <string.h>
#include <dlfcn.h>
#include <sys/time.h>
#include <netinet/in.h>

#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>
#include <rte_eth_ctrl.h>

#include "log.h"
#include "quit.h"
#include "lconf.h"
#include "prox_cfg.h"
#include "prox_malloc.h"
#include "prox_shared.h"
#include "task_init.h"
#include "task_base.h"
#include "kv_store_expire.h"
#include "etypes.h"
#include "dpi/dpi.h"
struct task_dpi_per_core {
	void *dpi_opaque;
};

struct task_fm {
	struct task_base base;

	/* FM related fields */
	struct kv_store_expire *kv_store_expire;

	/* DPI related fields */
	void *dpi_opaque;
	struct dpi_engine dpi_engine;
	struct task_dpi_per_core *dpi_shared; /* Used only during init */
};

struct eth_ip4_udp {
	struct ether_hdr l2;
	struct ipv4_hdr  l3;
	union {
		struct udp_hdr udp;
		struct tcp_hdr tcp;
	} l4;
} __attribute__((packed));

union pkt_type {
	struct {
		uint16_t etype;
		uint8_t  ip_byte;
		uint8_t  next_proto;
	} __attribute__((packed));
	uint32_t val;
};
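
/* Packing ethertype, IP version/IHL and L4 protocol into one 32-bit value
   lets a packet be classified with a single integer compare against the
   constants below. */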
static union pkt_type pkt_type_udp = {
	.next_proto = IPPROTO_UDP,
	.ip_byte    = 0x45, /* IPv4, 20 byte header, no options */
	.etype      = ETYPE_IPv4,
};

static union pkt_type pkt_type_tcp = {
	.next_proto = IPPROTO_TCP,
	.ip_byte    = 0x45,
	.etype      = ETYPE_IPv4,
};
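
/* Extract the 5-tuple in both directions and locate the L4 payload.
   Returns 0 on success, non-zero for packet types the task does not
   handle. */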
static int extract_flow_info(struct eth_ip4_udp *p, struct flow_info *fi, struct flow_info *fi_flipped, uint32_t *len, uint8_t **payload)
{
	union pkt_type pkt_type = {
		.next_proto = p->l3.next_proto_id,
		.ip_byte = p->l3.version_ihl,
		.etype = p->l2.ether_type,
	};

	memset(fi->reservered, 0, sizeof(fi->reservered));
	memset(fi_flipped->reservered, 0, sizeof(fi_flipped->reservered));

	if (pkt_type.val == pkt_type_udp.val) {
		fi->ip_src = p->l3.src_addr;
		fi->ip_dst = p->l3.dst_addr;
		fi->ip_proto = p->l3.next_proto_id;
		fi->port_src = p->l4.udp.src_port;
		fi->port_dst = p->l4.udp.dst_port;

		fi_flipped->ip_src = p->l3.dst_addr;
		fi_flipped->ip_dst = p->l3.src_addr;
		fi_flipped->ip_proto = p->l3.next_proto_id;
		fi_flipped->port_src = p->l4.udp.dst_port;
		fi_flipped->port_dst = p->l4.udp.src_port;

		*len = rte_be_to_cpu_16(p->l4.udp.dgram_len) - sizeof(struct udp_hdr);
		*payload = (uint8_t *)(&p->l4.udp) + sizeof(struct udp_hdr);
		return 0;
	}
	else if (pkt_type.val == pkt_type_tcp.val) {
		fi->ip_src = p->l3.src_addr;
		fi->ip_dst = p->l3.dst_addr;
		fi->ip_proto = p->l3.next_proto_id;
		fi->port_src = p->l4.tcp.src_port;
		fi->port_dst = p->l4.tcp.dst_port;

		fi_flipped->ip_src = p->l3.dst_addr;
		fi_flipped->ip_dst = p->l3.src_addr;
		fi_flipped->ip_proto = p->l3.next_proto_id;
		fi_flipped->port_src = p->l4.tcp.dst_port;
		fi_flipped->port_dst = p->l4.tcp.src_port;

		/* TCP payload length = IP total length - IPv4 header - TCP header
		   (data_off holds the TCP header length in 32-bit words). */
		*len = rte_be_to_cpu_16(p->l3.total_length) - sizeof(struct ipv4_hdr) - ((p->l4.tcp.data_off >> 4) * 4);
		*payload = ((uint8_t *)&p->l4.tcp) + ((p->l4.tcp.data_off >> 4) * 4);
		return 0;
	}

	return -1;
}
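
/* A flow is considered to begin on any UDP packet, or on a TCP packet
   carrying the SYN flag. */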
static int is_flow_beg(const struct flow_info *fi, const struct eth_ip4_udp *p)
{
	return fi->ip_proto == IPPROTO_UDP ||
		(fi->ip_proto == IPPROTO_TCP && p->l4.tcp.tcp_flags & TCP_SYN_FLAG);
}

static void *lookup_flow(struct task_fm *task, struct flow_info *fi, uint64_t now_tsc)
{
	struct kv_store_expire_entry *entry;

	entry = kv_store_expire_get(task->kv_store_expire, fi, now_tsc);

	return entry ? entry_value(task->kv_store_expire, entry) : NULL;
}

static void *lookup_or_insert_flow(struct task_fm *task, struct flow_info *fi, uint64_t now_tsc)
{
	struct kv_store_expire_entry *entry;

	entry = kv_store_expire_get_or_put(task->kv_store_expire, fi, now_tsc);

	return entry ? entry_value(task->kv_store_expire, entry) : NULL;
}
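
/* Per-packet handling: classify the packet, find (or create) its flow
   entry and pass the payload to the DPI engine. Returns OUT_HANDLED or
   OUT_DISCARD so the bulk handler can account the packet. */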
static int handle_fm(struct task_fm *task, struct rte_mbuf *mbuf, uint64_t now_tsc)
{
	struct eth_ip4_udp *p;
	struct flow_info fi, fi_flipped;
	void *flow_data;
	uint32_t len;
	uint8_t *payload;
	uint8_t is_upstream = 0;
	uint32_t res[64]; /* Result buffer handed to the DPI engine; the size is an assumption. */
	size_t res_len = sizeof(res)/sizeof(res[0]);
	struct dpi_payload dpi_payload;

	p = rte_pktmbuf_mtod(mbuf, struct eth_ip4_udp *);

	if (0 != extract_flow_info(p, &fi, &fi_flipped, &len, &payload)) {
		plogx_err("Unknown packet type\n");
		return OUT_DISCARD;
	}

	/* First, try to see if the flow already exists where the
	   current packet is sent by the server. */
	if (!(flow_data = lookup_flow(task, &fi_flipped, now_tsc))) {
		/* Insert a new flow, only if this is the first packet
		   of the flow. */
		if (is_flow_beg(&fi, p))
			flow_data = lookup_or_insert_flow(task, &fi, now_tsc);
		else
			flow_data = lookup_flow(task, &fi, now_tsc);
		is_upstream = 1;
	}

	if (!flow_data)
		return OUT_DISCARD;
	if (!len)
		return OUT_HANDLED;

	dpi_payload.payload = payload;
	dpi_payload.len = len;
	dpi_payload.client_to_server = is_upstream;
	gettimeofday(&dpi_payload.tv, NULL);
	task->dpi_engine.dpi_process(task->dpi_opaque, is_upstream ? &fi : &fi_flipped, flow_data, &dpi_payload, res, &res_len);

	return OUT_HANDLED;
}
static int handle_fm_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_fm *task = (struct task_fm *)tbase;
	uint64_t now_tsc = rte_rdtsc();
	uint16_t handled = 0;
	uint16_t discard = 0;
	int ret;

	for (uint16_t i = 0; i < n_pkts; ++i) {
		ret = handle_fm(task, mbufs[i], now_tsc);
		if (ret == OUT_DISCARD)
			discard++;
		else if (ret == OUT_HANDLED)
			handled++;
	}

	/* This task never forwards packets: every mbuf is freed here and
	   accounted as either handled or discarded. */
	for (uint16_t i = 0; i < n_pkts; ++i)
		rte_pktmbuf_free(mbufs[i]);

	TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, handled);
	TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, discard);

	return 0;
}
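
/* The DPI engine is an external shared object. It is dlopen()ed once per
   process (the handle is kept in the system-wide shared area) and its
   function table, obtained through its get_dpi_engine() entry point, is
   copied into the task. */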
static void load_dpi_engine(const char *dpi_engine_path, struct dpi_engine *dst)
{
	void *handle = prox_sh_find_system(dpi_engine_path);

	if (handle == NULL) {
		plogx_info("Loading DPI engine from '%s'\n", dpi_engine_path);
		handle = dlopen(dpi_engine_path, RTLD_NOW | RTLD_GLOBAL);

		PROX_PANIC(handle == NULL, "Failed to load dpi engine from '%s' with error:\n\t\t%s\n", dpi_engine_path, dlerror());
		prox_sh_add_system(dpi_engine_path, handle);
	}

	struct dpi_engine *(*get_dpi_engine)(void) = dlsym(handle, "get_dpi_engine");

	PROX_PANIC(get_dpi_engine == NULL, "Failed to find get_dpi_engine function from '%s'\n", dpi_engine_path);
	struct dpi_engine *dpi_engine = get_dpi_engine();

	dpi_engine->dpi_print = plog_info;
	rte_memcpy(dst, dpi_engine, sizeof(*dst));
}
static uint32_t count_fm_cores(void)
{
	uint32_t n_cores = 0;
	uint32_t lcore_id = -1;
	struct lcore_cfg *lconf;

	while (prox_core_next(&lcore_id, 0) == 0) {
		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			if (!strcmp(lconf->targs[task_id].task_init->mode_str, "fm")) {
				n_cores++;
				/* Only interested in the number of cores, so
				   stop after the first fm task on this core. */
				break;
			}
		}
	}

	return n_cores;
}
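
/* The flow table is shared by all fm tasks running on the same core; it is
   created once and published through the per-core shared area. */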
static struct kv_store_expire *get_shared_flow_table(struct task_args *targ, struct dpi_engine *de)
{
	struct kv_store_expire *ret = prox_sh_find_core(targ->lconf->id, "flow_table");
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	if (!ret) {
		ret = kv_store_expire_create(rte_align32pow2(targ->flow_table_size) * 4,
					     sizeof(struct flow_info),
					     de->dpi_get_flow_entry_size(),
					     socket_id,
					     de->dpi_flow_expire, /* presumed expiry callback supplied by the engine */
					     rte_get_tsc_hz() * 60);
		PROX_PANIC(ret == NULL, "Failed to allocate KV store\n");
		prox_sh_add_core(targ->lconf->id, "flow_table", ret);
	}
	return ret;
}
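
/* Small per-core structure used to hand the DPI thread state created in
   start_first() over to the other fm tasks on the same core. */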
static struct task_dpi_per_core *get_shared_dpi_shared(struct task_args *targ)
{
	static const char *name = "dpi_shared";
	struct task_dpi_per_core *ret = prox_sh_find_core(targ->lconf->id, name);
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	if (!ret) {
		ret = prox_zmalloc(sizeof(*ret), socket_id);
		prox_sh_add_core(targ->lconf->id, name, ret);
	}
	return ret;
}
static void init_task_fm(struct task_base *tbase, struct task_args *targ)
{
	struct task_fm *task = (struct task_fm *)tbase;
	static int dpi_inited = 0;

	load_dpi_engine(targ->dpi_engine_path, &task->dpi_engine);

	task->kv_store_expire = get_shared_flow_table(targ, &task->dpi_engine);
	task->dpi_shared = get_shared_dpi_shared(targ);

	/* The DPI library itself is initialized only once, by the first fm
	   task that gets here. */
	if (!dpi_inited) {
		uint32_t n_threads = count_fm_cores();
		const char *dpi_params[16];

		plogx_info("Initializing DPI with %u threads\n", n_threads);
		dpi_inited = 1;

		PROX_PANIC(targ->n_dpi_engine_args > 16, "Too many DPI arguments\n");
		for (size_t i = 0; i < targ->n_dpi_engine_args && i < 16; ++i)
			dpi_params[i] = targ->dpi_engine_args[i];

		int ret = task->dpi_engine.dpi_init(n_threads, targ->n_dpi_engine_args, dpi_params);

		PROX_PANIC(ret, "Failed to initialize DPI engine\n");
	}
}
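
/* start_first() runs for the first fm task on a core: it creates the DPI
   per-thread state and publishes it in the shared dpi_shared structure,
   from which start() copies it into every fm task on that core. */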
static void start_first(struct task_base *tbase)
{
	struct task_fm *task = (struct task_fm *)tbase;
	void *ret = task->dpi_engine.dpi_thread_start();

	task->dpi_shared->dpi_opaque = ret;
	PROX_PANIC(ret == NULL, "dpi_thread_start failed\n");
}

static void start(struct task_base *tbase)
{
	struct task_fm *task = (struct task_fm *)tbase;

	task->dpi_opaque = task->dpi_shared->dpi_opaque;
	PROX_PANIC(task->dpi_opaque == NULL, "dpi_opaque == NULL");
}
static void stop(struct task_base *tbase)
{
	struct task_fm *task = (struct task_fm *)tbase;

	/* Flush the flow table and report how many entries were expired
	   out of the total still stored. */
	size_t expired = kv_store_expire_expire_all(task->kv_store_expire);
	size_t size = kv_store_expire_size(task->kv_store_expire);

	plogx_info("Expired %zu/%zu flow table entries\n", expired, size);
}

static void stop_last(struct task_base *tbase)
{
	struct task_fm *task = (struct task_fm *)tbase;

	task->dpi_engine.dpi_thread_stop(task->dpi_shared->dpi_opaque);
	task->dpi_shared->dpi_opaque = NULL;
}
static struct task_init task_init_fm = {
	.mode_str = "fm",
	.init = init_task_fm,
	.handle = handle_fm_bulk,
	.start = start,
	.stop = stop,
	.start_first = start_first,
	.stop_last = stop_last,
	.size = sizeof(struct task_fm)
};
__attribute__((constructor)) static void reg_task_fm(void)
{
	reg_task(&task_init_fm);
}