26536af81afe1eead60984963caaba1193cd0fd0
[samplevnf.git] / VNFs / DPPD-PROX / handle_esp.c
1 /*
2 // Copyright (c) 2010-2017 Intel Corporation
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //     http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 */
16
17 /*
18  * Non compatible implementation of RFC3686(CTR-AES 128 bit key), RFC4303 (tunnel ipv4 ESP)
19  * Limitations:
20  * 1. Crypto not safe!!!!! (underlying AES-CTR implementation is OK, but ESP implementation is lousy)
21  * 2. Only ESP/tunnel/ipv4/AES-CTR
22  * 3. Not fully implemented
23  * 4. No proper key / SADB
24  * So performance demonstrator only
25  */
26
27 #include "task_init.h"
28 #include "task_base.h"
29 #include "etypes.h"
30 #include "stats.h"
31 #include "cfgfile.h"
32 #include "log.h"
33 #include "prox_cksum.h"
34 #include "defines.h"
35 #include <rte_ip.h>
36 #include <rte_cryptodev.h>
37 #include <rte_bus_vdev.h>
38 #include "prox_port_cfg.h"
39 #include "prox_compat.h"
40
// Short type aliases used throughout this file.
typedef unsigned int u32;
typedef unsigned char u8;

#define BYTE_LENGTH(x) (x/8)
#define DIGEST_BYTE_LENGTH_SHA1 (BYTE_LENGTH(160))	// 20-byte HMAC-SHA1 ICV

//#define CIPHER_KEY_LENGTH_AES_CBC (32)
#define CIPHER_KEY_LENGTH_AES_CBC (16)//==TEST
#define CIPHER_IV_LENGTH_AES_CBC 16

#define MAXIMUM_IV_LENGTH 16
// Per-operation IV lives in the crypto op private area, directly after the sym op.
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

#define MAX_SESSIONS 1024
#define POOL_CACHE_SIZE 128

#define NUM_OPS 256
58
// Per-core state for the ESP encryption (encapsulation) task.
// NOTE: 'base' must remain the first member — the framework passes a
// struct task_base * which init/handle cast to struct task_esp_enc *.
struct task_esp_enc {
        struct task_base base;
        uint8_t cdev_id;                        // crypto device used by this task
        uint16_t qp_id;                         // queue pair on cdev_id
        uint32_t local_ipv4;                    // outer tunnel source IP (network byte order)
        prox_rte_ether_addr local_mac;          // source MAC written into egress frames
        uint32_t remote_ipv4;                   // outer tunnel destination IP (network byte order)
        prox_rte_ether_addr dst_mac;            // next-hop MAC (from config when TASK_ARG_DST_MAC_SET)
        struct rte_mempool *crypto_op_pool;     // pool of rte_crypto_op for this core
        struct rte_mempool *session_pool;       // mempool backing the crypto session
        struct rte_cryptodev_sym_session *sess; // single cipher+auth session shared by all packets
        struct rte_crypto_op *ops_burst[NUM_OPS]; // scratch array for enqueue/dequeue bursts
};
72
// Per-core state for the ESP decryption (decapsulation) task.
// NOTE: 'base' must remain the first member — the framework passes a
// struct task_base * which init/handle cast to struct task_esp_dec *.
struct task_esp_dec {
        struct task_base base;
        uint8_t cdev_id;                        // crypto device used by this task
        uint16_t qp_id;                         // queue pair on cdev_id
        uint32_t local_ipv4;                    // local tunnel endpoint IP (network byte order)
        prox_rte_ether_addr local_mac;          // source MAC written into egress frames
        prox_rte_ether_addr dst_mac;            // next-hop MAC (from config when TASK_ARG_DST_MAC_SET)
        struct rte_mempool *crypto_op_pool;     // pool of rte_crypto_op for this core
        struct rte_mempool *session_pool;       // mempool backing the crypto session
        struct rte_cryptodev_sym_session *sess; // single cipher+auth session shared by all packets
        struct rte_crypto_op *ops_burst[NUM_OPS]; // scratch array for enqueue/dequeue bursts
};
85
// Hard-coded HMAC-SHA1 authentication key. Demo only — there is no key
// management or SADB (see limitations in the file header).
static uint8_t hmac_sha1_key[] = {
        0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
        0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
        0xDE, 0xF4, 0xDE, 0xAD };

// Hard-coded AES-CBC cipher key; only the first CIPHER_KEY_LENGTH_AES_CBC
// (16) bytes are used with the current key-length setting.
static uint8_t aes_cbc_key[] = {
        0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
        0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A,
        0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
        0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A };

// Fixed IV used for every encrypted packet — cryptographically unsound for
// CBC, acceptable only because this is a performance demonstrator.
static uint8_t aes_cbc_iv[] = {
        0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
        0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A };
100
101 static void printf_cdev_info(uint8_t cdev_id)
102 {
103         struct rte_cryptodev_info dev_info;
104         rte_cryptodev_info_get(cdev_id, &dev_info);
105         plog_info("!!!numdevs:%d\n", rte_cryptodev_count());
106         //uint16_t rte_cryptodev_queue_pair_count(uint8_t dev_id);
107         plog_info("dev:%d name:%s nb_queue_pairs:%d max_nb_sessions:%d\n",
108                 cdev_id, dev_info.driver_name, dev_info.max_nb_queue_pairs, dev_info.sym.max_nb_sessions);
109         const struct rte_cryptodev_capabilities *cap = &dev_info.capabilities[0];
110         int i=0;
111         while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
112                 //plog_info("cap->sym.xform_type:%d,");
113                 if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER)
114                         plog_info("RTE_CRYPTO_SYM_XFORM_CIPHER: %d\n", cap->sym.cipher.algo);
115                 cap = &dev_info.capabilities[++i];
116         }
117 }
118
119 #if 0
120 static uint8_t get_cdev_id(void)
121 {
122         //crypto devices must be configured in the config file
123         //eal=-b 0000:00:03.0 --vdev crypto_aesni_mb0 --vdev crypto_aesni_mb1
124
125         static uint8_t cdev_id=0;
126         PROX_PANIC(cdev_id+1 > rte_cryptodev_count(), "not enough crypto devices\n");
127         //eal=-b 0000:00:03.0 --vdev crypto_aesni_mb0 --vdev crypto_aesni_mb1
128         return cdev_id++;
129 }
130 #else
131 static uint8_t get_cdev_id(void)
132 {
133         static uint8_t cdev_id=0;
134         char name[64]={0};
135
136         sprintf(name, "crypto_aesni_mb%d", cdev_id);
137
138         int cdev_id1 = rte_cryptodev_get_dev_id(name);
139         if (cdev_id1 >= 0){
140                 plog_info("crypto dev %d preconfigured\n", cdev_id1);
141                 ++cdev_id;
142                 return cdev_id1;
143         }
144 #if RTE_VERSION < RTE_VERSION_NUM(18,8,0,0)
145         int ret = rte_vdev_init(name, "max_nb_queue_pairs=8,max_nb_sessions=1024,socket_id=0");
146 #else
147         int ret = rte_vdev_init(name, "max_nb_queue_pairs=8,socket_id=0");
148 #endif
149         PROX_PANIC(ret != 0, "Failed rte_vdev_init\n");
150
151         return cdev_id++;
152 }
153 #endif
154
155 static void init_task_esp_enc(struct task_base *tbase, struct task_args *targ)
156 {
157         struct task_esp_enc *task = (struct task_esp_enc *)tbase;
158
159         tbase->flags |= FLAG_NEVER_FLUSH;
160
161         uint8_t lcore_id = targ->lconf->id;
162         char name[64];
163         sprintf(name, "core_%03u_crypto_pool", lcore_id);
164         task->crypto_op_pool = rte_crypto_op_pool_create(name, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
165                 targ->nb_mbuf, 128, MAXIMUM_IV_LENGTH, rte_socket_id());
166         PROX_PANIC(task->crypto_op_pool == NULL, "Can't create ENC CRYPTO_OP_POOL\n");
167
168         task->cdev_id = get_cdev_id();
169
170         struct rte_cryptodev_config cdev_conf;
171         cdev_conf.nb_queue_pairs = 2;
172         cdev_conf.socket_id = rte_socket_id();
173         rte_cryptodev_configure(task->cdev_id, &cdev_conf);
174
175         unsigned int session_size = rte_cryptodev_sym_get_private_session_size(task->cdev_id);
176         plog_info("rte_cryptodev_sym_get_private_session_size=%d\n", session_size);
177         sprintf(name, "core_%03u_session_pool", lcore_id);
178         task->session_pool = rte_cryptodev_sym_session_pool_create(name,
179                                 MAX_SESSIONS,
180                                 session_size,
181                                 POOL_CACHE_SIZE,
182                                 0, rte_socket_id());
183         PROX_PANIC(task->session_pool == NULL, "Failed rte_mempool_create\n");
184
185         task->qp_id=0;
186         plog_info("enc: task->qp_id=%u\n", task->qp_id);
187         struct prox_rte_cryptodev_qp_conf qp_conf;
188         qp_conf.nb_descriptors = 128;
189         qp_conf.mp_session = task->session_pool;
190         prox_rte_cryptodev_queue_pair_setup(task->cdev_id, task->qp_id, &qp_conf, rte_cryptodev_socket_id(task->cdev_id));
191
192         int ret = rte_cryptodev_start(task->cdev_id);
193         PROX_PANIC(ret < 0, "Failed to start device\n");
194
195         //Setup Cipher Parameters
196         struct rte_crypto_sym_xform cipher_xform = {0};
197         struct rte_crypto_sym_xform auth_xform = {0};
198
199         cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
200         cipher_xform.next = &auth_xform;
201
202         cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
203         cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
204         cipher_xform.cipher.key.data = aes_cbc_key;
205         cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
206
207         cipher_xform.cipher.iv.offset = IV_OFFSET;
208         cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
209
210         //Setup HMAC Parameters
211         auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
212         auth_xform.next = NULL;
213         auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
214         auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
215         auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
216         auth_xform.auth.key.data = hmac_sha1_key;
217         auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
218
219         auth_xform.auth.iv.offset = 0;
220         auth_xform.auth.iv.length = 0;
221
222         task->sess = rte_cryptodev_sym_session_create(task->cdev_id, &cipher_xform, task->session_pool);
223         PROX_PANIC(task->sess < 0, "Failed ENC sym_session_create\n");
224
225         task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
226         task->remote_ipv4 = rte_cpu_to_be_32(targ->remote_ipv4);
227         //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
228         struct prox_port_cfg *port = find_reachable_port(targ);
229         memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr));
230
231         if (targ->flags & TASK_ARG_DST_MAC_SET){
232                 memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac));
233                 plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n", MAC_BYTES(task->dst_mac.addr_bytes));
234                 //prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
235                 //rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
236         }
237 }
238
239 static void init_task_esp_dec(struct task_base *tbase, struct task_args *targ)
240 {
241         struct task_esp_dec *task = (struct task_esp_dec *)tbase;
242
243         tbase->flags |= FLAG_NEVER_FLUSH;
244
245         uint8_t lcore_id = targ->lconf->id;
246         char name[64];
247         sprintf(name, "core_%03u_crypto_pool", lcore_id);
248         task->crypto_op_pool = rte_crypto_op_pool_create(name, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
249                 targ->nb_mbuf, 128, MAXIMUM_IV_LENGTH, rte_socket_id());
250         PROX_PANIC(task->crypto_op_pool == NULL, "Can't create DEC CRYPTO_OP_POOL\n");
251
252         task->cdev_id = get_cdev_id();
253         struct rte_cryptodev_config cdev_conf;
254         cdev_conf.nb_queue_pairs = 2;
255         cdev_conf.socket_id = SOCKET_ID_ANY;
256         cdev_conf.socket_id = rte_socket_id();
257         rte_cryptodev_configure(task->cdev_id, &cdev_conf);
258
259         unsigned int session_size = rte_cryptodev_sym_get_private_session_size(task->cdev_id);
260         plog_info("rte_cryptodev_sym_get_private_session_size=%d\n", session_size);
261         sprintf(name, "core_%03u_session_pool", lcore_id);
262         task->session_pool = rte_cryptodev_sym_session_pool_create(name,
263                                 MAX_SESSIONS,
264                                 session_size,
265                                 POOL_CACHE_SIZE,
266                                 0, rte_socket_id());
267         PROX_PANIC(task->session_pool == NULL, "Failed rte_mempool_create\n");
268
269         task->qp_id=0;
270         plog_info("dec: task->qp_id=%u\n", task->qp_id);
271         struct prox_rte_cryptodev_qp_conf qp_conf;
272         qp_conf.nb_descriptors = 128;
273         qp_conf.mp_session = task->session_pool;
274         prox_rte_cryptodev_queue_pair_setup(task->cdev_id, task->qp_id, &qp_conf, rte_cryptodev_socket_id(task->cdev_id));
275
276         int ret = rte_cryptodev_start(task->cdev_id);
277         PROX_PANIC(ret < 0, "Failed to start device\n");
278
279         //Setup Cipher Parameters
280         struct rte_crypto_sym_xform cipher_xform = {0};
281         struct rte_crypto_sym_xform auth_xform = {0};
282
283         cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
284         cipher_xform.next = NULL;
285         cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
286         cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
287         cipher_xform.cipher.key.data = aes_cbc_key;
288         cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
289
290         cipher_xform.cipher.iv.offset = IV_OFFSET;
291         cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
292
293         //Setup HMAC Parameters
294         auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
295         auth_xform.next = &cipher_xform;
296         auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
297         auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
298         auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
299         auth_xform.auth.key.data = hmac_sha1_key;
300         auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
301
302         auth_xform.auth.iv.offset = 0;
303         auth_xform.auth.iv.length = 0;
304
305         task->sess = rte_cryptodev_sym_session_create(task->cdev_id, &cipher_xform, task->session_pool);
306         PROX_PANIC(task->sess < 0, "Failed DEC sym_session_create\n");
307
308         task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
309         //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
310         struct prox_port_cfg *port = find_reachable_port(targ);
311         memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr));
312
313         if (targ->flags & TASK_ARG_DST_MAC_SET){
314                 memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac));
315                 plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n", MAC_BYTES(task->dst_mac.addr_bytes));
316                 //prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
317                 //rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
318         }
319
320 }
321
/*
 * Build an ESP tunnel-mode encrypt operation for one packet.
 * The mbuf is rewritten in place: the original IPv4 packet becomes the
 * payload of a new outer IPv4+ESP header; room is appended for the ESP
 * trailer (padding, pad length, next header) and the SHA1 ICV; 'cop' is
 * filled in for AES-CBC + HMAC-SHA1 processing by the crypto device.
 * Returns 0 on success or OUT_DISCARD when the packet must be dropped.
 */
static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
{
        u8 *data;
        prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
        prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
        uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
        struct rte_crypto_sym_op *sym_cop = cop->sym;

        // Only IPv4 is supported (see limitations in the file header).
        if (unlikely((pip4->version_ihl >> 4) != 4)) {
                plog_info("Received non IPv4 packet at esp enc %i\n", pip4->version_ihl);
                plogdx_info(mbuf, "ENC RX: ");
                return OUT_DISCARD;
        }
        if (pip4->time_to_live) {
                pip4->time_to_live--;
        }
        else {
                plog_info("TTL = 0 => Dropping\n");
                return OUT_DISCARD;
        }

        // Remove padding if any (we don't want to encapsulate garbage at end of IPv4 packet)
        int l1 = rte_pktmbuf_pkt_len(mbuf);
        int padding = l1 - (ipv4_length + sizeof(prox_rte_ether_hdr));
        if (unlikely(padding > 0)) {
                rte_pktmbuf_trim(mbuf, padding);
        }

        l1 = rte_pktmbuf_pkt_len(mbuf);
        int encrypt_len = l1 - sizeof(prox_rte_ether_hdr) + 2; // According to RFC4303 table 1, encrypt len is ip+tfc_pad(o)+pad+pad len(1) + next header(1)
        padding = 0;
        // Pad the plaintext up to the AES block size (16 bytes).
        if ((encrypt_len & 0xf) != 0){
                padding = 16 - (encrypt_len % 16);
                encrypt_len += padding;
        }

        // Bytes prepended before the original IP header: outer IPv4 + ESP (SPI+SN) + IV.
        const int extra_space = sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC;

        // Saved original header fields; src_mac/dst_mac feed the disabled
        // "send it back" path below, src_addr becomes the SPI.
        prox_rte_ether_addr src_mac = peth->s_addr;
        prox_rte_ether_addr dst_mac = peth->d_addr;
        uint32_t src_addr = pip4->src_addr;
        uint32_t dst_addr = pip4->dst_addr;
        uint8_t ttl = pip4->time_to_live;
        uint8_t version_ihl = pip4->version_ihl;

        peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, extra_space); // encap + prefix
        peth = (prox_rte_ether_hdr *)rte_pktmbuf_append(mbuf, 0 + 1 + 1 + padding + 4 + DIGEST_BYTE_LENGTH_SHA1); // padding + pad_len + next_head + seqn + ICV pad + ICV
        peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
        l1 = rte_pktmbuf_pkt_len(mbuf);
        peth->ether_type = ETYPE_IPv4;
#if 0
        //send it back
        prox_rte_ether_addr_copy(&dst_mac, &peth->s_addr);
        prox_rte_ether_addr_copy(&src_mac, &peth->d_addr);
#else
        prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
        //prox_rte_ether_addr_copy(&dst_mac, &peth->d_addr);//IS: dstmac should be rewritten by arp
        prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
#endif

        // Build the outer (tunnel) IPv4 header.
        pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
        pip4->src_addr = task->local_ipv4;
        pip4->dst_addr = task->remote_ipv4;
        pip4->time_to_live = ttl;
        pip4->next_proto_id = IPPROTO_ESP; // 50 for ESP, ip in ip next proto trailer
        pip4->version_ihl = version_ihl; // 20 bytes, ipv4
        pip4->total_length = rte_cpu_to_be_16(ipv4_length + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC + padding + 1 + 1 + DIGEST_BYTE_LENGTH_SHA1); // iphdr+SPI+SN+IV+payload+padding+padlen+next header + crc + auth
        pip4->packet_id = 0x0101;
        pip4->type_of_service = 0;
        // NOTE(review): overwrites the decremented TTL assigned above with a
        // fixed 64 — confirm which behavior is intended.
        pip4->time_to_live = 64;
        prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), 1);

        data = (u8*)(pip4 + 1);
#if 0
        *((u32*) data) = 0x2016; // FIXME SPI
        *((u32*) data + 1) = 0x2; // FIXME SN
#else
        struct prox_esp_hdr *pesp = (struct prox_esp_hdr*)(pip4+1);
        pesp->spi = src_addr;//for simplicity assume 1 tunnel per source ip
        // NOTE(review): 'sn' is function-static, i.e. shared by every lcore
        // running this handler — not safe with multiple enc tasks. Both
        // fields are then overwritten by the debug constants below anyway.
        static u32 sn = 0;
        pesp->seq = ++sn;
        pesp->spi=0xAAAAAAAA;//debug
        pesp->seq =0xBBBBBBBB;//debug
#endif
        // Write the ESP trailer: pad length byte and next-header byte (4 = IPv4-in-IPv4).
        u8 *padl = (u8*)data + (8 + encrypt_len - 2 + CIPHER_IV_LENGTH_AES_CBC); // No ESN yet. (-2 means NH is crypted)
        //padl += CIPHER_IV_LENGTH_AES_CBC;
        *padl = padding;
        *(padl + 1) = 4; // ipv4 in 4

        // ICV is written right after ESP header (8B) + IV + ciphertext.
        sym_cop->auth.digest.data = data + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len;
        //sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
        sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
        //sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;

        //sym_cop->cipher.iv.data = data + 8;
        //sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4;
        //sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;

        //rte_memcpy(sym_cop->cipher.iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);

        // Per-op IV lives in the crypto op private area at IV_OFFSET; the
        // session's cipher.iv.offset points there. Constant IV — demo only.
        uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET);
        rte_memcpy(iv_ptr, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);

#if 0//old
        sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC;
        sym_cop->cipher.data.length = encrypt_len;

        uint64_t *iv = (uint64_t *)(pesp + 1);
        memset(iv, 0, CIPHER_IV_LENGTH_AES_CBC);
#else
        //uint64_t *iv = (uint64_t *)(pesp + 1);
        //memset(iv, 0, CIPHER_IV_LENGTH_AES_CBC);
        // Cipher region starts right after the ESP header and covers the
        // in-packet IV area plus the padded plaintext.
        sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr);
        sym_cop->cipher.data.length = encrypt_len + CIPHER_IV_LENGTH_AES_CBC;
#endif

        // Auth region covers ESP header + IV + ciphertext (everything up to the ICV).
        sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr);
        sym_cop->auth.data.length = sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC + encrypt_len;// + 4;// FIXME

        sym_cop->m_src = mbuf;
        rte_crypto_op_attach_sym_session(cop, task->sess);
        //cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
        //cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

        return 0;
}
448
/*
 * Build an ESP decrypt+verify operation for one received tunnel packet.
 * Expected layout: eth | outer IPv4 (20B) | ESP SPI+SN (8B) | IV (16B) |
 * ciphertext | ICV. Returns 0 on success or OUT_DISCARD for non-ESP input.
 */
static inline uint8_t handle_esp_ah_dec(struct task_esp_dec *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
{
        struct rte_crypto_sym_op *sym_cop = cop->sym;
        prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
        prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
        uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
        u8 *data = (u8*)(pip4 + 1);

        if (pip4->next_proto_id != IPPROTO_ESP){
                plog_info("Received non ESP packet on esp dec\n");
                plogdx_info(mbuf, "DEC RX: ");
                return OUT_DISCARD;
        }

        rte_crypto_op_attach_sym_session(cop, task->sess);

        // The ICV occupies the last DIGEST_BYTE_LENGTH_SHA1 bytes of the IPv4 payload.
        sym_cop->auth.digest.data = (unsigned char *)((unsigned char*)pip4 + ipv4_length - DIGEST_BYTE_LENGTH_SHA1);
        // NOTE(review): this phys_addr offset points at the ESP payload start,
        // not at the digest bytes addressed just above — the virtual and
        // physical addresses disagree; confirm which offset is intended.
        //sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr)); // FIXME
        sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr));
        //sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;

        //sym_cop->cipher.iv.data = (uint8_t *)data + 8;
        //sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4;
        //sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;

#if 0
        rte_memcpy(rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET),
                                aes_cbc_iv,
                                CIPHER_IV_LENGTH_AES_CBC);
#else
        // Use the IV carried in the packet (right after the ESP header),
        // copied into the crypto op private area where the session reads it.
        uint8_t * iv = (uint8_t *)(pip4 + 1) + sizeof(struct prox_esp_hdr);
        rte_memcpy(rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET),
                                iv,
                                CIPHER_IV_LENGTH_AES_CBC);
#endif

        // NOTE(review): auth length = payload minus header minus 4 minus IV;
        // the '- 4' does not obviously match the 20-byte ICV — verify against
        // the enc side's auth region before trusting verification results.
        sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr);
        sym_cop->auth.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) - 4 - CIPHER_IV_LENGTH_AES_CBC;

        sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC;
        sym_cop->cipher.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) - CIPHER_IV_LENGTH_AES_CBC - 28; // FIXME

        sym_cop->m_src = mbuf;
        return 0;
}
494
495 static inline void do_ipv4_swap(struct task_esp_dec *task, struct rte_mbuf *mbuf)
496 {
497         prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
498         prox_rte_ether_addr src_mac = peth->s_addr;
499         prox_rte_ether_addr dst_mac = peth->d_addr;
500         uint32_t src_ip, dst_ip;
501
502         prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
503         src_ip = pip4->src_addr;
504         dst_ip = pip4->dst_addr;
505
506         //peth->s_addr = dst_mac;
507         peth->d_addr = src_mac;//should be replaced by arp
508         pip4->src_addr = dst_ip;
509         pip4->dst_addr = src_ip;
510         prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
511 }
512
/*
 * Post-decrypt cleanup, variant 1: collapse the tunnel headers in place by
 * copying the inner IPv4 header and payload forward over the outer
 * IPv4+ESP+IV headers, then trim the trailer. Returns 0 or OUT_DISCARD.
 * NOTE(review): handle_esp_dec_bulk calls handle_esp_ah_dec_finish2; this
 * variant appears unused within this file.
 */
static inline uint8_t handle_esp_ah_dec_finish(struct task_esp_dec *task, struct rte_mbuf *mbuf)
{
        prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
        // Copy the inner IPv4 header (20B) right behind the ethernet header,
        // over the outer IPv4 header; source skips outer IP + ESP (4+4) + IV.
        rte_memcpy(((u8*)peth) + sizeof(prox_rte_ether_hdr), ((u8*)peth) + sizeof(prox_rte_ether_hdr) +
                        + sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, sizeof(prox_rte_ipv4_hdr));// next hdr, padding
        prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);

        if (unlikely((pip4->version_ihl >> 4) != 4)) {
                plog_info("non IPv4 packet after esp dec %i\n", pip4->version_ihl);
                plogdx_info(mbuf, "DEC TX: ");
                return OUT_DISCARD;
        }
        if (pip4->time_to_live) {
                pip4->time_to_live--;
        }
        else {
                plog_info("TTL = 0 => Dropping\n");
                return OUT_DISCARD;
        }
        uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
        // Move the payload up behind the relocated inner header.
        // NOTE(review): src and dst are 44 bytes apart, so for inner payloads
        // longer than 44 bytes the regions overlap; rte_memcpy does not
        // guarantee overlap safety — memmove would be safer. Confirm before
        // relying on this (apparently unused) path.
        rte_memcpy(((u8*)peth) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr),
                ((u8*)peth) + sizeof(prox_rte_ether_hdr) +
                + 2 * sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, ipv4_length - sizeof(prox_rte_ipv4_hdr));

        // Trim the leftover ESP trailer/ICV bytes past the inner packet.
        int len = rte_pktmbuf_pkt_len(mbuf);
        rte_pktmbuf_trim(mbuf, len - sizeof(prox_rte_ether_hdr) - ipv4_length);
        peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);

#if 0
        do_ipv4_swap(task, mbuf);
#else
        prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
        prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
        //rte_memcpy(peth, task->dst_mac, sizeof(task->dst_mac));
#endif
        // TTL changed above, so the IPv4 checksum must be recomputed.
        prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), 1);

        return 0;
}
552
/*
 * Post-decrypt cleanup, variant 2 (used by handle_esp_dec_bulk): instead of
 * moving the payload down, copy the ethernet header forward to just before
 * the inner IPv4 header and advance the mbuf data pointer past the outer
 * IPv4+ESP+IV headers. Returns 0 or OUT_DISCARD.
 */
static inline uint8_t handle_esp_ah_dec_finish2(struct task_esp_dec *task, struct rte_mbuf *mbuf)
{
        u8* m = rte_pktmbuf_mtod(mbuf, u8*);
        // Copy the 14-byte ethernet header 44 bytes forward (outer IP 20 +
        // ESP 8 + IV 16); regions do not overlap.
        rte_memcpy(m+sizeof(prox_rte_ipv4_hdr)+sizeof(struct prox_esp_hdr)+CIPHER_IV_LENGTH_AES_CBC,
                m, sizeof(prox_rte_ether_hdr));
        // Drop the now-dead prefix so the frame starts at the copied header.
        m = (u8*)rte_pktmbuf_adj(mbuf, sizeof(prox_rte_ipv4_hdr)+sizeof(struct prox_esp_hdr)+CIPHER_IV_LENGTH_AES_CBC);
        prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(m+sizeof(prox_rte_ether_hdr));

        if (unlikely((pip4->version_ihl >> 4) != 4)) {
                plog_info("non IPv4 packet after esp dec %i\n", pip4->version_ihl);
                plogdx_info(mbuf, "DEC TX: ");
                return OUT_DISCARD;
        }
        if (pip4->time_to_live) {
                pip4->time_to_live--;
        }
        else {
                plog_info("TTL = 0 => Dropping\n");
                return OUT_DISCARD;
        }
        uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
        // Trim the leftover ESP trailer/ICV bytes past the inner packet.
        int len = rte_pktmbuf_pkt_len(mbuf);
        rte_pktmbuf_trim(mbuf, len - sizeof(prox_rte_ether_hdr) - ipv4_length);

#if 0
        do_ipv4_swap(task, mbuf);
#else
        prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
        prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
        prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
        //rte_memcpy(peth, task->dst_mac, sizeof(task->dst_mac));
#endif

        // TTL changed above, so the IPv4 checksum must be recomputed.
        prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), 1);
        return 0;
}
589
590 static int handle_esp_enc_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
591 {
592         struct task_esp_enc *task = (struct task_esp_enc *)tbase;
593         uint8_t out[MAX_PKT_BURST];
594         uint8_t result;
595         uint16_t i = 0, nb_rx = 0, nb_enc=0, j = 0, idx = 0;
596         struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
597
598         if (rte_crypto_op_bulk_alloc(task->crypto_op_pool,
599                         RTE_CRYPTO_OP_TYPE_SYMMETRIC,
600                         task->ops_burst, n_pkts) != n_pkts) {
601                 PROX_PANIC(1, "Failed to allocate ENC crypto operations\n");
602         }
603         for (uint16_t j = 0; j < n_pkts; ++j) {
604                 result = handle_esp_ah_enc(task, mbufs[j], task->ops_burst[nb_enc]);
605                 if (result != OUT_DISCARD) {
606                         ++nb_enc;
607                 }
608                 else {
609                         new_mbufs[idx] = mbufs[j];
610                         out[idx] = result;
611                         idx++;
612                 }
613         }
614         if (nb_enc) {
615                 if (rte_cryptodev_enqueue_burst(task->cdev_id, task->qp_id, task->ops_burst, nb_enc) != nb_enc) {
616                         plog_info("Error enc enqueue_burst\n");
617                         return -1;
618                 }
619         }
620
621         nb_rx = rte_cryptodev_dequeue_burst(task->cdev_id, task->qp_id, task->ops_burst, MAX_PKT_BURST - idx);
622         for (uint16_t j = 0; j < nb_rx; ++j) {
623                 new_mbufs[idx] = task->ops_burst[j]->sym->m_src;
624                 out[idx] = 0;
625                 rte_crypto_op_free(task->ops_burst[j]);
626                 idx++;
627         }
628         return task->base.tx_pkt(&task->base, new_mbufs, idx, out);
629 }
630
631 static int handle_esp_dec_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
632 {
633         struct task_esp_dec *task = (struct task_esp_dec *)tbase;
634         uint8_t out[MAX_PKT_BURST];
635         uint8_t result;
636         uint16_t i = 0, nb_rx = 0, nb_dec=0, j = 0, idx = 0;
637         struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
638
639         if (rte_crypto_op_bulk_alloc(task->crypto_op_pool,
640                         RTE_CRYPTO_OP_TYPE_SYMMETRIC,
641                         task->ops_burst, n_pkts) != n_pkts) {
642                 PROX_PANIC(1, "Failed to allocate DEC crypto operations\n");
643         }
644         for (j = 0; j < n_pkts; ++j) {
645                 result = handle_esp_ah_dec(task, mbufs[j], task->ops_burst[nb_dec]);
646                 if (result != OUT_DISCARD)
647                         ++nb_dec;
648                 else {
649                         new_mbufs[idx] = mbufs[j];
650                         out[idx] = result;
651                         idx++;
652                 }
653         }
654         if (nb_dec) {
655                 if (rte_cryptodev_enqueue_burst(task->cdev_id, task->qp_id, task->ops_burst, nb_dec) != nb_dec) {
656                         plog_info("Error dec enqueue_burst\n");
657                         return -1;
658                 }
659         }
660
661         nb_rx = rte_cryptodev_dequeue_burst(task->cdev_id, task->qp_id,
662                                 task->ops_burst, MAX_PKT_BURST - idx);
663
664         for (j = 0; j < nb_rx; ++j) {
665                 new_mbufs[idx] = task->ops_burst[j]->sym->m_src;
666                 if (task->ops_burst[j]->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
667                         out[idx] = handle_esp_ah_dec_finish2(task, new_mbufs[idx]);
668                 }
669                 else {
670                         out[idx] = OUT_DISCARD;
671                 }
672                 rte_crypto_op_free(task->ops_burst[j]);
673                 idx++;
674         }
675
676         return task->base.tx_pkt(&task->base, new_mbufs, idx, out);
677 }
678
// Registration descriptor for the "esp_enc" task mode.
struct task_init task_init_esp_enc = {
        .mode = ESP_ENC,
        .mode_str = "esp_enc",
        .init = init_task_esp_enc,
        .handle = handle_esp_enc_bulk,
        .size = sizeof(struct task_esp_enc),
};
686
// Registration descriptor for the "esp_dec" task mode.
struct task_init task_init_esp_dec = {
        .mode = ESP_DEC,
        .mode_str = "esp_dec",
        .init = init_task_esp_dec,
        .handle = handle_esp_dec_bulk,
        .size = sizeof(struct task_esp_dec),
};
694
// Register the esp_enc task mode with PROX at program start-up.
__attribute__((constructor)) static void reg_task_esp_enc(void)
{
        reg_task(&task_init_esp_enc);
}
699
// Register the esp_dec task mode with PROX at program start-up.
__attribute__((constructor)) static void reg_task_esp_dec(void)
{
        reg_task(&task_init_esp_dec);
}