/* VNFs/DPPD-PROX/handle_esp.c */
/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

/*
 * Non-interoperable implementation of RFC 3686 (AES-CTR, 128-bit key) and
 * RFC 4303 (IPv4 tunnel-mode ESP). Note that the xforms configured below
 * actually use AES-CBC with HMAC-SHA1.
 * Limitations:
 * 1. Crypto is not safe: the underlying cipher implementation is OK, but the
 *    ESP encapsulation itself is lousy.
 * 2. Only ESP / tunnel mode / IPv4.
 * 3. Not fully implemented.
 * 4. No proper key handling / SADB.
 * Performance demonstrator only.
 */

#include "task_init.h"
#include "task_base.h"
#include "etypes.h"
#include "stats.h"
#include "cfgfile.h"
#include "log.h"
#include "prox_cksum.h"
#include "defines.h"
#include <rte_ip.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include "prox_port_cfg.h"
#include "prox_compat.h"

typedef unsigned int u32;
typedef unsigned char u8;

#define BYTE_LENGTH(x) ((x) / 8)
#define DIGEST_BYTE_LENGTH_SHA1 (BYTE_LENGTH(160))

//#define CIPHER_KEY_LENGTH_AES_CBC (32)
#define CIPHER_KEY_LENGTH_AES_CBC (16) //==TEST: 128-bit key
#define CIPHER_IV_LENGTH_AES_CBC 16

#define MAXIMUM_IV_LENGTH 16
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))
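/*
 * Per-op IVs live in the private area that rte_crypto_op_pool_create()
 * reserves behind each operation (MAXIMUM_IV_LENGTH bytes, see the pool
 * creation in the init functions below). IV_OFFSET is the offset of that
 * area from the start of the rte_crypto_op, so the handlers reach it with
 * rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET), and the cipher
 * xform's iv.offset is set to the same value.
 */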

#define MAX_SESSIONS 1024
#define POOL_CACHE_SIZE 128

#define NUM_OPS 256

struct task_esp_enc {
	struct task_base base;
	uint8_t cdev_id;
	uint16_t qp_id;
	uint32_t local_ipv4;
	prox_rte_ether_addr local_mac;
	uint32_t remote_ipv4;
	prox_rte_ether_addr dst_mac;
	struct rte_mempool *crypto_op_pool;
	struct rte_mempool *session_pool;
	struct rte_cryptodev_sym_session *sess;
	struct rte_crypto_op *ops_burst[NUM_OPS];
};

struct task_esp_dec {
	struct task_base base;
	uint8_t cdev_id;
	uint16_t qp_id;
	uint32_t local_ipv4;
	prox_rte_ether_addr local_mac;
	prox_rte_ether_addr dst_mac;
	struct rte_mempool *crypto_op_pool;
	struct rte_mempool *session_pool;
	struct rte_cryptodev_sym_session *sess;
	struct rte_crypto_op *ops_burst[NUM_OPS];
};

static uint8_t hmac_sha1_key[] = {
	0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
	0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
	0xDE, 0xF4, 0xDE, 0xAD };

static uint8_t aes_cbc_key[] = {
	0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
	0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A,
	0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
	0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A };

static uint8_t aes_cbc_iv[] = {
	0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
	0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A };

static void printf_cdev_info(uint8_t cdev_id)
{
	struct rte_cryptodev_info dev_info;
	rte_cryptodev_info_get(cdev_id, &dev_info);
	plog_info("number of crypto devices: %d\n", rte_cryptodev_count());
	//uint16_t rte_cryptodev_queue_pair_count(uint8_t dev_id);
	plog_info("dev:%d name:%s nb_queue_pairs:%d max_nb_sessions:%d\n",
		cdev_id, dev_info.driver_name, dev_info.max_nb_queue_pairs, dev_info.sym.max_nb_sessions);
	const struct rte_cryptodev_capabilities *cap = &dev_info.capabilities[0];
	int i = 0;
	while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		//plog_info("cap->sym.xform_type:%d,");
		if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			plog_info("RTE_CRYPTO_SYM_XFORM_CIPHER: %d\n", cap->sym.cipher.algo);
		cap = &dev_info.capabilities[++i];
	}
}

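/*
 * get_cdev_id() hands out one crypto device per task. If a device named
 * crypto_aesni_mbN was already created through the EAL options of the config
 * file (e.g. "eal=-b 0000:00:03.0 --vdev crypto_aesni_mb0 --vdev
 * crypto_aesni_mb1"), that device is reused; otherwise a vdev is created on
 * the fly with rte_vdev_init(). The #if 0 variant simply assumes the devices
 * were preconfigured and panics when there are not enough of them.
 */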
#if 0
static uint8_t get_cdev_id(void)
{
	//crypto devices must be configured in the config file
	//eal=-b 0000:00:03.0 --vdev crypto_aesni_mb0 --vdev crypto_aesni_mb1

	static uint8_t cdev_id = 0;
	PROX_PANIC(cdev_id + 1 > rte_cryptodev_count(), "not enough crypto devices\n");
	return cdev_id++;
}
#else
static uint8_t get_cdev_id(void)
{
	static uint8_t cdev_id = 0;
	char name[64] = {0};

	sprintf(name, "crypto_aesni_mb%d", cdev_id);

	int cdev_id1 = rte_cryptodev_get_dev_id(name);
	if (cdev_id1 >= 0) {
		plog_info("crypto dev %d preconfigured\n", cdev_id1);
		++cdev_id;
		return cdev_id1;
	}
#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,0)
	int ret = rte_vdev_init(name, "max_nb_queue_pairs=8,max_nb_sessions=1024,socket_id=0");
#else
	int ret = rte_vdev_init(name, "max_nb_queue_pairs=8,socket_id=0");
#endif
	PROX_PANIC(ret != 0, "Failed rte_vdev_init\n");

	return cdev_id++;
}
#endif

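/*
 * Both init functions below follow the same pattern: create a crypto op pool
 * and a session pool, configure the task's crypto device, set up queue pair 0
 * and build a symmetric session from hard-coded test keys. The encrypt side
 * chains cipher (AES-CBC, ENCRYPT) -> auth (HMAC-SHA1, GENERATE); the decrypt
 * side declares auth (VERIFY) -> cipher (DECRYPT) xforms, i.e. the usual
 * IPsec encrypt-then-MAC ordering in both directions.
 */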
static void init_task_esp_enc(struct task_base *tbase, struct task_args *targ)
{
	struct task_esp_enc *task = (struct task_esp_enc *)tbase;

	tbase->flags |= FLAG_NEVER_FLUSH;

	uint8_t lcore_id = targ->lconf->id;
	char name[64];
	sprintf(name, "core_%03u_crypto_pool", lcore_id);
	task->crypto_op_pool = rte_crypto_op_pool_create(name, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		8192, 128, MAXIMUM_IV_LENGTH, rte_socket_id());
	PROX_PANIC(task->crypto_op_pool == NULL, "Can't create ENC CRYPTO_OP_POOL\n");

	task->cdev_id = get_cdev_id();

	struct rte_cryptodev_config cdev_conf;
	cdev_conf.nb_queue_pairs = 2;
	//cdev_conf.socket_id = SOCKET_ID_ANY;
	cdev_conf.socket_id = rte_socket_id();
	rte_cryptodev_configure(task->cdev_id, &cdev_conf);

	unsigned int session_size = rte_cryptodev_sym_get_private_session_size(task->cdev_id);
	plog_info("rte_cryptodev_sym_get_private_session_size=%d\n", session_size);
	sprintf(name, "core_%03u_session_pool", lcore_id);
	task->session_pool = rte_mempool_create(name,
				MAX_SESSIONS,
				session_size,
				POOL_CACHE_SIZE,
				0, NULL, NULL, NULL,
				NULL, rte_socket_id(),
				0);
	PROX_PANIC(task->session_pool == NULL, "Failed rte_mempool_create\n");

	task->qp_id = 0;
	plog_info("enc: task->qp_id=%u\n", task->qp_id);
	struct prox_rte_cryptodev_qp_conf qp_conf;
	qp_conf.nb_descriptors = 128;
	qp_conf.mp_session = task->session_pool;
	prox_rte_cryptodev_queue_pair_setup(task->cdev_id, task->qp_id, &qp_conf, rte_cryptodev_socket_id(task->cdev_id));

	int ret = rte_cryptodev_start(task->cdev_id);
	PROX_PANIC(ret < 0, "Failed to start device\n");

	struct rte_cryptodev *dev;
	dev = rte_cryptodev_pmd_get_dev(task->cdev_id);
	PROX_PANIC(dev->attached != RTE_CRYPTODEV_ATTACHED, "No ENC cryptodev attached\n");

	//Setup Cipher Parameters
	struct rte_crypto_sym_xform cipher_xform = {0};
	struct rte_crypto_sym_xform auth_xform = {0};

	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform.next = &auth_xform;

	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xform.cipher.key.data = aes_cbc_key;
	cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;

	cipher_xform.cipher.iv.offset = IV_OFFSET;
	cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;

	//Setup HMAC Parameters
	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xform.next = NULL;
	auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
	auth_xform.auth.key.data = hmac_sha1_key;
	auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;

	auth_xform.auth.iv.offset = 0;
	auth_xform.auth.iv.length = 0;

	task->sess = rte_cryptodev_sym_session_create(task->session_pool);
	PROX_PANIC(task->sess == NULL, "Failed to create ENC session\n");

	ret = rte_cryptodev_sym_session_init(task->cdev_id, task->sess, &cipher_xform, task->session_pool);
	PROX_PANIC(ret < 0, "Failed sym_session_init\n");

	//TODO: double-check the task->ops_burst lifecycle!
	if (rte_crypto_op_bulk_alloc(task->crypto_op_pool,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			task->ops_burst, NUM_OPS) != NUM_OPS) {
		PROX_PANIC(1, "Failed to allocate ENC crypto operations\n");
	}

	task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
	task->remote_ipv4 = rte_cpu_to_be_32(targ->remote_ipv4);
	//memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
	struct prox_port_cfg *port = find_reachable_port(targ);
	memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr));

	if (targ->flags & TASK_ARG_DST_MAC_SET) {
		memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac));
		plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n", MAC_BYTES(task->dst_mac.addr_bytes));
		//prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
		//rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
	}
}


static void init_task_esp_dec(struct task_base *tbase, struct task_args *targ)
{
	struct task_esp_dec *task = (struct task_esp_dec *)tbase;

	tbase->flags |= FLAG_NEVER_FLUSH;

	uint8_t lcore_id = targ->lconf->id;
	char name[64];
	sprintf(name, "core_%03u_crypto_pool", lcore_id);
	task->crypto_op_pool = rte_crypto_op_pool_create(name, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		8192, 128, MAXIMUM_IV_LENGTH, rte_socket_id());
	PROX_PANIC(task->crypto_op_pool == NULL, "Can't create DEC CRYPTO_OP_POOL\n");

	task->cdev_id = get_cdev_id();
	struct rte_cryptodev_config cdev_conf;
	cdev_conf.nb_queue_pairs = 2;
	//cdev_conf.socket_id = SOCKET_ID_ANY;
	cdev_conf.socket_id = rte_socket_id();
	rte_cryptodev_configure(task->cdev_id, &cdev_conf);

	unsigned int session_size = rte_cryptodev_sym_get_private_session_size(task->cdev_id);
	plog_info("rte_cryptodev_sym_get_private_session_size=%d\n", session_size);
	sprintf(name, "core_%03u_session_pool", lcore_id);
	task->session_pool = rte_mempool_create(name,
				MAX_SESSIONS,
				session_size,
				POOL_CACHE_SIZE,
				0, NULL, NULL, NULL,
				NULL, rte_socket_id(),
				0);
	PROX_PANIC(task->session_pool == NULL, "Failed rte_mempool_create\n");

	task->qp_id = 0;
	plog_info("dec: task->qp_id=%u\n", task->qp_id);
	struct prox_rte_cryptodev_qp_conf qp_conf;
	qp_conf.nb_descriptors = 128;
	qp_conf.mp_session = task->session_pool;
	prox_rte_cryptodev_queue_pair_setup(task->cdev_id, task->qp_id, &qp_conf, rte_cryptodev_socket_id(task->cdev_id));

	int ret = rte_cryptodev_start(task->cdev_id);
	PROX_PANIC(ret < 0, "Failed to start device\n");

	struct rte_cryptodev *dev;
	dev = rte_cryptodev_pmd_get_dev(task->cdev_id);
	PROX_PANIC(dev->attached != RTE_CRYPTODEV_ATTACHED, "No DEC cryptodev attached\n");

	//Setup Cipher Parameters
	struct rte_crypto_sym_xform cipher_xform = {0};
	struct rte_crypto_sym_xform auth_xform = {0};

	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform.next = NULL;
	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	cipher_xform.cipher.key.data = aes_cbc_key;
	cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;

	cipher_xform.cipher.iv.offset = IV_OFFSET;
	cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;

	//Setup HMAC Parameters
	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xform.next = &cipher_xform;
	auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
	auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
	auth_xform.auth.key.data = hmac_sha1_key;
	auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;

	auth_xform.auth.iv.offset = 0;
	auth_xform.auth.iv.length = 0;

	task->sess = rte_cryptodev_sym_session_create(task->session_pool);
	PROX_PANIC(task->sess == NULL, "Failed to create DEC session\n");

	ret = rte_cryptodev_sym_session_init(task->cdev_id, task->sess, &cipher_xform, task->session_pool);
	PROX_PANIC(ret < 0, "Failed sym_session_init\n");

	//TODO: double-check the task->ops_burst lifecycle!
	if (rte_crypto_op_bulk_alloc(task->crypto_op_pool,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			task->ops_burst, NUM_OPS) != NUM_OPS) {
		PROX_PANIC(1, "Failed to allocate DEC crypto operations\n");
	}

	task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
	//memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
	struct prox_port_cfg *port = find_reachable_port(targ);
	memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr));

	if (targ->flags & TASK_ARG_DST_MAC_SET) {
		memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac));
		plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n", MAC_BYTES(task->dst_mac.addr_bytes));
		//prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
		//rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
	}
}

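/*
 * Tunnel-mode encapsulation sketch (what handle_esp_ah_enc() builds):
 *
 *   in:  | eth | inner IPv4 packet |
 *   out: | eth | outer IPv4 | ESP hdr (SPI, SN) | IV | inner IPv4 packet
 *          ... padding | pad_len | next_hdr | ICV (HMAC-SHA1) |
 *
 * rte_pktmbuf_prepend() makes room for the outer IPv4 header, the ESP header
 * and the IV; rte_pktmbuf_append() makes room for the ESP trailer and the
 * ICV. The crypto op is then set up so that the cipher covers IV..next_hdr
 * and the authentication covers the ESP header up to next_hdr.
 */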
static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
{
	u8 *data;
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	prox_rte_ipv4_hdr *pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
	uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
	struct rte_crypto_sym_op *sym_cop = cop->sym;

	if (unlikely((pip4->version_ihl >> 4) != 4)) {
		plog_info("Received non-IPv4 packet at esp enc %i\n", pip4->version_ihl);
		plogdx_info(mbuf, "ENC RX: ");
		return OUT_DISCARD;
	}
	if (pip4->time_to_live) {
		pip4->time_to_live--;
	}
	else {
		plog_info("TTL = 0 => Dropping\n");
		return OUT_DISCARD;
	}

	// Remove padding if any (we don't want to encapsulate garbage at the end of the IPv4 packet)
	int l1 = rte_pktmbuf_pkt_len(mbuf);
	int padding = l1 - (ipv4_length + sizeof(prox_rte_ether_hdr));
	if (unlikely(padding > 0)) {
		rte_pktmbuf_trim(mbuf, padding);
	}

	l1 = rte_pktmbuf_pkt_len(mbuf);
	int encrypt_len = l1 - sizeof(prox_rte_ether_hdr) + 2; // According to RFC 4303 table 1, the encrypted length is ip + tfc_pad (opt) + pad + pad_len (1) + next_header (1)
	padding = 0;
	if ((encrypt_len & 0xf) != 0) {
		padding = 16 - (encrypt_len % 16);
		encrypt_len += padding;
	}
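	/*
	 * Worked example (illustration only): a 62-byte plaintext (inner IP
	 * packet plus the 2 trailer bytes) is not a multiple of the 16-byte
	 * AES block size, so padding = 16 - (62 % 16) = 2 and encrypt_len
	 * becomes 64. When encrypt_len is already block-aligned, no padding
	 * is added.
	 */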

	const int extra_space = sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC;

	prox_rte_ether_addr src_mac = peth->s_addr;
	prox_rte_ether_addr dst_mac = peth->d_addr;
	uint32_t src_addr = pip4->src_addr;
	uint32_t dst_addr = pip4->dst_addr;
	uint8_t ttl = pip4->time_to_live;
	uint8_t version_ihl = pip4->version_ihl;

	peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, extra_space); // encap + prefix
	peth = (prox_rte_ether_hdr *)rte_pktmbuf_append(mbuf, 0 + 1 + 1 + padding + 4 + DIGEST_BYTE_LENGTH_SHA1); // padding + pad_len + next_header + seqn + ICV pad + ICV
	peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	l1 = rte_pktmbuf_pkt_len(mbuf);
	peth->ether_type = ETYPE_IPv4;
#if 0
	//send it back
	prox_rte_ether_addr_copy(&dst_mac, &peth->s_addr);
	prox_rte_ether_addr_copy(&src_mac, &peth->d_addr);
#else
	prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
	//prox_rte_ether_addr_copy(&dst_mac, &peth->d_addr); //IS: dst MAC should be rewritten by ARP
	prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
#endif

	pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
	pip4->src_addr = task->local_ipv4;
	pip4->dst_addr = task->remote_ipv4;
	pip4->time_to_live = ttl;
	pip4->next_proto_id = IPPROTO_ESP; // 50 for ESP; ip-in-ip is the next_header in the trailer
	pip4->version_ihl = version_ihl; // 20 bytes, ipv4
	pip4->total_length = rte_cpu_to_be_16(ipv4_length + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC + padding + 1 + 1 + DIGEST_BYTE_LENGTH_SHA1); // iphdr + SPI + SN + IV + payload + padding + pad_len + next_header + ICV
	pip4->packet_id = 0x0101;
	pip4->type_of_service = 0;
	pip4->time_to_live = 64;
	prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), 1);

	data = (u8 *)(pip4 + 1);
#if 0
	*((u32 *)data) = 0x2016; // FIXME SPI
	*((u32 *)data + 1) = 0x2; // FIXME SN
#else
	struct prox_esp_hdr *pesp = (struct prox_esp_hdr *)(pip4 + 1);
	pesp->spi = src_addr; // for simplicity assume one tunnel per source ip
	static u32 sn = 0;
	pesp->seq = ++sn;
	pesp->spi = 0xAAAAAAAA; //debug
	pesp->seq = 0xBBBBBBBB; //debug
#endif
	u8 *padl = (u8 *)data + (8 + encrypt_len - 2 + CIPHER_IV_LENGTH_AES_CBC); // No ESN yet. (-2 because pad_len and next_header are the last 2 bytes of encrypt_len)
	//padl += CIPHER_IV_LENGTH_AES_CBC;
	*padl = padding;
	*(padl + 1) = 4; // ipv4-in-ipv4

	sym_cop->auth.digest.data = data + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len;
	//sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
	sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
	//sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;

	//sym_cop->cipher.iv.data = data + 8;
	//sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4;
	//sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;

	//rte_memcpy(sym_cop->cipher.iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);

	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET);
	rte_memcpy(iv_ptr, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);

#if 0 //old
	sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC;
	sym_cop->cipher.data.length = encrypt_len;

	uint64_t *iv = (uint64_t *)(pesp + 1);
	memset(iv, 0, CIPHER_IV_LENGTH_AES_CBC);
#else
	//uint64_t *iv = (uint64_t *)(pesp + 1);
	//memset(iv, 0, CIPHER_IV_LENGTH_AES_CBC);
	sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr);
	sym_cop->cipher.data.length = encrypt_len + CIPHER_IV_LENGTH_AES_CBC;
#endif

	sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr);
	sym_cop->auth.data.length = sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC + encrypt_len; // + 4;// FIXME

	sym_cop->m_src = mbuf;
	rte_crypto_op_attach_sym_session(cop, task->sess);
	//cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	//cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

	return 0;
}

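/*
 * Decapsulation is done in two steps: handle_esp_ah_dec() fills in the crypto
 * op (digest location, per-packet IV taken from the ESP payload, cipher/auth
 * ranges) for the received | eth | outer IPv4 | ESP | IV | encrypted inner
 * packet ... ICV | frame, and once the op has completed,
 * handle_esp_ah_dec_finish2() strips the outer headers and the trailer so
 * that only | eth | inner IPv4 packet | is transmitted.
 */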
static inline uint8_t handle_esp_ah_dec(struct task_esp_dec *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
{
	struct rte_crypto_sym_op *sym_cop = cop->sym;
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	prox_rte_ipv4_hdr *pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
	uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
	u8 *data = (u8 *)(pip4 + 1);

	if (pip4->next_proto_id != IPPROTO_ESP) {
		plog_info("Received non-ESP packet on esp dec\n");
		plogdx_info(mbuf, "DEC RX: ");
		return OUT_DISCARD;
	}

	rte_crypto_op_attach_sym_session(cop, task->sess);

	sym_cop->auth.digest.data = (unsigned char *)((unsigned char *)pip4 + ipv4_length - DIGEST_BYTE_LENGTH_SHA1);
	//sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr)); // FIXME
	sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr));
	//sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;

	//sym_cop->cipher.iv.data = (uint8_t *)data + 8;
	//sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4;
	//sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;

#if 0
	rte_memcpy(rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET),
				aes_cbc_iv,
				CIPHER_IV_LENGTH_AES_CBC);
#else
	uint8_t *iv = (uint8_t *)(pip4 + 1) + sizeof(struct prox_esp_hdr);
	rte_memcpy(rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET),
				iv,
				CIPHER_IV_LENGTH_AES_CBC);
#endif

	sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr);
	sym_cop->auth.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) - 4 - CIPHER_IV_LENGTH_AES_CBC;

	sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC;
	sym_cop->cipher.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) - CIPHER_IV_LENGTH_AES_CBC - 28; // FIXME

	sym_cop->m_src = mbuf;
	return 0;
}

static inline void do_ipv4_swap(struct task_esp_dec *task, struct rte_mbuf *mbuf)
{
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	prox_rte_ether_addr src_mac = peth->s_addr;
	prox_rte_ether_addr dst_mac = peth->d_addr;
	uint32_t src_ip, dst_ip;

	prox_rte_ipv4_hdr *pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
	src_ip = pip4->src_addr;
	dst_ip = pip4->dst_addr;

	//peth->s_addr = dst_mac;
	peth->d_addr = src_mac; // should be replaced by ARP
	pip4->src_addr = dst_ip;
	pip4->dst_addr = src_ip;
	prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
}

static inline uint8_t handle_esp_ah_dec_finish(struct task_esp_dec *task, struct rte_mbuf *mbuf)
{
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	rte_memcpy(((u8 *)peth) + sizeof(prox_rte_ether_hdr), ((u8 *)peth) + sizeof(prox_rte_ether_hdr) +
			+ sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, sizeof(prox_rte_ipv4_hdr)); // next hdr, padding
	prox_rte_ipv4_hdr *pip4 = (prox_rte_ipv4_hdr *)(peth + 1);

	if (unlikely((pip4->version_ihl >> 4) != 4)) {
		plog_info("non-IPv4 packet after esp dec %i\n", pip4->version_ihl);
		plogdx_info(mbuf, "DEC TX: ");
		return OUT_DISCARD;
	}
	if (pip4->time_to_live) {
		pip4->time_to_live--;
	}
	else {
		plog_info("TTL = 0 => Dropping\n");
		return OUT_DISCARD;
	}
	uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
	rte_memcpy(((u8 *)peth) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr),
		((u8 *)peth) + sizeof(prox_rte_ether_hdr) +
		+ 2 * sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, ipv4_length - sizeof(prox_rte_ipv4_hdr));

	int len = rte_pktmbuf_pkt_len(mbuf);
	rte_pktmbuf_trim(mbuf, len - sizeof(prox_rte_ether_hdr) - ipv4_length);
	peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);

#if 0
	do_ipv4_swap(task, mbuf);
#else
	prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
	prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
	//rte_memcpy(peth, task->dst_mac, sizeof(task->dst_mac));
#endif
	prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), 1);

	return 0;
}

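/*
 * handle_esp_ah_dec_finish2() removes the encapsulation in place: the
 * Ethernet header is copied forward over the outer IPv4 + ESP header + IV,
 * rte_pktmbuf_adj() drops those bytes from the front of the mbuf, and
 * rte_pktmbuf_trim() drops the ESP trailer and ICV based on the inner IPv4
 * total_length. The ESP pad_len / next_header bytes are not inspected, in
 * line with the "not fully implemented" caveat above.
 */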
static inline uint8_t handle_esp_ah_dec_finish2(struct task_esp_dec *task, struct rte_mbuf *mbuf)
{
	u8 *m = rte_pktmbuf_mtod(mbuf, u8 *);
	rte_memcpy(m + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC,
		m, sizeof(prox_rte_ether_hdr));
	m = (u8 *)rte_pktmbuf_adj(mbuf, sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC);
	prox_rte_ipv4_hdr *pip4 = (prox_rte_ipv4_hdr *)(m + sizeof(prox_rte_ether_hdr));

	if (unlikely((pip4->version_ihl >> 4) != 4)) {
		plog_info("non-IPv4 packet after esp dec %i\n", pip4->version_ihl);
		plogdx_info(mbuf, "DEC TX: ");
		return OUT_DISCARD;
	}
	if (pip4->time_to_live) {
		pip4->time_to_live--;
	}
	else {
		plog_info("TTL = 0 => Dropping\n");
		return OUT_DISCARD;
	}
	uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
	int len = rte_pktmbuf_pkt_len(mbuf);
	rte_pktmbuf_trim(mbuf, len - sizeof(prox_rte_ether_hdr) - ipv4_length);

#if 0
	do_ipv4_swap(task, mbuf);
#else
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
	prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
	//rte_memcpy(peth, task->dst_mac, sizeof(task->dst_mac));
#endif

	prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), 1);
	return 0;
}

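/*
 * The bulk handlers below work synchronously: each packet of the burst is
 * turned into a crypto op, the whole burst is enqueued on the task's queue
 * pair, and the core then busy-polls rte_cryptodev_dequeue_burst() until all
 * ops have come back before transmitting. Simple, but it serializes the core
 * on the crypto device, which is part of why this file is a performance
 * demonstrator only.
 */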
static int handle_esp_enc_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_esp_enc *task = (struct task_esp_enc *)tbase;
	uint8_t out[MAX_PKT_BURST];
	uint16_t i = 0, nb_rx = 0, nb_enc = 0;

	for (uint16_t j = 0; j < n_pkts; ++j) {
		out[j] = handle_esp_ah_enc(task, mbufs[j], task->ops_burst[nb_enc]);
		if (out[j] != OUT_DISCARD)
			++nb_enc;
	}

	if (rte_cryptodev_enqueue_burst(task->cdev_id, task->qp_id, task->ops_burst, nb_enc) != nb_enc) {
		plog_info("Error enc enqueue_burst\n");
		return -1;
	}

	do {
		nb_rx = rte_cryptodev_dequeue_burst(task->cdev_id, task->qp_id, task->ops_burst + i, nb_enc - i);
		i += nb_rx;
	} while (i < nb_enc);

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}

static int handle_esp_dec_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_esp_dec *task = (struct task_esp_dec *)tbase;
	uint8_t out[MAX_PKT_BURST];
	uint16_t j, nb_dec = 0, nb_rx = 0;

	for (j = 0; j < n_pkts; ++j) {
		out[j] = handle_esp_ah_dec(task, mbufs[j], task->ops_burst[nb_dec]);
		if (out[j] != OUT_DISCARD)
			++nb_dec;
	}

	if (rte_cryptodev_enqueue_burst(task->cdev_id, task->qp_id, task->ops_burst, nb_dec) != nb_dec) {
		plog_info("Error dec enqueue_burst\n");
		return -1;
	}

	j = 0;
	do {
		nb_rx = rte_cryptodev_dequeue_burst(task->cdev_id, task->qp_id,
					task->ops_burst + j, nb_dec - j);
		j += nb_rx;
	} while (j < nb_dec);

	for (j = 0; j < nb_dec; ++j) {
		if (task->ops_burst[j]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			plog_info("err: task->ops_burst[%d].status=%d\n", j, task->ops_burst[j]->status);
			//!!!TODO!!! find the mbuf and discard it!
			//for now just send it further
			//plogdx_info(mbufs[j], "RX: ");
		}
		if (task->ops_burst[j]->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
			struct rte_mbuf *mbuf = task->ops_burst[j]->sym->m_src;
			handle_esp_ah_dec_finish2(task, mbuf); //TODO: set out[j] properly
		}
	}

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}

struct task_init task_init_esp_enc = {
	.mode = ESP_ENC,
	.mode_str = "esp_enc",
	.init = init_task_esp_enc,
	.handle = handle_esp_enc_bulk,
	.size = sizeof(struct task_esp_enc),
};

struct task_init task_init_esp_dec = {
	.mode = ESP_ENC,
	.mode_str = "esp_dec",
	.init = init_task_esp_dec,
	.handle = handle_esp_dec_bulk,
	.size = sizeof(struct task_esp_dec),
};

__attribute__((constructor)) static void reg_task_esp_enc(void)
{
	reg_task(&task_init_esp_enc);
}

__attribute__((constructor)) static void reg_task_esp_dec(void)
{
	reg_task(&task_init_esp_dec);
}