// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * Non-conformant implementation of RFC3686 (CTR-AES, 128-bit key) and RFC4303 (IPv4 tunnel-mode ESP).
 * Limitations:
 * 1. Crypto is not safe!!!!! (the underlying AES-CTR implementation is OK, but the ESP implementation is lousy)
 * 2. Only ESP/tunnel/ipv4/AES-CTR
 * 3. Not fully implemented
 * 4. No proper key handling / SADB
 * So this is a performance demonstrator only.
 */
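/*
 * Added sketch (derived from the offsets and sizes used below, not from the
 * original documentation): handle_esp_ah_enc() is assumed to produce packets
 * laid out as
 *   ETH | outer IPv4 (20B) | ESP header (SPI 4B + SN 4B) | IV (16B) |
 *   inner IPv4 packet | padding | pad length (1B) | next header (1B) |
 *   ICV (HMAC-SHA1, 20B)
 * and handle_esp_ah_dec()/handle_dec_finish() undo exactly this layout.
 */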
#include <stdio.h>
#include <string.h>

#include "task_init.h"
#include "task_base.h"
#include "prox_cksum.h"

#include <rte_version.h>
#include <rte_ip.h>
#include <rte_cryptodev.h>
#include <rte_bus_vdev.h>

#include "prox_port_cfg.h"
#include "prox_compat.h"
#include "log.h"     // assumed to provide plog_info()/plogdx_info()
#include "etypes.h"  // assumed to provide ETYPE_IPv4
typedef unsigned int u32;
typedef unsigned char u8;

#define BYTE_LENGTH(x) ((x) / 8)
#define DIGEST_BYTE_LENGTH_SHA1 (BYTE_LENGTH(160))

//#define CIPHER_KEY_LENGTH_AES_CBC (32)
#define CIPHER_KEY_LENGTH_AES_CBC (16)//==TEST
#define CIPHER_IV_LENGTH_AES_CBC 16
#define MAXIMUM_IV_LENGTH 16
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))
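/*
 * Added note: IV_OFFSET points into the private area that
 * rte_crypto_op_pool_create() reserves after each rte_crypto_op (the pools
 * below are created with MAXIMUM_IV_LENGTH bytes of private data per op).
 * The per-op IV is copied there and addressed with
 * rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET).
 */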
#define MAX_SESSIONS 1024
#define POOL_CACHE_SIZE 128
#define NUM_OPS 256 // depth of the pending-ops buffer (value assumed)
struct task_esp {
	struct task_base base;
	u8 cdev_id;
	u8 qp_id;
	u32 local_ipv4;
	u32 remote_ipv4;
	prox_rte_ether_addr local_mac;
	prox_rte_ether_addr dst_mac;
	struct rte_mempool *crypto_op_pool;
	struct rte_mempool *session_pool;
	struct rte_cryptodev_sym_session *sess;
	struct rte_crypto_op *ops_burst[NUM_OPS];
	unsigned len; // number of ops ready to be enqueued
	uint32_t pkts_in_flight; // difference between enqueued and dequeued
	uint8_t (*handle_esp_finish)(struct task_esp *task,
			struct rte_mbuf *mbuf, uint8_t status);
	uint8_t (*handle_esp_ah)(struct task_esp *task, struct rte_mbuf *mbuf,
			struct rte_crypto_op *cop);
};
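/*
 * Added note on the call flow: handle_esp_ah() (enc or dec variant) turns an
 * mbuf into a symmetric crypto op, crypto_send_burst() enqueues the ops on
 * the cryptodev queue pair, and once the ops are dequeued again
 * handle_esp_finish() rewrites the resulting packet before it is transmitted.
 */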
static uint8_t hmac_sha1_key[] = {
	0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
	0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
	0xDE, 0xF4, 0xDE, 0xAD };

static uint8_t aes_cbc_key[] = {
	0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
	0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A,
	0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
	0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A };

static uint8_t aes_cbc_iv[] = {
	0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
	0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A };
static void printf_cdev_info(uint8_t cdev_id)
{
	struct rte_cryptodev_info dev_info;
	rte_cryptodev_info_get(cdev_id, &dev_info);
	plog_info("!!!numdevs:%d\n", rte_cryptodev_count());
	//uint16_t rte_cryptodev_queue_pair_count(uint8_t dev_id);
	plog_info("dev:%d name:%s nb_queue_pairs:%d max_nb_sessions:%d\n",
			cdev_id, dev_info.driver_name, dev_info.max_nb_queue_pairs,
			dev_info.sym.max_nb_sessions);
	const struct rte_cryptodev_capabilities *cap = &dev_info.capabilities[0];
	int i = 0;
	while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		//plog_info("cap->sym.xform_type:%d,");
		if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			plog_info("RTE_CRYPTO_SYM_XFORM_CIPHER: %d\n", cap->sym.cipher.algo);
		cap = &dev_info.capabilities[++i];
	}
}
static uint8_t get_cdev_id(void)
{
	static uint8_t last_unused_cdev_id = 0;
	uint8_t cdev_count, cdev_id;
	char name[64];

	cdev_count = rte_cryptodev_count();
	plog_info("crypto dev count: %d\n", cdev_count);
	// Hand out the next crypto device that has not been claimed yet
	for (cdev_id = last_unused_cdev_id; cdev_id < cdev_count; cdev_id++) {
		printf_cdev_info(cdev_id);
		last_unused_cdev_id = cdev_id + 1;
		return cdev_id;
	}

	// No unused cryptodev left: create a new AESNI-MB vdev on the fly
	sprintf(name, "crypto_aesni_mb%d", cdev_count);
#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,0)
	int ret = rte_vdev_init(name, "max_nb_queue_pairs=8,max_nb_sessions=1024,socket_id=0");
#else
	int ret = rte_vdev_init(name, "max_nb_queue_pairs=8,socket_id=0");
#endif
	PROX_PANIC(ret != 0, "Failed rte_vdev_init\n");
	cdev_id = rte_cryptodev_get_dev_id(name);
	printf_cdev_info(cdev_id);
	last_unused_cdev_id = cdev_id + 1;
	return cdev_id;
}
static inline uint8_t handle_enc_finish(struct task_esp *task,
		struct rte_mbuf *mbuf, uint8_t status)
{
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
			prox_rte_ether_hdr *);
	prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
	pip4->dst_addr = task->remote_ipv4;
	pip4->src_addr = task->local_ipv4;
	prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr),
			sizeof(prox_rte_ipv4_hdr), 1);
	return 0;
}
static inline uint8_t handle_dec_finish(struct task_esp *task,
		struct rte_mbuf *mbuf, uint8_t status)
{
	if (likely(status == RTE_CRYPTO_OP_STATUS_SUCCESS)) {
		u8* m = rte_pktmbuf_mtod(mbuf, u8*);
		// Move the ethernet header right in front of the decrypted inner
		// IPv4 packet, then strip the outer IPv4 + ESP header + IV bytes
		rte_memcpy(m + sizeof(prox_rte_ipv4_hdr) +
				sizeof(struct prox_esp_hdr) +
				CIPHER_IV_LENGTH_AES_CBC, m,
				sizeof(prox_rte_ether_hdr));
		m = (u8*)rte_pktmbuf_adj(mbuf, sizeof(prox_rte_ipv4_hdr) +
				sizeof(struct prox_esp_hdr) +
				CIPHER_IV_LENGTH_AES_CBC);
		prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(m +
				sizeof(prox_rte_ether_hdr));

		if (unlikely((pip4->version_ihl >> 4) != 4)) {
			// plog_info("non IPv4 packet after esp dec %i\n",
			//		pip4->version_ihl);
			// plogdx_info(mbuf, "DEC TX: ");
			return OUT_DISCARD;
		}
		if (pip4->time_to_live) {
			pip4->time_to_live--;
		}
		else {
			plog_info("TTL = 0 => Dropping\n");
			return OUT_DISCARD;
		}
		uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
		int len = rte_pktmbuf_pkt_len(mbuf);
		// Drop the trailing ESP padding, pad length, next header and ICV
		rte_pktmbuf_trim(mbuf, len - sizeof(prox_rte_ether_hdr) -
				ipv4_length);

#if 0
		do_ipv4_swap(task, mbuf);
#else
		prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
				prox_rte_ether_hdr *);
		prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
		prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
		//rte_memcpy(peth, task->dst_mac, sizeof(task->dst_mac));
#endif
		pip4->dst_addr = task->remote_ipv4;
		pip4->src_addr = task->local_ipv4;
		prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr),
				sizeof(prox_rte_ipv4_hdr), 1);
		return 0;
	}
	return OUT_DISCARD;
}
static inline uint8_t handle_esp_ah_enc(struct task_esp *task,
		struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
{
	u8 *data;
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
			prox_rte_ether_hdr *);
	prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
	uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
	struct rte_crypto_sym_op *sym_cop = cop->sym;

	if (unlikely((pip4->version_ihl >> 4) != 4)) {
		plog_info("Received non IPv4 packet at esp enc %i\n",
				pip4->version_ihl);
		return OUT_DISCARD;
	}
	if (pip4->time_to_live) {
		pip4->time_to_live--;
	}
	else {
		plog_info("TTL = 0 => Dropping\n");
		return OUT_DISCARD;
	}

	// Remove padding if any (we don't want to encapsulate garbage at the end of the IPv4 packet)
	int l1 = rte_pktmbuf_pkt_len(mbuf);
	int padding = l1 - (ipv4_length + sizeof(prox_rte_ether_hdr));
	if (unlikely(padding > 0)) {
		rte_pktmbuf_trim(mbuf, padding);
	}

	l1 = rte_pktmbuf_pkt_len(mbuf);
	int encrypt_len = l1 - sizeof(prox_rte_ether_hdr) + 2; // According to RFC4303 table 1, encrypt len is ip + tfc_pad (opt) + padding + pad len (1) + next header (1)
	padding = 0;
	if ((encrypt_len & 0xf) != 0) {
		padding = 16 - (encrypt_len % 16);
		encrypt_len += padding;
	}
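	/*
	 * Added worked example: for an inner IPv4 packet of 64 bytes,
	 * encrypt_len starts at 64 + 2 = 66; 66 mod 16 = 2, so padding = 14 and
	 * encrypt_len becomes 80, a whole number of 16-byte AES blocks covering
	 * inner IP + padding + pad length + next header.
	 */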
	const int extra_space = sizeof(prox_rte_ipv4_hdr) +
		sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC;

	prox_rte_ether_addr src_mac = peth->s_addr;
	prox_rte_ether_addr dst_mac = peth->d_addr;
	uint32_t src_addr = pip4->src_addr;
	uint32_t dst_addr = pip4->dst_addr;
	uint8_t ttl = pip4->time_to_live;
	uint8_t version_ihl = pip4->version_ihl;

	peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, extra_space); // encap + prefix
	peth = (prox_rte_ether_hdr *)rte_pktmbuf_append(mbuf, 0 + 1 + 1 +
			padding + 4 + DIGEST_BYTE_LENGTH_SHA1); // padding + pad_len + next_head + seqn + ICV pad + ICV
	peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	l1 = rte_pktmbuf_pkt_len(mbuf);
	peth->ether_type = ETYPE_IPv4;
#if 0
	// send the packet back where it came from
	prox_rte_ether_addr_copy(&dst_mac, &peth->s_addr);
	prox_rte_ether_addr_copy(&src_mac, &peth->d_addr);
#else
	prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
	//prox_rte_ether_addr_copy(&dst_mac, &peth->d_addr);//IS: dstmac should be rewritten by arp
	prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
#endif

	pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
	pip4->src_addr = task->local_ipv4;
	pip4->dst_addr = task->remote_ipv4;
	pip4->time_to_live = ttl;
	pip4->next_proto_id = IPPROTO_ESP; // 50 for ESP, ip-in-ip next proto in the trailer
	pip4->version_ihl = version_ihl; // 20 bytes, ipv4
	pip4->total_length = rte_cpu_to_be_16(ipv4_length +
			sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr)
			+ CIPHER_IV_LENGTH_AES_CBC + padding + 1 + 1 +
			DIGEST_BYTE_LENGTH_SHA1); // iphdr + SPI + SN + IV + payload + padding + pad len + next header + ICV
	pip4->packet_id = 0x0101;
	pip4->type_of_service = 0;
	pip4->time_to_live = 64;
	prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr),
			sizeof(prox_rte_ipv4_hdr), 1);
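	/*
	 * Added worked example of the outer length: continuing the 64-byte inner
	 * packet above (padding = 14), total_length = 64 + 20 (outer IPv4)
	 * + 8 (ESP) + 16 (IV) + 14 + 1 + 1 + 20 (ICV) = 144 bytes.
	 */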
	data = (u8*)(pip4 + 1);
	*((u32*) data) = 0x2016; // FIXME SPI
	*((u32*) data + 1) = 0x2; // FIXME SN

	struct prox_esp_hdr *pesp = (struct prox_esp_hdr*)(pip4 + 1);
	pesp->spi = src_addr; // for simplicity, assume 1 tunnel per source ip

	// pesp->spi = 0xAAAAAAAA; //debug
	// pesp->seq = 0xBBBBBBBB; //debug

	u8 *padl = (u8*)data + (8 + encrypt_len - 2 + CIPHER_IV_LENGTH_AES_CBC); // No ESN yet. (-2 means NH is crypted)
	//padl += CIPHER_IV_LENGTH_AES_CBC;
	*padl = padding;
	*(padl + 1) = 4; // next header: 4 = ipv4-in-ipv4
	sym_cop->auth.digest.data = data + 8 + CIPHER_IV_LENGTH_AES_CBC +
			encrypt_len;
	//sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
	sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
			(sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr)
			+ 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
	//sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;

	//sym_cop->cipher.iv.data = data + 8;
	//sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4;
	//sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
	//rte_memcpy(sym_cop->cipher.iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET);
	rte_memcpy(iv_ptr, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);

#if 0
	sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC;
	sym_cop->cipher.data.length = encrypt_len;

	uint64_t *iv = (uint64_t *)(pesp + 1);
	memset(iv, 0, CIPHER_IV_LENGTH_AES_CBC);
#else
	//uint64_t *iv = (uint64_t *)(pesp + 1);
	//memset(iv, 0, CIPHER_IV_LENGTH_AES_CBC);
	sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) +
		sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr);
	sym_cop->cipher.data.length = encrypt_len + CIPHER_IV_LENGTH_AES_CBC;
#endif
	sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) +
		sizeof(prox_rte_ipv4_hdr);
	sym_cop->auth.data.length = sizeof(struct prox_esp_hdr) +
		CIPHER_IV_LENGTH_AES_CBC + encrypt_len;// + 4;// FIXME
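	/*
	 * Added note: auth.data covers SPI + SN + IV + ciphertext as RFC4303
	 * requires, but since cipher_xform.next is set to NULL in the init
	 * functions below (CRYPTO_ONLY), the session performs no authentication
	 * and these auth fields are effectively ignored.
	 */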
	sym_cop->m_src = mbuf;
	rte_crypto_op_attach_sym_session(cop, task->sess);
	//cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	//cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

	return 0;
}
static inline uint8_t handle_esp_ah_dec(struct task_esp *task,
		struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
{
	struct rte_crypto_sym_op *sym_cop = cop->sym;
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
			prox_rte_ether_hdr *);
	prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
	uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
	u8 *data = (u8*)(pip4 + 1);

	if (pip4->next_proto_id != IPPROTO_ESP) {
		plog_info("Received non ESP packet on esp dec\n");
		plogdx_info(mbuf, "DEC RX: ");
		return OUT_DISCARD;
	}

	rte_crypto_op_attach_sym_session(cop, task->sess);

	sym_cop->auth.digest.data = (unsigned char *)((unsigned char*)pip4 +
			ipv4_length - DIGEST_BYTE_LENGTH_SHA1);
	//sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr)); // FIXME
	sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
			sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr)
			+ sizeof(struct prox_esp_hdr));
	//sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;

	//sym_cop->cipher.iv.data = (uint8_t *)data + 8;
	//sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4;
	//sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;

#if 0
	rte_memcpy(rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET),
			aes_cbc_iv,
			CIPHER_IV_LENGTH_AES_CBC);
#else
	// Use the IV carried in the received ESP packet
	uint8_t *iv = (uint8_t *)(pip4 + 1) + sizeof(struct prox_esp_hdr);
	rte_memcpy(rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET),
			iv,
			CIPHER_IV_LENGTH_AES_CBC);
#endif
	sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) +
		sizeof(prox_rte_ipv4_hdr);
	sym_cop->auth.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) - 4 -
		CIPHER_IV_LENGTH_AES_CBC;

	sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) +
		sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) +
		CIPHER_IV_LENGTH_AES_CBC;
	sym_cop->cipher.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) -
		CIPHER_IV_LENGTH_AES_CBC - 28; // FIXME
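	/*
	 * Added note on the magic 28 above: assuming the packet layout sketched
	 * at the top of this file, 28 = 8 (ESP header) + 20 (SHA1 ICV), so the
	 * cipher region is the IP total length minus outer IP header, ESP
	 * header, IV and ICV, i.e. exactly the encrypted payload.
	 */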
	sym_cop->m_src = mbuf;

	return 0;
}
static inline void do_ipv4_swap(struct task_esp *task, struct rte_mbuf *mbuf)
{
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
			prox_rte_ether_hdr *);
	prox_rte_ether_addr src_mac = peth->s_addr;
	prox_rte_ether_addr dst_mac = peth->d_addr;
	uint32_t src_ip, dst_ip;

	prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
	src_ip = pip4->src_addr;
	dst_ip = pip4->dst_addr;

	//peth->s_addr = dst_mac;
	peth->d_addr = src_mac; // should be replaced by arp
	pip4->src_addr = dst_ip;
	pip4->dst_addr = src_ip;
	prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
}
static void init_task_esp_enc(struct task_base *tbase, struct task_args *targ)
{
	struct task_esp *task = (struct task_esp *)tbase;
	unsigned int session_size;
	char name[64];

	tbase->flags |= TBASE_FLAG_NEVER_FLUSH;

	uint8_t lcore_id = targ->lconf->id;

	task->handle_esp_finish = handle_enc_finish;
	task->handle_esp_ah = handle_esp_ah_enc;
	task->len = 0;
	task->pkts_in_flight = 0;

	sprintf(name, "core_%03u_crypto_pool", lcore_id);
	task->crypto_op_pool = rte_crypto_op_pool_create(name,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, targ->nb_mbuf, 128,
			MAXIMUM_IV_LENGTH, rte_socket_id());
	plog_info("rte_crypto_op_pool_create nb_elements =%d\n",
			targ->nb_mbuf);
	PROX_PANIC(task->crypto_op_pool == NULL,
			"Can't create ENC CRYPTO_OP_POOL\n");

	task->cdev_id = get_cdev_id();

	struct rte_cryptodev_config cdev_conf;
	cdev_conf.nb_queue_pairs = 2;
	cdev_conf.socket_id = rte_socket_id();
	rte_cryptodev_configure(task->cdev_id, &cdev_conf);

	session_size = rte_cryptodev_sym_get_private_session_size(
			task->cdev_id);
	plog_info("rte_cryptodev_sym_get_private_session_size=%d\n",
			session_size);
	sprintf(name, "core_%03u_session_pool", lcore_id);
	task->session_pool = rte_cryptodev_sym_session_pool_create(name,
			MAX_SESSIONS,
			session_size,
			POOL_CACHE_SIZE,
			0, rte_socket_id());
	PROX_PANIC(task->session_pool == NULL, "Failed rte_mempool_create\n");

	task->qp_id = 0;
	plog_info("enc: task->qp_id=%u\n", task->qp_id);
	struct prox_rte_cryptodev_qp_conf qp_conf;
	qp_conf.nb_descriptors = 2048;
	qp_conf.mp_session = task->session_pool;
	prox_rte_cryptodev_queue_pair_setup(task->cdev_id, task->qp_id,
			&qp_conf, rte_cryptodev_socket_id(task->cdev_id));

	int ret = rte_cryptodev_start(task->cdev_id);
	PROX_PANIC(ret < 0, "Failed to start device\n");

	//Setup Cipher Parameters
	struct rte_crypto_sym_xform cipher_xform = {0};
	struct rte_crypto_sym_xform auth_xform = {0};

	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	// cipher_xform.next = &auth_xform;
	cipher_xform.next = NULL; //CRYPTO_ONLY
	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xform.cipher.key.data = aes_cbc_key;
	cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
	cipher_xform.cipher.iv.offset = IV_OFFSET;
	cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;

	//Setup HMAC Parameters
	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xform.next = NULL;
	auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
	auth_xform.auth.key.data = hmac_sha1_key;
	auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
	auth_xform.auth.iv.offset = 0;
	auth_xform.auth.iv.length = 0;

	task->sess = rte_cryptodev_sym_session_create(task->cdev_id,
			&cipher_xform, task->session_pool);
	PROX_PANIC(task->sess == NULL, "Failed ENC sym_session_create\n");

	task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
	task->remote_ipv4 = rte_cpu_to_be_32(targ->remote_ipv4);
	//memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
	struct prox_port_cfg *port = find_reachable_port(targ);
	memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr));

	if (targ->flags & TASK_ARG_DST_MAC_SET) {
		memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac));
		plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n",
				MAC_BYTES(task->dst_mac.addr_bytes));
		//prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
		//rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
	}
}
static void init_task_esp_dec(struct task_base *tbase, struct task_args *targ)
{
	struct task_esp *task = (struct task_esp *)tbase;
	unsigned int session_size;
	char name[64];

	tbase->flags |= TBASE_FLAG_NEVER_FLUSH;

	uint8_t lcore_id = targ->lconf->id;

	task->handle_esp_finish = handle_dec_finish;
	task->handle_esp_ah = handle_esp_ah_dec;
	task->len = 0;
	task->pkts_in_flight = 0;

	sprintf(name, "core_%03u_crypto_pool", lcore_id);
	task->crypto_op_pool = rte_crypto_op_pool_create(name,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, targ->nb_mbuf, 128,
			MAXIMUM_IV_LENGTH, rte_socket_id());
	PROX_PANIC(task->crypto_op_pool == NULL,
			"Can't create DEC CRYPTO_OP_POOL\n");

	task->cdev_id = get_cdev_id();

	struct rte_cryptodev_config cdev_conf;
	cdev_conf.nb_queue_pairs = 2;
	cdev_conf.socket_id = rte_socket_id();
	rte_cryptodev_configure(task->cdev_id, &cdev_conf);

	session_size = rte_cryptodev_sym_get_private_session_size(
			task->cdev_id);
	plog_info("rte_cryptodev_sym_get_private_session_size=%d\n",
			session_size);
	sprintf(name, "core_%03u_session_pool", lcore_id);
	task->session_pool = rte_cryptodev_sym_session_pool_create(name,
			MAX_SESSIONS,
			session_size,
			POOL_CACHE_SIZE,
			0, rte_socket_id());
	PROX_PANIC(task->session_pool == NULL, "Failed rte_mempool_create\n");

	task->qp_id = 0;
	plog_info("dec: task->qp_id=%u\n", task->qp_id);
	struct prox_rte_cryptodev_qp_conf qp_conf;
	qp_conf.nb_descriptors = 2048;
	qp_conf.mp_session = task->session_pool;
	prox_rte_cryptodev_queue_pair_setup(task->cdev_id, task->qp_id,
			&qp_conf, rte_cryptodev_socket_id(task->cdev_id));

	int ret = rte_cryptodev_start(task->cdev_id);
	PROX_PANIC(ret < 0, "Failed to start device\n");

	//Setup Cipher Parameters
	struct rte_crypto_sym_xform cipher_xform = {0};
	struct rte_crypto_sym_xform auth_xform = {0};

	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform.next = NULL;
	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	cipher_xform.cipher.key.data = aes_cbc_key;
	cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
	cipher_xform.cipher.iv.offset = IV_OFFSET;
	cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;

	//Setup HMAC Parameters
	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xform.next = &cipher_xform;
	auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
	auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
	auth_xform.auth.key.data = hmac_sha1_key;
	auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
	auth_xform.auth.iv.offset = 0;
	auth_xform.auth.iv.length = 0;

	task->sess = rte_cryptodev_sym_session_create(task->cdev_id, &cipher_xform,
			task->session_pool);
	PROX_PANIC(task->sess == NULL, "Failed DEC sym_session_create\n");

	task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
	task->remote_ipv4 = rte_cpu_to_be_32(targ->remote_ipv4);
	//memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
	struct prox_port_cfg *port = find_reachable_port(targ);
	memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr));

	if (targ->flags & TASK_ARG_DST_MAC_SET) {
		memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac));
		plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n",
				MAC_BYTES(task->dst_mac.addr_bytes));
		//prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
		//rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
	}
}
static int crypto_send_burst(struct task_esp *task, uint16_t n)
{
	uint8_t out[MAX_PKT_BURST];
	struct rte_mbuf *mbufs[MAX_PKT_BURST];
	unsigned ret, i;

	ret = rte_cryptodev_enqueue_burst(task->cdev_id,
			task->qp_id, task->ops_burst, n);
	task->pkts_in_flight += ret;
	if (unlikely(ret < n)) {
		// Not all ops could be enqueued: free the leftovers and drop their mbufs
		for (i = 0; i < (n - ret); i++) {
			mbufs[i] = task->ops_burst[ret + i]->sym->m_src;
			out[i] = OUT_DISCARD;
			rte_crypto_op_free(task->ops_burst[ret + i]);
		}
		return task->base.tx_pkt(&task->base, mbufs, i, out);
	}
	return 0;
}
static int handle_esp_bulk(struct task_base *tbase, struct rte_mbuf **mbufs,
		uint16_t n_pkts)
{
	struct task_esp *task = (struct task_esp *)tbase;
	uint8_t out[MAX_PKT_BURST];
	uint8_t result;
	uint16_t nb_deq = 0, j, idx = 0;
	struct rte_mbuf *drop_mbufs[MAX_PKT_BURST];
	struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
	int nbr_tx_pkt = 0;

	if (likely(n_pkts != 0)) {
		if (rte_crypto_op_bulk_alloc(task->crypto_op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				ops_burst, n_pkts) != n_pkts) {
			plog_info("Failed to allocate crypto operations, discarding %d packets\n",
					n_pkts);
			for (j = 0; j < n_pkts; j++) {
				out[j] = OUT_DISCARD;
			}
			nbr_tx_pkt += task->base.tx_pkt(&task->base, mbufs, n_pkts,
					out);
		}
		else {
			for (j = 0; j < n_pkts; j++) {
				result = task->handle_esp_ah(task, mbufs[j],
						ops_burst[j]);
				if (result == 0) {
					task->ops_burst[task->len] = ops_burst[j];
					task->len++;
					/* enough ops to be sent */
					if (task->len == MAX_PKT_BURST) {
						nbr_tx_pkt += crypto_send_burst(task,
								(uint16_t) MAX_PKT_BURST);
						task->len = 0;
					}
				}
				else {
					drop_mbufs[idx] = mbufs[j];
					out[idx] = OUT_DISCARD;
					idx++;
					rte_crypto_op_free(ops_burst[j]);
					plog_info("Failed handle_esp_ah for 1 packet\n");
				}
			}
			if (idx) nbr_tx_pkt += task->base.tx_pkt(&task->base,
					drop_mbufs, idx, out);
		}
	} else if (task->len) {
		// No packets were received on the rx queue, but this handle
		// function was called anyway since some packets were not yet
		// enqueued. Hence they get enqueued here in order to minimize
		// latency or in case no new packets will arrive.
		nbr_tx_pkt += crypto_send_burst(task, task->len);
		task->len = 0;
	}

	if (task->pkts_in_flight) {
		do {
			nb_deq = rte_cryptodev_dequeue_burst(task->cdev_id,
					task->qp_id, ops_burst, MAX_PKT_BURST);
			task->pkts_in_flight -= nb_deq;
			for (j = 0; j < nb_deq; j++) {
				mbufs[j] = ops_burst[j]->sym->m_src;
				out[j] = task->handle_esp_finish(task, mbufs[j],
						ops_burst[j]->status);
				rte_crypto_op_free(ops_burst[j]);
			}
			nbr_tx_pkt += task->base.tx_pkt(&task->base, mbufs, nb_deq,
					out);
		} while (nb_deq == MAX_PKT_BURST);
	}

	return nbr_tx_pkt;
}
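/*
 * Added note: TASK_FEATURE_ZERO_RX makes PROX call handle_esp_bulk() even
 * when no packets were received, which is what allows the pending-ops flush
 * and the dequeue loop above to run on otherwise idle iterations.
 */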
struct task_init task_init_esp_enc = {
	.mode_str = "esp_enc",
	.init = init_task_esp_enc,
	.handle = handle_esp_bulk,
	.flag_features = TASK_FEATURE_ZERO_RX,
	.size = sizeof(struct task_esp),
};

struct task_init task_init_esp_dec = {
	.mode_str = "esp_dec",
	.init = init_task_esp_dec,
	.handle = handle_esp_bulk,
	.flag_features = TASK_FEATURE_ZERO_RX,
	.size = sizeof(struct task_esp),
};

__attribute__((constructor)) static void reg_task_esp_enc(void)
{
	reg_task(&task_init_esp_enc);
}

__attribute__((constructor)) static void reg_task_esp_dec(void)
{
	reg_task(&task_init_esp_dec);
}