2 // Copyright (c) 2010-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
18 * Non compatible implementation of RFC3686(CTR-AES 128 bit key), RFC4303 (tunnel ipv4 ESP)
20 * 1. Crypto not safe!!!!! (underlying AES-CTR implementation is OK, but ESP implementation is lousy)
21 * 2. Only ESP/tunnel/ipv4/AES-CTR
22 * 3. Not fully implemented
23 * 4. No proper key / SADB
24 * So performance demonstrator only
27 #include "task_init.h"
28 #include "task_base.h"
33 #include "prox_cksum.h"
35 #include <rte_cryptodev.h>
36 #include <rte_cryptodev_pmd.h>
37 #include "prox_port_cfg.h"
// Linux-kernel-style shorthand integer aliases used throughout this file.
39 typedef unsigned int u32;
40 typedef unsigned char u8;
// Converts a length in bits to a length in bytes.
// NOTE(review): the argument is not parenthesized ((x)/8), so an expression
// argument such as BYTE_LENGTH(a+b) would mis-expand; all visible call sites
// pass a plain constant, so current behavior is unaffected.
42 #define BYTE_LENGTH(x) (x/8)
// HMAC-SHA1 digest size: 160 bits = 20 bytes.
43 #define DIGEST_BYTE_LENGTH_SHA1 (BYTE_LENGTH(160))
45 //#define CIPHER_KEY_LENGTH_AES_CBC (32)
// AES-CBC key size in bytes; 16 selects AES-128 (32 would select AES-256).
46 #define CIPHER_KEY_LENGTH_AES_CBC (16)//==TEST
// AES-CBC IV size equals the AES block size: 16 bytes.
47 #define CIPHER_IV_LENGTH_AES_CBC 16
// Returns a pointer to the symmetric-crypto portion of a crypto operation.
// NOTE(review): the function body falls on lines elided from this excerpt;
// on DPDK 17.05 this presumably returns cop->sym — confirm against the full file.
49 static inline void *get_sym_cop(struct rte_crypto_op *cop)
51 //return (cop + 1);//makes no sense on dpdk_17.05.2; TODO: doublecheck
// Per-task state for the ESP encrypt (tunnel encapsulation) task.
// NOTE(review): the struct header and several members referenced later
// (crypto_dev_id, key, iv, local_ipv4, remote_ipv4, ipaddr) fall on lines
// elided from this excerpt.
56 struct task_base base; // must stay first: PROX casts task_base* <-> task_esp_enc*
60 struct ether_addr local_mac; // local port MAC, written as source MAC of encrypted packets
64 struct rte_cryptodev_sym_session *sess; // single cipher+auth session (one hard-coded SA for all traffic)
65 struct rte_crypto_sym_xform cipher_xform; // AES-CBC encrypt transform (head of the xform chain)
66 struct rte_crypto_sym_xform auth_xform; // HMAC-SHA1 generate transform (chained after cipher)
67 struct rte_crypto_op *ops_burst[MAX_PKT_BURST]; // crypto ops allocated once at init and reused every burst
// Per-task state for the ESP decrypt (tunnel decapsulation) task.
// NOTE(review): the struct header and several members referenced later
// (crypto_dev_id, key, iv, local_ipv4, ipaddr) fall on lines elided from
// this excerpt.
71 struct task_base base; // must stay first: PROX casts task_base* <-> task_esp_dec*
75 struct ether_addr local_mac; // local port MAC, written as source MAC of decrypted packets
78 struct rte_cryptodev_sym_session *sess; // single auth+cipher session (one hard-coded SA for all traffic)
79 struct rte_crypto_sym_xform cipher_xform; // AES-CBC decrypt transform (chained after auth)
80 struct rte_crypto_sym_xform auth_xform; // HMAC-SHA1 verify transform (head of the xform chain)
81 struct rte_crypto_op *ops_burst[MAX_PKT_BURST]; // crypto ops allocated once at init and reused every burst
// Shared cryptodev configuration state, one instance for the whole process
// (name kept from the DPDK crypto test suite this code was derived from).
// NOTE(review): the closing brace of this struct is elided from this excerpt.
84 struct crypto_testsuite_params {
85 struct rte_mempool *mbuf_ol_pool_enc; // crypto-op pool for the encrypt task
86 struct rte_mempool *mbuf_ol_pool_dec; // crypto-op pool for the decrypt task
88 struct rte_cryptodev_config conf; // device-level config (queue pairs, socket, session pool)
89 struct rte_cryptodev_qp_conf qp_conf; // per-queue-pair config (descriptor ring size)
// Single shared instance of the cryptodev configuration above.
92 static struct crypto_testsuite_params testsuite_params = { NULL };
// Preferred PMD for this demo: the AES-NI multi-buffer software crypto PMD.
93 static enum rte_cryptodev_type gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_AESNI_MB_PMD;
// Hard-coded 20-byte HMAC-SHA1 key. Demo/test material only — no SADB or
// key management exists (see the warning in the file header).
95 static uint8_t hmac_sha1_key[] = {
96 0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
97 0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
98 0xDE, 0xF4, 0xDE, 0xAD };
// Hard-coded AES key. 32 bytes are defined, but only the first
// CIPHER_KEY_LENGTH_AES_CBC (16) bytes are used by the xforms below.
100 static uint8_t aes_cbc_key[] = {
101 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
102 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A,
103 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
104 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A };
// Fixed IV copied into every outgoing packet (see handle_esp_ah_enc).
// Reusing one IV for all packets is cryptographically unsafe — consistent
// with the "Crypto not safe" warning in the file header.
106 static uint8_t aes_cbc_iv[] = {
107 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
108 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A };
// One-time creation and configuration of the AESNI-MB virtual cryptodev,
// shared by the encrypt and decrypt tasks.
// NOTE(review): guarded only by a plain static flag — no locking; assumes
// task init runs single-threaded. Several closing braces are elided from
// this excerpt, so it is unclear whether the configuration below is inside
// or outside the "first call" branch — confirm against the full file.
110 static void init_task_esp_common(void)
112 static int vdev_initialized = 0;
113 struct crypto_testsuite_params *ts_params = &testsuite_params;
115 if (!vdev_initialized) {
// Create the software crypto vdev and panic if no device shows up.
116 rte_vdev_init(RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), NULL);
117 int nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_MB_PMD);
118 PROX_PANIC(nb_devs < 1, "No crypto devices found?\n");
119 vdev_initialized = 1;
// Two queue pairs: qp 0 is used by the encrypt task, qp 1 by decrypt.
121 ts_params->conf.nb_queue_pairs = 2;
122 ts_params->conf.socket_id = SOCKET_ID_ANY;
123 ts_params->conf.session_mp.nb_objs = 2048;
// NOTE(review): 4096 is immediately overwritten with 128 below, so the
// larger value never takes effect.
124 ts_params->qp_conf.nb_descriptors = 4096;
126 /*Now reconfigure queues to size we actually want to use in this testsuite.*/
127 ts_params->qp_conf.nb_descriptors = 128;
// NOTE(review): device id hard-coded to 0 and the return value of
// rte_cryptodev_configure() is ignored.
128 rte_cryptodev_configure(0, &ts_params->conf);
129 //rte_cryptodev_start(task->crypto_dev_id);
// Initializes the ESP encrypt task: creates its crypto-op pool, sets up
// queue pair 0 on cryptodev 0, builds the AES-CBC-encrypt -> HMAC-SHA1-generate
// xform chain, creates the session, and pre-allocates a burst of crypto ops
// that the task reuses for its whole lifetime.
// NOTE(review): nb_devs, valid_dev_id and info are declared but never used
// in the visible lines.
132 static void init_task_esp_enc(struct task_base *tbase, struct task_args *targ)
134 int i, nb_devs, valid_dev_id = 0;
136 struct rte_cryptodev_info info;
137 struct crypto_testsuite_params *ts_params = &testsuite_params;
139 init_task_esp_common();
140 tbase->flags |= FLAG_NEVER_FLUSH;
// Pool of 2M symmetric crypto ops, per-lcore cache of 128.
// NOTE(review): the trailing pool-create arguments (private size, socket)
// fall on an elided line.
142 ts_params->mbuf_ol_pool_enc = rte_crypto_op_pool_create("crypto_op_pool_enc",
143 RTE_CRYPTO_OP_TYPE_SYMMETRIC, (2*1024*1024), 128, 0,
145 PROX_PANIC(ts_params->mbuf_ol_pool_enc == NULL, "Can't create ENC CRYPTO_OP_POOL\n");
147 struct task_esp_enc *task = (struct task_esp_enc *)tbase;
// NOTE(review): device id hard-coded; no per-task device selection.
148 task->crypto_dev_id = 0;
151 * Since we can't free and re-allocate queue memory always set the queues
152 * on this device up to max size first so enough memory is allocated for
153 * any later re-configures needed by other tests
// Encrypt task owns queue pair 0. Return value is ignored (review note).
156 rte_cryptodev_queue_pair_setup(task->crypto_dev_id, 0,
157 &ts_params->qp_conf, rte_cryptodev_socket_id(task->crypto_dev_id));
159 struct rte_cryptodev *dev;
160 dev = rte_cryptodev_pmd_get_dev(task->crypto_dev_id);
161 PROX_PANIC(dev->attached != RTE_CRYPTODEV_ATTACHED, "No ENC cryptodev attached\n");
163 /* Setup Cipher Parameters */
// Chain: cipher first, then auth — encrypt-then-MAC for outbound ESP.
164 task->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
165 task->cipher_xform.next = &(task->auth_xform);
167 task->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
168 task->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
169 task->cipher_xform.cipher.key.data = aes_cbc_key;
170 task->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
172 /* Setup HMAC Parameters */
173 task->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
174 task->auth_xform.next = NULL;
175 task->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
176 task->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
// Key length reuses the digest length constant (20 bytes), matching the
// 20-byte hmac_sha1_key above.
177 task->auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
178 task->auth_xform.auth.key.data = hmac_sha1_key;
179 task->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
// Session built from the head of the chain (cipher -> auth).
181 task->sess = rte_cryptodev_sym_session_create(task->crypto_dev_id, &task->cipher_xform);
182 PROX_PANIC(task->sess == NULL, "Failed to create ENC session\n");
184 //TODO: doublecheck task->ops_burst lifecycle!
// Ops are allocated once here and never freed — reused by every burst.
185 if (rte_crypto_op_bulk_alloc(ts_params->mbuf_ol_pool_enc,
186 RTE_CRYPTO_OP_TYPE_SYMMETRIC,
187 task->ops_burst, MAX_PKT_BURST) != MAX_PKT_BURST) {
188 PROX_PANIC(1, "Failed to allocate ENC crypto operations\n");
190 //to clean up after rte_crypto_op_bulk_alloc:
191 //for (j = 0; j < MAX_PKT_BURST; j++) {
192 // rte_crypto_op_free(task->ops_burst[j]);
195 // Read config file with SAs
// Tunnel endpoints come from the task config, stored in network byte order.
196 task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
197 task->remote_ipv4 = rte_cpu_to_be_32(targ->remote_ipv4);
198 //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(struct ether_addr));
199 struct prox_port_cfg *port = find_reachable_port(targ);
200 memcpy(&task->local_mac, &port->eth_addr, sizeof(struct ether_addr));
// NOTE(review): task->key / task->iv are filled with a fixed pattern here
// but the session above uses aes_cbc_key / aes_cbc_iv — these fields appear
// unused by the visible crypto path; confirm against the full file.
202 for (i = 0; i < 16; i++) task->key[i] = i+2;
203 for (i = 0; i < 16; i++) task->iv[i] = i;
// Initializes the ESP decrypt task: mirror of init_task_esp_enc but with the
// xform chain reversed (HMAC-SHA1 verify -> AES-CBC decrypt) and queue pair 1.
206 static void init_task_esp_dec(struct task_base *tbase, struct task_args *targ)
209 struct crypto_testsuite_params *ts_params = &testsuite_params;
210 init_task_esp_common();
212 tbase->flags |= FLAG_NEVER_FLUSH;
// Separate 2M-op pool for the decrypt direction.
213 ts_params->mbuf_ol_pool_dec = rte_crypto_op_pool_create("crypto_op_pool_dec",
214 RTE_CRYPTO_OP_TYPE_SYMMETRIC, (2*1024*1024), 128, 0,
216 PROX_PANIC(ts_params->mbuf_ol_pool_dec == NULL, "Can't create DEC CRYPTO_OP_POOL\n");
218 struct task_esp_dec *task = (struct task_esp_dec *)tbase;
// NOTE(review): sess_dec is declared but never used in the visible lines.
220 static struct rte_cryptodev_session *sess_dec = NULL;
221 // Read config file with SAs
222 task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
// Decrypt transform; chained after the auth (verify-then-decrypt).
224 task->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
225 task->cipher_xform.next = NULL;
226 task->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
227 task->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
228 task->cipher_xform.cipher.key.data = aes_cbc_key;
229 task->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
231 /* Setup HMAC Parameters */
// Auth is the head of the chain for inbound: verify ICV, then decrypt.
232 task->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
233 task->auth_xform.next = &task->cipher_xform;
234 task->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
235 task->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
236 task->auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
237 task->auth_xform.auth.key.data = hmac_sha1_key;
238 task->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
// Decrypt task owns queue pair 1. Return value is ignored (review note).
240 rte_cryptodev_queue_pair_setup(task->crypto_dev_id, 1, &ts_params->qp_conf, rte_cryptodev_socket_id(task->crypto_dev_id));
242 struct rte_cryptodev *dev;
243 dev = rte_cryptodev_pmd_get_dev(task->crypto_dev_id);
244 PROX_PANIC(dev->attached != RTE_CRYPTODEV_ATTACHED, "No DEC cryptodev attached\n");
// NOTE(review): set AFTER the queue pair setup above, so this assignment
// does not affect qp 1's descriptor count at setup time.
246 ts_params->qp_conf.nb_descriptors = 128;
// Session built from the head of the chain (auth -> cipher).
248 task->sess = rte_cryptodev_sym_session_create(task->crypto_dev_id, &task->auth_xform);
249 PROX_PANIC(task->sess == NULL, "Failed to create DEC session\n");
251 if (rte_crypto_op_bulk_alloc(ts_params->mbuf_ol_pool_dec,
252 RTE_CRYPTO_OP_TYPE_SYMMETRIC,
253 task->ops_burst, MAX_PKT_BURST) != MAX_PKT_BURST) {
254 PROX_PANIC(1, "Failed to allocate DEC crypto operations\n");
256 //to clean up after rte_crypto_op_bulk_alloc:
257 //for (int j = 0; j < MAX_PKT_BURST; j++) {
258 // rte_crypto_op_free(task->ops_burst[j]);
261 struct prox_port_cfg *port = find_reachable_port(targ);
262 memcpy(&task->local_mac, &port->eth_addr, sizeof(struct ether_addr));
// NOTE(review): `i` is used here but its declaration falls on an elided
// line; as in the enc init, task->key / task->iv appear unused by the
// visible crypto path — confirm against the full file.
265 for (i = 0; i < 16; i++) task->key[i] = i+2;
266 for (i = 0; i < 16; i++) task->iv[i] = i;
// Encapsulates one plain IPv4 packet into tunnel-mode ESP in place:
// prepends a new outer IPv4 header plus SPI(4) + SN(4) + IV(16), appends
// CBC padding + pad-length + next-header and room for the 20-byte ICV, then
// fills in the crypto op's cipher/auth offsets for the cryptodev to process.
// Returns an out[] port value for tx_pkt (drop paths are on elided lines).
269 static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
272 struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
273 struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
274 uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
275 struct rte_crypto_sym_op *sym_cop = get_sym_cop(cop);
// Only IPv4 is supported; anything else is logged (drop on elided lines).
277 if (unlikely((pip4->version_ihl >> 4) != 4)) {
278 plog_info("Received non IPv4 packet at esp enc %i\n", pip4->version_ihl);
279 plogdx_info(mbuf, "ENC RX: ");
// Decrement TTL; the TTL==0 drop path is on elided lines.
282 if (pip4->time_to_live) {
283 pip4->time_to_live--;
286 plog_info("TTL = 0 => Dropping\n");
290 // Remove padding if any (we don't want to encapsulate garbage at end of IPv4 packet)
291 int l1 = rte_pktmbuf_pkt_len(mbuf);
292 int padding = l1 - (ipv4_length + sizeof(struct ether_hdr));
293 if (unlikely(padding > 0)) {
294 rte_pktmbuf_trim(mbuf, padding);
297 l1 = rte_pktmbuf_pkt_len(mbuf);
298 int encrypt_len = l1 - sizeof(struct ether_hdr) + 2; // According to RFC4303 table 1, encrypt len is ip+tfc_pad(o)+pad+pad len(1) + next header(1)
// Round the encrypted payload up to the 16-byte AES block size.
300 if ((encrypt_len & 0xf) != 0)
302 padding = 16 - (encrypt_len % 16);
303 encrypt_len += padding;
306 // Encapsulate, crypt in a separate buffer
307 const int extra_space = sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC; // + new IP header, SPI, SN, IV
// Save fields of the inner packet before the headroom grows over them.
308 struct ether_addr src_mac = peth->s_addr;
309 struct ether_addr dst_mac = peth->d_addr;
310 uint32_t src_addr = pip4->src_addr;
311 uint32_t dst_addr = pip4->dst_addr;
312 uint8_t version_ihl = pip4->version_ihl;
// Grow front (outer hdr + ESP header) and back (trailer + ICV), then
// re-read the mbuf start. NOTE(review): prepend/append return values are
// not NULL-checked in the visible lines.
314 peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, extra_space); // encap + prefix
315 peth = (struct ether_hdr *)rte_pktmbuf_append(mbuf, 0 + 1 + 1 + padding + 4 + DIGEST_BYTE_LENGTH_SHA1); // padding + pad_len + next_head + seqn + ICV pad + ICV
316 peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
317 l1 = rte_pktmbuf_pkt_len(mbuf);
318 peth->ether_type = ETYPE_IPv4;
// Two MAC-rewrite variants; the #if/#else lines are elided from this excerpt.
321 ether_addr_copy(&dst_mac, &peth->s_addr);
322 ether_addr_copy(&src_mac, &peth->d_addr);
324 ether_addr_copy(&task->local_mac, &peth->s_addr);
325 ether_addr_copy(&dst_mac, &peth->d_addr);//IS: dstmac should be rewritten by arp
// Build the outer (tunnel) IPv4 header.
328 pip4 = (struct ipv4_hdr *)(peth + 1);
329 pip4->src_addr = task->local_ipv4;
330 pip4->dst_addr = task->remote_ipv4;
331 pip4->next_proto_id = IPPROTO_ESP; // 50 for ESP, ip in ip next proto trailer
332 pip4->version_ihl = version_ihl; // 20 bytes, ipv4
333 pip4->total_length = rte_cpu_to_be_16(ipv4_length + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC + padding + 1 + 1 + DIGEST_BYTE_LENGTH_SHA1); // iphdr+SPI+SN+IV+payload+padding+padlen+next header + crc + auth
334 pip4->packet_id = 0x0101;
335 pip4->type_of_service = 0;
336 pip4->time_to_live = 64;
337 pip4->fragment_offset = rte_cpu_to_be_16(0x4000);
338 pip4->hdr_checksum = 0;
339 prox_ip_cksum_sw(pip4);
341 //find the SA when there will be more than one
342 if (task->ipaddr == pip4->src_addr)
// Fill the ESP header: SPI and sequence number are hard-coded placeholders.
// NOTE(review): `data` is used below but its declaration falls on an
// elided line (presumably u8 *data).
345 data = (u8*)(pip4 + 1);
346 *((u32*) data) = 0x2016; // FIXME SPI
347 *((u32*) data + 1) = 0x2; // FIXME SN
// Write the ESP trailer: pad length and next-header (4 = IPv4-in-IPv4).
348 u8 *padl = (u8*)data + (8 + encrypt_len - 2 + CIPHER_IV_LENGTH_AES_CBC); // No ESN yet. (-2 means NH is crypted)
349 //padl += CIPHER_IV_LENGTH_AES_CBC;
351 *(padl + 1) = 4; // ipv4 in 4
353 //one key for them all for now
354 rte_crypto_op_attach_sym_session(cop, task->sess);
// ICV goes right after the ciphertext; virtual and physical addresses both
// point at offset eth + outer-ip + SPI/SN(8) + IV + encrypt_len.
356 sym_cop->auth.digest.data = data + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len;
357 sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, (sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
358 sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
// IV sits right after SPI+SN; the same static IV is copied into every packet.
360 sym_cop->cipher.iv.data = data + 8;
361 sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4;
362 sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
364 rte_memcpy(sym_cop->cipher.iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
// Cipher covers the payload after the IV; auth covers SPI+SN+IV+ciphertext.
366 sym_cop->cipher.data.offset = sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC;
367 sym_cop->cipher.data.length = encrypt_len;
369 sym_cop->auth.data.offset = sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr);
370 sym_cop->auth.data.length = 4 + 4 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len ;// + 4;// FIXME
372 sym_cop->m_src = mbuf;
373 //cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
374 //cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
// Prepares the crypto op for one inbound ESP packet: attaches the
// verify+decrypt session and fills in digest/IV locations and cipher/auth
// regions. The actual decapsulation happens later in
// handle_esp_ah_dec_finish() once the cryptodev reports success.
379 static inline uint8_t handle_esp_ah_dec(struct task_esp_dec *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
381 struct rte_crypto_sym_op *sym_cop = get_sym_cop(cop);
382 struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
383 struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
384 uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
385 u8 *data = (u8*)(pip4 + 1);
// Non-ESP packets are logged (drop path on elided lines).
387 if (pip4->next_proto_id != IPPROTO_ESP)
389 plog_info("Received non ESP packet on esp dec\n");
390 plogdx_info(mbuf, "DEC RX: ");
// SA lookup placeholder — single hard-coded SA for now.
393 if (task->ipaddr == pip4->src_addr)
397 rte_crypto_op_attach_sym_session(cop, task->sess);
// ICV is the last 20 bytes of the IPv4 payload.
// NOTE(review): the phys_addr offset (eth + ip + 8) does not match the
// virtual digest.data location at the end of the packet — already flagged
// FIXME by the author; confirm against the working upstream version.
399 sym_cop->auth.digest.data = (unsigned char *)((unsigned char*)pip4 + ipv4_length - DIGEST_BYTE_LENGTH_SHA1);
400 sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4); // FIXME
401 sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
// IV follows the 8-byte SPI+SN in the ESP header.
403 sym_cop->cipher.iv.data = (uint8_t *)data + 8;
404 sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4;
405 sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
// Auth region: SPI+SN+IV+ciphertext (total_length minus IP header minus
// ICV; the -4-IV expression happens to equal -DIGEST_BYTE_LENGTH_SHA1).
407 sym_cop->auth.data.offset = sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr);
408 sym_cop->auth.data.length = ipv4_length - sizeof(struct ipv4_hdr) - 4 - CIPHER_IV_LENGTH_AES_CBC;
// Cipher region: ciphertext only (after SPI/SN/IV, before ICV); the -28
// equals SPI+SN(8) + ICV(20), hence the author's FIXME about clarity.
410 sym_cop->cipher.data.offset = sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC;
411 sym_cop->cipher.data.length = ipv4_length - sizeof(struct ipv4_hdr) - CIPHER_IV_LENGTH_AES_CBC - 28; // FIXME
413 sym_cop->m_src = mbuf;
// Turns a decrypted packet around for transmission back toward its sender:
// destination MAC/IP take the original source values, and the source MAC is
// set to the local port's address (a real deployment would resolve the
// destination MAC via ARP, as the inline comment notes).
417 static inline void do_ipv4_swap(struct task_esp_dec *task, struct rte_mbuf *mbuf)
419 struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
420 struct ether_addr src_mac = peth->s_addr;
421 struct ether_addr dst_mac = peth->d_addr;
422 uint32_t src_ip, dst_ip;
424 struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
425 src_ip = pip4->src_addr;
426 dst_ip = pip4->dst_addr;
// NOTE(review): dst_mac and dst_ip are saved but never written back —
// only the src->dst direction is swapped in the visible lines.
428 //peth->s_addr = dst_mac;
429 peth->d_addr = src_mac;//should be replaced by arp
430 //pip4->src_addr = dst_ip;
431 pip4->dst_addr = src_ip;
432 ether_addr_copy(&task->local_mac, &peth->s_addr);
// Final decapsulation after the cryptodev verified and decrypted the packet:
// copies the inner IPv4 header and payload forward over the outer header and
// ESP fields, trims the trailer/ICV, swaps addresses for the return path,
// and recomputes the IPv4 checksum.
435 static inline uint8_t handle_esp_ah_dec_finish(struct task_esp_dec *task, struct rte_mbuf *mbuf)
437 struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
// Step 1: move the inner IPv4 header (located after outer-ip + SPI/SN(8)
// + IV) to directly after the Ethernet header.
438 rte_memcpy(((u8*)peth) + sizeof (struct ether_hdr), ((u8*)peth) + sizeof (struct ether_hdr) +
439 + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, sizeof(struct ipv4_hdr));// next hdr, padding
// pip4 now points at the relocated inner header.
440 struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
// Sanity check: decryption should have yielded an IPv4 packet
// (drop path on elided lines).
442 if (unlikely((pip4->version_ihl >> 4) != 4)) {
443 plog_info("non IPv4 packet after esp dec %i\n", pip4->version_ihl);
444 plogdx_info(mbuf, "DEC TX: ");
// Decrement TTL; TTL==0 drop path is on elided lines.
447 if (pip4->time_to_live) {
448 pip4->time_to_live--;
451 plog_info("TTL = 0 => Dropping\n");
// Step 2: move the inner payload (still at its original location, after
// the not-yet-overwritten inner header) up behind the relocated header.
// ipv4_length here is the INNER packet's total length.
454 uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
455 rte_memcpy(((u8*)peth) + sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr),
456 ((u8*)peth) + sizeof (struct ether_hdr) +
457 + 2 * sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, ipv4_length - sizeof(struct ipv4_hdr));
// Step 3: drop the leftover ESP trailer, padding and ICV from the tail.
459 int len = rte_pktmbuf_pkt_len(mbuf);
460 rte_pktmbuf_trim(mbuf, len - sizeof (struct ether_hdr) - ipv4_length);
462 do_ipv4_swap(task, mbuf);
// TTL changed above, so the checksum must be recomputed.
464 pip4->hdr_checksum = 0;
465 prox_ip_cksum_sw(pip4);
466 // one key for them all for now
468 // struct crypto_aes_ctx ctx;
469 // ctx.iv = (u8*)&iv_onstack;
470 // *((u32*)ctx.iv) = *((u32*)data + 2);
471 // aes_set_key(&ctx, task->key, 16);//
473 // result = ctr_crypt(&ctx, dest, data + 12, len);//
474 // memcpy(pip4, dest, len);
// Burst handler for the encrypt task: encapsulates every packet, enqueues
// the resulting crypto ops on queue pair 0, then busy-waits dequeuing until
// all of them come back before transmitting. Synchronous per burst — no
// pipelining across bursts.
479 static int handle_esp_enc_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
481 struct task_esp_enc *task = (struct task_esp_enc *)tbase;
// NOTE(review): ts_params appears unused in the visible lines of this function.
482 struct crypto_testsuite_params *ts_params = &testsuite_params;
483 uint8_t out[MAX_PKT_BURST];
484 uint16_t i = 0, nb_rx = 0, nb_enc=0, j = 0;
// NOTE(review): this loop-scoped j shadows the outer j declared above.
486 for (uint16_t j = 0; j < n_pkts; ++j) {
487 out[j] = handle_esp_ah_enc(task, mbufs[j], task->ops_burst[nb_enc]);
// Only non-discarded packets get a crypto op (nb_enc++ on elided lines).
488 if (out[j] != OUT_DISCARD)
// Error is only logged; recovery/drop handling is on elided lines.
492 if (rte_cryptodev_enqueue_burst(task->crypto_dev_id, 0, task->ops_burst, nb_enc) != nb_enc) {
493 plog_info("Error enc enqueue_burst\n");
497 //do not call rte_cryptodev_dequeue_burst() on already dequeued packets
498 //otherwise handle_completed_jobs() screws up the content of the ops_burst array!
// Spin until every enqueued op has been dequeued, writing each result back
// into its original slot of ops_burst (the do/while braces are elided).
500 nb_rx = rte_cryptodev_dequeue_burst(
501 task->crypto_dev_id, 0,// FIXME AK
502 task->ops_burst+i, nb_enc-i);
504 } while (i < nb_enc);
506 return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
// Burst handler for the decrypt task: prepares a crypto op per packet,
// enqueues on queue pair 1, busy-waits for completion, then finishes
// decapsulation for every op that the device reports as verified.
509 static int handle_esp_dec_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
511 struct task_esp_dec *task = (struct task_esp_dec *)tbase;
// NOTE(review): ts_params appears unused in the visible lines of this function.
512 struct crypto_testsuite_params *ts_params = &testsuite_params;
513 uint8_t out[MAX_PKT_BURST];
514 uint16_t j, nb_dec=0, nb_rx=0;
516 for (j = 0; j < n_pkts; ++j) {
517 out[j] = handle_esp_ah_dec(task, mbufs[j], task->ops_burst[nb_dec]);
// Only non-discarded packets get a crypto op (nb_dec++ on elided lines).
518 if (out[j] != OUT_DISCARD)
// Error is only logged; recovery/drop handling is on elided lines.
522 if (rte_cryptodev_enqueue_burst(task->crypto_dev_id, 1, task->ops_burst, nb_dec) != nb_dec) {
523 plog_info("Error dec enqueue_burst\n");
// Spin until all enqueued ops return. NOTE(review): j is reused as the
// dequeue cursor here; its reset (presumably j = 0) is on an elided line.
529 nb_rx = rte_cryptodev_dequeue_burst(task->crypto_dev_id, 1,// FIXME AK
530 task->ops_burst+j, nb_dec-j);
532 } while (j < nb_dec);
// Post-process results: failed verifications are only logged — the packet
// is still forwarded (acknowledged TODO below).
534 for (j = 0; j < nb_dec; ++j) {
535 if (task->ops_burst[j]->status != RTE_CRYPTO_OP_STATUS_SUCCESS){
536 plog_info("err: task->ops_burst[%d].status=%d\n", j, task->ops_burst[j]->status);
537 //!!!TODO!!! find mbuf and discard it!!!
538 //for now just send it further
539 //plogdx_info(mbufs[j], "RX: ");
541 if (task->ops_burst[j]->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
542 struct rte_mbuf *mbuf = task->ops_burst[j]->sym->m_src;
543 handle_esp_ah_dec_finish(task, mbuf);//TODO set out[j] properly
547 return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
// PROX task descriptor for the "esp_enc" mode: binds the init and burst
// handlers and sizes the per-task state and mbufs.
// NOTE(review): the closing brace/semicolon is on an elided line.
550 struct task_init task_init_esp_enc = {
552 .mode_str = "esp_enc",
553 .init = init_task_esp_enc,
554 .handle = handle_esp_enc_bulk,
555 .size = sizeof(struct task_esp_enc),
// Larger mbufs: encapsulation grows packets (outer header + ESP + ICV).
556 .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM
// PROX task descriptor for the "esp_dec" mode: binds the init and burst
// handlers and sizes the per-task state and mbufs.
// NOTE(review): the closing brace/semicolon is on an elided line.
559 struct task_init task_init_esp_dec = {
561 .mode_str = "esp_dec",
562 .init = init_task_esp_dec,
563 .handle = handle_esp_dec_bulk,
564 .size = sizeof(struct task_esp_dec),
565 .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM
// Register both ESP task modes with PROX at program startup, before main()
// runs (GCC constructor attribute). Function bodies' braces are elided.
568 __attribute__((constructor)) static void reg_task_esp_enc(void)
570 reg_task(&task_init_esp_enc);
573 __attribute__((constructor)) static void reg_task_esp_dec(void)
575 reg_task(&task_init_esp_dec);