// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
 * Non-compatible implementation of RFC3686 (CTR-AES 128-bit key) and
 * RFC4303 (tunnel IPv4 ESP). Known limitations:
 *   1. Crypto not safe!!!!! (underlying AES-CTR implementation is OK,
 *      but the ESP implementation is lousy)
 *   2. Only ESP / tunnel / IPv4 / AES-CTR
 *   3. Not fully implemented
 *   4. No proper key / SADB
 * So: performance demonstrator only.
 */
#include <stdio.h>
#include <stdlib.h>

#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>

#include "task_init.h"
#include "task_base.h"
#include "prox_cksum.h"
/* Short aliases used throughout this file for raw packet byte access. */
typedef unsigned int u32;
typedef unsigned char u8;

#define NUM_MBUFS       (8191)
#define MBUF_CACHE_SIZE (250)

/* Convert a length in bits to bytes.
 * The argument is fully parenthesized so expressions such as
 * BYTE_LENGTH(a + b) expand correctly (the old `x/8` form did not). */
#define BYTE_LENGTH(x) ((x) / 8)
#define DIGEST_BYTE_LENGTH_SHA1 (BYTE_LENGTH(160))

#define CIPHER_KEY_LENGTH_AES_CBC (32)
#define CIPHER_IV_LENGTH_AES_CBC 16
49 static inline void *get_sym_cop(struct rte_crypto_op *cop)
55 struct task_base base;
62 struct rte_cryptodev_sym_session *sess;
63 struct rte_crypto_sym_xform cipher_xform;
64 struct rte_crypto_sym_xform auth_xform;
65 struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
69 struct task_base base;
75 struct rte_cryptodev_sym_session *sess;
76 struct rte_crypto_sym_xform cipher_xform;
77 struct rte_crypto_sym_xform auth_xform;
78 struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
81 struct crypto_testsuite_params {
82 struct rte_mempool *mbuf_ol_pool_enc;
83 struct rte_mempool *mbuf_ol_pool_dec;
85 uint16_t nb_queue_pairs;
87 struct rte_cryptodev_config conf;
88 struct rte_cryptodev_qp_conf qp_conf;
91 static struct crypto_testsuite_params testsuite_params = { NULL };
92 static enum rte_cryptodev_type gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_AESNI_MB_PMD;
/* Hard-coded HMAC-SHA1 authentication key (20 bytes).
 * Demo value only — see file header: this module is not crypto-safe. */
static uint8_t hmac_sha1_key[] = {
	0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
	0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
	0xDE, 0xF4, 0xDE, 0xAD
};

/* Hard-coded AES-CBC cipher key (32 bytes; the same 16-byte pattern
 * repeated twice). Demo value only. */
static uint8_t aes_cbc_key[] = {
	0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
	0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A,
	0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
	0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A
};
104 static void init_task_esp_enc(struct task_base *tbase, __attribute__((unused)) struct task_args *targ)
106 int i, nb_devs, valid_dev_id = 0;
108 struct crypto_testsuite_params *ts_params = &testsuite_params;
109 struct rte_cryptodev_info info;
111 tbase->flags |= FLAG_NEVER_FLUSH;
113 ts_params->mbuf_ol_pool_enc = rte_crypto_op_pool_create("crypto_op_pool_enc",
114 RTE_CRYPTO_OP_TYPE_SYMMETRIC, (2*1024*1024), 128, 0,
117 struct task_esp_enc *task = (struct task_esp_enc *)tbase;
118 task->crypto_dev_id = rte_vdev_init(RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), NULL);
119 nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_MB_PMD);
122 RTE_LOG(ERR, USER1, "No crypto devices found?");
126 /* Search for the first valid */
127 for (i = 0; i < nb_devs; i++) {
128 rte_cryptodev_info_get(i, &info);
129 if (info.dev_type == gbl_cryptodev_preftest_devtype) {
130 task->crypto_dev_id = i;
138 RTE_LOG(ERR, USER1, "invalid crypto devices found?");
143 * * Since we can't free and re-allocate queue memory always set the queues
144 * * on this device up to max size first so enough memory is allocated for
145 * * any later re-configures needed by other tests */
147 ts_params->conf.nb_queue_pairs = 2;
148 ts_params->conf.socket_id = SOCKET_ID_ANY;
149 ts_params->conf.session_mp.nb_objs = 2048;
150 ts_params->qp_conf.nb_descriptors = 4096;
152 /*Now reconfigure queues to size we actually want to use in this testsuite.*/
153 ts_params->qp_conf.nb_descriptors = 128;
154 rte_cryptodev_configure(task->crypto_dev_id, &ts_params->conf);
155 rte_cryptodev_queue_pair_setup(task->crypto_dev_id, 0,
156 &ts_params->qp_conf, rte_cryptodev_socket_id(task->crypto_dev_id));
157 rte_cryptodev_configure(task->crypto_dev_id, &ts_params->conf);
159 struct rte_cryptodev *dev;
161 dev = rte_cryptodev_pmd_get_dev(task->crypto_dev_id);
162 if (dev->attached != RTE_CRYPTODEV_ATTACHED)
165 /* Setup Cipher Parameters */
166 task->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
167 task->cipher_xform.next = &(task->auth_xform);
169 task->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
170 task->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
171 task->cipher_xform.cipher.key.data = aes_cbc_key;
172 task->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
174 /* Setup HMAC Parameters */
175 task->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
176 task->auth_xform.next = NULL;
177 task->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
178 task->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
179 task->auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
180 task->auth_xform.auth.key.data = hmac_sha1_key;
181 task->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
183 /* Create Crypto session*/
184 task->sess = rte_cryptodev_sym_session_create(task->crypto_dev_id, &task->cipher_xform);
185 if (task->sess == NULL)
191 // Read config file with SAs
192 task->local_ipv4 = targ->local_ipv4;
193 task->remote_ipv4 = targ->remote_ipv4;
195 for (i = 0; i < 16; i++) task->key[i] = i+2;
196 for (i = 0; i < 16; i++) task->iv[i] = i;
199 static void init_task_esp_dec(struct task_base *tbase, __attribute__((unused)) struct task_args *targ)
202 struct crypto_testsuite_params *ts_params = &testsuite_params;
203 tbase->flags |= FLAG_NEVER_FLUSH;
204 ts_params->mbuf_ol_pool_dec = rte_crypto_op_pool_create("crypto_op_pool_dec",
205 RTE_CRYPTO_OP_TYPE_SYMMETRIC, (2*1024*1024), 128, 0,
207 if (ts_params->mbuf_ol_pool_dec == NULL) {
208 RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
212 static struct rte_cryptodev_session *sess_dec = NULL;
213 // Read config file with SAs
214 struct task_esp_dec *task = (struct task_esp_dec *)tbase;
215 task->local_ipv4 = targ->local_ipv4;
217 task->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
218 task->cipher_xform.next = NULL;
219 task->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
220 task->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
221 task->cipher_xform.cipher.key.data = aes_cbc_key;
222 task->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
224 /* Setup HMAC Parameters */
225 struct rte_crypto_sym_xform auth_xform;
226 task->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
227 task->auth_xform.next = &task->cipher_xform;
228 task->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
229 task->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
230 task->auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
231 task->auth_xform.auth.key.data = hmac_sha1_key;
232 task->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
234 rte_cryptodev_queue_pair_setup(task->crypto_dev_id, 1, &ts_params->qp_conf, rte_cryptodev_socket_id(task->crypto_dev_id));
236 struct rte_cryptodev *dev;
238 dev = rte_cryptodev_pmd_get_dev(task->crypto_dev_id);
239 if (dev->attached != RTE_CRYPTODEV_ATTACHED)
242 ts_params->qp_conf.nb_descriptors = 128;
244 rte_cryptodev_stats_reset(task->crypto_dev_id);
246 task->sess = rte_cryptodev_sym_session_create(task->crypto_dev_id, &task->auth_xform);
247 if (task->sess == NULL)
249 printf("not ok dec\n");
252 rte_cryptodev_stats_reset(task->crypto_dev_id);
253 rte_cryptodev_start(task->crypto_dev_id);
256 for (i = 0; i < 16; i++) task->key[i] = i+2;
257 for (i = 0; i < 16; i++) task->iv[i] = i;
/* Fixed AES-CBC IV (16 bytes) copied into every outgoing packet.
 * Reusing one IV is insecure — acceptable only for this demo. */
static uint8_t aes_cbc_iv[] = {
	0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
	0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A
};
264 static int enqueue_crypto_request(struct task_esp_enc *task, struct rte_crypto_op *cop, int dir)
266 if (rte_cryptodev_enqueue_burst(task->crypto_dev_id, dir, &cop, 1) != 1) {
267 // printf("Error sending packet for encryption");
274 static int debug_counter = 0;
275 static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
277 struct crypto_testsuite_params *ts_params = &testsuite_params;
280 u8 dest[8192]; // scratch buf, maximum packet
282 struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
283 struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
284 uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
285 struct rte_crypto_sym_op *sym_cop = get_sym_cop(cop);
287 if (unlikely((pip4->version_ihl >> 4) != 4)) {
288 plog_info("Received non IPv4 packet at esp tunnel input %i\n", pip4->version_ihl);
292 if (pip4->time_to_live) {
293 pip4->time_to_live--;
296 plog_info("TTL = 0 => Dropping\n");
300 // Remove padding if any (we don't want to encapsulate garbage at end of IPv4 packet)
301 int l1 = rte_pktmbuf_pkt_len(mbuf);
302 int padding = l1 - (ipv4_length + sizeof(struct ether_hdr));
303 if (unlikely(padding > 0)) {
304 rte_pktmbuf_trim(mbuf, padding);
307 l1 = rte_pktmbuf_pkt_len(mbuf);
308 int encrypt_len = l1 - sizeof(struct ether_hdr) + 2; // According to RFC4303 table 1, encrypt len is ip+tfc_pad(o)+pad+pad len(1) + next header(1)
310 if ((encrypt_len & 0xf) != 0)
313 padding = 16 - (encrypt_len % 16);
314 encrypt_len += padding;
317 // Encapsulate, crypt in a separate buffer
318 // memcpy(dest, pip4, encrypt_len);
319 const int extra_space = sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC; // + new IP header, SPI, SN, IV
320 struct ether_addr src_mac = peth->s_addr;
321 struct ether_addr dst_mac = peth->d_addr;
322 uint32_t src_addr = pip4->src_addr;
323 uint32_t dst_addr = pip4->dst_addr;
324 uint8_t ttl = pip4->time_to_live;
325 uint8_t version_ihl = pip4->version_ihl;
327 peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, extra_space); // encap + prefix
328 peth = (struct ether_hdr *)rte_pktmbuf_append(mbuf, 0 + 1 + 1 + padding + 4 + DIGEST_BYTE_LENGTH_SHA1); // padding + pad_len + next_head + seqn + ICV pad + ICV
329 peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
330 l1 = rte_pktmbuf_pkt_len(mbuf);
331 peth->ether_type = ETYPE_IPv4;
332 ether_addr_copy(&src_mac, &peth->s_addr);
333 ether_addr_copy(&dst_mac, &peth->d_addr);
335 pip4 = (struct ipv4_hdr *)(peth + 1);
336 pip4->src_addr = task->local_ipv4;
337 pip4->dst_addr = task->remote_ipv4;
338 pip4->time_to_live = ttl;
339 pip4->next_proto_id = 50; // 50 for ESP, ip in ip next proto trailer
340 pip4->version_ihl = version_ihl; // 20 bytes, ipv4
341 pip4->total_length = rte_cpu_to_be_16(ipv4_length + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC + padding + 1 + 1 + DIGEST_BYTE_LENGTH_SHA1); // iphdr+SPI+SN+IV+payload+padding+padlen+next header + crc + auth
342 prox_ip_cksum_sw(pip4);
344 // find the SA when there will be more than one
345 if (task->ipaddr == pip4->src_addr)
348 data = (u8*)(pip4 + 1);
349 *((u32*) data) = 0x2016; // FIXME SPI
350 *((u32*) data + 1) = 0x2; // FIXME SN
351 u8 *padl = (u8*)data + (8 + encrypt_len - 2 + CIPHER_IV_LENGTH_AES_CBC); // No ESN yet. (-2 means NH is crypted)
352 // padl += CIPHER_IV_LENGTH_AES_CBC;
354 *(padl + 1) = 4; // ipv4 in 4
356 // one key for them all for now
357 rte_crypto_op_attach_sym_session(cop, task->sess);
359 sym_cop->auth.digest.data = data + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len + 2;
360 sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, (sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len + 2));
361 sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
363 sym_cop->cipher.iv.data = data + 8;
364 sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4;
365 sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
367 rte_memcpy(sym_cop->cipher.iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
369 sym_cop->cipher.data.offset = sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC;
370 sym_cop->cipher.data.length = encrypt_len;
372 sym_cop->auth.data.offset = sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr);
373 sym_cop->auth.data.length = 4 + 4 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len ;// + 4;// FIXME
375 /* Process crypto operation */
376 sym_cop->m_src = mbuf;
377 return enqueue_crypto_request(task, cop, 0);
380 static inline uint8_t handle_esp_ah_dec(struct task_esp_dec *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
382 struct crypto_testsuite_params *ts_params = &testsuite_params;
384 struct rte_crypto_sym_op *sym_cop = get_sym_cop(cop);
386 struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
387 struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
388 uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
389 int l1 = rte_pktmbuf_pkt_len(mbuf);
391 u8 *data = (u8*)(pip4 + 1);
393 if (pip4->next_proto_id != 50)
395 plog_info("Received non ip in ip tunnel packet esp tunnel output\n");
396 return OUT_DISCARD;//NO_PORT_AVAIL;
398 if (task->ipaddr == pip4->src_addr)
402 /* Create Crypto session*/
403 rte_crypto_op_attach_sym_session(cop, task->sess);
405 sym_cop->auth.digest.data = (unsigned char *)((unsigned char*)pip4 + ipv4_length - 20);
406 sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4); // FIXME
407 sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
409 sym_cop->cipher.iv.data = (uint8_t *)data + 8;
410 sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4;
411 sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
413 sym_cop->auth.data.offset = sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr);
414 sym_cop->auth.data.length = ipv4_length - sizeof(struct ipv4_hdr) - 4 - CIPHER_IV_LENGTH_AES_CBC;
416 sym_cop->cipher.data.offset = sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC;
417 sym_cop->cipher.data.length = ipv4_length - sizeof(struct ipv4_hdr) - CIPHER_IV_LENGTH_AES_CBC - 28; // FIXME
419 /* Process crypto operation */
420 sym_cop->m_src = mbuf;
421 return enqueue_crypto_request((struct task_esp_enc *)task, cop, 1);
424 static inline uint8_t handle_esp_ah_dec_finish(struct task_esp_dec *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
426 struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
427 rte_memcpy(((u8*)peth) + sizeof (struct ether_hdr), ((u8*)peth) + sizeof (struct ether_hdr) +
428 + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, sizeof(struct ipv4_hdr));// next hdr, padding
429 struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
430 if (unlikely((pip4->version_ihl >> 4) != 4)) {
431 plog_info("Received non IPv4 packet at esp tunnel input %i\n", pip4->version_ihl);
434 if (pip4->time_to_live) {
435 pip4->time_to_live--;
438 plog_info("TTL = 0 => Dropping\n");
441 uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
442 rte_memcpy(((u8*)peth) + sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr),
443 ((u8*)peth) + sizeof (struct ether_hdr) +
444 + 2 * sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, ipv4_length - sizeof(struct ipv4_hdr));
446 int len = rte_pktmbuf_pkt_len(mbuf);
447 rte_pktmbuf_trim(mbuf, len - sizeof (struct ether_hdr) - ipv4_length);
448 peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
450 // one key for them all for now
452 // struct crypto_aes_ctx ctx;
453 // ctx.iv = (u8*)&iv_onstack;
454 // *((u32*)ctx.iv) = *((u32*)data + 2);
455 // aes_set_key(&ctx, task->key, 16);//
457 // result = ctr_crypt(&ctx, dest, data + 12, len);//
458 // memcpy(pip4, dest, len);
463 static void handle_esp_enc_bulk(__attribute__((unused)) struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
465 struct task_esp_enc *task = (struct task_esp_enc *)tbase;
466 struct crypto_testsuite_params *ts_params = &testsuite_params;
467 uint8_t out[MAX_PKT_BURST];
468 uint16_t i = 0, nb_rx = 0, j = 0;
470 if (rte_crypto_op_bulk_alloc( ts_params->mbuf_ol_pool_enc,
471 RTE_CRYPTO_OP_TYPE_SYMMETRIC,
472 task->ops_burst, n_pkts) != n_pkts) {
474 printf("out of memory\n");
478 for (uint16_t j = 0; j < n_pkts; ++j) {
479 out[j] = handle_esp_ah_enc(task, mbufs[j], task->ops_burst[j]);
481 /* Dequeue packets from Crypto device */
484 nb_rx = rte_cryptodev_dequeue_burst(
485 task->crypto_dev_id, 0,// FIXME AK
486 task->ops_burst, n_pkts);
489 } while (i < n_pkts);
491 for (j = 0; j < n_pkts; j++) {
492 rte_crypto_op_free(task->ops_burst[j]);
495 task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
498 static void handle_esp_dec_bulk(__attribute__((unused)) struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
500 uint8_t out[MAX_PKT_BURST];
501 struct task_esp_dec *task = (struct task_esp_dec *)tbase;
502 struct crypto_testsuite_params *ts_params = &testsuite_params;
503 //__itt_frame_begin_v3(pD, NULL);
505 if (rte_crypto_op_bulk_alloc(
506 ts_params->mbuf_ol_pool_dec,
507 RTE_CRYPTO_OP_TYPE_SYMMETRIC,
508 task->ops_burst, n_pkts) !=
511 printf("out of memory\n");
514 uint16_t i = 0, nb_rx, j;
516 for (uint16_t j = 0; j < n_pkts; ++j) {
518 out[j] = handle_esp_ah_dec(task, mbufs[j], task->ops_burst[j]);
520 for (j = 0; j < n_pkts; j++) {
521 rte_crypto_op_free(task->ops_burst[j]);
524 task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
527 struct task_init task_init_esp_enc = {
529 .mode_str = "esp_enc",
530 .init = init_task_esp_enc,
531 .handle = handle_esp_enc_bulk,
532 .size = sizeof(struct task_esp_enc)
535 struct task_init task_init_esp_dec = {
537 .mode_str = "esp_dec",
538 .init = init_task_esp_dec,
539 .handle = handle_esp_dec_bulk,
540 .size = sizeof(struct task_esp_dec)
543 __attribute__((constructor)) static void reg_task_esp_enc(void)
545 reg_task(&task_init_esp_enc);
548 __attribute__((constructor)) static void reg_task_esp_dec(void)
550 reg_task(&task_init_esp_dec);