/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
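
/*
 * Illustrative sketch only (not part of the driver): using the
 * desc_constr.h helpers, a job descriptor with the layout shown above
 * could be assembled roughly as below. The helper names and flags are
 * the ones this driver uses elsewhere; the function itself is a
 * hypothetical example.
 */
#if 0
static void example_job_desc(u32 *desc, dma_addr_t sh_desc_dma, int sh_len,
                             dma_addr_t src_dma, dma_addr_t dst_dma, u32 len)
{
        /* Header + ShareDesc pointer */
        init_job_desc_shared(desc, sh_desc_dma, sh_len,
                             HDR_SHARE_DEFER | HDR_REVERSE);
        /* SEQ_OUT_PTR: output buffer and length */
        append_seq_out_ptr(desc, dst_dma, len, 0);
        /* SEQ_IN_PTR: input buffer and length */
        append_seq_in_ptr(desc, src_dma, len, 0);
}
#endif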

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY               3000
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
#define CAAM_MAX_KEY_SIZE               (AES_MAX_KEY_SIZE + \
                                         CTR_RFC3686_NONCE_SIZE + \
                                         SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH              16
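
/*
 * Worked example: for e.g. authenc(hmac(sha512),rfc3686(ctr(aes))) the key
 * material is at most a 32-byte AES key, a 4-byte RFC3686 nonce and an
 * MDHA split key padded to 2 * SHA512_DIGEST_SIZE = 128 bytes, i.e.
 * 32 + 4 + 128 = 164 bytes, which is exactly what CAAM_MAX_KEY_SIZE
 * accommodates.
 */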

/* length of descriptors text */
#define DESC_AEAD_BASE                  (4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN               (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN               (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN            (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN       (6 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE             (3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN          (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN          (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_GCM_BASE                   (3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN                (DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN                (DESC_GCM_BASE + 19 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE               (3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN            (DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN            (DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
#define DESC_RFC4106_GIVENC_LEN         (DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE               (3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN            (DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN            (DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
#define DESC_RFC4543_GIVENC_LEN         (DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE            (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN         (DESC_ABLKCIPHER_BASE + \
                                         20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN         (DESC_ABLKCIPHER_BASE + \
                                         15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES             (DESC_RFC4543_GIVENC_LEN + \
                                         CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN               (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, ##arg)
#else
#define debug(format, arg...)
#endif

static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
        u32 *jump_cmd, *uncond_jump_cmd;

        /* DK bit is valid only for AES */
        if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
                append_operation(desc, type | OP_ALG_AS_INITFINAL |
                                 OP_ALG_DECRYPT);
                return;
        }

        jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
        append_operation(desc, type | OP_ALG_AS_INITFINAL |
                         OP_ALG_DECRYPT);
        uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
        set_jump_tgt_here(desc, jump_cmd);
        append_operation(desc, type | OP_ALG_AS_INITFINAL |
                         OP_ALG_DECRYPT | OP_ALG_AAI_DK);
        set_jump_tgt_here(desc, uncond_jump_cmd);
}
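
/*
 * Descriptor shape emitted by append_dec_op1() for AES (illustrative):
 *
 *      JUMP (if shared)  ------------.
 *      OPERATION decrypt             |
 *      JUMP (unconditional)  ---.    |
 *      OPERATION decrypt | DK <-+----'
 *      <next command>  <--------'
 *
 * i.e. when the descriptor is shared and the key register already holds
 * the expanded key schedule, the variant with the Decrypt Key (DK) bit
 * runs instead of the plain decrypt operation.
 */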

/*
 * For aead functions, read the payload from req->src and write the
 * payload to req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
                             KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}
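
/*
 * msg_type selects the FIFO routing of the payload: the descriptors
 * below pass FIFOLD_TYPE_MSG1OUT2 on encryption, FIFOLD_TYPE_MSG
 * (optionally with FIFOLD_TYPE_FLUSH1) on decryption and for the null
 * ciphers, and FIFOLD_TYPE_AAD for rfc4543, where the payload is
 * authenticated rather than encrypted.
 */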

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
{
        append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
                        LDST_SRCDST_BYTE_CONTEXT |
                        (ivoffset << LDST_OFFSET_SHIFT));
        append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
                    (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
        append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
                             KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
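
/*
 * The two MATH ADDs above load the full remaining input length
 * (SEQINLEN) into both variable-length registers (REG0 serves as a
 * zero operand here, as elsewhere in this file), so the VLF FIFO
 * load/store pair that follows reads the whole payload and writes an
 * equal-sized output: for a plain block cipher, ciphertext length
 * equals plaintext length.
 */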

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG          1
#define GIV_DST_CONTIG          (1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
        struct device *jrdev;
        u32 sh_desc_enc[DESC_MAX_USED_LEN];
        u32 sh_desc_dec[DESC_MAX_USED_LEN];
        u32 sh_desc_givenc[DESC_MAX_USED_LEN];
        dma_addr_t sh_desc_enc_dma;
        dma_addr_t sh_desc_dec_dma;
        dma_addr_t sh_desc_givenc_dma;
        u32 class1_alg_type;
        u32 class2_alg_type;
        u32 alg_op;
        u8 key[CAAM_MAX_KEY_SIZE];
        dma_addr_t key_dma;
        unsigned int enckeylen;
        unsigned int split_key_len;
        unsigned int split_key_pad_len;
        unsigned int authsize;
};
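
/*
 * Layout of ctx->key (and of the buffer mapped at ctx->key_dma), as
 * consumed by append_key_aead() below (illustrative):
 *
 *      | MDHA split key, padded     | enc key    | nonce (rfc3686) |
 *      |<-- split_key_pad_len ----->|<-- enckeylen (incl. nonce) ->|
 */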

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
                            int keys_fit_inline, bool is_rfc3686)
{
        u32 *nonce;
        unsigned int enckeylen = ctx->enckeylen;

        /*
         * RFC3686 specific:
         *      | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
         *      | enckeylen = encryption key size + nonce size
         */
        if (is_rfc3686)
                enckeylen -= CTR_RFC3686_NONCE_SIZE;

        if (keys_fit_inline) {
                append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
                                  ctx->split_key_len, CLASS_2 |
                                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
                append_key_as_imm(desc, (void *)ctx->key +
                                  ctx->split_key_pad_len, enckeylen,
                                  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        } else {
                append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
                append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
                           enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        }

        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686) {
                nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
                               enckeylen);
                append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
                                    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
                append_move(desc,
                            MOVE_SRC_OUTFIFO |
                            MOVE_DEST_CLASS1CTX |
                            (16 << MOVE_OFFSET_SHIFT) |
                            (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
        }
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
                                  int keys_fit_inline, bool is_rfc3686)
{
        u32 *key_jump_cmd;

        /* Note: Context registers are saved. */
        init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);

        append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

        set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
        struct aead_tfm *tfm = &aead->base.crt_aead;
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
        u32 *desc;

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
                                  ctx->split_key_len, CLASS_2 |
                                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
        else
                append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /*
         * NULL encryption; IV is zero
         * assoclen = (assoclen + cryptlen) - cryptlen
         */
        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        /* Prepare to read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /*
         * MOVE_LEN opcode is not available in all SEC HW revisions,
         * thus need to do some magic, i.e. self-patch the descriptor
         * buffer.
         */
        read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
                                    MOVE_DEST_MATH3 |
                                    (0x6 << MOVE_LEN_SHIFT));
        write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
                                     MOVE_DEST_DESCBUF |
                                     MOVE_WAITCOMP |
                                     (0x8 << MOVE_LEN_SHIFT));
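
        /*
         * Self-patch mechanics (a sketch, assuming the usual CAAM
         * trick): MATH3 already holds cryptlen in its low bytes; the
         * first MOVE copies 6 command bytes out of the descriptor
         * buffer into MATH3, and the second writes 8 bytes back, so
         * the command at the location patched via set_move_tgt_here()
         * below is reassembled with the run-time length, emulating
         * MOVE_LEN on SEC revisions that lack it.
         */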

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Read and write cryptlen bytes */
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

        set_move_tgt_here(desc, read_move_cmd);
        set_move_tgt_here(desc, write_move_cmd);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
                    MOVE_AUX_LS);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "aead null enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        /* aead_decrypt shared descriptor */
        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
                                  ctx->split_key_len, CLASS_2 |
                                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
        else
                append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* assoclen + cryptlen = seqinlen - ivsize - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
                                ctx->authsize + tfm->ivsize);
        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
        append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        /* Prepare to read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

        /*
         * MOVE_LEN opcode is not available in all SEC HW revisions,
         * thus need to do some magic, i.e. self-patch the descriptor
         * buffer.
         */
        read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
                                    MOVE_DEST_MATH2 |
                                    (0x6 << MOVE_LEN_SHIFT));
        write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
                                     MOVE_DEST_DESCBUF |
                                     MOVE_WAITCOMP |
                                     (0x8 << MOVE_LEN_SHIFT));

        /* Read and write cryptlen bytes */
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

        /*
         * Insert a NOP here, since we need at least 4 instructions between
         * code patching the descriptor buffer and the location being patched.
         */
        jump_cmd = append_jump(desc, JUMP_TEST_ALL);
        set_jump_tgt_here(desc, jump_cmd);

        set_move_tgt_here(desc, read_move_cmd);
        set_move_tgt_here(desc, write_move_cmd);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
                    MOVE_AUX_LS);
        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

        /* Load ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "aead null dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
        struct aead_tfm *tfm = &aead->base.crt_aead;
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
        const char *alg_name = crypto_tfm_alg_name(ctfm);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline;
        u32 geniv, moveiv;
        u32 ctx1_iv_off = 0;
        u32 *desc;
        const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
                               OP_ALG_AAI_CTR_MOD128);
        const bool is_rfc3686 = (ctr_mode &&
                                 (strstr(alg_name, "rfc3686") != NULL));

        if (!ctx->authsize)
                return 0;

        /* NULL encryption / decryption */
        if (!ctx->enckeylen)
                return aead_null_set_sh_desc(aead);

        /*
         * AES-CTR needs to load IV in CONTEXT1 reg
         * at an offset of 128 bits (16 bytes)
         * CONTEXT1[255:128] = IV
         */
        if (ctr_mode)
                ctx1_iv_off = 16;

        /*
         * RFC3686 specific:
         *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
         */
        if (is_rfc3686)
                ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen +
            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;

        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /* assoclen + cryptlen = seqinlen - ivsize */
        append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);
        aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);

        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
                                    LDST_CLASS_1_CCB |
                                    LDST_SRCDST_BYTE_CONTEXT |
                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
                                     LDST_OFFSET_SHIFT));

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen +
            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_decrypt shared descriptor */
        desc = ctx->sh_desc_dec;

        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* assoclen + cryptlen = seqinlen - ivsize - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
                                ctx->authsize + tfm->ivsize);
        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
        append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);

        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
                                    LDST_CLASS_1_CCB |
                                    LDST_SRCDST_BYTE_CONTEXT |
                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
                                     LDST_OFFSET_SHIFT));

        /* Choose operation */
        if (ctr_mode)
                append_operation(desc, ctx->class1_alg_type |
                                 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
        else
                append_dec_op1(desc, ctx->class1_alg_type);

        /* Read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

        /* Load ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen +
            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_givencrypt shared descriptor */
        desc = ctx->sh_desc_givenc;

        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

        /* Generate IV */
        geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
                NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
                NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
        append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        append_move(desc, MOVE_WAITCOMP |
                    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
                    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
                    (tfm->ivsize << MOVE_LEN_SHIFT));
        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

        /* Copy IV from class 1 context to OFIFO */
        append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
                    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
                    (tfm->ivsize << MOVE_LEN_SHIFT));

        /* Return to encryption */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* ivsize + cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /* assoclen = seqinlen - (ivsize + cryptlen) */
        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        /* Copy iv from outfifo to class 2 fifo */
        moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
                 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
        append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
        append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
                            LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
                                    LDST_CLASS_1_CCB |
                                    LDST_SRCDST_BYTE_CONTEXT |
                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
                                     LDST_OFFSET_SHIFT));

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Will write ivsize + cryptlen */
        append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* No need to reload iv */
        append_seq_fifo_load(desc, tfm->ivsize,
                             FIFOLD_CLASS_SKIP);

        /* Will read cryptlen */
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
                                                 desc_bytes(desc),
                                                 DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
                                    unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        aead_set_sh_desc(authenc);

        return 0;
}
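
/*
 * Note: a new authsize changes the ICV length baked into the shared
 * descriptors, so the setauthsize callbacks (reached through
 * crypto_aead_setauthsize()) rebuild them; the gcm and rfc4106
 * handlers below follow the same pattern.
 */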

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
        struct aead_tfm *tfm = &aead->base.crt_aead;
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd, *zero_payload_jump_cmd,
            *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
        u32 *desc;

        if (!ctx->enckeylen || !ctx->authsize)
                return 0;

        /*
         * AES GCM encrypt shared descriptor
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* skip key loading if they are loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD | JUMP_COND_SELF);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /* assoclen + cryptlen = seqinlen - ivsize */
        append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);

        /* if cryptlen is ZERO jump to zero-payload commands */
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
        zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
                                            JUMP_COND_MATH_Z);
        /* read IV */
        append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

        /* if assoclen is ZERO, skip reading the assoc data */
        append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
        zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
                                           JUMP_COND_MATH_Z);

        /* read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
        set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* write encrypted data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* read payload data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

        /* jump over the zero-payload commands */
        append_jump(desc, JUMP_TEST_ALL | 7);

        /* zero-payload commands */
        set_jump_tgt_here(desc, zero_payload_jump_cmd);

        /* if assoclen is ZERO, jump to IV reading (IV is the only input) */
        append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
        zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
                                           JUMP_COND_MATH_Z);
        /* read IV */
        append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

        /* read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

        /* jump to ICV writing */
        append_jump(desc, JUMP_TEST_ALL | 2);

        /* read IV (the only input data) */
        set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
        append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
                             FIFOLD_TYPE_LAST1);

        /* write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* skip key loading if they are loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL |
                                   JUMP_TEST_ALL | JUMP_COND_SHRD |
                                   JUMP_COND_SELF);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
        append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
                                ctx->authsize + tfm->ivsize);

        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
        append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);

        /* read IV */
        append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

        /* jump to zero-payload command if cryptlen is zero */
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
        zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
                                            JUMP_COND_MATH_Z);

        append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
        /* if assoclen is ZERO, skip reading assoc data */
        zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
                                           JUMP_COND_MATH_Z);
        /* read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
        set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

        append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

        /* store decrypted data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* read payload data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

        /* jump over the zero-payload commands */
        append_jump(desc, JUMP_TEST_ALL | 4);

        /* zero-payload command */
        set_jump_tgt_here(desc, zero_payload_jump_cmd);

        /* if assoclen is ZERO, jump to ICV reading */
        append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
        zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
                                           JUMP_COND_MATH_Z);
        /* read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
        set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

        /* read ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}
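
/*
 * The jump ladders above exist because the tag must still be computed
 * when cryptlen == 0 and/or assoclen == 0 (e.g. GMAC-style requests),
 * so each degenerate combination needs its own terminal FIFO load
 * carrying the LAST1 flag.
 */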

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        gcm_set_sh_desc(authenc);

        return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
        struct aead_tfm *tfm = &aead->base.crt_aead;
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
        u32 *desc;
        u32 geniv;

        if (!ctx->enckeylen || !ctx->authsize)
                return 0;

        /*
         * RFC4106 encrypt shared descriptor
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip key loading if it is loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* assoclen + cryptlen = seqinlen - ivsize */
        append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

        /* Read Salt */
        append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
                                4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
        /* Read AES-GCM-ESP IV */
        append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

        /* Read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

        /* Will read cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* Write encrypted data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* Read payload data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip key loading if it is loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL |
                                   JUMP_TEST_ALL | JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
        append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
                                ctx->authsize + tfm->ivsize);

        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
        append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

        /* Will write cryptlen bytes */
        append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* Read Salt */
        append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
                                4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
        /* Read AES-GCM-ESP IV */
        append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

        /* Read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

        /* Will read cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

        /* Store payload data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* Read encrypted data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

        /* Read ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* rfc4106_givencrypt shared descriptor */
        desc = ctx->sh_desc_givenc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip key loading if it is loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Generate IV */
        geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
                NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
                NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
        append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
                               (tfm->ivsize << MOVE_LEN_SHIFT));
        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

        /* Copy generated IV to OFIFO */
        write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
                                   (tfm->ivsize << MOVE_LEN_SHIFT));

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* ivsize + cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /* assoclen = seqinlen - (ivsize + cryptlen) */
        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

        /* Will write ivsize + cryptlen */
        append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);

        /* Read Salt and generated IV */
        append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
                   FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
        /* Append Salt */
        append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
        set_move_tgt_here(desc, move_cmd);
        set_move_tgt_here(desc, write_iv_cmd);
        /* Blank commands. Will be overwritten by generated IV. */
        append_cmd(desc, 0x00000000);
        append_cmd(desc, 0x00000000);
        /* End of blank commands */

        /* No need to reload iv */
        append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);

        /* Read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

        /* Will read cryptlen */
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* Store generated IV and encrypted data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* Read payload data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
                                                 desc_bytes(desc),
                                                 DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}
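
/*
 * RFC4106 note: the 4-byte salt is stored right after the AES key at
 * ctx->key + ctx->enckeylen and is fed to the CCB as the leading part
 * of the IV, followed by the 8-byte per-packet ESP IV, forming the
 * 12-byte GCM IV (hence the immediate FIFO load of 12 bytes above).
 */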

static int rfc4106_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        rfc4106_set_sh_desc(authenc);

        return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
        struct aead_tfm *tfm = &aead->base.crt_aead;
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
        u32 *read_move_cmd, *write_move_cmd;
        u32 *desc;
        u32 geniv;

        if (!ctx->enckeylen || !ctx->authsize)
                return 0;

        /*
         * RFC4543 encrypt shared descriptor
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip key loading if it is loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Load AES-GMAC ESP IV into Math1 register */
        append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
                   LDST_CLASS_DECO | tfm->ivsize);

1296         /* Wait for the DMA transaction to finish */
1297         append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
1298                     (1 << JUMP_OFFSET_SHIFT));
1299
1300         /* Overwrite blank immediate AES-GMAC ESP IV data */
1301         write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1302                                    (tfm->ivsize << MOVE_LEN_SHIFT));
1303
1304         /* Overwrite blank immediate AAD data */
1305         write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1306                                     (tfm->ivsize << MOVE_LEN_SHIFT));
1307
1308         /* cryptlen = seqoutlen - authsize */
1309         append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
1310
1311         /* assoclen = (seqinlen - ivsize) - cryptlen */
1312         append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
1313
1314         /* Read Salt and AES-GMAC ESP IV */
1315         append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1316                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
1317         /* Append Salt */
1318         append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1319         set_move_tgt_here(desc, write_iv_cmd);
1320         /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1321         append_cmd(desc, 0x00000000);
1322         append_cmd(desc, 0x00000000);
1323         /* End of blank commands */
1324
1325         /* Read assoc data */
1326         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1327                              FIFOLD_TYPE_AAD);
1328
1329         /* Will read cryptlen bytes */
1330         append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
1331
1332         /* Will write cryptlen bytes */
1333         append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
1334
1335         /*
1336          * MOVE_LEN opcode is not available in all SEC HW revisions,
1337          * so we work around it by self-patching the descriptor
1338          * buffer at run time.
1339          */
1340         read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1341                                     (0x6 << MOVE_LEN_SHIFT));
1342         write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1343                                      (0x8 << MOVE_LEN_SHIFT));
1344
1345         /* Authenticate AES-GMAC ESP IV  */
1346         append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1347                    FIFOLD_TYPE_AAD | tfm->ivsize);
1348         set_move_tgt_here(desc, write_aad_cmd);
1349         /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1350         append_cmd(desc, 0x00000000);
1351         append_cmd(desc, 0x00000000);
1352         /* End of blank commands */
1353
1354         /* Read and write cryptlen bytes */
1355         aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
1356
1357         set_move_tgt_here(desc, read_move_cmd);
1358         set_move_tgt_here(desc, write_move_cmd);
1359         append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1360         /* Move payload data to OFIFO */
1361         append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1362
1363         /* Write ICV */
1364         append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1365                          LDST_SRCDST_BYTE_CONTEXT);
1366
1367         ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1368                                               desc_bytes(desc),
1369                                               DMA_TO_DEVICE);
1370         if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1371                 dev_err(jrdev, "unable to map shared descriptor\n");
1372                 return -ENOMEM;
1373         }
1374 #ifdef DEBUG
1375         print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
1376                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
1377                        desc_bytes(desc), 1);
1378 #endif
1379
1380         /*
1381          * Job Descriptor and Shared Descriptors
1382          * must all fit into the 64-word Descriptor h/w Buffer
1383          */
1384         keys_fit_inline = false;
1385         if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
1386             ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1387                 keys_fit_inline = true;
1388
1389         desc = ctx->sh_desc_dec;
1390
1391         init_sh_desc(desc, HDR_SHARE_SERIAL);
1392
1393         /* Skip key loading if it is loaded due to sharing */
1394         key_jump_cmd = append_jump(desc, JUMP_JSL |
1395                                    JUMP_TEST_ALL | JUMP_COND_SHRD);
1396         if (keys_fit_inline)
1397                 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1398                                   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1399         else
1400                 append_key(desc, ctx->key_dma, ctx->enckeylen,
1401                            CLASS_1 | KEY_DEST_CLASS_REG);
1402         set_jump_tgt_here(desc, key_jump_cmd);
1403
1404         /* Class 1 operation */
1405         append_operation(desc, ctx->class1_alg_type |
1406                          OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1407
1408         /* Load AES-GMAC ESP IV into Math1 register */
1409         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
1410                    LDST_CLASS_DECO | tfm->ivsize);
1411
1412         /* Wait for the DMA transaction to finish */
1413         append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
1414                     (1 << JUMP_OFFSET_SHIFT));
1415
1416         /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
1417         append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);
1418
1419         /* Overwrite blank immediate AES-GMAC ESP IV data */
1420         write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1421                                    (tfm->ivsize << MOVE_LEN_SHIFT));
1422
1423         /* Overwrite blank immediate AAD data */
1424         write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1425                                     (tfm->ivsize << MOVE_LEN_SHIFT));
1426
1427         /* assoclen = (assoclen + cryptlen) - cryptlen */
1428         append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1429         append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
1430
1431         /*
1432          * MOVE_LEN opcode is not available in all SEC HW revisions,
1433          * so we work around it by self-patching the descriptor
1434          * buffer at run time.
1435          */
1436         read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1437                                     (0x6 << MOVE_LEN_SHIFT));
1438         write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1439                                      (0x8 << MOVE_LEN_SHIFT));
1440
1441         /* Read Salt and AES-GMAC ESP IV */
1442         append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1443                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
1444         /* Append Salt */
1445         append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1446         set_move_tgt_here(desc, write_iv_cmd);
1447         /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1448         append_cmd(desc, 0x00000000);
1449         append_cmd(desc, 0x00000000);
1450         /* End of blank commands */
1451
1452         /* Read assoc data */
1453         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1454                              FIFOLD_TYPE_AAD);
1455
1456         /* Will read cryptlen bytes */
1457         append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
1458
1459         /* Will write cryptlen bytes */
1460         append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
1461
1462         /* Authenticate AES-GMAC ESP IV  */
1463         append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1464                    FIFOLD_TYPE_AAD | tfm->ivsize);
1465         set_move_tgt_here(desc, write_aad_cmd);
1466         /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1467         append_cmd(desc, 0x00000000);
1468         append_cmd(desc, 0x00000000);
1469         /* End of blank commands */
1470
1471         /* Store payload data */
1472         append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1473
1474         /* In-snoop cryptlen data */
1475         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
1476                              FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
1477
1478         set_move_tgt_here(desc, read_move_cmd);
1479         set_move_tgt_here(desc, write_move_cmd);
1480         append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1481         /* Move payload data to OFIFO */
1482         append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1483         append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1484
1485         /* Read ICV */
1486         append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1487                              FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1488
1489         ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1490                                               desc_bytes(desc),
1491                                               DMA_TO_DEVICE);
1492         if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1493                 dev_err(jrdev, "unable to map shared descriptor\n");
1494                 return -ENOMEM;
1495         }
1496 #ifdef DEBUG
1497         print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
1498                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
1499                        desc_bytes(desc), 1);
1500 #endif
1501
1502         /*
1503          * Job Descriptor and Shared Descriptors
1504          * must all fit into the 64-word Descriptor h/w Buffer
1505          */
1506         keys_fit_inline = false;
1507         if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
1508             ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1509                 keys_fit_inline = true;
1510
1511         /* rfc4543_givencrypt shared descriptor */
1512         desc = ctx->sh_desc_givenc;
1513
1514         init_sh_desc(desc, HDR_SHARE_SERIAL);
1515
1516         /* Skip key loading if it is loaded due to sharing */
1517         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1518                                    JUMP_COND_SHRD);
1519         if (keys_fit_inline)
1520                 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1521                                   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1522         else
1523                 append_key(desc, ctx->key_dma, ctx->enckeylen,
1524                            CLASS_1 | KEY_DEST_CLASS_REG);
1525         set_jump_tgt_here(desc, key_jump_cmd);
1526
1527         /* Generate IV */
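             /*
              * A pad-engine info-FIFO entry (NFIFOENTRY_STYPE_PAD,
              * NFIFOENTRY_PTYPE_RND) makes the h/w generate tfm->ivsize
              * random bytes; they are parked in Math1, later patched into
              * the descriptor and copied to the output.
              */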
1528         geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1529                 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1530                 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
1531         append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1532                             LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1533         append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1534         /* Move generated IV to Math1 register */
1535         append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
1536                     (tfm->ivsize << MOVE_LEN_SHIFT));
1537         append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1538
1539         /* Overwrite blank immediate AES-GMAC IV data */
1540         write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1541                                    (tfm->ivsize << MOVE_LEN_SHIFT));
1542
1543         /* Overwrite blank immediate AAD data */
1544         write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1545                                     (tfm->ivsize << MOVE_LEN_SHIFT));
1546
1547         /* Copy generated IV to OFIFO */
1548         append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
1549                     (tfm->ivsize << MOVE_LEN_SHIFT));
1550
1551         /* Class 1 operation */
1552         append_operation(desc, ctx->class1_alg_type |
1553                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1554
1555         /* ivsize + cryptlen = seqoutlen - authsize */
1556         append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
1557
1558         /* assoclen = seqinlen - (ivsize + cryptlen) */
1559         append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
1560
1561         /* Will write ivsize + cryptlen */
1562         append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
1563
1564         /*
1565          * MOVE_LEN opcode is not available in all SEC HW revisions,
1566          * so we work around it by self-patching the descriptor
1567          * buffer at run time.
1568          */
1569         read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1570                                     (0x6 << MOVE_LEN_SHIFT));
1571         write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1572                                      (0x8 << MOVE_LEN_SHIFT));
1573
1574         /* Read Salt and AES-GMAC generated IV */
1575         append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1576                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
1577         /* Append Salt */
1578         append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1579         set_move_tgt_here(desc, write_iv_cmd);
1580         /* Blank commands. Will be overwritten by AES-GMAC generated IV. */
1581         append_cmd(desc, 0x00000000);
1582         append_cmd(desc, 0x00000000);
1583         /* End of blank commands */
1584
1585         /* No need to reload the IV; skip over it in the input */
1586         append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
1587
1588         /* Read assoc data */
1589         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1590                              FIFOLD_TYPE_AAD);
1591
1592         /* Will read cryptlen */
1593         append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1594
1595         /* Authenticate AES-GMAC IV  */
1596         append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1597                    FIFOLD_TYPE_AAD | tfm->ivsize);
1598         set_move_tgt_here(desc, write_aad_cmd);
1599         /* Blank commands. Will be overwritten by AES-GMAC IV. */
1600         append_cmd(desc, 0x00000000);
1601         append_cmd(desc, 0x00000000);
1602         /* End of blank commands */
1603
1604         /* Read and write cryptlen bytes */
1605         aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
1606
1607         set_move_tgt_here(desc, read_move_cmd);
1608         set_move_tgt_here(desc, write_move_cmd);
1609         append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1610         /* Move payload data to OFIFO */
1611         append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1612
1613         /* Write ICV */
1614         append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1615                          LDST_SRCDST_BYTE_CONTEXT);
1616
1617         ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1618                                                  desc_bytes(desc),
1619                                                  DMA_TO_DEVICE);
1620         if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1621                 dev_err(jrdev, "unable to map shared descriptor\n");
1622                 return -ENOMEM;
1623         }
1624 #ifdef DEBUG
1625         print_hex_dump(KERN_ERR,
1626                        "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
1627                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
1628                        desc_bytes(desc), 1);
1629 #endif
1630
1631         return 0;
1632 }
1633
1634 static int rfc4543_setauthsize(struct crypto_aead *authenc,
1635                                unsigned int authsize)
1636 {
1637         struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1638
1639         ctx->authsize = authsize;
1640         rfc4543_set_sh_desc(authenc);
1641
1642         return 0;
1643 }
1644
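     /*
      * Generate the authentication split key (the pair of inner/outer hash
      * states the MDHA derives from the raw auth key) into ctx->key.
      */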
1645 static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
1646                               u32 authkeylen)
1647 {
1648         return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
1649                                ctx->split_key_pad_len, key_in, authkeylen,
1650                                ctx->alg_op);
1651 }
1652
1653 static int aead_setkey(struct crypto_aead *aead,
1654                                const u8 *key, unsigned int keylen)
1655 {
1656         /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
1657         static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
1658         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1659         struct device *jrdev = ctx->jrdev;
1660         struct crypto_authenc_keys keys;
1661         int ret = 0;
1662
1663         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1664                 goto badkey;
1665
1666         /* Pick class 2 key length from algorithm submask */
1667         ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1668                                       OP_ALG_ALGSEL_SHIFT] * 2;
1669         ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
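             /* i.e. the ipad/opad hash-state pair, padded to a 16-byte boundary */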
1670
1671         if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1672                 goto badkey;
1673
1674 #ifdef DEBUG
1675         printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
1676                keys.authkeylen + keys.enckeylen, keys.enckeylen,
1677                keys.authkeylen);
1678         printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
1679                ctx->split_key_len, ctx->split_key_pad_len);
1680         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1681                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1682 #endif
1683
1684         ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
1685         if (ret)
1686                 goto badkey;
1688
1689         /* append the encryption key to the auth split key */
1690         memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
1691
1692         ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
1693                                       keys.enckeylen, DMA_TO_DEVICE);
1694         if (dma_mapping_error(jrdev, ctx->key_dma)) {
1695                 dev_err(jrdev, "unable to map key i/o memory\n");
1696                 return -ENOMEM;
1697         }
1698 #ifdef DEBUG
1699         print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
1700                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
1701                        ctx->split_key_pad_len + keys.enckeylen, 1);
1702 #endif
1703
1704         ctx->enckeylen = keys.enckeylen;
1705
1706         ret = aead_set_sh_desc(aead);
1707         if (ret) {
1708                 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
1709                                  keys.enckeylen, DMA_TO_DEVICE);
1710         }
1711
1712         return ret;
1713 badkey:
1714         crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1715         return -EINVAL;
1716 }
1717
1718 static int gcm_setkey(struct crypto_aead *aead,
1719                       const u8 *key, unsigned int keylen)
1720 {
1721         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1722         struct device *jrdev = ctx->jrdev;
1723         int ret = 0;
1724
1725 #ifdef DEBUG
1726         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1727                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1728 #endif
1729
1730         memcpy(ctx->key, key, keylen);
1731         ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1732                                       DMA_TO_DEVICE);
1733         if (dma_mapping_error(jrdev, ctx->key_dma)) {
1734                 dev_err(jrdev, "unable to map key i/o memory\n");
1735                 return -ENOMEM;
1736         }
1737         ctx->enckeylen = keylen;
1738
1739         ret = gcm_set_sh_desc(aead);
1740         if (ret) {
1741                 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1742                                  DMA_TO_DEVICE);
1743         }
1744
1745         return ret;
1746 }
1747
1748 static int rfc4106_setkey(struct crypto_aead *aead,
1749                           const u8 *key, unsigned int keylen)
1750 {
1751         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1752         struct device *jrdev = ctx->jrdev;
1753         int ret = 0;
1754
1755         if (keylen < 4)
1756                 return -EINVAL;
1757
1758 #ifdef DEBUG
1759         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1760                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1761 #endif
1762
1763         memcpy(ctx->key, key, keylen);
1764
1765         /*
1766          * The last four bytes of the key material are used as the salt value
1767          * in the nonce. Update the AES key length.
1768          */
1769         ctx->enckeylen = keylen - 4;
1770
1771         ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1772                                       DMA_TO_DEVICE);
1773         if (dma_mapping_error(jrdev, ctx->key_dma)) {
1774                 dev_err(jrdev, "unable to map key i/o memory\n");
1775                 return -ENOMEM;
1776         }
1777
1778         ret = rfc4106_set_sh_desc(aead);
1779         if (ret) {
1780                 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1781                                  DMA_TO_DEVICE);
1782         }
1783
1784         return ret;
1785 }
1786
1787 static int rfc4543_setkey(struct crypto_aead *aead,
1788                           const u8 *key, unsigned int keylen)
1789 {
1790         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1791         struct device *jrdev = ctx->jrdev;
1792         int ret = 0;
1793
1794         if (keylen < 4)
1795                 return -EINVAL;
1796
1797 #ifdef DEBUG
1798         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1799                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1800 #endif
1801
1802         memcpy(ctx->key, key, keylen);
1803
1804         /*
1805          * The last four bytes of the key material are used as the salt value
1806          * in the nonce. Update the AES key length.
1807          */
1808         ctx->enckeylen = keylen - 4;
1809
1810         ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1811                                       DMA_TO_DEVICE);
1812         if (dma_mapping_error(jrdev, ctx->key_dma)) {
1813                 dev_err(jrdev, "unable to map key i/o memory\n");
1814                 return -ENOMEM;
1815         }
1816
1817         ret = rfc4543_set_sh_desc(aead);
1818         if (ret) {
1819                 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1820                                  DMA_TO_DEVICE);
1821         }
1822
1823         return ret;
1824 }
1825
1826 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1827                              const u8 *key, unsigned int keylen)
1828 {
1829         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1830         struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
1831         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
1832         const char *alg_name = crypto_tfm_alg_name(tfm);
1833         struct device *jrdev = ctx->jrdev;
1834         int ret = 0;
1835         u32 *key_jump_cmd;
1836         u32 *desc;
1837         u32 *nonce;
1838         u32 geniv;
1839         u32 ctx1_iv_off = 0;
1840         const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
1841                                OP_ALG_AAI_CTR_MOD128);
1842         const bool is_rfc3686 = (ctr_mode &&
1843                                  (strstr(alg_name, "rfc3686") != NULL));
1844
1845 #ifdef DEBUG
1846         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1847                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1848 #endif
1849         /*
1850          * AES-CTR needs to load IV in CONTEXT1 reg
1851          * at an offset of 128 bits (16 bytes):
1852          * CONTEXT1[255:128] = IV
1853          */
1854         if (ctr_mode)
1855                 ctx1_iv_off = 16;
1856
1857         /*
1858          * RFC3686 specific:
1859          *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1860          *      | *key = {KEY, NONCE}
1861          */
1862         if (is_rfc3686) {
1863                 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1864                 keylen -= CTR_RFC3686_NONCE_SIZE;
1865         }
1866
1867         memcpy(ctx->key, key, keylen);
1868         ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1869                                       DMA_TO_DEVICE);
1870         if (dma_mapping_error(jrdev, ctx->key_dma)) {
1871                 dev_err(jrdev, "unable to map key i/o memory\n");
1872                 return -ENOMEM;
1873         }
1874         ctx->enckeylen = keylen;
1875
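             /*
              * Build three shared descriptors from this key: encrypt,
              * decrypt and givencrypt (encrypt with a h/w-generated IV).
              */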
1876         /* ablkcipher_encrypt shared descriptor */
1877         desc = ctx->sh_desc_enc;
1878         init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1879         /* Skip if already shared */
1880         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1881                                    JUMP_COND_SHRD);
1882
1883         /* Load class1 key only */
1884         append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1885                           ctx->enckeylen, CLASS_1 |
1886                           KEY_DEST_CLASS_REG);
1887
1888         /* Load nonce into CONTEXT1 reg */
1889         if (is_rfc3686) {
1890                 nonce = (u32 *)(key + keylen);
1891                 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1892                                     LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1893                 append_move(desc, MOVE_WAITCOMP |
1894                             MOVE_SRC_OUTFIFO |
1895                             MOVE_DEST_CLASS1CTX |
1896                             (16 << MOVE_OFFSET_SHIFT) |
1897                             (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1898         }
1899
1900         set_jump_tgt_here(desc, key_jump_cmd);
1901
1902         /* Load IV */
1903         append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1904                         LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1905
1906         /* Load counter (big-endian 1) into CONTEXT1 reg */
1907         if (is_rfc3686)
1908                 append_load_imm_u32(desc, cpu_to_be32(1), LDST_IMM |
1909                                     LDST_CLASS_1_CCB |
1910                                     LDST_SRCDST_BYTE_CONTEXT |
1911                                     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1912                                      LDST_OFFSET_SHIFT));
1913
1914         /* Load operation */
1915         append_operation(desc, ctx->class1_alg_type |
1916                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1917
1918         /* Perform operation */
1919         ablkcipher_append_src_dst(desc);
1920
1921         ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1922                                               desc_bytes(desc),
1923                                               DMA_TO_DEVICE);
1924         if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1925                 dev_err(jrdev, "unable to map shared descriptor\n");
1926                 return -ENOMEM;
1927         }
1928 #ifdef DEBUG
1929         print_hex_dump(KERN_ERR,
1930                        "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
1931                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
1932                        desc_bytes(desc), 1);
1933 #endif
1934         /* ablkcipher_decrypt shared descriptor */
1935         desc = ctx->sh_desc_dec;
1936
1937         init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1938         /* Skip if already shared */
1939         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1940                                    JUMP_COND_SHRD);
1941
1942         /* Load class1 key only */
1943         append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1944                           ctx->enckeylen, CLASS_1 |
1945                           KEY_DEST_CLASS_REG);
1946
1947         /* Load nonce into CONTEXT1 reg */
1948         if (is_rfc3686) {
1949                 nonce = (u32 *)(key + keylen);
1950                 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1951                                     LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1952                 append_move(desc, MOVE_WAITCOMP |
1953                             MOVE_SRC_OUTFIFO |
1954                             MOVE_DEST_CLASS1CTX |
1955                             (16 << MOVE_OFFSET_SHIFT) |
1956                             (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1957         }
1958
1959         set_jump_tgt_here(desc, key_jump_cmd);
1960
1961         /* Load IV */
1962         append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1963                         LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1964
1965         /* Load counter (big-endian 1) into CONTEXT1 reg */
1966         if (is_rfc3686)
1967                 append_load_imm_u32(desc, cpu_to_be32(1), LDST_IMM |
1968                                     LDST_CLASS_1_CCB |
1969                                     LDST_SRCDST_BYTE_CONTEXT |
1970                                     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1971                                      LDST_OFFSET_SHIFT));
1972
1973         /* Choose operation */
1974         if (ctr_mode)
1975                 append_operation(desc, ctx->class1_alg_type |
1976                                  OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1977         else
1978                 append_dec_op1(desc, ctx->class1_alg_type);
1979
1980         /* Perform operation */
1981         ablkcipher_append_src_dst(desc);
1982
1983         ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1984                                               desc_bytes(desc),
1985                                               DMA_TO_DEVICE);
1986         if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1987                 dev_err(jrdev, "unable to map shared descriptor\n");
1988                 return -ENOMEM;
1989         }
1990
1991 #ifdef DEBUG
1992         print_hex_dump(KERN_ERR,
1993                        "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
1994                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
1995                        desc_bytes(desc), 1);
1996 #endif
1997         /* ablkcipher_givencrypt shared descriptor */
1998         desc = ctx->sh_desc_givenc;
1999
2000         init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
2001         /* Skip if already shared */
2002         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2003                                    JUMP_COND_SHRD);
2004
2005         /* Load class1 key only */
2006         append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
2007                           ctx->enckeylen, CLASS_1 |
2008                           KEY_DEST_CLASS_REG);
2009
2010         /* Load Nonce into CONTEXT1 reg */
2011         if (is_rfc3686) {
2012                 nonce = (u32 *)(key + keylen);
2013                 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
2014                                     LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
2015                 append_move(desc, MOVE_WAITCOMP |
2016                             MOVE_SRC_OUTFIFO |
2017                             MOVE_DEST_CLASS1CTX |
2018                             (16 << MOVE_OFFSET_SHIFT) |
2019                             (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
2020         }
2021         set_jump_tgt_here(desc, key_jump_cmd);
2022
2023         /* Generate IV */
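             /*
              * A pad-engine info-FIFO entry (NFIFOENTRY_STYPE_PAD,
              * NFIFOENTRY_PTYPE_RND) makes the h/w generate crt->ivsize
              * random bytes into the DECO; they are then moved into the
              * class 1 context register at the IV offset.
              */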
2024         geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
2025                 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
2026                 NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
2027         append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
2028                             LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
2029         append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
2030         append_move(desc, MOVE_WAITCOMP |
2031                     MOVE_SRC_INFIFO |
2032                     MOVE_DEST_CLASS1CTX |
2033                     (crt->ivsize << MOVE_LEN_SHIFT) |
2034                     (ctx1_iv_off << MOVE_OFFSET_SHIFT));
2035         append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
2036
2037         /* Copy generated IV to memory */
2038         append_seq_store(desc, crt->ivsize,
2039                          LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
2040                          (ctx1_iv_off << LDST_OFFSET_SHIFT));
2041
2042         /* Load counter (big-endian 1) into CONTEXT1 reg, matching enc/dec */
2043         if (is_rfc3686)
2044                 append_load_imm_u32(desc, cpu_to_be32(1), LDST_IMM |
2045                                     LDST_CLASS_1_CCB |
2046                                     LDST_SRCDST_BYTE_CONTEXT |
2047                                     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
2048                                      LDST_OFFSET_SHIFT));
2049
2050         if (ctx1_iv_off)
2051                 append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
2052                             (1 << JUMP_OFFSET_SHIFT));
2053
2054         /* Load operation */
2055         append_operation(desc, ctx->class1_alg_type |
2056                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
2057
2058         /* Perform operation */
2059         ablkcipher_append_src_dst(desc);
2060
2061         ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
2062                                                  desc_bytes(desc),
2063                                                  DMA_TO_DEVICE);
2064         if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
2065                 dev_err(jrdev, "unable to map shared descriptor\n");
2066                 return -ENOMEM;
2067         }
2068 #ifdef DEBUG
2069         print_hex_dump(KERN_ERR,
2070                        "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
2071                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
2072                        desc_bytes(desc), 1);
2073 #endif
2074
2075         return ret;
2076 }
2077
2078 /*
2079  * aead_edesc - s/w-extended aead descriptor
2080  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
2081  * @assoc_chained: if associated data is chained
2082  * @src_nents: number of segments in input scatterlist
2083  * @src_chained: if source is chained
2084  * @dst_nents: number of segments in output scatterlist
2085  * @dst_chained: if destination is chained
2086  * @iv_dma: dma address of iv for checking continuity and link table
2087  * @sec4_sg_bytes: length of dma mapped sec4_sg space
2088  * @sec4_sg_dma: bus physical mapped address of h/w link table
2089  * @sec4_sg: pointer to h/w link table
2090  * @hw_desc: the h/w job descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE), followed by any referenced link tables
2091  */
2092 struct aead_edesc {
2093         int assoc_nents;
2094         bool assoc_chained;
2095         int src_nents;
2096         bool src_chained;
2097         int dst_nents;
2098         bool dst_chained;
2099         dma_addr_t iv_dma;
2100         int sec4_sg_bytes;
2101         dma_addr_t sec4_sg_dma;
2102         struct sec4_sg_entry *sec4_sg;
2103         u32 hw_desc[0];
2104 };
2105
2106 /*
2107  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
2108  * @src_nents: number of segments in input scatterlist
2109  * @src_chained: if source is chained
2110  * @dst_nents: number of segments in output scatterlist
2111  * @dst_chained: if destination is chained
2112  * @iv_dma: dma address of iv for checking continuity and link table
2113  * @sec4_sg_bytes: length of dma mapped sec4_sg space
2114  * @sec4_sg_dma: bus physical mapped address of h/w link table
2115  * @sec4_sg: pointer to h/w link table
2116  * @hw_desc: the h/w job descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE), followed by any referenced link tables
2117  */
2118 struct ablkcipher_edesc {
2119         int src_nents;
2120         bool src_chained;
2121         int dst_nents;
2122         bool dst_chained;
2123         dma_addr_t iv_dma;
2124         int sec4_sg_bytes;
2125         dma_addr_t sec4_sg_dma;
2126         struct sec4_sg_entry *sec4_sg;
2127         u32 hw_desc[0];
2128 };
2129
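     /*
      * Unmap everything mapped for a request: the source/destination
      * scatterlists (a single bidirectional mapping when they alias), plus
      * the IV and the sec4 S/G link table when present.
      */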
2130 static void caam_unmap(struct device *dev, struct scatterlist *src,
2131                        struct scatterlist *dst, int src_nents,
2132                        bool src_chained, int dst_nents, bool dst_chained,
2133                        dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
2134                        int sec4_sg_bytes)
2135 {
2136         if (dst != src) {
2137                 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
2138                                      src_chained);
2139                 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
2140                                      dst_chained);
2141         } else {
2142                 dma_unmap_sg_chained(dev, src, src_nents ? : 1,
2143                                      DMA_BIDIRECTIONAL, src_chained);
2144         }
2145
2146         if (iv_dma)
2147                 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
2148         if (sec4_sg_bytes)
2149                 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
2150                                  DMA_TO_DEVICE);
2151 }
2152
2153 static void aead_unmap(struct device *dev,
2154                        struct aead_edesc *edesc,
2155                        struct aead_request *req)
2156 {
2157         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2158         int ivsize = crypto_aead_ivsize(aead);
2159
2160         dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
2161                              DMA_TO_DEVICE, edesc->assoc_chained);
2162
2163         caam_unmap(dev, req->src, req->dst,
2164                    edesc->src_nents, edesc->src_chained, edesc->dst_nents,
2165                    edesc->dst_chained, edesc->iv_dma, ivsize,
2166                    edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
2167 }
2168
2169 static void ablkcipher_unmap(struct device *dev,
2170                              struct ablkcipher_edesc *edesc,
2171                              struct ablkcipher_request *req)
2172 {
2173         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2174         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2175
2176         caam_unmap(dev, req->src, req->dst,
2177                    edesc->src_nents, edesc->src_chained, edesc->dst_nents,
2178                    edesc->dst_chained, edesc->iv_dma, ivsize,
2179                    edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
2180 }
2181
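     /*
      * Job completion callback for AEAD encryption: decode any h/w error
      * status, unmap the request's DMA resources and complete the request.
      */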
2182 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
2183                                    void *context)
2184 {
2185         struct aead_request *req = context;
2186         struct aead_edesc *edesc;
2187 #ifdef DEBUG
2188         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2189         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2190         int ivsize = crypto_aead_ivsize(aead);
2191
2192         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2193 #endif
2194
2195         edesc = (struct aead_edesc *)((char *)desc -
2196                  offsetof(struct aead_edesc, hw_desc));
2197
2198         if (err)
2199                 caam_jr_strstatus(jrdev, err);
2200
2201         aead_unmap(jrdev, edesc, req);
2202
2203 #ifdef DEBUG
2204         print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
2205                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
2206                        req->assoclen, 1);
2207         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
2208                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
2209                        edesc->src_nents ? 100 : ivsize, 1);
2210         print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
2211                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2212                        edesc->src_nents ? 100 : req->cryptlen +
2213                        ctx->authsize + 4, 1);
2214 #endif
2215
2216         kfree(edesc);
2217
2218         aead_request_complete(req, err);
2219 }
2220
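     /*
      * Job completion callback for AEAD decryption: as above, but a h/w
      * ICV-check failure is reported to the caller as -EBADMSG.
      */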
2221 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2222                                    void *context)
2223 {
2224         struct aead_request *req = context;
2225         struct aead_edesc *edesc;
2226 #ifdef DEBUG
2227         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2228         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2229         int ivsize = crypto_aead_ivsize(aead);
2230
2231         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2232 #endif
2233
2234         edesc = (struct aead_edesc *)((char *)desc -
2235                  offsetof(struct aead_edesc, hw_desc));
2236
2237 #ifdef DEBUG
2238         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
2239                        DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
2240                        ivsize, 1);
2241         print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
2242                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
2243                        req->cryptlen - ctx->authsize, 1);
2244 #endif
2245
2246         if (err)
2247                 caam_jr_strstatus(jrdev, err);
2248
2249         aead_unmap(jrdev, edesc, req);
2250
2251         /*
2252          * If the h/w authentication (ICV) check failed, report -EBADMSG
2253          */
2254         if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
2255                 err = -EBADMSG;
2256
2257 #ifdef DEBUG
2258         print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
2259                        DUMP_PREFIX_ADDRESS, 16, 4,
2260                        ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
2261                        sizeof(struct iphdr) + req->assoclen +
2262                        ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
2263                        ctx->authsize + 36, 1);
2264         if (!err && edesc->sec4_sg_bytes) {
2265                 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
2266                 print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
2267                                DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
2268                         sg->length + ctx->authsize + 16, 1);
2269         }
2270 #endif
2271
2272         kfree(edesc);
2273
2274         aead_request_complete(req, err);
2275 }
2276
2277 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
2278                                    void *context)
2279 {
2280         struct ablkcipher_request *req = context;
2281         struct ablkcipher_edesc *edesc;
2282 #ifdef DEBUG
2283         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2284         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2285
2286         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2287 #endif
2288
2289         edesc = (struct ablkcipher_edesc *)((char *)desc -
2290                  offsetof(struct ablkcipher_edesc, hw_desc));
2291
2292         if (err)
2293                 caam_jr_strstatus(jrdev, err);
2294
2295 #ifdef DEBUG
2296         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
2297                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2298                        edesc->src_nents > 1 ? 100 : ivsize, 1);
2299         print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
2300                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2301                        edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2302 #endif
2303
2304         ablkcipher_unmap(jrdev, edesc, req);
2305         kfree(edesc);
2306
2307         ablkcipher_request_complete(req, err);
2308 }
2309
2310 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2311                                     void *context)
2312 {
2313         struct ablkcipher_request *req = context;
2314         struct ablkcipher_edesc *edesc;
2315 #ifdef DEBUG
2316         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2317         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2318
2319         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2320 #endif
2321
2322         edesc = (struct ablkcipher_edesc *)((char *)desc -
2323                  offsetof(struct ablkcipher_edesc, hw_desc));
2324         if (err)
2325                 caam_jr_strstatus(jrdev, err);
2326
2327 #ifdef DEBUG
2328         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
2329                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2330                        ivsize, 1);
2331         print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
2332                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2333                        edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2334 #endif
2335
2336         ablkcipher_unmap(jrdev, edesc, req);
2337         kfree(edesc);
2338
2339         ablkcipher_request_complete(req, err);
2340 }
2341
2342 /*
2343  * Fill in aead job descriptor
2344  */
2345 static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
2346                           struct aead_edesc *edesc,
2347                           struct aead_request *req,
2348                           bool all_contig, bool encrypt)
2349 {
2350         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2351         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2352         int ivsize = crypto_aead_ivsize(aead);
2353         int authsize = ctx->authsize;
2354         u32 *desc = edesc->hw_desc;
2355         u32 out_options = 0, in_options;
2356         dma_addr_t dst_dma, src_dma;
2357         int len, sec4_sg_index = 0;
2358         bool is_gcm = false;
2359
2360 #ifdef DEBUG
2361         debug("assoclen %d cryptlen %d authsize %d\n",
2362               req->assoclen, req->cryptlen, authsize);
2363         print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
2364                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
2365                        req->assoclen, 1);
2366         print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2367                        DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
2368                        edesc->src_nents ? 100 : ivsize, 1);
2369         print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
2370                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2371                        edesc->src_nents ? 100 : req->cryptlen, 1);
2372         print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
2373                        DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
2374                        desc_bytes(sh_desc), 1);
2375 #endif
2376
2377         if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2378               OP_ALG_ALGSEL_AES) &&
2379             ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2380                 is_gcm = true;
2381
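             /*
              * HDR_REVERSE runs this job descriptor (the sequence pointer
              * set-up below) before the deferred shared descriptor executes.
              */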
2382         len = desc_len(sh_desc);
2383         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2384
2385         if (all_contig) {
2386                 if (is_gcm)
2387                         src_dma = edesc->iv_dma;
2388                 else
2389                         src_dma = sg_dma_address(req->assoc);
2390                 in_options = 0;
2391         } else {
2392                 src_dma = edesc->sec4_sg_dma;
2393                 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
2394                                  (edesc->src_nents ? : 1);
2395                 in_options = LDST_SGF;
2396         }
2397
2398         append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
2399                           in_options);
2400
2401         if (likely(req->src == req->dst)) {
2402                 if (all_contig) {
2403                         dst_dma = sg_dma_address(req->src);
2404                 } else {
2405                         dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
2406                                   ((edesc->assoc_nents ? : 1) + 1);
2407                         out_options = LDST_SGF;
2408                 }
2409         } else {
2410                 if (!edesc->dst_nents) {
2411                         dst_dma = sg_dma_address(req->dst);
2412                 } else {
2413                         dst_dma = edesc->sec4_sg_dma +
2414                                   sec4_sg_index *
2415                                   sizeof(struct sec4_sg_entry);
2416                         out_options = LDST_SGF;
2417                 }
2418         }
2419         if (encrypt)
2420                 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
2421                                    out_options);
2422         else
2423                 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
2424                                    out_options);
2425 }
2426
2427 /*
2428  * Fill in aead givencrypt job descriptor
2429  */
2430 static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
2431                               struct aead_edesc *edesc,
2432                               struct aead_request *req,
2433                               int contig)
2434 {
2435         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2436         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2437         int ivsize = crypto_aead_ivsize(aead);
2438         int authsize = ctx->authsize;
2439         u32 *desc = edesc->hw_desc;
2440         u32 out_options = 0, in_options;
2441         dma_addr_t dst_dma, src_dma;
2442         int len, sec4_sg_index = 0;
2443         bool is_gcm = false;
2444
2445 #ifdef DEBUG
2446         debug("assoclen %d cryptlen %d authsize %d\n",
2447               req->assoclen, req->cryptlen, authsize);
2448         print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
2449                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
2450                        req->assoclen, 1);
2451         print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2452                        DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
2453         print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
2454                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2455                        edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
2456         print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
2457                        DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
2458                        desc_bytes(sh_desc), 1);
2459 #endif
2460
2461         if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2462               OP_ALG_ALGSEL_AES) &&
2463             ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2464                 is_gcm = true;
2465
2466         len = desc_len(sh_desc);
2467         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2468
2469         if (contig & GIV_SRC_CONTIG) {
2470                 if (is_gcm)
2471                         src_dma = edesc->iv_dma;
2472                 else
2473                         src_dma = sg_dma_address(req->assoc);
2474                 in_options = 0;
2475         } else {
2476                 src_dma = edesc->sec4_sg_dma;
2477                 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
2478                 in_options = LDST_SGF;
2479         }
2480         append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
2481                           in_options);
2482
2483         if (contig & GIV_DST_CONTIG) {
2484                 dst_dma = edesc->iv_dma;
2485         } else {
2486                 if (likely(req->src == req->dst)) {
2487                         dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
2488                                   (edesc->assoc_nents +
2489                                    (is_gcm ? 1 + edesc->src_nents : 0));
2490                         out_options = LDST_SGF;
2491                 } else {
2492                         dst_dma = edesc->sec4_sg_dma +
2493                                   sec4_sg_index *
2494                                   sizeof(struct sec4_sg_entry);
2495                         out_options = LDST_SGF;
2496                 }
2497         }
2498
2499         append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
2500                            out_options);
2501 }
2502
2503 /*
2504  * Fill in ablkcipher job descriptor
2505  */
2506 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2507                                 struct ablkcipher_edesc *edesc,
2508                                 struct ablkcipher_request *req,
2509                                 bool iv_contig)
2510 {
2511         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2512         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2513         u32 *desc = edesc->hw_desc;
2514         u32 out_options = 0, in_options;
2515         dma_addr_t dst_dma, src_dma;
2516         int len, sec4_sg_index = 0;
2517
2518 #ifdef DEBUG
2519         print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2520                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2521                        ivsize, 1);
2522         print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
2523                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2524                        edesc->src_nents ? 100 : req->nbytes, 1);
2525 #endif
2526
2527         len = desc_len(sh_desc);
2528         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2529
2530         if (iv_contig) {
2531                 src_dma = edesc->iv_dma;
2532                 in_options = 0;
2533         } else {
2534                 src_dma = edesc->sec4_sg_dma;
2535                 sec4_sg_index += edesc->src_nents + 1;
2536                 in_options = LDST_SGF;
2537         }
2538         append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2539
2540         if (likely(req->src == req->dst)) {
2541                 if (!edesc->src_nents && iv_contig) {
2542                         dst_dma = sg_dma_address(req->src);
2543                 } else {
2544                         dst_dma = edesc->sec4_sg_dma +
2545                                 sizeof(struct sec4_sg_entry);
2546                         out_options = LDST_SGF;
2547                 }
2548         } else {
2549                 if (!edesc->dst_nents) {
2550                         dst_dma = sg_dma_address(req->dst);
2551                 } else {
2552                         dst_dma = edesc->sec4_sg_dma +
2553                                 sec4_sg_index * sizeof(struct sec4_sg_entry);
2554                         out_options = LDST_SGF;
2555                 }
2556         }
2557         append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2558 }

/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (!edesc->src_nents) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}
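
/*
 * In contrast to init_ablkcipher_job(), the givencrypt job reads only
 * req->nbytes input bytes and writes ivsize + req->nbytes bytes, so the
 * generated IV lands immediately in front of the ciphertext.
 */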

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;
	bool is_gcm = false;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
					(encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
					(encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;
	/*
	 * Check if the data are contiguous.
	 * GCM expected input sequence: IV, AAD, text
	 * all others expected input sequence: AAD, IV, text
	 */
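	/*
	 * i.e. when everything is contiguous the DMA view is
	 *   GCM:        [ IV ][ AAD ][ text ]
	 *   all others: [ AAD ][ IV ][ text ]
	 * (a sketch of the checks below, not an extra constraint)
	 */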
	if (is_gcm)
		all_contig = (!assoc_nents &&
			      iv_dma + ivsize == sg_dma_address(req->assoc) &&
			      !src_nents && sg_dma_address(req->assoc) +
			      req->assoclen == sg_dma_address(req->src));
	else
		all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
			      req->assoclen == iv_dma && !src_nents &&
			      iv_dma + ivsize == sg_dma_address(req->src));
	if (!all_contig) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		if (!is_gcm) {
			sg_to_sec4_sg(req->assoc,
				      assoc_nents,
				      edesc->sec4_sg +
				      sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;

		if (is_gcm) {
			sg_to_sec4_sg(req->assoc,
				      assoc_nents,
				      edesc->sec4_sg +
				      sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		sg_to_sec4_sg_last(req->src,
				   src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);	/* don't leak the just-allocated edesc */
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}
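
/*
 * Rough layout of the single allocation made by aead_edesc_alloc() (a
 * sketch; the sizes are those passed to kmalloc() above):
 *
 *	+-------------------+-------------------+-------------------+
 *	| struct aead_edesc | hw job descriptor | sec4 S/G table    |
 *	+-------------------+-------------------+-------------------+
 *	                    <--- desc_bytes ---> <- sec4_sg_bytes ->
 */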

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
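
/*
 * Caller-side sketch (illustrative only, not part of this driver): the
 * AEAD algorithms registered below are reached through the generic
 * crypto API, roughly:
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *areq;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);   (rtattr-encoded authenc key)
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	areq = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_crypt(areq, src, dst, cryptlen, iv);
 *	aead_request_set_assoc(areq, assoc, assoclen);
 *	crypto_aead_encrypt(areq);              (ends up in aead_encrypt())
 */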

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	bool is_gcm = false;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
				     &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;
	/*
	 * Check if the data are contiguous.
	 * GCM expected input sequence: IV, AAD, text
	 * all others expected input sequence: AAD, IV, text
	 */
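
	/*
	 * GIV_SRC_CONTIG: the whole input (AAD, IV, text) is reachable as
	 * one linear DMA region; GIV_DST_CONTIG: the generated IV sits
	 * immediately in front of the destination buffer, so IV and
	 * ciphertext can be written through a single contiguous SEQ OUT.
	 */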
	if (is_gcm) {
		if (assoc_nents || iv_dma + ivsize !=
		    sg_dma_address(req->assoc) || src_nents ||
		    sg_dma_address(req->assoc) + req->assoclen !=
		    sg_dma_address(req->src))
			contig &= ~GIV_SRC_CONTIG;
	} else {
		if (assoc_nents ||
		    sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
		    src_nents || iv_dma + ivsize != sg_dma_address(req->src))
			contig &= ~GIV_SRC_CONTIG;
	}

	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;

	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (req->src == req->dst &&
		    (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
			contig &= ~GIV_DST_CONTIG;
	}

	/*
	 * Add new sg entries for GCM output sequence.
	 * Expected output sequence: IV, encrypted text.
	 */
	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
		sec4_sg_len += 1 + src_nents;

	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1 + dst_nents;
	}

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*contig_ptr = contig;

	sec4_sg_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		if (!is_gcm) {
			sg_to_sec4_sg(req->assoc, assoc_nents,
				      edesc->sec4_sg + sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;

		if (is_gcm) {
			sg_to_sec4_sg(req->assoc, assoc_nents,
				      edesc->sec4_sg + sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}

	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);	/* don't leak the just-allocated edesc */
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}

static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);

	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
{
	return aead_encrypt(&areq->areq);
}
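
/*
 * This shortcut works because cipher_null transports a zero-length IV
 * (NULL_IV_SIZE), so there is no IV to generate and givencrypt reduces
 * to a plain encrypt.
 */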

/*
 * allocate and map the ablkcipher extended descriptor
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
			edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);	/* don't leak the just-allocated edesc */
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
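
/*
 * When the IV is not contiguous with the source, the sec4 S/G table
 * built above is laid out as (a sketch, for the req->src != req->dst
 * case):
 *
 *	[ IV ][ src segs ... (last) ][ dst segs ... (last) ]
 *
 * and init_ablkcipher_job() points SEQ IN at entry 0 and SEQ OUT at
 * entry 1 + src_nents.
 */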

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
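
/*
 * Caller-side sketch (illustrative only): the same path is exercised
 * through the ablkcipher API, e.g. for one of the cipher modes this
 * driver registers:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);   (lands in ablkcipher_encrypt())
 */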

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes,
				bool *iv_contig_out)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
		iv_contig = true;
	else
		dst_nents = dst_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(*edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}

	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);	/* don't leak the just-allocated edesc */
		return ERR_PTR(-ENOMEM);
	}
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
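
/*
 * For givencrypt the table is laid out as (a sketch):
 *
 *	[ src segs ... (last) ][ IV ][ dst segs ... (last) ]
 *
 * source entries first, then the IV entry chained in front of the
 * destination, so the generated IV is written out together with the
 * ciphertext.
 */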

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
				edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};
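
/*
 * In the templates below, class1_alg_type selects the class 1 (cipher)
 * CHA and its mode, class2_alg_type the class 2 (authentication) CHA,
 * and alg_op names the HMAC operation used when deriving the split key
 * in aead_setkey().
 */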

static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),ecb(cipher_null))",
		.driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(aes))",
		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des))",
		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
3862                         },
3863                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3864                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3865                                    OP_ALG_AAI_HMAC_PRECOMP,
3866                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3867         },
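        /* authenc with rfc3686(ctr(aes)); CTR streams, hence blocksize 1 */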
        {
                .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .givencrypt = aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
                .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .givencrypt = aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
                .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .givencrypt = aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
                .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                   OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .givencrypt = aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
                .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                   OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .givencrypt = aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
                .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                   OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .givencrypt = aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
                .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                   OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
        },
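        /* IPsec ESP flavors of AES-GCM: RFC4106 (GCM) and RFC4543 (GMAC) */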
        {
                .name = "rfc4106(gcm(aes))",
                .driver_name = "rfc4106-gcm-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = rfc4106_setkey,
                        .setauthsize = rfc4106_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .givencrypt = aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = 8,
                        .maxauthsize = AES_BLOCK_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
        },
        {
                .name = "rfc4543(gcm(aes))",
                .driver_name = "rfc4543-gcm-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = rfc4543_setkey,
                        .setauthsize = rfc4543_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .givencrypt = aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = 8,
                        .maxauthsize = AES_BLOCK_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
        },
        /* Galois Counter Mode */
        {
                .name = "gcm(aes)",
                .driver_name = "gcm-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = gcm_setkey,
                        .setauthsize = gcm_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .givencrypt = NULL,
                        .geniv = "<built-in>",
                        .ivsize = 12,
                        .maxauthsize = AES_BLOCK_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
        },
        /* ablkcipher descriptor */
        {
                .name = "cbc(aes)",
                .driver_name = "cbc-aes-caam",
                .blocksize = AES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_GIVCIPHER,
                .template_ablkcipher = {
                        .setkey = ablkcipher_setkey,
                        .encrypt = ablkcipher_encrypt,
                        .decrypt = ablkcipher_decrypt,
                        .givencrypt = ablkcipher_givencrypt,
                        .geniv = "<built-in>",
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
        },
        {
                .name = "cbc(des3_ede)",
                .driver_name = "cbc-3des-caam",
                .blocksize = DES3_EDE_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_GIVCIPHER,
                .template_ablkcipher = {
                        .setkey = ablkcipher_setkey,
                        .encrypt = ablkcipher_encrypt,
                        .decrypt = ablkcipher_decrypt,
                        .givencrypt = ablkcipher_givencrypt,
                        .geniv = "<built-in>",
                        .min_keysize = DES3_EDE_KEY_SIZE,
                        .max_keysize = DES3_EDE_KEY_SIZE,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
        },
        {
                .name = "cbc(des)",
                .driver_name = "cbc-des-caam",
                .blocksize = DES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_GIVCIPHER,
                .template_ablkcipher = {
                        .setkey = ablkcipher_setkey,
                        .encrypt = ablkcipher_encrypt,
                        .decrypt = ablkcipher_decrypt,
                        .givencrypt = ablkcipher_givencrypt,
                        .geniv = "<built-in>",
                        .min_keysize = DES_KEY_SIZE,
                        .max_keysize = DES_KEY_SIZE,
                        .ivsize = DES_BLOCK_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
        },
        {
                .name = "ctr(aes)",
                .driver_name = "ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
                .template_ablkcipher = {
                        .setkey = ablkcipher_setkey,
                        .encrypt = ablkcipher_encrypt,
                        .decrypt = ablkcipher_decrypt,
                        .geniv = "chainiv",
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
        },
        {
                .name = "rfc3686(ctr(aes))",
                .driver_name = "rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_GIVCIPHER,
                .template_ablkcipher = {
                        .setkey = ablkcipher_setkey,
                        .encrypt = ablkcipher_encrypt,
                        .decrypt = ablkcipher_decrypt,
                        .givencrypt = ablkcipher_givencrypt,
                        .geniv = "<built-in>",
                        .min_keysize = AES_MIN_KEY_SIZE +
                                       CTR_RFC3686_NONCE_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE +
                                       CTR_RFC3686_NONCE_SIZE,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
        }
};

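/*
 * list node wrapping the generic crypto_alg, so algorithms built from
 * the templates above can be tracked and unregistered on module exit
 */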
struct caam_crypto_alg {
        struct list_head entry;
        int class1_alg_type;
        int class2_alg_type;
        int alg_op;
        struct crypto_alg crypto_alg;
};

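/*
 * per-transform init: acquire a job ring for this tfm and cache the
 * OPERATION header templates; the shared descriptors themselves are
 * built later, at setkey time
 */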
static int caam_cra_init(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct caam_crypto_alg *caam_alg =
                 container_of(alg, struct caam_crypto_alg, crypto_alg);
        struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->jrdev = caam_jr_alloc();
        if (IS_ERR(ctx->jrdev)) {
                pr_err("Job Ring Device allocation for transform failed\n");
                return PTR_ERR(ctx->jrdev);
        }

        /* copy descriptor header template value */
        ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
        ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
        ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

        return 0;
}

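/*
 * per-transform teardown: undo the DMA mappings created at setkey time
 * (shared descriptors and key material), then release the job ring
 */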
static void caam_cra_exit(struct crypto_tfm *tfm)
{
        struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

        if (ctx->sh_desc_enc_dma &&
            !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
                dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
                                 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
        if (ctx->sh_desc_dec_dma &&
            !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
                dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
                                 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
        if (ctx->sh_desc_givenc_dma &&
            !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
                dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
                                 desc_bytes(ctx->sh_desc_givenc),
                                 DMA_TO_DEVICE);
        if (ctx->key_dma &&
            !dma_mapping_error(ctx->jrdev, ctx->key_dma))
                dma_unmap_single(ctx->jrdev, ctx->key_dma,
                                 ctx->enckeylen + ctx->split_key_pad_len,
                                 DMA_TO_DEVICE);

        caam_jr_free(ctx->jrdev);
}

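/* module unload: unregister and free everything left on alg_list */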
static void __exit caam_algapi_exit(void)
{
        struct caam_crypto_alg *t_alg, *n;

        if (!alg_list.next)
                return;

        list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
                crypto_unregister_alg(&t_alg->crypto_alg);
                list_del(&t_alg->entry);
                kfree(t_alg);
        }
}

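/*
 * build a crypto_alg from one template entry: fill in the common fields
 * and hook up the type-specific (givcipher/ablkcipher/aead) template
 */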
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
                                              *template)
{
        struct caam_crypto_alg *t_alg;
        struct crypto_alg *alg;

        t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
        if (!t_alg) {
                pr_err("failed to allocate t_alg\n");
                return ERR_PTR(-ENOMEM);
        }

        alg = &t_alg->crypto_alg;

        snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
        snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 template->driver_name);
        alg->cra_module = THIS_MODULE;
        alg->cra_init = caam_cra_init;
        alg->cra_exit = caam_cra_exit;
        alg->cra_priority = CAAM_CRA_PRIORITY;
        alg->cra_blocksize = template->blocksize;
        alg->cra_alignmask = 0;
        alg->cra_ctxsize = sizeof(struct caam_ctx);
        alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
                         template->type;
        switch (template->type) {
        case CRYPTO_ALG_TYPE_GIVCIPHER:
                alg->cra_type = &crypto_givcipher_type;
                alg->cra_ablkcipher = template->template_ablkcipher;
                break;
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                alg->cra_type = &crypto_ablkcipher_type;
                alg->cra_ablkcipher = template->template_ablkcipher;
                break;
        case CRYPTO_ALG_TYPE_AEAD:
                alg->cra_type = &crypto_aead_type;
                alg->cra_aead = template->template_aead;
                break;
        }

        t_alg->class1_alg_type = template->class1_alg_type;
        t_alg->class2_alg_type = template->class2_alg_type;
        t_alg->alg_op = template->alg_op;

        return t_alg;
}

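/*
 * module init: locate the CAAM controller, verify it probed successfully,
 * then register one algorithm per driver_algs[] template
 */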
static int __init caam_algapi_init(void)
{
        struct device_node *dev_node;
        struct platform_device *pdev;
        struct device *ctrldev;
        void *priv;
        int i = 0, err = 0;

        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
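        /* fall back for older device trees that used the "fsl,sec4.0" string */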
        if (!dev_node) {
                dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
                if (!dev_node)
                        return -ENODEV;
        }

        pdev = of_find_device_by_node(dev_node);
        if (!pdev) {
                of_node_put(dev_node);
                return -ENODEV;
        }

        ctrldev = &pdev->dev;
        priv = dev_get_drvdata(ctrldev);
        of_node_put(dev_node);

        /*
         * If priv is NULL, it's probably because the caam driver wasn't
         * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
         */
        if (!priv)
                return -ENODEV;

        INIT_LIST_HEAD(&alg_list);

        /* register crypto algorithms the device supports */
        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                /* TODO: check if h/w supports alg */
                struct caam_crypto_alg *t_alg;

                t_alg = caam_alg_alloc(&driver_algs[i]);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
                        pr_warn("%s alg allocation failed\n",
                                driver_algs[i].driver_name);
                        continue;
                }

                err = crypto_register_alg(&t_alg->crypto_alg);
                if (err) {
                        pr_warn("%s alg registration failed\n",
                                t_alg->crypto_alg.cra_driver_name);
                        kfree(t_alg);
                } else {
                        list_add_tail(&t_alg->entry, &alg_list);
                }
        }
        if (!list_empty(&alg_list))
                pr_info("caam algorithms registered in /proc/crypto\n");

        return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");