These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / arch / x86 / crypto / aesni-intel_glue.c
1 /*
2  * Support for Intel AES-NI instructions. This file contains glue
3  * code; the real AES implementation is in aesni-intel_asm.S.
4  *
5  * Copyright (C) 2008, Intel Corp.
6  *    Author: Huang Ying <ying.huang@intel.com>
7  *
8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9  * interface for 64-bit kernels.
10  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
11  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
12  *             Tadeusz Struk (tadeusz.struk@intel.com)
13  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
14  *    Copyright (c) 2010, Intel Corporation.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  */
21
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/crypto.h>
25 #include <linux/module.h>
26 #include <linux/err.h>
27 #include <crypto/algapi.h>
28 #include <crypto/aes.h>
29 #include <crypto/cryptd.h>
30 #include <crypto/ctr.h>
31 #include <crypto/b128ops.h>
32 #include <crypto/lrw.h>
33 #include <crypto/xts.h>
34 #include <asm/cpu_device_id.h>
35 #include <asm/fpu/api.h>
36 #include <asm/crypto/aes.h>
37 #include <crypto/ablk_helper.h>
38 #include <crypto/scatterwalk.h>
39 #include <crypto/internal/aead.h>
40 #include <linux/workqueue.h>
41 #include <linux/spinlock.h>
42 #ifdef CONFIG_X86_64
43 #include <asm/crypto/glue_helper.h>
44 #endif
45
46
47 #define AESNI_ALIGN     16
48 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
49 #define RFC4106_HASH_SUBKEY_SIZE 16
50
51 /* This data is stored at the end of the crypto_tfm struct.
52  * It is a per-"session" data storage location.
53  * This needs to be 16-byte aligned.
54  */
55 struct aesni_rfc4106_gcm_ctx {
56         u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
57         struct crypto_aes_ctx aes_key_expanded
58                 __attribute__ ((__aligned__(AESNI_ALIGN)));
59         u8 nonce[4];
60 };
61
62 struct aesni_gcm_set_hash_subkey_result {
63         int err;
64         struct completion completion;
65 };
66
67 struct aesni_hash_subkey_req_data {
68         u8 iv[16];
69         struct aesni_gcm_set_hash_subkey_result result;
70         struct scatterlist sg;
71 };
72
73 struct aesni_lrw_ctx {
74         struct lrw_table_ctx lrw_table;
75         u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
76 };
77
78 struct aesni_xts_ctx {
79         u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
80         u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
81 };
82
83 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
84                              unsigned int key_len);
85 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
86                           const u8 *in);
87 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
88                           const u8 *in);
89 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
90                               const u8 *in, unsigned int len);
91 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
92                               const u8 *in, unsigned int len);
93 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
94                               const u8 *in, unsigned int len, u8 *iv);
95 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
96                               const u8 *in, unsigned int len, u8 *iv);
97
98 int crypto_fpu_init(void);
99 void crypto_fpu_exit(void);
100
101 #define AVX_GEN2_OPTSIZE 640
102 #define AVX_GEN4_OPTSIZE 4096
103
104 #ifdef CONFIG_X86_64
105
106 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
107                               const u8 *in, unsigned int len, u8 *iv);
108 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
109                               const u8 *in, unsigned int len, u8 *iv);
110
111 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
112                                  const u8 *in, bool enc, u8 *iv);
113
114 /* asmlinkage void aesni_gcm_enc()
115  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
116  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
117  * const u8 *in, Plaintext input
118  * unsigned long plaintext_len, Length of data in bytes for encryption.
119  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
120  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
121  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
122  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
123  * const u8 *aad, Additional Authentication Data (AAD)
124  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
125  *          is going to be 8 or 12 bytes
126  * u8 *auth_tag, Authenticated Tag output.
127  * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
128  *          Valid values are 16 (most likely), 12 or 8.
129  */
130 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
131                         const u8 *in, unsigned long plaintext_len, u8 *iv,
132                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
133                         u8 *auth_tag, unsigned long auth_tag_len);
134
135 /* asmlinkage void aesni_gcm_dec()
136  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
137  * u8 *out, Plaintext output. Decrypt in-place is allowed.
138  * const u8 *in, Ciphertext input
139  * unsigned long ciphertext_len, Length of data in bytes for decryption.
140  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
141  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
142  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
143  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
144  * const u8 *aad, Additional Authentication Data (AAD)
145  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
146  * to be 8 or 12 bytes
147  * u8 *auth_tag, Authenticated Tag output.
148  * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
149  * Valid values are 16 (most likely), 12 or 8.
150  */
151 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
152                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
153                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
154                         u8 *auth_tag, unsigned long auth_tag_len);
155
156
157 #ifdef CONFIG_AS_AVX
158 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
159                 void *keys, u8 *out, unsigned int num_bytes);
160 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
161                 void *keys, u8 *out, unsigned int num_bytes);
162 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
163                 void *keys, u8 *out, unsigned int num_bytes);
164 /*
165  * asmlinkage void aesni_gcm_precomp_avx_gen2()
166  * gcm_data *my_ctx_data, context data
167  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
168  */
169 asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
170
171 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
172                         const u8 *in, unsigned long plaintext_len, u8 *iv,
173                         const u8 *aad, unsigned long aad_len,
174                         u8 *auth_tag, unsigned long auth_tag_len);
175
176 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
177                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
178                         const u8 *aad, unsigned long aad_len,
179                         u8 *auth_tag, unsigned long auth_tag_len);
180
181 static void aesni_gcm_enc_avx(void *ctx, u8 *out,
182                         const u8 *in, unsigned long plaintext_len, u8 *iv,
183                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
184                         u8 *auth_tag, unsigned long auth_tag_len)
185 {
186         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
187         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
188                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
189                                 aad_len, auth_tag, auth_tag_len);
190         } else {
191                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
192                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
193                                         aad_len, auth_tag, auth_tag_len);
194         }
195 }
196
197 static void aesni_gcm_dec_avx(void *ctx, u8 *out,
198                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
199                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
200                         u8 *auth_tag, unsigned long auth_tag_len)
201 {
202         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
203         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
204                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
205                                 aad_len, auth_tag, auth_tag_len);
206         } else {
207                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
208                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
209                                         aad_len, auth_tag, auth_tag_len);
210         }
211 }
212 #endif
213
214 #ifdef CONFIG_AS_AVX2
215 /*
216  * asmlinkage void aesni_gcm_precomp_avx_gen4()
217  * gcm_data *my_ctx_data, context data
218  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
219  */
220 asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
221
222 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
223                         const u8 *in, unsigned long plaintext_len, u8 *iv,
224                         const u8 *aad, unsigned long aad_len,
225                         u8 *auth_tag, unsigned long auth_tag_len);
226
227 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
228                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
229                         const u8 *aad, unsigned long aad_len,
230                         u8 *auth_tag, unsigned long auth_tag_len);
231
232 static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
233                         const u8 *in, unsigned long plaintext_len, u8 *iv,
234                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
235                         u8 *auth_tag, unsigned long auth_tag_len)
236 {
237         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
238         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
239                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
240                                 aad_len, auth_tag, auth_tag_len);
241         } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
242                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
243                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
244                                         aad_len, auth_tag, auth_tag_len);
245         } else {
246                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
247                 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
248                                         aad_len, auth_tag, auth_tag_len);
249         }
250 }
251
252 static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
253                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
254                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
255                         u8 *auth_tag, unsigned long auth_tag_len)
256 {
257         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
258         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
259                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
260                                 aad, aad_len, auth_tag, auth_tag_len);
261         } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
262                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
263                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
264                                         aad_len, auth_tag, auth_tag_len);
265         } else {
266                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
267                 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
268                                         aad_len, auth_tag, auth_tag_len);
269         }
270 }
271 #endif
272
273 static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
274                         const u8 *in, unsigned long plaintext_len, u8 *iv,
275                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
276                         u8 *auth_tag, unsigned long auth_tag_len);
277
278 static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
279                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
280                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
281                         u8 *auth_tag, unsigned long auth_tag_len);
282
283 static inline struct aesni_rfc4106_gcm_ctx *
284 aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
285 {
286         unsigned long align = AESNI_ALIGN;
287
288         if (align <= crypto_tfm_ctx_alignment())
289                 align = 1;
290         return PTR_ALIGN(crypto_aead_ctx(tfm), align);
291 }
292 #endif
293
294 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
295 {
296         unsigned long addr = (unsigned long)raw_ctx;
297         unsigned long align = AESNI_ALIGN;
298
299         if (align <= crypto_tfm_ctx_alignment())
300                 align = 1;
301         return (struct crypto_aes_ctx *)ALIGN(addr, align);
302 }
303
304 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
305                               const u8 *in_key, unsigned int key_len)
306 {
307         struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
308         u32 *flags = &tfm->crt_flags;
309         int err;
310
311         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
312             key_len != AES_KEYSIZE_256) {
313                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
314                 return -EINVAL;
315         }
316
317         if (!irq_fpu_usable())
318                 err = crypto_aes_expand_key(ctx, in_key, key_len);
319         else {
320                 kernel_fpu_begin();
321                 err = aesni_set_key(ctx, in_key, key_len);
322                 kernel_fpu_end();
323         }
324
325         return err;
326 }
327
328 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
329                        unsigned int key_len)
330 {
331         return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
332 }
333
334 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
335 {
336         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
337
338         if (!irq_fpu_usable())
339                 crypto_aes_encrypt_x86(ctx, dst, src);
340         else {
341                 kernel_fpu_begin();
342                 aesni_enc(ctx, dst, src);
343                 kernel_fpu_end();
344         }
345 }
346
347 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
348 {
349         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
350
351         if (!irq_fpu_usable())
352                 crypto_aes_decrypt_x86(ctx, dst, src);
353         else {
354                 kernel_fpu_begin();
355                 aesni_dec(ctx, dst, src);
356                 kernel_fpu_end();
357         }
358 }
359
360 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
361 {
362         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
363
364         aesni_enc(ctx, dst, src);
365 }
366
367 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
368 {
369         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
370
371         aesni_dec(ctx, dst, src);
372 }
373
374 static int ecb_encrypt(struct blkcipher_desc *desc,
375                        struct scatterlist *dst, struct scatterlist *src,
376                        unsigned int nbytes)
377 {
378         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
379         struct blkcipher_walk walk;
380         int err;
381
382         blkcipher_walk_init(&walk, dst, src, nbytes);
383         err = blkcipher_walk_virt(desc, &walk);
384         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
385
386         while ((nbytes = walk.nbytes)) {
387                 kernel_fpu_begin();
388                 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
389                                 nbytes & AES_BLOCK_MASK);
390                 kernel_fpu_end();
391                 nbytes &= AES_BLOCK_SIZE - 1;
392                 err = blkcipher_walk_done(desc, &walk, nbytes);
393         }
394
395         return err;
396 }
397
398 static int ecb_decrypt(struct blkcipher_desc *desc,
399                        struct scatterlist *dst, struct scatterlist *src,
400                        unsigned int nbytes)
401 {
402         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
403         struct blkcipher_walk walk;
404         int err;
405
406         blkcipher_walk_init(&walk, dst, src, nbytes);
407         err = blkcipher_walk_virt(desc, &walk);
408         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
409
410         while ((nbytes = walk.nbytes)) {
411                 kernel_fpu_begin();
412                 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
413                               nbytes & AES_BLOCK_MASK);
414                 kernel_fpu_end();
415                 nbytes &= AES_BLOCK_SIZE - 1;
416                 err = blkcipher_walk_done(desc, &walk, nbytes);
417         }
418
419         return err;
420 }
421
422 static int cbc_encrypt(struct blkcipher_desc *desc,
423                        struct scatterlist *dst, struct scatterlist *src,
424                        unsigned int nbytes)
425 {
426         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
427         struct blkcipher_walk walk;
428         int err;
429
430         blkcipher_walk_init(&walk, dst, src, nbytes);
431         err = blkcipher_walk_virt(desc, &walk);
432         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
433
434         while ((nbytes = walk.nbytes)) {
435                 kernel_fpu_begin();
436                 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
437                               nbytes & AES_BLOCK_MASK, walk.iv);
438                 kernel_fpu_end();
439                 nbytes &= AES_BLOCK_SIZE - 1;
440                 err = blkcipher_walk_done(desc, &walk, nbytes);
441         }
442
443         return err;
444 }
445
446 static int cbc_decrypt(struct blkcipher_desc *desc,
447                        struct scatterlist *dst, struct scatterlist *src,
448                        unsigned int nbytes)
449 {
450         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
451         struct blkcipher_walk walk;
452         int err;
453
454         blkcipher_walk_init(&walk, dst, src, nbytes);
455         err = blkcipher_walk_virt(desc, &walk);
456         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
457
458         while ((nbytes = walk.nbytes)) {
459                 kernel_fpu_begin();
460                 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
461                               nbytes & AES_BLOCK_MASK, walk.iv);
462                 kernel_fpu_end();
463                 nbytes &= AES_BLOCK_SIZE - 1;
464                 err = blkcipher_walk_done(desc, &walk, nbytes);
465         }
466
467         return err;
468 }
469
470 #ifdef CONFIG_X86_64
471 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
472                             struct blkcipher_walk *walk)
473 {
474         u8 *ctrblk = walk->iv;
475         u8 keystream[AES_BLOCK_SIZE];
476         u8 *src = walk->src.virt.addr;
477         u8 *dst = walk->dst.virt.addr;
478         unsigned int nbytes = walk->nbytes;
479
480         aesni_enc(ctx, keystream, ctrblk);
481         crypto_xor(keystream, src, nbytes);
482         memcpy(dst, keystream, nbytes);
483         crypto_inc(ctrblk, AES_BLOCK_SIZE);
484 }
485
486 #ifdef CONFIG_AS_AVX
487 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
488                               const u8 *in, unsigned int len, u8 *iv)
489 {
490         /*
491          * based on key length, override with the by8 version
492          * of ctr mode encryption/decryption for improved performance
493          * aes_set_key_common() ensures that key length is one of
494          * {128,192,256}
495          */
496         if (ctx->key_length == AES_KEYSIZE_128)
497                 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
498         else if (ctx->key_length == AES_KEYSIZE_192)
499                 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
500         else
501                 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
502 }
503 #endif
504
505 static int ctr_crypt(struct blkcipher_desc *desc,
506                      struct scatterlist *dst, struct scatterlist *src,
507                      unsigned int nbytes)
508 {
509         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
510         struct blkcipher_walk walk;
511         int err;
512
513         blkcipher_walk_init(&walk, dst, src, nbytes);
514         err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
515         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
516
517         while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
518                 kernel_fpu_begin();
519                 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
520                                       nbytes & AES_BLOCK_MASK, walk.iv);
521                 kernel_fpu_end();
522                 nbytes &= AES_BLOCK_SIZE - 1;
523                 err = blkcipher_walk_done(desc, &walk, nbytes);
524         }
525         if (walk.nbytes) {
526                 kernel_fpu_begin();
527                 ctr_crypt_final(ctx, &walk);
528                 kernel_fpu_end();
529                 err = blkcipher_walk_done(desc, &walk, 0);
530         }
531
532         return err;
533 }
534 #endif
535
536 static int ablk_ecb_init(struct crypto_tfm *tfm)
537 {
538         return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
539 }
540
541 static int ablk_cbc_init(struct crypto_tfm *tfm)
542 {
543         return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
544 }
545
546 #ifdef CONFIG_X86_64
547 static int ablk_ctr_init(struct crypto_tfm *tfm)
548 {
549         return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
550 }
551
552 #endif
553
554 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
555 static int ablk_pcbc_init(struct crypto_tfm *tfm)
556 {
557         return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
558 }
559 #endif
560
561 static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
562 {
563         aesni_ecb_enc(ctx, blks, blks, nbytes);
564 }
565
566 static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
567 {
568         aesni_ecb_dec(ctx, blks, blks, nbytes);
569 }
570
571 static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
572                             unsigned int keylen)
573 {
574         struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
575         int err;
576
577         err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
578                                  keylen - AES_BLOCK_SIZE);
579         if (err)
580                 return err;
581
582         return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
583 }
584
585 static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
586 {
587         struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
588
589         lrw_free_table(&ctx->lrw_table);
590 }
591
592 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
593                        struct scatterlist *src, unsigned int nbytes)
594 {
595         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
596         be128 buf[8];
597         struct lrw_crypt_req req = {
598                 .tbuf = buf,
599                 .tbuflen = sizeof(buf),
600
601                 .table_ctx = &ctx->lrw_table,
602                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
603                 .crypt_fn = lrw_xts_encrypt_callback,
604         };
605         int ret;
606
607         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
608
609         kernel_fpu_begin();
610         ret = lrw_crypt(desc, dst, src, nbytes, &req);
611         kernel_fpu_end();
612
613         return ret;
614 }
615
616 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
617                        struct scatterlist *src, unsigned int nbytes)
618 {
619         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
620         be128 buf[8];
621         struct lrw_crypt_req req = {
622                 .tbuf = buf,
623                 .tbuflen = sizeof(buf),
624
625                 .table_ctx = &ctx->lrw_table,
626                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
627                 .crypt_fn = lrw_xts_decrypt_callback,
628         };
629         int ret;
630
631         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
632
633         kernel_fpu_begin();
634         ret = lrw_crypt(desc, dst, src, nbytes, &req);
635         kernel_fpu_end();
636
637         return ret;
638 }
639
640 static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
641                             unsigned int keylen)
642 {
643         struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
644         u32 *flags = &tfm->crt_flags;
645         int err;
646
647         /* key consists of two keys of equal size concatenated,
648          * therefore the length must be even
649          */
650         if (keylen % 2) {
651                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
652                 return -EINVAL;
653         }
654
655         /* first half of xts-key is for crypt */
656         err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
657         if (err)
658                 return err;
659
660         /* second half of xts-key is for tweak */
661         return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
662                                   keylen / 2);
663 }
664
665
666 static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
667 {
668         aesni_enc(ctx, out, in);
669 }
670
671 #ifdef CONFIG_X86_64
672
673 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
674 {
675         glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
676 }
677
678 static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
679 {
680         glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
681 }
682
683 static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
684 {
685         aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
686 }
687
688 static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
689 {
690         aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
691 }
692
693 static const struct common_glue_ctx aesni_enc_xts = {
694         .num_funcs = 2,
695         .fpu_blocks_limit = 1,
696
697         .funcs = { {
698                 .num_blocks = 8,
699                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
700         }, {
701                 .num_blocks = 1,
702                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
703         } }
704 };
705
706 static const struct common_glue_ctx aesni_dec_xts = {
707         .num_funcs = 2,
708         .fpu_blocks_limit = 1,
709
710         .funcs = { {
711                 .num_blocks = 8,
712                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
713         }, {
714                 .num_blocks = 1,
715                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
716         } }
717 };
718
719 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
720                        struct scatterlist *src, unsigned int nbytes)
721 {
722         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
723
724         return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
725                                      XTS_TWEAK_CAST(aesni_xts_tweak),
726                                      aes_ctx(ctx->raw_tweak_ctx),
727                                      aes_ctx(ctx->raw_crypt_ctx));
728 }
729
730 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
731                        struct scatterlist *src, unsigned int nbytes)
732 {
733         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
734
735         return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
736                                      XTS_TWEAK_CAST(aesni_xts_tweak),
737                                      aes_ctx(ctx->raw_tweak_ctx),
738                                      aes_ctx(ctx->raw_crypt_ctx));
739 }
740
741 #else
742
743 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
744                        struct scatterlist *src, unsigned int nbytes)
745 {
746         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
747         be128 buf[8];
748         struct xts_crypt_req req = {
749                 .tbuf = buf,
750                 .tbuflen = sizeof(buf),
751
752                 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
753                 .tweak_fn = aesni_xts_tweak,
754                 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
755                 .crypt_fn = lrw_xts_encrypt_callback,
756         };
757         int ret;
758
759         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
760
761         kernel_fpu_begin();
762         ret = xts_crypt(desc, dst, src, nbytes, &req);
763         kernel_fpu_end();
764
765         return ret;
766 }
767
768 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
769                        struct scatterlist *src, unsigned int nbytes)
770 {
771         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
772         be128 buf[8];
773         struct xts_crypt_req req = {
774                 .tbuf = buf,
775                 .tbuflen = sizeof(buf),
776
777                 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
778                 .tweak_fn = aesni_xts_tweak,
779                 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
780                 .crypt_fn = lrw_xts_decrypt_callback,
781         };
782         int ret;
783
784         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
785
786         kernel_fpu_begin();
787         ret = xts_crypt(desc, dst, src, nbytes, &req);
788         kernel_fpu_end();
789
790         return ret;
791 }
792
793 #endif
794
795 #ifdef CONFIG_X86_64
796 static int rfc4106_init(struct crypto_aead *aead)
797 {
798         struct cryptd_aead *cryptd_tfm;
799         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
800
801         cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
802                                        CRYPTO_ALG_INTERNAL,
803                                        CRYPTO_ALG_INTERNAL);
804         if (IS_ERR(cryptd_tfm))
805                 return PTR_ERR(cryptd_tfm);
806
807         *ctx = cryptd_tfm;
808         crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
809         return 0;
810 }
811
812 static void rfc4106_exit(struct crypto_aead *aead)
813 {
814         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
815
816         cryptd_free_aead(*ctx);
817 }
818
819 static void
820 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
821 {
822         struct aesni_gcm_set_hash_subkey_result *result = req->data;
823
824         if (err == -EINPROGRESS)
825                 return;
826         result->err = err;
827         complete(&result->completion);
828 }
829
830 static int
831 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
832 {
833         struct crypto_ablkcipher *ctr_tfm;
834         struct ablkcipher_request *req;
835         int ret = -EINVAL;
836         struct aesni_hash_subkey_req_data *req_data;
837
838         ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
839         if (IS_ERR(ctr_tfm))
840                 return PTR_ERR(ctr_tfm);
841
842         ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
843         if (ret)
844                 goto out_free_ablkcipher;
845
846         ret = -ENOMEM;
847         req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
848         if (!req)
849                 goto out_free_ablkcipher;
850
851         req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
852         if (!req_data)
853                 goto out_free_request;
854
855         memset(req_data->iv, 0, sizeof(req_data->iv));
856
857         /* Clear the data in the hash sub key container to zero. */
858         /* We want to encrypt all zeros to create the hash sub key. */
859         memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
860
861         init_completion(&req_data->result.completion);
862         sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
863         ablkcipher_request_set_tfm(req, ctr_tfm);
864         ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
865                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
866                                         rfc4106_set_hash_subkey_done,
867                                         &req_data->result);
868
869         ablkcipher_request_set_crypt(req, &req_data->sg,
870                 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
871
872         ret = crypto_ablkcipher_encrypt(req);
873         if (ret == -EINPROGRESS || ret == -EBUSY) {
874                 ret = wait_for_completion_interruptible
875                         (&req_data->result.completion);
876                 if (!ret)
877                         ret = req_data->result.err;
878         }
879         kfree(req_data);
880 out_free_request:
881         ablkcipher_request_free(req);
882 out_free_ablkcipher:
883         crypto_free_ablkcipher(ctr_tfm);
884         return ret;
885 }
886
887 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
888                                   unsigned int key_len)
889 {
890         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
891
892         if (key_len < 4) {
893                 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
894                 return -EINVAL;
895         }
896         /* Account for the 4 byte nonce at the end. */
897         key_len -= 4;
898
899         memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
900
901         return aes_set_key_common(crypto_aead_tfm(aead),
902                                   &ctx->aes_key_expanded, key, key_len) ?:
903                rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
904 }
905
906 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
907                            unsigned int key_len)
908 {
909         struct cryptd_aead **ctx = crypto_aead_ctx(parent);
910         struct cryptd_aead *cryptd_tfm = *ctx;
911
912         return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
913 }
914
915 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
916                                        unsigned int authsize)
917 {
918         switch (authsize) {
919         case 8:
920         case 12:
921         case 16:
922                 break;
923         default:
924                 return -EINVAL;
925         }
926
927         return 0;
928 }
929
930 /* This is the Integrity Check Value (aka the authentication tag) length and
931  * can be 8, 12 or 16 bytes long. */
932 static int rfc4106_set_authsize(struct crypto_aead *parent,
933                                 unsigned int authsize)
934 {
935         struct cryptd_aead **ctx = crypto_aead_ctx(parent);
936         struct cryptd_aead *cryptd_tfm = *ctx;
937
938         return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
939 }
940
941 static int helper_rfc4106_encrypt(struct aead_request *req)
942 {
943         u8 one_entry_in_sg = 0;
944         u8 *src, *dst, *assoc;
945         __be32 counter = cpu_to_be32(1);
946         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
947         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
948         void *aes_ctx = &(ctx->aes_key_expanded);
949         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
950         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
951         struct scatter_walk src_sg_walk;
952         struct scatter_walk dst_sg_walk;
953         unsigned int i;
954
955         /* Assuming we are supporting rfc4106 64-bit extended */
956         /* sequence numbers, we need to have the AAD length equal */
957         /* to 16 or 20 bytes. */
958         if (unlikely(req->assoclen != 16 && req->assoclen != 20))
959                 return -EINVAL;
960
961         /* Build the IV below */
962         for (i = 0; i < 4; i++)
963                 *(iv+i) = ctx->nonce[i];
964         for (i = 0; i < 8; i++)
965                 *(iv+4+i) = req->iv[i];
966         *((__be32 *)(iv+12)) = counter;
967
968         if (sg_is_last(req->src) &&
969             req->src->offset + req->src->length <= PAGE_SIZE &&
970             sg_is_last(req->dst) &&
971             req->dst->offset + req->dst->length <= PAGE_SIZE) {
972                 one_entry_in_sg = 1;
973                 scatterwalk_start(&src_sg_walk, req->src);
974                 assoc = scatterwalk_map(&src_sg_walk);
975                 src = assoc + req->assoclen;
976                 dst = src;
977                 if (unlikely(req->src != req->dst)) {
978                         scatterwalk_start(&dst_sg_walk, req->dst);
979                         dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
980                 }
981         } else {
982                 /* Allocate memory for src, dst, assoc */
983                 assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
984                         GFP_ATOMIC);
985                 if (unlikely(!assoc))
986                         return -ENOMEM;
987                 scatterwalk_map_and_copy(assoc, req->src, 0,
988                                          req->assoclen + req->cryptlen, 0);
989                 src = assoc + req->assoclen;
990                 dst = src;
991         }
992
993         kernel_fpu_begin();
994         aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
995                           ctx->hash_subkey, assoc, req->assoclen - 8,
996                           dst + req->cryptlen, auth_tag_len);
997         kernel_fpu_end();
998
999         /* The authTag (aka the Integrity Check Value) needs to be written
1000          * back to the packet. */
1001         if (one_entry_in_sg) {
1002                 if (unlikely(req->src != req->dst)) {
1003                         scatterwalk_unmap(dst - req->assoclen);
1004                         scatterwalk_advance(&dst_sg_walk, req->dst->length);
1005                         scatterwalk_done(&dst_sg_walk, 1, 0);
1006                 }
1007                 scatterwalk_unmap(assoc);
1008                 scatterwalk_advance(&src_sg_walk, req->src->length);
1009                 scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
1010         } else {
1011                 scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
1012                                          req->cryptlen + auth_tag_len, 1);
1013                 kfree(assoc);
1014         }
1015         return 0;
1016 }
1017
1018 static int helper_rfc4106_decrypt(struct aead_request *req)
1019 {
1020         u8 one_entry_in_sg = 0;
1021         u8 *src, *dst, *assoc;
1022         unsigned long tempCipherLen = 0;
1023         __be32 counter = cpu_to_be32(1);
1024         int retval = 0;
1025         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1026         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1027         void *aes_ctx = &(ctx->aes_key_expanded);
1028         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1029         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1030         u8 authTag[16];
1031         struct scatter_walk src_sg_walk;
1032         struct scatter_walk dst_sg_walk;
1033         unsigned int i;
1034
1035         if (unlikely(req->assoclen != 16 && req->assoclen != 20))
1036                 return -EINVAL;
1037
1038         /* Assuming we are supporting rfc4106 64-bit extended */
1039         /* sequence numbers, we need to have the AAD length */
1040         /* equal to 16 or 20 bytes. */
1041
1042         tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1043         /* Build the IV below */
1044         for (i = 0; i < 4; i++)
1045                 *(iv+i) = ctx->nonce[i];
1046         for (i = 0; i < 8; i++)
1047                 *(iv+4+i) = req->iv[i];
1048         *((__be32 *)(iv+12)) = counter;
1049
1050         if (sg_is_last(req->src) &&
1051             req->src->offset + req->src->length <= PAGE_SIZE &&
1052             sg_is_last(req->dst) &&
1053             req->dst->offset + req->dst->length <= PAGE_SIZE) {
1054                 one_entry_in_sg = 1;
1055                 scatterwalk_start(&src_sg_walk, req->src);
1056                 assoc = scatterwalk_map(&src_sg_walk);
1057                 src = assoc + req->assoclen;
1058                 dst = src;
1059                 if (unlikely(req->src != req->dst)) {
1060                         scatterwalk_start(&dst_sg_walk, req->dst);
1061                         dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
1062                 }
1063
1064         } else {
1065                 /* Allocate memory for src, dst, assoc */
1066                 assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1067                 if (!assoc)
1068                         return -ENOMEM;
1069                 scatterwalk_map_and_copy(assoc, req->src, 0,
1070                                          req->assoclen + req->cryptlen, 0);
1071                 src = assoc + req->assoclen;
1072                 dst = src;
1073         }
1074
1075         kernel_fpu_begin();
1076         aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
1077                           ctx->hash_subkey, assoc, req->assoclen - 8,
1078                           authTag, auth_tag_len);
1079         kernel_fpu_end();
1080
1081         /* Compare generated tag with passed in tag. */
1082         retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
1083                 -EBADMSG : 0;
1084
1085         if (one_entry_in_sg) {
1086                 if (unlikely(req->src != req->dst)) {
1087                         scatterwalk_unmap(dst - req->assoclen);
1088                         scatterwalk_advance(&dst_sg_walk, req->dst->length);
1089                         scatterwalk_done(&dst_sg_walk, 1, 0);
1090                 }
1091                 scatterwalk_unmap(assoc);
1092                 scatterwalk_advance(&src_sg_walk, req->src->length);
1093                 scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
1094         } else {
1095                 scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
1096                                          tempCipherLen, 1);
1097                 kfree(assoc);
1098         }
1099         return retval;
1100 }
1101
1102 static int rfc4106_encrypt(struct aead_request *req)
1103 {
1104         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1105         struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1106         struct cryptd_aead *cryptd_tfm = *ctx;
1107
1108         aead_request_set_tfm(req, irq_fpu_usable() ?
1109                                   cryptd_aead_child(cryptd_tfm) :
1110                                   &cryptd_tfm->base);
1111
1112         return crypto_aead_encrypt(req);
1113 }
1114
1115 static int rfc4106_decrypt(struct aead_request *req)
1116 {
1117         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1118         struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1119         struct cryptd_aead *cryptd_tfm = *ctx;
1120
1121         aead_request_set_tfm(req, irq_fpu_usable() ?
1122                                   cryptd_aead_child(cryptd_tfm) :
1123                                   &cryptd_tfm->base);
1124
1125         return crypto_aead_decrypt(req);
1126 }
1127 #endif
1128
1129 static struct crypto_alg aesni_algs[] = { {
1130         .cra_name               = "aes",
1131         .cra_driver_name        = "aes-aesni",
1132         .cra_priority           = 300,
1133         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
1134         .cra_blocksize          = AES_BLOCK_SIZE,
1135         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1136                                   AESNI_ALIGN - 1,
1137         .cra_alignmask          = 0,
1138         .cra_module             = THIS_MODULE,
1139         .cra_u  = {
1140                 .cipher = {
1141                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
1142                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
1143                         .cia_setkey             = aes_set_key,
1144                         .cia_encrypt            = aes_encrypt,
1145                         .cia_decrypt            = aes_decrypt
1146                 }
1147         }
1148 }, {
1149         .cra_name               = "__aes-aesni",
1150         .cra_driver_name        = "__driver-aes-aesni",
1151         .cra_priority           = 0,
1152         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
1153         .cra_blocksize          = AES_BLOCK_SIZE,
1154         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1155                                   AESNI_ALIGN - 1,
1156         .cra_alignmask          = 0,
1157         .cra_module             = THIS_MODULE,
1158         .cra_u  = {
1159                 .cipher = {
1160                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
1161                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
1162                         .cia_setkey             = aes_set_key,
1163                         .cia_encrypt            = __aes_encrypt,
1164                         .cia_decrypt            = __aes_decrypt
1165                 }
1166         }
1167 }, {
1168         .cra_name               = "__ecb-aes-aesni",
1169         .cra_driver_name        = "__driver-ecb-aes-aesni",
1170         .cra_priority           = 0,
1171         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1172                                   CRYPTO_ALG_INTERNAL,
1173         .cra_blocksize          = AES_BLOCK_SIZE,
1174         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1175                                   AESNI_ALIGN - 1,
1176         .cra_alignmask          = 0,
1177         .cra_type               = &crypto_blkcipher_type,
1178         .cra_module             = THIS_MODULE,
1179         .cra_u = {
1180                 .blkcipher = {
1181                         .min_keysize    = AES_MIN_KEY_SIZE,
1182                         .max_keysize    = AES_MAX_KEY_SIZE,
1183                         .setkey         = aes_set_key,
1184                         .encrypt        = ecb_encrypt,
1185                         .decrypt        = ecb_decrypt,
1186                 },
1187         },
1188 }, {
1189         .cra_name               = "__cbc-aes-aesni",
1190         .cra_driver_name        = "__driver-cbc-aes-aesni",
1191         .cra_priority           = 0,
1192         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1193                                   CRYPTO_ALG_INTERNAL,
1194         .cra_blocksize          = AES_BLOCK_SIZE,
1195         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1196                                   AESNI_ALIGN - 1,
1197         .cra_alignmask          = 0,
1198         .cra_type               = &crypto_blkcipher_type,
1199         .cra_module             = THIS_MODULE,
1200         .cra_u = {
1201                 .blkcipher = {
1202                         .min_keysize    = AES_MIN_KEY_SIZE,
1203                         .max_keysize    = AES_MAX_KEY_SIZE,
1204                         .setkey         = aes_set_key,
1205                         .encrypt        = cbc_encrypt,
1206                         .decrypt        = cbc_decrypt,
1207                 },
1208         },
1209 }, {
1210         .cra_name               = "ecb(aes)",
1211         .cra_driver_name        = "ecb-aes-aesni",
1212         .cra_priority           = 400,
1213         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1214         .cra_blocksize          = AES_BLOCK_SIZE,
1215         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1216         .cra_alignmask          = 0,
1217         .cra_type               = &crypto_ablkcipher_type,
1218         .cra_module             = THIS_MODULE,
1219         .cra_init               = ablk_ecb_init,
1220         .cra_exit               = ablk_exit,
1221         .cra_u = {
1222                 .ablkcipher = {
1223                         .min_keysize    = AES_MIN_KEY_SIZE,
1224                         .max_keysize    = AES_MAX_KEY_SIZE,
1225                         .setkey         = ablk_set_key,
1226                         .encrypt        = ablk_encrypt,
1227                         .decrypt        = ablk_decrypt,
1228                 },
1229         },
1230 }, {
1231         .cra_name               = "cbc(aes)",
1232         .cra_driver_name        = "cbc-aes-aesni",
1233         .cra_priority           = 400,
1234         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1235         .cra_blocksize          = AES_BLOCK_SIZE,
1236         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1237         .cra_alignmask          = 0,
1238         .cra_type               = &crypto_ablkcipher_type,
1239         .cra_module             = THIS_MODULE,
1240         .cra_init               = ablk_cbc_init,
1241         .cra_exit               = ablk_exit,
1242         .cra_u = {
1243                 .ablkcipher = {
1244                         .min_keysize    = AES_MIN_KEY_SIZE,
1245                         .max_keysize    = AES_MAX_KEY_SIZE,
1246                         .ivsize         = AES_BLOCK_SIZE,
1247                         .setkey         = ablk_set_key,
1248                         .encrypt        = ablk_encrypt,
1249                         .decrypt        = ablk_decrypt,
1250                 },
1251         },
1252 #ifdef CONFIG_X86_64
1253 }, {
1254         .cra_name               = "__ctr-aes-aesni",
1255         .cra_driver_name        = "__driver-ctr-aes-aesni",
1256         .cra_priority           = 0,
1257         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1258                                   CRYPTO_ALG_INTERNAL,
1259         .cra_blocksize          = 1,
1260         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1261                                   AESNI_ALIGN - 1,
1262         .cra_alignmask          = 0,
1263         .cra_type               = &crypto_blkcipher_type,
1264         .cra_module             = THIS_MODULE,
1265         .cra_u = {
1266                 .blkcipher = {
1267                         .min_keysize    = AES_MIN_KEY_SIZE,
1268                         .max_keysize    = AES_MAX_KEY_SIZE,
1269                         .ivsize         = AES_BLOCK_SIZE,
1270                         .setkey         = aes_set_key,
1271                         .encrypt        = ctr_crypt,
1272                         .decrypt        = ctr_crypt,
1273                 },
1274         },
1275 }, {
1276         .cra_name               = "ctr(aes)",
1277         .cra_driver_name        = "ctr-aes-aesni",
1278         .cra_priority           = 400,
1279         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1280         .cra_blocksize          = 1,
1281         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1282         .cra_alignmask          = 0,
1283         .cra_type               = &crypto_ablkcipher_type,
1284         .cra_module             = THIS_MODULE,
1285         .cra_init               = ablk_ctr_init,
1286         .cra_exit               = ablk_exit,
1287         .cra_u = {
1288                 .ablkcipher = {
1289                         .min_keysize    = AES_MIN_KEY_SIZE,
1290                         .max_keysize    = AES_MAX_KEY_SIZE,
1291                         .ivsize         = AES_BLOCK_SIZE,
1292                         .setkey         = ablk_set_key,
1293                         .encrypt        = ablk_encrypt,
1294                         .decrypt        = ablk_encrypt,
1295                         .geniv          = "chainiv",
1296                 },
1297         },
1298 #endif
1299 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
1300 }, {
1301         .cra_name               = "pcbc(aes)",
1302         .cra_driver_name        = "pcbc-aes-aesni",
1303         .cra_priority           = 400,
1304         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1305         .cra_blocksize          = AES_BLOCK_SIZE,
1306         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1307         .cra_alignmask          = 0,
1308         .cra_type               = &crypto_ablkcipher_type,
1309         .cra_module             = THIS_MODULE,
1310         .cra_init               = ablk_pcbc_init,
1311         .cra_exit               = ablk_exit,
1312         .cra_u = {
1313                 .ablkcipher = {
1314                         .min_keysize    = AES_MIN_KEY_SIZE,
1315                         .max_keysize    = AES_MAX_KEY_SIZE,
1316                         .ivsize         = AES_BLOCK_SIZE,
1317                         .setkey         = ablk_set_key,
1318                         .encrypt        = ablk_encrypt,
1319                         .decrypt        = ablk_decrypt,
1320                 },
1321         },
1322 #endif
1323 }, {
1324         .cra_name               = "__lrw-aes-aesni",
1325         .cra_driver_name        = "__driver-lrw-aes-aesni",
1326         .cra_priority           = 0,
1327         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1328                                   CRYPTO_ALG_INTERNAL,
1329         .cra_blocksize          = AES_BLOCK_SIZE,
1330         .cra_ctxsize            = sizeof(struct aesni_lrw_ctx),
1331         .cra_alignmask          = 0,
1332         .cra_type               = &crypto_blkcipher_type,
1333         .cra_module             = THIS_MODULE,
1334         .cra_exit               = lrw_aesni_exit_tfm,
1335         .cra_u = {
1336                 .blkcipher = {
1337                         .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1338                         .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1339                         .ivsize         = AES_BLOCK_SIZE,
1340                         .setkey         = lrw_aesni_setkey,
1341                         .encrypt        = lrw_encrypt,
1342                         .decrypt        = lrw_decrypt,
1343                 },
1344         },
1345 }, {
1346         .cra_name               = "__xts-aes-aesni",
1347         .cra_driver_name        = "__driver-xts-aes-aesni",
1348         .cra_priority           = 0,
1349         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1350                                   CRYPTO_ALG_INTERNAL,
1351         .cra_blocksize          = AES_BLOCK_SIZE,
1352         .cra_ctxsize            = sizeof(struct aesni_xts_ctx),
1353         .cra_alignmask          = 0,
1354         .cra_type               = &crypto_blkcipher_type,
1355         .cra_module             = THIS_MODULE,
1356         .cra_u = {
1357                 .blkcipher = {
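                        /*
                         * XTS takes two AES keys of equal length (data key
                         * plus tweak key), so the limits are doubled.
                         */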
1358                         .min_keysize    = 2 * AES_MIN_KEY_SIZE,
1359                         .max_keysize    = 2 * AES_MAX_KEY_SIZE,
1360                         .ivsize         = AES_BLOCK_SIZE,
1361                         .setkey         = xts_aesni_setkey,
1362                         .encrypt        = xts_encrypt,
1363                         .decrypt        = xts_decrypt,
1364                 },
1365         },
1366 }, {
1367         .cra_name               = "lrw(aes)",
1368         .cra_driver_name        = "lrw-aes-aesni",
1369         .cra_priority           = 400,
1370         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1371         .cra_blocksize          = AES_BLOCK_SIZE,
1372         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1373         .cra_alignmask          = 0,
1374         .cra_type               = &crypto_ablkcipher_type,
1375         .cra_module             = THIS_MODULE,
1376         .cra_init               = ablk_init,
1377         .cra_exit               = ablk_exit,
1378         .cra_u = {
1379                 .ablkcipher = {
1380                         .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1381                         .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1382                         .ivsize         = AES_BLOCK_SIZE,
1383                         .setkey         = ablk_set_key,
1384                         .encrypt        = ablk_encrypt,
1385                         .decrypt        = ablk_decrypt,
1386                 },
1387         },
1388 }, {
1389         .cra_name               = "xts(aes)",
1390         .cra_driver_name        = "xts-aes-aesni",
1391         .cra_priority           = 400,
1392         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1393         .cra_blocksize          = AES_BLOCK_SIZE,
1394         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1395         .cra_alignmask          = 0,
1396         .cra_type               = &crypto_ablkcipher_type,
1397         .cra_module             = THIS_MODULE,
1398         .cra_init               = ablk_init,
1399         .cra_exit               = ablk_exit,
1400         .cra_u = {
1401                 .ablkcipher = {
1402                         .min_keysize    = 2 * AES_MIN_KEY_SIZE,
1403                         .max_keysize    = 2 * AES_MAX_KEY_SIZE,
1404                         .ivsize         = AES_BLOCK_SIZE,
1405                         .setkey         = ablk_set_key,
1406                         .encrypt        = ablk_encrypt,
1407                         .decrypt        = ablk_decrypt,
1408                 },
1409         },
1410 } };
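
/*
 * The "__"-prefixed entries above are flagged CRYPTO_ALG_INTERNAL and are
 * therefore hidden from ordinary crypto API users; they are reached only
 * through the priority-400 async wrappers, which defer to cryptd whenever
 * the FPU cannot be used directly in the calling context.
 */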
1411
1412 #ifdef CONFIG_X86_64
1413 static struct aead_alg aesni_aead_algs[] = { {
1414         .setkey                 = common_rfc4106_set_key,
1415         .setauthsize            = common_rfc4106_set_authsize,
1416         .encrypt                = helper_rfc4106_encrypt,
1417         .decrypt                = helper_rfc4106_decrypt,
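        /*
         * RFC 4106 carries an 8-byte explicit IV on the wire; the remaining
         * 4 bytes of the 96-bit GCM nonce are the salt taken from the key.
         */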
1418         .ivsize                 = 8,
1419         .maxauthsize            = 16,
1420         .base = {
1421                 .cra_name               = "__gcm-aes-aesni",
1422                 .cra_driver_name        = "__driver-gcm-aes-aesni",
1423                 .cra_flags              = CRYPTO_ALG_INTERNAL,
1424                 .cra_blocksize          = 1,
1425                 .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
1426                 .cra_alignmask          = AESNI_ALIGN - 1,
1427                 .cra_module             = THIS_MODULE,
1428         },
1429 }, {
1430         .init                   = rfc4106_init,
1431         .exit                   = rfc4106_exit,
1432         .setkey                 = rfc4106_set_key,
1433         .setauthsize            = rfc4106_set_authsize,
1434         .encrypt                = rfc4106_encrypt,
1435         .decrypt                = rfc4106_decrypt,
1436         .ivsize                 = 8,
1437         .maxauthsize            = 16,
1438         .base = {
1439                 .cra_name               = "rfc4106(gcm(aes))",
1440                 .cra_driver_name        = "rfc4106-gcm-aesni",
1441                 .cra_priority           = 400,
1442                 .cra_flags              = CRYPTO_ALG_ASYNC,
1443                 .cra_blocksize          = 1,
1444                 .cra_ctxsize            = sizeof(struct cryptd_aead *),
1445                 .cra_module             = THIS_MODULE,
1446         },
1447 } };
1448 #else
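/*
 * Zero-length placeholder: ARRAY_SIZE() evaluates to 0 here, so the AEAD
 * register/unregister calls in init/exit become no-ops on 32-bit builds.
 */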
1449 static struct aead_alg aesni_aead_algs[0];
1450 #endif
1451
1452
1453 static const struct x86_cpu_id aesni_cpu_id[] = {
1454         X86_FEATURE_MATCH(X86_FEATURE_AES),
1455         {}
1456 };
1457 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1458
1459 static int __init aesni_init(void)
1460 {
1461         int err;
1462
1463         if (!x86_match_cpu(aesni_cpu_id))
1464                 return -ENODEV;
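        /*
         * On x86-64, pick the best GCM implementation available at load
         * time: AVX2 if the toolchain and CPU support it, then AVX, then
         * the plain SSE code.
         */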
1465 #ifdef CONFIG_X86_64
1466 #ifdef CONFIG_AS_AVX2
1467         if (boot_cpu_has(X86_FEATURE_AVX2)) {
1468                 pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1469                 aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
1470                 aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
1471         } else
1472 #endif
1473 #ifdef CONFIG_AS_AVX
1474         if (boot_cpu_has(X86_FEATURE_AVX)) {
1475                 pr_info("AVX version of gcm_enc/dec engaged.\n");
1476                 aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
1477                 aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
1478         } else
1479 #endif
1480         {
1481                 pr_info("SSE version of gcm_enc/dec engaged.\n");
1482                 aesni_gcm_enc_tfm = aesni_gcm_enc;
1483                 aesni_gcm_dec_tfm = aesni_gcm_dec;
1484         }
1485         aesni_ctr_enc_tfm = aesni_ctr_enc;
1486 #ifdef CONFIG_AS_AVX
1487         if (cpu_has_avx) {
1488                 /* optimize performance of ctr mode encryption transform */
1489                 aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1490                 pr_info("AES CTR mode by8 optimization enabled\n");
1491         }
1492 #endif
1493 #endif
1494
1495         err = crypto_fpu_init();
1496         if (err)
1497                 return err;
1498
1499         err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1500         if (err)
1501                 goto fpu_exit;
1502
1503         err = crypto_register_aeads(aesni_aead_algs,
1504                                     ARRAY_SIZE(aesni_aead_algs));
1505         if (err)
1506                 goto unregister_algs;
1507
1508         return err;
1509
1510 unregister_algs:
1511         crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1512 fpu_exit:
1513         crypto_fpu_exit();
1514         return err;
1515 }
1516
1517 static void __exit aesni_exit(void)
1518 {
1519         crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
1520         crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1521
1522         crypto_fpu_exit();
1523 }
1524
1525 late_initcall(aesni_init);
1526 module_exit(aesni_exit);
1527
1528 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1529 MODULE_LICENSE("GPL");
1530 MODULE_ALIAS_CRYPTO("aes");
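
/*
 * Illustrative only: a typical in-kernel consumer does not call this driver
 * directly but goes through the generic crypto API, which transparently picks
 * the AES-NI-backed implementations registered above once this module is
 * loaded. A minimal sketch (key, dst and src being caller-provided buffers):
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
 *		crypto_cipher_encrypt_one(tfm, dst, src);
 *		crypto_free_cipher(tfm);
 *	}
 */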