Add the rt linux 4.1.3-rt3 as base
[kvmfornfv.git] / kernel / arch / x86 / crypto / aesni-intel_glue.c
1 /*
2  * Support for Intel AES-NI instructions. This file contains glue
3  * code, the real AES implementation is in intel-aes_asm.S.
4  *
5  * Copyright (C) 2008, Intel Corp.
6  *    Author: Huang Ying <ying.huang@intel.com>
7  *
8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9  * interface for 64-bit kernels.
10  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
11  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
12  *             Tadeusz Struk (tadeusz.struk@intel.com)
13  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
14  *    Copyright (c) 2010, Intel Corporation.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  */
21
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/crypto.h>
25 #include <linux/module.h>
26 #include <linux/err.h>
27 #include <crypto/algapi.h>
28 #include <crypto/aes.h>
29 #include <crypto/cryptd.h>
30 #include <crypto/ctr.h>
31 #include <crypto/b128ops.h>
32 #include <crypto/lrw.h>
33 #include <crypto/xts.h>
34 #include <asm/cpu_device_id.h>
35 #include <asm/i387.h>
36 #include <asm/crypto/aes.h>
37 #include <crypto/ablk_helper.h>
38 #include <crypto/scatterwalk.h>
39 #include <crypto/internal/aead.h>
40 #include <linux/workqueue.h>
41 #include <linux/spinlock.h>
42 #ifdef CONFIG_X86_64
43 #include <asm/crypto/glue_helper.h>
44 #endif
45
46
47 /* This data is stored at the end of the crypto_tfm struct.
48  * It's a type of per "session" data storage location.
49  * This needs to be 16 byte aligned.
50  */
51 struct aesni_rfc4106_gcm_ctx {
52         u8 hash_subkey[16];
53         struct crypto_aes_ctx aes_key_expanded;
54         u8 nonce[4];
55         struct cryptd_aead *cryptd_tfm;
56 };
57
58 struct aesni_gcm_set_hash_subkey_result {
59         int err;
60         struct completion completion;
61 };
62
63 struct aesni_hash_subkey_req_data {
64         u8 iv[16];
65         struct aesni_gcm_set_hash_subkey_result result;
66         struct scatterlist sg;
67 };
68
69 #define AESNI_ALIGN     (16)
70 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE-1))
71 #define RFC4106_HASH_SUBKEY_SIZE 16
72
73 struct aesni_lrw_ctx {
74         struct lrw_table_ctx lrw_table;
75         u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
76 };
77
78 struct aesni_xts_ctx {
79         u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
80         u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
81 };
82
83 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
84                              unsigned int key_len);
85 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
86                           const u8 *in);
87 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
88                           const u8 *in);
89 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
90                               const u8 *in, unsigned int len);
91 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
92                               const u8 *in, unsigned int len);
93 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
94                               const u8 *in, unsigned int len, u8 *iv);
95 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
96                               const u8 *in, unsigned int len, u8 *iv);
97
98 int crypto_fpu_init(void);
99 void crypto_fpu_exit(void);
100
101 #define AVX_GEN2_OPTSIZE 640
102 #define AVX_GEN4_OPTSIZE 4096
103
104 #ifdef CONFIG_X86_64
105
106 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
107                               const u8 *in, unsigned int len, u8 *iv);
108 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
109                               const u8 *in, unsigned int len, u8 *iv);
110
111 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
112                                  const u8 *in, bool enc, u8 *iv);
113
114 /* asmlinkage void aesni_gcm_enc()
115  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
116  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
117  * const u8 *in, Plaintext input
118  * unsigned long plaintext_len, Length of data in bytes for encryption.
119  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
120  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
121  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
122  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
123  * const u8 *aad, Additional Authentication Data (AAD)
124  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
125  *          is going to be 8 or 12 bytes
126  * u8 *auth_tag, Authenticated Tag output.
127  * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
128  *          Valid values are 16 (most likely), 12 or 8.
129  */
130 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
131                         const u8 *in, unsigned long plaintext_len, u8 *iv,
132                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
133                         u8 *auth_tag, unsigned long auth_tag_len);
134
135 /* asmlinkage void aesni_gcm_dec()
136  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
137  * u8 *out, Plaintext output. Decrypt in-place is allowed.
138  * const u8 *in, Ciphertext input
139  * unsigned long ciphertext_len, Length of data in bytes for decryption.
140  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
141  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
142  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
143  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
144  * const u8 *aad, Additional Authentication Data (AAD)
145  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
146  * to be 8 or 12 bytes
147  * u8 *auth_tag, Authenticated Tag output.
148  * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
149  * Valid values are 16 (most likely), 12 or 8.
150  */
151 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
152                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
153                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
154                         u8 *auth_tag, unsigned long auth_tag_len);
155
156
157 #ifdef CONFIG_AS_AVX
158 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
159                 void *keys, u8 *out, unsigned int num_bytes);
160 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
161                 void *keys, u8 *out, unsigned int num_bytes);
162 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
163                 void *keys, u8 *out, unsigned int num_bytes);
164 /*
165  * asmlinkage void aesni_gcm_precomp_avx_gen2()
166  * gcm_data *my_ctx_data, context data
167  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
168  */
169 asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
170
171 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
172                         const u8 *in, unsigned long plaintext_len, u8 *iv,
173                         const u8 *aad, unsigned long aad_len,
174                         u8 *auth_tag, unsigned long auth_tag_len);
175
176 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
177                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
178                         const u8 *aad, unsigned long aad_len,
179                         u8 *auth_tag, unsigned long auth_tag_len);
180
181 static void aesni_gcm_enc_avx(void *ctx, u8 *out,
182                         const u8 *in, unsigned long plaintext_len, u8 *iv,
183                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
184                         u8 *auth_tag, unsigned long auth_tag_len)
185 {
186         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
187         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
188                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
189                                 aad_len, auth_tag, auth_tag_len);
190         } else {
191                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
192                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
193                                         aad_len, auth_tag, auth_tag_len);
194         }
195 }
196
197 static void aesni_gcm_dec_avx(void *ctx, u8 *out,
198                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
199                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
200                         u8 *auth_tag, unsigned long auth_tag_len)
201 {
202         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
203         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
204                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
205                                 aad_len, auth_tag, auth_tag_len);
206         } else {
207                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
208                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
209                                         aad_len, auth_tag, auth_tag_len);
210         }
211 }
212 #endif
213
214 #ifdef CONFIG_AS_AVX2
215 /*
216  * asmlinkage void aesni_gcm_precomp_avx_gen4()
217  * gcm_data *my_ctx_data, context data
218  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
219  */
220 asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
221
222 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
223                         const u8 *in, unsigned long plaintext_len, u8 *iv,
224                         const u8 *aad, unsigned long aad_len,
225                         u8 *auth_tag, unsigned long auth_tag_len);
226
227 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
228                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
229                         const u8 *aad, unsigned long aad_len,
230                         u8 *auth_tag, unsigned long auth_tag_len);
231
232 static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
233                         const u8 *in, unsigned long plaintext_len, u8 *iv,
234                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
235                         u8 *auth_tag, unsigned long auth_tag_len)
236 {
237        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
238         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
239                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
240                                 aad_len, auth_tag, auth_tag_len);
241         } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
242                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
243                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
244                                         aad_len, auth_tag, auth_tag_len);
245         } else {
246                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
247                 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
248                                         aad_len, auth_tag, auth_tag_len);
249         }
250 }
251
252 static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
253                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
254                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
255                         u8 *auth_tag, unsigned long auth_tag_len)
256 {
257        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
258         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
259                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
260                                 aad, aad_len, auth_tag, auth_tag_len);
261         } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
262                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
263                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
264                                         aad_len, auth_tag, auth_tag_len);
265         } else {
266                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
267                 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
268                                         aad_len, auth_tag, auth_tag_len);
269         }
270 }
271 #endif
272
273 static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
274                         const u8 *in, unsigned long plaintext_len, u8 *iv,
275                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
276                         u8 *auth_tag, unsigned long auth_tag_len);
277
278 static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
279                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
280                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
281                         u8 *auth_tag, unsigned long auth_tag_len);
282
283 static inline struct
284 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
285 {
286         return
287                 (struct aesni_rfc4106_gcm_ctx *)
288                 PTR_ALIGN((u8 *)
289                 crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
290 }
291 #endif
292
293 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
294 {
295         unsigned long addr = (unsigned long)raw_ctx;
296         unsigned long align = AESNI_ALIGN;
297
298         if (align <= crypto_tfm_ctx_alignment())
299                 align = 1;
300         return (struct crypto_aes_ctx *)ALIGN(addr, align);
301 }
302
303 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
304                               const u8 *in_key, unsigned int key_len)
305 {
306         struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
307         u32 *flags = &tfm->crt_flags;
308         int err;
309
310         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
311             key_len != AES_KEYSIZE_256) {
312                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
313                 return -EINVAL;
314         }
315
316         if (!irq_fpu_usable())
317                 err = crypto_aes_expand_key(ctx, in_key, key_len);
318         else {
319                 kernel_fpu_begin();
320                 err = aesni_set_key(ctx, in_key, key_len);
321                 kernel_fpu_end();
322         }
323
324         return err;
325 }
326
327 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
328                        unsigned int key_len)
329 {
330         return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
331 }
332
333 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
334 {
335         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
336
337         if (!irq_fpu_usable())
338                 crypto_aes_encrypt_x86(ctx, dst, src);
339         else {
340                 kernel_fpu_begin();
341                 aesni_enc(ctx, dst, src);
342                 kernel_fpu_end();
343         }
344 }
345
346 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
347 {
348         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
349
350         if (!irq_fpu_usable())
351                 crypto_aes_decrypt_x86(ctx, dst, src);
352         else {
353                 kernel_fpu_begin();
354                 aesni_dec(ctx, dst, src);
355                 kernel_fpu_end();
356         }
357 }
358
359 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
360 {
361         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
362
363         aesni_enc(ctx, dst, src);
364 }
365
366 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
367 {
368         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
369
370         aesni_dec(ctx, dst, src);
371 }
372
373 static int ecb_encrypt(struct blkcipher_desc *desc,
374                        struct scatterlist *dst, struct scatterlist *src,
375                        unsigned int nbytes)
376 {
377         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
378         struct blkcipher_walk walk;
379         int err;
380
381         blkcipher_walk_init(&walk, dst, src, nbytes);
382         err = blkcipher_walk_virt(desc, &walk);
383         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
384
385         while ((nbytes = walk.nbytes)) {
386                 kernel_fpu_begin();
387                 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
388                                 nbytes & AES_BLOCK_MASK);
389                 kernel_fpu_end();
390                 nbytes &= AES_BLOCK_SIZE - 1;
391                 err = blkcipher_walk_done(desc, &walk, nbytes);
392         }
393
394         return err;
395 }
396
397 static int ecb_decrypt(struct blkcipher_desc *desc,
398                        struct scatterlist *dst, struct scatterlist *src,
399                        unsigned int nbytes)
400 {
401         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
402         struct blkcipher_walk walk;
403         int err;
404
405         blkcipher_walk_init(&walk, dst, src, nbytes);
406         err = blkcipher_walk_virt(desc, &walk);
407         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
408
409         while ((nbytes = walk.nbytes)) {
410                 kernel_fpu_begin();
411                 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
412                               nbytes & AES_BLOCK_MASK);
413                 kernel_fpu_end();
414                 nbytes &= AES_BLOCK_SIZE - 1;
415                 err = blkcipher_walk_done(desc, &walk, nbytes);
416         }
417
418         return err;
419 }
420
421 static int cbc_encrypt(struct blkcipher_desc *desc,
422                        struct scatterlist *dst, struct scatterlist *src,
423                        unsigned int nbytes)
424 {
425         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
426         struct blkcipher_walk walk;
427         int err;
428
429         blkcipher_walk_init(&walk, dst, src, nbytes);
430         err = blkcipher_walk_virt(desc, &walk);
431         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
432
433         while ((nbytes = walk.nbytes)) {
434                 kernel_fpu_begin();
435                 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
436                               nbytes & AES_BLOCK_MASK, walk.iv);
437                 kernel_fpu_end();
438                 nbytes &= AES_BLOCK_SIZE - 1;
439                 err = blkcipher_walk_done(desc, &walk, nbytes);
440         }
441
442         return err;
443 }
444
445 static int cbc_decrypt(struct blkcipher_desc *desc,
446                        struct scatterlist *dst, struct scatterlist *src,
447                        unsigned int nbytes)
448 {
449         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
450         struct blkcipher_walk walk;
451         int err;
452
453         blkcipher_walk_init(&walk, dst, src, nbytes);
454         err = blkcipher_walk_virt(desc, &walk);
455         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
456
457         while ((nbytes = walk.nbytes)) {
458                 kernel_fpu_begin();
459                 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
460                               nbytes & AES_BLOCK_MASK, walk.iv);
461                 kernel_fpu_end();
462                 nbytes &= AES_BLOCK_SIZE - 1;
463                 err = blkcipher_walk_done(desc, &walk, nbytes);
464         }
465
466         return err;
467 }
468
469 #ifdef CONFIG_X86_64
470 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
471                             struct blkcipher_walk *walk)
472 {
473         u8 *ctrblk = walk->iv;
474         u8 keystream[AES_BLOCK_SIZE];
475         u8 *src = walk->src.virt.addr;
476         u8 *dst = walk->dst.virt.addr;
477         unsigned int nbytes = walk->nbytes;
478
479         aesni_enc(ctx, keystream, ctrblk);
480         crypto_xor(keystream, src, nbytes);
481         memcpy(dst, keystream, nbytes);
482         crypto_inc(ctrblk, AES_BLOCK_SIZE);
483 }
484
485 #ifdef CONFIG_AS_AVX
486 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
487                               const u8 *in, unsigned int len, u8 *iv)
488 {
489         /*
490          * based on key length, override with the by8 version
491          * of ctr mode encryption/decryption for improved performance
492          * aes_set_key_common() ensures that key length is one of
493          * {128,192,256}
494          */
495         if (ctx->key_length == AES_KEYSIZE_128)
496                 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
497         else if (ctx->key_length == AES_KEYSIZE_192)
498                 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
499         else
500                 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
501 }
502 #endif
503
504 static int ctr_crypt(struct blkcipher_desc *desc,
505                      struct scatterlist *dst, struct scatterlist *src,
506                      unsigned int nbytes)
507 {
508         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
509         struct blkcipher_walk walk;
510         int err;
511
512         blkcipher_walk_init(&walk, dst, src, nbytes);
513         err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
514         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
515
516         while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
517                 kernel_fpu_begin();
518                 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
519                                       nbytes & AES_BLOCK_MASK, walk.iv);
520                 kernel_fpu_end();
521                 nbytes &= AES_BLOCK_SIZE - 1;
522                 err = blkcipher_walk_done(desc, &walk, nbytes);
523         }
524         if (walk.nbytes) {
525                 kernel_fpu_begin();
526                 ctr_crypt_final(ctx, &walk);
527                 kernel_fpu_end();
528                 err = blkcipher_walk_done(desc, &walk, 0);
529         }
530
531         return err;
532 }
533 #endif
534
535 static int ablk_ecb_init(struct crypto_tfm *tfm)
536 {
537         return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
538 }
539
540 static int ablk_cbc_init(struct crypto_tfm *tfm)
541 {
542         return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
543 }
544
545 #ifdef CONFIG_X86_64
546 static int ablk_ctr_init(struct crypto_tfm *tfm)
547 {
548         return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
549 }
550
551 #endif
552
553 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
554 static int ablk_pcbc_init(struct crypto_tfm *tfm)
555 {
556         return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
557 }
558 #endif
559
560 static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
561 {
562         aesni_ecb_enc(ctx, blks, blks, nbytes);
563 }
564
565 static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
566 {
567         aesni_ecb_dec(ctx, blks, blks, nbytes);
568 }
569
570 static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
571                             unsigned int keylen)
572 {
573         struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
574         int err;
575
576         err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
577                                  keylen - AES_BLOCK_SIZE);
578         if (err)
579                 return err;
580
581         return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
582 }
583
584 static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
585 {
586         struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
587
588         lrw_free_table(&ctx->lrw_table);
589 }
590
591 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
592                        struct scatterlist *src, unsigned int nbytes)
593 {
594         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
595         be128 buf[8];
596         struct lrw_crypt_req req = {
597                 .tbuf = buf,
598                 .tbuflen = sizeof(buf),
599
600                 .table_ctx = &ctx->lrw_table,
601                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
602                 .crypt_fn = lrw_xts_encrypt_callback,
603         };
604         int ret;
605
606         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
607
608         kernel_fpu_begin();
609         ret = lrw_crypt(desc, dst, src, nbytes, &req);
610         kernel_fpu_end();
611
612         return ret;
613 }
614
615 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
616                        struct scatterlist *src, unsigned int nbytes)
617 {
618         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
619         be128 buf[8];
620         struct lrw_crypt_req req = {
621                 .tbuf = buf,
622                 .tbuflen = sizeof(buf),
623
624                 .table_ctx = &ctx->lrw_table,
625                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
626                 .crypt_fn = lrw_xts_decrypt_callback,
627         };
628         int ret;
629
630         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
631
632         kernel_fpu_begin();
633         ret = lrw_crypt(desc, dst, src, nbytes, &req);
634         kernel_fpu_end();
635
636         return ret;
637 }
638
639 static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
640                             unsigned int keylen)
641 {
642         struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
643         u32 *flags = &tfm->crt_flags;
644         int err;
645
646         /* key consists of keys of equal size concatenated, therefore
647          * the length must be even
648          */
649         if (keylen % 2) {
650                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
651                 return -EINVAL;
652         }
653
654         /* first half of xts-key is for crypt */
655         err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
656         if (err)
657                 return err;
658
659         /* second half of xts-key is for tweak */
660         return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
661                                   keylen / 2);
662 }
663
664
665 static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
666 {
667         aesni_enc(ctx, out, in);
668 }
669
670 #ifdef CONFIG_X86_64
671
672 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
673 {
674         glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
675 }
676
677 static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
678 {
679         glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
680 }
681
682 static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
683 {
684         aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
685 }
686
687 static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
688 {
689         aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
690 }
691
692 static const struct common_glue_ctx aesni_enc_xts = {
693         .num_funcs = 2,
694         .fpu_blocks_limit = 1,
695
696         .funcs = { {
697                 .num_blocks = 8,
698                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
699         }, {
700                 .num_blocks = 1,
701                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
702         } }
703 };
704
705 static const struct common_glue_ctx aesni_dec_xts = {
706         .num_funcs = 2,
707         .fpu_blocks_limit = 1,
708
709         .funcs = { {
710                 .num_blocks = 8,
711                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
712         }, {
713                 .num_blocks = 1,
714                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
715         } }
716 };
717
718 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
719                        struct scatterlist *src, unsigned int nbytes)
720 {
721         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
722
723         return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
724                                      XTS_TWEAK_CAST(aesni_xts_tweak),
725                                      aes_ctx(ctx->raw_tweak_ctx),
726                                      aes_ctx(ctx->raw_crypt_ctx));
727 }
728
729 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
730                        struct scatterlist *src, unsigned int nbytes)
731 {
732         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
733
734         return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
735                                      XTS_TWEAK_CAST(aesni_xts_tweak),
736                                      aes_ctx(ctx->raw_tweak_ctx),
737                                      aes_ctx(ctx->raw_crypt_ctx));
738 }
739
740 #else
741
742 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
743                        struct scatterlist *src, unsigned int nbytes)
744 {
745         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
746         be128 buf[8];
747         struct xts_crypt_req req = {
748                 .tbuf = buf,
749                 .tbuflen = sizeof(buf),
750
751                 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
752                 .tweak_fn = aesni_xts_tweak,
753                 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
754                 .crypt_fn = lrw_xts_encrypt_callback,
755         };
756         int ret;
757
758         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
759
760         kernel_fpu_begin();
761         ret = xts_crypt(desc, dst, src, nbytes, &req);
762         kernel_fpu_end();
763
764         return ret;
765 }
766
767 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
768                        struct scatterlist *src, unsigned int nbytes)
769 {
770         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
771         be128 buf[8];
772         struct xts_crypt_req req = {
773                 .tbuf = buf,
774                 .tbuflen = sizeof(buf),
775
776                 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
777                 .tweak_fn = aesni_xts_tweak,
778                 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
779                 .crypt_fn = lrw_xts_decrypt_callback,
780         };
781         int ret;
782
783         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
784
785         kernel_fpu_begin();
786         ret = xts_crypt(desc, dst, src, nbytes, &req);
787         kernel_fpu_end();
788
789         return ret;
790 }
791
792 #endif
793
794 #ifdef CONFIG_X86_64
795 static int rfc4106_init(struct crypto_tfm *tfm)
796 {
797         struct cryptd_aead *cryptd_tfm;
798         struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
799                 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
800         struct crypto_aead *cryptd_child;
801         struct aesni_rfc4106_gcm_ctx *child_ctx;
802         cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
803                                        CRYPTO_ALG_INTERNAL,
804                                        CRYPTO_ALG_INTERNAL);
805         if (IS_ERR(cryptd_tfm))
806                 return PTR_ERR(cryptd_tfm);
807
808         cryptd_child = cryptd_aead_child(cryptd_tfm);
809         child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
810         memcpy(child_ctx, ctx, sizeof(*ctx));
811         ctx->cryptd_tfm = cryptd_tfm;
812         tfm->crt_aead.reqsize = sizeof(struct aead_request)
813                 + crypto_aead_reqsize(&cryptd_tfm->base);
814         return 0;
815 }
816
817 static void rfc4106_exit(struct crypto_tfm *tfm)
818 {
819         struct aesni_rfc4106_gcm_ctx *ctx =
820                 (struct aesni_rfc4106_gcm_ctx *)
821                 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
822         if (!IS_ERR(ctx->cryptd_tfm))
823                 cryptd_free_aead(ctx->cryptd_tfm);
824         return;
825 }
826
827 static void
828 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
829 {
830         struct aesni_gcm_set_hash_subkey_result *result = req->data;
831
832         if (err == -EINPROGRESS)
833                 return;
834         result->err = err;
835         complete(&result->completion);
836 }
837
838 static int
839 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
840 {
841         struct crypto_ablkcipher *ctr_tfm;
842         struct ablkcipher_request *req;
843         int ret = -EINVAL;
844         struct aesni_hash_subkey_req_data *req_data;
845
846         ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
847         if (IS_ERR(ctr_tfm))
848                 return PTR_ERR(ctr_tfm);
849
850         crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
851
852         ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
853         if (ret)
854                 goto out_free_ablkcipher;
855
856         ret = -ENOMEM;
857         req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
858         if (!req)
859                 goto out_free_ablkcipher;
860
861         req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
862         if (!req_data)
863                 goto out_free_request;
864
865         memset(req_data->iv, 0, sizeof(req_data->iv));
866
867         /* Clear the data in the hash sub key container to zero.*/
868         /* We want to cipher all zeros to create the hash sub key. */
869         memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
870
871         init_completion(&req_data->result.completion);
872         sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
873         ablkcipher_request_set_tfm(req, ctr_tfm);
874         ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
875                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
876                                         rfc4106_set_hash_subkey_done,
877                                         &req_data->result);
878
879         ablkcipher_request_set_crypt(req, &req_data->sg,
880                 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
881
882         ret = crypto_ablkcipher_encrypt(req);
883         if (ret == -EINPROGRESS || ret == -EBUSY) {
884                 ret = wait_for_completion_interruptible
885                         (&req_data->result.completion);
886                 if (!ret)
887                         ret = req_data->result.err;
888         }
889         kfree(req_data);
890 out_free_request:
891         ablkcipher_request_free(req);
892 out_free_ablkcipher:
893         crypto_free_ablkcipher(ctr_tfm);
894         return ret;
895 }
896
897 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
898                                   unsigned int key_len)
899 {
900         int ret = 0;
901         struct crypto_tfm *tfm = crypto_aead_tfm(aead);
902         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
903         u8 *new_key_align, *new_key_mem = NULL;
904
905         if (key_len < 4) {
906                 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
907                 return -EINVAL;
908         }
909         /*Account for 4 byte nonce at the end.*/
910         key_len -= 4;
911         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
912             key_len != AES_KEYSIZE_256) {
913                 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
914                 return -EINVAL;
915         }
916
917         memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
918         /*This must be on a 16 byte boundary!*/
919         if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
920                 return -EINVAL;
921
922         if ((unsigned long)key % AESNI_ALIGN) {
923                 /*key is not aligned: use an auxuliar aligned pointer*/
924                 new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
925                 if (!new_key_mem)
926                         return -ENOMEM;
927
928                 new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
929                 memcpy(new_key_align, key, key_len);
930                 key = new_key_align;
931         }
932
933         if (!irq_fpu_usable())
934                 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
935                 key, key_len);
936         else {
937                 kernel_fpu_begin();
938                 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
939                 kernel_fpu_end();
940         }
941         /*This must be on a 16 byte boundary!*/
942         if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
943                 ret = -EINVAL;
944                 goto exit;
945         }
946         ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
947 exit:
948         kfree(new_key_mem);
949         return ret;
950 }
951
952 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
953                            unsigned int key_len)
954 {
955         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
956         struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
957         struct aesni_rfc4106_gcm_ctx *c_ctx = aesni_rfc4106_gcm_ctx_get(child);
958         struct cryptd_aead *cryptd_tfm = ctx->cryptd_tfm;
959         int ret;
960
961         ret = crypto_aead_setkey(child, key, key_len);
962         if (!ret) {
963                 memcpy(ctx, c_ctx, sizeof(*ctx));
964                 ctx->cryptd_tfm = cryptd_tfm;
965         }
966         return ret;
967 }
968
969 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
970                                        unsigned int authsize)
971 {
972         switch (authsize) {
973         case 8:
974         case 12:
975         case 16:
976                 break;
977         default:
978                 return -EINVAL;
979         }
980         crypto_aead_crt(aead)->authsize = authsize;
981         return 0;
982 }
983
984 /* This is the Integrity Check Value (aka the authentication tag length and can
985  * be 8, 12 or 16 bytes long. */
986 static int rfc4106_set_authsize(struct crypto_aead *parent,
987                                 unsigned int authsize)
988 {
989         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
990         struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
991         int ret;
992
993         ret = crypto_aead_setauthsize(child, authsize);
994         if (!ret)
995                 crypto_aead_crt(parent)->authsize = authsize;
996         return ret;
997 }
998
999 static int __driver_rfc4106_encrypt(struct aead_request *req)
1000 {
1001         u8 one_entry_in_sg = 0;
1002         u8 *src, *dst, *assoc;
1003         __be32 counter = cpu_to_be32(1);
1004         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1005         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1006         u32 key_len = ctx->aes_key_expanded.key_length;
1007         void *aes_ctx = &(ctx->aes_key_expanded);
1008         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1009         u8 iv_tab[16+AESNI_ALIGN];
1010         u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
1011         struct scatter_walk src_sg_walk;
1012         struct scatter_walk assoc_sg_walk;
1013         struct scatter_walk dst_sg_walk;
1014         unsigned int i;
1015
1016         /* Assuming we are supporting rfc4106 64-bit extended */
1017         /* sequence numbers We need to have the AAD length equal */
1018         /* to 8 or 12 bytes */
1019         if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1020                 return -EINVAL;
1021         if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
1022                 return -EINVAL;
1023         if (unlikely(key_len != AES_KEYSIZE_128 &&
1024                      key_len != AES_KEYSIZE_192 &&
1025                      key_len != AES_KEYSIZE_256))
1026                 return -EINVAL;
1027
1028         /* IV below built */
1029         for (i = 0; i < 4; i++)
1030                 *(iv+i) = ctx->nonce[i];
1031         for (i = 0; i < 8; i++)
1032                 *(iv+4+i) = req->iv[i];
1033         *((__be32 *)(iv+12)) = counter;
1034
1035         if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1036                 one_entry_in_sg = 1;
1037                 scatterwalk_start(&src_sg_walk, req->src);
1038                 scatterwalk_start(&assoc_sg_walk, req->assoc);
1039                 src = scatterwalk_map(&src_sg_walk);
1040                 assoc = scatterwalk_map(&assoc_sg_walk);
1041                 dst = src;
1042                 if (unlikely(req->src != req->dst)) {
1043                         scatterwalk_start(&dst_sg_walk, req->dst);
1044                         dst = scatterwalk_map(&dst_sg_walk);
1045                 }
1046
1047         } else {
1048                 /* Allocate memory for src, dst, assoc */
1049                 src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1050                         GFP_ATOMIC);
1051                 if (unlikely(!src))
1052                         return -ENOMEM;
1053                 assoc = (src + req->cryptlen + auth_tag_len);
1054                 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1055                 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1056                                         req->assoclen, 0);
1057                 dst = src;
1058         }
1059
1060         aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
1061                 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1062                 + ((unsigned long)req->cryptlen), auth_tag_len);
1063
1064         /* The authTag (aka the Integrity Check Value) needs to be written
1065          * back to the packet. */
1066         if (one_entry_in_sg) {
1067                 if (unlikely(req->src != req->dst)) {
1068                         scatterwalk_unmap(dst);
1069                         scatterwalk_done(&dst_sg_walk, 0, 0);
1070                 }
1071                 scatterwalk_unmap(src);
1072                 scatterwalk_unmap(assoc);
1073                 scatterwalk_done(&src_sg_walk, 0, 0);
1074                 scatterwalk_done(&assoc_sg_walk, 0, 0);
1075         } else {
1076                 scatterwalk_map_and_copy(dst, req->dst, 0,
1077                         req->cryptlen + auth_tag_len, 1);
1078                 kfree(src);
1079         }
1080         return 0;
1081 }
1082
1083 static int __driver_rfc4106_decrypt(struct aead_request *req)
1084 {
1085         u8 one_entry_in_sg = 0;
1086         u8 *src, *dst, *assoc;
1087         unsigned long tempCipherLen = 0;
1088         __be32 counter = cpu_to_be32(1);
1089         int retval = 0;
1090         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1091         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1092         u32 key_len = ctx->aes_key_expanded.key_length;
1093         void *aes_ctx = &(ctx->aes_key_expanded);
1094         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1095         u8 iv_and_authTag[32+AESNI_ALIGN];
1096         u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1097         u8 *authTag = iv + 16;
1098         struct scatter_walk src_sg_walk;
1099         struct scatter_walk assoc_sg_walk;
1100         struct scatter_walk dst_sg_walk;
1101         unsigned int i;
1102
1103         if (unlikely((req->cryptlen < auth_tag_len) ||
1104                 (req->assoclen != 8 && req->assoclen != 12)))
1105                 return -EINVAL;
1106         if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
1107                 return -EINVAL;
1108         if (unlikely(key_len != AES_KEYSIZE_128 &&
1109                      key_len != AES_KEYSIZE_192 &&
1110                      key_len != AES_KEYSIZE_256))
1111                 return -EINVAL;
1112
1113         /* Assuming we are supporting rfc4106 64-bit extended */
1114         /* sequence numbers We need to have the AAD length */
1115         /* equal to 8 or 12 bytes */
1116
1117         tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1118         /* IV below built */
1119         for (i = 0; i < 4; i++)
1120                 *(iv+i) = ctx->nonce[i];
1121         for (i = 0; i < 8; i++)
1122                 *(iv+4+i) = req->iv[i];
1123         *((__be32 *)(iv+12)) = counter;
1124
1125         if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1126                 one_entry_in_sg = 1;
1127                 scatterwalk_start(&src_sg_walk, req->src);
1128                 scatterwalk_start(&assoc_sg_walk, req->assoc);
1129                 src = scatterwalk_map(&src_sg_walk);
1130                 assoc = scatterwalk_map(&assoc_sg_walk);
1131                 dst = src;
1132                 if (unlikely(req->src != req->dst)) {
1133                         scatterwalk_start(&dst_sg_walk, req->dst);
1134                         dst = scatterwalk_map(&dst_sg_walk);
1135                 }
1136
1137         } else {
1138                 /* Allocate memory for src, dst, assoc */
1139                 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1140                 if (!src)
1141                         return -ENOMEM;
1142                 assoc = (src + req->cryptlen);
1143                 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1144                 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1145                         req->assoclen, 0);
1146                 dst = src;
1147         }
1148
1149         aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
1150                 ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1151                 authTag, auth_tag_len);
1152
1153         /* Compare generated tag with passed in tag. */
1154         retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
1155                 -EBADMSG : 0;
1156
1157         if (one_entry_in_sg) {
1158                 if (unlikely(req->src != req->dst)) {
1159                         scatterwalk_unmap(dst);
1160                         scatterwalk_done(&dst_sg_walk, 0, 0);
1161                 }
1162                 scatterwalk_unmap(src);
1163                 scatterwalk_unmap(assoc);
1164                 scatterwalk_done(&src_sg_walk, 0, 0);
1165                 scatterwalk_done(&assoc_sg_walk, 0, 0);
1166         } else {
1167                 scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
1168                 kfree(src);
1169         }
1170         return retval;
1171 }
1172
1173 static int rfc4106_encrypt(struct aead_request *req)
1174 {
1175         int ret;
1176         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1177         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1178
1179         if (!irq_fpu_usable()) {
1180                 struct aead_request *cryptd_req =
1181                         (struct aead_request *) aead_request_ctx(req);
1182
1183                 memcpy(cryptd_req, req, sizeof(*req));
1184                 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1185                 ret = crypto_aead_encrypt(cryptd_req);
1186         } else {
1187                 kernel_fpu_begin();
1188                 ret = __driver_rfc4106_encrypt(req);
1189                 kernel_fpu_end();
1190         }
1191         return ret;
1192 }
1193
1194 static int rfc4106_decrypt(struct aead_request *req)
1195 {
1196         int ret;
1197         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1198         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1199
1200         if (!irq_fpu_usable()) {
1201                 struct aead_request *cryptd_req =
1202                         (struct aead_request *) aead_request_ctx(req);
1203
1204                 memcpy(cryptd_req, req, sizeof(*req));
1205                 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1206                 ret = crypto_aead_decrypt(cryptd_req);
1207         } else {
1208                 kernel_fpu_begin();
1209                 ret = __driver_rfc4106_decrypt(req);
1210                 kernel_fpu_end();
1211         }
1212         return ret;
1213 }
1214
1215 static int helper_rfc4106_encrypt(struct aead_request *req)
1216 {
1217         int ret;
1218
1219         if (unlikely(!irq_fpu_usable())) {
1220                 WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
1221                 ret = -EINVAL;
1222         } else {
1223                 kernel_fpu_begin();
1224                 ret = __driver_rfc4106_encrypt(req);
1225                 kernel_fpu_end();
1226         }
1227         return ret;
1228 }
1229
1230 static int helper_rfc4106_decrypt(struct aead_request *req)
1231 {
1232         int ret;
1233
1234         if (unlikely(!irq_fpu_usable())) {
1235                 WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
1236                 ret = -EINVAL;
1237         } else {
1238                 kernel_fpu_begin();
1239                 ret = __driver_rfc4106_decrypt(req);
1240                 kernel_fpu_end();
1241         }
1242         return ret;
1243 }
1244 #endif
1245
1246 static struct crypto_alg aesni_algs[] = { {
1247         .cra_name               = "aes",
1248         .cra_driver_name        = "aes-aesni",
1249         .cra_priority           = 300,
1250         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
1251         .cra_blocksize          = AES_BLOCK_SIZE,
1252         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1253                                   AESNI_ALIGN - 1,
1254         .cra_alignmask          = 0,
1255         .cra_module             = THIS_MODULE,
1256         .cra_u  = {
1257                 .cipher = {
1258                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
1259                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
1260                         .cia_setkey             = aes_set_key,
1261                         .cia_encrypt            = aes_encrypt,
1262                         .cia_decrypt            = aes_decrypt
1263                 }
1264         }
1265 }, {
1266         .cra_name               = "__aes-aesni",
1267         .cra_driver_name        = "__driver-aes-aesni",
1268         .cra_priority           = 0,
1269         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
1270         .cra_blocksize          = AES_BLOCK_SIZE,
1271         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1272                                   AESNI_ALIGN - 1,
1273         .cra_alignmask          = 0,
1274         .cra_module             = THIS_MODULE,
1275         .cra_u  = {
1276                 .cipher = {
1277                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
1278                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
1279                         .cia_setkey             = aes_set_key,
1280                         .cia_encrypt            = __aes_encrypt,
1281                         .cia_decrypt            = __aes_decrypt
1282                 }
1283         }
1284 }, {
1285         .cra_name               = "__ecb-aes-aesni",
1286         .cra_driver_name        = "__driver-ecb-aes-aesni",
1287         .cra_priority           = 0,
1288         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1289                                   CRYPTO_ALG_INTERNAL,
1290         .cra_blocksize          = AES_BLOCK_SIZE,
1291         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1292                                   AESNI_ALIGN - 1,
1293         .cra_alignmask          = 0,
1294         .cra_type               = &crypto_blkcipher_type,
1295         .cra_module             = THIS_MODULE,
1296         .cra_u = {
1297                 .blkcipher = {
1298                         .min_keysize    = AES_MIN_KEY_SIZE,
1299                         .max_keysize    = AES_MAX_KEY_SIZE,
1300                         .setkey         = aes_set_key,
1301                         .encrypt        = ecb_encrypt,
1302                         .decrypt        = ecb_decrypt,
1303                 },
1304         },
1305 }, {
1306         .cra_name               = "__cbc-aes-aesni",
1307         .cra_driver_name        = "__driver-cbc-aes-aesni",
1308         .cra_priority           = 0,
1309         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1310                                   CRYPTO_ALG_INTERNAL,
1311         .cra_blocksize          = AES_BLOCK_SIZE,
1312         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1313                                   AESNI_ALIGN - 1,
1314         .cra_alignmask          = 0,
1315         .cra_type               = &crypto_blkcipher_type,
1316         .cra_module             = THIS_MODULE,
1317         .cra_u = {
1318                 .blkcipher = {
1319                         .min_keysize    = AES_MIN_KEY_SIZE,
1320                         .max_keysize    = AES_MAX_KEY_SIZE,
1321                         .setkey         = aes_set_key,
1322                         .encrypt        = cbc_encrypt,
1323                         .decrypt        = cbc_decrypt,
1324                 },
1325         },
1326 }, {
1327         .cra_name               = "ecb(aes)",
1328         .cra_driver_name        = "ecb-aes-aesni",
1329         .cra_priority           = 400,
1330         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1331         .cra_blocksize          = AES_BLOCK_SIZE,
1332         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1333         .cra_alignmask          = 0,
1334         .cra_type               = &crypto_ablkcipher_type,
1335         .cra_module             = THIS_MODULE,
1336         .cra_init               = ablk_ecb_init,
1337         .cra_exit               = ablk_exit,
1338         .cra_u = {
1339                 .ablkcipher = {
1340                         .min_keysize    = AES_MIN_KEY_SIZE,
1341                         .max_keysize    = AES_MAX_KEY_SIZE,
1342                         .setkey         = ablk_set_key,
1343                         .encrypt        = ablk_encrypt,
1344                         .decrypt        = ablk_decrypt,
1345                 },
1346         },
1347 }, {
1348         .cra_name               = "cbc(aes)",
1349         .cra_driver_name        = "cbc-aes-aesni",
1350         .cra_priority           = 400,
1351         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1352         .cra_blocksize          = AES_BLOCK_SIZE,
1353         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1354         .cra_alignmask          = 0,
1355         .cra_type               = &crypto_ablkcipher_type,
1356         .cra_module             = THIS_MODULE,
1357         .cra_init               = ablk_cbc_init,
1358         .cra_exit               = ablk_exit,
1359         .cra_u = {
1360                 .ablkcipher = {
1361                         .min_keysize    = AES_MIN_KEY_SIZE,
1362                         .max_keysize    = AES_MAX_KEY_SIZE,
1363                         .ivsize         = AES_BLOCK_SIZE,
1364                         .setkey         = ablk_set_key,
1365                         .encrypt        = ablk_encrypt,
1366                         .decrypt        = ablk_decrypt,
1367                 },
1368         },
1369 #ifdef CONFIG_X86_64
1370 }, {
1371         .cra_name               = "__ctr-aes-aesni",
1372         .cra_driver_name        = "__driver-ctr-aes-aesni",
1373         .cra_priority           = 0,
1374         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1375                                   CRYPTO_ALG_INTERNAL,
1376         .cra_blocksize          = 1,
1377         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1378                                   AESNI_ALIGN - 1,
1379         .cra_alignmask          = 0,
1380         .cra_type               = &crypto_blkcipher_type,
1381         .cra_module             = THIS_MODULE,
1382         .cra_u = {
1383                 .blkcipher = {
1384                         .min_keysize    = AES_MIN_KEY_SIZE,
1385                         .max_keysize    = AES_MAX_KEY_SIZE,
1386                         .ivsize         = AES_BLOCK_SIZE,
1387                         .setkey         = aes_set_key,
1388                         .encrypt        = ctr_crypt,
1389                         .decrypt        = ctr_crypt,
1390                 },
1391         },
1392 }, {
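        /*
         * Editor's note: .decrypt deliberately points at ablk_encrypt in
         * this entry, because CTR decryption is the same keystream XOR as
         * encryption; it is not a copy-and-paste mistake.
         */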
1393         .cra_name               = "ctr(aes)",
1394         .cra_driver_name        = "ctr-aes-aesni",
1395         .cra_priority           = 400,
1396         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1397         .cra_blocksize          = 1,
1398         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1399         .cra_alignmask          = 0,
1400         .cra_type               = &crypto_ablkcipher_type,
1401         .cra_module             = THIS_MODULE,
1402         .cra_init               = ablk_ctr_init,
1403         .cra_exit               = ablk_exit,
1404         .cra_u = {
1405                 .ablkcipher = {
1406                         .min_keysize    = AES_MIN_KEY_SIZE,
1407                         .max_keysize    = AES_MAX_KEY_SIZE,
1408                         .ivsize         = AES_BLOCK_SIZE,
1409                         .setkey         = ablk_set_key,
1410                         .encrypt        = ablk_encrypt,
1411                         .decrypt        = ablk_encrypt,
1412                         .geniv          = "chainiv",
1413                 },
1414         },
1415 }, {
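        /*
         * Editor's note: __gcm-aes-aesni is the internal AEAD worker for
         * the RFC 4106 front end below; it expects to run with the FPU
         * available and is reached through cryptd rather than selected
         * directly.
         */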
1416         .cra_name               = "__gcm-aes-aesni",
1417         .cra_driver_name        = "__driver-gcm-aes-aesni",
1418         .cra_priority           = 0,
1419         .cra_flags              = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_INTERNAL,
1420         .cra_blocksize          = 1,
1421         .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx) +
1422                                   AESNI_ALIGN,
1423         .cra_alignmask          = 0,
1424         .cra_type               = &crypto_aead_type,
1425         .cra_module             = THIS_MODULE,
1426         .cra_u = {
1427                 .aead = {
1428                         .setkey         = common_rfc4106_set_key,
1429                         .setauthsize    = common_rfc4106_set_authsize,
1430                         .encrypt        = helper_rfc4106_encrypt,
1431                         .decrypt        = helper_rfc4106_decrypt,
1432                         .ivsize         = 8,
1433                         .maxauthsize    = 16,
1434                 },
1435         },
1436 }, {
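        /*
         * Editor's note: RFC 4106 GCM as used by IPsec ESP.  The key
         * supplied through setkey carries a 4-byte salt at its end, the
         * 8-byte explicit IV is generated by "seqiv", and RFC 4106 allows
         * ICV lengths of 8, 12 or 16 bytes (hence maxauthsize of 16).
         */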
1437         .cra_name               = "rfc4106(gcm(aes))",
1438         .cra_driver_name        = "rfc4106-gcm-aesni",
1439         .cra_priority           = 400,
1440         .cra_flags              = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1441         .cra_blocksize          = 1,
1442         .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx) +
1443                                   AESNI_ALIGN,
1444         .cra_alignmask          = 0,
1445         .cra_type               = &crypto_nivaead_type,
1446         .cra_module             = THIS_MODULE,
1447         .cra_init               = rfc4106_init,
1448         .cra_exit               = rfc4106_exit,
1449         .cra_u = {
1450                 .aead = {
1451                         .setkey         = rfc4106_set_key,
1452                         .setauthsize    = rfc4106_set_authsize,
1453                         .encrypt        = rfc4106_encrypt,
1454                         .decrypt        = rfc4106_decrypt,
1455                         .geniv          = "seqiv",
1456                         .ivsize         = 8,
1457                         .maxauthsize    = 16,
1458                 },
1459         },
1460 #endif
1461 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
1462 }, {
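        /*
         * Editor's note: there is no dedicated PCBC assembly in this
         * driver; this entry layers the generic pcbc template over the
         * AES-NI cipher, so it is only registered when CONFIG_CRYPTO_PCBC
         * is enabled.
         */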
1463         .cra_name               = "pcbc(aes)",
1464         .cra_driver_name        = "pcbc-aes-aesni",
1465         .cra_priority           = 400,
1466         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1467         .cra_blocksize          = AES_BLOCK_SIZE,
1468         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1469         .cra_alignmask          = 0,
1470         .cra_type               = &crypto_ablkcipher_type,
1471         .cra_module             = THIS_MODULE,
1472         .cra_init               = ablk_pcbc_init,
1473         .cra_exit               = ablk_exit,
1474         .cra_u = {
1475                 .ablkcipher = {
1476                         .min_keysize    = AES_MIN_KEY_SIZE,
1477                         .max_keysize    = AES_MAX_KEY_SIZE,
1478                         .ivsize         = AES_BLOCK_SIZE,
1479                         .setkey         = ablk_set_key,
1480                         .encrypt        = ablk_encrypt,
1481                         .decrypt        = ablk_decrypt,
1482                 },
1483         },
1484 #endif
1485 }, {
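        /*
         * Editor's note: LRW keys consist of the AES key followed by a
         * 16-byte tweak key, which is why AES_BLOCK_SIZE is added to the
         * keysize limits below.
         */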
1486         .cra_name               = "__lrw-aes-aesni",
1487         .cra_driver_name        = "__driver-lrw-aes-aesni",
1488         .cra_priority           = 0,
1489         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1490                                   CRYPTO_ALG_INTERNAL,
1491         .cra_blocksize          = AES_BLOCK_SIZE,
1492         .cra_ctxsize            = sizeof(struct aesni_lrw_ctx),
1493         .cra_alignmask          = 0,
1494         .cra_type               = &crypto_blkcipher_type,
1495         .cra_module             = THIS_MODULE,
1496         .cra_exit               = lrw_aesni_exit_tfm,
1497         .cra_u = {
1498                 .blkcipher = {
1499                         .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1500                         .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1501                         .ivsize         = AES_BLOCK_SIZE,
1502                         .setkey         = lrw_aesni_setkey,
1503                         .encrypt        = lrw_encrypt,
1504                         .decrypt        = lrw_decrypt,
1505                 },
1506         },
1507 }, {
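        /*
         * Editor's note: XTS uses two full AES keys (one for the data, one
         * for the tweak), hence the doubled keysize limits below.
         */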
1508         .cra_name               = "__xts-aes-aesni",
1509         .cra_driver_name        = "__driver-xts-aes-aesni",
1510         .cra_priority           = 0,
1511         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1512                                   CRYPTO_ALG_INTERNAL,
1513         .cra_blocksize          = AES_BLOCK_SIZE,
1514         .cra_ctxsize            = sizeof(struct aesni_xts_ctx),
1515         .cra_alignmask          = 0,
1516         .cra_type               = &crypto_blkcipher_type,
1517         .cra_module             = THIS_MODULE,
1518         .cra_u = {
1519                 .blkcipher = {
1520                         .min_keysize    = 2 * AES_MIN_KEY_SIZE,
1521                         .max_keysize    = 2 * AES_MAX_KEY_SIZE,
1522                         .ivsize         = AES_BLOCK_SIZE,
1523                         .setkey         = xts_aesni_setkey,
1524                         .encrypt        = xts_encrypt,
1525                         .decrypt        = xts_decrypt,
1526                 },
1527         },
1528 }, {
1529         .cra_name               = "lrw(aes)",
1530         .cra_driver_name        = "lrw-aes-aesni",
1531         .cra_priority           = 400,
1532         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1533         .cra_blocksize          = AES_BLOCK_SIZE,
1534         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1535         .cra_alignmask          = 0,
1536         .cra_type               = &crypto_ablkcipher_type,
1537         .cra_module             = THIS_MODULE,
1538         .cra_init               = ablk_init,
1539         .cra_exit               = ablk_exit,
1540         .cra_u = {
1541                 .ablkcipher = {
1542                         .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1543                         .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1544                         .ivsize         = AES_BLOCK_SIZE,
1545                         .setkey         = ablk_set_key,
1546                         .encrypt        = ablk_encrypt,
1547                         .decrypt        = ablk_decrypt,
1548                 },
1549         },
1550 }, {
1551         .cra_name               = "xts(aes)",
1552         .cra_driver_name        = "xts-aes-aesni",
1553         .cra_priority           = 400,
1554         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1555         .cra_blocksize          = AES_BLOCK_SIZE,
1556         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1557         .cra_alignmask          = 0,
1558         .cra_type               = &crypto_ablkcipher_type,
1559         .cra_module             = THIS_MODULE,
1560         .cra_init               = ablk_init,
1561         .cra_exit               = ablk_exit,
1562         .cra_u = {
1563                 .ablkcipher = {
1564                         .min_keysize    = 2 * AES_MIN_KEY_SIZE,
1565                         .max_keysize    = 2 * AES_MAX_KEY_SIZE,
1566                         .ivsize         = AES_BLOCK_SIZE,
1567                         .setkey         = ablk_set_key,
1568                         .encrypt        = ablk_encrypt,
1569                         .decrypt        = ablk_decrypt,
1570                 },
1571         },
1572 } };
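
/*
 * Editor's note: the code below is an illustrative sketch added for this
 * document, not part of the original driver.  It shows how a kernel caller
 * might exercise one of the algorithms registered above: asking the crypto
 * API for "cbc(aes)" will normally bind the cbc-aes-aesni entry, since its
 * priority (400) outranks the generic C implementation.  The helper name,
 * its parameters and the error handling are hypothetical; only the crypto
 * API calls themselves are real 4.1-era interfaces (and the sketch assumes
 * <linux/completion.h> is available).
 */
struct example_cbc_result {
	struct completion completion;
	int err;
};

/* Completion callback: record the final status and wake the waiter. */
static void example_cbc_complete(struct crypto_async_request *req, int err)
{
	struct example_cbc_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

/* Encrypt @nbytes of @sg in place with AES-CBC under @key and @iv. */
static int __maybe_unused example_cbc_aes_encrypt(const u8 *key,
						  unsigned int keylen,
						  u8 *iv,
						  struct scatterlist *sg,
						  unsigned int nbytes)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct example_cbc_result res;
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_cbc_complete, &res);
	ablkcipher_request_set_crypt(req, sg, sg, nbytes, iv);

	/* The aesni wrapper may complete asynchronously via cryptd. */
	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.completion);
		err = res.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}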
1573
1574
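/*
 * Editor's note: this CPU feature table, exported with
 * MODULE_DEVICE_TABLE(x86cpu, ...), lets the module be loaded
 * automatically on processors that advertise the AES-NI feature flag;
 * aesni_init() refuses to load anywhere else.
 */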
1575 static const struct x86_cpu_id aesni_cpu_id[] = {
1576         X86_FEATURE_MATCH(X86_FEATURE_AES),
1577         {}
1578 };
1579 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1580
1581 static int __init aesni_init(void)
1582 {
1583         int err;
1584
1585         if (!x86_match_cpu(aesni_cpu_id))
1586                 return -ENODEV;
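        /*
         * Editor's note: on 64-bit kernels, pick the fastest GCM
         * implementation that both the assembler (CONFIG_AS_AVX2 /
         * CONFIG_AS_AVX) and the running CPU support: AVX2, then AVX,
         * then the plain SSE version.
         */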
1587 #ifdef CONFIG_X86_64
1588 #ifdef CONFIG_AS_AVX2
1589         if (boot_cpu_has(X86_FEATURE_AVX2)) {
1590                 pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1591                 aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
1592                 aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
1593         } else
1594 #endif
1595 #ifdef CONFIG_AS_AVX
1596         if (boot_cpu_has(X86_FEATURE_AVX)) {
1597                 pr_info("AVX version of gcm_enc/dec engaged.\n");
1598                 aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
1599                 aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
1600         } else
1601 #endif
1602         {
1603                 pr_info("SSE version of gcm_enc/dec engaged.\n");
1604                 aesni_gcm_enc_tfm = aesni_gcm_enc;
1605                 aesni_gcm_dec_tfm = aesni_gcm_dec;
1606         }
1607         aesni_ctr_enc_tfm = aesni_ctr_enc;
1608 #ifdef CONFIG_AS_AVX
1609         if (cpu_has_avx) {
1610                 /* optimize performance of ctr mode encryption transform */
1611                 aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1612                 pr_info("AES CTR mode by8 optimization enabled\n");
1613         }
1614 #endif
1615 #endif
1616
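        /*
         * Editor's note: crypto_fpu_init() registers the "fpu" template
         * that the asynchronous wrappers rely on to ensure the SIMD
         * registers are only touched when the FPU is usable.
         */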
1617         err = crypto_fpu_init();
1618         if (err)
1619                 return err;
1620
1621         return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1622 }
1623
1624 static void __exit aesni_exit(void)
1625 {
1626         crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1627
1628         crypto_fpu_exit();
1629 }
1630
1631 module_init(aesni_init);
1632 module_exit(aesni_exit);
1633
1634 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, optimized with Intel AES-NI instructions");
1635 MODULE_LICENSE("GPL");
1636 MODULE_ALIAS_CRYPTO("aes");
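
/*
 * Editor's note: the crypto alias above lets this module be loaded on
 * demand whenever something in the kernel asks for the plain "aes" cipher.
 */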