/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif
#define AESNI_ALIGN	16
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct crypto_aes_ctx aes_key_expanded
		__attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 nonce[4];
};
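/*
 * Note: the nonce field above carries the RFC4106 salt. setkey() copies
 * the last four bytes of the key material there, and the helpers prepend
 * it to the 8-byte per-request IV when building the pre-counter block.
 */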
struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};
struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};
struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
int crypto_fpu_init(void);
void crypto_fpu_exit(void);
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
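/*
 * Requests below these byte counts are routed to the SSE version by the
 * dispatch wrappers below (see aesni_gcm_enc_avx()/aesni_gcm_enc_avx2()),
 * presumably because the AVX/AVX2 precompute overhead dominates for
 * short inputs.
 */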
#ifdef CONFIG_X86_64
static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);
/* asmlinkage void aesni_gcm_enc()
 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
static void aesni_gcm_dec_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
#endif
#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
				aad, aad_len, auth_tag, auth_tag_len);
	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
#endif
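/*
 * Run-time selected GCM routines: aesni_init() points these at the SSE,
 * AVX, or AVX2 wrappers above according to the CPU feature flags.
 */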
static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
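/*
 * The raw context buffers are over-allocated by AESNI_ALIGN - 1 bytes so
 * that aes_ctx() can round the pointer up to a 16-byte boundary, which
 * the assembly routines require regardless of the API's alignmask.
 */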
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}
static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		kernel_fpu_end();
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		kernel_fpu_end();
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

#ifdef CONFIG_X86_64
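/*
 * ctr_crypt_final() handles the trailing partial block: CTR mode turns
 * the block cipher into a stream cipher, so the tail is produced by
 * XORing the remaining bytes with one block of keystream.
 */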
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
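/*
 * With an AVX-capable assembler and CPU, aesni_init() replaces the CTR
 * transform with the "by8" variant below, which (as the name suggests)
 * processes eight counter blocks per iteration in the assembly.
 */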
#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * based on key length, override with the by8 version
	 * of ctr mode encryption/decryption for improved performance
	 * aes_set_key_common() ensures that key length is one of
	 * {128,192,256}
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif
static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		kernel_fpu_begin();
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		kernel_fpu_end();
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		kernel_fpu_begin();
		ctr_crypt_final(ctx, &walk);
		kernel_fpu_end();
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
#endif
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}
static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}
#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif
static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_enc(ctx, blks, blks, nbytes);
}
static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_dec(ctx, blks, blks, nbytes);
}
static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
				 keylen - AES_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}
static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}
static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64
static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}
static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};
static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};
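/*
 * The glue layer walks these tables in order: it uses the eight-block
 * routine while at least eight blocks remain, then finishes the tail
 * with the single-block routine.
 */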
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

#else
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}
static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}
static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}
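/*
 * The GHASH subkey H is the cipher applied to the all-zero block. A
 * one-block ctr(aes) request with a zero counter block over a zeroed
 * buffer produces exactly that, which lets the function below reuse the
 * async ablkcipher machinery.
 */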
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length,
 * which can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
static int helper_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length equal */
	/* to 16 or 20 bytes */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}
static int helper_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 authTag[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length */
	/* equal to 16 or 20 bytes */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		 -EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}
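/*
 * The exported rfc4106 entry points run through cryptd. If the FPU is
 * usable in the current context the request is handed straight to the
 * internal helper via the cryptd child; otherwise it is queued to
 * cryptd so the SIMD work happens in process context.
 */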
static int rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;

	aead_request_set_tfm(req, irq_fpu_usable() ?
				  cryptd_aead_child(cryptd_tfm) :
				  &cryptd_tfm->base);

	return crypto_aead_encrypt(req);
}
static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;

	aead_request_set_tfm(req, irq_fpu_usable() ?
				  cryptd_aead_child(cryptd_tfm) :
				  &cryptd_tfm->base);

	return crypto_aead_decrypt(req);
}
static struct crypto_alg aesni_algs[] = { {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
}, {
	.cra_name		= "__aes-aesni",
	.cra_driver_name	= "__driver-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
}, {
	.cra_name		= "__ecb-aes-aesni",
	.cra_driver_name	= "__driver-ecb-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-aes-aesni",
	.cra_driver_name	= "__driver-cbc-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_ecb_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_cbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
#ifdef CONFIG_X86_64
}, {
	.cra_name		= "__ctr-aes-aesni",
	.cra_driver_name	= "__driver-ctr-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
		},
	},
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
	.cra_name		= "pcbc(aes)",
	.cra_driver_name	= "pcbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_pcbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
#endif
}, {
	.cra_name		= "__lrw-aes-aesni",
	.cra_driver_name	= "__driver-lrw-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aesni_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_aesni_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= lrw_aesni_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-aes-aesni",
	.cra_driver_name	= "__driver-xts-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aesni_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= xts_aesni_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	.cra_name		= "lrw(aes)",
	.cra_driver_name	= "lrw-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };
#ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.ivsize			= 8,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__gcm-aes-aesni",
		.cra_driver_name	= "__driver-gcm-aes-aesni",
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.init			= rfc4106_init,
	.exit			= rfc4106_exit,
	.setkey			= rfc4106_set_key,
	.setauthsize		= rfc4106_set_authsize,
	.encrypt		= rfc4106_encrypt,
	.decrypt		= rfc4106_decrypt,
	.ivsize			= 8,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "rfc4106(gcm(aes))",
		.cra_driver_name	= "rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct cryptd_aead *),
		.cra_module		= THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif
static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
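/*
 * The device table above lets the module loader auto-load this driver on
 * any CPU that advertises the AES feature flag; aesni_init() then refuses
 * to register anything if the flag is absent.
 */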
static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_fpu_init();
	if (err)
		return err;

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		goto fpu_exit;

	err = crypto_register_aeads(aesni_aead_algs,
				    ARRAY_SIZE(aesni_aead_algs));
	if (err)
		goto unregister_algs;

	return 0;

unregister_algs:
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
	crypto_fpu_exit();
	return err;
}
static void __exit aesni_exit(void)
{
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}
late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");