/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
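
/*
 * Hardware cipher configuration words for AES-CBC: the encrypt template
 * uses the key as supplied, while the decrypt template asks the hardware
 * to convert the key into its decrypt form first.
 */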
#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static atomic_t active_dev;
struct qat_alg_buf_list {
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);
/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);
struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	uint8_t salt[AES_BLOCK_SIZE];
	spinlock_t lock;	/* protects qat_alg_aead_ctx struct */
};
struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
};
static int get_current_node(void)
	return cpu_data(current_thread_info()->cpu).phys_proc_id;
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
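
/*
 * Precompute the HMAC inner and outer digest states for the session key:
 * hash the ipad/opad-masked key with the software shash and export the
 * intermediate state into the hardware hash setup block.
 */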
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	char ipad[block_size];
	char opad[block_size];
	__be32 *hash_state_out;
	__be64 *hash512_state_out;

	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
		memcpy(opad, ipad, digest_size);
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;

	if (crypto_shash_init(shash))

	if (crypto_shash_update(shash, ipad, block_size))

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));

	if (crypto_shash_init(shash))

	if (crypto_shash_update(shash, opad, block_size))

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));

	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
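
/*
 * Populate the request header fields shared by all lookaside requests:
 * SGL pointer type, 64-bit content descriptor addresses, no partial
 * packets, no protocol offload and no state update.
 */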
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
	ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
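
/*
 * Build the AEAD encrypt session: cipher config followed by the auth
 * config in the content descriptor, with the firmware slices chained as
 * CIPHER -> AUTH -> DRAM write (encrypt-then-MAC).
 */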
static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
					 struct crypto_authenc_keys *keys)
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))

	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
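
/*
 * Build the AEAD decrypt session: auth config first, cipher config after
 * it in the content descriptor, so the hardware verifies the digest and
 * then decrypts (AUTH -> CIPHER -> DRAM write).
 */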
static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
					 struct crypto_authenc_keys *keys)
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))

	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
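
/*
 * Fill in the parts of the cipher-only content descriptor and request
 * template that are shared by the ablkcipher encrypt and decrypt paths.
 */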
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
		sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
static int qat_alg_validate_key(int key_len, int *alg)
	case AES_KEYSIZE_128:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
	case AES_KEYSIZE_192:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
	case AES_KEYSIZE_256:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
				      const uint8_t *key, unsigned int keylen)
	struct crypto_authenc_keys keys;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))

	if (crypto_authenc_extractkeys(&keys, key, keylen))

	if (qat_alg_validate_key(keys.enckeylen, &alg))

	if (qat_alg_aead_init_enc_session(ctx, alg, &keys))

	if (qat_alg_aead_init_dec_session(ctx, alg, &keys))

	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
	if (qat_alg_validate_key(keylen, &alg))

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);

	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
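
/*
 * setkey for the authenc AEADs: on first use pick a crypto instance on
 * the local NUMA node and allocate DMA-coherent content descriptors,
 * then (re)build the encrypt and decrypt session templates from the
 * authenc key blob.
 */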
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	spin_lock(&ctx->lock);
	dev = &GET_DEV(ctx->inst->accel_dev);
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	int node = get_current_node();
	struct qat_crypto_instance *inst =
		qat_crypto_get_instance_node(node);
	spin_unlock(&ctx->lock);

	dev = &GET_DEV(inst->accel_dev);
	ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
	spin_unlock(&ctx->lock);
	ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
	spin_unlock(&ctx->lock);

	spin_unlock(&ctx->lock);
	if (qat_alg_aead_init_sessions(ctx, key, keylen))

	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
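
/*
 * Undo the DMA mappings created for a request: every buffer in the
 * source list, the list itself, and (for out-of-place requests) the
 * separately mapped destination data buffers and list.
 */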
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
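
/*
 * Flatten the assoc/src/dst scatterlists plus the IV into firmware
 * buffer lists, DMA-mapping every element and the lists themselves.
 * For in-place requests the destination list simply aliases the source
 * list.
 */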
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       struct qat_crypto_request *qat_req)
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, sg_nctr = 0;
	int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))

	for_each_sg(assoc, sg, assoc_n, i) {
		bufl->bufers[bufs].addr = dma_map_single(dev,
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))

	bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
	bufl->bufers[bufs].len = ivlen;
	if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr + bufs;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))

	bufl->num_bufs = sg_nctr + bufs;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
		bufers = buflout->bufers;
		/* For out of place operation dma map only data and
		 * reuse assoc mapping and iv */
		for (i = 0; i < bufs; i++) {
			bufers[i].len = bufl->bufers[i].len;
			bufers[i].addr = bufl->bufers[i].addr;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr + bufs;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
			bufers[y].len = sg->length;
		buflout->num_bufs = sg_nctr + bufs;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;

		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;

	dev_err(dev, "Failed to map buf for dma\n");
	for (i = 0; i < n + bufs; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	if (sgl != sglout && buflout) {
		n = sg_nents(sglout);
		for (i = bufs; i < n + bufs; i++)
			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
				dma_unmap_single(dev, buflout->bufers[i].addr,
						 buflout->bufers[i].len,
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
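
/*
 * Completion callbacks: release the buffer lists, translate the firmware
 * status word into the request status and complete the original crypto
 * request.
 */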
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))

	areq->base.complete(&areq->base, res);

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))

	areq->base.complete(&areq->base, res);

void qat_alg_callback(void *resp)
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
		(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
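
/*
 * AEAD decrypt: the cipher covers the ciphertext minus the digest, while
 * the auth engine covers assoc data + IV + ciphertext so the hardware
 * can verify the tag in the same pass.
 */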
static int qat_alg_aead_dec(struct aead_request *areq)
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digest_size = crypto_aead_crt(aead_tfm)->authsize;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  areq->iv, AES_BLOCK_SIZE, qat_req);

	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digest_size;
	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen +
			       cipher_param->cipher_length + AES_BLOCK_SIZE;

		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
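
/*
 * Common AEAD encrypt path shared by the plain encrypt and givencrypt
 * entry points; the trailing flag selects whether the generated IV is
 * encrypted together with the payload or only carried in the request
 * parameters.
 */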
static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  iv, AES_BLOCK_SIZE, qat_req);

	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
		cipher_param->cipher_offset = areq->assoclen;

		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
		cipher_param->cipher_length = areq->cryptlen;
		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);

static int qat_alg_aead_enc(struct aead_request *areq)
	return qat_alg_aead_enc_internal(areq, areq->iv, 0);
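
/*
 * givencrypt: derive the IV from the per-tfm salt and the request
 * sequence number, then run the encrypt path with IV encryption enabled.
 */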
static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
	       &seq, sizeof(uint64_t));
	return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	spin_lock(&ctx->lock);
	dev = &GET_DEV(ctx->inst->accel_dev);
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	int node = get_current_node();
	struct qat_crypto_instance *inst =
		qat_crypto_get_instance_node(node);
	spin_unlock(&ctx->lock);

	dev = &GET_DEV(inst->accel_dev);
	ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
	spin_unlock(&ctx->lock);
	ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
	spin_unlock(&ctx->lock);

	spin_unlock(&ctx->lock);
	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))

	memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
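
/*
 * Submit a cbc(aes) request on the instance's symmetric ring, retrying a
 * few times if the ring is temporarily full, and report the request as
 * in progress on success.
 */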
static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;

	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);

		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);

	return -EINPROGRESS;
static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;

	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);

		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);

	return -EINPROGRESS;
static int qat_alg_aead_init(struct crypto_tfm *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))

	spin_lock_init(&ctx->lock);
	ctx->qat_hash_alg = hash;
	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
				sizeof(struct qat_crypto_request);

static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");

static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");

static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");

static void qat_alg_aead_exit(struct crypto_tfm *tfm)
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;

	if (!IS_ERR(ctx->hash_tfm))
		crypto_free_shash(ctx->hash_tfm);

	dev = &GET_DEV(inst->accel_dev);
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	qat_crypto_put_instance(inst);
static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
				      sizeof(struct qat_crypto_request);

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;

	dev = &GET_DEV(inst->accel_dev);
	memset(ctx->enc_cd, 0,
	       sizeof(struct icp_qat_hw_cipher_algo_blk));
	dma_free_coherent(dev,
			  sizeof(struct icp_qat_hw_cipher_algo_blk),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	memset(ctx->dec_cd, 0,
	       sizeof(struct icp_qat_hw_cipher_algo_blk));
	dma_free_coherent(dev,
			  sizeof(struct icp_qat_hw_cipher_algo_blk),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	qat_crypto_put_instance(inst);
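
/*
 * Algorithms exposed to the crypto API: authenc() AEADs combining
 * HMAC-SHA1/SHA256/SHA512 with AES-CBC, plus a plain cbc(aes)
 * ablkcipher, all asynchronous and registered with a high priority so
 * they win over software implementations.
 */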
static struct crypto_alg qat_algs[] = { {
	.cra_name = "authenc(hmac(sha1),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_aead_sha1_init,
	.cra_exit = qat_alg_aead_exit,
		.setkey = qat_alg_aead_setkey,
		.decrypt = qat_alg_aead_dec,
		.encrypt = qat_alg_aead_enc,
		.givencrypt = qat_alg_aead_genivenc,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,

	.cra_name = "authenc(hmac(sha256),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_aead_sha256_init,
	.cra_exit = qat_alg_aead_exit,
		.setkey = qat_alg_aead_setkey,
		.decrypt = qat_alg_aead_dec,
		.encrypt = qat_alg_aead_enc,
		.givencrypt = qat_alg_aead_genivenc,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,

	.cra_name = "authenc(hmac(sha512),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_aead_sha512_init,
	.cra_exit = qat_alg_aead_exit,
		.setkey = qat_alg_aead_setkey,
		.decrypt = qat_alg_aead_dec,
		.encrypt = qat_alg_aead_enc,
		.givencrypt = qat_alg_aead_genivenc,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,

	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
		.setkey = qat_alg_ablkcipher_setkey,
		.decrypt = qat_alg_ablkcipher_decrypt,
		.encrypt = qat_alg_ablkcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
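
/*
 * Registration is reference counted across accelerator devices: the
 * algorithms are registered when the first device comes up and
 * unregistered when the last one goes away.
 */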
int qat_algs_register(void)
	if (atomic_add_return(1, &active_dev) == 1) {
		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
			qat_algs[i].cra_flags =
				(qat_algs[i].cra_type == &crypto_aead_type) ?
				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));

int qat_algs_unregister(void)
	if (atomic_sub_return(1, &active_dev) == 0)
		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

int qat_algs_init(void)
	atomic_set(&active_dev, 0);
	crypto_get_default_rng();

void qat_algs_exit(void)
	crypto_put_default_rng();