2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <crypto/rng.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
/*
 * Build the HW cipher config word for AES-CBC encryption:
 * CBC mode, no key conversion, encrypt direction.
 * @alg: ICP_QAT_HW_CIPHER_ALGO_AES* constant selecting the key size.
 */
66 #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
67 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
68 ICP_QAT_HW_CIPHER_NO_CONVERT, \
69 ICP_QAT_HW_CIPHER_ENCRYPT)
/*
 * Build the HW cipher config word for AES-CBC decryption.  Unlike the
 * encrypt variant, the key is converted (KEY_CONVERT) as the HW decrypt
 * path requires, and the direction is set to decrypt.
 */
71 #define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
72 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
73 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74 ICP_QAT_HW_CIPHER_DECRYPT)
/* Serializes algorithm registration/unregistration across devices. */
76 static DEFINE_MUTEX(algs_lock);
/* Count of accelerator devices using these algs; access under algs_lock. */
77 static unsigned int active_devs;
/*
 * Buffer list handed to the QAT firmware as the SGL for a request.
 * Ends in a flexible array of per-segment descriptors ("bufers" spelling
 * is historical).  Packed and 64-byte aligned for the hardware.
 * NOTE(review): earlier members (e.g. num_bufs, used elsewhere in this
 * file) are not visible in this view.
 */
85 struct qat_alg_buf_list {
88 uint32_t num_mapped_bufs;
89 struct qat_alg_buf bufers[];
90 } __packed __aligned(64);
92 /* Common content descriptor */
/* Encrypt CD layout: cipher config/key first, then the auth block. */
95 struct qat_enc { /* Encrypt content desc */
96 struct icp_qat_hw_cipher_algo_blk cipher;
97 struct icp_qat_hw_auth_algo_blk hash;
/* Decrypt CD layout: auth block first, then cipher config/key. */
99 struct qat_dec { /* Decrypt content desc */
100 struct icp_qat_hw_auth_algo_blk hash;
101 struct icp_qat_hw_cipher_algo_blk cipher;
/*
 * Per-transform context for the authenc (AEAD) algorithms.  Holds the
 * DMA-coherent encrypt/decrypt content descriptors, the pre-built FW
 * request templates copied into each request, and the software shash
 * used for the HMAC ipad/opad precomputation.
 */
106 struct qat_alg_aead_ctx {
107 struct qat_alg_cd *enc_cd;
108 struct qat_alg_cd *dec_cd;
109 dma_addr_t enc_cd_paddr; /* DMA address of enc_cd */
110 dma_addr_t dec_cd_paddr; /* DMA address of dec_cd */
111 struct icp_qat_fw_la_bulk_req enc_fw_req; /* template, copied per request */
112 struct icp_qat_fw_la_bulk_req dec_fw_req; /* template, copied per request */
113 struct crypto_shash *hash_tfm; /* sw hash for HMAC precomputes */
114 enum icp_qat_hw_auth_algo qat_hash_alg;
115 struct qat_crypto_instance *inst;
116 struct crypto_tfm *tfm;
117 uint8_t salt[AES_BLOCK_SIZE]; /* random salt used by givencrypt */
118 spinlock_t lock; /* protects qat_alg_aead_ctx struct */
/*
 * Per-transform context for the plain (non-AEAD) block cipher
 * algorithms; same layout idea as qat_alg_aead_ctx minus the hash state.
 */
121 struct qat_alg_ablkcipher_ctx {
122 struct icp_qat_hw_cipher_algo_blk *enc_cd;
123 struct icp_qat_hw_cipher_algo_blk *dec_cd;
124 dma_addr_t enc_cd_paddr; /* DMA address of enc_cd */
125 dma_addr_t dec_cd_paddr; /* DMA address of dec_cd */
126 struct icp_qat_fw_la_bulk_req enc_fw_req; /* template, copied per request */
127 struct icp_qat_fw_la_bulk_req dec_fw_req; /* template, copied per request */
128 struct qat_crypto_instance *inst;
129 struct crypto_tfm *tfm;
130 spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
/* Return the physical package (NUMA node) id of the current CPU. */
133 static int get_current_node(void)
135 return cpu_data(current_thread_info()->cpu).phys_proc_id;
/*
 * Return the HW intermediate (state1) size in bytes for the given auth
 * algorithm.  (The default/error case is not visible in this view.)
 */
138 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
140 switch (qat_hash_alg) {
141 case ICP_QAT_HW_AUTH_ALGO_SHA1:
142 return ICP_QAT_HW_SHA1_STATE1_SZ;
143 case ICP_QAT_HW_AUTH_ALGO_SHA256:
144 return ICP_QAT_HW_SHA256_STATE1_SZ;
145 case ICP_QAT_HW_AUTH_ALGO_SHA512:
146 return ICP_QAT_HW_SHA512_STATE1_SZ;
/*
 * Precompute the HMAC inner (ipad) and outer (opad) partial hash states
 * and store them big-endian into the auth block's state1 area, where the
 * firmware expects them.  The inner state goes at offset 0; the outer
 * state follows at the rounded-up state size.  Keys longer than the hash
 * block size are first digested down, per RFC 2104.
 * Returns 0 on success, non-zero on shash failure (error paths are
 * partly outside this view).
 */
153 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
154 struct qat_alg_aead_ctx *ctx,
155 const uint8_t *auth_key,
156 unsigned int auth_keylen)
158 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
159 struct sha1_state sha1;
160 struct sha256_state sha256;
161 struct sha512_state sha512;
162 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
163 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
/* VLA pads sized by the hash block size (64 or 128 bytes). */
164 char ipad[block_size];
165 char opad[block_size];
166 __be32 *hash_state_out;
167 __be64 *hash512_state_out;
170 memset(ipad, 0, block_size);
171 memset(opad, 0, block_size);
172 shash->tfm = ctx->hash_tfm;
/* RFC 2104: an over-long key is replaced by its digest. */
175 if (auth_keylen > block_size) {
176 int ret = crypto_shash_digest(shash, auth_key,
181 memcpy(opad, ipad, digest_size);
183 memcpy(ipad, auth_key, auth_keylen);
184 memcpy(opad, auth_key, auth_keylen);
/* XOR pads with the HMAC constants (loop body not visible here). */
187 for (i = 0; i < block_size; i++) {
188 char *ipad_ptr = ipad + i;
189 char *opad_ptr = opad + i;
/* Inner partial hash: H(K ^ ipad). */
194 if (crypto_shash_init(shash))
197 if (crypto_shash_update(shash, ipad, block_size))
200 hash_state_out = (__be32 *)hash->sha.state1;
201 hash512_state_out = (__be64 *)hash_state_out;
/* Export the partial state and byte-swap words to big-endian. */
203 switch (ctx->qat_hash_alg) {
204 case ICP_QAT_HW_AUTH_ALGO_SHA1:
205 if (crypto_shash_export(shash, &sha1))
207 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
208 *hash_state_out = cpu_to_be32(*(sha1.state + i));
210 case ICP_QAT_HW_AUTH_ALGO_SHA256:
211 if (crypto_shash_export(shash, &sha256))
213 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
214 *hash_state_out = cpu_to_be32(*(sha256.state + i));
216 case ICP_QAT_HW_AUTH_ALGO_SHA512:
217 if (crypto_shash_export(shash, &sha512))
/* SHA-512 state words are 64-bit, hence the >> 3 and __be64 out. */
219 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
220 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
/* Outer partial hash: H(K ^ opad). */
226 if (crypto_shash_init(shash))
229 if (crypto_shash_update(shash, opad, block_size))
/* Outer state is stored after the inner state, 8-byte aligned. */
232 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
233 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
234 hash512_state_out = (__be64 *)hash_state_out;
236 switch (ctx->qat_hash_alg) {
237 case ICP_QAT_HW_AUTH_ALGO_SHA1:
238 if (crypto_shash_export(shash, &sha1))
240 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
241 *hash_state_out = cpu_to_be32(*(sha1.state + i))
243 case ICP_QAT_HW_AUTH_ALGO_SHA256:
244 if (crypto_shash_export(shash, &sha256))
246 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
247 *hash_state_out = cpu_to_be32(*(sha256.state + i));
249 case ICP_QAT_HW_AUTH_ALGO_SHA512:
250 if (crypto_shash_export(shash, &sha512))
252 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
253 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
/* Scrub key-derived pads from the stack. */
258 memzero_explicit(ipad, block_size);
259 memzero_explicit(opad, block_size);
/*
 * Fill the fields of a FW request header that are common to every LA
 * (lookaside) request built by this driver: LA service, 64-bit CD
 * pointers with SGL data, no partials, 16-byte IV field, no protocol
 * offload, no state update.
 */
263 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
266 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
267 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
268 header->comn_req_flags =
269 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
270 QAT_COMN_PTR_TYPE_SGL);
271 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
272 ICP_QAT_FW_LA_PARTIAL_NONE);
273 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
274 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
275 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
276 ICP_QAT_FW_LA_NO_PROTO);
277 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
278 ICP_QAT_FW_LA_NO_UPDATE_STATE);
/*
 * Build the encrypt-direction content descriptor and FW request template
 * for an authenc session: cipher-then-hash, with the auth block placed
 * in the CD directly after the cipher config + key.  Returns 0 on
 * success, non-zero if the HMAC precompute fails.
 */
281 static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
283 struct crypto_authenc_keys *keys)
285 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
286 unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
287 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
288 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
/* Auth block starts right after the auth setup + the cipher key. */
289 struct icp_qat_hw_auth_algo_blk *hash =
290 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
291 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
292 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
293 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
294 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
/* cipher and auth cd_ctrl views alias the same template field. */
295 void *ptr = &req_tmpl->cd_ctrl;
296 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
297 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
/* CD setup: AES-CBC encrypt config, key, then HMAC mode1 auth config. */
300 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
301 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
302 hash->sha.inner_setup.auth_config.config =
303 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
304 ctx->qat_hash_alg, digestsize);
305 hash->sha.inner_setup.auth_counter.counter =
306 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
/* Precompute HMAC ipad/opad states into the auth block. */
308 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
/* Request template: cipher+hash, digest appended, no compare on enc. */
312 qat_alg_init_common_hdr(header);
313 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
314 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
315 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
316 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
317 ICP_QAT_FW_LA_RET_AUTH_RES);
318 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
319 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
320 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
321 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
323 /* Cipher CD config setup */
324 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
325 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
326 cipher_cd_ctrl->cipher_cfg_offset = 0;
327 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
328 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
329 /* Auth CD config setup */
330 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
331 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
332 hash_cd_ctrl->inner_res_sz = digestsize;
333 hash_cd_ctrl->final_sz = digestsize;
/* Per-algorithm state sizes; SHA1's are rounded up to 8 bytes. */
335 switch (ctx->qat_hash_alg) {
336 case ICP_QAT_HW_AUTH_ALGO_SHA1:
337 hash_cd_ctrl->inner_state1_sz =
338 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
339 hash_cd_ctrl->inner_state2_sz =
340 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
342 case ICP_QAT_HW_AUTH_ALGO_SHA256:
343 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
344 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
346 case ICP_QAT_HW_AUTH_ALGO_SHA512:
347 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
348 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
/* state2 follows the auth setup + rounded state1, in 8-byte words. */
353 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
354 ((sizeof(struct icp_qat_hw_auth_setup) +
355 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
356 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
357 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
/*
 * Build the decrypt-direction content descriptor and FW request
 * template: hash-then-cipher, with the auth block first in the CD and
 * the cipher config placed after two rounded-up digest-sized states.
 * The firmware compares the auth result (CMP_AUTH) instead of returning
 * it.  Returns 0 on success, non-zero if the HMAC precompute fails.
 */
361 static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
363 struct crypto_authenc_keys *keys)
365 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
366 unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
367 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
368 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
/* Cipher block follows the auth setup + 2 * rounded digest states. */
369 struct icp_qat_hw_cipher_algo_blk *cipher =
370 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
371 sizeof(struct icp_qat_hw_auth_setup) +
372 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
373 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
374 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
375 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
/* cipher and auth cd_ctrl views alias the same template field. */
376 void *ptr = &req_tmpl->cd_ctrl;
377 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
378 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
/* Auth request params live after the cipher request params. */
379 struct icp_qat_fw_la_auth_req_params *auth_param =
380 (struct icp_qat_fw_la_auth_req_params *)
381 ((char *)&req_tmpl->serv_specif_rqpars +
382 sizeof(struct icp_qat_fw_la_cipher_req_params));
/* CD setup: AES-CBC decrypt config, key, HMAC mode1 auth config. */
385 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
386 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
387 hash->sha.inner_setup.auth_config.config =
388 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
391 hash->sha.inner_setup.auth_counter.counter =
392 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
394 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
/* Request template: hash+cipher, compare digest, don't return it. */
398 qat_alg_init_common_hdr(header);
399 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
400 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
401 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
402 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
403 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
404 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
405 ICP_QAT_FW_LA_CMP_AUTH_RES);
406 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
407 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
409 /* Cipher CD config setup */
410 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
411 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
/* Offset (in 8-byte words) matches the cipher placement above. */
412 cipher_cd_ctrl->cipher_cfg_offset =
413 (sizeof(struct icp_qat_hw_auth_setup) +
414 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
415 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
416 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
418 /* Auth CD config setup */
419 hash_cd_ctrl->hash_cfg_offset = 0;
420 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
421 hash_cd_ctrl->inner_res_sz = digestsize;
422 hash_cd_ctrl->final_sz = digestsize;
/* Per-algorithm state sizes; SHA1's are rounded up to 8 bytes. */
424 switch (ctx->qat_hash_alg) {
425 case ICP_QAT_HW_AUTH_ALGO_SHA1:
426 hash_cd_ctrl->inner_state1_sz =
427 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
428 hash_cd_ctrl->inner_state2_sz =
429 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
431 case ICP_QAT_HW_AUTH_ALGO_SHA256:
432 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
433 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
435 case ICP_QAT_HW_AUTH_ALGO_SHA512:
436 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
437 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
443 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
444 ((sizeof(struct icp_qat_hw_auth_setup) +
445 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
446 auth_param->auth_res_sz = digestsize;
447 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
448 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
/*
 * Common setup shared by the ablkcipher enc and dec session builders:
 * copy the key into the CD and fill the cipher-only request template.
 */
452 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
453 struct icp_qat_fw_la_bulk_req *req,
454 struct icp_qat_hw_cipher_algo_blk *cd,
455 const uint8_t *key, unsigned int keylen)
457 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
458 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
459 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
461 memcpy(cd->aes.key, key, keylen);
462 qat_alg_init_common_hdr(header);
463 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
464 cd_pars->u.s.content_desc_params_sz =
465 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
466 /* Cipher CD config setup */
467 cd_ctrl->cipher_key_sz = keylen >> 3;
468 cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
469 cd_ctrl->cipher_cfg_offset = 0;
470 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
471 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
/* Build the encrypt session: common setup + CBC-encrypt cipher config. */
474 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
475 int alg, const uint8_t *key,
478 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
479 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
480 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
482 qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
483 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
484 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
/* Build the decrypt session: common setup + CBC-decrypt cipher config. */
487 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
488 int alg, const uint8_t *key,
491 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
492 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
493 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
495 qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
496 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
497 dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
/*
 * Map an AES key length (128/192/256-bit) to the HW algorithm id.
 * (The switch header and the reject path are not visible in this view.)
 */
500 static int qat_alg_validate_key(int key_len, int *alg)
503 case AES_KEYSIZE_128:
504 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
506 case AES_KEYSIZE_192:
507 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
509 case AES_KEYSIZE_256:
510 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
/*
 * Split the authenc key blob into enc/auth keys, validate the AES key
 * size, refresh the givencrypt salt, and build both the encrypt and
 * decrypt session descriptors.  On bad key material the tfm's
 * BAD_KEY_LEN flag is set (error return lines are outside this view).
 */
518 static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
519 const uint8_t *key, unsigned int keylen)
521 struct crypto_authenc_keys keys;
/* Fresh random salt used later by qat_alg_aead_genivenc(). */
524 if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
527 if (crypto_authenc_extractkeys(&keys, key, keylen))
530 if (qat_alg_validate_key(keys.enckeylen, &alg))
533 if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
536 if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
541 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
/*
 * Validate the AES key size then build both ablkcipher sessions;
 * sets BAD_KEY_LEN on the tfm for unsupported key lengths.
 */
547 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
553 if (qat_alg_validate_key(keylen, &alg))
556 qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
557 qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
560 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
/*
 * AEAD setkey: on rekey, zero the existing descriptors/templates; on
 * first use, bind the tfm to a crypto instance on the current node and
 * allocate DMA-coherent enc/dec content descriptors.  Then (re)build
 * the sessions.  ctx->lock guards the alloc-vs-rekey decision; error
 * paths free and scrub the coherent buffers.
 */
564 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
567 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
570 spin_lock(&ctx->lock);
/* Rekey path: descriptors already exist, just reset them. */
573 dev = &GET_DEV(ctx->inst->accel_dev);
574 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
575 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
576 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
577 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
/* First-key path: pick an instance local to the current NUMA node. */
580 int node = get_current_node();
581 struct qat_crypto_instance *inst =
582 qat_crypto_get_instance_node(node);
584 spin_unlock(&ctx->lock);
588 dev = &GET_DEV(inst->accel_dev);
590 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
594 spin_unlock(&ctx->lock);
597 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
601 spin_unlock(&ctx->lock);
605 spin_unlock(&ctx->lock);
606 if (qat_alg_aead_init_sessions(ctx, key, keylen))
/* Error unwind: scrub and free the coherent descriptors. */
612 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
613 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
614 ctx->dec_cd, ctx->dec_cd_paddr);
617 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
618 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
619 ctx->enc_cd, ctx->enc_cd_paddr);
/*
 * Undo the DMA mappings created by qat_alg_sgl_to_bufl(): unmap every
 * data segment and the firmware buffer-list structures themselves.
 * (The kfree of the lists is outside this view.)
 */
624 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
625 struct qat_crypto_request *qat_req)
627 struct device *dev = &GET_DEV(inst->accel_dev);
628 struct qat_alg_buf_list *bl = qat_req->buf.bl;
629 struct qat_alg_buf_list *blout = qat_req->buf.blout;
630 dma_addr_t blp = qat_req->buf.blp;
631 dma_addr_t blpout = qat_req->buf.bloutp;
632 size_t sz = qat_req->buf.sz;
633 size_t sz_out = qat_req->buf.sz_out;
636 for (i = 0; i < bl->num_bufs; i++)
637 dma_unmap_single(dev, bl->bufers[i].addr,
638 bl->bufers[i].len, DMA_BIDIRECTIONAL);
640 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
643 /* If out of place operation dma unmap only data */
/* Leading entries (assoc + iv) were reused from bl, skip them here. */
644 int bufless = blout->num_bufs - blout->num_mapped_bufs;
646 for (i = bufless; i < blout->num_bufs; i++) {
647 dma_unmap_single(dev, blout->bufers[i].addr,
648 blout->bufers[i].len,
651 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
/*
 * Translate the request's scatterlists (assoc data, optional IV, and
 * src/dst payload) into the firmware's qat_alg_buf_list format, DMA
 * mapping each segment and the list structures themselves.  For
 * in-place operation (sgl == sglout) the output list aliases the input
 * list; out-of-place builds a second list that reuses the assoc/iv
 * mappings and maps only the output data.  Lists are allocated on the
 * accel device's NUMA node with GFP_ATOMIC (may run in softirq context).
 * Returns 0 on success; the error path unwinds partial mappings.
 */
656 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
657 struct scatterlist *assoc,
658 struct scatterlist *sgl,
659 struct scatterlist *sglout, uint8_t *iv,
661 struct qat_crypto_request *qat_req)
663 struct device *dev = &GET_DEV(inst->accel_dev);
664 int i, bufs = 0, sg_nctr = 0;
665 int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
666 struct qat_alg_buf_list *bufl;
667 struct qat_alg_buf_list *buflout = NULL;
669 dma_addr_t bloutp = 0;
670 struct scatterlist *sg;
/* +1 slot for the IV in addition to assoc and payload segments. */
671 size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
672 ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
677 bufl = kzalloc_node(sz, GFP_ATOMIC,
678 dev_to_node(&GET_DEV(inst->accel_dev)));
682 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
683 if (unlikely(dma_mapping_error(dev, blp)))
/* Map the associated-data segments first. */
686 for_each_sg(assoc, sg, assoc_n, i) {
689 bufl->bufers[bufs].addr = dma_map_single(dev,
693 bufl->bufers[bufs].len = sg->length;
694 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
/* Then the IV as its own segment. */
699 bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
701 bufl->bufers[bufs].len = ivlen;
702 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
/* Finally the payload segments. */
707 for_each_sg(sgl, sg, n, i) {
708 int y = sg_nctr + bufs;
713 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
716 bufl->bufers[y].len = sg->length;
717 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
721 bufl->num_bufs = sg_nctr + bufs;
722 qat_req->buf.bl = bufl;
723 qat_req->buf.blp = blp;
724 qat_req->buf.sz = sz;
725 /* Handle out of place operation */
727 struct qat_alg_buf *bufers;
729 n = sg_nents(sglout);
730 sz_out = sizeof(struct qat_alg_buf_list) +
731 ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
733 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
734 dev_to_node(&GET_DEV(inst->accel_dev)));
735 if (unlikely(!buflout))
737 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
738 if (unlikely(dma_mapping_error(dev, bloutp)))
740 bufers = buflout->bufers;
741 /* For out of place operation dma map only data and
742 * reuse assoc mapping and iv */
743 for (i = 0; i < bufs; i++) {
744 bufers[i].len = bufl->bufers[i].len;
745 bufers[i].addr = bufl->bufers[i].addr;
747 for_each_sg(sglout, sg, n, i) {
748 int y = sg_nctr + bufs;
753 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
756 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
758 bufers[y].len = sg->length;
761 buflout->num_bufs = sg_nctr + bufs;
/* num_mapped_bufs: only the data entries we mapped here, not
 * the reused assoc/iv entries -- qat_alg_free_bufl relies on it. */
762 buflout->num_mapped_bufs = sg_nctr;
763 qat_req->buf.blout = buflout;
764 qat_req->buf.bloutp = bloutp;
765 qat_req->buf.sz_out = sz_out;
767 /* Otherwise set the src and dst to the same address */
768 qat_req->buf.bloutp = qat_req->buf.blp;
769 qat_req->buf.sz_out = 0;
/* Error unwind: unmap whatever was successfully mapped so far. */
773 dev_err(dev, "Failed to map buf for dma\n");
775 for (i = 0; i < n + bufs; i++)
776 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
777 dma_unmap_single(dev, bufl->bufers[i].addr,
781 if (!dma_mapping_error(dev, blp))
782 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE)
784 if (sgl != sglout && buflout) {
785 n = sg_nents(sglout);
786 for (i = bufs; i < n + bufs; i++)
787 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
788 dma_unmap_single(dev, buflout->bufers[i].addr,
789 buflout->bufers[i].len,
791 if (!dma_mapping_error(dev, bloutp))
792 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
/*
 * Completion handler for AEAD requests: release the DMA buffer lists,
 * translate the FW status into an errno, and complete the request.
 */
798 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
799 struct qat_crypto_request *qat_req)
801 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
802 struct qat_crypto_instance *inst = ctx->inst;
803 struct aead_request *areq = qat_req->aead_req;
804 uint8_t stat_filed = qat_resp->comn_resp.comn_status;
805 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
807 qat_alg_free_bufl(inst, qat_req);
808 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
810 areq->base.complete(&areq->base, res);
/*
 * Completion handler for ablkcipher requests; mirrors the AEAD
 * callback: free DMA buffers, map FW status to errno, complete.
 */
813 static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
814 struct qat_crypto_request *qat_req)
816 struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
817 struct qat_crypto_instance *inst = ctx->inst;
818 struct ablkcipher_request *areq = qat_req->ablkcipher_req;
819 uint8_t stat_filed = qat_resp->comn_resp.comn_status;
820 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
822 qat_alg_free_bufl(inst, qat_req);
823 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
825 areq->base.complete(&areq->base, res);
/*
 * Ring response entry point: recover the originating request from the
 * opaque data echoed back by the firmware and dispatch to its callback.
 */
828 void qat_alg_callback(void *resp)
830 struct icp_qat_fw_la_resp *qat_resp = resp;
831 struct qat_crypto_request *qat_req =
832 (void *)(__force long)qat_resp->opaque_data;
834 qat_req->cb(qat_resp, qat_req);
/*
 * AEAD decrypt: map the request buffers, copy the pre-built decrypt
 * template, fill per-request cipher/auth params, and submit to the
 * symmetric TX ring, retrying up to 10 times on ring-full (-EAGAIN).
 * Cipher covers the payload minus the trailing digest; auth covers
 * assoc + IV + ciphertext.
 */
837 static int qat_alg_aead_dec(struct aead_request *areq)
839 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
840 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
841 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
842 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
843 struct icp_qat_fw_la_cipher_req_params *cipher_param;
844 struct icp_qat_fw_la_auth_req_params *auth_param;
845 struct icp_qat_fw_la_bulk_req *msg;
846 int digst_size = crypto_aead_crt(aead_tfm)->authsize;
849 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
850 areq->iv, AES_BLOCK_SIZE, qat_req);
/* Start from the session's prebuilt request template. */
855 *msg = ctx->dec_fw_req;
856 qat_req->aead_ctx = ctx;
857 qat_req->aead_req = areq;
858 qat_req->cb = qat_aead_alg_callback;
859 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
860 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
861 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
862 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
/* Decrypt everything except the trailing auth tag. */
863 cipher_param->cipher_length = areq->cryptlen - digst_size;
864 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
865 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
866 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
867 auth_param->auth_off = 0;
868 auth_param->auth_len = areq->assoclen +
869 cipher_param->cipher_length + AES_BLOCK_SIZE;
/* Bounded retry on a full ring, then unwind the mappings. */
871 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
872 } while (ret == -EAGAIN && ctr++ < 10);
874 if (ret == -EAGAIN) {
875 qat_alg_free_bufl(ctx->inst, qat_req);
/*
 * AEAD encrypt core shared by the plain-encrypt and givencrypt entry
 * points.  Two cipher-param layouts are set up (the selecting branch is
 * not visible in this view): one where the IV is included in the
 * ciphertext region (cipher_length += AES_BLOCK_SIZE, offset at
 * assoclen) and one where the IV is passed in the IV array and the
 * cipher region starts after it.  Submission retries up to 10 times on
 * ring-full.
 */
881 static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
884 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
885 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
886 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
887 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
888 struct icp_qat_fw_la_cipher_req_params *cipher_param;
889 struct icp_qat_fw_la_auth_req_params *auth_param;
890 struct icp_qat_fw_la_bulk_req *msg;
893 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
894 iv, AES_BLOCK_SIZE, qat_req);
/* Start from the session's prebuilt request template. */
899 *msg = ctx->enc_fw_req;
900 qat_req->aead_ctx = ctx;
901 qat_req->aead_req = areq;
902 qat_req->cb = qat_aead_alg_callback;
903 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
904 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
905 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
906 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
907 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
/* Layout 1: IV is part of the enciphered region. */
910 cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
911 cipher_param->cipher_offset = areq->assoclen;
/* Layout 2: IV supplied separately; data starts after it. */
913 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
914 cipher_param->cipher_length = areq->cryptlen;
915 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
917 auth_param->auth_off = 0;
918 auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
/* Bounded retry on a full ring, then unwind the mappings. */
921 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
922 } while (ret == -EAGAIN && ctr++ < 10);
924 if (ret == -EAGAIN) {
925 qat_alg_free_bufl(ctx->inst, qat_req);
/* AEAD encrypt entry point: use the caller-supplied IV as-is. */
931 static int qat_alg_aead_enc(struct aead_request *areq)
933 return qat_alg_aead_enc_internal(areq, areq->iv, 0);
/*
 * givencrypt entry point: generate the IV as the per-key random salt
 * with the last 8 bytes replaced by the big-endian sequence number,
 * then run the common encrypt path with enc_iv set.
 */
936 static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
938 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
939 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
940 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
943 memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
944 seq = cpu_to_be64(req->seq);
945 memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
946 &seq, sizeof(uint64_t));
947 return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
/*
 * ablkcipher setkey; same structure as qat_alg_aead_setkey: rekey
 * resets the existing descriptors, first use binds an instance and
 * allocates coherent CDs, then the sessions are built.
 */
950 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
954 struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
957 spin_lock(&ctx->lock);
/* Rekey path: descriptors already exist, just reset them. */
960 dev = &GET_DEV(ctx->inst->accel_dev);
961 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
962 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
963 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
964 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
/* First-key path: pick an instance local to the current NUMA node. */
967 int node = get_current_node();
968 struct qat_crypto_instance *inst =
969 qat_crypto_get_instance_node(node);
971 spin_unlock(&ctx->lock);
975 dev = &GET_DEV(inst->accel_dev);
977 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
981 spin_unlock(&ctx->lock);
984 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
988 spin_unlock(&ctx->lock);
992 spin_unlock(&ctx->lock);
993 if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
/* Error unwind.  NOTE(review): the sizeof expressions below look
 * swapped (enc_cd size used for dec_cd and vice versa); both point
 * to the same struct type so the sizes are equal, but the
 * expressions should be made consistent. */
999 memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd));
1000 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
1001 ctx->dec_cd, ctx->dec_cd_paddr);
1004 memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd));
1005 dma_free_coherent(dev, sizeof(*ctx->dec_cd),
1006 ctx->enc_cd, ctx->enc_cd_paddr);
/*
 * ablkcipher encrypt: map src/dst (no assoc data), copy the encrypt
 * template, set cipher length/offset and IV, submit with bounded
 * ring-full retry.  Returns -EINPROGRESS on successful submission.
 */
1011 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
1013 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1014 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1015 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1016 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1017 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1018 struct icp_qat_fw_la_bulk_req *msg;
1021 ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
1026 msg = &qat_req->req;
1027 *msg = ctx->enc_fw_req;
1028 qat_req->ablkcipher_ctx = ctx;
1029 qat_req->ablkcipher_req = req;
1030 qat_req->cb = qat_ablkcipher_alg_callback;
1031 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1032 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1033 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1034 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1035 cipher_param->cipher_length = req->nbytes;
1036 cipher_param->cipher_offset = 0;
1037 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1039 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1040 } while (ret == -EAGAIN && ctr++ < 10);
1042 if (ret == -EAGAIN) {
1043 qat_alg_free_bufl(ctx->inst, qat_req);
1046 return -EINPROGRESS;
/*
 * ablkcipher decrypt; mirrors the encrypt path but copies the decrypt
 * template.  Returns -EINPROGRESS on successful submission.
 */
1049 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1051 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1052 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1053 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1054 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1055 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1056 struct icp_qat_fw_la_bulk_req *msg;
1059 ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
1064 msg = &qat_req->req;
1065 *msg = ctx->dec_fw_req;
1066 qat_req->ablkcipher_ctx = ctx;
1067 qat_req->ablkcipher_req = req;
1068 qat_req->cb = qat_ablkcipher_alg_callback;
1069 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1070 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1071 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1072 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1073 cipher_param->cipher_length = req->nbytes;
1074 cipher_param->cipher_offset = 0;
1075 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1077 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1078 } while (ret == -EAGAIN && ctr++ < 10);
1080 if (ret == -EAGAIN) {
1081 qat_alg_free_bufl(ctx->inst, qat_req);
1084 return -EINPROGRESS;
/*
 * qat_alg_aead_init() - common per-tfm setup for the authenc() AEAD algs.
 * @tfm: transform being initialised
 * @hash: QAT hardware auth algorithm id recorded in the tfm context
 * @hash_name: kernel shash name ("sha1"/"sha256"/"sha512") allocated for
 *             software hashing (presumably HMAC precompute -- used by the
 *             setkey path, which is outside this excerpt)
 */
1087 static int qat_alg_aead_init(struct crypto_tfm *tfm,
1088 enum icp_qat_hw_auth_algo hash,
1089 const char *hash_name)
1091 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
/* Allocate the synchronous hash; error path after IS_ERR is elided here. */
1093 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1094 if (IS_ERR(ctx->hash_tfm))
1096 spin_lock_init(&ctx->lock);
1097 ctx->qat_hash_alg = hash;
/* Reserve per-request context (aead_request + qat_crypto_request). */
1098 tfm->crt_aead.reqsize = sizeof(struct aead_request) +
1099 sizeof(struct qat_crypto_request);
1104 static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
1106 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1109 static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
1111 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1114 static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
1116 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
/*
 * qat_alg_aead_exit() - per-tfm teardown for the authenc() AEAD algs.
 * @tfm: transform being destroyed
 *
 * Releases the software shash, zeroises and frees both DMA-coherent
 * content descriptors (they hold key material), and drops the crypto
 * instance reference taken at setkey time.
 */
1119 static void qat_alg_aead_exit(struct crypto_tfm *tfm)
1121 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
1122 struct qat_crypto_instance *inst = ctx->inst;
/* hash_tfm may hold an ERR_PTR if allocation failed in init. */
1125 if (!IS_ERR(ctx->hash_tfm))
1126 crypto_free_shash(ctx->hash_tfm);
/* Elided lines presumably bail out / NULL-check before the frees -- confirm. */
1131 dev = &GET_DEV(inst->accel_dev);
/* Scrub key material from the encrypt content descriptor before freeing. */
1133 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1134 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1135 ctx->enc_cd, ctx->enc_cd_paddr);
/* Same for the decrypt content descriptor. */
1138 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1139 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1140 ctx->dec_cd, ctx->dec_cd_paddr);
1142 qat_crypto_put_instance(inst);
/*
 * qat_alg_ablkcipher_init() - per-tfm setup for the cbc(aes) ablkcipher.
 * @tfm: transform being initialised
 *
 * Initialises the context lock and reserves per-request context space
 * (ablkcipher_request + qat_crypto_request); tail of the function is
 * elided in this excerpt.
 */
1145 static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
1147 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1149 spin_lock_init(&ctx->lock);
1150 tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
1151 sizeof(struct qat_crypto_request);
/*
 * qat_alg_ablkcipher_exit() - per-tfm teardown for the cbc(aes) ablkcipher.
 * @tfm: transform being destroyed
 *
 * Zeroises and frees the DMA-coherent encrypt/decrypt cipher blocks
 * (they hold key material) and drops the crypto instance reference.
 */
1156 static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
1158 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1159 struct qat_crypto_instance *inst = ctx->inst;
/* Elided lines presumably bail out / NULL-check before the frees -- confirm. */
1165 dev = &GET_DEV(inst->accel_dev);
/* Scrub the encrypt cipher block before returning the DMA memory. */
1167 memset(ctx->enc_cd, 0,
1168 sizeof(struct icp_qat_hw_cipher_algo_blk));
1169 dma_free_coherent(dev,
1170 sizeof(struct icp_qat_hw_cipher_algo_blk),
1171 ctx->enc_cd, ctx->enc_cd_paddr);
/* Same for the decrypt cipher block. */
1174 memset(ctx->dec_cd, 0,
1175 sizeof(struct icp_qat_hw_cipher_algo_blk));
1176 dma_free_coherent(dev,
1177 sizeof(struct icp_qat_hw_cipher_algo_blk),
1178 ctx->dec_cd, ctx->dec_cd_paddr);
1180 qat_crypto_put_instance(inst);
/*
 * Algorithms exported to the kernel crypto API.  Priority 4001 places
 * the hardware implementations above the generic software ones.  The
 * cra_flags set here are re-derived from cra_type in qat_algs_register().
 */
/* AEAD: AES-CBC with HMAC-SHA1. */
1183 static struct crypto_alg qat_algs[] = { {
1184 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1185 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1186 .cra_priority = 4001,
1187 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1188 .cra_blocksize = AES_BLOCK_SIZE,
1189 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1191 .cra_type = &crypto_aead_type,
1192 .cra_module = THIS_MODULE,
1193 .cra_init = qat_alg_aead_sha1_init,
1194 .cra_exit = qat_alg_aead_exit,
1197 .setkey = qat_alg_aead_setkey,
1198 .decrypt = qat_alg_aead_dec,
1199 .encrypt = qat_alg_aead_enc,
1200 .givencrypt = qat_alg_aead_genivenc,
1201 .ivsize = AES_BLOCK_SIZE,
1202 .maxauthsize = SHA1_DIGEST_SIZE,
/* AEAD: AES-CBC with HMAC-SHA256. */
1206 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1207 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1208 .cra_priority = 4001,
1209 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1210 .cra_blocksize = AES_BLOCK_SIZE,
1211 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1213 .cra_type = &crypto_aead_type,
1214 .cra_module = THIS_MODULE,
1215 .cra_init = qat_alg_aead_sha256_init,
1216 .cra_exit = qat_alg_aead_exit,
1219 .setkey = qat_alg_aead_setkey,
1220 .decrypt = qat_alg_aead_dec,
1221 .encrypt = qat_alg_aead_enc,
1222 .givencrypt = qat_alg_aead_genivenc,
1223 .ivsize = AES_BLOCK_SIZE,
1224 .maxauthsize = SHA256_DIGEST_SIZE,
/* AEAD: AES-CBC with HMAC-SHA512. */
1228 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1229 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1230 .cra_priority = 4001,
1231 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1232 .cra_blocksize = AES_BLOCK_SIZE,
1233 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1235 .cra_type = &crypto_aead_type,
1236 .cra_module = THIS_MODULE,
1237 .cra_init = qat_alg_aead_sha512_init,
1238 .cra_exit = qat_alg_aead_exit,
1241 .setkey = qat_alg_aead_setkey,
1242 .decrypt = qat_alg_aead_dec,
1243 .encrypt = qat_alg_aead_enc,
1244 .givencrypt = qat_alg_aead_genivenc,
1245 .ivsize = AES_BLOCK_SIZE,
1246 .maxauthsize = SHA512_DIGEST_SIZE,
/* Plain ablkcipher: AES-CBC. */
1250 .cra_name = "cbc(aes)",
1251 .cra_driver_name = "qat_aes_cbc",
1252 .cra_priority = 4001,
1253 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1254 .cra_blocksize = AES_BLOCK_SIZE,
1255 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1257 .cra_type = &crypto_ablkcipher_type,
1258 .cra_module = THIS_MODULE,
1259 .cra_init = qat_alg_ablkcipher_init,
1260 .cra_exit = qat_alg_ablkcipher_exit,
1263 .setkey = qat_alg_ablkcipher_setkey,
1264 .decrypt = qat_alg_ablkcipher_decrypt,
1265 .encrypt = qat_alg_ablkcipher_encrypt,
1266 .min_keysize = AES_MIN_KEY_SIZE,
1267 .max_keysize = AES_MAX_KEY_SIZE,
1268 .ivsize = AES_BLOCK_SIZE,
/*
 * qat_algs_register() - register qat_algs with the crypto API.
 *
 * Called per accelerator device; active_devs counts live devices under
 * algs_lock so the algorithms are registered only when the first device
 * comes up (and unregistered when the last goes away, see
 * qat_algs_unregister()).
 */
1273 int qat_algs_register(void)
1277 mutex_lock(&algs_lock);
1278 if (++active_devs == 1) {
/* Re-derive cra_flags from cra_type -- NOTE(review): presumably to clear
 * flag bits a previous register/unregister cycle may have set; confirm. */
1281 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
1282 qat_algs[i].cra_flags =
1283 (qat_algs[i].cra_type == &crypto_aead_type) ?
1284 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
1285 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1287 ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1289 mutex_unlock(&algs_lock);
/*
 * qat_algs_unregister() - unregister qat_algs from the crypto API when
 * the last accelerator device goes away (active_devs drops to zero).
 * Serialised against qat_algs_register() by algs_lock.
 */
1293 int qat_algs_unregister(void)
1297 mutex_lock(&algs_lock);
1298 if (--active_devs == 0)
1299 ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1300 mutex_unlock(&algs_lock);
/*
 * qat_algs_init() - module-scope init: take a reference on the default
 * RNG (used by the givencrypt path).  NOTE(review): the return value of
 * crypto_get_default_rng() is not checked on this line -- confirm the
 * elided tail handles/ignores it deliberately.
 */
1304 int qat_algs_init(void)
1306 crypto_get_default_rng();
/* qat_algs_exit() - drop the default-RNG reference taken in qat_algs_init(). */
1310 void qat_algs_exit(void)
1312 crypto_put_default_rng();