kernel/drivers/crypto/qat/qat_common/qat_algs.c (kvmfornfv.git, commit 1dc5b0a17cf7205f45f01b7995464860634cdbc9)
1 /*
2   This file is provided under a dual BSD/GPLv2 license.  When using or
3   redistributing this file, you may do so under either license.
4
5   GPL LICENSE SUMMARY
6   Copyright(c) 2014 Intel Corporation.
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of version 2 of the GNU General Public License as
9   published by the Free Software Foundation.
10
11   This program is distributed in the hope that it will be useful, but
12   WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14   General Public License for more details.
15
16   Contact Information:
17   qat-linux@intel.com
18
19   BSD LICENSE
20   Copyright(c) 2014 Intel Corporation.
21   Redistribution and use in source and binary forms, with or without
22   modification, are permitted provided that the following conditions
23   are met:
24
25     * Redistributions of source code must retain the above copyright
26       notice, this list of conditions and the following disclaimer.
27     * Redistributions in binary form must reproduce the above copyright
28       notice, this list of conditions and the following disclaimer in
29       the documentation and/or other materials provided with the
30       distribution.
31     * Neither the name of Intel Corporation nor the names of its
32       contributors may be used to endorse or promote products derived
33       from this software without specific prior written permission.
34
35   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <crypto/rng.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
65
66 #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
67         ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
68                                        ICP_QAT_HW_CIPHER_NO_CONVERT, \
69                                        ICP_QAT_HW_CIPHER_ENCRYPT)
70
71 #define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
72         ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
73                                        ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74                                        ICP_QAT_HW_CIPHER_DECRYPT)
75
76 static atomic_t active_dev;
77
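/* Flat buffer descriptors (DMA address + length) passed to the firmware in
 * place of scatterlists; qat_alg_sgl_to_bufl() builds these tables. */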
78 struct qat_alg_buf {
79         uint32_t len;
80         uint32_t resrvd;
81         uint64_t addr;
82 } __packed;
83
84 struct qat_alg_buf_list {
85         uint64_t resrvd;
86         uint32_t num_bufs;
87         uint32_t num_mapped_bufs;
88         struct qat_alg_buf bufers[];
89 } __packed __aligned(64);
90
91 /* Common content descriptor */
92 struct qat_alg_cd {
93         union {
94                 struct qat_enc { /* Encrypt content desc */
95                         struct icp_qat_hw_cipher_algo_blk cipher;
96                         struct icp_qat_hw_auth_algo_blk hash;
97                 } qat_enc_cd;
98                 struct qat_dec { /* Decrypt content desc */
99                         struct icp_qat_hw_auth_algo_blk hash;
100                         struct icp_qat_hw_cipher_algo_blk cipher;
101                 } qat_dec_cd;
102         };
103 } __aligned(64);
104
105 struct qat_alg_aead_ctx {
106         struct qat_alg_cd *enc_cd;
107         struct qat_alg_cd *dec_cd;
108         dma_addr_t enc_cd_paddr;
109         dma_addr_t dec_cd_paddr;
110         struct icp_qat_fw_la_bulk_req enc_fw_req;
111         struct icp_qat_fw_la_bulk_req dec_fw_req;
112         struct crypto_shash *hash_tfm;
113         enum icp_qat_hw_auth_algo qat_hash_alg;
114         struct qat_crypto_instance *inst;
115         struct crypto_tfm *tfm;
116         uint8_t salt[AES_BLOCK_SIZE];
117         spinlock_t lock;        /* protects qat_alg_aead_ctx struct */
118 };
119
120 struct qat_alg_ablkcipher_ctx {
121         struct icp_qat_hw_cipher_algo_blk *enc_cd;
122         struct icp_qat_hw_cipher_algo_blk *dec_cd;
123         dma_addr_t enc_cd_paddr;
124         dma_addr_t dec_cd_paddr;
125         struct icp_qat_fw_la_bulk_req enc_fw_req;
126         struct icp_qat_fw_la_bulk_req dec_fw_req;
127         struct qat_crypto_instance *inst;
128         struct crypto_tfm *tfm;
129         spinlock_t lock;        /* protects qat_alg_ablkcipher_ctx struct */
130 };
131
132 static int get_current_node(void)
133 {
134         return cpu_data(current_thread_info()->cpu).phys_proc_id;
135 }
136
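/* Size of the hardware intermediate hash state (state1) for a given algorithm */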
137 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
138 {
139         switch (qat_hash_alg) {
140         case ICP_QAT_HW_AUTH_ALGO_SHA1:
141                 return ICP_QAT_HW_SHA1_STATE1_SZ;
142         case ICP_QAT_HW_AUTH_ALGO_SHA256:
143                 return ICP_QAT_HW_SHA256_STATE1_SZ;
144         case ICP_QAT_HW_AUTH_ALGO_SHA512:
145                 return ICP_QAT_HW_SHA512_STATE1_SZ;
146         default:
147                 return -EFAULT;
148         }
149         return -EFAULT;
150 }
151
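/*
 * Precompute the inner and outer HMAC partial states: XOR the auth key with
 * the ipad/opad constants, hash one block of each and export the intermediate
 * digests (byte-swapped) into the content descriptor's state1 area.
 */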
152 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
153                                   struct qat_alg_aead_ctx *ctx,
154                                   const uint8_t *auth_key,
155                                   unsigned int auth_keylen)
156 {
157         SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
158         struct sha1_state sha1;
159         struct sha256_state sha256;
160         struct sha512_state sha512;
161         int block_size = crypto_shash_blocksize(ctx->hash_tfm);
162         int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
163         char ipad[block_size];
164         char opad[block_size];
165         __be32 *hash_state_out;
166         __be64 *hash512_state_out;
167         int i, offset;
168
169         memset(ipad, 0, block_size);
170         memset(opad, 0, block_size);
171         shash->tfm = ctx->hash_tfm;
172         shash->flags = 0x0;
173
174         if (auth_keylen > block_size) {
175                 int ret = crypto_shash_digest(shash, auth_key,
176                                               auth_keylen, ipad);
177                 if (ret)
178                         return ret;
179
180                 memcpy(opad, ipad, digest_size);
181         } else {
182                 memcpy(ipad, auth_key, auth_keylen);
183                 memcpy(opad, auth_key, auth_keylen);
184         }
185
186         for (i = 0; i < block_size; i++) {
187                 char *ipad_ptr = ipad + i;
188                 char *opad_ptr = opad + i;
189                 *ipad_ptr ^= 0x36;
190                 *opad_ptr ^= 0x5C;
191         }
192
193         if (crypto_shash_init(shash))
194                 return -EFAULT;
195
196         if (crypto_shash_update(shash, ipad, block_size))
197                 return -EFAULT;
198
199         hash_state_out = (__be32 *)hash->sha.state1;
200         hash512_state_out = (__be64 *)hash_state_out;
201
202         switch (ctx->qat_hash_alg) {
203         case ICP_QAT_HW_AUTH_ALGO_SHA1:
204                 if (crypto_shash_export(shash, &sha1))
205                         return -EFAULT;
206                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
207                         *hash_state_out = cpu_to_be32(*(sha1.state + i));
208                 break;
209         case ICP_QAT_HW_AUTH_ALGO_SHA256:
210                 if (crypto_shash_export(shash, &sha256))
211                         return -EFAULT;
212                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
213                         *hash_state_out = cpu_to_be32(*(sha256.state + i));
214                 break;
215         case ICP_QAT_HW_AUTH_ALGO_SHA512:
216                 if (crypto_shash_export(shash, &sha512))
217                         return -EFAULT;
218                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
219                         *hash512_state_out = cpu_to_be64(*(sha512.state + i));
220                 break;
221         default:
222                 return -EFAULT;
223         }
224
225         if (crypto_shash_init(shash))
226                 return -EFAULT;
227
228         if (crypto_shash_update(shash, opad, block_size))
229                 return -EFAULT;
230
231         offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
232         hash_state_out = (__be32 *)(hash->sha.state1 + offset);
233         hash512_state_out = (__be64 *)hash_state_out;
234
235         switch (ctx->qat_hash_alg) {
236         case ICP_QAT_HW_AUTH_ALGO_SHA1:
237                 if (crypto_shash_export(shash, &sha1))
238                         return -EFAULT;
239                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
240                         *hash_state_out = cpu_to_be32(*(sha1.state + i));
241                 break;
242         case ICP_QAT_HW_AUTH_ALGO_SHA256:
243                 if (crypto_shash_export(shash, &sha256))
244                         return -EFAULT;
245                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
246                         *hash_state_out = cpu_to_be32(*(sha256.state + i));
247                 break;
248         case ICP_QAT_HW_AUTH_ALGO_SHA512:
249                 if (crypto_shash_export(shash, &sha512))
250                         return -EFAULT;
251                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
252                         *hash512_state_out = cpu_to_be64(*(sha512.state + i));
253                 break;
254         default:
255                 return -EFAULT;
256         }
257         memzero_explicit(ipad, block_size);
258         memzero_explicit(opad, block_size);
259         return 0;
260 }
261
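/* Initialise the request header fields common to every request built here */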
262 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
263 {
264         header->hdr_flags =
265                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
266         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
267         header->comn_req_flags =
268                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
269                                             QAT_COMN_PTR_TYPE_SGL);
270         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
271                                   ICP_QAT_FW_LA_PARTIAL_NONE);
272         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
273                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
274         ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
275                                 ICP_QAT_FW_LA_NO_PROTO);
276         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
277                                        ICP_QAT_FW_LA_NO_UPDATE_STATE);
278 }
279
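/*
 * Build the encrypt content descriptor (cipher block followed by the auth
 * block) and the CIPHER_HASH request template for an authenc session.
 */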
280 static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
281                                          int alg,
282                                          struct crypto_authenc_keys *keys)
283 {
284         struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
285         unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
286         struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
287         struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
288         struct icp_qat_hw_auth_algo_blk *hash =
289                 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
290                 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
291         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
292         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
293         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
294         void *ptr = &req_tmpl->cd_ctrl;
295         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
296         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
297
298         /* CD setup */
299         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
300         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
301         hash->sha.inner_setup.auth_config.config =
302                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
303                                              ctx->qat_hash_alg, digestsize);
304         hash->sha.inner_setup.auth_counter.counter =
305                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
306
307         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
308                 return -EFAULT;
309
310         /* Request setup */
311         qat_alg_init_common_hdr(header);
312         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
313         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
314                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
315         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
316                                    ICP_QAT_FW_LA_RET_AUTH_RES);
317         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
318                                    ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
319         cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
320         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
321
322         /* Cipher CD config setup */
323         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
324         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
325         cipher_cd_ctrl->cipher_cfg_offset = 0;
326         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
327         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
328         /* Auth CD config setup */
329         hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
330         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
331         hash_cd_ctrl->inner_res_sz = digestsize;
332         hash_cd_ctrl->final_sz = digestsize;
333
334         switch (ctx->qat_hash_alg) {
335         case ICP_QAT_HW_AUTH_ALGO_SHA1:
336                 hash_cd_ctrl->inner_state1_sz =
337                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
338                 hash_cd_ctrl->inner_state2_sz =
339                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
340                 break;
341         case ICP_QAT_HW_AUTH_ALGO_SHA256:
342                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
343                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
344                 break;
345         case ICP_QAT_HW_AUTH_ALGO_SHA512:
346                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
347                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
348                 break;
349         default:
350                 break;
351         }
352         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
353                         ((sizeof(struct icp_qat_hw_auth_setup) +
354                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
355         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
356         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
357         return 0;
358 }
359
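/*
 * Build the decrypt content descriptor (auth block followed by the cipher
 * block) and the HASH_CIPHER request template; the firmware compares the
 * digest instead of returning it.
 */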
360 static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
361                                          int alg,
362                                          struct crypto_authenc_keys *keys)
363 {
364         struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
365         unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
366         struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
367         struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
368         struct icp_qat_hw_cipher_algo_blk *cipher =
369                 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
370                 sizeof(struct icp_qat_hw_auth_setup) +
371                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
372         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
373         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
374         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
375         void *ptr = &req_tmpl->cd_ctrl;
376         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
377         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
378         struct icp_qat_fw_la_auth_req_params *auth_param =
379                 (struct icp_qat_fw_la_auth_req_params *)
380                 ((char *)&req_tmpl->serv_specif_rqpars +
381                 sizeof(struct icp_qat_fw_la_cipher_req_params));
382
383         /* CD setup */
384         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
385         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
386         hash->sha.inner_setup.auth_config.config =
387                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
388                                              ctx->qat_hash_alg,
389                                              digestsize);
390         hash->sha.inner_setup.auth_counter.counter =
391                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
392
393         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
394                 return -EFAULT;
395
396         /* Request setup */
397         qat_alg_init_common_hdr(header);
398         header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
399         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
400                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
401         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
402                                    ICP_QAT_FW_LA_NO_RET_AUTH_RES);
403         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
404                                    ICP_QAT_FW_LA_CMP_AUTH_RES);
405         cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
406         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
407
408         /* Cipher CD config setup */
409         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
410         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
411         cipher_cd_ctrl->cipher_cfg_offset =
412                 (sizeof(struct icp_qat_hw_auth_setup) +
413                  roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
414         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
415         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
416
417         /* Auth CD config setup */
418         hash_cd_ctrl->hash_cfg_offset = 0;
419         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
420         hash_cd_ctrl->inner_res_sz = digestsize;
421         hash_cd_ctrl->final_sz = digestsize;
422
423         switch (ctx->qat_hash_alg) {
424         case ICP_QAT_HW_AUTH_ALGO_SHA1:
425                 hash_cd_ctrl->inner_state1_sz =
426                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
427                 hash_cd_ctrl->inner_state2_sz =
428                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
429                 break;
430         case ICP_QAT_HW_AUTH_ALGO_SHA256:
431                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
432                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
433                 break;
434         case ICP_QAT_HW_AUTH_ALGO_SHA512:
435                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
436                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
437                 break;
438         default:
439                 break;
440         }
441
442         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
443                         ((sizeof(struct icp_qat_hw_auth_setup) +
444                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
445         auth_param->auth_res_sz = digestsize;
446         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
447         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
448         return 0;
449 }
450
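/* Request template setup shared by the ablkcipher encrypt and decrypt paths */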
451 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
452                                         struct icp_qat_fw_la_bulk_req *req,
453                                         struct icp_qat_hw_cipher_algo_blk *cd,
454                                         const uint8_t *key, unsigned int keylen)
455 {
456         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
457         struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
458         struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
459
460         memcpy(cd->aes.key, key, keylen);
461         qat_alg_init_common_hdr(header);
462         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
463         cd_pars->u.s.content_desc_params_sz =
464                                 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
465         /* Cipher CD config setup */
466         cd_ctrl->cipher_key_sz = keylen >> 3;
467         cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
468         cd_ctrl->cipher_cfg_offset = 0;
469         ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
470         ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
471 }
472
473 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
474                                         int alg, const uint8_t *key,
475                                         unsigned int keylen)
476 {
477         struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
478         struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
479         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
480
481         qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
482         cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
483         enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
484 }
485
486 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
487                                         int alg, const uint8_t *key,
488                                         unsigned int keylen)
489 {
490         struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
491         struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
492         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
493
494         qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
495         cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
496         dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
497 }
498
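/* Map an AES key length onto the matching hardware cipher algorithm id */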
499 static int qat_alg_validate_key(int key_len, int *alg)
500 {
501         switch (key_len) {
502         case AES_KEYSIZE_128:
503                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
504                 break;
505         case AES_KEYSIZE_192:
506                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
507                 break;
508         case AES_KEYSIZE_256:
509                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
510                 break;
511         default:
512                 return -EINVAL;
513         }
514         return 0;
515 }
516
517 static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
518                                       const uint8_t *key, unsigned int keylen)
519 {
520         struct crypto_authenc_keys keys;
521         int alg;
522
523         if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
524                 return -EFAULT;
525
526         if (crypto_authenc_extractkeys(&keys, key, keylen))
527                 goto bad_key;
528
529         if (qat_alg_validate_key(keys.enckeylen, &alg))
530                 goto bad_key;
531
532         if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
533                 goto error;
534
535         if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
536                 goto error;
537
538         return 0;
539 bad_key:
540         crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
541         return -EINVAL;
542 error:
543         return -EFAULT;
544 }
545
546 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
547                                             const uint8_t *key,
548                                             unsigned int keylen)
549 {
550         int alg;
551
552         if (qat_alg_validate_key(keylen, &alg))
553                 goto bad_key;
554
555         qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
556         qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
557         return 0;
558 bad_key:
559         crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
560         return -EINVAL;
561 }
562
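/*
 * On first use pick a crypto instance on the local node and allocate
 * DMA-coherent content descriptors; on rekey clear and reuse them.
 * Then program both the encrypt and decrypt sessions.
 */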
563 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
564                                unsigned int keylen)
565 {
566         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
567         struct device *dev;
568
569         spin_lock(&ctx->lock);
570         if (ctx->enc_cd) {
571                 /* rekeying */
572                 dev = &GET_DEV(ctx->inst->accel_dev);
573                 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
574                 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
575                 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
576                 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
577         } else {
578                 /* new key */
579                 int node = get_current_node();
580                 struct qat_crypto_instance *inst =
581                                 qat_crypto_get_instance_node(node);
582                 if (!inst) {
583                         spin_unlock(&ctx->lock);
584                         return -EINVAL;
585                 }
586
587                 dev = &GET_DEV(inst->accel_dev);
588                 ctx->inst = inst;
589                 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
590                                                   &ctx->enc_cd_paddr,
591                                                   GFP_ATOMIC);
592                 if (!ctx->enc_cd) {
593                         spin_unlock(&ctx->lock);
594                         return -ENOMEM;
595                 }
596                 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
597                                                   &ctx->dec_cd_paddr,
598                                                   GFP_ATOMIC);
599                 if (!ctx->dec_cd) {
600                         spin_unlock(&ctx->lock);
601                         goto out_free_enc;
602                 }
603         }
604         spin_unlock(&ctx->lock);
605         if (qat_alg_aead_init_sessions(ctx, key, keylen))
606                 goto out_free_all;
607
608         return 0;
609
610 out_free_all:
611         memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
612         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
613                           ctx->dec_cd, ctx->dec_cd_paddr);
614         ctx->dec_cd = NULL;
615 out_free_enc:
616         memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
617         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
618                           ctx->enc_cd, ctx->enc_cd_paddr);
619         ctx->enc_cd = NULL;
620         return -ENOMEM;
621 }
622
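/*
 * Unmap and free the buffer lists built by qat_alg_sgl_to_bufl(). For
 * out-of-place requests only the separately mapped data buffers of the
 * output list are unmapped; the assoc and IV mappings are shared with the
 * input list.
 */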
623 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
624                               struct qat_crypto_request *qat_req)
625 {
626         struct device *dev = &GET_DEV(inst->accel_dev);
627         struct qat_alg_buf_list *bl = qat_req->buf.bl;
628         struct qat_alg_buf_list *blout = qat_req->buf.blout;
629         dma_addr_t blp = qat_req->buf.blp;
630         dma_addr_t blpout = qat_req->buf.bloutp;
631         size_t sz = qat_req->buf.sz;
632         size_t sz_out = qat_req->buf.sz_out;
633         int i;
634
635         for (i = 0; i < bl->num_bufs; i++)
636                 dma_unmap_single(dev, bl->bufers[i].addr,
637                                  bl->bufers[i].len, DMA_BIDIRECTIONAL);
638
639         dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
640         kfree(bl);
641         if (blp != blpout) {
642                 /* For out-of-place operations DMA unmap only the data buffers */
643                 int bufless = blout->num_bufs - blout->num_mapped_bufs;
644
645                 for (i = bufless; i < blout->num_bufs; i++) {
646                         dma_unmap_single(dev, blout->bufers[i].addr,
647                                          blout->bufers[i].len,
648                                          DMA_BIDIRECTIONAL);
649                 }
650                 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
651                 kfree(blout);
652         }
653 }
654
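/*
 * DMA map the assoc data, IV and src/dst scatterlists and build the flat
 * buffer lists the firmware consumes. For out-of-place requests a second
 * list is built for the destination that reuses the assoc/IV mappings and
 * maps only the output data buffers.
 */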
655 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
656                                struct scatterlist *assoc,
657                                struct scatterlist *sgl,
658                                struct scatterlist *sglout, uint8_t *iv,
659                                uint8_t ivlen,
660                                struct qat_crypto_request *qat_req)
661 {
662         struct device *dev = &GET_DEV(inst->accel_dev);
663         int i, bufs = 0, sg_nctr = 0;
664         int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
665         struct qat_alg_buf_list *bufl;
666         struct qat_alg_buf_list *buflout = NULL;
667         dma_addr_t blp;
668         dma_addr_t bloutp = 0;
669         struct scatterlist *sg;
670         size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
671                         ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
672
673         if (unlikely(!n))
674                 return -EINVAL;
675
676         bufl = kzalloc_node(sz, GFP_ATOMIC,
677                             dev_to_node(&GET_DEV(inst->accel_dev)));
678         if (unlikely(!bufl))
679                 return -ENOMEM;
680
681         blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
682         if (unlikely(dma_mapping_error(dev, blp)))
683                 goto err;
684
685         for_each_sg(assoc, sg, assoc_n, i) {
686                 if (!sg->length)
687                         continue;
688                 bufl->bufers[bufs].addr = dma_map_single(dev,
689                                                          sg_virt(sg),
690                                                          sg->length,
691                                                          DMA_BIDIRECTIONAL);
692                 bufl->bufers[bufs].len = sg->length;
693                 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
694                         goto err;
695                 bufs++;
696         }
697         if (ivlen) {
698                 bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
699                                                          DMA_BIDIRECTIONAL);
700                 bufl->bufers[bufs].len = ivlen;
701                 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
702                         goto err;
703                 bufs++;
704         }
705
706         for_each_sg(sgl, sg, n, i) {
707                 int y = sg_nctr + bufs;
708
709                 if (!sg->length)
710                         continue;
711
712                 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
713                                                       sg->length,
714                                                       DMA_BIDIRECTIONAL);
715                 bufl->bufers[y].len = sg->length;
716                 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
717                         goto err;
718                 sg_nctr++;
719         }
720         bufl->num_bufs = sg_nctr + bufs;
721         qat_req->buf.bl = bufl;
722         qat_req->buf.blp = blp;
723         qat_req->buf.sz = sz;
724         /* Handle out of place operation */
725         if (sgl != sglout) {
726                 struct qat_alg_buf *bufers;
727
728                 n = sg_nents(sglout);
729                 sz_out = sizeof(struct qat_alg_buf_list) +
730                         ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
731                 sg_nctr = 0;
732                 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
733                                        dev_to_node(&GET_DEV(inst->accel_dev)));
734                 if (unlikely(!buflout))
735                         goto err;
736                 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
737                 if (unlikely(dma_mapping_error(dev, bloutp)))
738                         goto err;
739                 bufers = buflout->bufers;
740                 /* For out-of-place operations DMA map only the data and
741                  * reuse the assoc and iv mappings */
742                 for (i = 0; i < bufs; i++) {
743                         bufers[i].len = bufl->bufers[i].len;
744                         bufers[i].addr = bufl->bufers[i].addr;
745                 }
746                 for_each_sg(sglout, sg, n, i) {
747                         int y = sg_nctr + bufs;
748
749                         if (!sg->length)
750                                 continue;
751
752                         bufers[y].addr = dma_map_single(dev, sg_virt(sg),
753                                                         sg->length,
754                                                         DMA_BIDIRECTIONAL);
755                         if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
756                                 goto err;
757                         bufers[y].len = sg->length;
758                         sg_nctr++;
759                 }
760                 buflout->num_bufs = sg_nctr + bufs;
761                 buflout->num_mapped_bufs = sg_nctr;
762                 qat_req->buf.blout = buflout;
763                 qat_req->buf.bloutp = bloutp;
764                 qat_req->buf.sz_out = sz_out;
765         } else {
766                 /* Otherwise set the src and dst to the same address */
767                 qat_req->buf.bloutp = qat_req->buf.blp;
768                 qat_req->buf.sz_out = 0;
769         }
770         return 0;
771 err:
772         dev_err(dev, "Failed to map buf for dma\n");
773         sg_nctr = 0;
774         for (i = 0; i < n + bufs; i++)
775                 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
776                         dma_unmap_single(dev, bufl->bufers[i].addr,
777                                          bufl->bufers[i].len,
778                                          DMA_BIDIRECTIONAL);
779
780         if (!dma_mapping_error(dev, blp))
781                 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
782         kfree(bufl);
783         if (sgl != sglout && buflout) {
784                 n = sg_nents(sglout);
785                 for (i = bufs; i < n + bufs; i++)
786                         if (!dma_mapping_error(dev, buflout->bufers[i].addr))
787                                 dma_unmap_single(dev, buflout->bufers[i].addr,
788                                                  buflout->bufers[i].len,
789                                                  DMA_BIDIRECTIONAL);
790                 if (!dma_mapping_error(dev, bloutp))
791                         dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
792                 kfree(buflout);
793         }
794         return -ENOMEM;
795 }
796
797 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
798                                   struct qat_crypto_request *qat_req)
799 {
800         struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
801         struct qat_crypto_instance *inst = ctx->inst;
802         struct aead_request *areq = qat_req->aead_req;
803         uint8_t stat_field = qat_resp->comn_resp.comn_status;
804         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
805
806         qat_alg_free_bufl(inst, qat_req);
807         if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
808                 res = -EBADMSG;
809         areq->base.complete(&areq->base, res);
810 }
811
812 static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
813                                         struct qat_crypto_request *qat_req)
814 {
815         struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
816         struct qat_crypto_instance *inst = ctx->inst;
817         struct ablkcipher_request *areq = qat_req->ablkcipher_req;
818         uint8_t stat_field = qat_resp->comn_resp.comn_status;
819         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
820
821         qat_alg_free_bufl(inst, qat_req);
822         if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
823                 res = -EINVAL;
824         areq->base.complete(&areq->base, res);
825 }
826
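/*
 * Ring response handler: recover the originating request from the opaque
 * data and dispatch to the AEAD or ablkcipher completion callback.
 */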
827 void qat_alg_callback(void *resp)
828 {
829         struct icp_qat_fw_la_resp *qat_resp = resp;
830         struct qat_crypto_request *qat_req =
831                                 (void *)(__force long)qat_resp->opaque_data;
832
833         qat_req->cb(qat_resp, qat_req);
834 }
835
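/*
 * AEAD decrypt: build the buffer lists, fill in the cipher and auth request
 * parameters (the trailing digest is excluded from the cipher length) and
 * submit, retrying a busy ring a few times before giving up with -EBUSY.
 */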
836 static int qat_alg_aead_dec(struct aead_request *areq)
837 {
838         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
839         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
840         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
841         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
842         struct icp_qat_fw_la_cipher_req_params *cipher_param;
843         struct icp_qat_fw_la_auth_req_params *auth_param;
844         struct icp_qat_fw_la_bulk_req *msg;
845         int digst_size = crypto_aead_crt(aead_tfm)->authsize;
846         int ret, ctr = 0;
847
848         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
849                                   areq->iv, AES_BLOCK_SIZE, qat_req);
850         if (unlikely(ret))
851                 return ret;
852
853         msg = &qat_req->req;
854         *msg = ctx->dec_fw_req;
855         qat_req->aead_ctx = ctx;
856         qat_req->aead_req = areq;
857         qat_req->cb = qat_aead_alg_callback;
858         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
859         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
860         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
861         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
862         cipher_param->cipher_length = areq->cryptlen - digst_size;
863         cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
864         memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
865         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
866         auth_param->auth_off = 0;
867         auth_param->auth_len = areq->assoclen +
868                                 cipher_param->cipher_length + AES_BLOCK_SIZE;
869         do {
870                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
871         } while (ret == -EAGAIN && ctr++ < 10);
872
873         if (ret == -EAGAIN) {
874                 qat_alg_free_bufl(ctx->inst, qat_req);
875                 return -EBUSY;
876         }
877         return -EINPROGRESS;
878 }
879
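/*
 * Common encrypt path for encrypt() and givencrypt(); with enc_iv set the
 * IV block is encrypted together with the plaintext.
 */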
880 static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
881                                      int enc_iv)
882 {
883         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
884         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
885         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
886         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
887         struct icp_qat_fw_la_cipher_req_params *cipher_param;
888         struct icp_qat_fw_la_auth_req_params *auth_param;
889         struct icp_qat_fw_la_bulk_req *msg;
890         int ret, ctr = 0;
891
892         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
893                                   iv, AES_BLOCK_SIZE, qat_req);
894         if (unlikely(ret))
895                 return ret;
896
897         msg = &qat_req->req;
898         *msg = ctx->enc_fw_req;
899         qat_req->aead_ctx = ctx;
900         qat_req->aead_req = areq;
901         qat_req->cb = qat_aead_alg_callback;
902         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
903         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
904         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
905         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
906         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
907
908         if (enc_iv) {
909                 cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
910                 cipher_param->cipher_offset = areq->assoclen;
911         } else {
912                 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
913                 cipher_param->cipher_length = areq->cryptlen;
914                 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
915         }
916         auth_param->auth_off = 0;
917         auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
918
919         do {
920                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
921         } while (ret == -EAGAIN && ctr++ < 10);
922
923         if (ret == -EAGAIN) {
924                 qat_alg_free_bufl(ctx->inst, qat_req);
925                 return -EBUSY;
926         }
927         return -EINPROGRESS;
928 }
929
930 static int qat_alg_aead_enc(struct aead_request *areq)
931 {
932         return qat_alg_aead_enc_internal(areq, areq->iv, 0);
933 }
934
935 static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
936 {
937         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
938         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
939         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
940         __be64 seq;
941
942         memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
943         seq = cpu_to_be64(req->seq);
944         memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
945                &seq, sizeof(uint64_t));
946         return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
947 }
948
949 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
950                                      const uint8_t *key,
951                                      unsigned int keylen)
952 {
953         struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
954         struct device *dev;
955
956         spin_lock(&ctx->lock);
957         if (ctx->enc_cd) {
958                 /* rekeying */
959                 dev = &GET_DEV(ctx->inst->accel_dev);
960                 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
961                 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
962                 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
963                 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
964         } else {
965                 /* new key */
966                 int node = get_current_node();
967                 struct qat_crypto_instance *inst =
968                                 qat_crypto_get_instance_node(node);
969                 if (!inst) {
970                         spin_unlock(&ctx->lock);
971                         return -EINVAL;
972                 }
973
974                 dev = &GET_DEV(inst->accel_dev);
975                 ctx->inst = inst;
976                 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
977                                                   &ctx->enc_cd_paddr,
978                                                   GFP_ATOMIC);
979                 if (!ctx->enc_cd) {
980                         spin_unlock(&ctx->lock);
981                         return -ENOMEM;
982                 }
983                 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
984                                                   &ctx->dec_cd_paddr,
985                                                   GFP_ATOMIC);
986                 if (!ctx->dec_cd) {
987                         spin_unlock(&ctx->lock);
988                         goto out_free_enc;
989                 }
990         }
991         spin_unlock(&ctx->lock);
992         if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
993                 goto out_free_all;
994
995         return 0;
996
997 out_free_all:
998         memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
999         dma_free_coherent(dev, sizeof(*ctx->dec_cd),
1000                           ctx->dec_cd, ctx->dec_cd_paddr);
1001         ctx->dec_cd = NULL;
1002 out_free_enc:
1003         memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
1004         dma_free_coherent(dev, sizeof(*ctx->enc_cd),
1005                           ctx->enc_cd, ctx->enc_cd_paddr);
1006         ctx->enc_cd = NULL;
1007         return -ENOMEM;
1008 }
1009
1010 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
1011 {
1012         struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1013         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1014         struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1015         struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1016         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1017         struct icp_qat_fw_la_bulk_req *msg;
1018         int ret, ctr = 0;
1019
1020         ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
1021                                   NULL, 0, qat_req);
1022         if (unlikely(ret))
1023                 return ret;
1024
1025         msg = &qat_req->req;
1026         *msg = ctx->enc_fw_req;
1027         qat_req->ablkcipher_ctx = ctx;
1028         qat_req->ablkcipher_req = req;
1029         qat_req->cb = qat_ablkcipher_alg_callback;
1030         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1031         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1032         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1033         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1034         cipher_param->cipher_length = req->nbytes;
1035         cipher_param->cipher_offset = 0;
1036         memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1037         do {
1038                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1039         } while (ret == -EAGAIN && ctr++ < 10);
1040
1041         if (ret == -EAGAIN) {
1042                 qat_alg_free_bufl(ctx->inst, qat_req);
1043                 return -EBUSY;
1044         }
1045         return -EINPROGRESS;
1046 }
1047
1048 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1049 {
1050         struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1051         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1052         struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1053         struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1054         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1055         struct icp_qat_fw_la_bulk_req *msg;
1056         int ret, ctr = 0;
1057
1058         ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
1059                                   NULL, 0, qat_req);
1060         if (unlikely(ret))
1061                 return ret;
1062
1063         msg = &qat_req->req;
1064         *msg = ctx->dec_fw_req;
1065         qat_req->ablkcipher_ctx = ctx;
1066         qat_req->ablkcipher_req = req;
1067         qat_req->cb = qat_ablkcipher_alg_callback;
1068         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1069         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1070         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1071         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1072         cipher_param->cipher_length = req->nbytes;
1073         cipher_param->cipher_offset = 0;
1074         memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1075         do {
1076                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1077         } while (ret == -EAGAIN && ctr++ < 10);
1078
1079         if (ret == -EAGAIN) {
1080                 qat_alg_free_bufl(ctx->inst, qat_req);
1081                 return -EBUSY;
1082         }
1083         return -EINPROGRESS;
1084 }
1085
1086 static int qat_alg_aead_init(struct crypto_tfm *tfm,
1087                              enum icp_qat_hw_auth_algo hash,
1088                              const char *hash_name)
1089 {
1090         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
1091
1092         ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1093         if (IS_ERR(ctx->hash_tfm))
1094                 return PTR_ERR(ctx->hash_tfm);
1095         spin_lock_init(&ctx->lock);
1096         ctx->qat_hash_alg = hash;
1097         tfm->crt_aead.reqsize = sizeof(struct aead_request) +
1098                                 sizeof(struct qat_crypto_request);
1099         ctx->tfm = tfm;
1100         return 0;
1101 }
1102
1103 static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
1104 {
1105         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1106 }
1107
1108 static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
1109 {
1110         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1111 }
1112
1113 static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
1114 {
1115         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1116 }
1117
1118 static void qat_alg_aead_exit(struct crypto_tfm *tfm)
1119 {
1120         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
1121         struct qat_crypto_instance *inst = ctx->inst;
1122         struct device *dev;
1123
1124         if (!IS_ERR(ctx->hash_tfm))
1125                 crypto_free_shash(ctx->hash_tfm);
1126
1127         if (!inst)
1128                 return;
1129
1130         dev = &GET_DEV(inst->accel_dev);
1131         if (ctx->enc_cd) {
1132                 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1133                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1134                                   ctx->enc_cd, ctx->enc_cd_paddr);
1135         }
1136         if (ctx->dec_cd) {
1137                 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1138                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1139                                   ctx->dec_cd, ctx->dec_cd_paddr);
1140         }
1141         qat_crypto_put_instance(inst);
1142 }
1143
1144 static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
1145 {
1146         struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1147
1148         spin_lock_init(&ctx->lock);
1149         tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
1150                                         sizeof(struct qat_crypto_request);
1151         ctx->tfm = tfm;
1152         return 0;
1153 }
1154
1155 static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
1156 {
1157         struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1158         struct qat_crypto_instance *inst = ctx->inst;
1159         struct device *dev;
1160
1161         if (!inst)
1162                 return;
1163
1164         dev = &GET_DEV(inst->accel_dev);
1165         if (ctx->enc_cd) {
1166                 memset(ctx->enc_cd, 0,
1167                        sizeof(struct icp_qat_hw_cipher_algo_blk));
1168                 dma_free_coherent(dev,
1169                                   sizeof(struct icp_qat_hw_cipher_algo_blk),
1170                                   ctx->enc_cd, ctx->enc_cd_paddr);
1171         }
1172         if (ctx->dec_cd) {
1173                 memset(ctx->dec_cd, 0,
1174                        sizeof(struct icp_qat_hw_cipher_algo_blk));
1175                 dma_free_coherent(dev,
1176                                   sizeof(struct icp_qat_hw_cipher_algo_blk),
1177                                   ctx->dec_cd, ctx->dec_cd_paddr);
1178         }
1179         qat_crypto_put_instance(inst);
1180 }
1181
1182 static struct crypto_alg qat_algs[] = { {
1183         .cra_name = "authenc(hmac(sha1),cbc(aes))",
1184         .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1185         .cra_priority = 4001,
1186         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1187         .cra_blocksize = AES_BLOCK_SIZE,
1188         .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1189         .cra_alignmask = 0,
1190         .cra_type = &crypto_aead_type,
1191         .cra_module = THIS_MODULE,
1192         .cra_init = qat_alg_aead_sha1_init,
1193         .cra_exit = qat_alg_aead_exit,
1194         .cra_u = {
1195                 .aead = {
1196                         .setkey = qat_alg_aead_setkey,
1197                         .decrypt = qat_alg_aead_dec,
1198                         .encrypt = qat_alg_aead_enc,
1199                         .givencrypt = qat_alg_aead_genivenc,
1200                         .ivsize = AES_BLOCK_SIZE,
1201                         .maxauthsize = SHA1_DIGEST_SIZE,
1202                 },
1203         },
1204 }, {
1205         .cra_name = "authenc(hmac(sha256),cbc(aes))",
1206         .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1207         .cra_priority = 4001,
1208         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1209         .cra_blocksize = AES_BLOCK_SIZE,
1210         .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1211         .cra_alignmask = 0,
1212         .cra_type = &crypto_aead_type,
1213         .cra_module = THIS_MODULE,
1214         .cra_init = qat_alg_aead_sha256_init,
1215         .cra_exit = qat_alg_aead_exit,
1216         .cra_u = {
1217                 .aead = {
1218                         .setkey = qat_alg_aead_setkey,
1219                         .decrypt = qat_alg_aead_dec,
1220                         .encrypt = qat_alg_aead_enc,
1221                         .givencrypt = qat_alg_aead_genivenc,
1222                         .ivsize = AES_BLOCK_SIZE,
1223                         .maxauthsize = SHA256_DIGEST_SIZE,
1224                 },
1225         },
1226 }, {
1227         .cra_name = "authenc(hmac(sha512),cbc(aes))",
1228         .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1229         .cra_priority = 4001,
1230         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1231         .cra_blocksize = AES_BLOCK_SIZE,
1232         .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1233         .cra_alignmask = 0,
1234         .cra_type = &crypto_aead_type,
1235         .cra_module = THIS_MODULE,
1236         .cra_init = qat_alg_aead_sha512_init,
1237         .cra_exit = qat_alg_aead_exit,
1238         .cra_u = {
1239                 .aead = {
1240                         .setkey = qat_alg_aead_setkey,
1241                         .decrypt = qat_alg_aead_dec,
1242                         .encrypt = qat_alg_aead_enc,
1243                         .givencrypt = qat_alg_aead_genivenc,
1244                         .ivsize = AES_BLOCK_SIZE,
1245                         .maxauthsize = SHA512_DIGEST_SIZE,
1246                 },
1247         },
1248 }, {
1249         .cra_name = "cbc(aes)",
1250         .cra_driver_name = "qat_aes_cbc",
1251         .cra_priority = 4001,
1252         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1253         .cra_blocksize = AES_BLOCK_SIZE,
1254         .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1255         .cra_alignmask = 0,
1256         .cra_type = &crypto_ablkcipher_type,
1257         .cra_module = THIS_MODULE,
1258         .cra_init = qat_alg_ablkcipher_init,
1259         .cra_exit = qat_alg_ablkcipher_exit,
1260         .cra_u = {
1261                 .ablkcipher = {
1262                         .setkey = qat_alg_ablkcipher_setkey,
1263                         .decrypt = qat_alg_ablkcipher_decrypt,
1264                         .encrypt = qat_alg_ablkcipher_encrypt,
1265                         .min_keysize = AES_MIN_KEY_SIZE,
1266                         .max_keysize = AES_MAX_KEY_SIZE,
1267                         .ivsize = AES_BLOCK_SIZE,
1268                 },
1269         },
1270 } };
1271
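/*
 * Called for each accelerator device; the algorithms are registered with
 * the crypto API only when the first device comes up.
 */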
1272 int qat_algs_register(void)
1273 {
1274         if (atomic_add_return(1, &active_dev) == 1) {
1275                 int i;
1276
1277                 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
1278                         qat_algs[i].cra_flags =
1279                                 (qat_algs[i].cra_type == &crypto_aead_type) ?
1280                                 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
1281                                 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1282
1283                 return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1284         }
1285         return 0;
1286 }
1287
1288 int qat_algs_unregister(void)
1289 {
1290         if (atomic_sub_return(1, &active_dev) == 0)
1291                 return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1292         return 0;
1293 }
1294
1295 int qat_algs_init(void)
1296 {
1297         atomic_set(&active_dev, 0);
1298         crypto_get_default_rng();
1299         return 0;
1300 }
1301
1302 void qat_algs_exit(void)
1303 {
1304         crypto_put_default_rng();
1305 }