These changes are the raw update to linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/drivers/crypto/qat/qat_common/qat_algs.c
index 34139a8..59e4c3a 100644
--- a/kernel/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/kernel/drivers/crypto/qat/qat_common/qat_algs.c
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/crypto.h>
-#include <crypto/aead.h>
+#include <crypto/internal/aead.h>
 #include <crypto/aes.h>
 #include <crypto/sha.h>
 #include <crypto/hash.h>
 #include <crypto/algapi.h>
 #include <crypto/authenc.h>
-#include <crypto/rng.h>
 #include <linux/dma-mapping.h>
 #include "adf_accel_devices.h"
 #include "adf_transport.h"
 #include "icp_qat_fw.h"
 #include "icp_qat_fw_la.h"
 
-#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
-       ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
+       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
                                       ICP_QAT_HW_CIPHER_ENCRYPT)
 
-#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
-       ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
+       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)
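
The CBC-only descriptor macros become mode-parameterized, so one pair of builders now covers CBC, CTR and XTS. As a rough sketch of what a setkey path evaluates with these (constants as defined in icp_qat_hw.h; note that only the decrypt flavour asks the hardware to convert the key into a decryption key schedule):

    u32 enc_cfg = QAT_AES_HW_CONFIG_ENC(ICP_QAT_HW_CIPHER_ALGO_AES128,
                                        ICP_QAT_HW_CIPHER_CBC_MODE);
    u32 dec_cfg = QAT_AES_HW_CONFIG_DEC(ICP_QAT_HW_CIPHER_ALGO_AES128,
                                        ICP_QAT_HW_CIPHER_CBC_MODE);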
 
@@ -113,9 +112,6 @@ struct qat_alg_aead_ctx {
        struct crypto_shash *hash_tfm;
        enum icp_qat_hw_auth_algo qat_hash_alg;
        struct qat_crypto_instance *inst;
-       struct crypto_tfm *tfm;
-       uint8_t salt[AES_BLOCK_SIZE];
-       spinlock_t lock;        /* protects qat_alg_aead_ctx struct */
 };
 
 struct qat_alg_ablkcipher_ctx {
@@ -130,11 +126,6 @@ struct qat_alg_ablkcipher_ctx {
        spinlock_t lock;        /* protects qat_alg_ablkcipher_ctx struct */
 };
 
-static int get_current_node(void)
-{
-       return cpu_data(current_thread_info()->cpu).phys_proc_id;
-}
-
 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 {
        switch (qat_hash_alg) {
@@ -278,12 +269,13 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
                                       ICP_QAT_FW_LA_NO_UPDATE_STATE);
 }
 
-static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
                                         int alg,
-                                        struct crypto_authenc_keys *keys)
+                                        struct crypto_authenc_keys *keys,
+                                        int mode)
 {
-       struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
-       unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+       unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
        struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
        struct icp_qat_hw_auth_algo_blk *hash =
@@ -297,7 +289,7 @@ static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
 
        /* CD setup */
-       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
+       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
@@ -358,12 +350,13 @@ static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
        return 0;
 }
 
-static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
                                         int alg,
-                                        struct crypto_authenc_keys *keys)
+                                        struct crypto_authenc_keys *keys,
+                                        int mode)
 {
-       struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
-       unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+       unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
        struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
        struct icp_qat_hw_cipher_algo_blk *cipher =
@@ -382,7 +375,7 @@ static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
                sizeof(struct icp_qat_fw_la_cipher_req_params));
 
        /* CD setup */
-       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
+       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
@@ -473,7 +466,7 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
 
 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
                                        int alg, const uint8_t *key,
-                                       unsigned int keylen)
+                                       unsigned int keylen, int mode)
 {
        struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
@@ -481,12 +474,12 @@ static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
 
        qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
-       enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
+       enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
 }
 
 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
                                        int alg, const uint8_t *key,
-                                       unsigned int keylen)
+                                       unsigned int keylen, int mode)
 {
        struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
@@ -494,51 +487,67 @@ static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
 
        qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
-       dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
+
+       if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
+               dec_cd->aes.cipher_config.val =
+                                       QAT_AES_HW_CONFIG_DEC(alg, mode);
+       else
+               dec_cd->aes.cipher_config.val =
+                                       QAT_AES_HW_CONFIG_ENC(alg, mode);
 }
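
A note on the asymmetry above: CTR is a stream mode, so both directions XOR the same keystream and the hardware only ever needs the forward (encrypt) key schedule; the decrypt descriptor therefore reuses the ENC config and skips ICP_QAT_HW_CIPHER_KEY_CONVERT. A minimal illustration, with E() standing in for the AES block encryption (a hypothetical helper, not driver code):

    /* out = in ^ E_K(ctr); identical for encrypt and decrypt */
    static void ctr_xor(u8 *out, const u8 *in, size_t nblocks, u64 ctr,
                        void (*E)(u8 ks[16], u64 ctr))
    {
            u8 ks[16];
            size_t i, j;

            for (i = 0; i < nblocks; i++, ctr++) {
                    E(ks, ctr);                     /* keystream block */
                    for (j = 0; j < 16; j++)
                            out[i * 16 + j] = in[i * 16 + j] ^ ks[j];
            }
    }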
 
-static int qat_alg_validate_key(int key_len, int *alg)
+static int qat_alg_validate_key(int key_len, int *alg, int mode)
 {
-       switch (key_len) {
-       case AES_KEYSIZE_128:
-               *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
-               break;
-       case AES_KEYSIZE_192:
-               *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
-               break;
-       case AES_KEYSIZE_256:
-               *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
-               break;
-       default:
-               return -EINVAL;
+       if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
+               switch (key_len) {
+               case AES_KEYSIZE_128:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+                       break;
+               case AES_KEYSIZE_192:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+                       break;
+               case AES_KEYSIZE_256:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       } else {
+               switch (key_len) {
+               case AES_KEYSIZE_128 << 1:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+                       break;
+               case AES_KEYSIZE_256 << 1:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+                       break;
+               default:
+                       return -EINVAL;
+               }
        }
        return 0;
 }
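
The XTS branch accepts exactly twice the usual sizes because an XTS key is two full AES keys concatenated, the data-unit key followed by the tweak key: a 32-byte key selects AES-128 and a 64-byte key AES-256. A sketch of the split, assuming key/keylen as passed to setkey:

    const u8 *data_key  = key;                  /* bytes [0, keylen/2)      */
    const u8 *tweak_key = key + keylen / 2;     /* bytes [keylen/2, keylen) */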
 
-static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
-                                     const uint8_t *key, unsigned int keylen)
+static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
+                                     unsigned int keylen, int mode)
 {
        struct crypto_authenc_keys keys;
        int alg;
 
-       if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
-               return -EFAULT;
-
        if (crypto_authenc_extractkeys(&keys, key, keylen))
                goto bad_key;
 
-       if (qat_alg_validate_key(keys.enckeylen, &alg))
+       if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
                goto bad_key;
 
-       if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
+       if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
                goto error;
 
-       if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
+       if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
                goto error;
 
        return 0;
 bad_key:
-       crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+       crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
 error:
        return -EFAULT;
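
crypto_authenc_extractkeys() above unpacks the standard authenc() key blob: an rtattr header carrying the big-endian cipher key length, followed by the auth key and then the cipher key. A minimal sketch of building such a blob (the helper name is hypothetical; error checking elided):

    #include <crypto/authenc.h>
    #include <linux/rtnetlink.h>
    #include <linux/string.h>

    static unsigned int build_authenc_blob(u8 *blob,
                                           const u8 *authkey, unsigned int alen,
                                           const u8 *enckey, unsigned int elen)
    {
            struct rtattr *rta = (struct rtattr *)blob;
            struct crypto_authenc_key_param *param;

            rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
            rta->rta_len = RTA_LENGTH(sizeof(*param));
            param = RTA_DATA(rta);
            param->enckeylen = cpu_to_be32(elen);

            memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, alen);
            memcpy(blob + RTA_SPACE(sizeof(*param)) + alen, enckey, elen);
            return RTA_SPACE(sizeof(*param)) + alen + elen;
    }

Also gone from this path is the crypto_rng_get_bytes() salting, which only served the now-removed givencrypt handler (see below).
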
@@ -546,15 +555,16 @@ error:
 
 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
                                            const uint8_t *key,
-                                           unsigned int keylen)
+                                           unsigned int keylen,
+                                           int mode)
 {
        int alg;
 
-       if (qat_alg_validate_key(keylen, &alg))
+       if (qat_alg_validate_key(keylen, &alg, mode))
                goto bad_key;
 
-       qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
-       qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
+       qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
+       qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
        return 0;
 bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -567,7 +577,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev;
 
-       spin_lock(&ctx->lock);
        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
@@ -581,7 +590,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
-               if (!inst) {
-                       spin_unlock(&ctx->lock);
+               if (!inst)
                        return -EINVAL;
-               }
 
@@ -591,19 +599,17 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
-               if (!ctx->enc_cd) {
-                       spin_unlock(&ctx->lock);
+               if (!ctx->enc_cd)
                        return -ENOMEM;
-               }
                ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
-               if (!ctx->dec_cd) {
-                       spin_unlock(&ctx->lock);
+               if (!ctx->dec_cd)
                        goto out_free_enc;
-               }
        }
-       spin_unlock(&ctx->lock);
-       if (qat_alg_aead_init_sessions(ctx, key, keylen))
+       if (qat_alg_aead_init_sessions(tfm, key, keylen,
+                                      ICP_QAT_HW_CIPHER_CBC_MODE))
                goto out_free_all;
 
        return 0;
@@ -654,22 +660,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 }
 
 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
-                              struct scatterlist *assoc,
                               struct scatterlist *sgl,
-                              struct scatterlist *sglout, uint8_t *iv,
-                              uint8_t ivlen,
+                              struct scatterlist *sglout,
                               struct qat_crypto_request *qat_req)
 {
        struct device *dev = &GET_DEV(inst->accel_dev);
-       int i, bufs = 0, sg_nctr = 0;
-       int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+       int i, sg_nctr = 0;
+       int n = sg_nents(sgl);
        struct qat_alg_buf_list *bufl;
        struct qat_alg_buf_list *buflout = NULL;
        dma_addr_t blp;
        dma_addr_t bloutp = 0;
        struct scatterlist *sg;
        size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
-                       ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+                       ((1 + n) * sizeof(struct qat_alg_buf));
 
        if (unlikely(!n))
                return -EINVAL;
@@ -683,29 +687,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
        if (unlikely(dma_mapping_error(dev, blp)))
                goto err;
 
-       for_each_sg(assoc, sg, assoc_n, i) {
-               if (!sg->length)
-                       continue;
-               bufl->bufers[bufs].addr = dma_map_single(dev,
-                                                        sg_virt(sg),
-                                                        sg->length,
-                                                        DMA_BIDIRECTIONAL);
-               bufl->bufers[bufs].len = sg->length;
-               if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
-                       goto err;
-               bufs++;
-       }
-       if (ivlen) {
-               bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
-                                                        DMA_BIDIRECTIONAL);
-               bufl->bufers[bufs].len = ivlen;
-               if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
-                       goto err;
-               bufs++;
-       }
-
        for_each_sg(sgl, sg, n, i) {
-               int y = sg_nctr + bufs;
+               int y = sg_nctr;
 
                if (!sg->length)
                        continue;
@@ -718,7 +701,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                        goto err;
                sg_nctr++;
        }
-       bufl->num_bufs = sg_nctr + bufs;
+       bufl->num_bufs = sg_nctr;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
@@ -728,7 +711,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 
                n = sg_nents(sglout);
                sz_out = sizeof(struct qat_alg_buf_list) +
-                       ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+                       ((1 + n) * sizeof(struct qat_alg_buf));
                sg_nctr = 0;
                buflout = kzalloc_node(sz_out, GFP_ATOMIC,
                                       dev_to_node(&GET_DEV(inst->accel_dev)));
@@ -738,14 +721,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                if (unlikely(dma_mapping_error(dev, bloutp)))
                        goto err;
                bufers = buflout->bufers;
-               /* For out of place operation dma map only data and
-                * reuse assoc mapping and iv */
-               for (i = 0; i < bufs; i++) {
-                       bufers[i].len = bufl->bufers[i].len;
-                       bufers[i].addr = bufl->bufers[i].addr;
-               }
                for_each_sg(sglout, sg, n, i) {
-                       int y = sg_nctr + bufs;
+                       int y = sg_nctr;
 
                        if (!sg->length)
                                continue;
@@ -758,7 +735,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                        bufers[y].len = sg->length;
                        sg_nctr++;
                }
-               buflout->num_bufs = sg_nctr + bufs;
+               buflout->num_bufs = sg_nctr;
                buflout->num_mapped_bufs = sg_nctr;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
@@ -772,7 +749,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 err:
        dev_err(dev, "Failed to map buf for dma\n");
        sg_nctr = 0;
-       for (i = 0; i < n + bufs; i++)
+       for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, bufl->bufers[i].addr))
                        dma_unmap_single(dev, bufl->bufers[i].addr,
                                         bufl->bufers[i].len,
@@ -783,7 +760,7 @@ err:
        kfree(bufl);
        if (sgl != sglout && buflout) {
                n = sg_nents(sglout);
-               for (i = bufs; i < n + bufs; i++)
+               for (i = 0; i < n; i++)
                        if (!dma_mapping_error(dev, buflout->bufers[i].addr))
                                dma_unmap_single(dev, buflout->bufers[i].addr,
                                                 buflout->bufers[i].len,
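
All of the assoc- and IV-mapping above drops out because the new AEAD API hands drivers a single layout: req->src and req->dst begin with assoclen bytes of associated data, the text follows, and the IV travels in the request itself rather than in the mapped data. A caller-side sketch of that contract (buffers and lengths assumed set up elsewhere):

    struct scatterlist sg[2];

    sg_init_table(sg, 2);
    sg_set_buf(&sg[0], assoc, assoclen);        /* AD first ...      */
    sg_set_buf(&sg[1], text, textlen);          /* ... then the text */
    aead_request_set_crypt(req, sg, sg, textlen, iv);
    aead_request_set_ad(req, assoclen);         /* where the text starts */
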
@@ -843,11 +820,10 @@ static int qat_alg_aead_dec(struct aead_request *areq)
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
-       int digst_size = crypto_aead_crt(aead_tfm)->authsize;
+       int digst_size = crypto_aead_authsize(aead_tfm);
        int ret, ctr = 0;
 
-       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
-                                 areq->iv, AES_BLOCK_SIZE, qat_req);
+       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;
 
@@ -861,12 +837,11 @@ static int qat_alg_aead_dec(struct aead_request *areq)
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = areq->cryptlen - digst_size;
-       cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+       cipher_param->cipher_offset = areq->assoclen;
        memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
        auth_param->auth_off = 0;
-       auth_param->auth_len = areq->assoclen +
-                               cipher_param->cipher_length + AES_BLOCK_SIZE;
+       auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);
@@ -878,8 +853,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
        return -EINPROGRESS;
 }
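
The offset arithmetic follows directly: the cipher region now starts right after the AD (no in-band IV block any more), and the MAC covers AD plus ciphertext. Worked numbers, assuming assoclen = 16, 64 bytes of ciphertext and a 20-byte SHA1 digest (so areq->cryptlen = 84):

    /* cipher_offset = assoclen              = 16
     * cipher_length = cryptlen - digst_size = 84 - 20 = 64
     * auth_off      = 0
     * auth_len      = assoclen + 64         = 80   (AD || ciphertext)
     */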
 
-static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
-                                    int enc_iv)
+static int qat_alg_aead_enc(struct aead_request *areq)
 {
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
@@ -888,10 +862,10 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
+       uint8_t *iv = areq->iv;
        int ret, ctr = 0;
 
-       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
-                                 iv, AES_BLOCK_SIZE, qat_req);
+       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;
 
@@ -906,16 +880,12 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
 
-       if (enc_iv) {
-               cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
-               cipher_param->cipher_offset = areq->assoclen;
-       } else {
-               memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
-               cipher_param->cipher_length = areq->cryptlen;
-               cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
-       }
+       memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+       cipher_param->cipher_length = areq->cryptlen;
+       cipher_param->cipher_offset = areq->assoclen;
+
        auth_param->auth_off = 0;
-       auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
+       auth_param->auth_len = areq->assoclen + areq->cryptlen;
 
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
@@ -928,28 +898,9 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
        return -EINPROGRESS;
 }
 
-static int qat_alg_aead_enc(struct aead_request *areq)
-{
-       return qat_alg_aead_enc_internal(areq, areq->iv, 0);
-}
-
-static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
-{
-       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
-       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
-       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-       __be64 seq;
-
-       memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
-       seq = cpu_to_be64(req->seq);
-       memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
-              &seq, sizeof(uint64_t));
-       return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
-}
-
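
qat_alg_aead_genivenc() (and the enc_iv special case folded out of qat_alg_aead_enc() above) goes away because IV generation is no longer a per-driver job in the new AEAD interface; it is layered on by a geniv template instead. A hedged sketch of the caller side, using echainiv as one such template (error handling elided):

    struct crypto_aead *tfm =
            crypto_alloc_aead("echainiv(authenc(hmac(sha1),cbc(aes)))",
                              0, 0);
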
 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
-                                    const uint8_t *key,
-                                    unsigned int keylen)
+                                    const u8 *key, unsigned int keylen,
+                                    int mode)
 {
        struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct device *dev;
@@ -990,7 +941,7 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                }
        }
        spin_unlock(&ctx->lock);
-       if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
+       if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
                goto out_free_all;
 
        return 0;
@@ -1008,6 +959,27 @@ out_free_enc:
        return -ENOMEM;
 }
 
+static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
+                                        const u8 *key, unsigned int keylen)
+{
+       return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+                                        ICP_QAT_HW_CIPHER_CBC_MODE);
+}
+
+static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
+                                        const u8 *key, unsigned int keylen)
+{
+       return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+                                        ICP_QAT_HW_CIPHER_CTR_MODE);
+}
+
+static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
+                                        const u8 *key, unsigned int keylen)
+{
+       return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+                                        ICP_QAT_HW_CIPHER_XTS_MODE);
+}
+
 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
 {
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
@@ -1018,8 +990,7 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;
 
-       ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
-                                 NULL, 0, qat_req);
+       ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;
 
@@ -1056,8 +1027,7 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;
 
-       ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
-                                 NULL, 0, qat_req);
+       ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;
 
@@ -1084,46 +1054,43 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
        return -EINPROGRESS;
 }
 
-static int qat_alg_aead_init(struct crypto_tfm *tfm,
+static int qat_alg_aead_init(struct crypto_aead *tfm,
                             enum icp_qat_hw_auth_algo hash,
                             const char *hash_name)
 {
-       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
 
        ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
        if (IS_ERR(ctx->hash_tfm))
-               return -EFAULT;
-       spin_lock_init(&ctx->lock);
+               return PTR_ERR(ctx->hash_tfm);
        ctx->qat_hash_alg = hash;
-       tfm->crt_aead.reqsize = sizeof(struct aead_request) +
-                               sizeof(struct qat_crypto_request);
-       ctx->tfm = tfm;
+       crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+                                    sizeof(struct qat_crypto_request));
        return 0;
 }
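
Two small wins in the converted init: the real PTR_ERR() from crypto_alloc_shash() is propagated instead of a blanket -EFAULT, and the per-request private area is declared through the accessor rather than by poking tfm->crt_aead. That area is then recovered per request with the driver-side helper from crypto/internal/aead.h; a sketch, assuming the usual pattern in the request handlers:

    struct qat_crypto_request *qat_req = aead_request_ctx(areq);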
 
-static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
 {
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
 }
 
-static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
 {
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
 }
 
-static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
 {
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
 }
 
-static void qat_alg_aead_exit(struct crypto_tfm *tfm)
+static void qat_alg_aead_exit(struct crypto_aead *tfm)
 {
-       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;
 
-       if (!IS_ERR(ctx->hash_tfm))
-               crypto_free_shash(ctx->hash_tfm);
+       crypto_free_shash(ctx->hash_tfm);
 
        if (!inst)
                return;
@@ -1180,75 +1147,107 @@ static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
        qat_crypto_put_instance(inst);
 }
 
-static struct crypto_alg qat_algs[] = { {
-       .cra_name = "authenc(hmac(sha1),cbc(aes))",
-       .cra_driver_name = "qat_aes_cbc_hmac_sha1",
-       .cra_priority = 4001,
-       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-       .cra_blocksize = AES_BLOCK_SIZE,
-       .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-       .cra_alignmask = 0,
-       .cra_type = &crypto_aead_type,
-       .cra_module = THIS_MODULE,
-       .cra_init = qat_alg_aead_sha1_init,
-       .cra_exit = qat_alg_aead_exit,
-       .cra_u = {
-               .aead = {
-                       .setkey = qat_alg_aead_setkey,
-                       .decrypt = qat_alg_aead_dec,
-                       .encrypt = qat_alg_aead_enc,
-                       .givencrypt = qat_alg_aead_genivenc,
-                       .ivsize = AES_BLOCK_SIZE,
-                       .maxauthsize = SHA1_DIGEST_SIZE,
-               },
+
+static struct aead_alg qat_aeads[] = { {
+       .base = {
+               .cra_name = "authenc(hmac(sha1),cbc(aes))",
+               .cra_driver_name = "qat_aes_cbc_hmac_sha1",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+               .cra_module = THIS_MODULE,
+       },
+       .init = qat_alg_aead_sha1_init,
+       .exit = qat_alg_aead_exit,
+       .setkey = qat_alg_aead_setkey,
+       .decrypt = qat_alg_aead_dec,
+       .encrypt = qat_alg_aead_enc,
+       .ivsize = AES_BLOCK_SIZE,
+       .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+       .base = {
+               .cra_name = "authenc(hmac(sha256),cbc(aes))",
+               .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+               .cra_module = THIS_MODULE,
        },
+       .init = qat_alg_aead_sha256_init,
+       .exit = qat_alg_aead_exit,
+       .setkey = qat_alg_aead_setkey,
+       .decrypt = qat_alg_aead_dec,
+       .encrypt = qat_alg_aead_enc,
+       .ivsize = AES_BLOCK_SIZE,
+       .maxauthsize = SHA256_DIGEST_SIZE,
 }, {
-       .cra_name = "authenc(hmac(sha256),cbc(aes))",
-       .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+       .base = {
+               .cra_name = "authenc(hmac(sha512),cbc(aes))",
+               .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+               .cra_module = THIS_MODULE,
+       },
+       .init = qat_alg_aead_sha512_init,
+       .exit = qat_alg_aead_exit,
+       .setkey = qat_alg_aead_setkey,
+       .decrypt = qat_alg_aead_dec,
+       .encrypt = qat_alg_aead_enc,
+       .ivsize = AES_BLOCK_SIZE,
+       .maxauthsize = SHA512_DIGEST_SIZE,
+} };
+
+static struct crypto_alg qat_algs[] = { {
+       .cra_name = "cbc(aes)",
+       .cra_driver_name = "qat_aes_cbc",
        .cra_priority = 4001,
-       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+       .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
-       .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+       .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
-       .cra_type = &crypto_aead_type,
+       .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
-       .cra_init = qat_alg_aead_sha256_init,
-       .cra_exit = qat_alg_aead_exit,
+       .cra_init = qat_alg_ablkcipher_init,
+       .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
-               .aead = {
-                       .setkey = qat_alg_aead_setkey,
-                       .decrypt = qat_alg_aead_dec,
-                       .encrypt = qat_alg_aead_enc,
-                       .givencrypt = qat_alg_aead_genivenc,
+               .ablkcipher = {
+                       .setkey = qat_alg_ablkcipher_cbc_setkey,
+                       .decrypt = qat_alg_ablkcipher_decrypt,
+                       .encrypt = qat_alg_ablkcipher_encrypt,
+                       .min_keysize = AES_MIN_KEY_SIZE,
+                       .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
-                       .maxauthsize = SHA256_DIGEST_SIZE,
                },
        },
 }, {
-       .cra_name = "authenc(hmac(sha512),cbc(aes))",
-       .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+       .cra_name = "ctr(aes)",
+       .cra_driver_name = "qat_aes_ctr",
        .cra_priority = 4001,
-       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+       .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
-       .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+       .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
-       .cra_type = &crypto_aead_type,
+       .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
-       .cra_init = qat_alg_aead_sha512_init,
-       .cra_exit = qat_alg_aead_exit,
+       .cra_init = qat_alg_ablkcipher_init,
+       .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
-               .aead = {
-                       .setkey = qat_alg_aead_setkey,
-                       .decrypt = qat_alg_aead_dec,
-                       .encrypt = qat_alg_aead_enc,
-                       .givencrypt = qat_alg_aead_genivenc,
+               .ablkcipher = {
+                       .setkey = qat_alg_ablkcipher_ctr_setkey,
+                       .decrypt = qat_alg_ablkcipher_decrypt,
+                       .encrypt = qat_alg_ablkcipher_encrypt,
+                       .min_keysize = AES_MIN_KEY_SIZE,
+                       .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
-                       .maxauthsize = SHA512_DIGEST_SIZE,
                },
        },
 }, {
-       .cra_name = "cbc(aes)",
-       .cra_driver_name = "qat_aes_cbc",
+       .cra_name = "xts(aes)",
+       .cra_driver_name = "qat_aes_xts",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
@@ -1260,7 +1259,7 @@ static struct crypto_alg qat_algs[] = { {
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
-                       .setkey = qat_alg_ablkcipher_setkey,
+                       .setkey = qat_alg_ablkcipher_xts_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
@@ -1272,42 +1271,44 @@ static struct crypto_alg qat_algs[] = { {
 
 int qat_algs_register(void)
 {
-       int ret = 0;
+       int ret = 0, i;
 
        mutex_lock(&algs_lock);
-       if (++active_devs == 1) {
-               int i;
+       if (++active_devs != 1)
+               goto unlock;
 
-               for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
-                       qat_algs[i].cra_flags =
-                               (qat_algs[i].cra_type == &crypto_aead_type) ?
-                               CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
-                               CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+       for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+               qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+                                       CRYPTO_ALG_ASYNC;
 
-               ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
-       }
-       mutex_unlock(&algs_lock);
-       return ret;
-}
+       ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       if (ret)
+               goto unlock;
 
-int qat_algs_unregister(void)
-{
-       int ret = 0;
+       for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
+               qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
 
-       mutex_lock(&algs_lock);
-       if (--active_devs == 0)
-               ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+       if (ret)
+               goto unreg_algs;
+
+unlock:
        mutex_unlock(&algs_lock);
        return ret;
-}
 
-int qat_algs_init(void)
-{
-       crypto_get_default_rng();
-       return 0;
+unreg_algs:
+       crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       goto unlock;
 }
 
-void qat_algs_exit(void)
+void qat_algs_unregister(void)
 {
-       crypto_put_default_rng();
+       mutex_lock(&algs_lock);
+       if (--active_devs != 0)
+               goto unlock;
+
+       crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+       crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+
+unlock:
+       mutex_unlock(&algs_lock);
 }
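
Finally, qat_algs_init()/qat_algs_exit() disappear along with the RNG dependency, registration now covers both arrays (rolling back the ablkcipher algs if AEAD registration fails), and unregistration can no longer fail, so it returns void. The intended usage is one register/unregister per accelerator device, reference counted through active_devs; a sketch with hypothetical call sites:

    /* first device up registers the algs, last device down removes them */
    static int qat_dev_up(void)
    {
            return qat_algs_register();
    }

    static void qat_dev_down(void)
    {
            qat_algs_unregister();
    }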