These changes are the raw update of the kernel sources to linux-4.4.6-rt14.
[kvmfornfv.git] kernel/drivers/crypto/caam/caamhash.c
index 0436997..49106ea 100644
@@ -127,13 +127,22 @@ struct caam_hash_state {
        int buflen_0;
        u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
        int buflen_1;
-       u8 caam_ctx[MAX_CTX_LEN];
+       u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
        int current_buf;
 };
 
+struct caam_export_state {
+       u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
+       u8 caam_ctx[MAX_CTX_LEN];
+       int buflen;
+       int (*update)(struct ahash_request *req);
+       int (*final)(struct ahash_request *req);
+       int (*finup)(struct ahash_request *req);
+};
+
 /* Common job descriptor seq in/out ptr routines */
 
 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
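
The first hunk gives caam_ctx its own ____cacheline_aligned slot, presumably so that device DMA into the context cannot share a cacheline with the adjacent bookkeeping fields. A minimal compile-time sketch of that layout assumption (not part of the patch; the helper name is hypothetical):

static inline void caam_hash_state_layout_check(void)
{
        /* fires at build time if caam_ctx ever loses its own cacheline */
        BUILD_BUG_ON(offsetof(struct caam_hash_state, caam_ctx) %
                     SMP_CACHE_BYTES);
}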
@@ -181,10 +190,9 @@ static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
 /* Map req->src and put it in link table */
 static inline void src_map_to_sec4_sg(struct device *jrdev,
                                      struct scatterlist *src, int src_nents,
-                                     struct sec4_sg_entry *sec4_sg,
-                                     bool chained)
+                                     struct sec4_sg_entry *sec4_sg)
 {
-       dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
+       dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
        sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
 }
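
The driver-private chained-scatterlist helpers (dma_map_sg_chained(), __sg_count()) are replaced throughout by the generic DMA and scatterlist APIs. A minimal sketch of the resulting pattern, assuming only linux/dma-mapping.h and linux/scatterlist.h (the helper name is illustrative, and unlike the call above it also checks the return values):

static int map_hash_src(struct device *jrdev, struct scatterlist *src,
                        unsigned int len)
{
        int nents = sg_nents_for_len(src, len); /* < 0 if list too short */

        if (nents < 0)
                return nents;

        /* dma_map_sg() returns 0 on failure, else the mapped entry count */
        if (!dma_map_sg(jrdev, src, nents ? : 1, DMA_TO_DEVICE))
                return -ENOMEM;

        return nents;
}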
 
@@ -585,7 +593,6 @@ badkey:
  * ahash_edesc - s/w-extended ahash descriptor
  * @dst_dma: physical mapped address of req->result
  * @sec4_sg_dma: physical mapped address of h/w link table
- * @chained: if source is chained
  * @src_nents: number of segments in input scatterlist
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg: pointer to h/w link table
@@ -594,7 +601,6 @@ badkey:
 struct ahash_edesc {
        dma_addr_t dst_dma;
        dma_addr_t sec4_sg_dma;
-       bool chained;
        int src_nents;
        int sec4_sg_bytes;
        struct sec4_sg_entry *sec4_sg;
@@ -606,8 +612,7 @@ static inline void ahash_unmap(struct device *dev,
                        struct ahash_request *req, int dst_len)
 {
        if (edesc->src_nents)
-               dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
-                                    DMA_TO_DEVICE, edesc->chained);
+               dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
        if (edesc->dst_dma)
                dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
 
@@ -788,7 +793,6 @@ static int ahash_update_ctx(struct ahash_request *req)
        dma_addr_t ptr = ctx->sh_desc_update_dma;
        int src_nents, sec4_sg_bytes, sec4_sg_src_index;
        struct ahash_edesc *edesc;
-       bool chained = false;
        int ret = 0;
        int sh_len;
 
@@ -797,8 +801,8 @@ static int ahash_update_ctx(struct ahash_request *req)
        to_hash = in_len - *next_buflen;
 
        if (to_hash) {
-               src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
-                                      &chained);
+               src_nents = sg_nents_for_len(req->src,
+                                            req->nbytes - (*next_buflen));
                sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
                sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
                                 sizeof(struct sec4_sg_entry);
@@ -807,7 +811,7 @@ static int ahash_update_ctx(struct ahash_request *req)
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
-               edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+               edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
                                sec4_sg_bytes, GFP_DMA | flags);
                if (!edesc) {
                        dev_err(jrdev,
@@ -816,7 +820,6 @@ static int ahash_update_ctx(struct ahash_request *req)
                }
 
                edesc->src_nents = src_nents;
-               edesc->chained = chained;
                edesc->sec4_sg_bytes = sec4_sg_bytes;
                edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                                 DESC_JOB_IO_LEN;
@@ -833,19 +836,18 @@ static int ahash_update_ctx(struct ahash_request *req)
 
                if (src_nents) {
                        src_map_to_sec4_sg(jrdev, req->src, src_nents,
-                                          edesc->sec4_sg + sec4_sg_src_index,
-                                          chained);
-                       if (*next_buflen) {
+                                          edesc->sec4_sg + sec4_sg_src_index);
+                       if (*next_buflen)
                                scatterwalk_map_and_copy(next_buf, req->src,
                                                         to_hash - *buflen,
                                                         *next_buflen, 0);
-                               state->current_buf = !state->current_buf;
-                       }
                } else {
                        (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
                                                        SEC4_SG_LEN_FIN;
                }
 
+               state->current_buf = !state->current_buf;
+
                sh_len = desc_len(sh_desc);
                desc = edesc->hw_desc;
                init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
@@ -919,8 +921,8 @@ static int ahash_final_ctx(struct ahash_request *req)
        sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
-                       sec4_sg_bytes, GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+                       GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
@@ -996,18 +998,17 @@ static int ahash_finup_ctx(struct ahash_request *req)
        int src_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
-       bool chained = false;
        int ret = 0;
        int sh_len;
 
-       src_nents = __sg_count(req->src, req->nbytes, &chained);
+       src_nents = sg_nents_for_len(req->src, req->nbytes);
        sec4_sg_src_index = 1 + (buflen ? 1 : 0);
        sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
                         sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
-                       sec4_sg_bytes, GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+                       GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
@@ -1018,7 +1019,6 @@ static int ahash_finup_ctx(struct ahash_request *req)
        init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
        edesc->src_nents = src_nents;
-       edesc->chained = chained;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                         DESC_JOB_IO_LEN;
@@ -1033,7 +1033,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
                                                last_buflen);
 
        src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
-                          sec4_sg_src_index, chained);
+                          sec4_sg_src_index);
 
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1081,19 +1081,17 @@ static int ahash_digest(struct ahash_request *req)
        int src_nents, sec4_sg_bytes;
        dma_addr_t src_dma;
        struct ahash_edesc *edesc;
-       bool chained = false;
        int ret = 0;
        u32 options;
        int sh_len;
 
-       src_nents = sg_count(req->src, req->nbytes, &chained);
-       dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
-                          chained);
+       src_nents = sg_count(req->src, req->nbytes);
+       dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
        sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
-                       DESC_JOB_IO_LEN, GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
+                       GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
@@ -1102,7 +1100,6 @@ static int ahash_digest(struct ahash_request *req)
                          DESC_JOB_IO_LEN;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->src_nents = src_nents;
-       edesc->chained = chained;
 
        sh_len = desc_len(sh_desc);
        desc = edesc->hw_desc;
@@ -1166,8 +1163,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
        int sh_len;
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
-                       GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
@@ -1229,7 +1225,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
        struct ahash_edesc *edesc;
        u32 *desc, *sh_desc = ctx->sh_desc_update_first;
        dma_addr_t ptr = ctx->sh_desc_update_first_dma;
-       bool chained = false;
        int ret = 0;
        int sh_len;
 
@@ -1237,8 +1232,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
        to_hash = in_len - *next_buflen;
 
        if (to_hash) {
-               src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
-                                      &chained);
+               src_nents = sg_nents_for_len(req->src,
+                                            req->nbytes - (*next_buflen));
                sec4_sg_bytes = (1 + src_nents) *
                                sizeof(struct sec4_sg_entry);
 
@@ -1246,7 +1241,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
-               edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+               edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
                                sec4_sg_bytes, GFP_DMA | flags);
                if (!edesc) {
                        dev_err(jrdev,
@@ -1255,7 +1250,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                }
 
                edesc->src_nents = src_nents;
-               edesc->chained = chained;
                edesc->sec4_sg_bytes = sec4_sg_bytes;
                edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                                 DESC_JOB_IO_LEN;
@@ -1264,14 +1258,15 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
                                                    buf, *buflen);
                src_map_to_sec4_sg(jrdev, req->src, src_nents,
-                                  edesc->sec4_sg + 1, chained);
+                                  edesc->sec4_sg + 1);
                if (*next_buflen) {
                        scatterwalk_map_and_copy(next_buf, req->src,
                                                 to_hash - *buflen,
                                                 *next_buflen, 0);
-                       state->current_buf = !state->current_buf;
                }
 
+               state->current_buf = !state->current_buf;
+
                sh_len = desc_len(sh_desc);
                desc = edesc->hw_desc;
                init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
@@ -1343,18 +1338,17 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
        int sec4_sg_bytes, sec4_sg_src_index, src_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
-       bool chained = false;
        int sh_len;
        int ret = 0;
 
-       src_nents = __sg_count(req->src, req->nbytes, &chained);
+       src_nents = sg_nents_for_len(req->src, req->nbytes);
        sec4_sg_src_index = 2;
        sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
                         sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
-                       sec4_sg_bytes, GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+                       GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
@@ -1365,7 +1359,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
        init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
        edesc->src_nents = src_nents;
-       edesc->chained = chained;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                         DESC_JOB_IO_LEN;
@@ -1374,8 +1367,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
                                                state->buf_dma, buflen,
                                                last_buflen);
 
-       src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
-                          chained);
+       src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);
 
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1429,7 +1421,6 @@ static int ahash_update_first(struct ahash_request *req)
        dma_addr_t src_dma;
        u32 options;
        struct ahash_edesc *edesc;
-       bool chained = false;
        int ret = 0;
        int sh_len;
 
@@ -1438,17 +1429,15 @@ static int ahash_update_first(struct ahash_request *req)
        to_hash = req->nbytes - *next_buflen;
 
        if (to_hash) {
-               src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
-                                    &chained);
-               dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-                                  DMA_TO_DEVICE, chained);
+               src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
+               dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
                sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
 
                /*
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
-               edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+               edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
                                sec4_sg_bytes, GFP_DMA | flags);
                if (!edesc) {
                        dev_err(jrdev,
@@ -1457,7 +1446,6 @@ static int ahash_update_first(struct ahash_request *req)
                }
 
                edesc->src_nents = src_nents;
-               edesc->chained = chained;
                edesc->sec4_sg_bytes = sec4_sg_bytes;
                edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                                 DESC_JOB_IO_LEN;
@@ -1574,25 +1562,42 @@ static int ahash_final(struct ahash_request *req)
 
 static int ahash_export(struct ahash_request *req, void *out)
 {
-       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
+       struct caam_export_state *export = out;
+       int len;
+       u8 *buf;
+
+       if (state->current_buf) {
+               buf = state->buf_1;
+               len = state->buflen_1;
+       } else {
+               buf = state->buf_0;
+               len = state->buflen_0;
+       }
+
+       memcpy(export->buf, buf, len);
+       memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
+       export->buflen = len;
+       export->update = state->update;
+       export->final = state->final;
+       export->finup = state->finup;
 
-       memcpy(out, ctx, sizeof(struct caam_hash_ctx));
-       memcpy(out + sizeof(struct caam_hash_ctx), state,
-              sizeof(struct caam_hash_state));
        return 0;
 }
 
 static int ahash_import(struct ahash_request *req, const void *in)
 {
-       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
+       const struct caam_export_state *export = in;
+
+       memset(state, 0, sizeof(*state));
+       memcpy(state->buf_0, export->buf, export->buflen);
+       memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
+       state->buflen_0 = export->buflen;
+       state->update = export->update;
+       state->final = export->final;
+       state->finup = export->finup;
 
-       memcpy(ctx, in, sizeof(struct caam_hash_ctx));
-       memcpy(state, in + sizeof(struct caam_hash_ctx),
-              sizeof(struct caam_hash_state));
        return 0;
 }
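
With export/import now serializing only the per-request state into struct caam_export_state, rather than copying the whole transform context, a caller can suspend and resume a hash through the standard ahash interface. A hedged usage sketch, assuming only the generic crypto API (the function name is illustrative):

static int hash_save_restore(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        void *state;
        int err;

        /* ->statesize is sizeof(struct caam_export_state) for these algs */
        state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        err = crypto_ahash_export(req, state);
        if (!err)
                err = crypto_ahash_import(req, state);

        kfree(state);
        return err;
}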
 
@@ -1626,8 +1631,9 @@ static struct caam_hash_template driver_hash[] = {
                        .setkey = ahash_setkey,
                        .halg = {
                                .digestsize = SHA1_DIGEST_SIZE,
-                               },
+                               .statesize = sizeof(struct caam_export_state),
                        },
+               },
                .alg_type = OP_ALG_ALGSEL_SHA1,
                .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
        }, {
@@ -1647,8 +1653,9 @@ static struct caam_hash_template driver_hash[] = {
                        .setkey = ahash_setkey,
                        .halg = {
                                .digestsize = SHA224_DIGEST_SIZE,
-                               },
+                               .statesize = sizeof(struct caam_export_state),
                        },
+               },
                .alg_type = OP_ALG_ALGSEL_SHA224,
                .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
        }, {
@@ -1668,8 +1675,9 @@ static struct caam_hash_template driver_hash[] = {
                        .setkey = ahash_setkey,
                        .halg = {
                                .digestsize = SHA256_DIGEST_SIZE,
-                               },
+                               .statesize = sizeof(struct caam_export_state),
                        },
+               },
                .alg_type = OP_ALG_ALGSEL_SHA256,
                .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
        }, {
@@ -1689,8 +1697,9 @@ static struct caam_hash_template driver_hash[] = {
                        .setkey = ahash_setkey,
                        .halg = {
                                .digestsize = SHA384_DIGEST_SIZE,
-                               },
+                               .statesize = sizeof(struct caam_export_state),
                        },
+               },
                .alg_type = OP_ALG_ALGSEL_SHA384,
                .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
        }, {
@@ -1710,8 +1719,9 @@ static struct caam_hash_template driver_hash[] = {
                        .setkey = ahash_setkey,
                        .halg = {
                                .digestsize = SHA512_DIGEST_SIZE,
-                               },
+                               .statesize = sizeof(struct caam_export_state),
                        },
+               },
                .alg_type = OP_ALG_ALGSEL_SHA512,
                .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
        }, {
@@ -1731,8 +1741,9 @@ static struct caam_hash_template driver_hash[] = {
                        .setkey = ahash_setkey,
                        .halg = {
                                .digestsize = MD5_DIGEST_SIZE,
-                               },
+                               .statesize = sizeof(struct caam_export_state),
                        },
+               },
                .alg_type = OP_ALG_ALGSEL_MD5,
                .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
        },
@@ -1842,7 +1853,7 @@ caam_hash_alloc(struct caam_hash_template *template,
        struct ahash_alg *halg;
        struct crypto_alg *alg;
 
-       t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
+       t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
        if (!t_alg) {
                pr_err("failed to allocate t_alg\n");
                return ERR_PTR(-ENOMEM);
@@ -1884,8 +1895,10 @@ static int __init caam_algapi_hash_init(void)
        struct device_node *dev_node;
        struct platform_device *pdev;
        struct device *ctrldev;
-       void *priv;
        int i = 0, err = 0;
+       struct caam_drv_private *priv;
+       unsigned int md_limit = SHA512_DIGEST_SIZE;
+       u32 cha_inst, cha_vid;
 
        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
        if (!dev_node) {
@@ -1911,43 +1924,65 @@ static int __init caam_algapi_hash_init(void)
        if (!priv)
                return -ENODEV;
 
+       /*
+        * Register crypto algorithms the device supports.  First, identify
+        * presence and attributes of MD block.
+        */
+       cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+       cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+
+       /*
+        * Skip registration of any hashing algorithms if MD block
+        * is not present.
+        */
+       if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
+               return -ENODEV;
+
+       /* Limit digest size based on LP256 */
+       if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
+               md_limit = SHA256_DIGEST_SIZE;
+
        INIT_LIST_HEAD(&hash_list);
 
        /* register crypto algorithms the device supports */
        for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
-               /* TODO: check if h/w supports alg */
                struct caam_hash_alg *t_alg;
+               struct caam_hash_template *alg = driver_hash + i;
+
+               /* If MD size is not supported by device, skip registration */
+               if (alg->template_ahash.halg.digestsize > md_limit)
+                       continue;
 
                /* register hmac version */
-               t_alg = caam_hash_alloc(&driver_hash[i], true);
+               t_alg = caam_hash_alloc(alg, true);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
-                       pr_warn("%s alg allocation failed\n",
-                               driver_hash[i].driver_name);
+                       pr_warn("%s alg allocation failed\n", alg->driver_name);
                        continue;
                }
 
                err = crypto_register_ahash(&t_alg->ahash_alg);
                if (err) {
-                       pr_warn("%s alg registration failed\n",
-                               t_alg->ahash_alg.halg.base.cra_driver_name);
+                       pr_warn("%s alg registration failed: %d\n",
+                               t_alg->ahash_alg.halg.base.cra_driver_name,
+                               err);
                        kfree(t_alg);
                } else
                        list_add_tail(&t_alg->entry, &hash_list);
 
                /* register unkeyed version */
-               t_alg = caam_hash_alloc(&driver_hash[i], false);
+               t_alg = caam_hash_alloc(alg, false);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
-                       pr_warn("%s alg allocation failed\n",
-                               driver_hash[i].driver_name);
+                       pr_warn("%s alg allocation failed\n", alg->driver_name);
                        continue;
                }
 
                err = crypto_register_ahash(&t_alg->ahash_alg);
                if (err) {
-                       pr_warn("%s alg registration failed\n",
-                               t_alg->ahash_alg.halg.base.cra_driver_name);
+                       pr_warn("%s alg registration failed: %d\n",
+                               t_alg->ahash_alg.halg.base.cra_driver_name,
+                               err);
                        kfree(t_alg);
                } else
                        list_add_tail(&t_alg->entry, &hash_list);
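
Pulled out of the init-path hunk above, the new capability gate amounts to the following predicate (a sketch with a hypothetical helper name; the masks come from the CAAM register definitions already used in the hunk):

static bool caam_md_supported(u32 cha_inst, u32 cha_vid,
                              unsigned int digestsize)
{
        unsigned int md_limit = SHA512_DIGEST_SIZE;

        /* no MD block instantiated: register no hash algorithms */
        if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
                return false;

        /* LP256 MD block: cap at SHA-256, skipping SHA-384/512 */
        if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
                md_limit = SHA256_DIGEST_SIZE;

        return digestsize <= md_limit;
}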