/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
19 static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
21 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
22 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
23 struct sun4i_ss_ctx *ss = op->ss;
24 unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
25 struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
27 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
28 u32 rx_cnt = SS_RX_DEFAULT;
33 unsigned int ileft = areq->nbytes;
34 unsigned int oleft = areq->nbytes;
36 struct sg_mapping_iter mi, mo;
37 unsigned int oi, oo; /* offset for in and out */
40 if (areq->nbytes == 0)
44 dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
48 if (!areq->src || !areq->dst) {
49 dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
53 spin_lock_irqsave(&ss->slock, flags);
55 for (i = 0; i < op->keylen; i += 4)
56 writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
59 for (i = 0; i < 4 && i < ivsize / 4; i++) {
60 v = *(u32 *)(areq->info + i * 4);
61 writel(v, ss->base + SS_IV0 + i * 4);
64 writel(mode, ss->base + SS_CTL);
66 sg_miter_start(&mi, areq->src, sg_nents(areq->src),
67 SG_MITER_FROM_SG | SG_MITER_ATOMIC);
68 sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
69 SG_MITER_TO_SG | SG_MITER_ATOMIC);
72 if (!mi.addr || !mo.addr) {
73 dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
78 ileft = areq->nbytes / 4;
79 oleft = areq->nbytes / 4;
83 todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
86 writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
89 if (oi == mi.length) {
94 spaces = readl(ss->base + SS_FCSR);
95 rx_cnt = SS_RXFIFO_SPACES(spaces);
96 tx_cnt = SS_TXFIFO_SPACES(spaces);
98 todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
101 readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
104 if (oo == mo.length) {
111 for (i = 0; i < 4 && i < ivsize / 4; i++) {
112 v = readl(ss->base + SS_IV0 + i * 4);
113 *(u32 *)(areq->info + i * 4) = v;
120 writel(0, ss->base + SS_CTL);
121 spin_unlock_irqrestore(&ss->slock, flags);
125 /* Generic function that support SG with size not multiple of 4 */
126 static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
128 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
129 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
130 struct sun4i_ss_ctx *ss = op->ss;
132 struct scatterlist *in_sg = areq->src;
133 struct scatterlist *out_sg = areq->dst;
134 unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
135 struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
136 u32 mode = ctx->mode;
137 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
138 u32 rx_cnt = SS_RX_DEFAULT;
143 unsigned int ileft = areq->nbytes;
144 unsigned int oleft = areq->nbytes;
146 struct sg_mapping_iter mi, mo;
147 unsigned int oi, oo; /* offset for in and out */
148 char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
149 char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
150 unsigned int ob = 0; /* offset in buf */
151 unsigned int obo = 0; /* offset in bufo*/
152 unsigned int obl = 0; /* length of data in bufo */
155 if (areq->nbytes == 0)
159 dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
163 if (!areq->src || !areq->dst) {
164 dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
169 * if we have only SGs with size multiple of 4,
170 * we can use the SS optimized function
172 while (in_sg && no_chunk == 1) {
173 if ((in_sg->length % 4) != 0)
175 in_sg = sg_next(in_sg);
177 while (out_sg && no_chunk == 1) {
178 if ((out_sg->length % 4) != 0)
180 out_sg = sg_next(out_sg);
184 return sun4i_ss_opti_poll(areq);
186 spin_lock_irqsave(&ss->slock, flags);
188 for (i = 0; i < op->keylen; i += 4)
189 writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
192 for (i = 0; i < 4 && i < ivsize / 4; i++) {
193 v = *(u32 *)(areq->info + i * 4);
194 writel(v, ss->base + SS_IV0 + i * 4);
197 writel(mode, ss->base + SS_CTL);
199 sg_miter_start(&mi, areq->src, sg_nents(areq->src),
200 SG_MITER_FROM_SG | SG_MITER_ATOMIC);
201 sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
202 SG_MITER_TO_SG | SG_MITER_ATOMIC);
205 if (!mi.addr || !mo.addr) {
206 dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
210 ileft = areq->nbytes;
211 oleft = areq->nbytes;
218 * todo is the number of consecutive 4byte word that we
219 * can read from current SG
221 todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
222 if (todo > 0 && ob == 0) {
223 writesl(ss->base + SS_RXFIFO, mi.addr + oi,
229 * not enough consecutive bytes, so we need to
230 * linearize in buf. todo is in bytes
231 * After that copy, if we have a multiple of 4
232 * we need to be able to write all buf in one
233 * pass, so it is why we min() with rx_cnt
235 todo = min3(rx_cnt * 4 - ob, ileft,
237 memcpy(buf + ob, mi.addr + oi, todo);
242 writesl(ss->base + SS_RXFIFO, buf,
247 if (oi == mi.length) {
253 spaces = readl(ss->base + SS_FCSR);
254 rx_cnt = SS_RXFIFO_SPACES(spaces);
255 tx_cnt = SS_TXFIFO_SPACES(spaces);
256 dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
258 oi, mi.length, ileft, areq->nbytes, rx_cnt,
259 oo, mo.length, oleft, areq->nbytes, tx_cnt,
264 /* todo in 4bytes word */
265 todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
267 readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
270 if (oo == mo.length) {
276 * read obl bytes in bufo, we read at maximum for
277 * emptying the device
279 readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
284 * how many bytes we can copy ?
285 * no more than remaining SG size
286 * no more than remaining buffer
287 * no need to test against oleft
289 todo = min(mo.length - oo, obl - obo);
290 memcpy(mo.addr + oo, bufo + obo, todo);
294 if (oo == mo.length) {
299 /* bufo must be fully used here */
303 for (i = 0; i < 4 && i < ivsize / 4; i++) {
304 v = readl(ss->base + SS_IV0 + i * 4);
305 *(u32 *)(areq->info + i * 4) = v;
312 writel(0, ss->base + SS_CTL);
313 spin_unlock_irqrestore(&ss->slock, flags);
319 int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
321 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
322 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
323 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
325 rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
327 return sun4i_ss_cipher_poll(areq);
330 int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
332 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
333 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
334 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
336 rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
338 return sun4i_ss_cipher_poll(areq);
342 int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
344 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
345 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
346 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
348 rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
350 return sun4i_ss_cipher_poll(areq);
353 int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
355 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
356 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
357 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
359 rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
361 return sun4i_ss_cipher_poll(areq);
365 int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
367 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
368 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
369 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
371 rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
373 return sun4i_ss_cipher_poll(areq);
376 int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
378 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
379 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
380 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
382 rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
384 return sun4i_ss_cipher_poll(areq);
388 int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
390 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
391 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
392 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
394 rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
396 return sun4i_ss_cipher_poll(areq);
399 int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
401 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
402 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
403 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
405 rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
407 return sun4i_ss_cipher_poll(areq);
411 int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
413 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
414 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
415 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
417 rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
419 return sun4i_ss_cipher_poll(areq);
422 int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
424 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
425 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
426 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
428 rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
430 return sun4i_ss_cipher_poll(areq);
434 int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
436 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
437 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
438 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
440 rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
442 return sun4i_ss_cipher_poll(areq);
445 int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
447 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
448 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
449 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
451 rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
453 return sun4i_ss_cipher_poll(areq);
456 int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
458 struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
459 struct crypto_alg *alg = tfm->__crt_alg;
460 struct sun4i_ss_alg_template *algt;
462 memset(op, 0, sizeof(struct sun4i_tfm_ctx));
464 algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
467 tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);
472 /* check and set the AES key, prepare the mode to be used */
473 int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
476 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
477 struct sun4i_ss_ctx *ss = op->ss;
481 op->keymode = SS_AES_128BITS;
484 op->keymode = SS_AES_192BITS;
487 op->keymode = SS_AES_256BITS;
490 dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
491 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
495 memcpy(op->key, key, keylen);
499 /* check and set the DES key, prepare the mode to be used */
500 int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
503 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
504 struct sun4i_ss_ctx *ss = op->ss;
506 u32 tmp[DES_EXPKEY_WORDS];
509 if (unlikely(keylen != DES_KEY_SIZE)) {
510 dev_err(ss->dev, "Invalid keylen %u\n", keylen);
511 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
515 flags = crypto_ablkcipher_get_flags(tfm);
517 ret = des_ekey(tmp, key);
518 if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
519 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
520 dev_dbg(ss->dev, "Weak key %u\n", keylen);
525 memcpy(op->key, key, keylen);
529 /* check and set the 3DES key, prepare the mode to be used */
530 int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
533 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
534 struct sun4i_ss_ctx *ss = op->ss;
536 if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
537 dev_err(ss->dev, "Invalid keylen %u\n", keylen);
538 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
542 memcpy(op->key, key, keylen);