author    Linus Torvalds <torvalds@linux-foundation.org>    2020-10-13 08:50:16 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2020-10-13 08:50:16 -0700
commit    39a5101f989e8d2be557136704d53990f9b402c8 (patch)
tree      b9c16c6f32508939111fb6d0159d7450713a5f33 /drivers/crypto
parent    865c50e1d279671728c2936cb7680eb89355eeea (diff)
parent    3093e7c16e12d729c325adb3c53dde7308cefbd8 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Allow DRBG testing through user-space af_alg
   - Add tcrypt speed testing support for keyed hashes
   - Add type-safe init/exit hooks for ahash

  Algorithms:
   - Mark arc4 as obsolete and pending for future removal
   - Mark anubis, khazad, seed and tea as obsolete
   - Improve boot-time xor benchmark
   - Add OSCCA SM2 asymmetric cipher algorithm and use it for integrity

  Drivers:
   - Fixes and enhancement for XTS in caam
   - Add support for XIP8001B hwrng in xiphera-trng
   - Add RNG and hash support in sun8i-ce/sun8i-ss
   - Allow imx-rngc to be used by kernel entropy pool
   - Use crypto engine in omap-sham
   - Add support for Ingenic X1830 with ingenic"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (205 commits)
  X.509: Fix modular build of public_key_sm2
  crypto: xor - Remove unused variable count in do_xor_speed
  X.509: fix error return value on the failed path
  crypto: bcm - Verify GCM/CCM key length in setkey
  crypto: qat - drop input parameter from adf_enable_aer()
  crypto: qat - fix function parameters descriptions
  crypto: atmel-tdes - use semicolons rather than commas to separate statements
  crypto: drivers - use semicolons rather than commas to separate statements
  hwrng: mxc-rnga - use semicolons rather than commas to separate statements
  hwrng: iproc-rng200 - use semicolons rather than commas to separate statements
  hwrng: stm32 - use semicolons rather than commas to separate statements
  crypto: xor - use ktime for template benchmarking
  crypto: xor - defer load time benchmark to a later time
  crypto: hisilicon/zip - fix the uninitalized 'curr_qm_qp_num'
  crypto: hisilicon/zip - fix the return value when device is busy
  crypto: hisilicon/zip - fix zero length input in GZIP decompress
  crypto: hisilicon/zip - fix the uncleared debug registers
  lib/mpi: Fix unused variable warnings
  crypto: x86/poly1305 - Remove assignments with no effect
  hwrng: npcm - modify readl to readb
  ...
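Editorial note: the af_alg item above ("Allow DRBG testing through user-space af_alg") and the new sun8i-ce/sun8i-ss PRNGs all sit behind the generic "stdrng" algorithm name. As a rough, hypothetical illustration (not part of this merge) of how user space reaches such an RNG through AF_ALG: whichever "stdrng" implementation has the highest cra_priority is the one actually selected, the seed below is a placeholder, and error handling is abbreviated.

/* Sketch only: pull bytes from a kernel "stdrng" instance via AF_ALG. */
#include <linux/if_alg.h>
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif
#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "rng",
                .salg_name   = "stdrng",
        };
        unsigned char seed[32] = { 0 };  /* placeholder seed, use real entropy */
        unsigned char buf[16];
        int tfmfd, opfd;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        /* For an RNG instance, ALG_SET_KEY supplies the seed. */
        setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, seed, sizeof(seed));
        opfd = accept(tfmfd, NULL, 0);
        if (read(opfd, buf, sizeof(buf)) == sizeof(buf))
                printf("got %zu random bytes\n", sizeof(buf));
        close(opfd);
        close(tfmfd);
        return 0;
}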
Diffstat (limited to 'drivers/crypto')
-rw-r--r-- drivers/crypto/Kconfig | 1
-rw-r--r-- drivers/crypto/allwinner/Kconfig | 43
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c | 17
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/Makefile | 3
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 131
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 405
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 413
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c | 164
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c | 127
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 139
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/Makefile | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 16
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 229
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 444
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c | 173
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h | 89
-rw-r--r-- drivers/crypto/amcc/crypto4xx_alg.c | 2
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.h | 1
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 12
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-core.c | 16
-rw-r--r-- drivers/crypto/atmel-aes.c | 2
-rw-r--r-- drivers/crypto/atmel-tdes.c | 2
-rw-r--r-- drivers/crypto/bcm/cipher.c | 111
-rw-r--r-- drivers/crypto/bcm/cipher.h | 1
-rw-r--r-- drivers/crypto/bcm/spu.c | 23
-rw-r--r-- drivers/crypto/bcm/spu.h | 1
-rw-r--r-- drivers/crypto/bcm/spu2.c | 12
-rw-r--r-- drivers/crypto/bcm/spu2.h | 1
-rw-r--r-- drivers/crypto/caam/Kconfig | 3
-rw-r--r-- drivers/crypto/caam/Makefile | 2
-rw-r--r-- drivers/crypto/caam/caamalg.c | 94
-rw-r--r-- drivers/crypto/caam/caamalg_desc.c | 28
-rw-r--r-- drivers/crypto/caam/caamalg_qi.c | 94
-rw-r--r-- drivers/crypto/caam/caamalg_qi2.c | 118
-rw-r--r-- drivers/crypto/caam/caamalg_qi2.h | 2
-rw-r--r-- drivers/crypto/caam/ctrl.c | 88
-rw-r--r-- drivers/crypto/caam/debugfs.c | 96
-rw-r--r-- drivers/crypto/caam/debugfs.h | 26
-rw-r--r-- drivers/crypto/caam/dpseci-debugfs.c | 23
-rw-r--r-- drivers/crypto/caam/intern.h | 17
-rw-r--r-- drivers/crypto/caam/jr.c | 10
-rw-r--r-- drivers/crypto/caam/qi.c | 20
-rw-r--r-- drivers/crypto/cavium/cpt/cptvf_algs.c | 8
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_main.c | 1
-rw-r--r-- drivers/crypto/cavium/zip/zip_main.c | 44
-rw-r--r-- drivers/crypto/ccp/ccp-ops.c | 3
-rw-r--r-- drivers/crypto/ccree/cc_cipher.c | 282
-rw-r--r-- drivers/crypto/ccree/cc_crypto_ctx.h | 1
-rw-r--r-- drivers/crypto/ccree/cc_driver.c | 7
-rw-r--r-- drivers/crypto/ccree/cc_driver.h | 1
-rw-r--r-- drivers/crypto/ccree/cc_pm.c | 6
-rw-r--r-- drivers/crypto/chelsio/chcr_core.c | 2
-rw-r--r-- drivers/crypto/hifn_795x.c | 28
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre.h | 1
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 59
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 33
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 237
-rw-r--r-- drivers/crypto/hisilicon/qm.h | 31
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 51
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 39
-rw-r--r-- drivers/crypto/hisilicon/zip/zip.h | 15
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_crypto.c | 140
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 195
-rw-r--r-- drivers/crypto/img-hash.c | 1
-rw-r--r-- drivers/crypto/inside-secure/safexcel.c | 44
-rw-r--r-- drivers/crypto/inside-secure/safexcel.h | 28
-rw-r--r-- drivers/crypto/inside-secure/safexcel_cipher.c | 90
-rw-r--r-- drivers/crypto/inside-secure/safexcel_hash.c | 153
-rw-r--r-- drivers/crypto/inside-secure/safexcel_ring.c | 9
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 2
-rw-r--r-- drivers/crypto/marvell/cesa/cesa.c | 4
-rw-r--r-- drivers/crypto/marvell/cesa/cesa.h | 20
-rw-r--r-- drivers/crypto/marvell/cesa/cipher.c | 5
-rw-r--r-- drivers/crypto/marvell/cesa/hash.c | 24
-rw-r--r-- drivers/crypto/marvell/cesa/tdma.c | 16
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 8
-rw-r--r-- drivers/crypto/mediatek/mtk-aes.c | 57
-rw-r--r-- drivers/crypto/mediatek/mtk-platform.c | 16
-rw-r--r-- drivers/crypto/mediatek/mtk-sha.c | 2
-rw-r--r-- drivers/crypto/n2_core.c | 60
-rw-r--r-- drivers/crypto/omap-sham.c | 189
-rw-r--r-- drivers/crypto/padlock-aes.c | 1
-rw-r--r-- drivers/crypto/picoxcell_crypto.c | 9
-rw-r--r-- drivers/crypto/qat/qat_c3xxx/adf_drv.c | 19
-rw-r--r-- drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 17
-rw-r--r-- drivers/crypto/qat/qat_c62x/adf_drv.c | 19
-rw-r--r-- drivers/crypto/qat/qat_c62xvf/adf_drv.c | 17
-rw-r--r-- drivers/crypto/qat/qat_common/adf_accel_devices.h | 6
-rw-r--r-- drivers/crypto/qat/qat_common/adf_aer.c | 6
-rw-r--r-- drivers/crypto/qat/qat_common/adf_cfg.c | 19
-rw-r--r-- drivers/crypto/qat/qat_common/adf_common_drv.h | 2
-rw-r--r-- drivers/crypto/qat/qat_common/adf_ctl_drv.c | 2
-rw-r--r-- drivers/crypto/qat/qat_common/adf_dev_mgr.c | 2
-rw-r--r-- drivers/crypto/qat/qat_common/adf_sriov.c | 6
-rw-r--r-- drivers/crypto/qat/qat_common/adf_transport_debug.c | 42
-rw-r--r-- drivers/crypto/qat/qat_common/qat_algs.c | 10
-rw-r--r-- drivers/crypto/qat/qat_common/qat_hal.c | 7
-rw-r--r-- drivers/crypto/qat/qat_common/qat_uclo.c | 9
-rw-r--r-- drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 19
-rw-r--r-- drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 17
-rw-r--r-- drivers/crypto/qce/core.c | 1
-rw-r--r-- drivers/crypto/qce/sha.c | 1
-rw-r--r-- drivers/crypto/qce/skcipher.c | 1
-rw-r--r-- drivers/crypto/qcom-rng.c | 1
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto.c | 1
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto.h | 1
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_ahash.c | 1
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 1
-rw-r--r-- drivers/crypto/s5p-sss.c | 28
-rw-r--r-- drivers/crypto/sa2ul.c | 235
-rw-r--r-- drivers/crypto/sahara.c | 2
-rw-r--r-- drivers/crypto/stm32/Kconfig | 1
-rw-r--r-- drivers/crypto/stm32/stm32-crc32.c | 22
-rw-r--r-- drivers/crypto/stm32/stm32-cryp.c | 47
-rw-r--r-- drivers/crypto/stm32/stm32-hash.c | 16
-rw-r--r-- drivers/crypto/talitos.c | 8
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_core.c | 28
-rw-r--r-- drivers/crypto/ux500/hash/hash_core.c | 31
-rw-r--r-- drivers/crypto/virtio/Kconfig | 1
-rw-r--r-- drivers/crypto/xilinx/zynqmp-aes-gcm.c | 1
120 files changed, 3926 insertions, 1844 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 52a9b7cf6576..37593387164a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -873,6 +873,7 @@ config CRYPTO_DEV_SA2UL
select CRYPTO_AES
select CRYPTO_AES_ARM64
select CRYPTO_ALGAPI
+ select CRYPTO_AUTHENC
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
index 12e7c6a85a02..0cdfe0e8cc66 100644
--- a/drivers/crypto/allwinner/Kconfig
+++ b/drivers/crypto/allwinner/Kconfig
@@ -59,6 +59,32 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
This will create /sys/kernel/debug/sun8i-ce/stats for displaying
the number of requests per flow and per algorithm.
+config CRYPTO_DEV_SUN8I_CE_HASH
+ bool "Enable support for hash on sun8i-ce"
+ depends on CRYPTO_DEV_SUN8I_CE
+ select MD5
+ select SHA1
+ select SHA256
+ select SHA512
+ help
+ Say y to enable support for hash algorithms.
+
+config CRYPTO_DEV_SUN8I_CE_PRNG
+ bool "Support for Allwinner Crypto Engine PRNG"
+ depends on CRYPTO_DEV_SUN8I_CE
+ select CRYPTO_RNG
+ help
+ Select this option if you want to provide kernel-side support for
+ the Pseudo-Random Number Generator found in the Crypto Engine.
+
+config CRYPTO_DEV_SUN8I_CE_TRNG
+ bool "Support for Allwinner Crypto Engine TRNG"
+ depends on CRYPTO_DEV_SUN8I_CE
+ select HW_RANDOM
+ help
+ Select this option if you want to provide kernel-side support for
+ the True Random Number Generator found in the Crypto Engine.
+
config CRYPTO_DEV_SUN8I_SS
tristate "Support for Allwinner Security System cryptographic offloader"
select CRYPTO_SKCIPHER
@@ -85,3 +111,20 @@ config CRYPTO_DEV_SUN8I_SS_DEBUG
Say y to enable sun8i-ss debug stats.
This will create /sys/kernel/debug/sun8i-ss/stats for displaying
the number of requests per flow and per algorithm.
+
+config CRYPTO_DEV_SUN8I_SS_PRNG
+ bool "Support for Allwinner Security System PRNG"
+ depends on CRYPTO_DEV_SUN8I_SS
+ select CRYPTO_RNG
+ help
+ Select this option if you want to provide kernel-side support for
+ the Pseudo-Random Number Generator found in the Security System.
+
+config CRYPTO_DEV_SUN8I_SS_HASH
+ bool "Enable support for hash on sun8i-ss"
+ depends on CRYPTO_DEV_SUN8I_SS
+ select MD5
+ select SHA1
+ select SHA256
+ help
+ Say y to enable support for hash algorithms.
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index dc35edd90034..1dff48558f53 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -9,6 +9,7 @@
* You could find the datasheet in Documentation/arm/sunxi.rst
*/
#include "sun4i-ss.h"
+#include <asm/unaligned.h>
#include <linux/scatterlist.h>
/* This is a totally arbitrary value */
@@ -196,7 +197,7 @@ static int sun4i_hash(struct ahash_request *areq)
struct sg_mapping_iter mi;
int in_r, err = 0;
size_t copied = 0;
- __le32 wb = 0;
+ u32 wb = 0;
dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
__func__, crypto_tfm_alg_name(areq->base.tfm),
@@ -408,7 +409,7 @@ hash_final:
nbw = op->len - 4 * nwait;
if (nbw) {
- wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4));
+ wb = le32_to_cpup((__le32 *)(op->buf + nwait * 4));
wb &= GENMASK((nbw * 8) - 1, 0);
op->byte_count += nbw;
@@ -417,7 +418,7 @@ hash_final:
/* write the remaining bytes of the nbw buffer */
wb |= ((1 << 7) << (nbw * 8));
- bf[j++] = le32_to_cpu(wb);
+ ((__le32 *)bf)[j++] = cpu_to_le32(wb);
/*
* number of space to pad to obtain 64o minus 8(size) minus 4 (final 1)
@@ -479,16 +480,16 @@ hash_final:
/* Get the hash from the device */
if (op->mode == SS_OP_SHA1) {
for (i = 0; i < 5; i++) {
+ v = readl(ss->base + SS_MD0 + i * 4);
if (ss->variant->sha1_in_be)
- v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
+ put_unaligned_le32(v, areq->result + i * 4);
else
- v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
- memcpy(areq->result + i * 4, &v, 4);
+ put_unaligned_be32(v, areq->result + i * 4);
}
} else {
for (i = 0; i < 4; i++) {
- v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
- memcpy(areq->result + i * 4, &v, 4);
+ v = readl(ss->base + SS_MD0 + i * 4);
+ put_unaligned_le32(v, areq->result + i * 4);
}
}
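Editorial note: the sun4i-ss change above leans on the helpers from <asm/unaligned.h>: put_unaligned_le32()/put_unaligned_be32() store a value in a fixed byte order at a destination that need not be 32-bit aligned, replacing the open-coded cpu_to_le32()/cpu_to_be32() plus memcpy() pairs. A minimal sketch of the equivalence (illustration only, not part of the patch):

/* Both helpers store v little-endian at an arbitrary, possibly
 * unaligned, offset of the result buffer. */
#include <linux/types.h>
#include <linux/string.h>
#include <asm/unaligned.h>

static void store_le32_open_coded(u32 v, u8 *dst)
{
        __le32 tmp = cpu_to_le32(v);

        memcpy(dst, &tmp, 4);
}

static void store_le32_helper(u32 v, u8 *dst)
{
        put_unaligned_le32(v, dst);     /* same bytes, no alignment assumption */
}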
diff --git a/drivers/crypto/allwinner/sun8i-ce/Makefile b/drivers/crypto/allwinner/sun8i-ce/Makefile
index 08b68c3c1ca9..0842eb2d9408 100644
--- a/drivers/crypto/allwinner/sun8i-ce/Makefile
+++ b/drivers/crypto/allwinner/sun8i-ce/Makefile
@@ -1,2 +1,5 @@
obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce.o
sun8i-ce-y += sun8i-ce-core.o sun8i-ce-cipher.o
+sun8i-ce-$(CONFIG_CRYPTO_DEV_SUN8I_CE_HASH) += sun8i-ce-hash.o
+sun8i-ce-$(CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG) += sun8i-ce-prng.o
+sun8i-ce-$(CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG) += sun8i-ce-trng.o
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index b4d5fea27d20..33707a2e55ff 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -75,8 +75,9 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
return err;
}
-static int sun8i_ce_cipher(struct skcipher_request *areq)
+static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
{
+ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_ce_dev *ce = op->ce;
@@ -87,8 +88,6 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
struct ce_task *cet;
struct scatterlist *sg;
unsigned int todo, len, offset, ivsize;
- dma_addr_t addr_iv = 0, addr_key = 0;
- void *backup_iv = NULL;
u32 common, sym;
int flow, i;
int nr_sgs = 0;
@@ -119,7 +118,7 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
common |= rctx->op_dir | CE_COMM_INT;
cet->t_common_ctl = cpu_to_le32(common);
/* CTS and recent CE (H6) need length in bytes, in word otherwise */
- if (ce->variant->has_t_dlen_in_bytes)
+ if (ce->variant->cipher_t_dlen_in_bytes)
cet->t_dlen = cpu_to_le32(areq->cryptlen);
else
cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);
@@ -141,41 +140,41 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
cet->t_sym_ctl = cpu_to_le32(sym);
cet->t_asym_ctl = 0;
- addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
- cet->t_key = cpu_to_le32(addr_key);
- if (dma_mapping_error(ce->dev, addr_key)) {
+ rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ce->dev, rctx->addr_key)) {
dev_err(ce->dev, "Cannot DMA MAP KEY\n");
err = -EFAULT;
goto theend;
}
+ cet->t_key = cpu_to_le32(rctx->addr_key);
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
- chan->ivlen = ivsize;
- chan->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
- if (!chan->bounce_iv) {
+ rctx->ivlen = ivsize;
+ rctx->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
+ if (!rctx->bounce_iv) {
err = -ENOMEM;
goto theend_key;
}
if (rctx->op_dir & CE_DECRYPTION) {
- backup_iv = kzalloc(ivsize, GFP_KERNEL);
- if (!backup_iv) {
+ rctx->backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!rctx->backup_iv) {
err = -ENOMEM;
goto theend_key;
}
offset = areq->cryptlen - ivsize;
- scatterwalk_map_and_copy(backup_iv, areq->src, offset,
- ivsize, 0);
+ scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
+ offset, ivsize, 0);
}
- memcpy(chan->bounce_iv, areq->iv, ivsize);
- addr_iv = dma_map_single(ce->dev, chan->bounce_iv, chan->ivlen,
- DMA_TO_DEVICE);
- cet->t_iv = cpu_to_le32(addr_iv);
- if (dma_mapping_error(ce->dev, addr_iv)) {
+ memcpy(rctx->bounce_iv, areq->iv, ivsize);
+ rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
err = -ENOMEM;
goto theend_iv;
}
+ cet->t_iv = cpu_to_le32(rctx->addr_iv);
}
if (areq->src == areq->dst) {
@@ -235,7 +234,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
}
chan->timeout = areq->cryptlen;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+ rctx->nr_sgs = nr_sgs;
+ rctx->nr_sgd = nr_sgd;
+ return 0;
theend_sgs:
if (areq->src == areq->dst) {
@@ -248,34 +249,83 @@ theend_sgs:
theend_iv:
if (areq->iv && ivsize > 0) {
- if (addr_iv)
- dma_unmap_single(ce->dev, addr_iv, chan->ivlen,
- DMA_TO_DEVICE);
+ if (rctx->addr_iv)
+ dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, backup_iv, ivsize);
- kfree_sensitive(backup_iv);
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ kfree_sensitive(rctx->backup_iv);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- kfree(chan->bounce_iv);
+ kfree(rctx->bounce_iv);
}
theend_key:
- dma_unmap_single(ce->dev, addr_key, op->keylen, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
theend:
return err;
}
-static int sun8i_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
- int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+ int flow, err;
- err = sun8i_ce_cipher(breq);
+ flow = rctx->flow;
+ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
crypto_finalize_skcipher_request(engine, breq, err);
+ return 0;
+}
+
+static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
+{
+ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct sun8i_ce_flow *chan;
+ struct ce_task *cet;
+ unsigned int ivsize, offset;
+ int nr_sgs = rctx->nr_sgs;
+ int nr_sgd = rctx->nr_sgd;
+ int flow;
+
+ flow = rctx->flow;
+ chan = &ce->chanlist[flow];
+ cet = chan->tl;
+ ivsize = crypto_skcipher_ivsize(tfm);
+
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ if (nr_sgs > 0)
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+ if (areq->iv && ivsize > 0) {
+ if (cet->t_iv)
+ dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & CE_DECRYPTION) {
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ kfree_sensitive(rctx->backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
+ }
+ kfree(rctx->bounce_iv);
+ }
+
+ dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
return 0;
}
@@ -347,9 +397,9 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
crypto_tfm_alg_driver_name(&sktfm->base),
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
- op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
+ op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
+ op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
+ op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;
err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
@@ -366,10 +416,7 @@ void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
crypto_free_skcipher(op->fallback_tfm);
pm_runtime_put_sync_suspend(op->ce->dev);
}
@@ -391,10 +438,7 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
return -EINVAL;
}
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
op->keylen = keylen;
op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!op->key)
@@ -416,10 +460,7 @@ int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (err)
return err;
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
op->keylen = keylen;
op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!op->key)
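Editorial note: the rework above splits the old monolithic sun8i_ce_cipher() into the three crypto_engine callbacks, so DMA mapping happens in prepare_request, the hardware run in do_one_request, and all unmapping in unprepare_request. A condensed, hypothetical sketch of that wiring (the my_* names are illustrative only; error handling is omitted):

#include <linux/crypto.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

struct my_tfm_ctx {
        struct crypto_engine_ctx enginectx;    /* must stay the first member */
};

static int my_prepare(struct crypto_engine *engine, void *async_req)
{
        /* map key, IV and scatterlists; stash the handles in the request ctx */
        return 0;
}

static int my_run(struct crypto_engine *engine, void *async_req)
{
        struct skcipher_request *req =
                container_of(async_req, struct skcipher_request, base);
        int err = 0;    /* start the hardware task here and wait for completion */

        crypto_finalize_skcipher_request(engine, req, err);
        return 0;
}

static int my_unprepare(struct crypto_engine *engine, void *async_req)
{
        /* undo every DMA mapping done in my_prepare() */
        return 0;
}

static int my_init(struct crypto_tfm *tfm)
{
        struct my_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->enginectx.op.prepare_request   = my_prepare;
        ctx->enginectx.op.do_one_request    = my_run;
        ctx->enginectx.op.unprepare_request = my_unprepare;
        return 0;
}

The engine locates the crypto_engine_ctx through crypto_tfm_ctx(), which is why it sits at the start of the transform context, as it does in the driver's sun8i_cipher_tfm_ctx.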
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 138759dc8190..158422ff5695 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"
@@ -35,73 +36,108 @@
static const struct ce_variant ce_h3_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
+ .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+ CE_ALG_SHA384, CE_ALG_SHA512
+ },
.op_mode = { CE_OP_ECB, CE_OP_CBC
},
.ce_clks = {
{ "bus", 0, 200000000 },
{ "mod", 50000000, 0 },
- }
+ },
+ .esr = ESR_H3,
+ .prng = CE_ALG_PRNG,
+ .trng = CE_ID_NOTSUPP,
};
static const struct ce_variant ce_h5_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
+ .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+ CE_ID_NOTSUPP, CE_ID_NOTSUPP
+ },
.op_mode = { CE_OP_ECB, CE_OP_CBC
},
.ce_clks = {
{ "bus", 0, 200000000 },
{ "mod", 300000000, 0 },
- }
+ },
+ .esr = ESR_H5,
+ .prng = CE_ALG_PRNG,
+ .trng = CE_ID_NOTSUPP,
};
static const struct ce_variant ce_h6_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
+ .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+ CE_ALG_SHA384, CE_ALG_SHA512
+ },
.op_mode = { CE_OP_ECB, CE_OP_CBC
},
- .has_t_dlen_in_bytes = true,
+ .cipher_t_dlen_in_bytes = true,
+ .hash_t_dlen_in_bits = true,
+ .prng_t_dlen_in_bytes = true,
+ .trng_t_dlen_in_bytes = true,
.ce_clks = {
{ "bus", 0, 200000000 },
{ "mod", 300000000, 0 },
{ "ram", 0, 400000000 },
- }
+ },
+ .esr = ESR_H6,
+ .prng = CE_ALG_PRNG_V2,
+ .trng = CE_ALG_TRNG_V2,
};
static const struct ce_variant ce_a64_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
+ .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+ CE_ID_NOTSUPP, CE_ID_NOTSUPP
+ },
.op_mode = { CE_OP_ECB, CE_OP_CBC
},
.ce_clks = {
{ "bus", 0, 200000000 },
{ "mod", 300000000, 0 },
- }
+ },
+ .esr = ESR_A64,
+ .prng = CE_ALG_PRNG,
+ .trng = CE_ID_NOTSUPP,
};
static const struct ce_variant ce_r40_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
+ .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+ CE_ID_NOTSUPP, CE_ID_NOTSUPP
+ },
.op_mode = { CE_OP_ECB, CE_OP_CBC
},
.ce_clks = {
{ "bus", 0, 200000000 },
{ "mod", 300000000, 0 },
- }
+ },
+ .esr = ESR_R40,
+ .prng = CE_ALG_PRNG,
+ .trng = CE_ID_NOTSUPP,
};
/*
* sun8i_ce_get_engine_number() get the next channel slot
* This is a simple round-robin way of getting the next channel
+ * The flow 3 is reserved for xRNG operations
*/
int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce)
{
- return atomic_inc_return(&ce->flow) % MAXFLOW;
+ return atomic_inc_return(&ce->flow) % (MAXFLOW - 1);
}
int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
{
u32 v;
int err = 0;
+ struct ce_task *cet = ce->chanlist[flow].tl;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
ce->chanlist[flow].stat_req++;
@@ -120,7 +156,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
/* Be sure all data is written before enabling the task */
wmb();
- v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
+ /* Only H6 needs to write a part of t_common_ctl along with "1", but since it is ignored
+ * on older SoCs, we have no reason to complicate things.
+ */
+ v = 1 | ((le32_to_cpu(ce->chanlist[flow].tl->t_common_ctl) & 0x7F) << 8);
writel(v, ce->base + CE_TLR);
mutex_unlock(&ce->mlock);
@@ -128,19 +167,56 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
msecs_to_jiffies(ce->chanlist[flow].timeout));
if (ce->chanlist[flow].status == 0) {
- dev_err(ce->dev, "DMA timeout for %s\n", name);
+ dev_err(ce->dev, "DMA timeout for %s (tm=%d) on flow %d\n", name,
+ ce->chanlist[flow].timeout, flow);
err = -EFAULT;
}
/* No need to lock for this read, the channel is locked so
* nothing could modify the error value for this channel
*/
v = readl(ce->base + CE_ESR);
- if (v) {
+ switch (ce->variant->esr) {
+ case ESR_H3:
+ /* Sadly, the error bit is not per flow */
+ if (v) {
+ dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ err = -EFAULT;
+ print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+ cet, sizeof(struct ce_task), false);
+ }
+ if (v & CE_ERR_ALGO_NOTSUP)
+ dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
+ if (v & CE_ERR_DATALEN)
+ dev_err(ce->dev, "CE ERROR: data length error\n");
+ if (v & CE_ERR_KEYSRAM)
+ dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
+ break;
+ case ESR_A64:
+ case ESR_H5:
+ case ESR_R40:
v >>= (flow * 4);
+ v &= 0xF;
+ if (v) {
+ dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ err = -EFAULT;
+ print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+ cet, sizeof(struct ce_task), false);
+ }
+ if (v & CE_ERR_ALGO_NOTSUP)
+ dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
+ if (v & CE_ERR_DATALEN)
+ dev_err(ce->dev, "CE ERROR: data length error\n");
+ if (v & CE_ERR_KEYSRAM)
+ dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
+ break;
+ case ESR_H6:
+ v >>= (flow * 8);
v &= 0xFF;
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
err = -EFAULT;
+ print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+ cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -150,7 +226,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
if (v & CE_ERR_ADDR_INVALID)
dev_err(ce->dev, "CE ERROR: address invalid\n");
- }
+ if (v & CE_ERR_KEYLADDER)
+ dev_err(ce->dev, "CE ERROR: key ladder configuration error\n");
+ break;
+ }
return err;
}
@@ -280,13 +359,214 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.decrypt = sun8i_ce_skdecrypt,
}
},
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_HASH
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ce_algo_id = CE_ID_HASH_MD5,
+ .alg.hash = {
+ .init = sun8i_ce_hash_init,
+ .update = sun8i_ce_hash_update,
+ .final = sun8i_ce_hash_final,
+ .finup = sun8i_ce_hash_finup,
+ .digest = sun8i_ce_hash_digest,
+ .export = sun8i_ce_hash_export,
+ .import = sun8i_ce_hash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-sun8i-ce",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_hash_crainit,
+ .cra_exit = sun8i_ce_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ce_algo_id = CE_ID_HASH_SHA1,
+ .alg.hash = {
+ .init = sun8i_ce_hash_init,
+ .update = sun8i_ce_hash_update,
+ .final = sun8i_ce_hash_final,
+ .finup = sun8i_ce_hash_finup,
+ .digest = sun8i_ce_hash_digest,
+ .export = sun8i_ce_hash_export,
+ .import = sun8i_ce_hash_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sun8i-ce",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_hash_crainit,
+ .cra_exit = sun8i_ce_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ce_algo_id = CE_ID_HASH_SHA224,
+ .alg.hash = {
+ .init = sun8i_ce_hash_init,
+ .update = sun8i_ce_hash_update,
+ .final = sun8i_ce_hash_final,
+ .finup = sun8i_ce_hash_finup,
+ .digest = sun8i_ce_hash_digest,
+ .export = sun8i_ce_hash_export,
+ .import = sun8i_ce_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-sun8i-ce",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_hash_crainit,
+ .cra_exit = sun8i_ce_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ce_algo_id = CE_ID_HASH_SHA256,
+ .alg.hash = {
+ .init = sun8i_ce_hash_init,
+ .update = sun8i_ce_hash_update,
+ .final = sun8i_ce_hash_final,
+ .finup = sun8i_ce_hash_finup,
+ .digest = sun8i_ce_hash_digest,
+ .export = sun8i_ce_hash_export,
+ .import = sun8i_ce_hash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-sun8i-ce",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_hash_crainit,
+ .cra_exit = sun8i_ce_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ce_algo_id = CE_ID_HASH_SHA384,
+ .alg.hash = {
+ .init = sun8i_ce_hash_init,
+ .update = sun8i_ce_hash_update,
+ .final = sun8i_ce_hash_final,
+ .finup = sun8i_ce_hash_finup,
+ .digest = sun8i_ce_hash_digest,
+ .export = sun8i_ce_hash_export,
+ .import = sun8i_ce_hash_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-sun8i-ce",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_hash_crainit,
+ .cra_exit = sun8i_ce_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ce_algo_id = CE_ID_HASH_SHA512,
+ .alg.hash = {
+ .init = sun8i_ce_hash_init,
+ .update = sun8i_ce_hash_update,
+ .final = sun8i_ce_hash_final,
+ .finup = sun8i_ce_hash_finup,
+ .digest = sun8i_ce_hash_digest,
+ .export = sun8i_ce_hash_export,
+ .import = sun8i_ce_hash_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-sun8i-ce",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_hash_crainit,
+ .cra_exit = sun8i_ce_hash_craexit,
+ }
+ }
+ }
+},
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG
+{
+ .type = CRYPTO_ALG_TYPE_RNG,
+ .alg.rng = {
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "sun8i-ce-prng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct sun8i_ce_rng_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_prng_init,
+ .cra_exit = sun8i_ce_prng_exit,
+ },
+ .generate = sun8i_ce_prng_generate,
+ .seed = sun8i_ce_prng_seed,
+ .seedsize = PRNG_SEED_SIZE,
+ }
+},
+#endif
};
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-static int sun8i_ce_dbgfs_read(struct seq_file *seq, void *v)
+static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
{
struct sun8i_ce_dev *ce = seq->private;
- int i;
+ unsigned int i;
for (i = 0; i < MAXFLOW; i++)
seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req);
@@ -301,23 +581,28 @@ static int sun8i_ce_dbgfs_read(struct seq_file *seq, void *v)
ce_algs[i].alg.skcipher.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ ce_algs[i].alg.hash.halg.base.cra_driver_name,
+ ce_algs[i].alg.hash.halg.base.cra_name,
+ ce_algs[i].stat_req, ce_algs[i].stat_fb);
+ break;
+ case CRYPTO_ALG_TYPE_RNG:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ ce_algs[i].alg.rng.base.cra_driver_name,
+ ce_algs[i].alg.rng.base.cra_name,
+ ce_algs[i].stat_req, ce_algs[i].stat_bytes);
+ break;
}
}
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+ seq_printf(seq, "HWRNG %lu %lu\n",
+ ce->hwrng_stat_req, ce->hwrng_stat_bytes);
+#endif
return 0;
}
-static int sun8i_ce_dbgfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sun8i_ce_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations sun8i_ce_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = sun8i_ce_dbgfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs);
#endif
static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i)
@@ -482,7 +767,8 @@ static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
{
- int ce_method, err, id, i;
+ int ce_method, err, id;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
ce_algs[i].ce = ce;
@@ -515,6 +801,43 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
return err;
}
break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ id = ce_algs[i].ce_algo_id;
+ ce_method = ce->variant->alg_hash[id];
+ if (ce_method == CE_ID_NOTSUPP) {
+ dev_info(ce->dev,
+ "DEBUG: Algo of %s not supported\n",
+ ce_algs[i].alg.hash.halg.base.cra_name);
+ ce_algs[i].ce = NULL;
+ break;
+ }
+ dev_info(ce->dev, "Register %s\n",
+ ce_algs[i].alg.hash.halg.base.cra_name);
+ err = crypto_register_ahash(&ce_algs[i].alg.hash);
+ if (err) {
+ dev_err(ce->dev, "ERROR: Fail to register %s\n",
+ ce_algs[i].alg.hash.halg.base.cra_name);
+ ce_algs[i].ce = NULL;
+ return err;
+ }
+ break;
+ case CRYPTO_ALG_TYPE_RNG:
+ if (ce->variant->prng == CE_ID_NOTSUPP) {
+ dev_info(ce->dev,
+ "DEBUG: Algo of %s not supported\n",
+ ce_algs[i].alg.rng.base.cra_name);
+ ce_algs[i].ce = NULL;
+ break;
+ }
+ dev_info(ce->dev, "Register %s\n",
+ ce_algs[i].alg.rng.base.cra_name);
+ err = crypto_register_rng(&ce_algs[i].alg.rng);
+ if (err) {
+ dev_err(ce->dev, "Fail to register %s\n",
+ ce_algs[i].alg.rng.base.cra_name);
+ ce_algs[i].ce = NULL;
+ }
+ break;
default:
ce_algs[i].ce = NULL;
dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
@@ -525,7 +848,7 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
{
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
if (!ce_algs[i].ce)
@@ -536,6 +859,16 @@ static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
ce_algs[i].alg.skcipher.base.cra_name);
crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ dev_info(ce->dev, "Unregister %d %s\n", i,
+ ce_algs[i].alg.hash.halg.base.cra_name);
+ crypto_unregister_ahash(&ce_algs[i].alg.hash);
+ break;
+ case CRYPTO_ALG_TYPE_RNG:
+ dev_info(ce->dev, "Unregister %d %s\n", i,
+ ce_algs[i].alg.rng.base.cra_name);
+ crypto_unregister_rng(&ce_algs[i].alg.rng);
+ break;
}
}
}
@@ -573,14 +906,12 @@ static int sun8i_ce_probe(struct platform_device *pdev)
return irq;
ce->reset = devm_reset_control_get(&pdev->dev, NULL);
- if (IS_ERR(ce->reset)) {
- if (PTR_ERR(ce->reset) == -EPROBE_DEFER)
- return PTR_ERR(ce->reset);
- dev_err(&pdev->dev, "No reset control found\n");
- return PTR_ERR(ce->reset);
- }
+ if (IS_ERR(ce->reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset),
+ "No reset control found\n");
mutex_init(&ce->mlock);
+ mutex_init(&ce->rnglock);
err = sun8i_ce_allocate_chanlist(ce);
if (err)
@@ -605,6 +936,10 @@ static int sun8i_ce_probe(struct platform_device *pdev)
if (err < 0)
goto error_alg;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+ sun8i_ce_hwrng_register(ce);
+#endif
+
v = readl(ce->base + CE_CTR);
v >>= CE_DIE_ID_SHIFT;
v &= CE_DIE_ID_MASK;
@@ -634,6 +969,10 @@ static int sun8i_ce_remove(struct platform_device *pdev)
{
struct sun8i_ce_dev *ce = platform_get_drvdata(pdev);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+ sun8i_ce_hwrng_unregister(ce);
+#endif
+
sun8i_ce_unregister_algs(ce);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
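Editorial note: the debugfs conversion above works because DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs) (from <linux/seq_file.h>) generates the open handler and file_operations from a show routine that must be named <name>_show, hence the rename of sun8i_ce_dbgfs_read(). Its expansion is roughly the boilerplate the patch deletes:

/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs). */
static int sun8i_ce_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, sun8i_ce_debugfs_show, inode->i_private);
}

static const struct file_operations sun8i_ce_debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = sun8i_ce_debugfs_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

Because the generated fops keeps the existing sun8i_ce_debugfs_fops name, the debugfs_create_file() call site does not need to change.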
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
new file mode 100644
index 000000000000..fa2f1b4fad7b
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-hash.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file adds support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
+ *
+ * You could find the datasheet in Documentation/arm/sunxi/README
+ */
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include "sun8i-ce.h"
+
+int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
+{
+ struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct sun8i_ce_alg_template *algt;
+ int err;
+
+ memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ op->ce = algt->ce;
+
+ op->enginectx.op.do_one_request = sun8i_ce_hash_run;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ /* FALLBACK */
+ op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(algt->ce->dev, "Fallback driver could no be loaded\n");
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
+ algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct sun8i_ce_hash_reqctx) +
+ crypto_ahash_reqsize(op->fallback_tfm));
+
+ dev_info(op->ce->dev, "Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(tfm),
+ crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
+ err = pm_runtime_get_sync(op->ce->dev);
+ if (err < 0)
+ goto error_pm;
+ return 0;
+error_pm:
+ pm_runtime_put_noidle(op->ce->dev);
+ crypto_free_ahash(op->fallback_tfm);
+ return err;
+}
+
+void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
+{
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(tfmctx->fallback_tfm);
+ pm_runtime_put_sync_suspend(tfmctx->ce->dev);
+}
+
+int sun8i_ce_hash_init(struct ahash_request *areq)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+int sun8i_ce_hash_final(struct ahash_request *areq)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ce_alg_template *algt;
+#endif
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = areq->result;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt->stat_fb++;
+#endif
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+int sun8i_ce_hash_update(struct ahash_request *areq)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+int sun8i_ce_hash_finup(struct ahash_request *areq)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ce_alg_template *algt;
+#endif
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+ rctx->fallback_req.result = areq->result;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt->stat_fb++;
+#endif
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ce_alg_template *algt;
+#endif
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+ rctx->fallback_req.result = areq->result;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt->stat_fb++;
+#endif
+
+ return crypto_ahash_digest(&rctx->fallback_req);
+}
+
+static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
+{
+ struct scatterlist *sg;
+
+ if (areq->nbytes == 0)
+ return true;
+ /* we need to reserve one SG for padding one */
+ if (sg_nents(areq->src) > MAX_SG - 1)
+ return true;
+ sg = areq->src;
+ while (sg) {
+ if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return true;
+ sg = sg_next(sg);
+ }
+ return false;
+}
+
+int sun8i_ce_hash_digest(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_alg_template *algt;
+ struct sun8i_ce_dev *ce;
+ struct crypto_engine *engine;
+ struct scatterlist *sg;
+ int nr_sgs, e, i;
+
+ if (sun8i_ce_hash_need_fallback(areq))
+ return sun8i_ce_hash_digest_fb(areq);
+
+ nr_sgs = sg_nents(areq->src);
+ if (nr_sgs > MAX_SG - 1)
+ return sun8i_ce_hash_digest_fb(areq);
+
+ for_each_sg(areq->src, sg, nr_sgs, i) {
+ if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return sun8i_ce_hash_digest_fb(areq);
+ }
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ ce = algt->ce;
+
+ e = sun8i_ce_get_engine_number(ce);
+ rctx->flow = e;
+ engine = ce->chanlist[e].engine;
+
+ return crypto_transfer_hash_request_to_engine(engine, areq);
+}
+
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+{
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_alg_template *algt;
+ struct sun8i_ce_dev *ce;
+ struct sun8i_ce_flow *chan;
+ struct ce_task *cet;
+ struct scatterlist *sg;
+ int nr_sgs, flow, err;
+ unsigned int len;
+ u32 common;
+ u64 byte_count;
+ __le32 *bf;
+ void *buf;
+ int j, i, todo;
+ int nbw = 0;
+ u64 fill, min_fill;
+ __be64 *bebits;
+ __le64 *lebits;
+ void *result;
+ u64 bs;
+ int digestsize;
+ dma_addr_t addr_res, addr_pad;
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ ce = algt->ce;
+
+ bs = algt->alg.hash.halg.base.cra_blocksize;
+ digestsize = algt->alg.hash.halg.digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ digestsize = SHA256_DIGEST_SIZE;
+ if (digestsize == SHA384_DIGEST_SIZE)
+ digestsize = SHA512_DIGEST_SIZE;
+
+ /* the padding could be up to two blocks. */
+ buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA);
+ if (!buf)
+ return -ENOMEM;
+ bf = (__le32 *)buf;
+
+ result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
+ if (!result)
+ return -ENOMEM;
+
+ flow = rctx->flow;
+ chan = &ce->chanlist[flow];
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt->stat_req++;
+#endif
+ dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
+
+ cet = chan->tl;
+ memset(cet, 0, sizeof(struct ce_task));
+
+ cet->t_id = cpu_to_le32(flow);
+ common = ce->variant->alg_hash[algt->ce_algo_id];
+ common |= CE_COMM_INT;
+ cet->t_common_ctl = cpu_to_le32(common);
+
+ cet->t_sym_ctl = 0;
+ cet->t_asym_ctl = 0;
+
+ nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+
+ len = areq->nbytes;
+ for_each_sg(areq->src, sg, nr_sgs, i) {
+ cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
+ todo = min(len, sg_dma_len(sg));
+ cet->t_src[i].len = cpu_to_le32(todo / 4);
+ len -= todo;
+ }
+ if (len > 0) {
+ dev_err(ce->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend;
+ }
+ addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
+ cet->t_dst[0].addr = cpu_to_le32(addr_res);
+ cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
+ if (dma_mapping_error(ce->dev, addr_res)) {
+ dev_err(ce->dev, "DMA map dest\n");
+ err = -EINVAL;
+ goto theend;
+ }
+
+ byte_count = areq->nbytes;
+ j = 0;
+ bf[j++] = cpu_to_le32(0x80);
+
+ if (bs == 64) {
+ fill = 64 - (byte_count % 64);
+ min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
+ } else {
+ fill = 128 - (byte_count % 128);
+ min_fill = 4 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
+ }
+
+ if (fill < min_fill)
+ fill += bs;
+
+ j += (fill - min_fill) / sizeof(u32);
+
+ switch (algt->ce_algo_id) {
+ case CE_ID_HASH_MD5:
+ lebits = (__le64 *)&bf[j];
+ *lebits = cpu_to_le64(byte_count << 3);
+ j += 2;
+ break;
+ case CE_ID_HASH_SHA1:
+ case CE_ID_HASH_SHA224:
+ case CE_ID_HASH_SHA256:
+ bebits = (__be64 *)&bf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ break;
+ case CE_ID_HASH_SHA384:
+ case CE_ID_HASH_SHA512:
+ bebits = (__be64 *)&bf[j];
+ *bebits = cpu_to_be64(byte_count >> 61);
+ j += 2;
+ bebits = (__be64 *)&bf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ break;
+ }
+
+ addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
+ cet->t_src[i].addr = cpu_to_le32(addr_pad);
+ cet->t_src[i].len = cpu_to_le32(j);
+ if (dma_mapping_error(ce->dev, addr_pad)) {
+ dev_err(ce->dev, "DMA error on padding SG\n");
+ err = -EINVAL;
+ goto theend;
+ }
+
+ if (ce->variant->hash_t_dlen_in_bits)
+ cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
+ else
+ cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);
+
+ chan->timeout = areq->nbytes;
+
+ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+
+ dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+
+ kfree(buf);
+
+ memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+ kfree(result);
+theend:
+ crypto_finalize_hash_request(engine, breq, err);
+ return 0;
+}
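Editorial note: the padding built into bf[] above follows the usual Merkle-Damgard layout: a 0x80 marker word, zero fill, then the message length in bits (little-endian 64-bit for MD5, big-endian 64-bit for SHA-1/224/256, big-endian 128-bit for SHA-384/512). A stand-alone sketch of the same word-granular length computation for the 64-byte-block algorithms (illustration only, not driver code; the driver only reaches this path for byte counts that are multiples of 4):

#include <stdint.h>
#include <stdio.h>

static unsigned int pad_words(uint64_t byte_count)
{
        uint64_t fill = 64 - (byte_count % 64);
        /* one word for the 0x80 marker plus two words of bit length */
        uint64_t min_fill = 3 * sizeof(uint32_t);

        if (fill < min_fill)
                fill += 64;
        return fill / 4;        /* number of 32-bit words written to bf[] */
}

int main(void)
{
        /* 52 data bytes need 3 pad words (one block total),
         * 56 data bytes need 18 pad words (a second block). */
        printf("%u %u\n", pad_words(52), pad_words(56));
        return 0;
}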
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
new file mode 100644
index 000000000000..78503006949c
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-prng.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file handles the PRNG
+ *
+ * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ */
+#include "sun8i-ce.h"
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <crypto/internal/rng.h>
+
+int sun8i_ce_prng_init(struct crypto_tfm *tfm)
+{
+ struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ memset(ctx, 0, sizeof(struct sun8i_ce_rng_tfm_ctx));
+ return 0;
+}
+
+void sun8i_ce_prng_exit(struct crypto_tfm *tfm)
+{
+ struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ memzero_explicit(ctx->seed, ctx->slen);
+ kfree(ctx->seed);
+ ctx->seed = NULL;
+ ctx->slen = 0;
+}
+
+int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
+
+ if (ctx->seed && ctx->slen != slen) {
+ memzero_explicit(ctx->seed, ctx->slen);
+ kfree(ctx->seed);
+ ctx->slen = 0;
+ ctx->seed = NULL;
+ }
+ if (!ctx->seed)
+ ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA);
+ if (!ctx->seed)
+ return -ENOMEM;
+
+ memcpy(ctx->seed, seed, slen);
+ ctx->slen = slen;
+
+ return 0;
+}
+
+int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen)
+{
+ struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+ struct sun8i_ce_alg_template *algt;
+ struct sun8i_ce_dev *ce;
+ dma_addr_t dma_iv, dma_dst;
+ int err = 0;
+ int flow = 3;
+ unsigned int todo;
+ struct sun8i_ce_flow *chan;
+ struct ce_task *cet;
+ u32 common, sym;
+ void *d;
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.rng);
+ ce = algt->ce;
+
+ if (ctx->slen == 0) {
+ dev_err(ce->dev, "not seeded\n");
+ return -EINVAL;
+ }
+
+ /* we want dlen + seedsize rounded up to a multiple of PRNG_DATA_SIZE */
+ todo = dlen + ctx->slen + PRNG_DATA_SIZE * 2;
+ todo -= todo % PRNG_DATA_SIZE;
+
+ d = kzalloc(todo, GFP_KERNEL | GFP_DMA);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ dev_dbg(ce->dev, "%s PRNG slen=%u dlen=%u todo=%u multi=%u\n", __func__,
+ slen, dlen, todo, todo / PRNG_DATA_SIZE);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt->stat_req++;
+ algt->stat_bytes += todo;
+#endif
+
+ dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ce->dev, dma_iv)) {
+ dev_err(ce->dev, "Cannot DMA MAP IV\n");
+ goto err_iv;
+ }
+
+ dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ce->dev, dma_dst)) {
+ dev_err(ce->dev, "Cannot DMA MAP DST\n");
+ err = -EFAULT;
+ goto err_dst;
+ }
+
+ err = pm_runtime_get_sync(ce->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(ce->dev);
+ goto err_pm;
+ }
+
+ mutex_lock(&ce->rnglock);
+ chan = &ce->chanlist[flow];
+
+ cet = &chan->tl[0];
+ memset(cet, 0, sizeof(struct ce_task));
+
+ cet->t_id = cpu_to_le32(flow);
+ common = ce->variant->prng | CE_COMM_INT;
+ cet->t_common_ctl = cpu_to_le32(common);
+
+ /* recent CE (H6) need length in bytes, in word otherwise */
+ if (ce->variant->prng_t_dlen_in_bytes)
+ cet->t_dlen = cpu_to_le32(todo);
+ else
+ cet->t_dlen = cpu_to_le32(todo / 4);
+
+ sym = PRNG_LD;
+ cet->t_sym_ctl = cpu_to_le32(sym);
+ cet->t_asym_ctl = 0;
+
+ cet->t_key = cpu_to_le32(dma_iv);
+ cet->t_iv = cpu_to_le32(dma_iv);
+
+ cet->t_dst[0].addr = cpu_to_le32(dma_dst);
+ cet->t_dst[0].len = cpu_to_le32(todo / 4);
+ ce->chanlist[flow].timeout = 2000;
+
+ err = sun8i_ce_run_task(ce, 3, "PRNG");
+ mutex_unlock(&ce->rnglock);
+
+ pm_runtime_put(ce->dev);
+
+err_pm:
+ dma_unmap_single(ce->dev, dma_dst, todo, DMA_FROM_DEVICE);
+err_dst:
+ dma_unmap_single(ce->dev, dma_iv, ctx->slen, DMA_TO_DEVICE);
+
+ if (!err) {
+ memcpy(dst, d, dlen);
+ memcpy(ctx->seed, d + dlen, ctx->slen);
+ }
+ memzero_explicit(d, todo);
+err_iv:
+ kfree(d);
+err_mem:
+ return err;
+}
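Editorial note: from other kernel code, the PRNG registered above is reached through the generic crypto_rng API rather than directly. A minimal, hypothetical consumer might look like the sketch below; "stdrng" resolves to whichever registered RNG has the highest priority, which may or may not be this driver, and real callers should use genuine entropy for the seed.

#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/rng.h>

static int example_get_prng_bytes(u8 *out, unsigned int len)
{
        struct crypto_rng *rng;
        unsigned int slen;
        u8 *seed;
        int err;

        rng = crypto_alloc_rng("stdrng", 0, 0);
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        slen = crypto_rng_seedsize(rng);
        seed = kzalloc(slen, GFP_KERNEL);       /* placeholder seed */
        if (!seed) {
                crypto_free_rng(rng);
                return -ENOMEM;
        }

        err = crypto_rng_reset(rng, seed, slen);        /* maps to ->seed() */
        if (!err)
                err = crypto_rng_get_bytes(rng, out, len);      /* ->generate() */

        kfree(seed);
        crypto_free_rng(rng);
        return err;
}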
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
new file mode 100644
index 000000000000..654328160d19
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-trng.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file handles the TRNG
+ *
+ * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ */
+#include "sun8i-ce.h"
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/hw_random.h>
+/*
+ * Note that according to the algorithm ID, two versions of the TRNG exist:
+ * the first present in H3/H5/R40/A64 and the second present in H6.
+ * This file adds support for both, but only the second is working
+ * reliably according to rngtest.
+ */
+
+static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct sun8i_ce_dev *ce;
+ dma_addr_t dma_dst;
+ int err = 0;
+ int flow = 3;
+ unsigned int todo;
+ struct sun8i_ce_flow *chan;
+ struct ce_task *cet;
+ u32 common;
+ void *d;
+
+ ce = container_of(rng, struct sun8i_ce_dev, trng);
+
+ /* round the data length to a multiple of 32*/
+ todo = max + 32;
+ todo -= todo % 32;
+
+ d = kzalloc(todo, GFP_KERNEL | GFP_DMA);
+ if (!d)
+ return -ENOMEM;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ ce->hwrng_stat_req++;
+ ce->hwrng_stat_bytes += todo;
+#endif
+
+ dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ce->dev, dma_dst)) {
+ dev_err(ce->dev, "Cannot DMA MAP DST\n");
+ err = -EFAULT;
+ goto err_dst;
+ }
+
+ err = pm_runtime_get_sync(ce->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(ce->dev);
+ goto err_pm;
+ }
+
+ mutex_lock(&ce->rnglock);
+ chan = &ce->chanlist[flow];
+
+ cet = &chan->tl[0];
+ memset(cet, 0, sizeof(struct ce_task));
+
+ cet->t_id = cpu_to_le32(flow);
+ common = ce->variant->trng | CE_COMM_INT;
+ cet->t_common_ctl = cpu_to_le32(common);
+
+ /* recent CE (H6) need length in bytes, in word otherwise */
+ if (ce->variant->trng_t_dlen_in_bytes)
+ cet->t_dlen = cpu_to_le32(todo);
+ else
+ cet->t_dlen = cpu_to_le32(todo / 4);
+
+ cet->t_sym_ctl = 0;
+ cet->t_asym_ctl = 0;
+
+ cet->t_dst[0].addr = cpu_to_le32(dma_dst);
+ cet->t_dst[0].len = cpu_to_le32(todo / 4);
+ ce->chanlist[flow].timeout = todo;
+
+ err = sun8i_ce_run_task(ce, 3, "TRNG");
+ mutex_unlock(&ce->rnglock);
+
+ pm_runtime_put(ce->dev);
+
+err_pm:
+ dma_unmap_single(ce->dev, dma_dst, todo, DMA_FROM_DEVICE);
+
+ if (!err) {
+ memcpy(data, d, max);
+ err = max;
+ }
+ memzero_explicit(d, todo);
+err_dst:
+ kfree(d);
+ return err;
+}
+
+int sun8i_ce_hwrng_register(struct sun8i_ce_dev *ce)
+{
+ int ret;
+
+ if (ce->variant->trng == CE_ID_NOTSUPP) {
+ dev_info(ce->dev, "TRNG not supported\n");
+ return 0;
+ }
+ ce->trng.name = "sun8i Crypto Engine TRNG";
+ ce->trng.read = sun8i_ce_trng_read;
+ ce->trng.quality = 1000;
+
+ ret = hwrng_register(&ce->trng);
+ if (ret)
+		dev_err(ce->dev, "Failed to register the TRNG\n");
+ return ret;
+}
+
+void sun8i_ce_hwrng_unregister(struct sun8i_ce_dev *ce)
+{
+ if (ce->variant->trng == CE_ID_NOTSUPP)
+ return;
+ hwrng_unregister(&ce->trng);
+}
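
Once sun8i_ce_hwrng_register() succeeds, the TRNG is exposed through the hwrng framework and, when selected as the current rng, can be read from userspace via /dev/hwrng. The reader below is purely illustrative and not part of the patch:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[64];
	int fd = open("/dev/hwrng", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	/* when this device is the active hwrng, the data comes from
	 * sun8i_ce_trng_read(), which rounds the requested length up
	 * to a multiple of 32 bytes internally */
	n = read(fd, buf, sizeof(buf));
	close(fd);
	return n == (ssize_t)sizeof(buf) ? 0 : 1;
}
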
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 963645fe4adb..558027516aed 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -12,6 +12,11 @@
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
+#include <linux/hw_random.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/rng.h>
+#include <crypto/sha.h>
/* CE Registers */
#define CE_TDQ 0x00
@@ -45,6 +50,16 @@
#define CE_ALG_AES 0
#define CE_ALG_DES 1
#define CE_ALG_3DES 2
+#define CE_ALG_MD5 16
+#define CE_ALG_SHA1 17
+#define CE_ALG_SHA224 18
+#define CE_ALG_SHA256 19
+#define CE_ALG_SHA384 20
+#define CE_ALG_SHA512 21
+#define CE_ALG_TRNG 48
+#define CE_ALG_PRNG 49
+#define CE_ALG_TRNG_V2 0x1c
+#define CE_ALG_PRNG_V2 0x1d
/* Used in ce_variant */
#define CE_ID_NOTSUPP 0xFF
@@ -54,6 +69,14 @@
#define CE_ID_CIPHER_DES3 2
#define CE_ID_CIPHER_MAX 3
+#define CE_ID_HASH_MD5 0
+#define CE_ID_HASH_SHA1 1
+#define CE_ID_HASH_SHA224 2
+#define CE_ID_HASH_SHA256 3
+#define CE_ID_HASH_SHA384 4
+#define CE_ID_HASH_SHA512 5
+#define CE_ID_HASH_MAX 6
+
#define CE_ID_OP_ECB 0
#define CE_ID_OP_CBC 1
#define CE_ID_OP_MAX 2
@@ -65,6 +88,16 @@
#define CE_ERR_ADDR_INVALID BIT(5)
#define CE_ERR_KEYLADDER BIT(6)
+#define ESR_H3 0
+#define ESR_A64 1
+#define ESR_R40 2
+#define ESR_H5 3
+#define ESR_H6 4
+
+#define PRNG_DATA_SIZE (160 / 8)
+#define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8)
+#define PRNG_LD BIT(17)
+
#define CE_DIE_ID_SHIFT 16
#define CE_DIE_ID_MASK 0x07
@@ -90,16 +123,34 @@ struct ce_clock {
* struct ce_variant - Describe CE capability for each variant hardware
* @alg_cipher: list of supported ciphers. for each CE_ID_ this will give the
* coresponding CE_ALG_XXX value
+ * @alg_hash: list of supported hashes. for each CE_ID_ this will give the
+ * corresponding CE_ALG_XXX value
* @op_mode: list of supported block modes
- * @has_t_dlen_in_bytes: Does the request size for cipher is in
+ * @cipher_t_dlen_in_bytes:	Whether the cipher request size is given in
+ *				bytes or words
+ * @hash_t_dlen_in_bits:	Whether the hash request size is given in
+ *				bits or words
+ * @prng_t_dlen_in_bytes:	Whether the PRNG request size is given in
+ *				bytes or words
+ * @trng_t_dlen_in_bytes:	Whether the TRNG request size is given in
* bytes or words
* @ce_clks: list of clocks needed by this variant
+ * @esr: The type of error register
+ * @prng: The CE_ALG_XXX value for the PRNG
+ * @trng: The CE_ALG_XXX value for the TRNG
*/
struct ce_variant {
char alg_cipher[CE_ID_CIPHER_MAX];
+ char alg_hash[CE_ID_HASH_MAX];
u32 op_mode[CE_ID_OP_MAX];
- bool has_t_dlen_in_bytes;
+ bool cipher_t_dlen_in_bytes;
+ bool hash_t_dlen_in_bits;
+ bool prng_t_dlen_in_bytes;
+ bool trng_t_dlen_in_bytes;
struct ce_clock ce_clks[CE_MAX_CLOCKS];
+ int esr;
+ unsigned char prng;
+ unsigned char trng;
};
struct sginfo {
@@ -129,8 +180,6 @@ struct ce_task {
/*
* struct sun8i_ce_flow - Information used by each flow
* @engine: ptr to the crypto_engine for this flow
- * @bounce_iv: buffer which contain the IV
- * @ivlen: size of bounce_iv
* @complete: completion for the current task on this flow
* @status: set to 1 by interrupt if task is done
* @t_phy: Physical address of task
@@ -139,8 +188,6 @@ struct ce_task {
*/
struct sun8i_ce_flow {
struct crypto_engine *engine;
- void *bounce_iv;
- unsigned int ivlen;
struct completion complete;
int status;
dma_addr_t t_phy;
@@ -158,6 +205,7 @@ struct sun8i_ce_flow {
* @reset: pointer to reset controller
* @dev: the platform device
* @mlock: Control access to device registers
+ * @rnglock: Control access to the RNG (dedicated channel 3)
* @chanlist: array of all flow
* @flow: flow to use in next request
* @variant: pointer to variant specific data
@@ -170,6 +218,7 @@ struct sun8i_ce_dev {
struct reset_control *reset;
struct device *dev;
struct mutex mlock;
+ struct mutex rnglock;
struct sun8i_ce_flow *chanlist;
atomic_t flow;
const struct ce_variant *variant;
@@ -177,17 +226,38 @@ struct sun8i_ce_dev {
struct dentry *dbgfs_dir;
struct dentry *dbgfs_stats;
#endif
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+ struct hwrng trng;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ unsigned long hwrng_stat_req;
+ unsigned long hwrng_stat_bytes;
+#endif
+#endif
};
/*
* struct sun8i_cipher_req_ctx - context for a skcipher request
* @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request
+ * @backup_iv: buffer which contains the next IV to store
+ * @bounce_iv: buffer which contains the IV
+ * @ivlen: size of bounce_iv
+ * @nr_sgs: The number of source SG (as given by dma_map_sg())
+ * @nr_sgd: The number of destination SG (as given by dma_map_sg())
+ * @addr_iv: The IV addr returned by dma_map_single, needs to be unmapped later
+ * @addr_key: The key addr returned by dma_map_single, needs to be unmapped later
* @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
u32 op_dir;
int flow;
+ void *backup_iv;
+ void *bounce_iv;
+ unsigned int ivlen;
+ int nr_sgs;
+ int nr_sgd;
+ dma_addr_t addr_iv;
+ dma_addr_t addr_key;
struct skcipher_request fallback_req; // keep at the end
};
@@ -208,6 +278,38 @@ struct sun8i_cipher_tfm_ctx {
};
/*
+ * struct sun8i_ce_hash_tfm_ctx - context for an ahash TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @ce: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct sun8i_ce_hash_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ struct sun8i_ce_dev *ce;
+ struct crypto_ahash *fallback_tfm;
+};
+
+/*
+ * struct sun8i_ce_hash_reqctx - context for an ahash request
+ * @fallback_req: pre-allocated fallback request
+ * @flow: the flow to use for this request
+ */
+struct sun8i_ce_hash_reqctx {
+ struct ahash_request fallback_req;
+ int flow;
+};
+
+/*
+ * struct sun8i_ce_rng_tfm_ctx - context for the PRNG TFM
+ * @seed: The seed to use
+ * @slen: The size of the seed
+ */
+struct sun8i_ce_rng_tfm_ctx {
+ void *seed;
+ unsigned int slen;
+};
+
+/*
* struct sun8i_ce_alg_template - crypto_alg template
* @type: the CRYPTO_ALG_TYPE for this template
* @ce_algo_id: the CE_ID for this template
@@ -217,6 +319,7 @@ struct sun8i_cipher_tfm_ctx {
* @alg: one of sub struct must be used
* @stat_req: number of request done on this template
* @stat_fb: number of request which has fallbacked
+ * @stat_bytes: total data size done by this template
*/
struct sun8i_ce_alg_template {
u32 type;
@@ -225,10 +328,13 @@ struct sun8i_ce_alg_template {
struct sun8i_ce_dev *ce;
union {
struct skcipher_alg skcipher;
+ struct ahash_alg hash;
+ struct rng_alg rng;
} alg;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
unsigned long stat_req;
unsigned long stat_fb;
+ unsigned long stat_bytes;
#endif
};
@@ -246,3 +352,24 @@ int sun8i_ce_skencrypt(struct skcipher_request *areq);
int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce);
int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name);
+
+int sun8i_ce_hash_crainit(struct crypto_tfm *tfm);
+void sun8i_ce_hash_craexit(struct crypto_tfm *tfm);
+int sun8i_ce_hash_init(struct ahash_request *areq);
+int sun8i_ce_hash_export(struct ahash_request *areq, void *out);
+int sun8i_ce_hash_import(struct ahash_request *areq, const void *in);
+int sun8i_ce_hash(struct ahash_request *areq);
+int sun8i_ce_hash_final(struct ahash_request *areq);
+int sun8i_ce_hash_update(struct ahash_request *areq);
+int sun8i_ce_hash_finup(struct ahash_request *areq);
+int sun8i_ce_hash_digest(struct ahash_request *areq);
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq);
+
+int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen);
+int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
+void sun8i_ce_prng_exit(struct crypto_tfm *tfm);
+int sun8i_ce_prng_init(struct crypto_tfm *tfm);
+
+int sun8i_ce_hwrng_register(struct sun8i_ce_dev *ce);
+void sun8i_ce_hwrng_unregister(struct sun8i_ce_dev *ce);
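
To show how the new ce_variant fields relate to each other, here is a hypothetical table entry for an H6-class engine. The real variant tables live in sun8i-ce-core.c (not part of this hunk), so every value below is only a sketch built from the constants added above:

static const struct ce_variant ce_h6_variant_example = {
	/* map the CE_ID_HASH_* slots to the CE_ALG_* opcodes of this SoC */
	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
		      CE_ALG_SHA384, CE_ALG_SHA512 },
	/* recent engines take task lengths in bytes (bits for hashes) */
	.cipher_t_dlen_in_bytes = true,
	.hash_t_dlen_in_bits = true,
	.prng_t_dlen_in_bytes = true,
	.trng_t_dlen_in_bytes = true,
	.esr = ESR_H6,
	.prng = CE_ALG_PRNG_V2,
	.trng = CE_ALG_TRNG_V2,
};
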
diff --git a/drivers/crypto/allwinner/sun8i-ss/Makefile b/drivers/crypto/allwinner/sun8i-ss/Makefile
index add7b0543fd5..aabfd893c817 100644
--- a/drivers/crypto/allwinner/sun8i-ss/Makefile
+++ b/drivers/crypto/allwinner/sun8i-ss/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss.o
sun8i-ss-y += sun8i-ss-core.o sun8i-ss-cipher.o
+sun8i-ss-$(CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG) += sun8i-ss-prng.o
+sun8i-ss-$(CONFIG_CRYPTO_DEV_SUN8I_SS_HASH) += sun8i-ss-hash.o
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 7b39b4495571..ed2a69f82e1c 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -248,7 +248,6 @@ theend_iv:
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & SS_DECRYPTION) {
memcpy(areq->iv, backup_iv, ivsize);
- memzero_explicit(backup_iv, ivsize);
kfree_sensitive(backup_iv);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
@@ -368,10 +367,7 @@ void sun8i_ss_cipher_exit(struct crypto_tfm *tfm)
{
struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
crypto_free_skcipher(op->fallback_tfm);
pm_runtime_put_sync(op->ss->dev);
}
@@ -393,10 +389,7 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
return -EINVAL;
}
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
op->keylen = keylen;
op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!op->key)
@@ -419,10 +412,7 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
return -EINVAL;
}
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
op->keylen = keylen;
op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!op->key)
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 9a23515783a6..e0ddc684798d 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ss.h"
@@ -40,6 +41,8 @@ static const struct ss_variant ss_a80_variant = {
static const struct ss_variant ss_a83t_variant = {
.alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
},
+ .alg_hash = { SS_ALG_MD5, SS_ALG_SHA1, SS_ALG_SHA224, SS_ALG_SHA256,
+ },
.op_mode = { SS_OP_ECB, SS_OP_CBC,
},
.ss_clks = {
@@ -61,7 +64,7 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx
const char *name)
{
int flow = rctx->flow;
- u32 v = 1;
+ u32 v = SS_START;
int i;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
@@ -264,13 +267,154 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.decrypt = sun8i_ss_skdecrypt,
}
},
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG
+{
+ .type = CRYPTO_ALG_TYPE_RNG,
+ .alg.rng = {
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "sun8i-ss-prng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct sun8i_ss_rng_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ss_prng_init,
+ .cra_exit = sun8i_ss_prng_exit,
+ },
+ .generate = sun8i_ss_prng_generate,
+ .seed = sun8i_ss_prng_seed,
+ .seedsize = PRNG_SEED_SIZE,
+ }
+},
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_HASH
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ss_algo_id = SS_ID_HASH_MD5,
+ .alg.hash = {
+ .init = sun8i_ss_hash_init,
+ .update = sun8i_ss_hash_update,
+ .final = sun8i_ss_hash_final,
+ .finup = sun8i_ss_hash_finup,
+ .digest = sun8i_ss_hash_digest,
+ .export = sun8i_ss_hash_export,
+ .import = sun8i_ss_hash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-sun8i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ss_hash_crainit,
+ .cra_exit = sun8i_ss_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ss_algo_id = SS_ID_HASH_SHA1,
+ .alg.hash = {
+ .init = sun8i_ss_hash_init,
+ .update = sun8i_ss_hash_update,
+ .final = sun8i_ss_hash_final,
+ .finup = sun8i_ss_hash_finup,
+ .digest = sun8i_ss_hash_digest,
+ .export = sun8i_ss_hash_export,
+ .import = sun8i_ss_hash_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sun8i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ss_hash_crainit,
+ .cra_exit = sun8i_ss_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ss_algo_id = SS_ID_HASH_SHA224,
+ .alg.hash = {
+ .init = sun8i_ss_hash_init,
+ .update = sun8i_ss_hash_update,
+ .final = sun8i_ss_hash_final,
+ .finup = sun8i_ss_hash_finup,
+ .digest = sun8i_ss_hash_digest,
+ .export = sun8i_ss_hash_export,
+ .import = sun8i_ss_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-sun8i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ss_hash_crainit,
+ .cra_exit = sun8i_ss_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ss_algo_id = SS_ID_HASH_SHA256,
+ .alg.hash = {
+ .init = sun8i_ss_hash_init,
+ .update = sun8i_ss_hash_update,
+ .final = sun8i_ss_hash_final,
+ .finup = sun8i_ss_hash_finup,
+ .digest = sun8i_ss_hash_digest,
+ .export = sun8i_ss_hash_export,
+ .import = sun8i_ss_hash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-sun8i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ss_hash_crainit,
+ .cra_exit = sun8i_ss_hash_craexit,
+ }
+ }
+ }
+},
+#endif
};
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
-static int sun8i_ss_dbgfs_read(struct seq_file *seq, void *v)
+static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
{
struct sun8i_ss_dev *ss = seq->private;
- int i;
+ unsigned int i;
for (i = 0; i < MAXFLOW; i++)
seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req);
@@ -280,28 +424,29 @@ static int sun8i_ss_dbgfs_read(struct seq_file *seq, void *v)
continue;
switch (ss_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- seq_printf(seq, "%s %s %lu %lu\n",
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
ss_algs[i].alg.skcipher.base.cra_driver_name,
ss_algs[i].alg.skcipher.base.cra_name,
ss_algs[i].stat_req, ss_algs[i].stat_fb);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ seq_printf(seq, "%s %s reqs=%lu tsize=%lu\n",
+ ss_algs[i].alg.rng.base.cra_driver_name,
+ ss_algs[i].alg.rng.base.cra_name,
+ ss_algs[i].stat_req, ss_algs[i].stat_bytes);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+ ss_algs[i].alg.hash.halg.base.cra_driver_name,
+ ss_algs[i].alg.hash.halg.base.cra_name,
+ ss_algs[i].stat_req, ss_algs[i].stat_fb);
+ break;
}
}
return 0;
}
-static int sun8i_ss_dbgfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sun8i_ss_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations sun8i_ss_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = sun8i_ss_dbgfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sun8i_ss_debugfs);
#endif
static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
@@ -415,7 +560,8 @@ static void sun8i_ss_pm_exit(struct sun8i_ss_dev *ss)
static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
{
- int ss_method, err, id, i;
+ int ss_method, err, id;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
ss_algs[i].ss = ss;
@@ -448,6 +594,34 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
return err;
}
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ err = crypto_register_rng(&ss_algs[i].alg.rng);
+ if (err) {
+				dev_err(ss->dev, "Failed to register %s\n",
+ ss_algs[i].alg.rng.base.cra_name);
+ ss_algs[i].ss = NULL;
+ }
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ id = ss_algs[i].ss_algo_id;
+ ss_method = ss->variant->alg_hash[id];
+ if (ss_method == SS_ID_NOTSUPP) {
+ dev_info(ss->dev,
+ "DEBUG: Algo of %s not supported\n",
+ ss_algs[i].alg.hash.halg.base.cra_name);
+ ss_algs[i].ss = NULL;
+ break;
+ }
+ dev_info(ss->dev, "Register %s\n",
+ ss_algs[i].alg.hash.halg.base.cra_name);
+ err = crypto_register_ahash(&ss_algs[i].alg.hash);
+ if (err) {
+				dev_err(ss->dev, "ERROR: Failed to register %s\n",
+ ss_algs[i].alg.hash.halg.base.cra_name);
+ ss_algs[i].ss = NULL;
+ return err;
+ }
+ break;
default:
ss_algs[i].ss = NULL;
dev_err(ss->dev, "ERROR: tried to register an unknown algo\n");
@@ -458,7 +632,7 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
{
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
if (!ss_algs[i].ss)
@@ -469,6 +643,16 @@ static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
ss_algs[i].alg.skcipher.base.cra_name);
crypto_unregister_skcipher(&ss_algs[i].alg.skcipher);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ dev_info(ss->dev, "Unregister %d %s\n", i,
+ ss_algs[i].alg.rng.base.cra_name);
+ crypto_unregister_rng(&ss_algs[i].alg.rng);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ dev_info(ss->dev, "Unregister %d %s\n", i,
+ ss_algs[i].alg.hash.halg.base.cra_name);
+ crypto_unregister_ahash(&ss_algs[i].alg.hash);
+ break;
}
}
}
@@ -545,12 +729,9 @@ static int sun8i_ss_probe(struct platform_device *pdev)
return irq;
ss->reset = devm_reset_control_get(&pdev->dev, NULL);
- if (IS_ERR(ss->reset)) {
- if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
- return PTR_ERR(ss->reset);
- dev_err(&pdev->dev, "No reset control found\n");
- return PTR_ERR(ss->reset);
- }
+ if (IS_ERR(ss->reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ss->reset),
+ "No reset control found\n");
mutex_init(&ss->mlock);
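
The DEFINE_SHOW_ATTRIBUTE(sun8i_ss_debugfs) line replaces the hand-written open callback and file_operations that this patch removes. Roughly, the seq_file macro generates the equivalent of the following (simplified sketch, shown only for orientation):

static int sun8i_ss_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sun8i_ss_debugfs_show, inode->i_private);
}

static const struct file_operations sun8i_ss_debugfs_fops = {
	.owner	 = THIS_MODULE,
	.open	 = sun8i_ss_debugfs_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

so the existing debugfs_create_file() call passing &sun8i_ss_debugfs_fops keeps working with the renamed show function.
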
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
new file mode 100644
index 000000000000..b6ab2054f217
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ss-hash.c - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file adds support for MD5 and SHA1/SHA224/SHA256.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi.rst
+ */
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include "sun8i-ss.h"
+
+int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
+{
+ struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct sun8i_ss_alg_template *algt;
+ int err;
+
+ memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx));
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ op->ss = algt->ss;
+
+ op->enginectx.op.do_one_request = sun8i_ss_hash_run;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ /* FALLBACK */
+ op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+		dev_err(algt->ss->dev, "Fallback driver could not be loaded\n");
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
+ algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct sun8i_ss_hash_reqctx) +
+ crypto_ahash_reqsize(op->fallback_tfm));
+
+ dev_info(op->ss->dev, "Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(tfm),
+ crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
+ err = pm_runtime_get_sync(op->ss->dev);
+ if (err < 0)
+ goto error_pm;
+ return 0;
+error_pm:
+ pm_runtime_put_noidle(op->ss->dev);
+ crypto_free_ahash(op->fallback_tfm);
+ return err;
+}
+
+void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
+{
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(tfmctx->fallback_tfm);
+ pm_runtime_put_sync_suspend(tfmctx->ss->dev);
+}
+
+int sun8i_ss_hash_init(struct ahash_request *areq)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+int sun8i_ss_hash_final(struct ahash_request *areq)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ss_alg_template *algt;
+#endif
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = areq->result;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt->stat_fb++;
+#endif
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+int sun8i_ss_hash_update(struct ahash_request *areq)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+int sun8i_ss_hash_finup(struct ahash_request *areq)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ss_alg_template *algt;
+#endif
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+ rctx->fallback_req.result = areq->result;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt->stat_fb++;
+#endif
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ss_alg_template *algt;
+#endif
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+ rctx->fallback_req.result = areq->result;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt->stat_fb++;
+#endif
+
+ return crypto_ahash_digest(&rctx->fallback_req);
+}
+
+static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
+ struct sun8i_ss_hash_reqctx *rctx,
+ const char *name)
+{
+ int flow = rctx->flow;
+ u32 v = SS_START;
+ int i;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ ss->flows[flow].stat_req++;
+#endif
+
+ /* choose between stream0/stream1 */
+ if (flow)
+ v |= SS_FLOW1;
+ else
+ v |= SS_FLOW0;
+
+ v |= rctx->method;
+
+ for (i = 0; i < MAX_SG; i++) {
+ if (!rctx->t_dst[i].addr)
+ break;
+
+ mutex_lock(&ss->mlock);
+ if (i > 0) {
+ v |= BIT(17);
+ writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG);
+ writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG);
+ }
+
+ dev_dbg(ss->dev,
+ "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n",
+ i, flow, name, v,
+ rctx->t_src[i].len, rctx->t_dst[i].len,
+ rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr);
+
+ writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
+ writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
+ writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
+ writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);
+
+ reinit_completion(&ss->flows[flow].complete);
+ ss->flows[flow].status = 0;
+ wmb();
+
+ writel(v, ss->base + SS_CTL_REG);
+ mutex_unlock(&ss->mlock);
+ wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
+ msecs_to_jiffies(2000));
+ if (ss->flows[flow].status == 0) {
+ dev_err(ss->dev, "DMA timeout for %s\n", name);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
+{
+ struct scatterlist *sg;
+
+ if (areq->nbytes == 0)
+ return true;
+	/* we need to reserve one SG entry for the padding */
+ if (sg_nents(areq->src) > MAX_SG - 1)
+ return true;
+ sg = areq->src;
+ while (sg) {
+		/* The SS can only hash full blocks; since it supports only
+		 * MD5, SHA1, SHA224 and SHA256, the block size is always 64.
+		 * TODO: handle requests whose last SG length is not a
+		 * multiple of 64, which will require copying the data into
+		 * a new 64-byte SG.
+ */
+ if (sg->length % 64 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return true;
+ sg = sg_next(sg);
+ }
+ return false;
+}
+
+int sun8i_ss_hash_digest(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ss_alg_template *algt;
+ struct sun8i_ss_dev *ss;
+ struct crypto_engine *engine;
+ struct scatterlist *sg;
+ int nr_sgs, e, i;
+
+ if (sun8i_ss_hash_need_fallback(areq))
+ return sun8i_ss_hash_digest_fb(areq);
+
+ nr_sgs = sg_nents(areq->src);
+ if (nr_sgs > MAX_SG - 1)
+ return sun8i_ss_hash_digest_fb(areq);
+
+ for_each_sg(areq->src, sg, nr_sgs, i) {
+ if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return sun8i_ss_hash_digest_fb(areq);
+ }
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ ss = algt->ss;
+
+ e = sun8i_ss_get_engine_number(ss);
+ rctx->flow = e;
+ engine = ss->flows[e].engine;
+
+ return crypto_transfer_hash_request_to_engine(engine, areq);
+}
+
+/* sun8i_ss_hash_run - run an ahash request
+ * Send the request data to the SS along with an extra SG carrying the padding
+ */
+int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
+{
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ss_alg_template *algt;
+ struct sun8i_ss_dev *ss;
+ struct scatterlist *sg;
+ int nr_sgs, err, digestsize;
+ unsigned int len;
+ u64 fill, min_fill, byte_count;
+ void *pad, *result;
+ int j, i, todo;
+ __be64 *bebits;
+ __le64 *lebits;
+ dma_addr_t addr_res, addr_pad;
+ __le32 *bf;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ ss = algt->ss;
+
+ digestsize = algt->alg.hash.halg.digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ digestsize = SHA256_DIGEST_SIZE;
+
+	/* the padding can be up to two blocks */
+ pad = kzalloc(algt->alg.hash.halg.base.cra_blocksize * 2, GFP_KERNEL | GFP_DMA);
+ if (!pad)
+ return -ENOMEM;
+ bf = (__le32 *)pad;
+
+ result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
+ if (!result)
+ return -ENOMEM;
+
+ for (i = 0; i < MAX_SG; i++) {
+ rctx->t_dst[i].addr = 0;
+ rctx->t_dst[i].len = 0;
+ }
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt->stat_req++;
+#endif
+
+ rctx->method = ss->variant->alg_hash[algt->ss_algo_id];
+
+ nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+
+ addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ss->dev, addr_res)) {
+ dev_err(ss->dev, "DMA map dest\n");
+ err = -EINVAL;
+ goto theend;
+ }
+
+ len = areq->nbytes;
+ for_each_sg(areq->src, sg, nr_sgs, i) {
+ rctx->t_src[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ rctx->t_src[i].len = todo / 4;
+ len -= todo;
+ rctx->t_dst[i].addr = addr_res;
+ rctx->t_dst[i].len = digestsize / 4;
+ }
+ if (len > 0) {
+ dev_err(ss->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend;
+ }
+
+ byte_count = areq->nbytes;
+ j = 0;
+ bf[j++] = cpu_to_le32(0x80);
+
+ fill = 64 - (byte_count % 64);
+ min_fill = 3 * sizeof(u32);
+
+ if (fill < min_fill)
+ fill += 64;
+
+ j += (fill - min_fill) / sizeof(u32);
+
+ switch (algt->ss_algo_id) {
+ case SS_ID_HASH_MD5:
+ lebits = (__le64 *)&bf[j];
+ *lebits = cpu_to_le64(byte_count << 3);
+ j += 2;
+ break;
+ case SS_ID_HASH_SHA1:
+ case SS_ID_HASH_SHA224:
+ case SS_ID_HASH_SHA256:
+ bebits = (__be64 *)&bf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ break;
+ }
+
+ addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
+ rctx->t_src[i].addr = addr_pad;
+ rctx->t_src[i].len = j;
+ rctx->t_dst[i].addr = addr_res;
+ rctx->t_dst[i].len = digestsize / 4;
+ if (dma_mapping_error(ss->dev, addr_pad)) {
+ dev_err(ss->dev, "DMA error on padding SG\n");
+ err = -EINVAL;
+ goto theend;
+ }
+
+ err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
+
+ dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+
+ kfree(pad);
+
+ memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+ kfree(result);
+theend:
+ crypto_finalize_hash_request(engine, breq, err);
+ return 0;
+}
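
The padding assembled in sun8i_ss_hash_run() is the standard MD5/SHA padding: a 0x80 marker byte, zero fill up to 8 bytes short of a 64-byte block boundary, then the message length in bits (little-endian for MD5, big-endian for SHA1/SHA224/SHA256). The helper below is not part of the driver; it only re-expresses the same arithmetic, with a worked example in the comment:

/*
 * Example: byte_count = 100 -> fill = 64 - (100 % 64) = 28, so
 * j = 1 + (28 - 12) / 4 + 2 = 7 words, i.e. a 28-byte padding SG,
 * and 100 + 28 = 128 bytes is exactly two 64-byte blocks.
 */
static unsigned int ss_hash_pad_words(u64 byte_count)
{
	u64 fill = 64 - (byte_count % 64);
	u64 min_fill = 3 * sizeof(u32);	/* 0x80 word + 64-bit length field */
	unsigned int j = 1;		/* the 0x80 marker word */

	if (fill < min_fill)
		fill += 64;
	j += (fill - min_fill) / sizeof(u32);
	j += 2;				/* the 64-bit bit count */
	return j;
}
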
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
new file mode 100644
index 000000000000..08a1473b2145
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ss-prng.c - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file handles the PRNG found in the SS.
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi.rst
+ */
+#include "sun8i-ss.h"
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <crypto/internal/rng.h>
+
+int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
+
+ if (ctx->seed && ctx->slen != slen) {
+ memzero_explicit(ctx->seed, ctx->slen);
+ kfree(ctx->seed);
+ ctx->slen = 0;
+ ctx->seed = NULL;
+ }
+ if (!ctx->seed)
+ ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA);
+ if (!ctx->seed)
+ return -ENOMEM;
+
+ memcpy(ctx->seed, seed, slen);
+ ctx->slen = slen;
+
+ return 0;
+}
+
+int sun8i_ss_prng_init(struct crypto_tfm *tfm)
+{
+ struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ memset(ctx, 0, sizeof(struct sun8i_ss_rng_tfm_ctx));
+ return 0;
+}
+
+void sun8i_ss_prng_exit(struct crypto_tfm *tfm)
+{
+ struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ memzero_explicit(ctx->seed, ctx->slen);
+ kfree(ctx->seed);
+ ctx->seed = NULL;
+ ctx->slen = 0;
+}
+
+int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen)
+{
+ struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+ struct sun8i_ss_alg_template *algt;
+ struct sun8i_ss_dev *ss;
+ dma_addr_t dma_iv, dma_dst;
+ unsigned int todo;
+ int err = 0;
+ int flow;
+ void *d;
+ u32 v;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.rng);
+ ss = algt->ss;
+
+ if (ctx->slen == 0) {
+ dev_err(ss->dev, "The PRNG is not seeded\n");
+ return -EINVAL;
+ }
+
+	/* The SS does not hand back an updated seed, so we need to generate
+	 * a new one ourselves by requesting PRNG_SEED_SIZE extra bytes.
+	 * We want dlen + seedsize rounded up to a multiple of PRNG_DATA_SIZE.
+ */
+ todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE;
+ todo -= todo % PRNG_DATA_SIZE;
+
+ d = kzalloc(todo, GFP_KERNEL | GFP_DMA);
+ if (!d)
+ return -ENOMEM;
+
+ flow = sun8i_ss_get_engine_number(ss);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt->stat_req++;
+ algt->stat_bytes += todo;
+#endif
+
+ v = SS_ALG_PRNG | SS_PRNG_CONTINUE | SS_START;
+ if (flow)
+ v |= SS_FLOW1;
+ else
+ v |= SS_FLOW0;
+
+ dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, dma_iv)) {
+ dev_err(ss->dev, "Cannot DMA MAP IV\n");
+ return -EFAULT;
+ }
+
+ dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ss->dev, dma_dst)) {
+ dev_err(ss->dev, "Cannot DMA MAP DST\n");
+ err = -EFAULT;
+ goto err_iv;
+ }
+
+ err = pm_runtime_get_sync(ss->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(ss->dev);
+ goto err_pm;
+ }
+ err = 0;
+
+ mutex_lock(&ss->mlock);
+ writel(dma_iv, ss->base + SS_IV_ADR_REG);
+	/* the PRNG acts badly (failing rngtest) without SS_KEY_ADR_REG set */
+ writel(dma_iv, ss->base + SS_KEY_ADR_REG);
+ writel(dma_dst, ss->base + SS_DST_ADR_REG);
+ writel(todo / 4, ss->base + SS_LEN_ADR_REG);
+
+ reinit_completion(&ss->flows[flow].complete);
+ ss->flows[flow].status = 0;
+ /* Be sure all data is written before enabling the task */
+ wmb();
+
+ writel(v, ss->base + SS_CTL_REG);
+
+ wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
+ msecs_to_jiffies(todo));
+ if (ss->flows[flow].status == 0) {
+ dev_err(ss->dev, "DMA timeout for PRNG (size=%u)\n", todo);
+ err = -EFAULT;
+ }
+	/* Since cipher and hash go through the Linux crypto engine, and we
+	 * have one crypto engine per flow, they are guaranteed to issue only
+	 * one request per flow at a time.
+	 * Since the crypto engine waits for completion before submitting a
+	 * new request, mlock could be released right after the final writel.
+	 * But the crypto engine cannot handle crypto_rng, so we need to be
+	 * sure nothing else will use our flow.
+	 * The easiest way is to hold mlock until the hardware has finished
+	 * our request. We could have used a per-flow lock, but that would
+	 * increase complexity.
+	 * The drawback is that no request can be handled on the other flow
+	 * in the meantime.
+ */
+ mutex_unlock(&ss->mlock);
+
+ pm_runtime_put(ss->dev);
+
+err_pm:
+ dma_unmap_single(ss->dev, dma_dst, todo, DMA_FROM_DEVICE);
+err_iv:
+ dma_unmap_single(ss->dev, dma_iv, ctx->slen, DMA_TO_DEVICE);
+
+ if (!err) {
+ memcpy(dst, d, dlen);
+ /* Update seed */
+ memcpy(ctx->seed, d + dlen, ctx->slen);
+ }
+ memzero_explicit(d, todo);
+ kfree(d);
+
+ return err;
+}
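
The 'todo' computation at the top of sun8i_ss_prng_generate() guarantees the hardware produces enough material for the caller plus a fresh seed. The helper below is not in the driver; it is only an illustrative restatement, with a worked example using the PRNG_DATA_SIZE/PRNG_SEED_SIZE values from sun8i-ss.h:

/*
 * PRNG_DATA_SIZE = 20 and PRNG_SEED_SIZE = 22, so for dlen = 32:
 * todo = 32 + 22 + 20 = 74, then 74 - (74 % 20) = 60, which still
 * covers the 32 caller bytes plus the 22 bytes of new seed.
 */
static unsigned int ss_prng_todo(unsigned int dlen)
{
	unsigned int todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE;

	return todo - (todo % PRNG_DATA_SIZE);
}
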
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index 0405767f1f7e..1a66457f4a20 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -8,10 +8,16 @@
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/engine.h>
+#include <crypto/rng.h>
#include <crypto/skcipher.h>
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+
+#define SS_START 1
#define SS_ENCRYPTION 0
#define SS_DECRYPTION BIT(6)
@@ -19,6 +25,11 @@
#define SS_ALG_AES 0
#define SS_ALG_DES (1 << 2)
#define SS_ALG_3DES (2 << 2)
+#define SS_ALG_MD5 (3 << 2)
+#define SS_ALG_PRNG (4 << 2)
+#define SS_ALG_SHA1 (6 << 2)
+#define SS_ALG_SHA224 (7 << 2)
+#define SS_ALG_SHA256 (8 << 2)
#define SS_CTL_REG 0x00
#define SS_INT_CTL_REG 0x04
@@ -47,9 +58,17 @@
#define SS_OP_ECB 0
#define SS_OP_CBC (1 << 13)
+#define SS_ID_HASH_MD5 0
+#define SS_ID_HASH_SHA1 1
+#define SS_ID_HASH_SHA224 2
+#define SS_ID_HASH_SHA256 3
+#define SS_ID_HASH_MAX 4
+
#define SS_FLOW0 BIT(30)
#define SS_FLOW1 BIT(31)
+#define SS_PRNG_CONTINUE BIT(18)
+
#define MAX_SG 8
#define MAXFLOW 2
@@ -59,6 +78,9 @@
#define SS_DIE_ID_SHIFT 20
#define SS_DIE_ID_MASK 0x07
+#define PRNG_DATA_SIZE (160 / 8)
+#define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8)
+
/*
* struct ss_clock - Describe clocks used by sun8i-ss
* @name: Name of clock needed by this variant
@@ -75,11 +97,14 @@ struct ss_clock {
* struct ss_variant - Describe SS capability for each variant hardware
* @alg_cipher: list of supported ciphers. for each SS_ID_ this will give the
* coresponding SS_ALG_XXX value
+ * @alg_hash: list of supported hashes. for each SS_ID_ this will give the
+ * corresponding SS_ALG_XXX value
* @op_mode: list of supported block modes
- * @ss_clks! list of clock needed by this variant
+ * @ss_clks: list of clocks needed by this variant
*/
struct ss_variant {
char alg_cipher[SS_ID_CIPHER_MAX];
+ char alg_hash[SS_ID_HASH_MAX];
u32 op_mode[SS_ID_OP_MAX];
struct ss_clock ss_clks[SS_MAX_CLOCKS];
};
@@ -170,6 +195,8 @@ struct sun8i_cipher_req_ctx {
* @keylen: len of the key
* @ss: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
+ *
+ * enginectx must be the first element
*/
struct sun8i_cipher_tfm_ctx {
struct crypto_engine_ctx enginectx;
@@ -180,6 +207,46 @@ struct sun8i_cipher_tfm_ctx {
};
/*
+ * struct sun8i_ss_rng_tfm_ctx - context for the PRNG TFM
+ * @seed: The seed to use
+ * @slen: The size of the seed
+ */
+struct sun8i_ss_rng_tfm_ctx {
+ void *seed;
+ unsigned int slen;
+};
+
+/*
+ * struct sun8i_ss_hash_tfm_ctx - context for an ahash TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ * @ss: pointer to the private data of driver handling this TFM
+ *
+ * enginectx must be the first element
+ */
+struct sun8i_ss_hash_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ struct crypto_ahash *fallback_tfm;
+ struct sun8i_ss_dev *ss;
+};
+
+/*
+ * struct sun8i_ss_hash_reqctx - context for an ahash request
+ * @t_src: list of DMA address and size for source SGs
+ * @t_dst: list of DMA address and size for destination SGs
+ * @fallback_req: pre-allocated fallback request
+ * @method: the register value for the algorithm used by this request
+ * @flow: the flow to use for this request
+ */
+struct sun8i_ss_hash_reqctx {
+ struct sginfo t_src[MAX_SG];
+ struct sginfo t_dst[MAX_SG];
+ struct ahash_request fallback_req;
+ u32 method;
+ int flow;
+};
+
+/*
* struct sun8i_ss_alg_template - crypto_alg template
* @type: the CRYPTO_ALG_TYPE for this template
* @ss_algo_id: the SS_ID for this template
@@ -189,6 +256,7 @@ struct sun8i_cipher_tfm_ctx {
* @alg: one of sub struct must be used
* @stat_req: number of request done on this template
* @stat_fb: number of request which has fallbacked
+ * @stat_bytes: total data size done by this template
*/
struct sun8i_ss_alg_template {
u32 type;
@@ -197,10 +265,13 @@ struct sun8i_ss_alg_template {
struct sun8i_ss_dev *ss;
union {
struct skcipher_alg skcipher;
+ struct rng_alg rng;
+ struct ahash_alg hash;
} alg;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
unsigned long stat_req;
unsigned long stat_fb;
+ unsigned long stat_bytes;
#endif
};
@@ -218,3 +289,19 @@ int sun8i_ss_skencrypt(struct skcipher_request *areq);
int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss);
int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, const char *name);
+int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen);
+int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
+int sun8i_ss_prng_init(struct crypto_tfm *tfm);
+void sun8i_ss_prng_exit(struct crypto_tfm *tfm);
+
+int sun8i_ss_hash_crainit(struct crypto_tfm *tfm);
+void sun8i_ss_hash_craexit(struct crypto_tfm *tfm);
+int sun8i_ss_hash_init(struct ahash_request *areq);
+int sun8i_ss_hash_export(struct ahash_request *areq, void *out);
+int sun8i_ss_hash_import(struct ahash_request *areq, const void *in);
+int sun8i_ss_hash_final(struct ahash_request *areq);
+int sun8i_ss_hash_update(struct ahash_request *areq);
+int sun8i_ss_hash_finup(struct ahash_request *areq);
+int sun8i_ss_hash_digest(struct ahash_request *areq);
+int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq);
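
For orientation, the SS_* bits defined above are OR'ed into the single control word written to SS_CTL_REG. A hypothetical value for a SHA1 digest submitted on flow 1, assembled the same way sun8i_ss_run_hash_task() does it (illustrative only, not a new code path):

	u32 ctl = SS_START | SS_FLOW1 | SS_ALG_SHA1;

	writel(ctl, ss->base + SS_CTL_REG);

The PRNG path builds its control word the same way, using SS_ALG_PRNG and SS_PRNG_CONTINUE instead of a hash opcode.
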
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index f7fc0c464125..7729a637fb02 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -55,7 +55,7 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
sa->sa_command_1.w = 0;
sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
- sa->sa_command_1.bf.feedback_mode = cfb,
+ sa->sa_command_1.bf.feedback_mode = cfb;
sa->sa_command_1.bf.sa_rev = 1;
sa->sa_command_1.bf.hmac_muting = hmac_mc;
sa->sa_command_1.bf.extended_seq_num = esn;
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 6b6841359190..a4e25b46cd0a 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -15,6 +15,7 @@
#include <linux/ratelimit.h>
#include <linux/mutex.h>
+#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/rng.h>
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index d93210726697..8b5e07316352 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -99,7 +99,7 @@ static int meson_cipher(struct skcipher_request *areq)
unsigned int keyivlen, ivsize, offset, tloffset;
dma_addr_t phykeyiv;
void *backup_iv = NULL, *bkeyiv;
- __le32 v;
+ u32 v;
algt = container_of(alg, struct meson_alg_template, alg.skcipher);
@@ -340,10 +340,7 @@ void meson_cipher_exit(struct crypto_tfm *tfm)
{
struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
crypto_free_skcipher(op->fallback_tfm);
}
@@ -367,10 +364,7 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
return -EINVAL;
}
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
op->keylen = keylen;
op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!op->key)
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 466552acbbbb..5bbeff433c8c 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -98,7 +98,7 @@ static struct meson_alg_template mc_algs[] = {
};
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
-static int meson_dbgfs_read(struct seq_file *seq, void *v)
+static int meson_debugfs_show(struct seq_file *seq, void *v)
{
struct meson_dev *mc = seq->private;
int i;
@@ -118,19 +118,7 @@ static int meson_dbgfs_read(struct seq_file *seq, void *v)
}
return 0;
}
-
-static int meson_dbgfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, meson_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations meson_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = meson_dbgfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(meson_debugfs);
#endif
static void meson_free_chanlist(struct meson_dev *mc, int i)
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index a6e14491e080..b1d286004295 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1539,7 +1539,7 @@ static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
/* Write incr32(J0) into IV. */
j0_lsw = j0[3];
- j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1);
+ be32_add_cpu(&j0[3], 1);
atmel_aes_write_block(dd, AES_IVR(0), j0);
j0[3] = j0_lsw;
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index ed40dbb98c6b..4d63cb13a54f 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -912,7 +912,7 @@ static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
{
alg->base.cra_priority = ATMEL_TDES_PRIORITY;
alg->base.cra_flags = CRYPTO_ALG_ASYNC;
- alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
alg->base.cra_module = THIS_MODULE;
alg->init = atmel_tdes_init_tfm;
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 8a7fa1ae1ade..50d169e61b41 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -165,10 +165,6 @@ spu_skcipher_rx_sg_create(struct brcm_message *mssg,
return -EFAULT;
}
- if (ctx->cipher.alg == CIPHER_ALG_RC4)
- /* Add buffer to catch 260-byte SUPDT field for RC4 */
- sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);
-
if (stat_pad_len)
sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
@@ -317,7 +313,6 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
u8 local_iv_ctr[MAX_IV_SIZE];
u32 stat_pad_len; /* num bytes to align status field */
u32 pad_len; /* total length of all padding */
- bool update_key = false;
struct brcm_message *mssg; /* mailbox message */
/* number of entries in src and dst sg in mailbox message. */
@@ -391,28 +386,6 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
}
}
- if (ctx->cipher.alg == CIPHER_ALG_RC4) {
- rx_frag_num++;
- if (chunk_start) {
- /*
- * for non-first RC4 chunks, use SUPDT from previous
- * response as key for this chunk.
- */
- cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
- update_key = true;
- cipher_parms.type = CIPHER_TYPE_UPDT;
- } else if (!rctx->is_encrypt) {
- /*
- * First RC4 chunk. For decrypt, key in pre-built msg
- * header may have been changed if encrypt required
- * multiple chunks. So revert the key to the
- * ctx->enckey value.
- */
- update_key = true;
- cipher_parms.type = CIPHER_TYPE_INIT;
- }
- }
-
if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
flow_log("max_payload infinite\n");
else
@@ -425,14 +398,9 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
sizeof(rctx->msg_buf.bcm_spu_req_hdr));
- /*
- * Pass SUPDT field as key. Key field in finish() call is only used
- * when update_key has been set above for RC4. Will be ignored in
- * all other cases.
- */
spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
ctx->spu_req_hdr_len, !(rctx->is_encrypt),
- &cipher_parms, update_key, chunksize);
+ &cipher_parms, chunksize);
atomic64_add(chunksize, &iproc_priv.bytes_out);
@@ -527,9 +495,6 @@ static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
__func__, rctx->total_received, payload_len);
dump_sg(req->dst, rctx->total_received, payload_len);
- if (ctx->cipher.alg == CIPHER_ALG_RC4)
- packet_dump(" supdt ", rctx->msg_buf.c.supdt_tweak,
- SPU_SUPDT_LEN);
rctx->total_received += payload_len;
if (rctx->total_received == rctx->total_todo) {
@@ -1853,26 +1818,6 @@ static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
return 0;
}
-static int rc4_setkey(struct crypto_skcipher *cipher, const u8 *key,
- unsigned int keylen)
-{
- struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
- int i;
-
- ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
-
- ctx->enckey[0] = 0x00; /* 0x00 */
- ctx->enckey[1] = 0x00; /* i */
- ctx->enckey[2] = 0x00; /* 0x00 */
- ctx->enckey[3] = 0x00; /* j */
- for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
- ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];
-
- ctx->cipher_type = CIPHER_TYPE_INIT;
-
- return 0;
-}
-
static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
@@ -1895,9 +1840,6 @@ static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
case CIPHER_ALG_AES:
err = aes_setkey(cipher, key, keylen);
break;
- case CIPHER_ALG_RC4:
- err = rc4_setkey(cipher, key, keylen);
- break;
default:
pr_err("%s() Error: unknown cipher alg\n", __func__);
err = -EINVAL;
@@ -1905,11 +1847,9 @@ static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
if (err)
return err;
- /* RC4 already populated ctx->enkey */
- if (ctx->cipher.alg != CIPHER_ALG_RC4) {
- memcpy(ctx->enckey, key, keylen);
- ctx->enckeylen = keylen;
- }
+ memcpy(ctx->enckey, key, keylen);
+ ctx->enckeylen = keylen;
+
/* SPU needs XTS keys in the reverse order the crypto API presents */
if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
(ctx->cipher.mode == CIPHER_MODE_XTS)) {
@@ -2872,9 +2812,6 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
goto badkey;
}
break;
- case CIPHER_ALG_RC4:
- ctx->cipher_type = CIPHER_TYPE_INIT;
- break;
default:
pr_err("%s() Error: Unknown cipher alg\n", __func__);
return -EINVAL;
@@ -2930,7 +2867,6 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
ctx->enckeylen = keylen;
ctx->authkeylen = 0;
- memcpy(ctx->enckey, key, ctx->enckeylen);
switch (ctx->enckeylen) {
case AES_KEYSIZE_128:
@@ -2946,6 +2882,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
goto badkey;
}
+ memcpy(ctx->enckey, key, ctx->enckeylen);
+
flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
ctx->authkeylen);
flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
@@ -3000,6 +2938,10 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
flow_log("%s\n", __func__);
+
+ if (keylen < GCM_ESP_SALT_SIZE)
+ return -EINVAL;
+
ctx->salt_len = GCM_ESP_SALT_SIZE;
ctx->salt_offset = GCM_ESP_SALT_OFFSET;
memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
@@ -3028,6 +2970,10 @@ static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
flow_log("%s\n", __func__);
+
+ if (keylen < GCM_ESP_SALT_SIZE)
+ return -EINVAL;
+
ctx->salt_len = GCM_ESP_SALT_SIZE;
ctx->salt_offset = GCM_ESP_SALT_OFFSET;
memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
@@ -3057,6 +3003,10 @@ static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
flow_log("%s\n", __func__);
+
+ if (keylen < CCM_ESP_SALT_SIZE)
+ return -EINVAL;
+
ctx->salt_len = CCM_ESP_SALT_SIZE;
ctx->salt_offset = CCM_ESP_SALT_OFFSET;
memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
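
The three keylen checks added above guard the salt copies that follow them: for the rfc4106/rfc4543/rfc4309 ESP templates, the key material passed to setkey is the cipher key with the ESP salt appended, so a key shorter than the salt would make key + keylen - SALT_SIZE point before the buffer. A minimal sketch of the pattern (the helper name is made up; the driver uses GCM_ESP_SALT_SIZE and CCM_ESP_SALT_SIZE directly):

static int esp_salt_check_example(const u8 *key, unsigned int keylen,
				  unsigned int salt_size, u8 *salt)
{
	if (keylen < salt_size)
		return -EINVAL;

	/* the templates append the salt after the cipher key */
	memcpy(salt, key + keylen - salt_size, salt_size);
	return 0;
}
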
@@ -3606,25 +3556,6 @@ static struct iproc_alg_s driver_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
- .base.cra_name = "ecb(arc4)",
- .base.cra_driver_name = "ecb-arc4-iproc",
- .base.cra_blocksize = ARC4_BLOCK_SIZE,
- .min_keysize = ARC4_MIN_KEY_SIZE,
- .max_keysize = ARC4_MAX_KEY_SIZE,
- .ivsize = 0,
- },
- .cipher_info = {
- .alg = CIPHER_ALG_RC4,
- .mode = CIPHER_MODE_NONE,
- },
- .auth_info = {
- .alg = HASH_ALG_NONE,
- .mode = HASH_MODE_NONE,
- },
- },
- {
- .type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
.base.cra_name = "ofb(des)",
.base.cra_driver_name = "ofb-des-iproc",
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -4526,15 +4457,9 @@ static void spu_counters_init(void)
static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
{
- struct spu_hw *spu = &iproc_priv.spu;
struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
int err;
- /* SPU2 does not support RC4 */
- if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
- (spu->spu_type == SPU_TYPE_SPU2))
- return 0;
-
crypto->base.cra_module = THIS_MODULE;
crypto->base.cra_priority = cipher_pri;
crypto->base.cra_alignmask = 0;
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index b6d83e3aa46c..035c8389cb3d 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -388,7 +388,6 @@ struct spu_hw {
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size);
void (*spu_request_pad)(u8 *pad_start, u32 gcm_padding,
u32 hash_pad_len, enum hash_alg auth_alg,
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index e7562e9bf396..fe126f95c702 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -222,10 +222,6 @@ void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
cipher_key_len = 24;
name = "3DES";
break;
- case CIPHER_ALG_RC4:
- cipher_key_len = 260;
- name = "ARC4";
- break;
case CIPHER_ALG_AES:
switch (cipher_type) {
case CIPHER_TYPE_AES128:
@@ -919,21 +915,16 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
* @spu_req_hdr_len: Length in bytes of the SPU request header
* @isInbound: 0 encrypt, 1 decrypt
* @cipher_parms: Parameters describing cipher operation to be performed
- * @update_key: If true, rewrite the cipher key in SCTX
* @data_size: Length of the data in the BD field
*
* Assumes much of the header was already filled in at setkey() time in
* spum_cipher_req_init().
- * spum_cipher_req_init() fills in the encryption key. For RC4, when submitting
- * a request for a non-first chunk, we use the 260-byte SUPDT field from the
- * previous response as the key. update_key is true for this case. Unused in all
- * other cases.
+ * spum_cipher_req_init() fills in the encryption key.
*/
void spum_cipher_req_finish(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size)
{
struct SPUHEADER *spuh;
@@ -948,11 +939,6 @@ void spum_cipher_req_finish(u8 *spu_hdr,
flow_log(" in: %u\n", is_inbound);
flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
cipher_parms->type);
- if (update_key) {
- flow_log(" cipher key len: %u\n", cipher_parms->key_len);
- flow_dump(" key: ", cipher_parms->key_buf,
- cipher_parms->key_len);
- }
/*
* In XTS mode, API puts "i" parameter (block tweak) in IV. For
@@ -981,13 +967,6 @@ void spum_cipher_req_finish(u8 *spu_hdr,
else
cipher_bits &= ~CIPHER_INBOUND;
- /* update encryption key for RC4 on non-first chunk */
- if (update_key) {
- spuh->sa.cipher_flags |=
- cipher_parms->type << CIPHER_TYPE_SHIFT;
- memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);
- }
-
if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len)
/* cipher iv provided so put it in here */
memcpy(bdesc_ptr - cipher_parms->iv_len, cipher_parms->iv_buf,
diff --git a/drivers/crypto/bcm/spu.h b/drivers/crypto/bcm/spu.h
index b247bc5b9354..dd132389bcaa 100644
--- a/drivers/crypto/bcm/spu.h
+++ b/drivers/crypto/bcm/spu.h
@@ -251,7 +251,6 @@ void spum_cipher_req_finish(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size);
void spum_request_pad(u8 *pad_start,
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 59abb5ecefa4..c860ffb0b4c3 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -1170,21 +1170,16 @@ u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
* @spu_req_hdr_len: Length in bytes of the SPU request header
* @isInbound: 0 encrypt, 1 decrypt
* @cipher_parms: Parameters describing cipher operation to be performed
- * @update_key: If true, rewrite the cipher key in SCTX
* @data_size: Length of the data in the BD field
*
* Assumes much of the header was already filled in at setkey() time in
* spu_cipher_req_init().
- * spu_cipher_req_init() fills in the encryption key. For RC4, when submitting a
- * request for a non-first chunk, we use the 260-byte SUPDT field from the
- * previous response as the key. update_key is true for this case. Unused in all
- * other cases.
+ * spu_cipher_req_init() fills in the encryption key.
*/
void spu2_cipher_req_finish(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size)
{
struct SPU2_FMD *fmd;
@@ -1196,11 +1191,6 @@ void spu2_cipher_req_finish(u8 *spu_hdr,
flow_log(" in: %u\n", is_inbound);
flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
cipher_parms->type);
- if (update_key) {
- flow_log(" cipher key len: %u\n", cipher_parms->key_len);
- flow_dump(" key: ", cipher_parms->key_buf,
- cipher_parms->key_len);
- }
flow_log(" iv len: %d\n", cipher_parms->iv_len);
flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
flow_log(" data_size: %u\n", data_size);
diff --git a/drivers/crypto/bcm/spu2.h b/drivers/crypto/bcm/spu2.h
index 03af6c38df7f..6e666bfb3cfc 100644
--- a/drivers/crypto/bcm/spu2.h
+++ b/drivers/crypto/bcm/spu2.h
@@ -200,7 +200,6 @@ void spu2_cipher_req_finish(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size);
void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len,
enum hash_alg auth_alg, enum hash_mode auth_mode,
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index bc35aa0ec07a..84ea7cba5ee5 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -101,6 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
+ select CRYPTO_XTS
help
Selecting this will offload crypto for users of the
scatterlist crypto API (such as the linux native IPSec
@@ -114,6 +115,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
select CRYPTO_DES
+ select CRYPTO_XTS
help
Selecting this will use CAAM Queue Interface (QI) for sending
& receiving crypto jobs to/from CAAM. This gives better performance
@@ -165,6 +167,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
select CRYPTO_AEAD
select CRYPTO_HASH
select CRYPTO_DES
+ select CRYPTO_XTS
help
CAAM driver for QorIQ Data Path Acceleration Architecture 2.
It handles DPSECI DPAA2 objects that sit on the Management Complex
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 68d5cc0f28e2..3570286eb9ce 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -27,6 +27,8 @@ ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
ccflags-y += -DCONFIG_CAAM_QI
endif
+caam-$(CONFIG_DEBUG_FS) += debugfs.o
+
obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
dpaa2_caam-y := caamalg_qi2.o dpseci.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 91feda5b63f6..cf5bd7666dfc 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -57,6 +57,8 @@
#include "key_gen.h"
#include "caamalg_desc.h"
#include <crypto/engine.h>
+#include <crypto/xts.h>
+#include <asm/unaligned.h>
/*
* crypto alg
@@ -114,10 +116,13 @@ struct caam_ctx {
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
+ bool xts_key_fallback;
+ struct crypto_skcipher *fallback;
};
struct caam_skcipher_req_ctx {
struct skcipher_edesc *edesc;
+ struct skcipher_request fallback_req;
};
struct caam_aead_req_ctx {
@@ -829,11 +834,23 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
{
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
+ int err;
- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ err = xts_verify_key(skcipher, key, keylen);
+ if (err) {
dev_dbg(jrdev, "key size mismatch\n");
- return -EINVAL;
+ return err;
+ }
+
+ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
+ ctx->xts_key_fallback = true;
+
+ if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
+ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ if (err)
+ return err;
}
ctx->cdata.keylen = keylen;
@@ -1755,6 +1772,14 @@ static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
return ret;
}
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+}
+
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
@@ -1762,12 +1787,34 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int ret = 0;
- if (!req->cryptlen)
+ /*
+ * XTS is expected to return an error even for input length = 0
+ * Note that the case input length < block size will be caught during
+ * HW offloading and return an error.
+ */
+ if (!req->cryptlen && !ctx->fallback)
return 0;
+ if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
+ ctx->xts_key_fallback)) {
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+ crypto_skcipher_decrypt(&rctx->fallback_req);
+ }
+
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
@@ -1905,6 +1952,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-caam",
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
},
.setkey = xts_skcipher_setkey,
@@ -3344,13 +3392,35 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
+ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+ int ret = 0;
ctx->enginectx.op.do_one_request = skcipher_do_one_req;
- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
- false);
+ if (alg_aai == OP_ALG_AAI_XTS) {
+ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+ struct crypto_skcipher *fallback;
+
+ fallback = crypto_alloc_skcipher(tfm_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
+ tfm_name, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ ctx->fallback = fallback;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
+ crypto_skcipher_reqsize(fallback));
+ } else {
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
+ }
+
+ ret = caam_init_common(ctx, &caam_alg->caam, false);
+ if (ret && ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+
+ return ret;
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3378,7 +3448,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_skcipher_ctx(tfm));
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+ caam_exit_common(ctx);
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -3412,8 +3486,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY);
alg->init = caam_cra_init;
alg->exit = caam_cra_exit;
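
The caamalg.c changes route XTS requests to a software fallback in two cases: when the key is neither 2x128 nor 2x256 bits (the hardware has no AES-192 XTS support), and, on CAAM eras up to 8, when the upper 64 bits of the 16-byte tweak are non-zero (older CAAM only supports an 8-byte, sector-index style tweak). A stand-alone sketch of that decision, not part of the patch and with illustrative helper names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_256 32

/* Keys other than 2*128-bit or 2*256-bit (i.e. 2*192-bit) cannot be
 * offloaded, whatever the hardware era. */
static bool xts_key_needs_fallback(unsigned int keylen)
{
	return keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256;
}

/* Era <= 8 hardware only handles the low 64 bits of the tweak, so a
 * request whose upper tweak half is non-zero must use the software path. */
static bool xts_req_needs_fallback(int era, const uint8_t iv[16],
				   unsigned int keylen)
{
	uint64_t upper;

	memcpy(&upper, iv + 8, sizeof(upper));
	return xts_key_needs_fallback(keylen) || (era <= 8 && upper != 0);
}

int main(void)
{
	uint8_t iv[16] = { 0 };

	printf("era 9, 2*256-bit key, zero upper tweak -> fallback: %d\n",
	       xts_req_needs_fallback(9, iv, 64));
	iv[15] = 1;
	printf("era 8, 2*256-bit key, upper tweak set  -> fallback: %d\n",
	       xts_req_needs_fallback(8, iv, 64));
	return 0;
}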
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index d6c58184bb57..7571e1ac913b 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -373,6 +373,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
* with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @geniv: whether to generate Encrypted Chain IV
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
@@ -1550,13 +1551,14 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
set_jump_tgt_here(desc, key_jump_cmd);
/*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
+ * create sequence for loading the sector index / 16B tweak value
+ * Lower 8B of IV - sector index / tweak lower half
+ * Upper 8B of IV - upper half of 16B tweak
*/
append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
(0x20 << LDST_OFFSET_SHIFT));
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x30 << LDST_OFFSET_SHIFT));
/* Load operation */
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
@@ -1565,9 +1567,11 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
/* Perform operation */
skcipher_append_src_dst(desc);
- /* Store upper 8B of IV */
+ /* Store lower 8B and upper 8B of IV */
append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
(0x20 << LDST_OFFSET_SHIFT));
+ append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x30 << LDST_OFFSET_SHIFT));
print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__)
": ", DUMP_PREFIX_ADDRESS, 16, 4,
@@ -1609,23 +1613,25 @@ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
set_jump_tgt_here(desc, key_jump_cmd);
/*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
+ * create sequence for loading the sector index / 16B tweak value
+ * Lower 8B of IV - sector index / tweak lower half
+ * Upper 8B of IV - upper half of 16B tweak
*/
append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
(0x20 << LDST_OFFSET_SHIFT));
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x30 << LDST_OFFSET_SHIFT));
/* Load operation */
append_dec_op1(desc, cdata->algtype);
/* Perform operation */
skcipher_append_src_dst(desc);
- /* Store upper 8B of IV */
+ /* Store lower 8B and upper 8B of IV */
append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
(0x20 << LDST_OFFSET_SHIFT));
+ append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x30 << LDST_OFFSET_SHIFT));
print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__)
": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index bb1c0106a95c..66f60d78bdc8 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -18,6 +18,8 @@
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"
+#include <crypto/xts.h>
+#include <asm/unaligned.h>
/*
* crypto alg
@@ -67,6 +69,12 @@ struct caam_ctx {
struct device *qidev;
spinlock_t lock; /* Protects multiple init of driver context */
struct caam_drv_ctx *drv_ctx[NUM_OP];
+ bool xts_key_fallback;
+ struct crypto_skcipher *fallback;
+};
+
+struct caam_skcipher_req_ctx {
+ struct skcipher_request fallback_req;
};
static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -725,11 +733,23 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
{
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
int ret = 0;
+ int err;
- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ err = xts_verify_key(skcipher, key, keylen);
+ if (err) {
dev_dbg(jrdev, "key size mismatch\n");
- return -EINVAL;
+ return err;
+ }
+
+ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
+ ctx->xts_key_fallback = true;
+
+ if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
+ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ if (err)
+ return err;
}
ctx->cdata.keylen = keylen;
@@ -1373,16 +1393,46 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
return edesc;
}
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+}
+
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
- if (!req->cryptlen)
+ /*
+ * XTS is expected to return an error even for input length = 0
+ * Note that the case input length < block size will be caught during
+ * HW offloading and return an error.
+ */
+ if (!req->cryptlen && !ctx->fallback)
return 0;
+ if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
+ ctx->xts_key_fallback)) {
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+ crypto_skcipher_decrypt(&rctx->fallback_req);
+ }
+
if (unlikely(caam_congested))
return -EAGAIN;
@@ -1507,6 +1557,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-caam-qi",
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
},
.setkey = xts_skcipher_setkey,
@@ -2440,9 +2491,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+ int ret = 0;
+
+ if (alg_aai == OP_ALG_AAI_XTS) {
+ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+ struct crypto_skcipher *fallback;
+
+ fallback = crypto_alloc_skcipher(tfm_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
+ tfm_name, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ ctx->fallback = fallback;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
+ crypto_skcipher_reqsize(fallback));
+ }
+
+ ret = caam_init_common(ctx, &caam_alg->caam, false);
+ if (ret && ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
- false);
+ return ret;
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2468,7 +2542,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_skcipher_ctx(tfm));
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+ caam_exit_common(ctx);
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2502,8 +2580,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY);
alg->init = caam_cra_init;
alg->exit = caam_cra_exit;
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 66ae1d581168..98c1ff1744bb 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -19,6 +19,8 @@
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
+#include <crypto/xts.h>
+#include <asm/unaligned.h>
#define CAAM_CRA_PRIORITY 2000
@@ -59,7 +61,7 @@ struct caam_skcipher_alg {
};
/**
- * caam_ctx - per-session context
+ * struct caam_ctx - per-session context
* @flc: Flow Contexts array
* @key: [authentication key], encryption key
* @flc_dma: I/O virtual addresses of the Flow Contexts
@@ -80,6 +82,8 @@ struct caam_ctx {
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
+ bool xts_key_fallback;
+ struct crypto_skcipher *fallback;
};
static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
@@ -1054,12 +1058,24 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
{
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *dev = ctx->dev;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
struct caam_flc *flc;
u32 *desc;
+ int err;
- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ err = xts_verify_key(skcipher, key, keylen);
+ if (err) {
dev_dbg(dev, "key size mismatch\n");
- return -EINVAL;
+ return err;
+ }
+
+ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
+ ctx->xts_key_fallback = true;
+
+ if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
+ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ if (err)
+ return err;
}
ctx->cdata.keylen = keylen;
@@ -1443,17 +1459,44 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
skcipher_request_complete(req, ecode);
}
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+}
+
static int skcipher_encrypt(struct skcipher_request *req)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct caam_request *caam_req = skcipher_request_ctx(req);
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
int ret;
- if (!req->cryptlen)
+ /*
+ * XTS is expected to return an error even for input length = 0
+ * Note that the case input length < block size will be caught during
+ * HW offloading and return an error.
+ */
+ if (!req->cryptlen && !ctx->fallback)
return 0;
+ if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
+ ctx->xts_key_fallback)) {
+ skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&caam_req->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ return crypto_skcipher_encrypt(&caam_req->fallback_req);
+ }
+
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req);
if (IS_ERR(edesc))
@@ -1480,10 +1523,30 @@ static int skcipher_decrypt(struct skcipher_request *req)
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct caam_request *caam_req = skcipher_request_ctx(req);
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
int ret;
- if (!req->cryptlen)
+ /*
+ * XTS is expected to return an error even for input length = 0
+ * Note that the case input length < block size will be caught during
+ * HW offloading and return an error.
+ */
+ if (!req->cryptlen && !ctx->fallback)
return 0;
+
+ if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
+ ctx->xts_key_fallback)) {
+ skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&caam_req->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ return crypto_skcipher_decrypt(&caam_req->fallback_req);
+ }
+
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req);
if (IS_ERR(edesc))
@@ -1537,9 +1600,34 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+ int ret = 0;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
- return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
+ if (alg_aai == OP_ALG_AAI_XTS) {
+ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+ struct crypto_skcipher *fallback;
+
+ fallback = crypto_alloc_skcipher(tfm_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ dev_err(ctx->dev, "Failed to allocate %s fallback: %ld\n",
+ tfm_name, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ ctx->fallback = fallback;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
+ crypto_skcipher_reqsize(fallback));
+ } else {
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
+ }
+
+ ret = caam_cra_init(ctx, &caam_alg->caam, false);
+ if (ret && ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+
+ return ret;
}
static int caam_cra_init_aead(struct crypto_aead *tfm)
@@ -1562,7 +1650,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_skcipher_ctx(tfm));
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+ caam_exit_common(ctx);
}
static void caam_cra_exit_aead(struct crypto_aead *tfm)
@@ -1665,6 +1757,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-caam-qi2",
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
},
.setkey = xts_skcipher_setkey,
@@ -2912,8 +3005,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY);
alg->init = caam_cra_init_skcipher;
alg->exit = caam_cra_exit;
@@ -2951,7 +3044,7 @@ enum hash_optype {
};
/**
- * caam_hash_ctx - ahash per-session context
+ * struct caam_hash_ctx - ahash per-session context
* @flc: Flow Contexts array
* @key: authentication key
* @flc_dma: I/O virtual addresses of the Flow Contexts
@@ -5115,8 +5208,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
/* DPIO */
err = dpaa2_dpseci_dpio_setup(priv);
if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
+ dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
goto err_dpio_setup;
}
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index f29cb7bd7dd3..d35253407ade 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -13,6 +13,7 @@
#include <linux/netdevice.h>
#include "dpseci.h"
#include "desc_constr.h"
+#include <crypto/skcipher.h>
#define DPAA2_CAAM_STORE_SIZE 16
/* NAPI weight *must* be a multiple of the store size. */
@@ -186,6 +187,7 @@ struct caam_request {
void (*cbk)(void *ctx, u32 err);
void *ctx;
void *edesc;
+ struct skcipher_request fallback_req;
};
/**
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 94502f1d4b48..ca0361b2dbb0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -13,6 +13,7 @@
#include <linux/fsl/mc.h>
#include "compat.h"
+#include "debugfs.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
@@ -332,11 +333,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
kfree(desc);
- if (!ret)
- ret = devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng,
- ctrldev);
+ if (ret)
+ return ret;
- return ret;
+ return devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng, ctrldev);
}
/*
@@ -443,7 +443,9 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
* by u-boot.
* In case this property is not passed an attempt to retrieve the CAAM
* era via register reads will be made.
- **/
+ *
+ * @ctrl: controller region
+ */
static int caam_get_era(struct caam_ctrl __iomem *ctrl)
{
struct device_node *caam_node;
@@ -582,12 +584,10 @@ static int init_clocks(struct device *dev, const struct caam_imx_data *data)
return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
}
-#ifdef CONFIG_DEBUG_FS
static void caam_remove_debugfs(void *root)
{
debugfs_remove_recursive(root);
}
-#endif
#ifdef CONFIG_FSL_MC_BUS
static bool check_version(struct fsl_mc_version *mc_version, u32 major,
@@ -619,10 +619,7 @@ static int caam_probe(struct platform_device *pdev)
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
struct caam_drv_private *ctrlpriv;
-#ifdef CONFIG_DEBUG_FS
- struct caam_perfmon *perfmon;
struct dentry *dfs_root;
-#endif
u32 scfgr, comp_params;
u8 rng_vid;
int pg_size;
@@ -777,21 +774,15 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->era = caam_get_era(ctrl);
ctrlpriv->domain = iommu_get_domain_for_dev(dev);
-#ifdef CONFIG_DEBUG_FS
- /*
- * FIXME: needs better naming distinction, as some amalgamation of
- * "caam" and nprop->full_name. The OF name isn't distinctive,
- * but does separate instances
- */
- perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
-
dfs_root = debugfs_create_dir(dev_name(dev), NULL);
- ret = devm_add_action_or_reset(dev, caam_remove_debugfs, dfs_root);
- if (ret)
- return ret;
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ ret = devm_add_action_or_reset(dev, caam_remove_debugfs,
+ dfs_root);
+ if (ret)
+ return ret;
+ }
- ctrlpriv->ctl = debugfs_create_dir("ctl", dfs_root);
-#endif
+ caam_debugfs_init(ctrlpriv, dfs_root);
/* Check to see if (DPAA 1.x) QI present. If so, enable */
if (ctrlpriv->qi_present && !caam_dpaa2) {
@@ -912,57 +903,6 @@ static int caam_probe(struct platform_device *pdev)
dev_info(dev, "job rings = %d, qi = %d\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present);
-#ifdef CONFIG_DEBUG_FS
- debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->req_dequeued,
- &caam_fops_u64_ro);
- debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_req,
- &caam_fops_u64_ro);
- debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_req,
- &caam_fops_u64_ro);
- debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_bytes,
- &caam_fops_u64_ro);
- debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_prot_bytes,
- &caam_fops_u64_ro);
- debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_bytes,
- &caam_fops_u64_ro);
- debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_valid_bytes,
- &caam_fops_u64_ro);
-
- /* Controller level - global status values */
- debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultaddr,
- &caam_fops_u32_ro);
- debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultdetail,
- &caam_fops_u32_ro);
- debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->status,
- &caam_fops_u32_ro);
-
- /* Internal covering keys (useful in non-secure mode only) */
- ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
- ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- debugfs_create_blob("kek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
- &ctrlpriv->ctl_kek_wrap);
-
- ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
- ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- debugfs_create_blob("tkek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
- &ctrlpriv->ctl_tkek_wrap);
-
- ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
- ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
- &ctrlpriv->ctl_tdsk_wrap);
-#endif
-
ret = devm_of_platform_populate(dev);
if (ret)
dev_err(dev, "JR platform devices creation error\n");
diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c
new file mode 100644
index 000000000000..8ebf18398166
--- /dev/null
+++ b/drivers/crypto/caam/debugfs.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include <linux/debugfs.h>
+#include "compat.h"
+#include "debugfs.h"
+#include "regs.h"
+#include "intern.h"
+
+static int caam_debugfs_u64_get(void *data, u64 *val)
+{
+ *val = caam64_to_cpu(*(u64 *)data);
+ return 0;
+}
+
+static int caam_debugfs_u32_get(void *data, u64 *val)
+{
+ *val = caam32_to_cpu(*(u32 *)data);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
+
+#ifdef CONFIG_CAAM_QI
+/*
+ * This is a counter for the number of times the congestion group (where all
+ * the request and response queues are) reached congestion. Incremented
+ * each time the congestion callback is called with congested == true.
+ */
+static u64 times_congested;
+
+void caam_debugfs_qi_congested(void)
+{
+ times_congested++;
+}
+
+void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv)
+{
+ debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
+ &times_congested, &caam_fops_u64_ro);
+}
+#endif
+
+void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
+{
+ struct caam_perfmon *perfmon;
+
+ /*
+ * FIXME: needs better naming distinction, as some amalgamation of
+ * "caam" and nprop->full_name. The OF name isn't distinctive,
+ * but does separate instances
+ */
+ perfmon = (struct caam_perfmon __force *)&ctrlpriv->ctrl->perfmon;
+
+ ctrlpriv->ctl = debugfs_create_dir("ctl", root);
+
+ debugfs_create_file("rq_dequeued", 0444, ctrlpriv->ctl,
+ &perfmon->req_dequeued, &caam_fops_u64_ro);
+ debugfs_create_file("ob_rq_encrypted", 0444, ctrlpriv->ctl,
+ &perfmon->ob_enc_req, &caam_fops_u64_ro);
+ debugfs_create_file("ib_rq_decrypted", 0444, ctrlpriv->ctl,
+ &perfmon->ib_dec_req, &caam_fops_u64_ro);
+ debugfs_create_file("ob_bytes_encrypted", 0444, ctrlpriv->ctl,
+ &perfmon->ob_enc_bytes, &caam_fops_u64_ro);
+ debugfs_create_file("ob_bytes_protected", 0444, ctrlpriv->ctl,
+ &perfmon->ob_prot_bytes, &caam_fops_u64_ro);
+ debugfs_create_file("ib_bytes_decrypted", 0444, ctrlpriv->ctl,
+ &perfmon->ib_dec_bytes, &caam_fops_u64_ro);
+ debugfs_create_file("ib_bytes_validated", 0444, ctrlpriv->ctl,
+ &perfmon->ib_valid_bytes, &caam_fops_u64_ro);
+
+ /* Controller level - global status values */
+ debugfs_create_file("fault_addr", 0444, ctrlpriv->ctl,
+ &perfmon->faultaddr, &caam_fops_u32_ro);
+ debugfs_create_file("fault_detail", 0444, ctrlpriv->ctl,
+ &perfmon->faultdetail, &caam_fops_u32_ro);
+ debugfs_create_file("fault_status", 0444, ctrlpriv->ctl,
+ &perfmon->status, &caam_fops_u32_ro);
+
+ /* Internal covering keys (useful in non-secure mode only) */
+ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
+ ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+ debugfs_create_blob("kek", 0444, ctrlpriv->ctl,
+ &ctrlpriv->ctl_kek_wrap);
+
+ ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
+ ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+ debugfs_create_blob("tkek", 0444, ctrlpriv->ctl,
+ &ctrlpriv->ctl_tkek_wrap);
+
+ ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
+ ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+ debugfs_create_blob("tdsk", 0444, ctrlpriv->ctl,
+ &ctrlpriv->ctl_tdsk_wrap);
+}
diff --git a/drivers/crypto/caam/debugfs.h b/drivers/crypto/caam/debugfs.h
new file mode 100644
index 000000000000..661d768acdbf
--- /dev/null
+++ b/drivers/crypto/caam/debugfs.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#ifndef CAAM_DEBUGFS_H
+#define CAAM_DEBUGFS_H
+
+struct dentry;
+struct caam_drv_private;
+
+#ifdef CONFIG_DEBUG_FS
+void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root);
+#else
+static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+ struct dentry *root)
+{}
+#endif
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CAAM_QI)
+void caam_debugfs_qi_congested(void);
+void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv);
+#else
+static inline void caam_debugfs_qi_congested(void) {}
+static inline void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv) {}
+#endif
+
+#endif /* CAAM_DEBUGFS_H */
diff --git a/drivers/crypto/caam/dpseci-debugfs.c b/drivers/crypto/caam/dpseci-debugfs.c
index c5bfc923abd8..0eca8c2fd916 100644
--- a/drivers/crypto/caam/dpseci-debugfs.c
+++ b/drivers/crypto/caam/dpseci-debugfs.c
@@ -44,33 +44,14 @@ static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset)
return 0;
}
-static int dpseci_dbg_fqs_open(struct inode *inode, struct file *file)
-{
- int err;
- struct dpaa2_caam_priv *priv;
-
- priv = (struct dpaa2_caam_priv *)inode->i_private;
-
- err = single_open(file, dpseci_dbg_fqs_show, priv);
- if (err < 0)
- dev_err(priv->dev, "single_open() failed\n");
-
- return err;
-}
-
-static const struct file_operations dpseci_dbg_fq_ops = {
- .open = dpseci_dbg_fqs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dpseci_dbg_fqs);
void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv)
{
priv->dfs_root = debugfs_create_dir(dev_name(priv->dev), NULL);
debugfs_create_file("fq_stats", 0444, priv->dfs_root, priv,
- &dpseci_dbg_fq_ops);
+ &dpseci_dbg_fqs_fops);
}
void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv)
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 402d6a362e8c..9112279a4de0 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -195,23 +195,6 @@ static inline void caam_qi_algapi_exit(void)
#endif /* CONFIG_CAAM_QI */
-#ifdef CONFIG_DEBUG_FS
-static int caam_debugfs_u64_get(void *data, u64 *val)
-{
- *val = caam64_to_cpu(*(u64 *)data);
- return 0;
-}
-
-static int caam_debugfs_u32_get(void *data, u64 *val)
-{
- *val = caam32_to_cpu(*(u32 *)data);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
-#endif
-
static inline u64 caam_get_dma_mask(struct device *dev)
{
struct device_node *nprop = dev->of_node;
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index bf6b03b17251..6f669966ba2c 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -324,7 +324,7 @@ EXPORT_SYMBOL(caam_jr_alloc);
/**
* caam_jr_free() - Free the Job Ring
- * @rdev - points to the dev that identifies the Job ring to
+ * @rdev: points to the dev that identifies the Job ring to
* be released.
**/
void caam_jr_free(struct device *rdev)
@@ -349,15 +349,15 @@ EXPORT_SYMBOL(caam_jr_free);
* of this request. This has the form:
* callback(struct device *dev, u32 *desc, u32 stat, void *arg)
* where:
- * @dev: contains the job ring device that processed this
+ * dev: contains the job ring device that processed this
* response.
- * @desc: descriptor that initiated the request, same as
+ * desc: descriptor that initiated the request, same as
* "desc" being argued to caam_jr_enqueue().
- * @status: untranslated status received from CAAM. See the
+ * status: untranslated status received from CAAM. See the
* reference manual for a detailed description of
* error meaning, or see the JRSTA definitions in the
* register header file
- * @areq: optional pointer to an argument passed with the
+ * areq: optional pointer to an argument passed with the
* original request
* @areq: optional pointer to a user argument for use at callback
* time.
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index b390b935db6d..ec53528d8205 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -11,6 +11,7 @@
#include <linux/kthread.h>
#include <soc/fsl/qman.h>
+#include "debugfs.h"
#include "regs.h"
#include "qi.h"
#include "desc.h"
@@ -73,15 +74,6 @@ static struct caam_qi_priv qipriv ____cacheline_aligned;
bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);
-#ifdef CONFIG_DEBUG_FS
-/*
- * This is a counter for the number of times the congestion group (where all
- * the request and response queueus are) reached congestion. Incremented
- * each time the congestion callback is called with congested == true.
- */
-static u64 times_congested;
-#endif
-
/*
* This is a a cache of buffers, from which the users of CAAM QI driver
* can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
@@ -544,9 +536,8 @@ static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
caam_congested = congested;
if (congested) {
-#ifdef CONFIG_DEBUG_FS
- times_congested++;
-#endif
+ caam_debugfs_qi_congested();
+
pr_debug_ratelimited("CAAM entered congestion\n");
} else {
@@ -775,10 +766,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
return -ENOMEM;
}
-#ifdef CONFIG_DEBUG_FS
- debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
- &times_congested, &caam_fops_u64_ro);
-#endif
+ caam_debugfs_qi_init(ctrlpriv);
err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
if (err)
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 5af0dc2a8909..ce3b91c612f0 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -451,13 +451,7 @@ static struct skcipher_alg algs[] = { {
static inline int cav_register_algs(void)
{
- int err = 0;
-
- err = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
- if (err)
- return err;
-
- return 0;
+ return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}
static inline void cav_unregister_algs(void)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index cee2a2713038..9d14be97e381 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -451,6 +451,7 @@ static int nitrox_probe(struct pci_dev *pdev,
err = pci_request_mem_regions(pdev, nitrox_driver_name);
if (err) {
pci_disable_device(pdev);
+ dev_err(&pdev->dev, "Failed to request mem regions!\n");
return err;
}
pci_set_master(pdev);
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index 194624b4855b..d35216e2f6cd 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -460,7 +460,7 @@ static void zip_unregister_compression_device(void)
#include <linux/debugfs.h>
/* Displays ZIP device statistics */
-static int zip_show_stats(struct seq_file *s, void *unused)
+static int zip_stats_show(struct seq_file *s, void *unused)
{
u64 val = 0ull;
u64 avg_chunk = 0ull, avg_cr = 0ull;
@@ -523,7 +523,7 @@ static int zip_show_stats(struct seq_file *s, void *unused)
}
/* Clears stats data */
-static int zip_clear_stats(struct seq_file *s, void *unused)
+static int zip_clear_show(struct seq_file *s, void *unused)
{
int index = 0;
@@ -558,7 +558,7 @@ static struct zip_registers zipregs[64] = {
};
/* Prints registers' contents */
-static int zip_print_regs(struct seq_file *s, void *unused)
+static int zip_regs_show(struct seq_file *s, void *unused)
{
u64 val = 0;
int i = 0, index = 0;
@@ -584,41 +584,9 @@ static int zip_print_regs(struct seq_file *s, void *unused)
return 0;
}
-static int zip_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, zip_show_stats, NULL);
-}
-
-static const struct file_operations zip_stats_fops = {
- .owner = THIS_MODULE,
- .open = zip_stats_open,
- .read = seq_read,
- .release = single_release,
-};
-
-static int zip_clear_open(struct inode *inode, struct file *file)
-{
- return single_open(file, zip_clear_stats, NULL);
-}
-
-static const struct file_operations zip_clear_fops = {
- .owner = THIS_MODULE,
- .open = zip_clear_open,
- .read = seq_read,
- .release = single_release,
-};
-
-static int zip_regs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, zip_print_regs, NULL);
-}
-
-static const struct file_operations zip_regs_fops = {
- .owner = THIS_MODULE,
- .open = zip_regs_open,
- .read = seq_read,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(zip_stats);
+DEFINE_SHOW_ATTRIBUTE(zip_clear);
+DEFINE_SHOW_ATTRIBUTE(zip_regs);
/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;
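
The zip and dpseci debugfs conversions rely on DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>, which is why the show callbacks were renamed to end in _show. The macro generates the single_open() wrapper and file_operations that were previously written by hand; roughly (simplified from the kernel header):

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}

So DEFINE_SHOW_ATTRIBUTE(zip_stats) yields zip_stats_fops, and DEFINE_SHOW_ATTRIBUTE(dpseci_dbg_fqs) yields the dpseci_dbg_fqs_fops passed to debugfs_create_file() earlier in this series.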
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index bd270e66185e..d6a8f4e4b14a 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -8,6 +8,7 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -1744,7 +1745,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
break;
default:
ret = -EINVAL;
- goto e_ctx;
+ goto e_data;
}
} else {
/* Stash the context */
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d77ae981b64b..dafa6577a845 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -75,8 +75,7 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
switch (size) {
case CC_AES_128_BIT_KEY_SIZE:
case CC_AES_192_BIT_KEY_SIZE:
- if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
- ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
+ if (ctx_p->cipher_mode != DRV_CIPHER_XTS)
return 0;
break;
case CC_AES_256_BIT_KEY_SIZE:
@@ -84,8 +83,7 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
case (CC_AES_192_BIT_KEY_SIZE * 2):
case (CC_AES_256_BIT_KEY_SIZE * 2):
if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
- ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
- ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV)
return 0;
break;
default:
@@ -122,7 +120,6 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p,
case DRV_CIPHER_ECB:
case DRV_CIPHER_CBC:
case DRV_CIPHER_ESSIV:
- case DRV_CIPHER_BITLOCKER:
if (IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
@@ -348,8 +345,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
}
if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
- ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
- ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
if (hki.hw_key1 == hki.hw_key2) {
dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
hki.hw_key1, hki.hw_key2);
@@ -547,7 +543,6 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
- case DRV_CIPHER_BITLOCKER:
/* IV */
hw_desc_init(&desc[*seq_size]);
set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
@@ -602,7 +597,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
- case DRV_CIPHER_BITLOCKER:
break;
default:
dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
@@ -624,16 +618,8 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
unsigned int key_len = (ctx_p->keylen / 2);
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
- unsigned int du_size = nbytes;
unsigned int key_offset = key_len;
- struct cc_crypto_alg *cc_alg =
- container_of(tfm->__crt_alg, struct cc_crypto_alg,
- skcipher_alg.base);
-
- if (cc_alg->data_unit)
- du_size = cc_alg->data_unit;
-
switch (cipher_mode) {
case DRV_CIPHER_ECB:
break;
@@ -644,7 +630,6 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
- case DRV_CIPHER_BITLOCKER:
if (cipher_mode == DRV_CIPHER_ESSIV)
key_len = SHA256_DIGEST_SIZE;
@@ -661,7 +646,7 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
(key_dma_addr + key_offset),
key_len, NS_BIT);
}
- set_xex_data_unit_size(&desc[*seq_size], du_size);
+ set_xex_data_unit_size(&desc[*seq_size], nbytes);
set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
set_key_size_aes(&desc[*seq_size], key_len);
set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
@@ -758,7 +743,6 @@ static void cc_setup_key_desc(struct crypto_tfm *tfm,
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
- case DRV_CIPHER_BITLOCKER:
/* Load AES key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -1039,44 +1023,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
- .name = "xts512(paes)",
- .driver_name = "xts-paes-du512-ccree",
- .blocksize = 1,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_XTS,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 512,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
- .name = "xts4096(paes)",
- .driver_name = "xts-paes-du4096-ccree",
- .blocksize = 1,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_XTS,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 4096,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
.name = "essiv(cbc(paes),sha256)",
.driver_name = "essiv-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
@@ -1095,100 +1041,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
- .name = "essiv512(cbc(paes),sha256)",
- .driver_name = "essiv-paes-du512-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_ESSIV,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 512,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
- .name = "essiv4096(cbc(paes),sha256)",
- .driver_name = "essiv-paes-du4096-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_ESSIV,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 4096,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
- .name = "bitlocker(paes)",
- .driver_name = "bitlocker-paes-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_BITLOCKER,
- .flow_mode = S_DIN_to_AES,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
- .name = "bitlocker512(paes)",
- .driver_name = "bitlocker-paes-du512-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_BITLOCKER,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 512,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
- .name = "bitlocker4096(paes)",
- .driver_name = "bitlocker-paes-du4096-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_BITLOCKER,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 4096,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
.name = "ecb(paes)",
.driver_name = "ecb-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
@@ -1300,42 +1152,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
},
{
- .name = "xts512(aes)",
- .driver_name = "xts-aes-du512-ccree",
- .blocksize = 1,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_XTS,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 512,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
- .name = "xts4096(aes)",
- .driver_name = "xts-aes-du4096-ccree",
- .blocksize = 1,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_XTS,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 4096,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
.name = "essiv(cbc(aes),sha256)",
.driver_name = "essiv-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
@@ -1353,95 +1169,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
},
{
- .name = "essiv512(cbc(aes),sha256)",
- .driver_name = "essiv-aes-du512-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_ESSIV,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 512,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
- .name = "essiv4096(cbc(aes),sha256)",
- .driver_name = "essiv-aes-du4096-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_ESSIV,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 4096,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
- .name = "bitlocker(aes)",
- .driver_name = "bitlocker-aes-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_BITLOCKER,
- .flow_mode = S_DIN_to_AES,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
- .name = "bitlocker512(aes)",
- .driver_name = "bitlocker-aes-du512-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_BITLOCKER,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 512,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
- .name = "bitlocker4096(aes)",
- .driver_name = "bitlocker-aes-du4096-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_BITLOCKER,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 4096,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
.name = "ecb(aes)",
.driver_name = "ecb-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
@@ -1712,7 +1439,6 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
t_alg->cipher_mode = tmpl->cipher_mode;
t_alg->flow_mode = tmpl->flow_mode;
- t_alg->data_unit = tmpl->data_unit;
return t_alg;
}
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
index ccf960a0d989..bd9a1c0896b3 100644
--- a/drivers/crypto/ccree/cc_crypto_ctx.h
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -108,7 +108,6 @@ enum drv_cipher_mode {
DRV_CIPHER_CBC_CTS = 11,
DRV_CIPHER_GCTR = 12,
DRV_CIPHER_ESSIV = 13,
- DRV_CIPHER_BITLOCKER = 14,
DRV_CIPHER_RESERVE32B = S32_MAX
};
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 2d50991b9a17..6f519d3e896c 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -300,11 +300,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->plat_dev = plat_dev;
clk = devm_clk_get_optional(dev, NULL);
- if (IS_ERR(clk)) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
- dev_err(dev, "Error getting clock: %pe\n", clk);
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Error getting clock\n");
new_drvdata->clk = clk;
new_drvdata->coherent = of_dma_is_coherent(np);
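
The hunk above folds the clock error handling into dev_err_probe(), which logs the failure (staying silent for -EPROBE_DEFER) and returns the error code in one call. A minimal sketch of the same pattern, assuming a hypothetical platform driver with an optional clock:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            struct clk *clk;

            /* returns NULL (not an error) when no clock is described */
            clk = devm_clk_get_optional(dev, NULL);
            if (IS_ERR(clk))
                    /* logs unless the error is -EPROBE_DEFER, then returns PTR_ERR(clk) */
                    return dev_err_probe(dev, PTR_ERR(clk), "Error getting clock\n");

            /* clk_prepare_enable() accepts a NULL clock, so the optional case just works */
            return clk_prepare_enable(clk);
    }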
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index d938886390d2..af77b2020350 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -162,7 +162,6 @@ struct cc_crypto_alg {
int cipher_mode;
int flow_mode; /* Note: currently, refers to the cipher mode only. */
int auth_mode;
- unsigned int data_unit;
struct cc_drvdata *drvdata;
struct skcipher_alg skcipher_alg;
struct aead_alg aead_alg;
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index d39e1664fc7e..3c65bf070c90 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -65,8 +65,12 @@ const struct dev_pm_ops ccree_pm = {
int cc_pm_get(struct device *dev)
{
int rc = pm_runtime_get_sync(dev);
+ if (rc < 0) {
+ pm_runtime_put_noidle(dev);
+ return rc;
+ }
- return (rc == 1 ? 0 : rc);
+ return 0;
}
void cc_pm_put_suspend(struct device *dev)
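
The cc_pm_get() fix above follows the standard runtime-PM rule: pm_runtime_get_sync() bumps the usage counter even when the resume fails, so the error path must drop that reference with pm_runtime_put_noidle(). A hedged sketch of the generic pattern (the foo_* name is a placeholder):

    #include <linux/pm_runtime.h>

    static int foo_runtime_get(struct device *dev)
    {
            int rc = pm_runtime_get_sync(dev);

            if (rc < 0) {
                    /* usage count was incremented even though resume failed */
                    pm_runtime_put_noidle(dev);
                    return rc;
            }

            /* rc is 0 or 1 on success; both mean the device is now active */
            return 0;
    }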
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index bd8dac806e7a..ed7989cf151e 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -148,7 +148,7 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
- mutex_lock(&drv_data.drv_mutex);
+ mutex_lock(&drv_data.drv_mutex);
if (drv_data.last_dev == u_ctx) {
if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
drv_data.last_dev = list_first_entry(&drv_data.act_dev,
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 354836468c5d..7e7a8f01ea6b 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -780,8 +780,8 @@ static int hifn_register_rng(struct hifn_device *dev)
dev->pk_clk_freq) * 256;
dev->rng.name = dev->name;
- dev->rng.data_present = hifn_rng_data_present,
- dev->rng.data_read = hifn_rng_data_read,
+ dev->rng.data_present = hifn_rng_data_present;
+ dev->rng.data_read = hifn_rng_data_read;
dev->rng.priv = (unsigned long)dev;
return hwrng_register(&dev->rng);
@@ -1235,7 +1235,8 @@ static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
int idx;
dma_addr_t addr;
- addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
+ addr = dma_map_page(&dev->pdev->dev, page, offset, size,
+ DMA_TO_DEVICE);
idx = dma->srci;
@@ -1293,7 +1294,8 @@ static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
int idx;
dma_addr_t addr;
- addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
+ addr = dma_map_page(&dev->pdev->dev, page, offset, size,
+ DMA_FROM_DEVICE);
idx = dma->dsti;
dma->dstr[idx].p = __cpu_to_le32(addr);
@@ -2470,7 +2472,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err)
goto err_out_disable_pci_device;
@@ -2514,8 +2516,9 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
- dev->desc_virt = pci_zalloc_consistent(pdev, sizeof(struct hifn_dma),
- &dev->desc_dma);
+ dev->desc_virt = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct hifn_dma),
+ &dev->desc_dma, GFP_KERNEL);
if (!dev->desc_virt) {
dev_err(&pdev->dev, "Failed to allocate descriptor rings.\n");
err = -ENOMEM;
@@ -2572,8 +2575,8 @@ err_out_free_irq:
free_irq(dev->irq, dev);
tasklet_kill(&dev->tasklet);
err_out_free_desc:
- pci_free_consistent(pdev, sizeof(struct hifn_dma),
- dev->desc_virt, dev->desc_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct hifn_dma), dev->desc_virt,
+ dev->desc_dma);
err_out_unmap_bars:
for (i = 0; i < 3; ++i)
@@ -2610,8 +2613,8 @@ static void hifn_remove(struct pci_dev *pdev)
hifn_flush(dev);
- pci_free_consistent(pdev, sizeof(struct hifn_dma),
- dev->desc_virt, dev->desc_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct hifn_dma),
+ dev->desc_virt, dev->desc_dma);
for (i = 0; i < 3; ++i)
if (dev->bar[i])
iounmap(dev->bar[i]);
@@ -2642,9 +2645,6 @@ static int __init hifn_init(void)
unsigned int freq;
int err;
- /* HIFN supports only 32-bit addresses */
- BUILD_BUG_ON(sizeof(dma_addr_t) != 4);
-
if (strncmp(hifn_pll_ref, "ext", 3) &&
strncmp(hifn_pll_ref, "pci", 3)) {
pr_err("hifn795x: invalid hifn_pll_ref clock, must be pci or ext");
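
The hifn_795x hunks above replace the legacy pci_* DMA wrappers with the generic DMA API: &pdev->dev plus DMA_TO_DEVICE/DMA_FROM_DEVICE instead of the PCI_DMA_* flags, and dma_alloc_coherent()/dma_free_coherent() instead of pci_zalloc_consistent()/pci_free_consistent(). A minimal sketch of the new calls, with foo_* as a placeholder (the BUILD_BUG_ON on sizeof(dma_addr_t) is dropped, presumably because the 32-bit mask set here already constrains addresses):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static void *foo_dma_setup(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
    {
            /* was pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) */
            if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
                    return NULL;

            /* was pci_zalloc_consistent(); coherent allocations come back zeroed */
            return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
    }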
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index ed730d173e95..f69252b24671 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -56,7 +56,6 @@ struct hpre_dfx {
* Just relevant for PF.
*/
struct hpre_debug {
- struct dentry *debug_root;
struct hpre_dfx dfx[HPRE_DFX_FILE_NUM];
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
};
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 7b5cb27d473d..a87f9904087a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -98,9 +98,6 @@ struct hpre_asym_request {
struct timespec64 req_time;
};
-static DEFINE_MUTEX(hpre_alg_lock);
-static unsigned int hpre_active_devs;
-
static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
unsigned long flags;
@@ -191,8 +188,7 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
hpre_req->dst = NULL;
dma_dir = DMA_FROM_DEVICE;
}
- *tmp = dma_map_single(dev, sg_virt(data),
- len, dma_dir);
+ *tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
if (unlikely(dma_mapping_error(dev, *tmp))) {
dev_err(dev, "dma map data err!\n");
return -ENOMEM;
@@ -242,8 +238,8 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
((is_dh && !is_src) || !is_dh))
ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
else
- ret = hpre_prepare_dma_buf(hpre_req, data, len,
- is_src, &tmp);
+ ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
+
if (unlikely(ret))
return ret;
@@ -270,11 +266,9 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
if (src) {
if (req->src)
- dma_free_coherent(dev, ctx->key_sz,
- req->src, tmp);
+ dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
else
- dma_unmap_single(dev, tmp,
- ctx->key_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
}
tmp = le64_to_cpu(sqe->out);
@@ -477,7 +471,7 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
h_req->areq.dh = kreq;
msg = &h_req->req;
memset(msg, 0, sizeof(*msg));
- msg->key = cpu_to_le64((u64)ctx->dh.dma_xa_p);
+ msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
}
msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
@@ -534,6 +528,8 @@ static int hpre_dh_compute_value(struct kpp_request *req)
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
if (unlikely(ret))
goto clear_all;
+ } else {
+ msg->in = cpu_to_le64(ctx->dh.dma_g);
}
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
@@ -743,7 +739,7 @@ static int hpre_rsa_enc(struct akcipher_request *req)
return ret;
msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
- msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
+ msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
if (unlikely(ret))
@@ -791,11 +787,11 @@ static int hpre_rsa_dec(struct akcipher_request *req)
return ret;
if (ctx->crt_g2_mode) {
- msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
+ msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
HPRE_ALG_NC_CRT);
} else {
- msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
+ msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
HPRE_ALG_NC_NCRT);
}
@@ -1160,36 +1156,25 @@ static struct kpp_alg dh = {
int hpre_algs_register(void)
{
- int ret = 0;
-
- mutex_lock(&hpre_alg_lock);
- if (++hpre_active_devs == 1) {
- rsa.base.cra_flags = 0;
- ret = crypto_register_akcipher(&rsa);
- if (ret)
- goto unlock;
+ int ret;
+
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
+ if (ret)
+ return ret;
#ifdef CONFIG_CRYPTO_DH
- ret = crypto_register_kpp(&dh);
- if (ret) {
- crypto_unregister_akcipher(&rsa);
- goto unlock;
- }
+ ret = crypto_register_kpp(&dh);
+ if (ret)
+ crypto_unregister_akcipher(&rsa);
#endif
- }
-unlock:
- mutex_unlock(&hpre_alg_lock);
return ret;
}
void hpre_algs_unregister(void)
{
- mutex_lock(&hpre_alg_lock);
- if (--hpre_active_devs == 0) {
- crypto_unregister_akcipher(&rsa);
+ crypto_unregister_akcipher(&rsa);
#ifdef CONFIG_CRYPTO_DH
- crypto_unregister_kpp(&dh);
+ crypto_unregister_kpp(&dh);
#endif
- }
- mutex_unlock(&hpre_alg_lock);
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index b135c74fb619..a33394d91bbf 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -90,7 +90,6 @@
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
-static struct hisi_qm_list hpre_devices;
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -106,6 +105,11 @@ struct hpre_hw_error {
const char *msg;
};
+static struct hisi_qm_list hpre_devices = {
+ .register_to_crypto = hpre_algs_register,
+ .unregister_from_crypto = hpre_algs_unregister,
+};
+
static const char * const hpre_debug_file_name[] = {
[HPRE_CURRENT_QM] = "current_qm",
[HPRE_CLEAR_ENABLE] = "rdclr_en",
@@ -186,7 +190,7 @@ static const struct kernel_param_ops hpre_pf_q_num_ops = {
static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
-MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)");
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(2-1024)");
static const struct kernel_param_ops vfs_num_ops = {
.set = vfs_num_set,
@@ -864,9 +868,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
dev_warn(&pdev->dev, "init debugfs fail!\n");
- hisi_qm_add_to_list(qm, &hpre_devices);
-
- ret = hpre_algs_register();
+ ret = hisi_qm_alg_register(qm, &hpre_devices);
if (ret < 0) {
pci_err(pdev, "fail to register algs to crypto!\n");
goto err_with_qm_start;
@@ -875,18 +877,17 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (qm->fun_type == QM_HW_PF && vfs_num) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
- goto err_with_crypto_register;
+ goto err_with_alg_register;
}
return 0;
-err_with_crypto_register:
- hpre_algs_unregister();
+err_with_alg_register:
+ hisi_qm_alg_unregister(qm, &hpre_devices);
err_with_qm_start:
- hisi_qm_del_from_list(qm, &hpre_devices);
hpre_debugfs_exit(qm);
- hisi_qm_stop(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
err_with_err_init:
hisi_qm_dev_err_uninit(qm);
@@ -899,14 +900,13 @@ err_with_qm_init:
static void hpre_remove(struct pci_dev *pdev)
{
- struct hpre *hpre = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hpre->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
int ret;
- hpre_algs_unregister();
- hisi_qm_del_from_list(qm, &hpre_devices);
+ hisi_qm_wait_task_finish(qm, &hpre_devices);
+ hisi_qm_alg_unregister(qm, &hpre_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
- ret = hisi_qm_sriov_disable(pdev);
+ ret = hisi_qm_sriov_disable(pdev, qm->is_frozen);
if (ret) {
pci_err(pdev, "Disable SRIOV fail!\n");
return;
@@ -918,7 +918,7 @@ static void hpre_remove(struct pci_dev *pdev)
}
hpre_debugfs_exit(qm);
- hisi_qm_stop(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
}
@@ -939,6 +939,7 @@ static struct pci_driver hpre_pci_driver = {
.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
hisi_qm_sriov_configure : NULL,
.err_handler = &hpre_err_handler,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hpre_register_debugfs(void)
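
With hpre_devices carrying register_to_crypto/unregister_from_crypto, the probe and remove paths above hand algorithm registration to the qm core: the first device to come up registers the algorithms, the last one to go away unregisters them, replacing the old hpre_active_devs counting. A sketch of the wiring under placeholder foo_* names (hisi_qm_alg_register/unregister and hisi_qm_wait_task_finish are the helpers added to qm.c later in this patch):

    static int foo_algs_register(void);
    static void foo_algs_unregister(void);

    static struct hisi_qm_list foo_devices = {
            .register_to_crypto = foo_algs_register,
            .unregister_from_crypto = foo_algs_unregister,
    };

    static int foo_add_device(struct hisi_qm *qm)
    {
            /* adds qm to the list; the first device also registers the algorithms */
            return hisi_qm_alg_register(qm, &foo_devices);
    }

    static void foo_del_device(struct hisi_qm *qm)
    {
            hisi_qm_wait_task_finish(qm, &foo_devices);     /* drain outstanding work first */
            /* removes qm from the list; the last device also unregisters the algorithms */
            hisi_qm_alg_unregister(qm, &foo_devices);
    }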
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 6527c53b073f..530f23116d7c 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -180,7 +180,10 @@
#define QM_DBG_TMP_BUF_LEN 22
#define QM_PCI_COMMAND_INVALID ~0
+#define WAIT_PERIOD 20
+#define REMOVE_WAIT_DELAY 10
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
+#define QM_EQ_DEPTH (1024 * 2)
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
@@ -652,7 +655,7 @@ static void qm_work_process(struct work_struct *work)
qp = qm_to_hisi_qp(qm, eqe);
qm_poll_qp(qp, qm);
- if (qm->status.eq_head == QM_Q_DEPTH - 1) {
+ if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
qm->status.eqc_phase = !qm->status.eqc_phase;
eqe = qm->eqe;
qm->status.eq_head = 0;
@@ -661,7 +664,7 @@ static void qm_work_process(struct work_struct *work)
qm->status.eq_head++;
}
- if (eqe_num == QM_Q_DEPTH / 2 - 1) {
+ if (eqe_num == QM_EQ_DEPTH / 2 - 1) {
eqe_num = 0;
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
}
@@ -754,7 +757,7 @@ static void qm_init_qp_status(struct hisi_qp *qp)
qp_status->sq_tail = 0;
qp_status->cq_head = 0;
qp_status->cqc_phase = true;
- atomic_set(&qp_status->flags, 0);
+ atomic_set(&qp_status->used, 0);
}
static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
@@ -1046,17 +1049,7 @@ static int qm_regs_show(struct seq_file *s, void *unused)
return 0;
}
-static int qm_regs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, qm_regs_show, inode->i_private);
-}
-
-static const struct file_operations qm_regs_fops = {
- .owner = THIS_MODULE,
- .open = qm_regs_open,
- .read = seq_read,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qm_regs);
static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *pos)
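
DEFINE_SHOW_ATTRIBUTE(qm_regs) generates the qm_regs_open()/qm_regs_fops boilerplate deleted above from the existing qm_regs_show(). For any read-only, seq_file-based debugfs file the pattern looks roughly like this (foo is a placeholder name):

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static int foo_show(struct seq_file *s, void *unused)
    {
            seq_puts(s, "example\n");
            return 0;
    }
    /* expands to a foo_open() wrapping single_open(..., foo_show, ...) plus foo_fops */
    DEFINE_SHOW_ATTRIBUTE(foo);

    /* usage: debugfs_create_file("foo", 0444, parent_dentry, NULL, &foo_fops); */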
@@ -1370,7 +1363,13 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
return -EINVAL;
ret = kstrtou32(s, 0, &xeqe_id);
- if (ret || xeqe_id >= QM_Q_DEPTH) {
+ if (ret)
+ return -EINVAL;
+
+ if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
+ dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
+ return -EINVAL;
+ } else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
return -EINVAL;
}
@@ -1420,17 +1419,18 @@ static int qm_dbg_help(struct hisi_qm *qm, char *s)
static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
{
struct device *dev = &qm->pdev->dev;
- char *presult, *s;
+ char *presult, *s, *s_tmp;
int ret;
s = kstrdup(cmd_buf, GFP_KERNEL);
if (!s)
return -ENOMEM;
+ s_tmp = s;
presult = strsep(&s, " ");
if (!presult) {
- kfree(s);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_buffer_free;
}
if (!strcmp(presult, "sqc"))
@@ -1459,7 +1459,8 @@ static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
if (ret)
dev_info(dev, "Please echo help\n");
- kfree(s);
+err_buffer_free:
+ kfree(s_tmp);
return ret;
}
@@ -1644,7 +1645,7 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
struct hisi_qp_status *qp_status = &qp->qp_status;
u16 sq_tail = qp_status->sq_tail;
- if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH))
+ if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
return NULL;
return qp->sqe + sq_tail * qp->qm->sqe_size;
@@ -1981,7 +1982,7 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
atomic_read(&qp->qm->status.flags) == QM_STOP ||
qp->is_resetting)) {
- dev_info(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
+ dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
return -EAGAIN;
}
@@ -2215,6 +2216,82 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
}
/**
+ * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
+ * there is a user on the QM, return failure without doing anything.
+ * @qm: The qm to be frozen.
+ *
+ * This function freezes the QM so that SRIOV can then be disabled.
+ */
+static int qm_frozen(struct hisi_qm *qm)
+{
+ down_write(&qm->qps_lock);
+
+ if (qm->is_frozen) {
+ up_write(&qm->qps_lock);
+ return 0;
+ }
+
+ if (!qm->qp_in_used) {
+ qm->qp_in_used = qm->qp_num;
+ qm->is_frozen = true;
+ up_write(&qm->qps_lock);
+ return 0;
+ }
+
+ up_write(&qm->qps_lock);
+
+ return -EBUSY;
+}
+
+static int qm_try_frozen_vfs(struct pci_dev *pdev,
+ struct hisi_qm_list *qm_list)
+{
+ struct hisi_qm *qm, *vf_qm;
+ struct pci_dev *dev;
+ int ret = 0;
+
+ if (!qm_list || !pdev)
+ return -EINVAL;
+
+ /* Try to freeze all the VFs before disabling SRIOV */
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = qm->pdev;
+ if (dev == pdev)
+ continue;
+ if (pci_physfn(dev) == pdev) {
+ vf_qm = pci_get_drvdata(dev);
+ ret = qm_frozen(vf_qm);
+ if (ret)
+ goto frozen_fail;
+ }
+ }
+
+frozen_fail:
+ mutex_unlock(&qm_list->lock);
+
+ return ret;
+}
+
+/**
+ * hisi_qm_wait_task_finish() - Wait until the task is finished
+ * when removing the driver.
+ * @qm: The qm whose tasks need to finish.
+ * @qm_list: The list of all available devices.
+ */
+void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ while (qm_frozen(qm) ||
+ ((qm->fun_type == QM_HW_PF) &&
+ qm_try_frozen_vfs(qm->pdev, qm_list))) {
+ msleep(WAIT_PERIOD);
+ }
+
+ udelay(REMOVE_WAIT_DELAY);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
+
+/**
* hisi_qm_get_free_qp_num() - Get free number of qp in qm.
* @qm: The qm which want to get free qp.
*
@@ -2282,7 +2359,7 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
} while (0)
idr_init(&qm->qp_idr);
- qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) +
+ qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
@@ -2292,7 +2369,7 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
if (!qm->qdma.va)
return -ENOMEM;
- QM_INIT_BUF(qm, eqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
QM_INIT_BUF(qm, sqc, qm->qp_num);
QM_INIT_BUF(qm, cqc, qm->qp_num);
@@ -2338,6 +2415,7 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
mutex_init(&qm->mailbox_lock);
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
+ qm->is_frozen = false;
}
/**
@@ -2462,7 +2540,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
if (qm->ver == QM_HW_V1)
eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
- eqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
kfree(eqc);
@@ -2633,18 +2711,20 @@ static void qm_clear_queues(struct hisi_qm *qm)
/**
* hisi_qm_stop() - Stop a qm.
* @qm: The qm which will be stopped.
+ * @r: The reason to stop qm.
*
* This function stops qm and its qps, then qm can not accept request.
* Related resources are not released at this state, we can use hisi_qm_start
* to let qm start again.
*/
-int hisi_qm_stop(struct hisi_qm *qm)
+int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
{
struct device *dev = &qm->pdev->dev;
int ret = 0;
down_write(&qm->qps_lock);
+ qm->status.stop_reason = r;
if (!qm_avail_state(qm, QM_STOP)) {
ret = -EPERM;
goto err_unlock;
@@ -3081,11 +3161,12 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
/**
* hisi_qm_sriov_disable - disable virtual functions
- * @pdev: the PCI device
+ * @pdev: the PCI device.
+ * @is_frozen: true when all the VFs are frozen.
*
- * Return failure if there are VFs assigned already.
+ * Return failure if there are VFs assigned already or a VF is in use.
*/
-int hisi_qm_sriov_disable(struct pci_dev *pdev)
+int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
@@ -3094,7 +3175,12 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev)
return -EPERM;
}
- /* remove in hpre_pci_driver will be called to free VF resources */
+ /* While a VF is in use, SRIOV cannot be disabled. */
+ if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
+ pci_err(pdev, "Task is using its VF!\n");
+ return -EBUSY;
+ }
+
pci_disable_sriov(pdev);
return qm_clear_vft_config(qm);
}
@@ -3110,7 +3196,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
- return hisi_qm_sriov_disable(pdev);
+ return hisi_qm_sriov_disable(pdev, 0);
else
return hisi_qm_sriov_enable(pdev, num_vfs);
}
@@ -3290,10 +3376,10 @@ static int qm_set_msi(struct hisi_qm *qm, bool set)
return 0;
}
-static int qm_vf_reset_prepare(struct hisi_qm *qm)
+static int qm_vf_reset_prepare(struct hisi_qm *qm,
+ enum qm_stop_reason stop_reason)
{
struct hisi_qm_list *qm_list = qm->qm_list;
- int stop_reason = qm->status.stop_reason;
struct pci_dev *pdev = qm->pdev;
struct pci_dev *virtfn;
struct hisi_qm *vf_qm;
@@ -3306,8 +3392,10 @@ static int qm_vf_reset_prepare(struct hisi_qm *qm)
continue;
if (pci_physfn(virtfn) == pdev) {
- vf_qm->status.stop_reason = stop_reason;
- ret = hisi_qm_stop(vf_qm);
+ /* save VFs PCIE BAR configuration */
+ pci_save_state(virtfn);
+
+ ret = hisi_qm_stop(vf_qm, stop_reason);
if (ret)
goto stop_fail;
}
@@ -3346,15 +3434,14 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
}
if (qm->vfs_num) {
- ret = qm_vf_reset_prepare(qm);
+ ret = qm_vf_reset_prepare(qm, QM_SOFT_RESET);
if (ret) {
pci_err(pdev, "Fails to stop VFs!\n");
return ret;
}
}
- qm->status.stop_reason = QM_SOFT_RESET;
- ret = hisi_qm_stop(qm);
+ ret = hisi_qm_stop(qm, QM_SOFT_RESET);
if (ret) {
pci_err(pdev, "Fails to stop QM!\n");
return ret;
@@ -3471,6 +3558,9 @@ static int qm_vf_reset_done(struct hisi_qm *qm)
continue;
if (pci_physfn(virtfn) == pdev) {
+ /* enable VFs PCIE BAR configuration */
+ pci_restore_state(virtfn);
+
ret = qm_restart(vf_qm);
if (ret)
goto restart_fail;
@@ -3695,7 +3785,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
}
if (qm->vfs_num) {
- ret = qm_vf_reset_prepare(qm);
+ ret = qm_vf_reset_prepare(qm, QM_FLR);
if (ret) {
pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
ret);
@@ -3703,7 +3793,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
}
}
- ret = hisi_qm_stop(qm);
+ ret = hisi_qm_stop(qm, QM_FLR);
if (ret) {
pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
return;
@@ -3821,6 +3911,23 @@ err_aeq_irq:
return ret;
}
+/**
+ * hisi_qm_dev_shutdown() - Shut down the device.
+ * @pdev: The device to be shut down.
+ *
+ * This function stops the qm when the OS shuts down or reboots.
+ */
+void hisi_qm_dev_shutdown(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = hisi_qm_stop(qm, QM_NORMAL);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n");
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
+
static void hisi_qm_controller_reset(struct work_struct *rst_work)
{
struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
@@ -3834,6 +3941,58 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
}
/**
+ * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
+ * @qm: The qm to be added.
+ * @qm_list: The qm list.
+ *
+ * This function adds the qm to the qm list, and registers the algorithms
+ * with crypto when the qm is the first one on the list.
+ */
+int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ int flag = 0;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ if (list_empty(&qm_list->list))
+ flag = 1;
+ list_add_tail(&qm->list, &qm_list->list);
+ mutex_unlock(&qm_list->lock);
+
+ if (flag) {
+ ret = qm_list->register_to_crypto();
+ if (ret) {
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
+
+/**
+ * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
+ * qm list.
+ * @qm: The qm to be deleted.
+ * @qm_list: The qm list.
+ *
+ * This function deletes the qm from the qm list, and unregisters the
+ * algorithms from crypto when the list becomes empty.
+ */
+void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+
+ if (list_empty(&qm_list->list))
+ qm_list->unregister_from_crypto();
+}
+EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
+
+/**
* hisi_qm_init() - Initialize configures about qm.
* @qm: The qm needing init.
*
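
hisi_qm_stop() now records why the QM is being stopped, and hisi_qm_dev_shutdown() gives the accelerator drivers a common PCI .shutdown hook; the hpre/sec/zip hunks in this patch wire both up. A sketch of the intended use, with foo_* as placeholder names (QM_NORMAL, QM_SOFT_RESET and QM_FLR are the stop reasons visible in this series):

    static void foo_remove(struct pci_dev *pdev)
    {
            struct hisi_qm *qm = pci_get_drvdata(pdev);

            /* normal teardown; reset paths pass QM_SOFT_RESET or QM_FLR instead */
            if (hisi_qm_stop(qm, QM_NORMAL))
                    pci_err(pdev, "failed to stop qm!\n");
            hisi_qm_uninit(qm);
    }

    static struct pci_driver foo_pci_driver = {
            .name     = "foo_acc",
            .remove   = foo_remove,
            /* stop the QM cleanly on reboot or power-off */
            .shutdown = hisi_qm_dev_shutdown,
    };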
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 6c1d3c7d64ee..0420f4ce7197 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -79,7 +79,7 @@
#define QM_BASE_CE QM_ECC_1BIT
#define QM_Q_DEPTH 1024
-
+#define QM_MIN_QNUM 2
#define HISI_ACC_SGL_SGE_NR_MAX 255
/* page number for queue file region */
@@ -193,6 +193,8 @@ struct hisi_qm_err_ini {
struct hisi_qm_list {
struct mutex lock;
struct list_head list;
+ int (*register_to_crypto)(void);
+ void (*unregister_from_crypto)(void);
};
struct hisi_qm {
@@ -243,6 +245,7 @@ struct hisi_qm {
const char *algs;
bool use_sva;
+ bool is_frozen;
resource_size_t phys_base;
resource_size_t phys_size;
struct uacce_device *uacce;
@@ -306,7 +309,7 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp,
}
ret = kstrtou32(val, 10, &n);
- if (ret || !n || n > q_num)
+ if (ret || n < QM_MIN_QNUM || n > q_num)
return -EINVAL;
return param_set_int(val, kp);
@@ -336,26 +339,10 @@ static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
mutex_init(&qm_list->lock);
}
-static inline void hisi_qm_add_to_list(struct hisi_qm *qm,
- struct hisi_qm_list *qm_list)
-{
- mutex_lock(&qm_list->lock);
- list_add_tail(&qm->list, &qm_list->list);
- mutex_unlock(&qm_list->lock);
-}
-
-static inline void hisi_qm_del_from_list(struct hisi_qm *qm,
- struct hisi_qm_list *qm_list)
-{
- mutex_lock(&qm_list->lock);
- list_del(&qm->list);
- mutex_unlock(&qm_list->lock);
-}
-
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
-int hisi_qm_stop(struct hisi_qm *qm);
+int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type);
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
@@ -367,7 +354,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
-int hisi_qm_sriov_disable(struct pci_dev *pdev);
+int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
@@ -390,4 +377,8 @@ void hisi_acc_free_sgl_pool(struct device *dev,
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
u8 alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
+void hisi_qm_dev_shutdown(struct pci_dev *pdev);
+void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
#endif
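
QM_MIN_QNUM tightens the module-parameter check so that fewer than two queues is rejected, which is why the pf_q_num descriptions elsewhere in this patch change from "1-..." to "2-...". A sketch of how such a parameter is typically wired up (foo_* names and PCI_DEVICE_ID_FOO_PF are placeholders; q_num_set() is the qm.h helper shown in this hunk):

    static int foo_pf_q_num_set(const char *val, const struct kernel_param *kp)
    {
            /* rejects non-numbers, values below QM_MIN_QNUM (2) and values above the device limit */
            return q_num_set(val, kp, PCI_DEVICE_ID_FOO_PF);
    }

    static const struct kernel_param_ops foo_pf_q_num_ops = {
            .set = foo_pf_q_num_set,
            .get = param_get_int,
    };

    static u32 pf_q_num = 64;
    module_param_cb(pf_q_num, &foo_pf_q_num_ops, &pf_q_num, 0444);
    MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (2-1024)");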
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 497969ae8b23..bb493423668c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -66,8 +66,6 @@
#define SEC_SQE_AEAD_FLAG 3
#define SEC_SQE_DONE 0x1
-static atomic_t sec_active_devs;
-
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -342,11 +340,14 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
ret = sec_alloc_pbuf_resource(dev, res);
if (ret) {
dev_err(dev, "fail to alloc pbuf dma resource!\n");
- goto alloc_fail;
+ goto alloc_pbuf_fail;
}
}
return 0;
+alloc_pbuf_fail:
+ if (ctx->alg_type == SEC_AEAD)
+ sec_free_mac_resource(dev, qp_ctx->res);
alloc_fail:
sec_free_civ_resource(dev, res);
@@ -457,8 +458,10 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
ctx->fake_req_limit = QM_Q_DEPTH >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
- if (!ctx->qp_ctx)
- return -ENOMEM;
+ if (!ctx->qp_ctx) {
+ ret = -ENOMEM;
+ goto err_destroy_qps;
+ }
for (i = 0; i < sec->ctx_q_num; i++) {
ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
@@ -467,12 +470,15 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
}
return 0;
+
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
- sec_destroy_qps(ctx->qps, sec->ctx_q_num);
kfree(ctx->qp_ctx);
+err_destroy_qps:
+ sec_destroy_qps(ctx->qps, sec->ctx_q_num);
+
return ret;
}
@@ -1633,33 +1639,24 @@ static struct aead_alg sec_aeads[] = {
int sec_register_to_crypto(void)
{
- int ret = 0;
+ int ret;
/* To avoid repeat register */
- if (atomic_add_return(1, &sec_active_devs) == 1) {
- ret = crypto_register_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
- if (ret)
- return ret;
-
- ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
- if (ret)
- goto reg_aead_fail;
- }
-
- return ret;
-
-reg_aead_fail:
- crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers));
+ ret = crypto_register_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ if (ret)
+ return ret;
+ ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ if (ret)
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(void)
{
- if (atomic_sub_return(1, &sec_active_devs) == 0) {
- crypto_unregister_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
- crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
- }
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 2297425486cb..548896394c4b 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -99,7 +99,11 @@ struct sec_dfx_item {
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
-static struct hisi_qm_list sec_devices;
+
+static struct hisi_qm_list sec_devices = {
+ .register_to_crypto = sec_register_to_crypto,
+ .unregister_from_crypto = sec_unregister_from_crypto,
+};
static const struct sec_hw_error sec_hw_errors[] = {
{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
@@ -165,7 +169,7 @@ static const struct kernel_param_ops sec_pf_q_num_ops = {
static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
-MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");
static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
@@ -879,29 +883,26 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
pci_warn(pdev, "Failed to init debugfs!\n");
- hisi_qm_add_to_list(qm, &sec_devices);
-
- ret = sec_register_to_crypto();
+ ret = hisi_qm_alg_register(qm, &sec_devices);
if (ret < 0) {
pr_err("Failed to register driver to crypto.\n");
- goto err_remove_from_list;
+ goto err_qm_stop;
}
if (qm->fun_type == QM_HW_PF && vfs_num) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
- goto err_crypto_unregister;
+ goto err_alg_unregister;
}
return 0;
-err_crypto_unregister:
- sec_unregister_from_crypto();
+err_alg_unregister:
+ hisi_qm_alg_unregister(qm, &sec_devices);
-err_remove_from_list:
- hisi_qm_del_from_list(qm, &sec_devices);
+err_qm_stop:
sec_debugfs_exit(qm);
- hisi_qm_stop(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
sec_probe_uninit(qm);
@@ -914,19 +915,16 @@ err_qm_uninit:
static void sec_remove(struct pci_dev *pdev)
{
- struct sec_dev *sec = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &sec->qm;
-
- sec_unregister_from_crypto();
-
- hisi_qm_del_from_list(qm, &sec_devices);
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ hisi_qm_wait_task_finish(qm, &sec_devices);
+ hisi_qm_alg_unregister(qm, &sec_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
- hisi_qm_sriov_disable(pdev);
+ hisi_qm_sriov_disable(pdev, qm->is_frozen);
sec_debugfs_exit(qm);
- (void)hisi_qm_stop(qm);
+ (void)hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
sec_debug_regs_clear(qm);
@@ -950,6 +948,7 @@ static struct pci_driver sec_pci_driver = {
.remove = sec_remove,
.err_handler = &sec_err_handler,
.sriov_configure = hisi_qm_sriov_configure,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void sec_register_debugfs(void)
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 4484be13812b..92397f993e23 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -9,20 +9,6 @@
#include <linux/list.h>
#include "../qm.h"
-/* hisi_zip_sqe dw3 */
-#define HZIP_BD_STATUS_M GENMASK(7, 0)
-/* hisi_zip_sqe dw7 */
-#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
-/* hisi_zip_sqe dw8 */
-#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
-/* hisi_zip_sqe dw9 */
-#define HZIP_REQ_TYPE_M GENMASK(7, 0)
-#define HZIP_ALG_TYPE_ZLIB 0x02
-#define HZIP_ALG_TYPE_GZIP 0x03
-#define HZIP_BUF_TYPE_M GENMASK(11, 8)
-#define HZIP_PBUFFER 0x0
-#define HZIP_SGL 0x1
-
enum hisi_zip_error_type {
/* negative compression */
HZIP_NC_ERR = 0x0d,
@@ -39,7 +25,6 @@ struct hisi_zip_ctrl;
struct hisi_zip {
struct hisi_qm qm;
- struct list_head list;
struct hisi_zip_ctrl *ctrl;
struct hisi_zip_dfx dfx;
};
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 01fd6a78111d..08b4660b014c 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -6,6 +6,20 @@
#include <linux/scatterlist.h>
#include "zip.h"
+/* hisi_zip_sqe dw3 */
+#define HZIP_BD_STATUS_M GENMASK(7, 0)
+/* hisi_zip_sqe dw7 */
+#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
+/* hisi_zip_sqe dw8 */
+#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
+/* hisi_zip_sqe dw9 */
+#define HZIP_REQ_TYPE_M GENMASK(7, 0)
+#define HZIP_ALG_TYPE_ZLIB 0x02
+#define HZIP_ALG_TYPE_GZIP 0x03
+#define HZIP_BUF_TYPE_M GENMASK(11, 8)
+#define HZIP_PBUFFER 0x0
+#define HZIP_SGL 0x1
+
#define HZIP_ZLIB_HEAD_SIZE 2
#define HZIP_GZIP_HEAD_SIZE 10
@@ -16,22 +30,29 @@
#define GZIP_HEAD_FLG_SHIFT 3
#define GZIP_HEAD_FEXTRA_SHIFT 10
-#define GZIP_HEAD_FEXTRA_XLEN 2
+#define GZIP_HEAD_FEXTRA_XLEN 2UL
#define GZIP_HEAD_FHCRC_SIZE 2
-#define HZIP_CTX_Q_NUM 2
#define HZIP_GZIP_HEAD_BUF 256
#define HZIP_ALG_PRIORITY 300
#define HZIP_SGL_SGE_NR 10
static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
-static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {0x1f, 0x8b, 0x08, 0x0, 0x0,
- 0x0, 0x0, 0x0, 0x0, 0x03};
+static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {
+ 0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03
+};
+
enum hisi_zip_alg_type {
HZIP_ALG_TYPE_COMP = 0,
HZIP_ALG_TYPE_DECOMP = 1,
};
+enum {
+ HZIP_QPC_COMP,
+ HZIP_QPC_DECOMP,
+ HZIP_CTX_Q_NUM
+};
+
#define COMP_NAME_TO_TYPE(alg_name) \
(!strcmp((alg_name), "zlib-deflate") ? HZIP_ALG_TYPE_ZLIB : \
!strcmp((alg_name), "gzip") ? HZIP_ALG_TYPE_GZIP : 0) \
@@ -46,13 +67,13 @@ enum hisi_zip_alg_type {
struct hisi_zip_req {
struct acomp_req *req;
- int sskip;
- int dskip;
+ u32 sskip;
+ u32 dskip;
struct hisi_acc_hw_sgl *hw_src;
struct hisi_acc_hw_sgl *hw_dst;
dma_addr_t dma_src;
dma_addr_t dma_dst;
- int req_id;
+ u16 req_id;
};
struct hisi_zip_req_q {
@@ -71,8 +92,6 @@ struct hisi_zip_qp_ctx {
};
struct hisi_zip_ctx {
-#define QPC_COMP 0
-#define QPC_DECOMP 1
struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
};
@@ -116,7 +135,7 @@ static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
dma_addr_t s_addr, dma_addr_t d_addr, u32 slen,
- u32 dlen, int sskip, int dskip)
+ u32 dlen, u32 sskip, u32 dskip)
{
memset(sqe, 0, sizeof(struct hisi_zip_sqe));
@@ -143,7 +162,7 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
- dev_err(dev, "start qp failed!\n");
+ dev_err(dev, "failed to start qp (%d)!\n", ret);
return ret;
}
@@ -166,7 +185,7 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);
if (ret) {
- pr_err("Can not create zip qps!\n");
+ pr_err("failed to create zip qps (%d)!\n", ret);
return -ENODEV;
}
@@ -264,11 +283,11 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
return 0;
err_free_loop1:
- kfree(ctx->qp_ctx[QPC_DECOMP].req_q.req_bitmap);
+ kfree(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
err_free_loop0:
- kfree(ctx->qp_ctx[QPC_COMP].req_q.q);
+ kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
err_free_bitmap:
- kfree(ctx->qp_ctx[QPC_COMP].req_q.req_bitmap);
+ kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
return ret;
}
@@ -303,8 +322,8 @@ static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
return 0;
err_free_sgl_pool0:
- hisi_acc_free_sgl_pool(&ctx->qp_ctx[QPC_COMP].qp->qm->pdev->dev,
- ctx->qp_ctx[QPC_COMP].sgl_pool);
+ hisi_acc_free_sgl_pool(&ctx->qp_ctx[HZIP_QPC_COMP].qp->qm->pdev->dev,
+ ctx->qp_ctx[HZIP_QPC_COMP].sgl_pool);
return -ENOMEM;
}
@@ -342,7 +361,6 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
atomic64_inc(&dfx->recv_cnt);
status = sqe->dw3 & HZIP_BD_STATUS_M;
-
if (status != 0 && status != HZIP_NC_ERR) {
dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
@@ -377,19 +395,28 @@ static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
{
const char *alg_name = crypto_tfm_alg_name(&tfm->base);
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+ struct device *dev;
int ret;
ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
- if (ret)
+ if (ret) {
+ pr_err("failed to init ctx (%d)!\n", ret);
return ret;
+ }
+
+ dev = &ctx->qp_ctx[0].qp->qm->pdev->dev;
ret = hisi_zip_create_req_q(ctx);
- if (ret)
+ if (ret) {
+ dev_err(dev, "failed to create request queue (%d)!\n", ret);
goto err_ctx_exit;
+ }
ret = hisi_zip_create_sgl_pool(ctx);
- if (ret)
+ if (ret) {
+ dev_err(dev, "failed to create sgl pool (%d)!\n", ret);
goto err_release_req_q;
+ }
hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb);
@@ -419,13 +446,15 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type)
int ret;
ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
- if (ret != head_size)
+ if (ret != head_size) {
+ pr_err("the head size of buffer is wrong (%d)!\n", ret);
return -ENOMEM;
+ }
return head_size;
}
-static size_t get_gzip_head_size(struct scatterlist *sgl)
+static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
{
char buf[HZIP_GZIP_HEAD_BUF];
@@ -434,13 +463,20 @@ static size_t get_gzip_head_size(struct scatterlist *sgl)
return __get_gzip_head_size(buf);
}
-static size_t get_comp_head_size(struct scatterlist *src, u8 req_type)
+static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
{
+ if (!acomp_req->src || !acomp_req->slen)
+ return -EINVAL;
+
+ if ((req_type == HZIP_ALG_TYPE_GZIP) &&
+ (acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
+ return -EINVAL;
+
switch (req_type) {
case HZIP_ALG_TYPE_ZLIB:
return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
case HZIP_ALG_TYPE_GZIP:
- return get_gzip_head_size(src);
+ return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
default:
pr_err("request type does not support!\n");
return -EINVAL;
@@ -462,7 +498,7 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
if (req_id >= req_q->size) {
write_unlock(&req_q->req_lock);
dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
- return ERR_PTR(-EBUSY);
+ return ERR_PTR(-EAGAIN);
}
set_bit(req_id, req_q->req_bitmap);
@@ -492,8 +528,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
struct hisi_zip_sqe zip_sqe;
- dma_addr_t input;
- dma_addr_t output;
+ dma_addr_t input, output;
int ret;
if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
@@ -501,8 +536,11 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
req->req_id << 1, &input);
- if (IS_ERR(req->hw_src))
+ if (IS_ERR(req->hw_src)) {
+ dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
+ PTR_ERR(req->hw_src));
return PTR_ERR(req->hw_src);
+ }
req->dma_src = input;
req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
@@ -510,6 +548,8 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
&output);
if (IS_ERR(req->hw_dst)) {
ret = PTR_ERR(req->hw_dst);
+ dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n",
+ ret);
goto err_unmap_input;
}
req->dma_dst = output;
@@ -524,6 +564,8 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
ret = hisi_qp_send(qp, &zip_sqe);
if (ret < 0) {
atomic64_inc(&dfx->send_busy_cnt);
+ ret = -EAGAIN;
+ dev_dbg_ratelimited(dev, "failed to send request!\n");
goto err_unmap_output;
}
@@ -539,23 +581,29 @@ err_unmap_input:
static int hisi_zip_acompress(struct acomp_req *acomp_req)
{
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
- struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_COMP];
+ struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
+ struct device *dev = &qp_ctx->qp->qm->pdev->dev;
struct hisi_zip_req *req;
int head_size;
int ret;
/* let's output compression head now */
head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
- if (head_size < 0)
- return -ENOMEM;
+ if (head_size < 0) {
+ dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
+ head_size);
+ return head_size;
+ }
- req = hisi_zip_create_req(acomp_req, qp_ctx, (size_t)head_size, true);
+ req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
if (IS_ERR(req))
return PTR_ERR(req);
ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS)
+ if (ret != -EINPROGRESS) {
+ dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
hisi_zip_remove_req(qp_ctx, req);
+ }
return ret;
}
@@ -563,20 +611,28 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
static int hisi_zip_adecompress(struct acomp_req *acomp_req)
{
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
- struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_DECOMP];
+ struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
+ struct device *dev = &qp_ctx->qp->qm->pdev->dev;
struct hisi_zip_req *req;
- size_t head_size;
- int ret;
+ int head_size, ret;
- head_size = get_comp_head_size(acomp_req->src, qp_ctx->qp->req_type);
+ head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
+ if (head_size < 0) {
+ dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
+ head_size);
+ return head_size;
+ }
req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
if (IS_ERR(req))
return PTR_ERR(req);
ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS)
+ if (ret != -EINPROGRESS) {
+ dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
+ ret);
hisi_zip_remove_req(qp_ctx, req);
+ }
return ret;
}
@@ -611,17 +667,17 @@ static struct acomp_alg hisi_zip_acomp_gzip = {
int hisi_zip_register_to_crypto(void)
{
- int ret = 0;
+ int ret;
ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
if (ret) {
- pr_err("Zlib acomp algorithm registration failed\n");
+ pr_err("failed to register to zlib (%d)!\n", ret);
return ret;
}
ret = crypto_register_acomp(&hisi_zip_acomp_gzip);
if (ret) {
- pr_err("Gzip acomp algorithm registration failed\n");
+ pr_err("failed to register to gzip (%d)!\n", ret);
crypto_unregister_acomp(&hisi_zip_acomp_zlib);
}
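
The decompress path above now trusts the fixed 2-byte zlib / 10-byte gzip header that add_comp_head() writes on the compress side, instead of parsing optional gzip FEXTRA fields, and it checks that the input actually contains at least that many bytes. A sketch of that check under placeholder names (it mirrors, rather than extends, the driver logic):

    #include <linux/types.h>

    #define FOO_ZLIB_HEAD_SIZE	2	/* 0x78 0x9c */
    #define FOO_GZIP_HEAD_SIZE	10	/* 1f 8b 08 00 00 00 00 00 00 03 */

    /* returns the number of header bytes to skip, or -EINVAL on a short input */
    static int foo_comp_head_size(bool is_gzip, unsigned int slen)
    {
            unsigned int head = is_gzip ? FOO_GZIP_HEAD_SIZE : FOO_ZLIB_HEAD_SIZE;

            return slen < head ? -EINVAL : head;
    }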
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index e2845b2c963d..4bd2c811abba 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -17,7 +17,6 @@
#define PCI_DEVICE_ID_ZIP_PF 0xa250
#define PCI_DEVICE_ID_ZIP_VF 0xa251
-#define HZIP_VF_NUM 63
#define HZIP_QUEUE_NUM_V1 4096
#define HZIP_QUEUE_NUM_V2 1024
@@ -30,18 +29,18 @@
#define DECOMP3_ENABLE BIT(5)
#define DECOMP4_ENABLE BIT(6)
#define DECOMP5_ENABLE BIT(7)
-#define ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \
+#define HZIP_ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \
DECOMP0_ENABLE | DECOMP1_ENABLE | \
DECOMP2_ENABLE | DECOMP3_ENABLE | \
DECOMP4_ENABLE | DECOMP5_ENABLE)
-#define DECOMP_CHECK_ENABLE BIT(16)
+#define HZIP_DECOMP_CHECK_ENABLE BIT(16)
#define HZIP_FSM_MAX_CNT 0x301008
#define HZIP_PORT_ARCA_CHE_0 0x301040
#define HZIP_PORT_ARCA_CHE_1 0x301044
#define HZIP_PORT_AWCA_CHE_0 0x301060
#define HZIP_PORT_AWCA_CHE_1 0x301064
-#define CACHE_ALL_EN 0xffffffff
+#define HZIP_CACHE_ALL_EN 0xffffffff
#define HZIP_BD_RUSER_32_63 0x301110
#define HZIP_SGL_RUSER_32_63 0x30111c
@@ -83,7 +82,7 @@
#define HZIP_PF_DEF_Q_BASE 0
#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
-#define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define HZIP_SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_WR_PORT BIT(11)
@@ -92,9 +91,13 @@
#define HZIP_SQE_MASK_OFFSET 64
#define HZIP_SQE_MASK_LEN 48
+#define HZIP_CNT_CLR_CE_EN BIT(0)
+#define HZIP_RO_CNT_CLR_CE_EN BIT(2)
+#define HZIP_RD_CNT_CLR_CE_EN (HZIP_CNT_CLR_CE_EN | \
+ HZIP_RO_CNT_CLR_CE_EN)
+
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
-static struct hisi_qm_list zip_devices;
struct hisi_zip_hw_error {
u32 int_msk;
@@ -106,6 +109,11 @@ struct zip_dfx_item {
u32 offset;
};
+static struct hisi_qm_list zip_devices = {
+ .register_to_crypto = hisi_zip_register_to_crypto,
+ .unregister_from_crypto = hisi_zip_unregister_from_crypto,
+};
+
static struct zip_dfx_item zip_dfx_files[] = {
{"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)},
{"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)},
@@ -153,7 +161,6 @@ struct ctrl_debug_file {
*/
struct hisi_zip_ctrl {
struct hisi_zip *hisi_zip;
- struct dentry *debug_root;
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};
@@ -216,7 +223,7 @@ static const struct kernel_param_ops pf_q_num_ops = {
static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
-MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");
static const struct kernel_param_ops vfs_num_ops = {
.set = vfs_num_set,
@@ -256,15 +263,16 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
/* qm cache */
writel(AXI_M_CFG, base + QM_AXI_M_CFG);
writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);
+
/* disable FLR triggered by BME(bus master enable) */
writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);
/* cache */
- writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
- writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
- writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
- writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);
/* user domain configurations */
writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
@@ -280,10 +288,10 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
}
/* let's open all compression/decompression cores */
- writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN,
+ writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN,
base + HZIP_CLOCK_GATE_CTRL);
- /* enable sqc writeback */
+ /* enable sqc, cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
@@ -309,7 +317,7 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
- qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
@@ -356,7 +364,7 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
if (val > qm->vfs_num)
return -EINVAL;
- /* Calculate curr_qm_qp_num and store */
+ /* Calculate curr_qm_qp_num according to the PF or VF Dev ID and store it */
if (val == 0) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
@@ -387,7 +395,7 @@ static u32 clear_enable_read(struct ctrl_debug_file *file)
struct hisi_qm *qm = file_to_qm(file);
return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
- SOFT_CTRL_CNT_CLR_CE_BIT;
+ HZIP_SOFT_CTRL_CNT_CLR_CE_BIT;
}
static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
@@ -399,14 +407,14 @@ static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
return -EINVAL;
tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
- ~SOFT_CTRL_CNT_CLR_CE_BIT) | val;
+ ~HZIP_SOFT_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
return 0;
}
-static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
{
struct ctrl_debug_file *file = filp->private_data;
char tbuf[HZIP_BUF_SIZE];
@@ -426,12 +434,13 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
return -EINVAL;
}
spin_unlock_irq(&file->lock);
- ret = sprintf(tbuf, "%u\n", val);
+ ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
-static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *pos)
{
struct ctrl_debug_file *file = filp->private_data;
char tbuf[HZIP_BUF_SIZE];
@@ -480,11 +489,10 @@ err_input:
static const struct file_operations ctrl_debug_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = ctrl_debug_read,
- .write = ctrl_debug_write,
+ .read = hisi_zip_ctrl_debug_read,
+ .write = hisi_zip_ctrl_debug_write,
};
-
static int zip_debugfs_atomic64_set(void *data, u64 val)
{
if (val)
@@ -505,10 +513,8 @@ static int zip_debugfs_atomic64_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
zip_debugfs_atomic64_set, "%llu\n");
-static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
+static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
- struct hisi_zip *hisi_zip = ctrl->hisi_zip;
- struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
@@ -517,9 +523,10 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
for (i = 0; i < HZIP_CORE_NUM; i++) {
if (i < HZIP_COMP_CORE_NUM)
- sprintf(buf, "comp_core%d", i);
+ scnprintf(buf, sizeof(buf), "comp_core%d", i);
else
- sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM);
+ scnprintf(buf, sizeof(buf), "decomp_core%d",
+ i - HZIP_COMP_CORE_NUM);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -529,7 +536,7 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
regset->base = qm->io_base + core_offsets[i];
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
debugfs_create_regset32("regs", 0444, tmp_d, regset);
}
@@ -548,33 +555,32 @@ static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) {
data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset);
debugfs_create_file(zip_dfx_files[i].name,
- 0644,
- tmp_dir,
- data,
- &zip_atomic64_ops);
+ 0644, tmp_dir, data,
+ &zip_atomic64_ops);
}
}
-static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
+static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
{
+ struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
int i;
for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&ctrl->files[i].lock);
- ctrl->files[i].ctrl = ctrl;
- ctrl->files[i].index = i;
+ spin_lock_init(&zip->ctrl->files[i].lock);
+ zip->ctrl->files[i].ctrl = zip->ctrl;
+ zip->ctrl->files[i].index = i;
debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
+ qm->debug.debug_root,
+ zip->ctrl->files + i,
&ctrl_debug_fops);
}
- return hisi_zip_core_debug_init(ctrl);
+ return hisi_zip_core_debug_init(qm);
}
-static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
+static int hisi_zip_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct dentry *dev_d;
int ret;
@@ -589,8 +595,7 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
goto failed_to_create;
if (qm->fun_type == QM_HW_PF) {
- hisi_zip->ctrl->debug_root = dev_d;
- ret = hisi_zip_ctrl_debug_init(hisi_zip->ctrl);
+ ret = hisi_zip_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
@@ -604,25 +609,36 @@ failed_to_create:
return ret;
}
-static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip)
+/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
+static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
+ int i, j;
+ /* clear current_qm */
writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ /* enable register read_clear bit */
+ writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
+ for (i = 0; i < ARRAY_SIZE(core_offsets); i++)
+ for (j = 0; j < ARRAY_SIZE(hzip_dfx_regs); j++)
+ readl(qm->io_base + core_offsets[i] +
+ hzip_dfx_regs[j].offset);
+
+ /* disable register read_clear bit */
writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
hisi_qm_debug_regs_clear(qm);
}
-static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip)
+static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
- if (qm->fun_type == QM_HW_PF)
- hisi_zip_debug_regs_clear(hisi_zip);
+ if (qm->fun_type == QM_HW_PF) {
+ hisi_zip_debug_regs_clear(qm);
+ qm->debug.curr_qm_qp_num = 0;
+ }
}
static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
@@ -634,7 +650,7 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
while (err->msg) {
if (err->int_msk & err_sts) {
dev_err(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
+ err->msg, err->int_msk);
if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) {
err_val = readl(qm->io_base +
@@ -642,9 +658,6 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n",
((err_val >>
HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF));
- dev_err(dev, "hisi-zip multi ecc sram addr=0x%x\n",
- (err_val >>
- HZIP_SRAM_ECC_ERR_ADDR_SHIFT));
}
}
err++;
@@ -729,7 +742,7 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip_set_user_domain_and_cache(qm);
hisi_qm_dev_err_init(qm);
- hisi_zip_debug_regs_clear(hisi_zip);
+ hisi_zip_debug_regs_clear(qm);
return 0;
}
@@ -747,6 +760,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF) {
qm->qp_base = HZIP_PF_DEF_Q_BASE;
qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &zip_devices;
} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
/*
@@ -803,32 +817,44 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm);
if (ret)
- goto err_qm_uninit;
+ goto err_dev_err_uninit;
- ret = hisi_zip_debugfs_init(hisi_zip);
+ ret = hisi_zip_debugfs_init(qm);
if (ret)
- dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
+ pci_err(pdev, "failed to init debugfs (%d)!\n", ret);
- hisi_qm_add_to_list(qm, &zip_devices);
+ ret = hisi_qm_alg_register(qm, &zip_devices);
+ if (ret < 0) {
+ pci_err(pdev, "failed to register driver to crypto!\n");
+ goto err_qm_stop;
+ }
if (qm->uacce) {
ret = uacce_register(qm->uacce);
- if (ret)
- goto err_qm_uninit;
+ if (ret) {
+ pci_err(pdev, "failed to register uacce (%d)!\n", ret);
+ goto err_qm_alg_unregister;
+ }
}
if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
- goto err_remove_from_list;
+ goto err_qm_alg_unregister;
}
return 0;
-err_remove_from_list:
- hisi_qm_del_from_list(qm, &zip_devices);
- hisi_zip_debugfs_exit(hisi_zip);
- hisi_qm_stop(qm);
+err_qm_alg_unregister:
+ hisi_qm_alg_unregister(qm, &zip_devices);
+
+err_qm_stop:
+ hisi_zip_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
+
+err_dev_err_uninit:
+ hisi_qm_dev_err_uninit(qm);
+
err_qm_uninit:
hisi_qm_uninit(qm);
@@ -837,18 +863,18 @@ err_qm_uninit:
static void hisi_zip_remove(struct pci_dev *pdev)
{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_zip->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
- if (qm->fun_type == QM_HW_PF && qm->vfs_num)
- hisi_qm_sriov_disable(pdev);
+ hisi_qm_wait_task_finish(qm, &zip_devices);
+ hisi_qm_alg_unregister(qm, &zip_devices);
- hisi_zip_debugfs_exit(hisi_zip);
- hisi_qm_stop(qm);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ hisi_qm_sriov_disable(pdev, qm->is_frozen);
+ hisi_zip_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
- hisi_qm_del_from_list(qm, &zip_devices);
}
static const struct pci_error_handlers hisi_zip_err_handler = {
@@ -866,6 +892,7 @@ static struct pci_driver hisi_zip_pci_driver = {
.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
hisi_qm_sriov_configure : NULL,
.err_handler = &hisi_zip_err_handler,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hisi_zip_register_debugfs(void)
@@ -890,29 +917,15 @@ static int __init hisi_zip_init(void)
ret = pci_register_driver(&hisi_zip_pci_driver);
if (ret < 0) {
+ hisi_zip_unregister_debugfs();
pr_err("Failed to register pci driver.\n");
- goto err_pci;
}
- ret = hisi_zip_register_to_crypto();
- if (ret < 0) {
- pr_err("Failed to register driver to crypto.\n");
- goto err_crypto;
- }
-
- return 0;
-
-err_crypto:
- pci_unregister_driver(&hisi_zip_pci_driver);
-err_pci:
- hisi_zip_unregister_debugfs();
-
return ret;
}
static void __exit hisi_zip_exit(void)
{
- hisi_zip_unregister_from_crypto();
pci_unregister_driver(&hisi_zip_pci_driver);
hisi_zip_unregister_debugfs();
}
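The hisi_zip hunks above pass struct hisi_qm around directly and give probe() a separate unwind label for each setup step (alg registration, QM start, error-reporting init), so a failure at any point releases exactly what was acquired, in reverse order. A minimal standalone sketch of that goto-unwind pattern follows; the init_*/undo_* names are illustrative stand-ins, not driver symbols.

/* Standalone sketch (not driver code): unwind in reverse order of setup. */
#include <stdio.h>

static int init_a(void)  { return 0; }   /* stand-ins for qm init / start / alg register */
static int init_b(void)  { return 0; }
static int init_c(void)  { return -1; }  /* pretend the last step fails */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int probe_sketch(void)
{
	int ret;

	ret = init_a();
	if (ret)
		return ret;

	ret = init_b();
	if (ret)
		goto err_undo_a;

	ret = init_c();
	if (ret)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return ret;
}

int main(void) { return probe_sketch() ? 1 : 0; }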
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 87226b7c2795..91f555ccbb31 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index fa7398e68858..eb2418450f12 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -304,6 +304,11 @@ static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
/* Enable access to all IFPP program memories */
writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
+
+ /* bypass the OCE, if present */
+ if (priv->flags & EIP197_OCE)
+ writel(EIP197_DEBUG_OCE_BYPASS, EIP197_PE(priv) +
+ EIP197_PE_DEBUG(pe));
}
}
@@ -1495,6 +1500,9 @@ static int safexcel_probe_generic(void *pdev,
hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
+ priv->hwconfig.icever = 0;
+ priv->hwconfig.ocever = 0;
+ priv->hwconfig.psever = 0;
if (priv->flags & SAFEXCEL_HW_EIP197) {
/* EIP197 */
peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));
@@ -1513,8 +1521,37 @@ static int safexcel_probe_generic(void *pdev,
EIP197_N_RINGS_MASK;
if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
priv->flags |= EIP197_PE_ARB;
- if (EIP206_OPT_ICE_TYPE(peopt) == 1)
+ if (EIP206_OPT_ICE_TYPE(peopt) == 1) {
priv->flags |= EIP197_ICE;
+ /* Detect ICE EIP207 class. engine and version */
+ version = readl(EIP197_PE(priv) +
+ EIP197_PE_ICE_VERSION(0));
+ if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
+ dev_err(dev, "EIP%d: ICE EIP207 not detected.\n",
+ peid);
+ return -ENODEV;
+ }
+ priv->hwconfig.icever = EIP197_VERSION_MASK(version);
+ }
+ if (EIP206_OPT_OCE_TYPE(peopt) == 1) {
+ priv->flags |= EIP197_OCE;
+ /* Detect EIP96PP packet stream editor and version */
+ version = readl(EIP197_PE(priv) + EIP197_PE_PSE_VERSION(0));
+ if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
+ dev_err(dev, "EIP%d: EIP96PP not detected.\n", peid);
+ return -ENODEV;
+ }
+ priv->hwconfig.psever = EIP197_VERSION_MASK(version);
+ /* Detect OCE EIP207 class. engine and version */
+ version = readl(EIP197_PE(priv) +
+ EIP197_PE_ICE_VERSION(0));
+ if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
+ dev_err(dev, "EIP%d: OCE EIP207 not detected.\n",
+ peid);
+ return -ENODEV;
+ }
+ priv->hwconfig.ocever = EIP197_VERSION_MASK(version);
+ }
/* If not a full TRC, then assume simple TRC */
if (!(hwopt & EIP197_OPT_HAS_TRC))
priv->flags |= EIP197_SIMPLE_TRC;
@@ -1552,13 +1589,14 @@ static int safexcel_probe_generic(void *pdev,
EIP197_PE_EIP96_OPTIONS(0));
/* Print single info line describing what we just detected */
- dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x,alg:%08x\n",
+ dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x(alg:%08x)/%x/%x/%x\n",
peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
priv->hwconfig.ppver, priv->hwconfig.pever,
- priv->hwconfig.algo_flags);
+ priv->hwconfig.algo_flags, priv->hwconfig.icever,
+ priv->hwconfig.ocever, priv->hwconfig.psever);
safexcel_configure(priv);
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 7c5fe382d272..9045f2d7f4c6 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -12,7 +12,9 @@
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
+#include <crypto/sha3.h>
#include <crypto/skcipher.h>
+#include <linux/types.h>
#define EIP197_HIA_VERSION_BE 0xca35
#define EIP197_HIA_VERSION_LE 0x35ca
@@ -22,6 +24,7 @@
#define EIP96_VERSION_LE 0x9f60
#define EIP201_VERSION_LE 0x36c9
#define EIP206_VERSION_LE 0x31ce
+#define EIP207_VERSION_LE 0x30cf
#define EIP197_REG_LO16(reg) (reg & 0xffff)
#define EIP197_REG_HI16(reg) ((reg >> 16) & 0xffff)
#define EIP197_VERSION_MASK(reg) ((reg >> 16) & 0xfff)
@@ -34,6 +37,7 @@
/* EIP206 OPTIONS ENCODING */
#define EIP206_OPT_ICE_TYPE(n) ((n>>8)&3)
+#define EIP206_OPT_OCE_TYPE(n) ((n>>10)&3)
/* EIP197 OPTIONS ENCODING */
#define EIP197_OPT_HAS_TRC BIT(31)
@@ -168,6 +172,7 @@
#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n)))
#define EIP197_PE_ICE_PPTF_CTRL(n) (0x0e00 + (0x2000 * (n)))
#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_ICE_VERSION(n) (0x0ffc + (0x2000 * (n)))
#define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n)))
#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
@@ -176,8 +181,11 @@
#define EIP197_PE_EIP96_FUNCTION2_EN(n) (0x1030 + (0x2000 * (n)))
#define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n)))
#define EIP197_PE_EIP96_VERSION(n) (0x13fc + (0x2000 * (n)))
+#define EIP197_PE_OCE_VERSION(n) (0x1bfc + (0x2000 * (n)))
#define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n)))
#define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n)))
+#define EIP197_PE_PSE_VERSION(n) (0x1efc + (0x2000 * (n)))
+#define EIP197_PE_DEBUG(n) (0x1ff4 + (0x2000 * (n)))
#define EIP197_PE_OPTIONS(n) (0x1ff8 + (0x2000 * (n)))
#define EIP197_PE_VERSION(n) (0x1ffc + (0x2000 * (n)))
#define EIP197_MST_CTRL 0xfff4
@@ -352,6 +360,9 @@
/* EIP197_PE_EIP96_TOKEN_CTRL2 */
#define EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE BIT(3)
+/* EIP197_PE_DEBUG */
+#define EIP197_DEBUG_OCE_BYPASS BIT(1)
+
/* EIP197_STRC_CONFIG */
#define EIP197_STRC_CONFIG_INIT BIT(31)
#define EIP197_STRC_CONFIG_LARGE_REC(s) (s<<8)
@@ -776,6 +787,7 @@ enum safexcel_flags {
EIP197_PE_ARB = BIT(2),
EIP197_ICE = BIT(3),
EIP197_SIMPLE_TRC = BIT(4),
+ EIP197_OCE = BIT(5),
};
struct safexcel_hwconfig {
@@ -783,7 +795,10 @@ struct safexcel_hwconfig {
int hwver;
int hiaver;
int ppver;
+ int icever;
int pever;
+ int ocever;
+ int psever;
int hwdataw;
int hwcfsize;
int hwrfsize;
@@ -819,8 +834,16 @@ struct safexcel_context {
struct crypto_async_request *req, bool *complete,
int *ret);
struct safexcel_context_record *ctxr;
+ struct safexcel_crypto_priv *priv;
dma_addr_t ctxr_dma;
+ union {
+ __le32 le[SHA3_512_BLOCK_SIZE / 4];
+ __be32 be[SHA3_512_BLOCK_SIZE / 4];
+ u32 word[SHA3_512_BLOCK_SIZE / 4];
+ u8 byte[SHA3_512_BLOCK_SIZE];
+ } ipad, opad;
+
int ring;
bool needs_inv;
bool exit_inv;
@@ -898,8 +921,9 @@ void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
void safexcel_inv_complete(struct crypto_async_request *req, int error);
-int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
- void *istate, void *ostate);
+int safexcel_hmac_setkey(struct safexcel_context *base, const u8 *key,
+ unsigned int keylen, const char *alg,
+ unsigned int state_sz);
/* available algorithms */
extern struct safexcel_alg_template safexcel_alg_ecb_des;
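The header change above moves the HMAC ipad/opad buffers into the shared safexcel_context and exposes them as a union of __le32, __be32, u32 and byte views sized for the largest (SHA3-512) block, so each caller can use the endianness view it needs without casting. A standalone sketch of the same multi-view idea, with plain uint32_t/uint8_t standing in for the kernel's annotated types:

/* Standalone sketch of the multi-view pad buffer (sizes illustrative). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 144   /* SHA3-512 block size in bytes */

union pad_views {
	uint32_t word[BLOCK_SIZE / 4];
	uint8_t  byte[BLOCK_SIZE];
};

int main(void)
{
	union pad_views ipad;

	memset(&ipad, 0, sizeof(ipad));
	ipad.word[0] = 0x01020304u;

	/* The byte view aliases the same storage as the word view. */
	printf("%02x %02x %02x %02x\n",
	       ipad.byte[0], ipad.byte[1], ipad.byte[2], ipad.byte[3]);
	return 0;
}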
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 1ac3253b7903..9bcfb79a030f 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -61,8 +61,6 @@ struct safexcel_cipher_ctx {
/* All the below is AEAD specific */
u32 hash_alg;
u32 state_sz;
- __be32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
- __be32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
struct crypto_cipher *hkaes;
struct crypto_aead *fback;
@@ -375,7 +373,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
int ret, i;
@@ -406,11 +404,11 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_ahash_export_state istate, ostate;
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_authenc_keys keys;
struct crypto_aes_ctx aes;
int err = -EINVAL, i;
+ const char *alg;
if (unlikely(crypto_authenc_extractkeys(&keys, key, len)))
goto badkey;
@@ -465,53 +463,37 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
/* Auth key */
switch (ctx->hash_alg) {
case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
- if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey,
- keys.authkeylen, &istate, &ostate))
- goto badkey;
+ alg = "safexcel-sha1";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA224:
- if (safexcel_hmac_setkey("safexcel-sha224", keys.authkey,
- keys.authkeylen, &istate, &ostate))
- goto badkey;
+ alg = "safexcel-sha224";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA256:
- if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey,
- keys.authkeylen, &istate, &ostate))
- goto badkey;
+ alg = "safexcel-sha256";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA384:
- if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey,
- keys.authkeylen, &istate, &ostate))
- goto badkey;
+ alg = "safexcel-sha384";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA512:
- if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey,
- keys.authkeylen, &istate, &ostate))
- goto badkey;
+ alg = "safexcel-sha512";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SM3:
- if (safexcel_hmac_setkey("safexcel-sm3", keys.authkey,
- keys.authkeylen, &istate, &ostate))
- goto badkey;
+ alg = "safexcel-sm3";
break;
default:
dev_err(priv->dev, "aead: unsupported hash algorithm\n");
goto badkey;
}
- if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
- (memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
- memcmp(ctx->opad, ostate.state, ctx->state_sz)))
- ctx->base.needs_inv = true;
+ if (safexcel_hmac_setkey(&ctx->base, keys.authkey, keys.authkeylen,
+ alg, ctx->state_sz))
+ goto badkey;
/* Now copy the keys into the context */
for (i = 0; i < keys.enckeylen / sizeof(u32); i++)
ctx->key[i] = cpu_to_le32(((u32 *)keys.enckey)[i]);
ctx->key_len = keys.enckeylen;
- memcpy(ctx->ipad, &istate.state, ctx->state_sz);
- memcpy(ctx->opad, &ostate.state, ctx->state_sz);
-
memzero_explicit(&keys, sizeof(keys));
return 0;
@@ -525,7 +507,7 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
struct safexcel_cipher_req *sreq,
struct safexcel_command_desc *cdesc)
{
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ctrl_size = ctx->key_len / sizeof(u32);
cdesc->control_data.control1 = ctx->mode;
@@ -692,7 +674,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
struct skcipher_request *areq = skcipher_request_cast(base);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct safexcel_command_desc *cdesc;
struct safexcel_command_desc *first_cdesc = NULL;
struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
@@ -718,10 +700,10 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
totlen_dst += digestsize;
memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
- ctx->ipad, ctx->state_sz);
+ &ctx->base.ipad, ctx->state_sz);
if (!ctx->xcm)
memcpy(ctx->base.ctxr->data + (ctx->key_len +
- ctx->state_sz) / sizeof(u32), ctx->opad,
+ ctx->state_sz) / sizeof(u32), &ctx->base.opad,
ctx->state_sz);
} else if ((ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
(sreq->direction == SAFEXCEL_DECRYPT)) {
@@ -1020,7 +1002,7 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *base,
int ring, int *commands, int *results)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
@@ -1039,7 +1021,7 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
@@ -1072,7 +1054,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct safexcel_cipher_req *sreq = aead_request_ctx(req);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
@@ -1094,7 +1076,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
struct safexcel_inv_result *result)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ring = ctx->base.ring;
init_completion(&result->completion);
@@ -1157,7 +1139,7 @@ static int safexcel_queue_req(struct crypto_async_request *base,
enum safexcel_cipher_direction dir)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret, ring;
sreq->needs_inv = false;
@@ -1211,7 +1193,7 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
sizeof(struct safexcel_cipher_req));
- ctx->priv = tmpl->priv;
+ ctx->base.priv = tmpl->priv;
ctx->base.send = safexcel_skcipher_send;
ctx->base.handle_result = safexcel_skcipher_handle_result;
@@ -1237,7 +1219,7 @@ static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm)
static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
if (safexcel_cipher_cra_exit(tfm))
@@ -1257,7 +1239,7 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
if (safexcel_cipher_cra_exit(tfm))
@@ -1431,7 +1413,7 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
int ret, i;
unsigned int keylen;
@@ -1505,7 +1487,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
ret = verify_skcipher_des_key(ctfm, key);
@@ -1604,7 +1586,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int err;
err = verify_skcipher_des3_key(ctfm, key);
@@ -1723,7 +1705,7 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
sizeof(struct safexcel_cipher_req));
- ctx->priv = tmpl->priv;
+ ctx->base.priv = tmpl->priv;
ctx->alg = SAFEXCEL_AES; /* default */
ctx->blocksz = AES_BLOCK_SIZE;
@@ -2466,7 +2448,7 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
int ret, i;
unsigned int keylen;
@@ -2580,7 +2562,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
u32 hashkey[AES_BLOCK_SIZE >> 2];
int ret, i;
@@ -2618,7 +2600,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
- if (be32_to_cpu(ctx->ipad[i]) != hashkey[i]) {
+ if (be32_to_cpu(ctx->base.ipad.be[i]) != hashkey[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2626,7 +2608,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
}
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
- ctx->ipad[i] = cpu_to_be32(hashkey[i]);
+ ctx->base.ipad.be[i] = cpu_to_be32(hashkey[i]);
memzero_explicit(hashkey, AES_BLOCK_SIZE);
memzero_explicit(&aes, sizeof(aes));
@@ -2693,7 +2675,7 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
int ret, i;
@@ -2714,7 +2696,7 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
for (i = 0; i < len / sizeof(u32); i++) {
ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
- ctx->ipad[i + 2 * AES_BLOCK_SIZE / sizeof(u32)] =
+ ctx->base.ipad.be[i + 2 * AES_BLOCK_SIZE / sizeof(u32)] =
cpu_to_be32(aes.key_enc[i]);
}
@@ -2815,7 +2797,7 @@ struct safexcel_alg_template safexcel_alg_ccm = {
static void safexcel_chacha20_setkey(struct safexcel_cipher_ctx *ctx,
const u8 *key)
{
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, CHACHA_KEY_SIZE))
@@ -3084,7 +3066,7 @@ static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
if (len != SM4_KEY_SIZE)
return -EINVAL;
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 16a467969d8e..56d5ccb5cc00 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -20,7 +20,6 @@
struct safexcel_ahash_ctx {
struct safexcel_context base;
- struct safexcel_crypto_priv *priv;
u32 alg;
u8 key_sz;
@@ -29,9 +28,6 @@ struct safexcel_ahash_ctx {
bool fb_init_done;
bool fb_do_setkey;
- __le32 ipad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
- __le32 opad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
-
struct crypto_cipher *kaes;
struct crypto_ahash *fback;
struct crypto_shash *shpre;
@@ -111,7 +107,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
struct safexcel_ahash_req *req,
struct safexcel_command_desc *cdesc)
{
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
u64 count = 0;
cdesc->control_data.control0 = ctx->alg;
@@ -124,7 +120,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
*/
if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM)) {
if (req->xcbcmac)
- memcpy(ctx->base.ctxr->data, ctx->ipad, ctx->key_sz);
+ memcpy(ctx->base.ctxr->data, &ctx->base.ipad, ctx->key_sz);
else
memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
@@ -206,7 +202,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
} else { /* HMAC */
/* Need outer digest for HMAC finalization */
memcpy(ctx->base.ctxr->data + (req->state_sz >> 2),
- ctx->opad, req->state_sz);
+ &ctx->base.opad, req->state_sz);
/* Single pass HMAC - no digest count */
cdesc->control_data.control0 |=
@@ -275,7 +271,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
memcpy(sreq->cache, sreq->state,
crypto_ahash_digestsize(ahash));
- memcpy(sreq->state, ctx->opad, sreq->digest_sz);
+ memcpy(sreq->state, &ctx->base.opad, sreq->digest_sz);
sreq->len = sreq->block_sz +
crypto_ahash_digestsize(ahash);
@@ -316,7 +312,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
@@ -379,10 +375,14 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
// 10- padding for XCBCMAC & CMAC
req->cache[cache_len + skip] = 0x80;
// HW will use K2 iso K3 - compensate!
- for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
- ((__be32 *)req->cache)[i] ^=
- cpu_to_be32(le32_to_cpu(
- ctx->ipad[i] ^ ctx->ipad[i + 4]));
+ for (i = 0; i < AES_BLOCK_SIZE / 4; i++) {
+ u32 *cache = (void *)req->cache;
+ u32 *ipad = ctx->base.ipad.word;
+ u32 x;
+
+ x = ipad[i] ^ ipad[i + 4];
+ cache[i] ^= swab(x);
+ }
}
cache_len = AES_BLOCK_SIZE;
queued = queued + extra;
@@ -591,7 +591,7 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
- ret = safexcel_invalidate_cache(async, ctx->priv,
+ ret = safexcel_invalidate_cache(async, ctx->base.priv,
ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
@@ -620,7 +620,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async,
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
struct safexcel_inv_result result = {};
@@ -688,7 +688,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret, ring;
req->needs_inv = false;
@@ -702,7 +702,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
/* invalidate for HMAC finish with odigest changed */
(req->finish && req->hmac &&
memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
- ctx->opad, req->state_sz))))
+ &ctx->base.opad, req->state_sz))))
/*
* We're still setting needs_inv here, even though it is
* cleared right away, because the needs_inv flag can be
@@ -803,7 +803,7 @@ static int safexcel_ahash_final(struct ahash_request *areq)
ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5 &&
req->len == sizeof(u32) && !areq->nbytes)) {
/* Zero length CRC32 */
- memcpy(areq->result, ctx->ipad, sizeof(u32));
+ memcpy(areq->result, &ctx->base.ipad, sizeof(u32));
return 0;
} else if (unlikely(ctx->cbcmac && req->len == AES_BLOCK_SIZE &&
!areq->nbytes)) {
@@ -815,9 +815,12 @@ static int safexcel_ahash_final(struct ahash_request *areq)
/* Zero length (X)CBC/CMAC */
int i;
- for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
- ((__be32 *)areq->result)[i] =
- cpu_to_be32(le32_to_cpu(ctx->ipad[i + 4]));//K3
+ for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
+ u32 *result = (void *)areq->result;
+
+ /* K3 */
+ result[i] = swab(ctx->base.ipad.word[i + 4]);
+ }
areq->result[0] ^= 0x80; // 10- padding
crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
return 0;
@@ -917,7 +920,7 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
container_of(__crypto_ahash_alg(tfm->__crt_alg),
struct safexcel_alg_template, alg.ahash);
- ctx->priv = tmpl->priv;
+ ctx->base.priv = tmpl->priv;
ctx->base.send = safexcel_ahash_send;
ctx->base.handle_result = safexcel_handle_result;
ctx->fb_do_setkey = false;
@@ -956,7 +959,7 @@ static int safexcel_sha1_digest(struct ahash_request *areq)
static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
/* context not allocated, skip invalidation */
@@ -1012,7 +1015,7 @@ static int safexcel_hmac_sha1_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, SHA1_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA1_BLOCK_SIZE;
req->processed = SHA1_BLOCK_SIZE;
@@ -1082,8 +1085,7 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq,
}
/* Avoid leaking */
- memzero_explicit(keydup, keylen);
- kfree(keydup);
+ kfree_sensitive(keydup);
if (ret)
return ret;
@@ -1135,8 +1137,9 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
return crypto_ahash_export(areq, state);
}
-int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
- void *istate, void *ostate)
+static int __safexcel_hmac_setkey(const char *alg, const u8 *key,
+ unsigned int keylen,
+ void *istate, void *ostate)
{
struct ahash_request *areq;
struct crypto_ahash *tfm;
@@ -1185,30 +1188,38 @@ free_ahash:
return ret;
}
-static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen, const char *alg,
- unsigned int state_sz)
+int safexcel_hmac_setkey(struct safexcel_context *base, const u8 *key,
+ unsigned int keylen, const char *alg,
+ unsigned int state_sz)
{
- struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = base->priv;
struct safexcel_ahash_export_state istate, ostate;
int ret;
- ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
+ ret = __safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
if (ret)
return ret;
- if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr &&
- (memcmp(ctx->ipad, istate.state, state_sz) ||
- memcmp(ctx->opad, ostate.state, state_sz)))
- ctx->base.needs_inv = true;
+ if (priv->flags & EIP197_TRC_CACHE && base->ctxr &&
+ (memcmp(&base->ipad, istate.state, state_sz) ||
+ memcmp(&base->opad, ostate.state, state_sz)))
+ base->needs_inv = true;
- memcpy(ctx->ipad, &istate.state, state_sz);
- memcpy(ctx->opad, &ostate.state, state_sz);
+ memcpy(&base->ipad, &istate.state, state_sz);
+ memcpy(&base->opad, &ostate.state, state_sz);
return 0;
}
+static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen, const char *alg,
+ unsigned int state_sz)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ return safexcel_hmac_setkey(&ctx->base, key, keylen, alg, state_sz);
+}
+
static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -1377,7 +1388,7 @@ static int safexcel_hmac_sha224_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, SHA256_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA256_BLOCK_SIZE;
req->processed = SHA256_BLOCK_SIZE;
@@ -1449,7 +1460,7 @@ static int safexcel_hmac_sha256_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, SHA256_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA256_BLOCK_SIZE;
req->processed = SHA256_BLOCK_SIZE;
@@ -1635,7 +1646,7 @@ static int safexcel_hmac_sha512_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, SHA512_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA512_BLOCK_SIZE;
req->processed = SHA512_BLOCK_SIZE;
@@ -1707,7 +1718,7 @@ static int safexcel_hmac_sha384_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, SHA512_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA512_BLOCK_SIZE;
req->processed = SHA512_BLOCK_SIZE;
@@ -1829,7 +1840,7 @@ static int safexcel_hmac_md5_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, MD5_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = MD5_HMAC_BLOCK_SIZE;
req->processed = MD5_HMAC_BLOCK_SIZE;
@@ -1900,7 +1911,7 @@ static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
int ret = safexcel_ahash_cra_init(tfm);
/* Default 'key' is all zeroes */
- memset(ctx->ipad, 0, sizeof(u32));
+ memset(&ctx->base.ipad, 0, sizeof(u32));
return ret;
}
@@ -1912,7 +1923,7 @@ static int safexcel_crc32_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from loaded key */
- req->state[0] = (__force __le32)le32_to_cpu(~ctx->ipad[0]);
+ req->state[0] = cpu_to_le32(~ctx->base.ipad.word[0]);
/* Set processed to non-zero to enable invalidation detection */
req->len = sizeof(u32);
req->processed = sizeof(u32);
@@ -1934,7 +1945,7 @@ static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
if (keylen != sizeof(u32))
return -EINVAL;
- memcpy(ctx->ipad, key, sizeof(u32));
+ memcpy(&ctx->base.ipad, key, sizeof(u32));
return 0;
}
@@ -1984,7 +1995,7 @@ static int safexcel_cbcmac_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from loaded keys */
- memcpy(req->state, ctx->ipad, ctx->key_sz);
+ memcpy(req->state, &ctx->base.ipad, ctx->key_sz);
/* Set processed to non-zero to enable invalidation detection */
req->len = AES_BLOCK_SIZE;
req->processed = AES_BLOCK_SIZE;
@@ -2009,9 +2020,9 @@ static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE);
+ memset(&ctx->base.ipad, 0, 2 * AES_BLOCK_SIZE);
for (i = 0; i < len / sizeof(u32); i++)
- ctx->ipad[i + 8] = (__force __le32)cpu_to_be32(aes.key_enc[i]);
+ ctx->base.ipad.be[i + 8] = cpu_to_be32(aes.key_enc[i]);
if (len == AES_KEYSIZE_192) {
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
@@ -2093,8 +2104,7 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
"\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
- ctx->ipad[i] =
- cpu_to_le32((__force u32)cpu_to_be32(key_tmp[i]));
+ ctx->base.ipad.word[i] = swab(key_tmp[i]);
crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
@@ -2177,8 +2187,7 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
return ret;
for (i = 0; i < len / sizeof(u32); i++)
- ctx->ipad[i + 8] =
- cpu_to_le32((__force u32)cpu_to_be32(aes.key_enc[i]));
+ ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]);
/* precompute the CMAC key material */
crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
@@ -2209,7 +2218,7 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
/* end of code borrowed from crypto/cmac.c */
for (i = 0; i < 2 * AES_BLOCK_SIZE / sizeof(u32); i++)
- ctx->ipad[i] = (__force __le32)cpu_to_be32(((u32 *)consts)[i]);
+ ctx->base.ipad.be[i] = cpu_to_be32(((u32 *)consts)[i]);
if (len == AES_KEYSIZE_192) {
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
@@ -2331,7 +2340,7 @@ static int safexcel_hmac_sm3_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, SM3_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, SM3_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SM3_BLOCK_SIZE;
req->processed = SM3_BLOCK_SIZE;
@@ -2424,11 +2433,11 @@ static int safexcel_sha3_fbcheck(struct ahash_request *req)
/* Set fallback cipher HMAC key */
u8 key[SHA3_224_BLOCK_SIZE];
- memcpy(key, ctx->ipad,
+ memcpy(key, &ctx->base.ipad,
crypto_ahash_blocksize(ctx->fback) / 2);
memcpy(key +
crypto_ahash_blocksize(ctx->fback) / 2,
- ctx->opad,
+ &ctx->base.opad,
crypto_ahash_blocksize(ctx->fback) / 2);
ret = crypto_ahash_setkey(ctx->fback, key,
crypto_ahash_blocksize(ctx->fback));
@@ -2801,7 +2810,7 @@ static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
* first using our fallback cipher
*/
ret = crypto_shash_digest(ctx->shdesc, key, keylen,
- (u8 *)ctx->ipad);
+ ctx->base.ipad.byte);
keylen = crypto_shash_digestsize(ctx->shpre);
/*
@@ -2810,8 +2819,8 @@ static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
*/
if (keylen > crypto_ahash_blocksize(tfm) / 2)
/* Buffers overlap, need to use memmove iso memcpy! */
- memmove(ctx->opad,
- (u8 *)ctx->ipad +
+ memmove(&ctx->base.opad,
+ ctx->base.ipad.byte +
crypto_ahash_blocksize(tfm) / 2,
keylen - crypto_ahash_blocksize(tfm) / 2);
} else {
@@ -2821,11 +2830,11 @@ static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
* to match the existing HMAC driver infrastructure.
*/
if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
- memcpy(ctx->ipad, key, keylen);
+ memcpy(&ctx->base.ipad, key, keylen);
} else {
- memcpy(ctx->ipad, key,
+ memcpy(&ctx->base.ipad, key,
crypto_ahash_blocksize(tfm) / 2);
- memcpy(ctx->opad,
+ memcpy(&ctx->base.opad,
key + crypto_ahash_blocksize(tfm) / 2,
keylen - crypto_ahash_blocksize(tfm) / 2);
}
@@ -2833,11 +2842,11 @@ static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
/* Pad key with zeroes */
if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
- memset((u8 *)ctx->ipad + keylen, 0,
+ memset(ctx->base.ipad.byte + keylen, 0,
crypto_ahash_blocksize(tfm) / 2 - keylen);
- memset(ctx->opad, 0, crypto_ahash_blocksize(tfm) / 2);
+ memset(&ctx->base.opad, 0, crypto_ahash_blocksize(tfm) / 2);
} else {
- memset((u8 *)ctx->opad + keylen -
+ memset(ctx->base.opad.byte + keylen -
crypto_ahash_blocksize(tfm) / 2, 0,
crypto_ahash_blocksize(tfm) - keylen);
}
@@ -2856,7 +2865,7 @@ static int safexcel_hmac_sha3_224_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Copy (half of) the key */
- memcpy(req->state, ctx->ipad, SHA3_224_BLOCK_SIZE / 2);
+ memcpy(req->state, &ctx->base.ipad, SHA3_224_BLOCK_SIZE / 2);
/* Start of HMAC should have len == processed == blocksize */
req->len = SHA3_224_BLOCK_SIZE;
req->processed = SHA3_224_BLOCK_SIZE;
@@ -2927,7 +2936,7 @@ static int safexcel_hmac_sha3_256_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Copy (half of) the key */
- memcpy(req->state, ctx->ipad, SHA3_256_BLOCK_SIZE / 2);
+ memcpy(req->state, &ctx->base.ipad, SHA3_256_BLOCK_SIZE / 2);
/* Start of HMAC should have len == processed == blocksize */
req->len = SHA3_256_BLOCK_SIZE;
req->processed = SHA3_256_BLOCK_SIZE;
@@ -2998,7 +3007,7 @@ static int safexcel_hmac_sha3_384_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Copy (half of) the key */
- memcpy(req->state, ctx->ipad, SHA3_384_BLOCK_SIZE / 2);
+ memcpy(req->state, &ctx->base.ipad, SHA3_384_BLOCK_SIZE / 2);
/* Start of HMAC should have len == processed == blocksize */
req->len = SHA3_384_BLOCK_SIZE;
req->processed = SHA3_384_BLOCK_SIZE;
@@ -3069,7 +3078,7 @@ static int safexcel_hmac_sha3_512_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Copy (half of) the key */
- memcpy(req->state, ctx->ipad, SHA3_512_BLOCK_SIZE / 2);
+ memcpy(req->state, &ctx->base.ipad, SHA3_512_BLOCK_SIZE / 2);
/* Start of HMAC should have len == processed == blocksize */
req->len = SHA3_512_BLOCK_SIZE;
req->processed = SHA3_512_BLOCK_SIZE;
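In the hash hunks above, the raw precompute becomes __safexcel_hmac_setkey() and the exported safexcel_hmac_setkey() now operates on the shared safexcel_context, storing the resulting ipad/opad states and flagging a cache invalidation when they change, so the AEAD setkey path reuses it instead of open-coding one call per digest. For orientation only, a standalone sketch of the classic RFC 2104 k^ipad / k^opad preparation (the driver stores partial hash states computed over these padded blocks rather than the raw pads):

/* Standalone sketch: HMAC key -> k^ipad / k^opad blocks (RFC 2104 style). */
#include <stdint.h>
#include <string.h>

static void hmac_prepare_pads(const uint8_t *key, size_t keylen,
			      size_t block_size,
			      uint8_t *ipad, uint8_t *opad)
{
	size_t i;

	/* Keys longer than a block would first be hashed down; omitted here. */
	memset(ipad, 0, block_size);
	memcpy(ipad, key, keylen > block_size ? block_size : keylen);
	memcpy(opad, ipad, block_size);

	for (i = 0; i < block_size; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}

int main(void)
{
	uint8_t ipad[64], opad[64];
	const uint8_t key[] = "example key";	/* illustrative key */

	hmac_prepare_pads(key, sizeof(key) - 1, sizeof(ipad), ipad, opad);
	return ipad[0] == ('e' ^ 0x36) ? 0 : 1;
}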
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index e454c3d44f07..90f15032c8df 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -236,8 +236,8 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
rdesc->particle_size = len;
rdesc->rsvd0 = 0;
- rdesc->descriptor_overflow = 0;
- rdesc->buffer_overflow = 0;
+ rdesc->descriptor_overflow = 1; /* assume error */
+ rdesc->buffer_overflow = 1; /* assume error */
rdesc->last_seg = last;
rdesc->first_seg = first;
rdesc->result_size = EIP197_RD64_RESULT_SIZE;
@@ -245,9 +245,10 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
rdesc->data_lo = lower_32_bits(data);
rdesc->data_hi = upper_32_bits(data);
- /* Clear length & error code in result token */
+ /* Clear length in result token */
rtoken->packet_length = 0;
- rtoken->error_code = 0;
+ /* Assume errors - HW will clear if not the case */
+ rtoken->error_code = 0x7fff;
return rdesc;
}
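The ring change above initialises the result descriptor's overflow bits and the result token's error_code to all-ones, so a descriptor the hardware never writes back reads as failed rather than clean. A trivial standalone sketch of that default-to-error convention (field width illustrative):

/* Standalone sketch: start a status field as "error" so an un-updated
 * result is never mistaken for success. */
#include <stdint.h>
#include <stdio.h>

struct result {
	uint16_t error_code;	/* 0x7fff = assumed failed until cleared */
};

int main(void)
{
	struct result r = { .error_code = 0x7fff };

	/* ... the device would overwrite error_code on completion ... */
	printf("ok=%d\n", r.error_code == 0);
	return 0;
}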
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index f478bb0a566a..276012e7c482 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -528,7 +528,7 @@ static void release_ixp_crypto(struct device *dev)
if (crypt_virt) {
dma_free_coherent(dev,
- NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
+ NPE_QLEN * sizeof(struct crypt_ctl),
crypt_virt, crypt_phys);
}
}
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index d63bca9718dc..06211858bf2e 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -437,7 +437,6 @@ static int mv_cesa_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mv_cesa_dev *cesa;
struct mv_cesa_engine *engines;
- struct resource *res;
int irq, ret, i, cpu;
u32 sram_size;
@@ -475,8 +474,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
spin_lock_init(&cesa->lock);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- cesa->regs = devm_ioremap_resource(dev, res);
+ cesa->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
if (IS_ERR(cesa->regs))
return PTR_ERR(cesa->regs);
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index 0c9cbb681e49..fabfaaccca87 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -2,12 +2,10 @@
#ifndef __MARVELL_CESA_H__
#define __MARVELL_CESA_H__
-#include <crypto/algapi.h>
-#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
-#include <linux/crypto.h>
+#include <linux/dma-direction.h>
#include <linux/dmapool.h>
#define CESA_ENGINE_OFF(i) (((i) * 0x2000))
@@ -239,7 +237,7 @@ struct mv_cesa_sec_accel_desc {
* Context associated to a cipher operation.
*/
struct mv_cesa_skcipher_op_ctx {
- u32 key[8];
+ __le32 key[8];
u32 iv[4];
};
@@ -252,7 +250,7 @@ struct mv_cesa_skcipher_op_ctx {
*/
struct mv_cesa_hash_op_ctx {
u32 iv[16];
- u32 hash[8];
+ __le32 hash[8];
};
/**
@@ -300,8 +298,14 @@ struct mv_cesa_op_ctx {
*/
struct mv_cesa_tdma_desc {
__le32 byte_cnt;
- __le32 src;
- __le32 dst;
+ union {
+ __le32 src;
+ dma_addr_t src_dma;
+ };
+ union {
+ __le32 dst;
+ dma_addr_t dst_dma;
+ };
__le32 next_dma;
/* Software state */
@@ -506,7 +510,7 @@ struct mv_cesa_hash_ctx {
*/
struct mv_cesa_hmac_ctx {
struct mv_cesa_ctx base;
- u32 iv[16];
+ __be32 iv[16];
};
/**
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 45b4d7a29833..b4a6ff9dd6d5 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -11,6 +11,8 @@
#include <crypto/aes.h>
#include <crypto/internal/des.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include "cesa.h"
@@ -262,8 +264,7 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
remaining = (ctx->aes.key_length - 16) / 4;
offset = ctx->aes.key_length + 24 - remaining;
for (i = 0; i < remaining; i++)
- ctx->aes.key_dec[4 + i] =
- cpu_to_le32(ctx->aes.key_enc[offset + i]);
+ ctx->aes.key_dec[4 + i] = ctx->aes.key_enc[offset + i];
return 0;
}
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index f2a2fc111164..add7ea011c98 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -12,6 +12,8 @@
#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include "cesa.h"
@@ -222,9 +224,11 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
CESA_SA_DATA_SRAM_OFFSET + len,
new_cache_ptr);
} else {
- len += mv_cesa_ahash_pad_req(creq,
- engine->sram + len +
- CESA_SA_DATA_SRAM_OFFSET);
+ i = mv_cesa_ahash_pad_req(creq, creq->cache);
+ len += i;
+ memcpy_toio(engine->sram + len +
+ CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, i);
}
if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
@@ -342,7 +346,7 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
*/
data = creq->base.chain.last->op->ctx.hash.hash;
for (i = 0; i < digsize / 4; i++)
- creq->state[i] = cpu_to_le32(data[i]);
+ creq->state[i] = le32_to_cpu(data[i]);
memcpy(ahashreq->result, data, digsize);
} else {
@@ -1265,10 +1269,10 @@ static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
return ret;
for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
- ctx->iv[i] = be32_to_cpu(istate.hash[i]);
+ ctx->iv[i] = cpu_to_be32(istate.hash[i]);
for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
- ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);
+ ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);
return 0;
}
@@ -1336,10 +1340,10 @@ static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
return ret;
for (i = 0; i < ARRAY_SIZE(istate.state); i++)
- ctx->iv[i] = be32_to_cpu(istate.state[i]);
+ ctx->iv[i] = cpu_to_be32(istate.state[i]);
for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
- ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);
+ ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);
return 0;
}
@@ -1394,10 +1398,10 @@ static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
return ret;
for (i = 0; i < ARRAY_SIZE(istate.state); i++)
- ctx->iv[i] = be32_to_cpu(istate.state[i]);
+ ctx->iv[i] = cpu_to_be32(istate.state[i]);
for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
- ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);
+ ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);
return 0;
}
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index b81ee276fe0e..5d9c48fb72b2 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -83,10 +83,10 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
if (tdma->flags & CESA_TDMA_DST_IN_SRAM)
- tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma);
+ tdma->dst = cpu_to_le32(tdma->dst_dma + engine->sram_dma);
if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
- tdma->src = cpu_to_le32(tdma->src + engine->sram_dma);
+ tdma->src = cpu_to_le32(tdma->src_dma + engine->sram_dma);
if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
mv_cesa_adjust_op(engine, tdma->op);
@@ -114,7 +114,7 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
*/
if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
!(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
- last->next_dma = dreq->chain.first->cur_dma;
+ last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
}
}
@@ -237,8 +237,8 @@ int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
return -EIO;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
- tdma->src = src;
- tdma->dst = op_desc->src;
+ tdma->src_dma = src;
+ tdma->dst_dma = op_desc->src_dma;
tdma->op = op_desc->op;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
@@ -272,7 +272,7 @@ struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
tdma->op = op;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src = cpu_to_le32(dma_handle);
- tdma->dst = CESA_SA_CFG_SRAM_OFFSET;
+ tdma->dst_dma = CESA_SA_CFG_SRAM_OFFSET;
tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;
return op;
@@ -289,8 +289,8 @@ int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
return PTR_ERR(tdma);
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
- tdma->src = src;
- tdma->dst = dst;
+ tdma->src_dma = src;
+ tdma->dst_dma = dst;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
tdma->flags = flags | CESA_TDMA_DATA;
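The cesa changes above turn each TDMA descriptor address into a union of the little-endian word the engine reads and a CPU-native dma_addr_t used while the chain is being built, converting with cpu_to_le32() only when the SRAM base is folded in. A standalone sketch of keeping a native handle and committing the device-endian value late; the types and the no-op cpu_to_le32_sketch() are assumptions for a little-endian host:

/* Standalone sketch: native handle while building, device word at commit. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* illustrative; real width is config dependent */

struct desc {
	union {
		uint32_t   src;		/* little-endian value seen by the engine */
		dma_addr_t src_dma;	/* CPU-native handle while building the chain */
	};
};

static uint32_t cpu_to_le32_sketch(uint32_t v) { return v; } /* no-op on LE hosts */

int main(void)
{
	const uint32_t sram_base = 0x100;	/* stand-in for the SRAM DMA base */
	struct desc d;

	d.src_dma = 0x1000;					/* build phase */
	d.src = cpu_to_le32_sketch((uint32_t)d.src_dma + sram_base); /* commit */
	printf("0x%x\n", d.src);
	return 0;
}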
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index cc103b1bc224..40b482198ebc 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -824,18 +824,12 @@ static ssize_t eng_grp_info_show(struct device *dev,
static int create_sysfs_eng_grps_info(struct device *dev,
struct otx_cpt_eng_grp_info *eng_grp)
{
- int ret;
-
eng_grp->info_attr.show = eng_grp_info_show;
eng_grp->info_attr.store = NULL;
eng_grp->info_attr.attr.name = eng_grp->sysfs_info_name;
eng_grp->info_attr.attr.mode = 0440;
sysfs_attr_init(&eng_grp->info_attr.attr);
- ret = device_create_file(dev, &eng_grp->info_attr);
- if (ret)
- return ret;
-
- return 0;
+ return device_create_file(dev, &eng_grp->info_attr);
}
static void ucode_unload(struct device *dev, struct otx_cpt_ucode *ucode)
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 4ad3571ab6af..7323066724c3 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -126,7 +126,7 @@ struct mtk_aes_ctx {
struct mtk_aes_ctr_ctx {
struct mtk_aes_base_ctx base;
- u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
size_t offset;
struct scatterlist src[2];
struct scatterlist dst[2];
@@ -242,22 +242,6 @@ static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
sg->length += dma->remainder;
}
-static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
-{
- int i;
-
- for (i = 0; i < SIZE_IN_WORDS(size); i++)
- dst[i] = cpu_to_le32(src[i]);
-}
-
-static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
-{
- int i;
-
- for (i = 0; i < SIZE_IN_WORDS(size); i++)
- dst[i] = cpu_to_be32(src[i]);
-}
-
static inline int mtk_aes_complete(struct mtk_cryp *cryp,
struct mtk_aes_rec *aes,
int err)
@@ -321,7 +305,7 @@ static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
/* Prepare enough space for authenticated tag */
if (aes->flags & AES_FLAGS_GCM)
- res->hdr += AES_BLOCK_SIZE;
+ le32_add_cpu(&res->hdr, AES_BLOCK_SIZE);
/*
* Make sure that all changes to the DMA ring are done before we
@@ -449,10 +433,10 @@ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
return;
}
- mtk_aes_write_state_le(info->state + ctx->keylen, (void *)req->iv,
- AES_BLOCK_SIZE);
+ memcpy(info->state + ctx->keylen, req->iv, AES_BLOCK_SIZE);
ctr:
- info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
+ le32_add_cpu(&info->tfm[0],
+ le32_to_cpu(AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE))));
info->tfm[1] |= AES_TFM_FULL_IV;
info->cmd[cnt++] = AES_CMD2;
ecb:
@@ -601,8 +585,7 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));
/* Write IVs into transform state buffer. */
- mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
- AES_BLOCK_SIZE);
+ memcpy(ctx->info.state + ctx->keylen, cctx->iv, AES_BLOCK_SIZE);
if (unlikely(fragmented)) {
/*
@@ -654,7 +637,7 @@ static int mtk_aes_setkey(struct crypto_skcipher *tfm,
}
ctx->keylen = SIZE_IN_WORDS(keylen);
- mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
+ memcpy(ctx->key, key, keylen);
return 0;
}
@@ -848,7 +831,7 @@ mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
struct mtk_aes_rec *aes)
{
- u32 status = cryp->ring[aes->id]->res_prev->ct;
+ __le32 status = cryp->ring[aes->id]->res_prev->ct;
return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
-EBADMSG : 0);
@@ -866,7 +849,7 @@ static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
u32 cnt = 0;
- ctx->ct_hdr = AES_CT_CTRL_HDR | len;
+ ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
@@ -889,8 +872,8 @@ static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
AES_TFM_ENC_HASH;
- mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
- AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
+ memcpy(info->state + ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE),
+ req->iv, ivsize);
}
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
@@ -994,9 +977,13 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
u32 keylen)
{
struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
- u8 hash[AES_BLOCK_SIZE] __aligned(4) = {};
+ union {
+ u32 x32[SIZE_IN_WORDS(AES_BLOCK_SIZE)];
+ u8 x8[AES_BLOCK_SIZE];
+ } hash = {};
struct crypto_aes_ctx aes_ctx;
int err;
+ int i;
switch (keylen) {
case AES_KEYSIZE_128:
@@ -1019,12 +1006,16 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (err)
return err;
- aes_encrypt(&aes_ctx, hash, hash);
+ aes_encrypt(&aes_ctx, hash.x8, hash.x8);
memzero_explicit(&aes_ctx, sizeof(aes_ctx));
- mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
- mtk_aes_write_state_be(ctx->key + ctx->keylen, (const u32 *)hash,
- AES_BLOCK_SIZE);
+ memcpy(ctx->key, key, keylen);
+
+ /* Why do we need to do this? */
+ for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
+ hash.x32[i] = swab32(hash.x32[i]);
+
+ memcpy(ctx->key + ctx->keylen, &hash, AES_BLOCK_SIZE);
return 0;
}
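The mtk-aes hunks above drop the mtk_aes_write_state_le/_be helpers: key and IV bytes are copied verbatim with memcpy(), and the one value that still needs a word-swapped view, the GHASH key obtained by encrypting a zero block, is swapped explicitly with swab32(). A standalone sketch of that per-word swap (values illustrative):

/* Standalone sketch: byte-swap a 16-byte block word by word. */
#include <stdint.h>
#include <stdio.h>

static uint32_t swab32_sketch(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u) | (x << 24);
}

int main(void)
{
	uint32_t hash[4] = { 0x00010203u, 0x04050607u, 0x08090a0bu, 0x0c0d0e0fu };

	for (int i = 0; i < 4; i++)
		hash[i] = swab32_sketch(hash[i]);

	printf("0x%08x\n", hash[0]);	/* prints 0x03020100 */
	return 0;
}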
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index 7e3ad085b5bd..9d878620e5c9 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -185,8 +185,6 @@ static int mtk_dfe_dse_state_check(struct mtk_cryp *cryp)
static int mtk_dfe_dse_reset(struct mtk_cryp *cryp)
{
- int err;
-
/* Reset DSE/DFE and correct system priorities for all rings. */
writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL);
writel(0, cryp->base + DFE_PRIO_0);
@@ -200,11 +198,7 @@ static int mtk_dfe_dse_reset(struct mtk_cryp *cryp)
writel(0, cryp->base + DSE_PRIO_2);
writel(0, cryp->base + DSE_PRIO_3);
- err = mtk_dfe_dse_state_check(cryp);
- if (err)
- return err;
-
- return 0;
+ return mtk_dfe_dse_state_check(cryp);
}
static void mtk_cmd_desc_ring_setup(struct mtk_cryp *cryp,
@@ -442,7 +436,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
{
struct mtk_ring **ring = cryp->ring;
- int i, err = ENOMEM;
+ int i;
for (i = 0; i < MTK_RING_MAX; i++) {
ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
@@ -469,14 +463,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
return 0;
err_cleanup:
- for (; i--; ) {
+ do {
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->res_base, ring[i]->res_dma);
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->cmd_base, ring[i]->cmd_dma);
kfree(ring[i]);
- }
- return err;
+ } while (i--);
+ return -ENOMEM;
}
static int mtk_crypto_probe(struct platform_device *pdev)
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index da3f0b8814aa..3d5d7d68b03b 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -239,7 +239,7 @@ static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
{
u32 index, padlen;
- u64 bits[2];
+ __be64 bits[2];
u64 size = ctx->digcnt;
size += ctx->bufcnt;
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index d8aec5153b21..3642bf83d809 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -249,7 +249,7 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
struct n2_ahash_alg {
struct list_head entry;
const u8 *hash_zero;
- const u32 *hash_init;
+ const u8 *hash_init;
u8 hw_op_hashsz;
u8 digest_size;
u8 auth_type;
@@ -662,7 +662,6 @@ struct n2_skcipher_context {
u8 aes[AES_MAX_KEY_SIZE];
u8 des[DES_KEY_SIZE];
u8 des3[3 * DES_KEY_SIZE];
- u8 arc4[258]; /* S-box, X, Y */
} key;
};
@@ -789,36 +788,6 @@ static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
-static int n2_arc4_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- u8 *s = ctx->key.arc4;
- u8 *x = s + 256;
- u8 *y = x + 1;
- int i, j, k;
-
- ctx->enc_type = n2alg->enc_type;
-
- j = k = 0;
- *x = 0;
- *y = 0;
- for (i = 0; i < 256; i++)
- s[i] = i;
- for (i = 0; i < 256; i++) {
- u8 a = s[i];
- j = (j + key[k] + a) & 0xff;
- s[i] = s[j];
- s[j] = a;
- if (++k >= keylen)
- k = 0;
- }
-
- return 0;
-}
-
static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
{
int this_len = nbytes;
@@ -1122,21 +1091,6 @@ struct n2_skcipher_tmpl {
};
static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
- /* ARC4: only ECB is supported (chaining bits ignored) */
- { .name = "ecb(arc4)",
- .drv_name = "ecb-arc4",
- .block_size = 1,
- .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = 1,
- .max_keysize = 256,
- .setkey = n2_arc4_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
-
/* DES: ECB CBC and CFB are supported */
{ .name = "ecb(des)",
.drv_name = "ecb-des",
@@ -1271,7 +1225,7 @@ static LIST_HEAD(skcipher_algs);
struct n2_hash_tmpl {
const char *name;
const u8 *hash_zero;
- const u32 *hash_init;
+ const u8 *hash_init;
u8 hw_op_hashsz;
u8 digest_size;
u8 block_size;
@@ -1279,7 +1233,7 @@ struct n2_hash_tmpl {
u8 hmac_type;
};
-static const u32 n2_md5_init[MD5_HASH_WORDS] = {
+static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
cpu_to_le32(MD5_H0),
cpu_to_le32(MD5_H1),
cpu_to_le32(MD5_H2),
@@ -1300,7 +1254,7 @@ static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
static const struct n2_hash_tmpl hash_tmpls[] = {
{ .name = "md5",
.hash_zero = md5_zero_message_hash,
- .hash_init = n2_md5_init,
+ .hash_init = (u8 *)n2_md5_init,
.auth_type = AUTH_TYPE_MD5,
.hmac_type = AUTH_TYPE_HMAC_MD5,
.hw_op_hashsz = MD5_DIGEST_SIZE,
@@ -1308,7 +1262,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.block_size = MD5_HMAC_BLOCK_SIZE },
{ .name = "sha1",
.hash_zero = sha1_zero_message_hash,
- .hash_init = n2_sha1_init,
+ .hash_init = (u8 *)n2_sha1_init,
.auth_type = AUTH_TYPE_SHA1,
.hmac_type = AUTH_TYPE_HMAC_SHA1,
.hw_op_hashsz = SHA1_DIGEST_SIZE,
@@ -1316,7 +1270,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.block_size = SHA1_BLOCK_SIZE },
{ .name = "sha256",
.hash_zero = sha256_zero_message_hash,
- .hash_init = n2_sha256_init,
+ .hash_init = (u8 *)n2_sha256_init,
.auth_type = AUTH_TYPE_SHA256,
.hmac_type = AUTH_TYPE_HMAC_SHA256,
.hw_op_hashsz = SHA256_DIGEST_SIZE,
@@ -1324,7 +1278,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.block_size = SHA256_BLOCK_SIZE },
{ .name = "sha224",
.hash_zero = sha224_zero_message_hash,
- .hash_init = n2_sha224_init,
+ .hash_init = (u8 *)n2_sha224_init,
.auth_type = AUTH_TYPE_SHA256,
.hmac_type = AUTH_TYPE_RESERVED,
.hw_op_hashsz = SHA256_DIGEST_SIZE,
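[Note] The n2 changes above keep the per-algorithm initial digest values as explicitly endian-annotated word arrays and hand them to the templates as raw bytes. A minimal sketch of that idea (constants are the standard MD5 IV; the identifiers are illustrative, not the driver's):

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    static const __le32 example_md5_iv[4] = {
        cpu_to_le32(0x67452301), cpu_to_le32(0xefcdab89),
        cpu_to_le32(0x98badcfe), cpu_to_le32(0x10325476),
    };

    static void load_iv(u8 *hw_buf, size_t len)
    {
        /* The hardware only cares about the byte layout, so a byte copy suffices. */
        memcpy(hw_buf, example_md5_iv, min(len, sizeof(example_md5_iv)));
    }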
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 954d703f2981..a3b38d2c92e7 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -39,6 +39,7 @@
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
+#include <crypto/engine.h>
#define MD5_DIGEST_SIZE 16
@@ -100,7 +101,6 @@
#define DEFAULT_AUTOSUSPEND_DELAY 1000
/* mostly device flags */
-#define FLAGS_BUSY 0
#define FLAGS_FINAL 1
#define FLAGS_DMA_ACTIVE 2
#define FLAGS_OUTPUT_READY 3
@@ -144,7 +144,7 @@ struct omap_sham_dev;
struct omap_sham_reqctx {
struct omap_sham_dev *dd;
unsigned long flags;
- unsigned long op;
+ u8 op;
u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
size_t digcnt;
@@ -168,6 +168,7 @@ struct omap_sham_hmac_ctx {
};
struct omap_sham_ctx {
+ struct crypto_engine_ctx enginectx;
unsigned long flags;
/* fallback stuff */
@@ -219,7 +220,6 @@ struct omap_sham_dev {
struct device *dev;
void __iomem *io_base;
int irq;
- spinlock_t lock;
int err;
struct dma_chan *dma_lch;
struct tasklet_struct done_task;
@@ -230,6 +230,7 @@ struct omap_sham_dev {
int fallback_sz;
struct crypto_queue queue;
struct ahash_request *req;
+ struct crypto_engine *engine;
const struct omap_sham_pdata *pdata;
};
@@ -245,6 +246,9 @@ static struct omap_sham_drv sham = {
.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};
+static int omap_sham_enqueue(struct ahash_request *req, unsigned int op);
+static void omap_sham_finish_req(struct ahash_request *req, int err);
+
static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
return __raw_readl(dd->io_base + offset);
@@ -456,6 +460,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
u32 val, mask;
+ if (likely(ctx->digcnt))
+ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
+
/*
* Setting ALGO_CONST only for the first iteration and
* CLOSE_HASH only for the last one. Note that flags mode bits
@@ -854,13 +861,16 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
return 0;
}
-static int omap_sham_prepare_request(struct ahash_request *req, bool update)
+static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
int bs;
int ret;
unsigned int nbytes;
bool final = rctx->flags & BIT(FLAGS_FINUP);
+ bool update = rctx->op == OP_UPDATE;
int hash_later;
bs = get_block_size(rctx);
@@ -1021,7 +1031,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err;
bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
- !(dd->flags & BIT(FLAGS_HUGE));
+ !(dd->flags & BIT(FLAGS_HUGE));
dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d",
ctx->total, ctx->digcnt, final);
@@ -1069,6 +1079,39 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
return err;
}
+static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = ctx->dd;
+ int err;
+ bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
+ !(dd->flags & BIT(FLAGS_HUGE));
+
+ dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
+ ctx->op, ctx->total, ctx->digcnt, final);
+
+ dd->req = req;
+
+ err = omap_sham_hw_init(dd);
+ if (err)
+ return err;
+
+ if (ctx->digcnt)
+ dd->pdata->copy_hash(req, 0);
+
+ if (ctx->op == OP_UPDATE)
+ err = omap_sham_update_req(dd);
+ else if (ctx->op == OP_FINAL)
+ err = omap_sham_final_req(dd);
+
+ if (err != -EINPROGRESS)
+ omap_sham_finish_req(req, err);
+
+ return 0;
+}
+
static int omap_sham_finish_hmac(struct ahash_request *req)
{
struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
@@ -1116,25 +1159,20 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
ctx->sg = NULL;
- dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
+ dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED) |
+ BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
+ BIT(FLAGS_OUTPUT_READY));
+
+ if (!err)
+ dd->pdata->copy_hash(req, 1);
if (dd->flags & BIT(FLAGS_HUGE)) {
- dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
- BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE));
- omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
- if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
- err = omap_sham_update_req(dd);
- if (err != -EINPROGRESS &&
- (ctx->flags & BIT(FLAGS_FINUP)))
- err = omap_sham_final_req(dd);
- } else if (ctx->op == OP_FINAL) {
- omap_sham_final_req(dd);
- }
+ /* Re-enqueue the request */
+ omap_sham_enqueue(req, ctx->op);
return;
}
if (!err) {
- dd->pdata->copy_hash(req, 1);
if (test_bit(FLAGS_FINAL, &dd->flags))
err = omap_sham_finish(req);
} else {
@@ -1142,7 +1180,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
}
/* atomic operation is not needed here */
- dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
+ dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
pm_runtime_mark_last_busy(dd->dev);
@@ -1150,81 +1188,13 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
ctx->offset = 0;
- if (req->base.complete)
- req->base.complete(&req->base, err);
+ crypto_finalize_hash_request(dd->engine, req, err);
}
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
struct ahash_request *req)
{
- struct crypto_async_request *async_req, *backlog;
- struct omap_sham_reqctx *ctx;
- unsigned long flags;
- int err = 0, ret = 0;
-
-retry:
- spin_lock_irqsave(&dd->lock, flags);
- if (req)
- ret = ahash_enqueue_request(&dd->queue, req);
- if (test_bit(FLAGS_BUSY, &dd->flags)) {
- spin_unlock_irqrestore(&dd->lock, flags);
- return ret;
- }
- backlog = crypto_get_backlog(&dd->queue);
- async_req = crypto_dequeue_request(&dd->queue);
- if (async_req)
- set_bit(FLAGS_BUSY, &dd->flags);
- spin_unlock_irqrestore(&dd->lock, flags);
-
- if (!async_req)
- return ret;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- req = ahash_request_cast(async_req);
- dd->req = req;
- ctx = ahash_request_ctx(req);
-
- err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
- if (err || !ctx->total)
- goto err1;
-
- dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
- ctx->op, req->nbytes);
-
- err = omap_sham_hw_init(dd);
- if (err)
- goto err1;
-
- if (ctx->digcnt)
- /* request has changed - restore hash */
- dd->pdata->copy_hash(req, 0);
-
- if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
- err = omap_sham_update_req(dd);
- if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
- /* no final() after finup() */
- err = omap_sham_final_req(dd);
- } else if (ctx->op == OP_FINAL) {
- err = omap_sham_final_req(dd);
- }
-err1:
- dev_dbg(dd->dev, "exit, err: %d\n", err);
-
- if (err != -EINPROGRESS) {
- /* done_task will not finish it, so do it here */
- omap_sham_finish_req(req, err);
- req = NULL;
-
- /*
- * Execute next request immediately if there is anything
- * in queue.
- */
- goto retry;
- }
-
- return ret;
+ return crypto_transfer_hash_request_to_engine(dd->engine, req);
}
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
@@ -1394,6 +1364,10 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
+ tctx->enginectx.op.do_one_request = omap_sham_hash_one_req;
+ tctx->enginectx.op.prepare_request = omap_sham_prepare_request;
+ tctx->enginectx.op.unprepare_request = NULL;
+
return 0;
}
@@ -1757,11 +1731,6 @@ static void omap_sham_done_task(unsigned long data)
dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
- if (!test_bit(FLAGS_BUSY, &dd->flags)) {
- omap_sham_handle_queue(dd, NULL);
- return;
- }
-
if (test_bit(FLAGS_CPU, &dd->flags)) {
if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
goto finish;
@@ -1786,20 +1755,12 @@ finish:
dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
omap_sham_finish_req(dd->req, err);
-
- /* If we are not busy, process next req */
- if (!test_bit(FLAGS_BUSY, &dd->flags))
- omap_sham_handle_queue(dd, NULL);
}
static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
- if (!test_bit(FLAGS_BUSY, &dd->flags)) {
- dev_warn(dd->dev, "Interrupt when no active requests.\n");
- } else {
- set_bit(FLAGS_OUTPUT_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
- }
+ set_bit(FLAGS_OUTPUT_READY, &dd->flags);
+ tasklet_schedule(&dd->done_task);
return IRQ_HANDLED;
}
@@ -2072,7 +2033,6 @@ static ssize_t queue_len_store(struct device *dev,
struct omap_sham_dev *dd = dev_get_drvdata(dev);
ssize_t status;
long value;
- unsigned long flags;
status = kstrtol(buf, 0, &value);
if (status)
@@ -2086,9 +2046,7 @@ static ssize_t queue_len_store(struct device *dev,
* than current size, it will just not accept new entries until
 * it has shrunk enough.
*/
- spin_lock_irqsave(&dd->lock, flags);
dd->queue.max_qlen = value;
- spin_unlock_irqrestore(&dd->lock, flags);
return size;
}
@@ -2125,7 +2083,6 @@ static int omap_sham_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dd);
INIT_LIST_HEAD(&dd->list);
- spin_lock_init(&dd->lock);
tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
@@ -2190,6 +2147,16 @@ static int omap_sham_probe(struct platform_device *pdev)
list_add_tail(&dd->list, &sham.dev_list);
spin_unlock(&sham.lock);
+ dd->engine = crypto_engine_alloc_init(dev, 1);
+ if (!dd->engine) {
+ err = -ENOMEM;
+ goto err_engine;
+ }
+
+ err = crypto_engine_start(dd->engine);
+ if (err)
+ goto err_engine_start;
+
for (i = 0; i < dd->pdata->algs_info_size; i++) {
if (dd->pdata->algs_info[i].registered)
break;
@@ -2223,6 +2190,12 @@ err_algs:
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
crypto_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
+err_engine_start:
+ crypto_engine_exit(dd->engine);
+err_engine:
+ spin_lock(&sham.lock);
+ list_del(&dd->list);
+ spin_unlock(&sham.lock);
err_pm:
pm_runtime_disable(dev);
if (!dd->polling_mode)
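[Note] The omap-sham conversion above replaces the driver's private queue/FLAGS_BUSY handling with the generic crypto engine: requests are transferred to the engine, executed through the do_one_request hook, and completed with crypto_finalize_hash_request(). A minimal sketch of that wiring, with illustrative my_* names standing in for the driver's omap_sham_* callbacks:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <crypto/engine.h>

    struct my_tfm_ctx {
        struct crypto_engine_ctx enginectx;     /* kept first, as in this driver */
        /* ... driver-specific state ... */
    };

    static int my_do_one_request(struct crypto_engine *engine, void *areq)
    {
        /* Program the hardware here; when the operation finishes (IRQ/tasklet
         * path), report it with crypto_finalize_hash_request(engine, req, err). */
        return 0;
    }

    static int my_engine_setup(struct device *dev, struct my_tfm_ctx *ctx,
                               struct crypto_engine **enginep)
    {
        ctx->enginectx.op.do_one_request = my_do_one_request;
        ctx->enginectx.op.prepare_request = NULL;
        ctx->enginectx.op.unprepare_request = NULL;

        *enginep = crypto_engine_alloc_init(dev, 1);
        if (!*enginep)
            return -ENOMEM;

        return crypto_engine_start(*enginep);
    }

Requests are then queued with crypto_transfer_hash_request_to_engine(), which is exactly what the simplified omap_sham_handle_queue() above now does.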
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 62c6fe88b212..1be549a07a21 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -18,6 +18,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index dac6eb37fff9..fb34bf92861d 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1685,11 +1685,6 @@ static int spacc_probe(struct platform_device *pdev)
goto err_clk_put;
}
- ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
- if (ret)
- goto err_clk_disable;
-
-
/*
* Use an IRQ threshold of 50% as a default. This seems to be a
* reasonable trade off of latency against throughput but can be
@@ -1697,6 +1692,10 @@ static int spacc_probe(struct platform_device *pdev)
*/
engine->stat_irq_thresh = (engine->fifo_sz / 2);
+ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ if (ret)
+ goto err_clk_disable;
+
/*
* Configure the interrupts. We only use the STAT_CNT interrupt as we
* only submit a new packet for processing when we complete another in
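[Note] The picoxcell hunk simply reorders probe so the sysfs attribute is created only after the value it exposes has been initialised. A small illustrative sketch of that ordering rule (struct, names and the read-only attribute are assumptions, not the driver's actual read-write attribute):

    #include <linux/device.h>
    #include <linux/sysfs.h>

    struct example_engine {
        unsigned int stat_irq_thresh;
    };
    static struct example_engine example_eng;

    static ssize_t stat_irq_thresh_show(struct device *dev,
                                        struct device_attribute *attr, char *buf)
    {
        return sprintf(buf, "%u\n", example_eng.stat_irq_thresh);
    }
    static DEVICE_ATTR_RO(stat_irq_thresh);

    static int example_setup(struct device *dev, unsigned int fifo_sz)
    {
        /* Initialise the value first so a concurrent reader never sees junk. */
        example_eng.stat_irq_thresh = fifo_sz / 2;

        return device_create_file(dev, &dev_attr_stat_irq_thresh);
    }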
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index 020d099409e5..ed0e8e33fe4b 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -18,12 +18,9 @@
#include <adf_cfg.h>
#include "adf_c3xxx_hw_data.h"
-#define ADF_SYSTEM_DEVICE(device_id) \
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
static const struct pci_device_id adf_pci_tbl[] = {
- ADF_SYSTEM_DEVICE(ADF_C3XXX_PCI_DEVICE_ID),
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
+ { }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -58,7 +55,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
- case ADF_C3XXX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
adf_clean_hw_data_c3xxx(accel_dev->hw_device);
break;
default:
@@ -83,7 +80,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
switch (ent->device) {
- case ADF_C3XXX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
@@ -143,10 +140,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
- ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, pci_name(pdev));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
@@ -203,7 +198,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- if (adf_enable_aer(accel_dev, &adf_driver)) {
+ if (adf_enable_aer(accel_dev)) {
dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT;
goto out_err_free_reg;
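[Note] All of the QAT endpoint drivers in this series drop their private ADF_*_PCI_DEVICE_ID defines and open-coded table macro in favour of PCI_VDEVICE() with the shared PCI_DEVICE_ID_INTEL_QAT_* constants from pci_ids.h. A self-contained sketch of the resulting table style; the literal 0x19e2 (the retired C3XXX define) is used here only so the example does not depend on the header constant:

    #include <linux/module.h>
    #include <linux/pci.h>

    static const struct pci_device_id example_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x19e2), },        /* vendor, ANY subsystem IDs filled in */
        { }                                     /* terminating entry */
    };
    MODULE_DEVICE_TABLE(pci, example_pci_tbl);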
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 11039fe55f61..456979b136a2 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -18,12 +18,9 @@
#include <adf_cfg.h>
#include "adf_c3xxxvf_hw_data.h"
-#define ADF_SYSTEM_DEVICE(device_id) \
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
static const struct pci_device_id adf_pci_tbl[] = {
- ADF_SYSTEM_DEVICE(ADF_C3XXXIOV_PCI_DEVICE_ID),
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF), },
+ { }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -58,7 +55,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
- case ADF_C3XXXIOV_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
break;
default:
@@ -85,7 +82,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
switch (ent->device) {
- case ADF_C3XXXIOV_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
@@ -127,10 +124,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
- ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, pci_name(pdev));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 4ba9c14383af..d8e7c9c25590 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -18,12 +18,9 @@
#include <adf_cfg.h>
#include "adf_c62x_hw_data.h"
-#define ADF_SYSTEM_DEVICE(device_id) \
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
static const struct pci_device_id adf_pci_tbl[] = {
- ADF_SYSTEM_DEVICE(ADF_C62X_PCI_DEVICE_ID),
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
+ { }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -58,7 +55,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
- case ADF_C62X_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X:
adf_clean_hw_data_c62x(accel_dev->hw_device);
break;
default:
@@ -83,7 +80,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
switch (ent->device) {
- case ADF_C62X_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
@@ -143,10 +140,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
- ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, pci_name(pdev));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
@@ -203,7 +198,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- if (adf_enable_aer(accel_dev, &adf_driver)) {
+ if (adf_enable_aer(accel_dev)) {
dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT;
goto out_err_free_reg;
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index b8b021d54bb5..b9810f79eb84 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -18,12 +18,9 @@
#include <adf_cfg.h>
#include "adf_c62xvf_hw_data.h"
-#define ADF_SYSTEM_DEVICE(device_id) \
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
static const struct pci_device_id adf_pci_tbl[] = {
- ADF_SYSTEM_DEVICE(ADF_C62XIOV_PCI_DEVICE_ID),
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X_VF), },
+ { }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -58,7 +55,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
- case ADF_C62XIOV_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
adf_clean_hw_data_c62xiov(accel_dev->hw_device);
break;
default:
@@ -85,7 +82,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
switch (ent->device) {
- case ADF_C62XIOV_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
@@ -127,10 +124,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
- ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, pci_name(pdev));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index c1db8c26afb6..06952ece53d9 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -15,12 +15,6 @@
#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
-#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
-#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
-#define ADF_C62X_PCI_DEVICE_ID 0x37c8
-#define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9
-#define ADF_C3XXX_PCI_DEVICE_ID 0x19e2
-#define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3
#define ADF_ERRSOU3 (0x3A000 + 0x0C)
#define ADF_ERRSOU5 (0x3A000 + 0xD8)
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index 32102e27e559..d2ae293d0df6 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -175,7 +175,6 @@ static const struct pci_error_handlers adf_err_handler = {
/**
 * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device
* @accel_dev: Pointer to acceleration device.
- * @adf: PCI device driver owning the given acceleration device.
*
 * Function enables PCI Advanced Error Reporting for the
* QAT acceleration device accel_dev.
@@ -183,11 +182,12 @@ static const struct pci_error_handlers adf_err_handler = {
*
* Return: 0 on success, error code otherwise.
*/
-int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
+int adf_enable_aer(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ struct pci_driver *pdrv = pdev->driver;
- adf->err_handler = &adf_err_handler;
+ pdrv->err_handler = &adf_err_handler;
pci_enable_pcie_error_reporting(pdev);
return 0;
}
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index ac462796cefc..22ae32838113 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -52,24 +52,7 @@ static const struct seq_operations qat_dev_cfg_sops = {
.show = qat_dev_cfg_show
};
-static int qat_dev_cfg_open(struct inode *inode, struct file *file)
-{
- int ret = seq_open(file, &qat_dev_cfg_sops);
-
- if (!ret) {
- struct seq_file *seq_f = file->private_data;
-
- seq_f->private = inode->i_private;
- }
- return ret;
-}
-
-static const struct file_operations qat_dev_cfg_fops = {
- .open = qat_dev_cfg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
+DEFINE_SEQ_ATTRIBUTE(qat_dev_cfg);
/**
* adf_cfg_dev_add() - Create an acceleration device configuration table.
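[Note] The DEFINE_SEQ_ATTRIBUTE() conversion above removes the boilerplate open() wrapper and file_operations: given a const struct seq_operations named <name>_sops, the macro generates <name>_open() (which stashes inode->i_private in seq_file->private, just like the deleted helper) and <name>_fops. A minimal, self-contained sketch with an assumed "example" name:

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static void *example_start(struct seq_file *s, loff_t *pos)
    {
        return *pos ? NULL : SEQ_START_TOKEN;   /* single-record iterator */
    }

    static void *example_next(struct seq_file *s, void *v, loff_t *pos)
    {
        (*pos)++;
        return NULL;
    }

    static void example_stop(struct seq_file *s, void *v)
    {
    }

    static int example_show(struct seq_file *s, void *v)
    {
        seq_printf(s, "private=%p\n", s->private);  /* set from inode->i_private */
        return 0;
    }

    static const struct seq_operations example_sops = {
        .start = example_start,
        .next  = example_next,
        .stop  = example_stop,
        .show  = example_show,
    };
    DEFINE_SEQ_ATTRIBUTE(example);              /* emits example_open() and example_fops */

    /* Usage: debugfs_create_file("example", 0400, parent, priv, &example_fops); */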
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index ebfcb4ea618d..f22342f612c1 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -95,7 +95,7 @@ void adf_ae_fw_release(struct adf_accel_dev *accel_dev);
int adf_ae_start(struct adf_accel_dev *accel_dev);
int adf_ae_stop(struct adf_accel_dev *accel_dev);
-int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
+int adf_enable_aer(struct adf_accel_dev *accel_dev);
void adf_disable_aer(struct adf_accel_dev *accel_dev);
void adf_reset_sbr(struct adf_accel_dev *accel_dev);
void adf_reset_flr(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 71d0c44aacca..eb9b3be9d8eb 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -416,8 +416,6 @@ static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
static int __init adf_register_ctl_device_driver(void)
{
- mutex_init(&adf_ctl_lock);
-
if (adf_chr_drv_create())
goto err_chr_dev;
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 72753af056b3..92ec035576df 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -285,7 +285,7 @@ struct adf_accel_dev *adf_devmgr_get_first(void)
/**
* adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
- * @accel_dev: Pointer to pci device.
+ * @pci_dev: Pointer to pci device.
*
* Function returns acceleration device associated with the given pci device.
* To be used by QAT device specific drivers.
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 8827aa139f96..963b2bea78f2 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -173,10 +173,14 @@ EXPORT_SYMBOL_GPL(adf_disable_sriov);
/**
* adf_sriov_configure() - Enable SRIOV for the device
* @pdev: Pointer to pci device.
+ * @numvfs: Number of virtual functions (VFs) to enable.
+ *
+ * Note that the @numvfs parameter is ignored and all VFs supported by the
+ * device are enabled due to the design of the hardware.
*
* Function enables SRIOV for the pci device.
*
- * Return: 0 on success, error code otherwise.
+ * Return: number of VFs enabled on success, error code otherwise.
*/
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index 2a2eccbf56ec..dac25ba47260 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -77,31 +77,14 @@ static void adf_ring_stop(struct seq_file *sfile, void *v)
mutex_unlock(&ring_read_lock);
}
-static const struct seq_operations adf_ring_sops = {
+static const struct seq_operations adf_ring_debug_sops = {
.start = adf_ring_start,
.next = adf_ring_next,
.stop = adf_ring_stop,
.show = adf_ring_show
};
-static int adf_ring_open(struct inode *inode, struct file *file)
-{
- int ret = seq_open(file, &adf_ring_sops);
-
- if (!ret) {
- struct seq_file *seq_f = file->private_data;
-
- seq_f->private = inode->i_private;
- }
- return ret;
-}
-
-static const struct file_operations adf_ring_debug_fops = {
- .open = adf_ring_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
+DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
{
@@ -188,31 +171,14 @@ static void adf_bank_stop(struct seq_file *sfile, void *v)
mutex_unlock(&bank_read_lock);
}
-static const struct seq_operations adf_bank_sops = {
+static const struct seq_operations adf_bank_debug_sops = {
.start = adf_bank_start,
.next = adf_bank_next,
.stop = adf_bank_stop,
.show = adf_bank_show
};
-static int adf_bank_open(struct inode *inode, struct file *file)
-{
- int ret = seq_open(file, &adf_bank_sops);
-
- if (!ret) {
- struct seq_file *seq_f = file->private_data;
-
- seq_f->private = inode->i_private;
- }
- return ret;
-}
-
-static const struct file_operations adf_bank_debug_fops = {
- .open = adf_bank_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
+DEFINE_SEQ_ATTRIBUTE(adf_bank_debug);
int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
{
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 72753b84dc95..d552dbcfe0a0 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -828,6 +828,11 @@ static int qat_alg_aead_dec(struct aead_request *areq)
struct icp_qat_fw_la_bulk_req *msg;
int digst_size = crypto_aead_authsize(aead_tfm);
int ret, ctr = 0;
+ u32 cipher_len;
+
+ cipher_len = areq->cryptlen - digst_size;
+ if (cipher_len % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
if (unlikely(ret))
@@ -842,7 +847,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- cipher_param->cipher_length = areq->cryptlen - digst_size;
+ cipher_param->cipher_length = cipher_len;
cipher_param->cipher_offset = areq->assoclen;
memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
@@ -871,6 +876,9 @@ static int qat_alg_aead_enc(struct aead_request *areq)
u8 *iv = areq->iv;
int ret, ctr = 0;
+ if (areq->cryptlen % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
if (unlikely(ret))
return ret;
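[Note] The qat_algs.c hunks above reject AEAD requests whose AES-CBC payload is not block aligned before any buffers are mapped. A stripped-down sketch of that check for the decrypt path, where the authentication tag is carried at the end of cryptlen (helper name is illustrative):

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <crypto/aes.h>

    static int check_cbc_aead_dec_len(unsigned int cryptlen, unsigned int authsize)
    {
        u32 cipher_len = cryptlen - authsize;   /* ciphertext without the tag */

        if (cipher_len % AES_BLOCK_SIZE)
            return -EINVAL;                     /* CBC needs whole blocks */

        return 0;
    }

The encrypt path applies the same test directly to cryptlen, as in the second hunk.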
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index fa467e0f8285..6b9d47682d04 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/pci_ids.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
@@ -412,7 +413,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
unsigned int csr_val;
int times = 30;
- if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
+ if (handle->pci_dev->device != PCI_DEVICE_ID_INTEL_QAT_DH895XCC)
return 0;
csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -672,13 +673,13 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
LOCAL_TO_XFER_REG_OFFSET);
handle->pci_dev = pci_info->pci_dev;
- if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_DH895XCC) {
sram_bar =
&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
handle->hal_sram_addr_v = sram_bar->virt_addr;
}
handle->fw_auth = (handle->pci_dev->device ==
- ADF_DH895XCC_PCI_DEVICE_ID) ? false : true;
+ PCI_DEVICE_ID_INTEL_QAT_DH895XCC) ? false : true;
handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
if (!handle->hal_handle)
goto out_hal_handle;
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 00c615f9f9a8..5d1f28cd6680 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -4,6 +4,7 @@
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/pci_ids.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
@@ -711,11 +712,11 @@ static unsigned int
qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
{
switch (handle->pci_dev->device) {
- case ADF_DH895XCC_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
return ICP_QAT_AC_895XCC_DEV_TYPE;
- case ADF_C62X_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X:
return ICP_QAT_AC_C62X_DEV_TYPE;
- case ADF_C3XXX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
return ICP_QAT_AC_C3XXX_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
@@ -1391,7 +1392,7 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
status = qat_uclo_auth_fw(handle, desc);
qat_uclo_ummap_auth_fw(handle, &desc);
} else {
- if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_C3XXX) {
pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
return -EINVAL;
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 4e877b75822b..ecb4f6f20e22 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -18,12 +18,9 @@
#include <adf_cfg.h>
#include "adf_dh895xcc_hw_data.h"
-#define ADF_SYSTEM_DEVICE(device_id) \
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
static const struct pci_device_id adf_pci_tbl[] = {
- ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID),
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
+ { }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -58,7 +55,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
- case ADF_DH895XCC_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
break;
default:
@@ -83,7 +80,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
switch (ent->device) {
- case ADF_DH895XCC_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
@@ -143,10 +140,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
- ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, pci_name(pdev));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
@@ -205,7 +200,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- if (adf_enable_aer(accel_dev, &adf_driver)) {
+ if (adf_enable_aer(accel_dev)) {
dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT;
goto out_err_free_reg;
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 7d6e1db272c2..404cf9df6922 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -18,12 +18,9 @@
#include <adf_cfg.h>
#include "adf_dh895xccvf_hw_data.h"
-#define ADF_SYSTEM_DEVICE(device_id) \
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
static const struct pci_device_id adf_pci_tbl[] = {
- ADF_SYSTEM_DEVICE(ADF_DH895XCCIOV_PCI_DEVICE_ID),
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF), },
+ { }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -58,7 +55,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
- case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
break;
default:
@@ -85,7 +82,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
switch (ent->device) {
- case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
@@ -127,10 +124,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
- ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, pci_name(pdev));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index cb6d61eb7302..ea616b7259ae 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index c230843e2ffb..87be96a0b0bb 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -4,6 +4,7 @@
*/
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <crypto/internal/hash.h>
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 5630c5addd28..a2d3da0ad95f 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -4,6 +4,7 @@
*/
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 4730f84b646d..99ba8d51d102 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -7,6 +7,7 @@
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/crypto.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index f385587f99af..35d73061d156 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -10,6 +10,7 @@
*/
#include "rk3288_crypto.h"
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index 2b49c677afdb..3db595570c9c 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -7,6 +7,7 @@
#include <crypto/algapi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 6b7ecbec092e..81befe7febaa 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -8,6 +8,7 @@
*
* Some ideas are from marvell/cesa.c and s5p-sss.c driver.
*/
+#include <linux/device.h>
#include "rk3288_crypto.h"
/*
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 4a75c8e1fa6c..1cece1a7d3f0 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -8,6 +8,7 @@
*
* Some ideas are from marvell-cesa.c and s5p-sss.c driver.
*/
+#include <linux/device.h>
#include "rk3288_crypto.h"
#define RK_CRYPTO_DEC BIT(0)
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 341433fbcc4a..88a6c853ffd7 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -260,6 +260,7 @@ struct s5p_aes_ctx {
* struct s5p_aes_dev - Crypto device state container
* @dev: Associated device
* @clk: Clock for accessing hardware
+ * @pclk: APB bus clock necessary to access the hardware
* @ioaddr: Mapped IO memory region
 * @aes_ioaddr: Per-variant offset for AES block IO memory
* @irq_fc: Feed control interrupt line
@@ -342,13 +343,13 @@ struct s5p_aes_dev {
* @engine: Bits for selecting type of HASH in SSS block
* @sg: sg for DMA transfer
* @sg_len: Length of sg for DMA transfer
- * @sgl[]: sg for joining buffer and req->src scatterlist
+ * @sgl: sg for joining buffer and req->src scatterlist
* @skip: Skip offset in req->src for current op
* @total: Total number of bytes for current request
* @finup: Keep state for finup or final.
* @error: Keep track of error.
 * @bufcnt: Number of bytes held in buffer[]
- * @buffer[]: For byte(s) from end of req->src in UPDATE op
+ * @buffer: For byte(s) from end of req->src in UPDATE op
*/
struct s5p_hash_reqctx {
struct s5p_aes_dev *dd;
@@ -1125,7 +1126,7 @@ static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
* s5p_hash_prepare_sgs() - prepare sg for processing
* @ctx: request context
* @sg: source scatterlist request
- * @nbytes: number of bytes to process from sg
+ * @new_len: number of bytes to process from sg
* @final: final flag
*
* Check two conditions: (1) if buffers in sg have len aligned data, and (2)
@@ -2200,11 +2201,10 @@ static int s5p_aes_probe(struct platform_device *pdev)
}
pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
- if (IS_ERR(pdata->clk)) {
- dev_err(dev, "failed to find secss clock %s\n",
- variant->clk_names[0]);
- return -ENOENT;
- }
+ if (IS_ERR(pdata->clk))
+ return dev_err_probe(dev, PTR_ERR(pdata->clk),
+ "failed to find secss clock %s\n",
+ variant->clk_names[0]);
err = clk_prepare_enable(pdata->clk);
if (err < 0) {
@@ -2216,9 +2216,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
if (variant->clk_names[1]) {
pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
if (IS_ERR(pdata->pclk)) {
- dev_err(dev, "failed to find clock %s\n",
- variant->clk_names[1]);
- err = -ENOENT;
+ err = dev_err_probe(dev, PTR_ERR(pdata->pclk),
+ "failed to find clock %s\n",
+ variant->clk_names[1]);
goto err_clk;
}
@@ -2307,8 +2307,7 @@ err_algs:
tasklet_kill(&pdata->tasklet);
err_irq:
- if (pdata->pclk)
- clk_disable_unprepare(pdata->pclk);
+ clk_disable_unprepare(pdata->pclk);
err_clk:
clk_disable_unprepare(pdata->clk);
@@ -2338,8 +2337,7 @@ static int s5p_aes_remove(struct platform_device *pdev)
pdata->use_hash = false;
}
- if (pdata->pclk)
- clk_disable_unprepare(pdata->pclk);
+ clk_disable_unprepare(pdata->pclk);
clk_disable_unprepare(pdata->clk);
s5p_dev = NULL;
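[Note] The s5p-sss clock handling above is converted to dev_err_probe(), which logs only for real errors, records the reason when the error is -EPROBE_DEFER, and returns the error code so it can be propagated directly. A minimal sketch of the pattern; the "secss" clock name is taken from the hunk, the helper name is an assumption:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int example_get_clk(struct device *dev, struct clk **clk)
    {
        *clk = devm_clk_get(dev, "secss");
        if (IS_ERR(*clk))
            return dev_err_probe(dev, PTR_ERR(*clk),
                                 "failed to find secss clock\n");

        return 0;
    }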
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 5bc099052bd2..eda93fab95fe 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -143,33 +143,38 @@ struct sa_alg_tmpl {
};
/**
+ * struct sa_mapped_sg: scatterlist information for tx and rx
+ * @mapped: Set to true if the @sgt is mapped
+ * @dir: mapping direction used for @sgt
+ * @split_sg: Set if the sg is split and needs to be freed up
+ * @static_sg: Static scatterlist entry for overriding data
+ * @sgt: scatterlist table for DMA API use
+ */
+struct sa_mapped_sg {
+ bool mapped;
+ enum dma_data_direction dir;
+ struct scatterlist static_sg;
+ struct scatterlist *split_sg;
+ struct sg_table sgt;
+};
+/**
* struct sa_rx_data: RX Packet miscellaneous data place holder
* @req: crypto request data pointer
* @ddev: pointer to the DMA device
* @tx_in: dma_async_tx_descriptor pointer for rx channel
- * @split_src_sg: Set if the src sg is split and needs to be freed up
- * @split_dst_sg: Set if the dst sg is split and needs to be freed up
+ * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
* @enc: Flag indicating either encryption or decryption
* @enc_iv_size: Initialisation vector size
* @iv_idx: Initialisation vector index
- * @rx_sg: Static scatterlist entry for overriding RX data
- * @tx_sg: Static scatterlist entry for overriding TX data
- * @src: Source data pointer
- * @dst: Destination data pointer
*/
struct sa_rx_data {
void *req;
struct device *ddev;
struct dma_async_tx_descriptor *tx_in;
- struct scatterlist *split_src_sg;
- struct scatterlist *split_dst_sg;
+ struct sa_mapped_sg mapped_sg[2];
u8 enc;
u8 enc_iv_size;
u8 iv_idx;
- struct scatterlist rx_sg;
- struct scatterlist tx_sg;
- struct scatterlist *src;
- struct scatterlist *dst;
};
/**
@@ -976,23 +981,46 @@ static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
return sa_cipher_setkey(tfm, key, keylen, &ad);
}
+static void sa_sync_from_device(struct sa_rx_data *rxd)
+{
+ struct sg_table *sgt;
+
+ if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
+ sgt = &rxd->mapped_sg[0].sgt;
+ else
+ sgt = &rxd->mapped_sg[1].sgt;
+
+ dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
+}
+
+static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
+ struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
+
+ if (mapped_sg->mapped) {
+ dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
+ mapped_sg->dir, 0);
+ kfree(mapped_sg->split_sg);
+ }
+ }
+
+ kfree(rxd);
+}
+
static void sa_aes_dma_in_callback(void *data)
{
struct sa_rx_data *rxd = (struct sa_rx_data *)data;
struct skcipher_request *req;
- int sglen;
u32 *result;
__be32 *mdptr;
size_t ml, pl;
int i;
- enum dma_data_direction dir_src;
- bool diff_dst;
+ sa_sync_from_device(rxd);
req = container_of(rxd->req, struct skcipher_request, base);
- sglen = sg_nents_for_len(req->src, req->cryptlen);
-
- diff_dst = (req->src != req->dst) ? true : false;
- dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
if (req->iv) {
mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
@@ -1003,18 +1031,7 @@ static void sa_aes_dma_in_callback(void *data)
result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
}
- dma_unmap_sg(rxd->ddev, req->src, sglen, dir_src);
- kfree(rxd->split_src_sg);
-
- if (diff_dst) {
- sglen = sg_nents_for_len(req->dst, req->cryptlen);
-
- dma_unmap_sg(rxd->ddev, req->dst, sglen,
- DMA_FROM_DEVICE);
- kfree(rxd->split_dst_sg);
- }
-
- kfree(rxd);
+ sa_free_sa_rx_data(rxd);
skcipher_request_complete(req, 0);
}
@@ -1043,7 +1060,6 @@ static int sa_run(struct sa_req *req)
struct device *ddev;
struct dma_chan *dma_rx;
int sg_nents, src_nents, dst_nents;
- int mapped_src_nents, mapped_dst_nents;
struct scatterlist *src, *dst;
size_t pl, ml, split_size;
struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
@@ -1052,6 +1068,7 @@ static int sa_run(struct sa_req *req)
u32 *mdptr;
bool diff_dst;
enum dma_data_direction dir_src;
+ struct sa_mapped_sg *mapped_sg;
gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1082,6 +1099,7 @@ static int sa_run(struct sa_req *req)
dma_rx = pdata->dma_rx1;
ddev = dma_rx->device->dev;
+ rxd->ddev = ddev;
memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
@@ -1109,60 +1127,90 @@ static int sa_run(struct sa_req *req)
split_size = req->size;
+ mapped_sg = &rxd->mapped_sg[0];
if (sg_nents == 1 && split_size <= req->src->length) {
- src = &rxd->rx_sg;
+ src = &mapped_sg->static_sg;
+ src_nents = 1;
sg_init_table(src, 1);
sg_set_page(src, sg_page(req->src), split_size,
req->src->offset);
- src_nents = 1;
- dma_map_sg(ddev, src, sg_nents, dir_src);
+
+ mapped_sg->sgt.sgl = src;
+ mapped_sg->sgt.orig_nents = src_nents;
+ ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
+ if (ret)
+ return ret;
+
+ mapped_sg->dir = dir_src;
+ mapped_sg->mapped = true;
} else {
- mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents,
- dir_src);
- ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size,
- &src, &src_nents, gfp_flags);
+ mapped_sg->sgt.sgl = req->src;
+ mapped_sg->sgt.orig_nents = sg_nents;
+ ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
+ if (ret)
+ return ret;
+
+ mapped_sg->dir = dir_src;
+ mapped_sg->mapped = true;
+
+ ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
+ &split_size, &src, &src_nents, gfp_flags);
if (ret) {
- src_nents = sg_nents;
- src = req->src;
+ src_nents = mapped_sg->sgt.nents;
+ src = mapped_sg->sgt.sgl;
} else {
- rxd->split_src_sg = src;
+ mapped_sg->split_sg = src;
}
}
+ dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);
+
if (!diff_dst) {
dst_nents = src_nents;
dst = src;
} else {
dst_nents = sg_nents_for_len(req->dst, req->size);
+ mapped_sg = &rxd->mapped_sg[1];
if (dst_nents == 1 && split_size <= req->dst->length) {
- dst = &rxd->tx_sg;
+ dst = &mapped_sg->static_sg;
+ dst_nents = 1;
sg_init_table(dst, 1);
sg_set_page(dst, sg_page(req->dst), split_size,
req->dst->offset);
- dst_nents = 1;
- dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE);
+
+ mapped_sg->sgt.sgl = dst;
+ mapped_sg->sgt.orig_nents = dst_nents;
+ ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
+ DMA_FROM_DEVICE, 0);
+ if (ret)
+ goto err_cleanup;
+
+ mapped_sg->dir = DMA_FROM_DEVICE;
+ mapped_sg->mapped = true;
} else {
- mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
- &split_size, &dst, &dst_nents,
+ mapped_sg->sgt.sgl = req->dst;
+ mapped_sg->sgt.orig_nents = dst_nents;
+ ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
+ DMA_FROM_DEVICE, 0);
+ if (ret)
+ goto err_cleanup;
+
+ mapped_sg->dir = DMA_FROM_DEVICE;
+ mapped_sg->mapped = true;
+
+ ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
+ 0, 1, &split_size, &dst, &dst_nents,
gfp_flags);
if (ret) {
- dst_nents = dst_nents;
- dst = req->dst;
+ dst_nents = mapped_sg->sgt.nents;
+ dst = mapped_sg->sgt.sgl;
} else {
- rxd->split_dst_sg = dst;
+ mapped_sg->split_sg = dst;
}
}
}
- if (unlikely(src_nents != sg_nents)) {
- dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n");
- ret = -EIO;
- goto err_cleanup;
- }
-
rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -1174,9 +1222,6 @@ static int sa_run(struct sa_req *req)
rxd->req = (void *)req->base;
rxd->enc = req->enc;
- rxd->ddev = ddev;
- rxd->src = src;
- rxd->dst = dst;
rxd->iv_idx = req->ctx->iv_idx;
rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
rxd->tx_in->callback = req->callback;
@@ -1214,16 +1259,7 @@ static int sa_run(struct sa_req *req)
return -EINPROGRESS;
err_cleanup:
- dma_unmap_sg(ddev, req->src, sg_nents, DMA_TO_DEVICE);
- kfree(rxd->split_src_sg);
-
- if (req->src != req->dst) {
- dst_nents = sg_nents_for_len(req->dst, req->size);
- dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE);
- kfree(rxd->split_dst_sg);
- }
-
- kfree(rxd);
+ sa_free_sa_rx_data(rxd);
return ret;
}
@@ -1293,11 +1329,12 @@ static void sa_sha_dma_in_callback(void *data)
struct ahash_request *req;
struct crypto_ahash *tfm;
unsigned int authsize;
- int i, sg_nents;
+ int i;
size_t ml, pl;
u32 *result;
__be32 *mdptr;
+ sa_sync_from_device(rxd);
req = container_of(rxd->req, struct ahash_request, base);
tfm = crypto_ahash_reqtfm(req);
authsize = crypto_ahash_digestsize(tfm);
@@ -1308,12 +1345,7 @@ static void sa_sha_dma_in_callback(void *data)
for (i = 0; i < (authsize / 4); i++)
result[i] = be32_to_cpu(mdptr[i + 4]);
- sg_nents = sg_nents_for_len(req->src, req->nbytes);
- dma_unmap_sg(rxd->ddev, req->src, sg_nents, DMA_FROM_DEVICE);
-
- kfree(rxd->split_src_sg);
-
- kfree(rxd);
+ sa_free_sa_rx_data(rxd);
ahash_request_complete(req, 0);
}
@@ -1482,8 +1514,8 @@ static int sa_sha_init(struct ahash_request *req)
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- dev_dbg(sa_k3_dev, "init: digest size: %d, rctx=%llx\n",
- crypto_ahash_digestsize(tfm), (u64)rctx);
+ dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
+ crypto_ahash_digestsize(tfm), rctx);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
rctx->fallback_req.base.flags =
@@ -1637,43 +1669,28 @@ static void sa_aead_dma_in_callback(void *data)
unsigned int authsize;
u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
size_t pl, ml;
- int i, sglen;
+ int i;
int err = 0;
u16 auth_len;
u32 *mdptr;
- bool diff_dst;
- enum dma_data_direction dir_src;
+ sa_sync_from_device(rxd);
req = container_of(rxd->req, struct aead_request, base);
tfm = crypto_aead_reqtfm(req);
start = req->assoclen + req->cryptlen;
authsize = crypto_aead_authsize(tfm);
- diff_dst = (req->src != req->dst) ? true : false;
- dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
-
mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
for (i = 0; i < (authsize / 4); i++)
mdptr[i + 4] = swab32(mdptr[i + 4]);
auth_len = req->assoclen + req->cryptlen;
- if (!rxd->enc)
- auth_len -= authsize;
-
- sglen = sg_nents_for_len(rxd->src, auth_len);
- dma_unmap_sg(rxd->ddev, rxd->src, sglen, dir_src);
- kfree(rxd->split_src_sg);
-
- if (diff_dst) {
- sglen = sg_nents_for_len(rxd->dst, auth_len);
- dma_unmap_sg(rxd->ddev, rxd->dst, sglen, DMA_FROM_DEVICE);
- kfree(rxd->split_dst_sg);
- }
if (rxd->enc) {
scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1);
} else {
+ auth_len -= authsize;
start -= authsize;
scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
0);
@@ -1681,7 +1698,7 @@ static void sa_aead_dma_in_callback(void *data)
err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
}
- kfree(rxd);
+ sa_free_sa_rx_data(rxd);
aead_request_complete(req, err);
}
@@ -2243,25 +2260,21 @@ static int sa_dma_init(struct sa_crypto_data *dd)
return ret;
dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
- if (IS_ERR(dd->dma_rx1)) {
- if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER)
- dev_err(dd->dev, "Unable to request rx1 DMA channel\n");
- return PTR_ERR(dd->dma_rx1);
- }
+ if (IS_ERR(dd->dma_rx1))
+ return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
+ "Unable to request rx1 DMA channel\n");
dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
if (IS_ERR(dd->dma_rx2)) {
dma_release_channel(dd->dma_rx1);
- if (PTR_ERR(dd->dma_rx2) != -EPROBE_DEFER)
- dev_err(dd->dev, "Unable to request rx2 DMA channel\n");
- return PTR_ERR(dd->dma_rx2);
+ return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
+ "Unable to request rx2 DMA channel\n");
}
dd->dma_tx = dma_request_chan(dd->dev, "tx");
if (IS_ERR(dd->dma_tx)) {
- if (PTR_ERR(dd->dma_tx) != -EPROBE_DEFER)
- dev_err(dd->dev, "Unable to request tx DMA channel\n");
- ret = PTR_ERR(dd->dma_tx);
+ ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
+ "Unable to request tx DMA channel\n");
goto err_dma_tx;
}
@@ -2333,7 +2346,7 @@ static int sa_ul_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
- if (ret) {
+ if (ret < 0) {
dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
ret);
return ret;
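[Note] The sa2ul rework above replaces ad-hoc dma_map_sg()/dma_unmap_sg() bookkeeping with per-direction sg_table state, so cleanup and cache syncing operate on exactly what was mapped. A condensed sketch of that mapping/unmapping pattern with an illustrative struct (the driver's real struct sa_mapped_sg also tracks a split scatterlist):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    struct mapped_buf {
        struct sg_table sgt;
        enum dma_data_direction dir;
        bool mapped;
    };

    static int map_buf(struct device *dev, struct mapped_buf *m,
                       struct scatterlist *sgl, int nents,
                       enum dma_data_direction dir)
    {
        int ret;

        m->sgt.sgl = sgl;
        m->sgt.orig_nents = nents;
        ret = dma_map_sgtable(dev, &m->sgt, dir, 0);    /* fills m->sgt.nents */
        if (ret)
            return ret;

        m->dir = dir;
        m->mapped = true;
        return 0;
    }

    static void unmap_buf(struct device *dev, struct mapped_buf *m)
    {
        if (m->mapped)
            dma_unmap_sgtable(dev, &m->sgt, m->dir, 0);
        m->mapped = false;
    }

Completion paths can then call dma_sync_sgtable_for_cpu() on the mapped table before touching the data, as sa_sync_from_device() does above.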
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0c8cb23ae708..d60679c79822 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -18,7 +18,7 @@
#include <crypto/sha.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 4ef3eb11361c..4a4c3284ae1f 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -3,6 +3,7 @@ config CRYPTO_DEV_STM32_CRC
tristate "Support for STM32 crc accelerators"
depends on ARCH_STM32
select CRYPTO_HASH
+ select CRC32
help
This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 3ba41148c2a4..75867c0b0017 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -6,7 +6,10 @@
#include <linux/bitrev.h>
#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/crc32poly.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
@@ -147,7 +150,6 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
- unsigned long flags;
crc = stm32_crc_get_next_crc();
if (!crc)
@@ -155,7 +157,15 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
pm_runtime_get_sync(crc->dev);
- spin_lock_irqsave(&crc->lock, flags);
+ if (!spin_trylock(&crc->lock)) {
+ /* Hardware is busy, calculate crc32 by software */
+ if (mctx->poly == CRC32_POLY_LE)
+ ctx->partial = crc32_le(ctx->partial, d8, length);
+ else
+ ctx->partial = __crc32c_le(ctx->partial, d8, length);
+
+ goto pm_out;
+ }
/*
* Restore previously calculated CRC for this context as init value
@@ -195,8 +205,9 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
- spin_unlock_irqrestore(&crc->lock, flags);
+ spin_unlock(&crc->lock);
+pm_out:
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
@@ -216,9 +227,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
return burst_update(desc, d8, length);
/* Digest first bytes not 32bit aligned at first pass in the loop */
- size = min(length,
- burst_sz + (unsigned int)d8 - ALIGN_DOWN((unsigned int)d8,
- sizeof(u32)));
+ size = min_t(size_t, length, burst_sz + (size_t)d8 -
+ ALIGN_DOWN((size_t)d8, sizeof(u32)));
for (rem_sz = length, cur = d8; rem_sz;
rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
ret = burst_update(desc, cur, size);
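
The stm32-crc32 hunks replace the unconditional spin_lock_irqsave() in burst_update() with a spin_trylock() plus a software fallback, so a caller that finds the peripheral busy computes the checksum with the generic lib/crc32 helpers instead of waiting; the stm32_crc_update() hunk also redoes the alignment arithmetic with size_t so the pointer cast no longer truncates on 64-bit builds. A sketch of the fallback pattern, with hypothetical names and the hardware branch elided (the real driver programs CRC_DR and reads the partial result back):

	/*
	 * Illustrative only: try the hardware, fall back to lib/crc32 when
	 * the peripheral is already in use. crc32_le() takes (seed, data, len).
	 */
	#include <linux/crc32.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	static u32 crc_update_or_fallback(spinlock_t *hw_lock, u32 partial,
					  const u8 *data, size_t len)
	{
		if (!spin_trylock(hw_lock))
			return crc32_le(partial, data, len);	/* software path */

		/* ... feed the peripheral and update 'partial' here ... */
		spin_unlock(hw_lock);
		return partial;
	}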
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index d347a1d6e351..2670c30332fa 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -118,7 +118,7 @@ struct stm32_cryp_ctx {
struct crypto_engine_ctx enginectx;
struct stm32_cryp *cryp;
int keylen;
- u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ __be32 key[AES_KEYSIZE_256 / sizeof(u32)];
unsigned long flags;
};
@@ -380,24 +380,24 @@ static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
return 0;
}
-static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
+static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, __be32 *iv)
{
if (!iv)
return;
- stm32_cryp_write(cryp, CRYP_IV0LR, cpu_to_be32(*iv++));
- stm32_cryp_write(cryp, CRYP_IV0RR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV0LR, be32_to_cpu(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV0RR, be32_to_cpu(*iv++));
if (is_aes(cryp)) {
- stm32_cryp_write(cryp, CRYP_IV1LR, cpu_to_be32(*iv++));
- stm32_cryp_write(cryp, CRYP_IV1RR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV1LR, be32_to_cpu(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV1RR, be32_to_cpu(*iv++));
}
}
static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
{
struct skcipher_request *req = cryp->req;
- u32 *tmp = (void *)req->iv;
+ __be32 *tmp = (void *)req->iv;
if (!tmp)
return;
@@ -417,13 +417,13 @@ static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
int r_id;
if (is_des(c)) {
- stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0]));
- stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1]));
+ stm32_cryp_write(c, CRYP_K1LR, be32_to_cpu(c->ctx->key[0]));
+ stm32_cryp_write(c, CRYP_K1RR, be32_to_cpu(c->ctx->key[1]));
} else {
r_id = CRYP_K3RR;
for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4)
stm32_cryp_write(c, r_id,
- cpu_to_be32(c->ctx->key[i - 1]));
+ be32_to_cpu(c->ctx->key[i - 1]));
}
}
@@ -469,7 +469,7 @@ static unsigned int stm32_cryp_get_input_text_len(struct stm32_cryp *cryp)
static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg)
{
int ret;
- u32 iv[4];
+ __be32 iv[4];
/* Phase 1 : init */
memcpy(iv, cryp->areq->iv, 12);
@@ -491,6 +491,7 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
{
int ret;
u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE];
+ __be32 *bd;
u32 *d;
unsigned int i, textlen;
@@ -498,7 +499,7 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
iv[AES_BLOCK_SIZE - 1] = 1;
- stm32_cryp_hw_write_iv(cryp, (u32 *)iv);
+ stm32_cryp_hw_write_iv(cryp, (__be32 *)iv);
/* Build B0 */
memcpy(b0, iv, AES_BLOCK_SIZE);
@@ -518,11 +519,14 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
/* Write B0 */
d = (u32 *)b0;
+ bd = (__be32 *)b0;
for (i = 0; i < AES_BLOCK_32; i++) {
+ u32 xd = d[i];
+
if (!cryp->caps->padding_wa)
- *d = cpu_to_be32(*d);
- stm32_cryp_write(cryp, CRYP_DIN, *d++);
+ xd = be32_to_cpu(bd[i]);
+ stm32_cryp_write(cryp, CRYP_DIN, xd);
}
/* Wait for end of processing */
@@ -617,7 +621,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
case CR_TDES_CBC:
case CR_AES_CBC:
case CR_AES_CTR:
- stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->iv);
+ stm32_cryp_hw_write_iv(cryp, (__be32 *)cryp->req->iv);
break;
default:
@@ -1120,7 +1124,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
/* GCM: write aad and payload size (in bits) */
size_bit = cryp->areq->assoclen * 8;
if (cryp->caps->swap_final)
- size_bit = cpu_to_be32(size_bit);
+ size_bit = (__force u32)cpu_to_be32(size_bit);
stm32_cryp_write(cryp, CRYP_DIN, 0);
stm32_cryp_write(cryp, CRYP_DIN, size_bit);
@@ -1129,7 +1133,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
cryp->areq->cryptlen - AES_BLOCK_SIZE;
size_bit *= 8;
if (cryp->caps->swap_final)
- size_bit = cpu_to_be32(size_bit);
+ size_bit = (__force u32)cpu_to_be32(size_bit);
stm32_cryp_write(cryp, CRYP_DIN, 0);
stm32_cryp_write(cryp, CRYP_DIN, size_bit);
@@ -1137,14 +1141,19 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
/* CCM: write CTR0 */
u8 iv[AES_BLOCK_SIZE];
u32 *iv32 = (u32 *)iv;
+ __be32 *biv;
+
+ biv = (void *)iv;
memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
for (i = 0; i < AES_BLOCK_32; i++) {
+ u32 xiv = iv32[i];
+
if (!cryp->caps->padding_wa)
- *iv32 = cpu_to_be32(*iv32);
- stm32_cryp_write(cryp, CRYP_DIN, *iv32++);
+ xiv = be32_to_cpu(biv[i]);
+ stm32_cryp_write(cryp, CRYP_DIN, xiv);
}
}
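
The stm32-cryp hunks are an endianness-annotation cleanup: the key and IV buffers handed in by the crypto API hold big-endian 32-bit words, so they are now typed __be32 and converted with be32_to_cpu() before being passed to stm32_cryp_write(), which takes a CPU-order value, while the __force casts mark the places where a byte-swapped length is deliberately stored in a plain u32. On the little-endian STM32 cores the written register values are unchanged; the gain is that the types now describe the data, so sparse can check them. A minimal illustration of the convention (the helper name is hypothetical):

	/*
	 * Hypothetical helper: view a big-endian byte stream (the IV) as
	 * __be32 words and convert to the CPU-order value a register
	 * accessor expects.
	 */
	#include <linux/types.h>
	#include <asm/byteorder.h>

	static inline u32 iv_word_for_reg(const void *iv_bytes, unsigned int idx)
	{
		const __be32 *iv = iv_bytes;

		return be32_to_cpu(iv[idx]);
	}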
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 03c5e6683805..e3e25278a970 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -748,7 +749,7 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
static void stm32_hash_copy_hash(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- u32 *hash = (u32 *)rctx->digest;
+ __be32 *hash = (void *)rctx->digest;
unsigned int i, hashsize;
switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
@@ -769,7 +770,7 @@ static void stm32_hash_copy_hash(struct ahash_request *req)
}
for (i = 0; i < hashsize / sizeof(u32); i++)
- hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
+ hash[i] = cpu_to_be32(stm32_hash_read(rctx->hdev,
HASH_HREG(i)));
}
@@ -1463,14 +1464,9 @@ static int stm32_hash_probe(struct platform_device *pdev)
}
hdev->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(hdev->clk)) {
- if (PTR_ERR(hdev->clk) != -EPROBE_DEFER) {
- dev_err(dev, "failed to get clock for hash (%lu)\n",
- PTR_ERR(hdev->clk));
- }
-
- return PTR_ERR(hdev->clk);
- }
+ if (IS_ERR(hdev->clk))
+ return dev_err_probe(dev, PTR_ERR(hdev->clk),
+ "failed to get clock for hash\n");
ret = clk_prepare_enable(hdev->clk);
if (ret) {
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 7c547352a862..66773892f665 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -806,10 +806,10 @@ static int talitos_register_rng(struct device *dev)
struct talitos_private *priv = dev_get_drvdata(dev);
int err;
- priv->rng.name = dev_driver_string(dev),
- priv->rng.init = talitos_rng_init,
- priv->rng.data_present = talitos_rng_data_present,
- priv->rng.data_read = talitos_rng_data_read,
+ priv->rng.name = dev_driver_string(dev);
+ priv->rng.init = talitos_rng_init;
+ priv->rng.data_present = talitos_rng_data_present;
+ priv->rng.data_read = talitos_rng_data_read;
priv->rng.priv = (unsigned long)dev;
err = hwrng_register(&priv->rng);
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 800dfc4d16c4..c3adeb2e5823 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -11,13 +11,15 @@
#include <linux/clk.h>
#include <linux/completion.h>
-#include <linux/crypto.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
+#include <linux/kernel.h>
#include <linux/klist.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -27,7 +29,6 @@
#include <linux/platform_data/dma-ste-dma40.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
@@ -92,17 +93,6 @@ struct cryp_ctx {
static struct cryp_driver_data driver_data;
/**
- * uint8p_to_uint32_be - 4*uint8 to uint32 big endian
- * @in: Data to convert.
- */
-static inline u32 uint8p_to_uint32_be(u8 *in)
-{
- u32 *data = (u32 *)in;
-
- return cpu_to_be32p(data);
-}
-
-/**
* swap_bits_in_byte - mirror the bits in a byte
* @b: the byte to be mirrored
*
@@ -284,6 +274,7 @@ static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
int i;
int status = 0;
int num_of_regs = ctx->blocksize / 8;
+ __be32 *civ = (__be32 *)ctx->iv;
u32 iv[AES_BLOCK_SIZE / 4];
dev_dbg(device_data->dev, "[%s]", __func__);
@@ -300,7 +291,7 @@ static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
}
for (i = 0; i < ctx->blocksize / 4; i++)
- iv[i] = uint8p_to_uint32_be(ctx->iv + i*4);
+ iv[i] = be32_to_cpup(civ + i);
for (i = 0; i < num_of_regs; i++) {
status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
@@ -339,23 +330,24 @@ static int cfg_keys(struct cryp_ctx *ctx)
int i;
int num_of_regs = ctx->keylen / 8;
u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
+ __be32 *ckey = (__be32 *)ctx->key;
int cryp_error = 0;
dev_dbg(ctx->device->dev, "[%s]", __func__);
if (mode_is_aes(ctx->config.algomode)) {
- swap_words_in_key_and_bits_in_byte((u8 *)ctx->key,
+ swap_words_in_key_and_bits_in_byte((u8 *)ckey,
(u8 *)swapped_key,
ctx->keylen);
} else {
for (i = 0; i < ctx->keylen / 4; i++)
- swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4);
+ swapped_key[i] = be32_to_cpup(ckey + i);
}
for (i = 0; i < num_of_regs; i++) {
cryp_error = set_key(ctx->device,
- *(((u32 *)swapped_key)+i*2),
- *(((u32 *)swapped_key)+i*2+1),
+ swapped_key[i * 2],
+ swapped_key[i * 2 + 1],
(enum cryp_key_reg_index) i);
if (cryp_error != 0) {
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index a5ee8c2fb4e0..3d407eebb2ba 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -1071,27 +1072,32 @@ int hash_hw_update(struct ahash_request *req)
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_hash_walk walk;
- int msg_length = crypto_hash_walk_first(req, &walk);
-
- /* Empty message ("") is correct indata */
- if (msg_length == 0)
- return ret;
+ int msg_length;
index = req_ctx->state.index;
buffer = (u8 *)req_ctx->state.buffer;
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ msg_length = crypto_hash_walk_first(req, &walk);
+
+ /* Empty message ("") is correct indata */
+ if (msg_length == 0) {
+ ret = 0;
+ goto release_dev;
+ }
+
/* Check if ctx->state.length + msg_length
overflows */
if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
- return -EPERM;
+ ret = crypto_hash_walk_done(&walk, -EPERM);
+ goto release_dev;
}
- ret = hash_get_device_data(ctx, &device_data);
- if (ret)
- return ret;
-
/* Main loop */
while (0 != msg_length) {
data_buffer = walk.data;
@@ -1101,7 +1107,8 @@ int hash_hw_update(struct ahash_request *req)
if (ret) {
dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
__func__);
- goto out;
+ crypto_hash_walk_done(&walk, ret);
+ goto release_dev;
}
msg_length = crypto_hash_walk_done(&walk, 0);
@@ -1111,7 +1118,7 @@ int hash_hw_update(struct ahash_request *req)
dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
__func__, req_ctx->state.index, req_ctx->state.bit_index);
-out:
+release_dev:
release_hash_device(device_data);
return ret;
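
The ux500 hash_hw_update() rework acquires the device before starting the hash walk and routes every early exit through a single release label, so the walk is always terminated with crypto_hash_walk_done() and the device reference is dropped even on the empty-message and length-overflow paths. A sketch of that ordering under stated assumptions; my_hash_get_device(), my_hash_feed() and my_hash_put_device() are placeholders, not driver APIs:

	#include <crypto/internal/hash.h>

	struct my_hash_dev;					/* hypothetical device handle */
	int my_hash_get_device(struct my_hash_dev *dev);	/* hypothetical */
	int my_hash_feed(struct my_hash_dev *dev, const u8 *data, int len);	/* hypothetical */
	void my_hash_put_device(struct my_hash_dev *dev);	/* hypothetical */

	static int example_hw_update(struct ahash_request *req, struct my_hash_dev *dev)
	{
		struct crypto_hash_walk walk;
		int nbytes, ret;

		ret = my_hash_get_device(dev);		/* grab the engine before walking */
		if (ret)
			return ret;

		nbytes = crypto_hash_walk_first(req, &walk);
		while (nbytes > 0) {
			ret = my_hash_feed(dev, walk.data, nbytes);
			if (ret) {
				crypto_hash_walk_done(&walk, ret);	/* unwind the walk */
				goto release_dev;
			}
			nbytes = crypto_hash_walk_done(&walk, 0);
		}
		ret = nbytes < 0 ? nbytes : 0;

	release_dev:
		my_hash_put_device(dev);
		return ret;
	}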
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
index fb294174e408..b894e3a8be4f 100644
--- a/drivers/crypto/virtio/Kconfig
+++ b/drivers/crypto/virtio/Kconfig
@@ -5,7 +5,6 @@ config CRYPTO_DEV_VIRTIO
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
- default m
help
This driver provides support for virtio crypto device. If you
choose 'M' here, this module will be called virtio_crypto.
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 27079354dbe9..bf1f421e05f2 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -10,6 +10,7 @@
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>