author	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-01 12:00:10 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-01 12:00:10 -0700
commit	81e8c10dac62c427b25f6bbb07936806e4dd9e8a (patch)
tree	9eea9ad000cfdb728e4385873ebba73222c80d93
parent	729ea4e064202aeec149b034b459501ef0a5060e (diff)
parent	58ca0060ec4e51208d2eee12198fc55fd9e4feb3 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Introduce crypto_shash_tfm_digest() and use it wherever possible.
   - Fix use-after-free and race in crypto_spawn_alg.
   - Add support for parallel and batch requests to crypto_engine.

  Algorithms:
   - Update jitter RNG for SP800-90B compliance.
   - Always use jitter RNG as seed in drbg.

  Drivers:
   - Add Arm CryptoCell driver cctrng.
   - Add support for SEV-ES to the PSP driver in ccp"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (114 commits)
  crypto: hisilicon - fix driver compatibility issue with different versions of devices
  crypto: engine - do not requeue in case of fatal error
  crypto: cavium/nitrox - Fix a typo in a comment
  crypto: hisilicon/qm - change debugfs file name from qm_regs to regs
  crypto: hisilicon/qm - add DebugFS for xQC and xQE dump
  crypto: hisilicon/zip - add debugfs for Hisilicon ZIP
  crypto: hisilicon/hpre - add debugfs for Hisilicon HPRE
  crypto: hisilicon/sec2 - add debugfs for Hisilicon SEC
  crypto: hisilicon/qm - add debugfs to the QM state machine
  crypto: hisilicon/qm - add debugfs for QM
  crypto: stm32/crc32 - protect from concurrent accesses
  crypto: stm32/crc32 - don't sleep in runtime pm
  crypto: stm32/crc32 - fix multi-instance
  crypto: stm32/crc32 - fix run-time self test issue.
  crypto: stm32/crc32 - fix ext4 chksum BUG_ON()
  crypto: hisilicon/zip - Use temporary sqe when doing work
  crypto: hisilicon - add device error report through abnormal irq
  crypto: hisilicon - remove codes of directly report device errors through MSI
  crypto: hisilicon - QM memory management optimization
  crypto: hisilicon - unify initial value assignment into QM
  ...
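The new crypto_shash_tfm_digest() helper called out above removes the descriptor boilerplate that every one-shot digest caller used to carry; the essiv and arm64 aes-glue hunks below show the conversion. A minimal before/after sketch (illustrative only; error handling trimmed, and tfm is assumed to be an already-allocated shash transform):

	/* Before: each caller stack-allocated a descriptor. */
	static int digest_old(struct crypto_shash *tfm, const u8 *data,
			      unsigned int len, u8 *out)
	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		return crypto_shash_digest(desc, data, len, out);
	}

	/* After: the helper hides the descriptor entirely. */
	static int digest_new(struct crypto_shash *tfm, const u8 *data,
			      unsigned int len, u8 *out)
	{
		return crypto_shash_tfm_digest(tfm, data, len, out);
	}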
-rw-r--r-- Documentation/ABI/testing/debugfs-hisi-hpre | 89
-rw-r--r-- Documentation/ABI/testing/debugfs-hisi-sec | 94
-rw-r--r-- Documentation/ABI/testing/debugfs-hisi-zip | 70
-rw-r--r-- Documentation/devicetree/bindings/rng/arm-cctrng.yaml | 54
-rw-r--r-- Documentation/security/siphash.rst | 2
-rw-r--r-- MAINTAINERS | 9
-rw-r--r-- arch/arm/crypto/sha1_glue.c | 1
-rw-r--r-- arch/arm/crypto/sha1_neon_glue.c | 1
-rw-r--r-- arch/arm/crypto/sha256_glue.c | 1
-rw-r--r-- arch/arm/crypto/sha256_neon_glue.c | 1
-rw-r--r-- arch/arm/kernel/armksyms.c | 1
-rw-r--r-- arch/arm64/crypto/aes-glue.c | 4
-rw-r--r-- arch/arm64/crypto/crct10dif-ce-core.S | 2
-rw-r--r-- arch/arm64/crypto/sha256-glue.c | 1
-rw-r--r-- arch/arm64/crypto/sha512-glue.c | 1
-rw-r--r-- arch/microblaze/kernel/microblaze_ksyms.c | 1
-rw-r--r-- arch/mips/cavium-octeon/crypto/octeon-md5.c | 1
-rw-r--r-- arch/powerpc/crypto/md5-glue.c | 1
-rw-r--r-- arch/powerpc/crypto/sha1-spe-glue.c | 1
-rw-r--r-- arch/powerpc/crypto/sha1.c | 33
-rw-r--r-- arch/powerpc/crypto/sha256-spe-glue.c | 1
-rw-r--r-- arch/s390/crypto/sha1_s390.c | 12
-rw-r--r-- arch/sparc/crypto/md5_glue.c | 1
-rw-r--r-- arch/sparc/crypto/sha1_glue.c | 1
-rw-r--r-- arch/sparc/crypto/sha256_glue.c | 1
-rw-r--r-- arch/sparc/crypto/sha512_glue.c | 1
-rw-r--r-- arch/unicore32/kernel/ksyms.c | 1
-rw-r--r-- arch/x86/crypto/sha1_ssse3_glue.c | 1
-rw-r--r-- arch/x86/crypto/sha256_ssse3_glue.c | 1
-rw-r--r-- arch/x86/crypto/sha512_ssse3_glue.c | 1
-rw-r--r-- crypto/Kconfig | 3
-rw-r--r-- crypto/algapi.c | 32
-rw-r--r-- crypto/algif_rng.c | 2
-rw-r--r-- crypto/api.c | 3
-rw-r--r-- crypto/blake2b_generic.c | 4
-rw-r--r-- crypto/crypto_engine.c | 168
-rw-r--r-- crypto/drbg.c | 30
-rw-r--r-- crypto/essiv.c | 4
-rw-r--r-- crypto/internal.h | 1
-rw-r--r-- crypto/jitterentropy-kcapi.c | 27
-rw-r--r-- crypto/jitterentropy.c | 417
-rw-r--r-- crypto/lrw.c | 6
-rw-r--r-- crypto/sha1_generic.c | 5
-rw-r--r-- crypto/sha256_generic.c | 14
-rw-r--r-- crypto/shash.c | 16
-rw-r--r-- crypto/xts.c | 6
-rw-r--r-- drivers/char/hw_random/Kconfig | 13
-rw-r--r-- drivers/char/hw_random/Makefile | 1
-rw-r--r-- drivers/char/hw_random/cctrng.c | 735
-rw-r--r-- drivers/char/hw_random/cctrng.h | 72
-rw-r--r-- drivers/char/hw_random/omap-rng.c | 5
-rw-r--r-- drivers/char/hw_random/optee-rng.c | 2
-rw-r--r-- drivers/char/hw_random/xgene-rng.c | 4
-rw-r--r-- drivers/char/random.c | 8
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 4
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-core.c | 4
-rw-r--r-- drivers/crypto/atmel-sha.c | 1
-rw-r--r-- drivers/crypto/axis/artpec6_crypto.c | 10
-rw-r--r-- drivers/crypto/bcm/cipher.c | 27
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_main.c | 2
-rw-r--r-- drivers/crypto/ccp/Kconfig | 3
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-sha.c | 9
-rw-r--r-- drivers/crypto/ccp/sev-dev.c | 43
-rw-r--r-- drivers/crypto/ccree/cc_cipher.c | 9
-rw-r--r-- drivers/crypto/ccree/cc_debugfs.c | 4
-rw-r--r-- drivers/crypto/chelsio/chcr_algo.c | 3
-rw-r--r-- drivers/crypto/chelsio/chcr_ipsec.c | 1
-rw-r--r-- drivers/crypto/hisilicon/Kconfig | 4
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre.h | 18
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 99
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 355
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 2433
-rw-r--r-- drivers/crypto/hisilicon/qm.h | 120
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec.h | 5
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 20
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 379
-rw-r--r-- drivers/crypto/hisilicon/zip/zip.h | 8
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_crypto.c | 20
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 362
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptpf_main.c | 4
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c | 12
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 95
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptvf_algs.c | 6
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptvf_main.c | 12
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c | 10
-rw-r--r-- drivers/crypto/mediatek/mtk-sha.c | 7
-rw-r--r-- drivers/crypto/n2_core.c | 7
-rw-r--r-- drivers/crypto/omap-sham.c | 21
-rw-r--r-- drivers/crypto/s5p-sss.c | 39
-rw-r--r-- drivers/crypto/stm32/stm32-crc32.c | 230
-rw-r--r-- drivers/crypto/stm32/stm32-hash.c | 38
-rw-r--r-- drivers/nfc/s3fwrn5/firmware.c | 10
-rw-r--r-- fs/crypto/fname.c | 7
-rw-r--r-- fs/crypto/hkdf.c | 6
-rw-r--r-- fs/ecryptfs/crypto.c | 17
-rw-r--r-- fs/f2fs/hash.c | 1
-rw-r--r-- fs/nfsd/nfs4recover.c | 26
-rw-r--r-- fs/ubifs/auth.c | 20
-rw-r--r-- fs/ubifs/master.c | 9
-rw-r--r-- fs/ubifs/replay.c | 14
-rw-r--r-- include/crypto/acompress.h | 2
-rw-r--r-- include/crypto/algapi.h | 2
-rw-r--r-- include/crypto/drbg.h | 6
-rw-r--r-- include/crypto/engine.h | 15
-rw-r--r-- include/crypto/hash.h | 19
-rw-r--r-- include/crypto/sha.h | 28
-rw-r--r-- include/crypto/sha256_base.h | 6
-rw-r--r-- include/linux/cryptohash.h | 14
-rw-r--r-- include/linux/filter.h | 4
-rw-r--r-- include/linux/padata.h | 6
-rw-r--r-- include/linux/psp-sev.h | 2
-rw-r--r-- include/net/tcp.h | 1
-rw-r--r-- include/uapi/linux/psp-sev.h | 2
-rw-r--r-- kernel/bpf/core.c | 18
-rw-r--r-- kernel/padata.c | 14
-rw-r--r-- lib/crypto/chacha.c | 1
-rw-r--r-- lib/crypto/sha256.c | 20
-rw-r--r-- lib/mpi/longlong.h | 2
-rw-r--r-- lib/sha1.c | 24
-rw-r--r-- net/bluetooth/smp.c | 6
-rw-r--r-- net/core/secure_seq.c | 1
-rw-r--r-- net/ipv6/addrconf.c | 10
-rw-r--r-- net/ipv6/seg6_hmac.c | 1
-rw-r--r-- net/mptcp/crypto.c | 4
-rw-r--r-- net/sctp/auth.c | 10
-rw-r--r-- net/sctp/sm_make_chunk.c | 23
-rw-r--r-- security/keys/encrypted-keys/encrypted.c | 18
127 files changed, 4966 insertions, 1794 deletions
diff --git a/Documentation/ABI/testing/debugfs-hisi-hpre b/Documentation/ABI/testing/debugfs-hisi-hpre
index ec4a79e3a807..b4be5f1db4b7 100644
--- a/Documentation/ABI/testing/debugfs-hisi-hpre
+++ b/Documentation/ABI/testing/debugfs-hisi-hpre
@@ -33,7 +33,7 @@ Contact: linux-crypto@vger.kernel.org
Description: Dump debug registers from the HPRE.
Only available for PF.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/qm_regs
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/regs
Date: Sep 2019
Contact: linux-crypto@vger.kernel.org
Description: Dump debug registers from the QM.
@@ -44,14 +44,97 @@ What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/current_q
Date: Sep 2019
Contact: linux-crypto@vger.kernel.org
Description: One QM may contain multiple queues. Select specific queue to
- show its debug registers in above qm_regs.
+ show its debug registers in above regs.
Only available for PF.
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/clear_enable
Date: Sep 2019
Contact: linux-crypto@vger.kernel.org
-Description: QM debug registers(qm_regs) read clear control. 1 means enable
+Description: QM debug registers(regs) read clear control. 1 means enable
register read clear, otherwise 0.
Writing to this file has no functional effect, only enable or
disable counters clear after reading of these registers.
Only available for PF.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/err_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of invalid interrupts for
+ QM task completion.
+ Available for both PF and VF, and take no other effect on HPRE.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/aeq_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of QM async event queue interrupts.
+ Available for both PF and VF, and take no other effect on HPRE.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/abnormal_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of interrupts for QM abnormal event.
+ Available for both PF and VF, and take no other effect on HPRE.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/create_qp_err
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of queue allocation errors.
+ Available for both PF and VF, and take no other effect on HPRE.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/mb_err
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of failed QM mailbox commands.
+ Available for both PF and VF, and take no other effect on HPRE.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/status
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the status of the QM.
+ Four states: initiated, started, stopped and closed.
+ Available for both PF and VF, and take no other effect on HPRE.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of sent requests.
+ Available for both PF and VF, and take no other effect on HPRE.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/recv_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of received requests.
+ Available for both PF and VF, and take no other effect on HPRE.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_busy_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of requests sent
+ with returning busy.
+ Available for both PF and VF, and take no other effect on HPRE.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_fail_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of completed but error requests.
+ Available for both PF and VF, and take no other effect on HPRE.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/invalid_req_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of invalid requests being received.
+ Available for both PF and VF, and take no other effect on HPRE.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/overtime_thrhld
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Set the threshold time for counting the request which is
+ processed longer than the threshold.
+ 0: disable(default), 1: 1 microsecond.
+ Available for both PF and VF, and take no other effect on HPRE.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/over_thrhld_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of time out requests.
+ Available for both PF and VF, and take no other effect on HPRE.
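The hpre_dfx entries added above are plain-text debugfs files, so the counters can be sampled from user space with ordinary reads. A hypothetical sketch (assumes debugfs is mounted at /sys/kernel/debug and uses a made-up BDF of 0000:79:00.0; neither detail comes from this patch):

	#include <stdio.h>

	/* Print one HPRE debugfs counter. */
	int main(void)
	{
		char buf[64];
		FILE *f = fopen("/sys/kernel/debug/hisi_hpre/"
				"0000:79:00.0/hpre_dfx/send_cnt", "r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("send_cnt: %s", buf);
		fclose(f);
		return 0;
	}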
diff --git a/Documentation/ABI/testing/debugfs-hisi-sec b/Documentation/ABI/testing/debugfs-hisi-sec
index 06adb899495e..85feb4408e0f 100644
--- a/Documentation/ABI/testing/debugfs-hisi-sec
+++ b/Documentation/ABI/testing/debugfs-hisi-sec
@@ -1,10 +1,4 @@
-What: /sys/kernel/debug/hisi_sec/<bdf>/sec_dfx
-Date: Oct 2019
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the debug registers of SEC cores.
- Only available for PF.
-
-What: /sys/kernel/debug/hisi_sec/<bdf>/clear_enable
+What: /sys/kernel/debug/hisi_sec2/<bdf>/clear_enable
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org
Description: Enabling/disabling of clear action after reading
@@ -12,7 +6,7 @@ Description: Enabling/disabling of clear action after reading
0: disable, 1: enable.
Only available for PF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec/<bdf>/current_qm
+What: /sys/kernel/debug/hisi_sec2/<bdf>/current_qm
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org
Description: One SEC controller has one PF and multiple VFs, each function
@@ -20,24 +14,100 @@ Description: One SEC controller has one PF and multiple VFs, each function
qm refers to.
Only available for PF.
-What: /sys/kernel/debug/hisi_sec/<bdf>/qm/qm_regs
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/qm_regs
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org
Description: Dump of QM related debug registers.
Available for PF and VF in host. VF in guest currently only
has one debug register.
-What: /sys/kernel/debug/hisi_sec/<bdf>/qm/current_q
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/current_q
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org
Description: One QM of SEC may contain multiple queues. Select specific
- queue to show its debug registers in above 'qm_regs'.
+ queue to show its debug registers in above 'regs'.
Only available for PF.
-What: /sys/kernel/debug/hisi_sec/<bdf>/qm/clear_enable
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/clear_enable
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org
Description: Enabling/disabling of clear action after reading
the SEC's QM debug registers.
0: disable, 1: enable.
Only available for PF, and take no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/err_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of invalid interrupts for
+ QM task completion.
+ Available for both PF and VF, and take no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/aeq_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of QM async event queue interrupts.
+ Available for both PF and VF, and take no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/abnormal_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of interrupts for QM abnormal event.
+ Available for both PF and VF, and take no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/create_qp_err
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of queue allocation errors.
+ Available for both PF and VF, and take no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/mb_err
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of failed QM mailbox commands.
+ Available for both PF and VF, and take no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/status
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the status of the QM.
+ Four states: initiated, started, stopped and closed.
+ Available for both PF and VF, and take no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/send_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of sent requests.
+ Available for both PF and VF, and take no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/recv_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of received requests.
+ Available for both PF and VF, and take no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/send_busy_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of requests sent with returning busy.
+ Available for both PF and VF, and take no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/err_bd_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of BD type error requests
+ to be received.
+ Available for both PF and VF, and take no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/invalid_req_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of invalid requests being received.
+ Available for both PF and VF, and take no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/done_flag_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of completed but marked error requests
+ to be received.
+ Available for both PF and VF, and take no other effect on SEC.
diff --git a/Documentation/ABI/testing/debugfs-hisi-zip b/Documentation/ABI/testing/debugfs-hisi-zip
index a7c63e6c4bc3..3034a2bf99ca 100644
--- a/Documentation/ABI/testing/debugfs-hisi-zip
+++ b/Documentation/ABI/testing/debugfs-hisi-zip
@@ -26,7 +26,7 @@ Description: One ZIP controller has one PF and multiple VFs, each function
has a QM. Select the QM which below qm refers to.
Only available for PF.
-What: /sys/kernel/debug/hisi_zip/<bdf>/qm/qm_regs
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/regs
Date: Nov 2018
Contact: linux-crypto@vger.kernel.org
Description: Dump of QM related debug registers.
@@ -37,14 +37,78 @@ What: /sys/kernel/debug/hisi_zip/<bdf>/qm/current_q
Date: Nov 2018
Contact: linux-crypto@vger.kernel.org
Description: One QM may contain multiple queues. Select specific queue to
- show its debug registers in above qm_regs.
+ show its debug registers in above regs.
Only available for PF.
What: /sys/kernel/debug/hisi_zip/<bdf>/qm/clear_enable
Date: Nov 2018
Contact: linux-crypto@vger.kernel.org
-Description: QM debug registers(qm_regs) read clear control. 1 means enable
+Description: QM debug registers(regs) read clear control. 1 means enable
register read clear, otherwise 0.
Writing to this file has no functional effect, only enable or
disable counters clear after reading of these registers.
Only available for PF.
+
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/err_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of invalid interrupts for
+ QM task completion.
+ Available for both PF and VF, and take no other effect on ZIP.
+
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/aeq_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of QM async event queue interrupts.
+ Available for both PF and VF, and take no other effect on ZIP.
+
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/abnormal_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of interrupts for QM abnormal event.
+ Available for both PF and VF, and take no other effect on ZIP.
+
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/create_qp_err
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of queue allocation errors.
+ Available for both PF and VF, and take no other effect on ZIP.
+
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/mb_err
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of failed QM mailbox commands.
+ Available for both PF and VF, and take no other effect on ZIP.
+
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/status
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the status of the QM.
+ Four states: initiated, started, stopped and closed.
+ Available for both PF and VF, and take no other effect on ZIP.
+
+What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/send_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of sent requests.
+ Available for both PF and VF, and take no other effect on ZIP.
+
+What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/recv_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of received requests.
+ Available for both PF and VF, and take no other effect on ZIP.
+
+What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/send_busy_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of requests received
+ with returning busy.
+ Available for both PF and VF, and take no other effect on ZIP.
+
+What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/err_bd_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of BD type error requests
+ to be received.
+ Available for both PF and VF, and take no other effect on ZIP.
diff --git a/Documentation/devicetree/bindings/rng/arm-cctrng.yaml b/Documentation/devicetree/bindings/rng/arm-cctrng.yaml
new file mode 100644
index 000000000000..ca6aad19b6ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/arm-cctrng.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/arm-cctrng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Arm TrustZone CryptoCell TRNG engine
+
+maintainers:
+ - Hadar Gat <hadar.gat@arm.com>
+
+description: |+
+ Arm TrustZone CryptoCell TRNG (True Random Number Generator) engine.
+
+properties:
+ compatible:
+ enum:
+ - arm,cryptocell-713-trng
+ - arm,cryptocell-703-trng
+
+ interrupts:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+ arm,rosc-ratio:
+ description:
+ Arm TrustZone CryptoCell TRNG engine has 4 ring oscillators.
+ Sampling ratio values for these 4 ring oscillators. (from calibration)
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - items:
+ maxItems: 4
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - interrupts
+ - reg
+ - arm,rosc-ratio
+
+additionalProperties: false
+
+examples:
+ - |
+ arm_cctrng: rng@60000000 {
+ compatible = "arm,cryptocell-713-trng";
+ interrupts = <0 29 4>;
+ reg = <0x60000000 0x10000>;
+ arm,rosc-ratio = <5000 1000 500 0>;
+ };
diff --git a/Documentation/security/siphash.rst b/Documentation/security/siphash.rst
index 4eba68cdf0a1..bd9363025fcb 100644
--- a/Documentation/security/siphash.rst
+++ b/Documentation/security/siphash.rst
@@ -7,7 +7,7 @@ SipHash - a short input PRF
SipHash is a cryptographically secure PRF -- a keyed hash function -- that
performs very well for short inputs, hence the name. It was designed by
cryptographers Daniel J. Bernstein and Jean-Philippe Aumasson. It is intended
-as a replacement for some uses of: `jhash`, `md5_transform`, `sha_transform`,
+as a replacement for some uses of: `jhash`, `md5_transform`, `sha1_transform`,
and so forth.
SipHash takes a secret key filled with randomly generated numbers and either
diff --git a/MAINTAINERS b/MAINTAINERS
index cbfdf73029c6..2b17e667c52b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3908,6 +3908,15 @@ S: Supported
W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
F: drivers/crypto/ccree/
+CCTRNG ARM TRUSTZONE CRYPTOCELL TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER
+M: Hadar Gat <hadar.gat@arm.com>
+L: linux-crypto@vger.kernel.org
+S: Supported
+F: drivers/char/hw_random/cctrng.c
+F: drivers/char/hw_random/cctrng.h
+F: Documentation/devicetree/bindings/rng/arm-cctrng.txt
+W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
+
CEC FRAMEWORK
M: Hans Verkuil <hverkuil-cisco@xs4all.nl>
L: linux-media@vger.kernel.org
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index c80b0ebfd02f..4e954b3f7ecd 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -14,7 +14,6 @@
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index 2c3627334335..0071e5e4411a 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
index 215497f011f2..b8a4f79020cf 100644
--- a/arch/arm/crypto/sha256_glue.c
+++ b/arch/arm/crypto/sha256_glue.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/sha.h>
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
index 38645e415196..79820b9e2541 100644
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -11,7 +11,6 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/sha.h>
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 98bdea51089d..82e96ac83684 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -7,7 +7,6 @@
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/string.h>
-#include <linux/cryptohash.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index ed5409c6abf4..395bbf64b2ab 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -158,7 +158,6 @@ static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
unsigned int key_len)
{
struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- SHASH_DESC_ON_STACK(desc, ctx->hash);
u8 digest[SHA256_DIGEST_SIZE];
int ret;
@@ -166,8 +165,7 @@ static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
if (ret)
return ret;
- desc->tfm = ctx->hash;
- crypto_shash_digest(desc, in_key, key_len, digest);
+ crypto_shash_tfm_digest(ctx->hash, in_key, key_len, digest);
return aes_expandkey(&ctx->key2, digest, sizeof(digest));
}
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
index 5a95c2628fbf..111d9c9abddd 100644
--- a/arch/arm64/crypto/crct10dif-ce-core.S
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -66,7 +66,7 @@
#include <asm/assembler.h>
.text
- .cpu generic+crypto
+ .arch armv8-a+crypto
init_crc .req w19
buf .req x20
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index ddf4a0d85c1c..77bc6e72abae 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -12,7 +12,6 @@
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index 78d3083de6b7..370ccb29602f 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -6,7 +6,6 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/sha.h>
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index 92e12c2c2ec1..51c43ee5e380 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/string.h>
-#include <linux/cryptohash.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
index d1ed066e1a17..8c8ea139653e 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-md5.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/string.h>
#include <asm/byteorder.h>
-#include <linux/cryptohash.h>
#include <asm/octeon/octeon.h>
#include <crypto/internal/hash.h>
diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c
index 7d1bf2fcf668..c24f605033bd 100644
--- a/arch/powerpc/crypto/md5-glue.c
+++ b/arch/powerpc/crypto/md5-glue.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/md5.h>
#include <asm/byteorder.h>
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index 6379990bd604..cb57be4ada61 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index 7b43fc352089..b40dc50a6908 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -16,14 +16,13 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
-extern void powerpc_sha_transform(u32 *state, const u8 *src, u32 *temp);
+void powerpc_sha_transform(u32 *state, const u8 *src);
-static int sha1_init(struct shash_desc *desc)
+static int powerpc_sha1_init(struct shash_desc *desc)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
@@ -34,8 +33,8 @@ static int sha1_init(struct shash_desc *desc)
return 0;
}
-static int sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial, done;
@@ -47,7 +46,6 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
src = data;
if ((partial + len) > 63) {
- u32 temp[SHA_WORKSPACE_WORDS];
if (partial) {
done = -partial;
@@ -56,12 +54,11 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
}
do {
- powerpc_sha_transform(sctx->state, src, temp);
+ powerpc_sha_transform(sctx->state, src);
done += 64;
src = data + done;
} while (done + 63 < len);
- memzero_explicit(temp, sizeof(temp));
partial = 0;
}
memcpy(sctx->buffer + partial, src, len - done);
@@ -71,7 +68,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
/* Add padding and return the message digest. */
-static int sha1_final(struct shash_desc *desc, u8 *out)
+static int powerpc_sha1_final(struct shash_desc *desc, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out;
@@ -84,10 +81,10 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
/* Pad out to 56 mod 64 */
index = sctx->count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- sha1_update(desc, padding, padlen);
+ powerpc_sha1_update(desc, padding, padlen);
/* Append length */
- sha1_update(desc, (const u8 *)&bits, sizeof(bits));
+ powerpc_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 5; i++)
@@ -99,7 +96,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
return 0;
}
-static int sha1_export(struct shash_desc *desc, void *out)
+static int powerpc_sha1_export(struct shash_desc *desc, void *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
@@ -107,7 +104,7 @@ static int sha1_export(struct shash_desc *desc, void *out)
return 0;
}
-static int sha1_import(struct shash_desc *desc, const void *in)
+static int powerpc_sha1_import(struct shash_desc *desc, const void *in)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
@@ -117,11 +114,11 @@ static int sha1_import(struct shash_desc *desc, const void *in)
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = sha1_init,
- .update = sha1_update,
- .final = sha1_final,
- .export = sha1_export,
- .import = sha1_import,
+ .init = powerpc_sha1_init,
+ .update = powerpc_sha1_update,
+ .final = powerpc_sha1_final,
+ .export = powerpc_sha1_export,
+ .import = powerpc_sha1_import,
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = {
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
index 84939e563b81..ceb0b6c980b3 100644
--- a/arch/powerpc/crypto/sha256-spe-glue.c
+++ b/arch/powerpc/crypto/sha256-spe-glue.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 7c15542d3685..698b1e6d3c14 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -27,7 +27,7 @@
#include "sha.h"
-static int sha1_init(struct shash_desc *desc)
+static int s390_sha1_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
@@ -42,7 +42,7 @@ static int sha1_init(struct shash_desc *desc)
return 0;
}
-static int sha1_export(struct shash_desc *desc, void *out)
+static int s390_sha1_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha1_state *octx = out;
@@ -53,7 +53,7 @@ static int sha1_export(struct shash_desc *desc, void *out)
return 0;
}
-static int sha1_import(struct shash_desc *desc, const void *in)
+static int s390_sha1_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha1_state *ictx = in;
@@ -67,11 +67,11 @@ static int sha1_import(struct shash_desc *desc, const void *in)
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = sha1_init,
+ .init = s390_sha1_init,
.update = s390_sha_update,
.final = s390_sha_final,
- .export = sha1_export,
- .import = sha1_import,
+ .export = s390_sha1_export,
+ .import = s390_sha1_import,
.descsize = sizeof(struct s390_sha_ctx),
.statesize = sizeof(struct sha1_state),
.base = {
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index 14f6c15be6ae..111283fe837e 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/md5.h>
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index 7c1666304441..dc017782be52 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index f403ce9ba6e4..286bc8ecf15b 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index a3b532e43c07..3b2ca732ff7a 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
diff --git a/arch/unicore32/kernel/ksyms.c b/arch/unicore32/kernel/ksyms.c
index f4b84872d640..731445008932 100644
--- a/arch/unicore32/kernel/ksyms.c
+++ b/arch/unicore32/kernel/ksyms.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
-#include <linux/cryptohash.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index a801ffc10cbb..18200135603f 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -21,7 +21,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 6394b5fe8db6..dd06249229e1 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -34,7 +34,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 82cc1b3ced1d..b0b05c93409e 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -32,7 +32,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/sha.h>
diff --git a/crypto/Kconfig b/crypto/Kconfig
index c24a47406f8f..d5daf35431e3 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -370,7 +370,6 @@ config CRYPTO_CFB
config CRYPTO_CTR
tristate "CTR support"
select CRYPTO_SKCIPHER
- select CRYPTO_SEQIV
select CRYPTO_MANAGER
help
CTR: Counter mode
@@ -1820,7 +1819,7 @@ config CRYPTO_DRBG_HASH
config CRYPTO_DRBG_CTR
bool "Enable CTR DRBG"
select CRYPTO_AES
- depends on CRYPTO_CTR
+ select CRYPTO_CTR
help
Enable the CTR DRBG variant as defined in NIST SP800-90A.
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 69605e21af92..92abdf675992 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -403,7 +403,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
err = wait_for_completion_killable(&larval->completion);
WARN_ON(err);
if (!err)
- crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval);
+ crypto_notify(CRYPTO_MSG_ALG_LOADED, larval);
out:
crypto_larval_kill(&larval->alg);
@@ -716,17 +716,27 @@ EXPORT_SYMBOL_GPL(crypto_drop_spawn);
static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
- struct crypto_alg *alg;
+ struct crypto_alg *alg = ERR_PTR(-EAGAIN);
+ struct crypto_alg *target;
+ bool shoot = false;
down_read(&crypto_alg_sem);
- alg = spawn->alg;
- if (!spawn->dead && !crypto_mod_get(alg)) {
- alg->cra_flags |= CRYPTO_ALG_DYING;
- alg = NULL;
+ if (!spawn->dead) {
+ alg = spawn->alg;
+ if (!crypto_mod_get(alg)) {
+ target = crypto_alg_get(alg);
+ shoot = true;
+ alg = ERR_PTR(-EAGAIN);
+ }
}
up_read(&crypto_alg_sem);
- return alg ?: ERR_PTR(-EAGAIN);
+ if (shoot) {
+ crypto_shoot_alg(target);
+ crypto_alg_put(target);
+ }
+
+ return alg;
}
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
@@ -904,6 +914,14 @@ out:
}
EXPORT_SYMBOL_GPL(crypto_enqueue_request);
+void crypto_enqueue_request_head(struct crypto_queue *queue,
+ struct crypto_async_request *request)
+{
+ queue->qlen++;
+ list_add(&request->list, &queue->list);
+}
+EXPORT_SYMBOL_GPL(crypto_enqueue_request_head);
+
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
{
struct list_head *request;
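crypto_enqueue_request_head() added above is the head-of-queue counterpart of crypto_enqueue_request(); the crypto-engine hunk further down uses it to put a request the hardware rejected with -ENOSPC back in front, preserving submission order. A hedged sketch of that pattern (do_hw_submit() is a made-up driver hook and locking is omitted; only the queue helper itself comes from this patch):

	/* Illustrative requeue-on-full logic, modelled on crypto_pump_requests(). */
	static int submit_or_requeue(struct crypto_queue *queue,
				     struct crypto_async_request *req)
	{
		int ret = do_hw_submit(req);	/* hypothetical HW submit */

		if (ret == -ENOSPC) {
			/* Queue full: requeue at the head to keep ordering. */
			crypto_enqueue_request_head(queue, req);
			return -EAGAIN;
		}
		return ret;
	}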
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
index 22df3799a17b..087c0ad09d38 100644
--- a/crypto/algif_rng.c
+++ b/crypto/algif_rng.c
@@ -61,7 +61,7 @@ static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct rng_ctx *ctx = ask->private;
- int err = -EFAULT;
+ int err;
int genlen = 0;
u8 result[MAXSIZE];
diff --git a/crypto/api.c b/crypto/api.c
index 7d71a9b10e5f..edcf690800d4 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -333,12 +333,13 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
return len;
}
-static void crypto_shoot_alg(struct crypto_alg *alg)
+void crypto_shoot_alg(struct crypto_alg *alg)
{
down_write(&crypto_alg_sem);
alg->cra_flags |= CRYPTO_ALG_DYING;
up_write(&crypto_alg_sem);
}
+EXPORT_SYMBOL_GPL(crypto_shoot_alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask)
diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c
index 1d262374fa4e..0ffd8d92e308 100644
--- a/crypto/blake2b_generic.c
+++ b/crypto/blake2b_generic.c
@@ -129,7 +129,9 @@ static void blake2b_compress(struct blake2b_state *S,
ROUND(9);
ROUND(10);
ROUND(11);
-
+#ifdef CONFIG_CC_IS_CLANG
+#pragma nounroll /* https://bugs.llvm.org/show_bug.cgi?id=45803 */
+#endif
for (i = 0; i < 8; ++i)
S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
}
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index eb029ff1e05a..3655d9d3f5df 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -22,32 +22,36 @@
* @err: error number
*/
static void crypto_finalize_request(struct crypto_engine *engine,
- struct crypto_async_request *req, int err)
+ struct crypto_async_request *req, int err)
{
unsigned long flags;
- bool finalize_cur_req = false;
+ bool finalize_req = false;
int ret;
struct crypto_engine_ctx *enginectx;
- spin_lock_irqsave(&engine->queue_lock, flags);
- if (engine->cur_req == req)
- finalize_cur_req = true;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
+ /*
+ * If hardware cannot enqueue more requests
+ * and retry mechanism is not supported
+ * make sure we are completing the current request
+ */
+ if (!engine->retry_support) {
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ if (engine->cur_req == req) {
+ finalize_req = true;
+ engine->cur_req = NULL;
+ }
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ }
- if (finalize_cur_req) {
+ if (finalize_req || engine->retry_support) {
enginectx = crypto_tfm_ctx(req->tfm);
- if (engine->cur_req_prepared &&
+ if (enginectx->op.prepare_request &&
enginectx->op.unprepare_request) {
ret = enginectx->op.unprepare_request(engine, req);
if (ret)
dev_err(engine->dev, "failed to unprepare request\n");
}
- spin_lock_irqsave(&engine->queue_lock, flags);
- engine->cur_req = NULL;
- engine->cur_req_prepared = false;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
}
-
req->complete(req, err);
kthread_queue_work(engine->kworker, &engine->pump_requests);
@@ -74,7 +78,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
spin_lock_irqsave(&engine->queue_lock, flags);
/* Make sure we are not already running a request */
- if (engine->cur_req)
+ if (!engine->retry_support && engine->cur_req)
goto out;
/* If another context is idling then defer */
@@ -108,13 +112,21 @@ static void crypto_pump_requests(struct crypto_engine *engine,
goto out;
}
+start_request:
/* Get the fist request from the engine queue to handle */
backlog = crypto_get_backlog(&engine->queue);
async_req = crypto_dequeue_request(&engine->queue);
if (!async_req)
goto out;
- engine->cur_req = async_req;
+ /*
+ * If hardware doesn't support the retry mechanism,
+ * keep track of the request we are processing now.
+ * We'll need it on completion (crypto_finalize_request).
+ */
+ if (!engine->retry_support)
+ engine->cur_req = async_req;
+
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -130,7 +142,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
ret = engine->prepare_crypt_hardware(engine);
if (ret) {
dev_err(engine->dev, "failed to prepare crypt hardware\n");
- goto req_err;
+ goto req_err_2;
}
}
@@ -141,28 +153,90 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (ret) {
dev_err(engine->dev, "failed to prepare request: %d\n",
ret);
- goto req_err;
+ goto req_err_2;
}
- engine->cur_req_prepared = true;
}
if (!enginectx->op.do_one_request) {
dev_err(engine->dev, "failed to do request\n");
ret = -EINVAL;
- goto req_err;
+ goto req_err_1;
}
+
ret = enginectx->op.do_one_request(engine, async_req);
- if (ret) {
- dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
- goto req_err;
+
+ /* Request unsuccessfully executed by hardware */
+ if (ret < 0) {
+ /*
+ * If hardware queue is full (-ENOSPC), requeue request
+ * regardless of backlog flag.
+ * Otherwise, unprepare and complete the request.
+ */
+ if (!engine->retry_support ||
+ (ret != -ENOSPC)) {
+ dev_err(engine->dev,
+ "Failed to do one request from queue: %d\n",
+ ret);
+ goto req_err_1;
+ }
+ /*
+ * If retry mechanism is supported,
+ * unprepare current request and
+ * enqueue it back into crypto-engine queue.
+ */
+ if (enginectx->op.unprepare_request) {
+ ret = enginectx->op.unprepare_request(engine,
+ async_req);
+ if (ret)
+ dev_err(engine->dev,
+ "failed to unprepare request\n");
+ }
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ /*
+ * If hardware was unable to execute request, enqueue it
+ * back in front of crypto-engine queue, to keep the order
+ * of requests.
+ */
+ crypto_enqueue_request_head(&engine->queue, async_req);
+
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
+ goto out;
}
- return;
-req_err:
- crypto_finalize_request(engine, async_req, ret);
+ goto retry;
+
+req_err_1:
+ if (enginectx->op.unprepare_request) {
+ ret = enginectx->op.unprepare_request(engine, async_req);
+ if (ret)
+ dev_err(engine->dev, "failed to unprepare request\n");
+ }
+
+req_err_2:
+ async_req->complete(async_req, ret);
+
+retry:
+ /* If retry mechanism is supported, send new requests to engine */
+ if (engine->retry_support) {
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ goto start_request;
+ }
return;
out:
spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+ /*
+ * Batch requests is possible only if
+ * hardware can enqueue multiple requests
+ */
+ if (engine->do_batch_requests) {
+ ret = engine->do_batch_requests(engine);
+ if (ret)
+ dev_err(engine->dev, "failed to do batch requests: %d\n",
+ ret);
+ }
+
+ return;
}
static void crypto_pump_work(struct kthread_work *work)
@@ -386,15 +460,27 @@ int crypto_engine_stop(struct crypto_engine *engine)
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
- * crypto_engine_alloc_init - allocate crypto hardware engine structure and
- * initialize it.
+ * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
+ * and initialize it by setting the maximum number of entries in the software
+ * crypto-engine queue.
* @dev: the device attached with one hardware engine
+ * @retry_support: whether hardware has support for retry mechanism
+ * @cbk_do_batch: pointer to a callback function to be invoked when executing a
+ * a batch of requests.
+ * This has the form:
+ * callback(struct crypto_engine *engine)
+ * where:
+ * @engine: the crypto engine structure.
* @rt: whether this queue is set to run as a realtime task
+ * @qlen: maximum size of the crypto-engine queue
*
* This must be called from context that can sleep.
* Return: the crypto engine structure on success, else NULL.
*/
-struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
+ bool retry_support,
+ int (*cbk_do_batch)(struct crypto_engine *engine),
+ bool rt, int qlen)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
struct crypto_engine *engine;
@@ -411,12 +497,18 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
engine->running = false;
engine->busy = false;
engine->idling = false;
- engine->cur_req_prepared = false;
+ engine->retry_support = retry_support;
engine->priv_data = dev;
+ /*
+ * Batch requests is possible only if
+ * hardware has support for retry mechanism.
+ */
+ engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
+
snprintf(engine->name, sizeof(engine->name),
"%s-engine", dev_name(dev));
- crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
+ crypto_init_queue(&engine->queue, qlen);
spin_lock_init(&engine->queue_lock);
engine->kworker = kthread_create_worker(0, "%s", engine->name);
@@ -433,6 +525,22 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
return engine;
}
+EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
+
+/**
+ * crypto_engine_alloc_init - allocate crypto hardware engine structure and
+ * initialize it.
+ * @dev: the device attached with one hardware engine
+ * @rt: whether this queue is set to run as a realtime task
+ *
+ * This must be called from context that can sleep.
+ * Return: the crypto engine structure on success, else NULL.
+ */
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+{
+ return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
+ CRYPTO_ENGINE_MAX_QLEN);
+}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
/**
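Taken together with the kerneldoc above, a driver whose hardware can keep several requests in flight would opt in roughly like this (a hedged sketch: my_do_batch(), my_engine_setup() and the queue length of 512 are placeholders; only crypto_engine_alloc_init_and_set() and crypto_engine_start() are real API):

	#include <crypto/engine.h>

	/* Hypothetical batch callback: kick off everything queued so far. */
	static int my_do_batch(struct crypto_engine *engine)
	{
		/* e.g. ring a doorbell or start a DMA chain */
		return 0;
	}

	static int my_engine_setup(struct device *dev)
	{
		struct crypto_engine *engine;

		/* retry_support=true enables head-requeue on -ENOSPC and
		 * makes the batch callback effective; qlen bounds the
		 * software queue. */
		engine = crypto_engine_alloc_init_and_set(dev, true,
							  my_do_batch,
							  false, 512);
		if (!engine)
			return -ENOMEM;

		return crypto_engine_start(engine);
	}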
diff --git a/crypto/drbg.c b/crypto/drbg.c
index b6929eb5f565..37526eb8c5d5 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1087,10 +1087,6 @@ static void drbg_async_seed(struct work_struct *work)
if (ret)
goto unlock;
- /* If nonblocking pool is initialized, deactivate Jitter RNG */
- crypto_free_rng(drbg->jent);
- drbg->jent = NULL;
-
/* Set seeded to false so that if __drbg_seed fails the
* next generate call will trigger a reseed.
*/
@@ -1168,7 +1164,23 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
entropylen);
if (ret) {
pr_devel("DRBG: jent failed with %d\n", ret);
- goto out;
+
+ /*
+ * Do not treat the transient failure of the
+ * Jitter RNG as an error that needs to be
+ * reported. The combined number of the
+ * maximum reseed threshold times the maximum
+ * number of Jitter RNG transient errors is
+ * less than the reseed threshold required by
+ * SP800-90A allowing us to treat the
+ * transient errors as such.
+ *
+ * However, we mandate that at least the first
+ * seeding operation must succeed with the
+ * Jitter RNG.
+ */
+ if (!reseed || ret != -EAGAIN)
+ goto out;
}
drbg_string_fill(&data1, entropy, entropylen * 2);
@@ -1294,8 +1306,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags),
GFP_KERNEL);
- if (!drbg->prev)
+ if (!drbg->prev) {
+ ret = -ENOMEM;
goto fini;
+ }
drbg->fips_primed = false;
}
@@ -1492,6 +1506,8 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
if (list_empty(&drbg->test_data.list))
return 0;
+ drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
+
INIT_WORK(&drbg->seed_work, drbg_async_seed);
drbg->random_ready.owner = THIS_MODULE;
@@ -1512,8 +1528,6 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
return err;
}
- drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
-
/*
* Require frequent reseeds until the seed source is fully
* initialized.
diff --git a/crypto/essiv.c b/crypto/essiv.c
index 465a89c9d1ef..a7f45dbc4ee2 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -66,7 +66,6 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
struct essiv_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- SHASH_DESC_ON_STACK(desc, tctx->hash);
u8 salt[HASH_MAX_DIGESTSIZE];
int err;
@@ -78,8 +77,7 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm,
if (err)
return err;
- desc->tfm = tctx->hash;
- err = crypto_shash_digest(desc, key, keylen, salt);
+ err = crypto_shash_tfm_digest(tctx->hash, key, keylen, salt);
if (err)
return err;
diff --git a/crypto/internal.h b/crypto/internal.h
index d5ebc60c5143..ff06a3bd1ca1 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -65,6 +65,7 @@ void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg);
void crypto_remove_final(struct list_head *list);
+void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm(struct crypto_alg *alg,
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index a5ce8f96790f..b43684c0dade 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -108,6 +108,7 @@ void jent_get_nstime(__u64 *out)
struct jitterentropy {
spinlock_t jent_lock;
struct rand_data *entropy_collector;
+ unsigned int reset_cnt;
};
static int jent_kcapi_init(struct crypto_tfm *tfm)
@@ -142,7 +143,33 @@ static int jent_kcapi_random(struct crypto_rng *tfm,
int ret = 0;
spin_lock(&rng->jent_lock);
+
+ /* Return a permanent error in case we had too many resets in a row. */
+ if (rng->reset_cnt > (1<<10)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
+
+ /* Reset RNG in case of health failures */
+ if (ret < -1) {
+ pr_warn_ratelimited("Reset Jitter RNG due to health test failure: %s failure\n",
+ (ret == -2) ? "Repetition Count Test" :
+ "Adaptive Proportion Test");
+
+ rng->reset_cnt++;
+
+ ret = -EAGAIN;
+ } else {
+ rng->reset_cnt = 0;
+
+ /* Convert the Jitter RNG error into a usable error code */
+ if (ret == -1)
+ ret = -EINVAL;
+ }
+
+out:
spin_unlock(&rng->jent_lock);
return ret;
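With this change, jent_kcapi_random() resets the entropy collector on a health-test failure and returns -EAGAIN, so callers can treat the failure as transient and retry; the drbg hunk below relies on exactly that convention during reseeding. A hedged consumer-side sketch (the retry bound of 3 is arbitrary; only the "jitterentropy_rng" name and the -EAGAIN convention come from this patch):

	#include <crypto/rng.h>
	#include <linux/err.h>

	static int get_jitter_bytes(u8 *buf, unsigned int len)
	{
		struct crypto_rng *jent;
		int i, ret = -EAGAIN;

		jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
		if (IS_ERR(jent))
			return PTR_ERR(jent);

		/* -EAGAIN signals a transient health-test reset: retry. */
		for (i = 0; i < 3 && ret == -EAGAIN; i++)
			ret = crypto_rng_get_bytes(jent, buf, len);

		crypto_free_rng(jent);
		return ret;
	}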
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index 042157f0d28b..57f4a1ac738b 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -2,7 +2,7 @@
* Non-physical true random number generator based on timing jitter --
* Jitter RNG standalone code.
*
- * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2019
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2020
*
* Design
* ======
@@ -47,7 +47,7 @@
/*
* This Jitterentropy RNG is based on the jitterentropy library
- * version 2.1.2 provided at http://www.chronox.de/jent.html
+ * version 2.2.0 provided at http://www.chronox.de/jent.html
*/
#ifdef __OPTIMIZE__
@@ -83,6 +83,22 @@ struct rand_data {
unsigned int memblocksize; /* Size of one memory block in bytes */
unsigned int memaccessloops; /* Number of memory accesses per random
* bit generation */
+
+ /* Repetition Count Test */
+ int rct_count; /* Number of stuck values */
+
+ /* Adaptive Proportion Test for a significance level of 2^-30 */
+#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
+#define JENT_APT_WINDOW_SIZE 512 /* Data window size */
+ /* LSB of time stamp to process */
+#define JENT_APT_LSB 16
+#define JENT_APT_WORD_MASK (JENT_APT_LSB - 1)
+ unsigned int apt_observations; /* Number of collected observations */
+ unsigned int apt_count; /* APT counter */
+ unsigned int apt_base; /* APT base reference */
+ unsigned int apt_base_set:1; /* APT base reference set? */
+
+ unsigned int health_failure:1; /* Permanent health failure */
};
/* Flags that can be used to initialize the RNG */
@@ -98,12 +114,201 @@ struct rand_data {
* variations (2nd derivation of time is
* zero). */
#define JENT_ESTUCK 8 /* Too many stuck results during init. */
+#define JENT_EHEALTH 9 /* Health test failed during initialization */
+#define JENT_ERCT 10 /* RCT failed during initialization */
+
+#include "jitterentropy.h"
/***************************************************************************
- * Helper functions
+ * Adaptive Proportion Test
+ *
+ * This test complies with SP800-90B section 4.4.2.
***************************************************************************/
-#include "jitterentropy.h"
+/**
+ * Reset the APT counter
+ *
+ * @ec [in] Reference to entropy collector
+ */
+static void jent_apt_reset(struct rand_data *ec, unsigned int delta_masked)
+{
+ /* Reset APT counter */
+ ec->apt_count = 0;
+ ec->apt_base = delta_masked;
+ ec->apt_observations = 0;
+}
+
+/**
+ * Insert a new entropy event into APT
+ *
+ * @ec [in] Reference to entropy collector
+ * @delta_masked [in] Masked time delta to process
+ */
+static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
+{
+ /* Initialize the base reference */
+ if (!ec->apt_base_set) {
+ ec->apt_base = delta_masked;
+ ec->apt_base_set = 1;
+ return;
+ }
+
+ if (delta_masked == ec->apt_base) {
+ ec->apt_count++;
+
+ if (ec->apt_count >= JENT_APT_CUTOFF)
+ ec->health_failure = 1;
+ }
+
+ ec->apt_observations++;
+
+ if (ec->apt_observations >= JENT_APT_WINDOW_SIZE)
+ jent_apt_reset(ec, delta_masked);
+}
+
+/***************************************************************************
+ * Stuck Test and its use as Repetition Count Test
+ *
+ * The Jitter RNG uses an enhanced version of the Repetition Count Test
+ * (RCT) specified in SP800-90B section 4.4.1. Instead of counting identical
+ * back-to-back values, the input to the RCT is the counting of the stuck
+ * values during the generation of one Jitter RNG output block.
+ *
+ * The RCT is applied with an alpha of 2^{-30} compliant to FIPS 140-2 IG 9.8.
+ *
+ * During the counting operation, the Jitter RNG always compares the
+ * repetition count against the cut-off value C. If the count exceeds the
+ * allowed cut-off value, the Jitter RNG output block is still calculated
+ * completely but discarded at the end. The caller of the Jitter RNG is
+ * informed with an error code.
+ ***************************************************************************/
+
+/**
+ * Repetition Count Test as defined in SP800-90B section 4.4.1
+ *
+ * @ec [in] Reference to entropy collector
+ * @stuck [in] Indicator whether the value is stuck
+ */
+static void jent_rct_insert(struct rand_data *ec, int stuck)
+{
+ /*
+ * If we have a count less than zero, a previous RCT round identified
+ * a failure. We will not overwrite it.
+ */
+ if (ec->rct_count < 0)
+ return;
+
+ if (stuck) {
+ ec->rct_count++;
+
+ /*
+ * The cutoff value is based on the following consideration:
+ * alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8.
+ * In addition, we assume an entropy rate H of 1/OSR as this
+ * is the minimum entropy rate required for the output to
+ * provide full entropy. Note, we collect 64 * OSR deltas for
+ * inserting them into the entropy pool which should then
+ * have (close to) 64 bits of entropy.
+ *
+ * Note, ec->rct_count (which equals value B in the pseudo
+ * code of SP800-90B section 4.4.1) starts with zero. Hence
+ * we need to subtract one from the cutoff value as calculated
+ * following SP800-90B.
+ */
+ if ((unsigned int)ec->rct_count >= (31 * ec->osr)) {
+ ec->rct_count = -1;
+ ec->health_failure = 1;
+ }
+ } else {
+ ec->rct_count = 0;
+ }
+}
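As a worked example of the cut-off: with alpha = 2^-30 and the assumed entropy rate H = 1/osr, SP800-90B's cut-off formula C = 1 + ceil(-log2(alpha)/H) gives, for the default osr = 1, C = 1 + 30 = 31 — the factor that appears in the (31 * ec->osr) comparison above, scaled up for larger oversampling rates.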
+
+/**
+ * Is there an RCT health test failure?
+ *
+ * @ec [in] Reference to entropy collector
+ *
+ * @return
+ * 0 No health test failure
+ * 1 Permanent health test failure
+ */
+static int jent_rct_failure(struct rand_data *ec)
+{
+ if (ec->rct_count < 0)
+ return 1;
+ return 0;
+}
+
+static inline __u64 jent_delta(__u64 prev, __u64 next)
+{
+#define JENT_UINT64_MAX (__u64)(~((__u64) 0))
+ return (prev < next) ? (next - prev) :
+ (JENT_UINT64_MAX - prev + 1 + next);
+}
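For example, across a timer wrap with prev = 2^64 - 5 and next = 3, jent_delta() returns (2^64 - 1) - (2^64 - 5) + 1 + 3 = 8, the true number of elapsed ticks across the wrap-around.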
+
+/**
+ * Stuck test by checking the:
+ * 1st derivative of the jitter measurement (time delta)
+ * 2nd derivative of the jitter measurement (delta of time deltas)
+ * 3rd derivative of the jitter measurement (delta of delta of time deltas)
+ *
+ * All values must always be non-zero.
+ *
+ * @ec [in] Reference to entropy collector
+ * @current_delta [in] Jitter time delta
+ *
+ * @return
+ * 0 jitter measurement not stuck (good bit)
+ * 1 jitter measurement stuck (reject bit)
+ */
+static int jent_stuck(struct rand_data *ec, __u64 current_delta)
+{
+ __u64 delta2 = jent_delta(ec->last_delta, current_delta);
+ __u64 delta3 = jent_delta(ec->last_delta2, delta2);
+ unsigned int delta_masked = current_delta & JENT_APT_WORD_MASK;
+
+ ec->last_delta = current_delta;
+ ec->last_delta2 = delta2;
+
+ /*
+ * Insert the result of the comparison of two back-to-back time
+ * deltas.
+ */
+ jent_apt_insert(ec, delta_masked);
+
+ if (!current_delta || !delta2 || !delta3) {
+ /* RCT with a stuck bit */
+ jent_rct_insert(ec, 1);
+ return 1;
+ }
+
+ /* RCT with a non-stuck bit */
+ jent_rct_insert(ec, 0);
+
+ return 0;
+}
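As a concrete case: if three consecutive time stamps are 100, 150 and 200, the two first derivatives are both 50, so delta2 = 0 and the measurement is flagged stuck — the timer advanced at a perfectly regular rate and contributed no jitter, yet the masked delta is still fed to the APT above.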
+
+/**
+ * Report any health test failures
+ *
+ * @ec [in] Reference to entropy collector
+ *
+ * @return
+ * 0 No health test failure
+ * 1 Permanent health test failure
+ */
+static int jent_health_failure(struct rand_data *ec)
+{
+ /* Test is only enabled in FIPS mode */
+ if (!jent_fips_enabled())
+ return 0;
+
+ return ec->health_failure;
+}
+
+/***************************************************************************
+ * Noise sources
+ ***************************************************************************/
/**
* Update of the loop count used for the next round of
@@ -148,10 +353,6 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
return (shuffle + (1<<min));
}
-/***************************************************************************
- * Noise sources
- ***************************************************************************/
-
/**
* CPU Jitter noise source -- this is the noise source based on the CPU
* execution time jitter
@@ -166,18 +367,19 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
* the CPU execution time jitter. Any change to the loop in this function
* implies that careful retesting must be done.
*
- * Input:
- * @ec entropy collector struct
- * @time time stamp to be injected
- * @loop_cnt if a value not equal to 0 is set, use the given value as number of
- * loops to perform the folding
+ * @ec [in] entropy collector struct
+ * @time [in] time stamp to be injected
+ * @loop_cnt [in] if a value not equal to 0 is set, use the given value as
+ * number of loops to perform the folding
+ * @stuck [in] Is the time stamp identified as stuck?
*
* Output:
* updated ec->data
*
* @return Number of loops the folding operation is performed
*/
-static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt)
+static void jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt,
+ int stuck)
{
unsigned int i;
__u64 j = 0;
@@ -220,9 +422,17 @@ static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt)
new ^= tmp;
}
}
- ec->data = new;
- return fold_loop_cnt;
+ /*
+ * If the time stamp is stuck, do not finally insert the value into
+ * the entropy pool. Although this operation should not do any harm
+ * even when the time stamp has no entropy, SP800-90B requires that
+ * any conditioning operation (SP800-90B considers the LFSR to be a
+ * conditioning operation) have an identical amount of input data
+ * according to section 3.1.5.
+ */
+ if (!stuck)
+ ec->data = new;
}
/**
@@ -243,16 +453,13 @@ static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt)
* to reliably access either L3 or memory, the ec->mem memory must be quite
* large which is usually not desirable.
*
- * Input:
- * @ec Reference to the entropy collector with the memory access data -- if
- * the reference to the memory block to be accessed is NULL, this noise
- * source is disabled
- * @loop_cnt if a value not equal to 0 is set, use the given value as number of
- * loops to perform the folding
- *
- * @return Number of memory access operations
+ * @ec [in] Reference to the entropy collector with the memory access data -- if
+ * the reference to the memory block to be accessed is NULL, this noise
+ * source is disabled
+ * @loop_cnt [in] if a value not equal to 0 is set, use the given value as the
+ * number of loops to perform the LFSR
*/
-static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
+static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
{
unsigned int wrap = 0;
__u64 i = 0;
@@ -262,7 +469,7 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT);
if (NULL == ec || NULL == ec->mem)
- return 0;
+ return;
wrap = ec->memblocksize * ec->memblocks;
/*
@@ -288,43 +495,11 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
ec->memlocation = ec->memlocation + ec->memblocksize - 1;
ec->memlocation = ec->memlocation % wrap;
}
- return i;
}
/***************************************************************************
* Start of entropy processing logic
***************************************************************************/
-
-/**
- * Stuck test by checking the:
- * 1st derivation of the jitter measurement (time delta)
- * 2nd derivation of the jitter measurement (delta of time deltas)
- * 3rd derivation of the jitter measurement (delta of delta of time deltas)
- *
- * All values must always be non-zero.
- *
- * Input:
- * @ec Reference to entropy collector
- * @current_delta Jitter time delta
- *
- * @return
- * 0 jitter measurement not stuck (good bit)
- * 1 jitter measurement stuck (reject bit)
- */
-static int jent_stuck(struct rand_data *ec, __u64 current_delta)
-{
- __s64 delta2 = ec->last_delta - current_delta;
- __s64 delta3 = delta2 - ec->last_delta2;
-
- ec->last_delta = current_delta;
- ec->last_delta2 = delta2;
-
- if (!current_delta || !delta2 || !delta3)
- return 1;
-
- return 0;
-}
-
/**
* This is the heart of the entropy generation: calculate time deltas and
* use the CPU jitter in the time deltas. The jitter is injected into the
@@ -334,8 +509,7 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta)
* of this function! This can be done by calling this function
* and not using its result.
*
- * Input:
- * @entropy_collector Reference to entropy collector
+ * @ec [in] Reference to entropy collector
*
* @return result of stuck test
*/
@@ -343,6 +517,7 @@ static int jent_measure_jitter(struct rand_data *ec)
{
__u64 time = 0;
__u64 current_delta = 0;
+ int stuck;
/* Invoke one noise source before time measurement to add variations */
jent_memaccess(ec, 0);
@@ -352,22 +527,23 @@ static int jent_measure_jitter(struct rand_data *ec)
* invocation to measure the timing variations
*/
jent_get_nstime(&time);
- current_delta = time - ec->prev_time;
+ current_delta = jent_delta(ec->prev_time, time);
ec->prev_time = time;
+ /* Check whether we have a stuck measurement. */
+ stuck = jent_stuck(ec, current_delta);
+
/* Now call the next noise sources which also injects the data */
- jent_lfsr_time(ec, current_delta, 0);
+ jent_lfsr_time(ec, current_delta, 0, stuck);
- /* Check whether we have a stuck measurement. */
- return jent_stuck(ec, current_delta);
+ return stuck;
}
/**
* Generator of one 64 bit random number
* Function fills rand_data->data
*
- * Input:
- * @ec Reference to entropy collector
+ * @ec [in] Reference to entropy collector
*/
static void jent_gen_entropy(struct rand_data *ec)
{
@@ -391,31 +567,6 @@ static void jent_gen_entropy(struct rand_data *ec)
}
/**
- * The continuous test required by FIPS 140-2 -- the function automatically
- * primes the test if needed.
- *
- * Return:
- * returns normally if FIPS test passed
- * panics the kernel if FIPS test failed
- */
-static void jent_fips_test(struct rand_data *ec)
-{
- if (!jent_fips_enabled())
- return;
-
- /* prime the FIPS test */
- if (!ec->old_data) {
- ec->old_data = ec->data;
- jent_gen_entropy(ec);
- }
-
- if (ec->data == ec->old_data)
- jent_panic("jitterentropy: Duplicate output detected\n");
-
- ec->old_data = ec->data;
-}
-
-/**
* Entry function: Obtain entropy for the caller.
*
* This function invokes the entropy gathering logic as often to generate
@@ -425,17 +576,18 @@ static void jent_fips_test(struct rand_data *ec)
* This function truncates the last 64 bit entropy value output to the exact
* size specified by the caller.
*
- * Input:
- * @ec Reference to entropy collector
- * @data pointer to buffer for storing random data -- buffer must already
- * exist
- * @len size of the buffer, specifying also the requested number of random
- * in bytes
+ * @ec [in] Reference to entropy collector
+ * @data [in] pointer to buffer for storing random data -- buffer must already
+ * exist
+ * @len [in] size of the buffer, specifying also the requested number of random
+ * bytes
*
* @return 0 when request is fulfilled or an error
*
* The following error codes can occur:
* -1 entropy_collector is NULL
+ * -2 RCT failed
+ * -3 APT test failed
*/
int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int len)
@@ -449,7 +601,42 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int tocopy;
jent_gen_entropy(ec);
- jent_fips_test(ec);
+
+ if (jent_health_failure(ec)) {
+ int ret;
+
+ if (jent_rct_failure(ec))
+ ret = -2;
+ else
+ ret = -3;
+
+ /*
+ * Re-initialize the noise source
+ *
+ * If the health test fails, the Jitter RNG remains
+ * in the failure state and will return a health
+ * failure during the next invocation.
+ */
+ if (jent_entropy_init())
+ return ret;
+
+ /* Set APT to initial state */
+ jent_apt_reset(ec, 0);
+ ec->apt_base_set = 0;
+
+ /* Set RCT to initial state */
+ ec->rct_count = 0;
+
+ /* Re-enable Jitter RNG */
+ ec->health_failure = 0;
+
+ /*
+ * Return the health test failure status to the
+ * caller as the generated value is not appropriate.
+ */
+ return ret;
+ }
+
if ((DATA_SIZE_BITS / 8) < len)
tocopy = (DATA_SIZE_BITS / 8);
else
@@ -513,11 +700,15 @@ int jent_entropy_init(void)
int i;
__u64 delta_sum = 0;
__u64 old_delta = 0;
+ unsigned int nonstuck = 0;
int time_backwards = 0;
int count_mod = 0;
int count_stuck = 0;
struct rand_data ec = { 0 };
+ /* Required for RCT */
+ ec.osr = 1;
+
/* We could perform statistical tests here, but the problem is
* that we only have a few loop counts to do testing. These
* loop counts may show some slight skew and we produce
@@ -539,8 +730,10 @@ int jent_entropy_init(void)
/*
* TESTLOOPCOUNT needs some loops to identify edge systems. 100 is
* definitely too little.
+ *
+ * SP800-90B requires at least 1024 initial test cycles.
*/
-#define TESTLOOPCOUNT 300
+#define TESTLOOPCOUNT 1024
#define CLEARCACHE 100
for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) {
__u64 time = 0;
@@ -552,13 +745,13 @@ int jent_entropy_init(void)
/* Invoke core entropy collection logic */
jent_get_nstime(&time);
ec.prev_time = time;
- jent_lfsr_time(&ec, time, 0);
+ jent_lfsr_time(&ec, time, 0, 0);
jent_get_nstime(&time2);
/* test whether timer works */
if (!time || !time2)
return JENT_ENOTIME;
- delta = time2 - time;
+ delta = jent_delta(time, time2);
/*
* test whether timer is fine grained enough to provide
* delta even when called shortly after each other -- this
@@ -581,6 +774,28 @@ int jent_entropy_init(void)
if (stuck)
count_stuck++;
+ else {
+ nonstuck++;
+
+ /*
+ * Ensure that the APT succeeded.
+ *
+ * With the check below, which requires count_stuck to
+ * be less than 10% of the overall generated raw
+ * entropy values, it is guaranteed that the APT is
+ * invoked at floor((TESTLOOPCOUNT * 0.9) / 64) == 14
+ * times.
+ */
+ if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) {
+ jent_apt_reset(&ec,
+ delta & JENT_APT_WORD_MASK);
+ if (jent_health_failure(&ec))
+ return JENT_EHEALTH;
+ }
+ }
+
+ /* Validate RCT */
+ if (jent_rct_failure(&ec))
+ return JENT_ERCT;
/* test whether we have an increasing timer */
if (!(time2 > time))
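With the health tests wired into jent_read_entropy(), consumers must be prepared for the new error codes. A hypothetical caller (a sketch under assumptions, not from this patch: fill_from_jitter() and its retry-once policy are invented for illustration, assuming the declarations from jitterentropy.h):

	/* Sketch only: retry once after a health failure, since the RNG
	 * re-initializes its noise source before returning the error.
	 */
	static int fill_from_jitter(struct rand_data *ec, unsigned char *buf,
				    unsigned int len)
	{
		int ret = jent_read_entropy(ec, buf, len);

		switch (ret) {
		case -2:	/* RCT failure: block was produced but discarded */
		case -3:	/* APT failure: block was produced but discarded */
			return jent_read_entropy(ec, buf, len);
		default:
			return ret;	/* 0 on success, -1 for a NULL collector */
		}
	}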
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 3c734b81b3a2..5b07a7c09296 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -287,7 +287,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
crypto_free_skcipher(ctx->child);
}
-static void free_inst(struct skcipher_instance *inst)
+static void crypto_lrw_free(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
@@ -400,12 +400,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.encrypt = encrypt;
inst->alg.decrypt = decrypt;
- inst->free = free_inst;
+ inst->free = crypto_lrw_free;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
- free_inst(inst);
+ crypto_lrw_free(inst);
}
return err;
}
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 7c57b844c382..1d43472fecbd 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
@@ -31,10 +30,10 @@ EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
int blocks)
{
- u32 temp[SHA_WORKSPACE_WORDS];
+ u32 temp[SHA1_WORKSPACE_WORDS];
while (blocks--) {
- sha_transform(sst->state, src, temp);
+ sha1_transform(sst->state, src, temp);
src += SHA1_BLOCK_SIZE;
}
memzero_explicit(temp, sizeof(temp));
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index f2d7095d4f2d..88156e3e2a33 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -35,27 +35,31 @@ EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
static int crypto_sha256_init(struct shash_desc *desc)
{
- return sha256_init(shash_desc_ctx(desc));
+ sha256_init(shash_desc_ctx(desc));
+ return 0;
}
static int crypto_sha224_init(struct shash_desc *desc)
{
- return sha224_init(shash_desc_ctx(desc));
+ sha224_init(shash_desc_ctx(desc));
+ return 0;
}
int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha256_update(shash_desc_ctx(desc), data, len);
+ sha256_update(shash_desc_ctx(desc), data, len);
+ return 0;
}
EXPORT_SYMBOL(crypto_sha256_update);
static int crypto_sha256_final(struct shash_desc *desc, u8 *out)
{
if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE)
- return sha224_final(shash_desc_ctx(desc), out);
+ sha224_final(shash_desc_ctx(desc), out);
else
- return sha256_final(shash_desc_ctx(desc), out);
+ sha256_final(shash_desc_ctx(desc), out);
+ return 0;
}
int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
diff --git a/crypto/shash.c b/crypto/shash.c
index c075b26c2a1d..e6a4b5f39b8c 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -206,6 +206,22 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
+int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ SHASH_DESC_ON_STACK(desc, tfm);
+ int err;
+
+ desc->tfm = tfm;
+
+ err = crypto_shash_digest(desc, data, len, out);
+
+ shash_desc_zero(desc);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest);
+
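The new helper removes the descriptor-on-stack boilerplate from one-shot users, as the driver conversions later in this diff show. A minimal sketch of a caller (the sha256_oneshot() wrapper is invented for illustration; the crypto API calls are the real ones):

	#include <crypto/hash.h>

	static int sha256_oneshot(const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("sha256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* one call replaces SHASH_DESC_ON_STACK() + crypto_shash_digest() */
		err = crypto_shash_tfm_digest(tfm, data, len, out);

		crypto_free_shash(tfm);
		return err;
	}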
static int shash_default_export(struct shash_desc *desc, void *out)
{
memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));
diff --git a/crypto/xts.c b/crypto/xts.c
index 6d8cea94b3cf..3565f3b863a6 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -322,7 +322,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
crypto_free_cipher(ctx->tweak);
}
-static void free_inst(struct skcipher_instance *inst)
+static void crypto_xts_free(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
@@ -434,12 +434,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.encrypt = encrypt;
inst->alg.decrypt = decrypt;
- inst->free = free_inst;
+ inst->free = crypto_xts_free;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
- free_inst(inst);
+ crypto_xts_free(inst);
}
return err;
}
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 9bc46da8d77a..ac00d78ee9cc 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -474,6 +474,19 @@ config HW_RANDOM_KEYSTONE
help
This option enables Keystone's hardware random generator.
+config HW_RANDOM_CCTRNG
+ tristate "Arm CryptoCell True Random Number Generator support"
+ depends on HAS_IOMEM && OF
+ help
+ Say 'Y' to enable the True Random Number Generator driver for the
+ Arm TrustZone CryptoCell family of processors.
+ Currently the CryptoCell 713 and 703 are supported.
+ The driver is supported only on SoCs where a Trusted Execution
+ Environment is not used.
+ Choose 'M' to compile this driver as a module. The module
+ will be called cctrng.
+ If unsure, say 'N'.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index a7801b49ce6c..2c6724735345 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
+obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
new file mode 100644
index 000000000000..619148fb2dc9
--- /dev/null
+++ b/drivers/char/hw_random/cctrng.c
@@ -0,0 +1,735 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/workqueue.h>
+#include <linux/circ_buf.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <linux/fips.h>
+
+#include "cctrng.h"
+
+#define CC_REG_LOW(name) (name ## _BIT_SHIFT)
+#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
+#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))
+
+#define CC_REG_FLD_GET(reg_name, fld_name, reg_val) \
+ (FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))
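As a concrete expansion using the register definitions from cctrng.h below: with CC_RNG_ISR_EHR_VALID_BIT_SHIFT = 0 and CC_RNG_ISR_EHR_VALID_BIT_SIZE = 1, CC_GENMASK(CC_RNG_ISR_EHR_VALID) becomes GENMASK(0, 0) = 0x1, so CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr) is simply FIELD_GET(0x1, isr) — bit 0 of the interrupt status word.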
+
+#define CC_HW_RESET_LOOP_COUNT 10
+#define CC_TRNG_SUSPEND_TIMEOUT 3000
+
+/* data circular buffer in words must be:
+ * - of a power-of-2 size (limitation of circ_buf.h macros)
+ * - at least 6, the size generated in the EHR according to HW implementation
+ */
+#define CCTRNG_DATA_BUF_WORDS 32
+
+/* The timeout for the TRNG operation should be calculated with the formula:
+ * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
+ * where:
+ * - SAMPLE_CNT is an input value from the characterisation process
+ * - all the rest are constants
+ */
+#define EHR_NUM 1
+#define VN_COEFF 4
+#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
+#define SCALE_VALUE 2
+#define CCTRNG_TIMEOUT(smpl_cnt) \
+ (EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
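As a worked example with an illustrative sampling count of 1000: CCTRNG_TIMEOUT(1000) = 1 * 4 * 192 * 1000 * 2 = 1,536,000 cycles, EHR_LENGTH being the 192-bit EHR width (CC_TRNG_EHR_IN_BITS, i.e. 6 words of 32 bits).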
+
+struct cctrng_drvdata {
+ struct platform_device *pdev;
+ void __iomem *cc_base;
+ struct clk *clk;
+ struct hwrng rng;
+ u32 active_rosc;
+ /* Sampling interval for each ring oscillator:
+ * count of ring oscillator cycles between consecutive bit samples.
+ * A value of 0 indicates a non-valid rosc.
+ */
+ u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];
+
+ u32 data_buf[CCTRNG_DATA_BUF_WORDS];
+ struct circ_buf circ;
+ struct work_struct compwork;
+ struct work_struct startwork;
+
+ /* pending_hw - 1 when HW is pending, 0 when it is idle */
+ atomic_t pending_hw;
+
+ /* protects against multiple concurrent consumers of data_buf */
+ spinlock_t read_lock;
+};
+
+
+/* functions for write/read CC registers */
+static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val)
+{
+ iowrite32(val, (drvdata->cc_base + reg));
+}
+static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg)
+{
+ return ioread32(drvdata->cc_base + reg);
+}
+
+
+static int cc_trng_pm_get(struct device *dev)
+{
+ int rc = 0;
+
+ rc = pm_runtime_get_sync(dev);
+
+ /* pm_runtime_get_sync() can return 1 as a valid return code */
+ return (rc == 1 ? 0 : rc);
+}
+
+static void cc_trng_pm_put_suspend(struct device *dev)
+{
+ int rc = 0;
+
+ pm_runtime_mark_last_busy(dev);
+ rc = pm_runtime_put_autosuspend(dev);
+ if (rc)
+ dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
+}
+
+static int cc_trng_pm_init(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ /* must be before the enabling to avoid redundant suspending */
+ pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ /* set us as active - note we won't do PM ops until cc_trng_pm_go()! */
+ return pm_runtime_set_active(dev);
+}
+
+static void cc_trng_pm_go(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ /* enable the PM module */
+ pm_runtime_enable(dev);
+}
+
+static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ pm_runtime_disable(dev);
+}
+
+
+static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+ struct device_node *np = drvdata->pdev->dev.of_node;
+ int rc;
+ int i;
+ /* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */
+ int ret = -EINVAL;
+
+ rc = of_property_read_u32_array(np, "arm,rosc-ratio",
+ drvdata->smpl_ratio,
+ CC_TRNG_NUM_OF_ROSCS);
+ if (rc) {
+ /* arm,rosc-ratio was not found in device tree */
+ return rc;
+ }
+
+ /* verify that at least one rosc has (sampling ratio > 0) */
+ for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) {
+ dev_dbg(dev, "rosc %d sampling ratio %u",
+ i, drvdata->smpl_ratio[i]);
+
+ if (drvdata->smpl_ratio[i] > 0)
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc);
+ drvdata->active_rosc += 1;
+
+ while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) {
+ if (drvdata->smpl_ratio[drvdata->active_rosc] > 0)
+ return 0;
+
+ drvdata->active_rosc += 1;
+ }
+ return -EINVAL;
+}
+
+
+static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata)
+{
+ u32 max_cycles;
+
+ /* Set watchdog threshold to maximal allowed time (in CPU cycles) */
+ max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]);
+ cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles);
+
+ /* enable the RND source */
+ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1);
+
+ /* unmask RNG interrupts */
+ cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK);
+}
+
+
+/* increase circular data buffer index (head/tail) */
+static inline void circ_idx_inc(int *idx, int bytes)
+{
+ *idx += (bytes + 3) >> 2;
+ *idx &= (CCTRNG_DATA_BUF_WORDS - 1);
+}
+
+static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata)
+{
+ return CIRC_SPACE(drvdata->circ.head,
+ drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
+
+}
+
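The index math relies on CCTRNG_DATA_BUF_WORDS being a power of two: circ_idx_inc() rounds a byte count up to whole words and wraps with a mask, so, for example, advancing a tail of 30 by 12 bytes yields (12 + 3) >> 2 = 3 words and (30 + 3) & 31 = 1.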
+static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ /* current implementation ignores "wait" */
+
+ struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv;
+ struct device *dev = &(drvdata->pdev->dev);
+ u32 *buf = (u32 *)drvdata->circ.buf;
+ size_t copied = 0;
+ size_t cnt_w;
+ size_t size;
+ size_t left;
+
+ if (!spin_trylock(&drvdata->read_lock)) {
+ /* concurrent consumers from data_buf cannot be served */
+ dev_dbg_ratelimited(dev, "unable to hold lock\n");
+ return 0;
+ }
+
+ /* copy till end of data buffer (without wrap back) */
+ cnt_w = CIRC_CNT_TO_END(drvdata->circ.head,
+ drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
+ size = min((cnt_w<<2), max);
+ memcpy(data, &(buf[drvdata->circ.tail]), size);
+ copied = size;
+ circ_idx_inc(&drvdata->circ.tail, size);
+ /* copy rest of data in data buffer */
+ left = max - copied;
+ if (left > 0) {
+ cnt_w = CIRC_CNT(drvdata->circ.head,
+ drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
+ size = min((cnt_w<<2), left);
+ /* append after the first chunk rather than overwriting it */
+ memcpy(data + copied, &(buf[drvdata->circ.tail]), size);
+ copied += size;
+ circ_idx_inc(&drvdata->circ.tail, size);
+ }
+
+ spin_unlock(&drvdata->read_lock);
+
+ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
+ if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
+ /* re-check space in buffer to avoid potential race */
+ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
+ /* increment device's usage counter */
+ int rc = cc_trng_pm_get(dev);
+
+ if (rc) {
+ dev_err(dev,
+ "cc_trng_pm_get returned %x\n",
+ rc);
+ return rc;
+ }
+
+ /* schedule execution of deferred work handler
+ * for filling of data buffer
+ */
+ schedule_work(&drvdata->startwork);
+ } else {
+ atomic_set(&drvdata->pending_hw, 0);
+ }
+ }
+ }
+
+ return copied;
+}
+
+static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
+{
+ u32 tmp_smpl_cnt = 0;
+ struct device *dev = &(drvdata->pdev->dev);
+
+ dev_dbg(dev, "cctrng hw trigger.\n");
+
+ /* enable the HW RND clock */
+ cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);
+
+ /* do software reset */
+ cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
+ /* in order to verify that the reset has completed,
+ * the sample count needs to be verified
+ */
+ do {
+ /* enable the HW RND clock */
+ cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);
+
+ /* set sampling ratio (rng_clocks) between consecutive bits */
+ cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
+ drvdata->smpl_ratio[drvdata->active_rosc]);
+
+ /* read the sampling ratio */
+ tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);
+
+ } while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);
+
+ /* disable the RND source for setting new parameters in HW */
+ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);
+
+ cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);
+
+ cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);
+
+ /* Debug Control register: set to 0 - no bypasses */
+ cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);
+
+ cc_trng_enable_rnd_source(drvdata);
+}
+
+static void cc_trng_compwork_handler(struct work_struct *w)
+{
+ u32 isr = 0;
+ u32 ehr_valid = 0;
+ struct cctrng_drvdata *drvdata =
+ container_of(w, struct cctrng_drvdata, compwork);
+ struct device *dev = &(drvdata->pdev->dev);
+ int i;
+
+ /* stop DMA and the RNG source */
+ cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
+ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);
+
+ /* read RNG_ISR and check for errors */
+ isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
+ ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
+ dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);
+
+ if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) {
+ fips_fail_notify();
+ /* FIPS error is fatal */
+ panic("Got HW CRNGT error while fips is enabled!\n");
+ }
+
+ /* Clear all pending RNG interrupts */
+ cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);
+
+
+ if (!ehr_valid) {
+ /* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
+ if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
+ CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
+ dev_dbg(dev, "cctrng autocorr/timeout error.\n");
+ goto next_rosc;
+ }
+
+ /* in case of VN error, ignore it */
+ }
+
+ /* read EHR data from registers */
+ for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
+ /* calc word ptr in data_buf */
+ u32 *buf = (u32 *)drvdata->circ.buf;
+
+ buf[drvdata->circ.head] = cc_ioread(drvdata,
+ CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));
+
+ /* EHR_DATA registers are cleared on read. In case 0 value was
+ * returned, restart the entropy collection.
+ */
+ if (buf[drvdata->circ.head] == 0) {
+ dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
+ drvdata->active_rosc);
+ goto next_rosc;
+ }
+
+ circ_idx_inc(&drvdata->circ.head, 1<<2);
+ }
+
+ atomic_set(&drvdata->pending_hw, 0);
+
+ /* continue to fill data buffer if needed */
+ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
+ if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
+ /* Re-enable rnd source */
+ cc_trng_enable_rnd_source(drvdata);
+ return;
+ }
+ }
+
+ cc_trng_pm_put_suspend(dev);
+
+ dev_dbg(dev, "compwork handler done\n");
+ return;
+
+next_rosc:
+ if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
+ (cc_trng_change_rosc(drvdata) == 0)) {
+ /* trigger trng hw with next rosc */
+ cc_trng_hw_trigger(drvdata);
+ } else {
+ atomic_set(&drvdata->pending_hw, 0);
+ cc_trng_pm_put_suspend(dev);
+ }
+}
+
+static irqreturn_t cc_isr(int irq, void *dev_id)
+{
+ struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
+ struct device *dev = &(drvdata->pdev->dev);
+ u32 irr;
+
+ /* if the driver is suspended, return; probably a shared interrupt */
+ if (pm_runtime_suspended(dev))
+ return IRQ_NONE;
+
+ /* read the interrupt status */
+ irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
+ dev_dbg(dev, "Got IRR=0x%08X\n", irr);
+
+ if (irr == 0) /* Probably shared interrupt line */
+ return IRQ_NONE;
+
+ /* clear interrupt - must be before processing events */
+ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);
+
+ /* RNG interrupt - most probable */
+ if (irr & CC_HOST_RNG_IRQ_MASK) {
+ /* Mask RNG interrupts - will be unmasked in deferred work */
+ cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);
+
+ /* We clear the RNG interrupt here,
+ * to keep it from firing again once RNG interrupts are unmasked.
+ */
+ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
+ CC_HOST_RNG_IRQ_MASK);
+
+ irr &= ~CC_HOST_RNG_IRQ_MASK;
+
+ /* schedule execution of deferred work handler */
+ schedule_work(&drvdata->compwork);
+ }
+
+ if (irr) {
+ dev_dbg_ratelimited(dev,
+ "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
+ /* Just warning */
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void cc_trng_startwork_handler(struct work_struct *w)
+{
+ struct cctrng_drvdata *drvdata =
+ container_of(w, struct cctrng_drvdata, startwork);
+
+ drvdata->active_rosc = 0;
+ cc_trng_hw_trigger(drvdata);
+}
+
+
+static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
+{
+ struct clk *clk;
+ struct device *dev = &(drvdata->pdev->dev);
+ int rc = 0;
+
+ clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(dev, "Error getting clock: %pe\n", clk);
+ return PTR_ERR(clk);
+ }
+ drvdata->clk = clk;
+
+ rc = clk_prepare_enable(drvdata->clk);
+ if (rc) {
+ dev_err(dev, "Failed to enable clock\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
+{
+ clk_disable_unprepare(drvdata->clk);
+}
+
+
+static int cctrng_probe(struct platform_device *pdev)
+{
+ struct resource *req_mem_cc_regs = NULL;
+ struct cctrng_drvdata *drvdata;
+ struct device *dev = &pdev->dev;
+ int rc = 0;
+ u32 val;
+ int irq;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!drvdata->rng.name)
+ return -ENOMEM;
+
+ drvdata->rng.read = cctrng_read;
+ drvdata->rng.priv = (unsigned long)drvdata;
+ drvdata->rng.quality = CC_TRNG_QUALITY;
+
+ platform_set_drvdata(pdev, drvdata);
+ drvdata->pdev = pdev;
+
+ drvdata->circ.buf = (char *)drvdata->data_buf;
+
+ /* Get device resources */
+ /* First CC registers space */
+ req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* Map registers space */
+ drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ if (IS_ERR(drvdata->cc_base)) {
+ dev_err(dev, "Failed to ioremap registers");
+ return PTR_ERR(drvdata->cc_base);
+ }
+
+ dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
+ req_mem_cc_regs);
+ dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
+ &req_mem_cc_regs->start, drvdata->cc_base);
+
+ /* Then IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Failed getting IRQ resource\n");
+ return irq;
+ }
+
+ /* parse sampling rate from device tree */
+ rc = cc_trng_parse_sampling_ratio(drvdata);
+ if (rc) {
+ dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
+ return rc;
+ }
+
+ rc = cc_trng_clk_init(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_trng_clk_init failed\n");
+ return rc;
+ }
+
+ INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
+ INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
+ spin_lock_init(&drvdata->read_lock);
+
+ /* register the driver isr function */
+ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
+ if (rc) {
+ dev_err(dev, "Could not register to interrupt %d\n", irq);
+ goto post_clk_err;
+ }
+ dev_dbg(dev, "Registered to IRQ: %d\n", irq);
+
+ /* Clear all pending interrupts */
+ val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
+ dev_dbg(dev, "IRR=0x%08X\n", val);
+ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);
+
+ /* unmask HOST RNG interrupt */
+ cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
+ cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
+ ~CC_HOST_RNG_IRQ_MASK);
+
+ /* init PM */
+ rc = cc_trng_pm_init(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_trng_pm_init failed\n");
+ goto post_clk_err;
+ }
+
+ /* increment device's usage counter */
+ rc = cc_trng_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
+ goto post_pm_err;
+ }
+
+ /* set pending_hw to ensure that HW won't be triggered from read */
+ atomic_set(&drvdata->pending_hw, 1);
+
+ /* registration of the hwrng device */
+ rc = hwrng_register(&drvdata->rng);
+ if (rc) {
+ dev_err(dev, "Could not register hwrng device.\n");
+ goto post_pm_err;
+ }
+
+ /* trigger HW to start generating data */
+ drvdata->active_rosc = 0;
+ cc_trng_hw_trigger(drvdata);
+
+ /* All set, we can allow auto-suspend */
+ cc_trng_pm_go(drvdata);
+
+ dev_info(dev, "ARM cctrng device initialized\n");
+
+ return 0;
+
+post_pm_err:
+ cc_trng_pm_fini(drvdata);
+
+post_clk_err:
+ cc_trng_clk_fini(drvdata);
+
+ return rc;
+}
+
+static int cctrng_remove(struct platform_device *pdev)
+{
+ struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "Releasing cctrng resources...\n");
+
+ hwrng_unregister(&drvdata->rng);
+
+ cc_trng_pm_fini(drvdata);
+
+ cc_trng_clk_fini(drvdata);
+
+ dev_info(dev, "ARM cctrng device terminated\n");
+
+ return 0;
+}
+
+static int __maybe_unused cctrng_suspend(struct device *dev)
+{
+ struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
+ POWER_DOWN_ENABLE);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ return 0;
+}
+
+static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata)
+{
+ unsigned int val;
+ unsigned int i;
+
+ for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
+ /* in cc7x3, NVM_IS_IDLE indicates that the CC reset is
+ * completed and the device is fully functional
+ */
+ val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET);
+ if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) {
+ /* hw indicates reset completed */
+ return true;
+ }
+ /* allow other processes to be scheduled on the processor */
+ schedule();
+ }
+ /* reset not completed */
+ return false;
+}
+
+static int __maybe_unused cctrng_resume(struct device *dev)
+{
+ struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
+ int rc;
+
+ dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
+ /* Enables the device source clk */
+ rc = clk_prepare_enable(drvdata->clk);
+ if (rc) {
+ dev_err(dev, "failed getting clock back on. We're toast.\n");
+ return rc;
+ }
+
+ /* wait for Cryptocell reset completion */
+ if (!cctrng_wait_for_reset_completion(drvdata)) {
+ dev_err(dev, "Cryptocell reset not completed");
+ return -EBUSY;
+ }
+
+ /* unmask HOST RNG interrupt */
+ cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
+ cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
+ ~CC_HOST_RNG_IRQ_MASK);
+
+ cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
+ POWER_DOWN_DISABLE);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);
+
+static const struct of_device_id arm_cctrng_dt_match[] = {
+ { .compatible = "arm,cryptocell-713-trng", },
+ { .compatible = "arm,cryptocell-703-trng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);
+
+static struct platform_driver cctrng_driver = {
+ .driver = {
+ .name = "cctrng",
+ .of_match_table = arm_cctrng_dt_match,
+ .pm = &cctrng_pm,
+ },
+ .probe = cctrng_probe,
+ .remove = cctrng_remove,
+};
+
+static int __init cctrng_mod_init(void)
+{
+ /* Compile time assertion checks */
+ BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
+ BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);
+
+ return platform_driver_register(&cctrng_driver);
+}
+module_init(cctrng_mod_init);
+
+static void __exit cctrng_mod_exit(void)
+{
+ platform_driver_unregister(&cctrng_driver);
+}
+module_exit(cctrng_mod_exit);
+
+/* Module description */
+MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
+MODULE_AUTHOR("ARM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cctrng.h b/drivers/char/hw_random/cctrng.h
new file mode 100644
index 000000000000..1f2fde95adcb
--- /dev/null
+++ b/drivers/char/hw_random/cctrng.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
+
+#include <linux/bitops.h>
+
+#define POWER_DOWN_ENABLE 0x01
+#define POWER_DOWN_DISABLE 0x00
+
+/* hwrng quality: bits of true entropy per 1024 bits of input */
+#define CC_TRNG_QUALITY 1024
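Read against the hwrng convention stated in the comment above, a quality of 1024 out of 1024 declares every output bit to be full entropy, so the hwrng core will credit the TRNG's bytes at face value.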
+
+/* CryptoCell TRNG HW definitions */
+#define CC_TRNG_NUM_OF_ROSCS 4
+/* The number of words generated in the entropy holding register (EHR)
+ * 6 words (192 bit) according to HW implementation
+ */
+#define CC_TRNG_EHR_IN_WORDS 6
+#define CC_TRNG_EHR_IN_BITS (CC_TRNG_EHR_IN_WORDS * BITS_PER_TYPE(u32))
+
+#define CC_HOST_RNG_IRQ_MASK BIT(CC_HOST_RGF_IRR_RNG_INT_BIT_SHIFT)
+
+/* RNG interrupt mask */
+#define CC_RNG_INT_MASK (BIT(CC_RNG_IMR_EHR_VALID_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_AUTOCORR_ERR_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_CRNGT_ERR_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_VN_ERR_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_WATCHDOG_INT_MASK_BIT_SHIFT))
+
+// --------------------------------------
+// BLOCK: RNG
+// --------------------------------------
+#define CC_RNG_IMR_REG_OFFSET 0x0100UL
+#define CC_RNG_IMR_EHR_VALID_INT_MASK_BIT_SHIFT 0x0UL
+#define CC_RNG_IMR_AUTOCORR_ERR_INT_MASK_BIT_SHIFT 0x1UL
+#define CC_RNG_IMR_CRNGT_ERR_INT_MASK_BIT_SHIFT 0x2UL
+#define CC_RNG_IMR_VN_ERR_INT_MASK_BIT_SHIFT 0x3UL
+#define CC_RNG_IMR_WATCHDOG_INT_MASK_BIT_SHIFT 0x4UL
+#define CC_RNG_ISR_REG_OFFSET 0x0104UL
+#define CC_RNG_ISR_EHR_VALID_BIT_SHIFT 0x0UL
+#define CC_RNG_ISR_EHR_VALID_BIT_SIZE 0x1UL
+#define CC_RNG_ISR_AUTOCORR_ERR_BIT_SHIFT 0x1UL
+#define CC_RNG_ISR_AUTOCORR_ERR_BIT_SIZE 0x1UL
+#define CC_RNG_ISR_CRNGT_ERR_BIT_SHIFT 0x2UL
+#define CC_RNG_ISR_CRNGT_ERR_BIT_SIZE 0x1UL
+#define CC_RNG_ISR_WATCHDOG_BIT_SHIFT 0x4UL
+#define CC_RNG_ISR_WATCHDOG_BIT_SIZE 0x1UL
+#define CC_RNG_ICR_REG_OFFSET 0x0108UL
+#define CC_TRNG_CONFIG_REG_OFFSET 0x010CUL
+#define CC_EHR_DATA_0_REG_OFFSET 0x0114UL
+#define CC_RND_SOURCE_ENABLE_REG_OFFSET 0x012CUL
+#define CC_SAMPLE_CNT1_REG_OFFSET 0x0130UL
+#define CC_TRNG_DEBUG_CONTROL_REG_OFFSET 0x0138UL
+#define CC_RNG_SW_RESET_REG_OFFSET 0x0140UL
+#define CC_RNG_CLK_ENABLE_REG_OFFSET 0x01C4UL
+#define CC_RNG_DMA_ENABLE_REG_OFFSET 0x01C8UL
+#define CC_RNG_WATCHDOG_VAL_REG_OFFSET 0x01D8UL
+// --------------------------------------
+// BLOCK: SEC_HOST_RGF
+// --------------------------------------
+#define CC_HOST_RGF_IRR_REG_OFFSET 0x0A00UL
+#define CC_HOST_RGF_IRR_RNG_INT_BIT_SHIFT 0xAUL
+#define CC_HOST_RGF_IMR_REG_OFFSET 0x0A04UL
+#define CC_HOST_RGF_ICR_REG_OFFSET 0x0A08UL
+
+#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0x0A78UL
+
+// --------------------------------------
+// BLOCK: NVM
+// --------------------------------------
+#define CC_NVM_IS_IDLE_REG_OFFSET 0x0F10UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 65952393e1bb..7290c603fcb8 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -392,11 +392,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "%s: error getting IRQ resource - %d\n",
- __func__, irq);
+ if (irq < 0)
return irq;
- }
err = devm_request_irq(dev, irq, omap4_rng_irq,
IRQF_TRIGGER_NONE, dev_name(dev), priv);
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
index ddfbabaa5f8f..49b2e02537dd 100644
--- a/drivers/char/hw_random/optee-rng.c
+++ b/drivers/char/hw_random/optee-rng.c
@@ -226,7 +226,7 @@ static int optee_rng_probe(struct device *dev)
return -ENODEV;
/* Open session with hwrng Trusted App */
- memcpy(sess_arg.uuid, rng_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ export_uuid(sess_arg.uuid, &rng_device->id.uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index d7516a446987..008e6db9ce01 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -328,10 +328,8 @@ static int xgene_rng_probe(struct platform_device *pdev)
return PTR_ERR(ctx->csr_base);
rc = platform_get_irq(pdev, 0);
- if (rc < 0) {
- dev_err(&pdev->dev, "No IRQ resource\n");
+ if (rc < 0)
return rc;
- }
ctx->irq = rc;
dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0d10e31fd342..cae02b2a871c 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -327,7 +327,6 @@
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
-#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
@@ -337,6 +336,7 @@
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
+#include <crypto/sha.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
@@ -1397,14 +1397,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
__u32 w[5];
unsigned long l[LONGS(20)];
} hash;
- __u32 workspace[SHA_WORKSPACE_WORDS];
+ __u32 workspace[SHA1_WORKSPACE_WORDS];
unsigned long flags;
/*
* If we have an architectural hardware random number
* generator, use it for SHA's initial vector
*/
- sha_init(hash.w);
+ sha1_init(hash.w);
for (i = 0; i < LONGS(20); i++) {
unsigned long v;
if (!arch_get_random_long(&v))
@@ -1415,7 +1415,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
/* Generate a hash across the pool, 16 words (512 bits) at a time */
spin_lock_irqsave(&r->lock, flags);
for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+ sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
/*
* We mix the hash back into the pool to prevent backtracking
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 6b301afffd11..a1fb2fbdbe7b 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -537,10 +537,8 @@ static int sun8i_ss_probe(struct platform_device *pdev)
return err;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(ss->dev, "Cannot get SecuritySystem IRQ\n");
+ if (irq < 0)
return irq;
- }
ss->reset = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(ss->reset)) {
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 9d4ead2f7ebb..411857fad8ba 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -253,10 +253,8 @@ static int meson_crypto_probe(struct platform_device *pdev)
mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL);
for (i = 0; i < MAXFLOW; i++) {
mc->irqs[i] = platform_get_irq(pdev, i);
- if (mc->irqs[i] < 0) {
- dev_err(mc->dev, "Cannot get IRQ for flow %d\n", i);
+ if (mc->irqs[i] < 0)
return mc->irqs[i];
- }
err = devm_request_irq(&pdev->dev, mc->irqs[i], meson_irq_handler, 0,
"gxl-crypto", mc);
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index e536e2a6bbd8..75ccf41a7cb9 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -31,7 +31,6 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index fcf1effc7661..62ba0325a618 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2239,16 +2239,12 @@ artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
if (keylen > blocksize) {
- SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
-
- hdesc->tfm = tfm_ctx->child_hash;
-
tfm_ctx->hmac_key_length = blocksize;
- ret = crypto_shash_digest(hdesc, key, keylen,
- tfm_ctx->hmac_key);
+
+ ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen,
+ tfm_ctx->hmac_key);
if (ret)
return ret;
-
} else {
memcpy(tfm_ctx->hmac_key, key, keylen);
tfm_ctx->hmac_key_length = keylen;
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c8b9408541a9..a353217a0d33 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -308,9 +308,9 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
container_of(areq, struct skcipher_request, base);
struct iproc_ctx_s *ctx = rctx->ctx;
struct spu_cipher_parms cipher_parms;
- int err = 0;
- unsigned int chunksize = 0; /* Num bytes of request to submit */
- int remaining = 0; /* Bytes of request still to process */
+ int err;
+ unsigned int chunksize; /* Num bytes of request to submit */
+ int remaining; /* Bytes of request still to process */
int chunk_start; /* Beginning of data for current SPU msg */
/* IV or ctr value to use in this SPU msg */
@@ -698,7 +698,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
/* number of bytes still to be hashed in this req */
unsigned int nbytes_to_hash = 0;
- int err = 0;
+ int err;
unsigned int chunksize = 0; /* length of hash carry + new data */
/*
* length of new data, not from hash carry, to be submitted in
@@ -1664,7 +1664,7 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
struct spu_hw *spu = &iproc_priv.spu;
struct brcm_message *mssg = msg;
struct iproc_reqctx_s *rctx;
- int err = 0;
+ int err;
rctx = mssg->ctx;
if (unlikely(!rctx)) {
@@ -1967,7 +1967,7 @@ static int ahash_enqueue(struct ahash_request *req)
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
- int err = 0;
+ int err;
const char *alg_name;
flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
@@ -2299,7 +2299,7 @@ ahash_finup_exit:
static int ahash_digest(struct ahash_request *req)
{
- int err = 0;
+ int err;
flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
@@ -4436,7 +4436,7 @@ static int spu_mb_init(struct device *dev)
for (i = 0; i < iproc_priv.spu.num_chan; i++) {
iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
if (IS_ERR(iproc_priv.mbox[i])) {
- err = (int)PTR_ERR(iproc_priv.mbox[i]);
+ err = PTR_ERR(iproc_priv.mbox[i]);
dev_err(dev,
"Mbox channel %d request failed with err %d",
i, err);
@@ -4717,21 +4717,20 @@ static int spu_dt_read(struct platform_device *pdev)
matched_spu_type = of_device_get_match_data(dev);
if (!matched_spu_type) {
- dev_err(&pdev->dev, "Failed to match device\n");
+ dev_err(dev, "Failed to match device\n");
return -ENODEV;
}
spu->spu_type = matched_spu_type->type;
spu->spu_subtype = matched_spu_type->subtype;
- i = 0;
for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
if (IS_ERR(spu->reg_vbase[i])) {
err = PTR_ERR(spu->reg_vbase[i]);
- dev_err(&pdev->dev, "Failed to map registers: %d\n",
+ dev_err(dev, "Failed to map registers: %d\n",
err);
spu->reg_vbase[i] = NULL;
return err;
@@ -4747,7 +4746,7 @@ static int bcm_spu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spu_hw *spu = &iproc_priv.spu;
- int err = 0;
+ int err;
iproc_priv.pdev = pdev;
platform_set_drvdata(iproc_priv.pdev,
@@ -4757,7 +4756,7 @@ static int bcm_spu_probe(struct platform_device *pdev)
if (err < 0)
goto failure;
- err = spu_mb_init(&pdev->dev);
+ err = spu_mb_init(dev);
if (err < 0)
goto failure;
@@ -4766,7 +4765,7 @@ static int bcm_spu_probe(struct platform_device *pdev)
else if (spu->spu_type == SPU_TYPE_SPU2)
iproc_priv.bcm_hdr_len = 0;
- spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);
+ spu_functions_register(dev, spu->spu_type, spu->spu_subtype);
spu_counters_init();
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index e91be9b8b083..788c6607078b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -346,7 +346,7 @@ static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
}
/**
- * nitrox_bist_check - Check NITORX BIST registers status
+ * nitrox_bist_check - Check NITROX BIST registers status
* @ndev: NITROX device
*/
static int nitrox_bist_check(struct nitrox_device *ndev)
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index e0a8bd15aa74..32268e239bf1 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -10,10 +10,9 @@ config CRYPTO_DEV_CCP_DD
config CRYPTO_DEV_SP_CCP
bool "Cryptographic Coprocessor device"
default y
- depends on CRYPTO_DEV_CCP_DD
+ depends on CRYPTO_DEV_CCP_DD && DMADEVICES
select HW_RANDOM
select DMA_ENGINE
- select DMADEVICES
select CRYPTO_SHA1
select CRYPTO_SHA256
help
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 474e6f1a6a84..b0cc2bd73af8 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -272,9 +272,6 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct crypto_shash *shash = ctx->u.sha.hmac_tfm;
-
- SHASH_DESC_ON_STACK(sdesc, shash);
-
unsigned int block_size = crypto_shash_blocksize(shash);
unsigned int digest_size = crypto_shash_digestsize(shash);
int i, ret;
@@ -289,10 +286,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
if (key_len > block_size) {
/* Must hash the input key */
- sdesc->tfm = shash;
-
- ret = crypto_shash_digest(sdesc, key, key_len,
- ctx->u.sha.key);
+ ret = crypto_shash_tfm_digest(shash, key, key_len,
+ ctx->u.sha.key);
if (ret)
return -EINVAL;
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 896f190b9a50..439cd737076e 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -20,6 +20,7 @@
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
+#include <linux/gfp.h>
#include <asm/smp.h>
@@ -44,6 +45,14 @@ MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during
static bool psp_dead;
static int psp_timeout;
+/* Trusted Memory Region (TMR):
+ * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
+ * to allocate the memory, which will return aligned memory for the specified
+ * allocation order.
+ */
+#define SEV_ES_TMR_SIZE (1024 * 1024)
+static void *sev_es_tmr;
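The alignment guarantee comes from the allocator's order-based rounding: with 4 KB pages, get_order(SEV_ES_TMR_SIZE) = get_order(1 MB) = 8, so alloc_pages() hands back 2^8 = 256 contiguous pages that are naturally aligned to a 1 MB boundary, as the comment above requires.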
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@@ -214,6 +223,20 @@ static int __sev_platform_init_locked(int *error)
if (sev->state == SEV_STATE_INIT)
return 0;
+ if (sev_es_tmr) {
+ u64 tmr_pa;
+
+ /*
+ * Do not include the encryption mask on the physical
+ * address of the TMR (firmware should clear it anyway).
+ */
+ tmr_pa = __pa(sev_es_tmr);
+
+ sev->init_cmd_buf.flags |= SEV_INIT_FLAGS_SEV_ES;
+ sev->init_cmd_buf.tmr_address = tmr_pa;
+ sev->init_cmd_buf.tmr_len = SEV_ES_TMR_SIZE;
+ }
+
rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
if (rc)
return rc;
@@ -1012,6 +1035,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
+ struct page *tmr_page;
int error, rc;
if (!sev)
@@ -1041,6 +1065,16 @@ void sev_pci_init(void)
sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
+ /* Obtain the TMR memory area for SEV-ES use */
+ tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE));
+ if (tmr_page) {
+ sev_es_tmr = page_address(tmr_page);
+ } else {
+ sev_es_tmr = NULL;
+ dev_warn(sev->dev,
+ "SEV: TMR allocation failed, SEV-ES support unavailable\n");
+ }
+
/* Initialize the platform */
rc = sev_platform_init(&error);
if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
@@ -1075,4 +1109,13 @@ void sev_pci_exit(void)
return;
sev_platform_shutdown(NULL);
+
+ if (sev_es_tmr) {
+ /* The TMR area was encrypted, flush it from the cache */
+ wbinvd_on_all_cpus();
+
+ free_pages((unsigned long)sev_es_tmr,
+ get_order(SEV_ES_TMR_SIZE));
+ sev_es_tmr = NULL;
+ }
}
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index a84335328f37..872ea3ff1c6b 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -427,12 +427,9 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
int key_len = keylen >> 1;
int err;
- SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
-
- desc->tfm = ctx_p->shash_tfm;
-
- err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
- ctx_p->user.key + key_len);
+ err = crypto_shash_tfm_digest(ctx_p->shash_tfm,
+ ctx_p->user.key, key_len,
+ ctx_p->user.key + key_len);
if (err) {
dev_err(dev, "Failed to hash ESSIV key.\n");
return err;
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index c454afce7781..7083767602fc 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -26,7 +26,7 @@ static struct debugfs_reg32 ver_sig_regs[] = {
{ .name = "VERSION" }, /* Must be 1st */
};
-static struct debugfs_reg32 pid_cid_regs[] = {
+static const struct debugfs_reg32 pid_cid_regs[] = {
CC_DEBUG_REG(PERIPHERAL_ID_0),
CC_DEBUG_REG(PERIPHERAL_ID_1),
CC_DEBUG_REG(PERIPHERAL_ID_2),
@@ -38,7 +38,7 @@ static struct debugfs_reg32 pid_cid_regs[] = {
CC_DEBUG_REG(COMPONENT_ID_3),
};
-static struct debugfs_reg32 debug_regs[] = {
+static const struct debugfs_reg32 debug_regs[] = {
CC_DEBUG_REG(HOST_IRR),
CC_DEBUG_REG(HOST_POWER_DOWN_EN),
CC_DEBUG_REG(AXIM_MON_ERR),
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index c29b80dd30d8..caf1136e7ef9 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -44,7 +44,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
@@ -1757,7 +1756,7 @@ static int chcr_ahash_final(struct ahash_request *req)
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
struct chcr_context *ctx = h_ctx(rtfm);
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- int error = -EINVAL;
+ int error;
unsigned int cpu;
cpu = get_cpu();
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 9fd3b9d1ec2f..25bf6d963066 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -40,7 +40,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index f09c6cf7823e..9c3b3ca815e6 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -29,6 +29,7 @@ config CRYPTO_DEV_HISI_SEC2
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on ACPI
help
Support for HiSilicon SEC Engine of version 2 in crypto subsystem.
It provides AES, SM4, and 3DES algorithms with ECB
@@ -42,6 +43,7 @@ config CRYPTO_DEV_HISI_QM
depends on ARM64 || COMPILE_TEST
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
+ depends on ACPI
help
HiSilicon accelerator engines use a common queue management
interface. Specific engine driver may use this module.
@@ -52,6 +54,7 @@ config CRYPTO_DEV_HISI_ZIP
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
depends on UACCE || UACCE=n
+ depends on ACPI
select CRYPTO_DEV_HISI_QM
help
Support for HiSilicon ZIP Driver
@@ -61,6 +64,7 @@ config CRYPTO_DEV_HISI_HPRE
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on ACPI
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 03d512ec6336..ed730d173e95 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -25,6 +25,17 @@ enum hpre_ctrl_dbgfs_file {
HPRE_DEBUG_FILE_NUM,
};
+enum hpre_dfx_dbgfs_file {
+ HPRE_SEND_CNT,
+ HPRE_RECV_CNT,
+ HPRE_SEND_FAIL_CNT,
+ HPRE_SEND_BUSY_CNT,
+ HPRE_OVER_THRHLD_CNT,
+ HPRE_OVERTIME_THRHLD,
+ HPRE_INVALID_REQ_CNT,
+ HPRE_DFX_FILE_NUM
+};
+
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
struct hpre_debugfs_file {
@@ -34,6 +45,11 @@ struct hpre_debugfs_file {
struct hpre_debug *debug;
};
+struct hpre_dfx {
+ atomic64_t value;
+ enum hpre_dfx_dbgfs_file type;
+};
+
/*
* One HPRE controller has one PF and multiple VFs, some global configurations
* which PF has need this structure.
@@ -41,13 +57,13 @@ struct hpre_debugfs_file {
*/
struct hpre_debug {
struct dentry *debug_root;
+ struct hpre_dfx dfx[HPRE_DFX_FILE_NUM];
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
};
struct hpre {
struct hisi_qm qm;
struct hpre_debug debug;
- u32 num_vfs;
unsigned long status;
};
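
Each hpre_dfx entry pairs a statistic with an atomic64_t, so the crypto fast paths can bump counters without taking a lock, and each entry is later exposed as its own debugfs file. The core pattern, reduced to a sketch with hypothetical names:

#include <linux/atomic.h>
#include <linux/types.h>

struct dfx_counter {
	atomic64_t value;
	int type;			/* index into a file-name table */
};

static struct dfx_counter send_cnt;

static void on_send(void)
{
	atomic64_inc(&send_cnt.value);	/* lock-free on the hot path */
}

static u64 send_cnt_read(void)
{
	return atomic64_read(&send_cnt.value);	/* tear-free 64-bit read */
}
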
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 65425250b2e9..7b5cb27d473d 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -10,6 +10,7 @@
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
+#include <linux/time.h>
#include "hpre.h"
struct hpre_ctx;
@@ -32,6 +33,9 @@ struct hpre_ctx;
#define HPRE_SQE_DONE_SHIFT 30
#define HPRE_DH_MAX_P_SZ 512
+#define HPRE_DFX_SEC_TO_US 1000000
+#define HPRE_DFX_US_TO_NS 1000
+
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@@ -68,6 +72,7 @@ struct hpre_dh_ctx {
struct hpre_ctx {
struct hisi_qp *qp;
struct hpre_asym_request **req_list;
+ struct hpre *hpre;
spinlock_t req_lock;
unsigned int key_sz;
bool crt_g2_mode;
@@ -90,6 +95,7 @@ struct hpre_asym_request {
int err;
int req_id;
hpre_cb cb;
+ struct timespec64 req_time;
};
static DEFINE_MUTEX(hpre_alg_lock);
@@ -119,6 +125,7 @@ static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
struct hpre_ctx *ctx;
+ struct hpre_dfx *dfx;
int id;
ctx = hpre_req->ctx;
@@ -129,6 +136,10 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx->req_list[id] = hpre_req;
hpre_req->req_id = id;
+ dfx = ctx->hpre->debug.dfx;
+ if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
+ ktime_get_ts64(&hpre_req->req_time);
+
return id;
}
@@ -309,12 +320,16 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
+ struct hpre *hpre;
+
if (!ctx || !qp || qlen < 0)
return -EINVAL;
spin_lock_init(&ctx->req_lock);
ctx->qp = qp;
+ hpre = container_of(ctx->qp->qm, struct hpre, qm);
+ ctx->hpre = hpre;
ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
if (!ctx->req_list)
return -ENOMEM;
@@ -337,38 +352,80 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
ctx->key_sz = 0;
}
+static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
+ u64 overtime_thrhld)
+{
+ struct timespec64 reply_time;
+ u64 time_use_us;
+
+ ktime_get_ts64(&reply_time);
+ time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
+ HPRE_DFX_SEC_TO_US +
+ (reply_time.tv_nsec - req->req_time.tv_nsec) /
+ HPRE_DFX_US_TO_NS;
+
+ if (time_use_us <= overtime_thrhld)
+ return false;
+
+ return true;
+}
+
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct kpp_request *areq;
+ u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
areq = req->areq.dh;
areq->dst_len = ctx->key_sz;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct akcipher_request *areq;
+ u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
areq = req->areq.rsa;
areq->dst_len = ctx->key_sz;
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
akcipher_request_complete(areq, ret);
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
struct hpre_ctx *ctx = qp->qp_ctx;
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_sqe *sqe = resp;
+ struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
- ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
+
+ if (unlikely(!req)) {
+ atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
+ return;
+ }
+
+ req->cb(ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx)
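
hpre_is_bd_timeout() turns a timespec64 delta into microseconds before comparing it with the configured threshold. The arithmetic, restated standalone (a sketch; it assumes, like the patch, that the reply time is not earlier than the request time):

#include <linux/time64.h>
#include <linux/timekeeping.h>

#define SEC_TO_US	1000000
#define US_TO_NS	1000

/* Sketch: microseconds elapsed since *start. */
static u64 elapsed_us(const struct timespec64 *start)
{
	struct timespec64 now;

	ktime_get_ts64(&now);

	/* whole-second delta scaled to us, plus the ns delta scaled down */
	return (now.tv_sec - start->tv_sec) * SEC_TO_US +
	       (now.tv_nsec - start->tv_nsec) / US_TO_NS;
}
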
@@ -436,6 +493,29 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
return 0;
}
+static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
+{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ int ctr = 0;
+ int ret;
+
+ do {
+ atomic64_inc(&dfx[HPRE_SEND_CNT].value);
+ ret = hisi_qp_send(ctx->qp, msg);
+ if (ret != -EBUSY)
+ break;
+ atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
+ } while (ctr++ < HPRE_TRY_SEND_TIMES);
+
+ if (likely(!ret))
+ return ret;
+
+ if (ret != -EBUSY)
+ atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
+
+ return ret;
+}
+
#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
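
hpre_send() centralizes the bounded retry on -EBUSY that the DH and RSA paths each open-coded, incrementing the send/busy/fail counters as it goes. The control flow, stripped to its skeleton (a sketch; the submit callback stands in for any call that returns -EBUSY when the queue is full):

#include <linux/errno.h>

#define TRY_SEND_TIMES	100	/* assumed bound, standing in for HPRE_TRY_SEND_TIMES */

/* Sketch: bounded retry for a submit callback that returns -EBUSY on full. */
static int send_with_retry(int (*submit)(void *msg), void *msg)
{
	int tries = 0;
	int ret;

	do {
		ret = submit(msg);
		if (ret != -EBUSY)
			break;		/* success or a hard error */
	} while (tries++ < TRY_SEND_TIMES);

	return ret;
}
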
@@ -444,7 +524,6 @@ static int hpre_dh_compute_value(struct kpp_request *req)
void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
ret = hpre_msg_request_set(ctx, req, false);
@@ -465,11 +544,9 @@ static int hpre_dh_compute_value(struct kpp_request *req)
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -647,7 +724,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
@@ -677,11 +753,8 @@ static int hpre_rsa_enc(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
-
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -699,7 +772,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
@@ -736,11 +808,8 @@ static int hpre_rsa_dec(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
-
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 88be53bf4a38..a3ee127a70e3 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -59,10 +59,6 @@
#define HPRE_HAC_ECC2_CNT 0x301a08
#define HPRE_HAC_INT_STATUS 0x301800
#define HPRE_HAC_SOURCE_INT 0x301600
-#define MASTER_GLOBAL_CTRL_SHUTDOWN 1
-#define MASTER_TRANS_RETURN_RW 3
-#define HPRE_MASTER_TRANS_RETURN 0x300150
-#define HPRE_MASTER_GLOBAL_CTRL 0x300000
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
@@ -80,7 +76,16 @@
#define HPRE_BD_USR_MASK 0x3
#define HPRE_CLUSTER_CORE_MASK 0xf
+#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
+#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)
+#define HPRE_WR_MSI_PORT BIT(2)
+
+#define HPRE_CORE_ECC_2BIT_ERR BIT(1)
+#define HPRE_OOO_ECC_2BIT_ERR BIT(5)
+
#define HPRE_VIA_MSI_DSM 1
+#define HPRE_SQE_MASK_OFFSET 8
+#define HPRE_SQE_MASK_LEN 24
static struct hisi_qm_list hpre_devices;
static const char hpre_name[] = "hisi_hpre";
@@ -131,7 +136,7 @@ static const u64 hpre_cluster_offsets[] = {
HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};
-static struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
+static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
{"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
{"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
{"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
@@ -139,7 +144,7 @@ static struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
{"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
};
-static struct debugfs_reg32 hpre_com_dfx_regs[] = {
+static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
{"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
{"AXQOS ", HPRE_VFG_AXQOS},
{"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
@@ -156,44 +161,38 @@ static struct debugfs_reg32 hpre_com_dfx_regs[] = {
{"INT_STATUS ", HPRE_INT_STATUS},
};
-static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp)
-{
- struct pci_dev *pdev;
- u32 n, q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL);
- if (!pdev) {
- q_num = HPRE_QUEUE_NUM_V2;
- pr_info("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- if (rev_id != QM_HW_V2)
- return -EINVAL;
-
- q_num = HPRE_QUEUE_NUM_V2;
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n == 0 || n > q_num)
- return -EINVAL;
+static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
+ "send_cnt",
+ "recv_cnt",
+ "send_fail_cnt",
+ "send_busy_cnt",
+ "over_thrhld_cnt",
+ "overtime_thrhld",
+ "invalid_req_cnt"
+};
- return param_set_int(val, kp);
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
}
static const struct kernel_param_ops hpre_pf_q_num_ops = {
- .set = hpre_pf_q_num_set,
+ .set = pf_q_num_set,
.get = param_get_int,
};
-static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM;
-module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444);
-MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of CS(1-1024)");
+static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
+module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)");
+
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
+ .get = param_get_int,
+};
+
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
struct hisi_qp *hpre_create_qp(void)
{
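
pf_q_num_set() now simply forwards to a shared q_num_set() helper in the QM core (keyed by PCI device ID), so every engine driver validates its queue-number parameter the same way. For reference, the kernel_param_ops plumbing looks like this (a sketch with a hypothetical validator, not the shared helper's actual body):

#include <linux/kernel.h>
#include <linux/moduleparam.h>

/* Sketch: module parameter with a custom range-checking setter. */
static int my_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret = kstrtou32(val, 10, &n);

	if (ret || n == 0 || n > 1024)	/* hypothetical bounds */
		return -EINVAL;

	return param_set_int(val, kp);	/* store the validated value */
}

static const struct kernel_param_ops my_q_num_ops = {
	.set = my_q_num_set,
	.get = param_get_int,
};

static u32 my_q_num = 64;
module_param_cb(my_q_num, &my_q_num_ops, &my_q_num, 0444);
MODULE_PARM_DESC(my_q_num, "Number of queues (1-1024)");
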
@@ -232,9 +231,8 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
return 0;
}
-static int hpre_set_user_domain_and_cache(struct hpre *hpre)
+static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
unsigned long offset;
int ret, i;
@@ -324,17 +322,34 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
+ u32 val;
+
/* disable hpre hw error interrupts */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
+
+ /* disable HPRE block master OOO when m-bit error occur */
+ val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ val &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
+ writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
+ u32 val;
+
+	/* clear any residual HPRE hw error source */
+ writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+
/* enable hpre hw error interrupts */
writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
+
+ /* enable HPRE block master OOO when m-bit error occur */
+ val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ val |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
+ writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -354,9 +369,7 @@ static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
- struct hpre_debug *debug = file->debug;
- struct hpre *hpre = container_of(debug, struct hpre, debug);
- u32 num_vfs = hpre->num_vfs;
+ u32 num_vfs = qm->vfs_num;
u32 vfq_num, tmp;
@@ -523,6 +536,33 @@ static const struct file_operations hpre_ctrl_debug_fops = {
.write = hpre_ctrl_debug_write,
};
+static int hpre_debugfs_atomic64_get(void *data, u64 *val)
+{
+ struct hpre_dfx *dfx_item = data;
+
+ *val = atomic64_read(&dfx_item->value);
+
+ return 0;
+}
+
+static int hpre_debugfs_atomic64_set(void *data, u64 val)
+{
+ struct hpre_dfx *dfx_item = data;
+ struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
+
+ if (val)
+ return -EINVAL;
+
+ if (dfx_item->type == HPRE_OVERTIME_THRHLD)
+ atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
+ atomic64_set(&dfx_item->value, val);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
+ hpre_debugfs_atomic64_set, "%llu\n");
+
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
@@ -620,6 +660,22 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug)
return hpre_cluster_debugfs_init(debug);
}
+static void hpre_dfx_debug_init(struct hpre_debug *debug)
+{
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hpre_dfx *dfx = hpre->debug.dfx;
+ struct hisi_qm *qm = &hpre->qm;
+ struct dentry *parent;
+ int i;
+
+ parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
+ for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
+ dfx[i].type = i;
+ debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
+ &hpre_atomic64_ops);
+ }
+}
+
static int hpre_debugfs_init(struct hpre *hpre)
{
struct hisi_qm *qm = &hpre->qm;
@@ -629,6 +685,8 @@ static int hpre_debugfs_init(struct hpre *hpre)
dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
qm->debug.debug_root = dir;
+ qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
ret = hisi_qm_debug_init(qm);
if (ret)
@@ -640,6 +698,9 @@ static int hpre_debugfs_init(struct hpre *hpre)
if (ret)
goto failed_to_create;
}
+
+ hpre_dfx_debug_init(&hpre->debug);
+
return 0;
failed_to_create:
@@ -654,32 +715,27 @@ static void hpre_debugfs_exit(struct hpre *hpre)
debugfs_remove_recursive(qm->debug.debug_root);
}
-static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id < 0)
- return -ENODEV;
-
- if (rev_id == QM_HW_V1) {
+ if (pdev->revision == QM_HW_V1) {
pci_warn(pdev, "HPRE version 1 is not supported!\n");
return -EINVAL;
}
qm->pdev = pdev;
- qm->ver = rev_id;
+ qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
+
qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
- QM_HW_PF : QM_HW_VF;
- if (pdev->is_physfn) {
+ QM_HW_PF : QM_HW_VF;
+ if (qm->fun_type == QM_HW_PF) {
qm->qp_base = HPRE_PF_DEF_Q_BASE;
- qm->qp_num = hpre_pf_q_num;
+ qm->qp_num = pf_q_num;
+ qm->qm_list = &hpre_devices;
}
- qm->use_dma_api = true;
- return 0;
+ return hisi_qm_init(qm);
}
static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
@@ -693,8 +749,6 @@ static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
err->msg, err->int_msk);
err++;
}
-
- writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}
static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
@@ -702,16 +756,38 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
return readl(qm->io_base + HPRE_HAC_INT_STATUS);
}
+static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
+}
+
+static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 value;
+
+ value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
+ HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
+ HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+}
+
static const struct hisi_qm_err_ini hpre_err_ini = {
+ .hw_init = hpre_set_user_domain_and_cache,
.hw_err_enable = hpre_hw_error_enable,
.hw_err_disable = hpre_hw_error_disable,
.get_dev_hw_err_status = hpre_get_hw_err_status,
+ .clear_dev_hw_err_status = hpre_clear_hw_err_status,
.log_dev_hw_err = hpre_log_hw_error,
+ .open_axi_master_ooo = hpre_open_axi_master_ooo,
.err_info = {
.ce = QM_BASE_CE,
.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
.fe = 0,
- .msi = QM_DB_RANDOM_INVALID,
+ .ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
+ HPRE_OOO_ECC_2BIT_ERR,
+ .msi_wr_port = HPRE_WR_MSI_PORT,
+ .acpi_rst = "HRST",
}
};
@@ -722,7 +798,7 @@ static int hpre_pf_probe_init(struct hpre *hpre)
qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
- ret = hpre_set_user_domain_and_cache(hpre);
+ ret = hpre_set_user_domain_and_cache(qm);
if (ret)
return ret;
@@ -732,6 +808,20 @@ static int hpre_pf_probe_init(struct hpre *hpre)
return 0;
}
+static int hpre_probe_init(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ int ret;
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = hpre_pf_probe_init(hpre);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_qm *qm;
@@ -742,26 +832,17 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!hpre)
return -ENOMEM;
- pci_set_drvdata(pdev, hpre);
-
qm = &hpre->qm;
- ret = hpre_qm_pre_init(qm, pdev);
- if (ret)
- return ret;
-
- ret = hisi_qm_init(qm);
- if (ret)
+ ret = hpre_qm_init(qm, pdev);
+ if (ret) {
+ pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
return ret;
+ }
- if (pdev->is_physfn) {
- ret = hpre_pf_probe_init(hpre);
- if (ret)
- goto err_with_qm_init;
- } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) {
- /* v2 starts to support get vft by mailbox */
- ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- if (ret)
- goto err_with_qm_init;
+ ret = hpre_probe_init(hpre);
+ if (ret) {
+ pci_err(pdev, "Failed to probe (%d)!\n", ret);
+ goto err_with_qm_init;
}
ret = hisi_qm_start(qm);
@@ -779,8 +860,18 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_err(pdev, "fail to register algs to crypto!\n");
goto err_with_qm_start;
}
+
+ if (qm->fun_type == QM_HW_PF && vfs_num) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_with_crypto_register;
+ }
+
return 0;
+err_with_crypto_register:
+ hpre_algs_unregister();
+
err_with_qm_start:
hisi_qm_del_from_list(qm, &hpre_devices);
hisi_qm_stop(qm);
@@ -794,107 +885,6 @@ err_with_qm_init:
return ret;
}
-static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs)
-{
- struct hisi_qm *qm = &hpre->qm;
- u32 qp_num = qm->qp_num;
- int q_num, remain_q_num, i;
- u32 q_base = qp_num;
- int ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_qp_num - qp_num;
-
- /* If remaining queues are not enough, return error. */
- if (remain_q_num < num_vfs)
- return -EINVAL;
-
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num);
- if (ret)
- return ret;
- q_base += q_num;
- }
-
- return 0;
-}
-
-static int hpre_clear_vft_config(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- u32 num_vfs = hpre->num_vfs;
- int ret;
- u32 i;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
- hpre->num_vfs = 0;
-
- return 0;
-}
-
-static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- int pre_existing_vfs, num_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
- if (pre_existing_vfs) {
- pci_err(pdev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(int, max_vfs, HPRE_VF_NUM);
- ret = hpre_vf_q_assign(hpre, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- hpre->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't enable VF!\n");
- hpre_clear_vft_config(hpre);
- return ret;
- }
-
- return num_vfs;
-}
-
-static int hpre_sriov_disable(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- /* remove in hpre_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
-
- return hpre_clear_vft_config(hpre);
-}
-
-static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- if (num_vfs)
- return hpre_sriov_enable(pdev, num_vfs);
- else
- return hpre_sriov_disable(pdev);
-}
-
static void hpre_remove(struct pci_dev *pdev)
{
struct hpre *hpre = pci_get_drvdata(pdev);
@@ -903,8 +893,8 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_algs_unregister();
hisi_qm_del_from_list(qm, &hpre_devices);
- if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) {
- ret = hpre_sriov_disable(pdev);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
+ ret = hisi_qm_sriov_disable(pdev);
if (ret) {
pci_err(pdev, "Disable SRIOV fail!\n");
return;
@@ -924,6 +914,9 @@ static void hpre_remove(struct pci_dev *pdev)
static const struct pci_error_handlers hpre_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hpre_pci_driver = {
@@ -931,7 +924,7 @@ static struct pci_driver hpre_pci_driver = {
.id_table = hpre_dev_ids,
.probe = hpre_probe,
.remove = hpre_remove,
- .sriov_configure = hpre_sriov_configure,
+ .sriov_configure = hisi_qm_sriov_configure,
.err_handler = &hpre_err_handler,
};
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index f795fb557630..9bb263cec6c3 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1,9 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
+#include <linux/acpi.h>
+#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
+#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
@@ -53,6 +56,7 @@
#define QM_SQ_TYPE_SHIFT 8
#define QM_SQ_TYPE_MASK GENMASK(3, 0)
+#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT 0
@@ -64,6 +68,7 @@
#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE 4
+#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
/* eqc shift */
#define QM_EQE_AEQE_SIZE (2UL << 12)
@@ -122,9 +127,11 @@
#define QM_DFX_CNT_CLR_CE 0x100118
#define QM_ABNORMAL_INT_SOURCE 0x100000
+#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0)
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff
#define QM_ABNORMAL_INT_STATUS 0x100008
+#define QM_ABNORMAL_INT_SET 0x10000c
#define QM_ABNORMAL_INF00 0x100010
#define QM_FIFO_OVERFLOW_TYPE 0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
@@ -140,6 +147,27 @@
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_RAS_MSI_INT_SEL 0x1040f4
+#define QM_DEV_RESET_FLAG 0
+#define QM_RESET_WAIT_TIMEOUT 400
+#define QM_PEH_VENDOR_ID 0x1000d8
+#define ACC_VENDOR_ID_VALUE 0x5a5a
+#define QM_PEH_DFX_INFO0 0x1000fc
+#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
+#define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
+#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
+#define ACC_MASTER_TRANS_RETURN_RW 3
+#define ACC_MASTER_TRANS_RETURN 0x300150
+#define ACC_MASTER_GLOBAL_CTRL 0x300000
+#define ACC_AM_CFG_PORT_WR_EN 0x30001c
+#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
+#define ACC_AM_ROB_ECC_INT_STS 0x300104
+#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
+
+#define POLL_PERIOD 10
+#define POLL_TIMEOUT 1000
+#define WAIT_PERIOD_US_MAX 200
+#define WAIT_PERIOD_US_MIN 100
+#define MAX_WAIT_COUNTS 1000
#define QM_CACHE_WB_START 0x204
#define QM_CACHE_WB_DONE 0x208
@@ -147,7 +175,12 @@
#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
#define QMC_ALIGN(sz) ALIGN(sz, 32)
+#define QM_DBG_READ_LEN 256
+#define QM_DBG_WRITE_LEN 1024
#define QM_DBG_TMP_BUF_LEN 22
+#define QM_PCI_COMMAND_INVALID ~0
+
+#define QM_SQE_ADDR_MASK GENMASK(7, 0)
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
@@ -190,6 +223,12 @@ enum vft_type {
CQC_VFT,
};
+enum acc_err_result {
+ ACC_ERR_NONE,
+ ACC_ERR_NEED_RESET,
+ ACC_ERR_RECOVERED,
+};
+
struct qm_cqe {
__le32 rsvd0;
__le16 cmd_id;
@@ -284,10 +323,22 @@ struct hisi_qm_hw_ops {
u8 cmd, u16 index, u8 priority);
u32 (*get_irq_num)(struct hisi_qm *qm);
int (*debug_init)(struct hisi_qm *qm);
- void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi);
+ void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
void (*hw_error_uninit)(struct hisi_qm *qm);
- pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm);
+ enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
+};
+
+struct qm_dfx_item {
+ const char *name;
+ u32 offset;
+};
+
+static struct qm_dfx_item qm_dfx_files[] = {
+ {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
+ {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
+ {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
+ {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
+ {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
};
static const char * const qm_debug_file_name[] = {
@@ -325,6 +376,93 @@ static const char * const qm_fifo_overflow[] = {
"cq", "eq", "aeq",
};
+static const char * const qm_s[] = {
+ "init", "start", "close", "stop",
+};
+
+static const char * const qp_s[] = {
+ "none", "init", "start", "stop", "close",
+};
+
+static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
+{
+ enum qm_state curr = atomic_read(&qm->status.flags);
+ bool avail = false;
+
+ switch (curr) {
+ case QM_INIT:
+ if (new == QM_START || new == QM_CLOSE)
+ avail = true;
+ break;
+ case QM_START:
+ if (new == QM_STOP)
+ avail = true;
+ break;
+ case QM_STOP:
+ if (new == QM_CLOSE || new == QM_START)
+ avail = true;
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
+ qm_s[curr], qm_s[new]);
+
+ if (!avail)
+		dev_warn(&qm->pdev->dev, "Cannot change qm state from %s to %s\n",
+			 qm_s[curr], qm_s[new]);
+
+ return avail;
+}
+
+static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
+ enum qp_state new)
+{
+ enum qm_state qm_curr = atomic_read(&qm->status.flags);
+ enum qp_state qp_curr = 0;
+ bool avail = false;
+
+ if (qp)
+ qp_curr = atomic_read(&qp->qp_status.flags);
+
+ switch (new) {
+ case QP_INIT:
+ if (qm_curr == QM_START || qm_curr == QM_INIT)
+ avail = true;
+ break;
+ case QP_START:
+ if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
+ (qm_curr == QM_START && qp_curr == QP_STOP))
+ avail = true;
+ break;
+ case QP_STOP:
+ if ((qm_curr == QM_START && qp_curr == QP_START) ||
+ (qp_curr == QP_INIT))
+ avail = true;
+ break;
+ case QP_CLOSE:
+ if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
+ (qm_curr == QM_START && qp_curr == QP_STOP) ||
+ (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
+ (qm_curr == QM_STOP && qp_curr == QP_INIT))
+ avail = true;
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
+ qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
+
+ if (!avail)
+ dev_warn(&qm->pdev->dev,
+			"Cannot change qp state from %s to %s in QM %s\n",
+			qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
+
+ return avail;
+}
+
/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
static int qm_wait_mb_ready(struct hisi_qm *qm)
{
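
The new state checks give the QM an explicit lifecycle, and qp transitions are additionally gated on the QM's own state. The QM side of the machine, condensed from the switch above into a sketch (enum ordering mirrors the driver's qm_s[] table):

#include <linux/types.h>

enum qm_state { QM_INIT, QM_START, QM_CLOSE, QM_STOP };

/* Sketch: QM transitions permitted by qm_avail_state(). */
static bool qm_transition_ok(enum qm_state cur, enum qm_state next)
{
	switch (cur) {
	case QM_INIT:
		return next == QM_START || next == QM_CLOSE;
	case QM_START:
		return next == QM_STOP;
	case QM_STOP:
		return next == QM_START || next == QM_CLOSE;
	default:
		return false;
	}
}
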
@@ -393,6 +531,8 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
busy_unlock:
mutex_unlock(&qm->mailbox_lock);
+ if (ret)
+ atomic64_inc(&qm->debug.dfx.mb_err_cnt);
return ret;
}
@@ -460,7 +600,7 @@ static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
- return qm->qp_array[cqn];
+ return &qm->qp_array[cqn];
}
static void qm_cq_head_update(struct hisi_qp *qp)
@@ -510,8 +650,7 @@ static void qm_work_process(struct work_struct *work)
while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
eqe_num++;
qp = qm_to_hisi_qp(qm, eqe);
- if (qp)
- qm_poll_qp(qp, qm);
+ qm_poll_qp(qp, qm);
if (qm->status.eq_head == QM_Q_DEPTH - 1) {
qm->status.eqc_phase = !qm->status.eqc_phase;
@@ -551,6 +690,7 @@ static irqreturn_t qm_irq(int irq, void *data)
if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
return do_qm_irq(irq, data);
+ atomic64_inc(&qm->debug.dfx.err_irq_cnt);
dev_err(&qm->pdev->dev, "invalid int source\n");
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
@@ -563,6 +703,7 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
u32 type;
+ atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
return IRQ_NONE;
@@ -590,79 +731,20 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t qm_abnormal_irq(int irq, void *data)
-{
- const struct hisi_qm_hw_error *err = qm_hw_error;
- struct hisi_qm *qm = data;
- struct device *dev = &qm->pdev->dev;
- u32 error_status, tmp;
-
- /* read err sts */
- tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
- error_status = qm->msi_mask & tmp;
-
- while (err->msg) {
- if (err->int_msk & error_status)
- dev_err(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
-
- err++;
- }
-
- /* clear err sts */
- writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
-
- return IRQ_HANDLED;
-}
-
-static int qm_irq_register(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
- qm_irq, IRQF_SHARED, qm->dev_name, qm);
- if (ret)
- return ret;
-
- if (qm->ver == QM_HW_V2) {
- ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
- qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
- if (ret)
- goto err_aeq_irq;
-
- if (qm->fun_type == QM_HW_PF) {
- ret = request_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR),
- qm_abnormal_irq, IRQF_SHARED,
- qm->dev_name, qm);
- if (ret)
- goto err_abonormal_irq;
- }
- }
-
- return 0;
-
-err_abonormal_irq:
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-err_aeq_irq:
- free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
- return ret;
-}
-
static void qm_irq_unregister(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
- if (qm->ver == QM_HW_V2) {
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
+ if (qm->ver == QM_HW_V1)
+ return;
+
+ free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
- }
+ if (qm->fun_type == QM_HW_PF)
+ free_irq(pci_irq_vector(pdev,
+ QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
}
static void qm_init_qp_status(struct hisi_qp *qp)
@@ -672,7 +754,7 @@ static void qm_init_qp_status(struct hisi_qp *qp)
qp_status->sq_tail = 0;
qp_status->cq_head = 0;
qp_status->cqc_phase = true;
- qp_status->flags = 0;
+ atomic_set(&qp_status->flags, 0);
}
static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
@@ -683,36 +765,26 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
if (number > 0) {
switch (type) {
case SQC_VFT:
- switch (qm->ver) {
- case QM_HW_V1:
+ if (qm->ver == QM_HW_V1) {
tmp = QM_SQC_VFT_BUF_SIZE |
QM_SQC_VFT_SQC_SIZE |
QM_SQC_VFT_INDEX_NUMBER |
QM_SQC_VFT_VALID |
(u64)base << QM_SQC_VFT_START_SQN_SHIFT;
- break;
- case QM_HW_V2:
+ } else {
tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
QM_SQC_VFT_VALID |
(u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
- break;
- case QM_HW_UNKNOWN:
- break;
}
break;
case CQC_VFT:
- switch (qm->ver) {
- case QM_HW_V1:
+ if (qm->ver == QM_HW_V1) {
tmp = QM_CQC_VFT_BUF_SIZE |
QM_CQC_VFT_SQC_SIZE |
QM_CQC_VFT_INDEX_NUMBER |
QM_CQC_VFT_VALID;
- break;
- case QM_HW_V2:
+ } else {
tmp = QM_CQC_VFT_VALID;
- break;
- case QM_HW_UNKNOWN:
- break;
}
break;
}
@@ -986,6 +1058,473 @@ static const struct file_operations qm_regs_fops = {
.release = single_release,
};
+static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ char buf[QM_DBG_READ_LEN];
+ int len;
+
+ if (*pos)
+ return 0;
+
+ if (count < QM_DBG_READ_LEN)
+ return -ENOSPC;
+
+ len = snprintf(buf, QM_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
+
+ if (copy_to_user(buffer, buf, len))
+ return -EFAULT;
+
+ return (*pos = len);
+}
+
+static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
+ dma_addr_t *dma_addr)
+{
+ struct device *dev = &qm->pdev->dev;
+ void *ctx_addr;
+
+ ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
+ if (!ctx_addr)
+ return ERR_PTR(-ENOMEM);
+
+ *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, *dma_addr)) {
+ dev_err(dev, "DMA mapping error!\n");
+ kfree(ctx_addr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return ctx_addr;
+}
+
+static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
+ const void *ctx_addr, dma_addr_t *dma_addr)
+{
+ struct device *dev = &qm->pdev->dev;
+
+ dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
+ kfree(ctx_addr);
+}
+
+static int dump_show(struct hisi_qm *qm, void *info,
+ unsigned int info_size, char *info_name)
+{
+ struct device *dev = &qm->pdev->dev;
+ u8 *info_buf, *info_curr = info;
+ u32 i;
+#define BYTE_PER_DW 4
+
+ info_buf = kzalloc(info_size, GFP_KERNEL);
+ if (!info_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < info_size; i++, info_curr++) {
+ if (i % BYTE_PER_DW == 0)
+ info_buf[i + 3UL] = *info_curr;
+ else if (i % BYTE_PER_DW == 1)
+ info_buf[i + 1UL] = *info_curr;
+ else if (i % BYTE_PER_DW == 2)
+ info_buf[i - 1] = *info_curr;
+ else if (i % BYTE_PER_DW == 3)
+ info_buf[i - 3] = *info_curr;
+ }
+
+ dev_info(dev, "%s DUMP\n", info_name);
+ for (i = 0; i < info_size; i += BYTE_PER_DW) {
+ pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
+ info_buf[i], info_buf[i + 1UL],
+ info_buf[i + 2UL], info_buf[i + 3UL]);
+ }
+
+ kfree(info_buf);
+
+ return 0;
+}
+
+static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
+{
+ return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
+}
+
+static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
+{
+ return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
+}
+
+static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_sqc *sqc, *sqc_curr;
+ dma_addr_t sqc_dma;
+ u32 qp_id;
+ int ret;
+
+ if (!s)
+ return -EINVAL;
+
+ ret = kstrtou32(s, 0, &qp_id);
+ if (ret || qp_id >= qm->qp_num) {
+ dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
+ return -EINVAL;
+ }
+
+ sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
+ if (IS_ERR(sqc))
+ return PTR_ERR(sqc);
+
+ ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
+ if (ret) {
+ down_read(&qm->qps_lock);
+ if (qm->sqc) {
+ sqc_curr = qm->sqc + qp_id;
+
+ ret = dump_show(qm, sqc_curr, sizeof(*sqc),
+ "SOFT SQC");
+ if (ret)
+ dev_info(dev, "Show soft sqc failed!\n");
+ }
+ up_read(&qm->qps_lock);
+
+ goto err_free_ctx;
+ }
+
+ ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
+ if (ret)
+ dev_info(dev, "Show hw sqc failed!\n");
+
+err_free_ctx:
+ qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
+ return ret;
+}
+
+static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_cqc *cqc, *cqc_curr;
+ dma_addr_t cqc_dma;
+ u32 qp_id;
+ int ret;
+
+ if (!s)
+ return -EINVAL;
+
+ ret = kstrtou32(s, 0, &qp_id);
+ if (ret || qp_id >= qm->qp_num) {
+ dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
+ return -EINVAL;
+ }
+
+ cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
+ if (IS_ERR(cqc))
+ return PTR_ERR(cqc);
+
+ ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
+ if (ret) {
+ down_read(&qm->qps_lock);
+ if (qm->cqc) {
+ cqc_curr = qm->cqc + qp_id;
+
+ ret = dump_show(qm, cqc_curr, sizeof(*cqc),
+ "SOFT CQC");
+ if (ret)
+ dev_info(dev, "Show soft cqc failed!\n");
+ }
+ up_read(&qm->qps_lock);
+
+ goto err_free_ctx;
+ }
+
+ ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
+ if (ret)
+ dev_info(dev, "Show hw cqc failed!\n");
+
+err_free_ctx:
+ qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
+ return ret;
+}
+
+static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
+ int cmd, char *name)
+{
+ struct device *dev = &qm->pdev->dev;
+ dma_addr_t xeqc_dma;
+ void *xeqc;
+ int ret;
+
+ if (strsep(&s, " ")) {
+ dev_err(dev, "Please do not input extra characters!\n");
+ return -EINVAL;
+ }
+
+ xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
+ if (IS_ERR(xeqc))
+ return PTR_ERR(xeqc);
+
+ ret = qm_mb(qm, cmd, xeqc_dma, 0, 1);
+ if (ret)
+ goto err_free_ctx;
+
+ ret = dump_show(qm, xeqc, size, name);
+ if (ret)
+ dev_info(dev, "Show hw %s failed!\n", name);
+
+err_free_ctx:
+ qm_ctx_free(qm, size, xeqc, &xeqc_dma);
+ return ret;
+}
+
+static int q_dump_param_parse(struct hisi_qm *qm, char *s,
+ u32 *e_id, u32 *q_id)
+{
+ struct device *dev = &qm->pdev->dev;
+ unsigned int qp_num = qm->qp_num;
+ char *presult;
+ int ret;
+
+ presult = strsep(&s, " ");
+ if (!presult) {
+ dev_err(dev, "Please input qp number!\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(presult, 0, q_id);
+ if (ret || *q_id >= qp_num) {
+ dev_err(dev, "Please input qp num (0-%d)", qp_num - 1);
+ return -EINVAL;
+ }
+
+ presult = strsep(&s, " ");
+ if (!presult) {
+ dev_err(dev, "Please input sqe number!\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(presult, 0, e_id);
+ if (ret || *e_id >= QM_Q_DEPTH) {
+ dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
+ return -EINVAL;
+ }
+
+ if (strsep(&s, " ")) {
+ dev_err(dev, "Please do not input extra characters!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qm_sq_dump(struct hisi_qm *qm, char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+ void *sqe, *sqe_curr;
+ struct hisi_qp *qp;
+ u32 qp_id, sqe_id;
+ int ret;
+
+ ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
+ if (ret)
+ return ret;
+
+ sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
+ if (!sqe)
+ return -ENOMEM;
+
+ qp = &qm->qp_array[qp_id];
+ memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
+ sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
+ memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
+ qm->debug.sqe_mask_len);
+
+ ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
+ if (ret)
+ dev_info(dev, "Show sqe failed!\n");
+
+ kfree(sqe);
+
+ return ret;
+}
+
+static int qm_cq_dump(struct hisi_qm *qm, char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_cqe *cqe_curr;
+ struct hisi_qp *qp;
+ u32 qp_id, cqe_id;
+ int ret;
+
+ ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
+ if (ret)
+ return ret;
+
+ qp = &qm->qp_array[qp_id];
+ cqe_curr = qp->cqe + cqe_id;
+ ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
+ if (ret)
+ dev_info(dev, "Show cqe failed!\n");
+
+ return ret;
+}
+
+static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
+ size_t size, char *name)
+{
+ struct device *dev = &qm->pdev->dev;
+ void *xeqe;
+ u32 xeqe_id;
+ int ret;
+
+ if (!s)
+ return -EINVAL;
+
+ ret = kstrtou32(s, 0, &xeqe_id);
+ if (ret || xeqe_id >= QM_Q_DEPTH) {
+ dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
+ return -EINVAL;
+ }
+
+ down_read(&qm->qps_lock);
+
+ if (qm->eqe && !strcmp(name, "EQE")) {
+ xeqe = qm->eqe + xeqe_id;
+ } else if (qm->aeqe && !strcmp(name, "AEQE")) {
+ xeqe = qm->aeqe + xeqe_id;
+ } else {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = dump_show(qm, xeqe, size, name);
+ if (ret)
+ dev_info(dev, "Show %s failed!\n", name);
+
+err_unlock:
+ up_read(&qm->qps_lock);
+ return ret;
+}
+
+static int qm_dbg_help(struct hisi_qm *qm, char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+
+ if (strsep(&s, " ")) {
+ dev_err(dev, "Please do not input extra characters!\n");
+ return -EINVAL;
+ }
+
+ dev_info(dev, "available commands:\n");
+ dev_info(dev, "sqc <num>\n");
+ dev_info(dev, "cqc <num>\n");
+ dev_info(dev, "eqc\n");
+ dev_info(dev, "aeqc\n");
+ dev_info(dev, "sq <num> <e>\n");
+ dev_info(dev, "cq <num> <e>\n");
+ dev_info(dev, "eq <e>\n");
+ dev_info(dev, "aeq <e>\n");
+
+ return 0;
+}
+
+static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *presult, *s;
+ int ret;
+
+ s = kstrdup(cmd_buf, GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ presult = strsep(&s, " ");
+ if (!presult) {
+ kfree(s);
+ return -EINVAL;
+ }
+
+ if (!strcmp(presult, "sqc"))
+ ret = qm_sqc_dump(qm, s);
+ else if (!strcmp(presult, "cqc"))
+ ret = qm_cqc_dump(qm, s);
+ else if (!strcmp(presult, "eqc"))
+ ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
+ QM_MB_CMD_EQC, "EQC");
+ else if (!strcmp(presult, "aeqc"))
+ ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
+ QM_MB_CMD_AEQC, "AEQC");
+ else if (!strcmp(presult, "sq"))
+ ret = qm_sq_dump(qm, s);
+ else if (!strcmp(presult, "cq"))
+ ret = qm_cq_dump(qm, s);
+ else if (!strcmp(presult, "eq"))
+ ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
+ else if (!strcmp(presult, "aeq"))
+ ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
+ else if (!strcmp(presult, "help"))
+ ret = qm_dbg_help(qm, s);
+ else
+ ret = -EINVAL;
+
+ if (ret)
+ dev_info(dev, "Please echo help\n");
+
+ kfree(s);
+
+ return ret;
+}
+
+static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct hisi_qm *qm = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp;
+ int ret;
+
+ if (*pos)
+ return 0;
+
+	/* Check whether the instance is being reset. */
+ if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
+ return 0;
+
+ if (count > QM_DBG_WRITE_LEN)
+ return -ENOSPC;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+
+ if (copy_from_user(cmd_buf, buffer, count)) {
+ kfree(cmd_buf);
+ return -EFAULT;
+ }
+
+ cmd_buf[count] = '\0';
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ ret = qm_cmd_write_dump(qm, cmd_buf);
+ if (ret) {
+ kfree(cmd_buf);
+ return ret;
+ }
+
+ kfree(cmd_buf);
+
+ return count;
+}
+
+static const struct file_operations qm_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qm_cmd_read,
+ .write = qm_cmd_write,
+};
+
static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
{
struct dentry *qm_d = qm->debug.qm_d;
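
dump_show() above reorders bytes so each 32-bit word of a little-endian context structure prints most-significant byte first. Its index arithmetic (0<->3 and 1<->2 within every DW) is just a byte swap per word; a sketch assuming the size is a multiple of 4:

#include <linux/string.h>
#include <linux/swab.h>
#include <linux/types.h>

/* Sketch: per-DW byte reversal, equivalent to dump_show()'s indexing. */
static void reorder_for_dump(u8 *dst, const u8 *src, u32 size)
{
	u32 i;

	for (i = 0; i + 4 <= size; i += 4) {
		u32 dw;

		memcpy(&dw, src + i, sizeof(dw));
		dw = swab32(dw);		/* 0<->3, 1<->2, as above */
		memcpy(dst + i, &dw, sizeof(dw));
	}
}
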
@@ -1001,20 +1540,21 @@ static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
return 0;
}
-static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi)
+static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi)
+static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
- u32 irq_enable = ce | nfe | fe | msi;
+ u32 irq_enable = ce | nfe | fe;
u32 irq_unmask = ~irq_enable;
qm->error_mask = ce | nfe | fe;
- qm->msi_mask = msi;
+
+ /* clear QM hw residual error source */
+ writel(QM_ABNORMAL_INT_SOURCE_CLR,
+ qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
@@ -1022,9 +1562,6 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
- /* use RAS irq default, so only set QM_RAS_MSI_INT_SEL for MSI */
- writel(msi, qm->io_base + QM_RAS_MSI_INT_SEL);
-
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
@@ -1071,7 +1608,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
}
}
-static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm)
+static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
u32 error_status, tmp;
@@ -1080,15 +1617,20 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm)
error_status = qm->error_mask & tmp;
if (error_status) {
- qm_log_hw_error(qm, error_status);
+ if (error_status & QM_ECC_MBIT)
+ qm->err_status.is_qm_ecc_mbit = true;
- /* clear err sts */
- writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ qm_log_hw_error(qm, error_status);
+ if (error_status == QM_DB_RANDOM_INVALID) {
+ writel(error_status, qm->io_base +
+ QM_ABNORMAL_INT_SOURCE);
+ return ACC_ERR_RECOVERED;
+ }
- return PCI_ERS_RESULT_NEED_RESET;
+ return ACC_ERR_NEED_RESET;
}
- return PCI_ERS_RESULT_RECOVERED;
+ return ACC_ERR_RECOVERED;
}
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
@@ -1117,68 +1659,61 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
return qp->sqe + sq_tail * qp->qm->sqe_size;
}
-/**
- * hisi_qm_create_qp() - Create a queue pair from qm.
- * @qm: The qm we create a qp from.
- * @alg_type: Accelerator specific algorithm type in sqc.
- *
- * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
- * qp memory fails.
- */
-struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
+static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
struct device *dev = &qm->pdev->dev;
struct hisi_qp *qp;
- int qp_id, ret;
-
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ int qp_id;
- write_lock(&qm->qps_lock);
+ if (!qm_qp_avail_state(qm, NULL, QP_INIT))
+ return ERR_PTR(-EPERM);
- qp_id = find_first_zero_bit(qm->qp_bitmap, qm->qp_num);
- if (qp_id >= qm->qp_num) {
- write_unlock(&qm->qps_lock);
- dev_info(&qm->pdev->dev, "QM all queues are busy!\n");
- ret = -EBUSY;
- goto err_free_qp;
+ if (qm->qp_in_used == qm->qp_num) {
+ dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
+ qm->qp_num);
+ atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
+ return ERR_PTR(-EBUSY);
}
- set_bit(qp_id, qm->qp_bitmap);
- qm->qp_array[qp_id] = qp;
- qm->qp_in_used++;
- write_unlock(&qm->qps_lock);
-
- qp->qm = qm;
+ qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
+ if (qp_id < 0) {
+ dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
+ qm->qp_num);
+ atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
+ return ERR_PTR(-EBUSY);
+ }
- if (qm->use_dma_api) {
- qp->qdma.size = qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH;
- qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size,
- &qp->qdma.dma, GFP_KERNEL);
- if (!qp->qdma.va) {
- ret = -ENOMEM;
- goto err_clear_bit;
- }
+ qp = &qm->qp_array[qp_id];
- dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n",
- qp->qdma.va, &qp->qdma.dma, qp->qdma.size);
- }
+ memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
+ qp->event_cb = NULL;
+ qp->req_cb = NULL;
qp->qp_id = qp_id;
qp->alg_type = alg_type;
+ qm->qp_in_used++;
+ atomic_set(&qp->qp_status.flags, QP_INIT);
return qp;
+}
+
+/**
+ * hisi_qm_create_qp() - Create a queue pair from qm.
+ * @qm: The qm we create a qp from.
+ * @alg_type: Accelerator specific algorithm type in sqc.
+ *
+ * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
+ * qp memory fails.
+ */
+struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
+{
+ struct hisi_qp *qp;
-err_clear_bit:
- write_lock(&qm->qps_lock);
- qm->qp_array[qp_id] = NULL;
- clear_bit(qp_id, qm->qp_bitmap);
- write_unlock(&qm->qps_lock);
-err_free_qp:
- kfree(qp);
- return ERR_PTR(ret);
+ down_write(&qm->qps_lock);
+ qp = qm_create_qp_nolock(qm, alg_type);
+ up_write(&qm->qps_lock);
+
+ return qp;
}
EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
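
qp bookkeeping moves from a bitmap plus per-qp kzalloc() to a preallocated qp_array indexed by IDs from idr_alloc_cyclic(), which searches from just past the last allocated ID and wraps, so recently released qp slots are not immediately handed out again. Minimal use of that API (a sketch):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(my_idr);

/* Sketch: cyclic ID allocation in [0, max); returns -ENOSPC when full. */
static int get_cyclic_id(int max)
{
	/* NULL entries are fine here; the ID itself is the resource. */
	return idr_alloc_cyclic(&my_idr, NULL, 0, max, GFP_KERNEL);
}

static void put_id(int id)
{
	idr_remove(&my_idr, id);
}
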
@@ -1191,19 +1726,18 @@ EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
void hisi_qm_release_qp(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
- struct qm_dma *qdma = &qp->qdma;
- struct device *dev = &qm->pdev->dev;
- if (qm->use_dma_api && qdma->va)
- dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
+ down_write(&qm->qps_lock);
+
+ if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
+ up_write(&qm->qps_lock);
+ return;
+ }
- write_lock(&qm->qps_lock);
- qm->qp_array[qp->qp_id] = NULL;
- clear_bit(qp->qp_id, qm->qp_bitmap);
qm->qp_in_used--;
- write_unlock(&qm->qps_lock);
+ idr_remove(&qm->qp_idr, qp->qp_id);
- kfree(qp);
+ up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
@@ -1234,7 +1768,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
if (ver == QM_HW_V1) {
sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
- } else if (ver == QM_HW_V2) {
+ } else {
sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
sqc->w8 = 0; /* rand_qc */
}
@@ -1261,7 +1795,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
if (ver == QM_HW_V1) {
cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4));
cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
- } else if (ver == QM_HW_V2) {
+ } else {
cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4));
cqc->w8 = 0;
}
@@ -1274,6 +1808,27 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
return ret;
}
+static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
+{
+ struct hisi_qm *qm = qp->qm;
+ struct device *dev = &qm->pdev->dev;
+ int qp_id = qp->qp_id;
+ int pasid = arg;
+ int ret;
+
+ if (!qm_qp_avail_state(qm, qp, QP_START))
+ return -EPERM;
+
+ ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
+ if (ret)
+ return ret;
+
+ atomic_set(&qp->qp_status.flags, QP_START);
+ dev_dbg(dev, "queue %d started\n", qp_id);
+
+ return 0;
+}
+
/**
* hisi_qm_start_qp() - Start a qp into running.
* @qp: The qp we want to start to run.
@@ -1285,48 +1840,112 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
{
struct hisi_qm *qm = qp->qm;
- struct device *dev = &qm->pdev->dev;
- enum qm_hw_ver ver = qm->ver;
- int qp_id = qp->qp_id;
- int pasid = arg;
- size_t off = 0;
int ret;
-#define QP_INIT_BUF(qp, type, size) do { \
- (qp)->type = ((qp)->qdma.va + (off)); \
- (qp)->type##_dma = (qp)->qdma.dma + (off); \
- off += (size); \
-} while (0)
+ down_write(&qm->qps_lock);
+ ret = qm_start_qp_nolock(qp, arg);
+ up_write(&qm->qps_lock);
- if (!qp->qdma.dma) {
- dev_err(dev, "cannot get qm dma buffer\n");
- return -EINVAL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
+
+/**
+ * Determine whether the queue has been drained by comparing the tail
+ * pointers of the sq and cq.
+ */
+static int qm_drain_qp(struct hisi_qp *qp)
+{
+ size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
+ struct hisi_qm *qm = qp->qm;
+ struct device *dev = &qm->pdev->dev;
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ dma_addr_t dma_addr;
+ int ret = 0, i = 0;
+ void *addr;
+
+ /*
+	 * There is no need to drain the queue if an ECC multi-bit error
+	 * has occurred, because the master OOO is already blocked.
+ */
+ if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
+ return 0;
+
+ addr = qm_ctx_alloc(qm, size, &dma_addr);
+ if (IS_ERR(addr)) {
+ dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
+ return -ENOMEM;
}
- /* sq need 128 bytes alignment */
- if (qp->qdma.dma & QM_SQE_DATA_ALIGN_MASK) {
- dev_err(dev, "qm sq is not aligned to 128 byte\n");
- return -EINVAL;
+ while (++i) {
+ ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
+ if (ret) {
+ dev_err_ratelimited(dev, "Failed to dump sqc!\n");
+ break;
+ }
+ sqc = addr;
+
+ ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
+ qp->qp_id);
+ if (ret) {
+ dev_err_ratelimited(dev, "Failed to dump cqc!\n");
+ break;
+ }
+ cqc = addr + sizeof(struct qm_sqc);
+
+ if ((sqc->tail == cqc->tail) &&
+ (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
+ break;
+
+ if (i == MAX_WAIT_COUNTS) {
+			dev_err(dev, "Failed to empty queue %u!\n", qp->qp_id);
+ ret = -EBUSY;
+ break;
+ }
+
+ usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
}
- QP_INIT_BUF(qp, sqe, qm->sqe_size * QM_Q_DEPTH);
- QP_INIT_BUF(qp, cqe, sizeof(struct qm_cqe) * QM_Q_DEPTH);
+ qm_ctx_free(qm, size, addr, &dma_addr);
- dev_dbg(dev, "init qp buffer(v%d):\n"
- " sqe (%pK, %lx)\n"
- " cqe (%pK, %lx)\n",
- ver, qp->sqe, (unsigned long)qp->sqe_dma,
- qp->cqe, (unsigned long)qp->cqe_dma);
+ return ret;
+}
- ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
+static int qm_stop_qp_nolock(struct hisi_qp *qp)
+{
+ struct device *dev = &qp->qm->pdev->dev;
+ int ret;
+
+ /*
+	 * Stopping and releasing a qp during reset is allowed. If the qp is
+	 * stopped by the reset but still needs to be released afterwards,
+	 * clear the is_resetting flag so that this qp will not be
+	 * restarted after the reset completes.
+ */
+ if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
+ qp->is_resetting = false;
+ return 0;
+ }
+
+ if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
+ return -EPERM;
+
+ atomic_set(&qp->qp_status.flags, QP_STOP);
+
+ ret = qm_drain_qp(qp);
if (ret)
- return ret;
+ dev_err(dev, "Failed to drain out data for stopping!\n");
- dev_dbg(dev, "queue %d started\n", qp_id);
+ if (qp->qm->wq)
+ flush_workqueue(qp->qm->wq);
+ else
+ flush_work(&qp->qm->work);
+
+ dev_dbg(dev, "stop queue %u!", qp->qp_id);
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
/**
* hisi_qm_stop_qp() - Stop a qp in qm.
@@ -1336,27 +1955,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
*/
int hisi_qm_stop_qp(struct hisi_qp *qp)
{
- struct device *dev = &qp->qm->pdev->dev;
- int i = 0;
-
- /* it is stopped */
- if (test_bit(QP_STOP, &qp->qp_status.flags))
- return 0;
-
- while (atomic_read(&qp->qp_status.used)) {
- i++;
- msleep(20);
- if (i == 10) {
- dev_err(dev, "Cannot drain out data for stopping, Force to stop!\n");
- return 0;
- }
- }
-
- set_bit(QP_STOP, &qp->qp_status.flags);
+ int ret;
- dev_dbg(dev, "stop queue %u!", qp->qp_id);
+ down_write(&qp->qm->qps_lock);
+ ret = qm_stop_qp_nolock(qp);
+ up_write(&qp->qm->qps_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
@@ -1367,6 +1972,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
*
* This function will return -EBUSY if qp is currently full, and -EAGAIN
 * if the qm to which the qp belongs is resetting.
+ *
+ * Note: This function may run concurrently with qm_irq_thread and with an
+ * ACC reset. It has no race with qm_irq_thread. However, an ACC reset may
+ * happen during hisi_qp_send; for performance we take no lock here. As a
+ * result the current qm_db write may fail, or a sent sqe may never be
+ * received. The QM sync/async receive paths should handle such error
+ * sqes, and the ACC reset-done path should clear used sqes to 0.
*/
int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
@@ -1375,7 +1987,9 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
void *sqe = qm_get_avail_sqe(qp);
- if (unlikely(test_bit(QP_STOP, &qp->qp_status.flags))) {
+ if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
+ atomic_read(&qp->qm->status.flags) == QM_STOP ||
+ qp->is_resetting)) {
dev_info(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
return -EAGAIN;
}
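
Given the note above, callers of hisi_qp_send() must tolerate transient
failures. A hedged sketch of a caller (hypothetical helper, process
context assumed):

	/* Retry -EBUSY (queue full); surface -EAGAIN (stopped or resetting). */
	static int example_send(struct hisi_qp *qp, const void *msg)
	{
		int ret;

		do {
			ret = hisi_qp_send(qp, msg);
			if (ret == -EBUSY)
				usleep_range(10, 20);	/* back off, then retry */
		} while (ret == -EBUSY);

		return ret;
	}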
@@ -1397,12 +2011,13 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)
{
unsigned int val;
- if (qm->ver == QM_HW_V2) {
- writel(0x1, qm->io_base + QM_CACHE_WB_START);
- if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
- val, val & BIT(0), 10, 1000))
- dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
- }
+ if (qm->ver == QM_HW_V1)
+ return;
+
+ writel(0x1, qm->io_base + QM_CACHE_WB_START);
+ if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
+ val, val & BIT(0), 10, 1000))
+ dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
}
static void qm_qp_event_notifier(struct hisi_qp *qp)
@@ -1412,16 +2027,7 @@ static void qm_qp_event_notifier(struct hisi_qp *qp)
static int hisi_qm_get_available_instances(struct uacce_device *uacce)
{
- int i, ret;
- struct hisi_qm *qm = uacce->priv;
-
- read_lock(&qm->qps_lock);
- for (i = 0, ret = 0; i < qm->qp_num; i++)
- if (!qm->qp_array[i])
- ret++;
- read_unlock(&qm->qps_lock);
-
- return ret;
+ return hisi_qm_get_free_qp_num(uacce->priv);
}
static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
@@ -1468,12 +2074,12 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
switch (qfr->type) {
case UACCE_QFRT_MMIO:
- if (qm->ver == QM_HW_V2) {
- if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
- QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
+ if (qm->ver == QM_HW_V1) {
+ if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
return -EINVAL;
} else {
- if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
+ if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
+ QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
return -EINVAL;
}
@@ -1519,9 +2125,9 @@ static int qm_set_sqctype(struct uacce_queue *q, u16 type)
struct hisi_qm *qm = q->uacce->priv;
struct hisi_qp *qp = q->priv;
- write_lock(&qm->qps_lock);
+ down_write(&qm->qps_lock);
qp->alg_type = type;
- write_unlock(&qm->qps_lock);
+ up_write(&qm->qps_lock);
return 0;
}
@@ -1623,107 +2229,121 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
{
int ret;
- read_lock(&qm->qps_lock);
+ down_read(&qm->qps_lock);
ret = qm->qp_num - qm->qp_in_used;
- read_unlock(&qm->qps_lock);
+ up_read(&qm->qps_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
-/**
- * hisi_qm_init() - Initialize configures about qm.
- * @qm: The qm needing init.
- *
- * This function init qm, then we can call hisi_qm_start to put qm into work.
- */
-int hisi_qm_init(struct hisi_qm *qm)
+static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
{
- struct pci_dev *pdev = qm->pdev;
- struct device *dev = &pdev->dev;
- unsigned int num_vec;
- int ret;
+ struct device *dev = &qm->pdev->dev;
+ struct qm_dma *qdma;
+ int i;
- switch (qm->ver) {
- case QM_HW_V1:
- qm->ops = &qm_hw_ops_v1;
- break;
- case QM_HW_V2:
- qm->ops = &qm_hw_ops_v2;
- break;
- default:
- return -EINVAL;
+ for (i = num - 1; i >= 0; i--) {
+ qdma = &qm->qp_array[i].qdma;
+ dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
}
- ret = qm_alloc_uacce(qm);
- if (ret < 0)
- dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret);
+ kfree(qm->qp_array);
+}
- ret = pci_enable_device_mem(pdev);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to enable device mem!\n");
- goto err_remove_uacce;
- }
+static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
+{
+ struct device *dev = &qm->pdev->dev;
+ size_t off = qm->sqe_size * QM_Q_DEPTH;
+ struct hisi_qp *qp;
- ret = pci_request_mem_regions(pdev, qm->dev_name);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to request mem regions!\n");
- goto err_disable_pcidev;
- }
+ qp = &qm->qp_array[id];
+ qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
+ GFP_KERNEL);
+ if (!qp->qdma.va)
+ return -ENOMEM;
- qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
- qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
- qm->io_base = ioremap(qm->phys_base, qm->phys_size);
- if (!qm->io_base) {
- ret = -EIO;
- goto err_release_mem_regions;
- }
+ qp->sqe = qp->qdma.va;
+ qp->sqe_dma = qp->qdma.dma;
+ qp->cqe = qp->qdma.va + off;
+ qp->cqe_dma = qp->qdma.dma + off;
+ qp->qdma.size = dma_size;
+ qp->qm = qm;
+ qp->qp_id = id;
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (ret < 0)
- goto err_iounmap;
- pci_set_master(pdev);
+ return 0;
+}
- if (!qm->ops->get_irq_num) {
- ret = -EOPNOTSUPP;
- goto err_iounmap;
+static int hisi_qm_memory_init(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ size_t qp_dma_size, off = 0;
+ int i, ret = 0;
+
+#define QM_INIT_BUF(qm, type, num) do { \
+ (qm)->type = ((qm)->qdma.va + (off)); \
+ (qm)->type##_dma = (qm)->qdma.dma + (off); \
+ off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
+} while (0)
+
+ idr_init(&qm->qp_idr);
+ qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) +
+ QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
+ QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
+ QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
+ qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
+ GFP_ATOMIC);
+ dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
+ if (!qm->qdma.va)
+ return -ENOMEM;
+
+ QM_INIT_BUF(qm, eqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, sqc, qm->qp_num);
+ QM_INIT_BUF(qm, cqc, qm->qp_num);
+
+ qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
+ if (!qm->qp_array) {
+ ret = -ENOMEM;
+ goto err_alloc_qp_array;
}
- num_vec = qm->ops->get_irq_num(qm);
- ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
- if (ret < 0) {
- dev_err(dev, "Failed to enable MSI vectors!\n");
- goto err_iounmap;
+
+ /* one more page for device or qp statuses */
+ qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
+ sizeof(struct qm_cqe) * QM_Q_DEPTH;
+ qp_dma_size = PAGE_ALIGN(qp_dma_size);
+ for (i = 0; i < qm->qp_num; i++) {
+ ret = hisi_qp_memory_init(qm, qp_dma_size, i);
+ if (ret)
+ goto err_init_qp_mem;
+
+ dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size);
}
- ret = qm_irq_register(qm);
- if (ret)
- goto err_free_irq_vectors;
+ return ret;
- qm->qp_in_used = 0;
- mutex_init(&qm->mailbox_lock);
- rwlock_init(&qm->qps_lock);
- INIT_WORK(&qm->work, qm_work_process);
+err_init_qp_mem:
+ hisi_qp_memory_uninit(qm, i);
+err_alloc_qp_array:
+ dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
- dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf",
- qm->use_dma_api ? "dma api" : "iommu api");
+ return ret;
+}
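
QM_INIT_BUF() relies on token pasting, so it may help to see one
invocation written out; QM_INIT_BUF(qm, eqe, QM_Q_DEPTH) above expands,
in effect, to:

	qm->eqe = qm->qdma.va + off;
	qm->eqe_dma = qm->qdma.dma + off;
	off += QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH);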
- return 0;
+static void hisi_qm_pre_init(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
-err_free_irq_vectors:
- pci_free_irq_vectors(pdev);
-err_iounmap:
- iounmap(qm->io_base);
-err_release_mem_regions:
- pci_release_mem_regions(pdev);
-err_disable_pcidev:
- pci_disable_device(pdev);
-err_remove_uacce:
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
+ if (qm->ver == QM_HW_V1)
+ qm->ops = &qm_hw_ops_v1;
+ else
+ qm->ops = &qm_hw_ops_v2;
- return ret;
+ pci_set_drvdata(pdev, qm);
+ mutex_init(&qm->mailbox_lock);
+ init_rwsem(&qm->qps_lock);
+ qm->qp_in_used = 0;
}
-EXPORT_SYMBOL_GPL(hisi_qm_init);
/**
* hisi_qm_uninit() - Uninitialize qm.
@@ -1736,10 +2356,20 @@ void hisi_qm_uninit(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
+ down_write(&qm->qps_lock);
+
+ if (!qm_avail_state(qm, QM_CLOSE)) {
+ up_write(&qm->qps_lock);
+ return;
+ }
+
uacce_remove(qm->uacce);
qm->uacce = NULL;
- if (qm->use_dma_api && qm->qdma.va) {
+ hisi_qp_memory_uninit(qm, qm->qp_num);
+ idr_destroy(&qm->qp_idr);
+
+ if (qm->qdma.va) {
hisi_qm_cache_wb(qm);
dma_free_coherent(dev, qm->qdma.size,
qm->qdma.va, qm->qdma.dma);
@@ -1751,6 +2381,8 @@ void hisi_qm_uninit(struct hisi_qm *qm)
iounmap(qm->io_base);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
+
+ up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
@@ -1781,12 +2413,6 @@ int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
/**
- * hisi_qm_set_vft() - Set "virtual function table" for a qm.
- * @fun_num: Number of operated function.
- * @qm: The qm in which to set vft, alway in a PF.
- * @base: The base number of queue in vft.
- * @number: The number of queues in vft. 0 means invalid vft.
- *
 * This function is always called in the PF driver; it is used to assign queues
* among PF and VFs.
*
@@ -1794,7 +2420,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
* Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
* (VF function number 0x2)
*/
-int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
+static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
u32 number)
{
u32 max_q_num = qm->ctrl_qp_num;
@@ -1805,7 +2431,6 @@ int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_vft);
static void qm_init_eq_aeq_status(struct hisi_qm *qm)
{
@@ -1872,22 +2497,10 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
static int __hisi_qm_start(struct hisi_qm *qm)
{
- struct pci_dev *pdev = qm->pdev;
- struct device *dev = &pdev->dev;
- size_t off = 0;
int ret;
-#define QM_INIT_BUF(qm, type, num) do { \
- (qm)->type = ((qm)->qdma.va + (off)); \
- (qm)->type##_dma = (qm)->qdma.dma + (off); \
- off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
-} while (0)
-
WARN_ON(!qm->qdma.dma);
- if (qm->qp_num == 0)
- return -EINVAL;
-
if (qm->fun_type == QM_HW_PF) {
ret = qm_dev_mem_reset(qm);
if (ret)
@@ -1898,21 +2511,6 @@ static int __hisi_qm_start(struct hisi_qm *qm)
return ret;
}
- QM_INIT_BUF(qm, eqe, QM_Q_DEPTH);
- QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
- QM_INIT_BUF(qm, sqc, qm->qp_num);
- QM_INIT_BUF(qm, cqc, qm->qp_num);
-
- dev_dbg(dev, "init qm buffer:\n"
- " eqe (%pK, %lx)\n"
- " aeqe (%pK, %lx)\n"
- " sqc (%pK, %lx)\n"
- " cqc (%pK, %lx)\n",
- qm->eqe, (unsigned long)qm->eqe_dma,
- qm->aeqe, (unsigned long)qm->aeqe_dma,
- qm->sqc, (unsigned long)qm->sqc_dma,
- qm->cqc, (unsigned long)qm->cqc_dma);
-
ret = qm_eq_ctx_cfg(qm);
if (ret)
return ret;
@@ -1940,43 +2538,102 @@ static int __hisi_qm_start(struct hisi_qm *qm)
int hisi_qm_start(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
+ int ret = 0;
+
+ down_write(&qm->qps_lock);
+
+ if (!qm_avail_state(qm, QM_START)) {
+ up_write(&qm->qps_lock);
+ return -EPERM;
+ }
dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num);
if (!qm->qp_num) {
dev_err(dev, "qp_num should not be 0\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unlock;
}
- if (!qm->qp_bitmap) {
- qm->qp_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(qm->qp_num),
- sizeof(long), GFP_KERNEL);
- qm->qp_array = devm_kcalloc(dev, qm->qp_num,
- sizeof(struct hisi_qp *),
- GFP_KERNEL);
- if (!qm->qp_bitmap || !qm->qp_array)
- return -ENOMEM;
+ ret = __hisi_qm_start(qm);
+ if (!ret)
+ atomic_set(&qm->status.flags, QM_START);
+
+err_unlock:
+ up_write(&qm->qps_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_start);
+
+static int qm_restart(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct hisi_qp *qp;
+ int ret, i;
+
+ ret = hisi_qm_start(qm);
+ if (ret < 0)
+ return ret;
+
+ down_write(&qm->qps_lock);
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = &qm->qp_array[i];
+ if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
+ qp->is_resetting == true) {
+ ret = qm_start_qp_nolock(qp, 0);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start qp%d!\n", i);
+
+ up_write(&qm->qps_lock);
+ return ret;
+ }
+ qp->is_resetting = false;
+ }
}
+ up_write(&qm->qps_lock);
- if (!qm->use_dma_api) {
- dev_dbg(&qm->pdev->dev, "qm delay start\n");
- return 0;
- } else if (!qm->qdma.va) {
- qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
- QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
- qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size,
- &qm->qdma.dma, GFP_KERNEL);
- dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%zx)\n",
- qm->qdma.va, &qm->qdma.dma, qm->qdma.size);
- if (!qm->qdma.va)
- return -ENOMEM;
+ return 0;
+}
+
+/* Stop started qps in reset flow */
+static int qm_stop_started_qp(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct hisi_qp *qp;
+ int i, ret;
+
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = &qm->qp_array[i];
+ if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
+ qp->is_resetting = true;
+ ret = qm_stop_qp_nolock(qp);
+ if (ret < 0) {
+ dev_err(dev, "Failed to stop qp%d!\n", i);
+ return ret;
+ }
+ }
}
- return __hisi_qm_start(qm);
+ return 0;
+}
+
+/*
+ * This function clears the memory of all queues in a qm. An accelerator
+ * reset can use it to clear the queues.
+ */
+static void qm_clear_queues(struct hisi_qm *qm)
+{
+ struct hisi_qp *qp;
+ int i;
+
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = &qm->qp_array[i];
+ if (qp->is_resetting)
+ memset(qp->qdma.va, 0, qp->qdma.size);
+ }
+
+ memset(qm->qdma.va, 0, qm->qdma.size);
}
-EXPORT_SYMBOL_GPL(hisi_qm_start);
/**
* hisi_qm_stop() - Stop a qm.
@@ -1988,43 +2645,98 @@ EXPORT_SYMBOL_GPL(hisi_qm_start);
*/
int hisi_qm_stop(struct hisi_qm *qm)
{
- struct device *dev;
- struct hisi_qp *qp;
- int ret = 0, i;
+ struct device *dev = &qm->pdev->dev;
+ int ret = 0;
- if (!qm || !qm->pdev) {
- WARN_ON(1);
- return -EINVAL;
+ down_write(&qm->qps_lock);
+
+ if (!qm_avail_state(qm, QM_STOP)) {
+ ret = -EPERM;
+ goto err_unlock;
}
- dev = &qm->pdev->dev;
+ if (qm->status.stop_reason == QM_SOFT_RESET ||
+ qm->status.stop_reason == QM_FLR) {
+ ret = qm_stop_started_qp(qm);
+ if (ret < 0) {
+ dev_err(dev, "Failed to stop started qp!\n");
+ goto err_unlock;
+ }
+ }
/* Mask eq and aeq irq */
writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
- /* Stop all qps belong to this qm */
- for (i = 0; i < qm->qp_num; i++) {
- qp = qm->qp_array[i];
- if (qp) {
- ret = hisi_qm_stop_qp(qp);
- if (ret < 0) {
- dev_err(dev, "Failed to stop qp%d!\n", i);
- return -EBUSY;
- }
- }
- }
-
if (qm->fun_type == QM_HW_PF) {
ret = hisi_qm_set_vft(qm, 0, 0, 0);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "Failed to set vft!\n");
+ ret = -EBUSY;
+ goto err_unlock;
+ }
}
+ qm_clear_queues(qm);
+ atomic_set(&qm->status.flags, QM_STOP);
+
+err_unlock:
+ up_write(&qm->qps_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop);
+static ssize_t qm_status_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct hisi_qm *qm = filp->private_data;
+ char buf[QM_DBG_READ_LEN];
+ int val, cp_len, len;
+
+ if (*pos)
+ return 0;
+
+ if (count < QM_DBG_READ_LEN)
+ return -ENOSPC;
+
+ val = atomic_read(&qm->status.flags);
+ len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
+ if (!len)
+ return -EFAULT;
+
+ cp_len = copy_to_user(buffer, buf, len);
+ if (cp_len)
+ return -EFAULT;
+
+ return (*pos = len);
+}
+
+static const struct file_operations qm_status_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qm_status_read,
+};
+
+static int qm_debugfs_atomic64_set(void *data, u64 val)
+{
+ if (val)
+ return -EINVAL;
+
+ atomic64_set((atomic64_t *)data, 0);
+
+ return 0;
+}
+
+static int qm_debugfs_atomic64_get(void *data, u64 *val)
+{
+ *val = atomic64_read((atomic64_t *)data);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
+ qm_debugfs_atomic64_set, "%llu\n");
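
A short note on the semantics this attribute gives the dfx counter
files registered below:

	/*
	 * Each file wired to qm_atomic64_ops can be read for the current
	 * counter value; writing 0 clears it, and any nonzero write is
	 * rejected with -EINVAL (see qm_debugfs_atomic64_set() above).
	 */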
+
/**
* hisi_qm_debug_init() - Initialize qm related debugfs files.
* @qm: The qm for which we want to add debugfs files.
@@ -2033,7 +2745,9 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop);
*/
int hisi_qm_debug_init(struct hisi_qm *qm)
{
+ struct qm_dfx *dfx = &qm->debug.dfx;
struct dentry *qm_d;
+ void *data;
int i, ret;
qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
@@ -2047,7 +2761,20 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
goto failed_to_create;
}
- debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
+ debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
+
+ debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops);
+
+ debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
+ &qm_status_fops);
+ for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
+ data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
+ debugfs_create_file(qm_dfx_files[i].name,
+ 0644,
+ qm_d,
+ data,
+ &qm_atomic64_ops);
+ }
return 0;
@@ -2095,8 +2822,7 @@ static void qm_hw_error_init(struct hisi_qm *qm)
return;
}
- qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe,
- err_info->fe, err_info->msi);
+ qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
}
static void qm_hw_error_uninit(struct hisi_qm *qm)
@@ -2109,36 +2835,17 @@ static void qm_hw_error_uninit(struct hisi_qm *qm)
qm->ops->hw_error_uninit(qm);
}
-static pci_ers_result_t qm_hw_error_handle(struct hisi_qm *qm)
+static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_handle) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
- return PCI_ERS_RESULT_NONE;
+ return ACC_ERR_NONE;
}
return qm->ops->hw_error_handle(qm);
}
/**
- * hisi_qm_get_hw_version() - Get hardware version of a qm.
- * @pdev: The device which hardware version we want to get.
- *
- * This function gets the hardware version of a qm. Return QM_HW_UNKNOWN
- * if the hardware version is not supported.
- */
-enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev)
-{
- switch (pdev->revision) {
- case QM_HW_V1:
- case QM_HW_V2:
- return pdev->revision;
- default:
- return QM_HW_UNKNOWN;
- }
-}
-EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version);
-
-/**
* hisi_qm_dev_err_init() - Initialize device error configuration.
* @qm: The qm for which we want to do error initialization.
*
@@ -2299,34 +3006,163 @@ err:
}
EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
-static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm)
+static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
+{
+ u32 remain_q_num, q_num, i, j;
+ u32 q_base = qm->qp_num;
+ int ret;
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ remain_q_num = qm->ctrl_qp_num - qm->qp_num;
+
+ /* If the remaining queues are not enough, return an error. */
+ if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
+ return -EINVAL;
+
+ q_num = remain_q_num / num_vfs;
+ for (i = 1; i <= num_vfs; i++) {
+ if (i == num_vfs)
+ q_num += remain_q_num % num_vfs;
+ ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+ if (ret) {
+ for (j = i; j > 0; j--)
+ hisi_qm_set_vft(qm, j, 0, 0);
+ return ret;
+ }
+ q_base += q_num;
+ }
+
+ return 0;
+}
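
A worked example with hypothetical numbers: suppose ctrl_qp_num = 1024
and the PF keeps qp_num = 256, so remain_q_num = 768. With num_vfs = 5,
each VF is assigned q_num = 768 / 5 = 153 queues, and the last VF also
takes the remainder 768 % 5 = 3, i.e. 156 queues. VF1 starts at
q_base = 256, VF2 at 409, VF3 at 562, VF4 at 715, and VF5 at 868
(868 + 156 = 1024).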
+
+static int qm_clear_vft_config(struct hisi_qm *qm)
+{
+ int ret;
+ u32 i;
+
+ for (i = 1; i <= qm->vfs_num; i++) {
+ ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (ret)
+ return ret;
+ }
+ qm->vfs_num = 0;
+
+ return 0;
+}
+
+/**
+ * hisi_qm_sriov_enable() - enable virtual functions
+ * @pdev: the PCIe device
+ * @max_vfs: the number of virtual functions to enable
+ *
+ * Returns the number of VFs enabled. If VFs are already enabled, or if
+ * max_vfs is more than the total number of VFs the device can enable,
+ * enabling fails.
+ */
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int pre_existing_vfs, num_vfs, total_vfs, ret;
+
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+ pre_existing_vfs = pci_num_vf(pdev);
+ if (pre_existing_vfs) {
+ pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
+ pre_existing_vfs);
+ return 0;
+ }
+
+ num_vfs = min_t(int, max_vfs, total_vfs);
+ ret = qm_vf_q_assign(qm, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't assign queues for VF!\n");
+ return ret;
+ }
+
+ qm->vfs_num = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't enable VF!\n");
+ qm_clear_vft_config(qm);
+ return ret;
+ }
+
+ pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
+
+ return num_vfs;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
+
+/**
+ * hisi_qm_sriov_disable - disable virtual functions
+ * @pdev: the PCI device
+ *
+ * Return failure if there are VFs assigned already.
+ */
+int hisi_qm_sriov_disable(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+
+ if (pci_vfs_assigned(pdev)) {
+ pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
+ return -EPERM;
+ }
+
+ /* The driver's remove callback (e.g. in hpre_pci_driver) will free VF resources */
+ pci_disable_sriov(pdev);
+ return qm_clear_vft_config(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
+
+/**
+ * hisi_qm_sriov_configure - configure the number of VFs
+ * @pdev: The PCI device
+ * @num_vfs: The number of VFs need enabled
+ *
+ * Enable SR-IOV according to num_vfs, 0 means disable.
+ */
+int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs == 0)
+ return hisi_qm_sriov_disable(pdev);
+ else
+ return hisi_qm_sriov_enable(pdev, num_vfs);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
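
For context, a hedged sketch of how a driver using this helper might
wire it into its struct pci_driver; all names other than
hisi_qm_sriov_configure are hypothetical:

	static struct pci_driver example_acc_driver = {
		.name		 = "example_acc",	/* hypothetical */
		.id_table	 = example_acc_dev_ids,	/* hypothetical */
		.probe		 = example_acc_probe,	/* hypothetical */
		.remove		 = example_acc_remove,	/* hypothetical */
		.sriov_configure = hisi_qm_sriov_configure,
	};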
+
+static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
u32 err_sts;
if (!qm->err_ini->get_dev_hw_err_status) {
dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
- return PCI_ERS_RESULT_NONE;
+ return ACC_ERR_NONE;
}
/* get device hardware error status */
err_sts = qm->err_ini->get_dev_hw_err_status(qm);
if (err_sts) {
+ if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+
if (!qm->err_ini->log_dev_hw_err) {
dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
- return PCI_ERS_RESULT_NEED_RESET;
+ return ACC_ERR_NEED_RESET;
}
qm->err_ini->log_dev_hw_err(qm, err_sts);
- return PCI_ERS_RESULT_NEED_RESET;
+ return ACC_ERR_NEED_RESET;
}
- return PCI_ERS_RESULT_RECOVERED;
+ return ACC_ERR_RECOVERED;
}
-static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev)
+static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
{
- struct hisi_qm *qm = pci_get_drvdata(pdev);
- pci_ers_result_t qm_ret, dev_ret;
+ enum acc_err_result qm_ret, dev_ret;
/* log qm error */
qm_ret = qm_hw_error_handle(qm);
@@ -2334,9 +3170,9 @@ static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev)
/* log device error */
dev_ret = qm_dev_err_handle(qm);
- return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
- dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
- PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+ return (qm_ret == ACC_ERR_NEED_RESET ||
+ dev_ret == ACC_ERR_NEED_RESET) ?
+ ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
}
/**
@@ -2350,6 +3186,9 @@ static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev)
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ enum acc_err_result ret;
+
if (pdev->is_virtfn)
return PCI_ERS_RESULT_NONE;
@@ -2357,10 +3196,756 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- return qm_process_dev_error(pdev);
+ ret = qm_process_dev_error(qm);
+ if (ret == ACC_ERR_NEED_RESET)
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
+static int qm_get_hw_error_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
+}
+
+static int qm_check_req_recv(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+ u32 val;
+
+ writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
+ (val == ACC_VENDOR_ID_VALUE),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(&pdev->dev, "Fails to read QM reg!\n");
+ return ret;
+ }
+
+ writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
+ (val == PCI_VENDOR_ID_HUAWEI),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret)
+ dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n");
+
+ return ret;
+}
+
+static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u16 cmd;
+ int i;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (set)
+ cmd |= PCI_COMMAND_MEMORY;
+ else
+ cmd &= ~PCI_COMMAND_MEMORY;
+
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ for (i = 0; i < MAX_WAIT_COUNTS; i++) {
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u16 sriov_ctrl;
+ int pos;
+ int i;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
+ if (set)
+ sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
+ else
+ sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
+ pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
+
+ for (i = 0; i < MAX_WAIT_COUNTS; i++) {
+ pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
+ if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
+ ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int qm_set_msi(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+
+ if (set) {
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
+ 0);
+ } else {
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
+ ACC_PEH_MSI_DISABLE);
+ if (qm->err_status.is_qm_ecc_mbit ||
+ qm->err_status.is_dev_ecc_mbit)
+ return 0;
+
+ mdelay(1);
+ if (readl(qm->io_base + QM_PEH_DFX_INFO0))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int qm_vf_reset_prepare(struct hisi_qm *qm)
+{
+ struct hisi_qm_list *qm_list = qm->qm_list;
+ int stop_reason = qm->status.stop_reason;
+ struct pci_dev *pdev = qm->pdev;
+ struct pci_dev *virtfn;
+ struct hisi_qm *vf_qm;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(vf_qm, &qm_list->list, list) {
+ virtfn = vf_qm->pdev;
+ if (virtfn == pdev)
+ continue;
+
+ if (pci_physfn(virtfn) == pdev) {
+ vf_qm->status.stop_reason = stop_reason;
+ ret = hisi_qm_stop(vf_qm);
+ if (ret)
+ goto stop_fail;
+ }
+ }
+
+stop_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+static int qm_reset_prepare_ready(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ int delay = 0;
+
+ /* All reset requests need to be queued for processing */
+ while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return -EBUSY;
+ }
+
+ return 0;
+}
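
Note the escalating back-off in qm_reset_prepare_ready() above: the
n-th retry sleeps n milliseconds, so the total time waited before
giving up with -EBUSY grows quadratically in QM_RESET_WAIT_TIMEOUT.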
+
+static int qm_controller_reset_prepare(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset not ready!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_reset_prepare(qm);
+ if (ret) {
+ pci_err(pdev, "Fails to stop VFs!\n");
+ return ret;
+ }
+ }
+
+ qm->status.stop_reason = QM_SOFT_RESET;
+ ret = hisi_qm_stop(qm);
+ if (ret) {
+ pci_err(pdev, "Fails to stop QM!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
+{
+ u32 nfe_enb = 0;
+
+ if (!qm->err_status.is_dev_ecc_mbit &&
+ qm->err_status.is_qm_ecc_mbit &&
+ qm->err_ini->close_axi_master_ooo) {
+
+ qm->err_ini->close_axi_master_ooo(qm);
+
+ } else if (qm->err_status.is_dev_ecc_mbit &&
+ !qm->err_status.is_qm_ecc_mbit &&
+ !qm->err_ini->close_axi_master_ooo) {
+
+ nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+ qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
+ }
+}
+
+static int qm_soft_reset(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+ u32 val;
+
+ /* Ensure all doorbells and mailboxes have been received by QM */
+ ret = qm_check_req_recv(qm);
+ if (ret)
+ return ret;
+
+ if (qm->vfs_num) {
+ ret = qm_set_vf_mse(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable vf MSE bit.\n");
+ return ret;
+ }
+ }
+
+ ret = qm_set_msi(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable PEH MSI bit.\n");
+ return ret;
+ }
+
+ qm_dev_ecc_mbit_handle(qm);
+
+ /* OOO register set and check */
+ writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
+ qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+
+ /* If the bus is locked, reset the chip */
+ ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
+ val,
+ (val == ACC_MASTER_TRANS_RETURN_RW),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ pci_emerg(pdev, "Bus lock! Please reset system.\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable pf MSE bit.\n");
+ return ret;
+ }
+
+ /* The reset related sub-control registers are not in PCI BAR */
+ if (ACPI_HANDLE(&pdev->dev)) {
+ unsigned long long value = 0;
+ acpi_status s;
+
+ s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+ qm->err_ini->err_info.acpi_rst,
+ NULL, &value);
+ if (ACPI_FAILURE(s)) {
+ pci_err(pdev, "NO controller reset method!\n");
+ return -EIO;
+ }
+
+ if (value) {
+ pci_err(pdev, "Reset step %llu failed!\n", value);
+ return -EIO;
+ }
+ } else {
+ pci_err(pdev, "No reset method!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qm_vf_reset_done(struct hisi_qm *qm)
+{
+ struct hisi_qm_list *qm_list = qm->qm_list;
+ struct pci_dev *pdev = qm->pdev;
+ struct pci_dev *virtfn;
+ struct hisi_qm *vf_qm;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(vf_qm, &qm_list->list, list) {
+ virtfn = vf_qm->pdev;
+ if (virtfn == pdev)
+ continue;
+
+ if (pci_physfn(virtfn) == pdev) {
+ ret = qm_restart(vf_qm);
+ if (ret)
+ goto restart_fail;
+ }
+ }
+
+restart_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+static int qm_get_dev_err_status(struct hisi_qm *qm)
+{
+ return qm->err_ini->get_dev_hw_err_status(qm);
+}
+
+static int qm_dev_hw_init(struct hisi_qm *qm)
+{
+ return qm->err_ini->hw_init(qm);
+}
+
+static void qm_restart_prepare(struct hisi_qm *qm)
+{
+ u32 value;
+
+ if (!qm->err_status.is_qm_ecc_mbit &&
+ !qm->err_status.is_dev_ecc_mbit)
+ return;
+
+ /* temporarily close the OOO port used for PEH to write out MSI */
+ value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+ writel(value & ~qm->err_ini->err_info.msi_wr_port,
+ qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+
+ /* clear the dev ecc 2bit error source if present */
+ value = qm_get_dev_err_status(qm) &
+ qm->err_ini->err_info.ecc_2bits_mask;
+ if (value && qm->err_ini->clear_dev_hw_err_status)
+ qm->err_ini->clear_dev_hw_err_status(qm, value);
+
+ /* clear QM ecc mbit error source */
+ writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+
+ /* clear AM Reorder Buffer ecc mbit source */
+ writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
+
+ if (qm->err_ini->open_axi_master_ooo)
+ qm->err_ini->open_axi_master_ooo(qm);
+}
+
+static void qm_restart_done(struct hisi_qm *qm)
+{
+ u32 value;
+
+ if (!qm->err_status.is_qm_ecc_mbit &&
+ !qm->err_status.is_dev_ecc_mbit)
+ return;
+
+ /* open the OOO port for PEH to write out MSI */
+ value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+ value |= qm->err_ini->err_info.msi_wr_port;
+ writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+
+ qm->err_status.is_qm_ecc_mbit = false;
+ qm->err_status.is_dev_ecc_mbit = false;
+}
+
+static int qm_controller_reset_done(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_set_msi(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable PEH MSI bit!\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable pf MSE bit!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_set_vf_mse(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable vf MSE bit!\n");
+ return ret;
+ }
+ }
+
+ ret = qm_dev_hw_init(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to init device\n");
+ return ret;
+ }
+
+ qm_restart_prepare(qm);
+
+ ret = qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_q_assign(qm, qm->vfs_num);
+ if (ret) {
+ pci_err(pdev, "Failed to assign queue!\n");
+ return ret;
+ }
+ }
+
+ ret = qm_vf_reset_done(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start VFs!\n");
+ return -EPERM;
+ }
+
+ hisi_qm_dev_err_init(qm);
+ qm_restart_done(qm);
+
+ clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
+
+ return 0;
+}
+
+static int qm_controller_reset(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ pci_info(pdev, "Controller resetting...\n");
+
+ ret = qm_controller_reset_prepare(qm);
+ if (ret)
+ return ret;
+
+ ret = qm_soft_reset(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qm_controller_reset_done(qm);
+ if (ret)
+ return ret;
+
+ pci_info(pdev, "Controller reset complete\n");
+
+ return 0;
+}
+
+/**
+ * hisi_qm_dev_slot_reset() - slot reset
+ * @pdev: the PCIe device
+ *
+ * This function offers the QM-related PCIe device reset interface. Drivers
+ * which use QM can use this function as slot_reset in their struct
+ * pci_error_handlers.
+ */
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ pci_aer_clear_nonfatal_status(pdev);
+
+ /* reset pcie device controller */
+ ret = qm_controller_reset(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
+
+/* check whether the interrupt is an ecc-mbit error */
+static int qm_check_dev_error(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT;
+ if (ret)
+ return ret;
+
+ return (qm_get_dev_err_status(qm) &
+ qm->err_ini->err_info.ecc_2bits_mask);
+}
+
+void hisi_qm_reset_prepare(struct pci_dev *pdev)
+{
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ u32 delay = 0;
+ int ret;
+
+ hisi_qm_dev_err_uninit(pf_qm);
+
+ /*
+ * Check whether an ECC mbit error has occurred. If so, wait for the
+ * soft reset to fix it.
+ */
+ while (qm_check_dev_error(pf_qm)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return;
+ }
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ pci_err(pdev, "FLR not ready!\n");
+ return;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_reset_prepare(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
+ ret);
+ return;
+ }
+ }
+
+ ret = hisi_qm_stop(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
+ return;
+ }
+
+ pci_info(pdev, "FLR resetting...\n");
+}
+EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
+
+static bool qm_flr_reset_complete(struct pci_dev *pdev)
+{
+ struct pci_dev *pf_pdev = pci_physfn(pdev);
+ struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
+ u32 id;
+
+ pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
+ if (id == QM_PCI_COMMAND_INVALID) {
+ pci_err(pdev, "Device can not be used!\n");
+ return false;
+ }
+
+ clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
+
+ return true;
+}
+
+void hisi_qm_reset_done(struct pci_dev *pdev)
+{
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ hisi_qm_dev_err_init(pf_qm);
+
+ ret = qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
+ goto flr_done;
+ }
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = qm_dev_hw_init(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
+ goto flr_done;
+ }
+
+ if (!qm->vfs_num)
+ goto flr_done;
+
+ ret = qm_vf_q_assign(qm, qm->vfs_num);
+ if (ret) {
+ pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret);
+ goto flr_done;
+ }
+
+ ret = qm_vf_reset_done(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret);
+ goto flr_done;
+ }
+ }
+
+flr_done:
+ if (qm_flr_reset_complete(pdev))
+ pci_info(pdev, "FLR reset complete\n");
+}
+EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
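
Together with hisi_qm_dev_err_detected() and hisi_qm_dev_slot_reset()
above, a QM-based driver can delegate its whole PCI error path; a
hedged sketch (the structure name is hypothetical):

	static const struct pci_error_handlers example_acc_err_handler = {
		.error_detected	= hisi_qm_dev_err_detected,
		.slot_reset	= hisi_qm_dev_slot_reset,
		.reset_prepare	= hisi_qm_reset_prepare,
		.reset_done	= hisi_qm_reset_done,
	};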
+
+static irqreturn_t qm_abnormal_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = data;
+ enum acc_err_result ret;
+
+ atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
+ ret = qm_process_dev_error(qm);
+ if (ret == ACC_ERR_NEED_RESET)
+ schedule_work(&qm->rst_work);
+
+ return IRQ_HANDLED;
+}
+
+static int qm_irq_register(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
+ qm_irq, IRQF_SHARED, qm->dev_name, qm);
+ if (ret)
+ return ret;
+
+ if (qm->ver != QM_HW_V1) {
+ ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
+ qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
+ if (ret)
+ goto err_aeq_irq;
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = request_irq(pci_irq_vector(pdev,
+ QM_ABNORMAL_EVENT_IRQ_VECTOR),
+ qm_abnormal_irq, IRQF_SHARED,
+ qm->dev_name, qm);
+ if (ret)
+ goto err_abnormal_irq;
+ }
+ }
+
+ return 0;
+
+err_abnormal_irq:
+ free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
+err_aeq_irq:
+ free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
+ return ret;
+}
+
+static void hisi_qm_controller_reset(struct work_struct *rst_work)
+{
+ struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
+ int ret;
+
+ /* reset pcie device controller */
+ ret = qm_controller_reset(qm);
+ if (ret)
+ dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
+
+}
+
+/**
+ * hisi_qm_init() - Initialize configures about qm.
+ * @qm: The qm needing init.
+ *
+ * This function initializes the qm; afterwards call hisi_qm_start() to put it to work.
+ */
+int hisi_qm_init(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct device *dev = &pdev->dev;
+ unsigned int num_vec;
+ int ret;
+
+ hisi_qm_pre_init(qm);
+
+ ret = qm_alloc_uacce(qm);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret);
+
+ ret = pci_enable_device_mem(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to enable device mem!\n");
+ goto err_remove_uacce;
+ }
+
+ ret = pci_request_mem_regions(pdev, qm->dev_name);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request mem regions!\n");
+ goto err_disable_pcidev;
+ }
+
+ qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
+ qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
+ qm->io_base = ioremap(qm->phys_base, qm->phys_size);
+ if (!qm->io_base) {
+ ret = -EIO;
+ goto err_release_mem_regions;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret < 0)
+ goto err_iounmap;
+ pci_set_master(pdev);
+
+ if (!qm->ops->get_irq_num) {
+ ret = -EOPNOTSUPP;
+ goto err_iounmap;
+ }
+ num_vec = qm->ops->get_irq_num(qm);
+ ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable MSI vectors!\n");
+ goto err_iounmap;
+ }
+
+ ret = qm_irq_register(qm);
+ if (ret)
+ goto err_free_irq_vectors;
+
+ if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
+ /* v2 and later hardware support getting the vft by mailbox */
+ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ if (ret)
+ goto err_irq_unregister;
+ }
+
+ ret = hisi_qm_memory_init(qm);
+ if (ret)
+ goto err_irq_unregister;
+
+ INIT_WORK(&qm->work, qm_work_process);
+ if (qm->fun_type == QM_HW_PF)
+ INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
+
+ atomic_set(&qm->status.flags, QM_INIT);
+
+ return 0;
+
+err_irq_unregister:
+ qm_irq_unregister(qm);
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+err_iounmap:
+ iounmap(qm->io_base);
+err_release_mem_regions:
+ pci_release_mem_regions(pdev);
+err_disable_pcidev:
+ pci_disable_device(pdev);
+err_remove_uacce:
+ uacce_remove(qm->uacce);
+ qm->uacce = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_init);
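
To make the intended call order concrete, a condensed, hypothetical
probe built on the API above; the engine-specific values are
placeholders, and a real driver would also set ctrl_qp_num and err_ini
and run its engine init before starting:

	static int example_acc_probe(struct pci_dev *pdev,
				     const struct pci_device_id *id)
	{
		struct hisi_qm *qm;
		int ret;

		qm = devm_kzalloc(&pdev->dev, sizeof(*qm), GFP_KERNEL);
		if (!qm)
			return -ENOMEM;

		qm->pdev = pdev;
		qm->ver = pdev->revision;
		qm->sqe_size = 128;		/* placeholder */
		qm->dev_name = "example_acc";	/* placeholder */
		qm->fun_type = QM_HW_PF;	/* PF assumed for brevity */
		qm->qp_num = 64;		/* placeholder */

		ret = hisi_qm_init(qm);		/* -> QM_INIT */
		if (ret)
			return ret;

		ret = hisi_qm_start(qm);	/* QM_INIT -> QM_START */
		if (ret)
			hisi_qm_uninit(qm);

		return ret;
	}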
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index ec5b6f48db6c..0a351de8d838 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -8,6 +8,10 @@
#include <linux/module.h>
#include <linux/pci.h>
+#define QM_QNUM_V1 4096
+#define QM_QNUM_V2 1024
+#define QM_MAX_VFS_NUM_V2 63
+
/* qm user domain */
#define QM_ARUSER_M_CFG_1 0x100088
#define AXUSER_SNOOP_ENABLE BIT(30)
@@ -70,7 +74,7 @@
#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
- QM_OF_FIFO_OF)
+ QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID)
#define QM_BASE_CE QM_ECC_1BIT
#define QM_Q_DEPTH 1024
@@ -80,14 +84,31 @@
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
+enum qm_stop_reason {
+ QM_NORMAL,
+ QM_SOFT_RESET,
+ QM_FLR,
+};
+
+enum qm_state {
+ QM_INIT = 0,
+ QM_START,
+ QM_CLOSE,
+ QM_STOP,
+};
+
enum qp_state {
+ QP_INIT = 1,
+ QP_START,
QP_STOP,
+ QP_CLOSE,
};
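
The transitions exercised in qm.c can be summarized as follows: a qm
moves QM_INIT -> QM_START -> QM_STOP -> QM_CLOSE, and a qp moves
QP_INIT -> QP_START -> QP_STOP -> QP_CLOSE, with qm_avail_state() and
qm_qp_avail_state() gating each transition.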
enum qm_hw_ver {
QM_HW_UNKNOWN = -1,
QM_HW_V1 = 0x20,
QM_HW_V2 = 0x21,
+ QM_HW_V3 = 0x30,
};
enum qm_fun_type {
@@ -101,6 +122,14 @@ enum qm_debug_file {
DEBUG_FILE_NUM,
};
+struct qm_dfx {
+ atomic64_t err_irq_cnt;
+ atomic64_t aeq_irq_cnt;
+ atomic64_t abnormal_irq_cnt;
+ atomic64_t create_qp_err_cnt;
+ atomic64_t mb_err_cnt;
+};
+
struct debugfs_file {
enum qm_debug_file index;
struct mutex lock;
@@ -109,6 +138,9 @@ struct debugfs_file {
struct qm_debug {
u32 curr_qm_qp_num;
+ u32 sqe_mask_offset;
+ u32 sqe_mask_len;
+ struct qm_dfx dfx;
struct dentry *debug_root;
struct dentry *qm_d;
struct debugfs_file files[DEBUG_FILE_NUM];
@@ -125,22 +157,34 @@ struct hisi_qm_status {
bool eqc_phase;
u32 aeq_head;
bool aeqc_phase;
- unsigned long flags;
+ atomic_t flags;
+ int stop_reason;
};
struct hisi_qm;
struct hisi_qm_err_info {
+ char *acpi_rst;
+ u32 msi_wr_port;
+ u32 ecc_2bits_mask;
u32 ce;
u32 nfe;
u32 fe;
- u32 msi;
+};
+
+struct hisi_qm_err_status {
+ u32 is_qm_ecc_mbit;
+ u32 is_dev_ecc_mbit;
};
struct hisi_qm_err_ini {
+ int (*hw_init)(struct hisi_qm *qm);
void (*hw_err_enable)(struct hisi_qm *qm);
void (*hw_err_disable)(struct hisi_qm *qm);
u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
+ void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
+ void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ void (*close_axi_master_ooo)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
struct hisi_qm_err_info err_info;
};
@@ -161,7 +205,9 @@ struct hisi_qm {
u32 qp_num;
u32 qp_in_used;
u32 ctrl_qp_num;
+ u32 vfs_num;
struct list_head list;
+ struct hisi_qm_list *qm_list;
struct qm_dma qdma;
struct qm_sqc *sqc;
@@ -175,10 +221,12 @@ struct hisi_qm {
struct hisi_qm_status status;
const struct hisi_qm_err_ini *err_ini;
+ struct hisi_qm_err_status err_status;
+ unsigned long reset_flag;
- rwlock_t qps_lock;
- unsigned long *qp_bitmap;
- struct hisi_qp **qp_array;
+ struct rw_semaphore qps_lock;
+ struct idr qp_idr;
+ struct hisi_qp *qp_array;
struct mutex mailbox_lock;
@@ -187,13 +235,12 @@ struct hisi_qm {
struct qm_debug debug;
u32 error_mask;
- u32 msi_mask;
struct workqueue_struct *wq;
struct work_struct work;
+ struct work_struct rst_work;
const char *algs;
- bool use_dma_api;
bool use_sva;
resource_size_t phys_base;
resource_size_t phys_size;
@@ -205,7 +252,7 @@ struct hisi_qp_status {
u16 sq_tail;
u16 cq_head;
bool cqc_phase;
- unsigned long flags;
+ atomic_t flags;
};
struct hisi_qp_ops {
@@ -230,10 +277,58 @@ struct hisi_qp {
void (*event_cb)(struct hisi_qp *qp);
struct hisi_qm *qm;
+ bool is_resetting;
u16 pasid;
struct uacce_queue *uacce_q;
};
+static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
+ device, NULL);
+ u32 n, q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ if (!pdev) {
+ q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %d\n",
+ q_num);
+ } else {
+ if (pdev->revision == QM_HW_V1)
+ q_num = QM_QNUM_V1;
+ else
+ q_num = QM_QNUM_V2;
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || !n || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret < 0)
+ return ret;
+
+ if (n > QM_MAX_VFS_NUM_V2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
INIT_LIST_HEAD(&qm_list->list);
@@ -267,14 +362,19 @@ void hisi_qm_release_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
-int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
int hisi_qm_debug_init(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
+int hisi_qm_sriov_disable(struct pci_dev *pdev);
+int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
pci_channel_state_t state);
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
+void hisi_qm_reset_prepare(struct pci_dev *pdev);
+void hisi_qm_reset_done(struct pci_dev *pdev);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 3598fa17beb2..7b64aca704d6 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -160,6 +160,10 @@ struct sec_debug_file {
struct sec_dfx {
atomic64_t send_cnt;
atomic64_t recv_cnt;
+ atomic64_t send_busy_cnt;
+ atomic64_t err_bd_cnt;
+ atomic64_t invalid_req_cnt;
+ atomic64_t done_flag_cnt;
};
struct sec_debug {
@@ -172,7 +176,6 @@ struct sec_dev {
struct sec_debug debug;
u32 ctx_q_num;
bool iommu_used;
- u32 num_vfs;
unsigned long status;
};
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 7f1c6a31b82f..64614a9bdf21 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -148,6 +148,7 @@ static int sec_aead_verify(struct sec_req *req)
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
struct sec_sqe *bd = resp;
struct sec_ctx *ctx;
struct sec_req *req;
@@ -157,11 +158,16 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
type = bd->type_cipher_auth & SEC_TYPE_MASK;
if (unlikely(type != SEC_BD_TYPE2)) {
+ atomic64_inc(&dfx->err_bd_cnt);
pr_err("err bd type [%d]\n", type);
return;
}
req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ if (unlikely(!req)) {
+ atomic64_inc(&dfx->invalid_req_cnt);
+ return;
+ }
req->err_type = bd->type2.error_type;
ctx = req->ctx;
done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
@@ -174,12 +180,13 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
"err_type[%d],done[%d],flag[%d]\n",
req->err_type, done, flag);
err = -EIO;
+ atomic64_inc(&dfx->done_flag_cnt);
}
if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
err = sec_aead_verify(req);
- atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
+ atomic64_inc(&dfx->recv_cnt);
ctx->req_op->buf_unmap(ctx, req);
@@ -200,10 +207,12 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
return -ENOBUFS;
if (!ret) {
- if (req->fake_busy)
+ if (req->fake_busy) {
+ atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
ret = -EBUSY;
- else
+ } else {
ret = -EINPROGRESS;
+ }
}
return ret;
@@ -832,7 +841,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
struct crypto_authenc_keys *keys)
{
struct crypto_shash *hash_tfm = ctx->hash_tfm;
- SHASH_DESC_ON_STACK(shash, hash_tfm);
int blocksize, ret;
if (!keys->authkeylen) {
@@ -842,8 +850,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
blocksize = crypto_shash_blocksize(hash_tfm);
if (keys->authkeylen > blocksize) {
- ret = crypto_shash_digest(shash, keys->authkey,
- keys->authkeylen, ctx->a_key);
+ ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
+ keys->authkeylen, ctx->a_key);
if (ret) {
pr_err("hisi_sec2: aead auth digest error!\n");
return -EINVAL;
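
crypto_shash_tfm_digest() folds the on-stack descriptor handling into
one call; based on its upstream definition, the pattern it replaces in
this hunk amounts to the following sketch (tfm is the struct
crypto_shash in use):

	SHASH_DESC_ON_STACK(desc, tfm);
	desc->tfm = tfm;
	ret = crypto_shash_digest(desc, data, len, out);
	shash_desc_zero(desc);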
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 1f54ebe164b6..a4cb58b54b25 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -80,6 +80,9 @@
#define SEC_VF_CNT_MASK 0xffffffc0
#define SEC_DBGFS_VAL_MAX_LEN 20
+#define SEC_SQE_MASK_OFFSET 64
+#define SEC_SQE_MASK_LEN 48
+
#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
@@ -88,6 +91,11 @@ struct sec_hw_error {
const char *msg;
};
+struct sec_dfx_item {
+ const char *name;
+ u32 offset;
+};
+
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static struct hisi_qm_list sec_devices;
@@ -110,7 +118,16 @@ static const char * const sec_dbg_file_name[] = {
[SEC_CLEAR_ENABLE] = "clear_enable",
};
-static struct debugfs_reg32 sec_dfx_regs[] = {
+static struct sec_dfx_item sec_dfx_labels[] = {
+ {"send_cnt", offsetof(struct sec_dfx, send_cnt)},
+ {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
+ {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
+ {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
+ {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
+ {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
+};
+
+static const struct debugfs_reg32 sec_dfx_regs[] = {
{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
{"SEC_SAA_EN ", 0x301270},
{"SEC_BD_LATENCY_MIN ", 0x301600},
@@ -136,45 +153,14 @@ static struct debugfs_reg32 sec_dfx_regs[] = {
static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev;
- u32 n, q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- SEC_PF_PCI_DEVICE_ID, NULL);
- if (!pdev) {
- q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2);
- pr_info("No device, suppose queue number is %d!\n", q_num);
- } else {
- rev_id = pdev->revision;
-
- switch (rev_id) {
- case QM_HW_V1:
- q_num = SEC_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = SEC_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret || !n || n > q_num)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
}
static const struct kernel_param_ops sec_pf_q_num_ops = {
.set = sec_pf_q_num_set,
.get = param_get_int,
};
+
static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
@@ -207,6 +193,15 @@ static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
+ .get = param_get_int,
+};
+
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
+
void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
hisi_qm_free_qps(qps, qp_num);
@@ -240,9 +235,8 @@ static const struct pci_device_id sec_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);
-static u8 sec_get_endian(struct sec_dev *sec)
+static u8 sec_get_endian(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
u32 reg;
/*
@@ -270,9 +264,8 @@ static u8 sec_get_endian(struct sec_dev *sec)
return SEC_64BE;
}
-static int sec_engine_init(struct sec_dev *sec)
+static int sec_engine_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
int ret;
u32 reg;
@@ -315,7 +308,7 @@ static int sec_engine_init(struct sec_dev *sec)
/* config endian */
reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
- reg |= sec_get_endian(sec);
+ reg |= sec_get_endian(qm);
writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
/* Enable sm4 xts mode multiple iv */
@@ -325,10 +318,8 @@ static int sec_engine_init(struct sec_dev *sec)
return 0;
}
-static int sec_set_user_domain_and_cache(struct sec_dev *sec)
+static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
-
/* qm user domain */
writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
@@ -349,7 +340,7 @@ static int sec_set_user_domain_and_cache(struct sec_dev *sec)
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);
- return sec_engine_init(sec);
+ return sec_engine_init(qm);
}
/* sec_debug_regs_clear() - clear the sec debug regs */
@@ -424,23 +415,22 @@ static u32 sec_current_qm_read(struct sec_debug_file *file)
static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
struct hisi_qm *qm = file->qm;
- struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
u32 vfq_num;
u32 tmp;
- if (val > sec->num_vfs)
+ if (val > qm->vfs_num)
return -EINVAL;
/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
if (!val) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / sec->num_vfs;
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
- if (val == sec->num_vfs)
+ if (val == qm->vfs_num)
qm->debug.curr_qm_qp_num =
qm->ctrl_qp_num - qm->qp_num -
- (sec->num_vfs - 1) * vfq_num;
+ (qm->vfs_num - 1) * vfq_num;
else
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -570,10 +560,22 @@ static const struct file_operations sec_dbg_fops = {
static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
*val = atomic64_read((atomic64_t *)data);
+
+ return 0;
+}
+
+static int sec_debugfs_atomic64_set(void *data, u64 val)
+{
+ if (val)
+ return -EINVAL;
+
+ atomic64_set((atomic64_t *)data, 0);
+
return 0;
}
+
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
- NULL, "%lld\n");
+ sec_debugfs_atomic64_set, "%lld\n");
static int sec_core_debug_init(struct sec_dev *sec)
{
@@ -582,6 +584,7 @@ static int sec_core_debug_init(struct sec_dev *sec)
struct sec_dfx *dfx = &sec->debug.dfx;
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
+ int i;
tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);
@@ -593,13 +596,15 @@ static int sec_core_debug_init(struct sec_dev *sec)
regset->nregs = ARRAY_SIZE(sec_dfx_regs);
regset->base = qm->io_base;
- debugfs_create_regset32("regs", 0444, tmp_d, regset);
-
- debugfs_create_file("send_cnt", 0444, tmp_d,
- &dfx->send_cnt, &sec_atomic64_ops);
+ if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
- debugfs_create_file("recv_cnt", 0444, tmp_d,
- &dfx->recv_cnt, &sec_atomic64_ops);
+ for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
+ atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
+ sec_dfx_labels[i].offset);
+ debugfs_create_file(sec_dfx_labels[i].name, 0644,
+ tmp_d, data, &sec_atomic64_ops);
+ }
return 0;
}
@@ -630,6 +635,9 @@ static int sec_debugfs_init(struct sec_dev *sec)
qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
sec_debugfs_root);
+
+ qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
ret = hisi_qm_debug_init(qm);
if (ret)
goto failed_to_create;
@@ -675,8 +683,6 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
}
errs++;
}
-
- writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}
static u32 sec_get_hw_err_status(struct hisi_qm *qm)
@@ -684,17 +690,36 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
return readl(qm->io_base + SEC_CORE_INT_STATUS);
}
+static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
+}
+
+static void sec_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+}
+
static const struct hisi_qm_err_ini sec_err_ini = {
+ .hw_init = sec_set_user_domain_and_cache,
.hw_err_enable = sec_hw_error_enable,
.hw_err_disable = sec_hw_error_disable,
.get_dev_hw_err_status = sec_get_hw_err_status,
+ .clear_dev_hw_err_status = sec_clear_hw_err_status,
.log_dev_hw_err = sec_log_hw_error,
+ .open_axi_master_ooo = sec_open_axi_master_ooo,
.err_info = {
.ce = QM_BASE_CE,
.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
- .msi = QM_DB_RANDOM_INVALID,
+ .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
+ .msi_wr_port = BIT(0),
+ .acpi_rst = "SRST",
}
};
@@ -703,22 +728,14 @@ static int sec_pf_probe_init(struct sec_dev *sec)
struct hisi_qm *qm = &sec->qm;
int ret;
- switch (qm->ver) {
- case QM_HW_V1:
+ if (qm->ver == QM_HW_V1)
qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
- break;
-
- case QM_HW_V2:
+ else
qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
- break;
-
- default:
- return -EINVAL;
- }
qm->err_ini = &sec_err_ini;
- ret = sec_set_user_domain_and_cache(sec);
+ ret = sec_set_user_domain_and_cache(qm);
if (ret)
return ret;
@@ -730,32 +747,30 @@ static int sec_pf_probe_init(struct sec_dev *sec)
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -ENODEV;
+ int ret;
qm->pdev = pdev;
- qm->ver = rev_id;
-
+ qm->ver = pdev->revision;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
+
qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
QM_HW_PF : QM_HW_VF;
- qm->use_dma_api = true;
-
- return hisi_qm_init(qm);
-}
-
-static void sec_qm_uninit(struct hisi_qm *qm)
-{
- hisi_qm_uninit(qm);
-}
-
-static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
-{
- int ret;
+ if (qm->fun_type == QM_HW_PF) {
+ qm->qp_base = SEC_PF_DEF_Q_BASE;
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ qm->qm_list = &sec_devices;
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ /*
+	 * There is no way to get the QM configuration from within a VM
+	 * on v1 hardware, so force the PF to use SEC_PF_DEF_Q_NUM and
+	 * allow only one VF on v1 hardware.
+	 * v2 hardware has no such problem.
+ */
+ qm->qp_base = SEC_PF_DEF_Q_NUM;
+ qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
+ }
/*
	 * WQ_HIGHPRI: SEC requests must be handled with low latency,
@@ -763,47 +778,38 @@ static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
	 * WQ_UNBOUND: SEC tasks are likely to be long-running,
	 * CPU-intensive workloads.
*/
- qm->wq = alloc_workqueue("%s", WQ_HIGHPRI |
- WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(),
- pci_name(qm->pdev));
+ qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
+ WQ_UNBOUND, num_online_cpus(),
+ pci_name(qm->pdev));
if (!qm->wq) {
pci_err(qm->pdev, "fail to alloc workqueue\n");
return -ENOMEM;
}
- if (qm->fun_type == QM_HW_PF) {
- qm->qp_base = SEC_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
+ ret = hisi_qm_init(qm);
+ if (ret)
+ destroy_workqueue(qm->wq);
+
+ return ret;
+}
+static void sec_qm_uninit(struct hisi_qm *qm)
+{
+ hisi_qm_uninit(qm);
+}
+
+static int sec_probe_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ int ret;
+
+ if (qm->fun_type == QM_HW_PF) {
ret = sec_pf_probe_init(sec);
if (ret)
- goto err_probe_uninit;
- } else if (qm->fun_type == QM_HW_VF) {
- /*
- * have no way to get qm configure in VM in v1 hardware,
- * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force
- * to trigger only one VF in v1 hardware.
- * v2 hardware has no such problem.
- */
- if (qm->ver == QM_HW_V1) {
- qm->qp_base = SEC_PF_DEF_Q_NUM;
- qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
- } else if (qm->ver == QM_HW_V2) {
- /* v2 starts to support get vft by mailbox */
- ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- if (ret)
- goto err_probe_uninit;
- }
- } else {
- ret = -ENODEV;
- goto err_probe_uninit;
+ return ret;
}
return 0;
-err_probe_uninit:
- destroy_workqueue(qm->wq);
- return ret;
}
static void sec_probe_uninit(struct hisi_qm *qm)
@@ -840,20 +846,17 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!sec)
return -ENOMEM;
- pci_set_drvdata(pdev, sec);
-
- sec->ctx_q_num = ctx_q_num;
- sec_iommu_used_check(sec);
-
qm = &sec->qm;
-
ret = sec_qm_init(qm, pdev);
if (ret) {
- pci_err(pdev, "Failed to pre init qm!\n");
+ pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
return ret;
}
- ret = sec_probe_init(qm, sec);
+ sec->ctx_q_num = ctx_q_num;
+ sec_iommu_used_check(sec);
+
+ ret = sec_probe_init(sec);
if (ret) {
pci_err(pdev, "Failed to probe!\n");
goto err_qm_uninit;
@@ -877,8 +880,17 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_remove_from_list;
}
+ if (qm->fun_type == QM_HW_PF && vfs_num) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_crypto_unregister;
+ }
+
return 0;
+err_crypto_unregister:
+ sec_unregister_from_crypto();
+
err_remove_from_list:
hisi_qm_del_from_list(qm, &sec_devices);
sec_debugfs_exit(sec);
@@ -893,110 +905,6 @@ err_qm_uninit:
return ret;
}
-/* now we only support equal assignment */
-static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs)
-{
- struct hisi_qm *qm = &sec->qm;
- u32 qp_num = qm->qp_num;
- u32 q_base = qp_num;
- u32 q_num, remain_q_num;
- int i, j, ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_qp_num - qp_num;
- q_num = remain_q_num / num_vfs;
-
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
- if (ret) {
- for (j = i; j > 0; j--)
- hisi_qm_set_vft(qm, j, 0, 0);
- return ret;
- }
- q_base += q_num;
- }
-
- return 0;
-}
-
-static int sec_clear_vft_config(struct sec_dev *sec)
-{
- struct hisi_qm *qm = &sec->qm;
- u32 num_vfs = sec->num_vfs;
- int ret;
- u32 i;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
-
- sec->num_vfs = 0;
-
- return 0;
-}
-
-static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
- struct sec_dev *sec = pci_get_drvdata(pdev);
- int pre_existing_vfs, ret;
- u32 num_vfs;
-
- pre_existing_vfs = pci_num_vf(pdev);
-
- if (pre_existing_vfs) {
- pci_err(pdev, "Can't enable VF. Please disable at first!\n");
- return 0;
- }
-
- num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);
-
- ret = sec_vf_q_assign(sec, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- sec->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't enable VF!\n");
- sec_clear_vft_config(sec);
- return ret;
- }
-
- return num_vfs;
-}
-
-static int sec_sriov_disable(struct pci_dev *pdev)
-{
- struct sec_dev *sec = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- pci_err(pdev, "Can't disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- /* remove in sec_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
-
- return sec_clear_vft_config(sec);
-}
-
-static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- if (num_vfs)
- return sec_sriov_enable(pdev, num_vfs);
- else
- return sec_sriov_disable(pdev);
-}
-
static void sec_remove(struct pci_dev *pdev)
{
struct sec_dev *sec = pci_get_drvdata(pdev);
@@ -1006,8 +914,8 @@ static void sec_remove(struct pci_dev *pdev)
hisi_qm_del_from_list(qm, &sec_devices);
- if (qm->fun_type == QM_HW_PF && sec->num_vfs)
- (void)sec_sriov_disable(pdev);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ hisi_qm_sriov_disable(pdev);
sec_debugfs_exit(sec);
@@ -1023,6 +931,9 @@ static void sec_remove(struct pci_dev *pdev)
static const struct pci_error_handlers sec_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver sec_pci_driver = {
@@ -1031,7 +942,7 @@ static struct pci_driver sec_pci_driver = {
.probe = sec_probe,
.remove = sec_remove,
.err_handler = &sec_err_handler,
- .sriov_configure = sec_sriov_configure,
+ .sriov_configure = hisi_qm_sriov_configure,
};
static void sec_register_debugfs(void)
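The sec_main.c hunks above convert the DFX counters from read-only debugfs files into files that can also be cleared by writing zero, via sec_debugfs_atomic64_set() and DEFINE_DEBUGFS_ATTRIBUTE(). A minimal, self-contained sketch of that pattern follows; the module and counter names are illustrative stand-ins, not part of the patch.

/*
 * Sketch of the "read, write 0 to clear" debugfs counter pattern used
 * by sec_atomic64_ops above. All names here are invented; only the
 * get/set/DEFINE_DEBUGFS_ATTRIBUTE shape mirrors the patch.
 */
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/module.h>

static atomic64_t demo_cnt = ATOMIC64_INIT(0);
static struct dentry *demo_dir;

static int demo_cnt_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

static int demo_cnt_set(void *data, u64 val)
{
	/* Only "echo 0 > demo_cnt" is accepted: it clears the counter. */
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(demo_cnt_ops, demo_cnt_get, demo_cnt_set, "%lld\n");

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo_dfx", NULL);
	debugfs_create_file("demo_cnt", 0644, demo_dir, &demo_cnt,
			    &demo_cnt_ops);

	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");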
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 82dc6f867171..f3ed4c0e5493 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -28,12 +28,20 @@ enum hisi_zip_error_type {
HZIP_NC_ERR = 0x0d,
};
+struct hisi_zip_dfx {
+ atomic64_t send_cnt;
+ atomic64_t recv_cnt;
+ atomic64_t send_busy_cnt;
+ atomic64_t err_bd_cnt;
+};
+
struct hisi_zip_ctrl;
struct hisi_zip {
struct hisi_qm qm;
struct list_head list;
struct hisi_zip_ctrl *ctrl;
+ struct hisi_zip_dfx dfx;
};
struct hisi_zip_sqe {
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 369ec3220574..c73707c2e539 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -64,7 +64,6 @@ struct hisi_zip_req_q {
struct hisi_zip_qp_ctx {
struct hisi_qp *qp;
- struct hisi_zip_sqe zip_sqe;
struct hisi_zip_req_q req_q;
struct hisi_acc_sgl_pool *sgl_pool;
struct hisi_zip *zip_dev;
@@ -333,6 +332,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
struct hisi_zip_sqe *sqe = data;
struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct hisi_zip_req *req = req_q->q + sqe->tag;
struct acomp_req *acomp_req = req->req;
@@ -340,12 +340,14 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
u32 status, dlen, head_size;
int err = 0;
+ atomic64_inc(&dfx->recv_cnt);
status = sqe->dw3 & HZIP_BD_STATUS_M;
if (status != 0 && status != HZIP_NC_ERR) {
dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
sqe->produced);
+ atomic64_inc(&dfx->err_bd_cnt);
err = -EIO;
}
dlen = sqe->produced;
@@ -484,11 +486,12 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_qp_ctx *qp_ctx)
{
- struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe;
struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
+ struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+ struct hisi_zip_sqe zip_sqe;
dma_addr_t input;
dma_addr_t output;
int ret;
@@ -511,15 +514,18 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
}
req->dma_dst = output;
- hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen,
+ hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen,
a_req->dlen, req->sskip, req->dskip);
- hisi_zip_config_buf_type(zip_sqe, HZIP_SGL);
- hisi_zip_config_tag(zip_sqe, req->req_id);
+ hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL);
+ hisi_zip_config_tag(&zip_sqe, req->req_id);
/* send command to start a task */
- ret = hisi_qp_send(qp, zip_sqe);
- if (ret < 0)
+ atomic64_inc(&dfx->send_cnt);
+ ret = hisi_qp_send(qp, &zip_sqe);
+ if (ret < 0) {
+ atomic64_inc(&dfx->send_busy_cnt);
goto err_unmap_output;
+ }
return -EINPROGRESS;
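In the zip_crypto.c hunks above, the SQE moves from the shared qp_ctx into a stack-local variable in hisi_zip_do_work(), so concurrent requests on the same qp_ctx no longer race while filling one shared descriptor. The sketch below shows only that idea in isolation; the structure layout and the send helper are simplified stand-ins for hisi_zip_sqe and hisi_qp_send(), and the hardware is assumed to copy the descriptor during the send.

#include <linux/string.h>
#include <linux/types.h>

struct demo_sqe {
	u32 tag;
	u64 source_addr;
	u64 dest_addr;
};

/* Stand-in for hisi_qp_send(): "enqueue" by copying the descriptor. */
static int demo_send(struct demo_sqe *ring, u32 slot,
		     const struct demo_sqe *sqe)
{
	memcpy(&ring[slot], sqe, sizeof(*sqe));

	return 0;
}

static int demo_do_work(struct demo_sqe *ring, u32 slot, u32 tag,
			u64 src, u64 dst)
{
	/* On-stack, so parallel callers never share descriptor memory. */
	struct demo_sqe sqe = {
		.tag = tag,
		.source_addr = src,
		.dest_addr = dst,
	};

	return demo_send(ring, slot, &sqe);
}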
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index fcc85d2dbd07..2229a21ae7c8 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -62,6 +62,7 @@
#define HZIP_CORE_INT_SOURCE 0x3010A0
#define HZIP_CORE_INT_MASK_REG 0x3010A4
+#define HZIP_CORE_INT_SET 0x3010A8
#define HZIP_CORE_INT_STATUS 0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
@@ -83,8 +84,13 @@
#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
#define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
+#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
+#define HZIP_WR_PORT BIT(11)
#define HZIP_BUF_SIZE 22
+#define HZIP_SQE_MASK_OFFSET 64
+#define HZIP_SQE_MASK_LEN 48
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
@@ -95,6 +101,18 @@ struct hisi_zip_hw_error {
const char *msg;
};
+struct zip_dfx_item {
+ const char *name;
+ u32 offset;
+};
+
+static struct zip_dfx_item zip_dfx_files[] = {
+ {"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)},
+ {"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)},
+ {"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)},
+ {"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)},
+};
+
static const struct hisi_zip_hw_error zip_hw_error[] = {
{ .int_msk = BIT(0), .msg = "zip_ecc_1bitt_err" },
{ .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
@@ -134,7 +152,6 @@ struct ctrl_debug_file {
* Just relevant for PF.
*/
struct hisi_zip_ctrl {
- u32 num_vfs;
struct hisi_zip *hisi_zip;
struct dentry *debug_root;
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
@@ -162,7 +179,7 @@ static const u64 core_offsets[] = {
[HZIP_DECOMP_CORE5] = 0x309000,
};
-static struct debugfs_reg32 hzip_dfx_regs[] = {
+static const struct debugfs_reg32 hzip_dfx_regs[] = {
{"HZIP_GET_BD_NUM ", 0x00ull},
{"HZIP_GET_RIGHT_BD ", 0x04ull},
{"HZIP_GET_ERROR_BD ", 0x08ull},
@@ -189,38 +206,7 @@ static struct debugfs_reg32 hzip_dfx_regs[] = {
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- PCI_DEVICE_ID_ZIP_PF, NULL);
- u32 n, q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- if (!pdev) {
- q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2);
- pr_info("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- switch (rev_id) {
- case QM_HW_V1:
- q_num = HZIP_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = HZIP_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n > q_num || n == 0)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
}
static const struct kernel_param_ops pf_q_num_ops = {
@@ -232,9 +218,14 @@ static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
+ .get = param_get_int,
+};
+
static u32 vfs_num;
-module_param(vfs_num, uint, 0444);
-MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63)");
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63), 0 (default)");
static const struct pci_device_id hisi_zip_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
@@ -250,9 +241,9 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num)
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
-static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
+static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
- void __iomem *base = hisi_zip->qm.io_base;
+ void __iomem *base = qm->io_base;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -279,7 +270,7 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
- if (hisi_zip->qm.use_sva) {
+ if (qm->use_sva) {
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
} else {
@@ -295,10 +286,14 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+
+ return 0;
}
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
+ u32 val;
+
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
qm->io_base + HZIP_CORE_INT_MASK_REG);
@@ -317,12 +312,24 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+	/* enable ZIP block master OOO when an Mbit error occurs */
+ val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+ val = val | HZIP_AXI_SHUTDOWN_ENABLE;
+ writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
+ u32 val;
+
/* disable ZIP hw error interrupts */
writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+	/* disable ZIP block master OOO when an Mbit error occurs */
+ val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+ val = val & ~HZIP_AXI_SHUTDOWN_ENABLE;
+ writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -342,21 +349,20 @@ static u32 current_qm_read(struct ctrl_debug_file *file)
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
struct hisi_qm *qm = file_to_qm(file);
- struct hisi_zip_ctrl *ctrl = file->ctrl;
u32 vfq_num;
u32 tmp;
- if (val > ctrl->num_vfs)
+ if (val > qm->vfs_num)
return -EINVAL;
/* Calculate curr_qm_qp_num and store */
if (val == 0) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / ctrl->num_vfs;
- if (val == ctrl->num_vfs)
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
+ if (val == qm->vfs_num)
qm->debug.curr_qm_qp_num = qm->ctrl_qp_num -
- qm->qp_num - (ctrl->num_vfs - 1) * vfq_num;
+ qm->qp_num - (qm->vfs_num - 1) * vfq_num;
else
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -477,6 +483,27 @@ static const struct file_operations ctrl_debug_fops = {
.write = ctrl_debug_write,
};
+
+static int zip_debugfs_atomic64_set(void *data, u64 val)
+{
+ if (val)
+ return -EINVAL;
+
+ atomic64_set((atomic64_t *)data, 0);
+
+ return 0;
+}
+
+static int zip_debugfs_atomic64_get(void *data, u64 *val)
+{
+ *val = atomic64_read((atomic64_t *)data);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
+ zip_debugfs_atomic64_set, "%llu\n");
+
static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
{
struct hisi_zip *hisi_zip = ctrl->hisi_zip;
@@ -508,6 +535,25 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
return 0;
}
+static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
+{
+ struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
+ struct hisi_zip_dfx *dfx = &zip->dfx;
+ struct dentry *tmp_dir;
+ void *data;
+ int i;
+
+ tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root);
+ for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) {
+ data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset);
+ debugfs_create_file(zip_dfx_files[i].name,
+ 0644,
+ tmp_dir,
+ data,
+ &zip_atomic64_ops);
+ }
+}
+
static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
{
int i;
@@ -534,6 +580,8 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);
+ qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
qm->debug.debug_root = dev_d;
ret = hisi_qm_debug_init(qm);
if (ret)
@@ -546,6 +594,8 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
goto failed_to_create;
}
+ hisi_zip_dfx_debug_init(qm);
+
return 0;
failed_to_create:
@@ -598,8 +648,6 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
}
err++;
}
-
- writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
}
static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
@@ -607,17 +655,55 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}
+static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+}
+
+static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE,
+ qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
+ qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+}
+
+static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 nfe_enb;
+
+	/* Disable ECC Mbit error reporting. */
+ nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+
+	/* Inject a ZIP ECC Mbit error to block master OOO. */
+ writel(HZIP_CORE_INT_STATUS_M_ECC,
+ qm->io_base + HZIP_CORE_INT_SET);
+}
+
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
+ .hw_init = hisi_zip_set_user_domain_and_cache,
.hw_err_enable = hisi_zip_hw_error_enable,
.hw_err_disable = hisi_zip_hw_error_disable,
.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
+ .clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
.log_dev_hw_err = hisi_zip_log_hw_error,
+ .open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
+ .close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
.err_info = {
.ce = QM_BASE_CE,
.nfe = QM_BASE_NFE |
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
- .msi = QM_DB_RANDOM_INVALID,
+ .ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC,
+ .msi_wr_port = HZIP_WR_PORT,
+ .acpi_rst = "ZRST",
}
};
@@ -633,177 +719,85 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl;
ctrl->hisi_zip = hisi_zip;
- switch (qm->ver) {
- case QM_HW_V1:
+ if (qm->ver == QM_HW_V1)
qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
- break;
-
- case QM_HW_V2:
+ else
qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
- break;
-
- default:
- return -EINVAL;
- }
qm->err_ini = &hisi_zip_err_ini;
- hisi_zip_set_user_domain_and_cache(hisi_zip);
+ hisi_zip_set_user_domain_and_cache(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(hisi_zip);
return 0;
}
-/* Currently we only support equal assignment */
-static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs)
+static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- struct hisi_qm *qm = &hisi_zip->qm;
- u32 qp_num = qm->qp_num;
- u32 q_base = qp_num;
- u32 q_num, remain_q_num, i;
- int ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_qp_num - qp_num;
- if (remain_q_num < num_vfs)
- return -EINVAL;
+ qm->pdev = pdev;
+ qm->ver = pdev->revision;
+ qm->algs = "zlib\ngzip";
+ qm->sqe_size = HZIP_SQE_SIZE;
+ qm->dev_name = hisi_zip_name;
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
- if (ret)
- return ret;
- q_base += q_num;
+ qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ?
+ QM_HW_PF : QM_HW_VF;
+ if (qm->fun_type == QM_HW_PF) {
+ qm->qp_base = HZIP_PF_DEF_Q_BASE;
+ qm->qp_num = pf_q_num;
+ qm->qm_list = &zip_devices;
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ /*
+	 * There is no way to get the QM configuration from within a VM
+	 * on v1 hardware, so force the PF to use HZIP_PF_DEF_Q_NUM and
+	 * allow only one VF on v1 hardware.
+ *
+ * v2 hardware has no such problem.
+ */
+ qm->qp_base = HZIP_PF_DEF_Q_NUM;
+ qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
}
- return 0;
+ return hisi_qm_init(qm);
}
-static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip)
+static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
{
- struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl;
struct hisi_qm *qm = &hisi_zip->qm;
- u32 i, num_vfs = ctrl->num_vfs;
int ret;
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (qm->fun_type == QM_HW_PF) {
+ ret = hisi_zip_pf_probe_init(hisi_zip);
if (ret)
return ret;
}
- ctrl->num_vfs = 0;
-
return 0;
}
-static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- int pre_existing_vfs, num_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
-
- if (pre_existing_vfs) {
- dev_err(&pdev->dev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(int, max_vfs, HZIP_VF_NUM);
-
- ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- hisi_zip->ctrl->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't enable VF!\n");
- hisi_zip_clear_vft_config(hisi_zip);
- return ret;
- }
-
- return num_vfs;
-}
-
-static int hisi_zip_sriov_disable(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- dev_err(&pdev->dev,
- "Can't disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- /* remove in hisi_zip_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
-
- return hisi_zip_clear_vft_config(hisi_zip);
-}
-
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_zip *hisi_zip;
- enum qm_hw_ver rev_id;
struct hisi_qm *qm;
int ret;
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -EINVAL;
-
hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
if (!hisi_zip)
return -ENOMEM;
- pci_set_drvdata(pdev, hisi_zip);
qm = &hisi_zip->qm;
- qm->use_dma_api = true;
- qm->pdev = pdev;
- qm->ver = rev_id;
- qm->algs = "zlib\ngzip";
- qm->sqe_size = HZIP_SQE_SIZE;
- qm->dev_name = hisi_zip_name;
- qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
- QM_HW_VF;
- ret = hisi_qm_init(qm);
+ ret = hisi_zip_qm_init(qm, pdev);
if (ret) {
- dev_err(&pdev->dev, "Failed to init qm!\n");
+ pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret);
return ret;
}
- if (qm->fun_type == QM_HW_PF) {
- ret = hisi_zip_pf_probe_init(hisi_zip);
- if (ret)
- return ret;
-
- qm->qp_base = HZIP_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- } else if (qm->fun_type == QM_HW_VF) {
- /*
- * have no way to get qm configure in VM in v1 hardware,
- * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force
- * to trigger only one VF in v1 hardware.
- *
- * v2 hardware has no such problem.
- */
- if (qm->ver == QM_HW_V1) {
- qm->qp_base = HZIP_PF_DEF_Q_NUM;
- qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
- } else if (qm->ver == QM_HW_V2)
- /* v2 starts to support get vft by mailbox */
- hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ ret = hisi_zip_probe_init(hisi_zip);
+ if (ret) {
+ pci_err(pdev, "Failed to probe (%d)!\n", ret);
+ goto err_qm_uninit;
}
ret = hisi_qm_start(qm);
@@ -823,7 +817,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
- ret = hisi_zip_sriov_enable(pdev, vfs_num);
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
goto err_remove_from_list;
}
@@ -836,15 +830,8 @@ err_remove_from_list:
hisi_qm_stop(qm);
err_qm_uninit:
hisi_qm_uninit(qm);
- return ret;
-}
-static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- if (num_vfs == 0)
- return hisi_zip_sriov_disable(pdev);
- else
- return hisi_zip_sriov_enable(pdev, num_vfs);
+ return ret;
}
static void hisi_zip_remove(struct pci_dev *pdev)
@@ -852,8 +839,8 @@ static void hisi_zip_remove(struct pci_dev *pdev)
struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
struct hisi_qm *qm = &hisi_zip->qm;
- if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0)
- hisi_zip_sriov_disable(pdev);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ hisi_qm_sriov_disable(pdev);
hisi_zip_debugfs_exit(hisi_zip);
hisi_qm_stop(qm);
@@ -865,6 +852,9 @@ static void hisi_zip_remove(struct pci_dev *pdev)
static const struct pci_error_handlers hisi_zip_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_zip_pci_driver = {
@@ -873,7 +863,7 @@ static struct pci_driver hisi_zip_pci_driver = {
.probe = hisi_zip_probe,
.remove = hisi_zip_remove,
.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
- hisi_zip_sriov_configure : NULL,
+ hisi_qm_sriov_configure : NULL,
.err_handler = &hisi_zip_err_handler,
};
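Both HiSilicon drivers above now describe their DFX counters with a {name, offsetof()} table and create the debugfs files in one loop (sec_dfx_labels in sec_main.c, zip_dfx_files here). A condensed sketch of that table-driven pattern follows, with invented struct and field names; only the offsetof() arithmetic and the creation loop mirror the patch.

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>

struct demo_dfx {
	atomic64_t send_cnt;
	atomic64_t recv_cnt;
};

struct demo_dfx_item {
	const char *name;
	u32 offset;
};

static const struct demo_dfx_item demo_dfx_files[] = {
	{"send_cnt", offsetof(struct demo_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct demo_dfx, recv_cnt)},
};

static void demo_dfx_debug_init(struct demo_dfx *dfx, struct dentry *root,
				const struct file_operations *fops)
{
	atomic64_t *data;
	int i;

	/* One debugfs file per table entry, pointing into the dfx struct. */
	for (i = 0; i < ARRAY_SIZE(demo_dfx_files); i++) {
		data = (atomic64_t *)((uintptr_t)dfx +
				      demo_dfx_files[i].offset);
		debugfs_create_file(demo_dfx_files[i].name, 0644, root,
				    data, fops);
	}
}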
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_main.c b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
index 200fb3303db0..34bb3063eb70 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
@@ -79,13 +79,13 @@ static int otx_cpt_device_init(struct otx_cpt_device *cpt)
/* Check BIST status */
bist = (u64)otx_cpt_check_bist_status(cpt);
if (bist) {
- dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
+ dev_err(dev, "RAM BIST failed with code 0x%llx\n", bist);
return -ENODEV;
}
bist = otx_cpt_check_exe_bist_status(cpt);
if (bist) {
- dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
+ dev_err(dev, "Engine BIST failed with code 0x%llx\n", bist);
return -ENODEV;
}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
index a6774232e9a3..a9e3de65875a 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
@@ -63,11 +63,11 @@ static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
if (vf_id >= 0)
- pr_debug("MBOX opcode %s received from VF%d raw_data %s",
+ pr_debug("MBOX opcode %s received from VF%d raw_data %s\n",
get_mbox_opcode_str(mbox_msg->msg), vf_id,
raw_data_str);
else
- pr_debug("MBOX opcode %s received from PF raw_data %s",
+ pr_debug("MBOX opcode %s received from PF raw_data %s\n",
get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
}
@@ -140,20 +140,20 @@ static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
struct otx_cpt_ucode *ucode;
if (q >= cpt->max_vfs) {
- dev_err(dev, "Requested queue %d is > than maximum avail %d",
+ dev_err(dev, "Requested queue %d is > than maximum avail %d\n",
q, cpt->max_vfs);
return -EINVAL;
}
if (grp >= OTX_CPT_MAX_ENGINE_GROUPS) {
- dev_err(dev, "Requested group %d is > than maximum avail %d",
+ dev_err(dev, "Requested group %d is > than maximum avail %d\n",
grp, OTX_CPT_MAX_ENGINE_GROUPS);
return -EINVAL;
}
eng_grp = &cpt->eng_grps.grp[grp];
if (!eng_grp->is_enabled) {
- dev_err(dev, "Requested engine group %d is disabled", grp);
+ dev_err(dev, "Requested engine group %d is disabled\n", grp);
return -EINVAL;
}
@@ -212,7 +212,7 @@ static void otx_cpt_handle_mbox_intr(struct otx_cpt_device *cpt, int vf)
vftype = otx_cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
if ((vftype != OTX_CPT_AE_TYPES) &&
(vftype != OTX_CPT_SE_TYPES)) {
- dev_err(dev, "VF%d binding to eng group %llu failed",
+ dev_err(dev, "VF%d binding to eng group %llu failed\n",
vf, mbx.data);
otx_cptpf_mbox_send_nack(cpt, vf, &mbx);
} else {
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index d04baa319592..fec8f3b9b112 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -62,7 +62,7 @@ static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
int i;
if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
- dev_err(dev, "unsupported number of engines %d on octeontx",
+ dev_err(dev, "unsupported number of engines %d on octeontx\n",
eng_grp->g->engs_num);
return bmap;
}
@@ -78,7 +78,7 @@ static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
}
if (!found)
- dev_err(dev, "No engines reserved for engine group %d",
+ dev_err(dev, "No engines reserved for engine group %d\n",
eng_grp->idx);
return bmap;
}
@@ -306,7 +306,7 @@ static int process_tar_file(struct device *dev,
ucode_size = ntohl(ucode_hdr->code_length) * 2;
if (!ucode_size || (size < round_up(ucode_size, 16) +
sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
- dev_err(dev, "Ucode %s invalid size", filename);
+ dev_err(dev, "Ucode %s invalid size\n", filename);
return -EINVAL;
}
@@ -379,18 +379,18 @@ static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
{
struct tar_ucode_info_t *curr;
- pr_debug("Tar archive filename %s", tar_filename);
- pr_debug("Tar archive pointer %p, size %ld", tar_arch->fw->data,
+ pr_debug("Tar archive filename %s\n", tar_filename);
+ pr_debug("Tar archive pointer %p, size %ld\n", tar_arch->fw->data,
tar_arch->fw->size);
list_for_each_entry(curr, &tar_arch->ucodes, list) {
- pr_debug("Ucode filename %s", curr->ucode.filename);
- pr_debug("Ucode version string %s", curr->ucode.ver_str);
- pr_debug("Ucode version %d.%d.%d.%d",
+ pr_debug("Ucode filename %s\n", curr->ucode.filename);
+ pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d\n",
curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
- pr_debug("Ucode type (%d) %s", curr->ucode.type,
+ pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
get_ucode_type_str(curr->ucode.type));
- pr_debug("Ucode size %d", curr->ucode.size);
+ pr_debug("Ucode size %d\n", curr->ucode.size);
pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
}
}
@@ -417,14 +417,14 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev,
goto release_tar_arch;
if (tar_arch->fw->size < TAR_BLOCK_LEN) {
- dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
tar_size = tar_arch->fw->size;
tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
- dev_err(dev, "Unsupported format of tar archive %s",
+ dev_err(dev, "Unsupported format of tar archive %s\n",
tar_filename);
goto release_tar_arch;
}
@@ -437,7 +437,7 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev,
if (tar_offs + cur_size > tar_size ||
tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
- dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
@@ -458,7 +458,7 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev,
/* Check for the end of the archive */
if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
- dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
@@ -563,13 +563,13 @@ static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
{
- pr_debug("Ucode info");
- pr_debug("Ucode version string %s", ucode->ver_str);
- pr_debug("Ucode version %d.%d.%d.%d", ucode->ver_num.nn,
+ pr_debug("Ucode info\n");
+ pr_debug("Ucode version string %s\n", ucode->ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn,
ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
- pr_debug("Ucode type %s", get_ucode_type_str(ucode->type));
- pr_debug("Ucode size %d", ucode->size);
- pr_debug("Ucode virt address %16.16llx", (u64)ucode->align_va);
+ pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type));
+ pr_debug("Ucode size %d\n", ucode->size);
+ pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va);
pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
}
@@ -600,19 +600,19 @@ static void print_dbg_info(struct device *dev,
u32 mask[4];
int i, j;
- pr_debug("Engine groups global info");
- pr_debug("max SE %d, max AE %d",
+ pr_debug("Engine groups global info\n");
+ pr_debug("max SE %d, max AE %d\n",
eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
- pr_debug("free SE %d", eng_grps->avail.se_cnt);
- pr_debug("free AE %d", eng_grps->avail.ae_cnt);
+ pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
+ pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
- pr_debug("engine_group%d, state %s", i, grp->is_enabled ?
+ pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
"enabled" : "disabled");
if (grp->is_enabled) {
mirrored_grp = &eng_grps->grp[grp->mirror.idx];
- pr_debug("Ucode0 filename %s, version %s",
+ pr_debug("Ucode0 filename %s, version %s\n",
grp->mirror.is_ena ?
mirrored_grp->ucode[0].filename :
grp->ucode[0].filename,
@@ -626,18 +626,18 @@ static void print_dbg_info(struct device *dev,
if (engs->type) {
print_engs_info(grp, engs_info,
2*OTX_CPT_UCODE_NAME_LENGTH, j);
- pr_debug("Slot%d: %s", j, engs_info);
+ pr_debug("Slot%d: %s\n", j, engs_info);
bitmap_to_arr32(mask, engs->bmap,
eng_grps->engs_num);
- pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
+ pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n",
mask[3], mask[2], mask[1], mask[0]);
} else
- pr_debug("Slot%d not used", j);
+ pr_debug("Slot%d not used\n", j);
}
if (grp->is_enabled) {
cpt_print_engines_mask(grp, dev, engs_mask,
OTX_CPT_UCODE_NAME_LENGTH);
- pr_debug("Cmask: %s", engs_mask);
+ pr_debug("Cmask: %s\n", engs_mask);
}
}
}
@@ -766,7 +766,7 @@ static int check_engines_availability(struct device *dev,
if (avail_cnt < req_eng->count) {
dev_err(dev,
- "Error available %s engines %d < than requested %d",
+ "Error available %s engines %d < than requested %d\n",
get_eng_type_str(req_eng->type),
avail_cnt, req_eng->count);
return -EBUSY;
@@ -867,7 +867,7 @@ static int copy_ucode_to_dma_mem(struct device *dev,
OTX_CPT_UCODE_ALIGNMENT,
&ucode->dma, GFP_KERNEL);
if (!ucode->va) {
- dev_err(dev, "Unable to allocate space for microcode");
+ dev_err(dev, "Unable to allocate space for microcode\n");
return -ENOMEM;
}
ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
@@ -905,15 +905,15 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
ucode->size = ntohl(ucode_hdr->code_length) * 2;
if (!ucode->size || (fw->size < round_up(ucode->size, 16)
+ sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
- dev_err(dev, "Ucode %s invalid size", ucode_filename);
+ dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
ret = -EINVAL;
goto release_fw;
}
ret = get_ucode_type(ucode_hdr, &ucode->type);
if (ret) {
- dev_err(dev, "Microcode %s unknown type 0x%x", ucode->filename,
- ucode->type);
+ dev_err(dev, "Microcode %s unknown type 0x%x\n",
+ ucode->filename, ucode->type);
goto release_fw;
}
@@ -1083,7 +1083,7 @@ static int eng_grp_update_masks(struct device *dev,
break;
default:
- dev_err(dev, "Invalid engine type %d", engs->type);
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
return -EINVAL;
}
@@ -1142,13 +1142,14 @@ static int delete_engine_group(struct device *dev,
return -EINVAL;
if (eng_grp->mirror.ref_count) {
- dev_err(dev, "Can't delete engine_group%d as it is used by:",
+ dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
eng_grp->idx);
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
if (eng_grp->g->grp[i].mirror.is_ena &&
eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
- dev_err(dev, "engine_group%d", i);
+ pr_cont(" %d", i);
}
+ pr_cont("\n");
return -EINVAL;
}
@@ -1182,7 +1183,7 @@ static int validate_1_ucode_scenario(struct device *dev,
if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
engs[i].type)) {
dev_err(dev,
- "Microcode %s does not support %s engines",
+ "Microcode %s does not support %s engines\n",
eng_grp->ucode[0].filename,
get_eng_type_str(engs[i].type));
return -EINVAL;
@@ -1220,7 +1221,7 @@ static int create_engine_group(struct device *dev,
/* Validate if requested engine types are supported by this device */
for (i = 0; i < engs_cnt; i++)
if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
- dev_err(dev, "Device does not support %s engines",
+ dev_err(dev, "Device does not support %s engines\n",
get_eng_type_str(engs[i].type));
return -EPERM;
}
@@ -1228,7 +1229,7 @@ static int create_engine_group(struct device *dev,
/* Find engine group which is not used */
eng_grp = find_unused_eng_grp(eng_grps);
if (!eng_grp) {
- dev_err(dev, "Error all engine groups are being used");
+ dev_err(dev, "Error all engine groups are being used\n");
return -ENOSPC;
}
@@ -1298,11 +1299,11 @@ static int create_engine_group(struct device *dev,
eng_grp->is_enabled = true;
if (eng_grp->mirror.is_ena)
dev_info(dev,
- "Engine_group%d: reuse microcode %s from group %d",
+ "Engine_group%d: reuse microcode %s from group %d\n",
eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
mirrored_eng_grp->idx);
else
- dev_info(dev, "Engine_group%d: microcode loaded %s",
+ dev_info(dev, "Engine_group%d: microcode loaded %s\n",
eng_grp->idx, eng_grp->ucode[0].ver_str);
return 0;
@@ -1412,14 +1413,14 @@ static ssize_t ucode_load_store(struct device *dev,
} else {
if (del_grp_idx < 0 ||
del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
- dev_err(dev, "Invalid engine group index %d",
+ dev_err(dev, "Invalid engine group index %d\n",
del_grp_idx);
ret = -EINVAL;
return ret;
}
if (!eng_grps->grp[del_grp_idx].is_enabled) {
- dev_err(dev, "Error engine_group%d is not configured",
+ dev_err(dev, "Error engine_group%d is not configured\n",
del_grp_idx);
ret = -EINVAL;
return ret;
@@ -1568,7 +1569,7 @@ void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
udelay(CSR_DELAY);
reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
if (timeout--) {
- dev_warn(&cpt->pdev->dev, "Cores still busy");
+ dev_warn(&cpt->pdev->dev, "Cores still busy\n");
break;
}
}
@@ -1626,7 +1627,7 @@ int otx_cpt_init_eng_grps(struct pci_dev *pdev,
eng_grps->avail.max_ae_cnt;
if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
dev_err(&pdev->dev,
- "Number of engines %d > than max supported %d",
+ "Number of engines %d > than max supported %d\n",
eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
ret = -EINVAL;
goto err;
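One non-cosmetic logging change above: delete_engine_group() now emits a single diagnostic line by starting it with dev_err() and appending the group indices with pr_cont(), instead of issuing one dev_err() per index. A minimal sketch of that continuation pattern, with the device and index list as placeholders:

#include <linux/device.h>
#include <linux/printk.h>

static void demo_report_users(struct device *dev, const int *users, int n)
{
	int i;

	/* Start the record once, then continue it without new prefixes. */
	dev_err(dev, "Can't delete group as it is used by group(s):");
	for (i = 0; i < n; i++)
		pr_cont(" %d", users[i]);
	pr_cont("\n");	/* terminate the continued line */
}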
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 06202bcffb33..60e744f680d3 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -1660,7 +1660,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
case OTX_CPT_SE_TYPES:
count = atomic_read(&se_devices.count);
if (count >= CPT_MAX_VF_NUM) {
- dev_err(&pdev->dev, "No space to add a new device");
+ dev_err(&pdev->dev, "No space to add a new device\n");
ret = -ENOSPC;
goto err;
}
@@ -1687,7 +1687,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
case OTX_CPT_AE_TYPES:
count = atomic_read(&ae_devices.count);
if (count >= CPT_MAX_VF_NUM) {
- dev_err(&pdev->dev, "No space to a add new device");
+			dev_err(&pdev->dev, "No space to add a new device\n");
ret = -ENOSPC;
goto err;
}
@@ -1728,7 +1728,7 @@ void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
}
if (!dev_found) {
- dev_err(&pdev->dev, "%s device not found", __func__);
+ dev_err(&pdev->dev, "%s device not found\n", __func__);
goto exit;
}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index a91860b5dc77..ce3168327a39 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -584,7 +584,7 @@ static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
cptvf_write_vq_done_ack(cptvf, intr);
wqe = get_cptvf_vq_wqe(cptvf, 0);
if (unlikely(!wqe)) {
- dev_err(&pdev->dev, "No work to schedule for VF (%d)",
+ dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
cptvf->vfid);
return IRQ_NONE;
}
@@ -602,7 +602,7 @@ static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
GFP_KERNEL)) {
dev_err(&pdev->dev,
- "Allocation failed for affinity_mask for VF %d",
+ "Allocation failed for affinity_mask for VF %d\n",
cptvf->vfid);
return;
}
@@ -691,7 +691,7 @@ static ssize_t vf_engine_group_store(struct device *dev,
return -EINVAL;
if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
- dev_err(dev, "Engine group >= than max available groups %d",
+ dev_err(dev, "Engine group >= than max available groups %d\n",
OTX_CPT_MAX_ENGINE_GROUPS);
return -EINVAL;
}
@@ -837,7 +837,7 @@ static int otx_cptvf_probe(struct pci_dev *pdev,
cptvf_misc_intr_handler, 0, "CPT VF misc intr",
cptvf);
if (err) {
- dev_err(dev, "Failed to request misc irq");
+ dev_err(dev, "Failed to request misc irq\n");
goto free_vectors;
}
@@ -854,7 +854,7 @@ static int otx_cptvf_probe(struct pci_dev *pdev,
cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
if (err) {
- dev_err(dev, "cptvf_sw_init() failed");
+ dev_err(dev, "cptvf_sw_init() failed\n");
goto free_misc_irq;
}
/* Convey VQ LEN to PF */
@@ -946,7 +946,7 @@ static void otx_cptvf_remove(struct pci_dev *pdev)
/* Convey DOWN to PF */
if (otx_cptvf_send_vf_down(cptvf)) {
- dev_err(&pdev->dev, "PF not responding to DOWN msg");
+ dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
} else {
sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
index df839b880354..239195cccf93 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
@@ -314,7 +314,7 @@ static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
GFP_ATOMIC;
ret = setup_sgio_list(pdev, &info, req, gfp);
if (unlikely(ret)) {
- dev_err(&pdev->dev, "Setting up SG list failed");
+ dev_err(&pdev->dev, "Setting up SG list failed\n");
goto request_cleanup;
}
cpt_req->dlen = info->dlen;
@@ -410,17 +410,17 @@ int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
struct otx_cptvf *cptvf = pci_get_drvdata(pdev);
if (!otx_cpt_device_ready(cptvf)) {
- dev_err(&pdev->dev, "CPT Device is not ready");
+ dev_err(&pdev->dev, "CPT Device is not ready\n");
return -ENODEV;
}
if ((cptvf->vftype == OTX_CPT_SE_TYPES) && (!req->ctrl.s.se_req)) {
- dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request",
+ dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request\n",
cptvf->vfid);
return -EINVAL;
} else if ((cptvf->vftype == OTX_CPT_AE_TYPES) &&
(req->ctrl.s.se_req)) {
- dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request",
+ dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request\n",
cptvf->vfid);
return -EINVAL;
}
@@ -461,7 +461,7 @@ static int cpt_process_ccode(struct pci_dev *pdev,
/* check for timeout */
if (time_after_eq(jiffies, cpt_info->time_in +
OTX_CPT_COMMAND_TIMEOUT * HZ))
- dev_warn(&pdev->dev, "Request timed out 0x%p", req);
+ dev_warn(&pdev->dev, "Request timed out 0x%p\n", req);
else if (cpt_info->extra_time < OTX_CPT_TIME_IN_RESET_COUNT) {
cpt_info->time_in = jiffies;
cpt_info->extra_time++;
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index bd6309e57ab8..da3f0b8814aa 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -805,12 +805,9 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
size_t ds = crypto_shash_digestsize(bctx->shash);
int err, i;
- SHASH_DESC_ON_STACK(shash, bctx->shash);
-
- shash->tfm = bctx->shash;
-
if (keylen > bs) {
- err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
+ err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
+ bctx->ipad);
if (err)
return err;
keylen = ds;
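The mtk-sha hunk above, and the n2, omap-sham and s5p-sss hunks that follow, all make the same substitution: the SHASH_DESC_ON_STACK() plus crypto_shash_digest() boilerplate becomes a single crypto_shash_tfm_digest() call (the helper this pull introduces). A self-contained sketch of the new helper in use; the algorithm name and wrapper function are illustrative:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/types.h>

static int demo_sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Old form, now folded into one call:
	 *	SHASH_DESC_ON_STACK(desc, tfm);
	 *	desc->tfm = tfm;
	 *	err = crypto_shash_digest(desc, data, len, out);
	 */
	err = crypto_shash_tfm_digest(tfm, data, len, out);

	crypto_free_shash(tfm);

	return err;
}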
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index f5c468f2cc82..6a828bbecea4 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -462,7 +462,6 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *child_shash = ctx->child_shash;
struct crypto_ahash *fallback_tfm;
- SHASH_DESC_ON_STACK(shash, child_shash);
int err, bs, ds;
fallback_tfm = ctx->base.fallback_tfm;
@@ -470,14 +469,12 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
if (err)
return err;
- shash->tfm = child_shash;
-
bs = crypto_shash_blocksize(child_shash);
ds = crypto_shash_digestsize(child_shash);
BUG_ON(ds > N2_HASH_KEY_MAX);
if (keylen > bs) {
- err = crypto_shash_digest(shash, key, keylen,
- ctx->hash_key);
+ err = crypto_shash_tfm_digest(child_shash, key, keylen,
+ ctx->hash_key);
if (err)
return err;
keylen = ds;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index e4072cd38585..063ad5d03f33 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -33,7 +33,6 @@
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
@@ -1245,16 +1244,6 @@ static int omap_sham_update(struct ahash_request *req)
return omap_sham_enqueue(req, OP_UPDATE);
}
-static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
- const u8 *data, unsigned int len, u8 *out)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
-
- shash->tfm = tfm;
-
- return crypto_shash_digest(shash, data, len, out);
-}
-
static int omap_sham_final_shash(struct ahash_request *req)
{
struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
@@ -1270,9 +1259,8 @@ static int omap_sham_final_shash(struct ahash_request *req)
!test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
offset = get_block_size(ctx);
- return omap_sham_shash_digest(tctx->fallback, req->base.flags,
- ctx->buffer + offset,
- ctx->bufcnt - offset, req->result);
+ return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
+ ctx->bufcnt - offset, req->result);
}
static int omap_sham_final(struct ahash_request *req)
@@ -1351,9 +1339,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
return err;
if (keylen > bs) {
- err = omap_sham_shash_digest(bctx->shash,
- crypto_shash_get_flags(bctx->shash),
- key, keylen, bctx->ipad);
+ err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
+ bctx->ipad);
if (err)
return err;
keylen = ds;
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 2a16800d2579..341433fbcc4a 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1521,37 +1521,6 @@ static int s5p_hash_update(struct ahash_request *req)
}
/**
- * s5p_hash_shash_digest() - calculate shash digest
- * @tfm: crypto transformation
- * @flags: tfm flags
- * @data: input data
- * @len: length of data
- * @out: output buffer
- */
-static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
- const u8 *data, unsigned int len, u8 *out)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
-
- shash->tfm = tfm;
-
- return crypto_shash_digest(shash, data, len, out);
-}
-
-/**
- * s5p_hash_final_shash() - calculate shash digest
- * @req: AHASH request
- */
-static int s5p_hash_final_shash(struct ahash_request *req)
-{
- struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
-
- return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
- ctx->buffer, ctx->bufcnt, req->result);
-}
-
-/**
* s5p_hash_final() - close up hash and calculate digest
* @req: AHASH request
*
@@ -1582,8 +1551,12 @@ static int s5p_hash_final(struct ahash_request *req)
if (ctx->error)
return -EINVAL; /* uncompleted hash is not needed */
- if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
- return s5p_hash_final_shash(req);
+ if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+
+ return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
+ ctx->bufcnt, req->result);
+ }
return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
}
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 8e92e4ac79f1..3ba41148c2a4 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -28,18 +28,23 @@
/* Registers values */
#define CRC_CR_RESET BIT(0)
-#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
-#define CRC_INIT_DEFAULT 0xFFFFFFFF
+#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5))
+#define CRC_CR_REV_IN_BYTE BIT(5)
+#define CRC_CR_REV_OUT BIT(7)
+#define CRC32C_INIT_DEFAULT 0xFFFFFFFF
#define CRC_AUTOSUSPEND_DELAY 50
+static unsigned int burst_size;
+module_param(burst_size, uint, 0644);
+MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)");
+
struct stm32_crc {
struct list_head list;
struct device *dev;
void __iomem *regs;
struct clk *clk;
- u8 pending_data[sizeof(u32)];
- size_t nb_pending_bytes;
+ spinlock_t lock;
};
struct stm32_crc_list {
@@ -59,14 +64,13 @@ struct stm32_crc_ctx {
struct stm32_crc_desc_ctx {
u32 partial; /* crc32c: partial in first 4 bytes of that struct */
- struct stm32_crc *crc;
};
static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
- mctx->key = CRC_INIT_DEFAULT;
+ mctx->key = 0;
mctx->poly = CRC32_POLY_LE;
return 0;
}
@@ -75,7 +79,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
- mctx->key = CRC_INIT_DEFAULT;
+ mctx->key = CRC32C_INIT_DEFAULT;
mctx->poly = CRC32C_POLY_LE;
return 0;
}
@@ -92,87 +96,135 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
return 0;
}
+static struct stm32_crc *stm32_crc_get_next_crc(void)
+{
+ struct stm32_crc *crc;
+
+ spin_lock_bh(&crc_list.lock);
+ crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
+ if (crc)
+ list_move_tail(&crc->list, &crc_list.dev_list);
+ spin_unlock_bh(&crc_list.lock);
+
+ return crc;
+}
+
static int stm32_crc_init(struct shash_desc *desc)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
+ unsigned long flags;
- spin_lock_bh(&crc_list.lock);
- list_for_each_entry(crc, &crc_list.dev_list, list) {
- ctx->crc = crc;
- break;
- }
- spin_unlock_bh(&crc_list.lock);
+ crc = stm32_crc_get_next_crc();
+ if (!crc)
+ return -ENODEV;
+
+ pm_runtime_get_sync(crc->dev);
- pm_runtime_get_sync(ctx->crc->dev);
+ spin_lock_irqsave(&crc->lock, flags);
/* Reset, set key, poly and configure in bit reverse mode */
- writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
- writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
- writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
+ writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
+ writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
+ writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
/* Store partial result */
- ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
- ctx->crc->nb_pending_bytes = 0;
+ ctx->partial = readl_relaxed(crc->regs + CRC_DR);
- pm_runtime_mark_last_busy(ctx->crc->dev);
- pm_runtime_put_autosuspend(ctx->crc->dev);
+ spin_unlock_irqrestore(&crc->lock, flags);
+
+ pm_runtime_mark_last_busy(crc->dev);
+ pm_runtime_put_autosuspend(crc->dev);
return 0;
}
-static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
- unsigned int length)
+static int burst_update(struct shash_desc *desc, const u8 *d8,
+ size_t length)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc *crc = ctx->crc;
- u32 *d32;
- unsigned int i;
+ struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+ struct stm32_crc *crc;
+ unsigned long flags;
+
+ crc = stm32_crc_get_next_crc();
+ if (!crc)
+ return -ENODEV;
pm_runtime_get_sync(crc->dev);
- if (unlikely(crc->nb_pending_bytes)) {
- while (crc->nb_pending_bytes != sizeof(u32) && length) {
- /* Fill in pending data */
- crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+ spin_lock_irqsave(&crc->lock, flags);
+
+ /*
+ * Restore the CRC previously computed for this context as the init
+ * value, restore the polynomial configuration, and configure the IN
+ * register for word input data and the OUT register for bit-reversed
+ * output data.
+ */
+ writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
+ writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
+ writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
+
+ if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
+ /* Configure for byte data */
+ writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
+ while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
+ writeb_relaxed(*d8++, crc->regs + CRC_DR);
length--;
}
-
- if (crc->nb_pending_bytes == sizeof(u32)) {
- /* Process completed pending data */
- writel_relaxed(*(u32 *)crc->pending_data,
- crc->regs + CRC_DR);
- crc->nb_pending_bytes = 0;
- }
+ /* Configure for word data */
+ writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
}
- d32 = (u32 *)d8;
- for (i = 0; i < length >> 2; i++)
- /* Process 32 bits data */
- writel_relaxed(*(d32++), crc->regs + CRC_DR);
+ for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
+ writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
+
+ if (length) {
+ /* Configure for byte data */
+ writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
+ while (length--)
+ writeb_relaxed(*d8++, crc->regs + CRC_DR);
+ }
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
+ spin_unlock_irqrestore(&crc->lock, flags);
+
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
- /* Check for pending data (non 32 bits) */
- length &= 3;
- if (likely(!length))
- return 0;
+ return 0;
+}
- if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
- /* Shall not happen */
- dev_err(crc->dev, "Pending data overflow\n");
- return -EINVAL;
- }
+static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
+ unsigned int length)
+{
+ const unsigned int burst_sz = burst_size;
+ unsigned int rem_sz;
+ const u8 *cur;
+ size_t size;
+ int ret;
- d8 = (const u8 *)d32;
- for (i = 0; i < length; i++)
- /* Store pending data */
- crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+ if (!burst_sz)
+ return burst_update(desc, d8, length);
+
+ /* First pass of the loop digests the leading bytes so that later bursts start 32-bit aligned */
+ size = min(length,
+ burst_sz + (unsigned int)d8 - ALIGN_DOWN((unsigned int)d8,
+ sizeof(u32)));
+ for (rem_sz = length, cur = d8; rem_sz;
+ rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
+ ret = burst_update(desc, cur, size);
+ if (ret)
+ return ret;
+ }
return 0;
}
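
A worked example of the first-pass sizing above (illustrative values, not from the patch): with d8 == 0x1002 and burst_size == 64, ALIGN_DOWN(d8, 4) == 0x1000, so size = min(length, 64 + 0x1002 - 0x1000) = min(length, 66). After that first 66-byte burst, cur == 0x1044 is 32-bit aligned, so every subsequent pass hands burst_update() an aligned buffer of at most burst_size bytes.
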
@@ -202,6 +254,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
}
+static unsigned int refcnt;
+static DEFINE_MUTEX(refcnt_lock);
static struct shash_alg algs[] = {
/* CRC-32 */
{
@@ -284,20 +338,29 @@ static int stm32_crc_probe(struct platform_device *pdev)
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
+ pm_runtime_irq_safe(dev);
pm_runtime_enable(dev);
+ spin_lock_init(&crc->lock);
+
platform_set_drvdata(pdev, crc);
spin_lock(&crc_list.lock);
list_add(&crc->list, &crc_list.dev_list);
spin_unlock(&crc_list.lock);
- ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
- if (ret) {
- dev_err(dev, "Failed to register\n");
- clk_disable_unprepare(crc->clk);
- return ret;
+ mutex_lock(&refcnt_lock);
+ if (!refcnt) {
+ ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+ if (ret) {
+ mutex_unlock(&refcnt_lock);
+ dev_err(dev, "Failed to register\n");
+ clk_disable_unprepare(crc->clk);
+ return ret;
+ }
}
+ refcnt++;
+ mutex_unlock(&refcnt_lock);
dev_info(dev, "Initialized\n");
@@ -318,7 +381,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
list_del(&crc->list);
spin_unlock(&crc_list.lock);
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ mutex_lock(&refcnt_lock);
+ if (!--refcnt)
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ mutex_unlock(&refcnt_lock);
pm_runtime_disable(crc->dev);
pm_runtime_put_noidle(crc->dev);
@@ -328,34 +394,60 @@ static int stm32_crc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int stm32_crc_runtime_suspend(struct device *dev)
+static int __maybe_unused stm32_crc_suspend(struct device *dev)
{
struct stm32_crc *crc = dev_get_drvdata(dev);
+ int ret;
- clk_disable_unprepare(crc->clk);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+
+ clk_unprepare(crc->clk);
return 0;
}
-static int stm32_crc_runtime_resume(struct device *dev)
+static int __maybe_unused stm32_crc_resume(struct device *dev)
{
struct stm32_crc *crc = dev_get_drvdata(dev);
int ret;
- ret = clk_prepare_enable(crc->clk);
+ ret = clk_prepare(crc->clk);
if (ret) {
- dev_err(crc->dev, "Failed to prepare_enable clock\n");
+ dev_err(crc->dev, "Failed to prepare clock\n");
+ return ret;
+ }
+
+ return pm_runtime_force_resume(dev);
+}
+
+static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+
+ clk_disable(crc->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_crc_runtime_resume(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(crc->clk);
+ if (ret) {
+ dev_err(crc->dev, "Failed to enable clock\n");
return ret;
}
return 0;
}
-#endif
static const struct dev_pm_ops stm32_crc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend,
+ stm32_crc_resume)
SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
stm32_crc_runtime_resume, NULL)
};
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 167b80eec437..03c5e6683805 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -507,6 +507,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
struct dma_slave_config dma_conf;
+ struct dma_chan *chan;
int err;
memset(&dma_conf, 0, sizeof(dma_conf));
@@ -518,11 +519,11 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
dma_conf.dst_maxburst = hdev->dma_maxburst;
dma_conf.device_fc = false;
- hdev->dma_lch = dma_request_chan(hdev->dev, "in");
- if (IS_ERR(hdev->dma_lch)) {
- dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return PTR_ERR(hdev->dma_lch);
- }
+ chan = dma_request_chan(hdev->dev, "in");
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ hdev->dma_lch = chan;
err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
if (err) {
@@ -1463,8 +1464,11 @@ static int stm32_hash_probe(struct platform_device *pdev)
hdev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hdev->clk)) {
- dev_err(dev, "failed to get clock for hash (%lu)\n",
- PTR_ERR(hdev->clk));
+ if (PTR_ERR(hdev->clk) != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get clock for hash (%lu)\n",
+ PTR_ERR(hdev->clk));
+ }
+
return PTR_ERR(hdev->clk);
}
@@ -1482,7 +1486,12 @@ static int stm32_hash_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
- if (!IS_ERR(hdev->rst)) {
+ if (IS_ERR(hdev->rst)) {
+ if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_reset;
+ }
+ } else {
reset_control_assert(hdev->rst);
udelay(2);
reset_control_deassert(hdev->rst);
@@ -1493,8 +1502,15 @@ static int stm32_hash_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hdev);
ret = stm32_hash_dma_init(hdev);
- if (ret)
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOENT:
dev_dbg(dev, "DMA mode not available\n");
+ break;
+ default:
+ goto err_dma;
+ }
spin_lock(&stm32_hash.lock);
list_add_tail(&hdev->list, &stm32_hash.dev_list);
@@ -1532,10 +1548,10 @@ err_engine:
spin_lock(&stm32_hash.lock);
list_del(&hdev->list);
spin_unlock(&stm32_hash.lock);
-
+err_dma:
if (hdev->dma_lch)
dma_release_channel(hdev->dma_lch);
-
+err_reset:
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index de613c623a2c..69857f080704 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -434,15 +434,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
goto out;
}
- {
- SHASH_DESC_ON_STACK(desc, tfm);
-
- desc->tfm = tfm;
-
- ret = crypto_shash_digest(desc, fw->image, image_size,
- hash_data);
- shash_desc_zero(desc);
- }
+ ret = crypto_shash_tfm_digest(tfm, fw->image, image_size, hash_data);
crypto_free_shash(tfm);
if (ret) {
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 4c212442a8f7..5c9fb013e3f7 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -83,13 +83,8 @@ static int fscrypt_do_sha256(const u8 *data, unsigned int data_len, u8 *result)
tfm = prev_tfm;
}
}
- {
- SHASH_DESC_ON_STACK(desc, tfm);
- desc->tfm = tfm;
-
- return crypto_shash_digest(desc, data, data_len, result);
- }
+ return crypto_shash_tfm_digest(tfm, data, data_len, result);
}
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c
index efb95bd19a89..0cba7928446d 100644
--- a/fs/crypto/hkdf.c
+++ b/fs/crypto/hkdf.c
@@ -44,17 +44,13 @@ static int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
unsigned int ikmlen, u8 prk[HKDF_HASHLEN])
{
static const u8 default_salt[HKDF_HASHLEN];
- SHASH_DESC_ON_STACK(desc, hmac_tfm);
int err;
err = crypto_shash_setkey(hmac_tfm, default_salt, HKDF_HASHLEN);
if (err)
return err;
- desc->tfm = hmac_tfm;
- err = crypto_shash_digest(desc, ikm, ikmlen, prk);
- shash_desc_zero(desc);
- return err;
+ return crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk);
}
/*
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 2c449aed1b92..0681540c48d9 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -48,18 +48,6 @@ void ecryptfs_from_hex(char *dst, char *src, int dst_size)
}
}
-static int ecryptfs_hash_digest(struct crypto_shash *tfm,
- char *src, int len, char *dst)
-{
- SHASH_DESC_ON_STACK(desc, tfm);
- int err;
-
- desc->tfm = tfm;
- err = crypto_shash_digest(desc, src, len, dst);
- shash_desc_zero(desc);
- return err;
-}
-
/**
* ecryptfs_calculate_md5 - calculates the md5 of @src
* @dst: Pointer to 16 bytes of allocated memory
@@ -74,11 +62,8 @@ static int ecryptfs_calculate_md5(char *dst,
struct ecryptfs_crypt_stat *crypt_stat,
char *src, int len)
{
- struct crypto_shash *tfm;
- int rc = 0;
+ int rc = crypto_shash_tfm_digest(crypt_stat->hash_tfm, src, len, dst);
- tfm = crypt_stat->hash_tfm;
- rc = ecryptfs_hash_digest(tfm, src, len, dst);
if (rc) {
printk(KERN_ERR
"%s: Error computing crypto hash; rc = [%d]\n",
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index 5bc4dcd8fc03..8c4ea5003ef8 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -12,7 +12,6 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
-#include <linux/cryptohash.h>
#include <linux/pagemap.h>
#include <linux/unicode.h>
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index a8fb18609146..9e40dfecf1b1 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -127,16 +127,8 @@ nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
goto out;
}
- {
- SHASH_DESC_ON_STACK(desc, tfm);
-
- desc->tfm = tfm;
-
- status = crypto_shash_digest(desc, clname->data, clname->len,
- cksum.data);
- shash_desc_zero(desc);
- }
-
+ status = crypto_shash_tfm_digest(tfm, clname->data, clname->len,
+ cksum.data);
if (status)
goto out;
@@ -1148,7 +1140,6 @@ nfsd4_cld_create_v2(struct nfs4_client *clp)
struct crypto_shash *tfm = cn->cn_tfm;
struct xdr_netobj cksum;
char *principal = NULL;
- SHASH_DESC_ON_STACK(desc, tfm);
/* Don't upcall if it's already stored */
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
@@ -1170,16 +1161,14 @@ nfsd4_cld_create_v2(struct nfs4_client *clp)
else if (clp->cl_cred.cr_principal)
principal = clp->cl_cred.cr_principal;
if (principal) {
- desc->tfm = tfm;
cksum.len = crypto_shash_digestsize(tfm);
cksum.data = kmalloc(cksum.len, GFP_KERNEL);
if (cksum.data == NULL) {
ret = -ENOMEM;
goto out;
}
- ret = crypto_shash_digest(desc, principal, strlen(principal),
- cksum.data);
- shash_desc_zero(desc);
+ ret = crypto_shash_tfm_digest(tfm, principal, strlen(principal),
+ cksum.data);
if (ret) {
kfree(cksum.data);
goto out;
@@ -1343,7 +1332,6 @@ nfsd4_cld_check_v2(struct nfs4_client *clp)
struct crypto_shash *tfm = cn->cn_tfm;
struct xdr_netobj cksum;
char *principal = NULL;
- SHASH_DESC_ON_STACK(desc, tfm);
/* did we already find that this client is stable? */
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
@@ -1381,14 +1369,12 @@ found:
principal = clp->cl_cred.cr_principal;
if (principal == NULL)
return -ENOENT;
- desc->tfm = tfm;
cksum.len = crypto_shash_digestsize(tfm);
cksum.data = kmalloc(cksum.len, GFP_KERNEL);
if (cksum.data == NULL)
return -ENOENT;
- status = crypto_shash_digest(desc, principal, strlen(principal),
- cksum.data);
- shash_desc_zero(desc);
+ status = crypto_shash_tfm_digest(tfm, principal,
+ strlen(principal), cksum.data);
if (status) {
kfree(cksum.data);
return -ENOENT;
diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
index f985a3fbbb36..cc5c0abfd536 100644
--- a/fs/ubifs/auth.c
+++ b/fs/ubifs/auth.c
@@ -31,15 +31,9 @@ int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node,
u8 *hash)
{
const struct ubifs_ch *ch = node;
- SHASH_DESC_ON_STACK(shash, c->hash_tfm);
- int err;
-
- shash->tfm = c->hash_tfm;
- err = crypto_shash_digest(shash, node, le32_to_cpu(ch->len), hash);
- if (err < 0)
- return err;
- return 0;
+ return crypto_shash_tfm_digest(c->hash_tfm, node, le32_to_cpu(ch->len),
+ hash);
}
/**
@@ -53,15 +47,7 @@ int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node,
static int ubifs_hash_calc_hmac(const struct ubifs_info *c, const u8 *hash,
u8 *hmac)
{
- SHASH_DESC_ON_STACK(shash, c->hmac_tfm);
- int err;
-
- shash->tfm = c->hmac_tfm;
-
- err = crypto_shash_digest(shash, hash, c->hash_len, hmac);
- if (err < 0)
- return err;
- return 0;
+ return crypto_shash_tfm_digest(c->hmac_tfm, hash, c->hash_len, hmac);
}
/**
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
index 52a85c01397e..911d0555b9f2 100644
--- a/fs/ubifs/master.c
+++ b/fs/ubifs/master.c
@@ -68,12 +68,9 @@ static int mst_node_check_hash(const struct ubifs_info *c,
u8 calc[UBIFS_MAX_HASH_LEN];
const void *node = mst;
- SHASH_DESC_ON_STACK(shash, c->hash_tfm);
-
- shash->tfm = c->hash_tfm;
-
- crypto_shash_digest(shash, node + sizeof(struct ubifs_ch),
- UBIFS_MST_NODE_SZ - sizeof(struct ubifs_ch), calc);
+ crypto_shash_tfm_digest(c->hash_tfm, node + sizeof(struct ubifs_ch),
+ UBIFS_MST_NODE_SZ - sizeof(struct ubifs_ch),
+ calc);
if (ubifs_check_hash(c, expected, calc))
return -EPERM;
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 01fcf7975047..b69ffac7e415 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -558,7 +558,7 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
return data == 0xFFFFFFFF;
}
-/* authenticate_sleb_hash and authenticate_sleb_hmac are split out for stack usage */
+/* authenticate_sleb_hash is split out for stack usage */
static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_hash, u8 *hash)
{
SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
@@ -569,15 +569,6 @@ static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_h
return crypto_shash_final(hash_desc, hash);
}
-static int authenticate_sleb_hmac(struct ubifs_info *c, u8 *hash, u8 *hmac)
-{
- SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);
-
- hmac_desc->tfm = c->hmac_tfm;
-
- return crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac);
-}
-
/**
* authenticate_sleb - authenticate one scan LEB
* @c: UBIFS file-system description object
@@ -618,7 +609,8 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
if (err)
goto out;
- err = authenticate_sleb_hmac(c, hash, hmac);
+ err = crypto_shash_tfm_digest(c->hmac_tfm, hash,
+ c->hash_len, hmac);
if (err)
goto out;
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index d873f999b334..2b4d2b06ccbd 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -157,7 +157,7 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_ACOMPRESS;
- mask |= CRYPTO_ALG_TYPE_MASK;
+ mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
return crypto_has_alg(alg_name, type, mask);
}
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index e115f9215ed5..00a9cf98debe 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -125,6 +125,8 @@ int crypto_inst_setname(struct crypto_instance *inst, const char *name,
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
struct crypto_async_request *request);
+void crypto_enqueue_request_head(struct crypto_queue *queue,
+ struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 8c9af21efce1..c4165126937e 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -184,11 +184,7 @@ static inline size_t drbg_max_addtl(struct drbg_state *drbg)
static inline size_t drbg_max_requests(struct drbg_state *drbg)
{
/* SP800-90A requires 2**48 maximum requests before reseeding */
-#if (__BITS_PER_LONG == 32)
- return SIZE_MAX;
-#else
- return (1UL<<48);
-#endif
+ return (1<<20);
}
/*
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index e29cd67f93c7..3f06e40d063a 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -24,7 +24,9 @@
* @idling: the engine is entering idle state
* @busy: request pump is busy
* @running: the engine is running
- * @cur_req_prepared: current request is prepared
+ * @retry_support: indication that the hardware allows re-execution
+ * of a failed backlog request; such a request is enqueued back to the
+ * crypto-engine, in head position, to keep order
* @list: link with the global crypto engine list
* @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
@@ -35,6 +37,8 @@
* @unprepare_crypt_hardware: there are currently no more requests on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
+ * @do_batch_requests: execute a batch of requests. Depends on
+ * multiple-request support.
* @kworker: kthread worker struct for request pump
* @pump_requests: work struct for scheduling work to the request pump
* @priv_data: the engine private data
@@ -45,7 +49,8 @@ struct crypto_engine {
bool idling;
bool busy;
bool running;
- bool cur_req_prepared;
+
+ bool retry_support;
struct list_head list;
spinlock_t queue_lock;
@@ -56,6 +61,8 @@ struct crypto_engine {
int (*prepare_crypt_hardware)(struct crypto_engine *engine);
int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*do_batch_requests)(struct crypto_engine *engine);
+
struct kthread_worker *kworker;
struct kthread_work pump_requests;
@@ -102,6 +109,10 @@ void crypto_finalize_skcipher_request(struct crypto_engine *engine,
int crypto_engine_start(struct crypto_engine *engine);
int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
+struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
+ bool retry_support,
+ int (*cbk_do_batch)(struct crypto_engine *engine),
+ bool rt, int qlen);
int crypto_engine_exit(struct crypto_engine *engine);
#endif /* _CRYPTO_ENGINE_H */
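
A rough sketch of how a driver might adopt the new allocator; my_dev, my_do_batch and the queue length of 16 are illustrative assumptions, not from this series:

/* Hypothetical probe-time setup: an engine with backlog-retry support,
 * a batch-kick callback, a non-realtime pump thread, queue length 16. */
static int my_do_batch(struct crypto_engine *engine)
{
	/* tell the hardware to launch every descriptor queued so far */
	return 0;
}

struct crypto_engine *engine;

engine = crypto_engine_alloc_init_and_set(my_dev, true, my_do_batch,
					  false, 16);
if (!engine)
	return -ENOMEM;
return crypto_engine_start(engine);
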
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index cee446c59497..4829d2367eda 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -856,6 +856,25 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out);
/**
+ * crypto_shash_tfm_digest() - calculate message digest for buffer
+ * @tfm: hash transformation object
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This is a simplified version of crypto_shash_digest() for users who don't
+ * want to allocate their own hash descriptor (shash_desc). Instead,
+ * crypto_shash_tfm_digest() takes a hash transformation object (crypto_shash)
+ * directly, and it allocates a hash descriptor on the stack internally.
+ * Note that this stack allocation may be fairly large.
+ *
+ * Context: Any context.
+ * Return: 0 on success; < 0 if an error occurred.
+ */
+int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
+ unsigned int len, u8 *out);
+
+/**
* crypto_shash_export() - extract operational state for message digest
* @desc: reference to the operational state handle whose state is exported
* @out: output buffer of sufficient size that can hold the hash state
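
The conversions throughout this series all follow the same mechanical pattern; a minimal before/after sketch (tfm, data, len and out are placeholders):

/* before: stack descriptor managed by the caller */
SHASH_DESC_ON_STACK(desc, tfm);
desc->tfm = tfm;
err = crypto_shash_digest(desc, data, len, out);
shash_desc_zero(desc);

/* after: the descriptor lives inside the helper */
err = crypto_shash_tfm_digest(tfm, data, len, out);
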
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 5c2132c71900..10753ff71d46 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -114,6 +114,16 @@ extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash);
/*
+ * An implementation of SHA-1's compression function. Don't use in new code!
+ * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
+ * the correct way to hash something with SHA-1 (use crypto_shash instead).
+ */
+#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4)
+#define SHA1_WORKSPACE_WORDS 16
+void sha1_init(__u32 *buf);
+void sha1_transform(__u32 *digest, const char *data, __u32 *W);
+
+/*
* Stand-alone implementation of the SHA256 algorithm. It is designed to
* have as few dependencies as possible so it can be used in the
* kexec_file purgatory. In other cases you should generally use the
@@ -123,7 +133,7 @@ extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
* For details see lib/crypto/sha256.c
*/
-static inline int sha256_init(struct sha256_state *sctx)
+static inline void sha256_init(struct sha256_state *sctx)
{
sctx->state[0] = SHA256_H0;
sctx->state[1] = SHA256_H1;
@@ -134,14 +144,11 @@ static inline int sha256_init(struct sha256_state *sctx)
sctx->state[6] = SHA256_H6;
sctx->state[7] = SHA256_H7;
sctx->count = 0;
-
- return 0;
}
-extern int sha256_update(struct sha256_state *sctx, const u8 *input,
- unsigned int length);
-extern int sha256_final(struct sha256_state *sctx, u8 *hash);
+void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
+void sha256_final(struct sha256_state *sctx, u8 *out);
-static inline int sha224_init(struct sha256_state *sctx)
+static inline void sha224_init(struct sha256_state *sctx)
{
sctx->state[0] = SHA224_H0;
sctx->state[1] = SHA224_H1;
@@ -152,11 +159,8 @@ static inline int sha224_init(struct sha256_state *sctx)
sctx->state[6] = SHA224_H6;
sctx->state[7] = SHA224_H7;
sctx->count = 0;
-
- return 0;
}
-extern int sha224_update(struct sha256_state *sctx, const u8 *input,
- unsigned int length);
-extern int sha224_final(struct sha256_state *sctx, u8 *hash);
+void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
+void sha224_final(struct sha256_state *sctx, u8 *out);
#endif
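
With the init/update/final helpers now returning void, a minimal use of the standalone interface looks like this (buffer names are illustrative):

struct sha256_state sctx;
u8 digest[SHA256_DIGEST_SIZE];

sha256_init(&sctx);
sha256_update(&sctx, data, len);
sha256_final(&sctx, digest);	/* also zeroizes sctx, see lib/crypto/sha256.c */
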
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
index cea60cff80bd..6ded110783ae 100644
--- a/include/crypto/sha256_base.h
+++ b/include/crypto/sha256_base.h
@@ -22,14 +22,16 @@ static inline int sha224_base_init(struct shash_desc *desc)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- return sha224_init(sctx);
+ sha224_init(sctx);
+ return 0;
}
static inline int sha256_base_init(struct shash_desc *desc)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- return sha256_init(sctx);
+ sha256_init(sctx);
+ return 0;
}
static inline int sha256_base_do_update(struct shash_desc *desc,
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
deleted file mode 100644
index f6ba4c3e60d7..000000000000
--- a/include/linux/cryptohash.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __CRYPTOHASH_H
-#define __CRYPTOHASH_H
-
-#include <uapi/linux/types.h>
-
-#define SHA_DIGEST_WORDS 5
-#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8)
-#define SHA_WORKSPACE_WORDS 16
-
-void sha_init(__u32 *buf);
-void sha_transform(__u32 *digest, const char *data, __u32 *W);
-
-#endif
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 9b5aa5c483cc..ec45fd7992c9 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -16,11 +16,11 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
-#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
+#include <crypto/sha.h>
#include <net/sch_generic.h>
@@ -746,7 +746,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
return round_up(bpf_prog_insn_size(prog) +
- sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
+ sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
}
static inline unsigned int bpf_prog_size(unsigned int proglen)
diff --git a/include/linux/padata.h b/include/linux/padata.h
index a0d8b41850b2..693cae9bfe66 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -139,7 +139,8 @@ struct padata_shell {
/**
* struct padata_instance - The overall control structure.
*
- * @node: Used by CPU hotplug.
+ * @cpu_online_node: Linkage for CPU online callback.
+ * @cpu_dead_node: Linkage for CPU offline callback.
* @parallel_wq: The workqueue used for parallel work.
* @serial_wq: The workqueue used for serial work.
* @pslist: List of padata_shell objects attached to this instance.
@@ -150,7 +151,8 @@ struct padata_shell {
* @flags: padata flags.
*/
struct padata_instance {
- struct hlist_node node;
+ struct hlist_node cpu_online_node;
+ struct hlist_node cpu_dead_node;
struct workqueue_struct *parallel_wq;
struct workqueue_struct *serial_wq;
struct list_head pslist;
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 5167bf2bfc75..7fbc8679145c 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -100,6 +100,8 @@ struct sev_data_init {
u32 tmr_len; /* In */
} __packed;
+#define SEV_INIT_FLAGS_SEV_ES 0x01
+
/**
* struct sev_data_pek_csr - PEK_CSR command parameters
*
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6f8e60c6fbc7..cf5649a2e795 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -23,7 +23,6 @@
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
-#include <linux/cryptohash.h>
#include <linux/kref.h>
#include <linux/ktime.h>
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 0549a5c622bf..91b4c63d5cbf 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -83,6 +83,8 @@ struct sev_user_data_status {
__u32 guest_count; /* Out */
} __packed;
+#define SEV_STATUS_FLAGS_CONFIG_ES 0x0100
+
/**
* struct sev_user_data_pek_csr - PEK_CSR command parameters
*
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 916f5132a984..14aa1f74dd10 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -262,10 +262,10 @@ void __bpf_prog_free(struct bpf_prog *fp)
int bpf_prog_calc_tag(struct bpf_prog *fp)
{
- const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
+ const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
u32 raw_size = bpf_prog_tag_scratch_size(fp);
- u32 digest[SHA_DIGEST_WORDS];
- u32 ws[SHA_WORKSPACE_WORDS];
+ u32 digest[SHA1_DIGEST_WORDS];
+ u32 ws[SHA1_WORKSPACE_WORDS];
u32 i, bsize, psize, blocks;
struct bpf_insn *dst;
bool was_ld_map;
@@ -277,7 +277,7 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
if (!raw)
return -ENOMEM;
- sha_init(digest);
+ sha1_init(digest);
memset(ws, 0, sizeof(ws));
/* We need to take out the map fd for the digest calculation
@@ -308,8 +308,8 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
memset(&raw[psize], 0, raw_size - psize);
raw[psize++] = 0x80;
- bsize = round_up(psize, SHA_MESSAGE_BYTES);
- blocks = bsize / SHA_MESSAGE_BYTES;
+ bsize = round_up(psize, SHA1_BLOCK_SIZE);
+ blocks = bsize / SHA1_BLOCK_SIZE;
todo = raw;
if (bsize - psize >= sizeof(__be64)) {
bits = (__be64 *)(todo + bsize - sizeof(__be64));
@@ -320,12 +320,12 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
*bits = cpu_to_be64((psize - 1) << 3);
while (blocks--) {
- sha_transform(digest, todo, ws);
- todo += SHA_MESSAGE_BYTES;
+ sha1_transform(digest, todo, ws);
+ todo += SHA1_BLOCK_SIZE;
}
result = (__force __be32 *)digest;
- for (i = 0; i < SHA_DIGEST_WORDS; i++)
+ for (i = 0; i < SHA1_DIGEST_WORDS; i++)
result[i] = cpu_to_be32(digest[i]);
memcpy(fp->tag, result, sizeof(fp->tag));
diff --git a/kernel/padata.c b/kernel/padata.c
index a6afa12fb75e..aae789896616 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -703,7 +703,7 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
struct padata_instance *pinst;
int ret;
- pinst = hlist_entry_safe(node, struct padata_instance, node);
+ pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
if (!pinst_has_cpu(pinst, cpu))
return 0;
@@ -718,7 +718,7 @@ static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
struct padata_instance *pinst;
int ret;
- pinst = hlist_entry_safe(node, struct padata_instance, node);
+ pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
if (!pinst_has_cpu(pinst, cpu))
return 0;
@@ -734,8 +734,9 @@ static enum cpuhp_state hp_online;
static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
- cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node);
- cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
+ cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
+ &pinst->cpu_dead_node);
+ cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif
WARN_ON(!list_empty(&pinst->pslist));
@@ -939,9 +940,10 @@ static struct padata_instance *padata_alloc(const char *name,
mutex_init(&pinst->lock);
#ifdef CONFIG_HOTPLUG_CPU
- cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
+ cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
+ &pinst->cpu_online_node);
cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
- &pinst->node);
+ &pinst->cpu_dead_node);
#endif
put_online_cpus();
diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c
index 65ead6b0c7e0..4ccbec442469 100644
--- a/lib/crypto/chacha.c
+++ b/lib/crypto/chacha.c
@@ -10,7 +10,6 @@
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/string.h>
-#include <linux/cryptohash.h>
#include <asm/unaligned.h>
#include <crypto/chacha.h>
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 66cb04b0cf4e..2e621697c5c3 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -206,7 +206,7 @@ static void sha256_transform(u32 *state, const u8 *input)
memzero_explicit(W, 64 * sizeof(u32));
}
-int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
+void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
unsigned int partial, done;
const u8 *src;
@@ -232,18 +232,16 @@ int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
partial = 0;
}
memcpy(sctx->buf + partial, src, len - done);
-
- return 0;
}
EXPORT_SYMBOL(sha256_update);
-int sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
+void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
- return sha256_update(sctx, data, len);
+ sha256_update(sctx, data, len);
}
EXPORT_SYMBOL(sha224_update);
-static int __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
+static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
{
__be32 *dst = (__be32 *)out;
__be64 bits;
@@ -268,19 +266,17 @@ static int __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
/* Zeroize sensitive information. */
memset(sctx, 0, sizeof(*sctx));
-
- return 0;
}
-int sha256_final(struct sha256_state *sctx, u8 *out)
+void sha256_final(struct sha256_state *sctx, u8 *out)
{
- return __sha256_final(sctx, out, 8);
+ __sha256_final(sctx, out, 8);
}
EXPORT_SYMBOL(sha256_final);
-int sha224_final(struct sha256_state *sctx, u8 *out)
+void sha224_final(struct sha256_state *sctx, u8 *out)
{
- return __sha256_final(sctx, out, 7);
+ __sha256_final(sctx, out, 7);
}
EXPORT_SYMBOL(sha224_final);
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 891e1c3549c4..afbd99987cf8 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -653,7 +653,7 @@ do { \
************** MIPS/64 **************
***************************************/
#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC)
/*
* GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
* code below, so we special case MIPS64r6 until the compiler can do better.
diff --git a/lib/sha1.c b/lib/sha1.c
index 1d96d2c02b82..49257a915bb6 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bitops.h>
-#include <linux/cryptohash.h>
+#include <crypto/sha.h>
#include <asm/unaligned.h>
/*
@@ -64,22 +64,24 @@
#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
/**
- * sha_transform - single block SHA1 transform
+ * sha1_transform - single block SHA1 transform (deprecated)
*
* @digest: 160 bit digest to update
* @data: 512 bits of data to hash
* @array: 16 words of workspace (see note)
*
- * This function generates a SHA1 digest for a single 512-bit block.
- * Be warned, it does not handle padding and message digest, do not
- * confuse it with the full FIPS 180-1 digest algorithm for variable
- * length messages.
+ * This function executes SHA-1's internal compression function. It updates the
+ * 160-bit internal state (@digest) with a single 512-bit data block (@data).
+ *
+ * Don't use this function. SHA-1 is no longer considered secure. And even if
+ * you do have to use SHA-1, this isn't the correct way to hash something with
+ * SHA-1 as this doesn't handle padding and finalization.
*
* Note: If the hash is security sensitive, the caller should be sure
* to clear the workspace. This is left to the caller to avoid
* unnecessary clears between chained hashing operations.
*/
-void sha_transform(__u32 *digest, const char *data, __u32 *array)
+void sha1_transform(__u32 *digest, const char *data, __u32 *array)
{
__u32 A, B, C, D, E;
@@ -185,13 +187,13 @@ void sha_transform(__u32 *digest, const char *data, __u32 *array)
digest[3] += D;
digest[4] += E;
}
-EXPORT_SYMBOL(sha_transform);
+EXPORT_SYMBOL(sha1_transform);
/**
- * sha_init - initialize the vectors for a SHA1 digest
+ * sha1_init - initialize the vectors for a SHA1 digest
* @buf: vector to initialize
*/
-void sha_init(__u32 *buf)
+void sha1_init(__u32 *buf)
{
buf[0] = 0x67452301;
buf[1] = 0xefcdab89;
@@ -199,4 +201,4 @@ void sha_init(__u32 *buf)
buf[3] = 0x10325476;
buf[4] = 0xc3d2e1f0;
}
-EXPORT_SYMBOL(sha_init);
+EXPORT_SYMBOL(sha1_init);
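
A minimal sketch of the renamed interface, mirroring the bpf_prog_calc_tag() conversion earlier in this diff and the workspace-clearing note in the kerneldoc; block is assumed to be one full SHA1_BLOCK_SIZE chunk:

u32 digest[SHA1_DIGEST_WORDS];
u32 ws[SHA1_WORKSPACE_WORDS];

sha1_init(digest);
sha1_transform(digest, block, ws);	/* one 512-bit block; no padding done */
memzero_explicit(ws, sizeof(ws));	/* caller clears workspace if input is sensitive */
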
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 1476a91ce935..d022f126eb02 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -170,7 +170,6 @@ static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m,
size_t len, u8 mac[16])
{
uint8_t tmp[16], mac_msb[16], msg_msb[CMAC_MSG_MAX];
- SHASH_DESC_ON_STACK(desc, tfm);
int err;
if (len > CMAC_MSG_MAX)
@@ -181,8 +180,6 @@ static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m,
return -EINVAL;
}
- desc->tfm = tfm;
-
/* Swap key and message from LSB to MSB */
swap_buf(k, tmp, 16);
swap_buf(m, msg_msb, len);
@@ -196,8 +193,7 @@ static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m,
return err;
}
- err = crypto_shash_digest(desc, msg_msb, len, mac_msb);
- shash_desc_zero(desc);
+ err = crypto_shash_tfm_digest(tfm, msg_msb, len, mac_msb);
if (err) {
BT_ERR("Hash computation error %d", err);
return err;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 7b6b1d2c3d10..b5bc680d4755 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -5,7 +5,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/cryptohash.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/random.h>
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 24e319dfb510..f131cedf5ba6 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3222,11 +3222,11 @@ static int ipv6_generate_stable_address(struct in6_addr *address,
const struct inet6_dev *idev)
{
static DEFINE_SPINLOCK(lock);
- static __u32 digest[SHA_DIGEST_WORDS];
- static __u32 workspace[SHA_WORKSPACE_WORDS];
+ static __u32 digest[SHA1_DIGEST_WORDS];
+ static __u32 workspace[SHA1_WORKSPACE_WORDS];
static union {
- char __data[SHA_MESSAGE_BYTES];
+ char __data[SHA1_BLOCK_SIZE];
struct {
struct in6_addr secret;
__be32 prefix[2];
@@ -3251,7 +3251,7 @@ static int ipv6_generate_stable_address(struct in6_addr *address,
retry:
spin_lock_bh(&lock);
- sha_init(digest);
+ sha1_init(digest);
memset(&data, 0, sizeof(data));
memset(workspace, 0, sizeof(workspace));
memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
@@ -3260,7 +3260,7 @@ retry:
data.secret = secret;
data.dad_count = dad_count;
- sha_transform(digest, data.__data, workspace);
+ sha1_transform(digest, data.__data, workspace);
temp = *address;
temp.s6_addr32[2] = (__force __be32)digest[0];
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index ffcfcd2b128f..85dddfe3a2c6 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -34,7 +34,6 @@
#include <net/addrconf.h>
#include <net/xfrm.h>
-#include <linux/cryptohash.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <net/seg6.h>
diff --git a/net/mptcp/crypto.c b/net/mptcp/crypto.c
index 0f5a414a9366..3d980713a9e2 100644
--- a/net/mptcp/crypto.c
+++ b/net/mptcp/crypto.c
@@ -59,7 +59,7 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac)
put_unaligned_be64(key2, key2be);
/* Generate key xored with ipad */
- memset(input, 0x36, SHA_MESSAGE_BYTES);
+ memset(input, 0x36, SHA256_BLOCK_SIZE);
for (i = 0; i < 8; i++)
input[i] ^= key1be[i];
for (i = 0; i < 8; i++)
@@ -76,7 +76,7 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac)
sha256_final(&state, &input[SHA256_BLOCK_SIZE]);
/* Prepare second part of hmac */
- memset(input, 0x5C, SHA_MESSAGE_BYTES);
+ memset(input, 0x5C, SHA256_BLOCK_SIZE);
for (i = 0; i < 8; i++)
input[i] ^= key1be[i];
for (i = 0; i < 8; i++)
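
The two memset()s above are the standard RFC 2104 HMAC construction; schematically, with the key block already padded to SHA256_BLOCK_SIZE (names here are illustrative, not mptcp's):

/* inner hash: H((key ^ ipad) || msg) */
sha256_init(&state);
sha256_update(&state, ipad_block, SHA256_BLOCK_SIZE);	/* key ^ 0x36.. */
sha256_update(&state, msg, len);
sha256_final(&state, inner);

/* outer hash: H((key ^ opad) || inner) */
sha256_init(&state);
sha256_update(&state, opad_block, SHA256_BLOCK_SIZE);	/* key ^ 0x5c.. */
sha256_update(&state, inner, SHA256_DIGEST_SIZE);
sha256_final(&state, hmac);
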
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 4278764d82b8..83e97e8892e0 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -741,14 +741,8 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
if (crypto_shash_setkey(tfm, &asoc_key->data[0], asoc_key->len))
goto free;
- {
- SHASH_DESC_ON_STACK(desc, tfm);
-
- desc->tfm = tfm;
- crypto_shash_digest(desc, (u8 *)auth,
- end - (unsigned char *)auth, digest);
- shash_desc_zero(desc);
- }
+ crypto_shash_tfm_digest(tfm, (u8 *)auth, end - (unsigned char *)auth,
+ digest);
free:
if (free_key)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index f7cb0b7faec2..47910470e532 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1670,17 +1670,14 @@ static struct sctp_cookie_param *sctp_pack_cookie(
ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);
if (sctp_sk(ep->base.sk)->hmac) {
- SHASH_DESC_ON_STACK(desc, sctp_sk(ep->base.sk)->hmac);
+ struct crypto_shash *tfm = sctp_sk(ep->base.sk)->hmac;
int err;
/* Sign the message. */
- desc->tfm = sctp_sk(ep->base.sk)->hmac;
-
- err = crypto_shash_setkey(desc->tfm, ep->secret_key,
+ err = crypto_shash_setkey(tfm, ep->secret_key,
sizeof(ep->secret_key)) ?:
- crypto_shash_digest(desc, (u8 *)&cookie->c, bodysize,
- cookie->signature);
- shash_desc_zero(desc);
+ crypto_shash_tfm_digest(tfm, (u8 *)&cookie->c, bodysize,
+ cookie->signature);
if (err)
goto free_cookie;
}
@@ -1741,17 +1738,13 @@ struct sctp_association *sctp_unpack_cookie(
/* Check the signature. */
{
- SHASH_DESC_ON_STACK(desc, sctp_sk(ep->base.sk)->hmac);
+ struct crypto_shash *tfm = sctp_sk(ep->base.sk)->hmac;
int err;
- desc->tfm = sctp_sk(ep->base.sk)->hmac;
-
- err = crypto_shash_setkey(desc->tfm, ep->secret_key,
+ err = crypto_shash_setkey(tfm, ep->secret_key,
sizeof(ep->secret_key)) ?:
- crypto_shash_digest(desc, (u8 *)bear_cookie, bodysize,
- digest);
- shash_desc_zero(desc);
-
+ crypto_shash_tfm_digest(tfm, (u8 *)bear_cookie, bodysize,
+ digest);
if (err) {
*error = -SCTP_IERROR_NOMEM;
goto fail;
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index f6797ba44bf7..14cf81d1a30b 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -323,19 +323,6 @@ error:
return ukey;
}
-static int calc_hash(struct crypto_shash *tfm, u8 *digest,
- const u8 *buf, unsigned int buflen)
-{
- SHASH_DESC_ON_STACK(desc, tfm);
- int err;
-
- desc->tfm = tfm;
-
- err = crypto_shash_digest(desc, buf, buflen, digest);
- shash_desc_zero(desc);
- return err;
-}
-
static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
const u8 *buf, unsigned int buflen)
{
@@ -351,7 +338,7 @@ static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
err = crypto_shash_setkey(tfm, key, keylen);
if (!err)
- err = calc_hash(tfm, digest, buf, buflen);
+ err = crypto_shash_tfm_digest(tfm, buf, buflen, digest);
crypto_free_shash(tfm);
return err;
}
@@ -381,7 +368,8 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
memcpy(derived_buf + strlen(derived_buf) + 1, master_key,
master_keylen);
- ret = calc_hash(hash_tfm, derived_key, derived_buf, derived_buf_len);
+ ret = crypto_shash_tfm_digest(hash_tfm, derived_buf, derived_buf_len,
+ derived_key);
kzfree(derived_buf);
return ret;
}