From 8130834affc609183184c0c19bc2136609eb2991 Mon Sep 17 00:00:00 2001 From: coolsnowwolf Date: Wed, 19 Oct 2022 20:03:59 +0800 Subject: [PATCH] rockchip: add rockchip-crypto support for rk3568/rk3588 --- .../libs/libselinux/bcm27xx-userland/Makefile | 71 + target/linux/rockchip/armv8/config-6.0 | 15 + ...use-dev_err-for-error-message-about-.patch | 26 + ...ip-do-not-use-uninitialized-variable.patch | 24 + ...ip-do-not-do-custom-power-management.patch | 94 + ...to-rockchip-fix-privete-private-typo.patch | 24 + ...-rockchip-do-not-store-mode-globally.patch | 262 +++ ...pto-rockchip-add-fallback-for-cipher.patch | 244 +++ ...ypto-rockchip-add-fallback-for-ahash.patch | 75 + ...to-rockchip-better-handle-cipher-key.patch | 81 + ...rockchip-remove-non-aligned-handling.patch | 262 +++ ...ckchip-rework-by-using-crypto_engine.patch | 881 +++++++++ .../177-crypto-rockchip-rewrite-type.patch | 174 ++ .../178-crypto-rockchip-add-debugfs.patch | 232 +++ .../179-crypto-rockchip-introduce-PM.patch | 181 ++ ...pto-rockchip-handle-reset-also-in-PM.patch | 66 + ...use-clk_bulk-to-simplify-clock-manag.patch | 118 ++ ...to-rockchip-add-myself-as-maintainer.patch | 30 + ...rypto-rockchip-use-read_poll_timeout.patch | 54 + .../184-crypto-rockchip-fix-style-issue.patch | 55 + ...ypto-rockchip-add-support-for-rk3328.patch | 23 + ...chip-rename-ablk-functions-to-cipher.patch | 119 ++ ...ckchip-rework-rk_handle_req-function.patch | 180 ++ ...use-a-rk_crypto_info-variable-instea.patch | 172 ++ ...use-the-rk_crypto_info-given-as-para.patch | 34 + ...ypto-convert-rockchip-crypto-to-YAML.patch | 115 ++ ...s-crypto-rockchip-add-new-compatible.patch | 114 ++ ...2-clk-rk3399-use-proper-crypto0-name.patch | 37 + ...-dts-rockchip-add-rk3328-crypto-node.patch | 33 + ...-dts-rockchip-rk3399-add-crypto-node.patch | 43 + ...store-crypto_info-in-request-context.patch | 124 ++ ...Check-for-clocks-numbers-and-their-f.patch | 165 ++ ...rk_ahash_reg_init-use-crypto_info-fr.patch | 40 + 
...p-permit-to-have-more-than-one-reset.patch | 24 + ...ypto-rockchip-Add-support-for-RK3399.patch | 464 +++++ ...move-kconfig-to-its-dedicated-direct.patch | 105 ++ ...to-add-support-for-rockchip-crypto-r.patch | 89 + ...new-dt-binding-doc-to-the-right-entr.patch | 22 + ...support-the-new-crypto-IP-for-rk3568.patch | 1633 +++++++++++++++++ ...205-ARM64-dts-rk3568-add-crypto-node.patch | 36 + .../patches-6.0/206-fix-build-crypto.patch | 21 + ...p-rk356x-add-support-for-new-boards.patch} | 0 ...dd-support-for-OrangePi-R1-Plus-LTS.patch} | 0 ...dd-support-for-FriendlyARM-NanoPi-R.patch} | 0 ...support-for-FriendlyARM-NanoPi-Neo3.patch} | 0 45 files changed, 6562 insertions(+) create mode 100644 package/libs/libselinux/bcm27xx-userland/Makefile create mode 100644 target/linux/rockchip/patches-6.0/167-crypto-rockchip-use-dev_err-for-error-message-about-.patch create mode 100644 target/linux/rockchip/patches-6.0/168-crypto-rockchip-do-not-use-uninitialized-variable.patch create mode 100644 target/linux/rockchip/patches-6.0/169-crypto-rockchip-do-not-do-custom-power-management.patch create mode 100644 target/linux/rockchip/patches-6.0/170-crypto-rockchip-fix-privete-private-typo.patch create mode 100644 target/linux/rockchip/patches-6.0/171-crypto-rockchip-do-not-store-mode-globally.patch create mode 100644 target/linux/rockchip/patches-6.0/172-crypto-rockchip-add-fallback-for-cipher.patch create mode 100644 target/linux/rockchip/patches-6.0/173-crypto-rockchip-add-fallback-for-ahash.patch create mode 100644 target/linux/rockchip/patches-6.0/174-crypto-rockchip-better-handle-cipher-key.patch create mode 100644 target/linux/rockchip/patches-6.0/175-crypto-rockchip-remove-non-aligned-handling.patch create mode 100644 target/linux/rockchip/patches-6.0/176-crypto-rockchip-rework-by-using-crypto_engine.patch create mode 100644 target/linux/rockchip/patches-6.0/177-crypto-rockchip-rewrite-type.patch create mode 100644 
target/linux/rockchip/patches-6.0/178-crypto-rockchip-add-debugfs.patch create mode 100644 target/linux/rockchip/patches-6.0/179-crypto-rockchip-introduce-PM.patch create mode 100644 target/linux/rockchip/patches-6.0/180-crypto-rockchip-handle-reset-also-in-PM.patch create mode 100644 target/linux/rockchip/patches-6.0/181-crypto-rockchip-use-clk_bulk-to-simplify-clock-manag.patch create mode 100644 target/linux/rockchip/patches-6.0/182-crypto-rockchip-add-myself-as-maintainer.patch create mode 100644 target/linux/rockchip/patches-6.0/183-crypto-rockchip-use-read_poll_timeout.patch create mode 100644 target/linux/rockchip/patches-6.0/184-crypto-rockchip-fix-style-issue.patch create mode 100644 target/linux/rockchip/patches-6.0/185-crypto-rockchip-add-support-for-rk3328.patch create mode 100644 target/linux/rockchip/patches-6.0/186-crypto-rockchip-rename-ablk-functions-to-cipher.patch create mode 100644 target/linux/rockchip/patches-6.0/187-crypto-rockchip-rework-rk_handle_req-function.patch create mode 100644 target/linux/rockchip/patches-6.0/188-crypto-rockchip-use-a-rk_crypto_info-variable-instea.patch create mode 100644 target/linux/rockchip/patches-6.0/189-crypto-rockchip-use-the-rk_crypto_info-given-as-para.patch create mode 100644 target/linux/rockchip/patches-6.0/190-dt-bindings-crypto-convert-rockchip-crypto-to-YAML.patch create mode 100644 target/linux/rockchip/patches-6.0/191-dt-bindings-crypto-rockchip-add-new-compatible.patch create mode 100644 target/linux/rockchip/patches-6.0/192-clk-rk3399-use-proper-crypto0-name.patch create mode 100644 target/linux/rockchip/patches-6.0/193-arm64-dts-rockchip-add-rk3328-crypto-node.patch create mode 100644 target/linux/rockchip/patches-6.0/194-arm64-dts-rockchip-rk3399-add-crypto-node.patch create mode 100644 target/linux/rockchip/patches-6.0/195-crypto-rockchip-store-crypto_info-in-request-context.patch create mode 100644 
target/linux/rockchip/patches-6.0/196-crypto-rockchip-Check-for-clocks-numbers-and-their-f.patch create mode 100644 target/linux/rockchip/patches-6.0/197-crypto-rockchip-rk_ahash_reg_init-use-crypto_info-fr.patch create mode 100644 target/linux/rockchip/patches-6.0/198-crypto-rockchip-permit-to-have-more-than-one-reset.patch create mode 100644 target/linux/rockchip/patches-6.0/199-crypto-rockchip-Add-support-for-RK3399.patch create mode 100644 target/linux/rockchip/patches-6.0/201-crypto-rockchip-move-kconfig-to-its-dedicated-direct.patch create mode 100644 target/linux/rockchip/patches-6.0/202-dt-bindings-crypto-add-support-for-rockchip-crypto-r.patch create mode 100644 target/linux/rockchip/patches-6.0/203-MAINTAINERS-add-new-dt-binding-doc-to-the-right-entr.patch create mode 100644 target/linux/rockchip/patches-6.0/204-crypto-rockchip-support-the-new-crypto-IP-for-rk3568.patch create mode 100644 target/linux/rockchip/patches-6.0/205-ARM64-dts-rk3568-add-crypto-node.patch create mode 100644 target/linux/rockchip/patches-6.0/206-fix-build-crypto.patch rename target/linux/rockchip/patches-6.0/{210-rockchip-rk356x-add-support-for-new-boards.patch => 300-rockchip-rk356x-add-support-for-new-boards.patch} (100%) rename target/linux/rockchip/patches-6.0/{203-rockchip-rk3328-Add-support-for-OrangePi-R1-Plus-LTS.patch => 303-rockchip-rk3328-Add-support-for-OrangePi-R1-Plus-LTS.patch} (100%) rename target/linux/rockchip/patches-6.0/{204-rockchip-rk3328-Add-support-for-FriendlyARM-NanoPi-R.patch => 304-rockchip-rk3328-Add-support-for-FriendlyARM-NanoPi-R.patch} (100%) rename target/linux/rockchip/patches-6.0/{205-rockchip-rk3328-add-support-for-FriendlyARM-NanoPi-Neo3.patch => 305-rockchip-rk3328-add-support-for-FriendlyARM-NanoPi-Neo3.patch} (100%) diff --git a/package/libs/libselinux/bcm27xx-userland/Makefile b/package/libs/libselinux/bcm27xx-userland/Makefile new file mode 100644 index 000000000..098c039f1 --- /dev/null +++ 
b/package/libs/libselinux/bcm27xx-userland/Makefile @@ -0,0 +1,71 @@ +# +# Copyright (C) 2019-2020 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. +# + +include $(TOPDIR)/rules.mk + +PKG_NAME:=bcm27xx-userland +PKG_VERSION:=4a0a19b88b43e48c6b51b526b9378289fb712a4c +PKG_RELEASE:=1 + +PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz +PKG_SOURCE_URL:=https://codeload.github.com/raspberrypi/userland/tar.gz/$(PKG_VERSION)? +PKG_HASH:=0f42d48095d1f680cbe8781c2e974b76bdd0507aaef64cce8b8b472ca3a09588 + +PKG_FLAGS:=nonshared + +PKG_MAINTAINER:=Álvaro Fernández Rojas + +CMAKE_INSTALL:=1 +CMAKE_OPTIONS+=-DVMCS_INSTALL_PREFIX=/usr + +ifeq ($(ARCH),aarch64) + CMAKE_OPTIONS+=-DARM64=ON +else + CMAKE_OPTIONS+=-DARM64=OFF +endif + +include $(INCLUDE_DIR)/package.mk +include $(INCLUDE_DIR)/cmake.mk + +TAR_OPTIONS:=--strip-components 1 $(TAR_OPTIONS) +TAR_CMD=$(HOST_TAR) -C $(1) $(TAR_OPTIONS) + +define Package/bcm27xx-userland + SECTION:=utils + CATEGORY:=Utilities + DEPENDS:=@TARGET_bcm27xx + TITLE:=BCM27xx userland tools + DEFAULT:=y if TARGET_bcm27xx +endef + +define Package/bcm27xx-userland/description + BCM27xx userland tools including vcgencmd and tvservice. 
+endef + +define Package/bcm27xx-userland/install + $(INSTALL_DIR) $(1)/usr/bin + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/dtmerge $(1)/usr/bin + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/dtparam $(1)/usr/bin + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/dtoverlay $(1)/usr/bin +ifneq ($(ARCH),aarch64) + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/raspistill $(1)/usr/bin + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/raspivid $(1)/usr/bin + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/raspividyuv $(1)/usr/bin + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/raspiyuv $(1)/usr/bin +endif + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/tvservice $(1)/usr/bin + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/vcgencmd $(1)/usr/bin + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/vcmailbox $(1)/usr/bin + + $(INSTALL_DIR) $(1)/usr/include + $(CP) $(PKG_INSTALL_DIR)/usr/include/ $(1)/usr/ + + $(INSTALL_DIR) $(1)/usr/lib + $(CP) $(PKG_INSTALL_DIR)/usr/lib/ $(1)/usr/ +endef + +$(eval $(call BuildPackage,bcm27xx-userland)) diff --git a/target/linux/rockchip/armv8/config-6.0 b/target/linux/rockchip/armv8/config-6.0 index 1b12a9c5c..4d6a009fd 100644 --- a/target/linux/rockchip/armv8/config-6.0 +++ b/target/linux/rockchip/armv8/config-6.0 @@ -168,17 +168,32 @@ CONFIG_CRYPTO_AES_ARM64=y CONFIG_CRYPTO_AES_ARM64_CE=y CONFIG_CRYPTO_AES_ARM64_CE_BLK=y CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_CRC32=y CONFIG_CRYPTO_CRC32C=y CONFIG_CRYPTO_CRC64_ROCKSOFT=y CONFIG_CRYPTO_CRCT10DIF=y CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=y CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEV_ROCKCHIP=y +CONFIG_CRYPTO_DEV_ROCKCHIP2=y +# CONFIG_CRYPTO_DEV_ROCKCHIP2_DEBUG is not set +# CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG is not set +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_ENGINE=y CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_LIB_DES=y CONFIG_CRYPTO_LIB_SHA1=y CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_NULL2=y CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA512=y 
+CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y CONFIG_CRYPTO_SIMD=y CONFIG_DCACHE_WORD_ACCESS=y CONFIG_DEBUG_BUGVERBOSE=y diff --git a/target/linux/rockchip/patches-6.0/167-crypto-rockchip-use-dev_err-for-error-message-about-.patch b/target/linux/rockchip/patches-6.0/167-crypto-rockchip-use-dev_err-for-error-message-about-.patch new file mode 100644 index 000000000..587c7c8ed --- /dev/null +++ b/target/linux/rockchip/patches-6.0/167-crypto-rockchip-use-dev_err-for-error-message-about-.patch @@ -0,0 +1,26 @@ +From 5b85875f5b63720c85f525a0b94054041ca2a118 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:39 +0000 +Subject: [PATCH 17/49] crypto: rockchip: use dev_err for error message about + interrupt + +Interrupt is mandatory so the message should be printed as error. + +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -371,8 +371,7 @@ static int rk_crypto_probe(struct platfo + + crypto_info->irq = platform_get_irq(pdev, 0); + if (crypto_info->irq < 0) { +- dev_warn(crypto_info->dev, +- "control Interrupt is not available.\n"); ++ dev_err(&pdev->dev, "control Interrupt is not available.\n"); + err = crypto_info->irq; + goto err_crypto; + } diff --git a/target/linux/rockchip/patches-6.0/168-crypto-rockchip-do-not-use-uninitialized-variable.patch b/target/linux/rockchip/patches-6.0/168-crypto-rockchip-do-not-use-uninitialized-variable.patch new file mode 100644 index 000000000..b9011ce16 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/168-crypto-rockchip-do-not-use-uninitialized-variable.patch @@ -0,0 +1,24 @@ +From ccd6a7fd0b6afdfa47d3b4f6b850127031effc1f Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:40 +0000 +Subject: [PATCH 18/49] crypto: rockchip: do not use uninitialized variable + 
+crypto_info->dev is not yet set, so use pdev->dev instead. + +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -381,7 +381,7 @@ static int rk_crypto_probe(struct platfo + "rk-crypto", pdev); + + if (err) { +- dev_err(crypto_info->dev, "irq request failed.\n"); ++ dev_err(&pdev->dev, "irq request failed.\n"); + goto err_crypto; + } + diff --git a/target/linux/rockchip/patches-6.0/169-crypto-rockchip-do-not-do-custom-power-management.patch b/target/linux/rockchip/patches-6.0/169-crypto-rockchip-do-not-do-custom-power-management.patch new file mode 100644 index 000000000..f5edc895f --- /dev/null +++ b/target/linux/rockchip/patches-6.0/169-crypto-rockchip-do-not-do-custom-power-management.patch @@ -0,0 +1,94 @@ +From 4f973a49ea5f5c6a80468a1c8d28f57642f20f08 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:41 +0000 +Subject: [PATCH 19/49] crypto: rockchip: do not do custom power management + +The clock enable/disable at tfm init/exit is fragile, +if 2 tfm are init in the same time and one is removed just after, +it will leave the hardware uncloked even if a user remains. + +Instead simply enable clocks at probe time. +We will do PM later. 
+ +Fixes: ce0183cb6464b ("crypto: rockchip - switch to skcipher API") +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.c | 4 ++-- + drivers/crypto/rockchip/rk3288_crypto.h | 2 -- + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 3 +-- + drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 5 +++-- + 4 files changed, 6 insertions(+), 8 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -394,8 +394,7 @@ static int rk_crypto_probe(struct platfo + rk_crypto_done_task_cb, (unsigned long)crypto_info); + crypto_init_queue(&crypto_info->queue, 50); + +- crypto_info->enable_clk = rk_crypto_enable_clk; +- crypto_info->disable_clk = rk_crypto_disable_clk; ++ rk_crypto_enable_clk(crypto_info); + crypto_info->load_data = rk_load_data; + crypto_info->unload_data = rk_unload_data; + crypto_info->enqueue = rk_crypto_enqueue; +@@ -422,6 +421,7 @@ static int rk_crypto_remove(struct platf + struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); + + rk_crypto_unregister(); ++ rk_crypto_disable_clk(crypto_tmp); + tasklet_kill(&crypto_tmp->done_task); + tasklet_kill(&crypto_tmp->queue_task); + return 0; +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -220,8 +220,6 @@ struct rk_crypto_info { + int (*start)(struct rk_crypto_info *dev); + int (*update)(struct rk_crypto_info *dev); + void (*complete)(struct crypto_async_request *base, int err); +- int (*enable_clk)(struct rk_crypto_info *dev); +- void (*disable_clk)(struct rk_crypto_info *dev); + int (*load_data)(struct rk_crypto_info *dev, + struct scatterlist *sg_src, + struct scatterlist *sg_dst); +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -301,7 +301,7 @@ static int rk_cra_hash_init(struct crypt + sizeof(struct rk_ahash_rctx) + + crypto_ahash_reqsize(tctx->fallback_tfm)); + +- return 
tctx->dev->enable_clk(tctx->dev); ++ return 0; + } + + static void rk_cra_hash_exit(struct crypto_tfm *tfm) +@@ -309,7 +309,6 @@ static void rk_cra_hash_exit(struct cryp + struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); + + free_page((unsigned long)tctx->dev->addr_vir); +- return tctx->dev->disable_clk(tctx->dev); + } + + struct rk_crypto_tmp rk_ahash_sha1 = { +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -388,8 +388,10 @@ static int rk_ablk_init_tfm(struct crypt + ctx->dev->update = rk_ablk_rx; + ctx->dev->complete = rk_crypto_complete; + ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL); ++ if (!ctx->dev->addr_vir) ++ return -ENOMEM; + +- return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM; ++ return 0; + } + + static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm) +@@ -397,7 +399,6 @@ static void rk_ablk_exit_tfm(struct cryp + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + free_page((unsigned long)ctx->dev->addr_vir); +- ctx->dev->disable_clk(ctx->dev); + } + + struct rk_crypto_tmp rk_ecb_aes_alg = { diff --git a/target/linux/rockchip/patches-6.0/170-crypto-rockchip-fix-privete-private-typo.patch b/target/linux/rockchip/patches-6.0/170-crypto-rockchip-fix-privete-private-typo.patch new file mode 100644 index 000000000..4a6f519f4 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/170-crypto-rockchip-fix-privete-private-typo.patch @@ -0,0 +1,24 @@ +From 7ea605d0e8b85ffac2bf152be86d010d9eac0193 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:42 +0000 +Subject: [PATCH 20/49] crypto: rockchip: fix privete/private typo + +This fix a simple typo on private word. 
+ +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -235,7 +235,7 @@ struct rk_ahash_ctx { + struct crypto_ahash *fallback_tfm; + }; + +-/* the privete variable of hash for fallback */ ++/* the private variable of hash for fallback */ + struct rk_ahash_rctx { + struct ahash_request fallback_req; + u32 mode; diff --git a/target/linux/rockchip/patches-6.0/171-crypto-rockchip-do-not-store-mode-globally.patch b/target/linux/rockchip/patches-6.0/171-crypto-rockchip-do-not-store-mode-globally.patch new file mode 100644 index 000000000..c53552bf3 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/171-crypto-rockchip-do-not-store-mode-globally.patch @@ -0,0 +1,262 @@ +From 61b5f1fbc686ff89fe30ce4efe10ba4b23d692f0 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:43 +0000 +Subject: [PATCH 21/49] crypto: rockchip: do not store mode globally + +Storing the mode globally does not work if 2 requests are handled in the +same time. +We should store it in a request context. 
+ +Fixes: ce0183cb6464b ("crypto: rockchip - switch to skcipher API") +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.h | 5 +- + .../crypto/rockchip/rk3288_crypto_skcipher.c | 58 ++++++++++++------- + 2 files changed, 41 insertions(+), 22 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -245,10 +245,13 @@ struct rk_ahash_rctx { + struct rk_cipher_ctx { + struct rk_crypto_info *dev; + unsigned int keylen; +- u32 mode; + u8 iv[AES_BLOCK_SIZE]; + }; + ++struct rk_cipher_rctx { ++ u32 mode; ++}; ++ + enum alg_type { + ALG_TYPE_HASH, + ALG_TYPE_CIPHER, +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -76,9 +76,10 @@ static int rk_aes_ecb_encrypt(struct skc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *dev = ctx->dev; + +- ctx->mode = RK_CRYPTO_AES_ECB_MODE; ++ rctx->mode = RK_CRYPTO_AES_ECB_MODE; + return rk_handle_req(dev, req); + } + +@@ -86,9 +87,10 @@ static int rk_aes_ecb_decrypt(struct skc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *dev = ctx->dev; + +- ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; ++ rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; + return rk_handle_req(dev, req); + } + +@@ -96,9 +98,10 @@ static int rk_aes_cbc_encrypt(struct skc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *dev = ctx->dev; + +- ctx->mode = RK_CRYPTO_AES_CBC_MODE; ++ rctx->mode = RK_CRYPTO_AES_CBC_MODE; + return 
rk_handle_req(dev, req); + } + +@@ -106,9 +109,10 @@ static int rk_aes_cbc_decrypt(struct skc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *dev = ctx->dev; + +- ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; ++ rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; + return rk_handle_req(dev, req); + } + +@@ -116,9 +120,10 @@ static int rk_des_ecb_encrypt(struct skc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *dev = ctx->dev; + +- ctx->mode = 0; ++ rctx->mode = 0; + return rk_handle_req(dev, req); + } + +@@ -126,9 +131,10 @@ static int rk_des_ecb_decrypt(struct skc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *dev = ctx->dev; + +- ctx->mode = RK_CRYPTO_DEC; ++ rctx->mode = RK_CRYPTO_DEC; + return rk_handle_req(dev, req); + } + +@@ -136,9 +142,10 @@ static int rk_des_cbc_encrypt(struct skc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *dev = ctx->dev; + +- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; ++ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; + return rk_handle_req(dev, req); + } + +@@ -146,9 +153,10 @@ static int rk_des_cbc_decrypt(struct skc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *dev = ctx->dev; + +- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; ++ 
rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; + return rk_handle_req(dev, req); + } + +@@ -156,9 +164,10 @@ static int rk_des3_ede_ecb_encrypt(struc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *dev = ctx->dev; + +- ctx->mode = RK_CRYPTO_TDES_SELECT; ++ rctx->mode = RK_CRYPTO_TDES_SELECT; + return rk_handle_req(dev, req); + } + +@@ -166,9 +175,10 @@ static int rk_des3_ede_ecb_decrypt(struc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *dev = ctx->dev; + +- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; ++ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; + return rk_handle_req(dev, req); + } + +@@ -176,9 +186,10 @@ static int rk_des3_ede_cbc_encrypt(struc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *dev = ctx->dev; + +- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; ++ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; + return rk_handle_req(dev, req); + } + +@@ -186,9 +197,10 @@ static int rk_des3_ede_cbc_decrypt(struc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *dev = ctx->dev; + +- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | ++ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | + RK_CRYPTO_DEC; + return rk_handle_req(dev, req); + } +@@ -199,6 +211,7 @@ static void rk_ablk_hw_init(struct rk_cr + skcipher_request_cast(dev->async_req); + struct 
crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); + u32 ivsize, block, conf_reg = 0; + +@@ -206,22 +219,22 @@ static void rk_ablk_hw_init(struct rk_cr + ivsize = crypto_skcipher_ivsize(cipher); + + if (block == DES_BLOCK_SIZE) { +- ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | ++ rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | + RK_CRYPTO_TDES_BYTESWAP_KEY | + RK_CRYPTO_TDES_BYTESWAP_IV; +- CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode); ++ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode); + memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize); + conf_reg = RK_CRYPTO_DESSEL; + } else { +- ctx->mode |= RK_CRYPTO_AES_FIFO_MODE | ++ rctx->mode |= RK_CRYPTO_AES_FIFO_MODE | + RK_CRYPTO_AES_KEY_CHANGE | + RK_CRYPTO_AES_BYTESWAP_KEY | + RK_CRYPTO_AES_BYTESWAP_IV; + if (ctx->keylen == AES_KEYSIZE_192) +- ctx->mode |= RK_CRYPTO_AES_192BIT_key; ++ rctx->mode |= RK_CRYPTO_AES_192BIT_key; + else if (ctx->keylen == AES_KEYSIZE_256) +- ctx->mode |= RK_CRYPTO_AES_256BIT_key; +- CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode); ++ rctx->mode |= RK_CRYPTO_AES_256BIT_key; ++ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode); + memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize); + } + conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | +@@ -246,6 +259,7 @@ static int rk_set_data_start(struct rk_c + struct skcipher_request *req = + skcipher_request_cast(dev->async_req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + u32 ivsize = crypto_skcipher_ivsize(tfm); + u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + +@@ -254,7 +268,7 @@ static int rk_set_data_start(struct rk_c + /* Store the iv that need to be updated in chain mode. 
+ * And update the IV buffer to contain the next IV for decryption mode. + */ +- if (ctx->mode & RK_CRYPTO_DEC) { ++ if (rctx->mode & RK_CRYPTO_DEC) { + memcpy(ctx->iv, src_last_blk, ivsize); + sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv, + ivsize, dev->total - ivsize); +@@ -294,11 +308,12 @@ static void rk_iv_copyback(struct rk_cry + struct skcipher_request *req = + skcipher_request_cast(dev->async_req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + u32 ivsize = crypto_skcipher_ivsize(tfm); + + /* Update the IV buffer to contain the next IV for encryption mode. */ +- if (!(ctx->mode & RK_CRYPTO_DEC)) { ++ if (!(rctx->mode & RK_CRYPTO_DEC)) { + if (dev->aligned) { + memcpy(req->iv, sg_virt(dev->sg_dst) + + dev->sg_dst->length - ivsize, ivsize); +@@ -314,11 +329,12 @@ static void rk_update_iv(struct rk_crypt + struct skcipher_request *req = + skcipher_request_cast(dev->async_req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + u32 ivsize = crypto_skcipher_ivsize(tfm); + u8 *new_iv = NULL; + +- if (ctx->mode & RK_CRYPTO_DEC) { ++ if (rctx->mode & RK_CRYPTO_DEC) { + new_iv = ctx->iv; + } else { + new_iv = page_address(sg_page(dev->sg_dst)) + diff --git a/target/linux/rockchip/patches-6.0/172-crypto-rockchip-add-fallback-for-cipher.patch b/target/linux/rockchip/patches-6.0/172-crypto-rockchip-add-fallback-for-cipher.patch new file mode 100644 index 000000000..a82459924 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/172-crypto-rockchip-add-fallback-for-cipher.patch @@ -0,0 +1,244 @@ +From 3a978f75f454960fbe00355bd01f3cbb8c3bca33 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:44 +0000 +Subject: [PATCH 22/49] crypto: rockchip: add fallback for cipher + +The 
hardware does not handle 0 size length request, let's add a +fallback. +Furthermore fallback will be used for all unaligned case the hardware +cannot handle. + +Fixes: ce0183cb6464b ("crypto: rockchip - switch to skcipher API") +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/Kconfig | 4 + + drivers/crypto/rockchip/rk3288_crypto.h | 2 + + .../crypto/rockchip/rk3288_crypto_skcipher.c | 97 ++++++++++++++++--- + 3 files changed, 90 insertions(+), 13 deletions(-) + +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -669,6 +669,10 @@ config CRYPTO_DEV_IMGTEC_HASH + config CRYPTO_DEV_ROCKCHIP + tristate "Rockchip's Cryptographic Engine driver" + depends on OF && ARCH_ROCKCHIP ++ depends on PM ++ select CRYPTO_ECB ++ select CRYPTO_CBC ++ select CRYPTO_DES + select CRYPTO_AES + select CRYPTO_LIB_DES + select CRYPTO_MD5 +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -246,10 +246,12 @@ struct rk_cipher_ctx { + struct rk_crypto_info *dev; + unsigned int keylen; + u8 iv[AES_BLOCK_SIZE]; ++ struct crypto_skcipher *fallback_tfm; + }; + + struct rk_cipher_rctx { + u32 mode; ++ struct skcipher_request fallback_req; // keep at the end + }; + + enum alg_type { +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -13,6 +13,63 @@ + + #define RK_CRYPTO_DEC BIT(0) + ++static int rk_cipher_need_fallback(struct skcipher_request *req) ++{ ++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); ++ unsigned int bs = crypto_skcipher_blocksize(tfm); ++ struct scatterlist *sgs, *sgd; ++ unsigned int stodo, dtodo, len; ++ ++ if (!req->cryptlen) ++ return true; ++ ++ len = req->cryptlen; ++ sgs = req->src; ++ sgd = req->dst; ++ while (sgs && sgd) { ++ if (!IS_ALIGNED(sgs->offset, sizeof(u32))) { ++ return true; ++ } ++ if (!IS_ALIGNED(sgd->offset, sizeof(u32))) { ++ return true; ++ } ++ stodo = min(len, sgs->length); ++ if (stodo % bs) 
{ ++ return true; ++ } ++ dtodo = min(len, sgd->length); ++ if (dtodo % bs) { ++ return true; ++ } ++ if (stodo != dtodo) { ++ return true; ++ } ++ len -= stodo; ++ sgs = sg_next(sgs); ++ sgd = sg_next(sgd); ++ } ++ return false; ++} ++ ++static int rk_cipher_fallback(struct skcipher_request *areq) ++{ ++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); ++ struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); ++ int err; ++ ++ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); ++ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, ++ areq->base.complete, areq->base.data); ++ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, ++ areq->cryptlen, areq->iv); ++ if (rctx->mode & RK_CRYPTO_DEC) ++ err = crypto_skcipher_decrypt(&rctx->fallback_req); ++ else ++ err = crypto_skcipher_encrypt(&rctx->fallback_req); ++ return err; ++} ++ + static void rk_crypto_complete(struct crypto_async_request *base, int err) + { + if (base->complete) +@@ -22,10 +79,10 @@ static void rk_crypto_complete(struct cr + static int rk_handle_req(struct rk_crypto_info *dev, + struct skcipher_request *req) + { +- if (!IS_ALIGNED(req->cryptlen, dev->align_size)) +- return -EINVAL; +- else +- return dev->enqueue(dev, &req->base); ++ if (rk_cipher_need_fallback(req)) ++ return rk_cipher_fallback(req); ++ ++ return dev->enqueue(dev, &req->base); + } + + static int rk_aes_setkey(struct crypto_skcipher *cipher, +@@ -39,7 +96,8 @@ static int rk_aes_setkey(struct crypto_s + return -EINVAL; + ctx->keylen = keylen; + memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); +- return 0; ++ ++ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); + } + + static int rk_des_setkey(struct crypto_skcipher *cipher, +@@ -54,7 +112,8 @@ static int rk_des_setkey(struct crypto_s + + ctx->keylen = keylen; + memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); +- return 0; ++ 
++ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); + } + + static int rk_tdes_setkey(struct crypto_skcipher *cipher, +@@ -69,7 +128,7 @@ static int rk_tdes_setkey(struct crypto_ + + ctx->keylen = keylen; + memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); +- return 0; ++ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); + } + + static int rk_aes_ecb_encrypt(struct skcipher_request *req) +@@ -394,6 +453,7 @@ static int rk_ablk_init_tfm(struct crypt + { + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); ++ const char *name = crypto_tfm_alg_name(&tfm->base); + struct rk_crypto_tmp *algt; + + algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); +@@ -407,6 +467,16 @@ static int rk_ablk_init_tfm(struct crypt + if (!ctx->dev->addr_vir) + return -ENOMEM; + ++ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); ++ if (IS_ERR(ctx->fallback_tfm)) { ++ dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", ++ name, PTR_ERR(ctx->fallback_tfm)); ++ return PTR_ERR(ctx->fallback_tfm); ++ } ++ ++ tfm->reqsize = sizeof(struct rk_cipher_rctx) + ++ crypto_skcipher_reqsize(ctx->fallback_tfm); ++ + return 0; + } + +@@ -415,6 +485,7 @@ static void rk_ablk_exit_tfm(struct cryp + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + free_page((unsigned long)ctx->dev->addr_vir); ++ crypto_free_skcipher(ctx->fallback_tfm); + } + + struct rk_crypto_tmp rk_ecb_aes_alg = { +@@ -423,7 +494,7 @@ struct rk_crypto_tmp rk_ecb_aes_alg = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-rk", + .base.cra_priority = 300, +- .base.cra_flags = CRYPTO_ALG_ASYNC, ++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), + .base.cra_alignmask = 0x0f, +@@ -445,7 +516,7 @@ struct rk_crypto_tmp rk_cbc_aes_alg = { + .base.cra_name = 
"cbc(aes)", + .base.cra_driver_name = "cbc-aes-rk", + .base.cra_priority = 300, +- .base.cra_flags = CRYPTO_ALG_ASYNC, ++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), + .base.cra_alignmask = 0x0f, +@@ -468,7 +539,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = { + .base.cra_name = "ecb(des)", + .base.cra_driver_name = "ecb-des-rk", + .base.cra_priority = 300, +- .base.cra_flags = CRYPTO_ALG_ASYNC, ++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), + .base.cra_alignmask = 0x07, +@@ -490,7 +561,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = { + .base.cra_name = "cbc(des)", + .base.cra_driver_name = "cbc-des-rk", + .base.cra_priority = 300, +- .base.cra_flags = CRYPTO_ALG_ASYNC, ++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), + .base.cra_alignmask = 0x07, +@@ -513,7 +584,7 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg + .base.cra_name = "ecb(des3_ede)", + .base.cra_driver_name = "ecb-des3-ede-rk", + .base.cra_priority = 300, +- .base.cra_flags = CRYPTO_ALG_ASYNC, ++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), + .base.cra_alignmask = 0x07, +@@ -535,7 +606,7 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg + .base.cra_name = "cbc(des3_ede)", + .base.cra_driver_name = "cbc-des3-ede-rk", + .base.cra_priority = 300, +- .base.cra_flags = CRYPTO_ALG_ASYNC, ++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), + .base.cra_alignmask = 0x07, diff --git a/target/linux/rockchip/patches-6.0/173-crypto-rockchip-add-fallback-for-ahash.patch 
b/target/linux/rockchip/patches-6.0/173-crypto-rockchip-add-fallback-for-ahash.patch new file mode 100644 index 000000000..bfd725a63 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/173-crypto-rockchip-add-fallback-for-ahash.patch @@ -0,0 +1,75 @@ +From ccfa662cf9f7dc8b5369f7ceb855e116e0b406be Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:45 +0000 +Subject: [PATCH 23/49] crypto: rockchip: add fallback for ahash + +Adds a fallback for all case hardware cannot handle. + +Fixes: ce0183cb6464b ("crypto: rockchip - switch to skcipher API") +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 38 +++++++++++++++++++ + 1 file changed, 38 insertions(+) + +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -16,6 +16,40 @@ + * so we put the fixed hash out when met zero message. + */ + ++static bool rk_ahash_need_fallback(struct ahash_request *req) ++{ ++ struct scatterlist *sg; ++ ++ sg = req->src; ++ while (sg) { ++ if (!IS_ALIGNED(sg->offset, sizeof(u32))) { ++ return true; ++ } ++ if (sg->length % 4) { ++ return true; ++ } ++ sg = sg_next(sg); ++ } ++ return false; ++} ++ ++static int rk_ahash_digest_fb(struct ahash_request *areq) ++{ ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); ++ struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm); ++ ++ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); ++ rctx->fallback_req.base.flags = areq->base.flags & ++ CRYPTO_TFM_REQ_MAY_SLEEP; ++ ++ rctx->fallback_req.nbytes = areq->nbytes; ++ rctx->fallback_req.src = areq->src; ++ rctx->fallback_req.result = areq->result; ++ ++ return crypto_ahash_digest(&rctx->fallback_req); ++} ++ + static int zero_message_process(struct ahash_request *req) + { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); +@@ -167,6 +201,9 @@ static int rk_ahash_digest(struct ahash_ 
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); + struct rk_crypto_info *dev = tctx->dev; + ++ if (rk_ahash_need_fallback(req)) ++ return rk_ahash_digest_fb(req); ++ + if (!req->nbytes) + return zero_message_process(req); + else +@@ -309,6 +346,7 @@ static void rk_cra_hash_exit(struct cryp + struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); + + free_page((unsigned long)tctx->dev->addr_vir); ++ crypto_free_ahash(tctx->fallback_tfm); + } + + struct rk_crypto_tmp rk_ahash_sha1 = { diff --git a/target/linux/rockchip/patches-6.0/174-crypto-rockchip-better-handle-cipher-key.patch b/target/linux/rockchip/patches-6.0/174-crypto-rockchip-better-handle-cipher-key.patch new file mode 100644 index 000000000..955b381c1 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/174-crypto-rockchip-better-handle-cipher-key.patch @@ -0,0 +1,81 @@ +From 08b723e0bccdcb3c8d20dd3931a14ec32823e3e9 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:46 +0000 +Subject: [PATCH 24/49] crypto: rockchip: better handle cipher key + +The key should not be set in hardware too much in advance, this will +fail it 2 TFM with different keys generate alternative requests. +The key should be stored and used just before doing cipher operations. 
+ +Fixes: ce0183cb6464b ("crypto: rockchip - switch to skcipher API") +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.h | 1 + + drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 10 +++++++--- + 2 files changed, 8 insertions(+), 3 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -245,6 +245,7 @@ struct rk_ahash_rctx { + struct rk_cipher_ctx { + struct rk_crypto_info *dev; + unsigned int keylen; ++ u8 key[AES_MAX_KEY_SIZE]; + u8 iv[AES_BLOCK_SIZE]; + struct crypto_skcipher *fallback_tfm; + }; +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -95,7 +95,7 @@ static int rk_aes_setkey(struct crypto_s + keylen != AES_KEYSIZE_256) + return -EINVAL; + ctx->keylen = keylen; +- memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); ++ memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); + } +@@ -111,7 +111,7 @@ static int rk_des_setkey(struct crypto_s + return err; + + ctx->keylen = keylen; +- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); ++ memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); + } +@@ -127,7 +127,8 @@ static int rk_tdes_setkey(struct crypto_ + return err; + + ctx->keylen = keylen; +- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); ++ memcpy(ctx->key, key, keylen); ++ + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); + } + +@@ -283,6 +284,7 @@ static void rk_ablk_hw_init(struct rk_cr + RK_CRYPTO_TDES_BYTESWAP_IV; + CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode); + memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize); ++ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen); + conf_reg = RK_CRYPTO_DESSEL; + } else { + rctx->mode |= RK_CRYPTO_AES_FIFO_MODE | +@@ -295,6 +297,7 @@ static void 
rk_ablk_hw_init(struct rk_cr + rctx->mode |= RK_CRYPTO_AES_256BIT_key; + CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode); + memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize); ++ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen); + } + conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | + RK_CRYPTO_BYTESWAP_BRFIFO; +@@ -484,6 +487,7 @@ static void rk_ablk_exit_tfm(struct cryp + { + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + ++ memzero_explicit(ctx->key, ctx->keylen); + free_page((unsigned long)ctx->dev->addr_vir); + crypto_free_skcipher(ctx->fallback_tfm); + } diff --git a/target/linux/rockchip/patches-6.0/175-crypto-rockchip-remove-non-aligned-handling.patch b/target/linux/rockchip/patches-6.0/175-crypto-rockchip-remove-non-aligned-handling.patch new file mode 100644 index 000000000..4edbe7124 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/175-crypto-rockchip-remove-non-aligned-handling.patch @@ -0,0 +1,262 @@ +From a02173a29e7db0431a69ae4aefde0d50af0afe17 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:47 +0000 +Subject: [PATCH 25/49] crypto: rockchip: remove non-aligned handling + +Now driver have fallback for un-aligned cases, remove all code handling +those cases. + +Fixes: ce0183cb6464b ("crypto: rockchip - switch to skcipher API") +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.c | 69 +++++-------------- + drivers/crypto/rockchip/rk3288_crypto.h | 4 -- + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 22 ++---- + .../crypto/rockchip/rk3288_crypto_skcipher.c | 39 +++-------- + 4 files changed, 31 insertions(+), 103 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -88,63 +88,26 @@ static int rk_load_data(struct rk_crypto + { + unsigned int count; + +- dev->aligned = dev->aligned ? 
+- check_alignment(sg_src, sg_dst, dev->align_size) : +- dev->aligned; +- if (dev->aligned) { +- count = min(dev->left_bytes, sg_src->length); +- dev->left_bytes -= count; ++ count = min(dev->left_bytes, sg_src->length); ++ dev->left_bytes -= count; + +- if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) { +- dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n", ++ if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) { ++ dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n", + __func__, __LINE__); +- return -EINVAL; +- } +- dev->addr_in = sg_dma_address(sg_src); ++ return -EINVAL; ++ } ++ dev->addr_in = sg_dma_address(sg_src); + +- if (sg_dst) { +- if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) { +- dev_err(dev->dev, ++ if (sg_dst) { ++ if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) { ++ dev_err(dev->dev, + "[%s:%d] dma_map_sg(dst) error\n", + __func__, __LINE__); +- dma_unmap_sg(dev->dev, sg_src, 1, +- DMA_TO_DEVICE); +- return -EINVAL; +- } +- dev->addr_out = sg_dma_address(sg_dst); +- } +- } else { +- count = (dev->left_bytes > PAGE_SIZE) ? 
+- PAGE_SIZE : dev->left_bytes; +- +- if (!sg_pcopy_to_buffer(dev->first, dev->src_nents, +- dev->addr_vir, count, +- dev->total - dev->left_bytes)) { +- dev_err(dev->dev, "[%s:%d] pcopy err\n", +- __func__, __LINE__); ++ dma_unmap_sg(dev->dev, sg_src, 1, ++ DMA_TO_DEVICE); + return -EINVAL; + } +- dev->left_bytes -= count; +- sg_init_one(&dev->sg_tmp, dev->addr_vir, count); +- if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) { +- dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n", +- __func__, __LINE__); +- return -ENOMEM; +- } +- dev->addr_in = sg_dma_address(&dev->sg_tmp); +- +- if (sg_dst) { +- if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, +- DMA_FROM_DEVICE)) { +- dev_err(dev->dev, +- "[%s:%d] dma_map_sg(sg_tmp) error\n", +- __func__, __LINE__); +- dma_unmap_sg(dev->dev, &dev->sg_tmp, 1, +- DMA_TO_DEVICE); +- return -ENOMEM; +- } +- dev->addr_out = sg_dma_address(&dev->sg_tmp); +- } ++ dev->addr_out = sg_dma_address(sg_dst); + } + dev->count = count; + return 0; +@@ -154,11 +117,11 @@ static void rk_unload_data(struct rk_cry + { + struct scatterlist *sg_in, *sg_out; + +- sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp; ++ sg_in = dev->sg_src; + dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE); + + if (dev->sg_dst) { +- sg_out = dev->aligned ? 
dev->sg_dst : &dev->sg_tmp; ++ sg_out = dev->sg_dst; + dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE); + } + } +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -204,12 +204,8 @@ struct rk_crypto_info { + /* the public variable */ + struct scatterlist *sg_src; + struct scatterlist *sg_dst; +- struct scatterlist sg_tmp; + struct scatterlist *first; + unsigned int left_bytes; +- void *addr_vir; +- int aligned; +- int align_size; + size_t src_nents; + size_t dst_nents; + unsigned int total; +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -236,8 +236,6 @@ static int rk_ahash_start(struct rk_cryp + + dev->total = req->nbytes; + dev->left_bytes = req->nbytes; +- dev->aligned = 0; +- dev->align_size = 4; + dev->sg_dst = NULL; + dev->sg_src = req->src; + dev->first = req->src; +@@ -272,15 +270,13 @@ static int rk_ahash_crypto_rx(struct rk_ + + dev->unload_data(dev); + if (dev->left_bytes) { +- if (dev->aligned) { +- if (sg_is_last(dev->sg_src)) { +- dev_warn(dev->dev, "[%s:%d], Lack of data\n", +- __func__, __LINE__); +- err = -ENOMEM; +- goto out_rx; +- } +- dev->sg_src = sg_next(dev->sg_src); ++ if (sg_is_last(dev->sg_src)) { ++ dev_warn(dev->dev, "[%s:%d], Lack of data\n", ++ __func__, __LINE__); ++ err = -ENOMEM; ++ goto out_rx; + } ++ dev->sg_src = sg_next(dev->sg_src); + err = rk_ahash_set_data_start(dev); + } else { + /* +@@ -318,11 +314,6 @@ static int rk_cra_hash_init(struct crypt + algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + + tctx->dev = algt->dev; +- tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL); +- if (!tctx->dev->addr_vir) { +- dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n"); +- return -ENOMEM; +- } + tctx->dev->start = rk_ahash_start; + tctx->dev->update = rk_ahash_crypto_rx; + tctx->dev->complete = rk_ahash_crypto_complete; +@@ -345,7 +336,6 @@ static void rk_cra_hash_exit(struct cryp + { + struct 
rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); + +- free_page((unsigned long)tctx->dev->addr_vir); + crypto_free_ahash(tctx->fallback_tfm); + } + +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -356,7 +356,6 @@ static int rk_ablk_start(struct rk_crypt + dev->src_nents = sg_nents(req->src); + dev->sg_dst = req->dst; + dev->dst_nents = sg_nents(req->dst); +- dev->aligned = 1; + + spin_lock_irqsave(&dev->lock, flags); + rk_ablk_hw_init(dev); +@@ -376,13 +375,9 @@ static void rk_iv_copyback(struct rk_cry + + /* Update the IV buffer to contain the next IV for encryption mode. */ + if (!(rctx->mode & RK_CRYPTO_DEC)) { +- if (dev->aligned) { +- memcpy(req->iv, sg_virt(dev->sg_dst) + +- dev->sg_dst->length - ivsize, ivsize); +- } else { +- memcpy(req->iv, dev->addr_vir + +- dev->count - ivsize, ivsize); +- } ++ memcpy(req->iv, ++ sg_virt(dev->sg_dst) + dev->sg_dst->length - ivsize, ++ ivsize); + } + } + +@@ -420,27 +415,16 @@ static int rk_ablk_rx(struct rk_crypto_i + skcipher_request_cast(dev->async_req); + + dev->unload_data(dev); +- if (!dev->aligned) { +- if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents, +- dev->addr_vir, dev->count, +- dev->total - dev->left_bytes - +- dev->count)) { +- err = -EINVAL; +- goto out_rx; +- } +- } + if (dev->left_bytes) { + rk_update_iv(dev); +- if (dev->aligned) { +- if (sg_is_last(dev->sg_src)) { +- dev_err(dev->dev, "[%s:%d] Lack of data\n", ++ if (sg_is_last(dev->sg_src)) { ++ dev_err(dev->dev, "[%s:%d] Lack of data\n", + __func__, __LINE__); +- err = -ENOMEM; +- goto out_rx; +- } +- dev->sg_src = sg_next(dev->sg_src); +- dev->sg_dst = sg_next(dev->sg_dst); ++ err = -ENOMEM; ++ goto out_rx; + } ++ dev->sg_src = sg_next(dev->sg_src); ++ dev->sg_dst = sg_next(dev->sg_dst); + err = rk_set_data_start(dev); + } else { + rk_iv_copyback(dev); +@@ -462,13 +446,9 @@ static int rk_ablk_init_tfm(struct crypt + algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + + 
ctx->dev = algt->dev; +- ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1; + ctx->dev->start = rk_ablk_start; + ctx->dev->update = rk_ablk_rx; + ctx->dev->complete = rk_crypto_complete; +- ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL); +- if (!ctx->dev->addr_vir) +- return -ENOMEM; + + ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback_tfm)) { +@@ -488,7 +468,6 @@ static void rk_ablk_exit_tfm(struct cryp + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + memzero_explicit(ctx->key, ctx->keylen); +- free_page((unsigned long)ctx->dev->addr_vir); + crypto_free_skcipher(ctx->fallback_tfm); + } + diff --git a/target/linux/rockchip/patches-6.0/176-crypto-rockchip-rework-by-using-crypto_engine.patch b/target/linux/rockchip/patches-6.0/176-crypto-rockchip-rework-by-using-crypto_engine.patch new file mode 100644 index 000000000..538ebcd88 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/176-crypto-rockchip-rework-by-using-crypto_engine.patch @@ -0,0 +1,881 @@ +From edc3999221d502bbb0b02b4af6110059622bb2f1 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:48 +0000 +Subject: [PATCH 26/49] crypto: rockchip: rework by using crypto_engine + +Instead of doing manual queue management, let's use the crypto/engine +for that. +In the same time, rework the requests handling to be easier to +understand (and fix all bugs related to them). 
+ +Fixes: ce0183cb6464b ("crypto: rockchip - switch to skcipher API") +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/Kconfig | 1 + + drivers/crypto/rockchip/rk3288_crypto.c | 152 +---------- + drivers/crypto/rockchip/rk3288_crypto.h | 39 +-- + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 144 +++++----- + .../crypto/rockchip/rk3288_crypto_skcipher.c | 250 +++++++++--------- + 5 files changed, 221 insertions(+), 365 deletions(-) + +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -674,6 +674,7 @@ config CRYPTO_DEV_ROCKCHIP + select CRYPTO_CBC + select CRYPTO_DES + select CRYPTO_AES ++ select CRYPTO_ENGINE + select CRYPTO_LIB_DES + select CRYPTO_MD5 + select CRYPTO_SHA1 +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -65,149 +65,24 @@ static void rk_crypto_disable_clk(struct + clk_disable_unprepare(dev->sclk); + } + +-static int check_alignment(struct scatterlist *sg_src, +- struct scatterlist *sg_dst, +- int align_mask) +-{ +- int in, out, align; +- +- in = IS_ALIGNED((uint32_t)sg_src->offset, 4) && +- IS_ALIGNED((uint32_t)sg_src->length, align_mask); +- if (!sg_dst) +- return in; +- out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) && +- IS_ALIGNED((uint32_t)sg_dst->length, align_mask); +- align = in && out; +- +- return (align && (sg_src->length == sg_dst->length)); +-} +- +-static int rk_load_data(struct rk_crypto_info *dev, +- struct scatterlist *sg_src, +- struct scatterlist *sg_dst) +-{ +- unsigned int count; +- +- count = min(dev->left_bytes, sg_src->length); +- dev->left_bytes -= count; +- +- if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) { +- dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n", +- __func__, __LINE__); +- return -EINVAL; +- } +- dev->addr_in = sg_dma_address(sg_src); +- +- if (sg_dst) { +- if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) { +- dev_err(dev->dev, +- "[%s:%d] dma_map_sg(dst) error\n", +- __func__, __LINE__); +- 
dma_unmap_sg(dev->dev, sg_src, 1, +- DMA_TO_DEVICE); +- return -EINVAL; +- } +- dev->addr_out = sg_dma_address(sg_dst); +- } +- dev->count = count; +- return 0; +-} +- +-static void rk_unload_data(struct rk_crypto_info *dev) +-{ +- struct scatterlist *sg_in, *sg_out; +- +- sg_in = dev->sg_src; +- dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE); +- +- if (dev->sg_dst) { +- sg_out = dev->sg_dst; +- dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE); +- } +-} +- + static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) + { + struct rk_crypto_info *dev = platform_get_drvdata(dev_id); + u32 interrupt_status; + +- spin_lock(&dev->lock); + interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); + CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); + ++ dev->status = 1; + if (interrupt_status & 0x0a) { + dev_warn(dev->dev, "DMA Error\n"); +- dev->err = -EFAULT; ++ dev->status = 0; + } +- tasklet_schedule(&dev->done_task); ++ complete(&dev->complete); + +- spin_unlock(&dev->lock); + return IRQ_HANDLED; + } + +-static int rk_crypto_enqueue(struct rk_crypto_info *dev, +- struct crypto_async_request *async_req) +-{ +- unsigned long flags; +- int ret; +- +- spin_lock_irqsave(&dev->lock, flags); +- ret = crypto_enqueue_request(&dev->queue, async_req); +- if (dev->busy) { +- spin_unlock_irqrestore(&dev->lock, flags); +- return ret; +- } +- dev->busy = true; +- spin_unlock_irqrestore(&dev->lock, flags); +- tasklet_schedule(&dev->queue_task); +- +- return ret; +-} +- +-static void rk_crypto_queue_task_cb(unsigned long data) +-{ +- struct rk_crypto_info *dev = (struct rk_crypto_info *)data; +- struct crypto_async_request *async_req, *backlog; +- unsigned long flags; +- int err = 0; +- +- dev->err = 0; +- spin_lock_irqsave(&dev->lock, flags); +- backlog = crypto_get_backlog(&dev->queue); +- async_req = crypto_dequeue_request(&dev->queue); +- +- if (!async_req) { +- dev->busy = false; +- spin_unlock_irqrestore(&dev->lock, flags); +- return; +- } +- 
spin_unlock_irqrestore(&dev->lock, flags); +- +- if (backlog) { +- backlog->complete(backlog, -EINPROGRESS); +- backlog = NULL; +- } +- +- dev->async_req = async_req; +- err = dev->start(dev); +- if (err) +- dev->complete(dev->async_req, err); +-} +- +-static void rk_crypto_done_task_cb(unsigned long data) +-{ +- struct rk_crypto_info *dev = (struct rk_crypto_info *)data; +- +- if (dev->err) { +- dev->complete(dev->async_req, dev->err); +- return; +- } +- +- dev->err = dev->update(dev); +- if (dev->err) +- dev->complete(dev->async_req, dev->err); +-} +- + static struct rk_crypto_tmp *rk_cipher_algs[] = { + &rk_ecb_aes_alg, + &rk_cbc_aes_alg, +@@ -300,8 +175,6 @@ static int rk_crypto_probe(struct platfo + if (err) + goto err_crypto; + +- spin_lock_init(&crypto_info->lock); +- + crypto_info->reg = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(crypto_info->reg)) { + err = PTR_ERR(crypto_info->reg); +@@ -351,17 +224,11 @@ static int rk_crypto_probe(struct platfo + crypto_info->dev = &pdev->dev; + platform_set_drvdata(pdev, crypto_info); + +- tasklet_init(&crypto_info->queue_task, +- rk_crypto_queue_task_cb, (unsigned long)crypto_info); +- tasklet_init(&crypto_info->done_task, +- rk_crypto_done_task_cb, (unsigned long)crypto_info); +- crypto_init_queue(&crypto_info->queue, 50); ++ crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true); ++ crypto_engine_start(crypto_info->engine); ++ init_completion(&crypto_info->complete); + + rk_crypto_enable_clk(crypto_info); +- crypto_info->load_data = rk_load_data; +- crypto_info->unload_data = rk_unload_data; +- crypto_info->enqueue = rk_crypto_enqueue; +- crypto_info->busy = false; + + err = rk_crypto_register(crypto_info); + if (err) { +@@ -373,9 +240,9 @@ static int rk_crypto_probe(struct platfo + return 0; + + err_register_alg: +- tasklet_kill(&crypto_info->queue_task); +- tasklet_kill(&crypto_info->done_task); ++ crypto_engine_exit(crypto_info->engine); + err_crypto: ++ dev_err(dev, "Crypto Accelerator not 
successfully registered\n"); + return err; + } + +@@ -385,8 +252,7 @@ static int rk_crypto_remove(struct platf + + rk_crypto_unregister(); + rk_crypto_disable_clk(crypto_tmp); +- tasklet_kill(&crypto_tmp->done_task); +- tasklet_kill(&crypto_tmp->queue_task); ++ crypto_engine_exit(crypto_tmp->engine); + return 0; + } + +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -5,9 +5,11 @@ + #include + #include + #include ++#include + #include + #include + #include ++#include + #include + #include + +@@ -193,39 +195,15 @@ struct rk_crypto_info { + struct reset_control *rst; + void __iomem *reg; + int irq; +- struct crypto_queue queue; +- struct tasklet_struct queue_task; +- struct tasklet_struct done_task; +- struct crypto_async_request *async_req; +- int err; +- /* device lock */ +- spinlock_t lock; +- +- /* the public variable */ +- struct scatterlist *sg_src; +- struct scatterlist *sg_dst; +- struct scatterlist *first; +- unsigned int left_bytes; +- size_t src_nents; +- size_t dst_nents; +- unsigned int total; +- unsigned int count; +- dma_addr_t addr_in; +- dma_addr_t addr_out; +- bool busy; +- int (*start)(struct rk_crypto_info *dev); +- int (*update)(struct rk_crypto_info *dev); +- void (*complete)(struct crypto_async_request *base, int err); +- int (*load_data)(struct rk_crypto_info *dev, +- struct scatterlist *sg_src, +- struct scatterlist *sg_dst); +- void (*unload_data)(struct rk_crypto_info *dev); +- int (*enqueue)(struct rk_crypto_info *dev, +- struct crypto_async_request *async_req); ++ ++ struct crypto_engine *engine; ++ struct completion complete; ++ int status; + }; + + /* the private variable of hash */ + struct rk_ahash_ctx { ++ struct crypto_engine_ctx enginectx; + struct rk_crypto_info *dev; + /* for fallback */ + struct crypto_ahash *fallback_tfm; +@@ -235,10 +213,12 @@ struct rk_ahash_ctx { + struct rk_ahash_rctx { + struct ahash_request fallback_req; + u32 mode; ++ int nrsg; + }; + + /* the private 
variable of cipher */ + struct rk_cipher_ctx { ++ struct crypto_engine_ctx enginectx; + struct rk_crypto_info *dev; + unsigned int keylen; + u8 key[AES_MAX_KEY_SIZE]; +@@ -247,6 +227,7 @@ struct rk_cipher_ctx { + }; + + struct rk_cipher_rctx { ++ u8 backup_iv[AES_BLOCK_SIZE]; + u32 mode; + struct skcipher_request fallback_req; // keep at the end + }; +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -9,6 +9,7 @@ + * Some ideas are from marvell/cesa.c and s5p-sss.c driver. + */ + #include ++#include + #include "rk3288_crypto.h" + + /* +@@ -72,16 +73,12 @@ static int zero_message_process(struct a + return 0; + } + +-static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err) ++static void rk_ahash_reg_init(struct ahash_request *req) + { +- if (base->complete) +- base->complete(base, err); +-} +- +-static void rk_ahash_reg_init(struct rk_crypto_info *dev) +-{ +- struct ahash_request *req = ahash_request_cast(dev->async_req); + struct rk_ahash_rctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); ++ struct rk_crypto_info *dev = tctx->dev; + int reg_status; + + reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) | +@@ -108,7 +105,7 @@ static void rk_ahash_reg_init(struct rk_ + RK_CRYPTO_BYTESWAP_BRFIFO | + RK_CRYPTO_BYTESWAP_BTFIFO); + +- CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total); ++ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes); + } + + static int rk_ahash_init(struct ahash_request *req) +@@ -206,44 +203,59 @@ static int rk_ahash_digest(struct ahash_ + + if (!req->nbytes) + return zero_message_process(req); +- else +- return dev->enqueue(dev, &req->base); ++ ++ return crypto_transfer_hash_request_to_engine(dev->engine, req); + } + +-static void crypto_ahash_dma_start(struct rk_crypto_info *dev) ++static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg) + 
{ +- CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in); +- CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4); ++ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg)); ++ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4); + CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START | + (RK_CRYPTO_HASH_START << 16)); + } + +-static int rk_ahash_set_data_start(struct rk_crypto_info *dev) ++static int rk_hash_prepare(struct crypto_engine *engine, void *breq) + { +- int err; ++ struct ahash_request *areq = container_of(breq, struct ahash_request, base); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); ++ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); ++ int ret; ++ ++ ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); ++ if (ret <= 0) ++ return -EINVAL; ++ ++ rctx->nrsg = ret; + +- err = dev->load_data(dev, dev->sg_src, NULL); +- if (!err) +- crypto_ahash_dma_start(dev); +- return err; ++ return 0; + } + +-static int rk_ahash_start(struct rk_crypto_info *dev) ++static int rk_hash_unprepare(struct crypto_engine *engine, void *breq) + { +- struct ahash_request *req = ahash_request_cast(dev->async_req); +- struct crypto_ahash *tfm; +- struct rk_ahash_rctx *rctx; ++ struct ahash_request *areq = container_of(breq, struct ahash_request, base); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); ++ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); ++ ++ dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); ++ return 0; ++} ++ ++static int rk_hash_run(struct crypto_engine *engine, void *breq) ++{ ++ struct ahash_request *areq = container_of(breq, struct ahash_request, base); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); ++ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); ++ struct scatterlist *sg = areq->src; ++ int err = 
0; ++ int i; ++ u32 v; + +- dev->total = req->nbytes; +- dev->left_bytes = req->nbytes; +- dev->sg_dst = NULL; +- dev->sg_src = req->src; +- dev->first = req->src; +- dev->src_nents = sg_nents(req->src); +- rctx = ahash_request_ctx(req); + rctx->mode = 0; + +- tfm = crypto_ahash_reqtfm(req); + switch (crypto_ahash_digestsize(tfm)) { + case SHA1_DIGEST_SIZE: + rctx->mode = RK_CRYPTO_HASH_SHA1; +@@ -255,30 +267,26 @@ static int rk_ahash_start(struct rk_cryp + rctx->mode = RK_CRYPTO_HASH_MD5; + break; + default: +- return -EINVAL; ++ err = -EINVAL; ++ goto theend; + } + +- rk_ahash_reg_init(dev); +- return rk_ahash_set_data_start(dev); +-} +- +-static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) +-{ +- int err = 0; +- struct ahash_request *req = ahash_request_cast(dev->async_req); +- struct crypto_ahash *tfm; ++ rk_ahash_reg_init(areq); + +- dev->unload_data(dev); +- if (dev->left_bytes) { +- if (sg_is_last(dev->sg_src)) { +- dev_warn(dev->dev, "[%s:%d], Lack of data\n", +- __func__, __LINE__); +- err = -ENOMEM; +- goto out_rx; ++ while (sg) { ++ reinit_completion(&tctx->dev->complete); ++ tctx->dev->status = 0; ++ crypto_ahash_dma_start(tctx->dev, sg); ++ wait_for_completion_interruptible_timeout(&tctx->dev->complete, ++ msecs_to_jiffies(2000)); ++ if (!tctx->dev->status) { ++ dev_err(tctx->dev->dev, "DMA timeout\n"); ++ err = -EFAULT; ++ goto theend; + } +- dev->sg_src = sg_next(dev->sg_src); +- err = rk_ahash_set_data_start(dev); +- } else { ++ sg = sg_next(sg); ++ } ++ + /* + * it will take some time to process date after last dma + * transmission. +@@ -289,18 +297,20 @@ static int rk_ahash_crypto_rx(struct rk_ + * efficiency, and make it response quickly when dma + * complete. 
+ */ +- while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS)) +- udelay(10); ++ while (!CRYPTO_READ(tctx->dev, RK_CRYPTO_HASH_STS)) ++ udelay(10); + +- tfm = crypto_ahash_reqtfm(req); +- memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0, +- crypto_ahash_digestsize(tfm)); +- dev->complete(dev->async_req, 0); +- tasklet_schedule(&dev->queue_task); ++ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) { ++ v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4); ++ put_unaligned_le32(v, areq->result + i * 4); + } + +-out_rx: +- return err; ++theend: ++ local_bh_disable(); ++ crypto_finalize_hash_request(engine, breq, err); ++ local_bh_enable(); ++ ++ return 0; + } + + static int rk_cra_hash_init(struct crypto_tfm *tfm) +@@ -314,9 +324,6 @@ static int rk_cra_hash_init(struct crypt + algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + + tctx->dev = algt->dev; +- tctx->dev->start = rk_ahash_start; +- tctx->dev->update = rk_ahash_crypto_rx; +- tctx->dev->complete = rk_ahash_crypto_complete; + + /* for fallback */ + tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, +@@ -325,10 +332,15 @@ static int rk_cra_hash_init(struct crypt + dev_err(tctx->dev->dev, "Could not load fallback driver.\n"); + return PTR_ERR(tctx->fallback_tfm); + } ++ + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct rk_ahash_rctx) + + crypto_ahash_reqsize(tctx->fallback_tfm)); + ++ tctx->enginectx.op.do_one_request = rk_hash_run; ++ tctx->enginectx.op.prepare_request = rk_hash_prepare; ++ tctx->enginectx.op.unprepare_request = rk_hash_unprepare; ++ + return 0; + } + +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -9,6 +9,7 @@ + * Some ideas are from marvell-cesa.c and s5p-sss.c driver. 
+ */ + #include ++#include + #include "rk3288_crypto.h" + + #define RK_CRYPTO_DEC BIT(0) +@@ -70,19 +71,15 @@ static int rk_cipher_fallback(struct skc + return err; + } + +-static void rk_crypto_complete(struct crypto_async_request *base, int err) +-{ +- if (base->complete) +- base->complete(base, err); +-} +- + static int rk_handle_req(struct rk_crypto_info *dev, + struct skcipher_request *req) + { ++ struct crypto_engine *engine = dev->engine; ++ + if (rk_cipher_need_fallback(req)) + return rk_cipher_fallback(req); + +- return dev->enqueue(dev, &req->base); ++ return crypto_transfer_skcipher_request_to_engine(engine, req); + } + + static int rk_aes_setkey(struct crypto_skcipher *cipher, +@@ -265,25 +262,21 @@ static int rk_des3_ede_cbc_decrypt(struc + return rk_handle_req(dev, req); + } + +-static void rk_ablk_hw_init(struct rk_crypto_info *dev) ++static void rk_ablk_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req) + { +- struct skcipher_request *req = +- skcipher_request_cast(dev->async_req); + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); +- u32 ivsize, block, conf_reg = 0; ++ u32 block, conf_reg = 0; + + block = crypto_tfm_alg_blocksize(tfm); +- ivsize = crypto_skcipher_ivsize(cipher); + + if (block == DES_BLOCK_SIZE) { + rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | + RK_CRYPTO_TDES_BYTESWAP_KEY | + RK_CRYPTO_TDES_BYTESWAP_IV; + CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode); +- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize); + memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen); + conf_reg = RK_CRYPTO_DESSEL; + } else { +@@ -296,7 +289,6 @@ static void rk_ablk_hw_init(struct rk_cr + else if (ctx->keylen == AES_KEYSIZE_256) + rctx->mode |= RK_CRYPTO_AES_256BIT_key; + CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode); +- 
memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize); + memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen); + } + conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | +@@ -306,133 +298,138 @@ static void rk_ablk_hw_init(struct rk_cr + RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA); + } + +-static void crypto_dma_start(struct rk_crypto_info *dev) +-{ +- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in); +- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4); +- CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out); ++static void crypto_dma_start(struct rk_crypto_info *dev, ++ struct scatterlist *sgs, ++ struct scatterlist *sgd, unsigned int todo) ++{ ++ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs)); ++ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo); ++ CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd)); + CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START | + _SBF(RK_CRYPTO_BLOCK_START, 16)); + } + +-static int rk_set_data_start(struct rk_crypto_info *dev) ++static int rk_cipher_run(struct crypto_engine *engine, void *async_req) + { +- int err; +- struct skcipher_request *req = +- skcipher_request_cast(dev->async_req); +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); ++ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); ++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); +- u32 ivsize = crypto_skcipher_ivsize(tfm); +- u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + +- dev->sg_src->offset + dev->sg_src->length - ivsize; +- +- /* Store the iv that need to be updated in chain mode. +- * And update the IV buffer to contain the next IV for decryption mode. 
+- */ +- if (rctx->mode & RK_CRYPTO_DEC) { +- memcpy(ctx->iv, src_last_blk, ivsize); +- sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv, +- ivsize, dev->total - ivsize); +- } +- +- err = dev->load_data(dev, dev->sg_src, dev->sg_dst); +- if (!err) +- crypto_dma_start(dev); +- return err; +-} +- +-static int rk_ablk_start(struct rk_crypto_info *dev) +-{ +- struct skcipher_request *req = +- skcipher_request_cast(dev->async_req); +- unsigned long flags; ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); ++ struct scatterlist *sgs, *sgd; + int err = 0; ++ int ivsize = crypto_skcipher_ivsize(tfm); ++ int offset; ++ u8 iv[AES_BLOCK_SIZE]; ++ u8 biv[AES_BLOCK_SIZE]; ++ u8 *ivtouse = areq->iv; ++ unsigned int len = areq->cryptlen; ++ unsigned int todo; ++ ++ ivsize = crypto_skcipher_ivsize(tfm); ++ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { ++ if (rctx->mode & RK_CRYPTO_DEC) { ++ offset = areq->cryptlen - ivsize; ++ scatterwalk_map_and_copy(rctx->backup_iv, areq->src, ++ offset, ivsize, 0); ++ } ++ } + +- dev->left_bytes = req->cryptlen; +- dev->total = req->cryptlen; +- dev->sg_src = req->src; +- dev->first = req->src; +- dev->src_nents = sg_nents(req->src); +- dev->sg_dst = req->dst; +- dev->dst_nents = sg_nents(req->dst); +- +- spin_lock_irqsave(&dev->lock, flags); +- rk_ablk_hw_init(dev); +- err = rk_set_data_start(dev); +- spin_unlock_irqrestore(&dev->lock, flags); +- return err; +-} ++ sgs = areq->src; ++ sgd = areq->dst; + +-static void rk_iv_copyback(struct rk_crypto_info *dev) +-{ +- struct skcipher_request *req = +- skcipher_request_cast(dev->async_req); +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); +- u32 ivsize = crypto_skcipher_ivsize(tfm); ++ while (sgs && sgd && len) { ++ if (!sgs->length) { ++ sgs = sg_next(sgs); ++ sgd = sg_next(sgd); ++ continue; ++ } ++ if (rctx->mode & RK_CRYPTO_DEC) { ++ /* we 
backup last block of source to be used as IV at next step */ ++ offset = sgs->length - ivsize; ++ scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0); ++ } ++ if (sgs == sgd) { ++ err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); ++ if (err <= 0) { ++ err = -EINVAL; ++ goto theend_iv; ++ } ++ } else { ++ err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); ++ if (err <= 0) { ++ err = -EINVAL; ++ goto theend_iv; ++ } ++ err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); ++ if (err <= 0) { ++ err = -EINVAL; ++ goto theend_sgs; ++ } ++ } ++ err = 0; ++ rk_ablk_hw_init(ctx->dev, areq); ++ if (ivsize) { ++ if (ivsize == DES_BLOCK_SIZE) ++ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize); ++ else ++ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize); ++ } ++ reinit_completion(&ctx->dev->complete); ++ ctx->dev->status = 0; + +- /* Update the IV buffer to contain the next IV for encryption mode. */ +- if (!(rctx->mode & RK_CRYPTO_DEC)) { +- memcpy(req->iv, +- sg_virt(dev->sg_dst) + dev->sg_dst->length - ivsize, +- ivsize); ++ todo = min(sg_dma_len(sgs), len); ++ len -= todo; ++ crypto_dma_start(ctx->dev, sgs, sgd, todo / 4); ++ wait_for_completion_interruptible_timeout(&ctx->dev->complete, ++ msecs_to_jiffies(2000)); ++ if (!ctx->dev->status) { ++ dev_err(ctx->dev->dev, "DMA timeout\n"); ++ err = -EFAULT; ++ goto theend; ++ } ++ if (sgs == sgd) { ++ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); ++ } else { ++ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); ++ dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); ++ } ++ if (rctx->mode & RK_CRYPTO_DEC) { ++ memcpy(iv, biv, ivsize); ++ ivtouse = iv; ++ } else { ++ offset = sgd->length - ivsize; ++ scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0); ++ ivtouse = iv; ++ } ++ sgs = sg_next(sgs); ++ sgd = sg_next(sgd); + } +-} +- +-static void rk_update_iv(struct rk_crypto_info *dev) +-{ +- struct skcipher_request *req = +- 
skcipher_request_cast(dev->async_req); +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); +- u32 ivsize = crypto_skcipher_ivsize(tfm); +- u8 *new_iv = NULL; + +- if (rctx->mode & RK_CRYPTO_DEC) { +- new_iv = ctx->iv; +- } else { +- new_iv = page_address(sg_page(dev->sg_dst)) + +- dev->sg_dst->offset + dev->sg_dst->length - ivsize; ++ if (areq->iv && ivsize > 0) { ++ offset = areq->cryptlen - ivsize; ++ if (rctx->mode & RK_CRYPTO_DEC) { ++ memcpy(areq->iv, rctx->backup_iv, ivsize); ++ memzero_explicit(rctx->backup_iv, ivsize); ++ } else { ++ scatterwalk_map_and_copy(areq->iv, areq->dst, offset, ++ ivsize, 0); ++ } + } + +- if (ivsize == DES_BLOCK_SIZE) +- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize); +- else if (ivsize == AES_BLOCK_SIZE) +- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize); +-} +- +-/* return: +- * true some err was occurred +- * fault no err, continue +- */ +-static int rk_ablk_rx(struct rk_crypto_info *dev) +-{ +- int err = 0; +- struct skcipher_request *req = +- skcipher_request_cast(dev->async_req); ++theend: ++ local_bh_disable(); ++ crypto_finalize_skcipher_request(engine, areq, err); ++ local_bh_enable(); ++ return 0; + +- dev->unload_data(dev); +- if (dev->left_bytes) { +- rk_update_iv(dev); +- if (sg_is_last(dev->sg_src)) { +- dev_err(dev->dev, "[%s:%d] Lack of data\n", +- __func__, __LINE__); +- err = -ENOMEM; +- goto out_rx; +- } +- dev->sg_src = sg_next(dev->sg_src); +- dev->sg_dst = sg_next(dev->sg_dst); +- err = rk_set_data_start(dev); ++theend_sgs: ++ if (sgs == sgd) { ++ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); + } else { +- rk_iv_copyback(dev); +- /* here show the calculation is over without any err */ +- dev->complete(dev->async_req, 0); +- tasklet_schedule(&dev->queue_task); ++ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); ++ dma_unmap_sg(ctx->dev->dev, sgd, 1, 
DMA_FROM_DEVICE); + } +-out_rx: ++theend_iv: + return err; + } + +@@ -446,9 +443,6 @@ static int rk_ablk_init_tfm(struct crypt + algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + + ctx->dev = algt->dev; +- ctx->dev->start = rk_ablk_start; +- ctx->dev->update = rk_ablk_rx; +- ctx->dev->complete = rk_crypto_complete; + + ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback_tfm)) { +@@ -460,6 +454,8 @@ static int rk_ablk_init_tfm(struct crypt + tfm->reqsize = sizeof(struct rk_cipher_rctx) + + crypto_skcipher_reqsize(ctx->fallback_tfm); + ++ ctx->enginectx.op.do_one_request = rk_cipher_run; ++ + return 0; + } + diff --git a/target/linux/rockchip/patches-6.0/177-crypto-rockchip-rewrite-type.patch b/target/linux/rockchip/patches-6.0/177-crypto-rockchip-rewrite-type.patch new file mode 100644 index 000000000..032310fb6 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/177-crypto-rockchip-rewrite-type.patch @@ -0,0 +1,174 @@ +From b9d97d2708d9ae617a3bb7bbb91ca543c486f337 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:49 +0000 +Subject: [PATCH 27/49] crypto: rockchip: rewrite type + +Instead of using a custom type for classify algorithms, let's just use +already defined ones. +And let's made a bit more verbose about what is registered. 
+ +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.c | 26 +++++++++++++------ + drivers/crypto/rockchip/rk3288_crypto.h | 7 +---- + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 6 ++--- + .../crypto/rockchip/rk3288_crypto_skcipher.c | 12 ++++----- + 4 files changed, 28 insertions(+), 23 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -102,12 +102,22 @@ static int rk_crypto_register(struct rk_ + + for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { + rk_cipher_algs[i]->dev = crypto_info; +- if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER) +- err = crypto_register_skcipher( +- &rk_cipher_algs[i]->alg.skcipher); +- else +- err = crypto_register_ahash( +- &rk_cipher_algs[i]->alg.hash); ++ switch (rk_cipher_algs[i]->type) { ++ case CRYPTO_ALG_TYPE_SKCIPHER: ++ dev_info(crypto_info->dev, "Register %s as %s\n", ++ rk_cipher_algs[i]->alg.skcipher.base.cra_name, ++ rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name); ++ err = crypto_register_skcipher(&rk_cipher_algs[i]->alg.skcipher); ++ break; ++ case CRYPTO_ALG_TYPE_AHASH: ++ dev_info(crypto_info->dev, "Register %s as %s\n", ++ rk_cipher_algs[i]->alg.hash.halg.base.cra_name, ++ rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name); ++ err = crypto_register_ahash(&rk_cipher_algs[i]->alg.hash); ++ break; ++ default: ++ dev_err(crypto_info->dev, "unknown algorithm\n"); ++ } + if (err) + goto err_cipher_algs; + } +@@ -115,7 +125,7 @@ static int rk_crypto_register(struct rk_ + + err_cipher_algs: + for (k = 0; k < i; k++) { +- if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER) ++ if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER) + crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher); + else + crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash); +@@ -128,7 +138,7 @@ static void rk_crypto_unregister(void) + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { +- if 
(rk_cipher_algs[i]->type == ALG_TYPE_CIPHER) ++ if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER) + crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher); + else + crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash); +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -232,18 +232,13 @@ struct rk_cipher_rctx { + struct skcipher_request fallback_req; // keep at the end + }; + +-enum alg_type { +- ALG_TYPE_HASH, +- ALG_TYPE_CIPHER, +-}; +- + struct rk_crypto_tmp { ++ u32 type; + struct rk_crypto_info *dev; + union { + struct skcipher_alg skcipher; + struct ahash_alg hash; + } alg; +- enum alg_type type; + }; + + extern struct rk_crypto_tmp rk_ecb_aes_alg; +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -352,7 +352,7 @@ static void rk_cra_hash_exit(struct cryp + } + + struct rk_crypto_tmp rk_ahash_sha1 = { +- .type = ALG_TYPE_HASH, ++ .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .init = rk_ahash_init, + .update = rk_ahash_update, +@@ -382,7 +382,7 @@ struct rk_crypto_tmp rk_ahash_sha1 = { + }; + + struct rk_crypto_tmp rk_ahash_sha256 = { +- .type = ALG_TYPE_HASH, ++ .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .init = rk_ahash_init, + .update = rk_ahash_update, +@@ -412,7 +412,7 @@ struct rk_crypto_tmp rk_ahash_sha256 = { + }; + + struct rk_crypto_tmp rk_ahash_md5 = { +- .type = ALG_TYPE_HASH, ++ .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .init = rk_ahash_init, + .update = rk_ahash_update, +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -468,7 +468,7 @@ static void rk_ablk_exit_tfm(struct cryp + } + + struct rk_crypto_tmp rk_ecb_aes_alg = { +- .type = ALG_TYPE_CIPHER, ++ .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-rk", +@@ -490,7 +490,7 @@ struct rk_crypto_tmp rk_ecb_aes_alg = { + }; + + 
struct rk_crypto_tmp rk_cbc_aes_alg = { +- .type = ALG_TYPE_CIPHER, ++ .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-rk", +@@ -513,7 +513,7 @@ struct rk_crypto_tmp rk_cbc_aes_alg = { + }; + + struct rk_crypto_tmp rk_ecb_des_alg = { +- .type = ALG_TYPE_CIPHER, ++ .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "ecb(des)", + .base.cra_driver_name = "ecb-des-rk", +@@ -535,7 +535,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = { + }; + + struct rk_crypto_tmp rk_cbc_des_alg = { +- .type = ALG_TYPE_CIPHER, ++ .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "cbc(des)", + .base.cra_driver_name = "cbc-des-rk", +@@ -558,7 +558,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = { + }; + + struct rk_crypto_tmp rk_ecb_des3_ede_alg = { +- .type = ALG_TYPE_CIPHER, ++ .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "ecb(des3_ede)", + .base.cra_driver_name = "ecb-des3-ede-rk", +@@ -580,7 +580,7 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg + }; + + struct rk_crypto_tmp rk_cbc_des3_ede_alg = { +- .type = ALG_TYPE_CIPHER, ++ .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "cbc(des3_ede)", + .base.cra_driver_name = "cbc-des3-ede-rk", diff --git a/target/linux/rockchip/patches-6.0/178-crypto-rockchip-add-debugfs.patch b/target/linux/rockchip/patches-6.0/178-crypto-rockchip-add-debugfs.patch new file mode 100644 index 000000000..0ff54cc53 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/178-crypto-rockchip-add-debugfs.patch @@ -0,0 +1,232 @@ +From ca19c52753332836704019bc5f423b054ea2616b Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:50 +0000 +Subject: [PATCH 28/49] crypto: rockchip: add debugfs + +This patch enable to access usage stats for each algorithm. 
+ +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/Kconfig | 10 ++++ + drivers/crypto/rockchip/rk3288_crypto.c | 47 +++++++++++++++++++ + drivers/crypto/rockchip/rk3288_crypto.h | 11 +++++ + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 8 ++++ + .../crypto/rockchip/rk3288_crypto_skcipher.c | 15 ++++++ + 5 files changed, 91 insertions(+) + +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -686,6 +686,16 @@ config CRYPTO_DEV_ROCKCHIP + This driver interfaces with the hardware crypto accelerator. + Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode. + ++config CRYPTO_DEV_ROCKCHIP_DEBUG ++ bool "Enable Rockchip crypto stats" ++ depends on CRYPTO_DEV_ROCKCHIP ++ depends on DEBUG_FS ++ help ++ Say y to enable Rockchip crypto debug stats. ++ This will create /sys/kernel/debug/rk3288_crypto/stats for displaying ++ the number of requests per algorithm and other internal stats. ++ ++ + config CRYPTO_DEV_ZYNQMP_AES + tristate "Support for Xilinx ZynqMP AES hw accelerator" + depends on ZYNQMP_FIRMWARE || COMPILE_TEST +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -95,6 +95,41 @@ static struct rk_crypto_tmp *rk_cipher_a + &rk_ahash_md5, + }; + ++#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG ++static int rk_crypto_debugfs_show(struct seq_file *seq, void *v) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { ++ if (!rk_cipher_algs[i]->dev) ++ continue; ++ switch (rk_cipher_algs[i]->type) { ++ case CRYPTO_ALG_TYPE_SKCIPHER: ++ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", ++ rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name, ++ rk_cipher_algs[i]->alg.skcipher.base.cra_name, ++ rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb); ++ seq_printf(seq, "\tfallback due to length: %lu\n", ++ rk_cipher_algs[i]->stat_fb_len); ++ seq_printf(seq, "\tfallback due to alignment: %lu\n", ++ rk_cipher_algs[i]->stat_fb_align); ++ seq_printf(seq, 
"\tfallback due to SGs: %lu\n", ++ rk_cipher_algs[i]->stat_fb_sgdiff); ++ break; ++ case CRYPTO_ALG_TYPE_AHASH: ++ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", ++ rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name, ++ rk_cipher_algs[i]->alg.hash.halg.base.cra_name, ++ rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb); ++ break; ++ } ++ } ++ return 0; ++} ++ ++DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs); ++#endif ++ + static int rk_crypto_register(struct rk_crypto_info *crypto_info) + { + unsigned int i, k; +@@ -246,6 +281,15 @@ static int rk_crypto_probe(struct platfo + goto err_register_alg; + } + ++#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG ++ /* Ignore error of debugfs */ ++ crypto_info->dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL); ++ crypto_info->dbgfs_stats = debugfs_create_file("stats", 0444, ++ crypto_info->dbgfs_dir, ++ crypto_info, ++ &rk_crypto_debugfs_fops); ++#endif ++ + dev_info(dev, "Crypto Accelerator successfully registered\n"); + return 0; + +@@ -260,6 +304,9 @@ static int rk_crypto_remove(struct platf + { + struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); + ++#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG ++ debugfs_remove_recursive(crypto_tmp->dbgfs_dir); ++#endif + rk_crypto_unregister(); + rk_crypto_disable_clk(crypto_tmp); + crypto_engine_exit(crypto_tmp->engine); +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -199,6 +200,10 @@ struct rk_crypto_info { + struct crypto_engine *engine; + struct completion complete; + int status; ++#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG ++ struct dentry *dbgfs_dir; ++ struct dentry *dbgfs_stats; ++#endif + }; + + /* the private variable of hash */ +@@ -239,6 +244,12 @@ struct rk_crypto_tmp { + struct skcipher_alg skcipher; + struct ahash_alg hash; + } alg; ++ unsigned long stat_req; ++ unsigned long stat_fb; ++ unsigned long stat_fb_len; ++ 
unsigned long stat_fb_sglen; ++ unsigned long stat_fb_align; ++ unsigned long stat_fb_sgdiff; + }; + + extern struct rk_crypto_tmp rk_ecb_aes_alg; +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -39,6 +39,10 @@ static int rk_ahash_digest_fb(struct aha + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm); ++ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); ++ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); ++ ++ algt->stat_fb++; + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & +@@ -249,6 +253,8 @@ static int rk_hash_run(struct crypto_eng + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); ++ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); ++ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + struct scatterlist *sg = areq->src; + int err = 0; + int i; +@@ -256,6 +262,8 @@ static int rk_hash_run(struct crypto_eng + + rctx->mode = 0; + ++ algt->stat_req++; ++ + switch (crypto_ahash_digestsize(tfm)) { + case SHA1_DIGEST_SIZE: + rctx->mode = RK_CRYPTO_HASH_SHA1; +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -18,6 +18,8 @@ static int rk_cipher_need_fallback(struc + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + unsigned int bs = crypto_skcipher_blocksize(tfm); ++ struct skcipher_alg *alg = crypto_skcipher_alg(tfm); ++ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + struct scatterlist *sgs, *sgd; + unsigned int stodo, dtodo, len; + +@@ -29,20 +31,25 @@ static int rk_cipher_need_fallback(struc + sgd = 
req->dst; + while (sgs && sgd) { + if (!IS_ALIGNED(sgs->offset, sizeof(u32))) { ++ algt->stat_fb_align++; + return true; + } + if (!IS_ALIGNED(sgd->offset, sizeof(u32))) { ++ algt->stat_fb_align++; + return true; + } + stodo = min(len, sgs->length); + if (stodo % bs) { ++ algt->stat_fb_len++; + return true; + } + dtodo = min(len, sgd->length); + if (dtodo % bs) { ++ algt->stat_fb_len++; + return true; + } + if (stodo != dtodo) { ++ algt->stat_fb_sgdiff++; + return true; + } + len -= stodo; +@@ -57,8 +64,12 @@ static int rk_cipher_fallback(struct skc + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); ++ struct skcipher_alg *alg = crypto_skcipher_alg(tfm); ++ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + int err; + ++ algt->stat_fb++; ++ + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); +@@ -324,6 +335,10 @@ static int rk_cipher_run(struct crypto_e + u8 *ivtouse = areq->iv; + unsigned int len = areq->cryptlen; + unsigned int todo; ++ struct skcipher_alg *alg = crypto_skcipher_alg(tfm); ++ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); ++ ++ algt->stat_req++; + + ivsize = crypto_skcipher_ivsize(tfm); + if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { diff --git a/target/linux/rockchip/patches-6.0/179-crypto-rockchip-introduce-PM.patch b/target/linux/rockchip/patches-6.0/179-crypto-rockchip-introduce-PM.patch new file mode 100644 index 000000000..afe2a9a68 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/179-crypto-rockchip-introduce-PM.patch @@ -0,0 +1,181 @@ +From a8a988a5e67068b554f92418775f274771cfb068 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:51 +0000 +Subject: [PATCH 29/49] crypto: rockchip: 
introduce PM + +Add runtime PM support for rockchip crypto. + +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.c | 51 ++++++++++++++++++- + drivers/crypto/rockchip/rk3288_crypto.h | 1 + + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 10 ++++ + .../crypto/rockchip/rk3288_crypto_skcipher.c | 9 ++++ + 4 files changed, 69 insertions(+), 2 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -65,6 +65,48 @@ static void rk_crypto_disable_clk(struct + clk_disable_unprepare(dev->sclk); + } + ++/* ++ * Power management strategy: The device is suspended unless a TFM exists for ++ * one of the algorithms proposed by this driver. ++ */ ++static int rk_crypto_pm_suspend(struct device *dev) ++{ ++ struct rk_crypto_info *rkdev = dev_get_drvdata(dev); ++ ++ rk_crypto_disable_clk(rkdev); ++ return 0; ++} ++ ++static int rk_crypto_pm_resume(struct device *dev) ++{ ++ struct rk_crypto_info *rkdev = dev_get_drvdata(dev); ++ ++ return rk_crypto_enable_clk(rkdev); ++} ++ ++static const struct dev_pm_ops rk_crypto_pm_ops = { ++ SET_RUNTIME_PM_OPS(rk_crypto_pm_suspend, rk_crypto_pm_resume, NULL) ++}; ++ ++static int rk_crypto_pm_init(struct rk_crypto_info *rkdev) ++{ ++ int err; ++ ++ pm_runtime_use_autosuspend(rkdev->dev); ++ pm_runtime_set_autosuspend_delay(rkdev->dev, 2000); ++ ++ err = pm_runtime_set_suspended(rkdev->dev); ++ if (err) ++ return err; ++ pm_runtime_enable(rkdev->dev); ++ return err; ++} ++ ++static void rk_crypto_pm_exit(struct rk_crypto_info *rkdev) ++{ ++ pm_runtime_disable(rkdev->dev); ++} ++ + static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) + { + struct rk_crypto_info *dev = platform_get_drvdata(dev_id); +@@ -273,7 +315,9 @@ static int rk_crypto_probe(struct platfo + crypto_engine_start(crypto_info->engine); + init_completion(&crypto_info->complete); + +- rk_crypto_enable_clk(crypto_info); ++ err = rk_crypto_pm_init(crypto_info); ++ if 
(err) ++ goto err_pm; + + err = rk_crypto_register(crypto_info); + if (err) { +@@ -294,6 +338,8 @@ static int rk_crypto_probe(struct platfo + return 0; + + err_register_alg: ++ rk_crypto_pm_exit(crypto_info); ++err_pm: + crypto_engine_exit(crypto_info->engine); + err_crypto: + dev_err(dev, "Crypto Accelerator not successfully registered\n"); +@@ -308,7 +354,7 @@ static int rk_crypto_remove(struct platf + debugfs_remove_recursive(crypto_tmp->dbgfs_dir); + #endif + rk_crypto_unregister(); +- rk_crypto_disable_clk(crypto_tmp); ++ rk_crypto_pm_exit(crypto_tmp); + crypto_engine_exit(crypto_tmp->engine); + return 0; + } +@@ -318,6 +364,7 @@ static struct platform_driver crypto_dri + .remove = rk_crypto_remove, + .driver = { + .name = "rk3288-crypto", ++ .pm = &rk_crypto_pm_ops, + .of_match_table = crypto_of_id_table, + }, + }; +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + #include + #include + #include +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -328,6 +328,7 @@ static int rk_cra_hash_init(struct crypt + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + + const char *alg_name = crypto_tfm_alg_name(tfm); ++ int err; + + algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + +@@ -349,7 +350,15 @@ static int rk_cra_hash_init(struct crypt + tctx->enginectx.op.prepare_request = rk_hash_prepare; + tctx->enginectx.op.unprepare_request = rk_hash_unprepare; + ++ err = pm_runtime_resume_and_get(tctx->dev->dev); ++ if (err < 0) ++ goto error_pm; ++ + return 0; ++error_pm: ++ crypto_free_ahash(tctx->fallback_tfm); ++ ++ return err; + } + + static void rk_cra_hash_exit(struct crypto_tfm *tfm) +@@ -357,6 +366,7 @@ static void rk_cra_hash_exit(struct cryp + struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); + + crypto_free_ahash(tctx->fallback_tfm); ++ pm_runtime_put_autosuspend(tctx->dev->dev); + 
} + + struct rk_crypto_tmp rk_ahash_sha1 = { +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -454,6 +454,7 @@ static int rk_ablk_init_tfm(struct crypt + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + const char *name = crypto_tfm_alg_name(&tfm->base); + struct rk_crypto_tmp *algt; ++ int err; + + algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + +@@ -471,7 +472,14 @@ static int rk_ablk_init_tfm(struct crypt + + ctx->enginectx.op.do_one_request = rk_cipher_run; + ++ err = pm_runtime_resume_and_get(ctx->dev->dev); ++ if (err < 0) ++ goto error_pm; ++ + return 0; ++error_pm: ++ crypto_free_skcipher(ctx->fallback_tfm); ++ return err; + } + + static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm) +@@ -480,6 +488,7 @@ static void rk_ablk_exit_tfm(struct cryp + + memzero_explicit(ctx->key, ctx->keylen); + crypto_free_skcipher(ctx->fallback_tfm); ++ pm_runtime_put_autosuspend(ctx->dev->dev); + } + + struct rk_crypto_tmp rk_ecb_aes_alg = { diff --git a/target/linux/rockchip/patches-6.0/180-crypto-rockchip-handle-reset-also-in-PM.patch b/target/linux/rockchip/patches-6.0/180-crypto-rockchip-handle-reset-also-in-PM.patch new file mode 100644 index 000000000..5c7e6953f --- /dev/null +++ b/target/linux/rockchip/patches-6.0/180-crypto-rockchip-handle-reset-also-in-PM.patch @@ -0,0 +1,66 @@ +From e1d7ad70ff4447218815f3b9427ee7cd8cc6836b Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:52 +0000 +Subject: [PATCH 30/49] crypto: rockchip: handle reset also in PM + +reset could be handled by PM functions. +We keep the initial reset pulse to be sure the hw is a know device state +after probe. 
+ +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.c | 22 ++++++++++------------ + 1 file changed, 10 insertions(+), 12 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -74,14 +74,23 @@ static int rk_crypto_pm_suspend(struct d + struct rk_crypto_info *rkdev = dev_get_drvdata(dev); + + rk_crypto_disable_clk(rkdev); ++ reset_control_assert(rkdev->rst); ++ + return 0; + } + + static int rk_crypto_pm_resume(struct device *dev) + { + struct rk_crypto_info *rkdev = dev_get_drvdata(dev); ++ int ret; ++ ++ ret = rk_crypto_enable_clk(rkdev); ++ if (ret) ++ return ret; ++ ++ reset_control_deassert(rkdev->rst); ++ return 0; + +- return rk_crypto_enable_clk(rkdev); + } + + static const struct dev_pm_ops rk_crypto_pm_ops = { +@@ -222,13 +231,6 @@ static void rk_crypto_unregister(void) + } + } + +-static void rk_crypto_action(void *data) +-{ +- struct rk_crypto_info *crypto_info = data; +- +- reset_control_assert(crypto_info->rst); +-} +- + static const struct of_device_id crypto_of_id_table[] = { + { .compatible = "rockchip,rk3288-crypto" }, + {} +@@ -258,10 +260,6 @@ static int rk_crypto_probe(struct platfo + usleep_range(10, 20); + reset_control_deassert(crypto_info->rst); + +- err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info); +- if (err) +- goto err_crypto; +- + crypto_info->reg = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(crypto_info->reg)) { + err = PTR_ERR(crypto_info->reg); diff --git a/target/linux/rockchip/patches-6.0/181-crypto-rockchip-use-clk_bulk-to-simplify-clock-manag.patch b/target/linux/rockchip/patches-6.0/181-crypto-rockchip-use-clk_bulk-to-simplify-clock-manag.patch new file mode 100644 index 000000000..2caf7f6c8 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/181-crypto-rockchip-use-clk_bulk-to-simplify-clock-manag.patch @@ -0,0 +1,118 @@ +From b388e1b6a75d477735f7e2b90130169638b72c37 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe 
+Date: Tue, 27 Sep 2022 07:54:53 +0000 +Subject: [PATCH 31/49] crypto: rockchip: use clk_bulk to simplify clock + management + +rk3328 does not have the same clock names than rk3288, instead of using a complex +clock management, let's use clk_bulk to simplify their handling. + +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.c | 66 ++++--------------------- + drivers/crypto/rockchip/rk3288_crypto.h | 6 +-- + 2 files changed, 11 insertions(+), 61 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -22,47 +22,16 @@ static int rk_crypto_enable_clk(struct r + { + int err; + +- err = clk_prepare_enable(dev->sclk); +- if (err) { +- dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n", +- __func__, __LINE__); +- goto err_return; +- } +- err = clk_prepare_enable(dev->aclk); +- if (err) { +- dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n", +- __func__, __LINE__); +- goto err_aclk; +- } +- err = clk_prepare_enable(dev->hclk); +- if (err) { +- dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n", +- __func__, __LINE__); +- goto err_hclk; +- } +- err = clk_prepare_enable(dev->dmaclk); +- if (err) { +- dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n", +- __func__, __LINE__); +- goto err_dmaclk; +- } +- return err; +-err_dmaclk: +- clk_disable_unprepare(dev->hclk); +-err_hclk: +- clk_disable_unprepare(dev->aclk); +-err_aclk: +- clk_disable_unprepare(dev->sclk); +-err_return: ++ err = clk_bulk_prepare_enable(dev->num_clks, dev->clks); ++ if (err) ++ dev_err(dev->dev, "Could not enable clock clks\n"); ++ + return err; + } + + static void rk_crypto_disable_clk(struct rk_crypto_info *dev) + { +- clk_disable_unprepare(dev->dmaclk); +- clk_disable_unprepare(dev->hclk); +- clk_disable_unprepare(dev->aclk); +- clk_disable_unprepare(dev->sclk); ++ clk_bulk_disable_unprepare(dev->num_clks, dev->clks); + } + + /* +@@ -266,27 +235,10 @@ static int 
rk_crypto_probe(struct platfo + goto err_crypto; + } + +- crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk"); +- if (IS_ERR(crypto_info->aclk)) { +- err = PTR_ERR(crypto_info->aclk); +- goto err_crypto; +- } +- +- crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk"); +- if (IS_ERR(crypto_info->hclk)) { +- err = PTR_ERR(crypto_info->hclk); +- goto err_crypto; +- } +- +- crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk"); +- if (IS_ERR(crypto_info->sclk)) { +- err = PTR_ERR(crypto_info->sclk); +- goto err_crypto; +- } +- +- crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk"); +- if (IS_ERR(crypto_info->dmaclk)) { +- err = PTR_ERR(crypto_info->dmaclk); ++ crypto_info->num_clks = devm_clk_bulk_get_all(&pdev->dev, ++ &crypto_info->clks); ++ if (crypto_info->num_clks < 3) { ++ err = -EINVAL; + goto err_crypto; + } + +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -190,10 +190,8 @@ + + struct rk_crypto_info { + struct device *dev; +- struct clk *aclk; +- struct clk *hclk; +- struct clk *sclk; +- struct clk *dmaclk; ++ struct clk_bulk_data *clks; ++ int num_clks; + struct reset_control *rst; + void __iomem *reg; + int irq; diff --git a/target/linux/rockchip/patches-6.0/182-crypto-rockchip-add-myself-as-maintainer.patch b/target/linux/rockchip/patches-6.0/182-crypto-rockchip-add-myself-as-maintainer.patch new file mode 100644 index 000000000..90a5a8626 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/182-crypto-rockchip-add-myself-as-maintainer.patch @@ -0,0 +1,30 @@ +From 771d6ebe99b61c1b143feab92e896fecc6ba16d0 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:54 +0000 +Subject: [PATCH 32/49] crypto: rockchip: add myself as maintainer + +Nobody is set as maintainer of rockchip crypto, I propose to do it as I +have already reworked lot of this code. 
+ +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + MAINTAINERS | 7 +++++++ + 1 file changed, 7 insertions(+) + +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -17570,6 +17570,13 @@ F: Documentation/ABI/*/sysfs-driver-hid- + F: drivers/hid/hid-roccat* + F: include/linux/hid-roccat* + ++ROCKCHIP CRYPTO DRIVERS ++M: Corentin Labbe ++L: linux-crypto@vger.kernel.org ++S: Maintained ++F: Documentation/devicetree/bindings/crypto/rockchip,rk3288-crypto.yaml ++F: drivers/crypto/rockchip/ ++ + ROCKCHIP I2S TDM DRIVER + M: Nicolas Frattaroli + L: linux-rockchip@lists.infradead.org diff --git a/target/linux/rockchip/patches-6.0/183-crypto-rockchip-use-read_poll_timeout.patch b/target/linux/rockchip/patches-6.0/183-crypto-rockchip-use-read_poll_timeout.patch new file mode 100644 index 000000000..b1ae94d2e --- /dev/null +++ b/target/linux/rockchip/patches-6.0/183-crypto-rockchip-use-read_poll_timeout.patch @@ -0,0 +1,54 @@ +From f7d1ea66d097a14e2c8dd312f2360db746db055f Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:55 +0000 +Subject: [PATCH 33/49] crypto: rockchip: use read_poll_timeout + +Use read_poll_timeout instead of open coding it. +In the same time, fix indentation of related comment. + +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 24 +++++++++---------- + 1 file changed, 12 insertions(+), 12 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -10,6 +10,7 @@ + */ + #include + #include ++#include + #include "rk3288_crypto.h" + + /* +@@ -295,18 +296,17 @@ static int rk_hash_run(struct crypto_eng + sg = sg_next(sg); + } + +- /* +- * it will take some time to process date after last dma +- * transmission. +- * +- * waiting time is relative with the last date len, +- * so cannot set a fixed time here. 
+- * 10us makes system not call here frequently wasting +- * efficiency, and make it response quickly when dma +- * complete. +- */ +- while (!CRYPTO_READ(tctx->dev, RK_CRYPTO_HASH_STS)) +- udelay(10); ++ /* ++ * it will take some time to process date after last dma ++ * transmission. ++ * ++ * waiting time is relative with the last date len, ++ * so cannot set a fixed time here. ++ * 10us makes system not call here frequently wasting ++ * efficiency, and make it response quickly when dma ++ * complete. ++ */ ++ readl_poll_timeout(tctx->dev->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000); + + for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) { + v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4); diff --git a/target/linux/rockchip/patches-6.0/184-crypto-rockchip-fix-style-issue.patch b/target/linux/rockchip/patches-6.0/184-crypto-rockchip-fix-style-issue.patch new file mode 100644 index 000000000..4c6d89be1 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/184-crypto-rockchip-fix-style-issue.patch @@ -0,0 +1,55 @@ +From ce5aef761e09d6f17ed8f3e0dae3bf90fb06a838 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:56 +0000 +Subject: [PATCH 34/49] crypto: rockchip: fix style issue + +This patch fixes some warning reported by checkpatch + +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -336,7 +336,7 @@ static int rk_cra_hash_init(struct crypt + + /* for fallback */ + tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, +- CRYPTO_ALG_NEED_FALLBACK); ++ CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(tctx->fallback_tfm)) { + dev_err(tctx->dev->dev, "Could not load fallback driver.\n"); + return PTR_ERR(tctx->fallback_tfm); +@@ -394,8 +394,8 @@ struct rk_crypto_tmp rk_ahash_sha1 = { + .cra_init = 
rk_cra_hash_init, + .cra_exit = rk_cra_hash_exit, + .cra_module = THIS_MODULE, +- } +- } ++ } ++ } + } + }; + +@@ -424,8 +424,8 @@ struct rk_crypto_tmp rk_ahash_sha256 = { + .cra_init = rk_cra_hash_init, + .cra_exit = rk_cra_hash_exit, + .cra_module = THIS_MODULE, +- } +- } ++ } ++ } + } + }; + +@@ -454,7 +454,7 @@ struct rk_crypto_tmp rk_ahash_md5 = { + .cra_init = rk_cra_hash_init, + .cra_exit = rk_cra_hash_exit, + .cra_module = THIS_MODULE, +- } + } ++ } + } + }; diff --git a/target/linux/rockchip/patches-6.0/185-crypto-rockchip-add-support-for-rk3328.patch b/target/linux/rockchip/patches-6.0/185-crypto-rockchip-add-support-for-rk3328.patch new file mode 100644 index 000000000..7319de603 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/185-crypto-rockchip-add-support-for-rk3328.patch @@ -0,0 +1,23 @@ +From 50f01f648e6cc3d2b20674b50b662290c98e9041 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:57 +0000 +Subject: [PATCH 35/49] crypto: rockchip: add support for rk3328 + +The rk3328 could be used as-is by the rockchip driver. 
+ +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -202,6 +202,7 @@ static void rk_crypto_unregister(void) + + static const struct of_device_id crypto_of_id_table[] = { + { .compatible = "rockchip,rk3288-crypto" }, ++ { .compatible = "rockchip,rk3328-crypto" }, + {} + }; + MODULE_DEVICE_TABLE(of, crypto_of_id_table); diff --git a/target/linux/rockchip/patches-6.0/186-crypto-rockchip-rename-ablk-functions-to-cipher.patch b/target/linux/rockchip/patches-6.0/186-crypto-rockchip-rename-ablk-functions-to-cipher.patch new file mode 100644 index 000000000..bc6337569 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/186-crypto-rockchip-rename-ablk-functions-to-cipher.patch @@ -0,0 +1,119 @@ +From d6996995f04ac2be833d87b94e78aa532ed9ee16 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:58 +0000 +Subject: [PATCH 36/49] crypto: rockchip: rename ablk functions to cipher + +Some functions have still ablk in their name even if there are +not handling ablk_cipher anymore. +So let's rename them. 
+ +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + .../crypto/rockchip/rk3288_crypto_skcipher.c | 32 +++++++++---------- + 1 file changed, 16 insertions(+), 16 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -273,7 +273,7 @@ static int rk_des3_ede_cbc_decrypt(struc + return rk_handle_req(dev, req); + } + +-static void rk_ablk_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req) ++static void rk_cipher_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req) + { + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); +@@ -382,7 +382,7 @@ static int rk_cipher_run(struct crypto_e + } + } + err = 0; +- rk_ablk_hw_init(ctx->dev, areq); ++ rk_cipher_hw_init(ctx->dev, areq); + if (ivsize) { + if (ivsize == DES_BLOCK_SIZE) + memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize); +@@ -448,7 +448,7 @@ theend_iv: + return err; + } + +-static int rk_ablk_init_tfm(struct crypto_skcipher *tfm) ++static int rk_cipher_tfm_init(struct crypto_skcipher *tfm) + { + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); +@@ -482,7 +482,7 @@ error_pm: + return err; + } + +-static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm) ++static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm) + { + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + +@@ -503,8 +503,8 @@ struct rk_crypto_tmp rk_ecb_aes_alg = { + .base.cra_alignmask = 0x0f, + .base.cra_module = THIS_MODULE, + +- .init = rk_ablk_init_tfm, +- .exit = rk_ablk_exit_tfm, ++ .init = rk_cipher_tfm_init, ++ .exit = rk_cipher_tfm_exit, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = rk_aes_setkey, +@@ -525,8 +525,8 @@ struct rk_crypto_tmp rk_cbc_aes_alg = { + .base.cra_alignmask = 0x0f, + .base.cra_module = THIS_MODULE, + +- .init = 
rk_ablk_init_tfm, +- .exit = rk_ablk_exit_tfm, ++ .init = rk_cipher_tfm_init, ++ .exit = rk_cipher_tfm_exit, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, +@@ -548,8 +548,8 @@ struct rk_crypto_tmp rk_ecb_des_alg = { + .base.cra_alignmask = 0x07, + .base.cra_module = THIS_MODULE, + +- .init = rk_ablk_init_tfm, +- .exit = rk_ablk_exit_tfm, ++ .init = rk_cipher_tfm_init, ++ .exit = rk_cipher_tfm_exit, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = rk_des_setkey, +@@ -570,8 +570,8 @@ struct rk_crypto_tmp rk_cbc_des_alg = { + .base.cra_alignmask = 0x07, + .base.cra_module = THIS_MODULE, + +- .init = rk_ablk_init_tfm, +- .exit = rk_ablk_exit_tfm, ++ .init = rk_cipher_tfm_init, ++ .exit = rk_cipher_tfm_exit, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, +@@ -593,8 +593,8 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg + .base.cra_alignmask = 0x07, + .base.cra_module = THIS_MODULE, + +- .init = rk_ablk_init_tfm, +- .exit = rk_ablk_exit_tfm, ++ .init = rk_cipher_tfm_init, ++ .exit = rk_cipher_tfm_exit, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = rk_tdes_setkey, +@@ -615,8 +615,8 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg + .base.cra_alignmask = 0x07, + .base.cra_module = THIS_MODULE, + +- .init = rk_ablk_init_tfm, +- .exit = rk_ablk_exit_tfm, ++ .init = rk_cipher_tfm_init, ++ .exit = rk_cipher_tfm_exit, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, diff --git a/target/linux/rockchip/patches-6.0/187-crypto-rockchip-rework-rk_handle_req-function.patch b/target/linux/rockchip/patches-6.0/187-crypto-rockchip-rework-rk_handle_req-function.patch new file mode 100644 index 000000000..136423a66 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/187-crypto-rockchip-rework-rk_handle_req-function.patch @@ -0,0 +1,180 @@ +From d294827ca9bf9c9a893ea0b70f5cec93a3248706 Mon 
Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:54:59 +0000 +Subject: [PATCH 37/49] crypto: rockchip: rework rk_handle_req function + +This patch rework the rk_handle_req(), simply removing the +rk_crypto_info parameter. + +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + .../crypto/rockchip/rk3288_crypto_skcipher.c | 68 +++++-------------- + 1 file changed, 17 insertions(+), 51 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -82,10 +82,12 @@ static int rk_cipher_fallback(struct skc + return err; + } + +-static int rk_handle_req(struct rk_crypto_info *dev, +- struct skcipher_request *req) ++static int rk_cipher_handle_req(struct skcipher_request *req) + { +- struct crypto_engine *engine = dev->engine; ++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); ++ struct rk_cipher_ctx *tctx = crypto_skcipher_ctx(tfm); ++ struct rk_crypto_info *rkc = tctx->dev; ++ struct crypto_engine *engine = rkc->engine; + + if (rk_cipher_need_fallback(req)) + return rk_cipher_fallback(req); +@@ -142,135 +144,99 @@ static int rk_tdes_setkey(struct crypto_ + + static int rk_aes_ecb_encrypt(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_crypto_info *dev = ctx->dev; + + rctx->mode = RK_CRYPTO_AES_ECB_MODE; +- return rk_handle_req(dev, req); ++ return rk_cipher_handle_req(req); + } + + static int rk_aes_ecb_decrypt(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_crypto_info *dev = ctx->dev; + + rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; +- return rk_handle_req(dev, req); ++ return rk_cipher_handle_req(req); 
+ } + + static int rk_aes_cbc_encrypt(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_crypto_info *dev = ctx->dev; + + rctx->mode = RK_CRYPTO_AES_CBC_MODE; +- return rk_handle_req(dev, req); ++ return rk_cipher_handle_req(req); + } + + static int rk_aes_cbc_decrypt(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_crypto_info *dev = ctx->dev; + + rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; +- return rk_handle_req(dev, req); ++ return rk_cipher_handle_req(req); + } + + static int rk_des_ecb_encrypt(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_crypto_info *dev = ctx->dev; + + rctx->mode = 0; +- return rk_handle_req(dev, req); ++ return rk_cipher_handle_req(req); + } + + static int rk_des_ecb_decrypt(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_crypto_info *dev = ctx->dev; + + rctx->mode = RK_CRYPTO_DEC; +- return rk_handle_req(dev, req); ++ return rk_cipher_handle_req(req); + } + + static int rk_des_cbc_encrypt(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_crypto_info *dev = ctx->dev; + + rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; +- return rk_handle_req(dev, req); ++ return 
rk_cipher_handle_req(req); + } + + static int rk_des_cbc_decrypt(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_crypto_info *dev = ctx->dev; + + rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; +- return rk_handle_req(dev, req); ++ return rk_cipher_handle_req(req); + } + + static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_crypto_info *dev = ctx->dev; + + rctx->mode = RK_CRYPTO_TDES_SELECT; +- return rk_handle_req(dev, req); ++ return rk_cipher_handle_req(req); + } + + static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_crypto_info *dev = ctx->dev; + + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; +- return rk_handle_req(dev, req); ++ return rk_cipher_handle_req(req); + } + + static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_crypto_info *dev = ctx->dev; + + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; +- return rk_handle_req(dev, req); ++ return rk_cipher_handle_req(req); + } + + static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct 
rk_crypto_info *dev = ctx->dev; + + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | + RK_CRYPTO_DEC; +- return rk_handle_req(dev, req); ++ return rk_cipher_handle_req(req); + } + + static void rk_cipher_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req) diff --git a/target/linux/rockchip/patches-6.0/188-crypto-rockchip-use-a-rk_crypto_info-variable-instea.patch b/target/linux/rockchip/patches-6.0/188-crypto-rockchip-use-a-rk_crypto_info-variable-instea.patch new file mode 100644 index 000000000..b663638df --- /dev/null +++ b/target/linux/rockchip/patches-6.0/188-crypto-rockchip-use-a-rk_crypto_info-variable-instea.patch @@ -0,0 +1,172 @@ +From b792b8f33d2c772cab201a068884feb0c10c1533 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:55:00 +0000 +Subject: [PATCH 38/49] crypto: rockchip: use a rk_crypto_info variable instead + of lot of indirection + +Instead of using lot of ctx->dev->xx indirections, use an intermediate +variable for rk_crypto_info. +This will help later, when 2 different rk_crypto_info would be used. 
+ +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 23 +++++++----- + .../crypto/rockchip/rk3288_crypto_skcipher.c | 37 ++++++++++--------- + 2 files changed, 32 insertions(+), 28 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -226,9 +226,10 @@ static int rk_hash_prepare(struct crypto + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); ++ struct rk_crypto_info *rkc = tctx->dev; + int ret; + +- ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); ++ ret = dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); + if (ret <= 0) + return -EINVAL; + +@@ -243,8 +244,9 @@ static int rk_hash_unprepare(struct cryp + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); ++ struct rk_crypto_info *rkc = tctx->dev; + +- dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); ++ dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); + return 0; + } + +@@ -257,6 +259,7 @@ static int rk_hash_run(struct crypto_eng + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + struct scatterlist *sg = areq->src; ++ struct rk_crypto_info *rkc = tctx->dev; + int err = 0; + int i; + u32 v; +@@ -283,13 +286,13 @@ static int rk_hash_run(struct crypto_eng + rk_ahash_reg_init(areq); + + while (sg) { +- reinit_completion(&tctx->dev->complete); +- tctx->dev->status = 0; +- crypto_ahash_dma_start(tctx->dev, sg); +- wait_for_completion_interruptible_timeout(&tctx->dev->complete, ++ reinit_completion(&rkc->complete); ++ rkc->status = 0; ++ crypto_ahash_dma_start(rkc, sg); ++ 
wait_for_completion_interruptible_timeout(&rkc->complete, + msecs_to_jiffies(2000)); +- if (!tctx->dev->status) { +- dev_err(tctx->dev->dev, "DMA timeout\n"); ++ if (!rkc->status) { ++ dev_err(rkc->dev, "DMA timeout\n"); + err = -EFAULT; + goto theend; + } +@@ -306,10 +309,10 @@ static int rk_hash_run(struct crypto_eng + * efficiency, and make it response quickly when dma + * complete. + */ +- readl_poll_timeout(tctx->dev->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000); ++ readl_poll_timeout(rkc->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000); + + for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) { +- v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4); ++ v = readl(rkc->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4); + put_unaligned_le32(v, areq->result + i * 4); + } + +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -303,6 +303,7 @@ static int rk_cipher_run(struct crypto_e + unsigned int todo; + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); ++ struct rk_crypto_info *rkc = ctx->dev; + + algt->stat_req++; + +@@ -330,49 +331,49 @@ static int rk_cipher_run(struct crypto_e + scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0); + } + if (sgs == sgd) { +- err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); ++ err = dma_map_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL); + if (err <= 0) { + err = -EINVAL; + goto theend_iv; + } + } else { +- err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); ++ err = dma_map_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE); + if (err <= 0) { + err = -EINVAL; + goto theend_iv; + } +- err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); ++ err = dma_map_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE); + if (err <= 0) { + err = -EINVAL; + goto theend_sgs; + } + } + err = 0; +- rk_cipher_hw_init(ctx->dev, areq); ++ rk_cipher_hw_init(rkc, areq); + if (ivsize) { + if (ivsize == DES_BLOCK_SIZE) +- 
memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize); ++ memcpy_toio(rkc->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize); + else +- memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize); ++ memcpy_toio(rkc->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize); + } +- reinit_completion(&ctx->dev->complete); +- ctx->dev->status = 0; ++ reinit_completion(&rkc->complete); ++ rkc->status = 0; + + todo = min(sg_dma_len(sgs), len); + len -= todo; +- crypto_dma_start(ctx->dev, sgs, sgd, todo / 4); +- wait_for_completion_interruptible_timeout(&ctx->dev->complete, ++ crypto_dma_start(rkc, sgs, sgd, todo / 4); ++ wait_for_completion_interruptible_timeout(&rkc->complete, + msecs_to_jiffies(2000)); +- if (!ctx->dev->status) { +- dev_err(ctx->dev->dev, "DMA timeout\n"); ++ if (!rkc->status) { ++ dev_err(rkc->dev, "DMA timeout\n"); + err = -EFAULT; + goto theend; + } + if (sgs == sgd) { +- dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); ++ dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL); + } else { +- dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); +- dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); ++ dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE); ++ dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE); + } + if (rctx->mode & RK_CRYPTO_DEC) { + memcpy(iv, biv, ivsize); +@@ -405,10 +406,10 @@ theend: + + theend_sgs: + if (sgs == sgd) { +- dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); ++ dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL); + } else { +- dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); +- dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); ++ dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE); ++ dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE); + } + theend_iv: + return err; diff --git a/target/linux/rockchip/patches-6.0/189-crypto-rockchip-use-the-rk_crypto_info-given-as-para.patch b/target/linux/rockchip/patches-6.0/189-crypto-rockchip-use-the-rk_crypto_info-given-as-para.patch new file mode 100644 index 000000000..d5d582fd2 --- 
/dev/null +++ b/target/linux/rockchip/patches-6.0/189-crypto-rockchip-use-the-rk_crypto_info-given-as-para.patch @@ -0,0 +1,34 @@ +From 9cbaeb79b6353f2b13c592754739d33f027e6662 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:55:01 +0000 +Subject: [PATCH 39/49] crypto: rockchip: use the rk_crypto_info given as + parameter + +Instead of using the crypto_info from TFM ctx, use the one given as parameter. + +Reviewed-by: John Keeping +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -254,7 +254,7 @@ static void rk_cipher_hw_init(struct rk_ + RK_CRYPTO_TDES_BYTESWAP_KEY | + RK_CRYPTO_TDES_BYTESWAP_IV; + CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode); +- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen); ++ memcpy_toio(dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen); + conf_reg = RK_CRYPTO_DESSEL; + } else { + rctx->mode |= RK_CRYPTO_AES_FIFO_MODE | +@@ -266,7 +266,7 @@ static void rk_cipher_hw_init(struct rk_ + else if (ctx->keylen == AES_KEYSIZE_256) + rctx->mode |= RK_CRYPTO_AES_256BIT_key; + CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode); +- memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen); ++ memcpy_toio(dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen); + } + conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | + RK_CRYPTO_BYTESWAP_BRFIFO; diff --git a/target/linux/rockchip/patches-6.0/190-dt-bindings-crypto-convert-rockchip-crypto-to-YAML.patch b/target/linux/rockchip/patches-6.0/190-dt-bindings-crypto-convert-rockchip-crypto-to-YAML.patch new file mode 100644 index 000000000..6d41f1a01 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/190-dt-bindings-crypto-convert-rockchip-crypto-to-YAML.patch @@ -0,0 +1,115 @@ +From b4f63ecb0942ead52697ef3790c79546804fe478 Mon Sep 17 
00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:55:02 +0000 +Subject: [PATCH 40/49] dt-bindings: crypto: convert rockchip-crypto to YAML + +Convert rockchip-crypto to YAML. + +Reviewed-by: John Keeping +Reviewed-by: Krzysztof Kozlowski +Signed-off-by: Corentin Labbe +--- + .../crypto/rockchip,rk3288-crypto.yaml | 64 +++++++++++++++++++ + .../bindings/crypto/rockchip-crypto.txt | 28 -------- + 2 files changed, 64 insertions(+), 28 deletions(-) + create mode 100644 Documentation/devicetree/bindings/crypto/rockchip,rk3288-crypto.yaml + delete mode 100644 Documentation/devicetree/bindings/crypto/rockchip-crypto.txt + +--- /dev/null ++++ b/Documentation/devicetree/bindings/crypto/rockchip,rk3288-crypto.yaml +@@ -0,0 +1,64 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/crypto/rockchip,rk3288-crypto.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Rockchip Electronics Security Accelerator ++ ++maintainers: ++ - Heiko Stuebner ++ ++properties: ++ compatible: ++ enum: ++ - rockchip,rk3288-crypto ++ ++ reg: ++ maxItems: 1 ++ ++ interrupts: ++ maxItems: 1 ++ ++ clocks: ++ maxItems: 4 ++ ++ clock-names: ++ items: ++ - const: aclk ++ - const: hclk ++ - const: sclk ++ - const: apb_pclk ++ ++ resets: ++ maxItems: 1 ++ ++ reset-names: ++ items: ++ - const: crypto-rst ++ ++required: ++ - compatible ++ - reg ++ - interrupts ++ - clocks ++ - clock-names ++ - resets ++ - reset-names ++ ++additionalProperties: false ++ ++examples: ++ - | ++ #include ++ #include ++ crypto@ff8a0000 { ++ compatible = "rockchip,rk3288-crypto"; ++ reg = <0xff8a0000 0x4000>; ++ interrupts = ; ++ clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>, ++ <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>; ++ clock-names = "aclk", "hclk", "sclk", "apb_pclk"; ++ resets = <&cru SRST_CRYPTO>; ++ reset-names = "crypto-rst"; ++ }; +--- a/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt ++++ /dev/null +@@ -1,28 
+0,0 @@ +-Rockchip Electronics And Security Accelerator +- +-Required properties: +-- compatible: Should be "rockchip,rk3288-crypto" +-- reg: Base physical address of the engine and length of memory mapped +- region +-- interrupts: Interrupt number +-- clocks: Reference to the clocks about crypto +-- clock-names: "aclk" used to clock data +- "hclk" used to clock data +- "sclk" used to clock crypto accelerator +- "apb_pclk" used to clock dma +-- resets: Must contain an entry for each entry in reset-names. +- See ../reset/reset.txt for details. +-- reset-names: Must include the name "crypto-rst". +- +-Examples: +- +- crypto: cypto-controller@ff8a0000 { +- compatible = "rockchip,rk3288-crypto"; +- reg = <0xff8a0000 0x4000>; +- interrupts = ; +- clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>, +- <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>; +- clock-names = "aclk", "hclk", "sclk", "apb_pclk"; +- resets = <&cru SRST_CRYPTO>; +- reset-names = "crypto-rst"; +- }; diff --git a/target/linux/rockchip/patches-6.0/191-dt-bindings-crypto-rockchip-add-new-compatible.patch b/target/linux/rockchip/patches-6.0/191-dt-bindings-crypto-rockchip-add-new-compatible.patch new file mode 100644 index 000000000..dfe6203a0 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/191-dt-bindings-crypto-rockchip-add-new-compatible.patch @@ -0,0 +1,114 @@ +From a20c32bcc5b5067368adc5ae47c467e32ffc0994 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:55:03 +0000 +Subject: [PATCH 41/49] dt-bindings: crypto: rockchip: add new compatible + +Since driver support new compatible, we need to update the driver bindings. 
+ +Signed-off-by: Corentin Labbe +--- + .../crypto/rockchip,rk3288-crypto.yaml | 79 +++++++++++++++++-- + 1 file changed, 71 insertions(+), 8 deletions(-) + +--- a/Documentation/devicetree/bindings/crypto/rockchip,rk3288-crypto.yaml ++++ b/Documentation/devicetree/bindings/crypto/rockchip,rk3288-crypto.yaml +@@ -13,6 +13,8 @@ properties: + compatible: + enum: + - rockchip,rk3288-crypto ++ - rockchip,rk3328-crypto ++ - rockchip,rk3399-crypto + + reg: + maxItems: 1 +@@ -21,21 +23,82 @@ properties: + maxItems: 1 + + clocks: ++ minItems: 3 + maxItems: 4 + + clock-names: +- items: +- - const: aclk +- - const: hclk +- - const: sclk +- - const: apb_pclk ++ minItems: 3 ++ maxItems: 4 + + resets: +- maxItems: 1 ++ minItems: 1 ++ maxItems: 3 + + reset-names: +- items: +- - const: crypto-rst ++ minItems: 1 ++ maxItems: 3 ++ ++allOf: ++ - if: ++ properties: ++ compatible: ++ contains: ++ const: rockchip,rk3288-crypto ++ then: ++ properties: ++ clocks: ++ minItems: 4 ++ clock-names: ++ items: ++ - const: aclk ++ - const: hclk ++ - const: sclk ++ - const: apb_pclk ++ resets: ++ maxItems: 1 ++ reset-names: ++ items: ++ - const: crypto-rst ++ - if: ++ properties: ++ compatible: ++ contains: ++ const: rockchip,rk3328-crypto ++ then: ++ properties: ++ clocks: ++ maxItems: 3 ++ clock-names: ++ items: ++ - const: hclk_master ++ - const: hclk_slave ++ - const: sclk ++ resets: ++ maxItems: 1 ++ reset-names: ++ items: ++ - const: crypto-rst ++ - if: ++ properties: ++ compatible: ++ contains: ++ const: rockchip,rk3399-crypto ++ then: ++ properties: ++ clocks: ++ maxItems: 3 ++ clock-names: ++ items: ++ - const: hclk_master ++ - const: hclk_slave ++ - const: sclk ++ resets: ++ minItems: 3 ++ reset-names: ++ items: ++ - const: master ++ - const: slave ++ - const: crypto-rst + + required: + - compatible diff --git a/target/linux/rockchip/patches-6.0/192-clk-rk3399-use-proper-crypto0-name.patch b/target/linux/rockchip/patches-6.0/192-clk-rk3399-use-proper-crypto0-name.patch new file mode 
100644 index 000000000..6c2d5eb4c --- /dev/null +++ b/target/linux/rockchip/patches-6.0/192-clk-rk3399-use-proper-crypto0-name.patch @@ -0,0 +1,37 @@ +From b55b62250202a6d95872e367963190aaad6f9f08 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:55:04 +0000 +Subject: [PATCH 42/49] clk: rk3399: use proper crypto0 name + +RK3399 has 2 crypto instance, named crypto0 and crypto1 in the TRM. +Only reset for crypto1 is correctly named, but crypto0 is not. +Since nobody use them , add a 0 to be consistent with the TRM and crypto1 entries. + +Acked-by: Rob Herring +Signed-off-by: Corentin Labbe +--- + include/dt-bindings/clock/rk3399-cru.h | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/include/dt-bindings/clock/rk3399-cru.h ++++ b/include/dt-bindings/clock/rk3399-cru.h +@@ -547,8 +547,8 @@ + #define SRST_H_PERILP0 171 + #define SRST_H_PERILP0_NOC 172 + #define SRST_ROM 173 +-#define SRST_CRYPTO_S 174 +-#define SRST_CRYPTO_M 175 ++#define SRST_CRYPTO0_S 174 ++#define SRST_CRYPTO0_M 175 + + /* cru_softrst_con11 */ + #define SRST_P_DCF 176 +@@ -556,7 +556,7 @@ + #define SRST_CM0S 178 + #define SRST_CM0S_DBG 179 + #define SRST_CM0S_PO 180 +-#define SRST_CRYPTO 181 ++#define SRST_CRYPTO0 181 + #define SRST_P_PERILP1_SGRF 182 + #define SRST_P_PERILP1_GRF 183 + #define SRST_CRYPTO1_S 184 diff --git a/target/linux/rockchip/patches-6.0/193-arm64-dts-rockchip-add-rk3328-crypto-node.patch b/target/linux/rockchip/patches-6.0/193-arm64-dts-rockchip-add-rk3328-crypto-node.patch new file mode 100644 index 000000000..ef812d7cd --- /dev/null +++ b/target/linux/rockchip/patches-6.0/193-arm64-dts-rockchip-add-rk3328-crypto-node.patch @@ -0,0 +1,33 @@ +From e39620e4a26f650c72ad5624f27dfe964ccf3e03 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:55:05 +0000 +Subject: [PATCH 43/49] arm64: dts: rockchip: add rk3328 crypto node + +rk3328 has a crypto IP handled by the rk3288 crypto driver so adds a +node for it. 
+ +Signed-off-by: Corentin Labbe +--- + arch/arm64/boot/dts/rockchip/rk3328.dtsi | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi +@@ -1025,6 +1025,17 @@ + (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; + }; + ++ crypto: crypto@ff060000 { ++ compatible = "rockchip,rk3328-crypto"; ++ reg = <0x0 0xff060000 0x0 0x4000>; ++ interrupts = ; ++ clocks = <&cru HCLK_CRYPTO_MST>, <&cru HCLK_CRYPTO_SLV>, ++ <&cru SCLK_CRYPTO>; ++ clock-names = "hclk_master", "hclk_slave", "sclk"; ++ resets = <&cru SRST_CRYPTO>; ++ reset-names = "crypto-rst"; ++ }; ++ + pinctrl: pinctrl { + compatible = "rockchip,rk3328-pinctrl"; + rockchip,grf = <&grf>; diff --git a/target/linux/rockchip/patches-6.0/194-arm64-dts-rockchip-rk3399-add-crypto-node.patch b/target/linux/rockchip/patches-6.0/194-arm64-dts-rockchip-rk3399-add-crypto-node.patch new file mode 100644 index 000000000..31f7ea653 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/194-arm64-dts-rockchip-rk3399-add-crypto-node.patch @@ -0,0 +1,43 @@ +From e0d5c068d092b0c1a60f706cb18ac71ff4ec5268 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:55:06 +0000 +Subject: [PATCH 44/49] arm64: dts: rockchip: rk3399: add crypto node + +The rk3399 has a crypto IP handled by the rk3288 crypto driver so adds a +node for it. 
+ +Tested-by: Diederik de Haas +Signed-off-by: Corentin Labbe +--- + arch/arm64/boot/dts/rockchip/rk3399.dtsi | 20 ++++++++++++++++++++ + 1 file changed, 20 insertions(+) + +--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi +@@ -582,6 +582,26 @@ + status = "disabled"; + }; + ++ crypto0: crypto@ff8b0000 { ++ compatible = "rockchip,rk3399-crypto"; ++ reg = <0x0 0xff8b0000 0x0 0x4000>; ++ interrupts = ; ++ clocks = <&cru HCLK_M_CRYPTO0>, <&cru HCLK_S_CRYPTO0>, <&cru SCLK_CRYPTO0>; ++ clock-names = "hclk_master", "hclk_slave", "sclk"; ++ resets = <&cru SRST_CRYPTO0>, <&cru SRST_CRYPTO0_S>, <&cru SRST_CRYPTO0_M>; ++ reset-names = "master", "slave", "crypto-rst"; ++ }; ++ ++ crypto1: crypto@ff8b8000 { ++ compatible = "rockchip,rk3399-crypto"; ++ reg = <0x0 0xff8b8000 0x0 0x4000>; ++ interrupts = ; ++ clocks = <&cru HCLK_M_CRYPTO1>, <&cru HCLK_S_CRYPTO1>, <&cru SCLK_CRYPTO1>; ++ clock-names = "hclk_master", "hclk_slave", "sclk"; ++ resets = <&cru SRST_CRYPTO1>, <&cru SRST_CRYPTO1_S>, <&cru SRST_CRYPTO1_M>; ++ reset-names = "master", "slave", "crypto-rst"; ++ }; ++ + i2c1: i2c@ff110000 { + compatible = "rockchip,rk3399-i2c"; + reg = <0x0 0xff110000 0x0 0x1000>; diff --git a/target/linux/rockchip/patches-6.0/195-crypto-rockchip-store-crypto_info-in-request-context.patch b/target/linux/rockchip/patches-6.0/195-crypto-rockchip-store-crypto_info-in-request-context.patch new file mode 100644 index 000000000..d8c4d173e --- /dev/null +++ b/target/linux/rockchip/patches-6.0/195-crypto-rockchip-store-crypto_info-in-request-context.patch @@ -0,0 +1,124 @@ +From 61900b43baea9ed606aacb824b761feca7511eaa Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:55:07 +0000 +Subject: [PATCH 45/49] crypto: rockchip: store crypto_info in request context + +The crypto_info to use must be stored in the request context. +This will help when 2 crypto_info will be available on rk3399. 
+ +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.h | 2 ++ + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 14 ++++++-------- + drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 6 ++++-- + 3 files changed, 12 insertions(+), 10 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -215,6 +215,7 @@ struct rk_ahash_ctx { + + /* the private variable of hash for fallback */ + struct rk_ahash_rctx { ++ struct rk_crypto_info *dev; + struct ahash_request fallback_req; + u32 mode; + int nrsg; +@@ -231,6 +232,7 @@ struct rk_cipher_ctx { + }; + + struct rk_cipher_rctx { ++ struct rk_crypto_info *dev; + u8 backup_iv[AES_BLOCK_SIZE]; + u32 mode; + struct skcipher_request fallback_req; // keep at the end +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -200,6 +200,7 @@ static int rk_ahash_export(struct ahash_ + + static int rk_ahash_digest(struct ahash_request *req) + { ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(req); + struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); + struct rk_crypto_info *dev = tctx->dev; + +@@ -209,6 +210,8 @@ static int rk_ahash_digest(struct ahash_ + if (!req->nbytes) + return zero_message_process(req); + ++ rctx->dev = dev; ++ + return crypto_transfer_hash_request_to_engine(dev->engine, req); + } + +@@ -223,10 +226,8 @@ static void crypto_ahash_dma_start(struc + static int rk_hash_prepare(struct crypto_engine *engine, void *breq) + { + struct ahash_request *areq = container_of(breq, struct ahash_request, base); +- struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); +- struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); +- struct rk_crypto_info *rkc = tctx->dev; ++ struct rk_crypto_info *rkc = rctx->dev; + int ret; + + ret = dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); +@@ -241,10 +242,8 @@ static int 
rk_hash_prepare(struct crypto + static int rk_hash_unprepare(struct crypto_engine *engine, void *breq) + { + struct ahash_request *areq = container_of(breq, struct ahash_request, base); +- struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); +- struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); +- struct rk_crypto_info *rkc = tctx->dev; ++ struct rk_crypto_info *rkc = rctx->dev; + + dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); + return 0; +@@ -255,11 +254,10 @@ static int rk_hash_run(struct crypto_eng + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); +- struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + struct scatterlist *sg = areq->src; +- struct rk_crypto_info *rkc = tctx->dev; ++ struct rk_crypto_info *rkc = rctx->dev; + int err = 0; + int i; + u32 v; +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -86,12 +86,15 @@ static int rk_cipher_handle_req(struct s + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rk_cipher_ctx *tctx = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *rkc = tctx->dev; + struct crypto_engine *engine = rkc->engine; + + if (rk_cipher_need_fallback(req)) + return rk_cipher_fallback(req); + ++ rctx->dev = rkc; ++ + return crypto_transfer_skcipher_request_to_engine(engine, req); + } + +@@ -290,7 +293,6 @@ static int rk_cipher_run(struct crypto_e + { + struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); +- struct rk_cipher_ctx *ctx = 
crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); + struct scatterlist *sgs, *sgd; + int err = 0; +@@ -303,7 +305,7 @@ static int rk_cipher_run(struct crypto_e + unsigned int todo; + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); +- struct rk_crypto_info *rkc = ctx->dev; ++ struct rk_crypto_info *rkc = rctx->dev; + + algt->stat_req++; + diff --git a/target/linux/rockchip/patches-6.0/196-crypto-rockchip-Check-for-clocks-numbers-and-their-f.patch b/target/linux/rockchip/patches-6.0/196-crypto-rockchip-Check-for-clocks-numbers-and-their-f.patch new file mode 100644 index 000000000..5cebb131e --- /dev/null +++ b/target/linux/rockchip/patches-6.0/196-crypto-rockchip-Check-for-clocks-numbers-and-their-f.patch @@ -0,0 +1,165 @@ +From 5301685e031d21df2b2a2d5959805f3292f3d481 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:55:08 +0000 +Subject: [PATCH 46/49] crypto: rockchip: Check for clocks numbers and their + frequencies + +Add the number of clocks needed for each compatible. +Rockchip's datasheet give maximum frequencies for some clocks, so add +checks for verifying they are within limits. Let's start with rk3288 for +clock frequency check, other will came later. 
+ +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.c | 75 +++++++++++++++++++++---- + drivers/crypto/rockchip/rk3288_crypto.h | 16 +++++- + 2 files changed, 79 insertions(+), 12 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -14,10 +14,58 @@ + #include + #include + #include ++#include + #include + #include + #include + ++static const struct rk_variant rk3288_variant = { ++ .num_clks = 4, ++ .rkclks = { ++ { "sclk", 150000000}, ++ } ++}; ++ ++static const struct rk_variant rk3328_variant = { ++ .num_clks = 3, ++}; ++ ++static int rk_crypto_get_clks(struct rk_crypto_info *dev) ++{ ++ int i, j, err; ++ unsigned long cr; ++ ++ dev->num_clks = devm_clk_bulk_get_all(dev->dev, &dev->clks); ++ if (dev->num_clks < dev->variant->num_clks) { ++ dev_err(dev->dev, "Missing clocks, got %d instead of %d\n", ++ dev->num_clks, dev->variant->num_clks); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < dev->num_clks; i++) { ++ cr = clk_get_rate(dev->clks[i].clk); ++ for (j = 0; j < ARRAY_SIZE(dev->variant->rkclks); j++) { ++ if (dev->variant->rkclks[j].max == 0) ++ continue; ++ if (strcmp(dev->variant->rkclks[j].name, dev->clks[i].id)) ++ continue; ++ if (cr > dev->variant->rkclks[j].max) { ++ err = clk_set_rate(dev->clks[i].clk, ++ dev->variant->rkclks[j].max); ++ if (err) ++ dev_err(dev->dev, "Fail downclocking %s from %lu to %lu\n", ++ dev->variant->rkclks[j].name, cr, ++ dev->variant->rkclks[j].max); ++ else ++ dev_info(dev->dev, "Downclocking %s from %lu to %lu\n", ++ dev->variant->rkclks[j].name, cr, ++ dev->variant->rkclks[j].max); ++ } ++ } ++ } ++ return 0; ++} ++ + static int rk_crypto_enable_clk(struct rk_crypto_info *dev) + { + int err; +@@ -201,8 +249,12 @@ static void rk_crypto_unregister(void) + } + + static const struct of_device_id crypto_of_id_table[] = { +- { .compatible = "rockchip,rk3288-crypto" }, +- { .compatible = "rockchip,rk3328-crypto" }, ++ { .compatible = 
"rockchip,rk3288-crypto", ++ .data = &rk3288_variant, ++ }, ++ { .compatible = "rockchip,rk3328-crypto", ++ .data = &rk3328_variant, ++ }, + {} + }; + MODULE_DEVICE_TABLE(of, crypto_of_id_table); +@@ -220,6 +272,15 @@ static int rk_crypto_probe(struct platfo + goto err_crypto; + } + ++ crypto_info->dev = &pdev->dev; ++ platform_set_drvdata(pdev, crypto_info); ++ ++ crypto_info->variant = of_device_get_match_data(&pdev->dev); ++ if (!crypto_info->variant) { ++ dev_err(&pdev->dev, "Missing variant\n"); ++ return -EINVAL; ++ } ++ + crypto_info->rst = devm_reset_control_get(dev, "crypto-rst"); + if (IS_ERR(crypto_info->rst)) { + err = PTR_ERR(crypto_info->rst); +@@ -236,12 +297,9 @@ static int rk_crypto_probe(struct platfo + goto err_crypto; + } + +- crypto_info->num_clks = devm_clk_bulk_get_all(&pdev->dev, +- &crypto_info->clks); +- if (crypto_info->num_clks < 3) { +- err = -EINVAL; ++ err = rk_crypto_get_clks(crypto_info); ++ if (err) + goto err_crypto; +- } + + crypto_info->irq = platform_get_irq(pdev, 0); + if (crypto_info->irq < 0) { +@@ -259,9 +317,6 @@ static int rk_crypto_probe(struct platfo + goto err_crypto; + } + +- crypto_info->dev = &pdev->dev; +- platform_set_drvdata(pdev, crypto_info); +- + crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true); + crypto_engine_start(crypto_info->engine); + init_completion(&crypto_info->complete); +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -188,14 +188,26 @@ + #define CRYPTO_WRITE(dev, offset, val) \ + writel_relaxed((val), ((dev)->reg + (offset))) + ++#define RK_MAX_CLKS 4 ++ ++struct rk_clks { ++ const char *name; ++ unsigned long max; ++}; ++ ++struct rk_variant { ++ int num_clks; ++ struct rk_clks rkclks[RK_MAX_CLKS]; ++}; ++ + struct rk_crypto_info { + struct device *dev; + struct clk_bulk_data *clks; +- int num_clks; ++ int num_clks; + struct reset_control *rst; + void __iomem *reg; + int irq; +- ++ const struct rk_variant *variant; + struct 
crypto_engine *engine; + struct completion complete; + int status; diff --git a/target/linux/rockchip/patches-6.0/197-crypto-rockchip-rk_ahash_reg_init-use-crypto_info-fr.patch b/target/linux/rockchip/patches-6.0/197-crypto-rockchip-rk_ahash_reg_init-use-crypto_info-fr.patch new file mode 100644 index 000000000..bb6b4f256 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/197-crypto-rockchip-rk_ahash_reg_init-use-crypto_info-fr.patch @@ -0,0 +1,40 @@ +From 566cce03ee27f1288a4a029f5c7d437cc2e11eac Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:55:09 +0000 +Subject: [PATCH 47/49] crypto: rockchip: rk_ahash_reg_init use crypto_info + from parameter + +rk_ahash_reg_init() use crypto_info from TFM context, since we will +remove it, let's take if from parameters. + +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 8 +++----- + 1 file changed, 3 insertions(+), 5 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -78,12 +78,10 @@ static int zero_message_process(struct a + return 0; + } + +-static void rk_ahash_reg_init(struct ahash_request *req) ++static void rk_ahash_reg_init(struct ahash_request *req, ++ struct rk_crypto_info *dev) + { + struct rk_ahash_rctx *rctx = ahash_request_ctx(req); +- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); +- struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); +- struct rk_crypto_info *dev = tctx->dev; + int reg_status; + + reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) | +@@ -281,7 +279,7 @@ static int rk_hash_run(struct crypto_eng + goto theend; + } + +- rk_ahash_reg_init(areq); ++ rk_ahash_reg_init(areq, rkc); + + while (sg) { + reinit_completion(&rkc->complete); diff --git a/target/linux/rockchip/patches-6.0/198-crypto-rockchip-permit-to-have-more-than-one-reset.patch b/target/linux/rockchip/patches-6.0/198-crypto-rockchip-permit-to-have-more-than-one-reset.patch new file mode 100644 index 
000000000..a56241149 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/198-crypto-rockchip-permit-to-have-more-than-one-reset.patch @@ -0,0 +1,24 @@ +From 5a73176384bd62a9ac4300805d243592e93fe5d4 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:55:10 +0000 +Subject: [PATCH 48/49] crypto: rockchip: permit to have more than one reset + +The RK3399 has 3 resets, so the driver needs to handle multiple resets. +This is done by using devm_reset_control_array_get_exclusive(). + +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -281,7 +281,7 @@ static int rk_crypto_probe(struct platfo + return -EINVAL; + } + +- crypto_info->rst = devm_reset_control_get(dev, "crypto-rst"); ++ crypto_info->rst = devm_reset_control_array_get_exclusive(dev); + if (IS_ERR(crypto_info->rst)) { + err = PTR_ERR(crypto_info->rst); + goto err_crypto; diff --git a/target/linux/rockchip/patches-6.0/199-crypto-rockchip-Add-support-for-RK3399.patch b/target/linux/rockchip/patches-6.0/199-crypto-rockchip-Add-support-for-RK3399.patch new file mode 100644 index 000000000..7c5902ff5 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/199-crypto-rockchip-Add-support-for-RK3399.patch @@ -0,0 +1,464 @@ +From 5a0b753155b5d3cbba77ecd6a017e6f3733e344e Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 07:55:11 +0000 +Subject: [PATCH 49/49] crypto: rockchip: Add support for RK3399 + +The RK3399 has 2 rk3288-compatible crypto devices named crypto0 and +crypto1. The only difference is lack of RSA in crypto1. + +We need to add driver support for 2 parallel instances as only one needs +to register crypto algorithms. +Then the driver will round robin each request on each device. + +For avoiding complexity (device bringup after a TFM is created), PM is +modified to be handled per request. 
+Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/rk3288_crypto.c | 92 +++++++++++++++---- + drivers/crypto/rockchip/rk3288_crypto.h | 25 +++-- + drivers/crypto/rockchip/rk3288_crypto_ahash.c | 37 ++++---- + .../crypto/rockchip/rk3288_crypto_skcipher.c | 37 ++++---- + 4 files changed, 123 insertions(+), 68 deletions(-) + +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -19,6 +19,23 @@ + #include + #include + ++static struct rockchip_ip rocklist = { ++ .dev_list = LIST_HEAD_INIT(rocklist.dev_list), ++ .lock = __SPIN_LOCK_UNLOCKED(rocklist.lock), ++}; ++ ++struct rk_crypto_info *get_rk_crypto(void) ++{ ++ struct rk_crypto_info *first; ++ ++ spin_lock(&rocklist.lock); ++ first = list_first_entry_or_null(&rocklist.dev_list, ++ struct rk_crypto_info, list); ++ list_rotate_left(&rocklist.dev_list); ++ spin_unlock(&rocklist.lock); ++ return first; ++} ++ + static const struct rk_variant rk3288_variant = { + .num_clks = 4, + .rkclks = { +@@ -30,6 +47,10 @@ static const struct rk_variant rk3328_va + .num_clks = 3, + }; + ++static const struct rk_variant rk3399_variant = { ++ .num_clks = 3, ++}; ++ + static int rk_crypto_get_clks(struct rk_crypto_info *dev) + { + int i, j, err; +@@ -83,8 +104,8 @@ static void rk_crypto_disable_clk(struct + } + + /* +- * Power management strategy: The device is suspended unless a TFM exists for +- * one of the algorithms proposed by this driver. ++ * Power management strategy: The device is suspended until a request ++ * is handled. For avoiding suspend/resume yoyo, the autosuspend is set to 2s. 
+ */ + static int rk_crypto_pm_suspend(struct device *dev) + { +@@ -166,8 +187,17 @@ static struct rk_crypto_tmp *rk_cipher_a + #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG + static int rk_crypto_debugfs_show(struct seq_file *seq, void *v) + { ++ struct rk_crypto_info *dd; + unsigned int i; + ++ spin_lock(&rocklist.lock); ++ list_for_each_entry(dd, &rocklist.dev_list, list) { ++ seq_printf(seq, "%s %s requests: %lu\n", ++ dev_driver_string(dd->dev), dev_name(dd->dev), ++ dd->nreq); ++ } ++ spin_unlock(&rocklist.lock); ++ + for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { + if (!rk_cipher_algs[i]->dev) + continue; +@@ -198,6 +228,18 @@ static int rk_crypto_debugfs_show(struct + DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs); + #endif + ++static void register_debugfs(struct rk_crypto_info *crypto_info) ++{ ++#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG ++ /* Ignore error of debugfs */ ++ rocklist.dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL); ++ rocklist.dbgfs_stats = debugfs_create_file("stats", 0444, ++ rocklist.dbgfs_dir, ++ &rocklist, ++ &rk_crypto_debugfs_fops); ++#endif ++} ++ + static int rk_crypto_register(struct rk_crypto_info *crypto_info) + { + unsigned int i, k; +@@ -255,6 +297,9 @@ static const struct of_device_id crypto_ + { .compatible = "rockchip,rk3328-crypto", + .data = &rk3328_variant, + }, ++ { .compatible = "rockchip,rk3399-crypto", ++ .data = &rk3399_variant, ++ }, + {} + }; + MODULE_DEVICE_TABLE(of, crypto_of_id_table); +@@ -262,7 +307,7 @@ MODULE_DEVICE_TABLE(of, crypto_of_id_tab + static int rk_crypto_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; +- struct rk_crypto_info *crypto_info; ++ struct rk_crypto_info *crypto_info, *first; + int err = 0; + + crypto_info = devm_kzalloc(&pdev->dev, +@@ -325,22 +370,22 @@ static int rk_crypto_probe(struct platfo + if (err) + goto err_pm; + +- err = rk_crypto_register(crypto_info); +- if (err) { +- dev_err(dev, "err in register alg"); +- goto err_register_alg; +- } ++ 
spin_lock(&rocklist.lock); ++ first = list_first_entry_or_null(&rocklist.dev_list, ++ struct rk_crypto_info, list); ++ list_add_tail(&crypto_info->list, &rocklist.dev_list); ++ spin_unlock(&rocklist.lock); ++ ++ if (!first) { ++ err = rk_crypto_register(crypto_info); ++ if (err) { ++ dev_err(dev, "Fail to register crypto algorithms"); ++ goto err_register_alg; ++ } + +-#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG +- /* Ignore error of debugfs */ +- crypto_info->dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL); +- crypto_info->dbgfs_stats = debugfs_create_file("stats", 0444, +- crypto_info->dbgfs_dir, +- crypto_info, +- &rk_crypto_debugfs_fops); +-#endif ++ register_debugfs(crypto_info); ++ } + +- dev_info(dev, "Crypto Accelerator successfully registered\n"); + return 0; + + err_register_alg: +@@ -355,11 +400,20 @@ err_crypto: + static int rk_crypto_remove(struct platform_device *pdev) + { + struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); ++ struct rk_crypto_info *first; ++ ++ spin_lock_bh(&rocklist.lock); ++ list_del(&crypto_tmp->list); ++ first = list_first_entry_or_null(&rocklist.dev_list, ++ struct rk_crypto_info, list); ++ spin_unlock_bh(&rocklist.lock); + ++ if (!first) { + #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG +- debugfs_remove_recursive(crypto_tmp->dbgfs_dir); ++ debugfs_remove_recursive(rocklist.dbgfs_dir); + #endif +- rk_crypto_unregister(); ++ rk_crypto_unregister(); ++ } + rk_crypto_pm_exit(crypto_tmp); + crypto_engine_exit(crypto_tmp->engine); + return 0; +--- a/drivers/crypto/rockchip/rk3288_crypto.h ++++ b/drivers/crypto/rockchip/rk3288_crypto.h +@@ -190,6 +190,20 @@ + + #define RK_MAX_CLKS 4 + ++/* ++ * struct rockchip_ip - struct for managing a list of RK crypto instance ++ * @dev_list: Used for doing a list of rk_crypto_info ++ * @lock: Control access to dev_list ++ * @dbgfs_dir: Debugfs dentry for statistic directory ++ * @dbgfs_stats: Debugfs dentry for statistic counters ++ */ ++struct rockchip_ip { ++ struct list_head 
dev_list; ++ spinlock_t lock; /* Control access to dev_list */ ++ struct dentry *dbgfs_dir; ++ struct dentry *dbgfs_stats; ++}; ++ + struct rk_clks { + const char *name; + unsigned long max; +@@ -201,6 +215,7 @@ struct rk_variant { + }; + + struct rk_crypto_info { ++ struct list_head list; + struct device *dev; + struct clk_bulk_data *clks; + int num_clks; +@@ -208,19 +223,15 @@ struct rk_crypto_info { + void __iomem *reg; + int irq; + const struct rk_variant *variant; ++ unsigned long nreq; + struct crypto_engine *engine; + struct completion complete; + int status; +-#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG +- struct dentry *dbgfs_dir; +- struct dentry *dbgfs_stats; +-#endif + }; + + /* the private variable of hash */ + struct rk_ahash_ctx { + struct crypto_engine_ctx enginectx; +- struct rk_crypto_info *dev; + /* for fallback */ + struct crypto_ahash *fallback_tfm; + }; +@@ -236,7 +247,6 @@ struct rk_ahash_rctx { + /* the private variable of cipher */ + struct rk_cipher_ctx { + struct crypto_engine_ctx enginectx; +- struct rk_crypto_info *dev; + unsigned int keylen; + u8 key[AES_MAX_KEY_SIZE]; + u8 iv[AES_BLOCK_SIZE]; +@@ -252,7 +262,7 @@ struct rk_cipher_rctx { + + struct rk_crypto_tmp { + u32 type; +- struct rk_crypto_info *dev; ++ struct rk_crypto_info *dev; + union { + struct skcipher_alg skcipher; + struct ahash_alg hash; +@@ -276,4 +286,5 @@ extern struct rk_crypto_tmp rk_ahash_sha + extern struct rk_crypto_tmp rk_ahash_sha256; + extern struct rk_crypto_tmp rk_ahash_md5; + ++struct rk_crypto_info *get_rk_crypto(void); + #endif +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c +@@ -199,8 +199,8 @@ static int rk_ahash_export(struct ahash_ + static int rk_ahash_digest(struct ahash_request *req) + { + struct rk_ahash_rctx *rctx = ahash_request_ctx(req); +- struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); +- struct rk_crypto_info *dev = tctx->dev; ++ struct rk_crypto_info *dev; ++ struct 
crypto_engine *engine; + + if (rk_ahash_need_fallback(req)) + return rk_ahash_digest_fb(req); +@@ -208,9 +208,12 @@ static int rk_ahash_digest(struct ahash_ + if (!req->nbytes) + return zero_message_process(req); + ++ dev = get_rk_crypto(); ++ + rctx->dev = dev; ++ engine = dev->engine; + +- return crypto_transfer_hash_request_to_engine(dev->engine, req); ++ return crypto_transfer_hash_request_to_engine(engine, req); + } + + static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg) +@@ -260,9 +263,14 @@ static int rk_hash_run(struct crypto_eng + int i; + u32 v; + ++ err = pm_runtime_resume_and_get(rkc->dev); ++ if (err) ++ return err; ++ + rctx->mode = 0; + + algt->stat_req++; ++ rkc->nreq++; + + switch (crypto_ahash_digestsize(tfm)) { + case SHA1_DIGEST_SIZE: +@@ -313,6 +321,8 @@ static int rk_hash_run(struct crypto_eng + } + + theend: ++ pm_runtime_put_autosuspend(rkc->dev); ++ + local_bh_disable(); + crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); +@@ -323,21 +333,15 @@ theend: + static int rk_cra_hash_init(struct crypto_tfm *tfm) + { + struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); +- struct rk_crypto_tmp *algt; +- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); +- + const char *alg_name = crypto_tfm_alg_name(tfm); +- int err; +- +- algt = container_of(alg, struct rk_crypto_tmp, alg.hash); +- +- tctx->dev = algt->dev; ++ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); ++ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + + /* for fallback */ + tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(tctx->fallback_tfm)) { +- dev_err(tctx->dev->dev, "Could not load fallback driver.\n"); ++ dev_err(algt->dev->dev, "Could not load fallback driver.\n"); + return PTR_ERR(tctx->fallback_tfm); + } + +@@ -349,15 +353,7 @@ static int rk_cra_hash_init(struct crypt + tctx->enginectx.op.prepare_request = rk_hash_prepare; + 
tctx->enginectx.op.unprepare_request = rk_hash_unprepare; + +- err = pm_runtime_resume_and_get(tctx->dev->dev); +- if (err < 0) +- goto error_pm; +- + return 0; +-error_pm: +- crypto_free_ahash(tctx->fallback_tfm); +- +- return err; + } + + static void rk_cra_hash_exit(struct crypto_tfm *tfm) +@@ -365,7 +361,6 @@ static void rk_cra_hash_exit(struct cryp + struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); + + crypto_free_ahash(tctx->fallback_tfm); +- pm_runtime_put_autosuspend(tctx->dev->dev); + } + + struct rk_crypto_tmp rk_ahash_sha1 = { +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +@@ -17,11 +17,11 @@ + static int rk_cipher_need_fallback(struct skcipher_request *req) + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- unsigned int bs = crypto_skcipher_blocksize(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + struct scatterlist *sgs, *sgd; + unsigned int stodo, dtodo, len; ++ unsigned int bs = crypto_skcipher_blocksize(tfm); + + if (!req->cryptlen) + return true; +@@ -84,15 +84,16 @@ static int rk_cipher_fallback(struct skc + + static int rk_cipher_handle_req(struct skcipher_request *req) + { +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +- struct rk_cipher_ctx *tctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); +- struct rk_crypto_info *rkc = tctx->dev; +- struct crypto_engine *engine = rkc->engine; ++ struct rk_crypto_info *rkc; ++ struct crypto_engine *engine; + + if (rk_cipher_need_fallback(req)) + return rk_cipher_fallback(req); + ++ rkc = get_rk_crypto(); ++ ++ engine = rkc->engine; + rctx->dev = rkc; + + return crypto_transfer_skcipher_request_to_engine(engine, req); +@@ -307,7 +308,12 @@ static int rk_cipher_run(struct crypto_e + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + struct 
rk_crypto_info *rkc = rctx->dev; + ++ err = pm_runtime_resume_and_get(rkc->dev); ++ if (err) ++ return err; ++ + algt->stat_req++; ++ rkc->nreq++; + + ivsize = crypto_skcipher_ivsize(tfm); + if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { +@@ -401,6 +407,8 @@ static int rk_cipher_run(struct crypto_e + } + + theend: ++ pm_runtime_put_autosuspend(rkc->dev); ++ + local_bh_disable(); + crypto_finalize_skcipher_request(engine, areq, err); + local_bh_enable(); +@@ -420,18 +428,13 @@ theend_iv: + static int rk_cipher_tfm_init(struct crypto_skcipher *tfm) + { + struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); +- struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + const char *name = crypto_tfm_alg_name(&tfm->base); +- struct rk_crypto_tmp *algt; +- int err; +- +- algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); +- +- ctx->dev = algt->dev; ++ struct skcipher_alg *alg = crypto_skcipher_alg(tfm); ++ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + + ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback_tfm)) { +- dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", ++ dev_err(algt->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", + name, PTR_ERR(ctx->fallback_tfm)); + return PTR_ERR(ctx->fallback_tfm); + } +@@ -441,14 +444,7 @@ static int rk_cipher_tfm_init(struct cry + + ctx->enginectx.op.do_one_request = rk_cipher_run; + +- err = pm_runtime_resume_and_get(ctx->dev->dev); +- if (err < 0) +- goto error_pm; +- + return 0; +-error_pm: +- crypto_free_skcipher(ctx->fallback_tfm); +- return err; + } + + static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm) +@@ -457,7 +453,6 @@ static void rk_cipher_tfm_exit(struct cr + + memzero_explicit(ctx->key, ctx->keylen); + crypto_free_skcipher(ctx->fallback_tfm); +- pm_runtime_put_autosuspend(ctx->dev->dev); + } + + struct rk_crypto_tmp rk_ecb_aes_alg = { diff --git 
a/target/linux/rockchip/patches-6.0/201-crypto-rockchip-move-kconfig-to-its-dedicated-direct.patch b/target/linux/rockchip/patches-6.0/201-crypto-rockchip-move-kconfig-to-its-dedicated-direct.patch new file mode 100644 index 000000000..6c212a340 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/201-crypto-rockchip-move-kconfig-to-its-dedicated-direct.patch @@ -0,0 +1,105 @@ +From e8749922cdcddc6e3d3df6a94c530df863073353 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 08:00:44 +0000 +Subject: [PATCH 1/5] crypto: rockchip: move kconfig to its dedicated directory + +Move all rockchip kconfig in its own subdirectory. + +Signed-off-by: Corentin Labbe +--- + drivers/crypto/Kconfig | 32 ++------------------------------ + drivers/crypto/Makefile | 2 +- + drivers/crypto/rockchip/Kconfig | 28 ++++++++++++++++++++++++++++ + 3 files changed, 31 insertions(+), 31 deletions(-) + create mode 100644 drivers/crypto/rockchip/Kconfig + +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -646,6 +646,8 @@ config CRYPTO_DEV_QCOM_RNG + To compile this driver as a module, choose M here. The + module will be called qcom-rng. If unsure, say N. + ++source "drivers/crypto/rockchip/Kconfig" ++ + config CRYPTO_DEV_VMX + bool "Support for VMX cryptographic acceleration instructions" + depends on PPC64 && VSX +@@ -666,36 +668,6 @@ config CRYPTO_DEV_IMGTEC_HASH + hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256 + hashing algorithms. + +-config CRYPTO_DEV_ROCKCHIP +- tristate "Rockchip's Cryptographic Engine driver" +- depends on OF && ARCH_ROCKCHIP +- depends on PM +- select CRYPTO_ECB +- select CRYPTO_CBC +- select CRYPTO_DES +- select CRYPTO_AES +- select CRYPTO_ENGINE +- select CRYPTO_LIB_DES +- select CRYPTO_MD5 +- select CRYPTO_SHA1 +- select CRYPTO_SHA256 +- select CRYPTO_HASH +- select CRYPTO_SKCIPHER +- +- help +- This driver interfaces with the hardware crypto accelerator. +- Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode. 
+- +-config CRYPTO_DEV_ROCKCHIP_DEBUG +- bool "Enable Rockchip crypto stats" +- depends on CRYPTO_DEV_ROCKCHIP +- depends on DEBUG_FS +- help +- Say y to enable Rockchip crypto debug stats. +- This will create /sys/kernel/debug/rk3288_crypto/stats for displaying +- the number of requests per algorithm and other internal stats. +- +- + config CRYPTO_DEV_ZYNQMP_AES + tristate "Support for Xilinx ZynqMP AES hw accelerator" + depends on ZYNQMP_FIRMWARE || COMPILE_TEST +--- a/drivers/crypto/Makefile ++++ b/drivers/crypto/Makefile +@@ -35,7 +35,7 @@ obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ + obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ + obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ + obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o +-obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ ++obj-y += rockchip/ + obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o + obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o + obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o +--- /dev/null ++++ b/drivers/crypto/rockchip/Kconfig +@@ -0,0 +1,28 @@ ++config CRYPTO_DEV_ROCKCHIP ++ tristate "Rockchip's Cryptographic Engine driver" ++ depends on OF && ARCH_ROCKCHIP ++ depends on PM ++ select CRYPTO_ECB ++ select CRYPTO_CBC ++ select CRYPTO_DES ++ select CRYPTO_AES ++ select CRYPTO_ENGINE ++ select CRYPTO_LIB_DES ++ select CRYPTO_MD5 ++ select CRYPTO_SHA1 ++ select CRYPTO_SHA256 ++ select CRYPTO_HASH ++ select CRYPTO_SKCIPHER ++ ++ help ++ This driver interfaces with the hardware crypto accelerator. ++ Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode. ++ ++config CRYPTO_DEV_ROCKCHIP_DEBUG ++ bool "Enable Rockchip crypto stats" ++ depends on CRYPTO_DEV_ROCKCHIP ++ depends on DEBUG_FS ++ help ++ Say y to enable Rockchip crypto debug stats. ++ This will create /sys/kernel/debug/rk3288_crypto/stats for displaying ++ the number of requests per algorithm and other internal stats. 
diff --git a/target/linux/rockchip/patches-6.0/202-dt-bindings-crypto-add-support-for-rockchip-crypto-r.patch b/target/linux/rockchip/patches-6.0/202-dt-bindings-crypto-add-support-for-rockchip-crypto-r.patch new file mode 100644 index 000000000..aa9e35d31 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/202-dt-bindings-crypto-add-support-for-rockchip-crypto-r.patch @@ -0,0 +1,89 @@ +From 03c8a180b9ba57a1bcf658c25a725b063bbade96 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 08:00:45 +0000 +Subject: [PATCH 2/5] dt-bindings: crypto: add support for + rockchip,crypto-rk3588 + +Add device tree binding documentation for the Rockchip cryptographic +offloader V2. + +Signed-off-by: Corentin Labbe +--- + .../crypto/rockchip,rk3588-crypto.yaml | 71 +++++++++++++++++++ + 1 file changed, 71 insertions(+) + create mode 100644 Documentation/devicetree/bindings/crypto/rockchip,rk3588-crypto.yaml + +--- /dev/null ++++ b/Documentation/devicetree/bindings/crypto/rockchip,rk3588-crypto.yaml +@@ -0,0 +1,71 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/crypto/rockchip,rk3588-crypto.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Rockchip cryptographic offloader V2 ++ ++maintainers: ++ - Corentin Labbe ++ ++properties: ++ compatible: ++ enum: ++ - rockchip,rk3568-crypto ++ - rockchip,rk3588-crypto ++ ++ reg: ++ maxItems: 1 ++ ++ interrupts: ++ maxItems: 1 ++ ++ clocks: ++ minItems: 4 ++ ++ clock-names: ++ items: ++ - const: aclk ++ - const: hclk ++ - const: sclk ++ - const: pka ++ ++ resets: ++ minItems: 5 ++ ++ reset-names: ++ items: ++ - const: core ++ - const: a ++ - const: h ++ - const: rng ++ - const: pka ++ ++required: ++ - compatible ++ - reg ++ - interrupts ++ - clocks ++ - clock-names ++ - resets ++ - reset-names ++ ++additionalProperties: false ++ ++examples: ++ - | ++ #include ++ #include ++ crypto@fe380000 { ++ compatible = 
"rockchip,rk3588-crypto"; ++ reg = <0xfe380000 0x4000>; ++ interrupts = ; ++ clocks = <&cru ACLK_CRYPTO_NS>, <&cru HCLK_CRYPTO_NS>, ++ <&cru CLK_CRYPTO_NS_CORE>, <&cru CLK_CRYPTO_NS_PKA>; ++ clock-names = "aclk", "hclk", "sclk", "pka"; ++ resets = <&cru SRST_CRYPTO_NS_CORE>, <&cru SRST_A_CRYPTO_NS>, ++ <&cru SRST_H_CRYPTO_NS>, <&cru SRST_CRYPTO_NS_RNG>, ++ <&cru SRST_CRYPTO_NS_PKA>; ++ reset-names = "core", "a", "h", "rng", "pka"; ++ }; diff --git a/target/linux/rockchip/patches-6.0/203-MAINTAINERS-add-new-dt-binding-doc-to-the-right-entr.patch b/target/linux/rockchip/patches-6.0/203-MAINTAINERS-add-new-dt-binding-doc-to-the-right-entr.patch new file mode 100644 index 000000000..9e9e2b88b --- /dev/null +++ b/target/linux/rockchip/patches-6.0/203-MAINTAINERS-add-new-dt-binding-doc-to-the-right-entr.patch @@ -0,0 +1,22 @@ +From a091cb568872e3ef2e6a5c6e28e4f43465d46ca2 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 08:00:46 +0000 +Subject: [PATCH 3/5] MAINTAINERS: add new dt-binding doc to the right entry + +Rockchip crypto driver have a new file to be added. 
+ +Signed-off-by: Corentin Labbe +--- + MAINTAINERS | 1 + + 1 file changed, 1 insertion(+) + +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -17575,6 +17575,7 @@ M: Corentin Labbe + L: linux-crypto@vger.kernel.org + S: Maintained + F: Documentation/devicetree/bindings/crypto/rockchip,rk3288-crypto.yaml ++F: Documentation/devicetree/bindings/crypto/rockchip,rk3588-crypto.yaml + F: drivers/crypto/rockchip/ + + ROCKCHIP I2S TDM DRIVER diff --git a/target/linux/rockchip/patches-6.0/204-crypto-rockchip-support-the-new-crypto-IP-for-rk3568.patch b/target/linux/rockchip/patches-6.0/204-crypto-rockchip-support-the-new-crypto-IP-for-rk3568.patch new file mode 100644 index 000000000..ecef87107 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/204-crypto-rockchip-support-the-new-crypto-IP-for-rk3568.patch @@ -0,0 +1,1633 @@ +From 06ecfb2f7b3277c4ed1bf0172b14ae7bc0c2d4aa Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 08:00:47 +0000 +Subject: [PATCH 4/5] crypto: rockchip: support the new crypto IP for + rk3568/rk3588 + +Rockchip rk3568 and rk3588 have a common crypto offloader IP. +This driver adds support for it. + +Signed-off-by: Corentin Labbe +--- + drivers/crypto/rockchip/Kconfig | 28 + + drivers/crypto/rockchip/Makefile | 5 + + drivers/crypto/rockchip/rk3588_crypto.c | 646 ++++++++++++++++++ + drivers/crypto/rockchip/rk3588_crypto.h | 221 ++++++ + drivers/crypto/rockchip/rk3588_crypto_ahash.c | 346 ++++++++++ + .../crypto/rockchip/rk3588_crypto_skcipher.c | 340 +++++++++ + 6 files changed, 1586 insertions(+) + create mode 100644 drivers/crypto/rockchip/rk3588_crypto.c + create mode 100644 drivers/crypto/rockchip/rk3588_crypto.h + create mode 100644 drivers/crypto/rockchip/rk3588_crypto_ahash.c + create mode 100644 drivers/crypto/rockchip/rk3588_crypto_skcipher.c + +--- a/drivers/crypto/rockchip/Kconfig ++++ b/drivers/crypto/rockchip/Kconfig +@@ -26,3 +26,31 @@ config CRYPTO_DEV_ROCKCHIP_DEBUG + Say y to enable Rockchip crypto debug stats. 
+ This will create /sys/kernel/debug/rk3288_crypto/stats for displaying + the number of requests per algorithm and other internal stats. ++ ++config CRYPTO_DEV_ROCKCHIP2 ++ tristate "Rockchip's cryptographic offloader V2" ++ depends on OF && ARCH_ROCKCHIP ++ depends on PM ++ select CRYPTO_ECB ++ select CRYPTO_CBC ++ select CRYPTO_AES ++ select CRYPTO_MD5 ++ select CRYPTO_SHA1 ++ select CRYPTO_SHA256 ++ select CRYPTO_SM3 ++ select CRYPTO_HASH ++ select CRYPTO_SKCIPHER ++ select CRYPTO_ENGINE ++ ++ help ++ This driver interfaces with the hardware crypto offloader present ++ on RK3568 and RK3588. ++ ++config CRYPTO_DEV_ROCKCHIP2_DEBUG ++ bool "Enable Rockchip V2 crypto stats" ++ depends on CRYPTO_DEV_ROCKCHIP2 ++ depends on DEBUG_FS ++ help ++ Say y to enable Rockchip crypto debug stats. ++ This will create /sys/kernel/debug/rk3588_crypto/stats for displaying ++ the number of requests per algorithm and other internal stats. +--- a/drivers/crypto/rockchip/Makefile ++++ b/drivers/crypto/rockchip/Makefile +@@ -3,3 +3,8 @@ obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_ + rk_crypto-objs := rk3288_crypto.o \ + rk3288_crypto_skcipher.o \ + rk3288_crypto_ahash.o ++ ++obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP2) += rk_crypto2.o ++rk_crypto2-objs := rk3588_crypto.o \ ++ rk3588_crypto_skcipher.o \ ++ rk3588_crypto_ahash.o +--- /dev/null ++++ b/drivers/crypto/rockchip/rk3588_crypto.c +@@ -0,0 +1,646 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * hardware cryptographic offloader for rk3568/rk3588 SoC ++ * ++ * Copyright (c) 2022, Corentin Labbe ++ */ ++ ++#include "rk3588_crypto.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static struct rockchip_ip rocklist = { ++ .dev_list = LIST_HEAD_INIT(rocklist.dev_list), ++ .lock = __SPIN_LOCK_UNLOCKED(rocklist.lock), ++}; ++ ++struct rk_crypto_dev *get_rk_crypto(void) ++{ ++ struct rk_crypto_dev *first; ++ ++ spin_lock(&rocklist.lock); ++ first = list_first_entry_or_null(&rocklist.dev_list, ++ struct 
rk_crypto_dev, list); ++ list_rotate_left(&rocklist.dev_list); ++ spin_unlock(&rocklist.lock); ++ return first; ++} ++ ++static const struct rk_variant rk3568_variant = { ++ .num_clks = 4, ++}; ++ ++static const struct rk_variant rk3588_variant = { ++ .num_clks = 4, ++}; ++ ++static int rk_crypto_get_clks(struct rk_crypto_dev *dev) ++{ ++ int i, j, err; ++ unsigned long cr; ++ ++ dev->num_clks = devm_clk_bulk_get_all(dev->dev, &dev->clks); ++ if (dev->num_clks < dev->variant->num_clks) { ++ dev_err(dev->dev, "Missing clocks, got %d instead of %d\n", ++ dev->num_clks, dev->variant->num_clks); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < dev->num_clks; i++) { ++ cr = clk_get_rate(dev->clks[i].clk); ++ for (j = 0; j < ARRAY_SIZE(dev->variant->rkclks); j++) { ++ if (dev->variant->rkclks[j].max == 0) ++ continue; ++ if (strcmp(dev->variant->rkclks[j].name, dev->clks[i].id)) ++ continue; ++ if (cr > dev->variant->rkclks[j].max) { ++ err = clk_set_rate(dev->clks[i].clk, ++ dev->variant->rkclks[j].max); ++ if (err) ++ dev_err(dev->dev, "Fail downclocking %s from %lu to %lu\n", ++ dev->variant->rkclks[j].name, cr, ++ dev->variant->rkclks[j].max); ++ else ++ dev_info(dev->dev, "Downclocking %s from %lu to %lu\n", ++ dev->variant->rkclks[j].name, cr, ++ dev->variant->rkclks[j].max); ++ } ++ } ++ } ++ return 0; ++} ++ ++static int rk_crypto_enable_clk(struct rk_crypto_dev *dev) ++{ ++ int err; ++ ++ err = clk_bulk_prepare_enable(dev->num_clks, dev->clks); ++ if (err) ++ dev_err(dev->dev, "Could not enable clock clks\n"); ++ ++ return err; ++} ++ ++static void rk_crypto_disable_clk(struct rk_crypto_dev *dev) ++{ ++ clk_bulk_disable_unprepare(dev->num_clks, dev->clks); ++} ++ ++/* ++ * Power management strategy: The device is suspended until a request ++ * is handled. For avoiding suspend/resume yoyo, the autosuspend is set to 2s. 
++ */ ++static int rk_crypto_pm_suspend(struct device *dev) ++{ ++ struct rk_crypto_dev *rkdev = dev_get_drvdata(dev); ++ ++ rk_crypto_disable_clk(rkdev); ++ reset_control_assert(rkdev->rst); ++ ++ return 0; ++} ++ ++static int rk_crypto_pm_resume(struct device *dev) ++{ ++ struct rk_crypto_dev *rkdev = dev_get_drvdata(dev); ++ int ret; ++ ++ ret = rk_crypto_enable_clk(rkdev); ++ if (ret) ++ return ret; ++ ++ reset_control_deassert(rkdev->rst); ++ return 0; ++} ++ ++static const struct dev_pm_ops rk_crypto_pm_ops = { ++ SET_RUNTIME_PM_OPS(rk_crypto_pm_suspend, rk_crypto_pm_resume, NULL) ++}; ++ ++static int rk_crypto_pm_init(struct rk_crypto_dev *rkdev) ++{ ++ int err; ++ ++ pm_runtime_use_autosuspend(rkdev->dev); ++ pm_runtime_set_autosuspend_delay(rkdev->dev, 2000); ++ ++ err = pm_runtime_set_suspended(rkdev->dev); ++ if (err) ++ return err; ++ pm_runtime_enable(rkdev->dev); ++ return err; ++} ++ ++static void rk_crypto_pm_exit(struct rk_crypto_dev *rkdev) ++{ ++ pm_runtime_disable(rkdev->dev); ++} ++ ++static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) ++{ ++ struct rk_crypto_dev *rkc = platform_get_drvdata(dev_id); ++ u32 v; ++ ++ v = readl(rkc->reg + RK_CRYPTO_DMA_INT_ST); ++ writel(v, rkc->reg + RK_CRYPTO_DMA_INT_ST); ++ ++ rkc->status = 1; ++ if (v & 0xF8) { ++ dev_warn(rkc->dev, "DMA Error\n"); ++ rkc->status = 0; ++ } ++ complete(&rkc->complete); ++ ++ return IRQ_HANDLED; ++} ++ ++static struct rk_crypto_template rk_cipher_algs[] = { ++ { ++ .type = CRYPTO_ALG_TYPE_SKCIPHER, ++ .alg.skcipher = { ++ .base.cra_name = "ecb(aes)", ++ .base.cra_driver_name = "ecb-aes-rk2", ++ .base.cra_priority = 300, ++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, ++ .base.cra_blocksize = AES_BLOCK_SIZE, ++ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), ++ .base.cra_alignmask = 0x0f, ++ .base.cra_module = THIS_MODULE, ++ ++ .init = rk_cipher_tfm_init, ++ .exit = rk_cipher_tfm_exit, ++ .min_keysize = AES_MIN_KEY_SIZE, ++ .max_keysize = 
AES_MAX_KEY_SIZE, ++ .setkey = rk_aes_setkey, ++ .encrypt = rk_aes_ecb_encrypt, ++ .decrypt = rk_aes_ecb_decrypt, ++ } ++ }, ++ { ++ .type = CRYPTO_ALG_TYPE_SKCIPHER, ++ .alg.skcipher = { ++ .base.cra_name = "cbc(aes)", ++ .base.cra_driver_name = "cbc-aes-rk2", ++ .base.cra_priority = 300, ++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, ++ .base.cra_blocksize = AES_BLOCK_SIZE, ++ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), ++ .base.cra_alignmask = 0x0f, ++ .base.cra_module = THIS_MODULE, ++ ++ .init = rk_cipher_tfm_init, ++ .exit = rk_cipher_tfm_exit, ++ .min_keysize = AES_MIN_KEY_SIZE, ++ .max_keysize = AES_MAX_KEY_SIZE, ++ .ivsize = AES_BLOCK_SIZE, ++ .setkey = rk_aes_setkey, ++ .encrypt = rk_aes_cbc_encrypt, ++ .decrypt = rk_aes_cbc_decrypt, ++ } ++ }, ++ { ++ .type = CRYPTO_ALG_TYPE_AHASH, ++ .rk_mode = RK_CRYPTO_MD5, ++ .alg.hash = { ++ .init = rk_ahash_init, ++ .update = rk_ahash_update, ++ .final = rk_ahash_final, ++ .finup = rk_ahash_finup, ++ .export = rk_ahash_export, ++ .import = rk_ahash_import, ++ .digest = rk_ahash_digest, ++ .halg = { ++ .digestsize = MD5_DIGEST_SIZE, ++ .statesize = sizeof(struct md5_state), ++ .base = { ++ .cra_name = "md5", ++ .cra_driver_name = "rk2-md5", ++ .cra_priority = 300, ++ .cra_flags = CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_NEED_FALLBACK, ++ .cra_blocksize = SHA1_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct rk_ahash_ctx), ++ .cra_alignmask = 3, ++ .cra_init = rk_cra_hash_init, ++ .cra_exit = rk_cra_hash_exit, ++ .cra_module = THIS_MODULE, ++ } ++ } ++ } ++ }, ++ { ++ .type = CRYPTO_ALG_TYPE_AHASH, ++ .rk_mode = RK_CRYPTO_SHA1, ++ .alg.hash = { ++ .init = rk_ahash_init, ++ .update = rk_ahash_update, ++ .final = rk_ahash_final, ++ .finup = rk_ahash_finup, ++ .export = rk_ahash_export, ++ .import = rk_ahash_import, ++ .digest = rk_ahash_digest, ++ .halg = { ++ .digestsize = SHA1_DIGEST_SIZE, ++ .statesize = sizeof(struct sha1_state), ++ .base = { ++ .cra_name = "sha1", ++ .cra_driver_name = "rk2-sha1", ++ 
.cra_priority = 300, ++ .cra_flags = CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_NEED_FALLBACK, ++ .cra_blocksize = SHA1_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct rk_ahash_ctx), ++ .cra_alignmask = 3, ++ .cra_init = rk_cra_hash_init, ++ .cra_exit = rk_cra_hash_exit, ++ .cra_module = THIS_MODULE, ++ } ++ } ++ } ++ }, ++ { ++ .type = CRYPTO_ALG_TYPE_AHASH, ++ .rk_mode = RK_CRYPTO_SHA256, ++ .alg.hash = { ++ .init = rk_ahash_init, ++ .update = rk_ahash_update, ++ .final = rk_ahash_final, ++ .finup = rk_ahash_finup, ++ .export = rk_ahash_export, ++ .import = rk_ahash_import, ++ .digest = rk_ahash_digest, ++ .halg = { ++ .digestsize = SHA256_DIGEST_SIZE, ++ .statesize = sizeof(struct sha256_state), ++ .base = { ++ .cra_name = "sha256", ++ .cra_driver_name = "rk2-sha256", ++ .cra_priority = 300, ++ .cra_flags = CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_NEED_FALLBACK, ++ .cra_blocksize = SHA256_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct rk_ahash_ctx), ++ .cra_alignmask = 3, ++ .cra_init = rk_cra_hash_init, ++ .cra_exit = rk_cra_hash_exit, ++ .cra_module = THIS_MODULE, ++ } ++ } ++ } ++ }, ++ { ++ .type = CRYPTO_ALG_TYPE_AHASH, ++ .rk_mode = RK_CRYPTO_SHA384, ++ .alg.hash = { ++ .init = rk_ahash_init, ++ .update = rk_ahash_update, ++ .final = rk_ahash_final, ++ .finup = rk_ahash_finup, ++ .export = rk_ahash_export, ++ .import = rk_ahash_import, ++ .digest = rk_ahash_digest, ++ .halg = { ++ .digestsize = SHA384_DIGEST_SIZE, ++ .statesize = sizeof(struct sha512_state), ++ .base = { ++ .cra_name = "sha384", ++ .cra_driver_name = "rk2-sha384", ++ .cra_priority = 300, ++ .cra_flags = CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_NEED_FALLBACK, ++ .cra_blocksize = SHA384_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct rk_ahash_ctx), ++ .cra_alignmask = 3, ++ .cra_init = rk_cra_hash_init, ++ .cra_exit = rk_cra_hash_exit, ++ .cra_module = THIS_MODULE, ++ } ++ } ++ } ++ }, ++ { ++ .type = CRYPTO_ALG_TYPE_AHASH, ++ .rk_mode = RK_CRYPTO_SHA512, ++ .alg.hash = { ++ .init = rk_ahash_init, ++ .update = rk_ahash_update, 
++ .final = rk_ahash_final, ++ .finup = rk_ahash_finup, ++ .export = rk_ahash_export, ++ .import = rk_ahash_import, ++ .digest = rk_ahash_digest, ++ .halg = { ++ .digestsize = SHA512_DIGEST_SIZE, ++ .statesize = sizeof(struct sha512_state), ++ .base = { ++ .cra_name = "sha512", ++ .cra_driver_name = "rk2-sha512", ++ .cra_priority = 300, ++ .cra_flags = CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_NEED_FALLBACK, ++ .cra_blocksize = SHA512_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct rk_ahash_ctx), ++ .cra_alignmask = 3, ++ .cra_init = rk_cra_hash_init, ++ .cra_exit = rk_cra_hash_exit, ++ .cra_module = THIS_MODULE, ++ } ++ } ++ } ++ }, ++ { ++ .type = CRYPTO_ALG_TYPE_AHASH, ++ .rk_mode = RK_CRYPTO_SM3, ++ .alg.hash = { ++ .init = rk_ahash_init, ++ .update = rk_ahash_update, ++ .final = rk_ahash_final, ++ .finup = rk_ahash_finup, ++ .export = rk_ahash_export, ++ .import = rk_ahash_import, ++ .digest = rk_ahash_digest, ++ .halg = { ++ .digestsize = SM3_DIGEST_SIZE, ++ .statesize = sizeof(struct sm3_state), ++ .base = { ++ .cra_name = "sm3", ++ .cra_driver_name = "rk2-sm3", ++ .cra_priority = 300, ++ .cra_flags = CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_NEED_FALLBACK, ++ .cra_blocksize = SM3_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct rk_ahash_ctx), ++ .cra_alignmask = 3, ++ .cra_init = rk_cra_hash_init, ++ .cra_exit = rk_cra_hash_exit, ++ .cra_module = THIS_MODULE, ++ } ++ } ++ } ++ }, ++}; ++ ++#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP2_DEBUG ++static int rk_crypto_debugfs_show(struct seq_file *seq, void *v) ++{ ++ struct rk_crypto_dev *dd; ++ unsigned int i; ++ ++ spin_lock(&rocklist.lock); ++ list_for_each_entry(dd, &rocklist.dev_list, list) { ++ seq_printf(seq, "%s %s requests: %lu\n", ++ dev_driver_string(dd->dev), dev_name(dd->dev), ++ dd->nreq); ++ } ++ spin_unlock(&rocklist.lock); ++ ++ for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { ++ if (!rk_cipher_algs[i].dev) ++ continue; ++ switch (rk_cipher_algs[i].type) { ++ case CRYPTO_ALG_TYPE_SKCIPHER: ++ seq_printf(seq, "%s %s reqs=%lu 
fallback=%lu\n", ++ rk_cipher_algs[i].alg.skcipher.base.cra_driver_name, ++ rk_cipher_algs[i].alg.skcipher.base.cra_name, ++ rk_cipher_algs[i].stat_req, rk_cipher_algs[i].stat_fb); ++ seq_printf(seq, "\tfallback due to length: %lu\n", ++ rk_cipher_algs[i].stat_fb_len); ++ seq_printf(seq, "\tfallback due to alignment: %lu\n", ++ rk_cipher_algs[i].stat_fb_align); ++ seq_printf(seq, "\tfallback due to SGs: %lu\n", ++ rk_cipher_algs[i].stat_fb_sgdiff); ++ break; ++ case CRYPTO_ALG_TYPE_AHASH: ++ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", ++ rk_cipher_algs[i].alg.hash.halg.base.cra_driver_name, ++ rk_cipher_algs[i].alg.hash.halg.base.cra_name, ++ rk_cipher_algs[i].stat_req, rk_cipher_algs[i].stat_fb); ++ break; ++ } ++ } ++ return 0; ++} ++ ++DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs); ++#endif ++ ++static void register_debugfs(struct rk_crypto_dev *crypto_dev) ++{ ++#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP2_DEBUG ++ /* Ignore error of debugfs */ ++ rocklist.dbgfs_dir = debugfs_create_dir("rk3588_crypto", NULL); ++ rocklist.dbgfs_stats = debugfs_create_file("stats", 0444, ++ rocklist.dbgfs_dir, ++ &rocklist, ++ &rk_crypto_debugfs_fops); ++#endif ++} ++ ++static int rk_crypto_register(struct rk_crypto_dev *rkc) ++{ ++ unsigned int i, k; ++ int err = 0; ++ ++ for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { ++ rk_cipher_algs[i].dev = rkc; ++ switch (rk_cipher_algs[i].type) { ++ case CRYPTO_ALG_TYPE_SKCIPHER: ++ dev_info(rkc->dev, "Register %s as %s\n", ++ rk_cipher_algs[i].alg.skcipher.base.cra_name, ++ rk_cipher_algs[i].alg.skcipher.base.cra_driver_name); ++ err = crypto_register_skcipher(&rk_cipher_algs[i].alg.skcipher); ++ break; ++ case CRYPTO_ALG_TYPE_AHASH: ++ dev_info(rkc->dev, "Register %s as %s\n", ++ rk_cipher_algs[i].alg.hash.halg.base.cra_name, ++ rk_cipher_algs[i].alg.hash.halg.base.cra_driver_name); ++ err = crypto_register_ahash(&rk_cipher_algs[i].alg.hash); ++ break; ++ default: ++ dev_err(rkc->dev, "unknown algorithm\n"); ++ } ++ if (err) ++ goto 
err_cipher_algs; ++ } ++ return 0; ++ ++err_cipher_algs: ++ for (k = 0; k < i; k++) { ++ if (rk_cipher_algs[k].type == CRYPTO_ALG_TYPE_SKCIPHER) ++ crypto_unregister_skcipher(&rk_cipher_algs[k].alg.skcipher); ++ else ++ crypto_unregister_ahash(&rk_cipher_algs[k].alg.hash); ++ } ++ return err; ++} ++ ++static void rk_crypto_unregister(void) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { ++ if (rk_cipher_algs[i].type == CRYPTO_ALG_TYPE_SKCIPHER) ++ crypto_unregister_skcipher(&rk_cipher_algs[i].alg.skcipher); ++ else ++ crypto_unregister_ahash(&rk_cipher_algs[i].alg.hash); ++ } ++} ++ ++static const struct of_device_id crypto_of_id_table[] = { ++ { .compatible = "rockchip,rk3568-crypto", ++ .data = &rk3568_variant, ++ }, ++ { .compatible = "rockchip,rk3588-crypto", ++ .data = &rk3588_variant, ++ }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, crypto_of_id_table); ++ ++static int rk_crypto_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct rk_crypto_dev *rkc, *first; ++ int err = 0; ++ ++ rkc = devm_kzalloc(&pdev->dev, sizeof(*rkc), GFP_KERNEL); ++ if (!rkc) { ++ err = -ENOMEM; ++ goto err_crypto; ++ } ++ ++ rkc->dev = &pdev->dev; ++ platform_set_drvdata(pdev, rkc); ++ ++ rkc->variant = of_device_get_match_data(&pdev->dev); ++ if (!rkc->variant) { ++ dev_err(&pdev->dev, "Missing variant\n"); ++ return -EINVAL; ++ } ++ ++ rkc->rst = devm_reset_control_array_get_exclusive(dev); ++ if (IS_ERR(rkc->rst)) { ++ err = PTR_ERR(rkc->rst); ++ goto err_crypto; ++ } ++ ++ rkc->tl = dma_alloc_coherent(rkc->dev, ++ sizeof(struct rk_crypto_lli) * MAX_LLI, ++ &rkc->t_phy, GFP_KERNEL); ++ if (!rkc->tl) { ++ dev_err(rkc->dev, "Cannot get DMA memory for task\n"); ++ err = -ENOMEM; ++ goto err_crypto; ++ } ++ ++ reset_control_assert(rkc->rst); ++ usleep_range(10, 20); ++ reset_control_deassert(rkc->rst); ++ ++ rkc->reg = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(rkc->reg)) { ++ err = PTR_ERR(rkc->reg); ++ goto
err_crypto; ++ } ++ ++ err = rk_crypto_get_clks(rkc); ++ if (err) ++ goto err_crypto; ++ ++ rkc->irq = platform_get_irq(pdev, 0); ++ if (rkc->irq < 0) { ++ dev_err(&pdev->dev, "control Interrupt is not available.\n"); ++ err = rkc->irq; ++ goto err_crypto; ++ } ++ ++ err = devm_request_irq(&pdev->dev, rkc->irq, ++ rk_crypto_irq_handle, IRQF_SHARED, ++ "rk-crypto", pdev); ++ ++ if (err) { ++ dev_err(&pdev->dev, "irq request failed.\n"); ++ goto err_crypto; ++ } ++ ++ rkc->engine = crypto_engine_alloc_init(&pdev->dev, true); ++ crypto_engine_start(rkc->engine); ++ init_completion(&rkc->complete); ++ ++ err = rk_crypto_pm_init(rkc); ++ if (err) ++ goto err_pm; ++ ++ err = pm_runtime_resume_and_get(&pdev->dev); ++ ++ spin_lock(&rocklist.lock); ++ first = list_first_entry_or_null(&rocklist.dev_list, ++ struct rk_crypto_dev, list); ++ list_add_tail(&rkc->list, &rocklist.dev_list); ++ spin_unlock(&rocklist.lock); ++ ++ if (!first) { ++ dev_info(dev, "Registers crypto algos\n"); ++ err = rk_crypto_register(rkc); ++ if (err) { ++ dev_err(dev, "Fail to register crypto algorithms"); ++ goto err_register_alg; ++ } ++ ++ register_debugfs(rkc); ++ } ++ ++ return 0; ++ ++err_register_alg: ++ rk_crypto_pm_exit(rkc); ++err_pm: ++ crypto_engine_exit(rkc->engine); ++err_crypto: ++ dev_err(dev, "Crypto Accelerator not successfully registered\n"); ++ return err; ++} ++ ++static int rk_crypto_remove(struct platform_device *pdev) ++{ ++ struct rk_crypto_dev *crypto_tmp = platform_get_drvdata(pdev); ++ struct rk_crypto_dev *first; ++ ++ spin_lock_bh(&rocklist.lock); ++ list_del(&crypto_tmp->list); ++ first = list_first_entry_or_null(&rocklist.dev_list, ++ struct rk_crypto_dev, list); ++ spin_unlock_bh(&rocklist.lock); ++ ++ if (!first) { ++#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP2_DEBUG ++ debugfs_remove_recursive(rocklist.dbgfs_dir); ++#endif ++ rk_crypto_unregister(); ++ } ++ rk_crypto_pm_exit(crypto_tmp); ++ crypto_engine_exit(crypto_tmp->engine); ++ return 0; ++} ++ ++static struct 
platform_driver crypto_driver = { ++ .probe = rk_crypto_probe, ++ .remove = rk_crypto_remove, ++ .driver = { ++ .name = "rk3588-crypto", ++ .pm = &rk_crypto_pm_ops, ++ .of_match_table = crypto_of_id_table, ++ }, ++}; ++ ++module_platform_driver(crypto_driver); ++ ++MODULE_DESCRIPTION("Rockchip Crypto Engine cryptographic offloader"); ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Corentin Labbe "); +--- /dev/null ++++ b/drivers/crypto/rockchip/rk3588_crypto.h +@@ -0,0 +1,221 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define RK_CRYPTO_CLK_CTL 0x0000 ++#define RK_CRYPTO_RST_CTL 0x0004 ++ ++#define RK_CRYPTO_DMA_INT_EN 0x0008 ++/* values for RK_CRYPTO_DMA_INT_EN */ ++#define RK_CRYPTO_DMA_INT_LISTDONE BIT(0) ++ ++#define RK_CRYPTO_DMA_INT_ST 0x000C ++/* values in RK_CRYPTO_DMA_INT_ST are the same than in RK_CRYPTO_DMA_INT_EN */ ++ ++#define RK_CRYPTO_DMA_CTL 0x0010 ++#define RK_CRYPTO_DMA_CTL_START BIT(0) ++ ++#define RK_CRYPTO_DMA_LLI_ADDR 0x0014 ++ ++#define RK_CRYPTO_FIFO_CTL 0x0040 ++ ++#define RK_CRYPTO_BC_CTL 0x0044 ++#define RK_CRYPTO_AES (0 << 8) ++#define RK_CRYPTO_MODE_ECB (0 << 4) ++#define RK_CRYPTO_MODE_CBC (1 << 4) ++ ++#define RK_CRYPTO_HASH_CTL 0x0048 ++#define RK_CRYPTO_HW_PAD BIT(2) ++#define RK_CRYPTO_SHA1 (0 << 4) ++#define RK_CRYPTO_MD5 (1 << 4) ++#define RK_CRYPTO_SHA224 (3 << 4) ++#define RK_CRYPTO_SHA256 (2 << 4) ++#define RK_CRYPTO_SHA384 (9 << 4) ++#define RK_CRYPTO_SHA512 (8 << 4) ++#define RK_CRYPTO_SM3 (4 << 4) ++ ++#define RK_CRYPTO_AES_ECB_MODE (RK_CRYPTO_AES | RK_CRYPTO_MODE_ECB) ++#define RK_CRYPTO_AES_CBC_MODE (RK_CRYPTO_AES | RK_CRYPTO_MODE_CBC) ++#define RK_CRYPTO_AES_CTR_MODE 3 ++#define RK_CRYPTO_AES_128BIT_key (0 << 2) ++#define RK_CRYPTO_AES_192BIT_key (1 << 2) ++#define RK_CRYPTO_AES_256BIT_key (2 << 2) ++ ++#define RK_CRYPTO_DEC BIT(1) ++#define 
RK_CRYPTO_ENABLE BIT(0) ++ ++#define RK_CRYPTO_CH0_IV_0 0x0100 ++ ++#define RK_CRYPTO_KEY0 0x0180 ++#define RK_CRYPTO_KEY1 0x0184 ++#define RK_CRYPTO_KEY2 0x0188 ++#define RK_CRYPTO_KEY3 0x018C ++#define RK_CRYPTO_KEY4 0x0190 ++#define RK_CRYPTO_KEY5 0x0194 ++#define RK_CRYPTO_KEY6 0x0198 ++#define RK_CRYPTO_KEY7 0x019C ++ ++#define RK_CRYPTO_CH0_PC_LEN_0 0x0280 ++ ++#define RK_CRYPTO_CH0_IV_LEN 0x0300 ++ ++#define RK_CRYPTO_HASH_DOUT_0 0x03A0 ++#define RK_CRYPTO_HASH_VALID 0x03E4 ++ ++#define CRYPTO_AES_VERSION 0x0680 ++#define CRYPTO_DES_VERSION 0x0684 ++#define CRYPTO_SM4_VERSION 0x0688 ++#define CRYPTO_HASH_VERSION 0x068C ++#define CRYPTO_HMAC_VERSION 0x0690 ++#define CRYPTO_RNG_VERSION 0x0694 ++#define CRYPTO_PKA_VERSION 0x0698 ++#define CRYPTO_CRYPTO_VERSION 0x06F0 ++ ++#define RK_LLI_DMA_CTRL_SRC_INT BIT(10) ++#define RK_LLI_DMA_CTRL_DST_INT BIT(9) ++#define RK_LLI_DMA_CTRL_LIST_INT BIT(8) ++#define RK_LLI_DMA_CTRL_LAST BIT(0) ++ ++#define RK_LLI_STRING_LAST BIT(2) ++#define RK_LLI_STRING_FIRST BIT(1) ++#define RK_LLI_CIPHER_START BIT(0) ++ ++#define RK_MAX_CLKS 4 ++ ++/* there are no hw limit, but we need to choose a maximum of descriptor to allocate */ ++#define MAX_LLI 20 ++ ++struct rk_crypto_lli { ++ __le32 src_addr; ++ __le32 src_len; ++ __le32 dst_addr; ++ __le32 dst_len; ++ __le32 user; ++ __le32 iv; ++ __le32 dma_ctrl; ++ __le32 next; ++}; ++ ++/* ++ * struct rockchip_ip - struct for managing a list of RK crypto instance ++ * @dev_list: Used for doing a list of rk_crypto_dev ++ * @lock: Control access to dev_list ++ * @dbgfs_dir: Debugfs dentry for statistic directory ++ * @dbgfs_stats: Debugfs dentry for statistic counters ++ */ ++struct rockchip_ip { ++ struct list_head dev_list; ++ spinlock_t lock; /* Control access to dev_list */ ++ struct dentry *dbgfs_dir; ++ struct dentry *dbgfs_stats; ++}; ++ ++struct rk_clks { ++ const char *name; ++ unsigned long max; ++}; ++ ++struct rk_variant { ++ int num_clks; ++ struct rk_clks rkclks[RK_MAX_CLKS]; 
++}; ++ ++struct rk_crypto_dev { ++ struct list_head list; ++ struct device *dev; ++ struct clk_bulk_data *clks; ++ int num_clks; ++ struct reset_control *rst; ++ void __iomem *reg; ++ int irq; ++ const struct rk_variant *variant; ++ unsigned long nreq; ++ struct crypto_engine *engine; ++ struct completion complete; ++ int status; ++ struct rk_crypto_lli *tl; ++ dma_addr_t t_phy; ++}; ++ ++/* the private variable of hash */ ++struct rk_ahash_ctx { ++ struct crypto_engine_ctx enginectx; ++ /* for fallback */ ++ struct crypto_ahash *fallback_tfm; ++}; ++ ++/* the private variable of hash for fallback */ ++struct rk_ahash_rctx { ++ struct rk_crypto_dev *dev; ++ struct ahash_request fallback_req; ++ u32 mode; ++ int nrsgs; ++}; ++ ++/* the private variable of cipher */ ++struct rk_cipher_ctx { ++ struct crypto_engine_ctx enginectx; ++ unsigned int keylen; ++ u8 key[AES_MAX_KEY_SIZE]; ++ u8 iv[AES_BLOCK_SIZE]; ++ struct crypto_skcipher *fallback_tfm; ++}; ++ ++struct rk_cipher_rctx { ++ struct rk_crypto_dev *dev; ++ u8 backup_iv[AES_BLOCK_SIZE]; ++ u32 mode; ++ struct skcipher_request fallback_req; // keep at the end ++}; ++ ++struct rk_crypto_template { ++ u32 type; ++ u32 rk_mode; ++ struct rk_crypto_dev *dev; ++ union { ++ struct skcipher_alg skcipher; ++ struct ahash_alg hash; ++ } alg; ++ unsigned long stat_req; ++ unsigned long stat_fb; ++ unsigned long stat_fb_len; ++ unsigned long stat_fb_sglen; ++ unsigned long stat_fb_align; ++ unsigned long stat_fb_sgdiff; ++}; ++ ++struct rk_crypto_dev *get_rk_crypto(void); ++ ++int rk_cipher_tfm_init(struct crypto_skcipher *tfm); ++void rk_cipher_tfm_exit(struct crypto_skcipher *tfm); ++int rk_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, ++ unsigned int keylen); ++int rk_aes_ecb_encrypt(struct skcipher_request *req); ++int rk_aes_ecb_decrypt(struct skcipher_request *req); ++int rk_aes_cbc_encrypt(struct skcipher_request *req); ++int rk_aes_cbc_decrypt(struct skcipher_request *req); ++ ++int rk_ahash_init(struct 
ahash_request *req); ++int rk_ahash_update(struct ahash_request *req); ++int rk_ahash_final(struct ahash_request *req); ++int rk_ahash_finup(struct ahash_request *req); ++int rk_ahash_import(struct ahash_request *req, const void *in); ++int rk_ahash_export(struct ahash_request *req, void *out); ++int rk_ahash_digest(struct ahash_request *req); ++int rk_cra_hash_init(struct crypto_tfm *tfm); ++void rk_cra_hash_exit(struct crypto_tfm *tfm); +--- /dev/null ++++ b/drivers/crypto/rockchip/rk3588_crypto_ahash.c +@@ -0,0 +1,346 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Crypto acceleration support for Rockchip RK3588 ++ * ++ * Copyright (c) 2022 Corentin Labbe ++ */ ++#include ++#include ++#include "rk3588_crypto.h" ++ ++static bool rk_ahash_need_fallback(struct ahash_request *areq) ++{ ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); ++ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); ++ struct rk_crypto_template *algt = container_of(alg, struct rk_crypto_template, alg.hash); ++ struct scatterlist *sg; ++ ++ sg = areq->src; ++ while (sg) { ++ if (!IS_ALIGNED(sg->offset, sizeof(u32))) { ++ algt->stat_fb_align++; ++ return true; ++ } ++ if (sg->length % 4) { ++ algt->stat_fb_sglen++; ++ return true; ++ } ++ sg = sg_next(sg); ++ } ++ return false; ++} ++ ++static int rk_ahash_digest_fb(struct ahash_request *areq) ++{ ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); ++ struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm); ++ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); ++ struct rk_crypto_template *algt = container_of(alg, struct rk_crypto_template, alg.hash); ++ ++ algt->stat_fb++; ++ ++ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); ++ rctx->fallback_req.base.flags = areq->base.flags & ++ CRYPTO_TFM_REQ_MAY_SLEEP; ++ ++ rctx->fallback_req.nbytes = areq->nbytes; ++ rctx->fallback_req.src = areq->src; ++ rctx->fallback_req.result = 
areq->result; ++ ++ return crypto_ahash_digest(&rctx->fallback_req); ++} ++ ++static int zero_message_process(struct ahash_request *req) ++{ ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); ++ struct rk_crypto_template *algt = container_of(alg, struct rk_crypto_template, alg.hash); ++ int digestsize = crypto_ahash_digestsize(tfm); ++ ++ switch (algt->rk_mode) { ++ case RK_CRYPTO_SHA1: ++ memcpy(req->result, sha1_zero_message_hash, digestsize); ++ break; ++ case RK_CRYPTO_SHA256: ++ memcpy(req->result, sha256_zero_message_hash, digestsize); ++ break; ++ case RK_CRYPTO_SHA384: ++ memcpy(req->result, sha384_zero_message_hash, digestsize); ++ break; ++ case RK_CRYPTO_SHA512: ++ memcpy(req->result, sha512_zero_message_hash, digestsize); ++ break; ++ case RK_CRYPTO_MD5: ++ memcpy(req->result, md5_zero_message_hash, digestsize); ++ break; ++ case RK_CRYPTO_SM3: ++ memcpy(req->result, sm3_zero_message_hash, digestsize); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++int rk_ahash_init(struct ahash_request *req) ++{ ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ++ ++ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); ++ rctx->fallback_req.base.flags = req->base.flags & ++ CRYPTO_TFM_REQ_MAY_SLEEP; ++ ++ return crypto_ahash_init(&rctx->fallback_req); ++} ++ ++int rk_ahash_update(struct ahash_request *req) ++{ ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ++ ++ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); ++ rctx->fallback_req.base.flags = req->base.flags & ++ CRYPTO_TFM_REQ_MAY_SLEEP; ++ rctx->fallback_req.nbytes = req->nbytes; ++ rctx->fallback_req.src = req->src; ++ ++ return 
crypto_ahash_update(&rctx->fallback_req); ++} ++ ++int rk_ahash_final(struct ahash_request *req) ++{ ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ++ ++ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); ++ rctx->fallback_req.base.flags = req->base.flags & ++ CRYPTO_TFM_REQ_MAY_SLEEP; ++ rctx->fallback_req.result = req->result; ++ ++ return crypto_ahash_final(&rctx->fallback_req); ++} ++ ++int rk_ahash_finup(struct ahash_request *req) ++{ ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ++ ++ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); ++ rctx->fallback_req.base.flags = req->base.flags & ++ CRYPTO_TFM_REQ_MAY_SLEEP; ++ ++ rctx->fallback_req.nbytes = req->nbytes; ++ rctx->fallback_req.src = req->src; ++ rctx->fallback_req.result = req->result; ++ ++ return crypto_ahash_finup(&rctx->fallback_req); ++} ++ ++int rk_ahash_import(struct ahash_request *req, const void *in) ++{ ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ++ ++ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); ++ rctx->fallback_req.base.flags = req->base.flags & ++ CRYPTO_TFM_REQ_MAY_SLEEP; ++ ++ return crypto_ahash_import(&rctx->fallback_req, in); ++} ++ ++int rk_ahash_export(struct ahash_request *req, void *out) ++{ ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ++ ++ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); ++ rctx->fallback_req.base.flags = req->base.flags & ++ CRYPTO_TFM_REQ_MAY_SLEEP; ++ ++ return crypto_ahash_export(&rctx->fallback_req, out); ++} ++ ++int 
rk_ahash_digest(struct ahash_request *req) ++{ ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(req); ++ struct rk_crypto_dev *dev; ++ struct crypto_engine *engine; ++ ++ if (rk_ahash_need_fallback(req)) ++ return rk_ahash_digest_fb(req); ++ ++ if (!req->nbytes) ++ return zero_message_process(req); ++ ++ dev = get_rk_crypto(); ++ ++ rctx->dev = dev; ++ engine = dev->engine; ++ ++ return crypto_transfer_hash_request_to_engine(engine, req); ++} ++ ++static int rk_hash_prepare(struct crypto_engine *engine, void *breq) ++{ ++ struct ahash_request *areq = container_of(breq, struct ahash_request, base); ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); ++ struct rk_crypto_dev *rkc = rctx->dev; ++ int ret; ++ ++ ret = dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); ++ if (ret <= 0) ++ return -EINVAL; ++ ++ rctx->nrsgs = ret; ++ ++ return 0; ++} ++ ++static int rk_hash_unprepare(struct crypto_engine *engine, void *breq) ++{ ++ struct ahash_request *areq = container_of(breq, struct ahash_request, base); ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); ++ struct rk_crypto_dev *rkc = rctx->dev; ++ ++ dma_unmap_sg(rkc->dev, areq->src, rctx->nrsgs, DMA_TO_DEVICE); ++ return 0; ++} ++ ++static int rk_hash_run(struct crypto_engine *engine, void *breq) ++{ ++ struct ahash_request *areq = container_of(breq, struct ahash_request, base); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); ++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); ++ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); ++ struct rk_crypto_template *algt = container_of(alg, struct rk_crypto_template, alg.hash); ++ struct scatterlist *sgs = areq->src; ++ struct rk_crypto_dev *rkc = rctx->dev; ++ struct rk_crypto_lli *dd = &rkc->tl[0]; ++ int ddi = 0; ++ int err = 0; ++ unsigned int len = areq->nbytes; ++ unsigned int todo; ++ u32 v; ++ int i; ++ ++ err = pm_runtime_resume_and_get(rkc->dev); ++ if (err) ++ return err; ++ ++ dev_dbg(rkc->dev, "%s %s 
len=%d\n", __func__, ++ crypto_tfm_alg_name(areq->base.tfm), areq->nbytes); ++ ++ algt->stat_req++; ++ rkc->nreq++; ++ ++ rctx->mode = algt->rk_mode; ++ rctx->mode |= 0xffff0000; ++ rctx->mode |= RK_CRYPTO_ENABLE | RK_CRYPTO_HW_PAD; ++ writel(rctx->mode, rkc->reg + RK_CRYPTO_HASH_CTL); ++ ++ while (sgs && len > 0) { ++ dd = &rkc->tl[ddi]; ++ ++ todo = min(sg_dma_len(sgs), len); ++ dd->src_addr = sg_dma_address(sgs); ++ dd->src_len = todo; ++ dd->dst_addr = 0; ++ dd->dst_len = 0; ++ dd->dma_ctrl = ddi << 24; ++ dd->iv = 0; ++ dd->next = rkc->t_phy + sizeof(struct rk_crypto_lli) * (ddi + 1); ++ ++ if (ddi == 0) ++ dd->user = RK_LLI_CIPHER_START | RK_LLI_STRING_FIRST; ++ else ++ dd->user = 0; ++ ++ len -= todo; ++ dd->dma_ctrl |= RK_LLI_DMA_CTRL_SRC_INT; ++ if (len == 0) { ++ dd->user |= RK_LLI_STRING_LAST; ++ dd->dma_ctrl |= RK_LLI_DMA_CTRL_LAST; ++ } ++ dev_dbg(rkc->dev, "HASH SG %d sglen=%d user=%x dma=%x mode=%x len=%d todo=%d phy=%llx\n", ++ ddi, sgs->length, dd->user, dd->dma_ctrl, rctx->mode, len, todo, rkc->t_phy); ++ ++ sgs = sg_next(sgs); ++ ddi++; ++ } ++ dd->next = 1; ++ writel(RK_CRYPTO_DMA_INT_LISTDONE | 0x7F, rkc->reg + RK_CRYPTO_DMA_INT_EN); ++ ++ writel(rkc->t_phy, rkc->reg + RK_CRYPTO_DMA_LLI_ADDR); ++ ++ reinit_completion(&rkc->complete); ++ rkc->status = 0; ++ ++ writel(RK_CRYPTO_DMA_CTL_START | 1 << 16, rkc->reg + RK_CRYPTO_DMA_CTL); ++ ++ wait_for_completion_interruptible_timeout(&rkc->complete, ++ msecs_to_jiffies(2000)); ++ if (!rkc->status) { ++ dev_err(rkc->dev, "DMA timeout\n"); ++ err = -EFAULT; ++ goto theend; ++ } ++ ++ readl_poll_timeout_atomic(rkc->reg + RK_CRYPTO_HASH_VALID, v, v == 1, ++ 10, 1000); ++ ++ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) { ++ v = readl(rkc->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4); ++ put_unaligned_le32(be32_to_cpu(v), areq->result + i * 4); ++ } ++ ++theend: ++ pm_runtime_put_autosuspend(rkc->dev); ++ ++ local_bh_disable(); ++ crypto_finalize_hash_request(engine, breq, err); ++ local_bh_enable(); ++ 
++ return 0; ++} ++ ++int rk_cra_hash_init(struct crypto_tfm *tfm) ++{ ++ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); ++ const char *alg_name = crypto_tfm_alg_name(tfm); ++ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); ++ struct rk_crypto_template *algt = container_of(alg, struct rk_crypto_template, alg.hash); ++ ++ /* for fallback */ ++ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, ++ CRYPTO_ALG_NEED_FALLBACK); ++ if (IS_ERR(tctx->fallback_tfm)) { ++ dev_err(algt->dev->dev, "Could not load fallback driver.\n"); ++ return PTR_ERR(tctx->fallback_tfm); ++ } ++ ++ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), ++ sizeof(struct rk_ahash_rctx) + ++ crypto_ahash_reqsize(tctx->fallback_tfm)); ++ ++ tctx->enginectx.op.do_one_request = rk_hash_run; ++ tctx->enginectx.op.prepare_request = rk_hash_prepare; ++ tctx->enginectx.op.unprepare_request = rk_hash_unprepare; ++ ++ return 0; ++} ++ ++void rk_cra_hash_exit(struct crypto_tfm *tfm) ++{ ++ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); ++ ++ crypto_free_ahash(tctx->fallback_tfm); ++} +--- /dev/null ++++ b/drivers/crypto/rockchip/rk3588_crypto_skcipher.c +@@ -0,0 +1,340 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * hardware cryptographic offloader for rk3568/rk3588 SoC ++ * ++ * Copyright (c) 2022 Corentin Labbe ++ */ ++#include ++#include "rk3588_crypto.h" ++ ++static int rk_cipher_need_fallback(struct skcipher_request *req) ++{ ++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); ++ struct skcipher_alg *alg = crypto_skcipher_alg(tfm); ++ struct rk_crypto_template *algt = container_of(alg, struct rk_crypto_template, alg.skcipher); ++ struct scatterlist *sgs, *sgd; ++ unsigned int stodo, dtodo, len; ++ unsigned int bs = crypto_skcipher_blocksize(tfm); ++ ++ if (!req->cryptlen) ++ return true; ++ ++ len = req->cryptlen; ++ sgs = req->src; ++ sgd = req->dst; ++ while (sgs && sgd) { ++ if (!IS_ALIGNED(sgs->offset, sizeof(u32))) { ++ algt->stat_fb_align++; ++ return true; ++ } ++ if 
(!IS_ALIGNED(sgd->offset, sizeof(u32))) { ++ algt->stat_fb_align++; ++ return true; ++ } ++ stodo = min(len, sgs->length); ++ if (stodo % bs) { ++ algt->stat_fb_len++; ++ return true; ++ } ++ dtodo = min(len, sgd->length); ++ if (dtodo % bs) { ++ algt->stat_fb_len++; ++ return true; ++ } ++ if (stodo != dtodo) { ++ algt->stat_fb_sgdiff++; ++ return true; ++ } ++ len -= stodo; ++ sgs = sg_next(sgs); ++ sgd = sg_next(sgd); ++ } ++ return false; ++} ++ ++static int rk_cipher_fallback(struct skcipher_request *areq) ++{ ++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); ++ struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); ++ struct skcipher_alg *alg = crypto_skcipher_alg(tfm); ++ struct rk_crypto_template *algt = container_of(alg, struct rk_crypto_template, alg.skcipher); ++ int err; ++ ++ algt->stat_fb++; ++ ++ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); ++ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, ++ areq->base.complete, areq->base.data); ++ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, ++ areq->cryptlen, areq->iv); ++ if (rctx->mode & RK_CRYPTO_DEC) ++ err = crypto_skcipher_decrypt(&rctx->fallback_req); ++ else ++ err = crypto_skcipher_encrypt(&rctx->fallback_req); ++ return err; ++} ++ ++static int rk_cipher_handle_req(struct skcipher_request *req) ++{ ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); ++ struct rk_crypto_dev *rkc; ++ struct crypto_engine *engine; ++ ++ if (rk_cipher_need_fallback(req)) ++ return rk_cipher_fallback(req); ++ ++ rkc = get_rk_crypto(); ++ ++ engine = rkc->engine; ++ rctx->dev = rkc; ++ ++ return crypto_transfer_skcipher_request_to_engine(engine, req); ++} ++ ++int rk_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, ++ unsigned int keylen) ++{ ++ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); ++ struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); ++ ++ if (keylen != 
AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && ++ keylen != AES_KEYSIZE_256) ++ return -EINVAL; ++ ctx->keylen = keylen; ++ memcpy(ctx->key, key, keylen); ++ ++ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); ++} ++ ++int rk_aes_ecb_encrypt(struct skcipher_request *req) ++{ ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); ++ ++ rctx->mode = RK_CRYPTO_AES_ECB_MODE; ++ return rk_cipher_handle_req(req); ++} ++ ++int rk_aes_ecb_decrypt(struct skcipher_request *req) ++{ ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); ++ ++ rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; ++ return rk_cipher_handle_req(req); ++} ++ ++int rk_aes_cbc_encrypt(struct skcipher_request *req) ++{ ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); ++ ++ rctx->mode = RK_CRYPTO_AES_CBC_MODE; ++ return rk_cipher_handle_req(req); ++} ++ ++int rk_aes_cbc_decrypt(struct skcipher_request *req) ++{ ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); ++ ++ rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; ++ return rk_cipher_handle_req(req); ++} ++ ++static int rk_cipher_run(struct crypto_engine *engine, void *async_req) ++{ ++ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); ++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); ++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); ++ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct scatterlist *sgs, *sgd; ++ int err = 0; ++ int ivsize = crypto_skcipher_ivsize(tfm); ++ unsigned int len = areq->cryptlen; ++ unsigned int todo; ++ struct skcipher_alg *alg = crypto_skcipher_alg(tfm); ++ struct rk_crypto_template *algt = container_of(alg, struct rk_crypto_template, alg.skcipher); ++ struct rk_crypto_dev *rkc = rctx->dev; ++ struct rk_crypto_lli *dd = &rkc->tl[0]; ++ u32 m, v; ++ u32 *rkey = (u32 *)ctx->key; ++ u32 *riv = (u32 *)areq->iv; ++ int i; ++ unsigned int offset; ++ ++ err = pm_runtime_resume_and_get(rkc->dev); ++ if 
(err) ++ return err; ++ ++ algt->stat_req++; ++ rkc->nreq++; ++ ++ m = rctx->mode | RK_CRYPTO_ENABLE; ++ switch (ctx->keylen) { ++ case AES_KEYSIZE_128: ++ m |= RK_CRYPTO_AES_128BIT_key; ++ break; ++ case AES_KEYSIZE_192: ++ m |= RK_CRYPTO_AES_192BIT_key; ++ break; ++ case AES_KEYSIZE_256: ++ m |= RK_CRYPTO_AES_256BIT_key; ++ break; ++ } ++ /* the upper bits are a write enable mask, so we need to write 1 to all ++ * upper 16 bits to allow write to the 16 lower bits ++ */ ++ m |= 0xffff0000; ++ ++ dev_dbg(rkc->dev, "%s %s len=%u keylen=%u mode=%x\n", __func__, ++ crypto_tfm_alg_name(areq->base.tfm), ++ areq->cryptlen, ctx->keylen, m); ++ sgs = areq->src; ++ sgd = areq->dst; ++ ++ while (sgs && sgd && len) { ++ ivsize = crypto_skcipher_ivsize(tfm); ++ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { ++ if (rctx->mode & RK_CRYPTO_DEC) { ++ offset = sgs->length - ivsize; ++ scatterwalk_map_and_copy(rctx->backup_iv, sgs, ++ offset, ivsize, 0); ++ } ++ } ++ ++ if (sgs == sgd) { ++ err = dma_map_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL); ++ if (err != 1) { ++ dev_err(rkc->dev, "Invalid sg number %d\n", err); ++ err = -EINVAL; ++ goto theend; ++ } ++ } else { ++ err = dma_map_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE); ++ if (err != 1) { ++ dev_err(rkc->dev, "Invalid sg number %d\n", err); ++ err = -EINVAL; ++ goto theend; ++ } ++ err = dma_map_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE); ++ if (err != 1) { ++ dev_err(rkc->dev, "Invalid sg number %d\n", err); ++ err = -EINVAL; ++ dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE); ++ goto theend; ++ } ++ } ++ err = 0; ++ writel(m, rkc->reg + RK_CRYPTO_BC_CTL); ++ ++ for (i = 0; i < ctx->keylen / 4; i++) { ++ v = cpu_to_be32(rkey[i]); ++ writel(v, rkc->reg + RK_CRYPTO_KEY0 + i * 4); ++ } ++ ++ if (ivsize) { ++ for (i = 0; i < ivsize / 4; i++) ++ writel(cpu_to_be32(riv[i]), ++ rkc->reg + RK_CRYPTO_CH0_IV_0 + i * 4); ++ writel(ivsize, rkc->reg + RK_CRYPTO_CH0_IV_LEN); ++ } ++ if (!sgs->length) { ++ sgs = sg_next(sgs); ++ sgd = sg_next(sgd); ++ 
continue; ++ } ++ ++ /* The hw support multiple descriptor, so why this driver use ++ * only one descritor ? ++ * Using one descriptor per SG seems the way to do and it works ++ * but only when doing encryption. ++ * With decryption it always fail on second descriptor. ++ * Probably the HW dont know how to use IV. ++ */ ++ todo = min(sg_dma_len(sgs), len); ++ len -= todo; ++ dd->src_addr = sg_dma_address(sgs); ++ dd->src_len = todo; ++ dd->dst_addr = sg_dma_address(sgd); ++ dd->dst_len = todo; ++ dd->iv = 0; ++ dd->next = 1; ++ ++ dd->user = RK_LLI_CIPHER_START | RK_LLI_STRING_FIRST | RK_LLI_STRING_LAST; ++ dd->dma_ctrl |= RK_LLI_DMA_CTRL_DST_INT | RK_LLI_DMA_CTRL_LAST; ++ ++ writel(RK_CRYPTO_DMA_INT_LISTDONE | 0x7F, rkc->reg + RK_CRYPTO_DMA_INT_EN); ++ ++ writel(rkc->t_phy, rkc->reg + RK_CRYPTO_DMA_LLI_ADDR); ++ ++ reinit_completion(&rkc->complete); ++ rkc->status = 0; ++ ++ writel(RK_CRYPTO_DMA_CTL_START | 1 << 16, rkc->reg + RK_CRYPTO_DMA_CTL); ++ ++ wait_for_completion_interruptible_timeout(&rkc->complete, ++ msecs_to_jiffies(10000)); ++ if (sgs == sgd) { ++ dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL); ++ } else { ++ dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE); ++ dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE); ++ } ++ ++ if (!rkc->status) { ++ dev_err(rkc->dev, "DMA timeout\n"); ++ err = -EFAULT; ++ goto theend; ++ } ++ if (areq->iv && ivsize > 0) { ++ offset = sgd->length - ivsize; ++ if (rctx->mode & RK_CRYPTO_DEC) { ++ memcpy(areq->iv, rctx->backup_iv, ivsize); ++ memzero_explicit(rctx->backup_iv, ivsize); ++ } else { ++ scatterwalk_map_and_copy(areq->iv, sgd, offset, ++ ivsize, 0); ++ } ++ } ++ sgs = sg_next(sgs); ++ sgd = sg_next(sgd); ++ } ++theend: ++ writel(0xffff0000, rkc->reg + RK_CRYPTO_BC_CTL); ++ pm_runtime_put_autosuspend(rkc->dev); ++ ++ local_bh_disable(); ++ crypto_finalize_skcipher_request(engine, areq, err); ++ local_bh_enable(); ++ return 0; ++} ++ ++int rk_cipher_tfm_init(struct crypto_skcipher *tfm) ++{ ++ struct rk_cipher_ctx 
*ctx = crypto_skcipher_ctx(tfm); ++ const char *name = crypto_tfm_alg_name(&tfm->base); ++ struct skcipher_alg *alg = crypto_skcipher_alg(tfm); ++ struct rk_crypto_template *algt = container_of(alg, struct rk_crypto_template, alg.skcipher); ++ ++ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); ++ if (IS_ERR(ctx->fallback_tfm)) { ++ dev_err(algt->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", ++ name, PTR_ERR(ctx->fallback_tfm)); ++ return PTR_ERR(ctx->fallback_tfm); ++ } ++ ++ tfm->reqsize = sizeof(struct rk_cipher_rctx) + ++ crypto_skcipher_reqsize(ctx->fallback_tfm); ++ ++ ctx->enginectx.op.do_one_request = rk_cipher_run; ++ ctx->enginectx.op.prepare_request = NULL; ++ ctx->enginectx.op.unprepare_request = NULL; ++ ++ return 0; ++} ++ ++void rk_cipher_tfm_exit(struct crypto_skcipher *tfm) ++{ ++ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ ++ memzero_explicit(ctx->key, ctx->keylen); ++ crypto_free_skcipher(ctx->fallback_tfm); ++} diff --git a/target/linux/rockchip/patches-6.0/205-ARM64-dts-rk3568-add-crypto-node.patch b/target/linux/rockchip/patches-6.0/205-ARM64-dts-rk3568-add-crypto-node.patch new file mode 100644 index 000000000..ca2cfbf87 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/205-ARM64-dts-rk3568-add-crypto-node.patch @@ -0,0 +1,36 @@ +From 5055f9e39713f9e5303bbcdc3712909a462dd3c2 Mon Sep 17 00:00:00 2001 +From: Corentin Labbe +Date: Tue, 27 Sep 2022 08:00:48 +0000 +Subject: [PATCH 5/5] ARM64: dts: rk3568: add crypto node + +The rk3568 has a crypto IP handled by the rk3588 crypto driver so adds a +node for it. 
+ +Signed-off-by: Corentin Labbe +--- + arch/arm64/boot/dts/rockchip/rk3568.dtsi | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +--- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi +@@ -211,6 +211,20 @@ + }; + }; + ++ crypto: crypto@fe380000 { ++ compatible = "rockchip,rk3568-crypto"; ++ reg = <0x0 0xfe380000 0x0 0x2000>; ++ interrupts = ; ++ clocks = <&cru ACLK_CRYPTO_NS>, <&cru HCLK_CRYPTO_NS>, ++ <&cru CLK_CRYPTO_NS_CORE>, <&cru CLK_CRYPTO_NS_PKA>; ++ clock-names = "aclk", "hclk", "sclk", "pka"; ++ resets = <&cru SRST_CRYPTO_NS_CORE>, <&cru SRST_A_CRYPTO_NS>, ++ <&cru SRST_H_CRYPTO_NS>, <&cru SRST_CRYPTO_NS_RNG>, ++ <&cru SRST_CRYPTO_NS_PKA>; ++ reset-names = "core", "a", "h", "rng", "pka"; ++ status = "okay"; ++ }; ++ + combphy0: phy@fe820000 { + compatible = "rockchip,rk3568-naneng-combphy"; + reg = <0x0 0xfe820000 0x0 0x100>; diff --git a/target/linux/rockchip/patches-6.0/206-fix-build-crypto.patch b/target/linux/rockchip/patches-6.0/206-fix-build-crypto.patch new file mode 100644 index 000000000..234e69b56 --- /dev/null +++ b/target/linux/rockchip/patches-6.0/206-fix-build-crypto.patch @@ -0,0 +1,21 @@ +--- a/drivers/crypto/rockchip/rk3288_crypto.c ++++ b/drivers/crypto/rockchip/rk3288_crypto.c +@@ -24,18 +24,6 @@ static struct rockchip_ip rocklist = { + .lock = __SPIN_LOCK_UNLOCKED(rocklist.lock), + }; + +-struct rk_crypto_info *get_rk_crypto(void) +-{ +- struct rk_crypto_info *first; +- +- spin_lock(&rocklist.lock); +- first = list_first_entry_or_null(&rocklist.dev_list, +- struct rk_crypto_info, list); +- list_rotate_left(&rocklist.dev_list); +- spin_unlock(&rocklist.lock); +- return first; +-} +- + static const struct rk_variant rk3288_variant = { + .num_clks = 4, + .rkclks = { diff --git a/target/linux/rockchip/patches-6.0/210-rockchip-rk356x-add-support-for-new-boards.patch b/target/linux/rockchip/patches-6.0/300-rockchip-rk356x-add-support-for-new-boards.patch similarity index 100% rename from
target/linux/rockchip/patches-6.0/210-rockchip-rk356x-add-support-for-new-boards.patch rename to target/linux/rockchip/patches-6.0/300-rockchip-rk356x-add-support-for-new-boards.patch diff --git a/target/linux/rockchip/patches-6.0/203-rockchip-rk3328-Add-support-for-OrangePi-R1-Plus-LTS.patch b/target/linux/rockchip/patches-6.0/303-rockchip-rk3328-Add-support-for-OrangePi-R1-Plus-LTS.patch similarity index 100% rename from target/linux/rockchip/patches-6.0/203-rockchip-rk3328-Add-support-for-OrangePi-R1-Plus-LTS.patch rename to target/linux/rockchip/patches-6.0/303-rockchip-rk3328-Add-support-for-OrangePi-R1-Plus-LTS.patch diff --git a/target/linux/rockchip/patches-6.0/204-rockchip-rk3328-Add-support-for-FriendlyARM-NanoPi-R.patch b/target/linux/rockchip/patches-6.0/304-rockchip-rk3328-Add-support-for-FriendlyARM-NanoPi-R.patch similarity index 100% rename from target/linux/rockchip/patches-6.0/204-rockchip-rk3328-Add-support-for-FriendlyARM-NanoPi-R.patch rename to target/linux/rockchip/patches-6.0/304-rockchip-rk3328-Add-support-for-FriendlyARM-NanoPi-R.patch diff --git a/target/linux/rockchip/patches-6.0/205-rockchip-rk3328-add-support-for-FriendlyARM-NanoPi-Neo3.patch b/target/linux/rockchip/patches-6.0/305-rockchip-rk3328-add-support-for-FriendlyARM-NanoPi-Neo3.patch similarity index 100% rename from target/linux/rockchip/patches-6.0/205-rockchip-rk3328-add-support-for-FriendlyARM-NanoPi-Neo3.patch rename to target/linux/rockchip/patches-6.0/305-rockchip-rk3328-add-support-for-FriendlyARM-NanoPi-Neo3.patch