From 9db10a8cb2581b404e6d702c832594941254757e Mon Sep 17 00:00:00 2001 From: lean Date: Fri, 13 Mar 2020 09:45:45 +0800 Subject: [PATCH] ipq40xx: Qualcomm Crypto Engine fixes --- package/kernel/linux/modules/crypto.mk | 120 ++++- target/linux/ipq40xx/config-4.19 | 23 +- ...dd-CRYPTO_ALG_KERN_DRIVER_ONLY-flag.patch} | 0 ...40-crypto-qce-switch-to-skcipher-API.patch | 463 +++++++++++++++++ ...ce-fix-ctr-aes-qce-block-chunk-sizes.patch | 41 ++ ...crypto-qce-fix-xts-aes-qce-key-sizes.patch | 52 ++ ...-save-a-sg-table-slot-for-result-buf.patch | 85 ++++ ...44-crypto-qce-update-the-skcipher-IV.patch | 29 ++ ...qce-initialize-fallback-only-for-AES.patch | 56 ++ ...e-use-cryptlen-when-adding-extra-sgl.patch | 89 ++++ ...-use-AES-fallback-for-small-requests.patch | 126 +++++ ...-handle-AES-XTS-cases-that-qce-fails.patch | 59 +++ ...e-allow-building-only-hashes-ciphers.patch | 415 +++++++++++++++ ...40-crypto-qce-switch-to-skcipher-API.patch | 481 ++++++++++++++++++ ...ce-fix-ctr-aes-qce-block-chunk-sizes.patch | 43 ++ ...crypto-qce-fix-xts-aes-qce-key-sizes.patch | 60 +++ ...-save-a-sg-table-slot-for-result-buf.patch | 87 ++++ ...44-crypto-qce-update-the-skcipher-IV.patch | 31 ++ ...qce-initialize-fallback-only-for-AES.patch | 54 ++ ...e-allow-building-only-hashes-ciphers.patch | 428 ++++++++++++++++ ...e-use-cryptlen-when-adding-extra-sgl.patch | 91 ++++ ...-use-AES-fallback-for-small-requests.patch | 117 +++++ ...-handle-AES-XTS-cases-that-qce-fails.patch | 61 +++ 23 files changed, 2995 insertions(+), 16 deletions(-) rename target/linux/ipq40xx/patches-4.19/{181-crypto-qce-add-CRYPTO_ALG_KERN_DRIVER_ONLY-flag.patch => 039-crypto-qce-add-CRYPTO_ALG_KERN_DRIVER_ONLY-flag.patch} (100%) create mode 100644 target/linux/ipq40xx/patches-4.19/040-crypto-qce-switch-to-skcipher-API.patch create mode 100644 target/linux/ipq40xx/patches-4.19/041-crypto-qce-fix-ctr-aes-qce-block-chunk-sizes.patch create mode 100644 target/linux/ipq40xx/patches-4.19/042-crypto-qce-fix-xts-aes-qce-key-sizes.patch create mode 100644 target/linux/ipq40xx/patches-4.19/043-crypto-qce-save-a-sg-table-slot-for-result-buf.patch create mode 100644 target/linux/ipq40xx/patches-4.19/044-crypto-qce-update-the-skcipher-IV.patch create mode 100644 target/linux/ipq40xx/patches-4.19/046-crypto-qce-initialize-fallback-only-for-AES.patch create mode 100644 target/linux/ipq40xx/patches-4.19/047-crypto-qce-use-cryptlen-when-adding-extra-sgl.patch create mode 100644 target/linux/ipq40xx/patches-4.19/048-crypto-qce-use-AES-fallback-for-small-requests.patch create mode 100644 target/linux/ipq40xx/patches-4.19/049-crypto-qce-handle-AES-XTS-cases-that-qce-fails.patch create mode 100644 target/linux/ipq40xx/patches-4.19/051-crypto-qce-allow-building-only-hashes-ciphers.patch create mode 100644 target/linux/ipq40xx/patches-5.4/040-crypto-qce-switch-to-skcipher-API.patch create mode 100644 target/linux/ipq40xx/patches-5.4/041-crypto-qce-fix-ctr-aes-qce-block-chunk-sizes.patch create mode 100644 target/linux/ipq40xx/patches-5.4/042-crypto-qce-fix-xts-aes-qce-key-sizes.patch create mode 100644 target/linux/ipq40xx/patches-5.4/043-crypto-qce-save-a-sg-table-slot-for-result-buf.patch create mode 100644 target/linux/ipq40xx/patches-5.4/044-crypto-qce-update-the-skcipher-IV.patch create mode 100644 target/linux/ipq40xx/patches-5.4/045-crypto-qce-initialize-fallback-only-for-AES.patch create mode 100644 target/linux/ipq40xx/patches-5.4/046-crypto-qce-allow-building-only-hashes-ciphers.patch create mode 100644 
target/linux/ipq40xx/patches-5.4/047-crypto-qce-use-cryptlen-when-adding-extra-sgl.patch create mode 100644 target/linux/ipq40xx/patches-5.4/048-crypto-qce-use-AES-fallback-for-small-requests.patch create mode 100644 target/linux/ipq40xx/patches-5.4/049-crypto-qce-handle-AES-XTS-cases-that-qce-fails.patch diff --git a/package/kernel/linux/modules/crypto.mk b/package/kernel/linux/modules/crypto.mk index dc7eb7798..72eef08ff 100644 --- a/package/kernel/linux/modules/crypto.mk +++ b/package/kernel/linux/modules/crypto.mk @@ -263,12 +263,23 @@ $(eval $(call KernelPackage,crypto-gf128)) define KernelPackage/crypto-ghash TITLE:=GHASH digest CryptoAPI module DEPENDS:=+kmod-crypto-gf128 +kmod-crypto-hash - KCONFIG:=CONFIG_CRYPTO_GHASH + KCONFIG:= \ + CONFIG_CRYPTO_GHASH \ + CONFIG_CRYPTO_GHASH_ARM_CE FILES:=$(LINUX_DIR)/crypto/ghash-generic.ko AUTOLOAD:=$(call AutoLoad,09,ghash-generic) $(call AddDepends/crypto) endef +define KernelPackage/crypto-ghash/arm-ce + FILES+= $(LINUX_DIR)/arch/arm/crypto/ghash-arm-ce.ko + AUTOLOAD+=$(call AutoLoad,09,ghash-arm-ce) +endef + +KernelPackage/crypto-ghash/imx6=$(KernelPackage/crypto-ghash/arm-ce) +KernelPackage/crypto-ghash/ipq40xx=$(KernelPackage/crypto-ghash/arm-ce) +KernelPackage/crypto-ghash/mvebu=$(KernelPackage/crypto-ghash/arm-ce) + $(eval $(call KernelPackage,crypto-ghash)) @@ -361,6 +372,78 @@ endef $(eval $(call KernelPackage,crypto-hw-padlock)) +define KernelPackage/crypto-hw-qce + TITLE:=Qualcomm Crypto Engine hw crypto module + DEPENDS:= @TARGET_ipq40xx +kmod-crypto-manager \ + +QCE_SKCIPHER:kmod-crypto-des \ + +QCE_SKCIPHER:kmod-crypto-ecb \ + +QCE_SKCIPHER:kmod-crypto-cbc \ + +QCE_SKCIPHER:kmod-crypto-xts \ + +QCE_SKCIPHER:kmod-crypto-ctr + KCONFIG:= \ + CONFIG_CRYPTO_HW=y \ + CONFIG_CRYPTO_DEV_QCE + FILES:= $(LINUX_DIR)/drivers/crypto/qce/qcrypto.ko + AUTOLOAD:=$(call AutoLoad,09,qcrypto) + $(call AddDepends/crypto) +endef + +define KernelPackage/crypto-hw-qce/config + if PACKAGE_kmod-crypto-hw-qce + config QCE_SKCIPHER + bool + choice + prompt "Algorithms enabled for QCE acceleration" + default KERNEL_CRYPTO_DEV_QCE_ENABLE_SKCIPHER + help + The Qualcomm Crypto Engine is shown to severely slowdown ipsec, + especially when built with all supported algorithms. + When performing crypto in small blocks, typical of network usage, + the neon asm drivers will outperform it. + QCE is fast when fed with larger blocks. If you are able to use + jumbo frames, it will be much faster than software. + Hashes are troublesome. They fail the tcrypt multibuffer tests, and + are slower than the Neon drivers, so the default is to enable + symmetric-key ciphers only. + + config KERNEL_CRYPTO_DEV_QCE_ENABLE_ALL + bool "All supported algorithms" + select QCE_SKCIPHER + config KERNEL_CRYPTO_DEV_QCE_ENABLE_SKCIPHER + bool "Symmetric-key ciphers only" + select QCE_SKCIPHER + config KERNEL_CRYPTO_DEV_QCE_ENABLE_SHA + bool "Hash/HMAC only" + endchoice + + config KERNEL_CRYPTO_DEV_QCE_SW_MAX_LEN + int "Default maximum request size to use software for AES" + depends on QCE_SKCIPHER + default 512 + help + This sets the default maximum request size to perform AES requests + using software instead of the crypto engine. It can be changed by + setting the aes_sw_max_len parameter. + + Small blocks are processed faster in software than hardware. + Considering the 256-bit ciphers, software is 2-3 times faster than + qce at 256-bytes, 30% faster at 512, and about even at 768-bytes. + With 128-bit keys, the break-even point would be around 1024-bytes. 
+ + The default is set a little lower, to 512 bytes, to balance the + cost in CPU usage. The minimum recommended setting is 16-bytes + (1 AES block), since AES-GCM will fail if you set it lower. + Setting this to zero will send all requests to the hardware. + + Note that 192-bit keys are not supported by the hardware and are + always processed by the software fallback, and all DES requests + are done by the hardware. + endif +endef + +$(eval $(call KernelPackage,crypto-hw-qce)) + + define KernelPackage/crypto-hw-safexcel TITLE:= MVEBU SafeXcel Crypto Engine module DEPENDS:=@!LINUX_4_14 @(TARGET_mvebu_cortexa53||TARGET_mvebu_cortexa72) \ @@ -684,6 +767,8 @@ define KernelPackage/crypto-sha1 DEPENDS:=+kmod-crypto-hash KCONFIG:= \ CONFIG_CRYPTO_SHA1 \ + CONFIG_CRYPTO_SHA1_ARM \ + CONFIG_CRYPTO_SHA1_ARM_NEON \ CONFIG_CRYPTO_SHA1_OCTEON \ CONFIG_CRYPTO_SHA1_SSSE3 FILES:=$(LINUX_DIR)/crypto/sha1_generic.ko @@ -691,11 +776,30 @@ define KernelPackage/crypto-sha1 $(call AddDepends/crypto) endef +define KernelPackage/crypto-sha1/arm + FILES+=$(LINUX_DIR)/arch/arm/crypto/sha1-arm.ko + AUTOLOAD+=$(call AutoLoad,09,sha1-arm) +endef + +define KernelPackage/crypto-sha1/arm-neon + $(call KernelPackage/crypto-sha1/arm) + FILES+=$(LINUX_DIR)/arch/arm/crypto/sha1-arm-neon.ko + AUTOLOAD+=$(call AutoLoad,09,sha1-arm-neon) +endef + +KernelPackage/crypto-sha1/imx6=$(KernelPackage/crypto-sha1/arm-neon) + +KernelPackage/crypto-sha1/ipq40xx=$(KernelPackage/crypto-sha1/arm-neon) + +KernelPackage/crypto-sha1/mvebu=$(KernelPackage/crypto-sha1/arm-neon) + define KernelPackage/crypto-sha1/octeon FILES+=$(LINUX_DIR)/arch/mips/cavium-octeon/crypto/octeon-sha1.ko AUTOLOAD+=$(call AutoLoad,09,octeon-sha1) endef +KernelPackage/crypto-sha1/tegra=$(KernelPakcage/crypto-sha1/arm) + define KernelPackage/crypto-sha1/x86/64 FILES+=$(LINUX_DIR)/arch/x86/crypto/sha1-ssse3.ko AUTOLOAD+=$(call AutoLoad,09,sha1-ssse3) @@ -734,6 +838,7 @@ define KernelPackage/crypto-sha512 DEPENDS:=+kmod-crypto-hash KCONFIG:= \ CONFIG_CRYPTO_SHA512 \ + CONFIG_CRYPTO_SHA512_ARM \ CONFIG_CRYPTO_SHA512_OCTEON \ CONFIG_CRYPTO_SHA512_SSSE3 FILES:=$(LINUX_DIR)/crypto/sha512_generic.ko @@ -741,11 +846,24 @@ define KernelPackage/crypto-sha512 $(call AddDepends/crypto) endef +define KernelPackage/crypto-sha512/arm + FILES+=$(LINUX_DIR)/arch/arm/crypto/sha512-arm.ko + AUTOLOAD+=$(call AutoLoad,09,sha512-arm) +endef + +KernelPackage/crypto-sha512/imx6=$(KernelPackage/crypto-sha512/arm) + +KernelPackage/crypto-sha512/ipq40xx=$(KernelPackage/crypto-sha512/arm) + +KernelPackage/crypto-sha512/mvebu=$(KernelPackage/crypto-sha512/arm) + define KernelPackage/crypto-sha512/octeon FILES+=$(LINUX_DIR)/arch/mips/cavium-octeon/crypto/octeon-sha512.ko AUTOLOAD+=$(call AutoLoad,09,octeon-sha512) endef +KernelPackage/crypto-sha512/tegra=$(KernelPackage/crypto-sha512/arm) + define KernelPackage/crypto-sha512/x86/64 FILES+=$(LINUX_DIR)/arch/x86/crypto/sha512-ssse3.ko AUTOLOAD+=$(call AutoLoad,09,sha512-ssse3) diff --git a/target/linux/ipq40xx/config-4.19 b/target/linux/ipq40xx/config-4.19 index 9ff9e7f3e..49227d3e9 100644 --- a/target/linux/ipq40xx/config-4.19 +++ b/target/linux/ipq40xx/config-4.19 @@ -47,6 +47,7 @@ CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y CONFIG_ARM_CPUIDLE=y CONFIG_ARM_CPU_SUSPEND=y # CONFIG_ARM_CPU_TOPOLOGY is not set +CONFIG_ARM_CRYPTO=y CONFIG_ARM_GIC=y CONFIG_ARM_HAS_SG_CHAIN=y CONFIG_ARM_L1_CACHE_SHIFT=6 @@ -115,34 +116,26 @@ CONFIG_CRC32_SLICEBY8=y CONFIG_CRYPTO_ACOMP2=y CONFIG_CRYPTO_AEAD=y CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CTR=y 
+CONFIG_CRYPTO_AES_ARM=y +CONFIG_CRYPTO_AES_ARM_BS=y CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_DES=y -CONFIG_CRYPTO_DEV_QCE=y CONFIG_CRYPTO_DEV_QCOM_RNG=y -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_DRBG_HMAC=y -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_GF128MUL=y +# CONFIG_CRYPTO_GHASH_ARM_CE is not set CONFIG_CRYPTO_HASH=y CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_JITTERENTROPY=y CONFIG_CRYPTO_LZO=y CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_MANAGER2=y -CONFIG_CRYPTO_NULL=y CONFIG_CRYPTO_NULL2=y CONFIG_CRYPTO_RNG=y CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=y -CONFIG_CRYPTO_SEQIV=y +# CONFIG_CRYPTO_SHA1_ARM_CE is not set +# CONFIG_CRYPTO_SHA1_ARM_NEON is not set CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA256_ARM=y +CONFIG_CRYPTO_SIMD=y CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_XTS=y CONFIG_DCACHE_WORD_ACCESS=y CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S" # CONFIG_DEBUG_USER is not set diff --git a/target/linux/ipq40xx/patches-4.19/181-crypto-qce-add-CRYPTO_ALG_KERN_DRIVER_ONLY-flag.patch b/target/linux/ipq40xx/patches-4.19/039-crypto-qce-add-CRYPTO_ALG_KERN_DRIVER_ONLY-flag.patch similarity index 100% rename from target/linux/ipq40xx/patches-4.19/181-crypto-qce-add-CRYPTO_ALG_KERN_DRIVER_ONLY-flag.patch rename to target/linux/ipq40xx/patches-4.19/039-crypto-qce-add-CRYPTO_ALG_KERN_DRIVER_ONLY-flag.patch diff --git a/target/linux/ipq40xx/patches-4.19/040-crypto-qce-switch-to-skcipher-API.patch b/target/linux/ipq40xx/patches-4.19/040-crypto-qce-switch-to-skcipher-API.patch new file mode 100644 index 000000000..2adfe622e --- /dev/null +++ b/target/linux/ipq40xx/patches-4.19/040-crypto-qce-switch-to-skcipher-API.patch @@ -0,0 +1,463 @@ +From f441873642eebf20566c18d2966a8cd4b433ec1c Mon Sep 17 00:00:00 2001 +From: Ard Biesheuvel +Date: Tue, 5 Nov 2019 14:28:17 +0100 +Subject: [PATCH] crypto: qce - switch to skcipher API + +Commit 7a7ffe65c8c5 ("crypto: skcipher - Add top-level skcipher interface") +dated 20 august 2015 introduced the new skcipher API which is supposed to +replace both blkcipher and ablkcipher. While all consumers of the API have +been converted long ago, some producers of the ablkcipher remain, forcing +us to keep the ablkcipher support routines alive, along with the matching +code to expose [a]blkciphers via the skcipher API. + +So switch this driver to the skcipher API, allowing us to finally drop the +blkcipher code in the near future. 
+ +Reviewed-by: Stanimir Varbanov +Signed-off-by: Ard Biesheuvel +Backported-to-4.19-by: Eneas U de Queiroz + +diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile +index 19a7f899acff..8caa04e1ec43 100644 +--- a/drivers/crypto/qce/Makefile ++++ b/drivers/crypto/qce/Makefile +@@ -4,4 +4,4 @@ qcrypto-objs := core.o \ + common.o \ + dma.o \ + sha.o \ +- ablkcipher.o ++ skcipher.o +diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h +index 2b0278bb6e92..f93fab1dd1ff 100644 +--- a/drivers/crypto/qce/cipher.h ++++ b/drivers/crypto/qce/cipher.h +@@ -53,12 +53,12 @@ struct qce_cipher_reqctx { + unsigned int cryptlen; + }; + +-static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm) ++static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm) + { +- struct crypto_alg *alg = tfm->__crt_alg; +- return container_of(alg, struct qce_alg_template, alg.crypto); ++ struct skcipher_alg *alg = crypto_skcipher_alg(tfm); ++ return container_of(alg, struct qce_alg_template, alg.skcipher); + } + +-extern const struct qce_algo_ops ablkcipher_ops; ++extern const struct qce_algo_ops skcipher_ops; + + #endif /* _CIPHER_H_ */ +diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c +index 1fb5fde7fc03..e0202755682b 100644 +--- a/drivers/crypto/qce/common.c ++++ b/drivers/crypto/qce/common.c +@@ -312,13 +312,13 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req, + return 0; + } + +-static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req, ++static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, + u32 totallen, u32 offset) + { +- struct ablkcipher_request *req = ablkcipher_request_cast(async_req); +- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); ++ struct skcipher_request *req = skcipher_request_cast(async_req); ++ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm); +- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); ++ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); + struct qce_device *qce = tmpl->qce; + __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0}; + __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0}; +@@ -397,8 +397,8 @@ int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, + u32 offset) + { + switch (type) { +- case CRYPTO_ALG_TYPE_ABLKCIPHER: +- return qce_setup_regs_ablkcipher(async_req, totallen, offset); ++ case CRYPTO_ALG_TYPE_SKCIPHER: ++ return qce_setup_regs_skcipher(async_req, totallen, offset); + case CRYPTO_ALG_TYPE_AHASH: + return qce_setup_regs_ahash(async_req, totallen, offset); + default: +diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h +index a4addd4f7d6c..3252efa41e7a 100644 +--- a/drivers/crypto/qce/common.h ++++ b/drivers/crypto/qce/common.h +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + + /* key size in bytes */ + #define QCE_SHA_HMAC_KEY_SIZE 64 +@@ -87,7 +88,7 @@ struct qce_alg_template { + unsigned long alg_flags; + const u32 *std_iv; + union { +- struct crypto_alg crypto; ++ struct skcipher_alg skcipher; + struct ahash_alg ahash; + } alg; + struct qce_device *qce; +diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c +index 1c3b36b75467..bf409edc23ab 100644 +--- a/drivers/crypto/qce/core.c ++++ b/drivers/crypto/qce/core.c +@@ -30,7 +30,7 @@ + #define QCE_QUEUE_LENGTH 1 + + static const struct qce_algo_ops 
*qce_ops[] = { +- &ablkcipher_ops, ++ &skcipher_ops, + &ahash_ops, + }; + +diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/skcipher.c +similarity index 62% +rename from drivers/crypto/qce/ablkcipher.c +rename to drivers/crypto/qce/skcipher.c +index 3658c46ef9c7..0376bb969834 100644 +--- a/drivers/crypto/qce/ablkcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -20,14 +20,14 @@ + + #include "cipher.h" + +-static LIST_HEAD(ablkcipher_algs); ++static LIST_HEAD(skcipher_algs); + +-static void qce_ablkcipher_done(void *data) ++static void qce_skcipher_done(void *data) + { + struct crypto_async_request *async_req = data; +- struct ablkcipher_request *req = ablkcipher_request_cast(async_req); +- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); +- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); ++ struct skcipher_request *req = skcipher_request_cast(async_req); ++ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); ++ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); + struct qce_device *qce = tmpl->qce; + enum dma_data_direction dir_src, dir_dst; + u32 status; +@@ -40,7 +40,7 @@ static void qce_ablkcipher_done(void *data) + + error = qce_dma_terminate_all(&qce->dma); + if (error) +- dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n", ++ dev_dbg(qce->dev, "skcipher dma termination error (%d)\n", + error); + + if (diff_dst) +@@ -51,18 +51,18 @@ static void qce_ablkcipher_done(void *data) + + error = qce_check_status(qce, &status); + if (error < 0) +- dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status); ++ dev_dbg(qce->dev, "skcipher operation error (%x)\n", status); + + qce->async_req_done(tmpl->qce, error); + } + + static int +-qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) ++qce_skcipher_async_req_handle(struct crypto_async_request *async_req) + { +- struct ablkcipher_request *req = ablkcipher_request_cast(async_req); +- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); +- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); +- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); ++ struct skcipher_request *req = skcipher_request_cast(async_req); ++ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); ++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); ++ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); + struct qce_device *qce = tmpl->qce; + enum dma_data_direction dir_src, dir_dst; + struct scatterlist *sg; +@@ -70,17 +70,17 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) + gfp_t gfp; + int ret; + +- rctx->iv = req->info; +- rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher); +- rctx->cryptlen = req->nbytes; ++ rctx->iv = req->iv; ++ rctx->ivsize = crypto_skcipher_ivsize(skcipher); ++ rctx->cryptlen = req->cryptlen; + + diff_dst = (req->src != req->dst) ? true : false; + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + dir_dst = diff_dst ? 
DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; + +- rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); ++ rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen); + if (diff_dst) +- rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); ++ rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen); + else + rctx->dst_nents = rctx->src_nents; + if (rctx->src_nents < 0) { +@@ -133,13 +133,13 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) + + ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents, + rctx->dst_sg, rctx->dst_nents, +- qce_ablkcipher_done, async_req); ++ qce_skcipher_done, async_req); + if (ret) + goto error_unmap_src; + + qce_dma_issue_pending(&qce->dma); + +- ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0); ++ ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0); + if (ret) + goto error_terminate; + +@@ -157,12 +157,11 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) + return ret; + } + +-static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, ++static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, + unsigned int keylen) + { +- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk); +- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); +- unsigned long flags = to_cipher_tmpl(tfm)->alg_flags; ++ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk); ++ unsigned long flags = to_cipher_tmpl(ablk)->alg_flags; + int ret; + + if (!key || !keylen) +@@ -180,7 +179,7 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, + u32 tmp[DES_EXPKEY_WORDS]; + + ret = des_ekey(tmp, key); +- if (!ret && crypto_ablkcipher_get_flags(ablk) & ++ if (!ret && crypto_skcipher_get_flags(ablk) & + CRYPTO_TFM_REQ_WEAK_KEY) + goto weakkey; + } +@@ -194,16 +193,15 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, + ctx->enc_keylen = keylen; + return ret; + weakkey: +- crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY); ++ crypto_skcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; + } + +-static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) ++static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) + { +- struct crypto_tfm *tfm = +- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); +- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); +- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); ++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); ++ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); + int ret; + +@@ -218,7 +216,7 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, +- req->nbytes, req->info); ++ req->cryptlen, req->iv); + ret = encrypt ? 
crypto_skcipher_encrypt(subreq) : + crypto_skcipher_decrypt(subreq); + skcipher_request_zero(subreq); +@@ -228,37 +226,37 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) + return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base); + } + +-static int qce_ablkcipher_encrypt(struct ablkcipher_request *req) ++static int qce_skcipher_encrypt(struct skcipher_request *req) + { +- return qce_ablkcipher_crypt(req, 1); ++ return qce_skcipher_crypt(req, 1); + } + +-static int qce_ablkcipher_decrypt(struct ablkcipher_request *req) ++static int qce_skcipher_decrypt(struct skcipher_request *req) + { +- return qce_ablkcipher_crypt(req, 0); ++ return qce_skcipher_crypt(req, 0); + } + +-static int qce_ablkcipher_init(struct crypto_tfm *tfm) ++static int qce_skcipher_init(struct crypto_skcipher *tfm) + { +- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); ++ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + memset(ctx, 0, sizeof(*ctx)); +- tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx); ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx)); + +- ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0, +- CRYPTO_ALG_ASYNC | +- CRYPTO_ALG_NEED_FALLBACK); ++ ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base), ++ 0, CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_NEED_FALLBACK); + return PTR_ERR_OR_ZERO(ctx->fallback); + } + +-static void qce_ablkcipher_exit(struct crypto_tfm *tfm) ++static void qce_skcipher_exit(struct crypto_skcipher *tfm) + { +- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); ++ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + crypto_free_skcipher(ctx->fallback); + } + +-struct qce_ablkcipher_def { ++struct qce_skcipher_def { + unsigned long flags; + const char *name; + const char *drv_name; +@@ -268,7 +266,7 @@ struct qce_ablkcipher_def { + unsigned int max_keysize; + }; + +-static const struct qce_ablkcipher_def ablkcipher_def[] = { ++static const struct qce_skcipher_def skcipher_def[] = { + { + .flags = QCE_ALG_AES | QCE_MODE_ECB, + .name = "ecb(aes)", +@@ -343,89 +341,89 @@ static const struct qce_ablkcipher_def ablkcipher_def[] = { + }, + }; + +-static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def, ++static int qce_skcipher_register_one(const struct qce_skcipher_def *def, + struct qce_device *qce) + { + struct qce_alg_template *tmpl; +- struct crypto_alg *alg; ++ struct skcipher_alg *alg; + int ret; + + tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); + if (!tmpl) + return -ENOMEM; + +- alg = &tmpl->alg.crypto; ++ alg = &tmpl->alg.skcipher; + +- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); +- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", ++ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); ++ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + +- alg->cra_blocksize = def->blocksize; +- alg->cra_ablkcipher.ivsize = def->ivsize; +- alg->cra_ablkcipher.min_keysize = def->min_keysize; +- alg->cra_ablkcipher.max_keysize = def->max_keysize; +- alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey; +- alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt; +- alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt; +- +- alg->cra_priority = 300; +- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | +- CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY; +- alg->cra_ctxsize = sizeof(struct qce_cipher_ctx); +- alg->cra_alignmask = 0; +- alg->cra_type = &crypto_ablkcipher_type; +- 
alg->cra_module = THIS_MODULE; +- alg->cra_init = qce_ablkcipher_init; +- alg->cra_exit = qce_ablkcipher_exit; +- INIT_LIST_HEAD(&alg->cra_list); ++ alg->base.cra_blocksize = def->blocksize; ++ alg->ivsize = def->ivsize; ++ alg->min_keysize = def->min_keysize; ++ alg->max_keysize = def->max_keysize; ++ alg->setkey = qce_skcipher_setkey; ++ alg->encrypt = qce_skcipher_encrypt; ++ alg->decrypt = qce_skcipher_decrypt; ++ ++ alg->base.cra_priority = 300; ++ alg->base.cra_flags = CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_NEED_FALLBACK | ++ CRYPTO_ALG_KERN_DRIVER_ONLY; ++ alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx); ++ alg->base.cra_alignmask = 0; ++ alg->base.cra_module = THIS_MODULE; ++ ++ alg->init = qce_skcipher_init; ++ alg->exit = qce_skcipher_exit; + + INIT_LIST_HEAD(&tmpl->entry); +- tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER; ++ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER; + tmpl->alg_flags = def->flags; + tmpl->qce = qce; + +- ret = crypto_register_alg(alg); ++ ret = crypto_register_skcipher(alg); + if (ret) { + kfree(tmpl); +- dev_err(qce->dev, "%s registration failed\n", alg->cra_name); ++ dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name); + return ret; + } + +- list_add_tail(&tmpl->entry, &ablkcipher_algs); +- dev_dbg(qce->dev, "%s is registered\n", alg->cra_name); ++ list_add_tail(&tmpl->entry, &skcipher_algs); ++ dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name); + return 0; + } + +-static void qce_ablkcipher_unregister(struct qce_device *qce) ++static void qce_skcipher_unregister(struct qce_device *qce) + { + struct qce_alg_template *tmpl, *n; + +- list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) { +- crypto_unregister_alg(&tmpl->alg.crypto); ++ list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) { ++ crypto_unregister_skcipher(&tmpl->alg.skcipher); + list_del(&tmpl->entry); + kfree(tmpl); + } + } + +-static int qce_ablkcipher_register(struct qce_device *qce) ++static int qce_skcipher_register(struct qce_device *qce) + { + int ret, i; + +- for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) { +- ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce); ++ for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) { ++ ret = qce_skcipher_register_one(&skcipher_def[i], qce); + if (ret) + goto err; + } + + return 0; + err: +- qce_ablkcipher_unregister(qce); ++ qce_skcipher_unregister(qce); + return ret; + } + +-const struct qce_algo_ops ablkcipher_ops = { +- .type = CRYPTO_ALG_TYPE_ABLKCIPHER, +- .register_algs = qce_ablkcipher_register, +- .unregister_algs = qce_ablkcipher_unregister, +- .async_req_handle = qce_ablkcipher_async_req_handle, ++const struct qce_algo_ops skcipher_ops = { ++ .type = CRYPTO_ALG_TYPE_SKCIPHER, ++ .register_algs = qce_skcipher_register, ++ .unregister_algs = qce_skcipher_unregister, ++ .async_req_handle = qce_skcipher_async_req_handle, + }; diff --git a/target/linux/ipq40xx/patches-4.19/041-crypto-qce-fix-ctr-aes-qce-block-chunk-sizes.patch b/target/linux/ipq40xx/patches-4.19/041-crypto-qce-fix-ctr-aes-qce-block-chunk-sizes.patch new file mode 100644 index 000000000..7d822c59d --- /dev/null +++ b/target/linux/ipq40xx/patches-4.19/041-crypto-qce-fix-ctr-aes-qce-block-chunk-sizes.patch @@ -0,0 +1,41 @@ +From 3f5598286445f695bb63a22239dd3603c69a6eaf Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Mon, 28 Oct 2019 09:03:07 -0300 +Subject: [PATCH] crypto: qce - fix ctr-aes-qce block, chunk sizes + +Set blocksize of ctr-aes-qce to 1, so it can operate as a stream cipher, +adding the definition for chucksize 
instead, where the underlying block +size belongs. + +Signed-off-by: Eneas U de Queiroz + +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index 0376bb969834..0776286cfc9e 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -261,6 +261,7 @@ struct qce_skcipher_def { + const char *name; + const char *drv_name; + unsigned int blocksize; ++ unsigned int chunksize; + unsigned int ivsize; + unsigned int min_keysize; + unsigned int max_keysize; +@@ -289,7 +290,8 @@ static const struct qce_skcipher_def skcipher_def[] = { + .flags = QCE_ALG_AES | QCE_MODE_CTR, + .name = "ctr(aes)", + .drv_name = "ctr-aes-qce", +- .blocksize = AES_BLOCK_SIZE, ++ .blocksize = 1, ++ .chunksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, +@@ -359,6 +361,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, + def->drv_name); + + alg->base.cra_blocksize = def->blocksize; ++ alg->chunksize = def->chunksize; + alg->ivsize = def->ivsize; + alg->min_keysize = def->min_keysize; + alg->max_keysize = def->max_keysize; diff --git a/target/linux/ipq40xx/patches-4.19/042-crypto-qce-fix-xts-aes-qce-key-sizes.patch b/target/linux/ipq40xx/patches-4.19/042-crypto-qce-fix-xts-aes-qce-key-sizes.patch new file mode 100644 index 000000000..dd58dcd80 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.19/042-crypto-qce-fix-xts-aes-qce-key-sizes.patch @@ -0,0 +1,52 @@ +From 0138c3c13809250338d7cfba6f4ca3b2da02b2c8 Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Thu, 21 Nov 2019 14:28:23 -0300 +Subject: [PATCH] crypto: qce - fix xts-aes-qce key sizes + +XTS-mode uses two keys, so the keysizes should be doubled in +skcipher_def, and halved when checking if it is AES-128/192/256. + +Signed-off-by: Eneas U de Queiroz + +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index 0776286cfc9e..9b1bb32515b4 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -168,7 +168,7 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, + return -EINVAL; + + if (IS_AES(flags)) { +- switch (keylen) { ++ switch (IS_XTS(flags) ? keylen >> 1 : keylen) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_256: + break; +@@ -203,13 +203,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) + struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); ++ int keylen; + int ret; + + rctx->flags = tmpl->alg_flags; + rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; ++ keylen = IS_XTS(rctx->flags) ? 
ctx->enc_keylen >> 1 : ctx->enc_keylen; + +- if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && +- ctx->enc_keylen != AES_KEYSIZE_256) { ++ if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 && ++ keylen != AES_KEYSIZE_256) { + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_tfm(subreq, ctx->fallback); +@@ -302,8 +304,8 @@ static const struct qce_skcipher_def skcipher_def[] = { + .drv_name = "xts-aes-qce", + .blocksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, +- .min_keysize = AES_MIN_KEY_SIZE, +- .max_keysize = AES_MAX_KEY_SIZE, ++ .min_keysize = AES_MIN_KEY_SIZE * 2, ++ .max_keysize = AES_MAX_KEY_SIZE * 2, + }, + { + .flags = QCE_ALG_DES | QCE_MODE_ECB, diff --git a/target/linux/ipq40xx/patches-4.19/043-crypto-qce-save-a-sg-table-slot-for-result-buf.patch b/target/linux/ipq40xx/patches-4.19/043-crypto-qce-save-a-sg-table-slot-for-result-buf.patch new file mode 100644 index 000000000..9f107db03 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.19/043-crypto-qce-save-a-sg-table-slot-for-result-buf.patch @@ -0,0 +1,85 @@ +From 31f796293b6c38126a466414c565827b9cfdbe39 Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Wed, 20 Nov 2019 21:39:11 -0300 +Subject: [PATCH] crypto: qce - save a sg table slot for result buf + +When ctr-aes-qce is used for gcm-mode, an extra sg entry for the +authentication tag is present, causing trouble when the qce driver +prepares the dst-results eg table for dma. + +It computes the number of entries needed with sg_nents_for_len, leaving +out the tag entry. Then it creates a sg table with that number plus +one, used to store a "result" sg. + +When copying the sg table, it does not limit the number of entries +copied, so tha extra slot is filled with the authentication tag sg. +When the driver tries to add the result sg, the list is full, and it +returns EINVAL. + +By limiting the number of sg entries copied to the dest table, the slot +for the result buffer is guaranteed to be unused. 
+ +Signed-off-by: Eneas U de Queiroz + +diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c +index 4797e795c9b9..db560c3d3e4f 100644 +--- a/drivers/crypto/qce/dma.c ++++ b/drivers/crypto/qce/dma.c +@@ -55,7 +55,8 @@ void qce_dma_release(struct qce_dma_data *dma) + } + + struct scatterlist * +-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) ++qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl, ++ int max_ents) + { + struct scatterlist *sg = sgt->sgl, *sg_last = NULL; + +@@ -68,12 +69,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) + if (!sg) + return ERR_PTR(-EINVAL); + +- while (new_sgl && sg) { ++ while (new_sgl && sg && max_ents) { + sg_set_page(sg, sg_page(new_sgl), new_sgl->length, + new_sgl->offset); + sg_last = sg; + sg = sg_next(sg); + new_sgl = sg_next(new_sgl); ++ max_ents--; + } + + return sg_last; +diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h +index 130235d17bb4..0be71f7f7a58 100644 +--- a/drivers/crypto/qce/dma.h ++++ b/drivers/crypto/qce/dma.h +@@ -50,6 +50,7 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in, + void qce_dma_issue_pending(struct qce_dma_data *dma); + int qce_dma_terminate_all(struct qce_dma_data *dma); + struct scatterlist * +-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add); ++qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add, ++ int max_ents); + + #endif /* _DMA_H_ */ +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index 12f882032544..33d998f5cf5f 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -103,13 +103,13 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req) + + sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); + +- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst); ++ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; + } + +- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg); ++ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; diff --git a/target/linux/ipq40xx/patches-4.19/044-crypto-qce-update-the-skcipher-IV.patch b/target/linux/ipq40xx/patches-4.19/044-crypto-qce-update-the-skcipher-IV.patch new file mode 100644 index 000000000..de7829e76 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.19/044-crypto-qce-update-the-skcipher-IV.patch @@ -0,0 +1,29 @@ +From 502ca0b7c1d856a46dbd78e67690c12c47775b97 Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Fri, 22 Nov 2019 09:00:02 -0300 +Subject: [PATCH] crypto: qce - update the skcipher IV + +Update the IV after the completion of each cipher operation. 
+ +Signed-off-by: Eneas U de Queiroz + +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index 33d998f5cf5f..51377395ed53 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -29,6 +29,7 @@ static void qce_skcipher_done(void *data) + struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); + struct qce_device *qce = tmpl->qce; ++ struct qce_result_dump *result_buf = qce->dma.result_buf; + enum dma_data_direction dir_src, dir_dst; + u32 status; + int error; +@@ -53,6 +54,7 @@ static void qce_skcipher_done(void *data) + if (error < 0) + dev_dbg(qce->dev, "skcipher operation error (%x)\n", status); + ++ memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize); + qce->async_req_done(tmpl->qce, error); + } + diff --git a/target/linux/ipq40xx/patches-4.19/046-crypto-qce-initialize-fallback-only-for-AES.patch b/target/linux/ipq40xx/patches-4.19/046-crypto-qce-initialize-fallback-only-for-AES.patch new file mode 100644 index 000000000..b673884ba --- /dev/null +++ b/target/linux/ipq40xx/patches-4.19/046-crypto-qce-initialize-fallback-only-for-AES.patch @@ -0,0 +1,56 @@ +From f2a33ce18232919d3831d1c61a06b6067209282d Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Fri, 22 Nov 2019 09:34:29 -0300 +Subject: [PATCH] crypto: qce - initialize fallback only for AES + +Adjust cra_flags to add CRYPTO_NEED_FALLBACK only for AES ciphers, where +AES-192 is not handled by the qce hardware, and don't allocate & free +the fallback skcipher for anything other than AES. + +The rest of the code is unchanged, as the use of the fallback is already +restricted to AES. + +Signed-off-by: Eneas U de Queiroz + +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index 51377395ed53..5a4863091f2a 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -246,7 +246,15 @@ static int qce_skcipher_init(struct crypto_skcipher *tfm) + + memset(ctx, 0, sizeof(*ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx)); ++ return 0; ++} ++ ++static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm) ++{ ++ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ int ret; + ++ qce_skcipher_init(tfm); + ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base), + 0, CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); +@@ -375,14 +383,18 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, + + alg->base.cra_priority = 300; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | +- CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx); + alg->base.cra_alignmask = 0; + alg->base.cra_module = THIS_MODULE; + +- alg->init = qce_skcipher_init; +- alg->exit = qce_skcipher_exit; ++ if (IS_AES(def->flags)) { ++ alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK; ++ alg->init = qce_skcipher_init_fallback; ++ alg->exit = qce_skcipher_exit; ++ } else { ++ alg->init = qce_skcipher_init; ++ } + + INIT_LIST_HEAD(&tmpl->entry); + tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER; diff --git a/target/linux/ipq40xx/patches-4.19/047-crypto-qce-use-cryptlen-when-adding-extra-sgl.patch b/target/linux/ipq40xx/patches-4.19/047-crypto-qce-use-cryptlen-when-adding-extra-sgl.patch new file mode 100644 index 000000000..8003f7502 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.19/047-crypto-qce-use-cryptlen-when-adding-extra-sgl.patch @@ -0,0 +1,89 @@ +From 
686aa4db696270dadc5e8b2971769e1676251ff1 Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Fri, 31 Jan 2020 17:43:16 -0300 +Subject: [PATCH] crypto: qce - use cryptlen when adding extra sgl + +The qce crypto driver appends an extra entry to the dst sgl, to maintain +private state information. + +When the gcm driver sends requests to the ctr skcipher, it passes the +authentication tag after the actual crypto payload, but it must not be +touched. + +Commit 1336c2221bee ("crypto: qce - save a sg table slot for result +buf") limited the destination sgl to avoid overwriting the +authentication tag but it assumed the tag would be in a separate sgl +entry. + +This is not always the case, so it is better to limit the length of the +destination buffer to req->cryptlen before appending the result buf. + +Signed-off-by: Eneas U de Queiroz + +diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c +index db560c3d3e4f..0ae9d28afa69 100644 +--- a/drivers/crypto/qce/dma.c ++++ b/drivers/crypto/qce/dma.c +@@ -56,9 +56,10 @@ void qce_dma_release(struct qce_dma_data *dma) + + struct scatterlist * + qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl, +- int max_ents) ++ unsigned int max_len) + { + struct scatterlist *sg = sgt->sgl, *sg_last = NULL; ++ unsigned int new_len; + + while (sg) { + if (!sg_page(sg)) +@@ -69,13 +70,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl, + if (!sg) + return ERR_PTR(-EINVAL); + +- while (new_sgl && sg && max_ents) { +- sg_set_page(sg, sg_page(new_sgl), new_sgl->length, +- new_sgl->offset); ++ while (new_sgl && sg && max_len) { ++ new_len = new_sgl->length > max_len ? max_len : new_sgl->length; ++ sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset); + sg_last = sg; + sg = sg_next(sg); + new_sgl = sg_next(new_sgl); +- max_ents--; ++ max_len -= new_len; + } + + return sg_last; +diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h +index 0be71f7f7a58..710d5e370293 100644 +--- a/drivers/crypto/qce/dma.h ++++ b/drivers/crypto/qce/dma.h +@@ -51,6 +51,6 @@ void qce_dma_issue_pending(struct qce_dma_data *dma); + int qce_dma_terminate_all(struct qce_dma_data *dma); + struct scatterlist * + qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add, +- int max_ents); ++ unsigned int max_len); + + #endif /* _DMA_H_ */ +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index 5af74f2431ca..188eb234ba2c 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -105,13 +105,14 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req) + + sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); + +- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1); ++ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; + } + +- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1); ++ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, ++ QCE_RESULT_BUF_SZ); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; diff --git a/target/linux/ipq40xx/patches-4.19/048-crypto-qce-use-AES-fallback-for-small-requests.patch b/target/linux/ipq40xx/patches-4.19/048-crypto-qce-use-AES-fallback-for-small-requests.patch new file mode 100644 index 000000000..60ebe95c3 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.19/048-crypto-qce-use-AES-fallback-for-small-requests.patch @@ -0,0 +1,126 @@ +From 2d3b6fae7d1a2ad821769440daa91d7eec5c8250 Mon Sep 17 00:00:00 
2001 +From: Eneas U de Queiroz +Date: Fri, 20 Dec 2019 09:41:44 -0300 +Subject: [PATCH] crypto: qce - use AES fallback for small requests + +Process small blocks using the fallback cipher, as a workaround for an +observed failure (DMA-related, apparently) when computing the GCM ghash +key. This brings a speed gain as well, since it avoids the latency of +using the hardware engine to process small blocks. + +Using software for all 16-byte requests would be enough to make GCM +work, but to increase performance, a larger threshold would be better. +Measuring the performance of supported ciphers with openssl speed, +software matches hardware at around 768-1024 bytes. + +Considering the 256-bit ciphers, software is 2-3 times faster than qce +at 256-bytes, 30% faster at 512, and about even at 768-bytes. With +128-bit keys, the break-even point would be around 1024-bytes. + +This adds the 'aes_sw_max_len' parameter, to set the largest request +length processed by the software fallback. Its default is being set to +512 bytes, a little lower than the break-even point, to balance the cost +in CPU usage. + +Signed-off-by: Eneas U de Queiroz + +diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig +index c1595750864e..a0c4d85de4c3 100644 +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -640,6 +640,29 @@ choice + + endchoice + ++config CRYPTO_DEV_QCE_SW_MAX_LEN ++ int "Default maximum request size to use software for AES" ++ depends on CRYPTO_DEV_QCE && CRYPTO_DEV_QCE_SKCIPHER ++ default 512 ++ help ++ This sets the default maximum request size to perform AES requests ++ using software instead of the crypto engine. It can be changed by ++ setting the aes_sw_max_len parameter. ++ ++ Small blocks are processed faster in software than hardware. ++ Considering the 256-bit ciphers, software is 2-3 times faster than ++ qce at 256-bytes, 30% faster at 512, and about even at 768-bytes. ++ With 128-bit keys, the break-even point would be around 1024-bytes. ++ ++ The default is set a little lower, to 512 bytes, to balance the ++ cost in CPU usage. The minimum recommended setting is 16-bytes ++ (1 AES block), since AES-GCM will fail if you set it lower. ++ Setting this to zero will send all requests to the hardware. ++ ++ Note that 192-bit keys are not supported by the hardware and are ++ always processed by the software fallback, and all DES requests ++ are done by the hardware. ++ + config CRYPTO_DEV_QCOM_RNG + tristate "Qualcomm Random Number Generator Driver" + depends on ARCH_QCOM || COMPILE_TEST +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index 188eb234ba2c..9b72fec2ab2e 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -13,6 +13,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -20,6 +21,13 @@ + + #include "cipher.h" + ++static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN; ++module_param(aes_sw_max_len, uint, 0644); ++MODULE_PARM_DESC(aes_sw_max_len, ++ "Only use hardware for AES requests larger than this " ++ "[0=always use hardware; anything <16 breaks AES-GCM; default=" ++ __stringify(CONFIG_CRYPTO_DEV_QCE_SOFT_THRESHOLD)"]"); ++ + static LIST_HEAD(skcipher_algs); + + static void qce_skcipher_done(void *data) +@@ -170,15 +178,7 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, + if (!key || !keylen) + return -EINVAL; + +- if (IS_AES(flags)) { +- switch (IS_XTS(flags) ? 
keylen >> 1 : keylen) { +- case AES_KEYSIZE_128: +- case AES_KEYSIZE_256: +- break; +- default: +- goto fallback; +- } +- } else if (IS_DES(flags)) { ++ if (IS_DES(flags)) { + u32 tmp[DES_EXPKEY_WORDS]; + + ret = des_ekey(tmp, key); +@@ -189,8 +189,8 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, + + ctx->enc_keylen = keylen; + memcpy(ctx->enc_key, key, keylen); +- return 0; +-fallback: ++ if (!IS_AES(flags)) ++ return 0; + ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); + if (!ret) + ctx->enc_keylen = keylen; +@@ -213,8 +213,9 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) + rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; + keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen; + +- if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 && +- keylen != AES_KEYSIZE_256) { ++ if (IS_AES(rctx->flags) && ++ ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) || ++ req->cryptlen <= aes_sw_max_len)) { + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_tfm(subreq, ctx->fallback); diff --git a/target/linux/ipq40xx/patches-4.19/049-crypto-qce-handle-AES-XTS-cases-that-qce-fails.patch b/target/linux/ipq40xx/patches-4.19/049-crypto-qce-handle-AES-XTS-cases-that-qce-fails.patch new file mode 100644 index 000000000..eee2241d1 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.19/049-crypto-qce-handle-AES-XTS-cases-that-qce-fails.patch @@ -0,0 +1,59 @@ +From bbf2b1cf22dc98f3df33b6666df046dfb9564d91 Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Wed, 5 Feb 2020 13:42:25 -0300 +Subject: [PATCH] crypto: qce - handle AES-XTS cases that qce fails + +QCE hangs when presented with an AES-XTS request whose length is larger +than QCE_SECTOR_SIZE (512-bytes), and is not a multiple of it. Let the +fallback cipher handle them. + +Signed-off-by: Eneas U de Queiroz + +diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c +index 3849d7bcaeb0..3e0fcd7613f1 100644 +--- a/drivers/crypto/qce/common.c ++++ b/drivers/crypto/qce/common.c +@@ -23,8 +23,6 @@ + #include "regs-v5.h" + #include "sha.h" + +-#define QCE_SECTOR_SIZE 512 +- + static inline u32 qce_read(struct qce_device *qce, u32 offset) + { + return readl(qce->base + offset); +diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h +index 3252efa41e7a..5f9d3c4e3ead 100644 +--- a/drivers/crypto/qce/common.h ++++ b/drivers/crypto/qce/common.h +@@ -20,6 +20,9 @@ + #include + #include + ++/* xts du size */ ++#define QCE_SECTOR_SIZE 512 ++ + /* key size in bytes */ + #define QCE_SHA_HMAC_KEY_SIZE 64 + #define QCE_MAX_CIPHER_KEY_SIZE AES_KEYSIZE_256 +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index 9b72fec2ab2e..e46cb8269640 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -213,9 +213,14 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) + rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; + keylen = IS_XTS(rctx->flags) ? 
ctx->enc_keylen >> 1 : ctx->enc_keylen; + ++ /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and ++ * is not a multiple of it; pass such requests to the fallback ++ */ + if (IS_AES(rctx->flags) && +- ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) || +- req->cryptlen <= aes_sw_max_len)) { ++ (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) || ++ req->cryptlen <= aes_sw_max_len) || ++ (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE && ++ req->cryptlen % QCE_SECTOR_SIZE))) { + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_tfm(subreq, ctx->fallback); diff --git a/target/linux/ipq40xx/patches-4.19/051-crypto-qce-allow-building-only-hashes-ciphers.patch b/target/linux/ipq40xx/patches-4.19/051-crypto-qce-allow-building-only-hashes-ciphers.patch new file mode 100644 index 000000000..3d4214929 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.19/051-crypto-qce-allow-building-only-hashes-ciphers.patch @@ -0,0 +1,415 @@ +From 62134842498927a0fcc19798a615340a7a6a9e62 Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Mon, 28 Oct 2019 15:17:19 -0300 +Subject: [PATCH] crypto: qce - allow building only hashes/ciphers + +Signed-off-by: Eneas U de Queiroz + +diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig +index a8c4ce07fc9d..c1595750864e 100644 +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -573,6 +573,14 @@ config CRYPTO_DEV_QCE + tristate "Qualcomm crypto engine accelerator" + depends on ARCH_QCOM || COMPILE_TEST + depends on HAS_IOMEM ++ help ++ This driver supports Qualcomm crypto engine accelerator ++ hardware. To compile this driver as a module, choose M here. The ++ module will be called qcrypto. ++ ++config CRYPTO_DEV_QCE_SKCIPHER ++ bool ++ depends on CRYPTO_DEV_QCE + select CRYPTO_AES + select CRYPTO_DES + select CRYPTO_ECB +@@ -580,10 +588,57 @@ config CRYPTO_DEV_QCE + select CRYPTO_XTS + select CRYPTO_CTR + select CRYPTO_BLKCIPHER +- help +- This driver supports Qualcomm crypto engine accelerator +- hardware. To compile this driver as a module, choose M here. The +- module will be called qcrypto. ++ ++config CRYPTO_DEV_QCE_SHA ++ bool ++ depends on CRYPTO_DEV_QCE ++ ++choice ++ prompt "Algorithms enabled for QCE acceleration" ++ default CRYPTO_DEV_QCE_ENABLE_ALL ++ depends on CRYPTO_DEV_QCE ++ help ++ This option allows to choose whether to build support for all algorihtms ++ (default), hashes-only, or skciphers-only. ++ ++ The QCE engine does not appear to scale as well as the CPU to handle ++ multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the ++ QCE handles only 2 requests in parallel. ++ ++ Ipsec throughput seems to improve when disabling either family of ++ algorithms, sharing the load with the CPU. Enabling skciphers-only ++ appears to work best. 
++ ++ config CRYPTO_DEV_QCE_ENABLE_ALL ++ bool "All supported algorithms" ++ select CRYPTO_DEV_QCE_SKCIPHER ++ select CRYPTO_DEV_QCE_SHA ++ help ++ Enable all supported algorithms: ++ - AES (CBC, CTR, ECB, XTS) ++ - 3DES (CBC, ECB) ++ - DES (CBC, ECB) ++ - SHA1, HMAC-SHA1 ++ - SHA256, HMAC-SHA256 ++ ++ config CRYPTO_DEV_QCE_ENABLE_SKCIPHER ++ bool "Symmetric-key ciphers only" ++ select CRYPTO_DEV_QCE_SKCIPHER ++ help ++ Enable symmetric-key ciphers only: ++ - AES (CBC, CTR, ECB, XTS) ++ - 3DES (ECB, CBC) ++ - DES (ECB, CBC) ++ ++ config CRYPTO_DEV_QCE_ENABLE_SHA ++ bool "Hash/HMAC only" ++ select CRYPTO_DEV_QCE_SHA ++ help ++ Enable hashes/HMAC algorithms only: ++ - SHA1, HMAC-SHA1 ++ - SHA256, HMAC-SHA256 ++ ++endchoice + + config CRYPTO_DEV_QCOM_RNG + tristate "Qualcomm Random Number Generator Driver" +diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile +index 8caa04e1ec43..14ade8a7d664 100644 +--- a/drivers/crypto/qce/Makefile ++++ b/drivers/crypto/qce/Makefile +@@ -2,6 +2,7 @@ + obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o + qcrypto-objs := core.o \ + common.o \ +- dma.o \ +- sha.o \ +- skcipher.o ++ dma.o ++ ++qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o ++qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o +diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c +index e0202755682b..3849d7bcaeb0 100644 +--- a/drivers/crypto/qce/common.c ++++ b/drivers/crypto/qce/common.c +@@ -53,52 +53,56 @@ qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len) + qce_write(qce, offset + i * sizeof(u32), 0); + } + +-static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size) ++static u32 qce_config_reg(struct qce_device *qce, int little) + { +- u32 cfg = 0; ++ u32 beats = (qce->burst_size >> 3) - 1; ++ u32 pipe_pair = qce->pipe_pair_id; ++ u32 config; + +- if (IS_AES(flags)) { +- if (aes_key_size == AES_KEYSIZE_128) +- cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT; +- else if (aes_key_size == AES_KEYSIZE_256) +- cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT; +- } ++ config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK; ++ config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) | ++ BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT); ++ config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK; ++ config &= ~HIGH_SPD_EN_N_SHIFT; + +- if (IS_AES(flags)) +- cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT; +- else if (IS_DES(flags) || IS_3DES(flags)) +- cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT; ++ if (little) ++ config |= BIT(LITTLE_ENDIAN_MODE_SHIFT); + +- if (IS_DES(flags)) +- cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT; ++ return config; ++} + +- if (IS_3DES(flags)) +- cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT; ++void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len) ++{ ++ __be32 *d = dst; ++ const u8 *s = src; ++ unsigned int n; + +- switch (flags & QCE_MODE_MASK) { +- case QCE_MODE_ECB: +- cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT; +- break; +- case QCE_MODE_CBC: +- cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT; +- break; +- case QCE_MODE_CTR: +- cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT; +- break; +- case QCE_MODE_XTS: +- cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT; +- break; +- case QCE_MODE_CCM: +- cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT; +- cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT; +- break; +- default: +- return ~0; ++ n = len / sizeof(u32); ++ for (; n > 0; n--) { ++ *d = cpu_to_be32p((const __u32 *) s); ++ s += sizeof(__u32); ++ d++; + } ++} + +- return cfg; ++static void qce_setup_config(struct qce_device *qce) ++{ ++ u32 
config; ++ ++ /* get big endianness */ ++ config = qce_config_reg(qce, 0); ++ ++ /* clear status */ ++ qce_write(qce, REG_STATUS, 0); ++ qce_write(qce, REG_CONFIG, config); ++} ++ ++static inline void qce_crypto_go(struct qce_device *qce) ++{ ++ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT)); + } + ++#ifdef CONFIG_CRYPTO_DEV_QCE_SHA + static u32 qce_auth_cfg(unsigned long flags, u32 key_size) + { + u32 cfg = 0; +@@ -145,88 +149,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size) + return cfg; + } + +-static u32 qce_config_reg(struct qce_device *qce, int little) +-{ +- u32 beats = (qce->burst_size >> 3) - 1; +- u32 pipe_pair = qce->pipe_pair_id; +- u32 config; +- +- config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK; +- config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) | +- BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT); +- config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK; +- config &= ~HIGH_SPD_EN_N_SHIFT; +- +- if (little) +- config |= BIT(LITTLE_ENDIAN_MODE_SHIFT); +- +- return config; +-} +- +-void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len) +-{ +- __be32 *d = dst; +- const u8 *s = src; +- unsigned int n; +- +- n = len / sizeof(u32); +- for (; n > 0; n--) { +- *d = cpu_to_be32p((const __u32 *) s); +- s += sizeof(__u32); +- d++; +- } +-} +- +-static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize) +-{ +- u8 swap[QCE_AES_IV_LENGTH]; +- u32 i, j; +- +- if (ivsize > QCE_AES_IV_LENGTH) +- return; +- +- memset(swap, 0, QCE_AES_IV_LENGTH); +- +- for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1; +- i < QCE_AES_IV_LENGTH; i++, j--) +- swap[i] = src[j]; +- +- qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH); +-} +- +-static void qce_xtskey(struct qce_device *qce, const u8 *enckey, +- unsigned int enckeylen, unsigned int cryptlen) +-{ +- u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; +- unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); +- unsigned int xtsdusize; +- +- qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2, +- enckeylen / 2); +- qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); +- +- /* xts du size 512B */ +- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); +- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); +-} +- +-static void qce_setup_config(struct qce_device *qce) +-{ +- u32 config; +- +- /* get big endianness */ +- config = qce_config_reg(qce, 0); +- +- /* clear status */ +- qce_write(qce, REG_STATUS, 0); +- qce_write(qce, REG_CONFIG, config); +-} +- +-static inline void qce_crypto_go(struct qce_device *qce) +-{ +- qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT)); +-} +- + static int qce_setup_regs_ahash(struct crypto_async_request *async_req, + u32 totallen, u32 offset) + { +@@ -311,6 +233,87 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req, + + return 0; + } ++#endif ++ ++#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER ++static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size) ++{ ++ u32 cfg = 0; ++ ++ if (IS_AES(flags)) { ++ if (aes_key_size == AES_KEYSIZE_128) ++ cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT; ++ else if (aes_key_size == AES_KEYSIZE_256) ++ cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT; ++ } ++ ++ if (IS_AES(flags)) ++ cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT; ++ else if (IS_DES(flags) || IS_3DES(flags)) ++ cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT; ++ ++ if (IS_DES(flags)) ++ cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT; ++ ++ if (IS_3DES(flags)) ++ cfg |= 
ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT; ++ ++ switch (flags & QCE_MODE_MASK) { ++ case QCE_MODE_ECB: ++ cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT; ++ break; ++ case QCE_MODE_CBC: ++ cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT; ++ break; ++ case QCE_MODE_CTR: ++ cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT; ++ break; ++ case QCE_MODE_XTS: ++ cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT; ++ break; ++ case QCE_MODE_CCM: ++ cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT; ++ cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT; ++ break; ++ default: ++ return ~0; ++ } ++ ++ return cfg; ++} ++ ++static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize) ++{ ++ u8 swap[QCE_AES_IV_LENGTH]; ++ u32 i, j; ++ ++ if (ivsize > QCE_AES_IV_LENGTH) ++ return; ++ ++ memset(swap, 0, QCE_AES_IV_LENGTH); ++ ++ for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1; ++ i < QCE_AES_IV_LENGTH; i++, j--) ++ swap[i] = src[j]; ++ ++ qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH); ++} ++ ++static void qce_xtskey(struct qce_device *qce, const u8 *enckey, ++ unsigned int enckeylen, unsigned int cryptlen) ++{ ++ u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; ++ unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); ++ unsigned int xtsdusize; ++ ++ qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2, ++ enckeylen / 2); ++ qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); ++ ++ /* xts du size 512B */ ++ xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); ++ qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); ++} + + static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, + u32 totallen, u32 offset) +@@ -392,15 +395,20 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, + + return 0; + } ++#endif + + int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, + u32 offset) + { + switch (type) { ++#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER + case CRYPTO_ALG_TYPE_SKCIPHER: + return qce_setup_regs_skcipher(async_req, totallen, offset); ++#endif ++#ifdef CONFIG_CRYPTO_DEV_QCE_SHA + case CRYPTO_ALG_TYPE_AHASH: + return qce_setup_regs_ahash(async_req, totallen, offset); ++#endif + default: + return -EINVAL; + } +diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c +index bf409edc23ab..4ad79b67fc51 100644 +--- a/drivers/crypto/qce/core.c ++++ b/drivers/crypto/qce/core.c +@@ -30,8 +30,12 @@ + #define QCE_QUEUE_LENGTH 1 + + static const struct qce_algo_ops *qce_ops[] = { ++#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER + &skcipher_ops, ++#endif ++#ifdef CONFIG_CRYPTO_DEV_QCE_SHA + &ahash_ops, ++#endif + }; + + static void qce_unregister_algs(struct qce_device *qce) diff --git a/target/linux/ipq40xx/patches-5.4/040-crypto-qce-switch-to-skcipher-API.patch b/target/linux/ipq40xx/patches-5.4/040-crypto-qce-switch-to-skcipher-API.patch new file mode 100644 index 000000000..ebe7a3ec1 --- /dev/null +++ b/target/linux/ipq40xx/patches-5.4/040-crypto-qce-switch-to-skcipher-API.patch @@ -0,0 +1,481 @@ +From 08d5aa79cb81b82ac248e48db6786b5a16e85a08 Mon Sep 17 00:00:00 2001 +From: Ard Biesheuvel +Date: Sat, 9 Nov 2019 18:09:45 +0100 +Subject: [PATCH] crypto: qce - switch to skcipher API + +Commit 7a7ffe65c8c5 ("crypto: skcipher - Add top-level skcipher interface") +dated 20 august 2015 introduced the new skcipher API which is supposed to +replace both blkcipher and ablkcipher. 
While all consumers of the API have +been converted long ago, some producers of the ablkcipher remain, forcing +us to keep the ablkcipher support routines alive, along with the matching +code to expose [a]blkciphers via the skcipher API. + +So switch this driver to the skcipher API, allowing us to finally drop the +ablkcipher code in the near future. + +Reviewed-by: Stanimir Varbanov +Signed-off-by: Ard Biesheuvel +Signed-off-by: Herbert Xu +(cherry picked from commit 8bf0871539faa090fa057cee83cf06ef4e76e7c5) + +diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile +index 19a7f899acff..8caa04e1ec43 100644 +--- a/drivers/crypto/qce/Makefile ++++ b/drivers/crypto/qce/Makefile +@@ -4,4 +4,4 @@ qcrypto-objs := core.o \ + common.o \ + dma.o \ + sha.o \ +- ablkcipher.o ++ skcipher.o +diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h +index 5cab8f0706a8..7770660bc853 100644 +--- a/drivers/crypto/qce/cipher.h ++++ b/drivers/crypto/qce/cipher.h +@@ -45,12 +45,12 @@ struct qce_cipher_reqctx { + unsigned int cryptlen; + }; + +-static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm) ++static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm) + { +- struct crypto_alg *alg = tfm->__crt_alg; +- return container_of(alg, struct qce_alg_template, alg.crypto); ++ struct skcipher_alg *alg = crypto_skcipher_alg(tfm); ++ return container_of(alg, struct qce_alg_template, alg.skcipher); + } + +-extern const struct qce_algo_ops ablkcipher_ops; ++extern const struct qce_algo_ops skcipher_ops; + + #endif /* _CIPHER_H_ */ +diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c +index 3fb510164326..da1188abc9ba 100644 +--- a/drivers/crypto/qce/common.c ++++ b/drivers/crypto/qce/common.c +@@ -304,13 +304,13 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req, + return 0; + } + +-static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req, ++static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, + u32 totallen, u32 offset) + { +- struct ablkcipher_request *req = ablkcipher_request_cast(async_req); +- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); ++ struct skcipher_request *req = skcipher_request_cast(async_req); ++ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm); +- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); ++ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); + struct qce_device *qce = tmpl->qce; + __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0}; + __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0}; +@@ -389,8 +389,8 @@ int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, + u32 offset) + { + switch (type) { +- case CRYPTO_ALG_TYPE_ABLKCIPHER: +- return qce_setup_regs_ablkcipher(async_req, totallen, offset); ++ case CRYPTO_ALG_TYPE_SKCIPHER: ++ return qce_setup_regs_skcipher(async_req, totallen, offset); + case CRYPTO_ALG_TYPE_AHASH: + return qce_setup_regs_ahash(async_req, totallen, offset); + default: +diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h +index 47fb523357ac..282d4317470d 100644 +--- a/drivers/crypto/qce/common.h ++++ b/drivers/crypto/qce/common.h +@@ -10,6 +10,7 @@ + #include <linux/types.h> + #include <crypto/aes.h> + #include <crypto/hash.h> ++#include <crypto/internal/skcipher.h> + + /* key size in bytes */ + #define QCE_SHA_HMAC_KEY_SIZE 64 +@@ -79,7 +80,7 @@ struct qce_alg_template { + unsigned long alg_flags; + const u32
*std_iv; + union { +- struct crypto_alg crypto; ++ struct skcipher_alg skcipher; + struct ahash_alg ahash; + } alg; + struct qce_device *qce; +diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c +index 08d4ce3bfddf..0a44a6eeacf5 100644 +--- a/drivers/crypto/qce/core.c ++++ b/drivers/crypto/qce/core.c +@@ -22,7 +22,7 @@ + #define QCE_QUEUE_LENGTH 1 + + static const struct qce_algo_ops *qce_ops[] = { +- &ablkcipher_ops, ++ &skcipher_ops, + &ahash_ops, + }; + +diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/skcipher.c +similarity index 61% +rename from drivers/crypto/qce/ablkcipher.c +rename to drivers/crypto/qce/skcipher.c +index f0b59a8bbed0..fee07323f8f9 100644 +--- a/drivers/crypto/qce/ablkcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -12,14 +12,14 @@ + + #include "cipher.h" + +-static LIST_HEAD(ablkcipher_algs); ++static LIST_HEAD(skcipher_algs); + +-static void qce_ablkcipher_done(void *data) ++static void qce_skcipher_done(void *data) + { + struct crypto_async_request *async_req = data; +- struct ablkcipher_request *req = ablkcipher_request_cast(async_req); +- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); +- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); ++ struct skcipher_request *req = skcipher_request_cast(async_req); ++ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); ++ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); + struct qce_device *qce = tmpl->qce; + enum dma_data_direction dir_src, dir_dst; + u32 status; +@@ -32,7 +32,7 @@ static void qce_ablkcipher_done(void *data) + + error = qce_dma_terminate_all(&qce->dma); + if (error) +- dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n", ++ dev_dbg(qce->dev, "skcipher dma termination error (%d)\n", + error); + + if (diff_dst) +@@ -43,18 +43,18 @@ static void qce_ablkcipher_done(void *data) + + error = qce_check_status(qce, &status); + if (error < 0) +- dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status); ++ dev_dbg(qce->dev, "skcipher operation error (%x)\n", status); + + qce->async_req_done(tmpl->qce, error); + } + + static int +-qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) ++qce_skcipher_async_req_handle(struct crypto_async_request *async_req) + { +- struct ablkcipher_request *req = ablkcipher_request_cast(async_req); +- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); +- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); +- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); ++ struct skcipher_request *req = skcipher_request_cast(async_req); ++ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); ++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); ++ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); + struct qce_device *qce = tmpl->qce; + enum dma_data_direction dir_src, dir_dst; + struct scatterlist *sg; +@@ -62,17 +62,17 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) + gfp_t gfp; + int ret; + +- rctx->iv = req->info; +- rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher); +- rctx->cryptlen = req->nbytes; ++ rctx->iv = req->iv; ++ rctx->ivsize = crypto_skcipher_ivsize(skcipher); ++ rctx->cryptlen = req->cryptlen; + + diff_dst = (req->src != req->dst) ? true : false; + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + dir_dst = diff_dst ? 
DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; + +- rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); ++ rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen); + if (diff_dst) +- rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); ++ rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen); + else + rctx->dst_nents = rctx->src_nents; + if (rctx->src_nents < 0) { +@@ -125,13 +125,13 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) + + ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents, + rctx->dst_sg, rctx->dst_nents, +- qce_ablkcipher_done, async_req); ++ qce_skcipher_done, async_req); + if (ret) + goto error_unmap_src; + + qce_dma_issue_pending(&qce->dma); + +- ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0); ++ ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0); + if (ret) + goto error_terminate; + +@@ -149,10 +149,10 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) + return ret; + } + +-static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, ++static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, + unsigned int keylen) + { +- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk); ++ struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk); + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + +@@ -177,13 +177,13 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, + return ret; + } + +-static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key, ++static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key, + unsigned int keylen) + { +- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk); ++ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk); + int err; + +- err = verify_ablkcipher_des_key(ablk, key); ++ err = verify_skcipher_des_key(ablk, key); + if (err) + return err; + +@@ -192,13 +192,13 @@ static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key, + return 0; + } + +-static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key, ++static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key, + unsigned int keylen) + { +- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk); ++ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk); + int err; + +- err = verify_ablkcipher_des3_key(ablk, key); ++ err = verify_skcipher_des3_key(ablk, key); + if (err) + return err; + +@@ -207,12 +207,11 @@ static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key, + return 0; + } + +-static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) ++static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) + { +- struct crypto_tfm *tfm = +- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); +- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); +- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); ++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); ++ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); + int ret; + +@@ -227,7 +226,7 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, +- req->nbytes, req->info); ++ req->cryptlen, req->iv); + ret = encrypt ? 
crypto_skcipher_encrypt(subreq) : + crypto_skcipher_decrypt(subreq); + skcipher_request_zero(subreq); +@@ -237,36 +236,36 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) + return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base); + } + +-static int qce_ablkcipher_encrypt(struct ablkcipher_request *req) ++static int qce_skcipher_encrypt(struct skcipher_request *req) + { +- return qce_ablkcipher_crypt(req, 1); ++ return qce_skcipher_crypt(req, 1); + } + +-static int qce_ablkcipher_decrypt(struct ablkcipher_request *req) ++static int qce_skcipher_decrypt(struct skcipher_request *req) + { +- return qce_ablkcipher_crypt(req, 0); ++ return qce_skcipher_crypt(req, 0); + } + +-static int qce_ablkcipher_init(struct crypto_tfm *tfm) ++static int qce_skcipher_init(struct crypto_skcipher *tfm) + { +- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); ++ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + memset(ctx, 0, sizeof(*ctx)); +- tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx); ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx)); + +- ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm), ++ ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base), + 0, CRYPTO_ALG_NEED_FALLBACK); + return PTR_ERR_OR_ZERO(ctx->fallback); + } + +-static void qce_ablkcipher_exit(struct crypto_tfm *tfm) ++static void qce_skcipher_exit(struct crypto_skcipher *tfm) + { +- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); ++ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + crypto_free_sync_skcipher(ctx->fallback); + } + +-struct qce_ablkcipher_def { ++struct qce_skcipher_def { + unsigned long flags; + const char *name; + const char *drv_name; +@@ -276,7 +275,7 @@ struct qce_ablkcipher_def { + unsigned int max_keysize; + }; + +-static const struct qce_ablkcipher_def ablkcipher_def[] = { ++static const struct qce_skcipher_def skcipher_def[] = { + { + .flags = QCE_ALG_AES | QCE_MODE_ECB, + .name = "ecb(aes)", +@@ -351,90 +350,91 @@ static const struct qce_ablkcipher_def ablkcipher_def[] = { + }, + }; + +-static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def, ++static int qce_skcipher_register_one(const struct qce_skcipher_def *def, + struct qce_device *qce) + { + struct qce_alg_template *tmpl; +- struct crypto_alg *alg; ++ struct skcipher_alg *alg; + int ret; + + tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); + if (!tmpl) + return -ENOMEM; + +- alg = &tmpl->alg.crypto; ++ alg = &tmpl->alg.skcipher; + +- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); +- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", ++ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); ++ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + +- alg->cra_blocksize = def->blocksize; +- alg->cra_ablkcipher.ivsize = def->ivsize; +- alg->cra_ablkcipher.min_keysize = def->min_keysize; +- alg->cra_ablkcipher.max_keysize = def->max_keysize; +- alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? qce_des3_setkey : +- IS_DES(def->flags) ? 
qce_des_setkey : +- qce_ablkcipher_setkey; +- alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt; +- alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt; +- +- alg->cra_priority = 300; +- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | +- CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY; +- alg->cra_ctxsize = sizeof(struct qce_cipher_ctx); +- alg->cra_alignmask = 0; +- alg->cra_type = &crypto_ablkcipher_type; +- alg->cra_module = THIS_MODULE; +- alg->cra_init = qce_ablkcipher_init; +- alg->cra_exit = qce_ablkcipher_exit; ++ alg->base.cra_blocksize = def->blocksize; ++ alg->ivsize = def->ivsize; ++ alg->min_keysize = def->min_keysize; ++ alg->max_keysize = def->max_keysize; ++ alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey : ++ IS_DES(def->flags) ? qce_des_setkey : ++ qce_skcipher_setkey; ++ alg->encrypt = qce_skcipher_encrypt; ++ alg->decrypt = qce_skcipher_decrypt; ++ ++ alg->base.cra_priority = 300; ++ alg->base.cra_flags = CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_NEED_FALLBACK | ++ CRYPTO_ALG_KERN_DRIVER_ONLY; ++ alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx); ++ alg->base.cra_alignmask = 0; ++ alg->base.cra_module = THIS_MODULE; ++ ++ alg->init = qce_skcipher_init; ++ alg->exit = qce_skcipher_exit; + + INIT_LIST_HEAD(&tmpl->entry); +- tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER; ++ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER; + tmpl->alg_flags = def->flags; + tmpl->qce = qce; + +- ret = crypto_register_alg(alg); ++ ret = crypto_register_skcipher(alg); + if (ret) { + kfree(tmpl); +- dev_err(qce->dev, "%s registration failed\n", alg->cra_name); ++ dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name); + return ret; + } + +- list_add_tail(&tmpl->entry, &ablkcipher_algs); +- dev_dbg(qce->dev, "%s is registered\n", alg->cra_name); ++ list_add_tail(&tmpl->entry, &skcipher_algs); ++ dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name); + return 0; + } + +-static void qce_ablkcipher_unregister(struct qce_device *qce) ++static void qce_skcipher_unregister(struct qce_device *qce) + { + struct qce_alg_template *tmpl, *n; + +- list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) { +- crypto_unregister_alg(&tmpl->alg.crypto); ++ list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) { ++ crypto_unregister_skcipher(&tmpl->alg.skcipher); + list_del(&tmpl->entry); + kfree(tmpl); + } + } + +-static int qce_ablkcipher_register(struct qce_device *qce) ++static int qce_skcipher_register(struct qce_device *qce) + { + int ret, i; + +- for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) { +- ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce); ++ for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) { ++ ret = qce_skcipher_register_one(&skcipher_def[i], qce); + if (ret) + goto err; + } + + return 0; + err: +- qce_ablkcipher_unregister(qce); ++ qce_skcipher_unregister(qce); + return ret; + } + +-const struct qce_algo_ops ablkcipher_ops = { +- .type = CRYPTO_ALG_TYPE_ABLKCIPHER, +- .register_algs = qce_ablkcipher_register, +- .unregister_algs = qce_ablkcipher_unregister, +- .async_req_handle = qce_ablkcipher_async_req_handle, ++const struct qce_algo_ops skcipher_ops = { ++ .type = CRYPTO_ALG_TYPE_SKCIPHER, ++ .register_algs = qce_skcipher_register, ++ .unregister_algs = qce_skcipher_unregister, ++ .async_req_handle = qce_skcipher_async_req_handle, + }; diff --git a/target/linux/ipq40xx/patches-5.4/041-crypto-qce-fix-ctr-aes-qce-block-chunk-sizes.patch b/target/linux/ipq40xx/patches-5.4/041-crypto-qce-fix-ctr-aes-qce-block-chunk-sizes.patch new 
file mode 100644 index 000000000..b83b61722 --- /dev/null +++ b/target/linux/ipq40xx/patches-5.4/041-crypto-qce-fix-ctr-aes-qce-block-chunk-sizes.patch @@ -0,0 +1,43 @@ +From c7d6f50650bc31f7bd117fdf6da0402657ba1e30 Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Fri, 20 Dec 2019 16:02:13 -0300 +Subject: [PATCH] crypto: qce - fix ctr-aes-qce block, chunk sizes + +Set blocksize of ctr-aes-qce to 1, so it can operate as a stream cipher, +adding the definition for chunksize instead, where the underlying block +size belongs. + +Signed-off-by: Eneas U de Queiroz +Signed-off-by: Herbert Xu +(cherry picked from commit bb5c863b3d3cbd10e80b2ebf409934a091058f54) + +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index fee07323f8f9..1f1f40a761fa 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -270,6 +270,7 @@ struct qce_skcipher_def { + const char *name; + const char *drv_name; + unsigned int blocksize; ++ unsigned int chunksize; + unsigned int ivsize; + unsigned int min_keysize; + unsigned int max_keysize; +@@ -298,7 +299,8 @@ static const struct qce_skcipher_def skcipher_def[] = { + .flags = QCE_ALG_AES | QCE_MODE_CTR, + .name = "ctr(aes)", + .drv_name = "ctr-aes-qce", +- .blocksize = AES_BLOCK_SIZE, ++ .blocksize = 1, ++ .chunksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, +@@ -368,6 +370,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, + def->drv_name); + + alg->base.cra_blocksize = def->blocksize; ++ alg->chunksize = def->chunksize; + alg->ivsize = def->ivsize; + alg->min_keysize = def->min_keysize; + alg->max_keysize = def->max_keysize; diff --git a/target/linux/ipq40xx/patches-5.4/042-crypto-qce-fix-xts-aes-qce-key-sizes.patch b/target/linux/ipq40xx/patches-5.4/042-crypto-qce-fix-xts-aes-qce-key-sizes.patch new file mode 100644 index 000000000..1752d749c --- /dev/null +++ b/target/linux/ipq40xx/patches-5.4/042-crypto-qce-fix-xts-aes-qce-key-sizes.patch @@ -0,0 +1,60 @@ +From 24e91ebb48e8a9d0475940cb47dfd228cdf3336e Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Fri, 20 Dec 2019 16:02:14 -0300 +Subject: [PATCH] crypto: qce - fix xts-aes-qce key sizes + +XTS-mode uses two keys, so the keysizes should be doubled in +skcipher_def, and halved when checking if it is AES-128/192/256. + +Signed-off-by: Eneas U de Queiroz +Signed-off-by: Herbert Xu +(cherry picked from commit 7de4c2bd196f111e39cc60f6197654aff23ba2b4) + +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index 1f1f40a761fa..e4f6d87ba51d 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -154,12 +154,13 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, + { + struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk); + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); ++ unsigned long flags = to_cipher_tmpl(ablk)->alg_flags; + int ret; + + if (!key || !keylen) + return -EINVAL; + +- switch (keylen) { ++ switch (IS_XTS(flags) ? keylen >> 1 : keylen) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_256: + break; +@@ -213,13 +214,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) + struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); ++ int keylen; + int ret; + + rctx->flags = tmpl->alg_flags; + rctx->flags |= encrypt ?
QCE_ENCRYPT : QCE_DECRYPT; ++ keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen; + +- if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && +- ctx->enc_keylen != AES_KEYSIZE_256) { ++ if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 && ++ keylen != AES_KEYSIZE_256) { + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_sync_tfm(subreq, ctx->fallback); +@@ -311,8 +314,8 @@ static const struct qce_skcipher_def skcipher_def[] = { + .drv_name = "xts-aes-qce", + .blocksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, +- .min_keysize = AES_MIN_KEY_SIZE, +- .max_keysize = AES_MAX_KEY_SIZE, ++ .min_keysize = AES_MIN_KEY_SIZE * 2, ++ .max_keysize = AES_MAX_KEY_SIZE * 2, + }, + { + .flags = QCE_ALG_DES | QCE_MODE_ECB, diff --git a/target/linux/ipq40xx/patches-5.4/043-crypto-qce-save-a-sg-table-slot-for-result-buf.patch b/target/linux/ipq40xx/patches-5.4/043-crypto-qce-save-a-sg-table-slot-for-result-buf.patch new file mode 100644 index 000000000..28f3b33d2 --- /dev/null +++ b/target/linux/ipq40xx/patches-5.4/043-crypto-qce-save-a-sg-table-slot-for-result-buf.patch @@ -0,0 +1,87 @@ +From c61bc00b568fbf1068aceb41e4362568456ec324 Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Fri, 20 Dec 2019 16:02:15 -0300 +Subject: [PATCH] crypto: qce - save a sg table slot for result buf + +When ctr-aes-qce is used for gcm-mode, an extra sg entry for the +authentication tag is present, causing trouble when the qce driver +prepares the dst-results sg table for dma. + +It computes the number of entries needed with sg_nents_for_len, leaving +out the tag entry. Then it creates a sg table with that number plus +one, used to store a result buffer. + +When copying the sg table, there's no limit to the number of entries +copied, so the extra slot is filled with the authentication tag sg. +When the driver tries to add the result sg, the list is full, and it +returns EINVAL. + +By limiting the number of sg entries copied to the dest table, the slot +for the result buffer is guaranteed to be unused. 
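To make the fix concrete: the request handler sizes rctx->dst_tbl with one entry more than the destination scatterlist needs, reserving the final slot for the driver's result buffer. A condensed sketch of the fixed call sequence (identifiers taken from the patch; the table sizing happens earlier in qce_skcipher_async_req_handle, and error handling is elided):

    /* copy at most dst_nents - 1 entries of req->dst, so a trailing
     * GCM auth-tag entry can no longer occupy the reserved slot */
    sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);

    /* the reserved slot is now guaranteed to be free for the result buffer */
    sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);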
+ +Signed-off-by: Eneas U de Queiroz +Signed-off-by: Herbert Xu +(cherry picked from commit 3ee50c896d712dc2fc8f34c2cd1918d035e74045) + +diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c +index 0984a719144d..a1a8c1221fbe 100644 +--- a/drivers/crypto/qce/dma.c ++++ b/drivers/crypto/qce/dma.c +@@ -47,7 +47,8 @@ void qce_dma_release(struct qce_dma_data *dma) + } + + struct scatterlist * +-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) ++qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl, ++ int max_ents) + { + struct scatterlist *sg = sgt->sgl, *sg_last = NULL; + +@@ -60,12 +61,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) + if (!sg) + return ERR_PTR(-EINVAL); + +- while (new_sgl && sg) { ++ while (new_sgl && sg && max_ents) { + sg_set_page(sg, sg_page(new_sgl), new_sgl->length, + new_sgl->offset); + sg_last = sg; + sg = sg_next(sg); + new_sgl = sg_next(new_sgl); ++ max_ents--; + } + + return sg_last; +diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h +index 1e25a9e0e6f8..ed25a0d9829e 100644 +--- a/drivers/crypto/qce/dma.h ++++ b/drivers/crypto/qce/dma.h +@@ -42,6 +42,7 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in, + void qce_dma_issue_pending(struct qce_dma_data *dma); + int qce_dma_terminate_all(struct qce_dma_data *dma); + struct scatterlist * +-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add); ++qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add, ++ int max_ents); + + #endif /* _DMA_H_ */ +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index e4f6d87ba51d..a9ae356bc2a7 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -95,13 +95,13 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req) + + sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); + +- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst); ++ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; + } + +- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg); ++ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; diff --git a/target/linux/ipq40xx/patches-5.4/044-crypto-qce-update-the-skcipher-IV.patch b/target/linux/ipq40xx/patches-5.4/044-crypto-qce-update-the-skcipher-IV.patch new file mode 100644 index 000000000..140c9476b --- /dev/null +++ b/target/linux/ipq40xx/patches-5.4/044-crypto-qce-update-the-skcipher-IV.patch @@ -0,0 +1,31 @@ +From 146e43fd8fc99c83437c00c532a1ad3d8e74d555 Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Fri, 20 Dec 2019 16:02:16 -0300 +Subject: [PATCH] crypto: qce - update the skcipher IV + +Update the IV after the completion of each cipher operation. 
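This one-line change matters because the skcipher API expects CBC and CTR implementations to hand the updated IV back through req->iv, so that callers can chain requests (the crypto self-tests check this behaviour). A minimal sketch of the copy the patch adds, assuming the driver's qce_result_dump layout; the helper name here is illustrative, not part of the patch:

    /* propagate the engine-updated IV from the result dump back into
     * the request context once the operation completes */
    static void qce_update_req_iv(struct qce_cipher_reqctx *rctx,
                                  const struct qce_result_dump *result_buf)
    {
        memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
    }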
+ +Signed-off-by: Eneas U de Queiroz +Signed-off-by: Herbert Xu +(cherry picked from commit 3e806a12d10af2581aa26c37b58439286eab9782) + +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index a9ae356bc2a7..d3852a61cb1d 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -21,6 +21,7 @@ static void qce_skcipher_done(void *data) + struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); + struct qce_device *qce = tmpl->qce; ++ struct qce_result_dump *result_buf = qce->dma.result_buf; + enum dma_data_direction dir_src, dir_dst; + u32 status; + int error; +@@ -45,6 +46,7 @@ static void qce_skcipher_done(void *data) + if (error < 0) + dev_dbg(qce->dev, "skcipher operation error (%x)\n", status); + ++ memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize); + qce->async_req_done(tmpl->qce, error); + } + diff --git a/target/linux/ipq40xx/patches-5.4/045-crypto-qce-initialize-fallback-only-for-AES.patch b/target/linux/ipq40xx/patches-5.4/045-crypto-qce-initialize-fallback-only-for-AES.patch new file mode 100644 index 000000000..c007feded --- /dev/null +++ b/target/linux/ipq40xx/patches-5.4/045-crypto-qce-initialize-fallback-only-for-AES.patch @@ -0,0 +1,54 @@ +From f79421b2c51f2b236cb8e84376d22af007ba1b53 Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Fri, 20 Dec 2019 16:02:17 -0300 +Subject: [PATCH] crypto: qce - initialize fallback only for AES + +Adjust cra_flags to add CRYPTO_NEED_FALLBACK only for AES ciphers, where +AES-192 is not handled by the qce hardware, and don't allocate & free +the fallback skcipher for other algorithms. + +Signed-off-by: Eneas U de Queiroz +Signed-off-by: Herbert Xu +(cherry picked from commit 8ceda883205db6dfedb82e39f67feae3b50c95a1) + +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index d3852a61cb1d..4217b745f124 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -257,7 +257,14 @@ static int qce_skcipher_init(struct crypto_skcipher *tfm) + + memset(ctx, 0, sizeof(*ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx)); ++ return 0; ++} + ++static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm) ++{ ++ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ++ ++ qce_skcipher_init(tfm); + ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base), + 0, CRYPTO_ALG_NEED_FALLBACK); + return PTR_ERR_OR_ZERO(ctx->fallback); +@@ -387,14 +394,18 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, + + alg->base.cra_priority = 300; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | +- CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx); + alg->base.cra_alignmask = 0; + alg->base.cra_module = THIS_MODULE; + +- alg->init = qce_skcipher_init; +- alg->exit = qce_skcipher_exit; ++ if (IS_AES(def->flags)) { ++ alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK; ++ alg->init = qce_skcipher_init_fallback; ++ alg->exit = qce_skcipher_exit; ++ } else { ++ alg->init = qce_skcipher_init; ++ } + + INIT_LIST_HEAD(&tmpl->entry); + tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER; diff --git a/target/linux/ipq40xx/patches-5.4/046-crypto-qce-allow-building-only-hashes-ciphers.patch b/target/linux/ipq40xx/patches-5.4/046-crypto-qce-allow-building-only-hashes-ciphers.patch new file mode 100644 index 000000000..d27f87ed1 --- /dev/null +++ 
b/target/linux/ipq40xx/patches-5.4/046-crypto-qce-allow-building-only-hashes-ciphers.patch @@ -0,0 +1,428 @@ +From 7794ba0f42531460cc8d0942df49c95ec4665666 Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Fri, 20 Dec 2019 16:02:18 -0300 +Subject: [PATCH] crypto: qce - allow building only hashes/ciphers + +Allow the user to choose whether to build support for all algorithms +(default), hashes-only, or skciphers-only. + +The QCE engine does not appear to scale as well as the CPU to handle +multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the +QCE handles only 2 requests in parallel. + +Ipsec throughput seems to improve when disabling either family of +algorithms, sharing the load with the CPU. Enabling skciphers-only +appears to work best. + +Signed-off-by: Eneas U de Queiroz +Signed-off-by: Herbert Xu +(cherry picked from commit 59e056cda4beb5412e3653e6360c2eb0fa770baa) + +diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig +index 1fb622f2a87d..89c40658b246 100644 +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -614,6 +614,14 @@ config CRYPTO_DEV_QCE + tristate "Qualcomm crypto engine accelerator" + depends on ARCH_QCOM || COMPILE_TEST + depends on HAS_IOMEM ++ help ++ This driver supports Qualcomm crypto engine accelerator ++ hardware. To compile this driver as a module, choose M here. The ++ module will be called qcrypto. ++ ++config CRYPTO_DEV_QCE_SKCIPHER ++ bool ++ depends on CRYPTO_DEV_QCE + select CRYPTO_AES + select CRYPTO_LIB_DES + select CRYPTO_ECB +@@ -621,10 +629,57 @@ config CRYPTO_DEV_QCE + select CRYPTO_XTS + select CRYPTO_CTR + select CRYPTO_BLKCIPHER +- help +- This driver supports Qualcomm crypto engine accelerator +- hardware. To compile this driver as a module, choose M here. The +- module will be called qcrypto. ++ ++config CRYPTO_DEV_QCE_SHA ++ bool ++ depends on CRYPTO_DEV_QCE ++ ++choice ++ prompt "Algorithms enabled for QCE acceleration" ++ default CRYPTO_DEV_QCE_ENABLE_ALL ++ depends on CRYPTO_DEV_QCE ++ help ++ This option allows you to choose whether to build support for all algorithms ++ (default), hashes-only, or skciphers-only. ++ ++ The QCE engine does not appear to scale as well as the CPU to handle ++ multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the ++ QCE handles only 2 requests in parallel. ++ ++ Ipsec throughput seems to improve when disabling either family of ++ algorithms, sharing the load with the CPU. Enabling skciphers-only ++ appears to work best.
++ ++ config CRYPTO_DEV_QCE_ENABLE_ALL ++ bool "All supported algorithms" ++ select CRYPTO_DEV_QCE_SKCIPHER ++ select CRYPTO_DEV_QCE_SHA ++ help ++ Enable all supported algorithms: ++ - AES (CBC, CTR, ECB, XTS) ++ - 3DES (CBC, ECB) ++ - DES (CBC, ECB) ++ - SHA1, HMAC-SHA1 ++ - SHA256, HMAC-SHA256 ++ ++ config CRYPTO_DEV_QCE_ENABLE_SKCIPHER ++ bool "Symmetric-key ciphers only" ++ select CRYPTO_DEV_QCE_SKCIPHER ++ help ++ Enable symmetric-key ciphers only: ++ - AES (CBC, CTR, ECB, XTS) ++ - 3DES (ECB, CBC) ++ - DES (ECB, CBC) ++ ++ config CRYPTO_DEV_QCE_ENABLE_SHA ++ bool "Hash/HMAC only" ++ select CRYPTO_DEV_QCE_SHA ++ help ++ Enable hashes/HMAC algorithms only: ++ - SHA1, HMAC-SHA1 ++ - SHA256, HMAC-SHA256 ++ ++endchoice + + config CRYPTO_DEV_QCOM_RNG + tristate "Qualcomm Random Number Generator Driver" +diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile +index 8caa04e1ec43..14ade8a7d664 100644 +--- a/drivers/crypto/qce/Makefile ++++ b/drivers/crypto/qce/Makefile +@@ -2,6 +2,7 @@ + obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o + qcrypto-objs := core.o \ + common.o \ +- dma.o \ +- sha.o \ +- skcipher.o ++ dma.o ++ ++qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o ++qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o +diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c +index da1188abc9ba..629e7f34dc09 100644 +--- a/drivers/crypto/qce/common.c ++++ b/drivers/crypto/qce/common.c +@@ -45,52 +45,56 @@ qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len) + qce_write(qce, offset + i * sizeof(u32), 0); + } + +-static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size) ++static u32 qce_config_reg(struct qce_device *qce, int little) + { +- u32 cfg = 0; ++ u32 beats = (qce->burst_size >> 3) - 1; ++ u32 pipe_pair = qce->pipe_pair_id; ++ u32 config; + +- if (IS_AES(flags)) { +- if (aes_key_size == AES_KEYSIZE_128) +- cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT; +- else if (aes_key_size == AES_KEYSIZE_256) +- cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT; +- } ++ config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK; ++ config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) | ++ BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT); ++ config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK; ++ config &= ~HIGH_SPD_EN_N_SHIFT; + +- if (IS_AES(flags)) +- cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT; +- else if (IS_DES(flags) || IS_3DES(flags)) +- cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT; ++ if (little) ++ config |= BIT(LITTLE_ENDIAN_MODE_SHIFT); + +- if (IS_DES(flags)) +- cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT; ++ return config; ++} + +- if (IS_3DES(flags)) +- cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT; ++void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len) ++{ ++ __be32 *d = dst; ++ const u8 *s = src; ++ unsigned int n; + +- switch (flags & QCE_MODE_MASK) { +- case QCE_MODE_ECB: +- cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT; +- break; +- case QCE_MODE_CBC: +- cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT; +- break; +- case QCE_MODE_CTR: +- cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT; +- break; +- case QCE_MODE_XTS: +- cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT; +- break; +- case QCE_MODE_CCM: +- cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT; +- cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT; +- break; +- default: +- return ~0; ++ n = len / sizeof(u32); ++ for (; n > 0; n--) { ++ *d = cpu_to_be32p((const __u32 *) s); ++ s += sizeof(__u32); ++ d++; + } ++} + +- return cfg; ++static void qce_setup_config(struct qce_device *qce) ++{ ++ u32 
config; ++ ++ /* get big endianness */ ++ config = qce_config_reg(qce, 0); ++ ++ /* clear status */ ++ qce_write(qce, REG_STATUS, 0); ++ qce_write(qce, REG_CONFIG, config); ++} ++ ++static inline void qce_crypto_go(struct qce_device *qce) ++{ ++ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT)); + } + ++#ifdef CONFIG_CRYPTO_DEV_QCE_SHA + static u32 qce_auth_cfg(unsigned long flags, u32 key_size) + { + u32 cfg = 0; +@@ -137,88 +141,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size) + return cfg; + } + +-static u32 qce_config_reg(struct qce_device *qce, int little) +-{ +- u32 beats = (qce->burst_size >> 3) - 1; +- u32 pipe_pair = qce->pipe_pair_id; +- u32 config; +- +- config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK; +- config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) | +- BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT); +- config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK; +- config &= ~HIGH_SPD_EN_N_SHIFT; +- +- if (little) +- config |= BIT(LITTLE_ENDIAN_MODE_SHIFT); +- +- return config; +-} +- +-void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len) +-{ +- __be32 *d = dst; +- const u8 *s = src; +- unsigned int n; +- +- n = len / sizeof(u32); +- for (; n > 0; n--) { +- *d = cpu_to_be32p((const __u32 *) s); +- s += sizeof(__u32); +- d++; +- } +-} +- +-static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize) +-{ +- u8 swap[QCE_AES_IV_LENGTH]; +- u32 i, j; +- +- if (ivsize > QCE_AES_IV_LENGTH) +- return; +- +- memset(swap, 0, QCE_AES_IV_LENGTH); +- +- for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1; +- i < QCE_AES_IV_LENGTH; i++, j--) +- swap[i] = src[j]; +- +- qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH); +-} +- +-static void qce_xtskey(struct qce_device *qce, const u8 *enckey, +- unsigned int enckeylen, unsigned int cryptlen) +-{ +- u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; +- unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); +- unsigned int xtsdusize; +- +- qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2, +- enckeylen / 2); +- qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); +- +- /* xts du size 512B */ +- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); +- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); +-} +- +-static void qce_setup_config(struct qce_device *qce) +-{ +- u32 config; +- +- /* get big endianness */ +- config = qce_config_reg(qce, 0); +- +- /* clear status */ +- qce_write(qce, REG_STATUS, 0); +- qce_write(qce, REG_CONFIG, config); +-} +- +-static inline void qce_crypto_go(struct qce_device *qce) +-{ +- qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT)); +-} +- + static int qce_setup_regs_ahash(struct crypto_async_request *async_req, + u32 totallen, u32 offset) + { +@@ -303,6 +225,87 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req, + + return 0; + } ++#endif ++ ++#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER ++static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size) ++{ ++ u32 cfg = 0; ++ ++ if (IS_AES(flags)) { ++ if (aes_key_size == AES_KEYSIZE_128) ++ cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT; ++ else if (aes_key_size == AES_KEYSIZE_256) ++ cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT; ++ } ++ ++ if (IS_AES(flags)) ++ cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT; ++ else if (IS_DES(flags) || IS_3DES(flags)) ++ cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT; ++ ++ if (IS_DES(flags)) ++ cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT; ++ ++ if (IS_3DES(flags)) ++ cfg |= 
ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT; ++ ++ switch (flags & QCE_MODE_MASK) { ++ case QCE_MODE_ECB: ++ cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT; ++ break; ++ case QCE_MODE_CBC: ++ cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT; ++ break; ++ case QCE_MODE_CTR: ++ cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT; ++ break; ++ case QCE_MODE_XTS: ++ cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT; ++ break; ++ case QCE_MODE_CCM: ++ cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT; ++ cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT; ++ break; ++ default: ++ return ~0; ++ } ++ ++ return cfg; ++} ++ ++static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize) ++{ ++ u8 swap[QCE_AES_IV_LENGTH]; ++ u32 i, j; ++ ++ if (ivsize > QCE_AES_IV_LENGTH) ++ return; ++ ++ memset(swap, 0, QCE_AES_IV_LENGTH); ++ ++ for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1; ++ i < QCE_AES_IV_LENGTH; i++, j--) ++ swap[i] = src[j]; ++ ++ qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH); ++} ++ ++static void qce_xtskey(struct qce_device *qce, const u8 *enckey, ++ unsigned int enckeylen, unsigned int cryptlen) ++{ ++ u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; ++ unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); ++ unsigned int xtsdusize; ++ ++ qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2, ++ enckeylen / 2); ++ qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); ++ ++ /* xts du size 512B */ ++ xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); ++ qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); ++} + + static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, + u32 totallen, u32 offset) +@@ -384,15 +387,20 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, + + return 0; + } ++#endif + + int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, + u32 offset) + { + switch (type) { ++#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER + case CRYPTO_ALG_TYPE_SKCIPHER: + return qce_setup_regs_skcipher(async_req, totallen, offset); ++#endif ++#ifdef CONFIG_CRYPTO_DEV_QCE_SHA + case CRYPTO_ALG_TYPE_AHASH: + return qce_setup_regs_ahash(async_req, totallen, offset); ++#endif + default: + return -EINVAL; + } +diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c +index 0a44a6eeacf5..cb6d61eb7302 100644 +--- a/drivers/crypto/qce/core.c ++++ b/drivers/crypto/qce/core.c +@@ -22,8 +22,12 @@ + #define QCE_QUEUE_LENGTH 1 + + static const struct qce_algo_ops *qce_ops[] = { ++#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER + &skcipher_ops, ++#endif ++#ifdef CONFIG_CRYPTO_DEV_QCE_SHA + &ahash_ops, ++#endif + }; + + static void qce_unregister_algs(struct qce_device *qce) diff --git a/target/linux/ipq40xx/patches-5.4/047-crypto-qce-use-cryptlen-when-adding-extra-sgl.patch b/target/linux/ipq40xx/patches-5.4/047-crypto-qce-use-cryptlen-when-adding-extra-sgl.patch new file mode 100644 index 000000000..21fd416f3 --- /dev/null +++ b/target/linux/ipq40xx/patches-5.4/047-crypto-qce-use-cryptlen-when-adding-extra-sgl.patch @@ -0,0 +1,91 @@ +From 1cc87f7819f115c0b4df46f2ec1bcd5494669b8f Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Fri, 7 Feb 2020 12:02:25 -0300 +Subject: [PATCH] crypto: qce - use cryptlen when adding extra sgl + +The qce crypto driver appends an extra entry to the dst sgl, to maintain +private state information. + +When the gcm driver sends requests to the ctr skcipher, it passes the +authentication tag after the actual crypto payload, but it must not be +touched. 
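A picture of why the entry-count limit from the earlier patch is not enough (hypothetical layout, not driver code): gcm may hand the driver a destination scatterlist in which ciphertext and tag share a single entry,

    /*
     *   req->dst, one sg entry:
     *   [ req->cryptlen bytes of ciphertext | 16-byte auth tag ]
     *
     * copying "all entries but the last" still maps the tag bytes for
     * DMA; clamping the copied length stops exactly at cryptlen:
     */
    new_len = new_sgl->length > max_len ? max_len : new_sgl->length;
    sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset);

The two statements above are the core of the dma.c change in the hunks that follow.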
+ +Commit 1336c2221bee ("crypto: qce - save a sg table slot for result +buf") limited the destination sgl to avoid overwriting the +authentication tag but it assumed the tag would be in a separate sgl +entry. + +This is not always the case, so it is better to limit the length of the +destination buffer to req->cryptlen before appending the result buf. + +Signed-off-by: Eneas U de Queiroz +Signed-off-by: Herbert Xu +(cherry picked from commit d6364b8128439a8c0e381f80c38667de9f15eef8) + +diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c +index a1a8c1221fbe..458ce5b4f3d8 100644 +--- a/drivers/crypto/qce/dma.c ++++ b/drivers/crypto/qce/dma.c +@@ -48,9 +48,10 @@ void qce_dma_release(struct qce_dma_data *dma) + + struct scatterlist * + qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl, +- int max_ents) ++ unsigned int max_len) + { + struct scatterlist *sg = sgt->sgl, *sg_last = NULL; ++ unsigned int new_len; + + while (sg) { + if (!sg_page(sg)) +@@ -61,13 +62,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl, + if (!sg) + return ERR_PTR(-EINVAL); + +- while (new_sgl && sg && max_ents) { +- sg_set_page(sg, sg_page(new_sgl), new_sgl->length, +- new_sgl->offset); ++ while (new_sgl && sg && max_len) { ++ new_len = new_sgl->length > max_len ? max_len : new_sgl->length; ++ sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset); + sg_last = sg; + sg = sg_next(sg); + new_sgl = sg_next(new_sgl); +- max_ents--; ++ max_len -= new_len; + } + + return sg_last; +diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h +index ed25a0d9829e..786402169360 100644 +--- a/drivers/crypto/qce/dma.h ++++ b/drivers/crypto/qce/dma.h +@@ -43,6 +43,6 @@ void qce_dma_issue_pending(struct qce_dma_data *dma); + int qce_dma_terminate_all(struct qce_dma_data *dma); + struct scatterlist * + qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add, +- int max_ents); ++ unsigned int max_len); + + #endif /* _DMA_H_ */ +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index 4217b745f124..63ae75809cb7 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -97,13 +97,14 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req) + + sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); + +- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1); ++ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; + } + +- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1); ++ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, ++ QCE_RESULT_BUF_SZ); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; diff --git a/target/linux/ipq40xx/patches-5.4/048-crypto-qce-use-AES-fallback-for-small-requests.patch b/target/linux/ipq40xx/patches-5.4/048-crypto-qce-use-AES-fallback-for-small-requests.patch new file mode 100644 index 000000000..048ad642c --- /dev/null +++ b/target/linux/ipq40xx/patches-5.4/048-crypto-qce-use-AES-fallback-for-small-requests.patch @@ -0,0 +1,117 @@ +From 45c2be777b4e643b831451428d4b07a78e90354d Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Fri, 7 Feb 2020 12:02:26 -0300 +Subject: [PATCH] crypto: qce - use AES fallback for small requests + +Process small blocks using the fallback cipher, as a workaround for an +observed failure (DMA-related, apparently) when computing the GCM ghash +key. 
This brings a speed gain as well, since it avoids the latency of +using the hardware engine to process small blocks. + +Using software for all 16-byte requests would be enough to make GCM +work, but to increase performance, a larger threshold would be better. +Measuring the performance of supported ciphers with openssl speed, +software matches hardware at around 768-1024 bytes. + +Considering the 256-bit ciphers, software is 2-3 times faster than qce +at 256-bytes, 30% faster at 512, and about even at 768-bytes. With +128-bit keys, the break-even point would be around 1024-bytes. + +This adds the 'aes_sw_max_len' parameter, to set the largest request +length processed by the software fallback. Its default is set to +512 bytes, a little lower than the break-even point, to balance the cost +in CPU usage. + +Signed-off-by: Eneas U de Queiroz +Signed-off-by: Herbert Xu +(cherry picked from commit ce163ba0bf298f1707321ac025ef639f88e62801) + +diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig +index 89c40658b246..fde2af138ad4 100644 +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -681,6 +681,29 @@ choice + + endchoice + ++config CRYPTO_DEV_QCE_SW_MAX_LEN ++ int "Default maximum request size to use software for AES" ++ depends on CRYPTO_DEV_QCE && CRYPTO_DEV_QCE_SKCIPHER ++ default 512 ++ help ++ This sets the default maximum request size to perform AES requests ++ using software instead of the crypto engine. It can be changed by ++ setting the aes_sw_max_len parameter. ++ ++ Small blocks are processed faster in software than hardware. ++ Considering the 256-bit ciphers, software is 2-3 times faster than ++ qce at 256-bytes, 30% faster at 512, and about even at 768-bytes. ++ With 128-bit keys, the break-even point would be around 1024-bytes. ++ ++ The default is set a little lower, to 512 bytes, to balance the ++ cost in CPU usage. The minimum recommended setting is 16-bytes ++ (1 AES block), since AES-GCM will fail if you set it lower. ++ Setting this to zero will send all requests to the hardware. ++ ++ Note that 192-bit keys are not supported by the hardware and are ++ always processed by the software fallback, and all DES requests ++ are done by the hardware. ++ + config CRYPTO_DEV_QCOM_RNG + tristate "Qualcomm Random Number Generator Driver" + depends on ARCH_QCOM || COMPILE_TEST +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index 63ae75809cb7..fc7c940b5a43 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -5,6 +5,7 @@ + + #include <linux/device.h> + #include <linux/interrupt.h> ++#include <linux/moduleparam.h> + #include <linux/types.h> + #include <crypto/aes.h> + #include <crypto/internal/des.h> +@@ -12,6 +13,13 @@ + + #include "cipher.h" + ++static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN; ++module_param(aes_sw_max_len, uint, 0644); ++MODULE_PARM_DESC(aes_sw_max_len, ++ "Only use hardware for AES requests larger than this " ++ "[0=always use hardware; anything <16 breaks AES-GCM; default=" ++ __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]"); ++ + static LIST_HEAD(skcipher_algs); + + static void qce_skcipher_done(void *data) +@@ -166,15 +174,10 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, + switch (IS_XTS(flags) ?
keylen >> 1 : keylen) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_256: ++ memcpy(ctx->enc_key, key, keylen); + break; +- default: +- goto fallback; + } + +- ctx->enc_keylen = keylen; +- memcpy(ctx->enc_key, key, keylen); +- return 0; +-fallback: + ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); + if (!ret) + ctx->enc_keylen = keylen; +@@ -224,8 +227,9 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) + rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; + keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen; + +- if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 && +- keylen != AES_KEYSIZE_256) { ++ if (IS_AES(rctx->flags) && ++ ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) || ++ req->cryptlen <= aes_sw_max_len)) { + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_sync_tfm(subreq, ctx->fallback); diff --git a/target/linux/ipq40xx/patches-5.4/049-crypto-qce-handle-AES-XTS-cases-that-qce-fails.patch b/target/linux/ipq40xx/patches-5.4/049-crypto-qce-handle-AES-XTS-cases-that-qce-fails.patch new file mode 100644 index 000000000..f4519ec56 --- /dev/null +++ b/target/linux/ipq40xx/patches-5.4/049-crypto-qce-handle-AES-XTS-cases-that-qce-fails.patch @@ -0,0 +1,61 @@ +From 6a4eb14ec487918a7f6da7db5b2f3f7598c45bba Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz +Date: Fri, 7 Feb 2020 12:02:27 -0300 +Subject: [PATCH] crypto: qce - handle AES-XTS cases that qce fails + +QCE hangs when presented with an AES-XTS request whose length is larger +than QCE_SECTOR_SIZE (512-bytes), and is not a multiple of it. Let the +fallback cipher handle them. + +Signed-off-by: Eneas U de Queiroz +Signed-off-by: Herbert Xu +(cherry picked from commit 7f19380b2cfd412dcef2facefb3f6c62788864d7) + +diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c +index 629e7f34dc09..5006e74c40cd 100644 +--- a/drivers/crypto/qce/common.c ++++ b/drivers/crypto/qce/common.c +@@ -15,8 +15,6 @@ + #include "regs-v5.h" + #include "sha.h" + +-#define QCE_SECTOR_SIZE 512 +- + static inline u32 qce_read(struct qce_device *qce, u32 offset) + { + return readl(qce->base + offset); +diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h +index 282d4317470d..9f989cba0f1b 100644 +--- a/drivers/crypto/qce/common.h ++++ b/drivers/crypto/qce/common.h +@@ -12,6 +12,9 @@ + #include <crypto/hash.h> + #include <crypto/internal/skcipher.h> + ++/* xts du size */ ++#define QCE_SECTOR_SIZE 512 ++ + /* key size in bytes */ + #define QCE_SHA_HMAC_KEY_SIZE 64 + #define QCE_MAX_CIPHER_KEY_SIZE AES_KEYSIZE_256 +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index fc7c940b5a43..a4f6ec1b64c7 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -227,9 +227,14 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) + rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; + keylen = IS_XTS(rctx->flags) ?
ctx->enc_keylen >> 1 : ctx->enc_keylen; + ++ /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and ++ * is not a multiple of it; pass such requests to the fallback ++ */ + if (IS_AES(rctx->flags) && +- ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) || +- req->cryptlen <= aes_sw_max_len)) { ++ (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) || ++ req->cryptlen <= aes_sw_max_len) || ++ (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE && ++ req->cryptlen % QCE_SECTOR_SIZE))) { + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_sync_tfm(subreq, ctx->fallback);
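With the full series applied, /proc/crypto is a quick way to confirm which implementation backs a given algorithm: the QCE ciphers register driver names such as ctr-aes-qce at cra_priority 300. A hypothetical in-kernel smoke test, not part of this series, could verify the selection:

    #include <crypto/skcipher.h>
    #include <linux/err.h>
    #include <linux/printk.h>

    static int qce_smoke_check(void)
    {
        struct crypto_skcipher *tfm;

        /* resolves to the highest-priority cbc(aes) provider; expect
         * "cbc-aes-qce" when the QCE driver wins the selection */
        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        pr_info("cbc(aes) provider: %s\n",
                crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));

        crypto_free_skcipher(tfm);
        return 0;
    }

Because aes_sw_max_len is declared with mode 0644, the software/hardware threshold should also be tunable at runtime through /sys/module/qcrypto/parameters/aes_sw_max_len, without rebuilding the module.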