mirror of https://github.com/coolsnowwolf/lede.git
synced 2025-04-16 04:13:31 +00:00
2420 lines
73 KiB
Diff
From patchwork Wed Jul 6 09:03:40 2022
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: LABBE Corentin <clabbe@baylibre.com>
X-Patchwork-Id: 12907886
From: Corentin Labbe <clabbe@baylibre.com>
To: heiko@sntech.de,
 herbert@gondor.apana.org.au,
 krzysztof.kozlowski+dt@linaro.org,
 mturquette@baylibre.com,
 p.zabel@pengutronix.de,
 robh+dt@kernel.org,
 sboyd@kernel.org
Cc: linux-rockchip@lists.infradead.org,
 devicetree@vger.kernel.org,
 linux-arm-kernel@lists.infradead.org,
 linux-clk@vger.kernel.org,
 linux-crypto@vger.kernel.org,
 linux-kernel@vger.kernel.org,
 john@metanate.com,
 didi.debian@cknow.org,
 Corentin Labbe <clabbe@baylibre.com>
Subject: [PATCH v8 01/33] crypto: rockchip: use dev_err for error message
 about interrupt
Date: Wed, 6 Jul 2022 09:03:40 +0000
Message-Id: <20220706090412.806101-2-clabbe@baylibre.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20220706090412.806101-1-clabbe@baylibre.com>
References: <20220706090412.806101-1-clabbe@baylibre.com>

The interrupt is mandatory, so the message should be printed as an error.

Reviewed-by: John Keeping <john@metanate.com>
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
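
For context, a minimal sketch of the probe() error path this change is about
(abbreviated from the diff below, not a verbatim excerpt): the IRQ is
mandatory for the driver to operate, so a missing IRQ is now reported with
dev_err() instead of dev_warn() before probe() aborts.

    /* The interrupt is required: report loudly and fail the probe */
    crypto_info->irq = platform_get_irq(pdev, 0);
    if (crypto_info->irq < 0) {
        dev_err(&pdev->dev, "control Interrupt is not available.\n");
        err = crypto_info->irq;
        goto err_crypto;
    }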
---
 drivers/crypto/rockchip/rk3288_crypto.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -14,235 +14,162 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/clk.h>
 #include <linux/crypto.h>
 #include <linux/reset.h>
 
-static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
+static struct rockchip_ip rocklist = {
+    .dev_list = LIST_HEAD_INIT(rocklist.dev_list),
+    .lock = __SPIN_LOCK_UNLOCKED(rocklist.lock),
+};
+
+struct rk_crypto_info *get_rk_crypto(void)
 {
-    int err;
+    struct rk_crypto_info *first;
 
-    err = clk_prepare_enable(dev->sclk);
-    if (err) {
-        dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
-            __func__, __LINE__);
-        goto err_return;
-    }
-    err = clk_prepare_enable(dev->aclk);
-    if (err) {
-        dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
-            __func__, __LINE__);
-        goto err_aclk;
+    spin_lock(&rocklist.lock);
+    first = list_first_entry_or_null(&rocklist.dev_list,
+                     struct rk_crypto_info, list);
+    list_rotate_left(&rocklist.dev_list);
+    spin_unlock(&rocklist.lock);
+    return first;
+}
+
+static const struct rk_variant rk3288_variant = {
+    .num_clks = 4,
+    .rkclks = {
+        { "sclk", 150000000},
     }
-    err = clk_prepare_enable(dev->hclk);
-    if (err) {
-        dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
-            __func__, __LINE__);
-        goto err_hclk;
-    }
-    err = clk_prepare_enable(dev->dmaclk);
-    if (err) {
-        dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
-            __func__, __LINE__);
-        goto err_dmaclk;
-    }
-    return err;
-err_dmaclk:
-    clk_disable_unprepare(dev->hclk);
-err_hclk:
-    clk_disable_unprepare(dev->aclk);
-err_aclk:
-    clk_disable_unprepare(dev->sclk);
-err_return:
-    return err;
-}
+};
 
-static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
-{
-    clk_disable_unprepare(dev->dmaclk);
-    clk_disable_unprepare(dev->hclk);
-    clk_disable_unprepare(dev->aclk);
-    clk_disable_unprepare(dev->sclk);
-}
-
-static int check_alignment(struct scatterlist *sg_src,
-               struct scatterlist *sg_dst,
-               int align_mask)
-{
-    int in, out, align;
-
-    in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
-         IS_ALIGNED((uint32_t)sg_src->length, align_mask);
-    if (!sg_dst)
-        return in;
-    out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
-          IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
-    align = in && out;
-
-    return (align && (sg_src->length == sg_dst->length));
-}
-
-static int rk_load_data(struct rk_crypto_info *dev,
-            struct scatterlist *sg_src,
-            struct scatterlist *sg_dst)
-{
-    unsigned int count;
-
-    dev->aligned = dev->aligned ?
-        check_alignment(sg_src, sg_dst, dev->align_size) :
-        dev->aligned;
-    if (dev->aligned) {
-        count = min(dev->left_bytes, sg_src->length);
-        dev->left_bytes -= count;
-
-        if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
-            dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
-                __func__, __LINE__);
-            return -EINVAL;
-        }
-        dev->addr_in = sg_dma_address(sg_src);
+static const struct rk_variant rk3328_variant = {
+    .num_clks = 3,
+};
 
-        if (sg_dst) {
-            if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
-                dev_err(dev->dev,
-                    "[%s:%d] dma_map_sg(dst) error\n",
-                    __func__, __LINE__);
-                dma_unmap_sg(dev->dev, sg_src, 1,
-                         DMA_TO_DEVICE);
-                return -EINVAL;
-            }
-            dev->addr_out = sg_dma_address(sg_dst);
-        }
-    } else {
-        count = (dev->left_bytes > PAGE_SIZE) ?
-            PAGE_SIZE : dev->left_bytes;
-
-        if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
-                    dev->addr_vir, count,
-                    dev->total - dev->left_bytes)) {
-            dev_err(dev->dev, "[%s:%d] pcopy err\n",
-                __func__, __LINE__);
-            return -EINVAL;
-        }
-        dev->left_bytes -= count;
-        sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
-        if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
-            dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
-                __func__, __LINE__);
-            return -ENOMEM;
-        }
-        dev->addr_in = sg_dma_address(&dev->sg_tmp);
+static const struct rk_variant rk3399_variant = {
+    .num_clks = 3,
+};
+
+static int rk_crypto_get_clks(struct rk_crypto_info *dev)
+{
+    int i, j, err;
+    unsigned long cr;
 
-        if (sg_dst) {
-            if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
-                    DMA_FROM_DEVICE)) {
-                dev_err(dev->dev,
-                    "[%s:%d] dma_map_sg(sg_tmp) error\n",
-                    __func__, __LINE__);
-                dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
-                         DMA_TO_DEVICE);
-                return -ENOMEM;
+    dev->num_clks = devm_clk_bulk_get_all(dev->dev, &dev->clks);
+    if (dev->num_clks < dev->variant->num_clks) {
+        dev_err(dev->dev, "Missing clocks, got %d instead of %d\n",
+            dev->num_clks, dev->variant->num_clks);
+        return -EINVAL;
+    }
+
+    for (i = 0; i < dev->num_clks; i++) {
+        cr = clk_get_rate(dev->clks[i].clk);
+        for (j = 0; j < ARRAY_SIZE(dev->variant->rkclks); j++) {
+            if (dev->variant->rkclks[j].max == 0)
+                continue;
+            if (strcmp(dev->variant->rkclks[j].name, dev->clks[i].id))
+                continue;
+            if (cr > dev->variant->rkclks[j].max) {
+                err = clk_set_rate(dev->clks[i].clk,
+                           dev->variant->rkclks[j].max);
+                if (err)
+                    dev_err(dev->dev, "Fail downclocking %s from %lu to %lu\n",
+                        dev->variant->rkclks[j].name, cr,
+                        dev->variant->rkclks[j].max);
+                else
+                    dev_info(dev->dev, "Downclocking %s from %lu to %lu\n",
+                         dev->variant->rkclks[j].name, cr,
+                         dev->variant->rkclks[j].max);
             }
-            dev->addr_out = sg_dma_address(&dev->sg_tmp);
         }
     }
-    dev->count = count;
     return 0;
 }
 
-static void rk_unload_data(struct rk_crypto_info *dev)
+static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
 {
-    struct scatterlist *sg_in, *sg_out;
+    int err;
 
-    sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
-    dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);
+    err = clk_bulk_prepare_enable(dev->num_clks, dev->clks);
+    if (err)
+        dev_err(dev->dev, "Could not enable clock clks\n");
 
-    if (dev->sg_dst) {
-        sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
-        dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
-    }
+    return err;
 }
 
-static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
+static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
 {
-    struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
-    u32 interrupt_status;
+    clk_bulk_disable_unprepare(dev->num_clks, dev->clks);
+}
 
-    spin_lock(&dev->lock);
-    interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
-    CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+/*
+ * Power management strategy: The device is suspended until a request
+ * is handled. For avoiding suspend/resume yoyo, the autosuspend is set to 2s.
+ */
+static int rk_crypto_pm_suspend(struct device *dev)
+{
+    struct rk_crypto_info *rkdev = dev_get_drvdata(dev);
 
-    if (interrupt_status & 0x0a) {
-        dev_warn(dev->dev, "DMA Error\n");
-        dev->err = -EFAULT;
-    }
-    tasklet_schedule(&dev->done_task);
+    rk_crypto_disable_clk(rkdev);
+    reset_control_assert(rkdev->rst);
 
-    spin_unlock(&dev->lock);
-    return IRQ_HANDLED;
+    return 0;
 }
 
-static int rk_crypto_enqueue(struct rk_crypto_info *dev,
-                  struct crypto_async_request *async_req)
+static int rk_crypto_pm_resume(struct device *dev)
 {
-    unsigned long flags;
+    struct rk_crypto_info *rkdev = dev_get_drvdata(dev);
     int ret;
 
-    spin_lock_irqsave(&dev->lock, flags);
-    ret = crypto_enqueue_request(&dev->queue, async_req);
-    if (dev->busy) {
-        spin_unlock_irqrestore(&dev->lock, flags);
+    ret = rk_crypto_enable_clk(rkdev);
+    if (ret)
        return ret;
-    }
-    dev->busy = true;
-    spin_unlock_irqrestore(&dev->lock, flags);
-    tasklet_schedule(&dev->queue_task);
 
-    return ret;
-}
+    reset_control_deassert(rkdev->rst);
+    return 0;
 
-static void rk_crypto_queue_task_cb(unsigned long data)
-{
-    struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
-    struct crypto_async_request *async_req, *backlog;
-    unsigned long flags;
-    int err = 0;
+}
 
-    dev->err = 0;
-    spin_lock_irqsave(&dev->lock, flags);
-    backlog = crypto_get_backlog(&dev->queue);
-    async_req = crypto_dequeue_request(&dev->queue);
+static const struct dev_pm_ops rk_crypto_pm_ops = {
+    SET_RUNTIME_PM_OPS(rk_crypto_pm_suspend, rk_crypto_pm_resume, NULL)
+};
 
-    if (!async_req) {
-        dev->busy = false;
-        spin_unlock_irqrestore(&dev->lock, flags);
-        return;
-    }
-    spin_unlock_irqrestore(&dev->lock, flags);
+static int rk_crypto_pm_init(struct rk_crypto_info *rkdev)
+{
+    int err;
 
-    if (backlog) {
-        backlog->complete(backlog, -EINPROGRESS);
-        backlog = NULL;
-    }
+    pm_runtime_use_autosuspend(rkdev->dev);
+    pm_runtime_set_autosuspend_delay(rkdev->dev, 2000);
 
-    dev->async_req = async_req;
-    err = dev->start(dev);
+    err = pm_runtime_set_suspended(rkdev->dev);
     if (err)
-        dev->complete(dev->async_req, err);
+        return err;
+    pm_runtime_enable(rkdev->dev);
+    return err;
 }
 
-static void rk_crypto_done_task_cb(unsigned long data)
+static void rk_crypto_pm_exit(struct rk_crypto_info *rkdev)
+{
+    pm_runtime_disable(rkdev->dev);
+}
+
+static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
 {
-    struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+    struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
+    u32 interrupt_status;
+
+    interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
+    CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
 
-    if (dev->err) {
-        dev->complete(dev->async_req, dev->err);
-        return;
+    dev->status = 1;
+    if (interrupt_status & 0x0a) {
+        dev_warn(dev->dev, "DMA Error\n");
+        dev->status = 0;
     }
+    complete(&dev->complete);
 
-    dev->err = dev->update(dev);
-    if (dev->err)
-        dev->complete(dev->async_req, dev->err);
+    return IRQ_HANDLED;
 }
 
 static struct rk_crypto_tmp *rk_cipher_algs[] = {
@@ -257,6 +184,62 @@ static struct rk_crypto_tmp *rk_cipher_a
    &rk_ahash_md5,
 };
 
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
+{
+    struct rk_crypto_info *dd;
+    unsigned int i;
+
+    spin_lock(&rocklist.lock);
+    list_for_each_entry(dd, &rocklist.dev_list, list) {
+        seq_printf(seq, "%s %s requests: %lu\n",
+               dev_driver_string(dd->dev), dev_name(dd->dev),
+               dd->nreq);
+    }
+    spin_unlock(&rocklist.lock);
+
+    for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+        if (!rk_cipher_algs[i]->dev)
+            continue;
+        switch (rk_cipher_algs[i]->type) {
+        case CRYPTO_ALG_TYPE_SKCIPHER:
+            seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+                   rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name,
+                   rk_cipher_algs[i]->alg.skcipher.base.cra_name,
+                   rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
+            seq_printf(seq, "\tfallback due to length: %lu\n",
+                   rk_cipher_algs[i]->stat_fb_len);
+            seq_printf(seq, "\tfallback due to alignment: %lu\n",
+                   rk_cipher_algs[i]->stat_fb_align);
+            seq_printf(seq, "\tfallback due to SGs: %lu\n",
+                   rk_cipher_algs[i]->stat_fb_sgdiff);
+            break;
+        case CRYPTO_ALG_TYPE_AHASH:
+            seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+                   rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name,
+                   rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
+                   rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
+            break;
+        }
+    }
+    return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs);
+#endif
+
+static void register_debugfs(struct rk_crypto_info *crypto_info)
+{
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+    /* Ignore error of debugfs */
+    rocklist.dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
+    rocklist.dbgfs_stats = debugfs_create_file("stats", 0444,
+                           rocklist.dbgfs_dir,
+                           &rocklist,
+                           &rk_crypto_debugfs_fops);
+#endif
+}
+
 static int rk_crypto_register(struct rk_crypto_info *crypto_info)
 {
    unsigned int i, k;
@@ -264,12 +247,22 @@ static int rk_crypto_register(struct rk_
 
    for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
        rk_cipher_algs[i]->dev = crypto_info;
-        if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
-            err = crypto_register_skcipher(
-                    &rk_cipher_algs[i]->alg.skcipher);
-        else
-            err = crypto_register_ahash(
-                    &rk_cipher_algs[i]->alg.hash);
+        switch (rk_cipher_algs[i]->type) {
+        case CRYPTO_ALG_TYPE_SKCIPHER:
+            dev_info(crypto_info->dev, "Register %s as %s\n",
+                 rk_cipher_algs[i]->alg.skcipher.base.cra_name,
+                 rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name);
+            err = crypto_register_skcipher(&rk_cipher_algs[i]->alg.skcipher);
+            break;
+        case CRYPTO_ALG_TYPE_AHASH:
+            dev_info(crypto_info->dev, "Register %s as %s\n",
+                 rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
+                 rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name);
+            err = crypto_register_ahash(&rk_cipher_algs[i]->alg.hash);
+            break;
+        default:
+            dev_err(crypto_info->dev, "unknown algorithm\n");
+        }
        if (err)
            goto err_cipher_algs;
    }
@@ -277,7 +270,7 @@ static int rk_crypto_register(struct rk_
 
 err_cipher_algs:
    for (k = 0; k < i; k++) {
-        if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+        if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER)
            crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
        else
            crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
@@ -290,22 +283,23 @@ static void rk_crypto_unregister(void)
    unsigned int i;
 
    for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
-        if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+        if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER)
            crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
        else
            crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
    }
 }
 
-static void rk_crypto_action(void *data)
-{
-    struct rk_crypto_info *crypto_info = data;
-
-    reset_control_assert(crypto_info->rst);
-}
-
 static const struct of_device_id crypto_of_id_table[] = {
-    { .compatible = "rockchip,rk3288-crypto" },
+    { .compatible = "rockchip,rk3288-crypto",
+      .data = &rk3288_variant,
+    },
+    { .compatible = "rockchip,rk3328-crypto",
+      .data = &rk3328_variant,
+    },
+    { .compatible = "rockchip,rk3399-crypto",
+      .data = &rk3399_variant,
+    },
    {}
 };
 MODULE_DEVICE_TABLE(of, crypto_of_id_table);
@@ -313,7 +307,7 @@ MODULE_DEVICE_TABLE(of, crypto_of_id_tab
 static int rk_crypto_probe(struct platform_device *pdev)
 {
    struct device *dev = &pdev->dev;
-    struct rk_crypto_info *crypto_info;
+    struct rk_crypto_info *crypto_info, *first;
    int err = 0;
 
    crypto_info = devm_kzalloc(&pdev->dev,
@@ -323,7 +317,16 @@ static int rk_crypto_probe(struct platfo
        goto err_crypto;
    }
 
-    crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
+    crypto_info->dev = &pdev->dev;
+    platform_set_drvdata(pdev, crypto_info);
+
+    crypto_info->variant = of_device_get_match_data(&pdev->dev);
+    if (!crypto_info->variant) {
+        dev_err(&pdev->dev, "Missing variant\n");
+        return -EINVAL;
+    }
+
+    crypto_info->rst = devm_reset_control_array_get_exclusive(dev);
    if (IS_ERR(crypto_info->rst)) {
        err = PTR_ERR(crypto_info->rst);
        goto err_crypto;
@@ -333,46 +336,19 @@ static int rk_crypto_probe(struct platfo
    usleep_range(10, 20);
    reset_control_deassert(crypto_info->rst);
 
-    err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
-    if (err)
-        goto err_crypto;
-
-    spin_lock_init(&crypto_info->lock);
-
    crypto_info->reg = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(crypto_info->reg)) {
        err = PTR_ERR(crypto_info->reg);
        goto err_crypto;
    }
 
-    crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
-    if (IS_ERR(crypto_info->aclk)) {
-        err = PTR_ERR(crypto_info->aclk);
-        goto err_crypto;
-    }
-
-    crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
-    if (IS_ERR(crypto_info->hclk)) {
-        err = PTR_ERR(crypto_info->hclk);
-        goto err_crypto;
-    }
-
-    crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
-    if (IS_ERR(crypto_info->sclk)) {
-        err = PTR_ERR(crypto_info->sclk);
-        goto err_crypto;
-    }
-
-    crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
-    if (IS_ERR(crypto_info->dmaclk)) {
-        err = PTR_ERR(crypto_info->dmaclk);
+    err = rk_crypto_get_clks(crypto_info);
+    if (err)
        goto err_crypto;
-    }
 
    crypto_info->irq = platform_get_irq(pdev, 0);
    if (crypto_info->irq < 0) {
-        dev_warn(crypto_info->dev,
-             "control Interrupt is not available.\n");
+        dev_err(&pdev->dev, "control Interrupt is not available.\n");
        err = crypto_info->irq;
        goto err_crypto;
    }
@@ -382,49 +358,64 @@ static int rk_crypto_probe(struct platfo
                   "rk-crypto", pdev);
 
    if (err) {
-        dev_err(crypto_info->dev, "irq request failed.\n");
+        dev_err(&pdev->dev, "irq request failed.\n");
        goto err_crypto;
    }
 
-    crypto_info->dev = &pdev->dev;
-    platform_set_drvdata(pdev, crypto_info);
+    crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true);
+    crypto_engine_start(crypto_info->engine);
+    init_completion(&crypto_info->complete);
 
-    tasklet_init(&crypto_info->queue_task,
-             rk_crypto_queue_task_cb, (unsigned long)crypto_info);
-    tasklet_init(&crypto_info->done_task,
-             rk_crypto_done_task_cb, (unsigned long)crypto_info);
-    crypto_init_queue(&crypto_info->queue, 50);
-
-    crypto_info->enable_clk = rk_crypto_enable_clk;
-    crypto_info->disable_clk = rk_crypto_disable_clk;
-    crypto_info->load_data = rk_load_data;
-    crypto_info->unload_data = rk_unload_data;
-    crypto_info->enqueue = rk_crypto_enqueue;
-    crypto_info->busy = false;
+    err = rk_crypto_pm_init(crypto_info);
+    if (err)
+        goto err_pm;
 
-    err = rk_crypto_register(crypto_info);
-    if (err) {
-        dev_err(dev, "err in register alg");
-        goto err_register_alg;
+    spin_lock(&rocklist.lock);
+    first = list_first_entry_or_null(&rocklist.dev_list,
+                     struct rk_crypto_info, list);
+    list_add_tail(&crypto_info->list, &rocklist.dev_list);
+    spin_unlock(&rocklist.lock);
+
+    if (!first) {
+        err = rk_crypto_register(crypto_info);
+        if (err) {
+            dev_err(dev, "Fail to register crypto algorithms");
+            goto err_register_alg;
+        }
+
+        register_debugfs(crypto_info);
    }
 
-    dev_info(dev, "Crypto Accelerator successfully registered\n");
    return 0;
 
 err_register_alg:
-    tasklet_kill(&crypto_info->queue_task);
-    tasklet_kill(&crypto_info->done_task);
+    rk_crypto_pm_exit(crypto_info);
+err_pm:
+    crypto_engine_exit(crypto_info->engine);
 err_crypto:
+    dev_err(dev, "Crypto Accelerator not successfully registered\n");
    return err;
 }
 
 static int rk_crypto_remove(struct platform_device *pdev)
 {
    struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
+    struct rk_crypto_info *first;
 
-    rk_crypto_unregister();
-    tasklet_kill(&crypto_tmp->done_task);
-    tasklet_kill(&crypto_tmp->queue_task);
+    spin_lock_bh(&rocklist.lock);
+    list_del(&crypto_tmp->list);
+    first = list_first_entry_or_null(&rocklist.dev_list,
+                     struct rk_crypto_info, list);
+    spin_unlock_bh(&rocklist.lock);
+
+    if (!first) {
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+        debugfs_remove_recursive(rocklist.dbgfs_dir);
+#endif
+        rk_crypto_unregister();
+    }
+    rk_crypto_pm_exit(crypto_tmp);
+    crypto_engine_exit(crypto_tmp->engine);
    return 0;
 }
 
@@ -433,6 +424,7 @@ static struct platform_driver crypto_dri
    .remove = rk_crypto_remove,
    .driver = {
        .name = "rk3288-crypto",
+        .pm = &rk_crypto_pm_ops,
        .of_match_table = crypto_of_id_table,
    },
 };
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -5,9 +5,13 @@
 #include <crypto/aes.h>
 #include <crypto/internal/des.h>
 #include <crypto/algapi.h>
+#include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/debugfs.h>
 #include <linux/delay.h>
+#include <linux/pm_runtime.h>
 #include <linux/scatterlist.h>
+#include <crypto/engine.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 
@@ -184,85 +188,91 @@
 #define CRYPTO_WRITE(dev, offset, val) \
        writel_relaxed((val), ((dev)->reg + (offset)))
 
+#define RK_MAX_CLKS 4
+
+/*
+ * struct rockchip_ip - struct for managing a list of RK crypto instance
+ * @dev_list:       Used for doing a list of rk_crypto_info
+ * @lock:           Control access to dev_list
+ * @dbgfs_dir:      Debugfs dentry for statistic directory
+ * @dbgfs_stats:    Debugfs dentry for statistic counters
+ */
+struct rockchip_ip {
+    struct list_head    dev_list;
+    spinlock_t          lock; /* Control access to dev_list */
+    struct dentry       *dbgfs_dir;
+    struct dentry       *dbgfs_stats;
+};
+
+struct rk_clks {
+    const char *name;
+    unsigned long max;
+};
+
+struct rk_variant {
+    int num_clks;
+    struct rk_clks rkclks[RK_MAX_CLKS];
+};
+
 struct rk_crypto_info {
+    struct list_head        list;
    struct device           *dev;
-    struct clk              *aclk;
-    struct clk              *hclk;
-    struct clk              *sclk;
-    struct clk              *dmaclk;
+    struct clk_bulk_data    *clks;
+    int                     num_clks;
    struct reset_control    *rst;
    void __iomem            *reg;
    int                     irq;
-    struct crypto_queue     queue;
-    struct tasklet_struct   queue_task;
-    struct tasklet_struct   done_task;
-    struct crypto_async_request *async_req;
-    int                     err;
-    /* device lock */
-    spinlock_t              lock;
-
-    /* the public variable */
-    struct scatterlist      *sg_src;
-    struct scatterlist      *sg_dst;
-    struct scatterlist      sg_tmp;
-    struct scatterlist      *first;
-    unsigned int            left_bytes;
-    void                    *addr_vir;
-    int                     aligned;
-    int                     align_size;
-    size_t                  src_nents;
-    size_t                  dst_nents;
-    unsigned int            total;
-    unsigned int            count;
-    dma_addr_t              addr_in;
-    dma_addr_t              addr_out;
-    bool                    busy;
-    int (*start)(struct rk_crypto_info *dev);
-    int (*update)(struct rk_crypto_info *dev);
-    void (*complete)(struct crypto_async_request *base, int err);
-    int (*enable_clk)(struct rk_crypto_info *dev);
-    void (*disable_clk)(struct rk_crypto_info *dev);
-    int (*load_data)(struct rk_crypto_info *dev,
-             struct scatterlist *sg_src,
-             struct scatterlist *sg_dst);
-    void (*unload_data)(struct rk_crypto_info *dev);
-    int (*enqueue)(struct rk_crypto_info *dev,
-               struct crypto_async_request *async_req);
+    const struct rk_variant *variant;
+    unsigned long nreq;
+    struct crypto_engine *engine;
+    struct completion complete;
+    int status;
 };
 
 /* the private variable of hash */
 struct rk_ahash_ctx {
-    struct rk_crypto_info       *dev;
+    struct crypto_engine_ctx enginectx;
    /* for fallback */
    struct crypto_ahash         *fallback_tfm;
 };
 
-/* the privete variable of hash for fallback */
+/* the private variable of hash for fallback */
 struct rk_ahash_rctx {
+    struct rk_crypto_info       *dev;
    struct ahash_request        fallback_req;
    u32                         mode;
+    int nrsg;
 };
 
 /* the private variable of cipher */
 struct rk_cipher_ctx {
-    struct rk_crypto_info       *dev;
+    struct crypto_engine_ctx enginectx;
    unsigned int                keylen;
-    u32                         mode;
+    u8                          key[AES_MAX_KEY_SIZE];
    u8                          iv[AES_BLOCK_SIZE];
+    struct crypto_skcipher *fallback_tfm;
 };
 
-enum alg_type {
-    ALG_TYPE_HASH,
-    ALG_TYPE_CIPHER,
+struct rk_cipher_rctx {
+    struct rk_crypto_info       *dev;
+    u8 backup_iv[AES_BLOCK_SIZE];
+    u32                         mode;
+    struct skcipher_request fallback_req;   // keep at the end
 };
 
 struct rk_crypto_tmp {
-    struct rk_crypto_info       *dev;
+    u32 type;
+    struct rk_crypto_info       *dev;
    union {
        struct skcipher_alg     skcipher;
        struct ahash_alg        hash;
    } alg;
-    enum alg_type               type;
+    unsigned long stat_req;
+    unsigned long stat_fb;
+    unsigned long stat_fb_len;
+    unsigned long stat_fb_sglen;
+    unsigned long stat_fb_align;
+    unsigned long stat_fb_sgdiff;
 };
 
 extern struct rk_crypto_tmp rk_ecb_aes_alg;
@@ -276,4 +286,5 @@ extern struct rk_crypto_tmp rk_ahash_sha
 extern struct rk_crypto_tmp rk_ahash_sha256;
 extern struct rk_crypto_tmp rk_ahash_md5;
 
+struct rk_crypto_info *get_rk_crypto(void);
 #endif
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -9,6 +9,8 @@
  * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
  */
 #include <linux/device.h>
+#include <asm/unaligned.h>
+#include <linux/iopoll.h>
 #include "rk3288_crypto.h"
 
 /*
@@ -16,6 +18,44 @@
  * so we put the fixed hash out when met zero message.
  */
 
+static bool rk_ahash_need_fallback(struct ahash_request *req)
+{
+    struct scatterlist *sg;
+
+    sg = req->src;
+    while (sg) {
+        if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+            return true;
+        }
+        if (sg->length % 4) {
+            return true;
+        }
+        sg = sg_next(sg);
+    }
+    return false;
+}
+
+static int rk_ahash_digest_fb(struct ahash_request *areq)
+{
+    struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+    struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+    struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
+    struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+    struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+
+    algt->stat_fb++;
+
+    ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+    rctx->fallback_req.base.flags = areq->base.flags &
+                    CRYPTO_TFM_REQ_MAY_SLEEP;
+
+    rctx->fallback_req.nbytes = areq->nbytes;
+    rctx->fallback_req.src = areq->src;
+    rctx->fallback_req.result = areq->result;
+
+    return crypto_ahash_digest(&rctx->fallback_req);
+}
+
 static int zero_message_process(struct ahash_request *req)
 {
    struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -38,15 +78,9 @@ static int zero_message_process(struct a
    return 0;
 }
 
-static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
-{
-    if (base->complete)
-        base->complete(base, err);
-}
-
-static void rk_ahash_reg_init(struct rk_crypto_info *dev)
+static void rk_ahash_reg_init(struct ahash_request *req,
+                  struct rk_crypto_info *dev)
 {
-    struct ahash_request *req = ahash_request_cast(dev->async_req);
    struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
    int reg_status;
 
@@ -74,7 +108,7 @@ static void rk_ahash_reg_init(struct rk_
                      RK_CRYPTO_BYTESWAP_BRFIFO |
                      RK_CRYPTO_BYTESWAP_BTFIFO);
 
-    CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
+    CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes);
 }
 
 static int rk_ahash_init(struct ahash_request *req)
@@ -164,51 +198,80 @@ static int rk_ahash_export(struct ahash_
 
 static int rk_ahash_digest(struct ahash_request *req)
 {
-    struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
-    struct rk_crypto_info *dev = tctx->dev;
+    struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+    struct rk_crypto_info *dev;
+    struct crypto_engine *engine;
+
+    if (rk_ahash_need_fallback(req))
+        return rk_ahash_digest_fb(req);
 
    if (!req->nbytes)
        return zero_message_process(req);
-    else
-        return dev->enqueue(dev, &req->base);
+
+    dev = get_rk_crypto();
+
+    rctx->dev = dev;
+    engine = dev->engine;
+
+    return crypto_transfer_hash_request_to_engine(engine, req);
 }
 
-static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
+static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
 {
-    CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
-    CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
+    CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg));
+    CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4);
    CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
             (RK_CRYPTO_HASH_START << 16));
 }
 
-static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
+static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
+{
+    struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+    struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+    struct rk_crypto_info *rkc = rctx->dev;
+    int ret;
+
+    ret = dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+    if (ret <= 0)
+        return -EINVAL;
+
+    rctx->nrsg = ret;
+
+    return 0;
+}
+
+static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
+{
+    struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+    struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+    struct rk_crypto_info *rkc = rctx->dev;
+
+    dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
+    return 0;
+}
+
+static int rk_hash_run(struct crypto_engine *engine, void *breq)
 {
-    int err;
+    struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+    struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+    struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+    struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+    struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+    struct scatterlist *sg = areq->src;
+    struct rk_crypto_info *rkc = rctx->dev;
+    int err = 0;
+    int i;
+    u32 v;
+
+    err = pm_runtime_resume_and_get(rkc->dev);
+    if (err)
+        return err;
 
-    err = dev->load_data(dev, dev->sg_src, NULL);
-    if (!err)
-        crypto_ahash_dma_start(dev);
-    return err;
-}
-
-static int rk_ahash_start(struct rk_crypto_info *dev)
-{
-    struct ahash_request *req = ahash_request_cast(dev->async_req);
-    struct crypto_ahash *tfm;
-    struct rk_ahash_rctx *rctx;
-
-    dev->total = req->nbytes;
-    dev->left_bytes = req->nbytes;
-    dev->aligned = 0;
-    dev->align_size = 4;
-    dev->sg_dst = NULL;
-    dev->sg_src = req->src;
-    dev->first = req->src;
-    dev->src_nents = sg_nents(req->src);
-    rctx = ahash_request_ctx(req);
    rctx->mode = 0;
 
-    tfm = crypto_ahash_reqtfm(req);
+    algt->stat_req++;
+    rkc->nreq++;
+
    switch (crypto_ahash_digestsize(tfm)) {
    case SHA1_DIGEST_SIZE:
        rctx->mode = RK_CRYPTO_HASH_SHA1;
@@ -220,100 +283,88 @@ static int rk_ahash_start(struct rk_cryp
        rctx->mode = RK_CRYPTO_HASH_MD5;
        break;
    default:
-        return -EINVAL;
+        err = -EINVAL;
+        goto theend;
    }
 
-    rk_ahash_reg_init(dev);
-    return rk_ahash_set_data_start(dev);
-}
-
-static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
-{
-    int err = 0;
-    struct ahash_request *req = ahash_request_cast(dev->async_req);
-    struct crypto_ahash *tfm;
+    rk_ahash_reg_init(areq, rkc);
 
-    dev->unload_data(dev);
-    if (dev->left_bytes) {
-        if (dev->aligned) {
-            if (sg_is_last(dev->sg_src)) {
-                dev_warn(dev->dev, "[%s:%d], Lack of data\n",
-                     __func__, __LINE__);
-                err = -ENOMEM;
-                goto out_rx;
-            }
-            dev->sg_src = sg_next(dev->sg_src);
+    while (sg) {
+        reinit_completion(&rkc->complete);
+        rkc->status = 0;
+        crypto_ahash_dma_start(rkc, sg);
+        wait_for_completion_interruptible_timeout(&rkc->complete,
+                              msecs_to_jiffies(2000));
+        if (!rkc->status) {
+            dev_err(rkc->dev, "DMA timeout\n");
+            err = -EFAULT;
+            goto theend;
        }
-        err = rk_ahash_set_data_start(dev);
-    } else {
-        /*
-         * it will take some time to process date after last dma
-         * transmission.
-         *
-         * waiting time is relative with the last date len,
-         * so cannot set a fixed time here.
-         * 10us makes system not call here frequently wasting
-         * efficiency, and make it response quickly when dma
-         * complete.
-         */
-        while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
-            udelay(10);
-
-        tfm = crypto_ahash_reqtfm(req);
-        memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
-                  crypto_ahash_digestsize(tfm));
-        dev->complete(dev->async_req, 0);
-        tasklet_schedule(&dev->queue_task);
+        sg = sg_next(sg);
    }
+
+    /*
+     * it will take some time to process date after last dma
+     * transmission.
+     *
+     * waiting time is relative with the last date len,
+     * so cannot set a fixed time here.
+     * 10us makes system not call here frequently wasting
+     * efficiency, and make it response quickly when dma
+     * complete.
+     */
+    readl_poll_timeout(rkc->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000);
+
+    for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
+        v = readl(rkc->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
+        put_unaligned_le32(v, areq->result + i * 4);
    }
 
-out_rx:
-    return err;
+theend:
+    pm_runtime_put_autosuspend(rkc->dev);
+
+    local_bh_disable();
+    crypto_finalize_hash_request(engine, breq, err);
+    local_bh_enable();
+
+    return 0;
 }
 
 static int rk_cra_hash_init(struct crypto_tfm *tfm)
 {
    struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
-    struct rk_crypto_tmp *algt;
-    struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
-
    const char *alg_name = crypto_tfm_alg_name(tfm);
-
-    algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
-
-    tctx->dev = algt->dev;
-    tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
-    if (!tctx->dev->addr_vir) {
-        dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n");
-        return -ENOMEM;
-    }
-    tctx->dev->start = rk_ahash_start;
-    tctx->dev->update = rk_ahash_crypto_rx;
-    tctx->dev->complete = rk_ahash_crypto_complete;
+    struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+    struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
 
    /* for fallback */
    tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
-                           CRYPTO_ALG_NEED_FALLBACK);
+                        CRYPTO_ALG_NEED_FALLBACK);
    if (IS_ERR(tctx->fallback_tfm)) {
-        dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
+        dev_err(algt->dev->dev, "Could not load fallback driver.\n");
        return PTR_ERR(tctx->fallback_tfm);
    }
+
    crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                 sizeof(struct rk_ahash_rctx) +
                 crypto_ahash_reqsize(tctx->fallback_tfm));
 
-    return tctx->dev->enable_clk(tctx->dev);
+    tctx->enginectx.op.do_one_request = rk_hash_run;
+    tctx->enginectx.op.prepare_request = rk_hash_prepare;
+    tctx->enginectx.op.unprepare_request = rk_hash_unprepare;
+
+    return 0;
 }
 
 static void rk_cra_hash_exit(struct crypto_tfm *tfm)
 {
    struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
 
-    free_page((unsigned long)tctx->dev->addr_vir);
-    return tctx->dev->disable_clk(tctx->dev);
+    crypto_free_ahash(tctx->fallback_tfm);
 }
 
 struct rk_crypto_tmp rk_ahash_sha1 = {
-    .type = ALG_TYPE_HASH,
+    .type = CRYPTO_ALG_TYPE_AHASH,
    .alg.hash = {
        .init = rk_ahash_init,
        .update = rk_ahash_update,
@@ -337,13 +388,13 @@ struct rk_crypto_tmp rk_ahash_sha1 = {
                .cra_init = rk_cra_hash_init,
                .cra_exit = rk_cra_hash_exit,
                .cra_module = THIS_MODULE,
-            }
-    }
+            }
+        }
    }
 };
 
 struct rk_crypto_tmp rk_ahash_sha256 = {
-    .type = ALG_TYPE_HASH,
+    .type = CRYPTO_ALG_TYPE_AHASH,
    .alg.hash = {
        .init = rk_ahash_init,
        .update = rk_ahash_update,
@@ -367,13 +418,13 @@ struct rk_crypto_tmp rk_ahash_sha256 = {
                .cra_init = rk_cra_hash_init,
                .cra_exit = rk_cra_hash_exit,
                .cra_module = THIS_MODULE,
-            }
-    }
+            }
+        }
    }
 };
 
 struct rk_crypto_tmp rk_ahash_md5 = {
-    .type = ALG_TYPE_HASH,
+    .type = CRYPTO_ALG_TYPE_AHASH,
    .alg.hash = {
        .init = rk_ahash_init,
        .update = rk_ahash_update,
@@ -397,7 +448,7 @@ struct rk_crypto_tmp rk_ahash_md5 = {
                .cra_init = rk_cra_hash_init,
                .cra_exit = rk_cra_hash_exit,
                .cra_module = THIS_MODULE,
-            }
        }
+            }
    }
 };
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -9,23 +9,94 @@
  * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
  */
 #include <linux/device.h>
+#include <crypto/scatterwalk.h>
 #include "rk3288_crypto.h"
 
 #define RK_CRYPTO_DEC           BIT(0)
 
-static void rk_crypto_complete(struct crypto_async_request *base, int err)
+static int rk_cipher_need_fallback(struct skcipher_request *req)
 {
-    if (base->complete)
-        base->complete(base, err);
+    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+    struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+    struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+    struct scatterlist *sgs, *sgd;
+    unsigned int stodo, dtodo, len;
+    unsigned int bs = crypto_skcipher_blocksize(tfm);
+
+    if (!req->cryptlen)
+        return true;
+
+    len = req->cryptlen;
+    sgs = req->src;
+    sgd = req->dst;
+    while (sgs && sgd) {
+        if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
+            algt->stat_fb_align++;
+            return true;
+        }
+        if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
+            algt->stat_fb_align++;
+            return true;
+        }
+        stodo = min(len, sgs->length);
+        if (stodo % bs) {
+            algt->stat_fb_len++;
+            return true;
+        }
+        dtodo = min(len, sgd->length);
+        if (dtodo % bs) {
+            algt->stat_fb_len++;
+            return true;
+        }
+        if (stodo != dtodo) {
+            algt->stat_fb_sgdiff++;
+            return true;
+        }
+        len -= stodo;
+        sgs = sg_next(sgs);
+        sgd = sg_next(sgd);
+    }
+    return false;
 }
 
-static int rk_handle_req(struct rk_crypto_info *dev,
-             struct skcipher_request *req)
+static int rk_cipher_fallback(struct skcipher_request *areq)
 {
-    if (!IS_ALIGNED(req->cryptlen, dev->align_size))
-        return -EINVAL;
+    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+    struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
+    struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+    struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+    int err;
+
+    algt->stat_fb++;
+
+    skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+    skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+                      areq->base.complete, areq->base.data);
+    skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+                   areq->cryptlen, areq->iv);
+    if (rctx->mode & RK_CRYPTO_DEC)
+        err = crypto_skcipher_decrypt(&rctx->fallback_req);
    else
-        return dev->enqueue(dev, &req->base);
+        err = crypto_skcipher_encrypt(&rctx->fallback_req);
+    return err;
+}
+
+static int rk_cipher_handle_req(struct skcipher_request *req)
+{
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+    struct rk_crypto_info *rkc;
+    struct crypto_engine *engine;
+
+    if (rk_cipher_need_fallback(req))
+        return rk_cipher_fallback(req);
+
+    rkc = get_rk_crypto();
+
+    engine = rkc->engine;
+    rctx->dev = rkc;
+
+    return crypto_transfer_skcipher_request_to_engine(engine, req);
 }
 
 static int rk_aes_setkey(struct crypto_skcipher *cipher,
@@ -38,8 +109,9 @@ static int rk_aes_setkey(struct crypto_s
        keylen != AES_KEYSIZE_256)
        return -EINVAL;
    ctx->keylen = keylen;
-    memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
-    return 0;
+    memcpy(ctx->key, key, keylen);
+
+    return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
 }
 
 static int rk_des_setkey(struct crypto_skcipher *cipher,
@@ -53,8 +125,9 @@ static int rk_des_setkey(struct crypto_s
        return err;
 
    ctx->keylen = keylen;
-    memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
-    return 0;
+    memcpy(ctx->key, key, keylen);
+
+    return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
 }
 
 static int rk_tdes_setkey(struct crypto_skcipher *cipher,
@@ -68,161 +141,136 @@ static int rk_tdes_setkey(struct crypto_
        return err;
 
    ctx->keylen = keylen;
-    memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
-    return 0;
+    memcpy(ctx->key, key, keylen);
+
+    return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
 }
 
 static int rk_aes_ecb_encrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    struct rk_crypto_info *dev = ctx->dev;
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
 
-    ctx->mode = RK_CRYPTO_AES_ECB_MODE;
-    return rk_handle_req(dev, req);
+    rctx->mode = RK_CRYPTO_AES_ECB_MODE;
+    return rk_cipher_handle_req(req);
 }
 
 static int rk_aes_ecb_decrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    struct rk_crypto_info *dev = ctx->dev;
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
 
-    ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
-    return rk_handle_req(dev, req);
+    rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+    return rk_cipher_handle_req(req);
 }
 
 static int rk_aes_cbc_encrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    struct rk_crypto_info *dev = ctx->dev;
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
 
-    ctx->mode = RK_CRYPTO_AES_CBC_MODE;
-    return rk_handle_req(dev, req);
+    rctx->mode = RK_CRYPTO_AES_CBC_MODE;
+    return rk_cipher_handle_req(req);
 }
 
 static int rk_aes_cbc_decrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    struct rk_crypto_info *dev = ctx->dev;
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
 
-    ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
-    return rk_handle_req(dev, req);
+    rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+    return rk_cipher_handle_req(req);
 }
 
 static int rk_des_ecb_encrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    struct rk_crypto_info *dev = ctx->dev;
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
 
-    ctx->mode = 0;
-    return rk_handle_req(dev, req);
+    rctx->mode = 0;
+    return rk_cipher_handle_req(req);
 }
 
 static int rk_des_ecb_decrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    struct rk_crypto_info *dev = ctx->dev;
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
 
-    ctx->mode = RK_CRYPTO_DEC;
-    return rk_handle_req(dev, req);
+    rctx->mode = RK_CRYPTO_DEC;
+    return rk_cipher_handle_req(req);
 }
 
 static int rk_des_cbc_encrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    struct rk_crypto_info *dev = ctx->dev;
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
 
-    ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
-    return rk_handle_req(dev, req);
+    rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+    return rk_cipher_handle_req(req);
 }
 
 static int rk_des_cbc_decrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    struct rk_crypto_info *dev = ctx->dev;
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
 
-    ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
-    return rk_handle_req(dev, req);
+    rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+    return rk_cipher_handle_req(req);
 }
 
 static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    struct rk_crypto_info *dev = ctx->dev;
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
 
-    ctx->mode = RK_CRYPTO_TDES_SELECT;
-    return rk_handle_req(dev, req);
+    rctx->mode = RK_CRYPTO_TDES_SELECT;
+    return rk_cipher_handle_req(req);
 }
 
 static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    struct rk_crypto_info *dev = ctx->dev;
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
 
-    ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
-    return rk_handle_req(dev, req);
+    rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+    return rk_cipher_handle_req(req);
 }
 
 static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    struct rk_crypto_info *dev = ctx->dev;
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
 
-    ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
-    return rk_handle_req(dev, req);
+    rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+    return rk_cipher_handle_req(req);
 }
 
 static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    struct rk_crypto_info *dev = ctx->dev;
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
 
-    ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+    rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
            RK_CRYPTO_DEC;
-    return rk_handle_req(dev, req);
+    return rk_cipher_handle_req(req);
 }
 
-static void rk_ablk_hw_init(struct rk_crypto_info *dev)
+static void rk_cipher_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req)
 {
-    struct skcipher_request *req =
-        skcipher_request_cast(dev->async_req);
    struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
    struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
-    u32 ivsize, block, conf_reg = 0;
+    u32 block, conf_reg = 0;
 
    block = crypto_tfm_alg_blocksize(tfm);
-    ivsize = crypto_skcipher_ivsize(cipher);
 
    if (block == DES_BLOCK_SIZE) {
-        ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+        rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
                 RK_CRYPTO_TDES_BYTESWAP_KEY |
                 RK_CRYPTO_TDES_BYTESWAP_IV;
-        CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
-        memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize);
+        CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode);
+        memcpy_toio(dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen);
        conf_reg = RK_CRYPTO_DESSEL;
    } else {
-        ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
+        rctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
                 RK_CRYPTO_AES_KEY_CHANGE |
                 RK_CRYPTO_AES_BYTESWAP_KEY |
                 RK_CRYPTO_AES_BYTESWAP_IV;
        if (ctx->keylen == AES_KEYSIZE_192)
-            ctx->mode |= RK_CRYPTO_AES_192BIT_key;
+            rctx->mode |= RK_CRYPTO_AES_192BIT_key;
        else if (ctx->keylen == AES_KEYSIZE_256)
-            ctx->mode |= RK_CRYPTO_AES_256BIT_key;
-        CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
-        memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize);
+            rctx->mode |= RK_CRYPTO_AES_256BIT_key;
+        CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode);
+        memcpy_toio(dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen);
    }
    conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
            RK_CRYPTO_BYTESWAP_BRFIFO;
@@ -231,189 +279,196 @@ static void rk_ablk_hw_init(struct rk_cr
             RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
 }
 
-static void crypto_dma_start(struct rk_crypto_info *dev)
-{
-    CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
-    CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
-    CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
+static void crypto_dma_start(struct rk_crypto_info *dev,
+                 struct scatterlist *sgs,
+                 struct scatterlist *sgd, unsigned int todo)
+{
+    CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs));
+    CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo);
+    CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd));
    CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
             _SBF(RK_CRYPTO_BLOCK_START, 16));
 }
 
-static int rk_set_data_start(struct rk_crypto_info *dev)
+static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
 {
-    int err;
-    struct skcipher_request *req =
-        skcipher_request_cast(dev->async_req);
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-    u32 ivsize = crypto_skcipher_ivsize(tfm);
-    u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
-        dev->sg_src->offset + dev->sg_src->length - ivsize;
-
-    /* Store the iv that need to be updated in chain mode.
-     * And update the IV buffer to contain the next IV for decryption mode.
-     */
-    if (ctx->mode & RK_CRYPTO_DEC) {
-        memcpy(ctx->iv, src_last_blk, ivsize);
-        sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv,
-                   ivsize, dev->total - ivsize);
-    }
-
-    err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
-    if (!err)
-        crypto_dma_start(dev);
-    return err;
-}
-
-static int rk_ablk_start(struct rk_crypto_info *dev)
-{
-    struct skcipher_request *req =
-        skcipher_request_cast(dev->async_req);
-    unsigned long flags;
+    struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
+    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+    struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
+    struct scatterlist *sgs, *sgd;
    int err = 0;
+    int ivsize = crypto_skcipher_ivsize(tfm);
+    int offset;
+    u8 iv[AES_BLOCK_SIZE];
+    u8 biv[AES_BLOCK_SIZE];
+    u8 *ivtouse = areq->iv;
+    unsigned int len = areq->cryptlen;
+    unsigned int todo;
+    struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+    struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+    struct rk_crypto_info *rkc = rctx->dev;
 
-    dev->left_bytes = req->cryptlen;
-    dev->total = req->cryptlen;
-    dev->sg_src = req->src;
-    dev->first = req->src;
-    dev->src_nents = sg_nents(req->src);
-    dev->sg_dst = req->dst;
-    dev->dst_nents = sg_nents(req->dst);
-    dev->aligned = 1;
-
-    spin_lock_irqsave(&dev->lock, flags);
-    rk_ablk_hw_init(dev);
-    err = rk_set_data_start(dev);
-    spin_unlock_irqrestore(&dev->lock, flags);
-    return err;
-}
+    err = pm_runtime_resume_and_get(rkc->dev);
+    if (err)
+        return err;
 
-static void rk_iv_copyback(struct rk_crypto_info *dev)
|
|
-{
|
|
- struct skcipher_request *req =
|
|
- skcipher_request_cast(dev->async_req);
|
|
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
|
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
|
|
- u32 ivsize = crypto_skcipher_ivsize(tfm);
|
|
+ algt->stat_req++;
|
|
+ rkc->nreq++;
|
|
|
|
- /* Update the IV buffer to contain the next IV for encryption mode. */
|
|
- if (!(ctx->mode & RK_CRYPTO_DEC)) {
|
|
- if (dev->aligned) {
|
|
- memcpy(req->iv, sg_virt(dev->sg_dst) +
|
|
- dev->sg_dst->length - ivsize, ivsize);
|
|
- } else {
|
|
- memcpy(req->iv, dev->addr_vir +
|
|
- dev->count - ivsize, ivsize);
|
|
+ ivsize = crypto_skcipher_ivsize(tfm);
|
|
+ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
|
|
+ if (rctx->mode & RK_CRYPTO_DEC) {
|
|
+ offset = areq->cryptlen - ivsize;
|
|
+ scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
|
|
+ offset, ivsize, 0);
|
|
}
|
|
}
|
|
-}
|
|
|
|
-static void rk_update_iv(struct rk_crypto_info *dev)
|
|
-{
|
|
- struct skcipher_request *req =
|
|
- skcipher_request_cast(dev->async_req);
|
|
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
|
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
|
|
- u32 ivsize = crypto_skcipher_ivsize(tfm);
|
|
- u8 *new_iv = NULL;
|
|
+ sgs = areq->src;
|
|
+ sgd = areq->dst;
|
|
|
|
- if (ctx->mode & RK_CRYPTO_DEC) {
|
|
- new_iv = ctx->iv;
|
|
- } else {
|
|
- new_iv = page_address(sg_page(dev->sg_dst)) +
|
|
- dev->sg_dst->offset + dev->sg_dst->length - ivsize;
|
|
+ while (sgs && sgd && len) {
|
|
+ if (!sgs->length) {
|
|
+ sgs = sg_next(sgs);
|
|
+ sgd = sg_next(sgd);
|
|
+ continue;
|
|
+ }
|
|
+ if (rctx->mode & RK_CRYPTO_DEC) {
|
|
+ /* we backup last block of source to be used as IV at next step */
|
|
+ offset = sgs->length - ivsize;
|
|
+ scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0);
|
|
+ }
|
|
+ if (sgs == sgd) {
|
|
+ err = dma_map_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
|
|
+ if (err <= 0) {
|
|
+ err = -EINVAL;
|
|
+ goto theend_iv;
|
|
+ }
|
|
+ } else {
|
|
+ err = dma_map_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
|
|
+ if (err <= 0) {
|
|
+ err = -EINVAL;
|
|
+ goto theend_iv;
|
|
+ }
|
|
+ err = dma_map_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
|
|
+ if (err <= 0) {
|
|
+ err = -EINVAL;
|
|
+ goto theend_sgs;
|
|
+ }
|
|
+ }
|
|
+ err = 0;
|
|
+ rk_cipher_hw_init(rkc, areq);
|
|
+ if (ivsize) {
|
|
+ if (ivsize == DES_BLOCK_SIZE)
|
|
+ memcpy_toio(rkc->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
|
|
+ else
|
|
+ memcpy_toio(rkc->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
|
|
+ }
|
|
+ reinit_completion(&rkc->complete);
|
|
+ rkc->status = 0;
|
|
+
|
|
+ todo = min(sg_dma_len(sgs), len);
|
|
+ len -= todo;
|
|
+ crypto_dma_start(rkc, sgs, sgd, todo / 4);
|
|
+ wait_for_completion_interruptible_timeout(&rkc->complete,
|
|
+ msecs_to_jiffies(2000));
|
|
+ if (!rkc->status) {
|
|
+ dev_err(rkc->dev, "DMA timeout\n");
|
|
+ err = -EFAULT;
|
|
+ goto theend;
|
|
+ }
|
|
+ if (sgs == sgd) {
|
|
+ dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
|
|
+ } else {
|
|
+ dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
|
|
+ dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
|
|
+ }
|
|
+ if (rctx->mode & RK_CRYPTO_DEC) {
|
|
+ memcpy(iv, biv, ivsize);
|
|
+ ivtouse = iv;
|
|
+ } else {
|
|
+ offset = sgd->length - ivsize;
|
|
+ scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0);
|
|
+ ivtouse = iv;
|
|
+ }
|
|
+ sgs = sg_next(sgs);
|
|
+ sgd = sg_next(sgd);
|
|
}
|
|
|
|
- if (ivsize == DES_BLOCK_SIZE)
|
|
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
|
|
- else if (ivsize == AES_BLOCK_SIZE)
|
|
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
|
|
-}
|
|
+ if (areq->iv && ivsize > 0) {
|
|
+ offset = areq->cryptlen - ivsize;
|
|
+ if (rctx->mode & RK_CRYPTO_DEC) {
|
|
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
|
|
+ memzero_explicit(rctx->backup_iv, ivsize);
|
|
+ } else {
|
|
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
|
|
+ ivsize, 0);
|
|
+ }
|
|
+ }
|
|
|
|
-/* return:
|
|
- * true some err was occurred
|
|
- * fault no err, continue
|
|
- */
|
|
-static int rk_ablk_rx(struct rk_crypto_info *dev)
|
|
-{
|
|
- int err = 0;
|
|
- struct skcipher_request *req =
|
|
- skcipher_request_cast(dev->async_req);
|
|
+theend:
|
|
+ pm_runtime_put_autosuspend(rkc->dev);
|
|
|
|
- dev->unload_data(dev);
|
|
- if (!dev->aligned) {
|
|
- if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
|
|
- dev->addr_vir, dev->count,
|
|
- dev->total - dev->left_bytes -
|
|
- dev->count)) {
|
|
- err = -EINVAL;
|
|
- goto out_rx;
|
|
- }
|
|
- }
|
|
- if (dev->left_bytes) {
|
|
- rk_update_iv(dev);
|
|
- if (dev->aligned) {
|
|
- if (sg_is_last(dev->sg_src)) {
|
|
- dev_err(dev->dev, "[%s:%d] Lack of data\n",
|
|
- __func__, __LINE__);
|
|
- err = -ENOMEM;
|
|
- goto out_rx;
|
|
- }
|
|
- dev->sg_src = sg_next(dev->sg_src);
|
|
- dev->sg_dst = sg_next(dev->sg_dst);
|
|
- }
|
|
- err = rk_set_data_start(dev);
|
|
+ local_bh_disable();
|
|
+ crypto_finalize_skcipher_request(engine, areq, err);
|
|
+ local_bh_enable();
|
|
+ return 0;
|
|
+
|
|
+theend_sgs:
|
|
+ if (sgs == sgd) {
|
|
+ dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
|
|
} else {
|
|
- rk_iv_copyback(dev);
|
|
- /* here show the calculation is over without any err */
|
|
- dev->complete(dev->async_req, 0);
|
|
- tasklet_schedule(&dev->queue_task);
|
|
+ dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
|
|
+ dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
|
|
}
|
|
-out_rx:
|
|
+theend_iv:
|
|
return err;
|
|
}
|
|
|
|
-static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
|
|
+static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
|
|
{
|
|
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
|
|
+ const char *name = crypto_tfm_alg_name(&tfm->base);
|
|
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
|
|
- struct rk_crypto_tmp *algt;
|
|
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
|
|
|
|
- algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
|
|
+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
|
|
+ if (IS_ERR(ctx->fallback_tfm)) {
|
|
+ dev_err(algt->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
|
|
+ name, PTR_ERR(ctx->fallback_tfm));
|
|
+ return PTR_ERR(ctx->fallback_tfm);
|
|
+ }
|
|
+
|
|
+ tfm->reqsize = sizeof(struct rk_cipher_rctx) +
|
|
+ crypto_skcipher_reqsize(ctx->fallback_tfm);
|
|
|
|
- ctx->dev = algt->dev;
|
|
- ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1;
|
|
- ctx->dev->start = rk_ablk_start;
|
|
- ctx->dev->update = rk_ablk_rx;
|
|
- ctx->dev->complete = rk_crypto_complete;
|
|
- ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
|
|
+ ctx->enginectx.op.do_one_request = rk_cipher_run;
|
|
|
|
- return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
|
|
+ return 0;
|
|
}
|
|
|
|
-static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
|
|
+static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm)
|
|
{
|
|
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
|
|
|
|
- free_page((unsigned long)ctx->dev->addr_vir);
|
|
- ctx->dev->disable_clk(ctx->dev);
|
|
+ memzero_explicit(ctx->key, ctx->keylen);
|
|
+ crypto_free_skcipher(ctx->fallback_tfm);
|
|
}
|
|
|
|
struct rk_crypto_tmp rk_ecb_aes_alg = {
|
|
- .type = ALG_TYPE_CIPHER,
|
|
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
|
|
.alg.skcipher = {
|
|
.base.cra_name = "ecb(aes)",
|
|
.base.cra_driver_name = "ecb-aes-rk",
|
|
.base.cra_priority = 300,
|
|
- .base.cra_flags = CRYPTO_ALG_ASYNC,
|
|
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
|
|
.base.cra_blocksize = AES_BLOCK_SIZE,
|
|
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
|
|
.base.cra_alignmask = 0x0f,
|
|
.base.cra_module = THIS_MODULE,
|
|
|
|
- .init = rk_ablk_init_tfm,
|
|
- .exit = rk_ablk_exit_tfm,
|
|
+ .init = rk_cipher_tfm_init,
|
|
+ .exit = rk_cipher_tfm_exit,
|
|
.min_keysize = AES_MIN_KEY_SIZE,
|
|
.max_keysize = AES_MAX_KEY_SIZE,
|
|
.setkey = rk_aes_setkey,
|
|
@@ -423,19 +478,19 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
|
|
};
|
|
|
|
struct rk_crypto_tmp rk_cbc_aes_alg = {
|
|
- .type = ALG_TYPE_CIPHER,
|
|
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
|
|
.alg.skcipher = {
|
|
.base.cra_name = "cbc(aes)",
|
|
.base.cra_driver_name = "cbc-aes-rk",
|
|
.base.cra_priority = 300,
|
|
- .base.cra_flags = CRYPTO_ALG_ASYNC,
|
|
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
|
|
.base.cra_blocksize = AES_BLOCK_SIZE,
|
|
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
|
|
.base.cra_alignmask = 0x0f,
|
|
.base.cra_module = THIS_MODULE,
|
|
|
|
- .init = rk_ablk_init_tfm,
|
|
- .exit = rk_ablk_exit_tfm,
|
|
+ .init = rk_cipher_tfm_init,
|
|
+ .exit = rk_cipher_tfm_exit,
|
|
.min_keysize = AES_MIN_KEY_SIZE,
|
|
.max_keysize = AES_MAX_KEY_SIZE,
|
|
.ivsize = AES_BLOCK_SIZE,
|
|
@@ -446,19 +501,19 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
|
|
};
|
|
|
|
struct rk_crypto_tmp rk_ecb_des_alg = {
|
|
- .type = ALG_TYPE_CIPHER,
|
|
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
|
|
.alg.skcipher = {
|
|
.base.cra_name = "ecb(des)",
|
|
.base.cra_driver_name = "ecb-des-rk",
|
|
.base.cra_priority = 300,
|
|
- .base.cra_flags = CRYPTO_ALG_ASYNC,
|
|
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
|
|
.base.cra_blocksize = DES_BLOCK_SIZE,
|
|
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
|
|
.base.cra_alignmask = 0x07,
|
|
.base.cra_module = THIS_MODULE,
|
|
|
|
- .init = rk_ablk_init_tfm,
|
|
- .exit = rk_ablk_exit_tfm,
|
|
+ .init = rk_cipher_tfm_init,
|
|
+ .exit = rk_cipher_tfm_exit,
|
|
.min_keysize = DES_KEY_SIZE,
|
|
.max_keysize = DES_KEY_SIZE,
|
|
.setkey = rk_des_setkey,
|
|
@@ -468,19 +523,19 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
|
|
};
|
|
|
|
struct rk_crypto_tmp rk_cbc_des_alg = {
|
|
- .type = ALG_TYPE_CIPHER,
|
|
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
|
|
.alg.skcipher = {
|
|
.base.cra_name = "cbc(des)",
|
|
.base.cra_driver_name = "cbc-des-rk",
|
|
.base.cra_priority = 300,
|
|
- .base.cra_flags = CRYPTO_ALG_ASYNC,
|
|
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
|
|
.base.cra_blocksize = DES_BLOCK_SIZE,
|
|
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
|
|
.base.cra_alignmask = 0x07,
|
|
.base.cra_module = THIS_MODULE,
|
|
|
|
- .init = rk_ablk_init_tfm,
|
|
- .exit = rk_ablk_exit_tfm,
|
|
+ .init = rk_cipher_tfm_init,
|
|
+ .exit = rk_cipher_tfm_exit,
|
|
.min_keysize = DES_KEY_SIZE,
|
|
.max_keysize = DES_KEY_SIZE,
|
|
.ivsize = DES_BLOCK_SIZE,
|
|
@@ -491,19 +546,19 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
|
|
};
|
|
|
|
struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
|
|
- .type = ALG_TYPE_CIPHER,
|
|
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
|
|
.alg.skcipher = {
|
|
.base.cra_name = "ecb(des3_ede)",
|
|
.base.cra_driver_name = "ecb-des3-ede-rk",
|
|
.base.cra_priority = 300,
|
|
- .base.cra_flags = CRYPTO_ALG_ASYNC,
|
|
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
|
|
.base.cra_blocksize = DES_BLOCK_SIZE,
|
|
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
|
|
.base.cra_alignmask = 0x07,
|
|
.base.cra_module = THIS_MODULE,
|
|
|
|
- .init = rk_ablk_init_tfm,
|
|
- .exit = rk_ablk_exit_tfm,
|
|
+ .init = rk_cipher_tfm_init,
|
|
+ .exit = rk_cipher_tfm_exit,
|
|
.min_keysize = DES3_EDE_KEY_SIZE,
|
|
.max_keysize = DES3_EDE_KEY_SIZE,
|
|
.setkey = rk_tdes_setkey,
|
|
@@ -513,19 +568,19 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg
|
|
};
|
|
|
|
struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
|
|
- .type = ALG_TYPE_CIPHER,
|
|
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
|
|
.alg.skcipher = {
|
|
.base.cra_name = "cbc(des3_ede)",
|
|
.base.cra_driver_name = "cbc-des3-ede-rk",
|
|
.base.cra_priority = 300,
|
|
- .base.cra_flags = CRYPTO_ALG_ASYNC,
|
|
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
|
|
.base.cra_blocksize = DES_BLOCK_SIZE,
|
|
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
|
|
.base.cra_alignmask = 0x07,
|
|
.base.cra_module = THIS_MODULE,
|
|
|
|
- .init = rk_ablk_init_tfm,
|
|
- .exit = rk_ablk_exit_tfm,
|
|
+ .init = rk_cipher_tfm_init,
|
|
+ .exit = rk_cipher_tfm_exit,
|
|
.min_keysize = DES3_EDE_KEY_SIZE,
|
|
.max_keysize = DES3_EDE_KEY_SIZE,
|
|
.ivsize = DES_BLOCK_SIZE,
|
|
--- a/drivers/crypto/Kconfig
|
|
+++ b/drivers/crypto/Kconfig
|
|
@@ -784,7 +784,12 @@ config CRYPTO_DEV_IMGTEC_HASH
|
|
config CRYPTO_DEV_ROCKCHIP
|
|
tristate "Rockchip's Cryptographic Engine driver"
|
|
depends on OF && ARCH_ROCKCHIP
|
|
+ depends on PM
|
|
+ select CRYPTO_ECB
|
|
+ select CRYPTO_CBC
|
|
+ select CRYPTO_DES
|
|
select CRYPTO_AES
|
|
+ select CRYPTO_ENGINE
|
|
select CRYPTO_LIB_DES
|
|
select CRYPTO_MD5
|
|
select CRYPTO_SHA1
|
|
@@ -796,6 +801,16 @@ config CRYPTO_DEV_ROCKCHIP
|
|
This driver interfaces with the hardware crypto accelerator.
|
|
Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
|
|
|
|
+config CRYPTO_DEV_ROCKCHIP_DEBUG
|
|
+ bool "Enable Rockchip crypto stats"
|
|
+ depends on CRYPTO_DEV_ROCKCHIP
|
|
+ depends on DEBUG_FS
|
|
+ help
|
|
+ Say y to enable Rockchip crypto debug stats.
|
|
+ This will create /sys/kernel/debug/rk3288_crypto/stats for displaying
|
|
+ the number of requests per algorithm and other internal stats.
|
|
+
|
|
+
|
|
config CRYPTO_DEV_ZYNQMP_AES
|
|
tristate "Support for Xilinx ZynqMP AES hw accelerator"
|
|
depends on ZYNQMP_FIRMWARE || COMPILE_TEST
|
|
--- a/MAINTAINERS
|
|
+++ b/MAINTAINERS
|
|
@@ -16972,6 +16972,13 @@ F: Documentation/ABI/*/sysfs-driver-hid-
|
|
F: drivers/hid/hid-roccat*
|
|
F: include/linux/hid-roccat*
|
|
|
|
+ROCKCHIP CRYPTO DRIVERS
|
|
+M: Corentin Labbe <clabbe@baylibre.com>
|
|
+L: linux-crypto@vger.kernel.org
|
|
+S: Maintained
|
|
+F: Documentation/devicetree/bindings/crypto/rockchip,rk3288-crypto.yaml
|
|
+F: drivers/crypto/rockchip/
|
|
+
|
|
ROCKCHIP I2S TDM DRIVER
|
|
M: Nicolas Frattaroli <frattaroli.nicolas@gmail.com>
|
|
L: linux-rockchip@lists.infradead.org
|
|
--- /dev/null
|
|
+++ b/Documentation/devicetree/bindings/crypto/rockchip,rk3288-crypto.yaml
|
|
@@ -0,0 +1,133 @@
|
|
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
|
+%YAML 1.2
|
|
+---
|
|
+$id: http://devicetree.org/schemas/crypto/rockchip,rk3288-crypto.yaml#
|
|
+$schema: http://devicetree.org/meta-schemas/core.yaml#
|
|
+
|
|
+title: Rockchip Electronics Security Accelerator
|
|
+
|
|
+maintainers:
|
|
+ - Heiko Stuebner <heiko@sntech.de>
|
|
+
|
|
+properties:
|
|
+ compatible:
|
|
+ enum:
|
|
+ - rockchip,rk3288-crypto
|
|
+ - rockchip,rk3328-crypto
|
|
+ - rockchip,rk3399-crypto
|
|
+
|
|
+ reg:
|
|
+ maxItems: 1
|
|
+
|
|
+ interrupts:
|
|
+ maxItems: 1
|
|
+
|
|
+ clocks:
|
|
+ minItems: 3
|
|
+ maxItems: 4
|
|
+
|
|
+ clock-names:
|
|
+ minItems: 3
|
|
+ maxItems: 4
|
|
+
|
|
+ resets:
|
|
+ minItems: 1
|
|
+ maxItems: 3
|
|
+
|
|
+ reset-names:
|
|
+ minItems: 1
|
|
+ maxItems: 3
|
|
+
|
|
+allOf:
|
|
+ - if:
|
|
+ properties:
|
|
+ compatible:
|
|
+ contains:
|
|
+ const: rockchip,rk3288-crypto
|
|
+ then:
|
|
+ properties:
|
|
+ clocks:
|
|
+ minItems: 4
|
|
+ clock-names:
|
|
+ items:
|
|
+ - const: aclk
|
|
+ - const: hclk
|
|
+ - const: sclk
|
|
+ - const: apb_pclk
|
|
+ minItems: 4
|
|
+ resets:
|
|
+ maxItems: 1
|
|
+ reset-names:
|
|
+ items:
|
|
+ - const: crypto-rst
|
|
+ maxItems: 1
|
|
+ - if:
|
|
+ properties:
|
|
+ compatible:
|
|
+ contains:
|
|
+ const: rockchip,rk3328-crypto
|
|
+ then:
|
|
+ properties:
|
|
+ clocks:
|
|
+ maxItems: 3
|
|
+ clock-names:
|
|
+ items:
|
|
+ - const: hclk_master
|
|
+ - const: hclk_slave
|
|
+ - const: sclk
|
|
+ maxItems: 3
|
|
+ resets:
|
|
+ maxItems: 1
|
|
+ reset-names:
|
|
+ items:
|
|
+ - const: crypto-rst
|
|
+ maxItems: 1
|
|
+ - if:
|
|
+ properties:
|
|
+ compatible:
|
|
+ contains:
|
|
+ const: rockchip,rk3399-crypto
|
|
+ then:
|
|
+ properties:
|
|
+ clocks:
|
|
+ maxItems: 3
|
|
+ clock-names:
|
|
+ items:
|
|
+ - const: hclk_master
|
|
+ - const: hclk_slave
|
|
+ - const: sclk
|
|
+ maxItems: 3
|
|
+ resets:
|
|
+ minItems: 3
|
|
+ reset-names:
|
|
+ items:
|
|
+ - const: rst_master
|
|
+ - const: rst_slave
|
|
+ - const: crypto-rst
|
|
+ minItems: 3
|
|
+
|
|
+required:
|
|
+ - compatible
|
|
+ - reg
|
|
+ - interrupts
|
|
+ - clocks
|
|
+ - clock-names
|
|
+ - resets
|
|
+ - reset-names
|
|
+
|
|
+additionalProperties: false
|
|
+
|
|
+examples:
|
|
+ - |
|
|
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
|
|
+ #include <dt-bindings/clock/rk3288-cru.h>
|
|
+ crypto@ff8a0000 {
|
|
+ compatible = "rockchip,rk3288-crypto";
|
|
+ reg = <0xff8a0000 0x4000>;
|
|
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
|
|
+ clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>,
|
|
+ <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>;
|
|
+ clock-names = "aclk", "hclk", "sclk", "apb_pclk";
|
|
+ resets = <&cru SRST_CRYPTO>;
|
|
+ reset-names = "crypto-rst";
|
|
+ };
|
|
--- a/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt
|
|
+++ /dev/null
|
|
@@ -1,28 +0,0 @@
|
|
-Rockchip Electronics And Security Accelerator
|
|
-
|
|
-Required properties:
|
|
-- compatible: Should be "rockchip,rk3288-crypto"
|
|
-- reg: Base physical address of the engine and length of memory mapped
|
|
- region
|
|
-- interrupts: Interrupt number
|
|
-- clocks: Reference to the clocks about crypto
|
|
-- clock-names: "aclk" used to clock data
|
|
- "hclk" used to clock data
|
|
- "sclk" used to clock crypto accelerator
|
|
- "apb_pclk" used to clock dma
|
|
-- resets: Must contain an entry for each entry in reset-names.
|
|
- See ../reset/reset.txt for details.
|
|
-- reset-names: Must include the name "crypto-rst".
|
|
-
|
|
-Examples:
|
|
-
|
|
- crypto: cypto-controller@ff8a0000 {
|
|
- compatible = "rockchip,rk3288-crypto";
|
|
- reg = <0xff8a0000 0x4000>;
|
|
- interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
|
|
- clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>,
|
|
- <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>;
|
|
- clock-names = "aclk", "hclk", "sclk", "apb_pclk";
|
|
- resets = <&cru SRST_CRYPTO>;
|
|
- reset-names = "crypto-rst";
|
|
- };
|
|
--- a/include/dt-bindings/clock/rk3399-cru.h
|
|
+++ b/include/dt-bindings/clock/rk3399-cru.h
|
|
@@ -547,8 +547,8 @@
|
|
#define SRST_H_PERILP0 171
|
|
#define SRST_H_PERILP0_NOC 172
|
|
#define SRST_ROM 173
|
|
-#define SRST_CRYPTO_S 174
|
|
-#define SRST_CRYPTO_M 175
|
|
+#define SRST_CRYPTO0_S 174
|
|
+#define SRST_CRYPTO0_M 175
|
|
|
|
/* cru_softrst_con11 */
|
|
#define SRST_P_DCF 176
|
|
@@ -556,7 +556,7 @@
|
|
#define SRST_CM0S 178
|
|
#define SRST_CM0S_DBG 179
|
|
#define SRST_CM0S_PO 180
|
|
-#define SRST_CRYPTO 181
|
|
+#define SRST_CRYPTO0 181
|
|
#define SRST_P_PERILP1_SGRF 182
|
|
#define SRST_P_PERILP1_GRF 183
|
|
#define SRST_CRYPTO1_S 184
|
|
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
|
|
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
|
|
@@ -1040,6 +1040,17 @@
|
|
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
|
|
};
|
|
|
|
+ crypto: crypto@ff060000 {
|
|
+ compatible = "rockchip,rk3328-crypto";
|
|
+ reg = <0x0 0xff060000 0x0 0x4000>;
|
|
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
|
|
+ clocks = <&cru HCLK_CRYPTO_MST>, <&cru HCLK_CRYPTO_SLV>,
|
|
+ <&cru SCLK_CRYPTO>;
|
|
+ clock-names = "hclk_master", "hclk_slave", "sclk";
|
|
+ resets = <&cru SRST_CRYPTO>;
|
|
+ reset-names = "crypto-rst";
|
|
+ };
|
|
+
|
|
pinctrl: pinctrl {
|
|
compatible = "rockchip,rk3328-pinctrl";
|
|
rockchip,grf = <&grf>;
|
|
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
|
|
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
|
|
@@ -573,6 +573,26 @@
|
|
status = "disabled";
|
|
};
|
|
|
|
+ crypto0: crypto@ff8b0000 {
|
|
+ compatible = "rockchip,rk3399-crypto";
|
|
+ reg = <0x0 0xff8b0000 0x0 0x4000>;
|
|
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH 0>;
|
|
+ clocks = <&cru HCLK_M_CRYPTO0>, <&cru HCLK_S_CRYPTO0>, <&cru SCLK_CRYPTO0>;
|
|
+ clock-names = "hclk_master", "hclk_slave", "sclk";
|
|
+ resets = <&cru SRST_CRYPTO0>, <&cru SRST_CRYPTO0_S>, <&cru SRST_CRYPTO0_M>;
|
|
+ reset-names = "rst_master", "rst_slave", "crypto-rst";
|
|
+ };
|
|
+
|
|
+ crypto1: crypto@ff8b8000 {
|
|
+ compatible = "rockchip,rk3399-crypto";
|
|
+ reg = <0x0 0xff8b8000 0x0 0x4000>;
|
|
+ interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH 0>;
|
|
+ clocks = <&cru HCLK_M_CRYPTO1>, <&cru HCLK_S_CRYPTO1>, <&cru SCLK_CRYPTO1>;
|
|
+ clock-names = "hclk_master", "hclk_slave", "sclk";
|
|
+ resets = <&cru SRST_CRYPTO1>, <&cru SRST_CRYPTO1_S>, <&cru SRST_CRYPTO1_M>;
|
|
+ reset-names = "rst_master", "rst_slave", "crypto-rst";
|
|
+ };
|
|
+
|
|
i2c1: i2c@ff110000 {
|
|
compatible = "rockchip,rk3399-i2c";
|
|
reg = <0x0 0xff110000 0x0 0x1000>;
|