mtk-eip93: drop local source code

AmadeusGhost 2022-02-23 23:20:50 +08:00
parent 888d463137
commit 9eb5e1cd47
13 changed files with 11 additions and 3700 deletions

View File

@@ -5,13 +5,20 @@
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
include $(INCLUDE_DIR)/kernel.mk
PKG_NAME:=mtk-eip93
PKG_RELEASE:=1.3
PKG_SOURCE_PROTO:=git
PKG_SOURCE_DATE:=2020-12-10
PKG_SOURCE_URL:=https://github.com/vschagen/mtk-eip93.git
PKG_SOURCE_VERSION:=bb5fd1c88a8a74de06b70c281bceeb14d0d960ef
PKG_MIRROR_HASH:=cb114b29f7e06e817341adb9127c4d053592be53356a64d932cf7ba418606260
MAKE_PATH:=src
include $(INCLUDE_DIR)/package.mk
define KernelPackage/crypto-hw-eip93
@@ -27,7 +34,7 @@ define KernelPackage/crypto-hw-eip93
+kmod-crypto-sha256
KCONFIG:=
TITLE:=MTK EIP93 crypto module.
FILES:=$(PKG_BUILD_DIR)/crypto-hw-eip93.ko
FILES:=$(PKG_BUILD_DIR)/src/crypto-hw-eip93.ko
AUTOLOAD:=$(call AutoProbe,crypto-hw-eip93)
endef
@@ -40,7 +47,7 @@ endef
MAKE_OPTS:= \
$(KERNEL_MAKE_FLAGS) \
M="$(PKG_BUILD_DIR)"
M="$(PKG_BUILD_DIR)/src"
define Build/Compile
$(MAKE) -C "$(LINUX_DIR)" \

View File

@@ -1,37 +0,0 @@
From d789256e237343f17097eed7ef29be554b5a2265 Mon Sep 17 00:00:00 2001
From: ailick <277498654@qq.com>
Date: Sat, 31 Jul 2021 00:49:23 +0800
Subject: [PATCH] Fix compilation issues
---
eip93-prng.c | 1 +
eip93-ring.c | 1 +
2 files changed, 2 insertions(+)
diff --git a/eip93-prng.c b/eip93-prng.c
index 9c02522..87cd4dc 100644
--- a/eip93-prng.c
+++ b/eip93-prng.c
@@ -5,6 +5,7 @@
* Richard van Schagen <vschagen@cs.com>
*/
+#include <linux/dma-mapping.h>
#include "eip93-common.h"
#include "eip93-core.h"
#include "eip93-regs.h"
diff --git a/eip93-ring.c b/eip93-ring.c
index fff5c0b..b553bba 100644
--- a/eip93-ring.c
+++ b/eip93-ring.c
@@ -5,6 +5,7 @@
* Richard van Schagen <vschagen@cs.com>
*/
+#include <linux/device.h>
#include "eip93-common.h"
#include "eip93-core.h"
--
2.30.2

View File

@@ -1,3 +0,0 @@
crypto-hw-eip93-objs:= eip93-core.o eip93-ring.o eip93-cipher.o eip93-prng.o
obj-m += crypto-hw-eip93.o

File diff suppressed because it is too large

View File

@@ -1,89 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2019 - 2020
*
* Richard van Schagen <vschagen@cs.com>
*/
#ifndef _CIPHER_H_
#define _CIPHER_H_
extern struct mtk_alg_template mtk_alg_ecb_aes;
extern struct mtk_alg_template mtk_alg_cbc_aes;
extern struct mtk_alg_template mtk_alg_ctr_aes;
extern struct mtk_alg_template mtk_alg_rfc3686_aes;
extern struct mtk_alg_template mtk_alg_ecb_des;
extern struct mtk_alg_template mtk_alg_cbc_des;
extern struct mtk_alg_template mtk_alg_ecb_des3_ede;
extern struct mtk_alg_template mtk_alg_cbc_des3_ede;
extern struct mtk_alg_template mtk_alg_authenc_hmac_md5_cbc_aes;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha1_cbc_aes;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha224_cbc_aes;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha256_cbc_aes;
extern struct mtk_alg_template mtk_alg_authenc_hmac_md5_ctr_aes;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha1_ctr_aes;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha224_ctr_aes;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha256_ctr_aes;
extern struct mtk_alg_template mtk_alg_authenc_hmac_md5_rfc3686_aes;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha1_rfc3686_aes;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha224_rfc3686_aes;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha256_rfc3686_aes;
extern struct mtk_alg_template mtk_alg_authenc_hmac_md5_cbc_des;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha1_cbc_des;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha224_cbc_des;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha256_cbc_des;
extern struct mtk_alg_template mtk_alg_authenc_hmac_md5_cbc_des3_ede;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha1_cbc_des3_ede;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha224_cbc_des3_ede;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha256_cbc_des3_ede;
extern struct mtk_alg_template mtk_alg_authenc_hmac_md5_ecb_null;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha1_ecb_null;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha224_ecb_null;
extern struct mtk_alg_template mtk_alg_authenc_hmac_sha256_ecb_null;
extern struct mtk_alg_template mtk_alg_echainiv_authenc_hmac_md5_cbc_des;
extern struct mtk_alg_template mtk_alg_echainiv_authenc_hmac_sha1_cbc_aes;
extern struct mtk_alg_template mtk_alg_echainiv_authenc_hmac_sha256_cbc_aes;
extern struct mtk_alg_template mtk_alg_seqiv_authenc_hmac_sha1_rfc3686_aes;
extern struct mtk_alg_template mtk_alg_seqiv_authenc_hmac_sha256_rfc3686_aes;
struct mtk_cipher_ctx {
struct mtk_device *mtk;
struct saRecord_s *sa;
struct crypto_skcipher *fallback;
/* AEAD specific */
unsigned int authsize;
struct crypto_shash *shash;
bool aead;
};
struct mtk_cipher_reqctx {
unsigned long flags;
u32 textsize;
u32 ivsize;
bool iv_dma;
struct saRecord_s *saRecord;
dma_addr_t saRecord_base;
struct saState_s *saState;
dma_addr_t saState_base;
/* copy in case of misalignment, or for AEAD with non-consecutive blocks */
struct scatterlist *sg_src;
struct scatterlist *sg_dst;
/* AES-CTR in case of counter overflow */
struct saState_s *saState_ctr;
dma_addr_t saState_base_ctr;
struct scatterlist ctr_src[2];
struct scatterlist ctr_dst[2];
/* AEAD */
u32 assoclen;
u32 authsize;
/* request fallback, keep at the end */
struct skcipher_request fallback_req;
};
void mtk_skcipher_handle_result(struct mtk_device *mtk,
struct crypto_async_request *async,
bool complete, int err);
void mtk_aead_handle_result(struct mtk_device *mtk,
struct crypto_async_request *async,
bool complete, int err);
#endif /* _CIPHER_H_ */

View File

@@ -1,202 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2019 - 2020
*
* Richard van Schagen <vschagen@cs.com>
*/
#ifndef _COMMON_H_
#define _COMMON_H_
#include <linux/bits.h>
#define MTK_RING_SIZE 256
#define MTK_RING_BUSY 224
#define NUM_AES_BYPASS 256
#define MTK_QUEUE_LENGTH 128
#define MTK_CRA_PRIORITY 1500
#define MTK_DESC_ASYNC BIT(0)
#define MTK_DESC_SKCIPHER BIT(1)
#define MTK_DESC_AEAD BIT(2)
#define MTK_DESC_AHASH BIT(3)
#define MTK_DESC_PRNG BIT(4)
#define MTK_DESC_FAKE_HMAC BIT(5)
#define MTK_DESC_LAST BIT(6)
#define MTK_DESC_FINISH BIT(7)
/* cipher algorithms */
#define MTK_ALG_DES BIT(0)
#define MTK_ALG_3DES BIT(1)
#define MTK_ALG_AES BIT(2)
#define MTK_ALG_MASK GENMASK(2, 0)
/* hash and hmac algorithms */
#define MTK_HASH_MD5 BIT(3)
#define MTK_HASH_SHA1 BIT(4)
#define MTK_HASH_SHA224 BIT(5)
#define MTK_HASH_SHA256 BIT(6)
#define MTK_HASH_HMAC BIT(7)
#define MTK_HASH_MASK GENMASK(6, 3)
/* cipher modes */
#define MTK_MODE_CBC BIT(8)
#define MTK_MODE_ECB BIT(9)
#define MTK_MODE_CTR BIT(10)
#define MTK_MODE_RFC3686 BIT(11)
#define MTK_MODE_MASK GENMASK(10, 8)
/* cipher encryption/decryption operations */
#define MTK_ENCRYPT BIT(12)
#define MTK_DECRYPT BIT(13)
#define MTK_GENIV BIT(14)
#define MTK_BUSY BIT(15)
#define IS_DES(flags) (flags & MTK_ALG_DES)
#define IS_3DES(flags) (flags & MTK_ALG_3DES)
#define IS_AES(flags) (flags & MTK_ALG_AES)
#define IS_HASH_MD5(flags) (flags & MTK_HASH_MD5)
#define IS_HASH_SHA1(flags) (flags & MTK_HASH_SHA1)
#define IS_HASH_SHA224(flags) (flags & MTK_HASH_SHA224)
#define IS_HASH_SHA256(flags) (flags & MTK_HASH_SHA256)
#define IS_HMAC(flags) (flags & MTK_HASH_HMAC)
#define IS_CBC(mode) (mode & MTK_MODE_CBC)
#define IS_ECB(mode) (mode & MTK_MODE_ECB)
#define IS_CTR(mode) (mode & MTK_MODE_CTR)
#define IS_RFC3686(mode) (mode & MTK_MODE_RFC3686)
#define IS_GENIV(flags) (flags & MTK_GENIV)
#define IS_BUSY(flags) (flags & MTK_BUSY)
#define IS_ENCRYPT(dir) (dir & MTK_ENCRYPT)
#define IS_DECRYPT(dir) (dir & MTK_DECRYPT)
#define IS_CIPHER(flags) (flags & (MTK_ALG_DES | \
MTK_ALG_3DES | \
MTK_ALG_AES))
#define IS_HASH(flags) (flags & (MTK_HASH_MD5 | \
MTK_HASH_SHA1 | \
MTK_HASH_SHA224 | \
MTK_HASH_SHA256))
/*
* Interrupts of EIP93
*/
typedef enum {
EIP93_INT_PE_CDRTHRESH_REQ = BIT(0),
EIP93_INT_PE_RDRTHRESH_REQ = BIT(1),
EIP93_INT_PE_OPERATION_DONE = BIT(9),
EIP93_INT_PE_INBUFTHRESH_REQ = BIT(10),
EIP93_INT_PE_OUTBURTHRSH_REQ = BIT(11),
EIP93_INT_PE_PRNG_IRQ = BIT(12),
EIP93_INT_PE_ERR_REG = BIT(13),
EIP93_INT_PE_RD_DONE_IRQ = BIT(16),
} EIP93_InterruptSource_t;
typedef union {
struct {
unsigned int opCode :3;
unsigned int direction :1;
unsigned int opGroup :2;
unsigned int padType :2;
unsigned int cipher :4;
unsigned int hash :4;
unsigned int reserved2 :1;
unsigned int scPad :1;
unsigned int extPad :1;
unsigned int hdrProc :1;
unsigned int digestLength :4;
unsigned int ivSource :2;
unsigned int hashSource :2;
unsigned int saveIv :1;
unsigned int saveHash :1;
unsigned int reserved1 :2;
} bits;
unsigned int word;
} saCmd0_t;
typedef union {
struct {
unsigned int copyDigest :1;
unsigned int copyHeader :1;
unsigned int copyPayload :1;
unsigned int copyPad :1;
unsigned int reserved4 :4;
unsigned int cipherMode :2;
unsigned int reserved3 :1;
unsigned int sslMac :1;
unsigned int hmac :1;
unsigned int byteOffset :1;
unsigned int reserved2 :2;
unsigned int hashCryptOffset :8;
unsigned int aesKeyLen :3;
unsigned int reserved1 :1;
unsigned int aesDecKey :1;
unsigned int seqNumCheck :1;
unsigned int reserved0 :2;
} bits;
unsigned int word;
} saCmd1_t;
typedef struct saRecord_s {
saCmd0_t saCmd0;
saCmd1_t saCmd1;
unsigned int saKey[8];
unsigned int saIDigest[8];
unsigned int saODigest[8];
unsigned int saSpi;
unsigned int saSeqNum[2];
unsigned int saSeqNumMask[2];
unsigned int saNonce;
} saRecord_t;
typedef struct saState_s {
unsigned int stateIv[4];
unsigned int stateByteCnt[2];
unsigned int stateIDigest[8];
} saState_t;
typedef union {
struct {
unsigned int hostReady :1;
unsigned int peReady :1;
unsigned int reserved :1;
unsigned int initArc4 :1;
unsigned int hashFinal :1;
unsigned int haltMode :1;
unsigned int prngMode :2;
unsigned int padValue :8;
unsigned int errStatus :8;
unsigned int padCrtlStat :8;
} bits;
unsigned int word;
} peCrtlStat_t;
typedef union {
struct {
unsigned int length :20;
unsigned int reserved :2;
unsigned int hostReady :1;
unsigned int peReady :1;
unsigned int byPass :8;
} bits;
unsigned int word;
} peLength_t;
typedef struct eip93_descriptor_s {
peCrtlStat_t peCrtlStat;
unsigned int srcAddr;
unsigned int dstAddr;
unsigned int saAddr;
unsigned int stateAddr;
unsigned int arc4Addr;
unsigned int userId;
peLength_t peLength;
} eip93_descriptor_t;
#endif /* _COMMON_H_ */

View File

@@ -1,566 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2019 - 2020
*
* Richard van Schagen <vschagen@cs.com>
*/
//#define DEBUG 1
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include "eip93-regs.h"
#include "eip93-common.h"
#include "eip93-core.h"
#include "eip93-ring.h"
#include "eip93-cipher.h"
#include "eip93-prng.h"
static struct mtk_alg_template *mtk_algs[] = {
&mtk_alg_ecb_des,
&mtk_alg_cbc_des,
&mtk_alg_ecb_des3_ede,
&mtk_alg_cbc_des3_ede,
&mtk_alg_ecb_aes,
&mtk_alg_cbc_aes,
&mtk_alg_ctr_aes,
&mtk_alg_rfc3686_aes,
&mtk_alg_authenc_hmac_md5_cbc_des,
&mtk_alg_authenc_hmac_sha1_cbc_des,
&mtk_alg_authenc_hmac_sha224_cbc_des,
&mtk_alg_authenc_hmac_sha256_cbc_des,
&mtk_alg_authenc_hmac_md5_cbc_des3_ede,
&mtk_alg_authenc_hmac_sha1_cbc_des3_ede,
&mtk_alg_authenc_hmac_sha224_cbc_des3_ede,
&mtk_alg_authenc_hmac_sha256_cbc_des3_ede,
&mtk_alg_authenc_hmac_md5_cbc_aes,
&mtk_alg_authenc_hmac_sha1_cbc_aes,
&mtk_alg_authenc_hmac_sha224_cbc_aes,
&mtk_alg_authenc_hmac_sha256_cbc_aes,
&mtk_alg_authenc_hmac_md5_rfc3686_aes,
&mtk_alg_authenc_hmac_sha1_rfc3686_aes,
&mtk_alg_authenc_hmac_sha224_rfc3686_aes,
&mtk_alg_authenc_hmac_sha256_rfc3686_aes,
&mtk_alg_authenc_hmac_md5_ecb_null,
&mtk_alg_authenc_hmac_sha1_ecb_null,
&mtk_alg_authenc_hmac_sha224_ecb_null,
&mtk_alg_authenc_hmac_sha256_ecb_null,
&mtk_alg_echainiv_authenc_hmac_md5_cbc_des,
&mtk_alg_echainiv_authenc_hmac_sha1_cbc_aes,
&mtk_alg_echainiv_authenc_hmac_sha256_cbc_aes,
// &mtk_alg_seqiv_authenc_hmac_sha1_rfc3686_aes,
// &mtk_alg_seqiv_authenc_hmac_sha256_rfc3686_aes,
// &mtk_alg_prng,
// &mtk_alg_cprng,
};
static void mtk_unregister_algs(struct mtk_device *mtk, int i)
{
int j;
for (j = 0; j < i; j++) {
switch (mtk_algs[j]->type) {
case MTK_ALG_TYPE_SKCIPHER:
dev_dbg(mtk->dev, "unregistering: %s",
mtk_algs[j]->alg.skcipher.base.cra_name);
crypto_unregister_skcipher(&mtk_algs[j]->alg.skcipher);
break;
case MTK_ALG_TYPE_AEAD:
dev_dbg(mtk->dev, "unregistering: %s",
mtk_algs[j]->alg.aead.base.cra_name);
crypto_unregister_aead(&mtk_algs[j]->alg.aead);
break;
case MTK_ALG_TYPE_AHASH:
dev_dbg(mtk->dev, "unregistering: %s",
mtk_algs[j]->alg.ahash.halg.base.cra_name);
crypto_unregister_ahash(&mtk_algs[j]->alg.ahash);
break;
case MTK_ALG_TYPE_PRNG:
dev_dbg(mtk->dev, "unregistering: %s",
mtk_algs[j]->alg.rng.base.cra_name);
crypto_unregister_rng(&mtk_algs[j]->alg.rng);
}
}
}
static int mtk_register_algs(struct mtk_device *mtk)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(mtk_algs); i++) {
mtk_algs[i]->mtk = mtk;
switch (mtk_algs[i]->type) {
case MTK_ALG_TYPE_SKCIPHER:
dev_dbg(mtk->dev, "registering: %s",
mtk_algs[i]->alg.skcipher.base.cra_name);
ret = crypto_register_skcipher(&mtk_algs[i]->alg.skcipher);
break;
case MTK_ALG_TYPE_AEAD:
dev_dbg(mtk->dev, "registering: %s",
mtk_algs[i]->alg.aead.base.cra_name);
ret = crypto_register_aead(&mtk_algs[i]->alg.aead);
break;
case MTK_ALG_TYPE_AHASH:
dev_dbg(mtk->dev, "registering: %s",
mtk_algs[i]->alg.ahash.halg.base.cra_name);
ret = crypto_register_ahash(&mtk_algs[i]->alg.ahash);
break;
case MTK_ALG_TYPE_PRNG:
dev_dbg(mtk->dev, "registering: %s",
mtk_algs[i]->alg.rng.base.cra_name);
ret = crypto_register_rng(&mtk_algs[i]->alg.rng);
}
if (ret)
goto fail;
}
return 0;
fail:
mtk_unregister_algs(mtk, i);
return ret;
}
static inline void mtk_irq_disable(struct mtk_device *mtk, u32 mask)
{
__raw_writel(mask, mtk->base + EIP93_REG_MASK_DISABLE);
__raw_readl(mtk->base + EIP93_REG_MASK_DISABLE);
}
static inline void mtk_irq_enable(struct mtk_device *mtk, u32 mask)
{
__raw_writel(mask, mtk->base + EIP93_REG_MASK_ENABLE);
__raw_readl(mtk->base + EIP93_REG_MASK_ENABLE);
}
static inline void mtk_irq_clear(struct mtk_device *mtk, u32 mask)
{
__raw_writel(mask, mtk->base + EIP93_REG_INT_CLR);
__raw_readl(mtk->base + EIP93_REG_INT_CLR);
}
inline void mtk_push_request(struct mtk_device *mtk, int DescriptorPendingCount)
{
int DescriptorCountDone = MTK_RING_SIZE - 1;
int DescriptorDoneTimeout = 3;
DescriptorPendingCount = min_t(int, mtk->ring->requests, 32);
if (!DescriptorPendingCount)
return;
writel(BIT(31) | (DescriptorCountDone & GENMASK(10, 0)) |
(((DescriptorPendingCount - 1) & GENMASK(10, 0)) << 16) |
((DescriptorDoneTimeout & GENMASK(4, 0)) << 26),
mtk->base + EIP93_REG_PE_RING_THRESH);
}
static void mtk_handle_result_descriptor(struct mtk_device *mtk)
{
struct crypto_async_request *async = NULL;
struct eip93_descriptor_s *rdesc;
int handled = 0, nreq;
int try, ret, err = 0;
volatile int done1, done2;
bool last_entry = false;
bool complete = false;
u32 flags;
get_more:
nreq = readl(mtk->base + EIP93_REG_PE_RD_COUNT) & GENMASK(10, 0);
while (nreq) {
rdesc = mtk_get_descriptor(mtk);
if (IS_ERR(rdesc)) {
dev_err(mtk->dev, "Ndesc: %d nreq: %d\n", handled, nreq);
ret = -EIO;
break;
}
/* make sure EIP93 finished writing all data
* (volatile int) used since bits will be updated via DMA
*/
try = 0;
while (try < 1000) {
done1 = (volatile int)rdesc->peCrtlStat.bits.peReady;
done2 = (volatile int)rdesc->peLength.bits.peReady;
if ((!done1) || (!done2)) {
try++;
cpu_relax();
continue;
}
break;
}
/*
if (try)
dev_err(mtk->dev, "EIP93 try-count: %d", try);
*/
err = rdesc->peCrtlStat.bits.errStatus;
if (err) {
dev_err(mtk->dev, "Err: %02x\n", err);
}
handled++;
flags = rdesc->userId;
if (flags & MTK_DESC_FINISH)
complete = true;
if (flags & MTK_DESC_LAST) {
last_entry = true;
break;
}
nreq--;
}
if (last_entry) {
last_entry = false;
if (flags & MTK_DESC_PRNG)
mtk_prng_done(mtk, err);
if (flags & MTK_DESC_SKCIPHER) {
async = (struct crypto_async_request *)rdesc->arc4Addr;
mtk_skcipher_handle_result(mtk, async, complete, err);
}
if (flags & MTK_DESC_AEAD) {
async = (struct crypto_async_request *)rdesc->arc4Addr;
mtk_aead_handle_result(mtk, async, complete, err);
}
}
if (handled) {
writel(handled, mtk->base + EIP93_REG_PE_RD_COUNT);
spin_lock(&mtk->ring->lock);
mtk->ring->requests -= handled;
if (!mtk->ring->requests) {
mtk->ring->busy = false;
spin_unlock(&mtk->ring->lock);
goto queue_done;
}
spin_unlock(&mtk->ring->lock);
handled = 0;
goto get_more;
}
spin_lock(&mtk->ring->lock);
if (mtk->ring->requests)
mtk_push_request(mtk, mtk->ring->requests);
else
mtk->ring->busy = false;
spin_unlock(&mtk->ring->lock);
queue_done:
mtk_irq_enable(mtk, BIT(1));
}
static irqreturn_t mtk_irq_handler(int irq, void *dev_id)
{
struct mtk_device *mtk = (struct mtk_device *)dev_id;
u32 irq_status;
irq_status = readl(mtk->base + EIP93_REG_INT_MASK_STAT);
if (irq_status & BIT(1)) {
mtk_irq_clear(mtk, BIT(1));
mtk_irq_disable(mtk, BIT(1));
tasklet_hi_schedule(&mtk->done);
return IRQ_HANDLED;
}
/* TODO: error handler; for now just clear ALL */
dev_err(mtk->dev, "IRQ: %08x\n", irq_status);
mtk_irq_clear(mtk, irq_status);
if (irq_status) {
printk("disable irq\n");
mtk_irq_disable(mtk, irq_status);
}
return IRQ_NONE;
}
static void mtk_done_tasklet(unsigned long data)
{
struct mtk_device *mtk = (struct mtk_device *)data;
mtk_handle_result_descriptor(mtk);
}
void mtk_initialize(struct mtk_device *mtk)
{
uint8_t fRstPacketEngine = 1;
uint8_t fResetRing = 1;
uint8_t PE_Mode = 3;
uint8_t fBO_PD_en = 0;
uint8_t fBO_SA_en = 0;
uint8_t fBO_Data_en = 0;
uint8_t fBO_TD_en = 0;
uint8_t fEnablePDRUpdate = 1;
int InputThreshold = 128;
int OutputThreshold = 128;
int DescriptorCountDone = MTK_RING_SIZE - 1;
int DescriptorPendingCount = 1;
int DescriptorDoneTimeout = 3;
u32 regVal;
writel((fRstPacketEngine & 1) |
((fResetRing & 1) << 1) |
((PE_Mode & GENMASK(2, 0)) << 8) |
((fBO_PD_en & 1) << 16) |
((fBO_SA_en & 1) << 17) |
((fBO_Data_en & 1) << 18) |
((fBO_TD_en & 1) << 20) |
((fEnablePDRUpdate & 1) << 10),
mtk->base + EIP93_REG_PE_CONFIG);
udelay(10);
fRstPacketEngine = 0;
fResetRing = 0;
writel((fRstPacketEngine & 1) |
((fResetRing & 1) << 1) |
((PE_Mode & GENMASK(2, 0)) << 8) |
((fBO_PD_en & 1) << 16) |
((fBO_SA_en & 1) << 17) |
((fBO_Data_en & 1) << 18) |
((fBO_TD_en & 1) << 20) |
((fEnablePDRUpdate & 1) << 10),
mtk->base + EIP93_REG_PE_CONFIG);
/* Initialize the BYTE_ORDER_CFG register */
writel((EIP93_BYTE_ORDER_PD & GENMASK(4, 0)) |
((EIP93_BYTE_ORDER_SA & GENMASK(4, 0)) << 4) |
((EIP93_BYTE_ORDER_DATA & GENMASK(4, 0)) << 8) |
((EIP93_BYTE_ORDER_TD & GENMASK(2, 0)) << 16),
mtk->base + EIP93_REG_PE_ENDIAN_CONFIG);
/* Initialize the INT_CFG register */
writel((EIP93_INT_HOST_OUTPUT_TYPE & 1) |
((EIP93_INT_PULSE_CLEAR & 1) << 1),
mtk->base + EIP93_REG_INT_CFG);
/* Clock Control: required for DHM, optional for ARM
* 0x1 enables only the Packet Engine clock,
* with AES, DES and HASH clocks on demand.
* Activate all clocks here for performance.
*/
regVal = BIT(0) | BIT(1) | BIT(2) | BIT(4);
writel(regVal, mtk->base + EIP93_REG_PE_CLOCK_CTRL);
writel(BIT(31) | (InputThreshold & GENMASK(10, 0)) |
((OutputThreshold & GENMASK(10, 0)) << 16),
mtk->base + EIP93_REG_PE_BUF_THRESH);
/* Clear/ack all interrupts before disable all */
mtk_irq_clear(mtk, 0xFFFFFFFF);
mtk_irq_disable(mtk, 0xFFFFFFFF);
writel((DescriptorCountDone & GENMASK(10, 0)) |
(((DescriptorPendingCount - 1) & GENMASK(10, 0)) << 16) |
((DescriptorDoneTimeout & GENMASK(4, 0)) << 26),
mtk->base + EIP93_REG_PE_RING_THRESH);
regVal = readl(mtk->base + EIP93_REG_PE_REVISION);
dev_dbg(mtk->dev, "Rev: %08x", regVal);
regVal = readl(mtk->base + EIP93_REG_PE_OPTION_1);
dev_dbg(mtk->dev, "Opt1: %08x", regVal);
regVal = readl(mtk->base + EIP93_REG_PE_OPTION_0);
dev_dbg(mtk->dev, "Opt0: %08x", regVal);
}
static void mtk_desc_free(struct mtk_device *mtk,
struct mtk_desc_ring *cdr,
struct mtk_desc_ring *rdr)
{
writel(0, mtk->base + EIP93_REG_PE_RING_CONFIG);
writel(0, mtk->base + EIP93_REG_PE_CDR_BASE);
writel(0, mtk->base + EIP93_REG_PE_RDR_BASE);
}
static int mtk_desc_init(struct mtk_device *mtk,
struct mtk_desc_ring *cdr,
struct mtk_desc_ring *rdr)
{
int RingOffset, RingSize;
cdr->offset = sizeof(struct eip93_descriptor_s);
cdr->base = dmam_alloc_coherent(mtk->dev, cdr->offset * MTK_RING_SIZE,
&cdr->base_dma, GFP_KERNEL);
if (!cdr->base)
return -ENOMEM;
cdr->write = cdr->base;
cdr->base_end = cdr->base + cdr->offset * (MTK_RING_SIZE - 1);
cdr->read = cdr->base;
dev_dbg(mtk->dev, "CD Ring : %08X\n", cdr->base_dma);
rdr->offset = sizeof(struct eip93_descriptor_s);
rdr->base = dmam_alloc_coherent(mtk->dev, rdr->offset * MTK_RING_SIZE,
&rdr->base_dma, GFP_KERNEL);
if (!rdr->base)
return -ENOMEM;
rdr->write = rdr->base;
rdr->base_end = rdr->base + rdr->offset * (MTK_RING_SIZE - 1);
rdr->read = rdr->base;
dev_dbg(mtk->dev, "RD Ring : %08X\n", rdr->base_dma);
writel((u32)cdr->base_dma, mtk->base + EIP93_REG_PE_CDR_BASE);
writel((u32)rdr->base_dma, mtk->base + EIP93_REG_PE_RDR_BASE);
RingOffset = 8; /* 8 words per descriptor */
RingSize = MTK_RING_SIZE - 1;
writel(((RingOffset & GENMASK(8, 0)) << 16) |
(RingSize & GENMASK(10, 0)),
mtk->base + EIP93_REG_PE_RING_CONFIG);
/* Create Sa and State record DMA pool */
mtk->saRecord_pool = dmam_pool_create("eip93-saRecord",
mtk->dev, sizeof(struct saRecord_s), 32, 0);
if (!mtk->saRecord_pool) {
dev_err(mtk->dev, "Unable to allocate saRecord DMA pool\n");
return -ENOMEM;
}
mtk->saState_pool = dmam_pool_create("eip93-saState",
mtk->dev, sizeof(struct saState_s), 32, 0);
if (!mtk->saState_pool) {
dev_err(mtk->dev, "Unable to allocate saState DMA pool\n");
return -ENOMEM;
}
return 0;
}
static int mtk_crypto_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_device *mtk;
struct resource *res;
int ret;
mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
if (!mtk)
return -ENOMEM;
mtk->dev = dev;
platform_set_drvdata(pdev, mtk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mtk->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mtk->base))
return PTR_ERR(mtk->base);
mtk->irq = platform_get_irq(pdev, 0);
if (mtk->irq < 0) {
dev_err(mtk->dev, "Cannot get IRQ resource\n");
return mtk->irq;
}
dev_dbg(mtk->dev, "Assigning IRQ: %d", mtk->irq);
ret = devm_request_irq(mtk->dev, mtk->irq, mtk_irq_handler,
IRQF_TRIGGER_HIGH, dev_name(mtk->dev), mtk);
mtk->ring = devm_kcalloc(mtk->dev, 1, sizeof(*mtk->ring), GFP_KERNEL);
if (!mtk->ring) {
dev_err(mtk->dev, "Can't allocate Ring memory\n");
}
ret = mtk_desc_init(mtk, &mtk->ring->cdr, &mtk->ring->rdr);
if (ret == -ENOMEM)
return -ENOMEM;
mtk->prng = devm_kcalloc(mtk->dev, 1, sizeof(*mtk->prng), GFP_KERNEL);
if (!mtk->prng) {
dev_err(mtk->dev, "Can't allocate PRNG memory\n");
return -ENOMEM;
}
mtk->ring->requests = 0;
mtk->ring->busy = false;
spin_lock_init(&mtk->ring->lock);
spin_lock_init(&mtk->ring->read_lock);
spin_lock_init(&mtk->ring->write_lock);
/* Init tasklet for bottom half processing */
tasklet_init(&mtk->done, mtk_done_tasklet, (unsigned long)mtk);
mtk_initialize(mtk);
/* Init. finished, enable RDR interrupt */
mtk_irq_enable(mtk, BIT(1));
ret = mtk_prng_init(mtk, true);
if (ret)
dev_info(mtk->dev, "PRNG initialized");
else
dev_err(mtk->dev, "Could not initialize PRNG");
ret = mtk_register_algs(mtk);
dev_info(mtk->dev, "EIP93 initialized succesfull\n");
return 0;
}
static int mtk_crypto_remove(struct platform_device *pdev)
{
struct mtk_device *mtk = platform_get_drvdata(pdev);
mtk_unregister_algs(mtk, ARRAY_SIZE(mtk_algs));
/* Clear/ack all interrupts before disable all */
mtk_irq_clear(mtk, 0xFFFFFFFF);
mtk_irq_disable(mtk, 0xFFFFFFFF);
writel(0, mtk->base + EIP93_REG_PE_CLOCK_CTRL);
tasklet_kill(&mtk->done);
mtk_desc_free(mtk, &mtk->ring->cdr, &mtk->ring->rdr);
dev_info(mtk->dev, "EIP93 removed.\n");
return 0;
}
static const struct of_device_id mtk_crypto_of_match[] = {
{ .compatible = "mediatek,mtk-eip93", },
{}
};
MODULE_DEVICE_TABLE(of, mtk_crypto_of_match);
static struct platform_driver mtk_crypto_driver = {
.probe = mtk_crypto_probe,
.remove = mtk_crypto_remove,
.driver = {
.name = "mtk-eip93",
.of_match_table = mtk_crypto_of_match,
},
};
module_platform_driver(mtk_crypto_driver);
MODULE_AUTHOR("Richard van Schagen <vschagen@cs.com>");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_DESCRIPTION("Mediatek EIP-93 crypto engine driver");
MODULE_LICENSE("GPL v2");

View File

@@ -1,91 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2019 - 2020
*
* Richard van Schagen <vschagen@cs.com>
*/
#ifndef _CORE_H_
#define _CORE_H_
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/dmapool.h>
#include <crypto/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
/**
* struct mtk_device - crypto engine device structure
*/
struct mtk_device {
void __iomem *base;
struct device *dev;
struct clk *clk;
int irq;
struct tasklet_struct dequeue;
struct tasklet_struct done;
struct mtk_ring *ring;
struct dma_pool *saRecord_pool;
struct dma_pool *saState_pool;
struct mtk_prng_device *prng;
};
struct mtk_prng_device {
struct saRecord_s *PRNGSaRecord;
dma_addr_t PRNGSaRecord_dma;
void *PRNGBuffer[2];
dma_addr_t PRNGBuffer_dma[2];
uint32_t cur_buf;
struct completion Filled;
atomic_t State;
};
struct mtk_desc_ring {
void *base;
void *base_end;
dma_addr_t base_dma;
/* write and read pointers */
void *read;
void *write;
/* descriptor element offset */
u32 offset;
};
struct mtk_ring {
spinlock_t lock;
/* command/result rings */
struct mtk_desc_ring cdr;
struct mtk_desc_ring rdr;
spinlock_t write_lock;
spinlock_t read_lock;
/* Number of requests in the engine. */
int requests;
/* The ring is handling at least one request */
bool busy;
};
enum mtk_alg_type {
MTK_ALG_TYPE_SKCIPHER,
MTK_ALG_TYPE_AEAD,
MTK_ALG_TYPE_AHASH,
MTK_ALG_TYPE_PRNG,
};
struct mtk_alg_template {
struct mtk_device *mtk;
enum mtk_alg_type type;
unsigned long flags;
union {
struct skcipher_alg skcipher;
struct aead_alg aead;
struct ahash_alg ahash;
struct rng_alg rng;
} alg;
};
#endif /* _CORE_H_ */

View File

@@ -1,360 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2019 - 2020
*
* Richard van Schagen <vschagen@cs.com>
*/
#include "eip93-common.h"
#include "eip93-core.h"
#include "eip93-regs.h"
#include "eip93-ring.h"
#include "eip93-prng.h"
static int mtk_prng_push_job(struct mtk_device *mtk, bool reset)
{
struct mtk_prng_device *prng = mtk->prng;
struct eip93_descriptor_s cdesc;
int cur = prng->cur_buf;
int len, mode, err;
if (reset) {
len = 0;
mode = 1;
} else {
len = 4080;
mode = 2;
}
init_completion(&prng->Filled);
atomic_set(&prng->State, BUF_EMPTY);
memset(&cdesc, 0, sizeof(struct eip93_descriptor_s));
cdesc.peCrtlStat.bits.hostReady = 1;
cdesc.peCrtlStat.bits.prngMode = mode;
cdesc.peCrtlStat.bits.hashFinal = 0;
cdesc.peCrtlStat.bits.padCrtlStat = 0;
cdesc.peCrtlStat.bits.peReady = 0;
cdesc.srcAddr = 0;
cdesc.dstAddr = (u32)prng->PRNGBuffer_dma[cur];
cdesc.saAddr = (u32)prng->PRNGSaRecord_dma;
cdesc.stateAddr = 0;
cdesc.arc4Addr = 0;
cdesc.userId = MTK_DESC_PRNG | MTK_DESC_LAST | MTK_DESC_FINISH;
cdesc.peLength.bits.byPass = 0;
cdesc.peLength.bits.length = 4080;
cdesc.peLength.bits.hostReady = 1;
err = mtk_put_descriptor(mtk, cdesc);
if (err)
dev_err(mtk->dev, "PRNG: No Descriptor space");
/* */
spin_lock(&mtk->ring->lock);
mtk->ring[0].requests += 1;
mtk->ring[0].busy = true;
spin_unlock(&mtk->ring->lock);
writel(1, mtk->base + EIP93_REG_PE_CD_COUNT);
wait_for_completion(&prng->Filled);
if (atomic_read(&prng->State) == PRNG_NEED_RESET)
return false;
return true;
}
/*----------------------------------------------------------------------------
* mtk_prng_init
*
* This function initializes the PE PRNG for the ARM mode.
*
* Return Value
* true: PRNG is initialized
* false: PRNG initialization failed
*/
bool mtk_prng_init(struct mtk_device *mtk, bool fLongSA)
{
struct mtk_prng_device *prng = mtk->prng;
int i;
struct saRecord_s *saRecord;
const uint32_t PRNGKey[] = {0xe0fc631d, 0xcbb9fb9a,
0x869285cb, 0xcbb9fb9a};
const uint32_t PRNGSeed[] = {0x758bac03, 0xf20ab39e,
0xa569f104, 0x95dfaea6};
const uint32_t PRNGDateTime[] = {0, 0, 0, 0};
if (!mtk)
return -ENODEV;
prng->cur_buf = 0;
prng->PRNGBuffer[0] = devm_kzalloc(mtk->dev, 4080, GFP_KERNEL);
prng->PRNGBuffer_dma[0] = (u32)dma_map_single(mtk->dev,
(void *)prng->PRNGBuffer[0],
4080, DMA_FROM_DEVICE);
prng->PRNGBuffer[1] = devm_kzalloc(mtk->dev, 4080, GFP_KERNEL);
prng->PRNGBuffer_dma[1] = (u32)dma_map_single(mtk->dev,
(void *)prng->PRNGBuffer[1],
4080, DMA_FROM_DEVICE);
prng->PRNGSaRecord = dmam_alloc_coherent(mtk->dev,
sizeof(struct saRecord_s),
&prng->PRNGSaRecord_dma, GFP_KERNEL);
if (!prng->PRNGSaRecord) {
dev_err(mtk->dev, "PRNG dma_alloc for saRecord failed\n");
return -ENOMEM;
}
saRecord = &prng->PRNGSaRecord[0];
saRecord->saCmd0.word = 0x00001307;
saRecord->saCmd1.word = 0x02000000;
for (i = 0; i < 4; i++) {
saRecord->saKey[i] = PRNGKey[i];
saRecord->saIDigest[i] = PRNGSeed[i];
saRecord->saODigest[i] = PRNGDateTime[i];
}
return mtk_prng_push_job(mtk, true);
}
void mtk_prng_done(struct mtk_device *mtk, u32 err)
{
struct mtk_prng_device *prng = mtk->prng;
int cur = prng->cur_buf;
if (err) {
dev_err(mtk->dev, "PRNG error: %d\n", err);
atomic_set(&prng->State, PRNG_NEED_RESET);
}
/* Buffer refilled, invalidate cache */
dma_unmap_single(mtk->dev, prng->PRNGBuffer_dma[cur],
4080, DMA_FROM_DEVICE);
complete(&prng->Filled);
}
static int get_prng_bytes(char *buf, size_t nbytes, struct mtk_prng_ctx *ctx,
int do_cont_test)
{
int err;
spin_lock_bh(&ctx->prng_lock);
err = -EINVAL;
if (ctx->flags & PRNG_NEED_RESET)
goto done;
done:
spin_unlock_bh(&ctx->prng_lock);
return err;
}
static int mtk_prng_generate(struct crypto_rng *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int dlen)
{
struct mtk_prng_ctx *prng = crypto_rng_ctx(tfm);
return get_prng_bytes(dst, dlen, prng, 1);
}
static int mtk_prng_seed(struct crypto_rng *tfm, const u8 *seed,
unsigned int slen)
{
struct rng_alg *alg = crypto_rng_alg(tfm);
struct mtk_alg_template *tmpl = container_of(alg,
struct mtk_alg_template, alg.rng);
struct mtk_device *mtk = tmpl->mtk;
return 0;
}
static bool mtk_prng_fill_buffer(struct mtk_device *mtk)
{
struct mtk_prng_device *prng = mtk->prng;
int cur = prng->cur_buf;
int ret;
if (!mtk)
return -ENODEV;
/* add logic for 2 buffers and swap */
prng->PRNGBuffer_dma[cur] = (u32)dma_map_single(mtk->dev,
(void *)prng->PRNGBuffer[cur],
4080, DMA_FROM_DEVICE);
ret = mtk_prng_push_job(mtk, false);
return ret;
}
static int reset_prng_context(struct mtk_prng_ctx *ctx,
const unsigned char *key,
const unsigned char *V,
const unsigned char *DT)
{
spin_lock_bh(&ctx->prng_lock);
ctx->flags |= PRNG_NEED_RESET;
if (key)
memcpy(ctx->PRNGKey, key, DEFAULT_PRNG_KSZ);
else
memcpy(ctx->PRNGKey, DEFAULT_PRNG_KEY, DEFAULT_PRNG_KSZ);
if (V)
memcpy(ctx->PRNGSeed, V, DEFAULT_BLK_SZ);
else
memcpy(ctx->PRNGSeed, DEFAULT_V_SEED, DEFAULT_BLK_SZ);
if (DT)
memcpy(ctx->PRNGDateTime, DT, DEFAULT_BLK_SZ);
else
memset(ctx->PRNGDateTime, 0, DEFAULT_BLK_SZ);
memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);
ctx->rand_data_valid = DEFAULT_BLK_SZ;
ctx->flags &= ~PRNG_NEED_RESET;
spin_unlock_bh(&ctx->prng_lock);
return 0;
}
/*
* This is the cprng_registered reset method; the seed value is
* interpreted as the tuple { V KEY DT }.
* V and KEY are required during reset, and DT is optional, detected
* as being present by testing the length of the seed.
*/
static int cprng_reset(struct crypto_rng *tfm,
const u8 *seed, unsigned int slen)
{
struct mtk_prng_ctx *prng = crypto_rng_ctx(tfm);
const u8 *key = seed + DEFAULT_BLK_SZ;
const u8 *dt = NULL;
if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
return -EINVAL;
if (slen >= (2 * DEFAULT_BLK_SZ + DEFAULT_PRNG_KSZ))
dt = key + DEFAULT_PRNG_KSZ;
reset_prng_context(prng, key, seed, dt);
if (prng->flags & PRNG_NEED_RESET)
return -EINVAL;
return 0;
}
static void free_prng_context(struct mtk_prng_ctx *ctx)
{
crypto_free_cipher(ctx->tfm);
}
static int cprng_init(struct crypto_tfm *tfm)
{
struct mtk_prng_ctx *ctx = crypto_tfm_ctx(tfm);
spin_lock_init(&ctx->prng_lock);
if (reset_prng_context(ctx, NULL, NULL, NULL) < 0)
return -EINVAL;
/*
* after allocation, we should always force the user to reset
* so they don't inadvertently use the insecure default values
* without specifying them intentionally
*/
ctx->flags |= PRNG_NEED_RESET;
return 0;
}
static void cprng_exit(struct crypto_tfm *tfm)
{
free_prng_context(crypto_tfm_ctx(tfm));
}
struct mtk_alg_template mtk_alg_prng = {
.type = MTK_ALG_TYPE_PRNG,
.flags = 0,
.alg.rng = {
.generate = mtk_prng_generate,
.seed = mtk_prng_seed,
.seedsize = 0,
.base = {
.cra_name = "stdrng",
.cra_driver_name = "eip93-prng",
.cra_priority = 200,
.cra_ctxsize = sizeof(struct mtk_prng_ctx),
.cra_module = THIS_MODULE,
.cra_init = cprng_init,
.cra_exit = cprng_exit,
},
},
};
//#ifdef CONFIG_CRYPTO_FIPS
static int fips_cprng_get_random(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *rdata, unsigned int dlen)
{
struct mtk_prng_ctx *prng = crypto_rng_ctx(tfm);
return get_prng_bytes(rdata, dlen, prng, 1);
}
static int fips_cprng_reset(struct crypto_rng *tfm,
const u8 *seed, unsigned int slen)
{
struct mtk_prng_ctx *prng = crypto_rng_ctx(tfm);
u8 rdata[DEFAULT_BLK_SZ];
const u8 *key = seed + DEFAULT_BLK_SZ;
int rc;
if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
return -EINVAL;
/* fips strictly requires seed != key */
if (!memcmp(seed, key, DEFAULT_PRNG_KSZ))
return -EINVAL;
rc = cprng_reset(tfm, seed, slen);
if (!rc)
goto out;
/* this primes our continuity test */
rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0);
prng->rand_data_valid = DEFAULT_BLK_SZ;
out:
return rc;
}
struct mtk_alg_template mtk_alg_cprng = {
.type = MTK_ALG_TYPE_PRNG,
.flags = 0,
.alg.rng = {
.generate = fips_cprng_get_random,
.seed = fips_cprng_reset,
.seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ,
.base = {
.cra_name = "fips(ansi_cprng)",
.cra_driver_name = "eip93-fips_ansi_cprng",
.cra_priority = 300,
.cra_ctxsize = sizeof(struct mtk_prng_ctx),
.cra_module = THIS_MODULE,
.cra_init = cprng_init,
.cra_exit = cprng_exit,
},
},
};
//#endif

View File

@@ -1,34 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2019 - 2020
*
* Richard van Schagen <vschagen@cs.com>
*/
#define DEFAULT_PRNG_KEY "0123456789abcdef"
#define DEFAULT_PRNG_KSZ 16
#define DEFAULT_BLK_SZ 16
#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
#define BUF_NOT_EMPTY 0
#define BUF_EMPTY 1
#define BUF_PENDING 2
#define PRNG_NEED_RESET 3
extern struct mtk_alg_template mtk_alg_prng;
extern struct mtk_alg_template mtk_alg_cprng;
bool mtk_prng_init(struct mtk_device *mtk, bool fLongSA);
void mtk_prng_done(struct mtk_device *mtk, u32 err);
struct mtk_prng_ctx {
spinlock_t prng_lock;
unsigned char rand_data[DEFAULT_BLK_SZ];
unsigned char last_rand_data[DEFAULT_BLK_SZ];
uint32_t PRNGKey[4];
uint32_t PRNGSeed[4];
uint32_t PRNGDateTime[4];
struct crypto_cipher *tfm;
uint32_t rand_data_valid;
uint32_t flags;
};

View File

@@ -1,190 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2019 - 2020
*
* Richard van Schagen <vschagen@cs.com>
*/
#ifndef REG_EIP93_H
#define REG_EIP93_H
#define EIP93_REG_WIDTH 4
/*-----------------------------------------------------------------------------
* Register Map
*/
#define DESP_BASE 0x0000000
#define EIP93_REG_PE_CTRL_STAT ((DESP_BASE)+(0x00 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_SOURCE_ADDR ((DESP_BASE)+(0x01 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_DEST_ADDR ((DESP_BASE)+(0x02 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_SA_ADDR ((DESP_BASE)+(0x03 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_ADDR ((DESP_BASE)+(0x04 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_USER_ID ((DESP_BASE)+(0x06 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_LENGTH ((DESP_BASE)+(0x07 * EIP93_REG_WIDTH))
//PACKET ENGINE RING configuration registers
#define PE_RNG_BASE 0x0000080
#define EIP93_REG_PE_CDR_BASE ((PE_RNG_BASE)+(0x00 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_RDR_BASE ((PE_RNG_BASE)+(0x01 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_RING_CONFIG ((PE_RNG_BASE)+(0x02 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_RING_THRESH ((PE_RNG_BASE)+(0x03 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_CD_COUNT ((PE_RNG_BASE)+(0x04 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_RD_COUNT ((PE_RNG_BASE)+(0x05 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_RING_RW_PNTR ((PE_RNG_BASE)+(0x06 * EIP93_REG_WIDTH))
//PACKET ENGINE configuration registers
#define PE_CFG_BASE 0x0000100
#define EIP93_REG_PE_CONFIG ((PE_CFG_BASE)+(0x00 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_STATUS ((PE_CFG_BASE)+(0x01 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_BUF_THRESH ((PE_CFG_BASE)+(0x03 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_INBUF_COUNT ((PE_CFG_BASE)+(0x04 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_OUTBUF_COUNT ((PE_CFG_BASE)+(0x05 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_BUF_RW_PNTR ((PE_CFG_BASE)+(0x06 * EIP93_REG_WIDTH))
//PACKET ENGINE endian config
#define EN_CFG_BASE 0x00001CC
#define EIP93_REG_PE_ENDIAN_CONFIG ((EN_CFG_BASE)+(0x00 * EIP93_REG_WIDTH))
//EIP93 CLOCK control registers
#define CLOCK_BASE 0x01E8
#define EIP93_REG_PE_CLOCK_CTRL ((CLOCK_BASE)+(0x00 * EIP93_REG_WIDTH))
//EIP93 Device Option and Revision Register
#define REV_BASE 0x01F4
#define EIP93_REG_PE_OPTION_1 ((REV_BASE)+(0x00 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_OPTION_0 ((REV_BASE)+(0x01 * EIP93_REG_WIDTH))
#define EIP93_REG_PE_REVISION ((REV_BASE)+(0x02 * EIP93_REG_WIDTH))
//EIP93 Interrupt Control Register
#define INT_BASE 0x0200
#define EIP93_REG_INT_UNMASK_STAT ((INT_BASE)+(0x00 * EIP93_REG_WIDTH))
#define EIP93_REG_INT_MASK_STAT ((INT_BASE)+(0x01 * EIP93_REG_WIDTH))
#define EIP93_REG_INT_CLR ((INT_BASE)+(0x01 * EIP93_REG_WIDTH))
#define EIP93_REG_INT_MASK ((INT_BASE)+(0x02 * EIP93_REG_WIDTH))
#define EIP93_REG_INT_CFG ((INT_BASE)+(0x03 * EIP93_REG_WIDTH))
#define EIP93_REG_MASK_ENABLE ((INT_BASE)+(0X04 * EIP93_REG_WIDTH))
#define EIP93_REG_MASK_DISABLE ((INT_BASE)+(0X05 * EIP93_REG_WIDTH))
//EIP93 SA Record register
#define SA_BASE 0x0400
#define EIP93_REG_SA_CMD_0 ((SA_BASE)+(0x00 * EIP93_REG_WIDTH))
#define EIP93_REG_SA_CMD_1 ((SA_BASE)+(0x01 * EIP93_REG_WIDTH))
//#define EIP93_REG_SA_READY ((SA_BASE)+(31 * EIP93_REG_WIDTH))
//State save register
#define STATE_BASE 0x0500
#define EIP93_REG_STATE_IV_0 ((STATE_BASE)+(0x00 * EIP93_REG_WIDTH))
#define EIP93_REG_STATE_IV_1 ((STATE_BASE)+(0x01 * EIP93_REG_WIDTH))
#define EIP93_PE_ARC4STATE_BASEADDR_REG 0x0700
//RAM buffer start address
#define EIP93_INPUT_BUFFER 0x0800
#define EIP93_OUTPUT_BUFFER 0x0800
//EIP93 PRNG Configuration Register
#define PRNG_BASE 0x0300
#define EIP93_REG_PRNG_STAT ((PRNG_BASE)+(0x00 * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_CTRL ((PRNG_BASE)+(0x01 * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_SEED_0 ((PRNG_BASE)+(0x02 * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_SEED_1 ((PRNG_BASE)+(0x03 * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_SEED_2 ((PRNG_BASE)+(0x04 * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_SEED_3 ((PRNG_BASE)+(0x05 * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_KEY_0 ((PRNG_BASE)+(0x06 * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_KEY_1 ((PRNG_BASE)+(0x07 * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_KEY_2 ((PRNG_BASE)+(0x08 * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_KEY_3 ((PRNG_BASE)+(0x09 * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_RES_0 ((PRNG_BASE)+(0x0A * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_RES_1 ((PRNG_BASE)+(0x0B * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_RES_2 ((PRNG_BASE)+(0x0C * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_RES_3 ((PRNG_BASE)+(0x0D * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_LFSR_0 ((PRNG_BASE)+(0x0E * EIP93_REG_WIDTH))
#define EIP93_REG_PRNG_LFSR_1 ((PRNG_BASE)+(0x0F * EIP93_REG_WIDTH))
/*-----------------------------------------------------------------------------
* Constants & masks
*/
#define EIP93_SUPPORTED_INTERRUPTS_MASK 0xffff7f00
#define EIP93_PRNG_DT_TEXT_LOWERHALF 0xDEAD
#define EIP93_PRNG_DT_TEXT_UPPERHALF 0xC0DE
#define EIP93_10BITS_MASK 0X3FF
#define EIP93_12BITS_MASK 0XFFF
#define EIP93_4BITS_MASK 0X04
#define EIP93_20BITS_MASK 0xFFFFF
#define EIP93_MIN_DESC_DONE_COUNT 0
#define EIP93_MAX_DESC_DONE_COUNT 15
#define EIP93_MIN_DESC_PENDING_COUNT 0
#define EIP93_MAX_DESC_PENDING_COUNT 1023
#define EIP93_MIN_TIMEOUT_COUNT 0
#define EIP93_MAX_TIMEOUT_COUNT 15
#define EIP93_MIN_PE_INPUT_THRESHOLD 1
#define EIP93_MAX_PE_INPUT_THRESHOLD 511
#define EIP93_MIN_PE_OUTPUT_THRESHOLD 1
#define EIP93_MAX_PE_OUTPUT_THRESHOLD 432
#define EIP93_MIN_PE_RING_SIZE 1
#define EIP93_MAX_PE_RING_SIZE 1023
#define EIP93_MIN_PE_DESCRIPTOR_SIZE 7
#define EIP93_MAX_PE_DESCRIPTOR_SIZE 15
//3DES keys,seed,known data and its result
#define EIP93_KEY_0 0x133b3454
#define EIP93_KEY_1 0x5e5b890b
#define EIP93_KEY_2 0x5eb30757
#define EIP93_KEY_3 0x93ab15f7
#define EIP93_SEED_0 0x62c4bf5e
#define EIP93_SEED_1 0x972667c8
#define EIP93_SEED_2 0x6345bf67
#define EIP93_SEED_3 0xcb3482bf
#define EIP93_LFSR_0 0xDEADC0DE
#define EIP93_LFSR_1 0xBEEFF00D
/*-----------------------------------------------------------------------------
* EIP93 device initialization specifics
*/
/*----------------------------------------------------------------------------
* Byte Order Reversal Mechanisms Supported in EIP93
* EIP93_BO_REVERSE_HALF_WORD : reverse the byte order within a half-word
* EIP93_BO_REVERSE_WORD : reverse the byte order within a word
* EIP93_BO_REVERSE_DUAL_WORD : reverse the byte order within a dual-word
* EIP93_BO_REVERSE_QUAD_WORD : reverse the byte order within a quad-word
*/
typedef enum
{
EIP93_BO_REVERSE_HALF_WORD = 1,
EIP93_BO_REVERSE_WORD = 2,
EIP93_BO_REVERSE_DUAL_WORD = 4,
EIP93_BO_REVERSE_QUAD_WORD = 8,
} EIP93_Byte_Order_Value_t;
/*----------------------------------------------------------------------------
* Byte Order Reversal Mechanisms Supported in EIP93 for Target Data
* EIP93_BO_REVERSE_HALF_WORD : reverse the byte order within a half-word
* EIP93_BO_REVERSE_WORD : reverse the byte order within a word
*/
typedef enum
{
EIP93_BO_REVERSE_HALF_WORD_TD = 1,
EIP93_BO_REVERSE_WORD_TD = 2,
} EIP93_Byte_Order_Value_TD_t;
// BYTE_ORDER_CFG register values
#define EIP93_BYTE_ORDER_PD EIP93_BO_REVERSE_WORD
#define EIP93_BYTE_ORDER_SA EIP93_BO_REVERSE_WORD
#define EIP93_BYTE_ORDER_DATA EIP93_BO_REVERSE_WORD
#define EIP93_BYTE_ORDER_TD EIP93_BO_REVERSE_WORD_TD
// INT_CFG register values
#define EIP93_INT_HOST_OUTPUT_TYPE 0 // 0 = Level
#define EIP93_INT_PULSE_CLEAR 0 // 0 = Manual clear
#endif

View File

@@ -1,82 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2019 - 2020
*
* Richard van Schagen <vschagen@cs.com>
*/
#include "eip93-common.h"
#include "eip93-core.h"
inline void *mtk_ring_next_wptr(struct mtk_device *mtk,
struct mtk_desc_ring *ring)
{
void *ptr = ring->write;
if ((ring->write == ring->read - ring->offset) ||
(ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
if (ring->write == ring->base_end)
ring->write = ring->base;
else
ring->write += ring->offset;
return ptr;
}
inline void *mtk_ring_next_rptr(struct mtk_device *mtk,
struct mtk_desc_ring *ring)
{
void *ptr = ring->read;
if (ring->write == ring->read)
return ERR_PTR(-ENOENT);
if (ring->read == ring->base_end)
ring->read = ring->base;
else
ring->read += ring->offset;
return ptr;
}
inline int mtk_put_descriptor(struct mtk_device *mtk,
struct eip93_descriptor_s desc)
{
struct eip93_descriptor_s *cdesc;
struct eip93_descriptor_s *rdesc;
spin_lock(&mtk->ring->write_lock);
cdesc = mtk_ring_next_wptr(mtk, &mtk->ring->cdr);
if (IS_ERR(cdesc)) {
spin_unlock(&mtk->ring->write_lock);
return -ENOENT;
}
rdesc = mtk_ring_next_wptr(mtk, &mtk->ring->rdr);
if (IS_ERR(rdesc)) {
spin_unlock(&mtk->ring->write_lock);
return -ENOENT;
}
memset(rdesc, 0, sizeof(struct eip93_descriptor_s));
memcpy(cdesc, &desc, sizeof(struct eip93_descriptor_s));
spin_unlock(&mtk->ring->write_lock);
return 0;
}
inline void *mtk_get_descriptor(struct mtk_device *mtk)
{
struct eip93_descriptor_s *cdesc;
cdesc = mtk_ring_next_rptr(mtk, &mtk->ring->cdr);
if (IS_ERR(cdesc)) {
dev_err(mtk->dev, "Cant get Cdesc");
return cdesc;
}
return mtk_ring_next_rptr(mtk, &mtk->ring->rdr);
}

View File

@@ -1,11 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2019 - 2020
*
* Richard van Schagen <vschagen@cs.com>
*/
inline int mtk_put_descriptor(struct mtk_device *mtk,
struct eip93_descriptor_s desc);
inline void *mtk_get_descriptor(struct mtk_device *mtk);