diff --git a/arch/riscv/boot/dts/starfive/jh7100.dtsi b/arch/riscv/boot/dts/starfive/jh7100.dtsi
index a22b1b936fdab..e874bc05fa517 100644
--- a/arch/riscv/boot/dts/starfive/jh7100.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7100.dtsi
@@ -149,7 +149,10 @@
clkgen: clock-controller@11800000 {
compatible = "starfive,jh7100-clkgen";
- reg = <0x0 0x11800000 0x0 0x10000>;
+		/* TODO: merge back into a single register range once the
+		   clock controller itself programs the crypto clock ("secclk") */
+ reg = <0x0 0x11800000 0x0 0x234>,
+ <0x0 0x11800240 0x0 0xfdc0>;
clocks = <&osc0_clk>, <&osc1_clk>;
clock-names = "osc0", "osc1";
#clock-cells = <1>;
diff --git a/arch/riscv/configs/beaglev_defconfig b/arch/riscv/configs/beaglev_defconfig
index e87745e739e6c..853bdae446191 100644
--- a/arch/riscv/configs/beaglev_defconfig
+++ b/arch/riscv/configs/beaglev_defconfig
@@ -186,7 +186,8 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
CONFIG_LSM=""
CONFIG_CRYPTO_ZSTD=y
-# CONFIG_CRYPTO_HW is not set
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_SIFIVE_VIC_ENCRYPT=y
# CONFIG_RAID6_PQ_BENCHMARK is not set
CONFIG_DMA_CMA=y
# CONFIG_SYMBOLIC_ERRNAME is not set
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9a4c275a13350..a6730c01bee15 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -716,6 +716,8 @@ config CRYPTO_DEV_VMX
source "drivers/crypto/vmx/Kconfig"
+source "drivers/crypto/sifive-vic/Kconfig"
+
config CRYPTO_DEV_IMGTEC_HASH
tristate "Imagination Technologies hardware hash accelerator"
depends on MIPS || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index fa22cb19e242a..b61bb156865a4 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
 obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
 obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
+obj-$(CONFIG_CRYPTO_DEV_SIFIVE_VIC_ENCRYPT) += sifive-vic/
 obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
 obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
 obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
diff --git a/drivers/crypto/sifive-vic/Kconfig b/drivers/crypto/sifive-vic/Kconfig
new file mode 100644
index 0000000000000..0f1e520555ba6
--- /dev/null
+++ b/drivers/crypto/sifive-vic/Kconfig
@@ -0,0 +1,11 @@
+config CRYPTO_DEV_SIFIVE_VIC_ENCRYPT
+	tristate "Encryption acceleration support on sifive vic"
+	depends on RISCV
+	select CRYPTO_ENGINE
+	select CRYPTO_AEAD
+	select CRYPTO_SKCIPHER
+	select CRYPTO_RSA
+	help
+	  Support for SIFIVE VIC cryptographic acceleration instructions on riscv64 CPU.
+	  This module supports acceleration for AES and GHASH in hardware. If you
+	  choose 'M' here, this module will be called vic-crypto.
diff --git a/drivers/crypto/sifive-vic/Makefile b/drivers/crypto/sifive-vic/Makefile
new file mode 100644
index 0000000000000..27ac09e4fc589
--- /dev/null
+++ b/drivers/crypto/sifive-vic/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_SIFIVE_VIC_ENCRYPT) += vic-crypto.o
+vic-crypto-objs := vic-sec.o vic-aes.o vic-sha.o vic-pka.o vic-pka-tools.o
diff --git a/drivers/crypto/sifive-vic/vic-aes.c b/drivers/crypto/sifive-vic/vic-aes.c
new file mode 100644
index 0000000000000..dcf7004e0d9f2
--- /dev/null
+++ b/drivers/crypto/sifive-vic/vic-aes.c
@@ -0,0 +1,1634 @@
+/*
+ ******************************************************************************
+ * @file vic-aes.c
+ * @author StarFive Technology
+ * @version V1.0
+ * @date 08/13/2020
+ * @brief
+ ******************************************************************************
+ * @copy
+ *
+ * THE PRESENT SOFTWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
+ * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE
+ * TIME. AS A RESULT, STARFIVE SHALL NOT BE HELD LIABLE FOR ANY
+ * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING
+ * FROM THE CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE
+ * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
+ *
+ *
+ * © COPYRIGHT 2020 Shanghai StarFive Technology Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "vic-sec.h"
+
+
+/* Mode mask = bits [3..0] */
+#define FLG_MODE_MASK GENMASK(3, 0)
+
+/* Bit [4] encrypt / decrypt */
+#define FLG_ENCRYPT BIT(4)
+
+/* Bit [31..16] status */
+#define FLG_CCM_PADDED_WA BIT(5)
+
+#define CR_KEY128 BIT(9)
+#define CR_KEY192 BIT(10)
+#define CR_KEY256 BIT(11)
+
+/* Registers */
+#define CRYP_CR 0x00000000
+#define CRYP_SR 0x00000004
+#define CRYP_DIN 0x00000008
+#define CRYP_DOUT 0x0000000C
+#define CRYP_DMACR 0x00000010
+#define CRYP_IMSCR 0x00000014
+#define CRYP_RISR 0x00000018
+#define CRYP_MISR 0x0000001C
+#define CRYP_K0LR 0x00000020
+#define CRYP_K0RR 0x00000024
+#define CRYP_K1LR 0x00000028
+#define CRYP_K1RR 0x0000002C
+#define CRYP_K2LR 0x00000030
+#define CRYP_K2RR 0x00000034
+#define CRYP_K3LR 0x00000038
+#define CRYP_K3RR 0x0000003C
+#define CRYP_IV0LR 0x00000040
+#define CRYP_IV0RR 0x00000044
+#define CRYP_IV1LR 0x00000048
+#define CRYP_IV1RR 0x0000004C
+#define CRYP_CSGCMCCM0R 0x00000050
+#define CRYP_CSGCM0R 0x00000070
+
+#define SR_BUSY 0x00000010
+#define SR_OFNE 0x00000004
+
+#define IMSCR_IN BIT(0)
+#define IMSCR_OUT BIT(1)
+
+#define MISR_IN BIT(0)
+#define MISR_OUT BIT(1)
+
+/* Misc */
+#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
+#define GCM_CTR_INIT 1
+#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
+#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+#define CRYP_AUTOSUSPEND_DELAY 50
+
+/*
+ * Wait for the AES engine to finish the current operation.
+ *
+ * NOTE(review): sdev->doing is taken in vic_aes_start() and is presumably
+ * released by the interrupt handler on "sec done" - confirm in vic-sec.c.
+ * Blocking on the mutex here therefore waits for completion.
+ *
+ * Returns 0 when the engine completed, -ETIMEDOUT (was a bare -1) if it
+ * is still busy; callers only test for non-zero, so this is compatible.
+ */
+static inline int vic_aes_wait_busy(struct vic_sec_dev *sdev)
+{
+	int ret = -ETIMEDOUT;
+
+	mutex_lock(&sdev->doing);
+	if (sdev->status.sec_done && !sdev->status.aes_busy)
+		ret = 0;
+	mutex_unlock(&sdev->doing);
+
+	return ret;
+}
+
+/*
+ * Predicates decoding the operation mode stashed in cryp->flags:
+ * low nibble = VIC_AES_MODE_* (FLG_MODE_MASK), bit 4 = FLG_ENCRYPT.
+ */
+static inline int is_ecb(struct vic_sec_dev *cryp)
+{
+	return (cryp->flags & FLG_MODE_MASK) == VIC_AES_MODE_ECB;
+}
+
+static inline int is_cbc(struct vic_sec_dev *cryp)
+{
+	return (cryp->flags & FLG_MODE_MASK) == VIC_AES_MODE_CBC;
+}
+
+static inline int is_cmac(struct vic_sec_dev *cryp)
+{
+	return (cryp->flags & FLG_MODE_MASK) == VIC_AES_MODE_CMAC;
+}
+
+static inline int is_ofb(struct vic_sec_dev *cryp)
+{
+	return (cryp->flags & FLG_MODE_MASK) == VIC_AES_MODE_OFB;
+}
+
+static inline int is_cfb(struct vic_sec_dev *cryp)
+{
+	return (cryp->flags & FLG_MODE_MASK) == VIC_AES_MODE_CFB;
+}
+
+static inline int is_ctr(struct vic_sec_dev *cryp)
+{
+	return (cryp->flags & FLG_MODE_MASK) == VIC_AES_MODE_CTR;
+}
+
+static inline int is_gcm(struct vic_sec_dev *cryp)
+{
+	return (cryp->flags & FLG_MODE_MASK) == VIC_AES_MODE_GCM;
+}
+
+static inline int is_ccm(struct vic_sec_dev *cryp)
+{
+	return (cryp->flags & FLG_MODE_MASK) == VIC_AES_MODE_CCM;
+}
+
+/* Raw mode value (one of VIC_AES_MODE_*) for hardware programming. */
+static inline int get_aes_mode(struct vic_sec_dev *cryp)
+{
+	return cryp->flags & FLG_MODE_MASK;
+}
+
+static inline int is_encrypt(struct vic_sec_dev *cryp)
+{
+	return !!(cryp->flags & FLG_ENCRYPT);
+}
+
+static inline int is_decrypt(struct vic_sec_dev *cryp)
+{
+	return !is_encrypt(cryp);
+}
+
+static int vic_cryp_read_auth_tag(struct vic_sec_dev *sdev);
+
+/*
+ * Load the IV into the engine's context RAM.  Only done when a new
+ * message begins (ctx->begin_new); continuation chunks rely on the
+ * chaining state kept by the hardware.
+ */
+static void vic_cryp_hw_write_iv(struct vic_sec_dev *sdev, u32 *iv)
+{
+	if (!iv)
+		return;
+	if(sdev->ctx->begin_new){
+		vic_write_n(sdev->io_base + VIC_AES_CTX_RAM_OFFSET + VIC_AES_CTX_IV_OFS, (u8 *)iv,
+			VIC_AES_IV_LEN);
+	}
+}
+
+/* Load the counter block (CTR/GCM/CCM modes) into context RAM. */
+static void vic_cryp_hw_write_ctr(struct vic_sec_dev *sdev, u32 *ctr)
+{
+	if(sdev->ctx->begin_new){
+		vic_write_n(sdev->io_base + VIC_AES_CTX_RAM_OFFSET + VIC_AES_CTX_CTR_OFS, (u8 *)ctr,
+			VIC_AES_IV_LEN);
+	}
+}
+
+/* Load the AES key from the tfm context into context RAM. */
+static void vic_cryp_hw_write_key(struct vic_sec_dev *sdev)
+{
+	if(sdev->ctx->begin_new){
+		vic_write_n(sdev->io_base + VIC_AES_CTX_RAM_OFFSET + VIC_AES_CTX_KEYS_OFS, sdev->ctx->key,
+			sdev->ctx->keylen);
+	}
+}
+
+/*
+ * AEAD plaintext length: on decryption the auth tag at the tail of the
+ * input is not part of the text to be processed.
+ */
+static unsigned int vic_cryp_get_input_text_len(struct vic_sec_dev *cryp)
+{
+	return is_encrypt(cryp) ? cryp->areq->cryptlen :
+		cryp->areq->cryptlen - cryp->authsize;
+}
+
+/* GCM init: counter J0 = 96-bit IV || 0x00000001 (big endian). */
+static int vic_cryp_gcm_init(struct vic_sec_dev *sdev, u32 cfg)
+{
+	/* Phase 1 : init */
+	memcpy(sdev->last_ctr, sdev->areq->iv, 12);
+	sdev->last_ctr[3] = cpu_to_be32(GCM_CTR_INIT);
+
+	vic_cryp_hw_write_ctr(sdev,(u32 *)sdev->last_ctr);
+	return 0;
+}
+
+static int vic_cryp_write_cryp_out(struct vic_sec_dev *sdev);
+
+/*
+ * CCM init (RFC 3610): derive counter block A0 from the request IV and
+ * build block B0 = flags || nonce || msg-length, stashing both for the
+ * hardware.  iv[0] holds L' = L - 1 on entry.
+ */
+static int vic_cryp_ccm_init(struct vic_sec_dev *sdev, u32 cfg)
+{
+	u8 iv[AES_BLOCK_SIZE], *b0;
+	unsigned int textlen;
+
+	/* Phase 1 : init. Firstly set the CTR value to 1 (not 0) */
+	memcpy(iv, sdev->areq->iv, AES_BLOCK_SIZE);
+	/* Zero the counter field (last L'+1 bytes) of A0 */
+	memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
+	//iv[AES_BLOCK_SIZE - 1] = 1;
+
+	vic_cryp_hw_write_ctr(sdev,(u32 *)iv);
+
+	/* Build B0 */
+
+	b0 = (u8 *)sdev->data;
+	memcpy(b0, iv, AES_BLOCK_SIZE);
+
+	/* Encode M' = (authsize - 2) / 2 into the flags byte */
+	b0[0] |= (8 * ((sdev->authsize - 2) / 2));
+
+	/* Adata bit: associated data present */
+	if (sdev->areq->assoclen)
+		b0[0] |= 0x40;
+
+	textlen = vic_cryp_get_input_text_len(sdev);
+
+	/* NOTE(review): only 16 bits of textlen are encoded, capping the
+	 * payload at 64 KiB - confirm this matches L' from the IV. */
+	b0[AES_BLOCK_SIZE - 2] = textlen >> 8;
+	b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF;
+
+	memcpy((void *)sdev->last_ctr,sdev->data,AES_BLOCK_SIZE);
+	vic_cryp_hw_write_iv(sdev,(u32 *)b0);
+
+	return 0;
+}
+
+/*
+ * Program key, key size, mode and IV/counter into the engine for the
+ * current request and record the resulting config in cryp->flags.
+ * Returns 0 or a negative errno from the GCM/CCM init phase.
+ */
+static int vic_cryp_hw_init(struct vic_sec_dev *cryp)
+{
+	int ret;
+	u32 cfg = cryp->rctx->mode, hw_mode;
+
+	/* Set key */
+	vic_cryp_hw_write_key(cryp);
+
+	switch (cryp->ctx->keylen) {
+	case AES_KEYSIZE_128:
+		cfg |= CR_KEY128;
+		break;
+
+	case AES_KEYSIZE_192:
+		cfg |= CR_KEY192;
+		break;
+
+	default:
+	case AES_KEYSIZE_256:
+		cfg |= CR_KEY256;
+		break;
+	}
+
+	hw_mode = get_aes_mode(cryp);
+
+	cfg |= hw_mode;
+
+	memset((void *)cryp->last_ctr, 0, sizeof(cryp->last_ctr));
+
+	switch (hw_mode) {
+	case VIC_AES_MODE_GCM:
+	case VIC_AES_MODE_CCM:
+		/* Phase 1 : init */
+		if (hw_mode == VIC_AES_MODE_CCM)
+			ret = vic_cryp_ccm_init(cryp, cfg);
+		else
+			ret = vic_cryp_gcm_init(cryp, cfg);
+
+		if (ret)
+			return ret;
+		break;
+
+	case VIC_AES_MODE_CBC:
+	case VIC_AES_MODE_CFB:
+	case VIC_AES_MODE_OFB:
+		vic_cryp_hw_write_iv(cryp, (u32 *)cryp->sreq->iv);
+		break;
+	case VIC_AES_MODE_CTR:
+		/* Keep a software copy of the counter for overflow tracking */
+		vic_cryp_hw_write_ctr(cryp, (u32 *)cryp->sreq->iv);
+		memcpy((void *)cryp->last_ctr,(void *)cryp->sreq->iv,16);
+		break;
+
+	default:
+		break;
+	}
+
+	cryp->flags |= cfg;
+
+	return 0;
+}
+
+/*
+ * Copy @count bytes starting at byte @offset of the request scatterlist
+ * into the driver bounce buffer at @data_offset.
+ *
+ * Returns the number of bytes copied, or -EBADE if the scatterlist runs
+ * out before @count bytes are reached.
+ * NOTE(review): callers must keep the result in a *signed* variable or
+ * the error case is undetectable.
+ */
+int vic_cryp_get_from_sg(struct vic_sec_request_ctx *rctx, size_t offset,
+			size_t count,size_t data_offset)
+{
+	size_t of, ct, index;
+	struct scatterlist *sg = rctx->sg;
+
+	of = offset;
+	ct = count;
+
+	/* Skip whole entries preceding @offset */
+	while (sg->length <= of){
+		of -= sg->length;
+
+		if (!sg_is_last(sg)){
+			sg = sg_next(sg);
+			continue;
+		} else {
+			return -EBADE;
+		}
+	}
+
+	index = data_offset;
+	while (ct > 0) {
+		if(sg->length - of >= ct) {
+			/* Remainder fits in the current entry */
+			scatterwalk_map_and_copy(rctx->sdev->data + index, sg,
+					of, ct, 0);
+			index = index + ct;
+			return index - data_offset;
+		} else {
+			/* Consume the tail of this entry and move on */
+			scatterwalk_map_and_copy(rctx->sdev->data + index, sg,
+					of, sg->length - of, 0);
+			index += sg->length - of;
+			ct = ct - (sg->length - of);
+
+			of = 0;
+		}
+		if (!sg_is_last(sg))
+			sg = sg_next(sg);
+		else
+			return -EBADE;
+	}
+	return index - data_offset;
+}
+
+/*
+ * Fetch the MAC/tag from context RAM.  For CMAC the digest is returned
+ * (or kept for the next update); for GCM/CCM encryption the tag is
+ * appended to the output sg, for decryption it is compared against the
+ * tag at the end of the input (-EBADMSG on mismatch).
+ */
+static int vic_cryp_read_auth_tag(struct vic_sec_dev *sdev)
+{
+	struct vic_sec_request_ctx *rctx = sdev->rctx;
+	u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)];
+	int err = 0;
+
+	if(sdev->status.sha_busy || sdev->status.aes_busy) {
+		return -EBUSY;
+	}
+
+	vic_read_n(sdev->io_base + VIC_AES_CTX_RAM_OFFSET + VIC_AES_CTX_MAC_OFS,
+		sdev->rctx->digest,sdev->authsize);
+
+
+	if(is_cmac(sdev)) {
+		if(rctx->op & HASH_OP_FINAL) {
+			memcpy(sdev->req->result, sdev->rctx->digest, sdev->authsize);
+		} else {
+			/* Partial MAC; reload it before the next chunk */
+			rctx->is_load = 1;
+		}
+	} else {
+		if(is_encrypt(sdev)) {
+			sg_copy_buffer(rctx->out_sg,sg_nents(rctx->out_sg), rctx->digest,
+				sdev->authsize, rctx->offset, 0);
+		} else {
+			scatterwalk_map_and_copy(idigest,sdev->areq->src, sdev->total_in - sdev->authsize,
+				sdev->authsize, 0);
+			/* Constant-time compare of expected vs computed tag */
+			if (crypto_memneq(idigest, rctx->digest, sdev->authsize)) {
+				err = -EBADMSG;
+			}
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Copy the ciphertext/plaintext produced for the current chunk out of
+ * the engine's message RAM into the destination scatterlist at the
+ * current offset.
+ */
+static int vic_cryp_read_data(struct vic_sec_dev *sdev)
+{
+	struct vic_sec_request_ctx *rctx = sdev->rctx;
+	int count;
+
+	if(sdev->status.sha_busy || sdev->status.aes_busy) {
+		return -EBUSY;
+	}
+	/* Never read past the end of the expected output */
+	if(rctx->bufcnt >= sdev->total_out) {
+		count = sdev->total_out;
+	} else {
+		count = rctx->bufcnt;
+	}
+
+	vic_read_n(sdev->io_base + VIC_AES_MSG_RAM_OFFSET,sdev->data,
+		count);
+
+	sg_copy_buffer(sdev->rctx->out_sg,sg_nents(sdev->rctx->out_sg), sdev->data,
+		count, rctx->offset, 0);
+
+	// sdev->total_out = sdev->total_out - count;
+
+	return 0;
+}
+
+
+static int vic_gcm_zero_message_data(struct vic_sec_dev *sdev);
+
+/*
+ * Finalize the current request: read/verify the auth tag for AEAD
+ * modes, hand the request back to the crypto engine, and scrub the key.
+ */
+static int vic_cryp_finish_req(struct vic_sec_dev *sdev, int err)
+{
+	if (!err && (is_gcm(sdev) || is_ccm(sdev))) {
+		/* Phase 4 : output tag */
+		err = vic_cryp_read_auth_tag(sdev);
+	}
+
+	if (is_gcm(sdev) || is_ccm(sdev)) {
+		crypto_finalize_aead_request(sdev->engine, sdev->areq, err);
+		sdev->areq = NULL;
+	} else {
+		crypto_finalize_skcipher_request(sdev->engine, sdev->sreq,
+						 err);
+		sdev->sreq = NULL;
+	}
+
+	/* Scrub key material; plain memset() may be optimized away */
+	memzero_explicit(sdev->ctx->key, sdev->ctx->keylen);
+
+	return err;
+}
+
+/*
+ * Program the per-chunk config registers, enable completion interrupts
+ * and kick the AES engine.
+ *
+ * NOTE(review): sdev->doing is locked here and vic_aes_wait_busy()
+ * blocks on it - presumably the IRQ handler unlocks it on completion;
+ * verify against vic-sec.c.
+ */
+static int vic_aes_start(struct vic_sec_request_ctx *rctx)
+{
+	struct vic_sec_dev *sdev = rctx->sdev;
+	int loop, int_len = sizeof(unsigned int);
+
+	if(sdev->status.sha_busy || sdev->status.aes_busy) {
+		return -EBUSY;
+	}
+
+	/* Push the whole aes_cfg block word by word */
+	for(loop = 0; loop < CFG_REGS_LEN / int_len; loop++) {
+		writel(*(rctx->aes_cfg.vs + loop), sdev->io_base + VIC_AES_CFG_REGS + loop * int_len);
+	}
+	sdev->ie.sec_done_ie = 1;
+	sdev->ie.mac_valid_ie = 1;
+
+	mutex_lock(&sdev->doing);
+	writel(sdev->ie.v, sdev->io_base + SEC_IE_REG);
+	sdev->status.aes_mac_valid = 0;
+	sdev->status.aes_busy = 1;
+	sdev->status.sec_done = 0;
+
+	rctx->aes_ctrl.aes_start = 1;
+
+	writel(rctx->aes_ctrl.v, sdev->io_base + VIC_AES_CTRL_REG);
+
+	return 0;
+}
+
+/*
+ * Feed one chunk of associated data (AAD) to the engine and wait for it
+ * to be absorbed.  For CCM the AAD is zero-padded to a block boundary.
+ *
+ * NOTE(review): when bufcnt is already block-aligned, align_len becomes
+ * AES_BLOCK_SIZE and a full extra zero block is appended - confirm this
+ * is what the hardware expects.
+ */
+static int vic_cryp_write_assoc_out(struct vic_sec_dev *sdev)
+{
+	int ret = 0;
+	struct vic_sec_request_ctx *rctx = sdev->rctx;
+	unsigned int align_len = rctx->bufcnt % AES_BLOCK_SIZE;
+	int ap;
+
+	align_len = AES_BLOCK_SIZE - align_len;
+
+	if(is_ccm(sdev) && align_len) {
+		memset(sdev->data + rctx->bufcnt, 0, align_len);
+		rctx->bufcnt += align_len;
+		sdev->data_offset += align_len;
+	}
+
+	memset((void *)&(rctx->aes_cfg), 0, sizeof(rctx->aes_cfg));
+	memset((void *)&(rctx->aes_ctrl), 0, sizeof(rctx->aes_ctrl));
+
+	rctx->aes_cfg.authsize = sdev->authsize;
+	rctx->aes_cfg.aes_ctx_idx = 0;
+	rctx->aes_cfg.aes_blk_idx = 0;
+
+	rctx->aes_ctrl.aes_mode = get_aes_mode(sdev);
+	rctx->aes_ctrl.aes_encrypt = is_encrypt(sdev);
+	/* Key size encoding: 128 -> 0, 192 -> 1, 256 -> 2 */
+	rctx->aes_ctrl.aes_key_sz = (sdev->ctx->keylen >> 3) - 2;
+	rctx->aes_ctrl.aes_str_ctx = 1;
+	rctx->aes_ctrl.aes_ret_ctx = 1;
+
+	rctx->aes_cfg.aes_assoclen = rctx->bufcnt;
+	rctx->aes_cfg.aes_n_bytes = rctx->bufcnt;
+	if (rctx->offset == 0)
+		rctx->aes_ctrl.aes_msg_begin = 1;
+	else
+		rctx->aes_ctrl.aes_msg_begin = 0;
+	/* On decryption the input carries the tag after the data */
+	ap = 0;
+	if(!is_encrypt(sdev))
+		ap = sdev->authsize;
+
+	/* Last AAD chunk and no payload to follow */
+	if ((sdev->total_in - ap - rctx->assoclen == 0) &&
+	    (rctx->offset + rctx->bufcnt == rctx->assoclen + sdev->data_offset)) {
+		rctx->aes_ctrl.aes_msg_end = 1;
+		rctx->aes_cfg.aes_assoclen_tot = rctx->assoclen + sdev->data_offset;
+	}
+
+	vic_write_n(sdev->io_base + VIC_AES_MSG_RAM_OFFSET, sdev->data,
+		rctx->bufcnt);
+
+	ret = vic_aes_start(rctx);
+	if (ret)
+		return ret;
+
+	if(vic_aes_wait_busy(sdev))
+		ret = -ETIMEDOUT;
+
+	return ret;
+}
+
+/*
+ * Feed one chunk of payload to the engine, wait for completion and
+ * (except for CMAC) read the produced output back.  Handles CMAC MAC
+ * reload between update calls and the begin/end message flags.
+ */
+static int vic_cryp_write_cryp_out(struct vic_sec_dev *sdev)
+{
+	struct vic_sec_request_ctx *rctx = sdev->rctx;
+	int ret = 0;
+	int ap;
+
+	memset((void *)&(rctx->aes_cfg), 0, sizeof(rctx->aes_cfg));
+	memset((void *)&(rctx->aes_ctrl), 0, sizeof(rctx->aes_ctrl));
+
+
+	rctx->aes_cfg.authsize = sdev->authsize;
+	//if(sdev->authsize == 16)
+	//	rctx->aes_cfg.authsize = 0xff;
+	rctx->aes_cfg.aes_assoclen_tot = rctx->assoclen + sdev->data_offset;
+	/* Message starts here only if there was no AAD phase */
+	rctx->aes_ctrl.aes_msg_begin = rctx->assoclen ? 0 : 1;
+
+	rctx->aes_cfg.aes_ctx_idx = 0;
+	rctx->aes_cfg.aes_blk_idx = 0;
+
+	rctx->aes_cfg.aes_n_bytes = rctx->bufcnt;
+
+	rctx->aes_ctrl.aes_mode = get_aes_mode(sdev);
+	rctx->aes_ctrl.aes_encrypt = is_encrypt(sdev);
+	/* Key size encoding: 128 -> 0, 192 -> 1, 256 -> 2 */
+	rctx->aes_ctrl.aes_key_sz = (sdev->ctx->keylen >> 3) - 2;
+
+	rctx->aes_ctrl.aes_str_ctx = 1;
+	rctx->aes_ctrl.aes_ret_ctx = 1;
+
+	rctx->aes_cfg.aes_tot_n_bytes = sdev->total_in - rctx->assoclen;
+
+	ap = 0;
+	if(!is_encrypt(sdev)) {
+		/* Input tail is the tag, not payload */
+		rctx->aes_cfg.aes_tot_n_bytes -= sdev->authsize;
+		ap = sdev->authsize;
+	}
+
+	if (rctx->offset + rctx->bufcnt + ap == sdev->total_in) {
+		rctx->aes_ctrl.aes_msg_end = 1;
+	} else {
+		rctx->aes_ctrl.aes_msg_end = 0;
+	}
+	if(is_cmac(sdev)) {
+		/* Restore the intermediate MAC saved by the last update */
+		if(rctx->is_load) {
+			rctx->aes_ctrl.aes_msg_begin = 0;
+			vic_write_n(sdev->io_base + VIC_AES_CTX_RAM_OFFSET + VIC_AES_CTX_MAC_OFS,
+				rctx->digest,sdev->authsize);
+			rctx->is_load = 0;
+		}
+	}
+	/* First chunk consumed: key/IV no longer need reloading */
+	if(rctx->aes_ctrl.aes_msg_begin){
+		if(rctx->sdev->ctx->begin_new){
+			rctx->sdev->ctx->begin_new = 0;
+		}
+	}
+	vic_write_n(sdev->io_base + VIC_AES_MSG_RAM_OFFSET, sdev->data,
+		rctx->bufcnt);
+	// vic_write_n(sdev->io_base + VIC_AES_MSG_RAM_OFFSET, sdev->data,
+	//	       rctx->bufcnt + AES_BLOCK_SIZE);
+
+	ret = vic_aes_start(rctx);
+	if (ret)
+		return ret;
+
+	if(vic_aes_wait_busy(sdev))
+		ret = -ETIMEDOUT;
+
+	if(!is_cmac(sdev))
+		ret = vic_cryp_read_data(sdev);
+
+	return ret;
+}
+
+/*
+ * Track the big-endian 128-bit CTR counter in software after @count
+ * bytes were processed, and report whether the *next* stretch of input
+ * will wrap the 32-bit low word (the hardware only increments 32 bits).
+ * On true, sdev->ctr_over_count holds the byte count up to the wrap.
+ */
+static bool vic_check_counter_overflow(struct vic_sec_dev *sdev, size_t count)
+{
+	struct vic_sec_request_ctx *rctx = sdev->rctx;
+	bool ret = false;
+	u32 start, end, ctr, blocks;
+
+	if (count) {
+		blocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);
+		sdev->last_ctr[3] = cpu_to_be32(be32_to_cpu(sdev->last_ctr[3]) + blocks);
+
+		/* Ripple the carry through words 2, 1 and 0 */
+		if (sdev->last_ctr[3] == 0) {
+			sdev->last_ctr[2] = cpu_to_be32(be32_to_cpu(sdev->last_ctr[2]) + 1);
+			if (sdev->last_ctr[2] == 0) {
+				sdev->last_ctr[1] = cpu_to_be32(be32_to_cpu(sdev->last_ctr[1]) + 1);
+				if (sdev->last_ctr[1] == 0) {
+					sdev->last_ctr[0] = cpu_to_be32(be32_to_cpu(sdev->last_ctr[0]) + 1);
+					/*
+					 * Fix: test word 0 (the one just
+					 * incremented); the original re-tested
+					 * word 1, so a full 128-bit wrap never
+					 * rewrote the hardware counter.
+					 */
+					if (sdev->last_ctr[0] == 0)
+						vic_cryp_hw_write_ctr(sdev, (u32 *)sdev->last_ctr);
+				}
+			}
+		}
+	}
+
+	/* ctr counter overflow. */
+	ctr = sdev->total_in - rctx->assoclen - sdev->authsize;
+	blocks = DIV_ROUND_UP(ctr, AES_BLOCK_SIZE);
+	start = be32_to_cpu(sdev->last_ctr[3]);
+
+	end = start + blocks - 1;
+	if (end < start) {
+		/* Bytes that can be processed before the low word wraps */
+		sdev->ctr_over_count = AES_BLOCK_SIZE * -start;
+		ret = true;
+	}
+
+	return ret;
+}
+
+/*
+ * Main data pump for skcipher and GCM/CCM AEAD requests: stream the
+ * associated data (if any) and then the payload through the engine in
+ * bounce-buffer sized chunks, splitting chunks at CTR counter wraps.
+ *
+ * Fixes vs. original:
+ *  - data_len is now signed so negative returns from
+ *    vic_cryp_get_from_sg() are actually detected;
+ *  - the CCM long-assoclen (> 65280) length encoding shifted each byte
+ *    into place; "assoclen & 0xFF000000" stored into a u8 truncated to 0.
+ */
+static int vic_cryp_write_data(struct vic_sec_dev *sdev)
+{
+	struct vic_sec_request_ctx *rctx = sdev->rctx;
+	size_t total, count, data_buf_len;
+	int data_len;
+	int ret;
+	bool fragmented = false;
+	u32 data_offset;
+
+	if (unlikely(!sdev->total_in)) {
+		dev_warn(sdev->dev, "No more data to process\n");
+		return -EINVAL;
+	}
+
+	/* ctr counter overflow. */
+	fragmented = vic_check_counter_overflow(sdev, 0);
+
+	total = 0;
+	rctx->offset = 0;
+	sdev->data_offset = 0;
+
+	/* CCM: encode the AAD length header right after block B0 */
+	if (is_ccm(sdev)) {
+		int index = AES_BLOCK_SIZE;
+
+		if (rctx->assoclen <= 65280) {
+			/* 2-byte length encoding */
+			((u8 *)sdev->data)[index] = (rctx->assoclen >> 8) & 0xff;
+			((u8 *)sdev->data)[index + 1] = rctx->assoclen & 0xff;
+			sdev->data_offset = index + 2;
+		} else {
+			/* 0xfffe || 4-byte big-endian length */
+			((u8 *)sdev->data)[index] = 0xff;
+			((u8 *)sdev->data)[index + 1] = 0xfe;
+			((u8 *)sdev->data)[index + 2] = (rctx->assoclen >> 24) & 0xff;
+			((u8 *)sdev->data)[index + 3] = (rctx->assoclen >> 16) & 0xff;
+			((u8 *)sdev->data)[index + 4] = (rctx->assoclen >> 8) & 0xff;
+			((u8 *)sdev->data)[index + 5] = rctx->assoclen & 0xff;
+			sdev->data_offset = index + 6;
+		}
+	}
+	data_offset = sdev->data_offset;
+	/* Phase 2: associated data */
+	while (total < rctx->assoclen) {
+		data_buf_len = sdev->data_buf_len - (sdev->data_buf_len % sdev->ctx->keylen) - data_offset;
+		count = min(rctx->assoclen - rctx->offset, data_buf_len);
+		count = min(count, rctx->assoclen - total);
+		data_len = vic_cryp_get_from_sg(rctx, rctx->offset, count, data_offset);
+		if (data_len < 0)
+			return data_len;
+		if ((size_t)data_len != count)
+			return -EINVAL;
+
+		rctx->bufcnt = data_len + data_offset;
+
+		total += data_len;
+		ret = vic_cryp_write_assoc_out(sdev);
+		if (ret)
+			return ret;
+		data_offset = 0;
+		rctx->offset += data_len;
+	}
+
+	total = 0;
+
+	/* Phase 3: payload */
+	while (total < sdev->total_in - rctx->assoclen) {
+		data_buf_len = sdev->data_buf_len - (sdev->data_buf_len % sdev->ctx->keylen) - data_offset;
+		count = min(sdev->total_in - rctx->offset, data_buf_len);
+		count = min(count, sdev->total_in - rctx->assoclen - total);
+
+		/* ctr counter overflow: stop the chunk at the wrap point */
+		if (fragmented && sdev->ctr_over_count != 0) {
+			if (count >= sdev->ctr_over_count)
+				count = sdev->ctr_over_count;
+		}
+
+		data_len = vic_cryp_get_from_sg(rctx, rctx->offset, count, data_offset);
+		if (data_len < 0)
+			return data_len;
+		if ((size_t)data_len != count)
+			return -EINVAL;
+
+		rctx->bufcnt = data_len;
+		total += data_len;
+
+		/* Decryption: the trailing tag is not payload */
+		if (!is_encrypt(sdev) && (total + rctx->assoclen >= sdev->total_in))
+			rctx->bufcnt = rctx->bufcnt - sdev->authsize;
+
+		if (rctx->bufcnt) {
+			ret = vic_cryp_write_cryp_out(sdev);
+			if (ret)
+				return ret;
+		}
+		rctx->offset += data_len;
+
+		fragmented = vic_check_counter_overflow(sdev, data_len);
+	}
+
+	return 0;
+}
+
+/*
+ * Data pump for AES-CMAC (ahash) requests: stream req->src through the
+ * engine in key-size-aligned chunks, buffering a sub-keylen tail across
+ * update() calls and flushing it on final().
+ *
+ * Fix vs. original: data_len was size_t, so the "data_len < 0" error
+ * check on vic_cryp_get_from_sg() could never fire.
+ */
+static int vic_cmac_write_data(struct vic_sec_dev *sdev)
+{
+	struct vic_sec_request_ctx *rctx = sdev->rctx;
+	size_t count, data_buf_len;
+	int data_len;
+	int ret;
+
+	/* Empty message: still run one pass to produce the MAC */
+	if (!sdev->total_in) {
+		sdev->rctx->bufcnt = 0;
+		sdev->rctx->offset = 0;
+		ret = vic_cryp_write_cryp_out(sdev);
+		if (ret)
+			return ret;
+	}
+
+	while (rctx->total < sdev->total_in) {
+		data_buf_len = sdev->data_buf_len - (sdev->data_buf_len % sdev->ctx->keylen) - sdev->data_offset;
+		count = min(sdev->total_in - rctx->offset, data_buf_len);
+		count = min(count, sdev->total_in - rctx->total);
+
+		/* Updates are processed in whole key-size multiples */
+		if (rctx->op == HASH_OP_UPDATE) {
+			if ((count > sdev->ctx->keylen) && (count % sdev->ctx->keylen))
+				count = count - count % sdev->ctx->keylen;
+		}
+
+		data_len = vic_cryp_get_from_sg(rctx, rctx->offset, count, sdev->data_offset);
+		if (data_len < 0)
+			return data_len;
+		if ((size_t)data_len != count)
+			return -EINVAL;
+
+		rctx->bufcnt = data_len + sdev->data_offset;
+		rctx->total += data_len;
+
+		if ((rctx->op == HASH_OP_UPDATE) &&
+		    (data_len < sdev->ctx->keylen)) {
+			/* Short tail: keep it buffered for the next call */
+			sdev->data_offset = data_len;
+		} else {
+			ret = vic_cryp_write_cryp_out(sdev);
+			if (ret)
+				return ret;
+			rctx->offset += data_len;
+		}
+	}
+
+	/* Flush the buffered tail on final() */
+	if (sdev->data_offset && (rctx->op & HASH_OP_FINAL)) {
+		ret = vic_cryp_write_cryp_out(sdev);
+		if (ret)
+			return ret;
+		rctx->offset += sdev->data_offset;
+	}
+
+	return 0;
+}
+
+/* Run one empty pass through the engine (zero-length GCM/CCM message)
+ * so the hardware still produces/validates the auth tag.
+ */
+static int vic_gcm_zero_message_data(struct vic_sec_dev *sdev)
+{
+	int ret;
+
+	sdev->rctx->bufcnt = 0;
+	sdev->rctx->offset = 0;
+	ret = vic_cryp_write_cryp_out(sdev);
+
+	if(ret)
+		return ret;
+
+	return 0;
+}
+
+/* Synchronous (CPU-driven, no DMA) execution of a prepared request. */
+static int vic_cryp_cpu_start(struct vic_sec_dev *sdev, struct vic_sec_request_ctx *rctx)
+{
+	int ret;
+
+	ret = vic_cryp_write_data(sdev);
+	if(ret)
+		return ret;
+
+	ret = vic_cryp_finish_req(sdev,ret);
+
+	return ret;
+}
+
+static int vic_cryp_cipher_one_req(struct crypto_engine *engine, void *areq);
+static int vic_cryp_prepare_cipher_req(struct crypto_engine *engine,
+ void *areq);
+
+/*
+ * skcipher tfm init: bind to the device, enable the AES clock and hook
+ * up the crypto-engine callbacks.
+ *
+ * NOTE(review): sdev->lock is taken here and only released in
+ * cra_exit_tfm, i.e. it is held for the whole lifetime of the tfm and
+ * serializes every other tfm on this device - confirm this is intended.
+ */
+static int vic_cryp_cra_init_tfm(struct crypto_skcipher *tfm)
+{
+	struct vic_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	ctx->sdev = vic_sec_find_dev(ctx);
+	if (!ctx->sdev)
+		return -ENODEV;
+
+	mutex_lock(&ctx->sdev->lock);
+	vic_clk_enable(ctx->sdev,AES_CLK);
+
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct vic_sec_request_ctx));
+
+	/* Force key/IV reload on the first request of this tfm */
+	ctx->begin_new = 1;
+	ctx->enginectx.op.do_one_request = vic_cryp_cipher_one_req;
+	ctx->enginectx.op.prepare_request = vic_cryp_prepare_cipher_req;
+	ctx->enginectx.op.unprepare_request = NULL;
+	return 0;
+}
+
+/* skcipher tfm exit: undo cra_init_tfm (clock off, device unlocked). */
+static void vic_cryp_cra_exit_tfm(struct crypto_skcipher *tfm)
+{
+	struct vic_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	ctx->begin_new = 0;
+	ctx->enginectx.op.do_one_request = NULL;
+	ctx->enginectx.op.prepare_request = NULL;
+	ctx->enginectx.op.unprepare_request = NULL;
+
+	vic_clk_disable(ctx->sdev,AES_CLK);
+	mutex_unlock(&ctx->sdev->lock);
+}
+
+static int vic_cryp_aead_one_req(struct crypto_engine *engine, void *areq);
+static int vic_cryp_prepare_aead_req(struct crypto_engine *engine,
+ void *areq);
+
+/*
+ * AEAD tfm init: same pattern as the skcipher init (device bind, clock
+ * on, engine callbacks, device lock held until exit - see the review
+ * note on vic_cryp_cra_init_tfm).
+ */
+static int vic_cryp_aes_aead_init(struct crypto_aead *tfm)
+{
+	struct vic_sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+	ctx->sdev = vic_sec_find_dev(ctx);
+
+	if (!ctx->sdev)
+		return -ENODEV;
+
+	mutex_lock(&ctx->sdev->lock);
+	vic_clk_enable(ctx->sdev,AES_CLK);
+
+	tfm->reqsize = sizeof(struct vic_sec_request_ctx);
+
+	/* Force key/IV reload on the first request of this tfm */
+	ctx->begin_new = 1;
+	ctx->enginectx.op.do_one_request = vic_cryp_aead_one_req;
+	ctx->enginectx.op.prepare_request = vic_cryp_prepare_aead_req;
+	ctx->enginectx.op.unprepare_request = NULL;
+
+	return 0;
+}
+
+/* AEAD tfm exit: undo aead_init (clock off, device unlocked). */
+static void vic_cryp_aes_aead_exit(struct crypto_aead *tfm)
+{
+	struct vic_sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+	ctx->begin_new = 0;
+	ctx->enginectx.op.do_one_request = NULL;
+	ctx->enginectx.op.prepare_request = NULL;
+	ctx->enginectx.op.unprepare_request = NULL;
+
+	vic_clk_disable(ctx->sdev,AES_CLK);
+	mutex_unlock(&ctx->sdev->lock);
+}
+
+/* Queue a skcipher request on the crypto engine; @mode carries the
+ * VIC_AES_MODE_* value plus FLG_ENCRYPT for the encrypt direction.
+ */
+static int vic_cryp_crypt(struct skcipher_request *req, unsigned long mode)
+{
+	struct vic_sec_ctx *ctx = crypto_skcipher_ctx(
+			crypto_skcipher_reqtfm(req));
+	struct vic_sec_request_ctx *rctx = skcipher_request_ctx(req);
+	struct vic_sec_dev *sdev = ctx->sdev;
+
+	if (!sdev)
+		return -ENODEV;
+
+	rctx->mode = mode;
+	rctx->req_type = AES_ABLK;
+
+	return crypto_transfer_skcipher_request_to_engine(sdev->engine, req);
+}
+
+/* Queue an AEAD (GCM/CCM) request on the crypto engine. */
+static int vic_cryp_aead_crypt(struct aead_request *req, unsigned long mode)
+{
+	struct vic_sec_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct vic_sec_request_ctx *rctx = aead_request_ctx(req);
+	struct vic_sec_dev *cryp = ctx->sdev;
+
+	if (!cryp)
+		return -ENODEV;
+
+	rctx->mode = mode;
+	rctx->req_type = AES_AEAD;
+
+	return crypto_transfer_aead_request_to_engine(cryp->engine, req);
+}
+
+/* Cache the key in the tfm context; loaded into hardware at request
+ * time.  Length is validated by vic_cryp_aes_setkey() before this runs.
+ */
+static int vic_cryp_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct vic_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+/* Reject anything but 128/192/256-bit AES keys. */
+static int vic_cryp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			       unsigned int keylen)
+{
+	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256)
+		return -EINVAL;
+	else
+		return vic_cryp_setkey(tfm, key, keylen);
+}
+
+/* AEAD variant of setkey: validate length, cache key in the context. */
+static int vic_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+				    unsigned int keylen)
+{
+	struct vic_sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256) {
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+/*
+ * GCM tag size: only the full 16-byte tag is accepted.
+ * NOTE(review): the GCM spec also allows shorter tags (4, 8, 12..15);
+ * confirm whether this is a hardware limitation or an oversight.
+ */
+static int vic_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+					unsigned int authsize)
+{
+	return authsize == AES_BLOCK_SIZE ? 0 : -EINVAL;
+}
+
+/* CCM tag size: even values 4..16 per RFC 3610. */
+static int vic_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
+					unsigned int authsize)
+{
+	switch (authsize) {
+	case 4:
+	case 6:
+	case 8:
+	case 10:
+	case 12:
+	case 14:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Thin per-algorithm entry points: encode mode + direction and queue. */
+static int vic_cryp_aes_ecb_encrypt(struct skcipher_request *req)
+{
+	return vic_cryp_crypt(req, VIC_AES_MODE_ECB | FLG_ENCRYPT);
+}
+
+static int vic_cryp_aes_ecb_decrypt(struct skcipher_request *req)
+{
+	return vic_cryp_crypt(req, VIC_AES_MODE_ECB);
+}
+
+static int vic_cryp_aes_cbc_encrypt(struct skcipher_request *req)
+{
+	return vic_cryp_crypt(req, VIC_AES_MODE_CBC | FLG_ENCRYPT);
+}
+
+static int vic_cryp_aes_cbc_decrypt(struct skcipher_request *req)
+{
+	return vic_cryp_crypt(req, VIC_AES_MODE_CBC);
+}
+
+static int vic_cryp_aes_cfb_encrypt(struct skcipher_request *req)
+{
+	return vic_cryp_crypt(req, VIC_AES_MODE_CFB | FLG_ENCRYPT);
+}
+
+static int vic_cryp_aes_cfb_decrypt(struct skcipher_request *req)
+{
+	return vic_cryp_crypt(req, VIC_AES_MODE_CFB);
+}
+
+static int vic_cryp_aes_ofb_encrypt(struct skcipher_request *req)
+{
+	return vic_cryp_crypt(req, VIC_AES_MODE_OFB | FLG_ENCRYPT);
+}
+
+static int vic_cryp_aes_ofb_decrypt(struct skcipher_request *req)
+{
+	return vic_cryp_crypt(req, VIC_AES_MODE_OFB);
+}
+
+static int vic_cryp_aes_ctr_encrypt(struct skcipher_request *req)
+{
+	return vic_cryp_crypt(req, VIC_AES_MODE_CTR | FLG_ENCRYPT);
+}
+
+static int vic_cryp_aes_ctr_decrypt(struct skcipher_request *req)
+{
+	return vic_cryp_crypt(req, VIC_AES_MODE_CTR);
+}
+
+static int vic_cryp_aes_gcm_encrypt(struct aead_request *req)
+{
+	return vic_cryp_aead_crypt(req, VIC_AES_MODE_GCM | FLG_ENCRYPT);
+}
+
+static int vic_cryp_aes_gcm_decrypt(struct aead_request *req)
+{
+	return vic_cryp_aead_crypt(req, VIC_AES_MODE_GCM);
+}
+
+static int vic_cryp_aes_ccm_encrypt(struct aead_request *req)
+{
+	return vic_cryp_aead_crypt(req, VIC_AES_MODE_CCM | FLG_ENCRYPT);
+}
+
+static int vic_cryp_aes_ccm_decrypt(struct aead_request *req)
+{
+	return vic_cryp_aead_crypt(req, VIC_AES_MODE_CCM);
+}
+
+/*
+ * Common preparation for skcipher (@req) and AEAD (@areq) requests:
+ * cache request state in the device structure, validate lengths and
+ * program the hardware.  Exactly one of @req/@areq is non-NULL.
+ *
+ * Fix vs. original: the CCM assoclen check ran before sdev->flags was
+ * refreshed from this request's mode, so it tested *stale* flags from
+ * the previous request and could dereference a NULL @areq on the
+ * skcipher path.  The check now runs after the flags are set.  The
+ * dead "#if 0" block was removed.
+ */
+static int vic_cryp_prepare_req(struct skcipher_request *req,
+				struct aead_request *areq)
+{
+	struct vic_sec_ctx *ctx;
+	struct vic_sec_dev *sdev;
+	struct vic_sec_request_ctx *rctx;
+	int ret;
+
+	if (!req && !areq)
+		return -EINVAL;
+
+	ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) :
+		    crypto_aead_ctx(crypto_aead_reqtfm(areq));
+
+	sdev = ctx->sdev;
+
+	if (!sdev)
+		return -ENODEV;
+
+	rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
+
+	rctx->sdev = sdev;
+	ctx->sdev = sdev;
+
+	sdev->flags = rctx->mode;
+	sdev->ctx = ctx;
+	sdev->rctx = rctx;
+
+	if (is_ccm(sdev)) {
+		if (!areq || !areq->assoclen) {
+			dev_info(sdev->dev, "AES CCM input assoclen can not be 0\n");
+			return -EINVAL;
+		}
+	}
+
+	if (req) {
+		sdev->sreq = req;
+		sdev->total_in = req->cryptlen;
+		sdev->total_out = sdev->total_in;
+		sdev->authsize = 0;
+		rctx->assoclen = 0;
+	} else {
+		/*
+		 * Length of input and output data:
+		 * Encryption case:
+		 *  INPUT  = AssocData   ||    PlainText
+		 *          <- assoclen ->  <- cryptlen ->
+		 *          <------- total_in ----------->
+		 *
+		 *  OUTPUT = AssocData    ||   CipherText   ||      AuthTag
+		 *          <- assoclen ->  <- cryptlen ->  <- authsize ->
+		 *          <---------------- total_out ----------------->
+		 *
+		 * Decryption case:
+		 *  INPUT  =  AssocData     ||    CipherText   ||      AuthTag
+		 *          <- assoclen --->  <--------- cryptlen --------->
+		 *                                           <- authsize ->
+		 *          <---------------- total_in ------------------>
+		 *
+		 *  OUTPUT = AssocData    ||   PlainText
+		 *          <- assoclen ->  <- crypten - authsize ->
+		 *          <---------- total_out ----------------->
+		 */
+		sdev->areq = areq;
+		sdev->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
+		sdev->total_in = areq->assoclen + areq->cryptlen;
+		rctx->assoclen = areq->assoclen;
+		if (is_encrypt(sdev))
+			/* Append auth tag to output */
+			sdev->total_out = sdev->total_in + sdev->authsize;
+		else
+			/* No auth tag in output */
+			sdev->total_out = sdev->total_in - sdev->authsize;
+	}
+
+	rctx->sg = req ? req->src : areq->src;
+	rctx->out_sg = req ? req->dst : areq->dst;
+
+	sdev->in_sg_len = sg_nents_for_len(rctx->sg, sdev->total_in);
+	if (sdev->in_sg_len < 0) {
+		dev_err(sdev->dev, "Cannot get in_sg_len\n");
+		ret = sdev->in_sg_len;
+		goto out;
+	}
+
+	sdev->out_sg_len = sg_nents_for_len(rctx->out_sg, sdev->total_out);
+	if (sdev->out_sg_len < 0) {
+		dev_err(sdev->dev, "Cannot get out_sg_len\n");
+		ret = sdev->out_sg_len;
+		goto out;
+	}
+
+	ret = vic_cryp_hw_init(sdev);
+
+out:
+	return ret;
+}
+
+static int vic_cryp_prepare_cipher_req(struct crypto_engine *engine,
+ void *areq)
+{
+ struct skcipher_request *req = container_of(areq,
+ struct skcipher_request,
+ base);
+
+ return vic_cryp_prepare_req(req, NULL);
+}
+
+/*
+ * Crypto engine "do one request" callback for skcipher requests: run the
+ * prepared request on the VIC by CPU-driven transfer.
+ *
+ * Returns -ENODEV if the context was never bound to a device, otherwise
+ * the result of vic_cryp_cpu_start().
+ */
+static int vic_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = container_of(areq,
+ struct skcipher_request,
+ base);
+ struct vic_sec_request_ctx *rctx = skcipher_request_ctx(req);
+ struct vic_sec_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ /* Named "sdev" for consistency with the aead/cmac callbacks. */
+ struct vic_sec_dev *sdev = ctx->sdev;
+
+ if (!sdev)
+ return -ENODEV;
+
+ return vic_cryp_cpu_start(sdev, rctx);
+}
+
+/*
+ * Crypto engine "prepare" callback for AEAD requests: recover the
+ * aead_request from its base and hand it to the common preparation path.
+ */
+static int vic_cryp_prepare_aead_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+
+ return vic_cryp_prepare_req(NULL, req);
+}
+
+/*
+ * Crypto engine "do one request" callback for AEAD requests.
+ *
+ * sdev->areq was stored by the prepare step; a request with no assoc
+ * data and no payload is short-circuited: the tag is produced over the
+ * empty message and the request completed immediately.
+ */
+static int vic_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct vic_sec_request_ctx *rctx = aead_request_ctx(req);
+ struct vic_sec_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct vic_sec_dev *sdev = ctx->sdev;
+
+ if (!sdev)
+ return -ENODEV;
+
+ if (unlikely(!sdev->areq->assoclen &&
+ !vic_cryp_get_input_text_len(sdev))) {
+ /* No input data to process: get tag and finish */
+ vic_gcm_zero_message_data(sdev);
+ vic_cryp_finish_req(sdev, 0);
+ return 0;
+ }
+
+ return vic_cryp_cpu_start(sdev, rctx);
+}
+
+/*
+ * Crypto engine "prepare" callback for CMAC (ahash) requests: bind the
+ * request/context to the device, record lengths, and initialize the
+ * hardware.  CMAC produces a full AES block tag, hence the fixed
+ * authsize below.
+ */
+static int vic_cryp_prepare_cmac_req(struct crypto_engine *engine,
+ void *areq)
+{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
+ struct vic_sec_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct vic_sec_dev *sdev;
+ struct vic_sec_request_ctx *rctx;
+ int ret;
+
+ sdev = ctx->sdev;
+
+ if (!sdev)
+ return -ENODEV;
+
+ rctx = ahash_request_ctx(req);
+
+ rctx->sdev = sdev;
+ ctx->sdev = sdev; /* NOTE(review): sdev was just read from ctx->sdev; this store is redundant. */
+
+ sdev->flags = rctx->mode;
+ sdev->ctx = ctx;
+ sdev->rctx = rctx;
+
+ sdev->req = req;
+ sdev->total_in = req->nbytes;
+ sdev->authsize = AES_BLOCK_SIZE;
+ rctx->assoclen = 0;
+
+ //rctx->sg = req->src;
+
+ ret = vic_cryp_hw_init(sdev);
+
+ return ret;
+}
+
+/*
+ * Crypto engine "do one request" callback for CMAC: feed the data to
+ * the hardware, read back the authentication tag, and complete the
+ * hash request on the engine.
+ */
+static int vic_cryp_cmac_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
+ struct vic_sec_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct vic_sec_dev *sdev = ctx->sdev;
+ int ret;
+
+ if (!sdev)
+ return -ENODEV;
+
+ ret = vic_cmac_write_data(sdev);
+
+ if(ret)
+ return ret;
+
+ ret = vic_cryp_read_auth_tag(sdev);
+
+ /* Hand the result back to the engine and drop our request pointer. */
+ crypto_finalize_hash_request(sdev->engine, sdev->req, ret);
+ sdev->req = NULL;
+
+
+ return ret;
+}
+
+/*
+ * cra_init for "cmac(aes)": bind the tfm to a VIC device, enable the
+ * AES clock, size the per-request context, and install the crypto
+ * engine callbacks.
+ *
+ * NOTE(review): the device mutex is taken here and only released in
+ * cra_exit, i.e. it is held for the whole lifetime of the tfm — confirm
+ * this is intentional, as it serializes all users of the device.
+ */
+static int vic_cryp_aes_cmac_init(struct crypto_tfm *tfm)
+{
+ struct vic_sec_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->sdev = vic_sec_find_dev(ctx);
+
+ if (!ctx->sdev)
+ return -ENODEV;
+
+ mutex_lock(&ctx->sdev->lock);
+ vic_clk_enable(ctx->sdev,AES_CLK);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct vic_sec_request_ctx));
+
+ ctx->begin_new = 1;
+ ctx->enginectx.op.do_one_request = vic_cryp_cmac_one_req;
+ ctx->enginectx.op.prepare_request = vic_cryp_prepare_cmac_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+/*
+ * cra_exit for "cmac(aes)": clear the engine callbacks, disable the
+ * AES clock, and release the device mutex taken in cra_init.
+ */
+static void vic_cryp_aes_cmac_exit(struct crypto_tfm *tfm)
+{
+ struct vic_sec_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->begin_new = 0;
+ ctx->enginectx.op.do_one_request = NULL;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+
+ vic_clk_disable(ctx->sdev,AES_CLK);
+ mutex_unlock(&ctx->sdev->lock);
+}
+
+/*
+ * ahash .init for CMAC: reset the per-request state and select
+ * CMAC/encrypt mode for the following update/final calls.
+ */
+static int vic_aes_cmac_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct vic_sec_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+ struct vic_sec_dev *sdev = ctx->sdev;
+
+ if (!sdev)
+ return -ENODEV;
+
+ rctx->sdev = sdev;
+ rctx->mode = VIC_AES_MODE_CMAC | FLG_ENCRYPT;
+ rctx->req_type = AES_ABLK;
+ rctx->op = 0;
+ rctx->is_load = 0;
+ rctx->bufcnt = 0;
+ rctx->offset = 0;
+ sdev->data_offset = 0;
+
+ return 0;
+}
+/*
+ * ahash .update for CMAC: record the source scatterlist and queue the
+ * request on the crypto engine.  Zero-length updates are a no-op.
+ */
+static int vic_aes_cmac_update(struct ahash_request *req)
+{
+ struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+ struct vic_sec_dev *sdev = rctx->sdev;
+
+ if (!req->nbytes)
+ return 0;
+
+ rctx->total = 0;
+ rctx->sg = req->src;
+ rctx->offset = 0;
+
+ rctx->op |= HASH_OP_UPDATE;
+
+ return crypto_transfer_hash_request_to_engine(sdev->engine, req);
+}
+
+/*
+ * ahash .final for CMAC: mark the request as final and queue it on the
+ * crypto engine so the tag is produced.
+ */
+static int vic_aes_cmac_final(struct ahash_request *req)
+{
+ struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+ struct vic_sec_dev *sdev = rctx->sdev;
+
+ rctx->op |= HASH_OP_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(sdev->engine, req);
+}
+
+/*
+ * ahash .finup for CMAC: update with the final chunk, then finalize.
+ * If update() went asynchronous (-EINPROGRESS/-EBUSY) the engine will
+ * drive the rest; otherwise final() is always called so resources are
+ * cleaned up even after an update() error.
+ */
+static int vic_aes_cmac_finup(struct ahash_request *req)
+{
+ struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+ int err1, err2;
+
+ rctx->op |= HASH_OP_FINAL | HASH_OP_UPDATE;
+
+ err1 = vic_aes_cmac_update(req);
+
+ if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ return err1;
+
+ /*
+ * final() has to be always called to cleanup resources
+ * even if update() failed, except EINPROGRESS
+ */
+ err2 = vic_aes_cmac_final(req);
+
+ return err1 ?: err2;
+}
+
+/* ahash .digest for CMAC: one-shot init + finup. */
+static int vic_aes_cmac_digest(struct ahash_request *req)
+{
+ return vic_aes_cmac_init(req) ?: vic_aes_cmac_finup(req);
+}
+
+/*
+ * ahash .setkey for CMAC: accept only AES-128/192/256 key lengths and
+ * cache the key in the tfm context for the hardware to load later.
+ */
+static int vic_aes_cmac_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct vic_sec_ctx *ctx = crypto_ahash_ctx(tfm);
+
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+/*
+ * ahash .export for CMAC: snapshot the intermediate MAC from the
+ * device's context RAM into the request context, then copy the whole
+ * request context out as the exported state (statesize matches this).
+ *
+ * NOTE(review): export touches the hardware (vic_read_n) — confirm the
+ * device state is still owned by this request when export is called.
+ */
+static int vic_aes_cmac_export(struct ahash_request *req, void *out)
+{
+ struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+ struct vic_sec_dev *sdev = rctx->sdev;
+
+ rctx->digcnt = sdev->authsize;
+
+ vic_read_n(sdev->io_base + VIC_AES_CTX_RAM_OFFSET + VIC_AES_CTX_MAC_OFS,
+ sdev->rctx->digest,rctx->digcnt);
+
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+/*
+ * ahash .import for CMAC: restore the saved request context and write
+ * the intermediate MAC back into the device's context RAM.
+ */
+static int vic_aes_cmac_import(struct ahash_request *req, const void *in)
+{
+ struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+ struct vic_sec_dev *sdev = rctx->sdev;
+
+ memcpy(rctx, in, sizeof(*rctx));
+ vic_write_n(sdev->io_base + VIC_AES_CTX_RAM_OFFSET + VIC_AES_CTX_MAC_OFS,
+ sdev->rctx->digest,rctx->digcnt);
+ sdev->authsize = rctx->digcnt;
+
+ return 0;
+}
+
+/*
+ * ahash algorithm descriptor for hardware CMAC-AES.  statesize is the
+ * full request context because export/import copy it wholesale.
+ */
+static struct ahash_alg algs_aes_cmac[] = {
+ {
+ .init = vic_aes_cmac_init,
+ .update = vic_aes_cmac_update,
+ .final = vic_aes_cmac_final,
+ .finup = vic_aes_cmac_finup,
+ .digest = vic_aes_cmac_digest,
+ .setkey = vic_aes_cmac_setkey,
+ .export = vic_aes_cmac_export,
+ .import = vic_aes_cmac_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "cmac(aes)",
+ .cra_driver_name = "vic-cmac-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_cryp_aes_cmac_init,
+ .cra_exit = vic_cryp_aes_cmac_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+/*
+ * skcipher algorithm descriptors for the hardware AES block modes:
+ * ECB, CBC, CTR, CFB and OFB.  All are asynchronous and share the same
+ * init/exit/setkey helpers; CTR is a stream mode, hence cra_blocksize 1.
+ */
+static struct skcipher_alg crypto_algs[] = {
+{
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "vic-ecb-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+ .init = vic_cryp_cra_init_tfm,
+ .exit = vic_cryp_cra_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = vic_cryp_aes_setkey,
+ .encrypt = vic_cryp_aes_ecb_encrypt,
+ .decrypt = vic_cryp_aes_ecb_decrypt,
+},
+{
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "vic-cbc-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+ .init = vic_cryp_cra_init_tfm,
+ .exit = vic_cryp_cra_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = vic_cryp_aes_setkey,
+ .encrypt = vic_cryp_aes_cbc_encrypt,
+ .decrypt = vic_cryp_aes_cbc_decrypt,
+},
+{
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "vic-ctr-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+ .init = vic_cryp_cra_init_tfm,
+ .exit = vic_cryp_cra_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = vic_cryp_aes_setkey,
+ .encrypt = vic_cryp_aes_ctr_encrypt,
+ .decrypt = vic_cryp_aes_ctr_decrypt,
+},
+{
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "vic-cfb-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+ .init = vic_cryp_cra_init_tfm,
+ .exit = vic_cryp_cra_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = vic_cryp_aes_setkey,
+ .encrypt = vic_cryp_aes_cfb_encrypt,
+ .decrypt = vic_cryp_aes_cfb_decrypt,
+},
+{
+ .base.cra_name = "ofb(aes)",
+ .base.cra_driver_name = "vic-ofb-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+ .init = vic_cryp_cra_init_tfm,
+ .exit = vic_cryp_cra_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = vic_cryp_aes_setkey,
+ .encrypt = vic_cryp_aes_ofb_encrypt,
+ .decrypt = vic_cryp_aes_ofb_decrypt,
+},
+};
+
+/*
+ * AEAD algorithm descriptors: hardware AES-GCM (96-bit IV) and AES-CCM.
+ * Both are asynchronous and share the same setkey/init/exit helpers.
+ */
+static struct aead_alg aead_algs[] = {
+{
+ .setkey = vic_cryp_aes_aead_setkey,
+ .setauthsize = vic_cryp_aes_gcm_setauthsize,
+ .encrypt = vic_cryp_aes_gcm_encrypt,
+ .decrypt = vic_cryp_aes_gcm_decrypt,
+ .init = vic_cryp_aes_aead_init,
+ .exit = vic_cryp_aes_aead_exit,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "vic-gcm-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+},
+{
+ .setkey = vic_cryp_aes_aead_setkey,
+ .setauthsize = vic_cryp_aes_ccm_setauthsize,
+ .encrypt = vic_cryp_aes_ccm_encrypt,
+ .decrypt = vic_cryp_aes_ccm_decrypt,
+ .init = vic_cryp_aes_aead_init,
+ .exit = vic_cryp_aes_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "vic-ccm-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+},
+};
+/*
+ * Register every AES algorithm this driver provides (CMAC ahash, the
+ * skcipher block modes, and the GCM/CCM AEADs).  On any failure, undo
+ * the registrations already made and return the error.
+ */
+int vic_aes_register_algs(void)
+{
+ int ret;
+
+ ret = crypto_register_ahashes(algs_aes_cmac, ARRAY_SIZE(algs_aes_cmac));
+ if (ret) {
+ pr_err("Could not register algs_aes_cmac\n");
+ goto err_hash;
+ }
+
+ ret = crypto_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
+ if (ret) {
+ pr_err("Could not register algs\n");
+ goto err_algs;
+ }
+
+ ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ if (ret)
+ goto err_aead_algs;
+
+ return 0;
+
+err_aead_algs:
+ crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
+err_algs:
+ crypto_unregister_ahashes(algs_aes_cmac, ARRAY_SIZE(algs_aes_cmac));
+err_hash:
+ return ret;
+}
+
+/*
+ * Unregister everything vic_aes_register_algs() registered, in reverse
+ * order of registration.
+ */
+int vic_aes_unregister_algs(void)
+{
+ crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
+ crypto_unregister_ahashes(algs_aes_cmac, ARRAY_SIZE(algs_aes_cmac));
+
+ return 0;
+}
diff --git a/drivers/crypto/sifive-vic/vic-pka-hw.h b/drivers/crypto/sifive-vic/vic-pka-hw.h
new file mode 100644
index 0000000000000..b6b420bf0e44a
--- /dev/null
+++ b/drivers/crypto/sifive-vic/vic-pka-hw.h
@@ -0,0 +1,1671 @@
+// ------------------------------------------------------------------------
+//
+// (C) COPYRIGHT 2011 - 2015 SYNOPSYS, INC.
+// ALL RIGHTS RESERVED
+//
+// This program is free software; you can redistribute it and/or
+// modify it under the terms of the GNU General Public License
+// version 2 as published by the Free Software Foundation.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program; if not, see <http://www.gnu.org/licenses/>.
+//
+// ------------------------------------------------------------------------
+
+#ifndef __VIC_PKA_HW_H__
+#define __VIC_PKA_HW_H__
+
+/* Control/status registers */
+enum {
+ /* Values are 32-bit register/word indices into the PKA window. */
+ PKA_CTRL = 0,
+ PKA_ENTRY,
+ PKA_RC,
+ PKA_BUILD_CONF,
+ PKA_F_STACK,
+ PKA_INST_SINCE_GO,
+ PKA_P_STACK,
+ PKA_CONF,
+ PKA_STATUS,
+ PKA_FLAGS,
+ PKA_WATCHDOG,
+ PKA_CYCLES_SINCE_GO,
+ PKA_INDEX_I,
+ PKA_INDEX_J,
+ PKA_INDEX_K,
+ PKA_INDEX_L,
+ PKA_IRQ_EN,
+ PKA_DTA_JUMP,
+ PKA_LFSR_SEED,
+
+ PKA_BANK_SWITCH_A = 20,
+ PKA_BANK_SWITCH_B,
+ PKA_BANK_SWITCH_C,
+ PKA_BANK_SWITCH_D,
+
+ PKA_OPERAND_A_BASE = 0x100,
+ PKA_OPERAND_B_BASE = 0x200,
+ PKA_OPERAND_C_BASE = 0x300,
+ PKA_OPERAND_D_BASE = 0x400,
+
+ /* F/W base for old cores */
+ PKA_FIRMWARE_BASE = 0x800,
+
+ /* F/W base for new ("type 2") cores, with fixed RAM/ROM split offset */
+ PKA_FIRMWARE_T2_BASE = 0x1000,
+ PKA_FIRMWARE_T2_SPLIT = 0x1800
+};
+
+/* Operand sizes below are in bytes. */
+#define PKA_MAX_OPERAND_SIZE 512 /* 4096 bits */
+#define PKA_ECC521_OPERAND_SIZE 66 /* 528 bits */
+#define PKA_OPERAND_BANK_SIZE 0x400
+
+/*
+ * Bit positions in the PKA_CTRL and PKA_RC registers; the matching
+ * *_BITS macros give the width of multi-bit fields.
+ */
+#define PKA_CTRL_GO 31
+#define PKA_CTRL_STOP_RQST 27
+#define PKA_CTRL_M521_MODE 16
+#define PKA_CTRL_M521_MODE_BITS 5
+#define PKA_CTRL_BASE_RADIX 8
+#define PKA_CTRL_BASE_RADIX_BITS 3
+#define PKA_CTRL_PARTIAL_RADIX 0
+#define PKA_CTRL_PARTIAL_RADIX_BITS 8
+
+#define PKA_CTRL_M521_ECC521 9
+
+#define PKA_RC_BUSY 31
+#define PKA_RC_IRQ 30
+#define PKA_RC_WR_PENDING 29
+#define PKA_RC_ZERO 28
+#define PKA_RC_REASON 16
+#define PKA_RC_REASON_BITS 8
+
+#define PKA_BC_FORMAT_TYPE 30
+#define PKA_BC_FORMAT_TYPE_BITS 2
+
+/*
+ * Bit fields for BUILD_CONF format type 1 (H/W version >= 1.13)
+ * Note that format type 2 (H/W version >= 1.17) has the same layout.
+ */
+#define PKA_BC1_ALU_SZ 19
+#define PKA_BC1_ALU_SZ_BITS 2
+#define PKA_BC1_RSA_SZ 16
+#define PKA_BC1_RSA_SZ_BITS 3
+#define PKA_BC1_ECC_SZ 14
+#define PKA_BC1_ECC_SZ_BITS 2
+#define PKA_BC1_FW_ROM_SZ 11
+#define PKA_BC1_FW_ROM_SZ_BITS 3
+#define PKA_BC1_FW_RAM_SZ 8
+#define PKA_BC1_FW_RAM_SZ_BITS 3
+#define PKA_BC1_BANK_SW_D 6
+#define PKA_BC1_BANK_SW_D_BITS 2
+#define PKA_BC1_BANK_SW_C 4
+#define PKA_BC1_BANK_SW_C_BITS 2
+#define PKA_BC1_BANK_SW_B 2
+#define PKA_BC1_BANK_SW_B_BITS 2
+#define PKA_BC1_BANK_SW_A 0
+#define PKA_BC1_BANK_SW_A_BITS 2
+
+/* Enumerations for the FW_RAM and FW_ROM fields in format type 1. */
+enum {
+ PKA_BC1_FW_SZ_0,
+ PKA_BC1_FW_SZ_256,
+ PKA_BC1_FW_SZ_512,
+ PKA_BC1_FW_SZ_1024,
+ PKA_BC1_FW_SZ_2048,
+};
+
+/* Bit fields for BUILD_CONF format type 0 (H/W version <= 1.12) */
+#define PKA_BC_ALU_SZ 19
+#define PKA_BC_ALU_SZ_BITS 2
+#define PKA_BC_RSA_SZ 16
+#define PKA_BC_RSA_SZ_BITS 3
+#define PKA_BC_ECC_SZ 14
+#define PKA_BC_ECC_SZ_BITS 2
+#define PKA_BC_FW_HAS_ROM 13
+#define PKA_BC_FW_HAS_RAM 12
+#define PKA_BC_FW_ROM_SZ 10
+#define PKA_BC_FW_ROM_SZ_BITS 2
+#define PKA_BC_FW_RAM_SZ 8
+#define PKA_BC_FW_RAM_SZ_BITS 2
+#define PKA_BC_BANK_SW_D 6
+#define PKA_BC_BANK_SW_D_SZ 2
+#define PKA_BC_BANK_SW_C 4
+#define PKA_BC_BANK_SW_C_SZ 2
+#define PKA_BC_BANK_SW_B 2
+#define PKA_BC_BANK_SW_B_SZ 2
+#define PKA_BC_BANK_SW_A 0
+#define PKA_BC_BANK_SW_A_SZ 2
+
+/* Enumerations for the FW_RAM and FW_ROM fields in format type 0. */
+enum {
+ PKA_FW_SZ_256,
+ PKA_FW_SZ_512,
+ PKA_FW_SZ_1024,
+ PKA_FW_SZ_2048,
+};
+
+/* Bit positions in the STATUS / IRQ_EN / FLAGS / CONF / DTA_JUMP registers. */
+#define PKA_STAT_IRQ 30
+
+#define PKA_IRQ_EN_STAT 30
+
+#define PKA_FLAG_ZERO 0
+#define PKA_FLAG_MEMBIT 1
+#define PKA_FLAG_BORROW 2
+#define PKA_FLAG_CARRY 3
+#define PKA_FLAG_F0 4
+#define PKA_FLAG_F1 5
+#define PKA_FLAG_F2 6
+#define PKA_FLAG_F3 7
+
+#define PKA_CONF_BYTESWAP 26
+
+#define PKA_DTA_JUMP_PROBABILITY 0
+#define PKA_DTA_JUMP_PROBABILITY_BITS 13
+
+// from clp300_ram_fw.hex
+static const u32 PKA_FW[] = {
+ 0xf8000009,
+ 0xf8eb4d8a,
+ 0xf8f93bc3,
+ 0xf869f0f1,
+ 0xf8a38aaa,
+ 0xf8fe215f,
+ 0xf80828f0,
+ 0xf80000fc,
+ 0xf80005d1,
+ 0x000000ff,
+ 0x2000002b,
+ 0x2000002d,
+ 0x2000002f,
+ 0x20000031,
+ 0x20000033,
+ 0x20000035,
+ 0x20000037,
+ 0x20000039,
+ 0x2000003b,
+ 0x2000003d,
+ 0x20000110,
+ 0x20000112,
+ 0x20000114,
+ 0x20000116,
+ 0x20000118,
+ 0x200001bd,
+ 0x200001bf,
+ 0x200001c1,
+ 0x200001c3,
+ 0x200001c5,
+ 0x200001c7,
+ 0x200001c9,
+ 0x200001cb,
+ 0x200001cd,
+ 0x200001cf,
+ 0x20000392,
+ 0x200003f7,
+ 0x200003f9,
+ 0x200003fb,
+ 0x200003fd,
+ 0x200003ff,
+ 0x20000401,
+ 0x20000578,
+ 0x220000a4,
+ 0x00000000,
+ 0x22000090,
+ 0x00000000,
+ 0x22000099,
+ 0x00000000,
+ 0x2200003f,
+ 0x00000000,
+ 0x22000041,
+ 0x00000000,
+ 0x220000c1,
+ 0x00000000,
+ 0x220000ee,
+ 0x00000000,
+ 0x220000e9,
+ 0x00000000,
+ 0x220000e1,
+ 0x00000000,
+ 0xc8004400,
+ 0x00000000,
+ 0x48200000,
+ 0x20000043,
+ 0x48200000,
+ 0x44000400,
+ 0x40008200,
+ 0x220000da,
+ 0x40004400,
+ 0x08c00000,
+ 0x43000680,
+ 0x4000c200,
+ 0x23c00050,
+ 0xd9004000,
+ 0x33000087,
+ 0x6b000000,
+ 0x33600058,
+ 0x41000440,
+ 0x40008800,
+ 0x6b008000,
+ 0x33600055,
+ 0x41008040,
+ 0x40000c00,
+ 0x2000004a,
+ 0xd010c040,
+ 0x41000c00,
+ 0x2000004a,
+ 0x6b004000,
+ 0x33600065,
+ 0x41004440,
+ 0x40008a00,
+ 0x6b00d000,
+ 0x33600061,
+ 0x4100d440,
+ 0x40008e80,
+ 0x2000004a,
+ 0x4000d440,
+ 0xd011c040,
+ 0x41000e80,
+ 0x2000004a,
+ 0xd8004440,
+ 0x33400076,
+ 0x41008800,
+ 0xd810d040,
+ 0x40000c00,
+ 0x3140006d,
+ 0xd010c040,
+ 0x40000c00,
+ 0x6b008000,
+ 0x33600073,
+ 0x48200000,
+ 0x41008040,
+ 0x40000c00,
+ 0x2000004a,
+ 0xd010c040,
+ 0x41000c00,
+ 0x2000004a,
+ 0xd8080440,
+ 0x41008a00,
+ 0xd81a8040,
+ 0x40000e80,
+ 0x3140007d,
+ 0xd001c440,
+ 0x40008e80,
+ 0x6b00d000,
+ 0x33600083,
+ 0x48200000,
+ 0x4100d440,
+ 0x40008e80,
+ 0x2000004a,
+ 0x4000d440,
+ 0xd011c040,
+ 0x41000e80,
+ 0x2000004a,
+ 0x24000000,
+ 0x3340008c,
+ 0xd010c240,
+ 0x40008000,
+ 0x2000008f,
+ 0xd010c000,
+ 0x40008240,
+ 0x2000008f,
+ 0x24000000,
+ 0x48400000,
+ 0x48200000,
+ 0x220000da,
+ 0xd0004400,
+ 0x220000c2,
+ 0xd800c400,
+ 0x31400094,
+ 0x20000098,
+ 0x24000000,
+ 0x48200000,
+ 0x220000da,
+ 0x220000d1,
+ 0x334000a0,
+ 0xd8188040,
+ 0x40008000,
+ 0x200000a3,
+ 0xd8188000,
+ 0x40008040,
+ 0x200000a3,
+ 0x24000000,
+ 0x40004680,
+ 0x220000ae,
+ 0x40000400,
+ 0x4000d000,
+ 0x40008680,
+ 0x220000ae,
+ 0x4000d200,
+ 0x220000b2,
+ 0x220000b1,
+ 0x24000000,
+ 0x40000200,
+ 0x4000d800,
+ 0x200000b2,
+ 0x44000200,
+ 0xc8004400,
+ 0xc910ca00,
+ 0xc808c000,
+ 0xd0008200,
+ 0xd4018a00,
+ 0x40004400,
+ 0x200000c2,
+ 0x40000200,
+ 0xca004400,
+ 0xc910ca00,
+ 0xc808c000,
+ 0xd0008200,
+ 0xd4018a00,
+ 0x40004400,
+ 0x200000c2,
+ 0x48200000,
+ 0x332000c6,
+ 0xd810c040,
+ 0x40008040,
+ 0x200000c9,
+ 0xd810c040,
+ 0x40000c00,
+ 0x200000c9,
+ 0x40008000,
+ 0xd800c400,
+ 0x334000ce,
+ 0x40008000,
+ 0x200000d0,
+ 0x40000400,
+ 0x200000d0,
+ 0x24000000,
+ 0xd8004400,
+ 0x334000d6,
+ 0xd9004000,
+ 0x48400000,
+ 0x200000d9,
+ 0xd8080400,
+ 0x49400000,
+ 0x200000d9,
+ 0x24000000,
+ 0x40000680,
+ 0x40004400,
+ 0x220000c2,
+ 0x40000200,
+ 0x48200000,
+ 0x4000d400,
+ 0x200000c2,
+ 0x08c00000,
+ 0x40008240,
+ 0x43000200,
+ 0xd808c400,
+ 0x40004800,
+ 0x22000047,
+ 0x400086c0,
+ 0x24000000,
+ 0x08c00001,
+ 0x45000000,
+ 0x44000400,
+ 0x22000047,
+ 0x24000000,
+ 0x40008680,
+ 0x43000200,
+ 0x43000640,
+ 0x43000040,
+ 0x44000000,
+ 0x4100c240,
+ 0x5180c000,
+ 0x6a804000,
+ 0x08c00000,
+ 0xd0004400,
+ 0xd4014c40,
+ 0xd91a8800,
+ 0x334000ff,
+ 0x3300010b,
+ 0x40008000,
+ 0x40008840,
+ 0x6a80c800,
+ 0x08c00001,
+ 0x6b004800,
+ 0x33600103,
+ 0x08c00000,
+ 0x41004400,
+ 0x41004c40,
+ 0x40008200,
+ 0x40008a40,
+ 0x21c00109,
+ 0x6d204000,
+ 0x11800000,
+ 0x200000f7,
+ 0x51c08000,
+ 0x310000ff,
+ 0x6a80c800,
+ 0x4000d400,
+ 0x24000000,
+ 0x2200011a,
+ 0x00000000,
+ 0x22000139,
+ 0x00000000,
+ 0x2200018f,
+ 0x00000000,
+ 0x2200014b,
+ 0x00000000,
+ 0x2200017d,
+ 0x00000000,
+ 0x220000ae,
+ 0x4000da00,
+ 0x400006c0,
+ 0x44000000,
+ 0x220000b2,
+ 0x31800123,
+ 0x09800000,
+ 0x11800000,
+ 0x20000127,
+ 0x5180d000,
+ 0x31000127,
+ 0x44000000,
+ 0x20000138,
+ 0x220000b9,
+ 0x6880d000,
+ 0x31600130,
+ 0x2000012b,
+ 0x4000da00,
+ 0x40000240,
+ 0x220000b2,
+ 0x40004840,
+ 0x20000136,
+ 0x26200136,
+ 0x4000da00,
+ 0x40000240,
+ 0x220000b2,
+ 0x40004800,
+ 0x20000136,
+ 0x28800127,
+ 0x220000b1,
+ 0x24000000,
+ 0x44000400,
+ 0x40004800,
+ 0x08c00000,
+ 0x22000047,
+ 0x400080c0,
+ 0x44000400,
+ 0xd8188040,
+ 0x40000e00,
+ 0x4000ca00,
+ 0x40004680,
+ 0x4000cc00,
+ 0x4000dc40,
+ 0x2200014b,
+ 0x44000040,
+ 0xd8090e00,
+ 0x40008240,
+ 0x4000d400,
+ 0x4000dc40,
+ 0x51808800,
+ 0x3100014f,
+ 0xd910c000,
+ 0x3340017c,
+ 0x51c08800,
+ 0x31000153,
+ 0x51a08000,
+ 0x20000155,
+ 0x09e00000,
+ 0x14bb8000,
+ 0x5180c000,
+ 0x15d60000,
+ 0x09800000,
+ 0x15930000,
+ 0x3300015b,
+ 0x31400163,
+ 0x09800000,
+ 0x159a0000,
+ 0x43000200,
+ 0x6a804000,
+ 0xc808c000,
+ 0x40000640,
+ 0x43000600,
+ 0x20000169,
+ 0x43000200,
+ 0x6ac04000,
+ 0xc808c000,
+ 0x40000600,
+ 0x40000e40,
+ 0x20000169,
+ 0x10c00000,
+ 0xd810c000,
+ 0xda11c840,
+ 0x33400170,
+ 0x40000400,
+ 0x40000c40,
+ 0x2000016a,
+ 0x48200000,
+ 0x48c00000,
+ 0x6b00c800,
+ 0x31600175,
+ 0x49c00000,
+ 0x4100c840,
+ 0x31c00178,
+ 0x49200000,
+ 0x4100c000,
+ 0x40000e40,
+ 0x40000600,
+ 0x25c0016a,
+ 0x24000000,
+ 0xd9188000,
+ 0x3140018e,
+ 0x51a08000,
+ 0x5180c000,
+ 0x15d60000,
+ 0x43000200,
+ 0x6ac04000,
+ 0xc808c000,
+ 0x40000600,
+ 0x10c00000,
+ 0xd810c000,
+ 0x3340018b,
+ 0x40000400,
+ 0x20000187,
+ 0x4100c000,
+ 0x40000600,
+ 0x25c00187,
+ 0x24000000,
+ 0x40001400,
+ 0x40001c40,
+ 0x40005600,
+ 0x2200014b,
+ 0x400087c0,
+ 0x40001400,
+ 0x40001c40,
+ 0x40005e00,
+ 0x2200014b,
+ 0x40008000,
+ 0x40005e00,
+ 0x4000f040,
+ 0x40000e40,
+ 0x2200011a,
+ 0x40000780,
+ 0x40000400,
+ 0x40005600,
+ 0x220000c2,
+ 0x40000240,
+ 0x40004880,
+ 0x40005600,
+ 0x4000e000,
+ 0x40000640,
+ 0x40009e80,
+ 0x4000e800,
+ 0x400006c0,
+ 0x4000f800,
+ 0x2200011a,
+ 0x40001200,
+ 0xd8004400,
+ 0x22000088,
+ 0x40000400,
+ 0x2200017d,
+ 0x40009200,
+ 0xc8088000,
+ 0x40000400,
+ 0x40000c40,
+ 0x40005600,
+ 0x2200014b,
+ 0xc8105800,
+ 0x430007c0,
+ 0xd01e0400,
+ 0xd41f0c40,
+ 0x40008000,
+ 0x40008840,
+ 0x24000000,
+ 0x220001ed,
+ 0x00000000,
+ 0x220001d8,
+ 0x00000000,
+ 0x220002fd,
+ 0x00000000,
+ 0x220001e1,
+ 0x00000000,
+ 0x220001d5,
+ 0x00000000,
+ 0x22000226,
+ 0x00000000,
+ 0x220001d1,
+ 0x00000000,
+ 0x22000371,
+ 0x00000000,
+ 0x22000377,
+ 0x00000000,
+ 0x22000366,
+ 0x00000000,
+ 0x22000262,
+ 0x2200037e,
+ 0x22000267,
+ 0x24000000,
+ 0x48800000,
+ 0x2200028e,
+ 0x24000000,
+ 0x440004c0,
+ 0x22000280,
+ 0x220002fd,
+ 0x22000254,
+ 0x22000262,
+ 0x2200037e,
+ 0x22000267,
+ 0x22000248,
+ 0x24000000,
+ 0x44000480,
+ 0x440004c0,
+ 0x22000272,
+ 0x22000280,
+ 0x2200028e,
+ 0x33c0038f,
+ 0x22000254,
+ 0x22000262,
+ 0x2200037e,
+ 0x22000267,
+ 0x22000248,
+ 0x24000000,
+ 0x2200038c,
+ 0x318001f3,
+ 0x40003c80,
+ 0x22000272,
+ 0x2200035a,
+ 0x200001f5,
+ 0x44000480,
+ 0x22000272,
+ 0x48800000,
+ 0x50a0f800,
+ 0x11a00000,
+ 0x2200024d,
+ 0x23e001fb,
+ 0x2000020f,
+ 0x15fe8000,
+ 0x21e0020f,
+ 0x20000200,
+ 0x22000254,
+ 0x20000200,
+ 0x220002fd,
+ 0x22000254,
+ 0x68e0f800,
+ 0x20000204,
+ 0x26200207,
+ 0x2200028e,
+ 0x20000207,
+ 0x20000208,
+ 0x28e001fe,
+ 0x2200024d,
+ 0x2000020f,
+ 0x22000254,
+ 0x2000020f,
+ 0x2200025b,
+ 0x2000020f,
+ 0x220002fd,
+ 0x22000254,
+ 0x68a0f800,
+ 0x3160021a,
+ 0x20000214,
+ 0x2200028e,
+ 0x33c0038f,
+ 0x20000217,
+ 0x28a0020b,
+ 0x22000254,
+ 0x20000221,
+ 0x2620021e,
+ 0x2200028e,
+ 0x2000021d,
+ 0x2000021e,
+ 0x28a0020d,
+ 0x2200025b,
+ 0x20000221,
+ 0x22000262,
+ 0x2200037e,
+ 0x22000267,
+ 0x22000248,
+ 0x24000000,
+ 0xd902c000,
+ 0x31400245,
+ 0xd90ac000,
+ 0x31400245,
+ 0x44000480,
+ 0x4000dc00,
+ 0x22000272,
+ 0x40005000,
+ 0x220000b9,
+ 0x40000380,
+ 0x40001200,
+ 0x40004000,
+ 0x220000b9,
+ 0x40001200,
+ 0x220000b2,
+ 0x40000340,
+ 0x40003200,
+ 0x4000d800,
+ 0x220000b2,
+ 0x40001200,
+ 0x220000b2,
+ 0x400002c0,
+ 0x40003a00,
+ 0x4000d800,
+ 0x220000b2,
+ 0xd0005c00,
+ 0x220000c2,
+ 0xd0006c00,
+ 0x220000c2,
+ 0xd9007000,
+ 0x20000247,
+ 0x48000000,
+ 0x20000247,
+ 0x24000000,
+ 0x40001c00,
+ 0x40008080,
+ 0x40005c00,
+ 0x40008280,
+ 0x24000000,
+ 0x40001200,
+ 0x400040c0,
+ 0x40005000,
+ 0x400002c0,
+ 0x40009000,
+ 0x400004c0,
+ 0x24000000,
+ 0x40002200,
+ 0x400040c0,
+ 0x40006000,
+ 0x400002c0,
+ 0x4000a000,
+ 0x400004c0,
+ 0x24000000,
+ 0x40002200,
+ 0x40004140,
+ 0x40006000,
+ 0x40000340,
+ 0x4000a000,
+ 0x40000540,
+ 0x24000000,
+ 0x40009800,
+ 0x44000200,
+ 0x220000b2,
+ 0x400004c0,
+ 0x24000000,
+ 0x40001a00,
+ 0x40004000,
+ 0x40009a00,
+ 0x220000b2,
+ 0x40000200,
+ 0x400040c0,
+ 0x40005800,
+ 0x40009a00,
+ 0x220000b2,
+ 0x400002c0,
+ 0x24000000,
+ 0x40001200,
+ 0x4000d800,
+ 0x220000b2,
+ 0x40000200,
+ 0x40004080,
+ 0x40005000,
+ 0x4000da00,
+ 0x220000b2,
+ 0x40000280,
+ 0x40009000,
+ 0x4000da00,
+ 0x220000b2,
+ 0x40000480,
+ 0x24000000,
+ 0x40001a00,
+ 0x4000d800,
+ 0x220000b2,
+ 0x40000200,
+ 0x400040c0,
+ 0x40005800,
+ 0x4000da00,
+ 0x220000b2,
+ 0x400002c0,
+ 0x40009800,
+ 0x4000da00,
+ 0x220000b2,
+ 0x400004c0,
+ 0x24000000,
+ 0x40001200,
+ 0x40009800,
+ 0x220000b2,
+ 0x40000700,
+ 0x40001a00,
+ 0x40009000,
+ 0x220000b2,
+ 0x40000300,
+ 0xd80ce400,
+ 0x3100029a,
+ 0x49c00000,
+ 0x2000029b,
+ 0x48c00000,
+ 0x2000029c,
+ 0x22000088,
+ 0x40000500,
+ 0xd00ce400,
+ 0x220000c2,
+ 0x40000540,
+ 0x4000a000,
+ 0x220000b9,
+ 0x40000740,
+ 0x4000a200,
+ 0x220000b2,
+ 0x40000780,
+ 0x4000e800,
+ 0x4000aa00,
+ 0x220000b2,
+ 0x40000340,
+ 0x40005000,
+ 0x40009a00,
+ 0x220000b2,
+ 0x40000580,
+ 0x40005800,
+ 0x40009200,
+ 0x220000b2,
+ 0x40000700,
+ 0xd81cb380,
+ 0x40007400,
+ 0x22000088,
+ 0x40000380,
+ 0xd01cb000,
+ 0x40000400,
+ 0x220000c2,
+ 0x400003c0,
+ 0x40007000,
+ 0x220000b9,
+ 0x400005c0,
+ 0x40009000,
+ 0x40009a00,
+ 0x220000b2,
+ 0x40000700,
+ 0x4000ba00,
+ 0x220000b2,
+ 0x40000740,
+ 0xd8006c00,
+ 0x22000088,
+ 0x4000a200,
+ 0x220000b2,
+ 0x40000200,
+ 0x40004100,
+ 0x42006800,
+ 0x40000400,
+ 0x220000c2,
+ 0xd00d0400,
+ 0x220000c2,
+ 0x40000500,
+ 0x4200ec00,
+ 0x220000c2,
+ 0x40000200,
+ 0xd8144000,
+ 0x40000400,
+ 0x22000088,
+ 0x40000200,
+ 0x40007000,
+ 0x220000b2,
+ 0x40000500,
+ 0x40007800,
+ 0x4000f200,
+ 0x220000b2,
+ 0xd8140740,
+ 0x4000ec00,
+ 0x22000088,
+ 0x40000740,
+ 0x6b00e800,
+ 0x338002ef,
+ 0x336002ea,
+ 0x4000c200,
+ 0xd008e840,
+ 0x40000680,
+ 0x48200000,
+ 0x200002f5,
+ 0x4000c200,
+ 0xd008e800,
+ 0x40000740,
+ 0x48000000,
+ 0x200002f5,
+ 0x336002f2,
+ 0x48200000,
+ 0x200002f5,
+ 0x4000c200,
+ 0xd008e800,
+ 0x40000740,
+ 0x4100eb00,
+ 0x4000e000,
+ 0x200002f9,
+ 0x40009800,
+ 0x4000f200,
+ 0x220000b2,
+ 0x40000500,
+ 0x24000000,
+ 0x40009a00,
+ 0x40005800,
+ 0x220000b2,
+ 0x42000400,
+ 0x220000c2,
+ 0x40000500,
+ 0x33e0031b,
+ 0x40001a00,
+ 0x40004000,
+ 0x220000b9,
+ 0x40000780,
+ 0x42000200,
+ 0x40004400,
+ 0x220000c2,
+ 0xd000f400,
+ 0x220000c2,
+ 0x40000580,
+ 0x40009800,
+ 0x220000b9,
+ 0x400004c0,
+ 0x40003200,
+ 0x4000d800,
+ 0x220000b2,
+ 0x40009a00,
+ 0x220000b2,
+ 0xd0160200,
+ 0x40004400,
+ 0x220000c2,
+ 0x40000580,
+ 0x2000032a,
+ 0xd8039a00,
+ 0x40004400,
+ 0x22000088,
+ 0x40000200,
+ 0xd0039a40,
+ 0x40004c00,
+ 0x220000c2,
+ 0x220000b2,
+ 0x40000440,
+ 0x40000200,
+ 0x42000400,
+ 0x220000c2,
+ 0xd0004400,
+ 0x220000c2,
+ 0x40000580,
+ 0x40001a00,
+ 0x40005800,
+ 0x220000b2,
+ 0x400005c0,
+ 0x4000b000,
+ 0x220000b9,
+ 0x40000380,
+ 0x4000a000,
+ 0x4000ba00,
+ 0x220000b2,
+ 0x400003c0,
+ 0x4000a000,
+ 0x220000b9,
+ 0x40000700,
+ 0x42007c00,
+ 0x220000c2,
+ 0x40000740,
+ 0x42000400,
+ 0x220000c2,
+ 0x400003c0,
+ 0x40007000,
+ 0xd8007c00,
+ 0x22000088,
+ 0x4000a200,
+ 0x220000b2,
+ 0x40000400,
+ 0x40008100,
+ 0xd00fec00,
+ 0x220000c2,
+ 0xd8007400,
+ 0x22000088,
+ 0x4000b200,
+ 0x220000b2,
+ 0x400003c0,
+ 0x40005800,
+ 0x220000b9,
+ 0x4000e200,
+ 0x220000b2,
+ 0x42000400,
+ 0x220000c2,
+ 0xd80f0400,
+ 0x22000088,
+ 0x40000300,
+ 0x4000a000,
+ 0x4000e200,
+ 0x220000b2,
+ 0x40000500,
+ 0x24000000,
+ 0x40001200,
+ 0x40004000,
+ 0x22000363,
+ 0x40000200,
+ 0x40004080,
+ 0x40005000,
+ 0x22000363,
+ 0x40000280,
+ 0x24000000,
+ 0x40009200,
+ 0x220000b2,
+ 0x24000000,
+ 0x44000000,
+ 0x08800001,
+ 0x6a800000,
+ 0x43000200,
+ 0xd8080400,
+ 0x22000088,
+ 0x40000200,
+ 0xd9083000,
+ 0x31000370,
+ 0x49e00000,
+ 0x24000000,
+ 0x40001dc0,
+ 0xd902b800,
+ 0x31000376,
+ 0x400055c0,
+ 0xd90bb800,
+ 0x24000000,
+ 0x40001dc0,
+ 0xd902b800,
+ 0x3100037d,
+ 0x40005800,
+ 0xd00a0400,
+ 0x220000c1,
+ 0x24000000,
+ 0x40009800,
+ 0x44000400,
+ 0x08c00000,
+ 0x22000047,
+ 0x40008000,
+ 0x400004c0,
+ 0x24000000,
+ 0x44000000,
+ 0xd91f0000,
+ 0x3300038b,
+ 0x43000000,
+ 0xd91f0000,
+ 0x3300038b,
+ 0x24000000,
+ 0x22000385,
+ 0x33000390,
+ 0x24000000,
+ 0x00000043,
+ 0x00000042,
+ 0x00000041,
+ 0x22000394,
+ 0x00000000,
+ 0x220003f1,
+ 0x49800000,
+ 0x22000371,
+ 0x3300038f,
+ 0x22000377,
+ 0x3300038f,
+ 0x44000480,
+ 0x440004c0,
+ 0x43000100,
+ 0x43000300,
+ 0x43000500,
+ 0x22000272,
+ 0x22000280,
+ 0xe007fbc0,
+ 0x330003a5,
+ 0x2200028e,
+ 0x33c0038f,
+ 0x40001700,
+ 0x40001f40,
+ 0x40002680,
+ 0x18000000,
+ 0x4000e000,
+ 0x40005040,
+ 0x40009080,
+ 0x4000e8c0,
+ 0x40005900,
+ 0x40009940,
+ 0x40006180,
+ 0x4000a1c0,
+ 0x19000000,
+ 0x50c03800,
+ 0x50e0f800,
+ 0x159b8000,
+ 0x334003bf,
+ 0x330003c7,
+ 0x40001700,
+ 0x4000e100,
+ 0x40005700,
+ 0x4000e300,
+ 0x40009700,
+ 0x4000e500,
+ 0x16980000,
+ 0x200003c8,
+ 0x40001f00,
+ 0x4000e100,
+ 0x40005f00,
+ 0x4000e300,
+ 0x40009f00,
+ 0x4000e500,
+ 0x169c0000,
+ 0x200003c8,
+ 0x16980000,
+ 0x11800000,
+ 0x22000254,
+ 0x220002fd,
+ 0x22000254,
+ 0x68803800,
+ 0x336003d1,
+ 0x6880f800,
+ 0x336003da,
+ 0x200003e7,
+ 0x6880f800,
+ 0x336003e1,
+ 0x18000000,
+ 0x40000700,
+ 0x40000a80,
+ 0x40001480,
+ 0x19000000,
+ 0x4000e080,
+ 0x200003e8,
+ 0x18000000,
+ 0x40001f00,
+ 0x40002280,
+ 0x40002c80,
+ 0x19000000,
+ 0x4000e080,
+ 0x200003e8,
+ 0x4000d080,
+ 0x18000000,
+ 0x40003280,
+ 0x40003c80,
+ 0x19000000,
+ 0x200003e8,
+ 0x200003ea,
+ 0x2200028e,
+ 0x33c0038f,
+ 0x288003c9,
+ 0x22000254,
+ 0x22000262,
+ 0x2200037e,
+ 0x22000267,
+ 0x22000248,
+ 0x24000000,
+ 0x2200038c,
+ 0x4000fa00,
+ 0x40003fc0,
+ 0x2200038c,
+ 0x400047c0,
+ 0x24000000,
+ 0x22000403,
+ 0x00000000,
+ 0x2200043e,
+ 0x00000000,
+ 0x2200044d,
+ 0x00000000,
+ 0x2200045e,
+ 0x00000000,
+ 0x2200055b,
+ 0x00000000,
+ 0x22000531,
+ 0x00000000,
+ 0x2200038c,
+ 0x31800408,
+ 0x40003c80,
+ 0x2200054f,
+ 0x20000409,
+ 0x44000480,
+ 0x48800000,
+ 0x50a0f800,
+ 0x11a00000,
+ 0x2200024d,
+ 0x23e0040f,
+ 0x20000423,
+ 0x15fe8000,
+ 0x21e00423,
+ 0x20000414,
+ 0x22000254,
+ 0x20000414,
+ 0x220004e6,
+ 0x22000254,
+ 0x68e0f800,
+ 0x20000418,
+ 0x2620041b,
+ 0x22000479,
+ 0x2000041b,
+ 0x2000041c,
+ 0x28e00412,
+ 0x2200024d,
+ 0x20000423,
+ 0x22000254,
+ 0x20000423,
+ 0x2200025b,
+ 0x20000423,
+ 0x220004e6,
+ 0x22000254,
+ 0x68a0f800,
+ 0x3160042e,
+ 0x20000428,
+ 0x22000479,
+ 0x33c0038f,
+ 0x2000042b,
+ 0x28a0041f,
+ 0x22000254,
+ 0x20000435,
+ 0x26200432,
+ 0x22000479,
+ 0x20000431,
+ 0x20000432,
+ 0x28a00421,
+ 0x2200025b,
+ 0x20000435,
+ 0x44000400,
+ 0x40009800,
+ 0x08c00000,
+ 0x22000047,
+ 0x40008000,
+ 0x400004c0,
+ 0x22000545,
+ 0x22000248,
+ 0x24000000,
+ 0x440004c0,
+ 0x08c00000,
+ 0x220004e6,
+ 0x44000400,
+ 0x4000a000,
+ 0x22000047,
+ 0x40008000,
+ 0x400004c0,
+ 0x40002500,
+ 0x4000a0c0,
+ 0x40006500,
+ 0x4000a2c0,
+ 0x22000545,
+ 0x22000248,
+ 0x24000000,
+ 0x44000480,
+ 0x440004c0,
+ 0x08c00000,
+ 0x22000479,
+ 0x33c0038f,
+ 0x44000400,
+ 0x4000a000,
+ 0x22000047,
+ 0x40008000,
+ 0x400004c0,
+ 0x40002500,
+ 0x4000a0c0,
+ 0x40006500,
+ 0x4000a2c0,
+ 0x22000545,
+ 0x22000248,
+ 0x24000000,
+ 0xd902c000,
+ 0x31400476,
+ 0xd90ac000,
+ 0x31400476,
+ 0x40005000,
+ 0x2200052d,
+ 0x40000380,
+ 0x40001200,
+ 0x40004000,
+ 0x2200052d,
+ 0x40001200,
+ 0x22000531,
+ 0x40000340,
+ 0x40001200,
+ 0x40004000,
+ 0x40003200,
+ 0x22000531,
+ 0x40000300,
+ 0xd0076c00,
+ 0x220000c2,
+ 0xd0006400,
+ 0x220000c2,
+ 0xd9007000,
+ 0x20000478,
+ 0x48000000,
+ 0x20000478,
+ 0x24000000,
+ 0x40001200,
+ 0x40009800,
+ 0x22000531,
+ 0x40000700,
+ 0x40001a00,
+ 0x40009000,
+ 0x22000531,
+ 0x40000300,
+ 0xd80ce400,
+ 0x31000485,
+ 0x49c00000,
+ 0x20000486,
+ 0x48c00000,
+ 0x20000487,
+ 0x22000088,
+ 0x40000500,
+ 0xd00ce400,
+ 0x220000c2,
+ 0x40000580,
+ 0x4000a000,
+ 0x2200052d,
+ 0x40000740,
+ 0x4000a200,
+ 0x22000531,
+ 0x40000780,
+ 0x4000e800,
+ 0x4000b200,
+ 0x22000531,
+ 0x400006c0,
+ 0x40005000,
+ 0x40009a00,
+ 0x22000531,
+ 0x40000580,
+ 0x40005800,
+ 0x40009200,
+ 0x22000531,
+ 0x40000700,
+ 0xd81cb380,
+ 0x40007400,
+ 0x22000088,
+ 0x40000380,
+ 0xd01cb000,
+ 0x40000400,
+ 0x220000c2,
+ 0x400003c0,
+ 0x40007000,
+ 0x2200052d,
+ 0x400005c0,
+ 0x40009000,
+ 0x40009a00,
+ 0x22000531,
+ 0x40000700,
+ 0x4000ba00,
+ 0x22000531,
+ 0x40000740,
+ 0xd800dc00,
+ 0x22000088,
+ 0x4000a200,
+ 0x22000531,
+ 0x40000200,
+ 0x40004100,
+ 0x4200d800,
+ 0x40000400,
+ 0x220000c2,
+ 0xd01b0400,
+ 0x220000c2,
+ 0x40000500,
+ 0x4200ec00,
+ 0x220000c2,
+ 0x40000200,
+ 0xd8144000,
+ 0x40000400,
+ 0x22000088,
+ 0x40000200,
+ 0x40007000,
+ 0x22000531,
+ 0x40000500,
+ 0x40007800,
+ 0x4000f200,
+ 0x22000531,
+ 0xd8140740,
+ 0x4000ec00,
+ 0x22000088,
+ 0x40000740,
+ 0x6b00e800,
+ 0x338004da,
+ 0x336004d5,
+ 0x4000c200,
+ 0xd008e840,
+ 0x40000680,
+ 0x48200000,
+ 0x200004e0,
+ 0x4000c200,
+ 0xd008e800,
+ 0x40000740,
+ 0x48000000,
+ 0x200004e0,
+ 0x336004dd,
+ 0x48200000,
+ 0x200004e0,
+ 0x4000c200,
+ 0xd008e800,
+ 0x40000740,
+ 0x4100eb00,
+ 0x4000e000,
+ 0x4000f200,
+ 0x22000531,
+ 0x40000500,
+ 0x24000000,
+ 0x40009a00,
+ 0x40005800,
+ 0x22000531,
+ 0x42000400,
+ 0x220000c2,
+ 0x40000500,
+ 0x40001a00,
+ 0x40004000,
+ 0x2200052d,
+ 0x40000780,
+ 0x42000400,
+ 0x220000c2,
+ 0xd000f400,
+ 0x220000c2,
+ 0x40000580,
+ 0x40009800,
+ 0x2200052d,
+ 0x40003200,
+ 0x22000531,
+ 0xd0160200,
+ 0x40004400,
+ 0x220000c2,
+ 0x40000580,
+ 0x40001a00,
+ 0x40005800,
+ 0x22000531,
+ 0x400005c0,
+ 0x4000b000,
+ 0x2200052d,
+ 0x40000380,
+ 0x4000a000,
+ 0x4000ba00,
+ 0x22000531,
+ 0x400003c0,
+ 0x4000a000,
+ 0x2200052d,
+ 0x40000700,
+ 0x42007c00,
+ 0x220000c2,
+ 0x40000740,
+ 0x42000400,
+ 0x220000c2,
+ 0x400003c0,
+ 0x40007000,
+ 0xd8007c00,
+ 0x22000088,
+ 0x4000a200,
+ 0x22000531,
+ 0x40000400,
+ 0x40008100,
+ 0xd00fec00,
+ 0x220000c2,
+ 0xd8007400,
+ 0x22000088,
+ 0x4000b200,
+ 0x22000531,
+ 0x400003c0,
+ 0x40005800,
+ 0x2200052d,
+ 0x4000e200,
+ 0x22000531,
+ 0x42000400,
+ 0x220000c2,
+ 0xd80f0400,
+ 0x22000088,
+ 0x40000300,
+ 0x4000a000,
+ 0x4000e200,
+ 0x22000531,
+ 0x40000500,
+ 0x24000000,
+ 0x40000200,
+ 0xca004400,
+ 0x22000532,
+ 0x24000000,
+ 0xc8004400,
+ 0x46008200,
+ 0xd0088000,
+ 0x43000400,
+ 0x40000200,
+ 0xd4088000,
+ 0x24000000,
+ 0x220004e6,
+ 0x44000400,
+ 0x4000a000,
+ 0x22000047,
+ 0x40008000,
+ 0x400004c0,
+ 0x40002500,
+ 0x4000a0c0,
+ 0x40006500,
+ 0x4000a2c0,
+ 0x22000545,
+ 0x22000248,
+ 0x24000000,
+ 0x40001a00,
+ 0x40009800,
+ 0x22000531,
+ 0x40000400,
+ 0x400080c0,
+ 0x40005800,
+ 0x40009a00,
+ 0x22000531,
+ 0x400002c0,
+ 0x24000000,
+ 0x40001200,
+ 0x40004000,
+ 0x22000558,
+ 0x40000200,
+ 0x40004080,
+ 0x40005000,
+ 0x22000558,
+ 0x40000280,
+ 0x24000000,
+ 0x40009200,
+ 0x22000531,
+ 0x24000000,
+ 0x40004680,
+ 0x40000200,
+ 0x4000d800,
+ 0x22000568,
+ 0x40000400,
+ 0x4000d000,
+ 0x40008680,
+ 0x40000200,
+ 0x4000d800,
+ 0x22000568,
+ 0x4000d200,
+ 0x22000568,
+ 0x44000200,
+ 0xc8004400,
+ 0x46008780,
+ 0x4000f440,
+ 0xc910ca00,
+ 0x46004780,
+ 0xc808c000,
+ 0x46000780,
+ 0x4000f040,
+ 0xd0008200,
+ 0xd4018a00,
+ 0x40004400,
+ 0x200000c2,
+ 0xc8004400,
+ 0x46008200,
+ 0x40004440,
+ 0x24000000,
+ 0x2200057a,
+ 0x00000000,
+ 0x220003f1,
+ 0x49800000,
+ 0x22000371,
+ 0x3300038f,
+ 0x22000377,
+ 0x3300038f,
+ 0x44000480,
+ 0x440004c0,
+ 0x43000100,
+ 0x43000300,
+ 0x43000500,
+ 0xe007fbc0,
+ 0x33000589,
+ 0x22000479,
+ 0x33c0038f,
+ 0x40001700,
+ 0x40001f40,
+ 0x40002680,
+ 0x18000000,
+ 0x4000e000,
+ 0x40005040,
+ 0x40009080,
+ 0x4000e8c0,
+ 0x40005900,
+ 0x40009940,
+ 0x40006180,
+ 0x4000a1c0,
+ 0x19000000,
+ 0x51c03800,
+ 0x51e0f800,
+ 0x159b8000,
+ 0x334005a3,
+ 0x330005ab,
+ 0x40001700,
+ 0x4000e100,
+ 0x40005700,
+ 0x4000e300,
+ 0x40009700,
+ 0x4000e500,
+ 0x16980000,
+ 0x200005ac,
+ 0x40001f00,
+ 0x4000e100,
+ 0x40005f00,
+ 0x4000e300,
+ 0x40009f00,
+ 0x4000e500,
+ 0x169c0000,
+ 0x200005ac,
+ 0x16980000,
+ 0x11800000,
+ 0x16f00000,
+ 0x22000254,
+ 0x220004e6,
+ 0x22000254,
+ 0x68e03800,
+ 0x336005b6,
+ 0x68e0f800,
+ 0x336005bf,
+ 0x200005cc,
+ 0x68e0f800,
+ 0x336005c6,
+ 0x18000000,
+ 0x40000700,
+ 0x40000a80,
+ 0x40001480,
+ 0x19000000,
+ 0x4000e080,
+ 0x200005cd,
+ 0x18000000,
+ 0x40001f00,
+ 0x40002280,
+ 0x40002c80,
+ 0x19000000,
+ 0x4000e080,
+ 0x200005cd,
+ 0x4000d080,
+ 0x18000000,
+ 0x40003280,
+ 0x40003c80,
+ 0x19000000,
+ 0x200005cd,
+ 0x200005cf,
+ 0x22000479,
+ 0x33c0038f,
+ 0x28e005ae,
+ 0x22000254,
+ 0x44000400,
+ 0x40009800,
+ 0x08c00000,
+ 0x22000047,
+ 0x40008000,
+ 0x400004c0,
+ 0x22000545,
+ 0x22000248,
+ 0x24000000,
+};
+
+
+#endif
diff --git a/drivers/crypto/sifive-vic/vic-pka-tools.c b/drivers/crypto/sifive-vic/vic-pka-tools.c
new file mode 100644
index 0000000000000..44d2f094b75eb
--- /dev/null
+++ b/drivers/crypto/sifive-vic/vic-pka-tools.c
@@ -0,0 +1,364 @@
+/*
+ ******************************************************************************
+ * @file vic-pka-tools.c
+ * @author StarFive Technology
+ * @version V1.0
+ * @date 08/13/2020
+ * @brief
+ ******************************************************************************
+ * @copy
+ *
+ * THE PRESENT SOFTWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
+ * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE
+ * TIME. AS A RESULT, STARFIVE SHALL NOT BE HELD LIABLE FOR ANY
+ * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING
+ * FROM THE CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE
+ * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
+ *
+ * © COPYRIGHT 2020 Shanghai StarFive Technology Co., Ltd.
+ */
+
+#include "vic-sec.h"
+#include "vic-pka.h"
+#include "vic-pka-hw.h"
+
+#define MAX(a, b) (((a)>(b)) ? (a) : (b))
+
+/*
+ * Determine the base radix for the given operand size,
+ * ceiling(lg(size/8))
+ * where size > 16 bytes.
+ * Returns 0 if the size is invalid.
+ */
/*
 * Map an operand size in bytes to the PKA base radix code,
 * i.e. ceiling(lg(size/8)) for sizes strictly above 16 bytes.
 * Returns 0 for sizes the engine cannot handle (<= 16 or > 512).
 */
static unsigned elppka_base_radix(unsigned size)
{
	static const unsigned limits[] = { 32, 64, 128, 256, 512 };
	unsigned radix = 2;
	size_t i;

	if (size <= 16)
		return 0;

	for (i = 0; i < sizeof limits / sizeof limits[0]; i++, radix++) {
		if (size <= limits[i])
			return radix;
	}

	return 0;
}
+
+/*
+ * Helper to compute the operand page size, which depends only on the base
+ * radix.
+ */
/*
 * Operand page size in bytes for a given operand size; depends only on
 * the base radix (8 << radix).  Returns 0 when the size is invalid.
 */
static unsigned elppka_page_size(unsigned size)
{
	unsigned radix = elppka_base_radix(size);

	return radix ? 8u << radix : 0;
}
+
+/*
+ * Check that the given PKA operand index is valid for a particular bank and
+ * operand size. The bank and size values themselves are not validated.
+ */
+static int index_is_valid(const struct pka_config *cfg, unsigned bank,
+ unsigned index, unsigned size)
+{
+ unsigned ecc_max_bytes, rsa_max_bytes, abc_storage, d_storage;
+
+ ecc_max_bytes = cfg->ecc_size >> 3;
+ rsa_max_bytes = cfg->rsa_size >> 3;
+
+ if (size > ecc_max_bytes && size > rsa_max_bytes)
+ return 0;
+ if (index > 7)
+ return 0;
+
+ abc_storage = MAX(ecc_max_bytes*8, rsa_max_bytes*2);
+ d_storage = MAX(ecc_max_bytes*8, rsa_max_bytes*4);
+
+ if (bank == PKA_OPERAND_D) {
+ return index < d_storage / size;
+ } else {
+ return index < abc_storage / size;
+ }
+}
+
+/*
+ * Determine the offset (in 32-bit words) of a particular operand in the PKA
+ * memory map.
+ * Returns the (non-negative) offset on success, or -errno on failure.
+ */
+static int operand_base_offset(const struct pka_config *cfg, unsigned bank,
+ unsigned index, unsigned size)
+{
+ unsigned pagesize;
+ int ret;
+
+ pagesize = elppka_page_size(size);
+ if (!pagesize)
+ return CRYPTO_INVALID_SIZE;
+
+ if (!index_is_valid(cfg, bank, index, pagesize))
+ return CRYPTO_NOT_FOUND;
+
+ switch (bank) {
+ case PKA_OPERAND_A:
+ ret = PKA_OPERAND_A_BASE;
+ break;
+ case PKA_OPERAND_B:
+ ret = PKA_OPERAND_B_BASE;
+ break;
+ case PKA_OPERAND_C:
+ ret = PKA_OPERAND_C_BASE;
+ break;
+ case PKA_OPERAND_D:
+ ret = PKA_OPERAND_D_BASE;
+ break;
+ default:
+ return CRYPTO_INVALID_ARGUMENT;
+ }
+
+ return ret + index * (pagesize>>2);
+}
+
+/* Parse out the fields from a type-0 BUILD_CONF register in bc. */
+static void elppka_get_config_type0(uint32_t bc, struct pka_config *out)
+{
+ struct pka_config cfg = {0};
+
+ if (bc & (1ul << PKA_BC_FW_HAS_RAM)) {
+ cfg.fw_ram_size = 256u << ((bc >> PKA_BC_FW_RAM_SZ)
+ & ((1ul << PKA_BC_FW_RAM_SZ_BITS)-1));
+ }
+ if (bc & (1ul << PKA_BC_FW_HAS_ROM)) {
+ cfg.fw_rom_size = 256u << ((bc >> PKA_BC_FW_ROM_SZ)
+ & ((1ul << PKA_BC_FW_ROM_SZ_BITS)-1));
+ }
+
+ cfg.alu_size = 32u << ((bc >> PKA_BC_ALU_SZ)
+ & ((1ul << PKA_BC_ALU_SZ_BITS)-1));
+ cfg.rsa_size = 512u << ((bc >> PKA_BC_RSA_SZ)
+ & ((1ul << PKA_BC_RSA_SZ_BITS)-1));
+ cfg.ecc_size = 256u << ((bc >> PKA_BC_ECC_SZ)
+ & ((1ul << PKA_BC_ECC_SZ_BITS)-1));
+
+ *out = cfg;
+}
+
+/* Parse out the fields from a type-1 BUILD_CONF register in bc. */
+static void elppka_get_config_type1(uint32_t bc, struct pka_config *out)
+{
+ struct pka_config cfg = {0};
+ uint32_t tmp;
+
+ tmp = (bc >> PKA_BC1_FW_RAM_SZ) & ((1ul << PKA_BC1_FW_RAM_SZ_BITS)-1);
+ if (tmp)
+ cfg.fw_ram_size = 256u << (tmp-1);
+
+ tmp = (bc >> PKA_BC1_FW_ROM_SZ) & ((1ul << PKA_BC1_FW_ROM_SZ_BITS)-1);
+ if (tmp)
+ cfg.fw_rom_size = 256u << (tmp-1);
+
+ tmp = (bc >> PKA_BC1_RSA_SZ) & ((1ul << PKA_BC1_RSA_SZ_BITS)-1);
+ if (tmp)
+ cfg.rsa_size = 512u << (tmp-1);
+
+ tmp = (bc >> PKA_BC1_ECC_SZ) & ((1ul << PKA_BC1_ECC_SZ_BITS)-1);
+ if (tmp)
+ cfg.ecc_size = 256u << (tmp-1);
+
+ tmp = (bc >> PKA_BC1_ALU_SZ) & ((1ul << PKA_BC1_ALU_SZ_BITS)-1);
+ cfg.alu_size = 32u << tmp;
+
+ *out = cfg;
+}
+
+/* Read out PKA H/W configuration into config structure. */
+static int elppka_get_config(uint32_t *regs, struct pka_config *out)
+{
+ uint32_t bc = vic_pka_io_read32(®s[PKA_BUILD_CONF]);
+
+ unsigned type = bc >> PKA_BC_FORMAT_TYPE;
+ type &= (1ul << PKA_BC_FORMAT_TYPE_BITS) - 1;
+
+ switch (type) {
+ case 0:
+ elppka_get_config_type0(bc, out);
+ break;
+ case 1:
+ case 2: /* Type 2 has same format as type 1 */
+ elppka_get_config_type1(bc, out);
+ break;
+ }
+
+ /* RAM/ROM base addresses depend on core version */
+ if (type < 2) {
+ out->ram_offset = PKA_FIRMWARE_BASE;
+ out->rom_offset = PKA_FIRMWARE_BASE + out->fw_ram_size;
+ } else {
+ out->ram_offset = out->rom_offset = PKA_FIRMWARE_T2_BASE;
+ if (out->fw_ram_size)
+ out->rom_offset = PKA_FIRMWARE_T2_SPLIT;
+ }
+
+ return 0;
+}
+
+int elppka_start(struct pka_state *pka, uint32_t entry, uint32_t flags,
+ unsigned size)
+{
+ uint32_t ctrl, base;
+
+ base = elppka_base_radix(size);
+ if (!base)
+ return CRYPTO_INVALID_SIZE;
+
+ ctrl = base << PKA_CTRL_BASE_RADIX;
+
+ /* Handle ECC-521 oddities as a special case. */
+ if (size == PKA_ECC521_OPERAND_SIZE) {
+ flags |= 1ul << PKA_FLAG_F1;
+ ctrl |= PKA_CTRL_M521_ECC521 << PKA_CTRL_M521_MODE;
+
+ /* Round up partial radix to multiple of ALU size. */
+ size = (512 + pka->cfg.alu_size)/8;
+ }
+
+ ctrl |= (size & (size-1) ? (size+3)/4 : 0) << PKA_CTRL_PARTIAL_RADIX;
+ ctrl |= 1ul << PKA_CTRL_GO;
+
+ vic_pka_io_write32(&pka->regbase[PKA_INDEX_I], 0);
+ vic_pka_io_write32(&pka->regbase[PKA_INDEX_J], 0);
+ vic_pka_io_write32(&pka->regbase[PKA_INDEX_K], 0);
+ vic_pka_io_write32(&pka->regbase[PKA_INDEX_L], 0);
+
+ vic_pka_io_write32(&pka->regbase[PKA_F_STACK], 0);
+ vic_pka_io_write32(&pka->regbase[PKA_FLAGS], flags);
+ vic_pka_io_write32(&pka->regbase[PKA_ENTRY], entry);
+ vic_pka_io_write32(&pka->regbase[PKA_CTRL], ctrl);
+
+ vic_pka_io_write32(&pka->regbase[PKA_IRQ_EN], 1 << PKA_IRQ_EN_STAT);
+ pka->pka_done = 0;
+ pka->pka_err = 0;
+
+ return 0;
+}
+
+/* Request that the PKA engine abort the currently running operation. */
+void elppka_abort(struct pka_state *pka)
+{
+ vic_pka_io_write32(&pka->regbase[PKA_CTRL], 1 << PKA_CTRL_STOP_RQST);
+}
+
+int elppka_get_status(struct pka_state *pka, unsigned *code)
+{
+ uint32_t status = vic_pka_io_read32(&pka->regbase[PKA_RC]);
+
+ if (status & (1 << PKA_RC_BUSY)) {
+ return CRYPTO_INPROGRESS;
+ }
+
+ if (code) {
+ *code = (status >> PKA_RC_REASON) & ((1 << PKA_RC_REASON_BITS)-1);
+ }
+
+ return 0;
+}
+
+/*
+ * Copy an operand byte-string into PKA operand memory.  Words are written
+ * least-significant first (data's last 4 bytes land in word 0), and the
+ * unused tail of the operand page is zeroed.  Returns 0 on success or the
+ * CRYPTO_* error from operand_base_offset().
+ */
+int elppka_load_operand(struct pka_state *pka, unsigned bank, unsigned index,
+ unsigned size, const uint8_t *data)
+{
+ uint32_t *opbase, tmp;
+ unsigned i, n;
+ int rc;
+
+ rc = operand_base_offset(&pka->cfg, bank, index, size);
+ if (rc < 0)
+ return rc;
+
+ opbase = pka->regbase + rc;
+ n = size >> 2;
+
+ for (i = 0; i < n; i++) {
+ /*
+ * For lengths that are not a multiple of 4, the incomplete word is
+ * at the _start_ of the data buffer, so we must add the remainder.
+ */
+ memcpy(&tmp, data+((n-i-1)<<2)+(size&3), 4);
+ vic_pka_io_write32(&opbase[i], tmp);
+ }
+
+ /* Write the incomplete word, if any. */
+ if (size & 3) {
+ tmp = 0;
+ memcpy((char *)&tmp + sizeof tmp - (size&3), data, size & 3);
+ vic_pka_io_write32(&opbase[i++], tmp);
+ }
+
+ /* Zero the remainder of the operand. */
+ for (n = elppka_page_size(size) >> 2; i < n; i++) {
+ vic_pka_io_write32(&opbase[i], 0);
+ }
+
+ return 0;
+}
+
+/*
+ * Copy an operand out of PKA operand memory into data, inverting the word
+ * order used by elppka_load_operand(): word 0 becomes data's last 4 bytes,
+ * and a trailing partial word (size not a multiple of 4) fills the start
+ * of the buffer.  Returns 0 or the CRYPTO_* error from
+ * operand_base_offset().
+ */
+int elppka_unload_operand(struct pka_state *pka, unsigned bank, unsigned index,
+ unsigned size, uint8_t *data)
+{
+ uint32_t *opbase, tmp;
+ unsigned i, n;
+ int rc;
+
+ rc = operand_base_offset(&pka->cfg, bank, index, size);
+ if (rc < 0)
+ return rc;
+
+ opbase = pka->regbase + rc;
+ n = size >> 2;
+
+ for (i = 0; i < n; i++) {
+ tmp = vic_pka_io_read32(&opbase[i]);
+ memcpy(data+((n-i-1)<<2)+(size&3), &tmp, 4);
+ }
+
+ /* Read the incomplete word, if any, into the start of the buffer. */
+ if (size & 3) {
+ tmp = vic_pka_io_read32(&opbase[i]);
+ memcpy(data, (char *)&tmp + sizeof tmp - (size&3), size & 3);
+ }
+
+ return 0;
+}
+
+void elppka_set_byteswap(struct pka_state *pka, int swap)
+{
+ uint32_t val = vic_pka_io_read32(&pka->regbase[PKA_CONF]);
+
+ if (swap) {
+ val |= 1 << PKA_CONF_BYTESWAP;
+ } else {
+ val &= ~(1 << PKA_CONF_BYTESWAP);
+ }
+
+ vic_pka_io_write32(&pka->regbase[PKA_CONF], val);
+}
+
+int elppka_setup(struct pka_state *pka)
+{
+ const unsigned char big[4] = { 0x00, 0x11, 0x22, 0x33 };
+ const unsigned char little[4] = { 0x33, 0x22, 0x11, 0x00 };
+ uint32_t testval = 0x00112233;
+ int rc;
+
+ rc = elppka_get_config(pka->regbase, &pka->cfg);
+ if (rc < 0)
+ return rc;
+
+ /* Try to automatically determine byteswap setting */
+ if (!memcmp(&testval, big, sizeof testval)) {
+ elppka_set_byteswap(pka, 0);
+ } else if (!memcmp(&testval, little, sizeof testval)) {
+ elppka_set_byteswap(pka, 1);
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/sifive-vic/vic-pka.c b/drivers/crypto/sifive-vic/vic-pka.c
new file mode 100644
index 0000000000000..cafa0670b516b
--- /dev/null
+++ b/drivers/crypto/sifive-vic/vic-pka.c
@@ -0,0 +1,1314 @@
+/*
+ ******************************************************************************
+ * @file vic-pka.c
+ * @author StarFive Technology
+ * @version V1.0
+ * @date 08/13/2020
+ * @brief
+ ******************************************************************************
+ * @copy
+ *
+ * THE PRESENT SOFTWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
+ * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE
+ * TIME. AS A RESULT, STARFIVE SHALL NOT BE HELD LIABLE FOR ANY
+ * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING
+ * FROM THE CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE
+ * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
+ *
+ * © COPYRIGHT 2020 Shanghai StarFive Technology Co., Ltd.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "vic-sec.h"
+#include "vic-pka-hw.h"
+
+#define ERROR(fmt, ...) printk("ERROR %s() ln %d:" fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+
+/*
+ * Check whether the PKA engine has signalled completion or error.
+ * Returns 0 once pka_done or pka_err is set, -1 otherwise.
+ *
+ * NOTE(review): despite the name this polls exactly once — the original
+ * wait_cond_timeout() is commented out below, and the sdev->doing mutex is
+ * taken only around the flag read, presumably to serialize against the
+ * completion IRQ path.  Confirm callers tolerate the immediate -1.
+ */
+static int pka_wait(struct vic_sec_dev *sdev)
+{
+ struct pka_state *pka = &sdev->pka;
+ int ret = -1;
+ mutex_lock(&sdev->doing);
+ if(pka->pka_done || pka->pka_err)
+ ret = 0;
+ mutex_unlock(&sdev->doing);
+ return ret;
+ //return wait_cond_timeout(pka->pka_done || pka->pka_err, 10, 40000000);
+}
+
/*
 * Convenience wrappers around the elppka operand/run helpers.  Each macro
 * expands to a statement that updates the caller's local "rc" and returns
 * early from the enclosing function on failure (-EIO for load/unload, the
 * wait status for run).  PKA_LOAD/PKA_UNLOAD skip NULL operand pointers,
 * which callers use for optional parameters.
 *
 * FIX(review): PKA_LOAD/PKA_UNLOAD are now wrapped in do { } while (0) so
 * a trailing semicolon parses as one statement; the original bare
 * "if (para) { ... }" form had a dangling-else hazard when used inside an
 * if/else.  Expansion behavior is otherwise unchanged.
 *
 * NOTE(review): some call sites further down invoke PKA_RUN with four
 * arguments (no sdev); those sites are inside a conditional-compilation
 * region — confirm which macro variant they are meant to expand against.
 */
#define PKA_LOAD(pka, bank, index, size, para) \
	do { \
		if (para) { \
			rc = elppka_load_operand(pka, bank, index, size, para); \
			if (rc) { \
				ERROR("failed to load a param\r\n"); \
				return -EIO; \
			} \
		} \
	} while (0)

#define PKA_UNLOAD(pka, bank, index, size, para) \
	do { \
		if (para) { \
			rc = elppka_unload_operand(pka, bank, index, size, para); \
			if (rc) { \
				ERROR("failed to unload a param\r\n"); \
				return -EIO; \
			} \
		} \
	} while (0)

#define PKA_RUN(sdev, pka, func, flags, size) \
	do { \
		elppka_start(pka, func, flags, size); \
		rc = pka_wait(sdev); \
		if (rc) { \
			ERROR("failed\r\n"); \
			return rc; \
		} \
	} while (0)
+
+
+
+/**
+ Base Modular Arithmetic Library Functions
+
+ The Base Modular Arithmetic library suite provides a set of modular arithmetic operations commonly used by
+ cryptographic applications. These include Montgomery precomputation operations and other generic modular
+ operations.
+**/
+
+/*
+ * Montgomery precomputation step 1: load modulus m into D0, run
+ * PKA_CALC_R_INV, and unload R^-1 from C0 into rinv.  The PKA_* macros
+ * return early from this function on failure.
+ */
+static int vic_rsa_calc_rinv(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *m,
+ u8 *rinv) // C0
+{
+ struct vic_sec_dev *sdev = ctx->sdev;
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_RUN(sdev, pka, PKA_CALC_R_INV, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_C, 0, size, rinv);
+ return rc;
+}
+
+/*
+ * Montgomery precomputation step 2: with R^-1 (D2) and modulus m (D0)
+ * loaded, run PKA_CALC_MP and unload m' from D1 into mp.
+ */
+static int vic_rsa_calc_mp(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *rinv, // C0
+ const u8 *m, // D0
+ u8 *mp) // D1
+{
+ struct vic_sec_dev *sdev = ctx->sdev;
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_D, 2, size, rinv);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_RUN(sdev, pka, PKA_CALC_MP, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_D, 1, size, mp);
+ return rc;
+}
+
+/*
+ * Montgomery precomputation step 3: with R^-1 (C0), modulus m (D0) and
+ * m' (D1) loaded, run PKA_CALC_R_SQR and unload R^2 from D3 into rsqr.
+ */
+static int vic_rsa_calc_rsqr(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *rinv, // C0
+ const u8 *m, // D0
+ const u8 *mp, // D1
+ u8 *rsqr) // D3
+{
+ struct vic_sec_dev *sdev = ctx->sdev;
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_C, 0, size, rinv);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_LOAD(pka, PKA_OPERAND_D, 1, size, mp);
+ PKA_RUN(sdev, pka, PKA_CALC_R_SQR, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_D, 3, size, rsqr);
+ return rc;
+}
+
+
+static int vic_rsa_calc_modexp(struct vic_sec_ctx *ctx, const u8 *src, // A0
+ u32 full_width, // F0
+ u8 *dst,
+ int enc) // A0
+{
+ struct vic_sec_dev *sdev = ctx->sdev;
+ struct pka_state *pka = &ctx->sdev->pka;
+ struct vic_rsa_key *rsa_key = &ctx->rsa_key;
+ size_t size = rsa_key->key_sz;
+ int rc;
+
+ PKA_LOAD(pka, PKA_OPERAND_A, 0, size, src);
+ if(enc) {
+ PKA_LOAD(pka, PKA_OPERAND_D, 2, size, rsa_key->e);
+ } else {
+ PKA_LOAD(pka, PKA_OPERAND_D, 2, size, rsa_key->d);
+ }
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, rsa_key->n);
+ PKA_LOAD(pka, PKA_OPERAND_D, 1, size, rsa_key->mp);
+ PKA_LOAD(pka, PKA_OPERAND_D, 3, size, rsa_key->rsqr);
+ PKA_RUN(sdev, pka, PKA_MODEXP, (full_width&0x1)<sdev;
+ struct pka_state *pka = &ctx->sdev->pka;
+ struct vic_rsa_key *rsa_key = &ctx->rsa_key;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 2, size, msg_lo);
+ PKA_LOAD(pka, PKA_OPERAND_A, 3, size, msg_hi);
+ PKA_LOAD(pka, PKA_OPERAND_B, 2, size, rsa_key->p);
+ PKA_LOAD(pka, PKA_OPERAND_B, 3, size, rsa_key->q);
+ PKA_LOAD(pka, PKA_OPERAND_C, 2, size, rsa_key->qinv);
+ PKA_LOAD(pka, PKA_OPERAND_C, 3, size, rsa_key->dp);
+ PKA_LOAD(pka, PKA_OPERAND_D, 2, size, rsa_key->dq);
+ PKA_LOAD(pka, PKA_OPERAND_D, 5, size, rsa_key->rsqr_p);
+ PKA_LOAD(pka, PKA_OPERAND_D, 4, size, rsa_key->pmp);
+ PKA_LOAD(pka, PKA_OPERAND_D, 3, size, rsa_key->rsqr_q);
+ PKA_LOAD(pka, PKA_OPERAND_D, 6, size, rsa_key->qmp);
+ PKA_RUN(sdev, pka, PKA_CRT, (full_width&0x1)<sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 0, size, x);
+ PKA_LOAD(pka, PKA_OPERAND_B, 0, size, y);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_LOAD(pka, PKA_OPERAND_D, 1, size, mp);
+ PKA_LOAD(pka, PKA_OPERAND_D, 3, size, r_sqr);
+ PKA_RUN(pka, PKA_MODMULT, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_A, 0, size, c);
+ return rc;
+}
+
+/*
+ * Run the PKA_MODADD firmware entry: x -> A0, y -> B0, modulus m -> D0;
+ * the result is unloaded from A0 into c.
+ *
+ * NOTE(review): PKA_RUN is invoked with four arguments here but the macro
+ * above takes five (sdev first); this region sits inside a conditional
+ * block (see the #endif further down) — confirm which variant is intended.
+ */
+static int pka_modadd(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *x, // A0
+ const u8 *y, // B0
+ const u8 *m, // D0
+ u8 *c) // A0
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 0, size, x);
+ PKA_LOAD(pka, PKA_OPERAND_B, 0, size, y);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_RUN(pka, PKA_MODADD, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_A, 0, size, c);
+ return rc;
+}
+
+/*
+ * Run the PKA_MODSUB firmware entry: x -> A0, y -> B0, modulus m -> D0;
+ * the result is unloaded from A0 into c.
+ */
+static int pka_modsub(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *x, // A0
+ const u8 *y, // B0
+ const u8 *m, // D0
+ u8 *c) // A0
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 0, size, x);
+ PKA_LOAD(pka, PKA_OPERAND_B, 0, size, y);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_RUN(pka, PKA_MODSUB, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_A, 0, size, c);
+ return rc;
+}
+
+/*
+ * Run the PKA_REDUCE firmware entry: x -> C0, modulus m -> D0; the
+ * result is unloaded from A0 into c.
+ */
+static int pka_reduce(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *x, // C0
+ const u8 *m, // D0
+ u8 *c) // A0
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_C, 0, size, x);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_RUN(pka, PKA_REDUCE, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_A, 0, size, c);
+ return rc;
+}
+
+/*
+ * Run the PKA_MODDIV firmware entry: y -> C0, x -> A0, modulus m -> D0;
+ * the result is unloaded from C0 into c.
+ */
+static int pka_moddiv(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *y, // C0
+ const u8 *x, // A0
+ const u8 *m, // D0
+ u8 *c) // C0
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_C, 0, size, y);
+ PKA_LOAD(pka, PKA_OPERAND_A, 0, size, x);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_RUN(pka, PKA_MODDIV, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_C, 0, size, c);
+ return rc;
+}
+
+/*
+ * Run the PKA_MODINV firmware entry: x -> A0, modulus m -> D0; the
+ * result is unloaded from C0 into c.
+ */
+static int pka_modinv(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *x, // A0
+ const u8 *m, // D0
+ u8 *c) // C0
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 0, size, x);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_RUN(pka, PKA_MODINV, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_C, 0, size, c);
+ return rc;
+}
+
+/*
+ * Run the PKA_MULT firmware entry: a -> A0, b -> B0; the double-width
+ * product is unloaded from C0 (low half) and C1 (high half).
+ */
+static int pka_mult(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *a, // A0
+ const u8 *b, // B0
+ u8 *c_lo, // C0
+ u8 *c_hi) // C1
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 0, size, a);
+ PKA_LOAD(pka, PKA_OPERAND_B, 0, size, b);
+ PKA_RUN(pka, PKA_MULT, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_C, 0, size, c_lo);
+ PKA_UNLOAD(pka, PKA_OPERAND_C, 1, size, c_hi);
+ return rc;
+}
+
+/*
+ * Run the PKA_CRT_KEY_SETUP firmware entry on half-width (key_sz/2)
+ * operands: p -> D0, q -> B1, the two halves of d -> D1/D3; the derived
+ * CRT parameters are unloaded into dp (B1), dq (C0) and qinv (A3).
+ */
+static int vic_rsa_crt_key_setup(struct vic_sec_ctx *ctx) // A3
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ struct vic_rsa_key *rsa_key = &ctx->rsa_key;
+ size_t size = rsa_key->key_sz >> 1;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, rsa_key->p);
+ PKA_LOAD(pka, PKA_OPERAND_B, 1, size, rsa_key->q);
+ PKA_LOAD(pka, PKA_OPERAND_D, 1, size, rsa_key->d + size);
+ PKA_LOAD(pka, PKA_OPERAND_D, 3, size, rsa_key->d);
+ PKA_RUN(pka, PKA_CRT_KEY_SETUP, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_B, 1, size, rsa_key->dp);
+ PKA_UNLOAD(pka, PKA_OPERAND_C, 0, size, rsa_key->dq);
+ PKA_UNLOAD(pka, PKA_OPERAND_A, 3, size, rsa_key->qinv);
+ return rc;
+}
+
+/*
+ * Run the PKA_BIT_SERIAL_MOD firmware entry: x -> C0, modulus m -> D0;
+ * the result is unloaded from C0 into c.
+ */
+static int pka_rsa_bit_serial_mod(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *x, // C0
+ const u8 *m, // D0
+ u8 *c) // C0
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_C, 0, size, x);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_RUN(pka, PKA_BIT_SERIAL_MOD, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_C, 0, size, c);
+ return rc;
+}
+
+/*
+ * Double-precision variant of pka_rsa_bit_serial_mod: the operand halves
+ * go to C0 (low) and C1 (high), modulus m -> D0; the result is unloaded
+ * from C0 into c.
+ */
+static int pka_rsa_bit_serial_mod_dp(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *x_lo, // C0
+ const u8 *x_hi, // C1
+ const u8 *m, // D0
+ u8 *c) // C0
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_C, 0, size, x_lo);
+ PKA_LOAD(pka, PKA_OPERAND_C, 1, size, x_hi);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_RUN(pka, PKA_BIT_SERIAL_MOD_DP, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_C, 0, size, c);
+ return rc;
+}
+
+
+static int pka_ecc_pmult(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *px, // A2
+ const u8 *py, // B2
+ const u8 *a, // A6
+ const u8 *k, // D7
+ const u8 *w, // A7
+ const u8 *p, // D0
+ const u8 *pp, // D1
+ const u8 *r_sqr_p, // D3
+ u32 blinding, // F0
+ u32 is_a_m3, // F3
+ u8 *qx, // A2
+ u8 *qy) // B2
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ u32 flags;
+
+ PKA_LOAD(pka, PKA_OPERAND_A, 2, size, px);
+ PKA_LOAD(pka, PKA_OPERAND_B, 2, size, py);
+ PKA_LOAD(pka, PKA_OPERAND_A, 6, size, a);
+ PKA_LOAD(pka, PKA_OPERAND_D, 7, size, k);
+ PKA_LOAD(pka, PKA_OPERAND_A, 7, size, w);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, p);
+ PKA_LOAD(pka, PKA_OPERAND_D, 1, size, pp);
+ PKA_LOAD(pka, PKA_OPERAND_D, 3, size, r_sqr_p);
+ flags = ((blinding&0x1)<sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 2, size, px);
+ PKA_LOAD(pka, PKA_OPERAND_B, 2, size, py);
+ PKA_LOAD(pka, PKA_OPERAND_A, 3, size, qx);
+ PKA_LOAD(pka, PKA_OPERAND_B, 3, size, qy);
+ PKA_LOAD(pka, PKA_OPERAND_A, 6, size, a);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, p);
+ PKA_LOAD(pka, PKA_OPERAND_D, 1, size, pp);
+ PKA_LOAD(pka, PKA_OPERAND_D, 3, size, r_sqr_p);
+ PKA_RUN(pka, PKA_PADD, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_A, 2, size, rx);
+ PKA_UNLOAD(pka, PKA_OPERAND_B, 2, size, ry);
+ return rc;
+}
+
+/*
+ * Run the PKA_PDBL (ECC point double) firmware entry: P=(px,py) -> A3/B3,
+ * curve parameter a -> A6, prime p -> D0, p' -> D1, R^2 mod p -> D3; the
+ * doubled point Q is unloaded from A2/B2.
+ *
+ * NOTE(review): PKA_RUN is used with four arguments here (see the note at
+ * the macro definitions above).
+ */
+static int pka_ecc_pdbl(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *px, // A3
+ const u8 *py, // B3
+ const u8 *a, // A6
+ const u8 *p, // D0
+ const u8 *pp, // D1
+ const u8 *r_sqr_p, // D3
+ u8 *qx, // A2
+ u8 *qy) // B2
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 3, size, px);
+ PKA_LOAD(pka, PKA_OPERAND_B, 3, size, py);
+ PKA_LOAD(pka, PKA_OPERAND_A, 6, size, a);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, p);
+ PKA_LOAD(pka, PKA_OPERAND_D, 1, size, pp);
+ PKA_LOAD(pka, PKA_OPERAND_D, 3, size, r_sqr_p);
+ PKA_RUN(pka, PKA_PDBL, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_A, 2, size, qx);
+ PKA_UNLOAD(pka, PKA_OPERAND_B, 2, size, qy);
+ return rc;
+}
+
+/*
+ * Run the PKA_PVER (ECC point verify) firmware entry: P=(px,py) -> A2/B2,
+ * curve parameters a -> A6, b -> A7, prime p -> D0, p' -> D1,
+ * R^2 mod p -> D3.  When ok is non-NULL it receives the ZERO flag of the
+ * PKA_FLAGS register (1 when the point is on the curve).
+ */
+static int pka_ecc_pver(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *px, // A2
+ const u8 *py, // B2
+ const u8 *a, // A6
+ const u8 *b, // A7
+ const u8 *p, // D0
+ const u8 *pp, // D1
+ const u8 *r_sqr_p, // D3
+ u32 *ok) // Z
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 2, size, px);
+ PKA_LOAD(pka, PKA_OPERAND_B, 2, size, py);
+ PKA_LOAD(pka, PKA_OPERAND_A, 6, size, a);
+ PKA_LOAD(pka, PKA_OPERAND_A, 7, size, b);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, p);
+ PKA_LOAD(pka, PKA_OPERAND_D, 1, size, pp);
+ PKA_LOAD(pka, PKA_OPERAND_D, 3, size, r_sqr_p);
+ PKA_RUN(pka, PKA_PVER, 0, size);
+ if (ok) {
+ u32 flags = vic_pka_io_read32((void *)&pka->regbase[PKA_FLAGS]);
+ *ok = (flags>>PKA_FLAG_ZERO)&0x1;
+ }
+ return rc;
+}
+
+static int pka_ecc_shamir(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *px, // A2
+ const u8 *py, // B2
+ const u8 *qx, // A3
+ const u8 *qy, // B3
+ const u8 *a, // A6
+ const u8 *k, // A7
+ const u8 *l, // D7
+ const u8 *p, // D0
+ const u8 *pp, // D1
+ const u8 *r_sqr_p, // D3
+ u32 is_a_m3, // F3
+ u8 *rx, // A2
+ u8 *ry) // B2
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 2, size, px);
+ PKA_LOAD(pka, PKA_OPERAND_B, 2, size, py);
+ PKA_LOAD(pka, PKA_OPERAND_A, 3, size, qx);
+ PKA_LOAD(pka, PKA_OPERAND_B, 3, size, qy);
+ PKA_LOAD(pka, PKA_OPERAND_A, 6, size, a);
+ PKA_LOAD(pka, PKA_OPERAND_A, 7, size, k);
+ PKA_LOAD(pka, PKA_OPERAND_D, 7, size, l);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, p);
+ PKA_LOAD(pka, PKA_OPERAND_D, 1, size, pp);
+ PKA_LOAD(pka, PKA_OPERAND_D, 3, size, r_sqr_p);
+ PKA_RUN(pka, PKA_SHAMIR, (is_a_m3&0x1)<sdev->pka;
+ int rc;
+
+ PKA_LOAD(pka, PKA_OPERAND_A, 2, size, px);
+ PKA_LOAD(pka, PKA_OPERAND_B, 2, size, py);
+ PKA_LOAD(pka, PKA_OPERAND_A, 6, size, a);
+ PKA_LOAD(pka, PKA_OPERAND_D, 7, size, k);
+ PKA_LOAD(pka, PKA_OPERAND_A, 7, size, w);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, p);
+ PKA_RUN(pka, PKA_PMULT_521, (blinding&0x1)<sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 2, size, px);
+ PKA_LOAD(pka, PKA_OPERAND_B, 2, size, py);
+ PKA_LOAD(pka, PKA_OPERAND_A, 3, size, qx);
+ PKA_LOAD(pka, PKA_OPERAND_B, 3, size, qy);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, p);
+ PKA_RUN(pka, PKA_PADD_521, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_A, 2, size, rx);
+ PKA_UNLOAD(pka, PKA_OPERAND_B, 2, size, ry);
+ return rc;
+}
+
+/*
+ * ECC-521 variant of point doubling (PKA_PDBL_521); operand placement
+ * matches pka_ecc_pdbl: P -> A3/B3, a -> A6, p -> D0, p' -> D1,
+ * R^2 -> D3, result Q from A2/B2.
+ */
+static int pka_ecc_pdbl_521(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *px, // A3
+ const u8 *py, // B3
+ const u8 *a, // A6
+ const u8 *p, // D0
+ const u8 *pp, // D1
+ const u8 *r_sqr_p, // D3
+ u8 *qx, // A2
+ u8 *qy) // B2
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 3, size, px);
+ PKA_LOAD(pka, PKA_OPERAND_B, 3, size, py);
+ PKA_LOAD(pka, PKA_OPERAND_A, 6, size, a);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, p);
+ PKA_LOAD(pka, PKA_OPERAND_D, 1, size, pp);
+ PKA_LOAD(pka, PKA_OPERAND_D, 3, size, r_sqr_p);
+ PKA_RUN(pka, PKA_PDBL_521, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_A, 2, size, qx);
+ PKA_UNLOAD(pka, PKA_OPERAND_B, 2, size, qy);
+ return rc;
+}
+
+/*
+ * ECC-521 variant of point verification (PKA_PVER_521): P -> A2/B2,
+ * a -> A6, b -> A7, p -> D0.  When ok is non-NULL it receives the ZERO
+ * flag of PKA_FLAGS (1 when the point is on the curve).
+ */
+static int pka_ecc_pver_521(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *px, // A2
+ const u8 *py, // B2
+ const u8 *a, // A6
+ const u8 *b, // A7
+ const u8 *p, // D0
+ u32 *ok) // Z
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 2, size, px);
+ PKA_LOAD(pka, PKA_OPERAND_B, 2, size, py);
+ PKA_LOAD(pka, PKA_OPERAND_A, 6, size, a);
+ PKA_LOAD(pka, PKA_OPERAND_A, 7, size, b);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, p);
+ PKA_RUN(pka, PKA_PVER_521, 0, size);
+ if (ok) {
+ u32 flags = vic_pka_io_read32((void *)&pka->regbase[PKA_FLAGS]);
+ *ok = (flags>>PKA_FLAG_ZERO)&0x1;
+ }
+ return rc;
+}
+
+/*
+ * ECC-521 modular multiplication (PKA_MODMULT_521): x -> A0, y -> B0,
+ * modulus m -> D0; the result is unloaded from A0 into c.
+ */
+static int pka_ecc_modmult_521(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *x, // A0
+ const u8 *y, // B0
+ const u8 *m, // D0
+ u8 *c) // A0
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 0, size, x);
+ PKA_LOAD(pka, PKA_OPERAND_B, 0, size, y);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_RUN(pka, PKA_MODMULT_521, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_A, 0, size, c);
+ return rc;
+}
+
+/*
+ * ECC-521 Montgomery multiplication (PKA_M_521_MONTMULT): x -> A0,
+ * y -> B0, modulus m -> D0, m' -> D1, R^2 -> D3; the result is unloaded
+ * from A0 into c.
+ */
+static int pka_ecc_m_521_montmult(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *x, // A0
+ const u8 *y, // B0
+ const u8 *m, // D0
+ const u8 *mp, // D1
+ const u8 *r_sqr, // D3
+ u8 *c) // A0
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 0, size, x);
+ PKA_LOAD(pka, PKA_OPERAND_B, 0, size, y);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, m);
+ PKA_LOAD(pka, PKA_OPERAND_D, 1, size, mp);
+ PKA_LOAD(pka, PKA_OPERAND_D, 3, size, r_sqr);
+ PKA_RUN(pka, PKA_M_521_MONTMULT, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_A, 0, size, c);
+ return rc;
+}
+
+/*
+ * ECC-521 Shamir's-trick dual scalar multiplication (PKA_SHAMIR_521):
+ * computes R = k*P + l*Q with P -> A2/B2, Q -> A3/B3, a -> A6, k -> A7,
+ * l -> D7, p -> D0; R is unloaded from A2/B2.
+ */
+static int pka_ecc_shamir_521(struct vic_sec_ctx *ctx, u32 size,
+ const u8 *px, // A2
+ const u8 *py, // B2
+ const u8 *qx, // A3
+ const u8 *qy, // B3
+ const u8 *a, // A6
+ const u8 *k, // A7
+ const u8 *l, // D7
+ const u8 *p, // D0
+ u8 *rx, // A2
+ u8 *ry) // B2
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ int rc;
+ PKA_LOAD(pka, PKA_OPERAND_A, 2, size, px);
+ PKA_LOAD(pka, PKA_OPERAND_B, 2, size, py);
+ PKA_LOAD(pka, PKA_OPERAND_A, 3, size, qx);
+ PKA_LOAD(pka, PKA_OPERAND_B, 3, size, qy);
+ PKA_LOAD(pka, PKA_OPERAND_A, 6, size, a);
+ PKA_LOAD(pka, PKA_OPERAND_A, 7, size, k);
+ PKA_LOAD(pka, PKA_OPERAND_D, 7, size, l);
+ PKA_LOAD(pka, PKA_OPERAND_D, 0, size, p);
+ PKA_RUN(pka, PKA_SHAMIR_521, 0, size);
+ PKA_UNLOAD(pka, PKA_OPERAND_A, 2, size, rx);
+ PKA_UNLOAD(pka, PKA_OPERAND_B, 2, size, ry);
+ return rc;
+}
+
+static void pka_clear_state(struct vic_sec_ctx *ctx)
+{
+ struct pka_state *pka = &ctx->sdev->pka;
+ pka->pka_done = 0;
+ pka->pka_err = 0;
+}
+#endif
+/*
+ * Release every buffer held in @key and reset the structure.
+ *
+ * kfree(NULL) is a no-op, so the redundant NULL checks are gone. The
+ * final memset() clears all pointers (making a second call harmless)
+ * and resets key_sz/crt_mode.
+ *
+ * NOTE(review): d/p/q/dp/dq/qinv hold private key material; consider
+ * kfree_sensitive() so the bytes are wiped before being freed.
+ */
+static void vic_rsa_free_key(struct vic_rsa_key *key)
+{
+	kfree(key->d);
+	kfree(key->p);
+	kfree(key->q);
+	kfree(key->dp);
+	kfree(key->dq);
+	kfree(key->qinv);
+	kfree(key->rinv);
+	kfree(key->rinv_p);
+	kfree(key->rinv_q);
+	kfree(key->mp);
+	kfree(key->rsqr);
+	kfree(key->rsqr_p);
+	kfree(key->rsqr_q);
+	kfree(key->pmp);
+	kfree(key->qmp);
+	kfree(key->e);
+	kfree(key->n);
+	memset(key, 0, sizeof(*key));
+}
+
+/*
+ * Precompute the Montgomery constants needed for modular exponentiation
+ * with the modulus n: R^-1 (rinv), m' (mp) and R^2 mod n (rsqr).
+ *
+ * This is called once per akcipher request, so free any buffers left
+ * over from a previous request first -- the original leaked them by
+ * kzalloc'ing over live pointers. The buffers are ultimately released
+ * by vic_rsa_free_key().
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int vic_rsa_pre_cal(struct vic_sec_ctx *ctx)
+{
+	struct vic_rsa_key *rsa_key = &ctx->rsa_key;
+	size_t size = rsa_key->key_sz;
+	int ret = -ENOMEM;
+
+	/* Drop results of a previous precomputation (leak fix). */
+	kfree(rsa_key->rinv);
+	kfree(rsa_key->mp);
+	kfree(rsa_key->rsqr);
+
+	rsa_key->rinv = kzalloc(size, GFP_KERNEL);
+	rsa_key->mp = kzalloc(size, GFP_KERNEL);
+	rsa_key->rsqr = kzalloc(size, GFP_KERNEL);
+	if (!rsa_key->rinv || !rsa_key->mp || !rsa_key->rsqr)
+		goto err;
+
+	ret = vic_rsa_calc_rinv(ctx, size, rsa_key->n, rsa_key->rinv);
+	if (ret)
+		goto err;
+
+	ret = vic_rsa_calc_mp(ctx, size, rsa_key->rinv, rsa_key->n,
+			      rsa_key->mp);
+	if (ret)
+		goto err;
+
+	ret = vic_rsa_calc_rsqr(ctx, size, rsa_key->rinv, rsa_key->n,
+				rsa_key->mp, rsa_key->rsqr);
+ err:
+	return ret;
+}
+
+/*
+ * Precompute per-prime Montgomery constants for CRT decryption:
+ * R^-1, m' and R^2 for both p and q (each of half the key size).
+ *
+ * Called once per request; free any results from a previous request
+ * first -- the original leaked all six buffers on repeated calls.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int vic_rsa_pre_cal_crt(struct vic_sec_ctx *ctx)
+{
+	struct vic_rsa_key *rsa_key = &ctx->rsa_key;
+	size_t size = rsa_key->key_sz >> 1;
+	int ret = -ENOMEM;
+
+	/* Drop results of a previous precomputation (leak fix). */
+	kfree(rsa_key->rinv_p);
+	kfree(rsa_key->rinv_q);
+	kfree(rsa_key->pmp);
+	kfree(rsa_key->qmp);
+	kfree(rsa_key->rsqr_p);
+	kfree(rsa_key->rsqr_q);
+
+	rsa_key->rinv_p = kzalloc(size, GFP_KERNEL);
+	rsa_key->rinv_q = kzalloc(size, GFP_KERNEL);
+	rsa_key->pmp = kzalloc(size, GFP_KERNEL);
+	rsa_key->qmp = kzalloc(size, GFP_KERNEL);
+	rsa_key->rsqr_p = kzalloc(size, GFP_KERNEL);
+	rsa_key->rsqr_q = kzalloc(size, GFP_KERNEL);
+	if (!rsa_key->rinv_p || !rsa_key->rinv_q || !rsa_key->pmp ||
+	    !rsa_key->qmp || !rsa_key->rsqr_p || !rsa_key->rsqr_q)
+		goto err;
+
+	/* Constants for the prime p. */
+	ret = vic_rsa_calc_rinv(ctx, size, rsa_key->p, rsa_key->rinv_p);
+	if (ret)
+		goto err;
+	ret = vic_rsa_calc_mp(ctx, size, rsa_key->rinv_p, rsa_key->p,
+			      rsa_key->pmp);
+	if (ret)
+		goto err;
+	ret = vic_rsa_calc_rsqr(ctx, size, rsa_key->rinv_p, rsa_key->p,
+				rsa_key->pmp, rsa_key->rsqr_p);
+	if (ret)
+		goto err;
+
+	/* Constants for the prime q. */
+	ret = vic_rsa_calc_rinv(ctx, size, rsa_key->q, rsa_key->rinv_q);
+	if (ret)
+		goto err;
+	ret = vic_rsa_calc_mp(ctx, size, rsa_key->rinv_q, rsa_key->q,
+			      rsa_key->qmp);
+	if (ret)
+		goto err;
+	ret = vic_rsa_calc_rsqr(ctx, size, rsa_key->rinv_q, rsa_key->q,
+				rsa_key->qmp, rsa_key->rsqr_q);
+ err:
+	return ret;
+}
+
+/*
+ * Feed the request data through the PKA in key_sz-sized chunks.
+ *
+ * @enc: 1 for the public-key operation, 0 for the private-key one.
+ * Each chunk is copied from the source scatterlist into the bounce
+ * buffer (left-padded with zeros to key_sz), processed via CRT or
+ * plain modexp, and the result is copied to the output scatterlist.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int vic_rsa_enc_core(struct vic_sec_ctx *ctx, int enc)
+{
+	struct vic_sec_dev *sdev = ctx->sdev;
+	struct vic_sec_request_ctx *rctx = sdev->rctx;
+	struct vic_rsa_key *key = &ctx->rsa_key;
+	size_t total, count, data_offset;
+	ssize_t data_len;
+	int ret = 0;
+
+	rctx->offset = 0;
+	total = 0;
+
+	while (total < sdev->total_in) {
+		count = min(sdev->data_buf_len, sdev->total_in);
+		count = min(count, key->key_sz);
+		memset(sdev->data, 0, key->key_sz);
+		data_offset = key->key_sz - count;
+
+		data_len = vic_cryp_get_from_sg(rctx, rctx->offset, count,
+						data_offset);
+		/*
+		 * Fix: data_len was declared size_t, so this error check
+		 * could never fire; a signed type catches sg failures.
+		 */
+		if (data_len < 0)
+			return data_len;
+		if (data_len != (ssize_t)count)
+			return -EINVAL;
+
+		if (!enc && key->crt_mode) {
+			/* CRT path operates on half-size residues. */
+			size_t size = key->key_sz >> 1;
+
+			ret = vic_rsa_crt(ctx, size, sdev->data + size,
+					  sdev->data, 0,
+					  sdev->data + key->key_sz + size,
+					  sdev->data + key->key_sz);
+		} else {
+			ret = vic_rsa_calc_modexp(ctx, sdev->data, 0,
+						  sdev->data + key->key_sz,
+						  enc);
+		}
+		if (ret)
+			return ret;
+
+		/* Result lives just past the input in the bounce buffer. */
+		sg_copy_buffer(rctx->out_sg, sg_nents(rctx->out_sg),
+			       sdev->data + key->key_sz, key->key_sz,
+			       rctx->offset, 0);
+
+		rctx->offset += data_len;
+		total += data_len;
+	}
+
+	return ret;
+}
+
+/* akcipher .encrypt hook: public-key operation on req->src -> req->dst. */
+static int vic_rsa_enc(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct vic_sec_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct vic_sec_request_ctx *rctx = akcipher_request_ctx(req);
+	struct vic_rsa_key *key = &ctx->rsa_key;
+	int ret;
+
+	if (unlikely(!key->n || !key->e))
+		return -EINVAL;
+
+	if (req->dst_len < key->key_sz) {
+		req->dst_len = key->key_sz;
+		dev_err(ctx->sdev->dev, "Output buffer length less than parameter n\n");
+		return -EOVERFLOW;
+	}
+
+	/* Stash request state where the core loop can reach it. */
+	rctx->sg = req->src;
+	rctx->out_sg = req->dst;
+	rctx->sdev = ctx->sdev;
+	ctx->sdev->rctx = rctx;
+	ctx->sdev->total_in = req->src_len;
+	ctx->sdev->total_out = req->dst_len;
+
+	ret = vic_rsa_pre_cal(ctx);
+	if (ret)
+		return ret;
+
+	return vic_rsa_enc_core(ctx, 1);
+}
+
+/* akcipher .decrypt hook: private-key operation, using CRT when available. */
+static int vic_rsa_dec(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct vic_sec_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct vic_sec_request_ctx *rctx = akcipher_request_ctx(req);
+	struct vic_rsa_key *key = &ctx->rsa_key;
+	int ret;
+
+	if (unlikely(!key->n || !key->d))
+		return -EINVAL;
+
+	if (req->dst_len < key->key_sz) {
+		req->dst_len = key->key_sz;
+		dev_err(ctx->sdev->dev, "Output buffer length less than parameter n\n");
+		return -EOVERFLOW;
+	}
+
+	/* Stash request state where the core loop can reach it. */
+	rctx->sg = req->src;
+	rctx->out_sg = req->dst;
+	rctx->sdev = ctx->sdev;
+	ctx->sdev->rctx = rctx;
+	ctx->sdev->total_in = req->src_len;
+	ctx->sdev->total_out = req->dst_len;
+
+	/* CRT needs the per-prime constants, plain modexp the full-size ones. */
+	ret = key->crt_mode ? vic_rsa_pre_cal_crt(ctx) : vic_rsa_pre_cal(ctx);
+	if (ret)
+		return ret;
+
+	return vic_rsa_enc_core(ctx, 0);
+}
+
+/*
+ * Validate the RSA modulus length. @len is the key size in bytes.
+ * Returns 0 for a supported size, -EINVAL otherwise.
+ *
+ * Fix: the original returned -EINVAL through an unsigned long, turning
+ * the error into a huge positive value. Callers only test for non-zero,
+ * so switching the static function to int preserves their behavior
+ * while making the error code honest.
+ */
+static int vic_rsa_enc_fn_id(unsigned int len)
+{
+	switch (len << 3) { /* bytes -> bits */
+	//case 256:
+	case 512:
+	case 768:
+	case 1024:
+	case 1536:
+	case 2048:
+	case 3072:
+	case 4096:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Store the modulus n, stripped of leading zero bytes, and record the
+ * resulting key size. Returns 0, -EINVAL for unsupported sizes or
+ * -ENOMEM on allocation failure.
+ */
+static int vic_rsa_set_n(struct vic_rsa_key *rsa_key, const char *value,
+			 size_t vlen)
+{
+	const char *ptr = value;
+	int ret = -EINVAL;
+
+	/* Skip leading zeros so key_sz reflects the significant length. */
+	for (; vlen && !*ptr; ptr++, vlen--)
+		;
+
+	rsa_key->key_sz = vlen;
+	/* invalid key size provided */
+	if (vic_rsa_enc_fn_id(rsa_key->key_sz))
+		goto err;
+
+	ret = -ENOMEM;
+	rsa_key->n = kmemdup(ptr, rsa_key->key_sz, GFP_KERNEL);
+	if (!rsa_key->n)
+		goto err;
+
+	return 0;
+ err:
+	rsa_key->key_sz = 0;
+	rsa_key->n = NULL;
+	return ret;
+}
+
+/*
+ * Store the public exponent e, left-padded with zeros to key_sz bytes.
+ * Requires vic_rsa_set_n() to have established key_sz first.
+ */
+static int vic_rsa_set_e(struct vic_rsa_key *rsa_key, const char *value,
+			 size_t vlen)
+{
+	const char *ptr = value;
+
+	for (; vlen && !*ptr; ptr++, vlen--)
+		;
+
+	if (!rsa_key->key_sz || !vlen || vlen > rsa_key->key_sz) {
+		rsa_key->e = NULL;
+		return -EINVAL;
+	}
+
+	rsa_key->e = kzalloc(rsa_key->key_sz, GFP_KERNEL);
+	if (!rsa_key->e)
+		return -ENOMEM;
+
+	memcpy(rsa_key->e + (rsa_key->key_sz - vlen), ptr, vlen);
+	return 0;
+}
+
+/*
+ * Store the private exponent d, left-padded with zeros to key_sz bytes.
+ * Requires vic_rsa_set_n() to have established key_sz first.
+ */
+static int vic_rsa_set_d(struct vic_rsa_key *rsa_key, const char *value,
+			 size_t vlen)
+{
+	const char *ptr = value;
+	u8 *buf;
+
+	for (; vlen && !*ptr; ptr++, vlen--)
+		;
+
+	if (!rsa_key->key_sz || !vlen || vlen > rsa_key->key_sz) {
+		rsa_key->d = NULL;
+		return -EINVAL;
+	}
+
+	buf = kzalloc(rsa_key->key_sz, GFP_KERNEL);
+	if (!buf) {
+		rsa_key->d = NULL;
+		return -ENOMEM;
+	}
+
+	memcpy(buf + (rsa_key->key_sz - vlen), ptr, vlen);
+	rsa_key->d = buf;
+	return 0;
+}
+
+/* Advance *ptr past leading zero bytes, shrinking *len to match. */
+static void vic_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
+{
+	const char *p = *ptr;
+	unsigned int n = *len;
+
+	while (n && !*p) {
+		p++;
+		n--;
+	}
+	*ptr = p;
+	*len = n;
+}
+
+/*
+ * Copy one CRT parameter into a freshly allocated half-key-size buffer,
+ * left-padded with zeros. Returns NULL if the parameter is empty,
+ * longer than @half_key_sz (fix: the original had no bound check, so an
+ * oversized parameter underflowed the memcpy offset and overflowed the
+ * buffer) or the allocation fails.
+ */
+static u8 *vic_rsa_copy_crt_param(const u8 *value, size_t vlen,
+				  unsigned int half_key_sz)
+{
+	const char *ptr = (const char *)value;
+	unsigned int len = vlen;
+	u8 *buf;
+
+	vic_rsa_drop_leading_zeros(&ptr, &len);
+	if (!len || len > half_key_sz)
+		return NULL;
+
+	buf = kzalloc(half_key_sz, GFP_KERNEL);
+	if (!buf)
+		return NULL;
+
+	memcpy(buf + (half_key_sz - len), ptr, len);
+	return buf;
+}
+
+/*
+ * Import the CRT components (p, q, dp, dq, qinv) from @raw_key.
+ *
+ * On any failure every partially imported component is wiped, freed and
+ * reset to NULL, and crt_mode stays false so decryption falls back to
+ * the plain (non-CRT) modexp path.
+ */
+static void vic_rsa_setkey_crt(struct vic_rsa_key *rsa_key, struct rsa_key *raw_key)
+{
+	unsigned int half_key_sz = rsa_key->key_sz / 2;
+
+	rsa_key->p = vic_rsa_copy_crt_param(raw_key->p, raw_key->p_sz,
+					    half_key_sz);
+	if (!rsa_key->p)
+		goto err;
+
+	rsa_key->q = vic_rsa_copy_crt_param(raw_key->q, raw_key->q_sz,
+					    half_key_sz);
+	if (!rsa_key->q)
+		goto free_p;
+
+	rsa_key->dp = vic_rsa_copy_crt_param(raw_key->dp, raw_key->dp_sz,
+					     half_key_sz);
+	if (!rsa_key->dp)
+		goto free_q;
+
+	rsa_key->dq = vic_rsa_copy_crt_param(raw_key->dq, raw_key->dq_sz,
+					     half_key_sz);
+	if (!rsa_key->dq)
+		goto free_dp;
+
+	rsa_key->qinv = vic_rsa_copy_crt_param(raw_key->qinv, raw_key->qinv_sz,
+					       half_key_sz);
+	if (!rsa_key->qinv)
+		goto free_dq;
+
+	rsa_key->crt_mode = true;
+	return;
+
+	/* Unwind: wipe key material before freeing. */
+ free_dq:
+	memset(rsa_key->dq, '\0', half_key_sz);
+	kfree(rsa_key->dq);
+	rsa_key->dq = NULL;
+ free_dp:
+	memset(rsa_key->dp, '\0', half_key_sz);
+	kfree(rsa_key->dp);
+	rsa_key->dp = NULL;
+ free_q:
+	memset(rsa_key->q, '\0', half_key_sz);
+	kfree(rsa_key->q);
+	rsa_key->q = NULL;
+ free_p:
+	memset(rsa_key->p, '\0', half_key_sz);
+	kfree(rsa_key->p);
+	rsa_key->p = NULL;
+ err:
+	rsa_key->crt_mode = false;
+}
+
+/*
+ * Parse an ASN.1-encoded RSA key and populate ctx->rsa_key.
+ * On any failure all key material is freed again.
+ */
+static int vic_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+			  unsigned int keylen, bool private)
+{
+	struct vic_sec_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct vic_rsa_key *rsa_key = &ctx->rsa_key;
+	struct rsa_key raw_key = {NULL};
+	int ret;
+
+	/* Drop any previously loaded key material. */
+	vic_rsa_free_key(rsa_key);
+
+	ret = private ? rsa_parse_priv_key(&raw_key, key, keylen)
+		      : rsa_parse_pub_key(&raw_key, key, keylen);
+	if (ret < 0)
+		goto free;
+
+	ret = vic_rsa_set_n(rsa_key, raw_key.n, raw_key.n_sz);
+	if (ret < 0)
+		goto free;
+
+	ret = vic_rsa_set_e(rsa_key, raw_key.e, raw_key.e_sz);
+	if (ret < 0)
+		goto free;
+
+	if (private) {
+		ret = vic_rsa_set_d(rsa_key, raw_key.d, raw_key.d_sz);
+		if (ret < 0)
+			goto free;
+		/* Best effort: CRT import failure just disables crt_mode. */
+		vic_rsa_setkey_crt(rsa_key, &raw_key);
+	}
+
+	/* A usable key needs n and e; a private key additionally needs d. */
+	ret = -EINVAL;
+	if (!rsa_key->n || !rsa_key->e)
+		goto free;
+	if (private && !rsa_key->d)
+		goto free;
+
+	return 0;
+ free:
+	vic_rsa_free_key(rsa_key);
+	return ret;
+}
+
+/* akcipher .set_pub_key hook: parse and store a public key (n, e). */
+static int vic_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+	unsigned int keylen)
+{
+	return vic_rsa_setkey(tfm, key, keylen, false);
+}
+
+/* akcipher .set_priv_key hook: parse and store a private key (n, e, d, CRT). */
+static int vic_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+	unsigned int keylen)
+{
+	return vic_rsa_setkey(tfm, key, keylen, true);
+}
+
+/* akcipher .max_size hook: the modulus length in bytes (0 if no key set). */
+static unsigned int vic_rsa_max_size(struct crypto_akcipher *tfm)
+{
+	struct vic_sec_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	return ctx->rsa_key.key_sz;
+}
+
+/*
+ * Rewrite the PKA firmware image into the engine's instruction RAM.
+ * Returns 0 on success or -EINVAL if the image exceeds the RAM size.
+ */
+static int vic_pka_reload_firmware(struct pka_state *pka)
+{
+	u32 nwords = sizeof(PKA_FW) / sizeof(u32);
+	u32 *dst;
+	u32 i;
+
+	if (nwords > pka->cfg.fw_ram_size) {
+		ERROR("large firmware\r\n");
+		return -EINVAL;
+	}
+
+	dst = (u32 *)(pka->regbase + pka->cfg.ram_offset);
+	for (i = 0; i < nwords; i++)
+		vic_pka_io_write32(dst + i, PKA_FW[i]);
+
+	return 0;
+}
+
+/* Per session pkc's driver context creation function */
+/*
+ * Per-session init: bind a device, take exclusive ownership of the PKA
+ * (the mutex is held until vic_rsa_exit_tfm()), enable its clock and
+ * reload the firmware.
+ *
+ * Fix: the return value of vic_pka_reload_firmware() was silently
+ * ignored; now a failure releases the clock and the mutex and is
+ * propagated to the caller.
+ */
+static int vic_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct vic_sec_ctx *ctx = akcipher_tfm_ctx(tfm);
+	int ret;
+
+	//akcipher_set_reqsize(tfm, sizeof(struct vic_sec_request_ctx));
+	ctx->sdev = vic_sec_find_dev(ctx);
+	if (!ctx->sdev)
+		return -ENODEV;
+
+	mutex_lock(&ctx->sdev->lock);
+	vic_clk_enable(ctx->sdev, PKA_CLK);
+
+	ret = vic_pka_reload_firmware(&ctx->sdev->pka);
+	if (ret) {
+		vic_clk_disable(ctx->sdev, PKA_CLK);
+		mutex_unlock(&ctx->sdev->lock);
+	}
+
+	return ret;
+}
+
+/* Per session pkc's driver context cleanup function */
+/*
+ * Per-session teardown: free the key material, gate the PKA clock and
+ * release the device mutex taken in vic_rsa_init_tfm().
+ *
+ * NOTE(review): holding sdev->lock across the whole tfm lifetime
+ * serializes all PKA users at tfm granularity -- confirm this is the
+ * intended concurrency model.
+ */
+static void vic_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct vic_sec_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct vic_rsa_key *key = (struct vic_rsa_key *)&ctx->rsa_key;
+
+	vic_rsa_free_key(key);
+	vic_clk_disable(ctx->sdev,PKA_CLK);
+	mutex_unlock(&ctx->sdev->lock);
+	//vic_jr_free(ctx->dev);
+}
+
+/*
+ * Handle a PKA completion interrupt (called from the shared hard-IRQ
+ * handler when neither AES nor SHA is busy).
+ *
+ * Accumulates error indications from the frame-stack, status and
+ * return-code registers into pka->pka_err, acknowledges the IRQ and
+ * sets pka_done; the threaded handler then wakes the waiter.
+ *
+ * NOTE(review): the error flags are built as BIT(<register offset>),
+ * e.g. BIT(PKA_F_STACK) -- these only stay distinct if the register
+ * offsets are distinct small integers. Confirm the intended encoding.
+ */
+irqreturn_t vic_pka_irq_done(struct vic_sec_dev *sdev)
+{
+	struct pka_state *pka = &sdev->pka;
+	u32 status;
+
+	/* Any of the low four frame-stack bits set is treated as an error. */
+	status = vic_pka_io_read32((void *)&sdev->pka.regbase[PKA_F_STACK]);
+	if (status & 0xF) {
+		pka->pka_err |= BIT(PKA_F_STACK);
+	}
+
+	/* The IRQ bit should be set, since we were interrupted. */
+	status = vic_pka_io_read32((void *)&sdev->pka.regbase[PKA_STATUS]);
+	if (!(status & BIT(PKA_STAT_IRQ))) {
+		pka->pka_err |= BIT(PKA_STAT_IRQ);
+	}
+
+	/* Non-zero firmware return code in bits 23:16 indicates failure. */
+	status = vic_pka_io_read32((void *)&sdev->pka.regbase[PKA_RC]);
+	if (status & 0x00FF0000) {
+		pka->pka_err |= BIT(PKA_RC);
+	}
+
+	/* Acknowledge the interrupt by writing the IRQ bit back. */
+	vic_pka_io_write32(&sdev->pka.regbase[PKA_STATUS], BIT(PKA_STAT_IRQ));
+
+	/* If the bit is still set, the acknowledge did not take effect. */
+	status = vic_pka_io_read32((void *)&sdev->pka.regbase[PKA_STATUS]);
+	if (status & BIT(PKA_STAT_IRQ)) {
+		pka->pka_err |= BIT(PKA_STAT_IRQ);
+	}
+
+	pka->pka_done = 1;
+	/* Read-back of IRQ_EN; value is unused -- possibly a flush read. */
+	status = vic_pka_io_read32((void *)&sdev->pka.regbase[PKA_IRQ_EN]);
+
+	//up(&sdev->core_running);
+	return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Raw RSA akcipher registration. sign/verify reuse the plain
+ * private/public modexp paths -- no padding scheme is applied here,
+ * so this appears to be textbook RSA (confirm against users).
+ */
+static struct akcipher_alg vic_rsa = {
+	.encrypt = vic_rsa_enc,
+	.decrypt = vic_rsa_dec,
+	.sign = vic_rsa_dec,
+	.verify = vic_rsa_enc,
+	.set_pub_key = vic_rsa_set_pub_key,
+	.set_priv_key = vic_rsa_set_priv_key,
+	.max_size = vic_rsa_max_size,
+	.init = vic_rsa_init_tfm,
+	.exit = vic_rsa_exit_tfm,
+	.reqsize = sizeof(struct vic_sec_request_ctx),
+	.base = {
+		.cra_name = "rsa",
+		.cra_driver_name = "rsa-vic",
+		.cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
+			     CRYPTO_ALG_ASYNC,
+		.cra_priority = 3000,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct vic_sec_ctx),
+	},
+};
+
+/*
+ * One-time PKA bring-up: probe the hardware configuration, load the
+ * firmware image, then enable the status interrupt and the watchdog.
+ *
+ * Consistency fix: the firmware-load loop duplicated
+ * vic_pka_reload_firmware() verbatim; call it instead.
+ */
+int vic_pka_init(struct pka_state *pka)
+{
+	int ret;
+
+	ret = elppka_setup(pka);
+	if (ret)
+		return ret;
+
+	ret = vic_pka_reload_firmware(pka);
+	if (ret)
+		return ret;
+
+	vic_pka_io_write32(&pka->regbase[PKA_IRQ_EN], 1 << PKA_IRQ_EN_STAT);
+
+	/* Watchdog: abort a runaway firmware run after 1e8 cycles. */
+	vic_pka_io_write32(&pka->regbase[PKA_WATCHDOG], 100000000);
+
+	return 0;
+}
+
+/*
+ * Register the RSA akcipher with the crypto API.
+ * Fix: the bare printk() lacked a log level; use pr_err().
+ */
+int vic_pka_register_algs(void)
+{
+	int ret;
+
+	ret = crypto_register_akcipher(&vic_rsa);
+	if (ret)
+		pr_err("VIC RSA registration failed\n");
+
+	return ret;
+}
+
+/* Unregister the RSA akcipher; always returns 0. */
+int vic_pka_unregister_algs(void)
+{
+	crypto_unregister_akcipher(&vic_rsa);
+	return 0;
+}
diff --git a/drivers/crypto/sifive-vic/vic-pka.h b/drivers/crypto/sifive-vic/vic-pka.h
new file mode 100644
index 0000000000000..d1f2b1f7c1923
--- /dev/null
+++ b/drivers/crypto/sifive-vic/vic-pka.h
@@ -0,0 +1,175 @@
+#ifndef __VIC_PKA_H__
+#define __VIC_PKA_H__
+#include
+#include
+
+#include
+
+/* Status codes returned by the low-level (Elliptic/SDK-style) PKA library. */
+#define CRYPTO_OK ( 0)
+#define CRYPTO_FAILED ( -1)
+#define CRYPTO_INPROGRESS ( -2)
+#define CRYPTO_INVALID_HANDLE ( -3)
+#define CRYPTO_INVALID_CONTEXT ( -4)
+#define CRYPTO_INVALID_SIZE ( -5)
+#define CRYPTO_NOT_INITIALIZED ( -6)
+#define CRYPTO_NO_MEM ( -7)
+#define CRYPTO_INVALID_ALG ( -8)
+#define CRYPTO_INVALID_KEY_SIZE ( -9)
+#define CRYPTO_INVALID_ARGUMENT ( -10)
+#define CRYPTO_MODULE_DISABLED ( -11)
+#define CRYPTO_NOT_IMPLEMENTED ( -12)
+#define CRYPTO_INVALID_BLOCK_ALIGNMENT ( -13)
+#define CRYPTO_INVALID_MODE ( -14)
+#define CRYPTO_INVALID_KEY ( -15)
+#define CRYPTO_AUTHENTICATION_FAILED ( -16)
+#define CRYPTO_INVALID_IV_SIZE ( -17)
+#define CRYPTO_MEMORY_ERROR ( -18)
+#define CRYPTO_LAST_ERROR ( -19)
+#define CRYPTO_HALTED ( -20)
+#define CRYPTO_TIMEOUT ( -21)
+#define CRYPTO_SRM_FAILED ( -22)
+/* Codes at or below this value are module-specific. */
+#define CRYPTO_COMMON_ERROR_MAX (-100)
+#define CRYPTO_INVALID_ICV_KEY_SIZE (-100)
+#define CRYPTO_INVALID_PARAMETER_SIZE (-101)
+#define CRYPTO_SEQUENCE_OVERFLOW (-102)
+#define CRYPTO_DISABLED (-103)
+#define CRYPTO_INVALID_VERSION (-104)
+#define CRYPTO_FATAL (-105)
+#define CRYPTO_INVALID_PAD (-106)
+#define CRYPTO_FIFO_FULL (-107)
+#define CRYPTO_INVALID_SEQUENCE (-108)
+#define CRYPTO_INVALID_FIRMWARE (-109)
+#define CRYPTO_NOT_FOUND (-110)
+#define CRYPTO_CMD_FIFO_INACTIVE (-111)
+
+/* Firmware entry-point indices passed to PKA_RUN()/elppka_start(). */
+enum PKA_ENTRY_E {
+	PKA_MODMULT = 10, //0x0a
+	PKA_MODADD = 11, //0x0b
+	PKA_MODSUB = 12, //0x0c
+	PKA_MODDIV = 13, //0x0d
+	PKA_MODINV = 14, //0x0e
+	PKA_REDUCE = 15, //0x0f
+	PKA_CALC_MP = 16, //0x10
+	PKA_CALC_R_INV = 17, //0x11
+	PKA_CALC_R_SQR = 18, //0x12
+	PKA_MULT = 19, //0x13
+	PKA_MODEXP = 20, //0x14
+	PKA_CRT_KEY_SETUP = 21, //0x15
+	PKA_CRT = 22, //0x16
+	PKA_BIT_SERIAL_MOD_DP = 23, //0x17
+	PKA_BIT_SERIAL_MOD = 24, //0x18
+	/* Elliptic-curve point operations. */
+	PKA_PMULT = 25, //0x19
+	PKA_PDBL = 26, //0x1a
+	PKA_PDBL_STD_PRJ = 27, //0x1b
+	PKA_PADD = 28, //0x1c
+	PKA_PADD_STD_PRJ = 29, //0x1d
+	PKA_PVER = 30, //0x1e
+	PKA_STD_PRJ_TO_AFFINE = 31, //0x1f
+	PKA_IS_P_EQUAL_Q = 32, //0x20
+	PKA_IS_P_REFLECT_Q = 33, //0x21
+	PKA_IS_A_M3 = 34, //0x22
+	PKA_SHAMIR = 35, //0x23
+	/* P-521-specific variants. */
+	PKA_PMULT_521 = 36, //0x24
+	PKA_PDBL_521 = 37, //0x25
+	PKA_PADD_521 = 38, //0x26
+	PKA_PVER_521 = 39, //0x27
+	PKA_M_521_MONTMULT = 40, //0x28
+	PKA_MODMULT_521 = 41, //0x29
+	PKA_SHAMIR_521 = 42, //0x2a
+};
+
+/* Runtime state of one PKA instance. */
+struct pka_state {
+	u32 *regbase;	/* mapped register window (PKA offset inside secmem) */
+
+	/* Hardware/firmware geometry discovered by elppka_setup(). */
+	struct pka_config {
+		unsigned alu_size, rsa_size, ecc_size;
+		unsigned fw_ram_size, fw_rom_size;
+		unsigned ram_offset, rom_offset;
+	} cfg;
+	uint32_t pka_done;	/* set to 1 by vic_pka_irq_done() on completion */
+	uint32_t pka_err;	/* error flags accumulated by vic_pka_irq_done() */
+};
+
+/* Parsed PKA firmware image (see elppka_fw_parse()/elppka_fw_load()). */
+struct pka_fw {
+	unsigned long ram_size, rom_size;
+	const char *errmsg;
+
+	/* Metadata tag found in the RAM/ROM halves of the image. */
+	struct pka_fw_tag {
+		unsigned long origin, tag_length, timestamp, md5_coverage;
+		unsigned char md5[16];
+	} ram_tag, rom_tag;
+
+	/* For internal use */
+	struct elppka_fw_priv *priv;
+};
+
+/*
+ * RSA key material plus hardware precomputation buffers. All big-number
+ * buffers are big-endian, key_sz bytes (full-size values) or key_sz/2
+ * bytes (per-prime CRT values).
+ */
+struct vic_rsa_key {
+	u8 *n;		/* modulus */
+	u8 *e;		/* public exponent (zero-padded to key_sz) */
+	u8 *d;		/* private exponent (zero-padded to key_sz) */
+	u8 *p;		/* CRT prime p */
+	u8 *q;		/* CRT prime q */
+	u8 *dp;		/* d mod (p-1) */
+	u8 *dq;		/* d mod (q-1) */
+	u8 *qinv;	/* q^-1 mod p */
+	/* Montgomery constants computed per request by vic_rsa_pre_cal*(). */
+	u8 *rinv;
+	u8 *rinv_p;
+	u8 *rinv_q;
+	u8 *mp;
+	u8 *rsqr;
+	u8 *rsqr_p;
+	u8 *rsqr_q;
+	u8 *pmp;
+	u8 *qmp;
+	size_t key_sz;	/* modulus length in bytes */
+	bool crt_mode;	/* true when p/q/dp/dq/qinv were all imported */
+};
+
+/* Operand bank selectors for PKA load/unload operations. */
+enum {
+	PKA_OPERAND_A,
+	PKA_OPERAND_B,
+	PKA_OPERAND_C,
+	PKA_OPERAND_D,
+	PKA_OPERAND_MAX
+};
+
+/*
+ * MMIO accessors for PKA registers.
+ * NOTE(review): @addr should arguably be void __iomem * for sparse
+ * correctness -- callers currently cast from plain u32 pointers.
+ */
+static inline void vic_pka_io_write32(void *addr, unsigned long val)
+{
+	writel(val,addr);
+}
+
+static inline unsigned int vic_pka_io_read32(void *addr)
+{
+	return readl(addr);
+}
+
+/* Probe hardware configuration into pka->cfg. */
+int elppka_setup(struct pka_state *pka);
+
+/* Kick a firmware entry point; completion is signalled by interrupt. */
+int elppka_start(struct pka_state *pka, uint32_t entry, uint32_t flags,
+	unsigned size);
+void elppka_abort(struct pka_state *pka);
+int elppka_get_status(struct pka_state *pka, unsigned *code);
+
+/* Move big-number operands in and out of the PKA banks. */
+int elppka_load_operand(struct pka_state *pka, unsigned bank, unsigned index,
+	unsigned size, const uint8_t *data);
+int elppka_unload_operand(struct pka_state *pka, unsigned bank, unsigned index,
+	unsigned size, uint8_t *data);
+
+void elppka_set_byteswap(struct pka_state *pka, int swap);
+
+/* Firmware image handling */
+int elppka_fw_parse(struct pka_fw *fw, const unsigned char *data,
+	unsigned long len);
+void elppka_fw_free(struct pka_fw *fw);
+
+int elppka_fw_lookup_entry(struct pka_fw *fw, const char *entry);
+
+int elppka_fw_load(struct pka_state *pka, struct pka_fw *fw);
+
+/* The firmware timestamp epoch (2009-11-11 11:00:00Z) as a UNIX timestamp. */
+#define PKA_FW_TS_EPOCH 1257937200ull
+
+/* Resolution of the timestamp, in seconds. */
+#define PKA_FW_TS_RESOLUTION 20
+
+#endif
diff --git a/drivers/crypto/sifive-vic/vic-sec.c b/drivers/crypto/sifive-vic/vic-sec.c
new file mode 100644
index 0000000000000..7182825ddec78
--- /dev/null
+++ b/drivers/crypto/sifive-vic/vic-sec.c
@@ -0,0 +1,320 @@
+/*
+ ******************************************************************************
+ * @file vic-sec.c
+ * @author StarFive Technology
+ * @version V1.0
+ * @date 08/13/2020
+ * @brief
+ ******************************************************************************
+ * @copy
+ *
+ * THE PRESENT SOFTWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
+ * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE
+ * TIME. AS A RESULT, STARFIVE SHALL NOT BE HELD LIABLE FOR ANY
+ * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING
+ * FROM THE CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE
+ * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
+ *
+ * © COPYRIGHT 2020 Shanghai StarFive Technology Co., Ltd.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include "vic-sec.h"
+
+#define DRIVER_NAME "vic-sec"
+
+#define CRYP_AUTOSUSPEND_DELAY 50
+
+/* Global registry of probed secure-engine devices. */
+struct vic_dev_list {
+	struct list_head dev_list;
+	spinlock_t lock; /* protect dev_list */
+};
+
+static struct vic_dev_list dev_list = {
+	.dev_list = LIST_HEAD_INIT(dev_list.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(dev_list.lock),
+};
+
+/*
+ * Return the secure-engine device bound to @ctx, binding the first
+ * registered device on first use. Serialized by dev_list.lock.
+ * Returns NULL if no device has been probed yet.
+ */
+struct vic_sec_dev *vic_sec_find_dev(struct vic_sec_ctx *ctx)
+{
+	struct vic_sec_dev *sdev;
+
+	spin_lock_bh(&dev_list.lock);
+	sdev = ctx->sdev;
+	if (!sdev) {
+		/* Idiom: take the first list entry (or NULL) directly
+		 * instead of a one-iteration list_for_each_entry(). */
+		sdev = list_first_entry_or_null(&dev_list.dev_list,
+						struct vic_sec_dev, list);
+		ctx->sdev = sdev;
+	}
+	spin_unlock_bh(&dev_list.lock);
+
+	return sdev;
+}
+
+/*
+ * Threaded half of the IRQ: release the 'doing' mutex so the submitter
+ * blocked on it can continue.
+ *
+ * NOTE(review): unlocking a mutex from a context other than the locker
+ * violates mutex ownership rules (lockdep will complain); a completion
+ * would be the conventional primitive -- confirm how 'doing' is taken.
+ */
+static irqreturn_t vic_cryp_irq_thread(int irq, void *arg)
+{
+	struct vic_sec_dev *sdev = (struct vic_sec_dev *) arg;
+
+	mutex_unlock(&sdev->doing);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Shared hard-IRQ handler for the secure engine. AES/SHA completions
+ * latch and acknowledge the status register and defer to the threaded
+ * handler; otherwise the interrupt is treated as a PKA completion.
+ *
+ * NOTE(review): the busy flags are tested on the value of sdev->status
+ * from *before* this interrupt (it is refreshed only inside the
+ * branch); this relies on the AES/SHA submission paths priming
+ * sdev->status before starting the hardware -- confirm against them.
+ */
+static irqreturn_t vic_cryp_irq(int irq, void *arg)
+{
+	struct vic_sec_dev *sdev = (struct vic_sec_dev *) arg;
+	irqreturn_t ret = IRQ_WAKE_THREAD;
+
+	if(sdev->status.aes_busy || sdev->status.sha_busy) {
+		/* Latch and acknowledge (write-back) the engine status. */
+		sdev->status.v = readl(sdev->io_base + SEC_STATUS_REG);
+		writel(sdev->status.v, sdev->io_base + SEC_STATUS_REG);
+	} else {
+		ret = vic_pka_irq_done(sdev);
+	}
+
+	return ret;
+}
+/* Devicetree match table. */
+static const struct of_device_id vic_dt_ids[] = {
+	{ .compatible = "starfive,vic-sec", .data = NULL},
+	{},
+};
+MODULE_DEVICE_TABLE(of, vic_dt_ids);
+
+extern void vic_hash_test(struct vic_sec_dev *sdev);
+
+/*
+ * Probe: map the register windows, request the IRQ, enable the clock,
+ * allocate the bounce buffer, start the crypto engine and register the
+ * SHA/AES/PKA algorithms.
+ *
+ * Fixes:
+ *  - page-allocation failure returned -EFAULT and leaked the dev_list
+ *    entry and the enabled clock; now returns -ENOMEM and unwinds.
+ *  - the err_algs_sha label was placed after crypto_engine_exit(), so a
+ *    hash-registration failure leaked the crypto engine; labels are now
+ *    in strict reverse order of acquisition.
+ */
+static int vic_cryp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct vic_sec_dev *sdev;
+	struct resource *res;
+	int irq, ret;
+	int pages = 0;
+
+	sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
+	if (!sdev)
+		return -ENOMEM;
+
+	sdev->dev = dev;
+	mutex_init(&sdev->lock);
+	mutex_init(&sdev->doing);
+
+	/* Main secure-engine register window. */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secmem");
+	if (!res) {
+		dev_err(dev, "couldn't get secmem resource\n");
+		return -ENXIO;
+	}
+	sdev->io_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(sdev->io_base))
+		return PTR_ERR(sdev->io_base);
+
+	/* Per-block clock-gate registers. */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secclk");
+	if (!res) {
+		dev_err(dev, "couldn't get secclk resource\n");
+		return -ENXIO;
+	}
+	sdev->clk_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(sdev->clk_base))
+		return PTR_ERR(sdev->clk_base);
+
+	/* The PKA sits at a fixed offset inside the secmem window. */
+	sdev->pka.regbase = sdev->io_base + PKA_IO_BASE_OFFSET;
+
+	/* pka irq handle check */
+	sdev->status.v = 0;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "Cannot get IRQ resource\n");
+		return irq;
+	}
+
+	ret = devm_request_threaded_irq(dev, irq, vic_cryp_irq,
+					vic_cryp_irq_thread, IRQF_ONESHOT,
+					dev_name(dev), sdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get interrupt working.\n");
+		return ret;
+	}
+
+	sdev->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(sdev->clk)) {
+		dev_err(dev, "Could not get clock\n");
+		return PTR_ERR(sdev->clk);
+	}
+
+	ret = clk_prepare_enable(sdev->clk);
+	if (ret) {
+		dev_err(sdev->dev, "Failed to enable clock\n");
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, sdev);
+
+	spin_lock(&dev_list.lock);
+	list_add(&sdev->list, &dev_list.dev_list);
+	spin_unlock(&dev_list.lock);
+
+	/* Bounce buffer shared by the AES/SHA/PKA data paths. */
+	pages = get_order(VIC_AES_MSG_RAM_SIZE);
+	sdev->data = (void *)__get_free_pages(GFP_KERNEL, pages);
+	if (!sdev->data) {
+		dev_err(sdev->dev, "Can't allocate pages when unaligned\n");
+		ret = -ENOMEM;
+		goto err_pages;
+	}
+	sdev->data_buf_len = VIC_AES_BUF_RAM_SIZE;
+	sdev->pages_count = pages;
+
+	/* Initialize crypto engine */
+	sdev->engine = crypto_engine_alloc_init(dev, 1);
+	if (!sdev->engine) {
+		ret = -ENOMEM;
+		goto err_engine;
+	}
+
+	ret = crypto_engine_start(sdev->engine);
+	if (ret)
+		goto err_engine_start;
+
+	ret = vic_hash_register_algs();
+	if (ret)
+		goto err_algs_sha;
+
+	vic_clk_enable(sdev, AES_CLK);
+	ret = vic_aes_register_algs();
+	vic_clk_disable(sdev, AES_CLK);
+	if (ret) {
+		dev_err(dev, "Could not register algs\n");
+		goto err_algs_aes;
+	}
+
+	vic_clk_enable(sdev, PKA_CLK);
+	ret = vic_pka_init(&sdev->pka);
+	if (ret) {
+		vic_clk_disable(sdev, PKA_CLK);
+		dev_err(dev, "pka init error\n");
+		goto err_pka_init;
+	}
+
+	ret = vic_pka_register_algs();
+	vic_clk_disable(sdev, PKA_CLK);
+	if (ret) {
+		dev_err(dev, "Could not register algs\n");
+		goto err_algs_pka;
+	}
+
+	dev_info(dev, "Initialized\n");
+	return 0;
+
+	/* Unwind in strict reverse order of acquisition. */
+err_algs_pka:
+err_pka_init:
+	vic_aes_unregister_algs();
+err_algs_aes:
+	vic_hash_unregister_algs();
+err_algs_sha:
+err_engine_start:
+	crypto_engine_exit(sdev->engine);
+err_engine:
+	free_pages((unsigned long)sdev->data, pages);
+err_pages:
+	spin_lock(&dev_list.lock);
+	list_del(&sdev->list);
+	spin_unlock(&dev_list.lock);
+	clk_disable_unprepare(sdev->clk);
+
+	return ret;
+}
+
+/* Remove: tear everything down in reverse order of probe. */
+static int vic_cryp_remove(struct platform_device *pdev)
+{
+	struct vic_sec_dev *sdev = platform_get_drvdata(pdev);
+
+	if (!sdev)
+		return -ENODEV;
+
+	vic_pka_unregister_algs();
+	vic_aes_unregister_algs();
+	vic_hash_unregister_algs();
+	crypto_engine_exit(sdev->engine);
+	free_pages((unsigned long)sdev->data, sdev->pages_count);
+
+	spin_lock(&dev_list.lock);
+	list_del(&sdev->list);
+	spin_unlock(&dev_list.lock);
+
+	clk_disable_unprepare(sdev->clk);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* Runtime-PM suspend: gate the module clock. */
+static int vic_cryp_runtime_suspend(struct device *dev)
+{
+	struct vic_sec_dev *sdev = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(sdev->clk);
+	return 0;
+}
+
+/* Runtime-PM resume: ungate the module clock. */
+static int vic_cryp_runtime_resume(struct device *dev)
+{
+	struct vic_sec_dev *sdev = dev_get_drvdata(dev);
+	int err;
+
+	err = clk_prepare_enable(sdev->clk);
+	if (err)
+		dev_err(sdev->dev, "Failed to prepare_enable clock\n");
+
+	return err;
+}
+#endif
+
+/* System sleep is handled via runtime-PM force suspend/resume. */
+static const struct dev_pm_ops vic_cryp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(vic_cryp_runtime_suspend,
+			   vic_cryp_runtime_resume, NULL)
+};
+
+/* Platform driver glue. */
+static struct platform_driver vic_cryp_driver = {
+	.probe = vic_cryp_probe,
+	.remove = vic_cryp_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.pm = &vic_cryp_pm_ops,
+		.of_match_table = vic_dt_ids,
+	},
+};
+
+module_platform_driver(vic_cryp_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huan Feng ");
+MODULE_DESCRIPTION("Starfive VIC CRYP SHA and AES driver");
diff --git a/drivers/crypto/sifive-vic/vic-sec.h b/drivers/crypto/sifive-vic/vic-sec.h
new file mode 100644
index 0000000000000..e69645c6b7ff8
--- /dev/null
+++ b/drivers/crypto/sifive-vic/vic-sec.h
@@ -0,0 +1,450 @@
+#ifndef __VIC_SEC_H__
+#define __VIC_SEC_H__
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "vic-pka.h"
+
+#define SEC_IE_REG 0x00
+#define SEC_STATUS_REG 0x04
+
+#define MAX_KEY_SIZE SHA512_BLOCK_SIZE
+
+#define AES_ABLK 1
+#define AES_AEAD 2
+
+#define SHA_CLK 1
+#define AES_CLK 2
+#define PKA_CLK 3
+
+#define AES_CLK_OFFSET 0x0
+#define SHA_CLK_OFFSET 0x4
+#define PKA_CLK_OFFSET 0x8
+
+#define HASH_OP_UPDATE 1
+#define HASH_OP_FINAL 2
+
+#define CFG_REGS_LEN 32
+
+#define PKA_IO_BASE_OFFSET (32*1024)
+
+union vic_sec_ie {
+ u32 v;
+ struct {
+ u32 sec_done_ie :1 ;
+ u32 mac_valid_ie :1 ;
+ u32 rsvd_0 :30;
+ };
+};
+
+union vic_sec_status {
+ u32 v;
+ struct {
+ u32 sec_done :1 ;
+ u32 aes_mac_valid :1 ;
+ u32 aes_busy :1 ;
+ u32 sha_busy :1 ;
+ u32 rsvd_1 :28;
+ };
+};
+
+#define swap32(val) ( \
+ (((u32)(val) << 24) & (u32)0xFF000000) | \
+ (((u32)(val) << 8) & (u32)0x00FF0000) | \
+ (((u32)(val) >> 8) & (u32)0x0000FF00) | \
+ (((u32)(val) >> 24) & (u32)0x000000FF))
+
+/*
+ * Copy @count bytes from @value into device RAM at @addr as a stream
+ * of byte-swapped 32-bit words (the engine expects big-endian words).
+ * A trailing partial word (count % 4 bytes) is written byte-by-byte
+ * into the high-order byte lanes of the last word, mirroring the swap.
+ *
+ * NOTE(review): @value is dereferenced through an unsigned int *, so
+ * the source buffer is assumed 4-byte aligned -- TODO confirm all
+ * call sites guarantee this.
+ */
+static inline void vic_write_n (void *addr, const u8 *value, unsigned int count)
+{
+	unsigned int *data = (unsigned int *) value;
+	int loop = count >> 2;
+
+	for (; loop--; data++, addr += sizeof(unsigned int))
+		writel_relaxed(swap32(*data), addr);
+
+	if(unlikely(count & 0x3)) {
+		int ext = count & 0x3;
+		value = (u8 *)data;
+		for(; ext; ext--){
+			writeb_relaxed(*(value + ext - 1), addr + 4 - ext);
+		}
+	}
+}
+
+/*
+ * Inverse of vic_write_n(): read @count bytes from device RAM at
+ * @addr into @out, byte-swapping each 32-bit word; a trailing partial
+ * word is picked up byte-by-byte from the high-order lanes.
+ *
+ * NOTE(review): @out is dereferenced through an unsigned int *, so
+ * the destination buffer is assumed 4-byte aligned -- TODO confirm.
+ */
+static inline void vic_read_n (void *addr, unsigned char *out, unsigned int count)
+{
+	unsigned int *data = (unsigned int *) out;
+	int loop = count >> 2;
+
+	for (; loop--; data++, addr += sizeof(unsigned int))
+		*data = swap32(readl_relaxed(addr));
+
+	if(unlikely(count & 0x3)) {
+		int ext = count & 0x3;
+		out = (u8 *)data;
+		for(; ext; ext-- )
+			*(out + ext - 1) = readb_relaxed(addr + 4 - ext);
+	}
+}
+
+/* Per-transform (tfm) context shared by the AES, hash and PKA paths. */
+struct vic_sec_ctx {
+	struct crypto_engine_ctx enginectx;
+	struct vic_sec_dev *sdev;	/* device servicing this tfm */
+	struct crypto_aead *sw_cipher;	/* presumably a software AEAD fallback -- TODO confirm */
+	unsigned long flags;
+
+	u8 key[MAX_KEY_SIZE];		/* raw cipher / (H)MAC key */
+	int keylen;			/* bytes of key[] in use */
+	int begin_new;
+	struct vic_rsa_key rsa_key;
+};
+
+/*
+ * Per-device state for one VIC security engine instance.  One entry
+ * lives on the global dev_list; it carries the MMIO mappings, the
+ * crypto-engine queue, the bounce buffer (data/pages_count) and the
+ * in-flight request bookkeeping shared by the AES/hash/PKA paths.
+ */
+struct vic_sec_dev {
+	struct list_head list;
+	struct device *dev;
+	struct clk *clk;
+	void __iomem *io_base;		/* engine register/RAM window */
+	void __iomem *clk_base;		/* per-engine clock-gate registers */
+	void *data;			/* bounce buffer (free_pages'd on remove) */
+	//void *data_out;
+	int pages_count;
+	struct vic_sec_ctx *ctx;
+	struct vic_sec_request_ctx *rctx;
+
+	struct ahash_request *req;	/* hash request currently being served */
+	struct crypto_engine *engine;
+
+	unsigned long flags;
+
+	union vic_sec_ie ie;		/* cached interrupt-enable register */
+	union vic_sec_status status;	/* cached status register */
+	struct mutex doing;		/* held while the engine is busy; see vic_sha_start() */
+
+	struct mutex lock; /* protects req / areq */
+	struct skcipher_request *sreq;
+	struct aead_request *areq;
+
+	size_t data_buf_len;
+	size_t data_offset;
+	size_t authsize;
+
+	size_t total_in;
+	size_t total_out;
+
+	bool sgs_copied;
+
+	int in_sg_len;
+	int out_sg_len;
+
+	struct scatter_walk in_walk;
+	struct scatter_walk out_walk;
+
+	u32 last_ctr[4];
+	u32 ctr_over_count;
+	u32 gcm_ctr;
+
+	struct semaphore firmware_loading, core_running;
+
+	struct pka_state pka;
+	char fw_name[32];
+
+	/*
+	 * If you hold a reference to the firmware (obtained by pka_get_firmware),
+	 * then the fw pointer is guaranteed to remain valid until the reference is
+	 * dropped; otherwise, one must only access the fw pointer while holding
+	 * the fw_mutex.
+	 */
+	struct pka_fw_priv *fw;
+	struct mutex fw_mutex;
+
+	/*
+	 * Rather than access PKA flags register directly, store flags to be used
+	 * for the next operation in work_flags, and cache flags from the previous
+	 * operation in saved_flags.
+	 */
+	u32 work_flags, saved_flags;
+};
+
+// aes
+
+#define VIC_AES_QUEUE_SIZE 512
+#define VIC_AES_BUF_ORDER 2
+
+#define VIC_AES_CTRL_REG 0x40
+#define VIC_AES_CFG_REGS 0x44
+
+#define VIC_AES_MSG_RAM_OFFSET (16*1024)
+#define VIC_AES_MSG_RAM_SIZE (8*1024)
+// 8160 is mod(32) and mod(24)
+#define VIC_AES_BUF_RAM_SIZE 8160
+
+#define VIC_AES_CTX_RAM_OFFSET (VIC_AES_MSG_RAM_OFFSET + VIC_AES_MSG_RAM_SIZE)
+#define VIC_AES_CTX_RAM_SIZE (4*1024)
+
+#define VIC_AES_CTX_KEYS_OFS 0x00
+#define VIC_AES_CTX_KEYS_SIZE 0x20
+#define VIC_AES_CTX_CTR_OFS 0x30
+#define VIC_AES_CTX_CTR_SIZE 0x20
+#define VIC_AES_CTX_IV_OFS 0x40
+#define VIC_AES_CTX_IV_SIZE 0x10
+#define VIC_AES_CTX_MAC_OFS 0x50
+#define VIC_AES_CTX_MAC_SIZE 0x10
+
+#define VIC_AES_IV_LEN AES_BLOCK_SIZE
+#define VIC_AES_CTR_LEN AES_BLOCK_SIZE
+
+union vic_aes_ctrl {
+ unsigned int v;
+ struct {
+ unsigned int aes_mode :4 ;
+#define VIC_AES_MODE_ECB 0
+#define VIC_AES_MODE_CBC 1
+#define VIC_AES_MODE_CTR 2
+#define VIC_AES_MODE_CCM 3
+#define VIC_AES_MODE_CMAC 4
+#define VIC_AES_MODE_GCM 5
+#define VIC_AES_MODE_OFB 7
+#define VIC_AES_MODE_CFB 8
+ unsigned int aes_encrypt :1 ;
+#define VIC_AES_DECRYPT 0
+#define VIC_AES_ENCRYPT 1
+ unsigned int aes_msg_begin :1 ;
+ unsigned int aes_msg_end :1 ;
+ unsigned int aes_str_ctx :1 ; // Stores intermediate context data back into context memory.
+ unsigned int aes_ret_ctx :1 ; // Retrieves intermediate context data from context memory.
+ unsigned int aes_inv_key :1 ;
+ unsigned int aes_str_inv_key :1 ;
+ unsigned int rsvd_0 :1 ;
+ unsigned int aes_key_sz :2 ;
+#define VIC_AES_KEY_SZ_128 0
+#define VIC_AES_KEY_SZ_192 1
+#define VIC_AES_KEY_SZ_256 2
+ unsigned int rsvd_1 :17; // [30:14]
+ unsigned int aes_start :1 ;
+ };
+};
+
+union vic_aes_cfg {
+ unsigned int vs[1];
+ struct {
+ // 0x44
+ unsigned int aes_tag_msg_addr :13;
+ unsigned int rsvd_0 :18; // [30:13]
+ unsigned int aes_str_tag2msg :1 ;
+
+ // 0x48
+ unsigned int authsize :4; //unsigned int aes_mac_len :4 ;
+ unsigned int rsvd_1 :28; // [31:4]
+
+ // 0x4C
+ unsigned int aes_blk_idx :9 ;
+#define VIC_AES_BLK_SIZE 0x10
+#define VIC_AES_BLKS_NUM 512 //8k/0x10
+ unsigned int rsvd_2 :23; // [31:9]
+
+ // 0x50
+ unsigned int aes_ctx_idx :5 ;
+#define VIC_AES_CTX_SIZE 0x60
+#define VIC_AES_CTXS_NUM 32 //4k/0x60
+ unsigned int rsvd_3 :27; // [31:5]
+
+ // 0x54
+ unsigned int aes_assoclen :14; //aes_aad_len :14;
+ unsigned int rsvd_4 :18; // [31:14]
+
+ // 0x58
+ unsigned int aes_n_bytes :14; // Number of bytes of message to cipher in current operation.
+ unsigned int rsvd_5 :18; // [31:14]
+
+ // 0x5C
+ unsigned int aes_tot_n_bytes :28; // Total length of message data (across all segments), not including AD, to process. Required in CCM and GCM modes.
+ unsigned int rsvd_6 :4 ; // [31:28]
+
+ // 0x60
+ unsigned int aes_assoclen_tot :28; //aes_aad_len_tot :28;
+ unsigned int rsvd_7 :4 ; // [31:28]
+ };
+};
+
+#define KEY_SET_FLAG 1
+#define IV_SET_FLAG (1 << 1)
+#define CTR_SET_FLAG (1 << 2)
+#define AD_SET_FLAG (1 << 3)
+#define MAC_SET_FLAG (1 << 4)
+
+
+extern void vic_aes_irq_complete(int irq, void *arg);
+
+// sha
+#define HASH_BUFLEN 256
+
+#define HASH_AUTOSUSPEND_DELAY 50
+#define CTX_BLOCK_SIZE 64
+#define VIC_MAX_ALIGN_SIZE 128
+
+union vic_sha_ctrl {
+ u32 v;
+ struct {
+ // 0x80
+ u32 sha_mode :4 ;
+#define SHA_MODE_224 0
+#define SHA_MODE_256 1
+#define SHA_MODE_384 2
+#define SHA_MODE_512 3
+#define SHA_MODE_1 4
+#define SHA_MODE_MD5 5
+#define SHA_MODE_512_DIV_224 7
+#define SHA_MODE_512_DIV_256 8
+ u32 sha_hmac :1 ;
+ u32 sha_sslmac :1 ;
+ u32 sha_msg_begin :1 ;
+ u32 sha_msg_end :1 ;
+ u32 sha_store_ctx :1 ;
+ u32 sha_retrieve_ctx :1 ;
+ u32 rsvd_0 :21; // [30:10]
+ u32 sha_start :1 ;
+ };
+};
+
+union vic_sha_cfg {
+ u32 vs[1];
+ struct {
+ // 0x84
+#define SHA_CTX_MSG_ADDR 0x100
+#define SHA_CTX_MSG_ADDR_ROUNDS 0x100
+ u32 sha_ctx_msg_addr :13;
+ u32 rsvd_0 :18; // [30:13]
+ u32 sha_store_ctx_2msg :1 ;
+
+ // 0x88
+ u32 sha_secret_bytes :8 ;
+ u32 sha_secret_addr :11;
+ u32 rsvd_1 :13; // [31:19]
+
+ // 0x8C
+ u32 sha_num_bytes :14;
+ u32 rsvd_2 :18; // [31:14]
+
+ // 0x90
+ u32 sha_icv_len :6 ;
+ u32 rsvd_3 :26; // [31:6]
+
+ // 0x94
+ u32 sha_ctx_idx :6 ;
+ u32 rsvd_4 :26; // [31:6]
+
+ // 0x98
+ u32 sha_blk_idx :7 ;
+ u32 rsvd_5 :25; // [31:7]
+
+ // 0x9C
+ u32 sha_tot_bytes :26;
+ u32 rsvd_6 :6 ; // [31:26]
+
+ // 0xA0
+ u32 sha_seqn0 ;
+ };
+};
+
+/*
+ * Per-request context, also used as the exported hash state (see
+ * vic_hash_export/import -- statesize is sizeof(*this)).  Holds the
+ * linear bounce buffer, scatterlist walk position and the register
+ * images programmed into the engine.
+ */
+struct vic_sec_request_ctx {
+	struct vic_sec_dev *sdev;
+	unsigned long mode;		/* SHA_MODE_* / VIC_AES_MODE_* */
+	unsigned long flags;
+	unsigned long op;		/* HASH_OP_UPDATE or HASH_OP_FINAL */
+
+	u8 digest[CTX_BLOCK_SIZE] __aligned(sizeof(u32));	/* result / saved intermediate state */
+	size_t digcnt;
+	size_t bufcnt;			/* bytes currently buffered */
+	size_t buflen;			/* bounce-buffer capacity (HASH_BUFLEN) */
+	size_t cmac_up_len;
+
+	size_t assoclen;
+	unsigned int is_load;		/* 1 when digest[] holds state to restore into HW */
+	unsigned int req_type;
+
+
+	struct scatterlist *sg;		/* current source sg entry */
+	struct scatterlist *out_sg;
+	unsigned int offset;		/* offset within *sg */
+	unsigned int total;		/* bytes left in this update */
+	unsigned long msg_tot;		/* bytes already pushed to HW in prior segments */
+
+	union vic_aes_cfg aes_cfg;
+	union vic_aes_ctrl aes_ctrl;
+	union vic_sha_cfg sha_cfg;
+	union vic_sha_ctrl sha_ctrl;
+	unsigned int last_block_idx;	/* 0 until the first segment has been sent */
+
+	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));	/* linear bounce buffer */
+};
+
+/*
+ * Ungate the per-engine clock by setting bit 31 of that engine's
+ * clock-control register.  Only AES_CLK and PKA_CLK are handled here
+ * (SHA shares the block clock).
+ *
+ * Fix vs. original: "0x1 << 31" left-shifts into the sign bit of a
+ * signed int, which is undefined behaviour in C -- use an unsigned
+ * constant.  The redundant clear-then-set pair collapses to a single
+ * bit set.
+ */
+static inline void vic_clk_enable(struct vic_sec_dev *sdev, int type)
+{
+	u32 val;
+
+	switch(type) {
+	case AES_CLK:
+		val = readl(sdev->clk_base + AES_CLK_OFFSET);
+		val |= 0x1U << 31;	/* clock-enable bit */
+		writel(val, sdev->clk_base + AES_CLK_OFFSET);
+		break;
+	case PKA_CLK:
+		val = readl(sdev->clk_base + PKA_CLK_OFFSET);
+		val |= 0x1U << 31;
+		writel(val, sdev->clk_base + PKA_CLK_OFFSET);
+		break;
+	}
+}
+
+/*
+ * Gate the per-engine clock by clearing bit 31 of that engine's
+ * clock-control register.
+ *
+ * Fix vs. original: "0x1 << 31" is undefined behaviour on signed int
+ * (use 0x1U), and "val |= 0x0 << 31" was a no-op -- clearing the bit
+ * is all that is required.
+ */
+static inline void vic_clk_disable(struct vic_sec_dev *sdev, int type)
+{
+	u32 val;
+
+	switch(type) {
+	case AES_CLK:
+		val = readl(sdev->clk_base + AES_CLK_OFFSET);
+		val &= ~(0x1U << 31);	/* clock-enable bit */
+		writel(val, sdev->clk_base + AES_CLK_OFFSET);
+		break;
+	case PKA_CLK:
+		val = readl(sdev->clk_base + PKA_CLK_OFFSET);
+		val &= ~(0x1U << 31);
+		writel(val, sdev->clk_base + PKA_CLK_OFFSET);
+		break;
+	}
+}
+
+#if 0
+struct vic_hash_algs_info {
+ struct ahash_alg *algs_list;
+ size_t size;
+};
+
+struct vic_hash_pdata {
+ struct vic_hash_algs_info *algs_info;
+ size_t algs_info_size;
+};
+#endif
+extern int vic_aes_register_algs(void);
+extern int vic_aes_unregister_algs(void);
+extern struct vic_sec_dev *vic_sec_find_dev(struct vic_sec_ctx *ctx);
+extern int vic_cryp_get_from_sg(struct vic_sec_request_ctx *rctx, size_t offset,
+ size_t count,size_t data_offset);
+
+extern int vic_hash_register_algs(void);
+extern int vic_hash_unregister_algs(void);
+
+extern int vic_pka_register_algs(void);
+extern int vic_pka_unregister_algs(void);
+extern irqreturn_t vic_pka_irq_done(struct vic_sec_dev *sdev);
+extern int vic_pka_init(struct pka_state *pka);
+#endif
diff --git a/drivers/crypto/sifive-vic/vic-sha.c b/drivers/crypto/sifive-vic/vic-sha.c
new file mode 100644
index 0000000000000..ee7748a7e4c7c
--- /dev/null
+++ b/drivers/crypto/sifive-vic/vic-sha.c
@@ -0,0 +1,1102 @@
+/*
+ ******************************************************************************
+ * @file vic-sha.c
+ * @author StarFive Technology
+ * @version V1.0
+ * @date 08/13/2020
+ * @brief
+ ******************************************************************************
+ * @copy
+ *
+ * THE PRESENT SOFTWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
+ * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE
+ * TIME. AS A RESULT, STARFIVE SHALL NOT BE HELD LIABLE FOR ANY
+ * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING
+ * FROM THE CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE
+ * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
+ *
+ * © COPYRIGHT 2020 Shanghai StarFive Technology Co., Ltd.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include "vic-sec.h"
+
+#define HASH_IE 0x00
+#define HASH_STATUS 0x04
+#define HASH_CTRL 0x80
+#define HASH_CFG 0x84
+
+#define SHA_MSG_RAM_OFFSET (16*1024)
+#define SHA_MSG_RAM_SIZE (8*1024)
+#define SHA_CTX_RAM_OFFSET (SHA_MSG_RAM_OFFSET + SHA_MSG_RAM_SIZE)
+#define SHA_CTX_RAM_SIZE (4*1024)
+#define SHA_SEC_RAM_OFFSET (SHA_CTX_RAM_OFFSET + SHA_CTX_RAM_SIZE)
+#define SHA_SEC_RAM_SIZE (2*1024)
+
+
+#define HASH_FLAGS_INIT			BIT(0)
+#define HASH_FLAGS_FINAL		BIT(3)
+#define HASH_FLAGS_FINUP		BIT(4)
+
+/*
+ * Algorithm-selection flags occupy bits 8..13.  GENMASK() takes the
+ * high bit first: GENMASK(13, 8).  The original GENMASK(8, 13) had
+ * the arguments reversed and produced a wrong mask.
+ */
+#define HASH_FLAGS_ALGO_MASK		GENMASK(13, 8)
+#define HASH_FLAGS_MD5			BIT(8)
+#define HASH_FLAGS_SHA1			BIT(9)
+#define HASH_FLAGS_SHA224		BIT(10)
+#define HASH_FLAGS_SHA256		BIT(11)
+#define HASH_FLAGS_SHA384		BIT(12)
+#define HASH_FLAGS_SHA512		BIT(13)
+#define HASH_FLAGS_ERRORS		BIT(14)
+#define HASH_FLAGS_HMAC			BIT(15)
+
+/*
+ * Block until the current hash operation completes: the "doing" mutex
+ * is taken by the submitter in vic_sha_start() and released on the
+ * completion path, so acquiring it here waits for the hardware.
+ * Returns 0 when the done/idle status bits confirm completion, -1
+ * otherwise.
+ *
+ * NOTE(review): a mutex used as a completion signal requires lock and
+ * unlock to happen in compatible (process) contexts; a struct
+ * completion would be the conventional primitive -- verify the unlock
+ * site (IRQ handler?).
+ */
+static inline int vic_hash_wait_busy(struct vic_sec_dev *hdev)
+{
+	int ret = -1;
+
+	mutex_lock(&hdev->doing);
+	if(hdev->status.sec_done && (!hdev->status.sha_busy))
+		ret = 0;
+	mutex_unlock(&hdev->doing);
+	return ret;
+	//return wait_cond_timeout(hdev->status.sec_done && (!hdev->status.sha_busy), 10, 10000);
+}
+
+/* Copy the (H)MAC key, if one is set, into the engine's secret RAM. */
+static int vic_hash_write_key(struct vic_sec_dev *hdev)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+	struct vic_sec_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	if (ctx->keylen)
+		vic_write_n(hdev->io_base + SHA_SEC_RAM_OFFSET, ctx->key,
+			    ctx->keylen);
+
+	return 0;
+}
+
+/*
+ * Drain bytes from the request scatterlist into the linear bounce
+ * buffer until either the buffer is full (buflen) or the request data
+ * is exhausted (total == 0).  Zero-length sg entries in the middle of
+ * the list are skipped.  Advances rctx->sg/offset and updates
+ * bufcnt/total as it goes.
+ */
+static void vic_hash_append_sg(struct vic_sec_request_ctx *rctx)
+{
+	size_t count;
+
+	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
+		count = min(rctx->sg->length - rctx->offset, rctx->total);
+		count = min(count, rctx->buflen - rctx->bufcnt);
+
+		/* NOTE: count is size_t, so "<= 0" only matches count == 0 */
+		if (count <= 0) {
+			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
+				rctx->sg = sg_next(rctx->sg);
+				continue;
+			} else {
+				break;
+			}
+		}
+
+		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
+					 rctx->offset, count, 0);
+
+		rctx->bufcnt += count;
+		rctx->offset += count;
+		rctx->total -= count;
+
+		/* current entry fully consumed: step to the next one */
+		if (rctx->offset == rctx->sg->length) {
+			rctx->sg = sg_next(rctx->sg);
+			if (rctx->sg)
+				rctx->offset = 0;
+			else
+				rctx->total = 0;
+		}
+	}
+}
+
+/*
+ * Program the SHA configuration registers from rctx and kick the
+ * engine.  The "doing" mutex is taken just before start and is
+ * released by the completion path -- that is what
+ * vic_hash_wait_busy() blocks on.  Returns -EBUSY if the engine is
+ * already running.
+ */
+static int vic_sha_start(struct vic_sec_request_ctx *rctx)
+{
+	struct vic_sec_dev *sdev = rctx->sdev;
+	int loop, int_len = sizeof(unsigned int);
+
+	if(sdev->status.sha_busy) {
+		return -EBUSY;
+	}
+
+	/* copy the whole sha_cfg register image into HASH_CFG.. */
+	for(loop = 0; loop < CFG_REGS_LEN / int_len; loop++) {
+		writel(*(rctx->sha_cfg.vs + loop), sdev->io_base + HASH_CFG + loop * int_len);
+	}
+	sdev->ie.sec_done_ie = 1;
+	mutex_lock(&sdev->doing);
+
+	writel(sdev->ie.v, sdev->io_base + HASH_IE);
+	sdev->status.sha_busy = 1;
+	sdev->status.sec_done = 0;
+	rctx->sha_ctrl.sha_start = 1;
+	/* writing CTRL with sha_start set launches the operation */
+	writel(rctx->sha_ctrl.v, sdev->io_base + HASH_CTRL);
+
+	return 0;
+}
+
+/*
+ * Push one buffered segment (rctx->buffer, rctx->bufcnt bytes) through
+ * the hash engine.  Builds fresh cfg/ctrl register images for the
+ * segment:
+ *  - @final && msg_tot:  last segment of a multi-segment message --
+ *    retrieve the saved context and mark msg_end;
+ *  - @final && !msg_tot: whole message fits in one segment -- mark
+ *    both msg_begin and msg_end;
+ *  - !@final:            intermediate segment -- store context for the
+ *    next call; msg_begin only on the very first segment.
+ * Then writes the message (and key, for HMAC) into engine RAM, starts
+ * the engine and waits for completion.
+ * Returns 0 on final completion, -EINPROGRESS for intermediate
+ * segments, -ETIMEDOUT if the wait fails.
+ */
+static int vic_hash_xmit_cpu(struct vic_sec_dev *sdev,
+			     struct vic_sec_request_ctx *rctx, int final)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(sdev->req);
+	struct vic_sec_ctx *ctx = crypto_ahash_ctx(tfm);
+	int length = rctx->bufcnt;
+	int ret = -EINPROGRESS;
+
+	memset(&rctx->sha_cfg,0,sizeof(rctx->sha_cfg));
+	memset(&rctx->sha_ctrl,0,sizeof(rctx->sha_ctrl));
+
+	if (final) {
+		if(rctx->msg_tot){
+			/* last segment of a multi-segment message */
+			rctx->sha_cfg.sha_num_bytes = length;
+			rctx->sha_cfg.sha_tot_bytes = rctx->msg_tot + length;
+			rctx->sha_cfg.sha_store_ctx_2msg = 1;
+			rctx->sha_cfg.sha_ctx_msg_addr = SHA_CTX_MSG_ADDR;
+			rctx->sha_cfg.sha_ctx_idx = 0;
+			rctx->sha_ctrl.sha_mode = rctx->mode;
+			rctx->sha_ctrl.sha_msg_end = 1;
+			rctx->sha_ctrl.sha_retrieve_ctx = 1;
+		} else {
+			/* single-segment message: begin and end in one go */
+			rctx->sha_cfg.sha_num_bytes = length;
+			rctx->sha_cfg.sha_tot_bytes = length;
+			rctx->sha_cfg.sha_store_ctx_2msg = 1;
+			rctx->sha_cfg.sha_ctx_msg_addr = SHA_CTX_MSG_ADDR;
+			rctx->sha_cfg.sha_ctx_idx = 0;
+
+			rctx->sha_ctrl.sha_mode = rctx->mode;
+			rctx->sha_ctrl.sha_msg_begin = 1;
+			rctx->sha_ctrl.sha_msg_end = 1;
+			rctx->sha_ctrl.sha_store_ctx = 1;
+		}
+		sdev->flags |= HASH_FLAGS_FINAL;
+		ret = 0;
+	} else {
+		/* intermediate segment: accumulate and save context */
+		rctx->msg_tot += length;
+
+		rctx->sha_cfg.sha_num_bytes = length;
+		rctx->sha_cfg.sha_ctx_idx = 0;
+
+		rctx->sha_ctrl.sha_store_ctx = 1;
+		rctx->sha_ctrl.sha_mode = rctx->mode;
+		if(rctx->last_block_idx == 0){
+			rctx->sha_ctrl.sha_msg_begin = 1;
+		} else {
+			rctx->sha_ctrl.sha_retrieve_ctx = 1;
+		}
+
+		rctx->last_block_idx = 1;
+	}
+
+	if (rctx->flags & HASH_FLAGS_HMAC) {
+		rctx->sha_cfg.sha_secret_bytes = ctx->keylen;
+		rctx->sha_ctrl.sha_hmac = 1;
+	}
+
+	// put_msg
+	vic_write_n(sdev->io_base + SHA_MSG_RAM_OFFSET, rctx->buffer, rctx->bufcnt);
+
+	//set key
+	vic_hash_write_key(sdev);
+
+	//start
+	vic_sha_start(rctx);
+
+	//wait();
+	if(vic_hash_wait_busy(sdev))
+		ret = -ETIMEDOUT;
+
+	return ret;
+}
+
+/*
+ * CPU-driven update path: drain the request's scatterlist through the
+ * bounce buffer in buflen-sized chunks, submitting each full chunk to
+ * the engine; the remaining tail is submitted as the final block when
+ * FINUP is set.
+ *
+ * Fix vs. original: an error returned by vic_hash_xmit_cpu() inside
+ * the drain loop is no longer silently overwritten by subsequent
+ * iterations; the duplicated final/non-final call was also collapsed
+ * (passing "final ? 1 : 0" is behaviourally identical to passing the
+ * raw FINUP flag bit).
+ */
+static int vic_hash_update_cpu(struct vic_sec_dev *hdev)
+{
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(hdev->req);
+	int err = 0, final;
+
+	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
+
+	final = (rctx->flags & HASH_FLAGS_FINUP);
+
+	while ((rctx->total >= rctx->buflen) ||
+	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
+		vic_hash_append_sg(rctx);
+
+		err = vic_hash_xmit_cpu(hdev, rctx, 0);
+		rctx->bufcnt = 0;
+		if (err && err != -EINPROGRESS)
+			return err;
+	}
+
+	vic_hash_append_sg(rctx);
+
+	/* submit the remaining bytes; mark as final only on finup */
+	err = vic_hash_xmit_cpu(hdev, rctx, final ? 1 : 0);
+	rctx->bufcnt = 0;
+
+	return err;
+}
+
+/*
+ * ahash .init: reset the per-request context and select the engine
+ * mode from the tfm's digest size (MD5, SHA-1, SHA-224/256/384/512).
+ * The HMAC flag is inherited from the tfm context.
+ * Returns -EINVAL for an unrecognized digest size.
+ */
+static int vic_hash_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct vic_sec_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+	struct vic_sec_dev *hdev = ctx->sdev;
+
+	memset(rctx,0,sizeof(struct vic_sec_request_ctx));
+
+	rctx->sdev = hdev;
+
+	rctx->sdev->req = req;
+
+	/* map digest size to the hardware mode code */
+	rctx->digcnt = crypto_ahash_digestsize(tfm);
+	switch (rctx->digcnt) {
+	case MD5_DIGEST_SIZE:
+		rctx->mode = SHA_MODE_MD5;
+		break;
+	case SHA1_DIGEST_SIZE:
+		rctx->mode = SHA_MODE_1;
+		break;
+	case SHA224_DIGEST_SIZE:
+		rctx->mode = SHA_MODE_224;
+		break;
+	case SHA256_DIGEST_SIZE:
+		rctx->mode = SHA_MODE_256;
+		break;
+	case SHA384_DIGEST_SIZE:
+		rctx->mode = SHA_MODE_384;
+		break;
+	case SHA512_DIGEST_SIZE:
+		rctx->mode = SHA_MODE_512;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rctx->bufcnt = 0;
+	rctx->buflen = HASH_BUFLEN;
+	rctx->total = 0;
+	rctx->msg_tot = 0;
+	rctx->offset = 0;
+	rctx->is_load = 0;
+	rctx->last_block_idx = 0;
+
+	memset(rctx->buffer, 0, HASH_BUFLEN);
+
+	if (ctx->flags & HASH_FLAGS_HMAC)
+		rctx->flags |= HASH_FLAGS_HMAC;
+
+	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
+
+	return 0;
+}
+
+/* Engine callback path for HASH_OP_UPDATE: CPU-driven transfer. */
+static int vic_hash_update_req(struct vic_sec_dev *hdev)
+{
+	return vic_hash_update_cpu(hdev);
+}
+
+/* Flush the buffered tail of the message as the final hash block. */
+static int vic_hash_final_req(struct vic_sec_dev *hdev)
+{
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(hdev->req);
+	int err;
+
+	err = vic_hash_xmit_cpu(hdev, rctx, 1);
+	rctx->bufcnt = 0;
+
+	return err;
+}
+
+/*
+ * Restore previously saved intermediate hash state (rctx->digest)
+ * into the engine's context RAM before resuming a multi-part hash.
+ */
+static void vic_hash_set_ctx(struct ahash_request *req)
+{
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+
+	vic_write_n(rctx->sdev->io_base + SHA_CTX_RAM_OFFSET,
+		    rctx->digest, CTX_BLOCK_SIZE);
+}
+
+/*
+ * Read the finished digest out of the engine's message RAM (at the
+ * sha_ctx_msg_addr offset programmed for the final block) into
+ * rctx->digest.  The read size is derived from the hardware mode;
+ * unknown modes are silently ignored.
+ */
+static void vic_hash_copy_hash(struct ahash_request *req)
+{
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+	unsigned int hashsize;
+
+	switch (rctx->mode) {
+	case SHA_MODE_MD5:
+		hashsize = MD5_DIGEST_SIZE;
+		break;
+	case SHA_MODE_1:
+		hashsize = SHA1_DIGEST_SIZE;
+		break;
+	case SHA_MODE_224:
+		hashsize = SHA224_DIGEST_SIZE;
+		break;
+	case SHA_MODE_256:
+		hashsize = SHA256_DIGEST_SIZE;
+		break;
+	case SHA_MODE_384:
+		hashsize = SHA384_DIGEST_SIZE;
+		break;
+	case SHA_MODE_512:
+		hashsize = SHA512_DIGEST_SIZE;
+		break;
+	default:
+		return;
+	}
+
+	vic_read_n(rctx->sdev->io_base + SHA_MSG_RAM_OFFSET + rctx->sha_cfg.sha_ctx_msg_addr,
+		   rctx->digest, hashsize);
+}
+
+/* Copy the computed digest into the caller-supplied result buffer. */
+static int vic_hash_finish(struct ahash_request *req)
+{
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+
+	if (req->result == NULL)
+		return -EINVAL;
+
+	memcpy(req->result, rctx->digest, rctx->digcnt);
+	return 0;
+}
+
+/*
+ * Complete a hash request on the crypto engine.  On success of a
+ * final operation, pull the digest out of the hardware and hand it to
+ * the caller; on failure just record the error flag.  Always notifies
+ * the engine so the next queued request can run.
+ */
+static void vic_hash_finish_req(struct ahash_request *req, int err)
+{
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+	struct vic_sec_dev *hdev = rctx->sdev;
+
+	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
+		vic_hash_copy_hash(req);
+		err = vic_hash_finish(req);
+		hdev->flags &= ~(HASH_FLAGS_FINAL |
+				 HASH_FLAGS_INIT | HASH_FLAGS_HMAC);
+	} else {
+		rctx->flags |= HASH_FLAGS_ERRORS;
+	}
+
+	crypto_finalize_hash_request(hdev->engine, req, err);
+}
+
+static int vic_hash_one_request(struct crypto_engine *engine, void *areq);
+static int vic_hash_prepare_req(struct crypto_engine *engine, void *areq);
+
+/* Hand the request to the crypto-engine queue for serialized processing. */
+static int vic_hash_handle_queue(struct vic_sec_dev *hdev,
+				 struct ahash_request *req)
+{
+	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
+}
+
+/*
+ * crypto-engine .prepare_request hook: no hardware setup is needed
+ * here; just validate that the tfm is bound to a device and emit a
+ * debug trace.
+ */
+static int vic_hash_prepare_req(struct crypto_engine *engine, void *areq)
+{
+	struct ahash_request *req = container_of(areq, struct ahash_request,
+						 base);
+	struct vic_sec_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct vic_sec_dev *hdev = ctx->sdev;
+	struct vic_sec_request_ctx *rctx;
+
+	if (!hdev) {
+		return -ENODEV;
+	}
+
+	rctx = ahash_request_ctx(req);
+
+	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
+		rctx->op, req->nbytes);
+
+	return 0;
+}
+
+/*
+ * crypto-engine .do_one_request hook: restore any saved intermediate
+ * state into the hardware, run the queued UPDATE or FINAL operation
+ * synchronously, and finalize the request unless it is still in
+ * progress.
+ */
+static int vic_hash_one_request(struct crypto_engine *engine, void *areq)
+{
+	struct ahash_request *req = container_of(areq, struct ahash_request,
+						 base);
+	struct vic_sec_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct vic_sec_dev *hdev = ctx->sdev;
+	struct vic_sec_request_ctx *rctx;
+	int err = 0;
+
+	if (!hdev) {
+		return -ENODEV;
+	}
+
+	rctx = ahash_request_ctx(req);
+
+	/* reload intermediate context saved by vic_hash_update() */
+	if(rctx->is_load) {
+		vic_hash_set_ctx(req);
+		rctx->is_load = 0;
+	}
+
+	if (rctx->op == HASH_OP_UPDATE){
+		err = vic_hash_update_req(hdev);
+	} else if (rctx->op == HASH_OP_FINAL) {
+		err = vic_hash_final_req(hdev);
+	}
+
+	if (err != -EINPROGRESS)
+		/* done task will not finish it, so do it here */
+		vic_hash_finish_req(req, err);
+
+	return 0;
+}
+
+/* Record the requested operation and queue it on the crypto engine. */
+static int vic_hash_enqueue(struct ahash_request *req, unsigned int op)
+{
+	struct vic_sec_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+
+	rctx->op = op;
+	return vic_hash_handle_queue(ctx->sdev, req);
+}
+
+/*
+ * ahash .update: small updates are simply buffered; once buffered +
+ * new data would overflow the bounce buffer, full chunks are pushed
+ * through the hardware immediately and the intermediate context is
+ * saved into rctx->digest (is_load = 1) so a later engine-queued
+ * operation can restore it.  Anything left over is queued as an
+ * UPDATE on the crypto engine.
+ *
+ * NOTE(review): the drain loop here drives the hardware directly,
+ * outside the crypto-engine queue -- confirm this cannot race with a
+ * request the engine is concurrently processing.
+ */
+static int vic_hash_update(struct ahash_request *req)
+{
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+
+	if (!req->nbytes)
+		return 0;
+
+	rctx->total = req->nbytes;
+	rctx->sg = req->src;
+	rctx->offset = 0;
+
+	if ((rctx->total >= rctx->buflen) ||
+	    (rctx->bufcnt + rctx->total >= rctx->buflen)) {
+		if(rctx->is_load) {
+			vic_hash_set_ctx(req);
+			rctx->is_load = 0;
+		}
+		while ((rctx->total >= rctx->buflen) ||
+		       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
+			vic_hash_append_sg(rctx);
+
+			vic_hash_xmit_cpu(rctx->sdev, rctx, 0);
+			rctx->bufcnt = 0;
+		}
+		/* stash intermediate HW state for the next operation */
+		rctx->is_load = 1;
+		vic_read_n(rctx->sdev->io_base + SHA_CTX_RAM_OFFSET,
+			   rctx->digest, CTX_BLOCK_SIZE);
+	}
+
+	if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
+		vic_hash_append_sg(rctx);
+		return 0;
+	}
+
+
+	return vic_hash_enqueue(req, HASH_OP_UPDATE);
+}
+
+/* ahash .final: mark the request as finishing and queue the FINAL op. */
+static int vic_hash_final(struct ahash_request *req)
+{
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+
+	rctx->flags |= HASH_FLAGS_FINUP;
+	return vic_hash_enqueue(req, HASH_OP_FINAL);
+}
+
+/*
+ * ahash .finup: update with the remaining data, then finalize.  If
+ * update is asynchronous (-EINPROGRESS/-EBUSY) the final step happens
+ * later; otherwise final() is always called so resources are cleaned
+ * up even after an update error.
+ */
+static int vic_hash_finup(struct ahash_request *req)
+{
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+	int err1, err2;
+
+	rctx->flags |= HASH_FLAGS_FINUP;
+
+	err1 = vic_hash_update(req);
+
+	if (err1 == -EINPROGRESS || err1 == -EBUSY) {
+		return err1;
+	}
+
+	/*
+	 * final() has to be always called to cleanup resources
+	 * even if update() failed, except EINPROGRESS
+	 */
+	err2 = vic_hash_final(req);
+
+	return err1 ?: err2;
+}
+
+/* ahash .digest: one-shot hash = init followed by finup. */
+static int vic_hash_digest(struct ahash_request *req)
+{
+	int err = vic_hash_init(req);
+
+	return err ? err : vic_hash_finup(req);
+}
+
+/*
+ * ahash .export: save the complete request context as the exported
+ * state (statesize == sizeof(*rctx)).
+ * NOTE(review): the state includes raw pointers (sdev, sg) -- it is
+ * only meaningful when re-imported within the same device lifetime;
+ * confirm against crypto API expectations for export/import.
+ */
+static int vic_hash_export(struct ahash_request *req, void *out)
+{
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+
+	memcpy(out, rctx, sizeof(*rctx));
+
+	return 0;
+}
+
+/* ahash .import: restore the full request context saved by export. */
+static int vic_hash_import(struct ahash_request *req, const void *in)
+{
+	struct vic_sec_request_ctx *rctx = ahash_request_ctx(req);
+
+	memcpy(rctx, in, sizeof(*rctx));
+
+	return 0;
+}
+#if 0
+static int vic_hash224_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct vic_sec_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (keylen <= SHA224_BLOCK_SIZE) {
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+ } else {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+#endif
+#if 0
+static int vic_hash256_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct vic_sec_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (keylen <= SHA256_BLOCK_SIZE) {
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+ } else {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int vic_hash384_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct vic_sec_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (keylen <= SHA384_BLOCK_SIZE) {
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+ } else {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+#else
+/*
+ * HMAC setkey: keys up to the block size are stored verbatim; longer
+ * keys are first hashed down to the digest size using this driver's
+ * own ahash implementation (vic-sha256 / vic-sha384), per RFC 2104.
+ *
+ * Fix vs. original: the temporary key buffer @buf was never freed,
+ * leaking (key material!) on every long-key setkey.  It is now wiped
+ * with memzero_explicit() and freed on the cleanup path.
+ */
+static int vic_hmac_setkey(struct crypto_ahash *tfm,
+			   const u8 *key, unsigned int keylen)
+{
+	unsigned int digestsize = crypto_ahash_digestsize(tfm);
+	struct vic_sec_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct crypto_wait wait;
+	struct ahash_request *req;
+	struct scatterlist sg;
+	unsigned int blocksize;
+	struct crypto_ahash *ahash_tfm;
+	u8 *buf;
+	int ret;
+	const char *alg_name;
+
+	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+	/* short keys are used as-is */
+	if (keylen <= blocksize) {
+		memcpy(ctx->key, key, keylen);
+		ctx->keylen = keylen;
+		return 0;
+	}
+
+	if (digestsize == SHA256_DIGEST_SIZE)
+		alg_name = "vic-sha256";
+	else if (digestsize == SHA384_DIGEST_SIZE)
+		alg_name = "vic-sha384";
+	else
+		return -EINVAL;
+
+	ctx->keylen = digestsize;
+	ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
+	if (IS_ERR(ahash_tfm))
+		return PTR_ERR(ahash_tfm);
+
+	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
+	if (!req) {
+		ret = -ENOMEM;
+		goto err_free_ahash;
+	}
+
+	crypto_init_wait(&wait);
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   crypto_req_done, &wait);
+	crypto_ahash_clear_flags(ahash_tfm, ~0);
+
+	buf = kzalloc(keylen + VIC_MAX_ALIGN_SIZE, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_free_req;
+	}
+
+	memcpy(buf, key, keylen);
+	sg_init_one(&sg, buf, keylen);
+	ahash_request_set_crypt(req, &sg, ctx->key, keylen);
+
+	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+
+	/* wipe and release the key copy (was leaked in the original) */
+	memzero_explicit(buf, keylen);
+	kfree(buf);
+err_free_req:
+	ahash_request_free(req);
+err_free_ahash:
+	crypto_free_ahash(ahash_tfm);
+	return ret;
+}
+#endif
+
+#if 0
+static int vic_hash512_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct vic_sec_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (keylen <= SHA512_BLOCK_SIZE) {
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+ } else {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+#endif
+/*
+ * Common tfm init: bind the tfm to a device, size the request
+ * context, and hook this driver's crypto-engine callbacks.  A
+ * non-NULL @algs_hmac_name puts the tfm into HMAC mode.
+ *
+ * NOTE(review): the device "lock" mutex is taken here and only
+ * released in vic_hash_cra_exit(), i.e. held for the whole tfm
+ * lifetime to serialize users -- confirm this is intentional; a
+ * refcount or per-request locking would be more conventional.
+ */
+static int vic_hash_cra_init_algs(struct crypto_tfm *tfm,
+				  const char *algs_hmac_name)
+{
+	struct vic_sec_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->sdev = vic_sec_find_dev(ctx);
+
+	if (!ctx->sdev)
+		return -ENODEV;
+
+	mutex_lock(&ctx->sdev->lock);
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct vic_sec_request_ctx));
+
+	ctx->keylen = 0;
+
+	if (algs_hmac_name)
+		ctx->flags |= HASH_FLAGS_HMAC;
+
+	ctx->enginectx.op.do_one_request = vic_hash_one_request;
+	ctx->enginectx.op.prepare_request = vic_hash_prepare_req;
+	ctx->enginectx.op.unprepare_request = NULL;
+	return 0;
+}
+
+/* tfm init for the plain (non-HMAC) hash algorithms. */
+static int vic_hash_cra_init(struct crypto_tfm *tfm)
+{
+	return vic_hash_cra_init_algs(tfm, NULL);
+}
+
+/* tfm teardown: release the device mutex taken in cra_init_algs(). */
+static void vic_hash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct vic_sec_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	mutex_unlock(&ctx->sdev->lock);
+}
+#if 0
+static int vic_hash_cra_md5_init(struct crypto_tfm *tfm)
+{
+ return vic_hash_cra_init_algs(tfm, "md5");
+}
+
+static int vic_hash_cra_sha1_init(struct crypto_tfm *tfm)
+{
+ return vic_hash_cra_init_algs(tfm, "sha1");
+}
+
+static int vic_hash_cra_sha224_init(struct crypto_tfm *tfm)
+{
+ return vic_hash_cra_init_algs(tfm, "sha224");
+}
+#endif
+/* tfm init for hmac(sha256). */
+static int vic_hash_cra_sha256_init(struct crypto_tfm *tfm)
+{
+	return vic_hash_cra_init_algs(tfm, "sha256");
+}
+
+/* tfm init for hmac(sha384). */
+static int vic_hash_cra_sha384_init(struct crypto_tfm *tfm)
+{
+	return vic_hash_cra_init_algs(tfm, "sha384");
+}
+#if 0
+static int vic_hash_cra_sha512_init(struct crypto_tfm *tfm)
+{
+ return vic_hash_cra_init_algs(tfm, "sha512");
+}
+#endif
+static struct ahash_alg algs_md5_sha512[] = {
+#if 0
+ {
+ .init = vic_hash_init,
+ .update = vic_hash_update,
+ .final = vic_hash_final,
+ .finup = vic_hash_finup,
+ .digest = vic_hash_digest,
+ .export = vic_hash_export,
+ .import = vic_hash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "vic-md5",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_hash_cra_init,
+ .cra_exit = vic_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = vic_hash_init,
+ .update = vic_hash_update,
+ .final = vic_hash_final,
+ .finup = vic_hash_finup,
+ .digest = vic_hash_digest,
+ .export = vic_hash_export,
+ .import = vic_hash_import,
+ .setkey = vic_hash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "vic-hmac-md5",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_hash_cra_md5_init,
+ .cra_exit = vic_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = vic_hash_init,
+ .update = vic_hash_update,
+ .final = vic_hash_final,
+ .finup = vic_hash_finup,
+ .digest = vic_hash_digest,
+ .export = vic_hash_export,
+ .import = vic_hash_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "vic-sha1",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_hash_cra_init,
+ .cra_exit = vic_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = vic_hash_init,
+ .update = vic_hash_update,
+ .final = vic_hash_final,
+ .finup = vic_hash_finup,
+ .digest = vic_hash_digest,
+ .export = vic_hash_export,
+ .import = vic_hash_import,
+ .setkey = vic_hash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "vic-hmac-sha1",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_hash_cra_sha1_init,
+ .cra_exit = vic_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+
+ {
+ .init = vic_hash_init,
+ .update = vic_hash_update,
+ .final = vic_hash_final,
+ .finup = vic_hash_finup,
+ .digest = vic_hash_digest,
+ .export = vic_hash_export,
+ .import = vic_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "vic-sha224",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_hash_cra_init,
+ .cra_exit = vic_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = vic_hash_init,
+ .update = vic_hash_update,
+ .final = vic_hash_final,
+ .finup = vic_hash_finup,
+ .digest = vic_hash_digest,
+ .export = vic_hash_export,
+ .import = vic_hash_import,
+ .setkey = vic_hash224_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "vic-hmac-sha224",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_hash_cra_sha224_init,
+ .cra_exit = vic_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+#endif
+ {
+ .init = vic_hash_init,
+ .update = vic_hash_update,
+ .final = vic_hash_final,
+ .finup = vic_hash_finup,
+ .digest = vic_hash_digest,
+ .export = vic_hash_export,
+ .import = vic_hash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "vic-sha256",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_hash_cra_init,
+ .cra_exit = vic_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = vic_hash_init,
+ .update = vic_hash_update,
+ .final = vic_hash_final,
+ .finup = vic_hash_finup,
+ .digest = vic_hash_digest,
+ .export = vic_hash_export,
+ .import = vic_hash_import,
+ .setkey = vic_hmac_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "vic-hmac-sha256",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_hash_cra_sha256_init,
+ .cra_exit = vic_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = vic_hash_init,
+ .update = vic_hash_update,
+ .final = vic_hash_final,
+ .finup = vic_hash_finup,
+ .digest = vic_hash_digest,
+ .export = vic_hash_export,
+ .import = vic_hash_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "vic-sha384",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_hash_cra_init,
+ .cra_exit = vic_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = vic_hash_init,
+ .update = vic_hash_update,
+ .final = vic_hash_final,
+ .finup = vic_hash_finup,
+ .digest = vic_hash_digest,
+ .setkey = vic_hmac_setkey,
+ .export = vic_hash_export,
+ .import = vic_hash_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "vic-hmac-sha384",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_hash_cra_sha384_init,
+ .cra_exit = vic_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+#if 0
+ {
+ .init = vic_hash_init,
+ .update = vic_hash_update,
+ .final = vic_hash_final,
+ .finup = vic_hash_finup,
+ .digest = vic_hash_digest,
+ .export = vic_hash_export,
+ .import = vic_hash_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "vic-sha512",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_hash_cra_init,
+ .cra_exit = vic_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = vic_hash_init,
+ .update = vic_hash_update,
+ .final = vic_hash_final,
+ .finup = vic_hash_finup,
+ .digest = vic_hash_digest,
+ .setkey = vic_hash512_setkey,
+ .export = vic_hash_export,
+ .import = vic_hash_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct vic_sec_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "vic-hmac-sha512",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct vic_sec_ctx),
+ .cra_alignmask = 3,
+ .cra_init = vic_hash_cra_sha512_init,
+ .cra_exit = vic_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+#endif
+};
+
+/*
+ * vic_hash_register_algs() - register all VIC hash/HMAC ahash algorithms
+ *
+ * Registers every entry of algs_md5_sha512[] with the crypto API.
+ * Returns 0 on success or a negative errno from
+ * crypto_register_ahashes(), which unwinds any partial registration
+ * itself on failure.
+ */
+int vic_hash_register_algs(void)
+{
+	return crypto_register_ahashes(algs_md5_sha512,
+				       ARRAY_SIZE(algs_md5_sha512));
+}
+
+/*
+ * vic_hash_unregister_algs() - unregister all VIC hash/HMAC ahash algorithms
+ *
+ * Mirror of vic_hash_register_algs(); removes every entry of
+ * algs_md5_sha512[] from the crypto API. Always returns 0 since
+ * crypto_unregister_ahashes() cannot fail.
+ */
+int vic_hash_unregister_algs(void)
+{
+	crypto_unregister_ahashes(algs_md5_sha512, ARRAY_SIZE(algs_md5_sha512));
+	return 0;
+}