From 3416b97dc49ca05087870066ba991a727133b9bd Mon Sep 17 00:00:00 2001
From: Philippe Langlais
Date: Thu, 20 Oct 2011 16:26:16 +0200
Subject: add trusted execution environment (tee) driver

TEE is now working for Android, tested OK with COPS!

* Updated according to review comments:
- Added ST-Ericsson copyright headers to all tee files.
- Fixed problem with not using readl/writel when using ioremap.
- Fixed problem with forgetting to do iounmap in a special case.
- Fixed incorrect usage of copy_to_user when writing to the device.
- Added architecture-dependent file for the tee service that calls the
  secure world.
- Added support for several inputs (shared memory buffers) for tee.
- Added dummy macro to map MT_MEMORY device.
- Fixed memory leak in secure world due to not closing a TEE session
  correctly from the kernel.
- Now we only copy the input buffer from user space for tee.
- Documented structures in tee.h.
- Moved SVP implementation into the arch/arm/mach-ux500 folder.
- Added new config flags for ux500 and SVP regarding the TEE driver.
- Update mach-ux500/Kconfig:
  - Enable TEE_UX500 by default when using target hardware.
  - Enable TEE_SVP by default when building the simulator.
- Fix the cache sync problem: do not request ROM code to clean the cache.
- ioremap for ICN_BASE, remove static mapping in cpu-db8500.c.
- Fix ioremap of ICN_BASE and do iounmap after use.

ST-Ericsson ID: WP269815
Change-Id: Ie861a90ec790e95fb3992e560512661693548a43
Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/4168
Reviewed-by: Shujuan CHEN
Tested-by: Shujuan CHEN
Signed-off-by: Lee Jones
---
 .../mach-ux500/include/mach/tee_ta_start_modem.h |  48 +++++++++++++
 arch/arm/mach-ux500/tee_service_svp.c            |  66 ++++++++++++++++++
 arch/arm/mach-ux500/tee_ta_start_modem_svp.c     |  56 +++++++++++++++
 arch/arm/mach-ux500/tee_ux500.c                  |  79 ++++++++++++++++++++++
 4 files changed, 249 insertions(+)
 create mode 100644 arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h
 create mode 100644 arch/arm/mach-ux500/tee_service_svp.c
 create mode 100644 arch/arm/mach-ux500/tee_ta_start_modem_svp.c
 create mode 100644 arch/arm/mach-ux500/tee_ux500.c

diff --git a/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h b/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h
new file mode 100644
index 00000000000..6978b7314c5
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h
@@ -0,0 +1,48 @@
+/*
+ * Data types and interface for TEE application for starting the modem.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef TEE_TA_START_MODEM_H
+#define TEE_TA_START_MODEM_H
+
+#define COMMAND_ID_START_MODEM 0x00000001
+
+#define UUID_TEE_TA_START_MODEM_LOW 0x8AD94107
+#define UUID_TEE_TA_START_MODEM_MID 0x6E50
+#define UUID_TEE_TA_START_MODEM_HIGH 0x418E
+#define UUID_TEE_TA_START_MODEM_CLOCKSEQ \
+	{0xB1, 0x14, 0x75, 0x7D, 0x60, 0x21, 0xBD, 0x36}
+
+struct mcore_segment_descr {
+	void *segment;
+	void *hash;
+	u32 size;
+};
+
+struct access_image_descr {
+	void *elf_hdr;
+	void *pgm_hdr_tbl;
+	void *signature;
+	unsigned long nbr_segment;
+	struct mcore_segment_descr *descr;
+};
+
+/* TODO: To be redefined with only info needed by Secure world. */
+struct tee_ta_start_modem {
+	void *access_mem_start;
+	u32 shared_mem_size;
+	u32 access_private_mem_size;
+	struct access_image_descr access_image_descr;
+};
+
+/**
+ * This is the function to handle the modem release.
+ */ +int tee_ta_start_modem(struct tee_ta_start_modem *data); + +#endif + diff --git a/arch/arm/mach-ux500/tee_service_svp.c b/arch/arm/mach-ux500/tee_service_svp.c new file mode 100644 index 00000000000..aa65dd961a0 --- /dev/null +++ b/arch/arm/mach-ux500/tee_service_svp.c @@ -0,0 +1,66 @@ +/* + * TEE service to handle the calls to trusted applications in SVP. + * + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen + * License terms: GNU General Public License (GPL) version 2 + */ + +#include +#include +#include +#include +#include "mach/tee_ta_start_modem.h" + +static int cmp_uuid_start_modem(struct tee_uuid *uuid) +{ + int ret = -EINVAL; + + if (uuid == NULL) + return -EINVAL; + + /* This handles the calls to TA for start the modem */ + if ((uuid->timeLow == UUID_TEE_TA_START_MODEM_LOW) && + (uuid->timeMid == UUID_TEE_TA_START_MODEM_MID) && + (uuid->timeHiAndVersion == UUID_TEE_TA_START_MODEM_HIGH)) { + + u8 clockSeqAndNode[TEE_UUID_CLOCK_SIZE] = + UUID_TEE_TA_START_MODEM_CLOCKSEQ; + + ret = memcmp(uuid->clockSeqAndNode, clockSeqAndNode, + TEE_UUID_CLOCK_SIZE); + } + + return ret; +} + +int call_sec_world(struct tee_session *ts, int sec_cmd) +{ + int ret = 0; + + if (ts == NULL) + return -EINVAL; + + if (cmp_uuid_start_modem(ts->uuid)) + return -EINVAL; + + switch (ts->cmd) { + case COMMAND_ID_START_MODEM: + ret = tee_ta_start_modem((struct tee_ta_start_modem *) + ts->op); + if (ret) { + ts->err = TEED_ERROR_GENERIC; + ts->origin = TEED_ORIGIN_TEE_APPLICATION; + pr_err("tee_ta_start_modem() failed!\n"); + return ret; + } + break; + + default: + break; + } + + /* TODO: to handle more trusted applications. */ + + return ret; +} diff --git a/arch/arm/mach-ux500/tee_ta_start_modem_svp.c b/arch/arm/mach-ux500/tee_ta_start_modem_svp.c new file mode 100644 index 00000000000..12337b93154 --- /dev/null +++ b/arch/arm/mach-ux500/tee_ta_start_modem_svp.c @@ -0,0 +1,56 @@ +/* + * Trusted application for starting the modem. + * + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen + * License terms: GNU General Public License (GPL) version 2 + */ + +#include +#include +#include +#include +#include + +#include "mach/tee_ta_start_modem.h" + +static int reset_modem(unsigned long modem_start_addr) +{ + void __iomem *base = ioremap(U5500_ACCCON_BASE_SEC, 0x2FF); + if (!base) + return -ENOMEM; + + pr_info("[%s] Setting modem start address!\n", __func__); + writel(base + (U5500_ACCCON_CPUVEC_RESET_ADDR_OFFSET/sizeof(uint32_t)), + modem_start_addr); + + pr_info("[%s] resetting the modem!\n", __func__); + writel(base + (U5500_ACCCON_ACC_CPU_CTRL_OFFSET/sizeof(uint32_t)), 1); + + iounmap(base); + + return 0; +} + +int tee_ta_start_modem(struct tee_ta_start_modem *data) +{ + int ret = 0; + struct elfhdr *elfhdr; + void __iomem *vaddr; + + vaddr = ioremap((unsigned long)data->access_image_descr.elf_hdr, + sizeof(struct elfhdr)); + if (!vaddr) + return -ENOMEM; + + elfhdr = (struct elfhdr *)readl(vaddr); + pr_info("Reading in kernel:elfhdr 0x%x:elfhdr->entry=0x%x\n", + (uint32_t)elfhdr, (uint32_t)elfhdr->e_entry); + + pr_info("[%s] reset modem()...\n", __func__); + ret = reset_modem(elfhdr->e_entry); + + iounmap(vaddr); + + return ret; +} diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c new file mode 100644 index 00000000000..ab3782a323c --- /dev/null +++ b/arch/arm/mach-ux500/tee_ux500.c @@ -0,0 +1,79 @@ +/* + * TEE service to handle the calls to trusted applications. 
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Joakim Bech
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include
+#include
+#include
+
+#include
+
+#define BOOT_BRIDGE_FUNC (U8500_BOOT_ROM_BASE + 0x18300)
+
+#define ISSWAPI_EXECUTE_TA 0x11000001
+#define ISSWAPI_CLOSE_TA 0x11000002
+
+#define SEC_ROM_NO_FLAG_MASK 0x0000
+
+static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...)
+{
+	typedef u32 (*bridge_func)(u32, u32, va_list);
+	static bridge_func hw_sec_rom_pub_bridge;
+	va_list ap;
+	u32 ret;
+
+	hw_sec_rom_pub_bridge =
+		(bridge_func)((u32)IO_ADDRESS(BOOT_BRIDGE_FUNC));
+
+	va_start(ap, cfg);
+	ret = hw_sec_rom_pub_bridge(service_id, cfg, ap);
+	va_end(ap);
+
+	return ret;
+}
+
+int call_sec_world(struct tee_session *ts, int sec_cmd)
+{
+	/*
+	 * ts->ta and ts->uuid are set to NULL when opening the device,
+	 * hence it should be safe to just do the call here.
+	 */
+
+	switch (sec_cmd) {
+	case TEED_INVOKE:
+		if (!ts->uuid) {
+			call_sec_rom_bridge(ISSWAPI_EXECUTE_TA,
+					    SEC_ROM_NO_FLAG_MASK,
+					    virt_to_phys(&ts->id),
+					    NULL,
+					    virt_to_phys(ts->ta),
+					    ts->cmd,
+					    virt_to_phys((void *)(ts->op)),
+					    virt_to_phys((void *)(&ts->origin)));
+		} else {
+			call_sec_rom_bridge(ISSWAPI_EXECUTE_TA,
+					    SEC_ROM_NO_FLAG_MASK,
+					    virt_to_phys(&ts->id),
+					    virt_to_phys(ts->uuid),
+					    virt_to_phys(ts->ta),
+					    ts->cmd,
+					    virt_to_phys((void *)(ts->op)),
+					    virt_to_phys((void *)(&ts->origin)));
+		}
+		break;
+
+	case TEED_CLOSE_SESSION:
+		call_sec_rom_bridge(ISSWAPI_CLOSE_TA,
+				    SEC_ROM_NO_FLAG_MASK,
+				    ts->id,
+				    NULL,
+				    virt_to_phys(ts->ta),
+				    virt_to_phys((void *)(&ts->origin)));
+		break;
+	}
+
+	return 0;
+}
-- cgit v1.2.3

From 6e66274cbeb53f22a4dd08f9c1ac119c50d34768 Mon Sep 17 00:00:00 2001
From: Philippe Langlais
Date: Wed, 19 Oct 2011 13:20:08 +0200
Subject: crypto: ux500: Add crypto and hash acceleration

Adds device driver support for crypto and hash acceleration for the
u8500 chip.

ST-Ericsson Linux next: Builds and boots, but fails on cryp probe ER320876.
ST-Ericsson ID: AP 270734

crypto: ux500: cryp: Add power-awareness

Adds power awareness to the cryp part of the device driver for
accelerating crypto in u8500.

ST-Ericsson ID: ER277473

crypto: ux500: cryp: Fix of NULL pointer dereference in power-save

Fix of NULL pointer dereference in cryp_disable_power.

ST-Ericsson ID: ER277473

crypto: ux500: cryp/hash: Power-awareness

- Hash: Adds power awareness to the hash part of the device driver for
  accelerating hashing in u8500.
- Cryp:
  - Removed erroneous call to cryp_enable_power() in u8500_cryp_resume().
  - Added spinlocks to protect usage of current_ctx.
  - Corrected erroneous gotos in hw_cryp_noxts().
  - Added down_interruptible()/up() in suspend/resume, to make sure the
    device is not allocated during suspend.

ST-Ericsson ID: ER280692

crypto: ux500: AES ECB converted to ablk_cipher and supports DMA.

- DMA support for AES_ECB added.
- ablk_cipher support added to the driver. In this commit AES_ECB is
  using this asynchronous API. This is a must, since you will get
  miscellaneous sleep warning/error messages from the crypto testmgr,
  which runs sanity tests when loading a module in synchronous mode
  using DMA. Therefore DMA operations should use the ablk_cipher
  (asynchronous) API.
- Added scatterlist walk function for ablk_cipher for the non-DMA version.
- Added power awareness to DMA-related code in this cryp driver.
- Refactored code in hw_cryp_noxts with function calls when getting the
  device and when setting up the context.
- Renamed registers so they correspond to the names in the design spec.

ST-Ericsson ID: AP277474
ST-Ericsson Linux next: ER320876, v-ape regulator missing.

crypto: ux500: cryp: DES ECB converted to ablk_cipher.

ablk_cipher support added to the driver. In this commit DES_ECB and
DES3_ECB are using this asynchronous API. This removes the log printout:
BUG: sleeping function called from invalid context at kernel/mutex.c:94.

ST-Ericsson ID: ER322583

crypto: ux500: cryp: More algorithms converted to use ablk_cipher.

In this commit AES_CBC, AES_CTR, DES_CBC and DES3_CBC are using the
asynchronous API. These algorithms also support DMA, except for
givciphers.

ST-Ericsson ID: AP277474

crypto: ux500: cryp: Power save redesign, to improve performance

- Enable and disable power are now called only at the beginning and end
  of algorithm calls.
- Removed compiler warnings (uninitialized variables) visible using the
  Linux-next compiler. Note: those warnings are not visible using the
  default compiler in the Android forest.

ST-Ericsson ID: AP277474

crypto: Fixes after 2.6.35 merge

Signed-off-by: Berne Hebark
Signed-off-by: Lee Jones
Signed-off-by: Philippe Langlais

Conflicts:
	arch/arm/mach-ux500/board-mop500.c
---
 arch/arm/mach-ux500/include/mach/crypto-ux500.h |   16 +
 drivers/crypto/Kconfig                          |    2 +-
 drivers/crypto/ux500/Kconfig                    |   30 +-
 drivers/crypto/ux500/Makefile                   |   15 +-
 drivers/crypto/ux500/cryp/Makefile              |   13 +
 drivers/crypto/ux500/cryp/cryp.c                |  556 ++++++
 drivers/crypto/ux500/cryp/cryp.h                |  338 ++++
 drivers/crypto/ux500/cryp/cryp_core.c           | 2331 +++++++++++++++++++++++
 drivers/crypto/ux500/cryp/cryp_irq.c            |   45 +
 drivers/crypto/ux500/cryp/cryp_irq.h            |   31 +
 drivers/crypto/ux500/cryp/cryp_irqp.h           |  125 ++
 drivers/crypto/ux500/cryp/cryp_p.h              |  113 ++
 drivers/crypto/ux500/hash/Makefile              |   12 +-
 drivers/crypto/ux500/hash/hash_alg.h            |  209 +-
 drivers/crypto/ux500/hash/hash_core.c           | 1615 ++++++----------
 15 files changed, 4203 insertions(+), 1248 deletions(-)
 create mode 100644 arch/arm/mach-ux500/include/mach/crypto-ux500.h
 mode change 100755 => 100644 drivers/crypto/ux500/Kconfig
 mode change 100755 => 100644 drivers/crypto/ux500/Makefile
 create mode 100644 drivers/crypto/ux500/cryp/Makefile
 create mode 100644 drivers/crypto/ux500/cryp/cryp.c
 create mode 100644 drivers/crypto/ux500/cryp/cryp.h
 create mode 100644 drivers/crypto/ux500/cryp/cryp_core.c
 create mode 100644 drivers/crypto/ux500/cryp/cryp_irq.c
 create mode 100644 drivers/crypto/ux500/cryp/cryp_irq.h
 create mode 100644 drivers/crypto/ux500/cryp/cryp_irqp.h
 create mode 100644 drivers/crypto/ux500/cryp/cryp_p.h
 mode change 100755 => 100644 drivers/crypto/ux500/hash/Makefile
 mode change 100755 => 100644 drivers/crypto/ux500/hash/hash_alg.h
 mode change 100755 => 100644 drivers/crypto/ux500/hash/hash_core.c

diff --git a/arch/arm/mach-ux500/include/mach/crypto-ux500.h b/arch/arm/mach-ux500/include/mach/crypto-ux500.h
new file mode 100644
index 00000000000..57da88398d5
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/crypto-ux500.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Joakim Bech for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _CRYPTO_UX500_H
+#include
+#include
+
+struct cryp_platform_data {
+	struct stedma40_chan_cfg mem_to_engine;
+	struct stedma40_chan_cfg engine_to_mem;
+};
+
+#endif
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 52e0bf5738e..638648816c9 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -298,7 +298,7 @@ config
CRYPTO_DEV_TEGRA_AES config CRYPTO_DEV_UX500 tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration" - #depends on ARCH_U8500 + depends on ARCH_U8500 select CRYPTO_ALGAPI help Driver for ST-Ericsson UX500 crypto engine. diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig old mode 100755 new mode 100644 index 4ac419757d0..165a03d46c0 --- a/drivers/crypto/ux500/Kconfig +++ b/drivers/crypto/ux500/Kconfig @@ -1,15 +1,29 @@ +# +# Copyright (C) ST-Ericsson SA 2010 +# Author: Shujuan Chen (shujuan.chen@stericsson.com) +# License terms: GNU General Public License (GPL) version 2 +# + +config CRYPTO_DEV_UX500_CRYP + tristate "UX500 crypto driver for CRYP block" + depends on CRYPTO_DEV_UX500 + select CRYPTO_DES + help + This is the driver for the crypto block CRYP. config CRYPTO_DEV_UX500_HASH tristate "UX500 crypto driver for HASH block" - depends on ARCH_U8500 - select CRYPTO_ALGAPI + depends on CRYPTO_DEV_UX500 select CRYPTO_HASH select CRYPTO_HMAC - help - This selects the UX500 hash driver for the HASH hardware. - Depends on U8500/STM DMA if running in DMA mode. + help + This selects the UX500 hash driver for the HASH hardware. + Depends on U8500/STM DMA if running in DMA mode. -config CRYPTO_DEV_UX500_DEBUG_INFO - tristate "Enable UX500 crypto drivers debug info" +config CRYPTO_DEV_UX500_DEBUG + bool "Activate ux500 platform debug-mode for crypto and hash block" + depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH + default n help - This is to enable the debug info for UX500 crypto drivers. + Say Y if you want to add debug prints to ux500_hash and + ux500_cryp devices. diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile old mode 100755 new mode 100644 index 4c187857120..b9a365bade8 --- a/drivers/crypto/ux500/Makefile +++ b/drivers/crypto/ux500/Makefile @@ -1,11 +1,8 @@ - -ifeq ($(CONFIG_CRYPTO_DEV_UX500_DEBUG_INFO),y) - EXTRA_CFLAGS += -D__DEBUG -else - EXTRA_CFLAGS += -D__RELEASE -endif +# +# Copyright (C) ST-Ericsson SA 2010 +# Author: Shujuan Chen (shujuan.chen@stericsson.com) +# License terms: GNU General Public License (GPL) version 2 +# obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/ - - - +obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += cryp/ diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile new file mode 100644 index 00000000000..fd5e6df3861 --- /dev/null +++ b/drivers/crypto/ux500/cryp/Makefile @@ -0,0 +1,13 @@ +#/* +# * Copyright (C) ST-Ericsson SA 2010 +# * Author: shujuan.chen@stericsson.com for ST-Ericsson. +# * License terms: GNU General Public License (GPL) version 2 */ + +ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG +CFLAGS_cryp_core.o := -DDEBUG -O0 +CFLAGS_cryp.o := -DDEBUG -O0 +CFLAGS_cryp_irq.o := -DDEBUG -O0 +endif + +obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += u8500_cryp.o +u8500_cryp-objs := cryp.o cryp_irq.o cryp_core.o diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c new file mode 100644 index 00000000000..94928f7efce --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -0,0 +1,556 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. 
+ * License terms: GNU General Public License (GPL) version 2 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "cryp_p.h" +#include "cryp.h" + +/** + * cryp_wait_until_done - wait until the device logic is not busy + */ +void cryp_wait_until_done(struct cryp_device_data *device_data) +{ + while (cryp_is_logic_busy(device_data)) + cpu_relax(); +} + +/** + * cryp_check - This routine checks Peripheral and PCell Id + * @device_data: Pointer to the device data struct for base address. + */ +int cryp_check(struct cryp_device_data *device_data) +{ + if (NULL == device_data) + return -EINVAL; + + /* Check Peripheral and Pcell Id Register for CRYP */ + if ((CRYP_PERIPHERAL_ID0 == readl(&device_data->base->periphId0)) + && (CRYP_PERIPHERAL_ID1 == readl(&device_data->base->periphId1)) + && (CRYP_PERIPHERAL_ID2 == readl(&device_data->base->periphId2)) + && (CRYP_PERIPHERAL_ID3 == readl(&device_data->base->periphId3)) + && (CRYP_PCELL_ID0 == readl(&device_data->base->pcellId0)) + && (CRYP_PCELL_ID1 == readl(&device_data->base->pcellId1)) + && (CRYP_PCELL_ID2 == readl(&device_data->base->pcellId2)) + && (CRYP_PCELL_ID3 == readl(&device_data->base->pcellId3))) { + return 0; + } + + return -EPERM; +} + +/** + * cryp_reset - This routine loads the cryp register with the default values + * @device_data: Pointer to the device data struct for base address. + */ +void cryp_reset(struct cryp_device_data *device_data) +{ + writel(CRYP_DMACR_DEFAULT, &device_data->base->dmacr); + writel(CRYP_IMSC_DEFAULT, &device_data->base->imsc); + + writel(CRYP_KEY_DEFAULT, &device_data->base->key_1_l); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_1_r); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_2_l); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_2_r); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_3_l); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_3_r); + writel(CRYP_INIT_VECT_DEFAULT, &device_data->base->init_vect_0_l); + writel(CRYP_INIT_VECT_DEFAULT, &device_data->base->init_vect_0_r); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_4_l); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_4_r); + writel(CRYP_INIT_VECT_DEFAULT, &device_data->base->init_vect_1_l); + writel(CRYP_INIT_VECT_DEFAULT, &device_data->base->init_vect_1_r); + + /* Last step since the protection mode bits need to be modified. */ + writel(CRYP_CR_DEFAULT | CRYP_CR_FFLUSH, &device_data->base->cr); + + /* + * CRYP_INFIFO_READY_MASK is the expected value on the status register + * when starting a new calculation, which means Input FIFO is not full + * and input FIFO is empty. + */ + while (readl(&device_data->base->status) != CRYP_INFIFO_READY_MASK) + cpu_relax(); +} + +/** + * cryp_activity - This routine enables/disable the cryptography function. + * @device_data: Pointer to the device data struct for base address. + * @cryp_activity: Enable/Disable functionality + */ +void cryp_activity(struct cryp_device_data *device_data, + enum cryp_crypen cryp_crypen) +{ + CRYP_PUT_BITS(&device_data->base->cr, + cryp_crypen, + CRYP_CRYPEN_POS, + CRYP_CRYPEN_MASK); +} + +/** + * cryp_start - starts the computation + * @device_data: Pointer to the device data struct for base address. + * @cryp_start: Enable/Disable functionality + */ +void cryp_start(struct cryp_device_data *device_data) +{ + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_START_ENABLE, + CRYP_START_POS, + CRYP_START_MASK); +} + +/** + * cryp_init_signal - This routine submit the initialization values. 
+ * @device_data: Pointer to the device data struct for base address. + * @cryp_init_bit: Enable/Disable init signal + */ +void cryp_init_signal(struct cryp_device_data *device_data, + enum cryp_init cryp_init_bit) +{ + CRYP_PUT_BITS(&device_data->base->cr, + cryp_init_bit, + CRYP_INIT_POS, + CRYP_INIT_MASK); +} + +/** + * cryp_key_preparation - This routine prepares key for decryption. + * @device_data: Pointer to the device data struct for base address. + * @cryp_prepkey: Enable/Disable + */ +void cryp_key_preparation(struct cryp_device_data *device_data, + enum cryp_key_prep cryp_prepkey) +{ + CRYP_PUT_BITS(&device_data->base->cr, + cryp_prepkey, + CRYP_KSE_POS, + CRYP_KSE_MASK); +} + +/** + * cryp_flush_inoutfifo - Resets both the input and the output FIFOs + * @device_data: Pointer to the device data struct for base address. + */ +void cryp_flush_inoutfifo(struct cryp_device_data *device_data) +{ + CRYP_SET_BITS(&device_data->base->cr, CRYP_FIFO_FLUSH_MASK); +} + +/** + * cryp_set_dir - + * @device_data: Pointer to the device data struct for base address. + * @dir: Crypto direction, encrypt/decrypt + */ +void cryp_set_dir(struct cryp_device_data *device_data, int dir) +{ + CRYP_PUT_BITS(&device_data->base->cr, + dir, + CRYP_ENC_DEC_POS, + CRYP_ENC_DEC_MASK); + + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_DATA_TYPE_8BIT_SWAP, + CRYP_DATA_TYPE_POS, + CRYP_DATA_TYPE_MASK); +} + +/** + * cryp_cen_flush - + * @device_data: Pointer to the device data struct for base address. + */ +void cryp_cen_flush(struct cryp_device_data *device_data) +{ + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_STATE_DISABLE, + CRYP_KEY_ACCESS_POS, + CRYP_KEY_ACCESS_MASK); + CRYP_SET_BITS(&device_data->base->cr, + CRYP_FIFO_FLUSH_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_CRYPEN_ENABLE, + CRYP_CRYPEN_POS, + CRYP_CRYPEN_MASK); +} + +/** + * cryp_set_configuration - This routine set the cr CRYP IP + * @device_data: Pointer to the device data struct for base address. 
+ * @p_cryp_config: Pointer to the configuration parameter + */ +int cryp_set_configuration(struct cryp_device_data *device_data, + struct cryp_config *p_cryp_config) +{ + if (NULL == device_data) + return -EINVAL; + if (NULL == p_cryp_config) + return -EINVAL; + + /* Since more than one bit is written macro put_bits is used*/ + CRYP_PUT_BITS(&device_data->base->cr, + p_cryp_config->key_access, + CRYP_KEY_ACCESS_POS, + CRYP_KEY_ACCESS_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + p_cryp_config->key_size, + CRYP_KEY_SIZE_POS, + CRYP_KEY_SIZE_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + p_cryp_config->data_type, + CRYP_DATA_TYPE_POS, + CRYP_DATA_TYPE_MASK); + + /* Prepare key for decryption */ + if ((CRYP_ALGORITHM_DECRYPT == p_cryp_config->encrypt_or_decrypt) && + ((CRYP_ALGO_AES_ECB == p_cryp_config->algo_mode) || + (CRYP_ALGO_AES_CBC == p_cryp_config->algo_mode))) { + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_ALGO_AES_ECB, + CRYP_ALGOMODE_POS, + CRYP_ALGOMODE_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_CRYPEN_ENABLE, + CRYP_CRYPEN_POS, + CRYP_CRYPEN_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + KSE_ENABLED, + CRYP_KSE_POS, + CRYP_KSE_MASK); + + cryp_wait_until_done(device_data); + + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_CRYPEN_DISABLE, + CRYP_CRYPEN_POS, + CRYP_CRYPEN_MASK); + } + + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_CRYPEN_ENABLE, + CRYP_CRYPEN_POS, + CRYP_CRYPEN_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + p_cryp_config->algo_mode, + CRYP_ALGOMODE_POS, + CRYP_ALGOMODE_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + p_cryp_config->encrypt_or_decrypt, + CRYP_ENC_DEC_POS, + CRYP_ENC_DEC_MASK); + + return 0; +} + +/** + * cryp_get_configuration - gets the parameter of the control register of IP + * @device_data: Pointer to the device data struct for base address. + * @p_cryp_config: Gets the configuration parameter from cryp ip. + */ +int cryp_get_configuration(struct cryp_device_data *device_data, + struct cryp_config *p_cryp_config) +{ + if (NULL == p_cryp_config) + return -EINVAL; + + p_cryp_config->key_access = + ((readl(&device_data->base->cr) & CRYP_KEY_ACCESS_MASK) ? + CRYP_STATE_ENABLE : + CRYP_STATE_DISABLE); + p_cryp_config->key_size = + ((readl(&device_data->base->cr) & CRYP_KEY_SIZE_MASK) >> + CRYP_KEY_SIZE_POS); + + p_cryp_config->encrypt_or_decrypt = + ((readl(&device_data->base->cr) & CRYP_ENC_DEC_MASK) ? + CRYP_ALGORITHM_DECRYPT : + CRYP_ALGORITHM_ENCRYPT); + + p_cryp_config->data_type = + ((readl(&device_data->base->cr) & CRYP_DATA_TYPE_MASK) >> + CRYP_DATA_TYPE_POS); + p_cryp_config->algo_mode = + ((readl(&device_data->base->cr) & CRYP_ALGOMODE_MASK) >> + CRYP_ALGOMODE_POS); + + return 0; +} + +/** + * cryp_configure_protection - set the protection bits in the CRYP logic. + * @device_data: Pointer to the device data struct for base address. + * @p_protect_config: Pointer to the protection mode and + * secure mode configuration + */ +int cryp_configure_protection(struct cryp_device_data *device_data, + struct cryp_protection_config *p_protect_config) +{ + if (NULL == p_protect_config) + return -EINVAL; + + CRYP_WRITE_BIT(&device_data->base->cr, + (u32) p_protect_config->secure_access, + CRYP_SECURE_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + p_protect_config->privilege_access, + CRYP_PRLG_POS, + CRYP_PRLG_MASK); + + return 0; +} + +/** + * cryp_is_logic_busy - returns the busy status of the CRYP logic + * @device_data: Pointer to the device data struct for base address. 
+ */ +int cryp_is_logic_busy(struct cryp_device_data *device_data) +{ + return CRYP_TEST_BITS(&device_data->base->status, + CRYP_BUSY_STATUS_MASK); +} + +/** + * cryp_get_status - This routine returns the complete status of the cryp logic + * @device_data: Pointer to the device data struct for base address. + */ +/* +int cryp_get_status(struct cryp_device_data *device_data) +{ + return (int) readl(device_data->base->status); +} +*/ + +/** + * cryp_configure_for_dma - configures the CRYP IP for DMA operation + * @device_data: Pointer to the device data struct for base address. + * @dma_req: Specifies the DMA request type value. + */ +void cryp_configure_for_dma(struct cryp_device_data *device_data, + enum cryp_dma_req_type dma_req) +{ + CRYP_SET_BITS(&device_data->base->dmacr, + (u32) dma_req); +} + +/** + * cryp_configure_key_values - configures the key values for CRYP operations + * @device_data: Pointer to the device data struct for base address. + * @key_reg_index: Key value index register + * @key_value: The key value struct + */ +int cryp_configure_key_values(struct cryp_device_data *device_data, + enum cryp_key_reg_index key_reg_index, + struct cryp_key_value key_value) +{ + while (cryp_is_logic_busy(device_data)) + cpu_relax(); + + switch (key_reg_index) { + case CRYP_KEY_REG_1: + writel(key_value.key_value_left, + &device_data->base->key_1_l); + writel(key_value.key_value_right, + &device_data->base->key_1_r); + break; + case CRYP_KEY_REG_2: + writel(key_value.key_value_left, + &device_data->base->key_2_l); + writel(key_value.key_value_right, + &device_data->base->key_2_r); + break; + case CRYP_KEY_REG_3: + writel(key_value.key_value_left, + &device_data->base->key_3_l); + writel(key_value.key_value_right, + &device_data->base->key_3_r); + break; + case CRYP_KEY_REG_4: + writel(key_value.key_value_left, + &device_data->base->key_4_l); + writel(key_value.key_value_right, + &device_data->base->key_4_r); + break; + default: + return -EINVAL; + } + + return 0; + +} + +/** + * cryp_configure_init_vector - configures the initialization vector register + * @device_data: Pointer to the device data struct for base address. + * @init_vector_index: Specifies the index of the init vector. + * @init_vector_value: Specifies the value for the init vector. + */ +int cryp_configure_init_vector(struct cryp_device_data *device_data, + enum cryp_init_vector_index + init_vector_index, + struct cryp_init_vector_value + init_vector_value) +{ + while (cryp_is_logic_busy(device_data)) + cpu_relax(); + + switch (init_vector_index) { + case CRYP_INIT_VECTOR_INDEX_0: + writel(init_vector_value.init_value_left, + &device_data->base->init_vect_0_l); + writel(init_vector_value.init_value_right, + &device_data->base->init_vect_0_r); + break; + case CRYP_INIT_VECTOR_INDEX_1: + writel(init_vector_value.init_value_left, + &device_data->base->init_vect_1_l); + writel(init_vector_value.init_value_right, + &device_data->base->init_vect_1_r); + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * cryp_prep_ctx_mgmt - Prepares for handling the context of the block + * @device_data: Pointer to the device data struct for base address. 
+ */ +static void cryp_prep_ctx_mgmt(struct cryp_device_data *device_data) +{ + cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH); + cryp_activity(device_data, CRYP_CRYPEN_DISABLE); + cryp_wait_until_done(device_data); +} + +/** + * cryp_save_device_context - Store hardware registers and + * other device context parameter + * @device_data: Pointer to the device data struct for base address. + * @ctx: Crypto device context + */ +void cryp_save_device_context(struct cryp_device_data *device_data, + struct cryp_device_context *ctx) +{ + struct cryp_register *src_reg = device_data->base; + + cryp_prep_ctx_mgmt(device_data); + + ctx->din = readl(&src_reg->din); + + ctx->dout = readl(&src_reg->dout); + + ctx->cr = readl(&src_reg->cr); + ctx->dmacr = readl(&src_reg->dmacr); + ctx->imsc = readl(&src_reg->imsc); + + ctx->key_1_l = readl(&src_reg->key_1_l); + ctx->key_1_r = readl(&src_reg->key_1_r); + ctx->key_2_l = readl(&src_reg->key_2_l); + ctx->key_2_r = readl(&src_reg->key_2_r); + ctx->key_3_l = readl(&src_reg->key_3_l); + ctx->key_3_r = readl(&src_reg->key_3_r); + ctx->key_4_l = readl(&src_reg->key_4_l); + ctx->key_4_r = readl(&src_reg->key_4_r); + + ctx->init_vect_0_l = readl(&src_reg->init_vect_0_l); + ctx->init_vect_0_r = readl(&src_reg->init_vect_0_r); + ctx->init_vect_1_l = readl(&src_reg->init_vect_1_l); + ctx->init_vect_1_r = readl(&src_reg->init_vect_1_r); +} + +/** + * cryp_restore_device_context - Restore hardware registers and + * other device context parameter + * @device_data: Pointer to the device data struct for base address. + * @ctx: Crypto device context + */ +void cryp_restore_device_context(struct cryp_device_data *device_data, + struct cryp_device_context *ctx) +{ + struct cryp_register *reg = device_data->base; + + cryp_prep_ctx_mgmt(device_data); + + writel(ctx->din, ®->din); + writel(ctx->dout, ®->dout); + writel(ctx->cr, ®->cr); + writel(ctx->dmacr, ®->dmacr); + writel(ctx->imsc, ®->imsc); + writel(ctx->key_1_l, ®->key_1_l); + writel(ctx->key_1_r, ®->key_1_r); + writel(ctx->key_2_l, ®->key_2_l); + writel(ctx->key_2_r, ®->key_2_r); + writel(ctx->key_3_l, ®->key_3_l); + writel(ctx->key_3_r, ®->key_3_r); + writel(ctx->key_4_l, ®->key_4_l); + writel(ctx->key_4_r, ®->key_4_r); + writel(ctx->init_vect_0_l, ®->init_vect_0_l); + writel(ctx->init_vect_0_r, ®->init_vect_0_r); + writel(ctx->init_vect_1_l, ®->init_vect_1_l); + writel(ctx->init_vect_1_r, ®->init_vect_1_r); +} + +/** + * cryp_write_indata - This routine writes 32 bit data into the data input + * register of the cryptography IP. + * @device_data: Pointer to the device data struct for base address. + * @write_data: Data word to write + */ +int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data) +{ + if (NULL == device_data) + return -EINVAL; + writel(write_data, &device_data->base->din); + + return 0; +} + +/** + * cryp_read_indata - This routine reads the 32 bit data from the data input + * register into the specified location. + * @device_data: Pointer to the device data struct for base address. + * @p_read_data: Read the data from the input FIFO. + */ +int cryp_read_indata(struct cryp_device_data *device_data, u32 *p_read_data) +{ + if (NULL == device_data) + return -EINVAL; + if (NULL == p_read_data) + return -EINVAL; + + *p_read_data = readl(&device_data->base->din); + + return 0; +} + +/** + * cryp_read_outdata - This routine reads the data from the data output + * register of the CRYP logic + * @device_data: Pointer to the device data struct for base address. 
+ * @read_data: Read the data from the output FIFO. + */ +int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data) +{ + if (NULL == device_data) + return -EINVAL; + if (NULL == read_data) + return -EINVAL; + + *read_data = readl(&device_data->base->dout); + + return 0; +} diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h new file mode 100644 index 00000000000..2d98923071c --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp.h @@ -0,0 +1,338 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef _CRYP_H_ +#define _CRYP_H_ + +#include +#include +#include +#include + +/* Module Defines */ +#define CRYP_MODULE_NAME "CRYP HCL Module" + +#define DEV_DBG_NAME "crypX crypX:" + +/* CRYP enable/disable */ +enum cryp_crypen { + CRYP_CRYPEN_DISABLE = 0, + CRYP_CRYPEN_ENABLE = 1 +}; + +/* CRYP Start Computation enable/disable */ +enum cryp_start { + CRYP_START_DISABLE = 0, + CRYP_START_ENABLE = 1 +}; + +/* CRYP Init Signal enable/disable */ +enum cryp_init { + CRYP_INIT_DISABLE = 0, + CRYP_INIT_ENABLE = 1 +}; + +/* Cryp State enable/disable */ +enum cryp_state { + CRYP_STATE_DISABLE = 0, + CRYP_STATE_ENABLE = 1 +}; + +/* Key preparation bit enable */ +enum cryp_key_prep { + KSE_DISABLED, + KSE_ENABLED +}; + +/* Key size for AES*/ +#define CRYP_KEY_SIZE_128 (0) +#define CRYP_KEY_SIZE_192 (1) +#define CRYP_KEY_SIZE_256 (2) + +/* Data type Swap */ +#define CRYP_DATA_TYPE_32BIT_SWAP (0) +#define CRYP_DATA_TYPE_16BIT_SWAP (1) +#define CRYP_DATA_TYPE_8BIT_SWAP (2) +#define CRYP_DATA_TYPE_BIT_SWAP (3) + +/* AES modes */ +enum cryp_algo_mode { + CRYP_ALGO_TDES_ECB, + CRYP_ALGO_TDES_CBC, + CRYP_ALGO_DES_ECB, + CRYP_ALGO_DES_CBC, + CRYP_ALGO_AES_ECB, + CRYP_ALGO_AES_CBC, + CRYP_ALGO_AES_CTR, + CRYP_ALGO_AES_XTS +}; + +/* Cryp Encryption or Decryption */ +enum cryp_algorithm_dir { + CRYP_ALGORITHM_ENCRYPT, + CRYP_ALGORITHM_DECRYPT +}; + +/* Hardware access method */ +enum cryp_mode { + CRYP_MODE_POLLING, + CRYP_MODE_INTERRUPT, + CRYP_MODE_DMA +}; + +/** + * struct cryp_config - + * @key_access: Cryp state enable/disable + * @key_size: Key size for AES + * @data_type: Data type Swap + * @algo_mode: AES modes + * @encrypt_or_decrypt: Cryp Encryption or Decryption + * + * CRYP configuration structure to be passed to set configuration + */ +struct cryp_config { + enum cryp_state key_access; + int key_size; + int data_type; + enum cryp_algo_mode algo_mode; + enum cryp_algorithm_dir encrypt_or_decrypt; +}; + +/** + * struct cryp_protection_config - + * @privilege_access: Privileged cryp state enable/disable + * @secure_access: Secure cryp state enable/disable + * + * Protection configuration structure for setting privilage access + */ +struct cryp_protection_config { + enum cryp_state privilege_access; + enum cryp_state secure_access; +}; + +/* Cryp status */ +enum cryp_status_id { + CRYP_STATUS_BUSY = 0x10, + CRYP_STATUS_OUTPUT_FIFO_FULL = 0x08, + CRYP_STATUS_OUTPUT_FIFO_NOT_EMPTY = 0x04, + CRYP_STATUS_INPUT_FIFO_NOT_FULL = 0x02, + CRYP_STATUS_INPUT_FIFO_EMPTY = 0x01 +}; + +/* Cryp DMA interface */ +enum cryp_dma_req_type { + CRYP_DMA_DISABLE_BOTH, + CRYP_DMA_ENABLE_IN_DATA, + CRYP_DMA_ENABLE_OUT_DATA, + CRYP_DMA_ENABLE_BOTH_DIRECTIONS +}; + +enum cryp_dma_channel { + CRYP_DMA_RX = 0, + 
CRYP_DMA_TX +}; + +/* Key registers */ +enum cryp_key_reg_index { + CRYP_KEY_REG_1, + CRYP_KEY_REG_2, + CRYP_KEY_REG_3, + CRYP_KEY_REG_4 +}; + +/* Key register left and right */ +struct cryp_key_value { + u32 key_value_left; + u32 key_value_right; +}; + +/* Cryp Initialization structure */ +enum cryp_init_vector_index { + CRYP_INIT_VECTOR_INDEX_0, + CRYP_INIT_VECTOR_INDEX_1 +}; + +/* struct cryp_init_vector_value - + * @init_value_left + * @init_value_right + * */ +struct cryp_init_vector_value { + u32 init_value_left; + u32 init_value_right; +}; + +/** + * struct cryp_device_context - structure for a cryp context. + * @cr: control register + * @dmacr: DMA control register + * @imsc: Interrupt mask set/clear register + * @key_1_l: Key 1l register + * @key_1_r: Key 1r register + * @key_2_l: Key 2l register + * @key_2_r: Key 2r register + * @key_3_l: Key 3l register + * @key_3_r: Key 3r register + * @key_4_l: Key 4l register + * @key_4_r: Key 4r register + * @init_vect_0_l: Initialization vector 0l register + * @init_vect_0_r: Initialization vector 0r register + * @init_vect_1_l: Initialization vector 1l register + * @init_vect_1_r: Initialization vector 0r register + * @din: Data in register + * @dout: Data out register + * + * CRYP power management specifc structure. + */ +struct cryp_device_context { + u32 cr; + u32 dmacr; + u32 imsc; + + u32 key_1_l; + u32 key_1_r; + u32 key_2_l; + u32 key_2_r; + u32 key_3_l; + u32 key_3_r; + u32 key_4_l; + u32 key_4_r; + + u32 init_vect_0_l; + u32 init_vect_0_r; + u32 init_vect_1_l; + u32 init_vect_1_r; + + u32 din; + u32 dout; +}; + +struct cryp_dma { + dma_cap_mask_t mask; + struct completion cryp_dma_complete; + struct dma_chan *chan_cryp2mem; + struct dma_chan *chan_mem2cryp; + struct stedma40_chan_cfg *cfg_cryp2mem; + struct stedma40_chan_cfg *cfg_mem2cryp; + int sg_src_len; + int sg_dst_len; + struct scatterlist *sg_src; + struct scatterlist *sg_dst; + int nents_src; + int nents_dst; +}; + +/** + * struct cryp_device_data - structure for a cryp device. + * @base: Pointer to the hardware base address. + * @dev: Pointer to the devices dev structure. + * @cryp_irq_complete: Pointer to an interrupt completion structure. + * @clk: Pointer to the device's clock control. + * @pwr_regulator: Pointer to the device's power control. + * @power_status: Current status of the power. + * @ctx_lock: Lock for current_ctx. + * @current_ctx: Pointer to the currently allocated context. + * @list_node: For inclusion into a klist. + * @dma: The dma structure holding channel configuration. + * @power_state: TRUE = power state on, FALSE = power state off. + * @power_state_mutex: Mutex for power_state. + * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx. 
+ */ +struct cryp_device_data { + struct cryp_register __iomem *base; + struct device *dev; + struct completion cryp_irq_complete; + struct clk *clk; + struct regulator *pwr_regulator; + int power_status; + struct spinlock ctx_lock; + struct cryp_ctx *current_ctx; + struct klist_node list_node; + struct cryp_dma dma; + bool power_state; + struct mutex power_state_mutex; + bool restore_dev_ctx; +}; + +void cryp_wait_until_done(struct cryp_device_data *device_data); + +/* Initialization functions */ + +int cryp_check(struct cryp_device_data *device_data); + +void cryp_reset(struct cryp_device_data *device_data); + +void cryp_activity(struct cryp_device_data *device_data, + enum cryp_crypen cryp_crypen); + +void cryp_start(struct cryp_device_data *device_data); + +void cryp_init_signal(struct cryp_device_data *device_data, + enum cryp_init cryp_init); + +void cryp_key_preparation(struct cryp_device_data *device_data, + enum cryp_key_prep cryp_key_prep); + +void cryp_flush_inoutfifo(struct cryp_device_data *device_data); + +void cryp_cen_flush(struct cryp_device_data *device_data); + +void cryp_set_dir(struct cryp_device_data *device_data, int dir); + +int cryp_set_configuration(struct cryp_device_data *device_data, + struct cryp_config *p_cryp_config); + +int cryp_get_configuration(struct cryp_device_data *device_data, + struct cryp_config *p_cryp_config); + +void cryp_configure_for_dma(struct cryp_device_data *device_data, + enum cryp_dma_req_type dma_req); + +int cryp_configure_key_values(struct cryp_device_data *device_data, + enum cryp_key_reg_index key_reg_index, + struct cryp_key_value key_value); + +int cryp_configure_init_vector(struct cryp_device_data *device_data, + enum cryp_init_vector_index + init_vector_index, + struct cryp_init_vector_value + init_vector_value); + +int cryp_configure_protection(struct cryp_device_data *device_data, + struct cryp_protection_config *p_protect_config); + +/* Power management funtions */ +void cryp_save_device_context(struct cryp_device_data *device_data, + struct cryp_device_context *ctx); + +void cryp_restore_device_context(struct cryp_device_data *device_data, + struct cryp_device_context *ctx); + +/* Data transfer and status bits. */ +int cryp_is_logic_busy(struct cryp_device_data *device_data); + +int cryp_get_status(struct cryp_device_data *device_data); + +/** + * cryp_write_indata - This routine writes 32 bit data into the data input + * register of the cryptography IP. + * @device_data: Pointer to the device data struct for base address. + * @write_data: Data to write. + */ +int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data); + +/** + * cryp_read_outdata - This routine reads the data from the data output + * register of the CRYP logic + * @device_data: Pointer to the device data struct for base address. + * @read_data: Read the data from the output FIFO. + */ +int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data); + +#endif /* _CRYP_H_ */ diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c new file mode 100644 index 00000000000..197bb416067 --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -0,0 +1,2331 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. 
+ * License terms: GNU General Public License (GPL) version 2 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include "cryp_p.h" +#include "cryp.h" + +#define CRYP_MAX_KEY_SIZE 32 +#define BYTES_PER_WORD 4 + +static int cryp_mode; + +static DEFINE_KLIST(cryp_device_list, NULL, NULL); + +static struct stedma40_chan_cfg *mem_to_engine; +static struct stedma40_chan_cfg *engine_to_mem; + +/** + * struct cryp_driver_data - data specific to the driver. + * + * @cryp_device_list: A list of registered devices to choose from. + * @device_allocation: A semaphore initialized with number of devices. + */ +struct cryp_driver_data { + struct klist device_list; + struct semaphore device_allocation; +}; + +/** + * struct cryp_ctx - Crypto context + * @config: Crypto mode. + * @key[CRYP_MAX_KEY_SIZE]: Key. + * @keylen: Length of key. + * @iv: Pointer to initialization vector. + * @indata: Pointer to indata. + * @outdata: Pointer to outdata. + * @datalen: Length of indata. + * @outlen: Length of outdata. + * @blocksize: Size of blocks. + * @updated: Updated flag. + * @dev_ctx: Device dependent context. + * @device: Pointer to the device. + */ +struct cryp_ctx { + struct cryp_config config; + u8 key[CRYP_MAX_KEY_SIZE]; + u32 keylen; + u8 *iv; + const u8 *indata; + u8 *outdata; + u32 datalen; + u32 outlen; + u32 blocksize; + u8 updated; + struct cryp_device_context dev_ctx; + struct cryp_device_data *device; +}; + +static struct cryp_driver_data driver_data; + +/** + * uint8p_to_uint32_be - 4*uint8 to uint32 big endian + * @in: Data to convert. + */ +static inline u32 uint8p_to_uint32_be(u8 *in) +{ + return (u32)in[0]<<24 | + ((u32)in[1]<<16) | + ((u32)in[2]<<8) | + ((u32)in[3]); +} + +/** + * uint8p_to_uint32_le - 4*uint8 to uint32 little endian + * @in: Data to convert. + */ +static inline u32 uint8p_to_uint32_le(u8 *in) +{ + return (u32)in[3]<<24 | + ((u32)in[2]<<16) | + ((u32)in[1]<<8) | + ((u32)in[0]); +} + +static inline void uint32_to_uint8p_be(u32 in, u8 *out) +{ + out[0] = (u8)(in>>24); + out[1] = (u8)(in>>16); + out[2] = (u8)(in>>8); + out[3] = (u8) in; +} + +static inline void uint32_to_uint8p_le(u32 in, u8 *out) +{ + out[3] = (u8)(in>>24); + out[2] = (u8)(in>>16); + out[1] = (u8)(in>>8); + out[0] = (u8) in; +} + +/** + * swap_bits_in_byte - mirror the bits in a byte + * @b: the byte to be mirrored + * + * The bits are swapped the following way: + * Byte b include bits 0-7, nibble 1 (n1) include bits 0-3 and + * nibble 2 (n2) bits 4-7. + * + * Nibble 1 (n1): + * (The "old" (moved) bit is replaced with a zero) + * 1. Move bit 6 and 7, 4 positions to the left. + * 2. Move bit 3 and 5, 2 positions to the left. + * 3. Move bit 1-4, 1 position to the left. + * + * Nibble 2 (n2): + * 1. Move bit 0 and 1, 4 positions to the right. + * 2. Move bit 2 and 4, 2 positions to the right. + * 3. Move bit 3-6, 1 position to the right. + * + * Combine the two nibbles to a complete and swapped byte. 
+ */ + +static inline u8 swap_bits_in_byte(u8 b) +{ +#define R_SHIFT_4_MASK (0xc0) /* Bits 6 and 7, right shift 4 */ +#define R_SHIFT_2_MASK (0x28) /* (After right shift 4) Bits 3 and 5, + right shift 2 */ +#define R_SHIFT_1_MASK (0x1e) /* (After right shift 2) Bits 1-4, + right shift 1 */ +#define L_SHIFT_4_MASK (0x03) /* Bits 0 and 1, left shift 4 */ +#define L_SHIFT_2_MASK (0x14) /* (After left shift 4) Bits 2 and 4, + left shift 2 */ +#define L_SHIFT_1_MASK (0x78) /* (After left shift 1) Bits 3-6, + left shift 1 */ + + u8 n1; + u8 n2; + + /* Swap most significant nibble */ + /* Right shift 4, bits 6 and 7 */ + n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4)); + /* Right shift 2, bits 3 and 5 */ + n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2)); + /* Right shift 1, bits 1-4 */ + n1 = (n1 & R_SHIFT_1_MASK) >> 1; + + /* Swap least significant nibble */ + /* Left shift 4, bits 0 and 1 */ + n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4)); + /* Left shift 2, bits 2 and 4 */ + n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2)); + /* Left shift 1, bits 3-6 */ + n2 = (n2 & L_SHIFT_1_MASK) << 1; + + return n1 | n2; +} + +static inline void swap_words_in_key_and_bits_in_byte(const u8 *in, + u8 *out, u32 len) +{ + unsigned int i = 0; + int j; + int index = 0; + + j = len - BYTES_PER_WORD; + while (j >= 0) { + for (i = 0; i < BYTES_PER_WORD; i++) { + index = len - j - BYTES_PER_WORD + i; + out[j + i] = + swap_bits_in_byte(in[index]); + } + j -= BYTES_PER_WORD; + } +} + +static inline void swap_4bits_in_bytes(const u8 *in, u8 *out, u32 len) +{ + unsigned int i; + for (i = 0; i < len; i++) + out[i] = swap_bits_in_byte(in[i]); +} + +static irqreturn_t cryp_interrupt_handler(int irq, void *param) +{ + struct cryp_ctx *ctx; + int i; + struct cryp_device_data *device_data; + + if (param == NULL) { + BUG_ON(!param); + return IRQ_HANDLED; + } + + device_data = (struct cryp_device_data *)param; + + ctx = device_data->current_ctx; + + if (ctx == NULL) { + BUG_ON(!ctx); + return IRQ_HANDLED; + } + + if (cryp_pending_irq_src(device_data, + CRYP_IRQ_SRC_OUTPUT_FIFO)) { + if (ctx->outlen / ctx->blocksize > 0) { + for (i = 0; i < ctx->blocksize / 4; i++) { + cryp_read_outdata(device_data, + (u32 *)ctx->outdata); + ctx->outdata += 4; + ctx->outlen -= 4; + } + + if (ctx->outlen == 0) { + cryp_disable_irq_src(device_data, + CRYP_IRQ_SRC_OUTPUT_FIFO); + complete(&ctx->device->cryp_irq_complete); + } + } + } else if (cryp_pending_irq_src(device_data, + CRYP_IRQ_SRC_INPUT_FIFO)) { + if (ctx->datalen / ctx->blocksize > 0) { + for (i = 0 ; i < ctx->blocksize / 4; i++) { + cryp_write_indata(device_data, + *((u32 *)ctx->indata)); + ctx->indata += 4; + ctx->datalen -= 4; + } + + if (ctx->datalen == 0) + cryp_disable_irq_src(device_data, + CRYP_IRQ_SRC_INPUT_FIFO); + + if (ctx->config.algo_mode == CRYP_ALGO_AES_XTS) { + cryp_start(device_data); + cryp_wait_until_done(device_data); + } + } + } + + return IRQ_HANDLED; +} + +static int mode_is_aes(enum cryp_algo_mode mode) +{ + return (CRYP_ALGO_AES_ECB == mode) || + (CRYP_ALGO_AES_CBC == mode) || + (CRYP_ALGO_AES_CTR == mode) || + (CRYP_ALGO_AES_XTS == mode); +} + +static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right, + enum cryp_init_vector_index index) +{ + struct cryp_init_vector_value vector_value; + + dev_dbg(device_data->dev, "[%s]", __func__); + + vector_value.init_value_left = left; + vector_value.init_value_right = right; + + return cryp_configure_init_vector(device_data, + index, + 
vector_value); +} + +static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx) +{ + int i; + int status = 0; + int num_of_regs = ctx->blocksize / 8; + u32 iv[AES_BLOCK_SIZE / 4]; + + dev_dbg(device_data->dev, "[%s]", __func__); + + /* + * Since we loop on num_of_regs we need to have a check in case + * someone provides an incorrect blocksize which would force calling + * cfg_iv with i greater than 2 which is an error. + */ + if (num_of_regs > 2) { + dev_err(device_data->dev, "[%s] Incorrect blocksize %d", + __func__, ctx->blocksize); + return -EINVAL; + } + + for (i = 0; i < ctx->blocksize / 4; i++) + iv[i] = uint8p_to_uint32_be(ctx->iv + i*4); + + for (i = 0; i < num_of_regs; i++) { + status = cfg_iv(device_data, iv[i*2], iv[i*2+1], + (enum cryp_init_vector_index) i); + if (status != 0) + return status; + } + return status; +} + +static int set_key(struct cryp_device_data *device_data, + u32 left_key, + u32 right_key, + enum cryp_key_reg_index index) +{ + struct cryp_key_value key_value; + int cryp_error; + + dev_dbg(device_data->dev, "[%s]", __func__); + + key_value.key_value_left = left_key; + key_value.key_value_right = right_key; + + cryp_error = cryp_configure_key_values(device_data, + index, + key_value); + if (cryp_error != 0) + dev_err(device_data->dev, "[%s]: " + "cryp_configure_key_values() failed!", __func__); + + return cryp_error; +} + +static int cfg_keys(struct cryp_ctx *ctx) +{ + int i; + int num_of_regs = ctx->keylen / 8; + u32 swapped_key[CRYP_MAX_KEY_SIZE / 4]; + int cryp_error = 0; + + dev_dbg(ctx->device->dev, "[%s]", __func__); + + if (mode_is_aes(ctx->config.algo_mode)) { + swap_words_in_key_and_bits_in_byte((u8 *)ctx->key, + (u8 *)swapped_key, + ctx->keylen); + } else { + for (i = 0; i < ctx->keylen / 4; i++) + swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4); + } + + for (i = 0; i < num_of_regs; i++) { + cryp_error = set_key(ctx->device, + *(((u32 *)swapped_key)+i*2), + *(((u32 *)swapped_key)+i*2+1), + (enum cryp_key_reg_index) i); + + if (cryp_error != 0) { + dev_err(ctx->device->dev, "[%s]: set_key() failed!", + __func__); + return cryp_error; + } + } + return cryp_error; +} + +static int cryp_setup_context(struct cryp_ctx *ctx, + struct cryp_device_data *device_data) +{ + if (ctx->updated) + cryp_restore_device_context(device_data, &ctx->dev_ctx); + else { + cryp_activity(device_data, CRYP_CRYPEN_DISABLE); + + if (cfg_keys(ctx) != 0) { + dev_err(ctx->device->dev, "[%s]: cfg_keys failed!", + __func__); + return -EPERM; + } + + if ((ctx->iv) && + (CRYP_ALGO_AES_ECB != ctx->config.algo_mode) && + (CRYP_ALGO_DES_ECB != ctx->config.algo_mode) && + (CRYP_ALGO_TDES_ECB != ctx->config.algo_mode)) { + if (cfg_ivs(device_data, ctx) != 0) + return -EPERM; + } + + cryp_set_configuration(device_data, &ctx->config); + } + + return 0; +} + + +static int cryp_get_device_data(struct cryp_ctx *ctx, + struct cryp_device_data **device_data) +{ + int ret; + struct klist_iter device_iterator; + struct klist_node *device_node; + struct cryp_device_data *local_device_data = NULL; + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + /* Wait until a device is available */ + ret = down_interruptible(&driver_data.device_allocation); + if (ret) + return ret; /* Interrupted */ + + /* Select a device */ + klist_iter_init(&driver_data.device_list, &device_iterator); + + device_node = klist_next(&device_iterator); + while (device_node) { + local_device_data = container_of(device_node, + struct cryp_device_data, list_node); + spin_lock(&local_device_data->ctx_lock); + /* 
current_ctx allocates a device, NULL = unallocated */ + if (local_device_data->current_ctx) { + device_node = klist_next(&device_iterator); + } else { + local_device_data->current_ctx = ctx; + ctx->device = local_device_data; + spin_unlock(&local_device_data->ctx_lock); + break; + } + spin_unlock(&local_device_data->ctx_lock); + } + klist_iter_exit(&device_iterator); + + if (!device_node) { + /** + * No free device found. + * Since we allocated a device with down_interruptible, this + * should not be able to happen. + * Number of available devices, which are contained in + * device_allocation, is therefore decremented by not doing + * an up(device_allocation). + */ + return -EBUSY; + } + + *device_data = local_device_data; + + return 0; +} + +static void cryp_dma_setup_channel(struct cryp_device_data *device_data, + struct device *dev) +{ + dma_cap_zero(device_data->dma.mask); + dma_cap_set(DMA_SLAVE, device_data->dma.mask); + + device_data->dma.cfg_mem2cryp = mem_to_engine; + device_data->dma.chan_mem2cryp = + dma_request_channel(device_data->dma.mask, + stedma40_filter, + device_data->dma.cfg_mem2cryp); + + device_data->dma.cfg_cryp2mem = engine_to_mem; + device_data->dma.chan_cryp2mem = + dma_request_channel(device_data->dma.mask, + stedma40_filter, + device_data->dma.cfg_cryp2mem); + + init_completion(&device_data->dma.cryp_dma_complete); +} + +static void cryp_dma_out_callback(void *data) +{ + struct cryp_ctx *ctx = (struct cryp_ctx *) data; + dev_dbg(ctx->device->dev, "[%s]: ", __func__); + + complete(&ctx->device->dma.cryp_dma_complete); +} + +static int cryp_set_dma_transfer(struct cryp_ctx *ctx, + struct scatterlist *sg, + int len, + enum dma_data_direction direction) +{ + struct dma_async_tx_descriptor *desc; + struct dma_chan *channel = NULL; + dma_cookie_t cookie; + + dev_dbg(ctx->device->dev, "[%s]: ", __func__); + + switch (direction) { + case DMA_TO_DEVICE: + channel = ctx->device->dma.chan_mem2cryp; + ctx->device->dma.sg_src = sg; + ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev, + ctx->device->dma.sg_src, + ctx->device->dma.nents_src, + direction); + + if (!ctx->device->dma.sg_src_len) { + dev_dbg(ctx->device->dev, + "[%s]: Could not map the sg list (TO_DEVICE)", + __func__); + return -EFAULT; + } + + dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " + "(TO_DEVICE)", __func__); + + desc = channel->device->device_prep_slave_sg(channel, + ctx->device->dma.sg_src, + ctx->device->dma.sg_src_len, + direction, + DMA_CTRL_ACK); + break; + + case DMA_FROM_DEVICE: + channel = ctx->device->dma.chan_cryp2mem; + ctx->device->dma.sg_dst = sg; + + ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev, + ctx->device->dma.sg_dst, + ctx->device->dma.nents_dst, + direction); + + if (!ctx->device->dma.sg_dst_len) { + dev_dbg(ctx->device->dev, + "[%s]: Could not map the sg list " + "(FROM_DEVICE)", __func__); + return -EFAULT; + } + + dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " + "(FROM_DEVICE)", __func__); + + desc = channel->device->device_prep_slave_sg(channel, + ctx->device->dma.sg_dst, + ctx->device->dma.sg_dst_len, + direction, + DMA_CTRL_ACK | + DMA_PREP_INTERRUPT); + + desc->callback = cryp_dma_out_callback; + desc->callback_param = ctx; + break; + + default: + dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction", + __func__); + return -EFAULT; + } + + cookie = desc->tx_submit(desc); + dma_async_issue_pending(channel); + + return 0; +} + +static void cryp_dma_done(struct cryp_ctx *ctx) +{ + struct dma_chan *chan; + + 
dev_dbg(ctx->device->dev, "[%s]: ", __func__); + + chan = ctx->device->dma.chan_mem2cryp; + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); + dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src, + ctx->device->dma.sg_src_len, DMA_TO_DEVICE); + + chan = ctx->device->dma.chan_cryp2mem; + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); + dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst, + ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE); +} + +static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg, + int len) +{ + int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); + dev_dbg(ctx->device->dev, "[%s]: ", __func__); + + if (error) { + dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() " + "failed", __func__); + return error; + } + + return len; +} + +static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len) +{ + int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE); + if (error) { + dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() " + "failed", __func__); + return error; + } + + return len; +} + +static int cryp_polling_mode(struct cryp_ctx *ctx, + struct cryp_device_data *device_data) +{ + int i; + int ret = 0; + int remaining_length = ctx->datalen; + const u8 *indata = ctx->indata; + u8 *outdata = ctx->outdata; + + cryp_activity(device_data, CRYP_CRYPEN_ENABLE); + while (remaining_length > 0) { + for (i = 0; i < ctx->blocksize / BYTES_PER_WORD; i++) { + ret = cryp_write_indata(device_data, + *((u32 *)indata)); + if (ret) + goto out; + indata += BYTES_PER_WORD; + remaining_length -= BYTES_PER_WORD; + } + cryp_wait_until_done(device_data); + for (i = 0; i < ctx->blocksize / BYTES_PER_WORD; i++) { + ret = cryp_read_outdata(device_data, + (u32 *)outdata); + if (ret) + goto out; + outdata += BYTES_PER_WORD; + } + cryp_wait_until_done(device_data); + } +out: + return ret; +} + +static int cryp_disable_power( + struct device *dev, + struct cryp_device_data *device_data, + bool save_device_context) +{ + int ret = 0; + + dev_dbg(dev, "[%s]", __func__); + + mutex_lock(&device_data->power_state_mutex); + if (!device_data->power_state) + goto out; + + spin_lock(&device_data->ctx_lock); + if (save_device_context && device_data->current_ctx) { + cryp_save_device_context(device_data, + &device_data->current_ctx->dev_ctx); + device_data->restore_dev_ctx = true; + } + spin_unlock(&device_data->ctx_lock); + + clk_disable(device_data->clk); + ret = regulator_disable(device_data->pwr_regulator); + if (ret) + dev_err(dev, "[%s]: " + "regulator_disable() failed!", + __func__); + + device_data->power_state = false; + +out: + mutex_unlock(&device_data->power_state_mutex); + + return ret; +} + +static int cryp_enable_power( + struct device *dev, + struct cryp_device_data *device_data, + bool restore_device_context) +{ + int ret = 0; + + dev_dbg(dev, "[%s]", __func__); + + mutex_lock(&device_data->power_state_mutex); + if (!device_data->power_state) { + ret = regulator_enable(device_data->pwr_regulator); + if (ret) { + dev_err(dev, "[%s]: regulator_enable() failed!", + __func__); + goto out; + } + + ret = clk_enable(device_data->clk); + if (ret) { + dev_err(dev, "[%s]: clk_enable() failed!", + __func__); + regulator_disable(device_data->pwr_regulator); + goto out; + } + device_data->power_state = true; + } + + if (device_data->restore_dev_ctx) { + spin_lock(&device_data->ctx_lock); + if (restore_device_context && device_data->current_ctx) { + device_data->restore_dev_ctx = false; + 
cryp_restore_device_context(device_data, + &device_data->current_ctx->dev_ctx); + } + spin_unlock(&device_data->ctx_lock); + } +out: + mutex_unlock(&device_data->power_state_mutex); + + return ret; +} + +static int hw_crypt_noxts(struct cryp_ctx *ctx, + struct cryp_device_data *device_data) +{ + int ret; + + const u8 *indata = ctx->indata; + u8 *outdata = ctx->outdata; + u32 datalen = ctx->datalen; + u32 outlen = datalen; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->outlen = ctx->datalen; + ctx->config.key_access = CRYP_STATE_ENABLE; + ctx->config.data_type = CRYP_DATA_TYPE_8BIT_SWAP; + + cryp_reset(device_data); + + ret = cryp_setup_context(ctx, device_data); + if (ret) + goto out; + + cryp_flush_inoutfifo(device_data); + + if (cryp_mode == CRYP_MODE_INTERRUPT) { + INIT_COMPLETION(ctx->device->cryp_irq_complete); + + cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO); + cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO); + + cryp_activity(device_data, CRYP_CRYPEN_ENABLE); + + wait_for_completion(&ctx->device->cryp_irq_complete); + } else if (cryp_mode == CRYP_MODE_POLLING || + cryp_mode == CRYP_MODE_DMA) { + /* + * The reason for having DMA in this if case is that if we are + * running cryp_mode = 2, then we separate DMA routines for + * handling cipher/plaintext > blocksize, except when + * running the normal CRYPTO_ALG_TYPE_CIPHER, then we still use + * the polling mode. Overhead of doing DMA setup eats up the + * benefits using it. + */ + ret = cryp_polling_mode(ctx, device_data); + if (ret) + goto out; + } else { + dev_err(ctx->device->dev, "[%s]: Invalid operation mode!", + __func__); + ret = -EPERM; + goto out; + } + + ret = 0; + cryp_save_device_context(device_data, &ctx->dev_ctx); + if (ctx->updated == 0) + ctx->updated = 1; + +out: + ctx->indata = indata; + ctx->outdata = outdata; + ctx->datalen = datalen; + ctx->outlen = outlen; + + return ret; +} + +static int get_nents(struct scatterlist *sg, int nbytes) +{ + int nents = 0; + + while (nbytes > 0) { + nbytes -= sg->length; + sg = scatterwalk_sg_next(sg); + nents++; + } + + return nents; +} + +static int ablk_dma_crypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct cryp_device_data *device_data; + + int bytes_written = 0; + int bytes_read = 0; + int ret; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.key_access = CRYP_STATE_ENABLE; + ctx->config.data_type = CRYP_DATA_TYPE_8BIT_SWAP; + ctx->datalen = areq->nbytes; + ctx->outlen = areq->nbytes; + + ret = cryp_get_device_data(ctx, &device_data); + if (ret) + return ret; + + ret = cryp_enable_power(device_data->dev, device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "cryp_enable_power() failed!", __func__); + goto out; + } + + cryp_reset(device_data); + + ret = cryp_setup_context(ctx, device_data); + if (ret) + goto out_power; + + /* We have the device now, so store the nents in the dma struct. */ + ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen); + ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen); + + /* Enable DMA in- and output. 
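In interrupt mode, hw_crypt_noxts() above re-arms a completion, unmasks the two FIFO interrupt sources and sleeps until the interrupt handler (registered later in the probe function) signals that the block has been pushed through. The skeleton of that handshake, with the CRYP specifics left out, is the usual completion pattern:

#include <linux/completion.h>
#include <linux/interrupt.h>

static DECLARE_COMPLETION(xfer_done);

/* Interrupt side: acknowledge the hardware, then wake the waiter. */
static irqreturn_t sketch_isr(int irq, void *dev_id)
{
	/* ... read/clear the interrupt status here ... */
	complete(&xfer_done);
	return IRQ_HANDLED;
}

/* Process side: re-arm, start the hardware, block until the ISR runs. */
static void sketch_wait_for_hw(void)
{
	INIT_COMPLETION(xfer_done);	/* reset a possibly-completed struct */
	/* ... unmask the interrupt sources and enable the engine here ... */
	wait_for_completion(&xfer_done);
}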
*/ + cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS); + + bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen); + bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written); + + wait_for_completion(&ctx->device->dma.cryp_dma_complete); + cryp_dma_done(ctx); + + cryp_save_device_context(device_data, &ctx->dev_ctx); + ctx->updated = 1; + +out_power: + if (cryp_disable_power(device_data->dev, device_data, false)) + dev_err(device_data->dev, "[%s]: " + "cryp_disable_power() failed!", __func__); + +out: + spin_lock(&device_data->ctx_lock); + device_data->current_ctx = NULL; + ctx->device = NULL; + spin_unlock(&device_data->ctx_lock); + + /* + * The down_interruptible part for this semaphore is called in + * cryp_get_device_data. + */ + up(&driver_data.device_allocation); + + if (unlikely(bytes_written != bytes_read)) + return -EPERM; + + return 0; +} + +static int ablk_crypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct ablkcipher_walk walk; + unsigned long src_paddr; + unsigned long dst_paddr; + int ret; + int nbytes; + struct cryp_device_data *device_data; + + ret = cryp_get_device_data(ctx, &device_data); + if (ret) + goto out; + + ret = cryp_enable_power(device_data->dev, device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "cryp_enable_power() failed!", __func__); + goto out_power; + } + + ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes); + ret = ablkcipher_walk_phys(areq, &walk); + + if (ret) { + pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!", + __func__); + goto out_power; + } + + while ((nbytes = walk.nbytes) > 0) { + ctx->iv = walk.iv; + src_paddr = (page_to_phys(walk.src.page) + walk.src.offset); + ctx->indata = phys_to_virt(src_paddr); + + dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset); + ctx->outdata = phys_to_virt(dst_paddr); + + ctx->datalen = nbytes - (nbytes % ctx->blocksize); + + ret = hw_crypt_noxts(ctx, device_data); + if (ret) + goto out_power; + + nbytes -= ctx->datalen; + ret = ablkcipher_walk_done(areq, &walk, nbytes); + if (ret) + goto out_power; + } + ablkcipher_walk_complete(&walk); + +out_power: + if (cryp_disable_power(device_data->dev, device_data, false)) + dev_err(device_data->dev, "[%s]: " + "cryp_disable_power() failed!", __func__); +out: + /* Release the device */ + spin_lock(&device_data->ctx_lock); + device_data->current_ctx = NULL; + ctx->device = NULL; + spin_unlock(&device_data->ctx_lock); + + /* + * The down_interruptible part for this semaphore is called in + * cryp_get_device_data. 
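The walk loop in ablk_crypt() only ever hands whole blocks to the hardware: datalen is rounded down to a multiple of the cipher block size and the remainder is reported back through ablkcipher_walk_done() instead of being processed as a partial block. A concrete example of the arithmetic:

/* With nbytes = 100 and a 16-byte AES block:
 *
 *   ctx->datalen = 100 - (100 % 16) = 96  -> processed by hw_crypt_noxts()
 *   nbytes      -= 96               =  4  -> handed to ablkcipher_walk_done()
 *
 * so the trailing 4 bytes are returned to the walk rather than being
 * pushed through the engine as an incomplete block.
 */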
+ */ + up(&driver_data.device_allocation); + + return ret; +} + +static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + switch (keylen) { + case AES_KEYSIZE_128: + ctx->config.key_size = CRYP_KEY_SIZE_128; + break; + + case AES_KEYSIZE_192: + ctx->config.key_size = CRYP_KEY_SIZE_192; + break; + + case AES_KEYSIZE_256: + ctx->config.key_size = CRYP_KEY_SIZE_256; + break; + + default: + pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__); + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + + return 0; +} + +static int aes_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + /* For CTR mode */ + if (keylen != AES_KEYSIZE_128 && + keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) { + + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + pr_debug(DEV_DBG_NAME " [%s] invalid keylen", __func__); + return -EINVAL; + } + + if (keylen == AES_KEYSIZE_128) + ctx->config.key_size = CRYP_KEY_SIZE_128; + else if (keylen == AES_KEYSIZE_192) + ctx->config.key_size = CRYP_KEY_SIZE_192; + else if (keylen == AES_KEYSIZE_256) + ctx->config.key_size = CRYP_KEY_SIZE_256; + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + return 0; +} + +static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + u32 tmp[DES_EXPKEY_WORDS]; + int ret; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + if (keylen != DES_KEY_SIZE) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", + __func__); + return -EINVAL; + } + + ret = des_ekey(tmp, key); + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", + __func__); + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + return 0; +} + +static int des_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; + int ret; + u32 tmp[DES_EXPKEY_WORDS]; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + if (keylen != DES_KEY_SIZE) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", + __func__); + return -EINVAL; + } + + ret = des_ekey(tmp, key); + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", + __func__); + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + return 0; +} + +static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + const u32 *K = (const u32 *)key; + u32 tmp[DES3_EDE_EXPKEY_WORDS]; + int i, ret; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + if (keylen != DES3_EDE_KEY_SIZE) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + 
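All of the setkey handlers above share the same shape: validate the length, optionally run des_ekey() (which returns 0 for a weak schedule) and honour CRYPTO_TFM_REQ_WEAK_KEY, then cache the raw key in the context for later programming into the key registers. Seen from the API user's side, the weak-key handling looks roughly like the sketch below; "cbc(des)" matches a cra_name registered further down, but which implementation actually backs it depends on priority.

#include <linux/crypto.h>
#include <linux/err.h>

/* Sketch: ask the crypto API to reject weak DES keys at setkey time. */
static int set_des_key_strict(const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher *tfm;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(des)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Without this flag a weak key is silently accepted. */
	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)	/* -EINVAL; CRYPTO_TFM_RES_WEAK_KEY ends up in crt_flags */
		pr_info("DES key rejected as weak or malformed\n");

	crypto_free_ablkcipher(tfm);
	return ret;
}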
pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", + __func__); + return -EINVAL; + } + + /* Checking key interdependency for weak key detection. */ + if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || + !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && + (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", + __func__); + return -EINVAL; + } + for (i = 0; i < 3; i++) { + ret = des_ekey(tmp, key + i*DES_KEY_SIZE); + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: " + "CRYPTO_TFM_REQ_WEAK_KEY", __func__); + return -EINVAL; + } + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + return 0; +} + +static int des3_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; + const u32 *K = (const u32 *)key; + u32 tmp[DES3_EDE_EXPKEY_WORDS]; + int i, ret; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + if (keylen != DES3_EDE_KEY_SIZE) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", + __func__); + return -EINVAL; + } + + if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || + !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && + (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", + __func__); + return -EINVAL; + } + + for (i = 0; i < 3; i++) { + ret = des_ekey(tmp, key + i*DES_KEY_SIZE); + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: " + "CRYPTO_TFM_REQ_WEAK_KEY", __func__); + return -EINVAL; + } + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + return 0; +} + +static int cryp_hw_calculate(struct cryp_ctx *ctx) +{ + struct cryp_device_data *device_data; + int ret; + + ret = cryp_get_device_data(ctx, &device_data); + if (ret) + goto out; + + ret = cryp_enable_power(device_data->dev, device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "cryp_enable_power() failed!", __func__); + goto out; + } + + if (hw_crypt_noxts(ctx, device_data)) + pr_err("u8500_cryp:crypX: [%s]: hw_crypt_noxts() failed!", + __func__); + +out: + if (cryp_disable_power(device_data->dev, device_data, false)) + dev_err(device_data->dev, "[%s]: " + "cryp_disable_power() failed!", __func__); + /* Release the device */ + spin_lock(&device_data->ctx_lock); + device_data->current_ctx = NULL; + ctx->device = NULL; + spin_unlock(&device_data->ctx_lock); + + /* + * The down_interruptible part for this semaphore is called in + * cryp_get_device_data. 
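The XOR test in the two des3 setkey handlers above is a compact identity check on the three 8-byte sub-keys: ((K[0] ^ K[2]) | (K[1] ^ K[3])) is zero exactly when the first and second DES keys are identical, and the second clause catches the second and third. Either case collapses 3DES to the strength of single DES, which is why such keys are treated as weak:

/* K[] views the 24-byte key as six 32-bit words, two per DES sub-key:
 *
 *   K1 == K2  <=>  (K[0] ^ K[2]) == 0 && (K[1] ^ K[3]) == 0
 *             <=>  ((K[0] ^ K[2]) | (K[1] ^ K[3])) == 0
 *   K2 == K3  <=>  ((K[2] ^ K[4]) | (K[3] ^ K[5])) == 0
 *
 * With K1 == K2, the E(K1) and D(K2) stages cancel and EDE degenerates
 * to single DES under K3 (similarly K2 == K3 leaves single DES under
 * K1), hence the CRYPTO_TFM_RES_WEAK_KEY rejection when the caller has
 * set CRYPTO_TFM_REQ_WEAK_KEY.
 */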
+ */ + up(&driver_data.device_allocation); + + return ret; +} + +static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_ECB; + + ctx->indata = in; + ctx->outdata = out; + ctx->datalen = ctx->blocksize; + + if (cryp_hw_calculate(ctx)) + pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + __func__); +} + +static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_ECB; + + ctx->indata = in; + ctx->outdata = out; + ctx->datalen = ctx->blocksize; + + if (cryp_hw_calculate(ctx)) + pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + __func__); +} + +static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_DES_ECB; + + ctx->indata = in; + ctx->outdata = out; + ctx->datalen = ctx->blocksize; + + if (cryp_hw_calculate(ctx)) + pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + __func__); +} + +static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_DES_ECB; + + ctx->indata = in; + ctx->outdata = out; + ctx->datalen = ctx->blocksize; + + if (cryp_hw_calculate(ctx)) + pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + __func__); +} + +static void des3_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_TDES_ECB; + + ctx->indata = in; + ctx->outdata = out; + ctx->datalen = ctx->blocksize; + + if (cryp_hw_calculate(ctx)) + pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + __func__); +} + +static void des3_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_TDES_ECB; + + ctx->indata = in; + ctx->outdata = out; + ctx->datalen = ctx->blocksize; + + if (cryp_hw_calculate(ctx)) + pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + __func__); +} + + +static int aes_ecb_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_ECB; + ctx->blocksize = AES_BLOCK_SIZE; + + if 
(cryp_mode == CRYP_MODE_DMA) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. */ + return ablk_crypt(areq); +} + +static int aes_ecb_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_ECB; + ctx->blocksize = AES_BLOCK_SIZE; + + if (cryp_mode == CRYP_MODE_DMA) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. */ + return ablk_crypt(areq); +} + +static int aes_cbc_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_CBC; + ctx->blocksize = AES_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. */ + return ablk_crypt(areq); +} + +static int aes_cbc_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_CBC; + ctx->blocksize = AES_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. */ + return ablk_crypt(areq); +} + +static int aes_ctr_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_CTR; + ctx->blocksize = AES_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. */ + return ablk_crypt(areq); +} + +static int aes_ctr_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_CTR; + ctx->blocksize = AES_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. 
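The aes/des/des3 encrypt and decrypt handlers a little further up serve the synchronous single-block "cipher" interface (the cia_* hooks in the struct crypto_alg definitions below). A caller reaches them through crypto_cipher, one block at a time; whether this driver or the generic software implementation is selected depends on cra_priority. A minimal usage sketch, using only names from the crypto API rather than from this patch:

#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/aes.h>

/* Sketch: encrypt exactly one AES block via the synchronous interface. */
static int encrypt_one_block(const u8 *key, const u8 *in, u8 *out)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);	/* cra_name "aes" below */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (!ret)
		crypto_cipher_encrypt_one(tfm, out, in); /* one AES_BLOCK_SIZE block */

	crypto_free_cipher(tfm);
	return ret;
}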
*/ + return ablk_crypt(areq); +} + +static int des_ecb_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_DES_ECB; + ctx->blocksize = DES_BLOCK_SIZE; + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +static int des_ecb_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_DES_ECB; + ctx->blocksize = DES_BLOCK_SIZE; + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +static int des_cbc_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_DES_CBC; + ctx->blocksize = DES_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +static int des_cbc_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_DES_CBC; + ctx->blocksize = DES_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +static int des3_ecb_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_TDES_ECB; + ctx->blocksize = DES3_EDE_BLOCK_SIZE; + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +static int des3_ecb_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_TDES_ECB; + ctx->blocksize = DES3_EDE_BLOCK_SIZE; + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. 
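For the asynchronous entry points (the AES ecb/cbc/ctr handlers above and the DES variants around this hunk), a user goes through an ablkcipher request object instead. This driver happens to complete the request synchronously, but a portable caller would also set a completion callback and handle -EINPROGRESS. A sketch, assuming the caller already owns src/dst scatterlists and an IV buffer:

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>

/* Sketch: drive cbc(aes) through the ablkcipher request interface. */
static int cbc_aes_encrypt_buf(struct scatterlist *src,
			       struct scatterlist *dst,
			       unsigned int nbytes, u8 *iv,
			       const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
	/* Reaches aes_cbc_encrypt() above if this driver wins selection. */
	ret = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}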
+ */ + return ablk_crypt(areq); +} + +static int des3_cbc_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_TDES_CBC; + ctx->blocksize = DES3_EDE_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +static int des3_cbc_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_TDES_CBC; + ctx->blocksize = DES3_EDE_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +/** + * struct crypto_alg aes_alg + */ +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_setkey, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt + } + } +}; + +/** + * struct crypto_alg des_alg + */ +static struct crypto_alg des_alg = { + .cra_name = "des", + .cra_driver_name = "des-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = DES_KEY_SIZE, + .cia_max_keysize = DES_KEY_SIZE, + .cia_setkey = des_setkey, + .cia_encrypt = des_encrypt, + .cia_decrypt = des_decrypt + } + } +}; + +/** + * struct crypto_alg des3_alg + */ +static struct crypto_alg des3_alg = { + .cra_name = "des3_ede", + .cra_driver_name = "des3_ede-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des3_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = DES3_EDE_KEY_SIZE, + .cia_max_keysize = DES3_EDE_KEY_SIZE, + .cia_setkey = des3_setkey, + .cia_encrypt = des3_encrypt, + .cia_decrypt = des3_decrypt + } + } +}; + +/** + * struct crypto_alg aes_ecb_alg + */ +static struct crypto_alg aes_ecb_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = 
sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_ecb_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_ablkcipher_setkey, + .encrypt = aes_ecb_encrypt, + .decrypt = aes_ecb_decrypt, + } + } +}; + +/** + * struct crypto_alg aes_cbc_alg + */ +static struct crypto_alg aes_cbc_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_cbc_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_ablkcipher_setkey, + .encrypt = aes_cbc_encrypt, + .decrypt = aes_cbc_decrypt, + .ivsize = AES_BLOCK_SIZE, + } + } +}; + +/** + * struct crypto_alg aes_ctr_alg + */ +static struct crypto_alg aes_ctr_alg = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_ctr_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_ablkcipher_setkey, + .encrypt = aes_ctr_encrypt, + .decrypt = aes_ctr_decrypt, + .ivsize = AES_BLOCK_SIZE, + } + } +}; + +/** + * struct crypto_alg des_ecb_alg + */ +static struct crypto_alg des_ecb_alg = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des_ecb_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = des_ablkcipher_setkey, + .encrypt = des_ecb_encrypt, + .decrypt = des_ecb_decrypt, + } + } +}; + +/** + * struct crypto_alg des_cbc_alg + */ +static struct crypto_alg des_cbc_alg = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des_cbc_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = des_ablkcipher_setkey, + .encrypt = des_cbc_encrypt, + .decrypt = des_cbc_decrypt, + .ivsize = DES_BLOCK_SIZE, + } + } +}; + +/** + * struct crypto_alg des3_ecb_alg + */ +static struct crypto_alg des3_ecb_alg = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3_ede-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = 
LIST_HEAD_INIT(des3_ecb_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = des3_ablkcipher_setkey, + .encrypt = des3_ecb_encrypt, + .decrypt = des3_ecb_decrypt, + } + } +}; + +/** + * struct crypto_alg des3_cbc_alg + */ +static struct crypto_alg des3_cbc_alg = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3_ede-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des3_cbc_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = des3_ablkcipher_setkey, + .encrypt = des3_cbc_encrypt, + .decrypt = des3_cbc_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + } + } +}; + +/** + * struct crypto_alg *u8500_cryp_algs[] - + */ +static struct crypto_alg *u8500_cryp_algs[] = { + &aes_alg, + &des_alg, + &des3_alg, + &aes_ecb_alg, + &aes_cbc_alg, + &aes_ctr_alg, + &des_ecb_alg, + &des_cbc_alg, + &des3_ecb_alg, + &des3_cbc_alg +}; + +/** + * cryp_algs_register_all - + */ +static int cryp_algs_register_all(void) +{ + int ret; + int i; + int count; + + pr_debug("[%s]", __func__); + + for (i = 0; i < ARRAY_SIZE(u8500_cryp_algs); i++) { + ret = crypto_register_alg(u8500_cryp_algs[i]); + if (ret) { + count = i; + pr_err("[%s] alg registration failed", + u8500_cryp_algs[i]->cra_driver_name); + goto unreg; + } + } + return 0; +unreg: + for (i = 0; i < count; i++) + crypto_unregister_alg(u8500_cryp_algs[i]); + return ret; +} + +/** + * cryp_algs_unregister_all - + */ +static void cryp_algs_unregister_all(void) +{ + int i; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + for (i = 0; i < ARRAY_SIZE(u8500_cryp_algs); i++) + crypto_unregister_alg(u8500_cryp_algs[i]); +} + +static int u8500_cryp_probe(struct platform_device *pdev) +{ + int ret; + int cryp_error = 0; + struct resource *res = NULL; + struct resource *res_irq = NULL; + struct cryp_device_data *device_data; + struct cryp_protection_config prot = { + .privilege_access = CRYP_STATE_ENABLE + }; + struct device *dev = &pdev->dev; + + dev_dbg(dev, "[%s]", __func__); + device_data = kzalloc(sizeof(struct cryp_device_data), GFP_KERNEL); + if (!device_data) { + dev_err(dev, "[%s]: kzalloc() failed!", __func__); + ret = -ENOMEM; + goto out; + } + + device_data->dev = dev; + device_data->current_ctx = NULL; + + /* Grab the DMA configuration from platform data. 
*/ + mem_to_engine = &((struct cryp_platform_data *) + dev->platform_data)->mem_to_engine; + engine_to_mem = &((struct cryp_platform_data *) + dev->platform_data)->engine_to_mem; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "[%s]: platform_get_resource() failed", + __func__); + ret = -ENODEV; + goto out_kfree; + } + + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (res == NULL) { + dev_err(dev, "[%s]: request_mem_region() failed", + __func__); + ret = -EBUSY; + goto out_kfree; + } + + device_data->base = ioremap(res->start, resource_size(res)); + if (!device_data->base) { + dev_err(dev, "[%s]: ioremap failed!", __func__); + ret = -ENOMEM; + goto out_free_mem; + } + + spin_lock_init(&device_data->ctx_lock); + mutex_init(&device_data->power_state_mutex); + + /* Enable power for CRYP hardware block */ + device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape"); + if (IS_ERR(device_data->pwr_regulator)) { + dev_err(dev, "[%s]: could not get cryp regulator", __func__); + ret = PTR_ERR(device_data->pwr_regulator); + device_data->pwr_regulator = NULL; + goto out_unmap; + } + + /* Enable the clk for CRYP hardware block */ + device_data->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(device_data->clk)) { + dev_err(dev, "[%s]: clk_get() failed!", __func__); + ret = PTR_ERR(device_data->clk); + goto out_regulator; + } + + /* Enable device power (and clock) */ + ret = cryp_enable_power(device_data->dev, device_data, false); + if (ret) { + dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__); + goto out_clk; + } + + cryp_error = cryp_check(device_data); + if (cryp_error != 0) { + dev_err(dev, "[%s]: cryp_init() failed!", __func__); + ret = -EINVAL; + goto out_power; + } + + cryp_error = cryp_configure_protection(device_data, &prot); + if (cryp_error != 0) { + dev_err(dev, "[%s]: cryp_configure_protection() failed!", + __func__); + ret = -EINVAL; + goto out_power; + } + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) { + dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable", + __func__); + goto out_power; + } + + ret = request_irq(res_irq->start, + cryp_interrupt_handler, + 0, + "cryp1", + device_data); + if (ret) { + dev_err(dev, "[%s]: Unable to request IRQ", __func__); + goto out_power; + } + + init_completion(&device_data->cryp_irq_complete); + + if (cryp_mode == CRYP_MODE_DMA) + cryp_dma_setup_channel(device_data, dev); + + platform_set_drvdata(pdev, device_data); + + /* Put the new device into the device list... */ + klist_add_tail(&device_data->list_node, &driver_data.device_list); + + /* ... and signal that a new device is available. 
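The probe function acquires its resources in a fixed order and, further down, uses one goto label per acquisition, so a failure at any step releases exactly what has been taken so far, in reverse order. As a reading aid, the pairing is:

/* Acquisition (above)              Release label (below)
 *   kzalloc(device_data)       ->  out_kfree:     kfree()
 *   request_mem_region()       ->  out_free_mem:  release_mem_region()
 *   ioremap()                  ->  out_unmap:     iounmap()
 *   regulator_get()            ->  out_regulator: regulator_put()
 *   clk_get()                  ->  out_clk:       clk_put()
 *   cryp_enable_power()        ->  out_power:     cryp_disable_power()
 *
 * Jumping to a label runs it and everything below it, so each error
 * path only needs to name the most recently acquired resource.
 */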
*/ + up(&driver_data.device_allocation); + + ret = cryp_algs_register_all(); + if (ret) { + dev_err(dev, "[%s]: cryp_algs_register_all() failed!", + __func__); + goto out_power; + } + + if (cryp_disable_power(&pdev->dev, device_data, false)) + dev_err(dev, "[%s]: cryp_disable_power() failed!", __func__); + + return 0; + +out_power: + cryp_disable_power(&pdev->dev, device_data, false); + +out_clk: + clk_put(device_data->clk); + +out_regulator: + regulator_put(device_data->pwr_regulator); + +out_unmap: + iounmap(device_data->base); + +out_free_mem: + release_mem_region(res->start, resource_size(res)); + +out_kfree: + kfree(device_data); +out: + return ret; +} + +static int u8500_cryp_remove(struct platform_device *pdev) +{ + struct resource *res = NULL; + struct resource *res_irq = NULL; + struct cryp_device_data *device_data; + + dev_dbg(&pdev->dev, "[%s]", __func__); + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", + __func__); + return -ENOMEM; + } + + /* Try to decrease the number of available devices. */ + if (down_trylock(&driver_data.device_allocation)) + return -EBUSY; + + /* Check that the device is free */ + spin_lock(&device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (device_data->current_ctx) { + /* The device is busy */ + spin_unlock(&device_data->ctx_lock); + /* Return the device to the pool. */ + up(&driver_data.device_allocation); + return -EBUSY; + } + + spin_unlock(&device_data->ctx_lock); + + /* Remove the device from the list */ + if (klist_node_attached(&device_data->list_node)) + klist_remove(&device_data->list_node); + + /* If this was the last device, remove the services */ + if (list_empty(&driver_data.device_list.k_list)) + cryp_algs_unregister_all(); + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", + __func__); + else { + disable_irq(res_irq->start); + free_irq(res_irq->start, device_data); + } + + if (cryp_disable_power(&pdev->dev, device_data, false)) + dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", + __func__); + + clk_put(device_data->clk); + regulator_put(device_data->pwr_regulator); + + iounmap(device_data->base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) + release_mem_region(res->start, res->end - res->start + 1); + + kfree(device_data); + + return 0; +} + +static void u8500_cryp_shutdown(struct platform_device *pdev) +{ + struct resource *res_irq = NULL; + struct cryp_device_data *device_data; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", + __func__); + return; + } + + /* Check that the device is free */ + spin_lock(&device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (!device_data->current_ctx) { + if (down_trylock(&driver_data.device_allocation)) + dev_dbg(&pdev->dev, "[%s]: Cryp still in use!" + "Shutting down anyway...", __func__); + /** + * (Allocate the device) + * Need to set this to non-null (dummy) value, + * to avoid usage if context switching. 
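Probe and remove also complete the device-pool scheme started in cryp_get_device_data(): driver_data.device_allocation is a counting semaphore that starts at zero (see the module init below), every successful probe publishes a device on the klist and does up(), every user does down_interruptible() before scanning the list, and remove uses down_trylock() so a device cannot be torn down while it is still lent out. Boiled down to the pattern itself (all names below are hypothetical):

#include <linux/semaphore.h>
#include <linux/klist.h>
#include <linux/errno.h>

static struct klist pool_list;
static struct semaphore pool_count;

static void pool_init(void)		/* module init: empty pool, count 0 */
{
	klist_init(&pool_list, NULL, NULL);
	sema_init(&pool_count, 0);
}

static void pool_publish(struct klist_node *node)	/* probe */
{
	klist_add_tail(node, &pool_list);
	up(&pool_count);
}

static int pool_acquire(void)				/* user */
{
	int ret = down_interruptible(&pool_count);
	if (ret)
		return ret;	/* interrupted while waiting for a device */
	/* ... walk pool_list with klist_iter_init()/klist_next() and
	 *     claim the first free entry under its lock ... */
	return 0;
}

static int pool_retire_one(void)			/* remove */
{
	if (down_trylock(&pool_count))
		return -EBUSY;	/* every device is currently in use */
	/* ... klist_remove() the node belonging to this device ... */
	return 0;
}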
+ */ + device_data->current_ctx++; + } + spin_unlock(&device_data->ctx_lock); + + /* Remove the device from the list */ + if (klist_node_attached(&device_data->list_node)) + klist_remove(&device_data->list_node); + + /* If this was the last device, remove the services */ + if (list_empty(&driver_data.device_list.k_list)) + cryp_algs_unregister_all(); + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", + __func__); + else { + disable_irq(res_irq->start); + free_irq(res_irq->start, device_data); + } + + if (cryp_disable_power(&pdev->dev, device_data, false)) + dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", + __func__); + +} + +static int u8500_cryp_suspend(struct platform_device *pdev, pm_message_t state) +{ + int ret; + struct cryp_device_data *device_data; + struct resource *res_irq; + struct cryp_ctx *temp_ctx = NULL; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + /* Handle state? */ + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", + __func__); + return -ENOMEM; + } + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", + __func__); + else + disable_irq(res_irq->start); + + spin_lock(&device_data->ctx_lock); + if (!device_data->current_ctx) + device_data->current_ctx++; + spin_unlock(&device_data->ctx_lock); + + if (device_data->current_ctx == ++temp_ctx) { + if (down_interruptible(&driver_data.device_allocation)) + dev_dbg(&pdev->dev, "[%s]: down_interruptible() " + "failed", __func__); + ret = cryp_disable_power(&pdev->dev, device_data, false); + + } else + ret = cryp_disable_power(&pdev->dev, device_data, true); + + if (ret) + dev_err(&pdev->dev, "[%s]: cryp_disable_power()", __func__); + + return ret; +} + +static int u8500_cryp_resume(struct platform_device *pdev) +{ + int ret = 0; + struct cryp_device_data *device_data; + struct resource *res_irq; + struct cryp_ctx *temp_ctx = NULL; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", + __func__); + return -ENOMEM; + } + + spin_lock(&device_data->ctx_lock); + if (device_data->current_ctx == ++temp_ctx) + device_data->current_ctx = NULL; + spin_unlock(&device_data->ctx_lock); + + + if (!device_data->current_ctx) + up(&driver_data.device_allocation); + else + ret = cryp_enable_power(&pdev->dev, device_data, true); + + if (ret) + dev_err(&pdev->dev, "[%s]: cryp_enable_power() failed!", + __func__); + else { + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (res_irq) + enable_irq(res_irq->start); + } + + return ret; +} + +static struct platform_driver cryp_driver = { + .probe = u8500_cryp_probe, + .remove = u8500_cryp_remove, + .shutdown = u8500_cryp_shutdown, + .suspend = u8500_cryp_suspend, + .resume = u8500_cryp_resume, + .driver = { + .owner = THIS_MODULE, + .name = "cryp1" + } +}; + +static int __init u8500_cryp_mod_init(void) +{ + pr_debug("[%s] is called!", __func__); + + klist_init(&driver_data.device_list, NULL, NULL); + /* Initialize the semaphore to 0 devices (locked state) */ + sema_init(&driver_data.device_allocation, 0); + return platform_driver_register(&cryp_driver); +} + +static void __exit u8500_cryp_mod_fini(void) +{ + pr_debug("[%s] is called!", __func__); + platform_driver_unregister(&cryp_driver); + return; +} + 
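The current_ctx bookkeeping in shutdown/suspend/resume deserves a note, since it leans on pointer arithmetic: a free device is "parked" by incrementing the NULL pointer, and the later comparison rebuilds the same dummy value from a local NULL to tell a parked device apart from one owned by a real context.

/* What the temp_ctx comparison amounts to:
 *
 *   struct cryp_ctx *temp_ctx = NULL;
 *   ++temp_ctx;                        // (struct cryp_ctx *)0 + 1, a dummy
 *
 *   device_data->current_ctx == temp_ctx
 *       true  -> suspend/shutdown parked the free device itself
 *                (via current_ctx++ starting from NULL)
 *       false -> either NULL (free) or a real context still owns it
 *
 * Resume clears the dummy back to NULL and returns the device to the
 * pool with up(); a device owned by a real context instead has its
 * saved state restored via cryp_enable_power(..., true).
 */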
+module_init(u8500_cryp_mod_init); +module_exit(u8500_cryp_mod_fini); + +module_param(cryp_mode, int, 0); + +MODULE_DESCRIPTION("Driver for ST-Ericsson U8500 CRYP crypto engine."); + +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c new file mode 100644 index 00000000000..eacff226aa8 --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_irq.c @@ -0,0 +1,45 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. + */ + +#include +#include +#include + +#include "cryp.h" +#include "cryp_p.h" +#include "cryp_irq.h" +#include "cryp_irqp.h" + +void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src) +{ + u32 i; + + dev_dbg(device_data->dev, "[%s]", __func__); + + i = readl(&device_data->base->imsc); + set_bit(irq_src, (void *)&i); + writel(i, &device_data->base->imsc); +} + +void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src) +{ + u32 i; + + dev_dbg(device_data->dev, "[%s]", __func__); + + i = readl(&device_data->base->imsc); + clear_bit(irq_src, (void *)&i); + writel(i, &device_data->base->imsc); +} + +bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src) +{ + return (readl(&device_data->base->mis) & irq_src) > 0; +} diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h new file mode 100644 index 00000000000..5a7837f1b8f --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_irq.h @@ -0,0 +1,31 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef _CRYP_IRQ_H_ +#define _CRYP_IRQ_H_ + +#include "cryp.h" + +enum cryp_irq_src_id { + CRYP_IRQ_SRC_INPUT_FIFO = 0x1, + CRYP_IRQ_SRC_OUTPUT_FIFO = 0x2, + CRYP_IRQ_SRC_ALL = 0x3 +}; + +/** + * M0 Funtions + */ +void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src); + +void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src); + +bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src); + +#endif /* _CRYP_IRQ_H_ */ diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h new file mode 100644 index 00000000000..5b60f887d02 --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_irqp.h @@ -0,0 +1,125 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. 
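cryp_enable_irq_src()/cryp_disable_irq_src() in cryp_irq.c above update the IMSC mask with a read-modify-write through the ioremapped register block. The same operation written with plain mask arithmetic is sketched below; note that the driver itself routes the update through set_bit()/clear_bit(), which take a bit number rather than a mask, so the enum cryp_irq_src_id values are effectively used as bit indices in those calls.

#include <linux/io.h>
#include <linux/types.h>

/* Read-modify-write of a memory-mapped interrupt mask register. */
static void imsc_unmask(void __iomem *imsc, u32 mask)
{
	writel(readl(imsc) | mask, imsc);	/* unmask the sources in 'mask' */
}

static void imsc_mask(void __iomem *imsc, u32 mask)
{
	writel(readl(imsc) & ~mask, imsc);	/* mask them again */
}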
+ * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef __CRYP_IRQP_H_ +#define __CRYP_IRQP_H_ + +#include "cryp_irq.h" + +/** + * + * CRYP Registers - Offset mapping + * +-----------------+ + * 00h | CRYP_CR | Configuration register + * +-----------------+ + * 04h | CRYP_SR | Status register + * +-----------------+ + * 08h | CRYP_DIN | Data In register + * +-----------------+ + * 0ch | CRYP_DOUT | Data out register + * +-----------------+ + * 10h | CRYP_DMACR | DMA control register + * +-----------------+ + * 14h | CRYP_IMSC | IMSC + * +-----------------+ + * 18h | CRYP_RIS | Raw interrupt status + * +-----------------+ + * 1ch | CRYP_MIS | Masked interrupt status. + * +-----------------+ + * Key registers + * IVR registers + * Peripheral + * Cell IDs + * + * Refer data structure for other register map + */ + +/** + * struct cryp_register + * @cr - Configuration register + * @status - Status register + * @din - Data input register + * @din_size - Data input size register + * @dout - Data output register + * @dout_size - Data output size register + * @dmacr - Dma control register + * @imsc - Interrupt mask set/clear register + * @ris - Raw interrupt status + * @mis - Masked interrupt statu register + * @key_1_l - Key register 1 L + * @key_1_r - Key register 1 R + * @key_2_l - Key register 2 L + * @key_2_r - Key register 2 R + * @key_3_l - Key register 3 L + * @key_3_r - Key register 3 R + * @key_4_l - Key register 4 L + * @key_4_r - Key register 4 R + * @init_vect_0_l - init vector 0 L + * @init_vect_0_r - init vector 0 R + * @init_vect_1_l - init vector 1 L + * @init_vect_1_r - init vector 1 R + * @cryp_unused1 - unused registers + * @itcr - Integration test control register + * @itip - Integration test input register + * @itop - Integration test output register + * @cryp_unused2 - unused registers + * @periphId0 - FE0 CRYP Peripheral Identication Register + * @periphId1 - FE4 + * @periphId2 - FE8 + * @periphId3 - FEC + * @pcellId0 - FF0 CRYP PCell Identication Register + * @pcellId1 - FF4 + * @pcellId2 - FF8 + * @pcellId3 - FFC + */ +struct cryp_register { + u32 cr; /* Configuration register */ + u32 status; /* Status register */ + u32 din; /* Data input register */ + u32 din_size; /* Data input size register */ + u32 dout; /* Data output register */ + u32 dout_size; /* Data output size register */ + u32 dmacr; /* Dma control register */ + u32 imsc; /* Interrupt mask set/clear register */ + u32 ris; /* Raw interrupt status */ + u32 mis; /* Masked interrupt statu register */ + + u32 key_1_l; /*Key register 1 L */ + u32 key_1_r; /*Key register 1 R */ + u32 key_2_l; /*Key register 2 L */ + u32 key_2_r; /*Key register 2 R */ + u32 key_3_l; /*Key register 3 L */ + u32 key_3_r; /*Key register 3 R */ + u32 key_4_l; /*Key register 4 L */ + u32 key_4_r; /*Key register 4 R */ + + u32 init_vect_0_l; /*init vector 0 L */ + u32 init_vect_0_r; /*init vector 0 R */ + u32 init_vect_1_l; /*init vector 1 L */ + u32 init_vect_1_r; /*init vector 1 R */ + + u32 cryp_unused1[(0x80 - 0x58) / sizeof(u32)]; /* unused registers */ + u32 itcr; /*Integration test control register */ + u32 itip; /*Integration test input register */ + u32 itop; /*Integration test output register */ + u32 cryp_unused2[(0xFE0 - 0x8C) / sizeof(u32)]; /* unused registers */ + + u32 periphId0; /* FE0 CRYP Peripheral Identication Register */ + u32 periphId1; /* FE4 */ + u32 periphId2; /* FE8 */ + u32 periphId3; /* FEC */ + + u32 pcellId0; /* FF0 CRYP PCell Identication Register */ + u32 pcellId1; /* FF4 */ + u32 
pcellId2; /* FF8 */ + u32 pcellId3; /* FFC */ +}; + +#endif diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h new file mode 100644 index 00000000000..966de4633cc --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_p.h @@ -0,0 +1,113 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef _CRYP_P_H_ +#define _CRYP_P_H_ + +#include +#include + +#include "cryp.h" +#include "cryp_irqp.h" + +/** + * Generic Macros + */ +#define CRYP_SET_BITS(reg_name, mask) \ + writel((readl(reg_name) | mask), reg_name) + +#define CRYP_WRITE_BIT(reg_name, val, mask) \ + writel(((readl(reg_name) & ~(mask)) | ((val) & (mask))), reg_name) + +#define CRYP_TEST_BITS(reg_name, val) \ + (readl(reg_name) & (val)) + +#define CRYP_PUT_BITS(reg, val, shift, mask) \ + writel(((readl(reg) & ~(mask)) | \ + (((u32)val << shift) & (mask))), reg) + +/** + * CRYP specific Macros + */ +#define CRYP_PERIPHERAL_ID0 0xE3 +#define CRYP_PERIPHERAL_ID1 0x05 +#define CRYP_PERIPHERAL_ID2 0x28 +#define CRYP_PERIPHERAL_ID3 0x00 + +#define CRYP_PCELL_ID0 0x0D +#define CRYP_PCELL_ID1 0xF0 +#define CRYP_PCELL_ID2 0x05 +#define CRYP_PCELL_ID3 0xB1 + +/** + * CRYP register default values + */ +#define MAX_DEVICE_SUPPORT 2 +#define CRYP_CR_DEFAULT 0x0002 +#define CRYP_CR_FFLUSH BIT(14) +#define CRYP_DMACR_DEFAULT 0x0 +#define CRYP_IMSC_DEFAULT 0x0 +#define CRYP_DIN_DEFAULT 0x0 +#define CRYP_DOUT_DEFAULT 0x0 +#define CRYP_KEY_DEFAULT 0x0 +#define CRYP_INIT_VECT_DEFAULT 0x0 + +/** + * CRYP Control register specific mask + */ +#define CRYP_SECURE_MASK BIT(0) +#define CRYP_PRLG_MASK BIT(1) +#define CRYP_ENC_DEC_MASK BIT(2) +#define CRYP_SR_BUSY_MASK BIT(4) +#define CRYP_KEY_ACCESS_MASK BIT(10) +#define CRYP_KSE_MASK BIT(11) +#define CRYP_START_MASK BIT(12) +#define CRYP_INIT_MASK BIT(13) +#define CRYP_FIFO_FLUSH_MASK BIT(14) +#define CRYP_CRYPEN_MASK BIT(15) +#define CRYP_INFIFO_READY_MASK (BIT(0) | BIT(1)) +#define CRYP_ALGOMODE_MASK (BIT(5) | BIT(4) | BIT(3)) +#define CRYP_DATA_TYPE_MASK (BIT(7) | BIT(6)) +#define CRYP_KEY_SIZE_MASK (BIT(9) | BIT(8)) + +/** + * Bit position used while setting bits in register + */ +#define CRYP_PRLG_POS 1 +#define CRYP_ENC_DEC_POS 2 +#define CRYP_ALGOMODE_POS 3 +#define CRYP_SR_BUSY_POS 4 +#define CRYP_DATA_TYPE_POS 6 +#define CRYP_KEY_SIZE_POS 8 +#define CRYP_KEY_ACCESS_POS 10 +#define CRYP_KSE_POS 11 +#define CRYP_START_POS 12 +#define CRYP_INIT_POS 13 +#define CRYP_CRYPEN_POS 15 + +/** + * CRYP Status register + */ +#define CRYP_BUSY_STATUS_MASK BIT(4) + +/** + * CRYP PCRs------PC_NAND control register + * BIT_MASK + */ +#define CRYP_DMA_REQ_MASK (BIT(1) | BIT(0)) +#define CRYP_DMA_REQ_MASK_POS 0 + + +struct cryp_system_context { + /* CRYP Register structure */ + struct cryp_register *p_cryp_reg[MAX_DEVICE_SUPPORT]; +}; + +#endif diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile old mode 100755 new mode 100644 index 613330a4ca4..6194da8eec8 --- a/drivers/crypto/ux500/hash/Makefile +++ b/drivers/crypto/ux500/hash/Makefile @@ -1,8 +1,10 @@ - -ifeq ($(CONFIG_CRYPTO_DEV_UX500_DEBUG_INFO),y) - EXTRA_CFLAGS += -D__DEBUG -else - EXTRA_CFLAGS += -D__RELEASE +# +# Copyright (C) ST-Ericsson SA 2010 +# Author: Shujuan Chen (shujuan.chen@stericsson.com) +# License terms: GNU 
General Public License (GPL) version 2 +# +ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG +CFLAGS_hash_core.o := -DDEBUG endif obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += u8500_hash.o diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h old mode 100755 new mode 100644 index e1f7c2eb60b..1c3dd5705fb --- a/drivers/crypto/ux500/hash/hash_alg.h +++ b/drivers/crypto/ux500/hash/hash_alg.h @@ -1,25 +1,19 @@ -#ifndef _HASH_ALG_H -#define _HASH_ALG_H /* - * Copyright (C) 2010 ST-Ericsson. - * Copyright (C) 2010 STMicroelectronics. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen (shujuan.chen@stericsson.com) + * Author: Joakim Bech (joakim.xx.bech@stericsson.com) + * License terms: GNU General Public License (GPL) version 2 */ -#ifdef __cplusplus -extern "C" { -#endif +#ifndef _HASH_ALG_H +#define _HASH_ALG_H #include /* Number of bytes the message digest */ #define HASH_MSG_DIGEST_SIZE 32 #define HASH_BLOCK_SIZE 64 - -#define __HASH_ENHANCED +#define HASH_SHA1_DIGEST_SIZE 20 +#define HASH_SHA2_DIGEST_SIZE 32 /* Version defines */ #define HASH_HCL_VERSION_ID 1 @@ -106,42 +100,42 @@ extern "C" { #define HASH_CELL_ID2 0x05 #define HASH_CELL_ID3 0xB1 -#define HASH_SET_DIN(val) HCL_WRITE_REG(g_sys_ctx.registry[hid]->din, (val)) +#define HASH_SET_DIN(val) HCL_WRITE_REG( \ + sys_ctx_g.registry[HASH_DEVICE_ID_1]->din, (val)) #define HASH_INITIALIZE \ HCL_WRITE_BITS( \ - g_sys_ctx.registry[hid]->cr, \ + sys_ctx_g.registry[HASH_DEVICE_ID_1]->cr, \ 0x01 << HASH_CR_INIT_POS, \ HASH_CR_INIT_MASK) #define HASH_SET_DATA_FORMAT(data_format) \ HCL_WRITE_BITS( \ - g_sys_ctx.registry[hid]->cr, \ + sys_ctx_g.registry[HASH_DEVICE_ID_1]->cr, \ (u32) (data_format) << HASH_CR_DATAFORM_POS, \ HASH_CR_DATAFORM_MASK) #define HASH_GET_HX(pos) \ - HCL_READ_REG(g_sys_ctx.registry[hid]->hx[pos]) - -#define HASH_SET_HX(pos, val) \ - HCL_WRITE_REG(g_sys_ctx.registry[hid]->hx[pos], (val)); + HCL_READ_REG(sys_ctx_g.registry[HASH_DEVICE_ID_1]->hx[pos]) #define HASH_SET_NBLW(val) \ HCL_WRITE_BITS( \ - g_sys_ctx.registry[hid]->str, \ + sys_ctx_g.registry[HASH_DEVICE_ID_1]->str, \ (u32) (val) << HASH_STR_NBLW_POS, \ HASH_STR_NBLW_MASK) #define HASH_SET_DCAL \ HCL_WRITE_BITS( \ - g_sys_ctx.registry[hid]->str, \ + sys_ctx_g.registry[HASH_DEVICE_ID_1]->str, \ 0x01 << HASH_STR_DCAL_POS, \ HASH_STR_DCAL_MASK) +#define HASH_BLOCK_BYTE_SIZE 64 + /** * struct uint64 - Structure to handle 64 bits integers. * @high_word: Most significant bits - * @high_word: Least significant bits + * @low_word: Least significant bits * * Used to handle 64 bits integers. */ @@ -184,19 +178,19 @@ struct hash_register { u32 str; u32 hx[8]; - u32 padding0[(0x080 - 0x02C) >> 2]; + u32 padding0[(0x080 - 0x02C) / sizeof(u32)]; u32 itcr; u32 itip; u32 itop; - u32 padding1[(0x0F8 - 0x08C) >> 2]; + u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)]; u32 csfull; u32 csdatain; u32 csrx[HASH_CSR_COUNT]; - u32 padding2[(0xFE0 - 0x1D0) >> 2]; + u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)]; u32 periphid0; u32 periphid1; @@ -249,7 +243,14 @@ struct hash_state { * @state: State of the hash device */ struct hash_system_context { - /* Pointer to HASH registers structure */ + /* + * Pointer to HASH registers structure. 
We know that this gives a + * checkpatch warning and in the current design it needs to be a + * volatile. We will change it when we will rewrite the driver similar + * to how we have done in cryp-part. We have also read + * Documentation/volatile-considered-harmful.txt as checkpatch tell + * us to do. + */ volatile struct hash_register *registry[MAX_HASH_DEVICE]; /* State of HASH device */ @@ -280,16 +281,6 @@ enum hash_data_format { HASH_DATA_1_BIT = 0x3 }; -/** - * enum hash_device_state - Device state - * @DISABLE: Disable the hash hardware - * @ENABLE: Enable the hash hardware - */ -enum hash_device_state { - DISABLE = 0, - ENABLE = 1 -}; - /** * struct hash_protection_config - Device protection configuration. * @privilege_access: FIXME, add comment. @@ -300,69 +291,6 @@ struct hash_protection_config { int secure_access; }; -/** - * enum hash_input_status - Data Input flag status. - * @HASH_DIN_EMPTY: Indicates that nothing is in data registers - * @HASH_DIN_FULL: Indicates that data registers are full - */ -enum hash_input_status { - HASH_DIN_EMPTY = 0, - HASH_DIN_FULL = 1 -}; - -/** - * Number of words already pushed - */ -enum hash_nbw_pushed { - HASH_NBW_00 = 0x00, - HASH_NBW_01 = 0x01, - HASH_NBW_02 = 0x02, - HASH_NBW_03 = 0x03, - HASH_NBW_04 = 0x04, - HASH_NBW_05 = 0x05, - HASH_NBW_06 = 0x06, - HASH_NBW_07 = 0x07, - HASH_NBW_08 = 0x08, - HASH_NBW_09 = 0x09, - HASH_NBW_10 = 0x0A, - HASH_NBW_11 = 0x0B, - HASH_NBW_12 = 0x0C, - HASH_NBW_13 = 0x0D, - HASH_NBW_14 = 0x0E, - HASH_NBW_15 = 0x0F -}; - -/** - * struct hash_device_status - Device status for DINF, NBW, and NBLW bit - * fields. - * @dinf_status: HASH data in full flag - * @nbw_status: Number of words already pushed - * @nblw_status: Number of Valid Bits Last Word of the Message - */ -struct hash_device_status { - int dinf_status; - int nbw_status; - u8 nblw_status; -}; - -/** - * enum hash_dma_request - Enumeration for HASH DMA request types. - */ -enum hash_dma_request { - HASH_DISABLE_DMA_REQ = 0x0, - HASH_ENABLE_DMA_REQ = 0x1 -}; - -/** - * enum hash_digest_cal - Enumeration for digest calculation. - * @HASH_DISABLE_DCAL: Indicates that DCAL bit is not set/used. - * @HASH_ENABLE_DCAL: Indicates that DCAL bit is set/used. - */ -enum hash_digest_cal { - HASH_DISABLE_DCAL = 0x0, - HASH_ENABLE_DCAL = 0x1 -}; - /** * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm * @HASH_ALGO_SHA1: Indicates that SHA1 is used. @@ -383,94 +311,61 @@ enum hash_op { HASH_OPER_MODE_HMAC = 0x1 }; -/** - * enum hash_key_type - Enumeration for selecting between long and short key. - * @HASH_SHORT_KEY: Key used is shorter or equal to block size (64 bytes) - * @HASH_LONG_KEY: Key used is greater than block size (64 bytes) - */ -enum hash_key_type { - HASH_SHORT_KEY = 0x0, - HASH_LONG_KEY = 0x1 -}; - /** * struct hash_config - Configuration data for the hardware * @data_format: Format of data entered into the hash data in register * @algorithm: Algorithm selection bit * @oper_mode: Operating mode selection bit - * @hmac_key: Long key selection bit HMAC mode */ struct hash_config { int data_format; int algorithm; int oper_mode; - int hmac_key; }; - /** - * enum hash_error - Error codes for hash. + * enum hash_rv - Return values / error codes for hash. 
*/ -enum hash_error { +enum hash_rv { HASH_OK = 0, HASH_MSG_LENGTH_OVERFLOW, - HASH_INTERNAL_ERROR, - HASH_NOT_CONFIGURED, - HASH_REQUEST_PENDING, - HASH_REQUEST_NOT_APPLICABLE, HASH_INVALID_PARAMETER, - HASH_UNSUPPORTED_FEATURE, HASH_UNSUPPORTED_HW }; -int hash_init_base_address(int hash_device_id, t_logical_address base_address); - -int HASH_GetVersion(t_version *p_version); - -int HASH_Reset(int hash_devive_id); - -int HASH_ConfigureDmaRequest(int hash_device_id, int request_state); - -int HASH_ConfigureLastValidBits(int hash_device_id, u8 nblw_val); - -int HASH_ConfigureDigestCal(int hash_device_id, int dcal_state); +/** + * struct hash_ctx - The context used for hash calculations. + * @key: The key used in the operation + * @keylen: The length of the key + * @updated: Indicates if hardware is initialized for new operations + * @state: The state of the current calculations + * @config: The current configuration + */ +struct hash_ctx { + u8 key[HASH_BLOCK_BYTE_SIZE]; + u32 keylen; + u8 updated; + struct hash_state state; + struct hash_config config; +}; -int HASH_ConfigureProtection(int hash_device_id, - struct hash_protection_config - *p_protect_config); +int hash_init_base_address(int hash_device_id, t_logical_address base_address); int hash_setconfiguration(int hash_device_id, struct hash_config *p_config); -int hash_begin(int hash_device_id); - -int hash_get_digest(int hash_device_id, u8 digest[HASH_MSG_DIGEST_SIZE]); +void hash_begin(struct hash_ctx *ctx); -int HASH_ClockGatingOff(int hash_device_id); +void hash_get_digest(int hid, u8 *digest, int algorithm); -struct hash_device_status HASH_GetDeviceStatus(int hash_device_id); - -t_bool HASH_IsDcalOngoing(int hash_device_id); - -int hash_hw_update(int hash_device_id, +int hash_hw_update(struct shash_desc *desc, + int hash_device_id, const u8 *p_data_buffer, u32 msg_length); -int hash_end(int hash_device_id, u8 digest[HASH_MSG_DIGEST_SIZE]); - -int hash_compute(int hash_device_id, - const u8 *p_data_buffer, - u32 msg_length, - struct hash_config *p_hash_config, - u8 digest[HASH_MSG_DIGEST_SIZE]); - -int hash_end_key(int hash_device_id); +int hash_end(struct hash_ctx *ctx, u8 digest[HASH_MSG_DIGEST_SIZE]); int hash_save_state(int hash_device_id, struct hash_state *state); int hash_resume_state(int hash_device_id, const struct hash_state *state); -#ifdef __cplusplus -} -#endif #endif - diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c old mode 100755 new mode 100644 index fd5f8a870bf..a2e4ebd8ac1 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -1,14 +1,13 @@ /* * Cryptographic API. - * * Support for Nomadik hardware crypto engine. - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * + + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson + * Author: Joakim Bech for ST-Ericsson + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. 
+ * License terms: GNU General Public License (GPL) version 2 */ #include @@ -20,85 +19,53 @@ #include #include #include - #include +#include +#include + #include #include -#include #include #include "hash_alg.h" #define DRIVER_NAME "DRIVER HASH" -/* enables/disables debug msgs */ +/* Enable/Disables debug msgs */ #define DRIVER_DEBUG 1 #define DRIVER_DEBUG_PFX DRIVER_NAME -#define DRIVER_DBG KERN_ERR +#define DRIVER_DBG KERN_DEBUG #define MAX_HASH_DIGEST_BYTE_SIZE 32 -#define HASH_BLOCK_BYTE_SIZE 64 -#define HASH_ACC_SYNC_CONTROL -#ifdef HASH_ACC_SYNC_CONTROL static struct mutex hash_hw_acc_mutex; -#endif -int debug; -static int mode; -static int contextsaving; -static struct hash_system_context g_sys_ctx; +static int debug; +static struct hash_system_context sys_ctx_g; +static struct hash_driver_data *internal_drv_data; /** * struct hash_driver_data - IO Base and clock. - * @base: The IO base for the block - * @clk: FIXME, add comment + * @base: The IO base for the block. + * @clk: The clock. + * @regulator: The current regulator. + * @power_state: TRUE = power state on, FALSE = power state off. + * @power_state_mutex: Mutex for power_state. + * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx. */ struct hash_driver_data { void __iomem *base; + struct device *dev; struct clk *clk; -}; - -/** - * struct hash_ctx - The context used for hash calculations. - * @key: The key used in the operation - * @keylen: The length of the key - * @updated: Indicates if hardware is initialized for new operations - * @state: The state of the current calculations - * @config: The current configuration - */ -struct hash_ctx { - u8 key[HASH_BLOCK_BYTE_SIZE]; - u32 keylen; - u8 updated; - struct hash_state state; - struct hash_config config; -}; - -/** - * struct hash_tfm_ctx - Transform context - * @key: The key stored in the transform context - * @keylen: The length of the key in the transform context - */ -struct hash_tfm_ctx { - u8 key[HASH_BLOCK_BYTE_SIZE]; - u32 keylen; + struct regulator *regulator; + bool power_state; + struct mutex power_state_mutex; + bool restore_dev_state; }; /* Declaration of functions */ static void hash_messagepad(int hid, const u32 *message, u8 index_bytes); -/** - * hexdump - Dumps buffers in hex. - * @buf: The buffer to dump - * @len: The length of the buffer - */ -static void hexdump(unsigned char *buf, unsigned int len) -{ - print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, - 16, 1, buf, len, false); -} - /** * clear_reg_str - Clear the registry hash_str. * @hid: Hardware device ID @@ -107,83 +74,86 @@ static void hexdump(unsigned char *buf, unsigned int len) */ static inline void clear_reg_str(int hid) { - /* We will only clear the valid registers and not the reserved */ - g_sys_ctx.registry[hid]->str &= ~HASH_STR_DCAL_MASK; - g_sys_ctx.registry[hid]->str &= ~HASH_STR_NBLW_MASK; + /* + * We will only clear NBLW since writing 0 to DCAL is done by the + * hardware + */ + sys_ctx_g.registry[hid]->str &= ~HASH_STR_NBLW_MASK; } -/** - * write_nblw - Writes the number of valid bytes to nblw. - * @hid: Hardware device ID - * @bytes: The number of valid bytes in last word of a message - * - * Note that this function only writes, i.e. it does not clear the registry - * before it writes the new data. 
- */ -static inline void write_nblw(int hid, int bytes) +static int hash_disable_power( + struct device *dev, + struct hash_driver_data *device_data, + bool save_device_state) { - g_sys_ctx.registry[hid]->str |= - ((bytes * 8) & HASH_STR_NBLW_MASK); -} + int ret = 0; -/** - * write_dcal - Write/set the dcal bit. - * @hid: Hardware device ID - */ -static inline void write_dcal(int hid) -{ - g_sys_ctx.registry[hid]->str |= (1 << HASH_STR_DCAL_POS); -} + dev_dbg(dev, "[%s]", __func__); -/** - * pad_message - Function that pads a message. - * @hid: Hardware device ID - * - * FIXME: This function should be replaced. - */ -static inline void pad_message(int hid) -{ - hash_messagepad(hid, g_sys_ctx.state[hid].buffer, - g_sys_ctx.state[hid].index); + mutex_lock(&device_data->power_state_mutex); + if (!device_data->power_state) + goto out; + + if (save_device_state) { + hash_save_state(HASH_DEVICE_ID_1, + &sys_ctx_g.state[HASH_DEVICE_ID_1]); + device_data->restore_dev_state = true; + } + + clk_disable(device_data->clk); + ret = regulator_disable(device_data->regulator); + if (ret) + dev_err(dev, "[%s]: " + "regulator_disable() failed!", + __func__); + + device_data->power_state = false; + +out: + mutex_unlock(&device_data->power_state_mutex); + + return ret; } -/** - * write_key - Writes the key to the hardware registries. - * @hid: Hardware device ID - * @key: The key used in the operation - * @keylen: The length of the key - * - * Note that in this function we DO NOT write to the NBLW registry even though - * the hardware reference manual says so. There must be incorrect information in - * the manual or there must be a bug in the state machine in the hardware. - */ -static void write_key(int hid, const u8 *key, u32 keylen) +static int hash_enable_power( + struct device *dev, + struct hash_driver_data *device_data, + bool restore_device_state) { - u32 word = 0; - clear_reg_str(hid); + int ret = 0; + + dev_dbg(dev, "[%s]", __func__); - while (keylen >= 4) { - word = ((u32) (key[3] & 255) << 24) | - ((u32) (key[2] & 255) << 16) | - ((u32) (key[1] & 255) << 8) | - ((u32) (key[0] & 255)); + mutex_lock(&device_data->power_state_mutex); + if (!device_data->power_state) { + ret = regulator_enable(device_data->regulator); + if (ret) { + dev_err(dev, "[%s]: regulator_enable() failed!", + __func__); + goto out; + } - HASH_SET_DIN(word); - keylen -= 4; - key += 4; + ret = clk_enable(device_data->clk); + if (ret) { + dev_err(dev, "[%s]: clk_enable() failed!", + __func__); + regulator_disable(device_data->regulator); + goto out; + } + device_data->power_state = true; } - /* This takes care of the remaining bytes on the last word */ - if (keylen) { - word = 0; - while (keylen) { - word |= (key[keylen - 1] << (8 * (keylen - 1))); - keylen--; + if (device_data->restore_dev_state) { + if (restore_device_state) { + device_data->restore_dev_state = false; + hash_resume_state(HASH_DEVICE_ID_1, + &sys_ctx_g.state[HASH_DEVICE_ID_1]); } - HASH_SET_DIN(word); } +out: + mutex_unlock(&device_data->power_state_mutex); - write_dcal(hid); + return ret; } /** @@ -196,32 +166,20 @@ static void write_key(int hid, const u8 *key, u32 keylen) static int init_hash_hw(struct shash_desc *desc) { int ret = 0; - int hash_error = HASH_OK; + int hash_rv; struct hash_ctx *ctx = shash_desc_ctx(desc); - stm_dbg(debug, "[init_hash_hw] (ctx=0x%x)!", (u32)ctx); + pr_debug("[init_hash_hw] (ctx=0x%x)!", (u32)ctx); - hash_error = hash_setconfiguration(HASH_DEVICE_ID_1, &ctx->config); - if (hash_error != HASH_OK) { - 
stm_error("hash_setconfiguration() failed!"); - ret = -1; - goto out; + hash_rv = hash_setconfiguration(HASH_DEVICE_ID_1, &ctx->config); + if (hash_rv != HASH_OK) { + pr_err("hash_setconfiguration() failed!"); + ret = -EPERM; + return ret; } - hash_error = hash_begin(HASH_DEVICE_ID_1); - if (hash_error != HASH_OK) { - stm_error("hash_begin() failed!"); - ret = -1; - goto out; - } - - if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) { - stm_dbg(debug, "[init_hash_hw] update key=0x%0x, len=%d", - (u32) ctx->key, ctx->keylen); - write_key(HASH_DEVICE_ID_1, ctx->key, ctx->keylen); - } + hash_begin(ctx); -out: return ret; } @@ -229,22 +187,13 @@ out: * hash_init - Common hash init function for SHA1/SHA2 (SHA256). * @desc: The hash descriptor for the job * - * Initialize structures and copy the key from the transform context to the - * descriptor context if the mode is HMAC. + * Initialize structures. */ static int hash_init(struct shash_desc *desc) { struct hash_ctx *ctx = shash_desc_ctx(desc); - struct hash_tfm_ctx *tfm_ctx = crypto_tfm_ctx(&desc->tfm->base); - - stm_dbg(debug, "[hash_init]: (ctx=0x%x)!", (u32)ctx); - if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) { - if (tfm_ctx->key) { - memcpy(ctx->key, tfm_ctx->key, tfm_ctx->keylen); - ctx->keylen = tfm_ctx->keylen; - } - } + pr_debug("[hash_init]: (ctx=0x%x)!", (u32)ctx); memset(&ctx->state, 0, sizeof(struct hash_state)); ctx->updated = 0; @@ -262,60 +211,23 @@ static int hash_update(struct shash_desc *desc, const u8 *data, unsigned int len) { int ret = 0; - int hash_error = HASH_OK; - struct hash_ctx *ctx = shash_desc_ctx(desc); + int hash_rv = HASH_OK; - stm_dbg(debug, "[hash_update]: (ctx=0x%x, data=0x%x, len=%d)!", - (u32)ctx, (u32)data, len); + pr_debug("[hash_update]: (data=0x%x, len=%d)!", + (u32)data, len); -#ifdef HASH_ACC_SYNC_CONTROL mutex_lock(&hash_hw_acc_mutex); -#endif - - if (!ctx->updated) { - ret = init_hash_hw(desc); - if (ret) { - stm_error("init_hash_hw() failed!"); - goto out; - } - } - - if (contextsaving) { - if (ctx->updated) { - hash_error = - hash_resume_state(HASH_DEVICE_ID_1, &ctx->state); - if (hash_error != HASH_OK) { - stm_error("hash_resume_state() failed!"); - ret = -1; - goto out; - } - } - } /* NOTE: The length of the message is in the form of number of bits */ - hash_error = hash_hw_update(HASH_DEVICE_ID_1, data, len * 8); - if (hash_error != HASH_OK) { - stm_error("hash_hw_update() failed!"); - ret = -1; + hash_rv = hash_hw_update(desc, HASH_DEVICE_ID_1, data, len * 8); + if (hash_rv != HASH_OK) { + pr_err("hash_hw_update() failed!"); + ret = -EPERM; goto out; } - if (contextsaving) { - hash_error = - hash_save_state(HASH_DEVICE_ID_1, &ctx->state); - if (hash_error != HASH_OK) { - stm_error("hash_save_state() failed!"); - ret = -1; - goto out; - } - - } - ctx->updated = 1; - out: -#ifdef HASH_ACC_SYNC_CONTROL mutex_unlock(&hash_hw_acc_mutex); -#endif return ret; } @@ -327,99 +239,60 @@ out: static int hash_final(struct shash_desc *desc, u8 *out) { int ret = 0; - int hash_error = HASH_OK; + int hash_rv = HASH_OK; struct hash_ctx *ctx = shash_desc_ctx(desc); + struct hash_driver_data *device_data = internal_drv_data; int digestsize = crypto_shash_digestsize(desc->tfm); u8 digest[HASH_MSG_DIGEST_SIZE]; - stm_dbg(debug, "[hash_final]: (ctx=0x%x)!", (u32) ctx); + pr_debug("[hash_final]: (ctx=0x%x)!", (u32) ctx); -#ifdef HASH_ACC_SYNC_CONTROL mutex_lock(&hash_hw_acc_mutex); -#endif - if (contextsaving) { - hash_error = hash_resume_state(HASH_DEVICE_ID_1, &ctx->state); + /* Enable device power (and 
clock) */ + ret = hash_enable_power(device_data->dev, device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "hash_enable_power() failed!", __func__); + goto out; + } - if (hash_error != HASH_OK) { - stm_error("hash_resume_state() failed!"); - ret = -1; - goto out; + if (!ctx->updated) { + ret = init_hash_hw(desc); + if (ret) { + pr_err("init_hash_hw() failed!"); + goto out_power; } - } + } else { + hash_rv = hash_resume_state(HASH_DEVICE_ID_1, &ctx->state); - pad_message(HASH_DEVICE_ID_1); + if (hash_rv != HASH_OK) { + pr_err("hash_resume_state() failed!"); + ret = -EPERM; + goto out_power; + } + } - if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) - write_key(HASH_DEVICE_ID_1, ctx->key, ctx->keylen); + hash_messagepad(HASH_DEVICE_ID_1, ctx->state.buffer, + ctx->state.index); - hash_error = hash_get_digest(HASH_DEVICE_ID_1, digest); + hash_get_digest(HASH_DEVICE_ID_1, digest, ctx->config.algorithm); memcpy(out, digest, digestsize); +out_power: + /* Disable power (and clock) */ + if (hash_disable_power(device_data->dev, device_data, false)) + dev_err(device_data->dev, "[%s]: " + "hash_disable_power() failed!", __func__); + out: -#ifdef HASH_ACC_SYNC_CONTROL mutex_unlock(&hash_hw_acc_mutex); -#endif return ret; } -/** - * hash_setkey - The setkey function for providing the key during HMAC - * calculations. - * @tfm: Pointer to the transform - * @key: The key used in the operation - * @keylen: The length of the key - * @alg: The algorithm to use in the operation - */ -static int hash_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen, int alg) -{ - int ret = 0; - int hash_error = HASH_OK; - - struct hash_tfm_ctx *ctx_tfm = crypto_shash_ctx(tfm); - - stm_dbg(debug, "[hash_setkey]: (ctx_tfm=0x%x, key=0x%x, keylen=%d)!", - (u32) ctx_tfm, (u32) key, keylen); - - /* Truncate the key to block size */ - if (keylen > HASH_BLOCK_BYTE_SIZE) { - struct hash_config config; - u8 digest[MAX_HASH_DIGEST_BYTE_SIZE]; - unsigned int digestsize = crypto_shash_digestsize(tfm); - - config.algorithm = alg; - config.data_format = HASH_DATA_8_BITS; - config.oper_mode = HASH_OPER_MODE_HASH; - -#ifdef HASH_ACC_SYNC_CONTROL - mutex_lock(&hash_hw_acc_mutex); -#endif - hash_error = hash_compute(HASH_DEVICE_ID_1, key, keylen * 8, - &config, digest); -#ifdef HASH_ACC_SYNC_CONTROL - mutex_unlock(&hash_hw_acc_mutex); -#endif - if (hash_error != HASH_OK) { - stm_error("Error: hash_compute() failed!"); - ret = -1; - goto out; - } - - memcpy(ctx_tfm->key, digest, digestsize); - ctx_tfm->keylen = digestsize; - } else { - memcpy(ctx_tfm->key, key, keylen); - ctx_tfm->keylen = keylen; - } - -out: - return ret; -} - /** * sha1_init - SHA1 init function. * @desc: The hash descriptor for the job @@ -428,7 +301,7 @@ static int sha1_init(struct shash_desc *desc) { struct hash_ctx *ctx = shash_desc_ctx(desc); - stm_dbg(debug, "[sha1_init]: (ctx=0x%x)!", (u32) ctx); + pr_debug("[sha1_init]: (ctx=0x%x)!", (u32) ctx); ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA1; @@ -445,7 +318,7 @@ static int sha256_init(struct shash_desc *desc) { struct hash_ctx *ctx = shash_desc_ctx(desc); - stm_dbg(debug, "[sha256_init]: (ctx=0x%x)!", (u32) ctx); + pr_debug("[sha256_init]: (ctx=0x%x)!", (u32) ctx); ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA2; @@ -454,70 +327,24 @@ static int sha256_init(struct shash_desc *desc) return hash_init(desc); } -/** - * hmac_sha1_init - SHA1 HMAC init function. 
- * @desc: The hash descriptor for the job - */ -static int hmac_sha1_init(struct shash_desc *desc) +static int hash_export(struct shash_desc *desc, void *out) { struct hash_ctx *ctx = shash_desc_ctx(desc); - stm_dbg(debug, "[hmac_sha1_init]: (ctx=0x%x)!", (u32) ctx); - - ctx->config.data_format = HASH_DATA_8_BITS; - ctx->config.algorithm = HASH_ALGO_SHA1; - ctx->config.oper_mode = HASH_OPER_MODE_HMAC; - ctx->config.hmac_key = HASH_SHORT_KEY; - - return hash_init(desc); + pr_debug("[hash_export]: (ctx=0x%X) (out=0x%X)", + (u32) ctx, (u32) out); + memcpy(out, ctx, sizeof(*ctx)); + return 0; } -/** - * hmac_sha256_init - SHA2 (SHA256) HMAC init function. - * @desc: The hash descriptor for the job - */ -static int hmac_sha256_init(struct shash_desc *desc) +static int hash_import(struct shash_desc *desc, const void *in) { struct hash_ctx *ctx = shash_desc_ctx(desc); - stm_dbg(debug, "[hmac_sha256_init]: (ctx=0x%x)!", (u32) ctx); - - ctx->config.data_format = HASH_DATA_8_BITS; - ctx->config.algorithm = HASH_ALGO_SHA2; - ctx->config.oper_mode = HASH_OPER_MODE_HMAC; - ctx->config.hmac_key = HASH_SHORT_KEY; - - return hash_init(desc); -} - -/** - * hmac_sha1_setkey - SHA1 HMAC setkey function. - * @tfm: Pointer to the transform - * @key: The key used in the operation - * @keylen: The length of the key - */ -static int hmac_sha1_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) -{ - stm_dbg(debug, "[hmac_sha1_setkey]: (tfm=0x%x, key=0x%x, keylen=%d)!", - (u32) tfm, (u32) key, keylen); - - return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); -} - -/** - * hmac_sha256_setkey - SHA2 (SHA256) HMAC setkey function. - * @tfm: Pointer to the transform - * @key: The key used in the operation - * @keylen: The length of the key - */ -static int hmac_sha256_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) -{ - stm_dbg(debug, "[hmac_sha256_setkey]: (tfm=0x%x, key=0x%x, keylen=%d)!", - (u32) tfm, (u32) key, keylen); - - return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA2); + pr_debug("[hash_import]: (ctx=0x%x) (in =0x%X)", + (u32) ctx, (u32) in); + memcpy(ctx, in, sizeof(*ctx)); + return 0; } static struct shash_alg sha1_alg = { @@ -525,16 +352,17 @@ static struct shash_alg sha1_alg = { .init = sha1_init, .update = hash_update, .final = hash_final, + .export = hash_export, + .import = hash_import, .descsize = sizeof(struct hash_ctx), + .statesize = sizeof(struct hash_ctx), .base = { - .cra_name = "sha1", - .cra_driver_name = "sha1-u8500", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST | - CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct hash_tfm_ctx), - .cra_module = THIS_MODULE, - } + .cra_name = "sha1", + .cra_driver_name = "sha1-u8500", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static struct shash_alg sha256_alg = { @@ -542,52 +370,17 @@ static struct shash_alg sha256_alg = { .init = sha256_init, .update = hash_update, .final = hash_final, + .export = hash_export, + .import = hash_import, .descsize = sizeof(struct hash_ctx), + .statesize = sizeof(struct hash_ctx), .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256-u8500", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST | - CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct hash_tfm_ctx), - .cra_module = THIS_MODULE, - } -}; - -static struct shash_alg hmac_sha1_alg = { - .digestsize = SHA1_DIGEST_SIZE, - .init = hmac_sha1_init, - .update = hash_update, - .final = hash_final, 
- .setkey = hmac_sha1_setkey, - .descsize = sizeof(struct hash_ctx), - .base = { - .cra_name = "hmac(sha1)", - .cra_driver_name = "hmac(sha1-u8500)", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST | - CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct hash_tfm_ctx), - .cra_module = THIS_MODULE, - } -}; - -static struct shash_alg hmac_sha256_alg = { - .digestsize = SHA256_DIGEST_SIZE, - .init = hmac_sha256_init, - .update = hash_update, - .final = hash_final, - .setkey = hmac_sha256_setkey, - .descsize = sizeof(struct hash_ctx), - .base = { - .cra_name = "hmac(sha256)", - .cra_driver_name = "hmac(sha256-u8500)", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST | - CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct hash_tfm_ctx), - .cra_module = THIS_MODULE, - } + .cra_name = "sha256", + .cra_driver_name = "sha256-u8500", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; /** @@ -597,155 +390,129 @@ static struct shash_alg hmac_sha256_alg = { static int u8500_hash_probe(struct platform_device *pdev) { int ret = 0; - int hash_error = HASH_OK; + int hash_rv = HASH_OK; struct resource *res = NULL; struct hash_driver_data *hash_drv_data; - stm_dbg(debug, "[u8500_hash_probe]: (pdev=0x%x)", (u32) pdev); + pr_debug("[u8500_hash_probe]: (pdev=0x%x)", (u32) pdev); - stm_dbg(debug, "[u8500_hash_probe]: Calling kzalloc()!"); + pr_debug("[u8500_hash_probe]: Calling kzalloc()!"); hash_drv_data = kzalloc(sizeof(struct hash_driver_data), GFP_KERNEL); if (!hash_drv_data) { - stm_dbg(debug, "kzalloc() failed!"); + pr_debug("kzalloc() failed!"); ret = -ENOMEM; goto out; } - stm_dbg(debug, "[u8500_hash_probe]: Calling platform_get_resource()!"); + hash_drv_data->dev = &pdev->dev; + + pr_debug("[u8500_hash_probe]: Calling platform_get_resource()!"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { - stm_dbg(debug, "platform_get_resource() failed"); + pr_debug("platform_get_resource() failed"); ret = -ENODEV; goto out_kfree; } - stm_dbg(debug, "[u8500_hash_probe]: Calling request_mem_region()!"); - res = request_mem_region(res->start, res->end - res->start + 1, - pdev->name); + pr_debug("[u8500_hash_probe]: Calling request_mem_region()!"); + res = request_mem_region(res->start, resource_size(res), pdev->name); if (res == NULL) { - stm_dbg(debug, "request_mem_region() failed"); + pr_debug("request_mem_region() failed"); ret = -EBUSY; goto out_kfree; } - stm_dbg(debug, "[u8500_hash_probe]: Calling ioremap()!"); - hash_drv_data->base = ioremap(res->start, res->end - res->start + 1); + pr_debug("[u8500_hash_probe]: Calling ioremap()!"); + hash_drv_data->base = ioremap(res->start, resource_size(res)); if (!hash_drv_data->base) { - stm_error - ("[u8500_hash] ioremap of hash1 register memory failed!"); + pr_err("[u8500_hash] " + "ioremap of hash1 register memory failed!"); ret = -ENOMEM; goto out_free_mem; } + mutex_init(&hash_drv_data->power_state_mutex); + + /* Enable power for HASH hardware block */ + hash_drv_data->regulator = regulator_get(&pdev->dev, "v-ape"); + if (IS_ERR(hash_drv_data->regulator)) { + dev_err(&pdev->dev, "[u8500_hash] " + "could not get hash regulator\n"); + ret = PTR_ERR(hash_drv_data->regulator); + hash_drv_data->regulator = NULL; + goto out_unmap; + } - stm_dbg(debug, "[u8500_hash_probe]: Calling clk_get()!"); + pr_debug("[u8500_hash_probe]: Calling clk_get()!"); /* Enable the clk for HASH1 hardware block */ hash_drv_data->clk = clk_get(&pdev->dev, NULL); 
if (IS_ERR(hash_drv_data->clk)) { - stm_error("clk_get() failed!"); + pr_err("clk_get() failed!"); ret = PTR_ERR(hash_drv_data->clk); - goto out_unmap; + goto out_regulator; } - stm_dbg(debug, "[u8500_hash_probe]: Calling clk_enable()!"); - ret = clk_enable(hash_drv_data->clk); + /* Enable device power (and clock) */ + ret = hash_enable_power(&pdev->dev, hash_drv_data, false); if (ret) { - stm_error("clk_enable() failed!"); - goto out_unmap; + dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!", + __func__); + goto out_clk; } - stm_dbg(debug, - "[u8500_hash_probe]: Calling hash_init_base_address()->" - "(base=0x%x,DEVICE_ID=%d)!", - (u32) hash_drv_data->base, HASH_DEVICE_ID_1); + pr_debug("[u8500_hash_probe]: Calling hash_init_base_address()->" + "(base=0x%x,DEVICE_ID=%d)!", + (u32) hash_drv_data->base, HASH_DEVICE_ID_1); /* Setting base address */ - hash_error = + hash_rv = hash_init_base_address(HASH_DEVICE_ID_1, (t_logical_address) hash_drv_data->base); - if (hash_error != HASH_OK) { - stm_error("hash_init_base_address() failed!"); - ret = -1; /*TODO: what error code should be used here!? */ - goto out_clk; + if (hash_rv != HASH_OK) { + pr_err("hash_init_base_address() failed!"); + ret = -EPERM; + goto out_power; } -#ifdef HASH_ACC_SYNC_CONTROL - stm_dbg(debug, "[u8500_hash_probe]: Calling mutex_init()!"); + pr_debug("[u8500_hash_probe]: Calling mutex_init()!"); mutex_init(&hash_hw_acc_mutex); -#endif - if (mode == 0) { - stm_dbg(debug, - "[u8500_hash_probe]: To register all algorithms!"); - - ret = crypto_register_shash(&sha1_alg); - if (ret) { - stm_error("Could not register sha1_alg!"); - goto out_clk; - } - stm_dbg(debug, "[u8500_hash_probe]: sha1_alg registered!"); - - ret = crypto_register_shash(&sha256_alg); - if (ret) { - stm_error("Could not register sha256_alg!"); - goto out_unreg1; - } - stm_dbg(debug, "[u8500_hash_probe]: sha256_alg registered!"); + pr_debug("[u8500_hash_probe]: To register only sha1 and sha256" + " algorithms!"); + internal_drv_data = hash_drv_data; - ret = crypto_register_shash(&hmac_sha1_alg); - if (ret) { - stm_error("Could not register hmac_sha1_alg!"); - goto out_unreg2; - } - stm_dbg(debug, "[u8500_hash_probe]: hmac_sha1_alg registered!"); - - ret = crypto_register_shash(&hmac_sha256_alg); - if (ret) { - stm_error("Could not register hmac_sha256_alg!"); - goto out_unreg3; - } - stm_dbg(debug, - "[u8500_hash_probe]: hmac_sha256_alg registered!"); + ret = crypto_register_shash(&sha1_alg); + if (ret) { + pr_err("Could not register sha1_alg!"); + goto out_power; } + pr_debug("[u8500_hash_probe]: sha1_alg registered!"); - if (mode == 10) { - stm_dbg(debug, - "[u8500_hash_probe]: To register only sha1 and sha256" - " algorithms!"); - - ret = crypto_register_shash(&sha1_alg); - if (ret) { - stm_error("Could not register sha1_alg!"); - goto out_clk; - } - - ret = crypto_register_shash(&sha256_alg); - if (ret) { - stm_error("Could not register sha256_alg!"); - goto out_unreg1_tmp; - } + ret = crypto_register_shash(&sha256_alg); + if (ret) { + pr_err("Could not register sha256_alg!"); + goto out_unreg1_tmp; } - stm_dbg(debug, "[u8500_hash_probe]: Calling platform_set_drvdata()!"); + pr_debug("[u8500_hash_probe]: Calling platform_set_drvdata()!"); platform_set_drvdata(pdev, hash_drv_data); - return 0; - if (mode == 0) { -out_unreg1: - crypto_unregister_shash(&sha1_alg); -out_unreg2: - crypto_unregister_shash(&sha256_alg); -out_unreg3: - crypto_unregister_shash(&hmac_sha1_alg); - } + if (hash_disable_power(&pdev->dev, hash_drv_data, false)) + 
dev_err(&pdev->dev, "[%s]: hash_disable_power()" + " failed!", __func__); + + return 0; - if (mode == 10) { out_unreg1_tmp: - crypto_unregister_shash(&sha1_alg); - } + crypto_unregister_shash(&sha1_alg); + +out_power: + hash_disable_power(&pdev->dev, hash_drv_data, false); out_clk: - clk_disable(hash_drv_data->clk); clk_put(hash_drv_data->clk); +out_regulator: + regulator_put(hash_drv_data->regulator); + out_unmap: iounmap(hash_drv_data->base); @@ -767,60 +534,133 @@ static int u8500_hash_remove(struct platform_device *pdev) struct resource *res; struct hash_driver_data *hash_drv_data; - stm_dbg(debug, "[u8500_hash_remove]: (pdev=0x%x)", (u32) pdev); + pr_debug("[u8500_hash_remove]: (pdev=0x%x)", (u32) pdev); - stm_dbg(debug, "[u8500_hash_remove]: Calling platform_get_drvdata()!"); + pr_debug("[u8500_hash_remove]: Calling platform_get_drvdata()!"); hash_drv_data = platform_get_drvdata(pdev); - if (mode == 0) { - stm_dbg(debug, - "[u8500_hash_remove]: To unregister all algorithms!"); - crypto_unregister_shash(&sha1_alg); - crypto_unregister_shash(&sha256_alg); - crypto_unregister_shash(&hmac_sha1_alg); - crypto_unregister_shash(&hmac_sha256_alg); - } + pr_debug("[u8500_hash_remove]: To unregister only sha1 and " + "sha256 algorithms!"); + crypto_unregister_shash(&sha1_alg); + crypto_unregister_shash(&sha256_alg); - if (mode == 10) { - stm_dbg(debug, - "[u8500_hash_remove]: To unregister only sha1 and " - "sha256 algorithms!"); - crypto_unregister_shash(&sha1_alg); - crypto_unregister_shash(&sha256_alg); - } -#ifdef HASH_ACC_SYNC_CONTROL - stm_dbg(debug, "[u8500_hash_remove]: Calling mutex_destroy()!"); + pr_debug("[u8500_hash_remove]: Calling mutex_destroy()!"); mutex_destroy(&hash_hw_acc_mutex); -#endif - stm_dbg(debug, "[u8500_hash_remove]: Calling clk_disable()!"); + pr_debug("[u8500_hash_remove]: Calling clk_disable()!"); clk_disable(hash_drv_data->clk); - stm_dbg(debug, "[u8500_hash_remove]: Calling clk_put()!"); + pr_debug("[u8500_hash_remove]: Calling clk_put()!"); clk_put(hash_drv_data->clk); - stm_dbg(debug, "[u8500_hash_remove]: Calling iounmap(): base = 0x%x", - (u32) hash_drv_data->base); + pr_debug("[u8500_hash_remove]: Calling regulator_disable()!"); + regulator_disable(hash_drv_data->regulator); + + pr_debug("[u8500_hash_remove]: Calling iounmap(): base = 0x%x", + (u32) hash_drv_data->base); iounmap(hash_drv_data->base); - stm_dbg(debug, "[u8500_hash_remove]: Calling platform_get_resource()!"); + pr_debug("[u8500_hash_remove]: Calling platform_get_resource()!"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - stm_dbg(debug, - "[u8500_hash_remove]: Calling release_mem_region()" - "->res->start=0x%x, res->end = 0x%x!", + pr_debug("[u8500_hash_remove]: Calling release_mem_region()" + "->res->start=0x%x, res->end = 0x%x!", res->start, res->end); release_mem_region(res->start, res->end - res->start + 1); - stm_dbg(debug, "[u8500_hash_remove]: Calling kfree()!"); + pr_debug("[u8500_hash_remove]: Calling kfree()!"); kfree(hash_drv_data); return 0; } +static void u8500_hash_shutdown(struct platform_device *pdev) +{ + struct resource *res = NULL; + struct hash_driver_data *hash_drv_data; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + hash_drv_data = platform_get_drvdata(pdev); + if (!hash_drv_data) { + dev_err(&pdev->dev, "[%s]: " + "platform_get_drvdata() failed!", __func__); + return; + } + + crypto_unregister_shash(&sha1_alg); + crypto_unregister_shash(&sha256_alg); + + mutex_destroy(&hash_hw_acc_mutex); + + iounmap(hash_drv_data->base); + + res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) + release_mem_region(res->start, resource_size(res)); + + if (hash_disable_power(&pdev->dev, hash_drv_data, false)) + dev_err(&pdev->dev, "[%s]: " + "hash_disable_power() failed", __func__); + + clk_put(hash_drv_data->clk); + regulator_put(hash_drv_data->regulator); +} + +static int u8500_hash_suspend(struct platform_device *pdev, pm_message_t state) +{ + int ret; + struct hash_driver_data *hash_drv_data; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + /* Handle state? */ + hash_drv_data = platform_get_drvdata(pdev); + if (!hash_drv_data) { + dev_err(&pdev->dev, "[%s]: " + "platform_get_drvdata() failed!", __func__); + return -ENOMEM; + } + + ret = hash_disable_power(&pdev->dev, hash_drv_data, true); + if (ret) + dev_err(&pdev->dev, "[%s]: " + "hash_disable_power()", __func__); + + return ret; +} + +static int u8500_hash_resume(struct platform_device *pdev) +{ + int ret = 0; + struct hash_driver_data *hash_drv_data; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + hash_drv_data = platform_get_drvdata(pdev); + if (!hash_drv_data) { + dev_err(&pdev->dev, "[%s]: " + "platform_get_drvdata() failed!", __func__); + return -ENOMEM; + } + + if (hash_drv_data->restore_dev_state) { + ret = hash_enable_power(&pdev->dev, hash_drv_data, true); + if (ret) + dev_err(&pdev->dev, "[%s]: " + "hash_enable_power() failed!", __func__); + } + + return ret; +} + + static struct platform_driver hash_driver = { .probe = u8500_hash_probe, .remove = u8500_hash_remove, + .shutdown = u8500_hash_shutdown, + .suspend = u8500_hash_suspend, + .resume = u8500_hash_resume, .driver = { .owner = THIS_MODULE, .name = "hash1", @@ -832,7 +672,7 @@ static struct platform_driver hash_driver = { */ static int __init u8500_hash_mod_init(void) { - stm_dbg(debug, "u8500_hash_mod_init() is called!"); + pr_debug("u8500_hash_mod_init() is called!"); return platform_driver_register(&hash_driver); } @@ -842,7 +682,7 @@ static int __init u8500_hash_mod_init(void) */ static void __exit u8500_hash_mod_fini(void) { - stm_dbg(debug, "u8500_hash_mod_fini() is called!"); + pr_debug("u8500_hash_mod_fini() is called!"); platform_driver_unregister(&hash_driver); return; @@ -860,14 +700,10 @@ static void hash_processblock(int hid, const u32 *message) { u32 count; - HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->str, - HASH_STR_DCAL_MASK); - HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->str, - HASH_STR_NBLW_MASK); + clear_bit(HASH_STR_NBLW_MASK, (void *)sys_ctx_g.registry[hid]->str); /* Partially unrolled loop */ - for (count = 0; count < (HASH_BLOCK_SIZE / sizeof(u32)); - count += 4) { + for (count = 0; count < (HASH_BLOCK_SIZE / sizeof(u32)); count += 4) { HASH_SET_DIN(message[0]); HASH_SET_DIN(message[1]); HASH_SET_DIN(message[2]); @@ -889,8 +725,8 @@ static void hash_processblock(int hid, const u32 *message) */ static void hash_messagepad(int hid, const u32 *message, u8 index_bytes) { - stm_dbg(debug, "[u8500_hash_alg] hash_messagepad" - "(bytes in final msg=%d))", index_bytes); + pr_debug("[u8500_hash_alg] hash_messagepad" + "(bytes in final msg=%d))", index_bytes); clear_reg_str(hid); @@ -904,34 +740,39 @@ static void hash_messagepad(int hid, const u32 *message, u8 index_bytes) if (index_bytes) HASH_SET_DIN(message[0]); + while (sys_ctx_g.registry[hid]->str & HASH_STR_DCAL_MASK) + cpu_relax(); + /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ HASH_SET_NBLW(index_bytes * 8); - stm_dbg(debug, "[u8500_hash_alg] hash_messagepad -> DIN=0x%08x NBLW=%d", - g_sys_ctx.registry[hid]->din, - 
g_sys_ctx.registry[hid]->str); + pr_debug("[u8500_hash_alg] hash_messagepad -> DIN=0x%08x NBLW=%d", + sys_ctx_g.registry[hid]->din, + sys_ctx_g.registry[hid]->str); HASH_SET_DCAL; - stm_dbg(debug, "[u8500_hash_alg] hash_messagepad d -> " - "DIN=0x%08x NBLW=%d", - g_sys_ctx.registry[hid]->din, - g_sys_ctx.registry[hid]->str); + pr_debug("[u8500_hash_alg] hash_messagepad after dcal -> " + "DIN=0x%08x NBLW=%d", + sys_ctx_g.registry[hid]->din, + sys_ctx_g.registry[hid]->str); + while (sys_ctx_g.registry[hid]->str & HASH_STR_DCAL_MASK) + cpu_relax(); } /** * hash_incrementlength - Increments the length of the current message. - * @hid: Hardware device ID + * @ctx: Hash context * @incr: Length of message processed already * * Overflow cannot occur, because conditions for overflow are checked in * hash_hw_update. */ -static void hash_incrementlength(int hid, u32 incr) +static void hash_incrementlength(struct hash_ctx *ctx, u32 incr) { - g_sys_ctx.state[hid].length.low_word += incr; + ctx->state.length.low_word += incr; /* Check for wrap-around */ - if (g_sys_ctx.state[hid].length.low_word < incr) - g_sys_ctx.state[hid].length.high_word++; + if (ctx->state.length.low_word < incr) + ctx->state.length.high_word++; } /** @@ -963,60 +804,49 @@ static void hash_incrementlength(int hid, u32 incr) */ int hash_setconfiguration(int hid, struct hash_config *p_config) { - int hash_error = HASH_OK; + int hash_rv = HASH_OK; - stm_dbg(debug, "[u8500_hash_alg] hash_setconfiguration())"); + pr_debug("[u8500_hash_alg] hash_setconfiguration())"); - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } + if (p_config->algorithm != HASH_ALGO_SHA1 && + p_config->algorithm != HASH_ALGO_SHA2) + return HASH_INVALID_PARAMETER; HASH_SET_DATA_FORMAT(p_config->data_format); - HCL_SET_BITS(g_sys_ctx.registry[hid]->cr, - HASH_CR_EMPTYMSG_MASK); + HCL_SET_BITS(sys_ctx_g.registry[hid]->cr, HASH_CR_EMPTYMSG_MASK); - /* This bit selects between SHA-1 or SHA-2 algorithm */ - if (HASH_ALGO_SHA2 == p_config->algorithm) { - HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->cr, - HASH_CR_ALGO_MASK); - } else { /* SHA1 algorithm */ + switch (p_config->algorithm) { + case HASH_ALGO_SHA1: + HCL_SET_BITS(sys_ctx_g.registry[hid]->cr, HASH_CR_ALGO_MASK); + break; - HCL_SET_BITS(g_sys_ctx.registry[hid]->cr, - HASH_CR_ALGO_MASK); + case HASH_ALGO_SHA2: + HCL_CLEAR_BITS(sys_ctx_g.registry[hid]->cr, HASH_CR_ALGO_MASK); + break; + + default: + pr_debug("[u8500_hash_alg] Incorrect algorithm."); + return HASH_INVALID_PARAMETER; } /* This bit selects between HASH or HMAC mode for the selected algorithm */ if (HASH_OPER_MODE_HASH == p_config->oper_mode) { - HCL_CLEAR_BITS(g_sys_ctx.registry + HCL_CLEAR_BITS(sys_ctx_g.registry [hid]->cr, HASH_CR_MODE_MASK); - } else { /* HMAC mode */ - - HCL_SET_BITS(g_sys_ctx.registry[hid]->cr, - HASH_CR_MODE_MASK); - - /* This bit selects between short key (<= 64 bytes) or long key - (>64 bytes) in HMAC mode */ - if (HASH_SHORT_KEY == p_config->hmac_key) { - HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->cr, - HASH_CR_LKEY_MASK); - } else { - HCL_SET_BITS(g_sys_ctx.registry[hid]->cr, - HASH_CR_LKEY_MASK); - } + } else { /* HMAC mode or wrong hash mode */ + hash_rv = HASH_INVALID_PARAMETER; + pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); } - return hash_error; + return hash_rv; } /** * hash_begin - This routine resets some globals and initializes the hash * hardware. 
- * @hid: Hardware device ID + * @ctx: Hash context * * Reentrancy: Non Re-entrant * @@ -1033,35 +863,20 @@ int hash_setconfiguration(int hid, struct hash_config *p_config) * So the user has to initialize the device for new * configuration to take in to effect. */ -int hash_begin(int hid) +void hash_begin(struct hash_ctx *ctx) { - int hash_error = HASH_OK; - /* HW and SW initializations */ /* Note: there is no need to initialize buffer and digest members */ - stm_dbg(debug, "[u8500_hash_alg] hash_begin())"); - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } + pr_debug("[u8500_hash_alg] hash_begin())"); - g_sys_ctx.state[hid].index = 0; - g_sys_ctx.state[hid].bit_index = 0; - g_sys_ctx.state[hid].length.high_word = 0; - g_sys_ctx.state[hid].length.low_word = 0; + while (sys_ctx_g.registry[HASH_DEVICE_ID_1]->str & HASH_STR_DCAL_MASK) + cpu_relax(); HASH_INITIALIZE; - HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->str, - HASH_STR_DCAL_MASK); - HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->str, + HCL_CLEAR_BITS(sys_ctx_g.registry[HASH_DEVICE_ID_1]->str, HASH_STR_NBLW_MASK); - - return hash_error; } /** @@ -1074,57 +889,62 @@ int hash_begin(int hid) * * Reentrancy: Non Re-entrant */ -int hash_hw_update(int hid, const u8 *p_data_buffer, u32 msg_length) +int hash_hw_update(struct shash_desc *desc, + int hid, + const u8 *p_data_buffer, + u32 msg_length) { - int hash_error = HASH_OK; + int hash_rv = HASH_OK; u8 index; u8 *p_buffer; u32 count; + struct hash_ctx *ctx = shash_desc_ctx(desc); + struct hash_driver_data *device_data = internal_drv_data; - stm_dbg(debug, "[u8500_hash_alg] hash_hw_update(msg_length=%d / %d), " - "in=%d, bin=%d))", + pr_debug("[u8500_hash_alg] hash_hw_update(msg_length=%d / %d), " + "in=%d, bin=%d))", msg_length, msg_length / 8, - g_sys_ctx.state[hid].index, - g_sys_ctx.state[hid].bit_index); - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } + ctx->state.index, + ctx->state.bit_index); - index = g_sys_ctx.state[hid].index; + index = ctx->state.index; - p_buffer = (u8 *)g_sys_ctx.state[hid].buffer; + p_buffer = (u8 *)ctx->state.buffer; /* Number of bytes in the message */ msg_length /= 8; /* Check parameters */ if (NULL == p_data_buffer) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; + hash_rv = HASH_INVALID_PARAMETER; + pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_rv; } - /* Check if g_sys_ctx.state.length + msg_length + /* Check if ctx->state.length + msg_length overflows */ if (msg_length > - (g_sys_ctx.state[hid].length.low_word + msg_length) + (ctx->state.length.low_word + msg_length) && HASH_HIGH_WORD_MAX_VAL == - (g_sys_ctx.state[hid].length.high_word)) { - hash_error = HASH_MSG_LENGTH_OVERFLOW; - stm_error("[u8500_hash_alg] HASH_MSG_LENGTH_OVERFLOW!"); - return hash_error; + (ctx->state.length.high_word)) { + hash_rv = HASH_MSG_LENGTH_OVERFLOW; + pr_err("[u8500_hash_alg] HASH_MSG_LENGTH_OVERFLOW!"); + return hash_rv; + } + + /* Enable device power (and clock) */ + hash_rv = hash_enable_power(device_data->dev, device_data, false); + if (hash_rv) { + dev_err(device_data->dev, "[%s]: " + "hash_enable_power() failed!", __func__); + goto out; } /* Main loop */ while (0 != msg_length) { if ((index + msg_length) < 
HASH_BLOCK_SIZE) { for (count = 0; count < msg_length; count++) { - /*TODO: memcpy? */ p_buffer[index + count] = *(p_data_buffer + count); } @@ -1132,7 +952,26 @@ int hash_hw_update(int hid, const u8 *p_data_buffer, u32 msg_length) index += msg_length; msg_length = 0; } else { - /* if 'p_data_buffer' is four byte aligned and local + if (!ctx->updated) { + hash_rv = init_hash_hw(desc); + if (hash_rv != HASH_OK) { + pr_err("init_hash_hw() failed!"); + goto out; + } + ctx->updated = 1; + } else { + hash_rv = + hash_resume_state(HASH_DEVICE_ID_1, + &ctx->state); + if (hash_rv != HASH_OK) { + pr_err("hash_resume_state()" + " failed!"); + goto out_power; + } + } + + /* + * If 'p_data_buffer' is four byte aligned and local * buffer does not have any data, we can write data * directly from 'p_data_buffer' to HW peripheral, * otherwise we first copy data to a local buffer @@ -1152,60 +991,34 @@ int hash_hw_update(int hid, const u8 *p_data_buffer, u32 msg_length) hash_processblock(hid, (const u32 *)p_buffer); } - hash_incrementlength(hid, HASH_BLOCK_SIZE); + hash_incrementlength(ctx, HASH_BLOCK_SIZE); p_data_buffer += (HASH_BLOCK_SIZE - index); msg_length -= (HASH_BLOCK_SIZE - index); index = 0; + + hash_rv = + hash_save_state(HASH_DEVICE_ID_1, &ctx->state); + if (hash_rv != HASH_OK) { + pr_err("hash_save_state() failed!"); + goto out_power; + } } } - g_sys_ctx.state[hid].index = index; + ctx->state.index = index; - stm_dbg(debug, "[u8500_hash_alg] hash_hw_update END(msg_length=%d in " - "bits, in=%d, bin=%d))", + pr_debug("[u8500_hash_alg] hash_hw_update END(msg_length=%d in " + "bits, in=%d, bin=%d))", msg_length, - g_sys_ctx.state[hid].index, - g_sys_ctx.state[hid].bit_index); - - return hash_error; -} - -/** - * hash_end_key - Function that ends a message, i.e. pad and triggers the last - * calculation. - * @hid: Hardware device ID - * - * This function also clear the registries that have been involved in - * computation. - */ -int hash_end_key(int hid) -{ - int hash_error = HASH_OK; - u8 count = 0; - - stm_dbg(debug, "[u8500_hash_alg] hash_end_key(index=%d))", - g_sys_ctx.state[hid].index); - - hash_messagepad(hid, g_sys_ctx.state[hid].buffer, - g_sys_ctx.state[hid].index); - - /* Wait till the DCAL bit get cleared, So that we get the final - * message digest not intermediate value. 
- */ - while (g_sys_ctx.registry[hid]->str & HASH_STR_DCAL_MASK) - ; - - /* Reset the HASH state */ - g_sys_ctx.state[hid].index = 0; - g_sys_ctx.state[hid].bit_index = 0; - - for (count = 0; count < HASH_BLOCK_SIZE / sizeof(u32); count++) - g_sys_ctx.state[hid].buffer[count] = 0; - - g_sys_ctx.state[hid].length.high_word = 0; - g_sys_ctx.state[hid].length.low_word = 0; - - return hash_error; + ctx->state.index, + ctx->state.bit_index); +out_power: + /* Disable power (and clock) */ + if (hash_disable_power(device_data->dev, device_data, false)) + dev_err(device_data->dev, "[%s]: " + "hash_disable_power() failed!", __func__); +out: + return hash_rv; } /** @@ -1218,66 +1031,53 @@ int hash_end_key(int hid) int hash_resume_state(int hid, const struct hash_state *device_state) { u32 temp_cr; - int hash_error = HASH_OK; + int hash_rv = HASH_OK; s32 count; + int hash_mode = HASH_OPER_MODE_HASH; - stm_dbg(debug, "[u8500_hash_alg] hash_resume_state(state(0x%x)))", - (u32) device_state); + pr_debug("[u8500_hash_alg] hash_resume_state(state(0x%x)))", + (u32) device_state); if (NULL == device_state) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; + hash_rv = HASH_INVALID_PARAMETER; + pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_rv; } /* Check correctness of index and length members */ if (device_state->index > HASH_BLOCK_SIZE || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - for (count = 0; count < (s32) (HASH_BLOCK_SIZE / sizeof(u32)); - count++) { - g_sys_ctx.state[hid].buffer[count] = - device_state->buffer[count]; + hash_rv = HASH_INVALID_PARAMETER; + pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_rv; } - g_sys_ctx.state[hid].index = device_state->index; - g_sys_ctx.state[hid].bit_index = device_state->bit_index; - g_sys_ctx.state[hid].length = device_state->length; - HASH_INITIALIZE; temp_cr = device_state->temp_cr; - g_sys_ctx.registry[hid]->cr = + sys_ctx_g.registry[hid]->cr = temp_cr & HASH_CR_RESUME_MASK; + if (sys_ctx_g.registry[hid]->cr & HASH_CR_MODE_MASK) + hash_mode = HASH_OPER_MODE_HMAC; + else + hash_mode = HASH_OPER_MODE_HASH; + for (count = 0; count < HASH_CSR_COUNT; count++) { - if ((count >= 36) && - !(g_sys_ctx.registry[hid]->cr & - HASH_CR_MODE_MASK)) { + if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) break; - } - g_sys_ctx.registry[hid]->csrx[count] = + + sys_ctx_g.registry[hid]->csrx[count] = device_state->csr[count]; } - g_sys_ctx.registry[hid]->csfull = device_state->csfull; - g_sys_ctx.registry[hid]->csdatain = device_state->csdatain; + sys_ctx_g.registry[hid]->csfull = device_state->csfull; + sys_ctx_g.registry[hid]->csdatain = device_state->csdatain; - g_sys_ctx.registry[hid]->str = device_state->str_reg; - g_sys_ctx.registry[hid]->cr = temp_cr; + sys_ctx_g.registry[hid]->str = device_state->str_reg; + sys_ctx_g.registry[hid]->cr = temp_cr; - return hash_error; + return hash_rv; } /** @@ -1291,289 +1091,50 @@ int hash_save_state(int hid, struct hash_state *device_state) { u32 temp_cr; u32 count; - int hash_error = HASH_OK; + int hash_rv = HASH_OK; + int hash_mode = HASH_OPER_MODE_HASH; - stm_dbg(debug, "[u8500_hash_alg] 
hash_save_state( state(0x%x)))", - (u32) device_state); + pr_debug("[u8500_hash_alg] hash_save_state( state(0x%x)))", + (u32) device_state); if (NULL == device_state) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - for (count = 0; count < HASH_BLOCK_SIZE / sizeof(u32); count++) { - device_state->buffer[count] = - g_sys_ctx.state[hid].buffer[count]; + hash_rv = HASH_INVALID_PARAMETER; + pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_rv; } - device_state->index = g_sys_ctx.state[hid].index; - device_state->bit_index = g_sys_ctx.state[hid].bit_index; - device_state->length = g_sys_ctx.state[hid].length; - /* Write dummy value to force digest intermediate calculation. This * actually makes sure that there isn't any ongoing calculation in the * hardware. */ - while (g_sys_ctx.registry[hid]->str & HASH_STR_DCAL_MASK) - ; + while (sys_ctx_g.registry[hid]->str & HASH_STR_DCAL_MASK) + cpu_relax(); - temp_cr = g_sys_ctx.registry[hid]->cr; + temp_cr = sys_ctx_g.registry[hid]->cr; - device_state->str_reg = g_sys_ctx.registry[hid]->str; + device_state->str_reg = sys_ctx_g.registry[hid]->str; - device_state->din_reg = g_sys_ctx.registry[hid]->din; + device_state->din_reg = sys_ctx_g.registry[hid]->din; + + if (sys_ctx_g.registry[hid]->cr & HASH_CR_MODE_MASK) + hash_mode = HASH_OPER_MODE_HMAC; + else + hash_mode = HASH_OPER_MODE_HASH; for (count = 0; count < HASH_CSR_COUNT; count++) { - if ((count >= 36) - && !(g_sys_ctx.registry[hid]->cr & - HASH_CR_MODE_MASK)) { + if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) break; - } device_state->csr[count] = - g_sys_ctx.registry[hid]->csrx[count]; + sys_ctx_g.registry[hid]->csrx[count]; } - device_state->csfull = g_sys_ctx.registry[hid]->csfull; - device_state->csdatain = g_sys_ctx.registry[hid]->csdatain; + device_state->csfull = sys_ctx_g.registry[hid]->csfull; + device_state->csdatain = sys_ctx_g.registry[hid]->csdatain; - /* end if */ device_state->temp_cr = temp_cr; - return hash_error; -} - -/** - * hash_end - Ends current HASH computation, passing back the hash to the user. 
- * @hid: Hardware device ID - * @digest: User allocated byte array for the calculated digest - * - * Reentrancy: Non Re-entrant - */ -int hash_end(int hid, u8 digest[HASH_MSG_DIGEST_SIZE]) -{ - int hash_error = HASH_OK; - u32 count; - /* Standard SHA-1 digest for null string for HASH mode */ - u8 zero_message_hash_sha1[HASH_MSG_DIGEST_SIZE] = { - 0xDA, 0x39, 0xA3, 0xEE, - 0x5E, 0x6B, 0x4B, 0x0D, - 0x32, 0x55, 0xBF, 0xEF, - 0x95, 0x60, 0x18, 0x90, - 0xAF, 0xD8, 0x07, 0x09, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00 - }; - /* Standard SHA-2 digest for null string for HASH mode */ - u8 zero_message_hash_sha2[HASH_MSG_DIGEST_SIZE] = { - 0xD4, 0x1D, 0x8C, 0xD9, - 0x8F, 0x00, 0xB2, 0x04, - 0xE9, 0x80, 0x09, 0x98, - 0xEC, 0xF8, 0x42, 0x7E, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00 - }; - /* Standard SHA-1 digest for null string for HMAC mode,with no key */ - u8 zero_message_hmac_sha1[HASH_MSG_DIGEST_SIZE] = { - 0xFB, 0xDB, 0x1D, 0x1B, - 0x18, 0xAA, 0x6C, 0x08, - 0x32, 0x4B, 0x7D, 0x64, - 0xB7, 0x1F, 0xB7, 0x63, - 0x70, 0x69, 0x0E, 0x1D, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00 - }; - /* Standard SHA2 digest for null string for HMAC mode,with no key */ - u8 zero_message_hmac_sha2[HASH_MSG_DIGEST_SIZE] = { - 0x74, 0xE6, 0xF7, 0x29, - 0x8A, 0x9C, 0x2D, 0x16, - 0x89, 0x35, 0xF5, 0x8C, - 0x00, 0x1B, 0xAD, 0x88, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00 - }; - - stm_dbg(debug, "[u8500_hash_alg] hash_end(digest array (0x%x)))", - (u32) digest); - - if (NULL == digest) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - if (0 == g_sys_ctx.state[hid].index && - 0 == g_sys_ctx.state[hid].length.high_word && - 0 == g_sys_ctx.state[hid].length.low_word) { - if (g_sys_ctx.registry[hid]->cr & HASH_CR_MODE_MASK) { - if (g_sys_ctx.registry[hid]->cr & HASH_CR_ALGO_MASK) { - /* hash of an empty message was requested */ - for (count = 0; count < HASH_MSG_DIGEST_SIZE; - count++) { - digest[count] = - zero_message_hmac_sha1[count]; - } - } else { /* SHA-2 algo */ - - /* hash of an empty message was requested */ - for (count = 0; count < HASH_MSG_DIGEST_SIZE; - count++) { - digest[count] = - zero_message_hmac_sha2[count]; - } - } - } else { /* HASH mode */ - - if (g_sys_ctx.registry[hid]->cr & HASH_CR_ALGO_MASK) { - /* hash of an empty message was requested */ - for (count = 0; count < HASH_MSG_DIGEST_SIZE; - count++) { - digest[count] = - zero_message_hash_sha1[count]; - } - } else { /* SHA-2 algo */ - - /* hash of an empty message was requested */ - for (count = 0; count < HASH_MSG_DIGEST_SIZE; - count++) { - digest[count] = - zero_message_hash_sha2[count]; - } - } - } - - HASH_SET_DCAL; - } else { - hash_messagepad(hid, - g_sys_ctx.state[hid].buffer, - g_sys_ctx.state[hid].index); - - /* Wait till the DCAL bit get cleared, So that we get the final - * message digest not intermediate value. 
*/ - while (g_sys_ctx.registry[hid]->str & HASH_STR_DCAL_MASK) - ; - - hash_error = hash_get_digest(hid, digest); - - /* Reset the HASH state */ - g_sys_ctx.state[hid].index = 0; - g_sys_ctx.state[hid].bit_index = 0; - for (count = 0; count < HASH_BLOCK_SIZE / sizeof(u32); - count++) { - g_sys_ctx.state[hid].buffer[count] - = 0; - } - - g_sys_ctx.state[hid].length.high_word = 0; - g_sys_ctx.state[hid].length.low_word = 0; - } - - if (debug) - hexdump(digest, HASH_MSG_DIGEST_SIZE); - - return hash_error; -} - -/** - * hash_initialize_globals - Initialize global variables to their default reset - * value. - * @hid: Hardware device ID - * - * Reentrancy: Non Re-entrant, global structure g_sys_ctx elements are being - * modified - */ -static void hash_initialize_globals(int hid) -{ - u8 loop_count; - - /* Resetting the values of global variables except the registry */ - g_sys_ctx.state[hid].temp_cr = HASH_RESET_INDEX_VAL; - g_sys_ctx.state[hid].str_reg = HASH_RESET_INDEX_VAL; - g_sys_ctx.state[hid].din_reg = HASH_RESET_INDEX_VAL; - - for (loop_count = 0; loop_count < HASH_CSR_COUNT; loop_count++) { - g_sys_ctx.state[hid].csr[loop_count] = - HASH_RESET_CSRX_REG_VALUE; - } - - g_sys_ctx.state[hid].csfull = HASH_RESET_CSFULL_REG_VALUE; - g_sys_ctx.state[hid].csdatain = HASH_RESET_CSDATAIN_REG_VALUE; - - for (loop_count = 0; loop_count < (HASH_BLOCK_SIZE / sizeof(u32)); - loop_count++) { - g_sys_ctx.state[hid].buffer[loop_count] = - HASH_RESET_BUFFER_VAL; - } - - g_sys_ctx.state[hid].length.high_word = HASH_RESET_LEN_HIGH_VAL; - g_sys_ctx.state[hid].length.low_word = HASH_RESET_LEN_LOW_VAL; - g_sys_ctx.state[hid].index = HASH_RESET_INDEX_VAL; - g_sys_ctx.state[hid].bit_index = HASH_RESET_BIT_INDEX_VAL; -} - -/** - * hash_reset - This routine will reset the global variable to default reset - * value and HASH registers to their power on reset values. - * @hid: Hardware device ID - * - * Reentrancy: Non Re-entrant, global structure g_sys_ctx elements are being - * modified. - */ -int hash_reset(int hid) -{ - int hash_error = HASH_OK; - u8 loop_count; - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - - return hash_error; - } - - /* Resetting the values of global variables except the registry */ - hash_initialize_globals(hid); - - /* Resetting HASH control register to power-on-reset values */ - g_sys_ctx.registry[hid]->str = HASH_RESET_START_REG_VALUE; - - for (loop_count = 0; loop_count < HASH_CSR_COUNT; loop_count++) { - g_sys_ctx.registry[hid]->csrx[loop_count] = - HASH_RESET_CSRX_REG_VALUE; - } - - g_sys_ctx.registry[hid]->csfull = HASH_RESET_CSFULL_REG_VALUE; - g_sys_ctx.registry[hid]->csdatain = - HASH_RESET_CSDATAIN_REG_VALUE; - - /* Resetting the HASH Control reg. This also reset the PRIVn and SECn - * bits and hence the device registers will not be accessed anymore and - * should be done in the last HASH register access statement. 
- */ - g_sys_ctx.registry[hid]->cr = HASH_RESET_CONTROL_REG_VALUE; - - return hash_error; + return hash_rv; } /** @@ -1587,59 +1148,37 @@ int hash_reset(int hid) */ int hash_init_base_address(int hid, t_logical_address base_address) { - int hash_error = HASH_OK; - - stm_dbg(debug, "[u8500_hash_alg] hash_init_base_address())"); + int hash_rv = HASH_OK; - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - - return hash_error; - } + pr_debug("[u8500_hash_alg] hash_init_base_address())"); if (0 != base_address) { - /*--------------------------------------* - * Initializing the registers structure * - *--------------------------------------*/ - g_sys_ctx.registry[hid] = (struct hash_register *) base_address; - - /*--------------------------* - * Checking Peripheral Ids * - *--------------------------*/ - if ((HASH_P_ID0 == - g_sys_ctx.registry[hid]->periphid0) - && (HASH_P_ID1 == - g_sys_ctx.registry[hid]->periphid1) - && (HASH_P_ID2 == - g_sys_ctx.registry[hid]->periphid2) - && (HASH_P_ID3 == - g_sys_ctx.registry[hid]->periphid3) - && (HASH_CELL_ID0 == - g_sys_ctx.registry[hid]->cellid0) - && (HASH_CELL_ID1 == - g_sys_ctx.registry[hid]->cellid1) - && (HASH_CELL_ID2 == - g_sys_ctx.registry[hid]->cellid2) - && (HASH_CELL_ID3 == - g_sys_ctx.registry[hid]->cellid3) + /* Initializing the registers structure */ + sys_ctx_g.registry[hid] = + (struct hash_register *) base_address; + + /* Checking Peripheral Ids */ + if ((HASH_P_ID0 == sys_ctx_g.registry[hid]->periphid0) + && (HASH_P_ID1 == sys_ctx_g.registry[hid]->periphid1) + && (HASH_P_ID2 == sys_ctx_g.registry[hid]->periphid2) + && (HASH_P_ID3 == sys_ctx_g.registry[hid]->periphid3) + && (HASH_CELL_ID0 == sys_ctx_g.registry[hid]->cellid0) + && (HASH_CELL_ID1 == sys_ctx_g.registry[hid]->cellid1) + && (HASH_CELL_ID2 == sys_ctx_g.registry[hid]->cellid2) + && (HASH_CELL_ID3 == sys_ctx_g.registry[hid]->cellid3) ) { - - /* Resetting the values of global variables except the - registry */ - hash_initialize_globals(hid); - hash_error = HASH_OK; - return hash_error; + hash_rv = HASH_OK; + return hash_rv; } else { - hash_error = HASH_UNSUPPORTED_HW; - stm_error("[u8500_hash_alg] HASH_UNSUPPORTED_HW!"); - return hash_error; + hash_rv = HASH_UNSUPPORTED_HW; + pr_err("[u8500_hash_alg] HASH_UNSUPPORTED_HW!"); + return hash_rv; } } /* end if */ else { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; + hash_rv = HASH_INVALID_PARAMETER; + pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_rv; } } @@ -1647,110 +1186,50 @@ int hash_init_base_address(int hid, t_logical_address base_address) * hash_get_digest - Gets the digest. * @hid: Hardware device ID * @digest: User allocated byte array for the calculated digest + * @algorithm: The algorithm in use. * * Reentrancy: Non Re-entrant, global variable registry (hash control register) * is being modified. * - * Note that, if this is called before the final message has been handle it will - * return the intermediate message digest. + * Note that, if this is called before the final message has been handle it + * will return the intermediate message digest. 
*/ -int hash_get_digest(int hid, u8 *digest) +void hash_get_digest(int hid, u8 *digest, int algorithm) { u32 temp_hx_val, count; - int hash_error = HASH_OK; - - stm_dbg(debug, - "[u8500_hash_alg] hash_get_digest(digest array:(0x%x))", - (u32) digest); + int loop_ctr; - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; + if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA2) { + pr_err("[hash_get_digest] Incorrect algorithm %d", algorithm); + return; } + if (algorithm == HASH_ALGO_SHA1) + loop_ctr = HASH_SHA1_DIGEST_SIZE / sizeof(u32); + else + loop_ctr = HASH_SHA2_DIGEST_SIZE / sizeof(u32); + + pr_debug("[u8500_hash_alg] hash_get_digest(digest array:(0x%x))", + (u32) digest); + /* Copy result into digest array */ - for (count = 0; count < (HASH_MSG_DIGEST_SIZE / sizeof(u32)); - count++) { + for (count = 0; count < loop_ctr; count++) { temp_hx_val = HASH_GET_HX(count); digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF); digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF); digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF); digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF); } - - return hash_error; } -/** - * hash_compute - Performs a complete HASH calculation on the message passed. - * @hid: Hardware device ID - * @p_data_buffer: Pointer to the message to be hashed - * @msg_length: The length of the message - * @p_hash_config: Structure with configuration data for the hash hardware - * @digest: User allocated byte array for the calculated digest - * - * Reentrancy: Non Re-entrant - */ -int hash_compute(int hid, - const u8 *p_data_buffer, - u32 msg_length, - struct hash_config *p_hash_config, - u8 digest[HASH_MSG_DIGEST_SIZE]) { - int hash_error = HASH_OK; - - stm_dbg(debug, "[u8500_hash_alg] hash_compute())"); - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - - /* WARNING: return code must be checked if - * behaviour of hash_begin changes. - */ - hash_error = hash_setconfiguration(hid, p_hash_config); - if (HASH_OK != hash_error) { - stm_error("[u8500_hash_alg] hash_setconfiguration() failed!"); - return hash_error; - } - - hash_error = hash_begin(hid); - if (HASH_OK != hash_error) { - stm_error("[u8500_hash_alg] hash_begin() failed!"); - return hash_error; - } - - hash_error = hash_hw_update(hid, p_data_buffer, msg_length); - if (HASH_OK != hash_error) { - stm_error("[u8500_hash_alg] hash_hw_update() failed!"); - return hash_error; - } - - hash_error = hash_end(hid, digest); - if (HASH_OK != hash_error) { - stm_error("[u8500_hash_alg] hash_end() failed!"); - return hash_error; - } - - return hash_error; -} module_init(u8500_hash_mod_init); module_exit(u8500_hash_mod_fini); -module_param(mode, int, 0); module_param(debug, int, 0); -module_param(contextsaving, int, 0); MODULE_DESCRIPTION("Driver for ST-Ericsson U8500 HASH engine."); MODULE_LICENSE("GPL"); MODULE_ALIAS("sha1-u8500"); MODULE_ALIAS("sha256-u8500"); -MODULE_ALIAS("hmac(sha1-u8500)"); -MODULE_ALIAS("hmac(sha256-u8500)"); -- cgit v1.2.3 From 4c93613a4a5f25b739fa32f86ae67bfe033ee0d3 Mon Sep 17 00:00:00 2001 From: Shujuan Chen Date: Tue, 31 Aug 2010 17:14:54 +0200 Subject: ux500: tee bug fix for returning wrong values - Fix the missing the ret argument sent to secure world. 
- Fix the mismatch return btw kernel and secure world in closesession. Dependencies: WP269815 ST-Ericsson ID: ER270978 Change-Id: I86b4af518660987663632d04d11d5d4967878bca Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/4545 Reviewed-by: Linus WALLEIJ Tested-by: Shujuan CHEN Reviewed-by: Fredric MORENIUS --- arch/arm/mach-ux500/tee_ux500.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c index ab3782a323c..c009afe26ae 100644 --- a/arch/arm/mach-ux500/tee_ux500.c +++ b/arch/arm/mach-ux500/tee_ux500.c @@ -52,6 +52,7 @@ int call_sec_world(struct tee_session *ts, int sec_cmd) virt_to_phys(ts->ta), ts->cmd, virt_to_phys((void *)(ts->op)), + virt_to_phys((void *)(&ts->err)), virt_to_phys((void *)(&ts->origin))); } else { call_sec_rom_bridge(ISSWAPI_EXECUTE_TA, @@ -61,6 +62,7 @@ int call_sec_world(struct tee_session *ts, int sec_cmd) virt_to_phys(ts->ta), ts->cmd, virt_to_phys((void *)(ts->op)), + virt_to_phys((void *)(&ts->err)), virt_to_phys((void *)(&ts->origin))); } break; @@ -71,7 +73,16 @@ int call_sec_world(struct tee_session *ts, int sec_cmd) ts->id, NULL, virt_to_phys(ts->ta), - virt_to_phys((void *)(&ts->origin))); + virt_to_phys((void *)(&ts->err))); + + /* Since the TEE Client API does NOT take care of + * the return value, we print a warning here if + * something went wrong in secure world. + */ + if (ts->err != TEED_SUCCESS) + pr_warning("[%s] failed in secure world\n", + __func__); + break; } -- cgit v1.2.3 From 630355b7a64d4c4d42d217b643382ea95135d7a5 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Tue, 5 Apr 2011 13:53:10 +0200 Subject: ux500: add u5500 specific macros This patch is based on similar patch from Rickard Evertsson Although this patch is fixing fewer files. Signed-off-by: Mian Yousaf Kaukab Conflicts: arch/arm/mach-ux500/clock.c --- arch/arm/mach-ux500/tee_ux500.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c index c009afe26ae..2e6a2e89f0d 100644 --- a/arch/arm/mach-ux500/tee_ux500.c +++ b/arch/arm/mach-ux500/tee_ux500.c @@ -8,11 +8,10 @@ #include #include #include +#include #include -#define BOOT_BRIDGE_FUNC (U8500_BOOT_ROM_BASE + 0x18300) - #define ISSWAPI_EXECUTE_TA 0x11000001 #define ISSWAPI_CLOSE_TA 0x11000002 @@ -25,8 +24,16 @@ static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...) 
va_list ap; u32 ret; - hw_sec_rom_pub_bridge = - (bridge_func)((u32)IO_ADDRESS(BOOT_BRIDGE_FUNC)); + if (cpu_is_u8500()) + hw_sec_rom_pub_bridge = (bridge_func) + ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x18300)); + else if (cpu_is_u5500()) + hw_sec_rom_pub_bridge = (bridge_func) + ((u32)IO_ADDRESS(U5500_BOOT_ROM_BASE + 0x18300)); + else { + pr_err("tee-ux500: Unknown DB Asic!\n"); + return -EIO; + } va_start(ap, cfg); ret = hw_sec_rom_pub_bridge(service_id, cfg, ap); -- cgit v1.2.3 From ace22919ce9616453b97737e55825fb5ae274136 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 15 Oct 2010 15:07:04 +0200 Subject: V2 fix for TEE ST-Ericsson ID: WP270298 Change-Id: I0eb63eba30ed319ff601beb7cd4ac9c307e7414c Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/6620 Reviewed-by: Jens WIKLANDER Tested-by: Jens WIKLANDER --- arch/arm/mach-ux500/tee_ux500.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c index 2e6a2e89f0d..707e91284a1 100644 --- a/arch/arm/mach-ux500/tee_ux500.c +++ b/arch/arm/mach-ux500/tee_ux500.c @@ -20,11 +20,14 @@ static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...) { typedef u32 (*bridge_func)(u32, u32, va_list); - static bridge_func hw_sec_rom_pub_bridge; + bridge_func hw_sec_rom_pub_bridge; va_list ap; u32 ret; - if (cpu_is_u8500()) + if (cpu_is_u8500v2()) + hw_sec_rom_pub_bridge = (bridge_func) + ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x17300)); + else if (cpu_is_u8500v1()) hw_sec_rom_pub_bridge = (bridge_func) ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x18300)); else if (cpu_is_u5500()) -- cgit v1.2.3 From 5b40311eb26ec74d23f00ed5ecbb17982ed9d897 Mon Sep 17 00:00:00 2001 From: Jonas Aaberg Date: Wed, 1 Jun 2011 08:26:59 +0200 Subject: ARM: ux500: tee: Remove u8500 v1 support ST-Ericsson Linux next: Not tested, ask SSM for ER ST-Ericsson ID: 342987 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: Ia8afe98cdafbc5f11c115a061e5be75d9bd7ece0 Signed-off-by: Jonas Aaberg Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/24284 Reviewed-by: Joakim BECH Reviewed-by: QATEST --- arch/arm/mach-ux500/tee_ux500.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c index 707e91284a1..160ca529261 100644 --- a/arch/arm/mach-ux500/tee_ux500.c +++ b/arch/arm/mach-ux500/tee_ux500.c @@ -24,19 +24,14 @@ static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...) va_list ap; u32 ret; - if (cpu_is_u8500v2()) + if (cpu_is_u8500v20_or_later()) hw_sec_rom_pub_bridge = (bridge_func) ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x17300)); - else if (cpu_is_u8500v1()) - hw_sec_rom_pub_bridge = (bridge_func) - ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x18300)); else if (cpu_is_u5500()) hw_sec_rom_pub_bridge = (bridge_func) ((u32)IO_ADDRESS(U5500_BOOT_ROM_BASE + 0x18300)); - else { - pr_err("tee-ux500: Unknown DB Asic!\n"); - return -EIO; - } + else + ux500_unknown_soc(); va_start(ap, cfg); ret = hw_sec_rom_pub_bridge(service_id, cfg, ap); -- cgit v1.2.3 From f4ec907012bf3f02a66c6c9dcb39bef6b413b89f Mon Sep 17 00:00:00 2001 From: Berne Hebark Date: Thu, 8 Sep 2011 14:23:57 +0530 Subject: ux500: cryp/hash: Updated for both u8500 & u5500 cryp1 & hash1 updated to be compatible on both u8500 and u5500: - added to u5500_defconfig. - settings from devices.c to board-mop500.c & board-u5500.c. - dynamic driver registration in board-mop500.c & board-u5500.c. 
- added cryp1 to clock-db5500.c and renamed cryp to cryp0. - added function dbx500_add_platform_device_noirq to devices-common.c. - added cryp1 and hash1 inline functions to devices-common.h (dbx500_add_cryp1). - defines added to devices-db5500.h and devices-db8500.h. - u8500_cryp/hash changed to ux500_cryp/hash. - update to handle different value for CRYP_PERIPHERAL_ID2 between u8500 and u5500 (more info in ER336742). ST-Ericsson ID: 257104 ST-Ericsson Linux next: NA ST-Ericsson FOSS-OUT ID: Trivial Ref: Commit-id: Ibe72c72d8f9d781008164f1bf24ceafa82ac9083 Signed-off-by: Avinash A Change-Id: I08a8f71acb89be99cbf8b54390be569e2369c73b Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/30437 Reviewed-by: Avinash A Tested-by: Avinash A --- arch/arm/mach-ux500/include/mach/crypto-ux500.h | 5 +- drivers/crypto/ux500/cryp/Makefile | 4 +- drivers/crypto/ux500/cryp/cryp.c | 27 +++++--- drivers/crypto/ux500/cryp/cryp_core.c | 83 ++++++++++++------------- drivers/crypto/ux500/cryp/cryp_p.h | 4 +- drivers/crypto/ux500/hash/Makefile | 4 +- drivers/crypto/ux500/hash/hash_core.c | 69 ++++++++++---------- 7 files changed, 104 insertions(+), 92 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-ux500/include/mach/crypto-ux500.h b/arch/arm/mach-ux500/include/mach/crypto-ux500.h index 57da88398d5..9d1e1c52c13 100644 --- a/arch/arm/mach-ux500/include/mach/crypto-ux500.h +++ b/arch/arm/mach-ux500/include/mach/crypto-ux500.h @@ -6,11 +6,14 @@ */ #ifndef _CRYPTO_UX500_H #include -#include struct cryp_platform_data { struct stedma40_chan_cfg mem_to_engine; struct stedma40_chan_cfg engine_to_mem; }; +struct hash_platform_data { + struct stedma40_chan_cfg mem_to_engine; +}; + #endif diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile index fd5e6df3861..e5d362a6f68 100644 --- a/drivers/crypto/ux500/cryp/Makefile +++ b/drivers/crypto/ux500/cryp/Makefile @@ -9,5 +9,5 @@ CFLAGS_cryp.o := -DDEBUG -O0 CFLAGS_cryp_irq.o := -DDEBUG -O0 endif -obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += u8500_cryp.o -u8500_cryp-objs := cryp.o cryp_irq.o cryp_core.o +obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o +ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index ae4fe318528..211200fed34 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -12,6 +12,8 @@ #include #include +#include + #include "cryp_p.h" #include "cryp.h" @@ -30,26 +32,33 @@ void cryp_wait_until_done(struct cryp_device_data *device_data) */ int cryp_check(struct cryp_device_data *device_data) { + int peripheralID2 = 0; + if (NULL == device_data) return -EINVAL; + if (cpu_is_u8500()) + peripheralID2 = CRYP_PERIPHERAL_ID2_DB8500; + else if (cpu_is_u5500()) + peripheralID2 = CRYP_PERIPHERAL_ID2_DB5500; + /* Check Peripheral and Pcell Id Register for CRYP */ if ((CRYP_PERIPHERAL_ID0 == - readl_relaxed(&device_data->base->periphId0)) + readl_relaxed(&device_data->base->periphId0)) && (CRYP_PERIPHERAL_ID1 == - readl_relaxed(&device_data->base->periphId1)) - && (CRYP_PERIPHERAL_ID2 == - readl_relaxed(&device_data->base->periphId2)) + readl_relaxed(&device_data->base->periphId1)) + && (peripheralID2 == + readl_relaxed(&device_data->base->periphId2)) && (CRYP_PERIPHERAL_ID3 == - readl_relaxed(&device_data->base->periphId3)) + readl_relaxed(&device_data->base->periphId3)) && (CRYP_PCELL_ID0 == - readl_relaxed(&device_data->base->pcellId0)) + readl_relaxed(&device_data->base->pcellId0)) && (CRYP_PCELL_ID1 == - 
readl_relaxed(&device_data->base->pcellId1)) + readl_relaxed(&device_data->base->pcellId1)) && (CRYP_PCELL_ID2 == - readl_relaxed(&device_data->base->pcellId2)) + readl_relaxed(&device_data->base->pcellId2)) && (CRYP_PCELL_ID3 == - readl_relaxed(&device_data->base->pcellId3))) { + readl_relaxed(&device_data->base->pcellId3))) { return 0; } diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index f0aed67f29a..5893abb57dc 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -33,7 +33,6 @@ #include #include -#include #include "cryp_p.h" #include "cryp.h" @@ -1194,13 +1193,14 @@ static int cryp_hw_calculate(struct cryp_ctx *ctx) } if (hw_crypt_noxts(ctx, device_data)) - pr_err("u8500_cryp:crypX: [%s]: hw_crypt_noxts() failed!", + dev_err(device_data->dev, "[%s]: hw_crypt_noxts() failed!", __func__); out: if (cryp_disable_power(device_data->dev, device_data, false)) dev_err(device_data->dev, "[%s]: " "cryp_disable_power() failed!", __func__); + /* Release the device */ spin_lock(&device_data->ctx_lock); device_data->current_ctx = NULL; @@ -1232,7 +1232,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->datalen = ctx->blocksize; if (cryp_hw_calculate(ctx)) - pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", __func__); } @@ -1252,7 +1252,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->datalen = ctx->blocksize; if (cryp_hw_calculate(ctx)) - pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", __func__); } @@ -1272,7 +1272,7 @@ static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->datalen = ctx->blocksize; if (cryp_hw_calculate(ctx)) - pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", __func__); } @@ -1292,7 +1292,7 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->datalen = ctx->blocksize; if (cryp_hw_calculate(ctx)) - pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", __func__); } @@ -1312,7 +1312,7 @@ static void des3_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->datalen = ctx->blocksize; if (cryp_hw_calculate(ctx)) - pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", __func__); } @@ -1332,7 +1332,7 @@ static void des3_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->datalen = ctx->blocksize; if (cryp_hw_calculate(ctx)) - pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", __func__); } @@ -1605,7 +1605,7 @@ static int des3_cbc_decrypt(struct ablkcipher_request *areq) */ static struct crypto_alg aes_alg = { .cra_name = "aes", - .cra_driver_name = "aes-u8500", + .cra_driver_name = "aes-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, @@ -1629,7 +1629,7 @@ static struct crypto_alg aes_alg = { */ static struct crypto_alg des_alg = { .cra_name = "des", - .cra_driver_name = "des-u8500", + .cra_driver_name = "des-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES_BLOCK_SIZE, @@ -1653,7 +1653,7 @@ static struct crypto_alg des_alg = { */ static struct 
crypto_alg des3_alg = { .cra_name = "des3_ede", - .cra_driver_name = "des3_ede-u8500", + .cra_driver_name = "des3_ede-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1677,7 +1677,7 @@ static struct crypto_alg des3_alg = { */ static struct crypto_alg aes_ecb_alg = { .cra_name = "ecb(aes)", - .cra_driver_name = "ecb-aes-u8500", + .cra_driver_name = "ecb-aes-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1703,7 +1703,7 @@ static struct crypto_alg aes_ecb_alg = { */ static struct crypto_alg aes_cbc_alg = { .cra_name = "cbc(aes)", - .cra_driver_name = "cbc-aes-u8500", + .cra_driver_name = "cbc-aes-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1730,7 +1730,7 @@ static struct crypto_alg aes_cbc_alg = { */ static struct crypto_alg aes_ctr_alg = { .cra_name = "ctr(aes)", - .cra_driver_name = "ctr-aes-u8500", + .cra_driver_name = "ctr-aes-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1757,7 +1757,7 @@ static struct crypto_alg aes_ctr_alg = { */ static struct crypto_alg des_ecb_alg = { .cra_name = "ecb(des)", - .cra_driver_name = "ecb-des-u8500", + .cra_driver_name = "ecb-des-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1783,7 +1783,7 @@ static struct crypto_alg des_ecb_alg = { */ static struct crypto_alg des_cbc_alg = { .cra_name = "cbc(des)", - .cra_driver_name = "cbc-des-u8500", + .cra_driver_name = "cbc-des-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1810,7 +1810,7 @@ static struct crypto_alg des_cbc_alg = { */ static struct crypto_alg des3_ecb_alg = { .cra_name = "ecb(des3_ede)", - .cra_driver_name = "ecb-des3_ede-u8500", + .cra_driver_name = "ecb-des3_ede-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1836,7 +1836,7 @@ static struct crypto_alg des3_ecb_alg = { */ static struct crypto_alg des3_cbc_alg = { .cra_name = "cbc(des3_ede)", - .cra_driver_name = "cbc-des3_ede-u8500", + .cra_driver_name = "cbc-des3_ede-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1859,9 +1859,9 @@ static struct crypto_alg des3_cbc_alg = { }; /** - * struct crypto_alg *u8500_cryp_algs[] - + * struct crypto_alg *ux500_cryp_algs[] - */ -static struct crypto_alg *u8500_cryp_algs[] = { +static struct crypto_alg *ux500_cryp_algs[] = { &aes_alg, &des_alg, &des3_alg, @@ -1885,19 +1885,19 @@ static int cryp_algs_register_all(void) pr_debug("[%s]", __func__); - for (i = 0; i < ARRAY_SIZE(u8500_cryp_algs); i++) { - ret = crypto_register_alg(u8500_cryp_algs[i]); + for (i = 0; i < ARRAY_SIZE(ux500_cryp_algs); i++) { + ret = crypto_register_alg(ux500_cryp_algs[i]); if (ret) { count = i; pr_err("[%s] alg registration failed", - u8500_cryp_algs[i]->cra_driver_name); + ux500_cryp_algs[i]->cra_driver_name); goto unreg; } } return 0; unreg: for (i = 0; i < count; i++) - crypto_unregister_alg(u8500_cryp_algs[i]); + crypto_unregister_alg(ux500_cryp_algs[i]); return ret; } @@ -1910,11 +1910,11 @@ static void cryp_algs_unregister_all(void) pr_debug(DEV_DBG_NAME " [%s]", __func__); - for (i = 0; i < ARRAY_SIZE(u8500_cryp_algs); i++) - crypto_unregister_alg(u8500_cryp_algs[i]); + for (i = 0; i < ARRAY_SIZE(ux500_cryp_algs); i++) + crypto_unregister_alg(ux500_cryp_algs[i]); } -static int u8500_cryp_probe(struct platform_device *pdev) +static int 
ux500_cryp_probe(struct platform_device *pdev) { int ret; int cryp_error = 0; @@ -2071,7 +2071,7 @@ out: return ret; } -static int u8500_cryp_remove(struct platform_device *pdev) +static int ux500_cryp_remove(struct platform_device *pdev) { struct resource *res = NULL; struct resource *res_irq = NULL; @@ -2137,7 +2137,7 @@ static int u8500_cryp_remove(struct platform_device *pdev) return 0; } -static void u8500_cryp_shutdown(struct platform_device *pdev) +static void ux500_cryp_shutdown(struct platform_device *pdev) { struct resource *res_irq = NULL; struct cryp_device_data *device_data; @@ -2190,7 +2190,7 @@ static void u8500_cryp_shutdown(struct platform_device *pdev) } -static int u8500_cryp_suspend(struct platform_device *pdev, pm_message_t state) +static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state) { int ret; struct cryp_device_data *device_data; @@ -2234,7 +2234,7 @@ static int u8500_cryp_suspend(struct platform_device *pdev, pm_message_t state) return ret; } -static int u8500_cryp_resume(struct platform_device *pdev) +static int ux500_cryp_resume(struct platform_device *pdev) { int ret = 0; struct cryp_device_data *device_data; @@ -2274,40 +2274,39 @@ static int u8500_cryp_resume(struct platform_device *pdev) } static struct platform_driver cryp_driver = { - .probe = u8500_cryp_probe, - .remove = u8500_cryp_remove, - .shutdown = u8500_cryp_shutdown, - .suspend = u8500_cryp_suspend, - .resume = u8500_cryp_resume, + .probe = ux500_cryp_probe, + .remove = ux500_cryp_remove, + .shutdown = ux500_cryp_shutdown, + .suspend = ux500_cryp_suspend, + .resume = ux500_cryp_resume, .driver = { .owner = THIS_MODULE, .name = "cryp1" } }; -static int __init u8500_cryp_mod_init(void) +static int __init ux500_cryp_mod_init(void) { pr_debug("[%s] is called!", __func__); - klist_init(&driver_data.device_list, NULL, NULL); /* Initialize the semaphore to 0 devices (locked state) */ sema_init(&driver_data.device_allocation, 0); return platform_driver_register(&cryp_driver); } -static void __exit u8500_cryp_mod_fini(void) +static void __exit ux500_cryp_mod_fini(void) { pr_debug("[%s] is called!", __func__); platform_driver_unregister(&cryp_driver); return; } -module_init(u8500_cryp_mod_init); -module_exit(u8500_cryp_mod_fini); +module_init(ux500_cryp_mod_init); +module_exit(ux500_cryp_mod_fini); module_param(cryp_mode, int, 0); -MODULE_DESCRIPTION("Driver for ST-Ericsson U8500 CRYP crypto engine."); +MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine."); MODULE_ALIAS("aes-all"); MODULE_ALIAS("des-all"); diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h index 4b615a33fe9..0e070829edc 100644 --- a/drivers/crypto/ux500/cryp/cryp_p.h +++ b/drivers/crypto/ux500/cryp/cryp_p.h @@ -39,7 +39,9 @@ */ #define CRYP_PERIPHERAL_ID0 0xE3 #define CRYP_PERIPHERAL_ID1 0x05 -#define CRYP_PERIPHERAL_ID2 0x28 + +#define CRYP_PERIPHERAL_ID2_DB8500 0x28 +#define CRYP_PERIPHERAL_ID2_DB5500 0x29 #define CRYP_PERIPHERAL_ID3 0x00 #define CRYP_PCELL_ID0 0x0D diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile index aaa5f56a2c2..b2f90d9bac7 100644 --- a/drivers/crypto/ux500/hash/Makefile +++ b/drivers/crypto/ux500/hash/Makefile @@ -7,5 +7,5 @@ ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG CFLAGS_hash_core.o := -DDEBUG -O0 endif -obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += u8500_hash.o -u8500_hash-objs := hash_core.o +obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o +ux500_hash-objs := hash_core.o diff --git 
a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index b042808496c..ce2c9d645fa 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -1337,7 +1337,7 @@ static struct ahash_alg ahash_sha1_alg = { .halg.statesize = sizeof(struct hash_ctx), .halg.base = { .cra_name = "sha1", - .cra_driver_name = "sha1-u8500", + .cra_driver_name = "sha1-ux500", .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct hash_ctx), @@ -1354,7 +1354,7 @@ static struct ahash_alg ahash_sha256_alg = { .halg.statesize = sizeof(struct hash_ctx), .halg.base = { .cra_name = "sha256", - .cra_driver_name = "sha256-u8500", + .cra_driver_name = "sha256-ux500", .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct hash_ctx), @@ -1373,7 +1373,7 @@ static struct ahash_alg hmac_sha1_alg = { .halg.statesize = sizeof(struct hash_ctx), .halg.base = { .cra_name = "hmac(sha1)", - .cra_driver_name = "hmac-sha1-u8500", + .cra_driver_name = "hmac-sha1-ux500", .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct hash_ctx), @@ -1392,7 +1392,7 @@ static struct ahash_alg hmac_sha256_alg = { .halg.statesize = sizeof(struct hash_ctx), .halg.base = { .cra_name = "hmac(sha256)", - .cra_driver_name = "hmac-sha256-u8500", + .cra_driver_name = "hmac-sha256-ux500", .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct hash_ctx), @@ -1402,9 +1402,9 @@ static struct ahash_alg hmac_sha256_alg = { }; /** - * struct hash_alg *u8500_hash_algs[] - + * struct hash_alg *ux500_hash_algs[] - */ -static struct ahash_alg *u8500_ahash_algs[] = { +static struct ahash_alg *ux500_ahash_algs[] = { &ahash_sha1_alg, &ahash_sha256_alg, &hmac_sha1_alg, @@ -1422,20 +1422,20 @@ static int ahash_algs_register_all(struct hash_device_data *device_data) dev_dbg(device_data->dev, "[%s]", __func__); - for (i = 0; i < ARRAY_SIZE(u8500_ahash_algs); i++) { - ret = crypto_register_ahash(u8500_ahash_algs[i]); + for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++) { + ret = crypto_register_ahash(ux500_ahash_algs[i]); if (ret) { count = i; dev_err(device_data->dev, "[%s] alg registration" " failed", - u8500_ahash_algs[i]->halg.base.cra_driver_name); + ux500_ahash_algs[i]->halg.base.cra_driver_name); goto unreg; } } return 0; unreg: for (i = 0; i < count; i++) - crypto_unregister_ahash(u8500_ahash_algs[i]); + crypto_unregister_ahash(ux500_ahash_algs[i]); return ret; } @@ -1448,15 +1448,15 @@ static void ahash_algs_unregister_all(struct hash_device_data *device_data) dev_dbg(device_data->dev, "[%s]", __func__); - for (i = 0; i < ARRAY_SIZE(u8500_ahash_algs); i++) - crypto_unregister_ahash(u8500_ahash_algs[i]); + for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++) + crypto_unregister_ahash(ux500_ahash_algs[i]); } /** - * u8500_hash_probe - Function that probes the hash hardware. + * ux500_hash_probe - Function that probes the hash hardware. * @pdev: The platform device. */ -static int u8500_hash_probe(struct platform_device *pdev) +static int ux500_hash_probe(struct platform_device *pdev) { int ret = 0; struct resource *res = NULL; @@ -1571,10 +1571,10 @@ out: } /** - * u8500_hash_remove - Function that removes the hash device from the platform. + * ux500_hash_remove - Function that removes the hash device from the platform. * @pdev: The platform device. 
*/ -static int u8500_hash_remove(struct platform_device *pdev) +static int ux500_hash_remove(struct platform_device *pdev) { struct resource *res; struct hash_device_data *device_data; @@ -1633,10 +1633,10 @@ static int u8500_hash_remove(struct platform_device *pdev) } /** - * u8500_hash_shutdown - Function that shutdown the hash device. + * ux500_hash_shutdown - Function that shutdown the hash device. * @pdev: The platform device */ -static void u8500_hash_shutdown(struct platform_device *pdev) +static void ux500_hash_shutdown(struct platform_device *pdev) { struct resource *res = NULL; struct hash_device_data *device_data; @@ -1686,11 +1686,11 @@ static void u8500_hash_shutdown(struct platform_device *pdev) } /** - * u8500_hash_suspend - Function that suspends the hash device. + * ux500_hash_suspend - Function that suspends the hash device. * @pdev: The platform device. * @state: - */ -static int u8500_hash_suspend(struct platform_device *pdev, pm_message_t state) +static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state) { int ret; struct hash_device_data *device_data; @@ -1726,10 +1726,10 @@ static int u8500_hash_suspend(struct platform_device *pdev, pm_message_t state) } /** - * u8500_hash_resume - Function that resume the hash device. + * ux500_hash_resume - Function that resume the hash device. * @pdev: The platform device. */ -static int u8500_hash_resume(struct platform_device *pdev) +static int ux500_hash_resume(struct platform_device *pdev) { int ret = 0; struct hash_device_data *device_data; @@ -1762,11 +1762,11 @@ static int u8500_hash_resume(struct platform_device *pdev) } static struct platform_driver hash_driver = { - .probe = u8500_hash_probe, - .remove = u8500_hash_remove, - .shutdown = u8500_hash_shutdown, - .suspend = u8500_hash_suspend, - .resume = u8500_hash_resume, + .probe = ux500_hash_probe, + .remove = ux500_hash_remove, + .shutdown = ux500_hash_shutdown, + .suspend = ux500_hash_suspend, + .resume = ux500_hash_resume, .driver = { .owner = THIS_MODULE, .name = "hash1", @@ -1774,12 +1774,11 @@ static struct platform_driver hash_driver = { }; /** - * u8500_hash_mod_init - The kernel module init function. + * ux500_hash_mod_init - The kernel module init function. */ -static int __init u8500_hash_mod_init(void) +static int __init ux500_hash_mod_init(void) { pr_debug(DEV_DBG_NAME " [%s] is called!", __func__); - klist_init(&driver_data.device_list, NULL, NULL); /* Initialize the semaphore to 0 devices (locked state) */ sema_init(&driver_data.device_allocation, 0); @@ -1788,9 +1787,9 @@ static int __init u8500_hash_mod_init(void) } /** - * u8500_hash_mod_fini - The kernel module exit function. + * ux500_hash_mod_fini - The kernel module exit function. */ -static void __exit u8500_hash_mod_fini(void) +static void __exit ux500_hash_mod_fini(void) { pr_debug(DEV_DBG_NAME " [%s] is called!", __func__); @@ -1798,10 +1797,10 @@ static void __exit u8500_hash_mod_fini(void) return; } -module_init(u8500_hash_mod_init); -module_exit(u8500_hash_mod_fini); +module_init(ux500_hash_mod_init); +module_exit(ux500_hash_mod_fini); -MODULE_DESCRIPTION("Driver for ST-Ericsson U8500 HASH engine."); +MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine."); MODULE_LICENSE("GPL"); MODULE_ALIAS("sha1-all"); -- cgit v1.2.3 From ea52e07a9554e72ac8a6ff26eb529f14b3e66248 Mon Sep 17 00:00:00 2001 From: Berne Hebark Date: Wed, 25 May 2011 14:27:51 +0200 Subject: crypto: ux500: hash: DMA support - Support for DMA. - Direct to CPU mode for data size < 4 byte data. 
- Workaround to handle data sizes not modulo wordsize. - Error message/check for HMAC empty message with keysize > 0. - Error message/check for HMAC DMA for u5500, since not working. - Additional, update cryp driver dma code according to this patch and make minor adjustments to comply with mainline code and design are covered by AP370178. ST-Ericsson ID: 280691 ST-Ericsson Linux next: Not tested, ER 320876 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: I14d64d1577f007969b372ed4ef04556eca8bc0d6 Signed-off-by: Berne Hebark Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/33983 Reviewed-by: Per FORLIN Reviewed-by: QATOOLS Reviewed-by: QABUILD --- arch/arm/mach-ux500/include/mach/crypto-ux500.h | 4 +- drivers/crypto/ux500/hash/hash_alg.h | 36 +- drivers/crypto/ux500/hash/hash_core.c | 602 +++++++++++++++++------- 3 files changed, 470 insertions(+), 172 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-ux500/include/mach/crypto-ux500.h b/arch/arm/mach-ux500/include/mach/crypto-ux500.h index 9d1e1c52c13..80c4620d633 100644 --- a/arch/arm/mach-ux500/include/mach/crypto-ux500.h +++ b/arch/arm/mach-ux500/include/mach/crypto-ux500.h @@ -5,6 +5,7 @@ * License terms: GNU General Public License (GPL) version 2 */ #ifndef _CRYPTO_UX500_H +#include #include struct cryp_platform_data { @@ -13,7 +14,8 @@ struct cryp_platform_data { }; struct hash_platform_data { - struct stedma40_chan_cfg mem_to_engine; + void *mem_to_engine; + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); }; #endif diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h index 299f0bacc2c..61db5b511b6 100644 --- a/drivers/crypto/ux500/hash/hash_alg.h +++ b/drivers/crypto/ux500/hash/hash_alg.h @@ -11,6 +11,7 @@ #include #define HASH_BLOCK_SIZE 64 +#define HASH_DMA_ALIGN_SIZE 4 /* Maximum value of the length's high word */ #define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL @@ -125,6 +126,12 @@ 0x01, HASH_STR_DCAL_POS, \ HASH_STR_DCAL_MASK) +/* Hardware access method */ +enum hash_mode { + HASH_MODE_CPU, + HASH_MODE_DMA +}; + /** * struct uint64 - Structure to handle 64 bits integers. * @high_word: Most significant bits. @@ -286,6 +293,26 @@ struct hash_config { int oper_mode; }; +/** + * struct hash_dma - Structure used for dma. + * @mask: DMA capabilities bitmap mask. + * @complete: Used to maintain state for a "completion". + * @chan_mem2hash: DMA channel. + * @cfg_mem2hash: DMA channel configuration. + * @sg_len: Scatterlist length. + * @sg: Scatterlist. + * @nents: Number of sg entries. + */ +struct hash_dma { + dma_cap_mask_t mask; + struct completion complete; + struct dma_chan *chan_mem2hash; + void *cfg_mem2hash; + int sg_len; + struct scatterlist *sg; + int nents; +}; + /** * struct hash_ctx - The context used for hash calculations. * @key: The key used in the operation. @@ -293,8 +320,10 @@ struct hash_config { * @updated: Indicates if hardware is initialized for new operations. * @state: The state of the current calculations. * @config: The current configuration. - * @digestsize The size of current digest. - * @device Pointer to the device structure. + * @digestsize: The size of current digest. + * @device: Pointer to the device structure. + * @dma_mode: Used in special cases (workaround), e.g. need to change to + * cpu mode, if not supported/working in dma mode. 
*/ struct hash_ctx { u8 *key; @@ -304,6 +333,7 @@ struct hash_ctx { struct hash_config config; int digestsize; struct hash_device_data *device; + bool dma_mode; }; /** @@ -318,6 +348,7 @@ struct hash_ctx { * @regulator: Pointer to the device's power control. * @clk: Pointer to the device's clock control. * @restore_dev_state: TRUE = saved state, FALSE = no saved state. + * @dma: Structure used for dma. */ struct hash_device_data { struct hash_register __iomem *base; @@ -330,6 +361,7 @@ struct hash_device_data { struct ux500_regulator *regulator; struct clk *clk; bool restore_dev_state; + struct hash_dma dma; }; int hash_check_hw(struct hash_device_data *device_data); diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 08a89eeb601..b2a58dccf76 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -22,6 +22,7 @@ #include #include +#include #include #include @@ -29,12 +30,17 @@ #include #include +#include #include #include "hash_alg.h" #define DEV_DBG_NAME "hashX hashX:" +static int hash_mode; +module_param(hash_mode, int, 0); +MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); + /** * Pre-calculated empty message digests. */ @@ -113,6 +119,101 @@ static void release_hash_device(struct hash_device_data *device_data) up(&driver_data.device_allocation); } +static void hash_dma_setup_channel(struct hash_device_data *device_data, + struct device *dev) +{ + struct hash_platform_data *platform_data = dev->platform_data; + dma_cap_zero(device_data->dma.mask); + dma_cap_set(DMA_SLAVE, device_data->dma.mask); + + device_data->dma.cfg_mem2hash = platform_data->mem_to_engine; + device_data->dma.chan_mem2hash = + dma_request_channel(device_data->dma.mask, + platform_data->dma_filter, + device_data->dma.cfg_mem2hash); + + init_completion(&device_data->dma.complete); +} + +static void hash_dma_callback(void *data) +{ + struct hash_ctx *ctx = (struct hash_ctx *) data; + + complete(&ctx->device->dma.complete); +} + +static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, + int len, enum dma_data_direction direction) +{ + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *channel = NULL; + dma_cookie_t cookie; + + if (direction != DMA_TO_DEVICE) { + dev_err(ctx->device->dev, "[%s] Invalid DMA direction", + __func__); + return -EFAULT; + } + + sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE); + + channel = ctx->device->dma.chan_mem2hash; + ctx->device->dma.sg = sg; + ctx->device->dma.sg_len = dma_map_sg(channel->device->dev, + ctx->device->dma.sg, ctx->device->dma.nents, + direction); + + if (!ctx->device->dma.sg_len) { + dev_err(ctx->device->dev, + "[%s]: Could not map the sg list (TO_DEVICE)", + __func__); + return -EFAULT; + } + + dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " + "(TO_DEVICE)", __func__); + desc = channel->device->device_prep_slave_sg(channel, + ctx->device->dma.sg, ctx->device->dma.sg_len, + direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(ctx->device->dev, + "[%s]: device_prep_slave_sg() failed!", __func__); + return -EFAULT; + } + + desc->callback = hash_dma_callback; + desc->callback_param = ctx; + + cookie = desc->tx_submit(desc); + dma_async_issue_pending(channel); + + return 0; +} + +static void hash_dma_done(struct hash_ctx *ctx) +{ + struct dma_chan *chan; + + chan = ctx->device->dma.chan_mem2hash; + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); + dma_unmap_sg(chan->device->dev, 
ctx->device->dma.sg, + ctx->device->dma.sg_len, DMA_TO_DEVICE); + +} + +static int hash_dma_write(struct hash_ctx *ctx, + struct scatterlist *sg, int len) +{ + int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); + if (error) { + dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() " + "failed", __func__); + return error; + } + + return len; +} + /** * get_empty_message_digest - Returns a pre-calculated digest for * the empty message. @@ -197,8 +298,6 @@ static int hash_disable_power( int ret = 0; struct device *dev = device_data->dev; - dev_dbg(dev, "[%s]", __func__); - spin_lock(&device_data->power_state_lock); if (!device_data->power_state) goto out; @@ -236,7 +335,6 @@ static int hash_enable_power( { int ret = 0; struct device *dev = device_data->dev; - dev_dbg(dev, "[%s]", __func__); spin_lock(&device_data->power_state_lock); if (!device_data->power_state) { @@ -287,8 +385,6 @@ static int hash_get_device_data(struct hash_ctx *ctx, struct klist_node *device_node; struct hash_device_data *local_device_data = NULL; - pr_debug(DEV_DBG_NAME " [%s]", __func__); - /* Wait until a device is available */ ret = down_interruptible(&driver_data.device_allocation); if (ret) @@ -390,8 +486,6 @@ static int init_hash_hw(struct hash_device_data *device_data, { int ret = 0; - dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32)ctx); - ret = hash_setconfiguration(device_data, &ctx->config); if (ret) { dev_err(device_data->dev, "[%s] hash_setconfiguration() " @@ -407,6 +501,61 @@ static int init_hash_hw(struct hash_device_data *device_data, return ret; } +/** + * hash_get_nents - Return number of entries (nents) in scatterlist (sg). + * + * @sg: Scatterlist. + * @size: Size in bytes. + * @aligned: True if sg data aligned to work in DMA mode. + * + * Reentrancy: Non Re-entrant + */ +static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned) +{ + int nents = 0; + bool aligned_data = true; + + while (size > 0 && sg) { + nents++; + size -= sg->length; + + /* hash_set_dma_transfer will align last nent */ + if (aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE) || + (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && + size > 0)) + aligned_data = false; + + sg = sg_next(sg); + } + + if (aligned) + *aligned = aligned_data; + + if (size != 0) + return -EFAULT; + + return nents; +} + +/** + * hash_dma_valid_data - checks for dma valid sg data. + * @sg: Scatterlist. + * @datasize: Datasize in bytes. + * + * NOTE! This function checks for dma valid sg data, since dma + * only accept datasizes of even wordsize. + */ +static bool hash_dma_valid_data(struct scatterlist *sg, int datasize) +{ + bool aligned; + + /* Need to include at least one nent, else error */ + if (hash_get_nents(sg, datasize, &aligned) < 1) + return false; + + return aligned; +} + /** * hash_init - Common hash init function for SHA1/SHA2 (SHA256). * @req: The hash request for the job. 
@@ -418,13 +567,39 @@ static int hash_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes); - if (!ctx->key) ctx->keylen = 0; memset(&ctx->state, 0, sizeof(struct hash_state)); ctx->updated = 0; + if (hash_mode == HASH_MODE_DMA) { + if ((ctx->config.oper_mode == HASH_OPER_MODE_HMAC) && + cpu_is_u5500()) { + pr_debug(DEV_DBG_NAME " [%s] HMAC and DMA not working " + "on u5500, directing to CPU mode.", + __func__); + ctx->dma_mode = false; /* Don't use DMA in this case */ + goto out; + } + + if (req->nbytes < HASH_DMA_ALIGN_SIZE) { + ctx->dma_mode = false; /* Don't use DMA in this case */ + + pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct " + "to CPU mode for data size < %d", + __func__, HASH_DMA_ALIGN_SIZE); + } else { + if (hash_dma_valid_data(req->src, req->nbytes)) { + ctx->dma_mode = true; + } else { + ctx->dma_mode = false; + pr_debug(DEV_DBG_NAME " [%s] DMA mode, but " + "direct to CPU mode for " + "non-aligned data", __func__); + } + } + } +out: return 0; } @@ -474,9 +649,6 @@ static void hash_processblock( static void hash_messagepad(struct hash_device_data *device_data, const u32 *message, u8 index_bytes) { - dev_dbg(device_data->dev, "[%s] (bytes in final msg=%d))", - __func__, index_bytes); - /* * Clear hash str register, only clear NBLW * since DCAL will be reset by hardware. @@ -561,7 +733,6 @@ int hash_setconfiguration(struct hash_device_data *device_data, struct hash_config *config) { int ret = 0; - dev_dbg(device_data->dev, "[%s] ", __func__); if (config->algorithm != HASH_ALGO_SHA1 && config->algorithm != HASH_ALGO_SHA256) @@ -573,13 +744,6 @@ int hash_setconfiguration(struct hash_device_data *device_data, */ HASH_SET_DATA_FORMAT(config->data_format); - /* - * Empty message bit. This bit is needed when the hash input data - * contain the empty message. Always set in current impl. but with - * no impact on data different than empty message. - */ - HASH_SET_BITS(&device_data->base->cr, HASH_CR_EMPTYMSG_MASK); - /* * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256 */ @@ -652,7 +816,6 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) { /* HW and SW initializations */ /* Note: there is no need to initialize buffer and digest members */ - dev_dbg(device_data->dev, "[%s] ", __func__); while (device_data->base->str & HASH_STR_DCAL_MASK) cpu_relax(); @@ -688,6 +851,7 @@ int hash_process_data( msg_length = 0; } else { if (ctx->updated) { + ret = hash_resume_state(device_data, &ctx->state); if (ret) { @@ -696,7 +860,6 @@ int hash_process_data( " failed!", __func__); goto out; } - } else { ret = init_hash_hw(device_data, ctx); if (ret) { @@ -732,6 +895,7 @@ int hash_process_data( } hash_incrementlength(ctx, HASH_BLOCK_SIZE); data_buffer += (HASH_BLOCK_SIZE - *index); + msg_length -= (HASH_BLOCK_SIZE - *index); *index = 0; @@ -750,6 +914,236 @@ out: return ret; } +/** + * hash_dma_final - The hash dma final function for SHA1/SHA256. + * @req: The hash request for the job. 
+ */ +static int hash_dma_final(struct ahash_request *req) +{ + int ret = 0; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct hash_device_data *device_data; + u8 digest[SHA256_DIGEST_SIZE]; + int bytes_written = 0; + + ret = hash_get_device_data(ctx, &device_data); + if (ret) + return ret; + + dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); + + /* Enable device power (and clock) */ + ret = hash_enable_power(device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "hash_enable_power() failed!", __func__); + goto out; + } + + if (ctx->updated) { + ret = hash_resume_state(device_data, &ctx->state); + + if (ret) { + dev_err(device_data->dev, "[%s] hash_resume_state() " + "failed!", __func__); + goto out_power; + } + + } + + if (!ctx->updated) { + ret = hash_setconfiguration(device_data, &ctx->config); + if (ret) { + dev_err(device_data->dev, "[%s] " + "hash_setconfiguration() failed!", + __func__); + goto out_power; + } + + /* Enable DMA input */ + if (hash_mode != HASH_MODE_DMA || !ctx->dma_mode) { + HASH_CLEAR_BITS(&device_data->base->cr, + HASH_CR_DMAE_MASK); + } else { + HASH_SET_BITS(&device_data->base->cr, + HASH_CR_DMAE_MASK); + HASH_SET_BITS(&device_data->base->cr, + HASH_CR_PRIVN_MASK); + } + + HASH_INITIALIZE; + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) + hash_hw_write_key(device_data, ctx->key, ctx->keylen); + + /* Number of bits in last word = (nbytes * 8) % 32 */ + HASH_SET_NBLW((req->nbytes * 8) % 32); + ctx->updated = 1; + } + + /* Store the nents in the dma struct. */ + ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); + if (!ctx->device->dma.nents) { + dev_err(device_data->dev, "[%s] " + "ctx->device->dma.nents = 0", __func__); + goto out_power; + } + + bytes_written = hash_dma_write(ctx, req->src, req->nbytes); + if (bytes_written != req->nbytes) { + dev_err(device_data->dev, "[%s] " + "hash_dma_write() failed!", __func__); + goto out_power; + } + + wait_for_completion(&ctx->device->dma.complete); + hash_dma_done(ctx); + + while (device_data->base->str & HASH_STR_DCAL_MASK) + cpu_relax(); + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { + unsigned int keylen = ctx->keylen; + u8 *key = ctx->key; + + dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, + ctx->keylen); + hash_hw_write_key(device_data, key, keylen); + } + + hash_get_digest(device_data, digest, ctx->config.algorithm); + memcpy(req->result, digest, ctx->digestsize); + +out_power: + /* Disable power (and clock) */ + if (hash_disable_power(device_data, false)) + dev_err(device_data->dev, "[%s] hash_disable_power() failed!", + __func__); + +out: + release_hash_device(device_data); + + /** + * Allocated in setkey, and only used in HMAC. + */ + kfree(ctx->key); + + return ret; +} + +/** + * hash_hw_final - The final hash calculation function + * @req: The hash request for the job. 
+ */ +int hash_hw_final(struct ahash_request *req) +{ + int ret = 0; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct hash_device_data *device_data; + u8 digest[SHA256_DIGEST_SIZE]; + + ret = hash_get_device_data(ctx, &device_data); + if (ret) + return ret; + + dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); + + /* Enable device power (and clock) */ + ret = hash_enable_power(device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "hash_enable_power() failed!", __func__); + goto out; + } + + if (ctx->updated) { + ret = hash_resume_state(device_data, &ctx->state); + + if (ret) { + dev_err(device_data->dev, "[%s] hash_resume_state() " + "failed!", __func__); + goto out_power; + } + } else if (req->nbytes == 0 && ctx->keylen == 0) { + u8 zero_hash[SHA256_DIGEST_SIZE]; + u32 zero_hash_size = 0; + bool zero_digest = false; + /** + * Use a pre-calculated empty message digest + * (workaround since hw return zeroes, hw bug!?) + */ + ret = get_empty_message_digest(device_data, &zero_hash[0], + &zero_hash_size, &zero_digest); + if (!ret && likely(zero_hash_size == ctx->digestsize) && + zero_digest) { + memcpy(req->result, &zero_hash[0], ctx->digestsize); + goto out_power; + } else if (!ret && !zero_digest) { + dev_dbg(device_data->dev, "[%s] HMAC zero msg with " + "key, continue...", __func__); + } else { + dev_err(device_data->dev, "[%s] ret=%d, or wrong " + "digest size? %s", __func__, ret, + (zero_hash_size == ctx->digestsize) ? + "true" : "false"); + /* Return error */ + goto out_power; + } + } else if (req->nbytes == 0 && ctx->keylen > 0) { + dev_err(device_data->dev, "[%s] Empty message with " + "keylength > 0, NOT supported.", __func__); + goto out_power; + } + + if (!ctx->updated) { + ret = init_hash_hw(device_data, ctx); + if (ret) { + dev_err(device_data->dev, "[%s] init_hash_hw() " + "failed!", __func__); + goto out_power; + } + } + + if (ctx->state.index) { + hash_messagepad(device_data, ctx->state.buffer, + ctx->state.index); + } else { + HASH_SET_DCAL; + while (device_data->base->str & HASH_STR_DCAL_MASK) + cpu_relax(); + } + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { + unsigned int keylen = ctx->keylen; + u8 *key = ctx->key; + + dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, + ctx->keylen); + hash_hw_write_key(device_data, key, keylen); + } + + hash_get_digest(device_data, digest, ctx->config.algorithm); + memcpy(req->result, digest, ctx->digestsize); + +out_power: + /* Disable power (and clock) */ + if (hash_disable_power(device_data, false)) + dev_err(device_data->dev, "[%s] hash_disable_power() failed!", + __func__); + +out: + release_hash_device(device_data); + + /** + * Allocated in setkey, and only used in HMAC. + */ + kfree(ctx->key); + + return ret; +} + /** * hash_hw_update - Updates current HASH computation hashing another part of * the message. 
@@ -770,8 +1164,6 @@ int hash_hw_update(struct ahash_request *req) struct crypto_hash_walk walk; int msg_length = crypto_hash_walk_first(req, &walk); - pr_debug(DEV_DBG_NAME " [%s] datalength: %d", __func__, msg_length); - /* Empty message ("") is correct indata */ if (msg_length == 0) return ret; @@ -818,9 +1210,9 @@ int hash_hw_update(struct ahash_request *req) } ctx->state.index = index; - dev_dbg(device_data->dev, "[%s] indata length=%d, " - "bin=%d))", __func__, ctx->state.index, ctx->state.bit_index); + "bin=%d))", __func__, ctx->state.index, ctx->state.bit_index); + out_power: /* Disable power (and clock) */ if (hash_disable_power(device_data, false)) @@ -846,9 +1238,6 @@ int hash_resume_state(struct hash_device_data *device_data, s32 count; int hash_mode = HASH_OPER_MODE_HASH; - dev_dbg(device_data->dev, "[%s] (state(0x%x)))", - __func__, (u32) device_state); - if (NULL == device_state) { dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", __func__); @@ -909,9 +1298,6 @@ int hash_save_state(struct hash_device_data *device_data, u32 count; int hash_mode = HASH_OPER_MODE_HASH; - dev_dbg(device_data->dev, "[%s] state(0x%x)))", - __func__, (u32) device_state); - if (NULL == device_state) { dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", __func__); @@ -961,8 +1347,6 @@ int hash_check_hw(struct hash_device_data *device_data) { int ret = 0; - dev_dbg(device_data->dev, "[%s] ", __func__); - if (NULL == device_data) { ret = -EPERM; dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", @@ -1041,17 +1425,18 @@ void hash_get_digest(struct hash_device_data *device_data, static int ahash_update(struct ahash_request *req) { int ret = 0; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s] ", __func__); + if (hash_mode != HASH_MODE_DMA || !ctx->dma_mode) + ret = hash_hw_update(req); + /* Skip update for DMA, all data will be passed to DMA in final */ - ret = hash_hw_update(req); if (ret) { pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!", __func__); - goto out; } -out: return ret; } @@ -1064,103 +1449,18 @@ static int ahash_final(struct ahash_request *req) int ret = 0; struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - struct hash_device_data *device_data; - u8 digest[SHA256_DIGEST_SIZE]; - - pr_debug(DEV_DBG_NAME " [%s] ", __func__); - ret = hash_get_device_data(ctx, &device_data); - if (ret) - return ret; + pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes); - dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); + if ((hash_mode == HASH_MODE_DMA) && ctx->dma_mode) + ret = hash_dma_final(req); + else + ret = hash_hw_final(req); - /* Enable device power (and clock) */ - ret = hash_enable_power(device_data, false); if (ret) { - dev_err(device_data->dev, "[%s]: " - "hash_enable_power() failed!", __func__); - goto out; - } - - if (ctx->updated) { - ret = hash_resume_state(device_data, &ctx->state); - - if (ret) { - dev_err(device_data->dev, "[%s] hash_resume_state() " - "failed!", __func__); - goto out_power; - } - } else if (req->nbytes == 0 && ctx->keylen == 0) { - u8 zero_hash[SHA256_DIGEST_SIZE]; - u32 zero_hash_size = 0; - bool zero_digest = false; - /** - * Use a pre-calculated empty message digest - * (workaround since hw return zeroes, hw bug!?) 
- */ - ret = get_empty_message_digest(device_data, &zero_hash[0], - &zero_hash_size, &zero_digest); - if (!ret && likely(zero_hash_size == ctx->digestsize) && - zero_digest) { - memcpy(req->result, &zero_hash[0], ctx->digestsize); - goto out_power; - } else if (!ret && !zero_digest) { - dev_dbg(device_data->dev, "[%s] HMAC zero msg with " - "key, continue...", __func__); - } else { - dev_err(device_data->dev, "[%s] ret=%d, or wrong " - "digest size? %s", __func__, ret, - (zero_hash_size == ctx->digestsize) ? - "true" : "false"); - /* Return error */ - goto out_power; - } - } - - if (!ctx->updated) { - ret = init_hash_hw(device_data, ctx); - if (ret) { - dev_err(device_data->dev, "[%s] init_hash_hw() " - "failed!", __func__); - goto out_power; - } - } - - if (ctx->state.index) { - hash_messagepad(device_data, ctx->state.buffer, - ctx->state.index); - } else { - HASH_SET_DCAL; - while (device_data->base->str & HASH_STR_DCAL_MASK) - cpu_relax(); - } - - if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { - unsigned int keylen = ctx->keylen; - u8 *key = ctx->key; - - dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, - ctx->keylen); - hash_hw_write_key(device_data, key, keylen); - } - - hash_get_digest(device_data, digest, ctx->config.algorithm); - memcpy(req->result, digest, ctx->digestsize); - -out_power: - /* Disable power (and clock) */ - if (hash_disable_power(device_data, false)) - dev_err(device_data->dev, "[%s] hash_disable_power() failed!", + pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed", __func__); - -out: - release_hash_device(device_data); - - /** - * Allocated in setkey, and only used in HMAC. - */ - kfree(ctx->key); + } return ret; } @@ -1171,8 +1471,6 @@ static int hash_setkey(struct crypto_ahash *tfm, int ret = 0; struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s] keylen: %d", __func__, keylen); - /** * Freed in final. 
*/ @@ -1194,8 +1492,6 @@ static int ahash_sha1_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx); - ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA1; ctx->config.oper_mode = HASH_OPER_MODE_HASH; @@ -1209,8 +1505,6 @@ static int ahash_sha256_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx); - ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA256; ctx->config.oper_mode = HASH_OPER_MODE_HASH; @@ -1223,8 +1517,6 @@ static int ahash_sha1_digest(struct ahash_request *req) { int ret2, ret1; - pr_debug(DEV_DBG_NAME " [%s]", __func__); - ret1 = ahash_sha1_init(req); if (ret1) goto out; @@ -1240,8 +1532,6 @@ static int ahash_sha256_digest(struct ahash_request *req) { int ret2, ret1; - pr_debug(DEV_DBG_NAME " [%s]", __func__); - ret1 = ahash_sha256_init(req); if (ret1) goto out; @@ -1258,8 +1548,6 @@ static int hmac_sha1_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx); - ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA1; ctx->config.oper_mode = HASH_OPER_MODE_HMAC; @@ -1273,8 +1561,6 @@ static int hmac_sha256_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx); - ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA256; ctx->config.oper_mode = HASH_OPER_MODE_HMAC; @@ -1287,8 +1573,6 @@ static int hmac_sha1_digest(struct ahash_request *req) { int ret2, ret1; - pr_debug(DEV_DBG_NAME " [%s]", __func__); - ret1 = hmac_sha1_init(req); if (ret1) goto out; @@ -1304,8 +1588,6 @@ static int hmac_sha256_digest(struct ahash_request *req) { int ret2, ret1; - pr_debug(DEV_DBG_NAME " [%s]", __func__); - ret1 = hmac_sha256_init(req); if (ret1) goto out; @@ -1320,16 +1602,12 @@ out: static int hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { - pr_debug(DEV_DBG_NAME " [%s]", __func__); - return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); } static int hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { - pr_debug(DEV_DBG_NAME " [%s]", __func__); - return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256); } @@ -1425,8 +1703,6 @@ static int ahash_algs_register_all(struct hash_device_data *device_data) int i; int count; - dev_dbg(device_data->dev, "[%s]", __func__); - for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++) { ret = crypto_register_ahash(ux500_ahash_algs[i]); if (ret) { @@ -1451,8 +1727,6 @@ static void ahash_algs_unregister_all(struct hash_device_data *device_data) { int i; - dev_dbg(device_data->dev, "[%s]", __func__); - for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++) crypto_unregister_ahash(ux500_ahash_algs[i]); } @@ -1468,7 +1742,6 @@ static int ux500_hash_probe(struct platform_device *pdev) struct hash_device_data *device_data; struct device *dev = &pdev->dev; - dev_dbg(dev, "[%s] (pdev=0x%x)", __func__, (u32) pdev); device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC); if (!device_data) { dev_dbg(dev, "[%s] kzalloc() 
failed!", __func__); @@ -1505,7 +1778,6 @@ static int ux500_hash_probe(struct platform_device *pdev) /* Enable power for HASH1 hardware block */ device_data->regulator = ux500_regulator_get(dev); - if (IS_ERR(device_data->regulator)) { dev_err(dev, "[%s] regulator_get() failed!", __func__); ret = PTR_ERR(device_data->regulator); @@ -1534,6 +1806,9 @@ static int ux500_hash_probe(struct platform_device *pdev) goto out_power; } + if (hash_mode == HASH_MODE_DMA) + hash_dma_setup_channel(device_data, dev); + platform_set_drvdata(pdev, device_data); /* Put the new device into the device list... */ @@ -1585,8 +1860,6 @@ static int ux500_hash_remove(struct platform_device *pdev) struct hash_device_data *device_data; struct device *dev = &pdev->dev; - dev_dbg(dev, "[%s] (pdev=0x%x)", __func__, (u32) pdev); - device_data = platform_get_drvdata(pdev); if (!device_data) { dev_err(dev, "[%s]: platform_get_drvdata() failed!", @@ -1646,8 +1919,6 @@ static void ux500_hash_shutdown(struct platform_device *pdev) struct resource *res = NULL; struct hash_device_data *device_data; - dev_dbg(&pdev->dev, "[%s]", __func__); - device_data = platform_get_drvdata(pdev); if (!device_data) { dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", @@ -1701,8 +1972,6 @@ static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state) struct hash_device_data *device_data; struct hash_ctx *temp_ctx = NULL; - dev_dbg(&pdev->dev, "[%s]", __func__); - device_data = platform_get_drvdata(pdev); if (!device_data) { dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", @@ -1740,8 +2009,6 @@ static int ux500_hash_resume(struct platform_device *pdev) struct hash_device_data *device_data; struct hash_ctx *temp_ctx = NULL; - dev_dbg(&pdev->dev, "[%s]", __func__); - device_data = platform_get_drvdata(pdev); if (!device_data) { dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", @@ -1783,7 +2050,6 @@ static struct platform_driver hash_driver = { */ static int __init ux500_hash_mod_init(void) { - pr_debug(DEV_DBG_NAME " [%s] is called!", __func__); klist_init(&driver_data.device_list, NULL, NULL); /* Initialize the semaphore to 0 devices (locked state) */ sema_init(&driver_data.device_allocation, 0); @@ -1796,8 +2062,6 @@ static int __init ux500_hash_mod_init(void) */ static void __exit ux500_hash_mod_fini(void) { - pr_debug(DEV_DBG_NAME " [%s] is called!", __func__); - platform_driver_unregister(&hash_driver); return; } -- cgit v1.2.3 From 18c245732b85fb56457cb70fcdf34878dade3228 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Thu, 12 Jan 2012 13:14:22 +0100 Subject: security: ux500: Coding style fixes Fix the most obvious violations of the kernel coding style Signed-off-by: Jonas Aaberg --- arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h b/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h index 6978b7314c5..2ac88edfe71 100644 --- a/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h +++ b/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h @@ -45,4 +45,3 @@ struct tee_ta_start_modem { int tee_ta_start_modem(struct tee_ta_start_modem *data); #endif - -- cgit v1.2.3 From 3ee97c4b21460d30f853e936808e4074fc86da14 Mon Sep 17 00:00:00 2001 From: Michel JAOUEN Date: Thu, 19 Jan 2012 18:21:50 +0100 Subject: mach-ux500, drivers: u9540 security fix ST-Ericsson ID: 409625 ST-Ericsson FOSS-OUT ID: trivial ST-Ericsson Linux next: NA Depends-On: 
Change-Id: I43d5d593a4b6183d39322851db930e687177eead
Signed-off-by: Michel JAOUEN
Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/45317
Reviewed-by: QATOOLS
Reviewed-by: QABUILD
Reviewed-by: Srinidhi KASAGAR
Reviewed-by: Berne HEBARK
Reviewed-by: Linus WALLEIJ
---
 arch/arm/mach-ux500/tee_ux500.c  | 6 +++++-
 drivers/crypto/ux500/cryp/cryp.c | 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c
index 160ca529261..0fc10c0a744 100644
--- a/arch/arm/mach-ux500/tee_ux500.c
+++ b/arch/arm/mach-ux500/tee_ux500.c
@@ -24,7 +24,11 @@ static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...)
         va_list ap;
         u32 ret;
 
-        if (cpu_is_u8500v20_or_later())
+        if (cpu_is_u9540())
+                hw_sec_rom_pub_bridge = (bridge_func)
+                        ((u32)IO_ADDRESS_DB9540_ROM
+                        (U9540_BOOT_ROM_BASE + 0x17300));
+        else if (cpu_is_u8500v20_or_later())
                 hw_sec_rom_pub_bridge = (bridge_func)
                         ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x17300));
         else if (cpu_is_u5500())
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index 211200fed34..cec92af2f73 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -37,7 +37,7 @@ int cryp_check(struct cryp_device_data *device_data)
         if (NULL == device_data)
                 return -EINVAL;
 
-        if (cpu_is_u8500())
+        if (cpu_is_u8500() || cpu_is_u9540())
                 peripheralID2 = CRYP_PERIPHERAL_ID2_DB8500;
         else if (cpu_is_u5500())
                 peripheralID2 = CRYP_PERIPHERAL_ID2_DB5500;
-- cgit v1.2.3

From aeb2742534db86281ed3a6dfc295888c4893f4a2 Mon Sep 17 00:00:00 2001
From: Jonas Aaberg
Date: Tue, 20 Dec 2011 08:24:42 +0100
Subject: ARM: ux500: tee: Always assume v2.0 or later

Remove check for pre v2.0 db8500 since pre v2.0 hardware is no longer supported.

ST-Ericsson Linux next: -
ST-Ericsson ID: 370799
ST-Ericsson FOSS-OUT ID: Trivial
Change-Id: I5d1ab944c6d85cc39eb748a9bc585c2c6ca5e5ac
Signed-off-by: Jonas Aaberg
Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/43189
Reviewed-by: QABUILD
Reviewed-by: Joakim BECH
Reviewed-by: QATEST
---
 arch/arm/mach-ux500/tee_ux500.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c
index 0fc10c0a744..9fa985a48c8 100644
--- a/arch/arm/mach-ux500/tee_ux500.c
+++ b/arch/arm/mach-ux500/tee_ux500.c
@@ -28,7 +28,7 @@ static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...)
                 hw_sec_rom_pub_bridge = (bridge_func)
                         ((u32)IO_ADDRESS_DB9540_ROM
                         (U9540_BOOT_ROM_BASE + 0x17300));
-        else if (cpu_is_u8500v20_or_later())
+        else if (cpu_is_u8500())
                 hw_sec_rom_pub_bridge = (bridge_func)
                         ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x17300));
         else if (cpu_is_u5500())
-- cgit v1.2.3
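
Note on the two tee_ux500.c patches above: taken together, they leave call_sec_rom_bridge() selecting the secure ROM bridge entry point purely from the SoC detected at run time. The snippet below is only an illustrative sketch of that selection logic, assuming the names visible in the diffs (bridge_func, IO_ADDRESS, IO_ADDRESS_DB9540_ROM, the cpu_is_*() helpers, the *_BOOT_ROM_BASE constants and the 0x17300 bridge offset); the exact bridge_func signature and the u5500 branch body are not shown here and are written as assumptions.

/*
 * Illustrative sketch only, not the file's actual contents.
 * Assumes the identifiers named in the diffs above.
 */
#include <linux/types.h>

typedef u32 (*bridge_func)(u32 service_id, u32 cfg, ...);  /* assumed signature */

static bridge_func select_sec_rom_bridge(void)
{
        /* DB9540: boot ROM is reached through its own address-translation macro. */
        if (cpu_is_u9540())
                return (bridge_func)((u32)IO_ADDRESS_DB9540_ROM(U9540_BOOT_ROM_BASE
                                                                + 0x17300));

        /* DB8500: the pre-v2.0 check was dropped, so a plain cpu_is_u8500() is enough. */
        if (cpu_is_u8500())
                return (bridge_func)((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x17300));

        /* A cpu_is_u5500() branch exists in the file but is not visible in these diffs. */
        return NULL;
}

call_sec_rom_bridge() would then build its va_list and jump through the returned pointer, as in the original function shown in the first hunk.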