From 3416b97dc49ca05087870066ba991a727133b9bd Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Thu, 20 Oct 2011 16:26:16 +0200 Subject: add trusted execution environment (tee) driver TEE working now for Android, tested ok with COPS! * Updated according to review comments - Added ST-Ericsson copyright headers to all tee files. - Fixed problem with not using readl/writel when using ioremap. - Fixed problem with forgetting to do iounmap on special case. - Fixed incorrect usage when doing copy_to_user when writing to the device. - Added architecture dependent file for the tee service that calls the secure world. - Added support for more several inputs (sharedmemory buffers) for tee. - Added dummy macro to map MT_MEMORY device. - Fixed memory leak in secure world due to not closing a TEE session correctly from the kernel. - Now we only copies input buffer from user space for tee. - Documented structures in tee.h. - Moved SVP implementation into arch/arm/mach-ux500 folder. - Added new config flags for ux500 and SVP regarding TEE driver. - Update mach-ux500/Kconfig: - Enable TEE_UX500 by default when using target hardware. - Enabel TEE_SVP by default when building simulator. - Fix the cache sync problem: not request ROM code to clean cache - ioremap for ICN_BASE, remove static mapping in cpu-db8500.c. - Fix ioremap of ICN_BASE and do iounmap after use. ST-Ericsson ID: WP269815 Change-Id: Ie861a90ec790e95fb3992e560512661693548a43 Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/4168 Reviewed-by: Shujuan CHEN Tested-by: Shujuan CHEN Signed-off-by: Lee Jones --- .../mach-ux500/include/mach/tee_ta_start_modem.h | 48 ++ arch/arm/mach-ux500/tee_service_svp.c | 66 +++ arch/arm/mach-ux500/tee_ta_start_modem_svp.c | 56 +++ arch/arm/mach-ux500/tee_ux500.c | 79 ++++ drivers/Kconfig | 3 + drivers/Makefile | 1 + drivers/tee/Kconfig | 13 + drivers/tee/Makefile | 8 + drivers/tee/tee_driver.c | 484 +++++++++++++++++++++ drivers/tee/tee_service.c | 17 + include/linux/tee.h | 143 ++++++ 11 files changed, 918 insertions(+) create mode 100644 arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h create mode 100644 arch/arm/mach-ux500/tee_service_svp.c create mode 100644 arch/arm/mach-ux500/tee_ta_start_modem_svp.c create mode 100644 arch/arm/mach-ux500/tee_ux500.c create mode 100644 drivers/tee/Kconfig create mode 100644 drivers/tee/Makefile create mode 100644 drivers/tee/tee_driver.c create mode 100644 drivers/tee/tee_service.c create mode 100644 include/linux/tee.h diff --git a/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h b/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h new file mode 100644 index 00000000000..6978b7314c5 --- /dev/null +++ b/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h @@ -0,0 +1,48 @@ +/* + * Data types and interface for TEE application for starting the modem. 
+ * + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef TEE_TA_START_MODEM_H +#define TEE_TA_START_MODEM_H + +#define COMMAND_ID_START_MODEM 0x00000001 + +#define UUID_TEE_TA_START_MODEM_LOW 0x8AD94107 +#define UUID_TEE_TA_START_MODEM_MID 0x6E50 +#define UUID_TEE_TA_START_MODEM_HIGH 0x418E +#define UUID_TEE_TA_START_MODEM_CLOCKSEQ \ + {0xB1, 0x14, 0x75, 0x7D, 0x60, 0x21, 0xBD, 0x36} + +struct mcore_segment_descr { + void *segment; + void *hash; + u32 size; +}; + +struct access_image_descr { + void *elf_hdr; + void *pgm_hdr_tbl; + void *signature; + unsigned long nbr_segment; + struct mcore_segment_descr *descr; +}; + +/* TODO: To be redefined with only info needed by Secure world. */ +struct tee_ta_start_modem { + void *access_mem_start; + u32 shared_mem_size; + u32 access_private_mem_size; + struct access_image_descr access_image_descr; +}; + +/** + * This is the function to handle the modem release. + */ +int tee_ta_start_modem(struct tee_ta_start_modem *data); + +#endif + diff --git a/arch/arm/mach-ux500/tee_service_svp.c b/arch/arm/mach-ux500/tee_service_svp.c new file mode 100644 index 00000000000..aa65dd961a0 --- /dev/null +++ b/arch/arm/mach-ux500/tee_service_svp.c @@ -0,0 +1,66 @@ +/* + * TEE service to handle the calls to trusted applications in SVP. + * + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen + * License terms: GNU General Public License (GPL) version 2 + */ + +#include +#include +#include +#include +#include "mach/tee_ta_start_modem.h" + +static int cmp_uuid_start_modem(struct tee_uuid *uuid) +{ + int ret = -EINVAL; + + if (uuid == NULL) + return -EINVAL; + + /* This handles the calls to TA for start the modem */ + if ((uuid->timeLow == UUID_TEE_TA_START_MODEM_LOW) && + (uuid->timeMid == UUID_TEE_TA_START_MODEM_MID) && + (uuid->timeHiAndVersion == UUID_TEE_TA_START_MODEM_HIGH)) { + + u8 clockSeqAndNode[TEE_UUID_CLOCK_SIZE] = + UUID_TEE_TA_START_MODEM_CLOCKSEQ; + + ret = memcmp(uuid->clockSeqAndNode, clockSeqAndNode, + TEE_UUID_CLOCK_SIZE); + } + + return ret; +} + +int call_sec_world(struct tee_session *ts, int sec_cmd) +{ + int ret = 0; + + if (ts == NULL) + return -EINVAL; + + if (cmp_uuid_start_modem(ts->uuid)) + return -EINVAL; + + switch (ts->cmd) { + case COMMAND_ID_START_MODEM: + ret = tee_ta_start_modem((struct tee_ta_start_modem *) + ts->op); + if (ret) { + ts->err = TEED_ERROR_GENERIC; + ts->origin = TEED_ORIGIN_TEE_APPLICATION; + pr_err("tee_ta_start_modem() failed!\n"); + return ret; + } + break; + + default: + break; + } + + /* TODO: to handle more trusted applications. */ + + return ret; +} diff --git a/arch/arm/mach-ux500/tee_ta_start_modem_svp.c b/arch/arm/mach-ux500/tee_ta_start_modem_svp.c new file mode 100644 index 00000000000..12337b93154 --- /dev/null +++ b/arch/arm/mach-ux500/tee_ta_start_modem_svp.c @@ -0,0 +1,56 @@ +/* + * Trusted application for starting the modem. 
+ * + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen + * License terms: GNU General Public License (GPL) version 2 + */ + +#include +#include +#include +#include +#include + +#include "mach/tee_ta_start_modem.h" + +static int reset_modem(unsigned long modem_start_addr) +{ + void __iomem *base = ioremap(U5500_ACCCON_BASE_SEC, 0x2FF); + if (!base) + return -ENOMEM; + + pr_info("[%s] Setting modem start address!\n", __func__); + writel(base + (U5500_ACCCON_CPUVEC_RESET_ADDR_OFFSET/sizeof(uint32_t)), + modem_start_addr); + + pr_info("[%s] resetting the modem!\n", __func__); + writel(base + (U5500_ACCCON_ACC_CPU_CTRL_OFFSET/sizeof(uint32_t)), 1); + + iounmap(base); + + return 0; +} + +int tee_ta_start_modem(struct tee_ta_start_modem *data) +{ + int ret = 0; + struct elfhdr *elfhdr; + void __iomem *vaddr; + + vaddr = ioremap((unsigned long)data->access_image_descr.elf_hdr, + sizeof(struct elfhdr)); + if (!vaddr) + return -ENOMEM; + + elfhdr = (struct elfhdr *)readl(vaddr); + pr_info("Reading in kernel:elfhdr 0x%x:elfhdr->entry=0x%x\n", + (uint32_t)elfhdr, (uint32_t)elfhdr->e_entry); + + pr_info("[%s] reset modem()...\n", __func__); + ret = reset_modem(elfhdr->e_entry); + + iounmap(vaddr); + + return ret; +} diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c new file mode 100644 index 00000000000..ab3782a323c --- /dev/null +++ b/arch/arm/mach-ux500/tee_ux500.c @@ -0,0 +1,79 @@ +/* + * TEE service to handle the calls to trusted applications. + * + * Copyright (C) ST-Ericsson SA 2010 + * Author: Joakim Bech + * License terms: GNU General Public License (GPL) version 2 + */ +#include +#include +#include + +#include + +#define BOOT_BRIDGE_FUNC (U8500_BOOT_ROM_BASE + 0x18300) + +#define ISSWAPI_EXECUTE_TA 0x11000001 +#define ISSWAPI_CLOSE_TA 0x11000002 + +#define SEC_ROM_NO_FLAG_MASK 0x0000 + +static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...) +{ + typedef u32 (*bridge_func)(u32, u32, va_list); + static bridge_func hw_sec_rom_pub_bridge; + va_list ap; + u32 ret; + + hw_sec_rom_pub_bridge = + (bridge_func)((u32)IO_ADDRESS(BOOT_BRIDGE_FUNC)); + + va_start(ap, cfg); + ret = hw_sec_rom_pub_bridge(service_id, cfg, ap); + va_end(ap); + + return ret; +} + +int call_sec_world(struct tee_session *ts, int sec_cmd) +{ + /* + * ts->ta and ts->uuid is set to NULL when opening the device, + * hence it should be safe to just do the call here. 
+ */ + + switch (sec_cmd) { + case TEED_INVOKE: + if (!ts->uuid) { + call_sec_rom_bridge(ISSWAPI_EXECUTE_TA, + SEC_ROM_NO_FLAG_MASK, + virt_to_phys(&ts->id), + NULL, + virt_to_phys(ts->ta), + ts->cmd, + virt_to_phys((void *)(ts->op)), + virt_to_phys((void *)(&ts->origin))); + } else { + call_sec_rom_bridge(ISSWAPI_EXECUTE_TA, + SEC_ROM_NO_FLAG_MASK, + virt_to_phys(&ts->id), + virt_to_phys(ts->uuid), + virt_to_phys(ts->ta), + ts->cmd, + virt_to_phys((void *)(ts->op)), + virt_to_phys((void *)(&ts->origin))); + } + break; + + case TEED_CLOSE_SESSION: + call_sec_rom_bridge(ISSWAPI_CLOSE_TA, + SEC_ROM_NO_FLAG_MASK, + ts->id, + NULL, + virt_to_phys(ts->ta), + virt_to_phys((void *)(&ts->origin))); + break; + } + + return 0; +} diff --git a/drivers/Kconfig b/drivers/Kconfig index d236aef7e59..68e36aed05b 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -126,6 +126,8 @@ source "drivers/platform/Kconfig" source "drivers/clk/Kconfig" +source "drivers/tee/Kconfig" + source "drivers/hwspinlock/Kconfig" source "drivers/clocksource/Kconfig" @@ -141,3 +143,4 @@ source "drivers/virt/Kconfig" source "drivers/devfreq/Kconfig" endmenu + diff --git a/drivers/Makefile b/drivers/Makefile index 95952c82bf1..4ca756c31be 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -122,6 +122,7 @@ obj-y += platform/ obj-y += ieee802154/ #common clk code obj-y += clk/ +obj-$(CONFIG_TEE_SUPPORT) += tee/ obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ obj-$(CONFIG_NFC) += nfc/ diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig new file mode 100644 index 00000000000..a452e888d77 --- /dev/null +++ b/drivers/tee/Kconfig @@ -0,0 +1,13 @@ +# +# Copyright (C) ST-Ericsson SA 2010 +# Author: Martin Hovang (martin.xm.hovang@stericsson.com) +# License terms: GNU General Public License (GPL) version 2 +# + +# Trursted Execution Environment Configuration +config TEE_SUPPORT + bool "Trusted Execution Environment Support" + default y + ---help--- + This implements the Trusted Execution Environment (TEE) Client + API Specification from GlobalPlatform Device Technology. 
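For reference, below is a minimal user-space sketch of the read/write protocol implemented by tee_driver.c (added further down in this patch): open a session, invoke a command with one input memref, read back the error code and origin, then close the session. It assumes the structure definitions from include/linux/tee.h are usable from user space, that the misc device appears as /dev/tee, and that the memref flag value matches the driver's TEEC_MEM_INPUT; the helper names and the command id are illustrative only and not part of the patch.

/*
 * Hypothetical client sketch for the /dev/tee character device.
 * Assumes include/linux/tee.h (from this patch) is visible to user space.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/tee.h>

#define EX_TEEC_MEM_INPUT 0x00000001	/* must match tee_driver.c */

int main(void)
{
	struct tee_uuid uuid = { 0 };	/* fill in the TA's UUID here */
	struct tee_session ts;
	struct tee_operation op;
	struct tee_read status;
	char input[64] = "data for the trusted application";
	int fd = open("/dev/tee", O_RDWR);

	if (fd < 0)
		return 1;

	/* 1. Open a session: driver goes from OPEN_DEV to OPEN_SESSION. */
	memset(&ts, 0, sizeof(ts));
	ts.driver_cmd = TEED_OPEN_SESSION;
	ts.uuid = &uuid;
	if (write(fd, &ts, sizeof(ts)) != sizeof(ts))
		goto out;

	/* 2. Invoke a TA command; memref 0 carries an input buffer. */
	memset(&op, 0, sizeof(op));
	op.shm[0].buffer = input;
	op.shm[0].size = sizeof(input);
	op.shm[0].flags = EX_TEEC_MEM_INPUT;
	op.flags = 1 << 0;		/* only memref 0 is in use */
	ts.driver_cmd = TEED_INVOKE;
	ts.cmd = 0x00000001;		/* TA-specific command id */
	ts.op = &op;
	write(fd, &ts, sizeof(ts));

	/* 3. Read back the error code and its origin. */
	if (read(fd, &status, sizeof(status)) == sizeof(status))
		printf("err=0x%x origin=0x%x\n", status.err, status.origin);

	/* 4. Close the session; the driver resets to the OPEN_DEV state. */
	ts.driver_cmd = TEED_CLOSE_SESSION;
	write(fd, &ts, sizeof(ts));
out:
	close(fd);
	return 0;
}

Note that each write() must be exactly sizeof(struct tee_session) and each read() exactly sizeof(struct tee_read), or the driver returns -EINVAL.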
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile new file mode 100644 index 00000000000..b937eb19d72 --- /dev/null +++ b/drivers/tee/Makefile @@ -0,0 +1,8 @@ +# +# Copyright (C) ST-Ericsson SA 2010 +# Author: Martin Hovang (martin.xm.hovang@stericsson.com) +# License terms: GNU General Public License (GPL) version 2 +# + +obj-$(CONFIG_TEE_SUPPORT) += tee_service.o +obj-$(CONFIG_TEE_SUPPORT) += tee_driver.o diff --git a/drivers/tee/tee_driver.c b/drivers/tee/tee_driver.c new file mode 100644 index 00000000000..551c92cc054 --- /dev/null +++ b/drivers/tee/tee_driver.c @@ -0,0 +1,484 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Martin Hovang + * Author: Joakim Bech + * License terms: GNU General Public License (GPL) version 2 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TEED_NAME "tee" + +#define TEED_STATE_OPEN_DEV 0 +#define TEED_STATE_OPEN_SESSION 1 + +#define TEEC_MEM_INPUT 0x00000001 +#define TEEC_MEM_OUTPUT 0x00000002 + +static int tee_open(struct inode *inode, struct file *file); +static int tee_release(struct inode *inode, struct file *file); +static int tee_read(struct file *filp, char __user *buffer, + size_t length, loff_t *offset); +static int tee_write(struct file *filp, const char __user *buffer, + size_t length, loff_t *offset); + +static inline void set_emsg(struct tee_session *ts, u32 msg) +{ + ts->err = msg; + ts->origin = TEED_ORIGIN_DRIVER; +} + +static void reset_session(struct tee_session *ts) +{ + ts->state = TEED_STATE_OPEN_DEV; + ts->err = TEED_SUCCESS; + ts->origin = TEED_ORIGIN_DRIVER; + ts->id = 0; + ts->ta = NULL; + ts->uuid = NULL; + ts->cmd = 0; + ts->driver_cmd = TEED_OPEN_SESSION; + ts->ta_size = 0; + ts->op = NULL; +} + +static int copy_ta(struct tee_session *ts, + struct tee_session *ku_buffer) +{ + ts->ta = kmalloc(ku_buffer->ta_size, GFP_KERNEL); + if (ts->ta == NULL) { + pr_err("[%s] error, out of memory (ta)\n", + __func__); + set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY); + return -ENOMEM; + } + + ts->ta_size = ku_buffer->ta_size; + + memcpy(ts->ta, ku_buffer->ta, ku_buffer->ta_size); + return 0; +} + +static int copy_uuid(struct tee_session *ts, + struct tee_session *ku_buffer) +{ + ts->uuid = kmalloc(sizeof(struct tee_uuid), GFP_KERNEL); + + if (ts->uuid == NULL) { + pr_err("[%s] error, out of memory (uuid)\n", + __func__); + set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY); + return -ENOMEM; + } + + memcpy(ts->uuid, ku_buffer->uuid, sizeof(struct tee_uuid)); + + return 0; +} + +static inline void free_operation(struct tee_session *ts) +{ + int i; + + for (i = 0; i < 4; ++i) { + kfree(ts->op->shm[i].buffer); + ts->op->shm[i].buffer = NULL; + } + + kfree(ts->op); + ts->op = NULL; +} + +static inline void memrefs_phys_to_virt(struct tee_session *ts) +{ + int i; + + for (i = 0; i < 4; ++i) { + if (ts->op->flags & (1 << i)) { + ts->op->shm[i].buffer = + phys_to_virt((unsigned long) + ts->op->shm[i].buffer); + } + } +} + +static int copy_memref_to_user(struct tee_operation *op, + struct tee_operation *ubuf_op, + int memref) +{ + unsigned long bytes_left; + + bytes_left = copy_to_user(ubuf_op->shm[memref].buffer, + op->shm[memref].buffer, + op->shm[memref].size); + + if (bytes_left != 0) { + pr_err("[%s] Failed to copy result to user space (%lu " + "bytes left of buffer).\n", __func__, bytes_left); + return bytes_left; + } + + bytes_left = put_user(op->shm[memref].size, &ubuf_op->shm[memref].size); + + if (bytes_left != 0) { + pr_err("[%s] Failed to copy result to user space (%lu " + 
"bytes left of size).\n", __func__, bytes_left); + return -EINVAL; + } + + bytes_left = put_user(op->shm[memref].flags, + &ubuf_op->shm[memref].flags); + if (bytes_left != 0) { + pr_err("[%s] Failed to copy result to user space (%lu " + "bytes left of flags).\n", __func__, bytes_left); + return -EINVAL; + } + + return 0; +} + +static int copy_memref_to_kernel(struct tee_operation *op, + struct tee_operation *kbuf_op, + int memref) +{ + /* Buffer freed in invoke_command if this function fails */ + op->shm[memref].buffer = kmalloc(kbuf_op->shm[memref].size, GFP_KERNEL); + + if (!op->shm[memref].buffer) { + pr_err("[%s] out of memory\n", __func__); + return -ENOMEM; + } + + /* + * Copy shared memory operations to a local kernel + * buffer if they are of type input. + */ + if (kbuf_op->shm[memref].flags & TEEC_MEM_INPUT) { + memcpy(op->shm[memref].buffer, + kbuf_op->shm[memref].buffer, + kbuf_op->shm[memref].size); + } + + op->shm[memref].size = kbuf_op->shm[memref].size; + op->shm[memref].flags = kbuf_op->shm[memref].flags; + + /* Secure world expects physical addresses. */ + op->shm[memref].buffer = (void *)virt_to_phys(op->shm[memref].buffer); + + return 0; +} + +static int open_tee_device(struct tee_session *ts, + struct tee_session *ku_buffer) +{ + int ret; + + if (ku_buffer->driver_cmd != TEED_OPEN_SESSION) { + set_emsg(ts, TEED_ERROR_BAD_STATE); + return -EINVAL; + } + + if (ku_buffer->ta) { + ret = copy_ta(ts, ku_buffer); + } else if (ku_buffer->uuid) { + ret = copy_uuid(ts, ku_buffer); + } else { + set_emsg(ts, TEED_ERROR_COMMUNICATION); + return -EINVAL; + } + + ts->id = 0; + ts->state = TEED_STATE_OPEN_SESSION; + return ret; +} + +static int invoke_command(struct tee_session *ts, + struct tee_session *ku_buffer, + struct tee_session __user *u_buffer) +{ + int i; + int ret = 0; + struct tee_operation *kbuf_op = + (struct tee_operation *)ku_buffer->op; + + ts->op = kmalloc(sizeof(struct tee_operation), GFP_KERNEL); + + if (!ts->op) { + if (ts->op == NULL) { + pr_err("[%s] error, out of memory " + "(op)\n", __func__); + set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY); + ret = -ENOMEM; + goto err; + } + } + + /* Copy memrefs to kernel space. */ + ts->op->flags = kbuf_op->flags; + ts->cmd = ku_buffer->cmd; + + for (i = 0; i < 4; ++i) { + /* We only want to copy memrefs in use. */ + if (kbuf_op->flags & (1 << i)) { + ret = copy_memref_to_kernel(ts->op, kbuf_op, i); + + if (ret) + goto err; + } else { + ts->op->shm[i].buffer = NULL; + ts->op->shm[i].size = 0; + ts->op->shm[i].flags = 0; + } + } + + /* To call secure world */ + if (call_sec_world(ts, TEED_INVOKE)) { + ret = -EINVAL; + goto err; + } + + /* + * Convert physical addresses back to virtual address so the + * kernel can free the buffers when closing the session. 
+ */ + memrefs_phys_to_virt(ts); + + for (i = 0; i < 4; ++i) { + if ((kbuf_op->flags & (1 << i)) && + (kbuf_op->shm[i].flags & TEEC_MEM_OUTPUT)) { + struct tee_operation *ubuf_op = + (struct tee_operation *)u_buffer->op; + + ret = copy_memref_to_user(ts->op, ubuf_op, i); + } + } +err: + free_operation(ts); + + return ret; +} + +static int tee_open(struct inode *inode, struct file *filp) +{ + struct tee_session *ts; + + filp->private_data = kmalloc(sizeof(struct tee_session), + GFP_KERNEL); + + if (filp->private_data == NULL) + return -ENOMEM; + + ts = (struct tee_session *) (filp->private_data); + + reset_session(ts); + + ts->sync = kmalloc(sizeof(struct mutex), GFP_KERNEL); + + if (!ts->sync) + return -ENOMEM; + + mutex_init(ts->sync); + + return 0; +} + +static int tee_release(struct inode *inode, struct file *filp) +{ + struct tee_session *ts; + int i; + + ts = (struct tee_session *) (filp->private_data); + + if (ts == NULL) + goto no_ts; + + if (ts->op) { + for (i = 0; i < 4; ++i) { + kfree(ts->op->shm[i].buffer); + ts->op->shm[i].buffer = NULL; + } + } + + kfree(ts->op); + ts->op = NULL; + + kfree(ts->sync); + ts->sync = NULL; + + kfree(ts->ta); + ts->ta = NULL; + +no_ts: + kfree(filp->private_data); + filp->private_data = NULL; + + return 0; +} + +/* + * Called when a process, which already opened the dev file, attempts + * to read from it. This function gets the current status of the session. + */ +static int tee_read(struct file *filp, char __user *buffer, + size_t length, loff_t *offset) +{ + struct tee_read buf; + struct tee_session *ts; + + if (length != sizeof(struct tee_read)) { + pr_err("[%s] error, incorrect input length\n", + __func__); + return -EINVAL; + } + + ts = (struct tee_session *) (filp->private_data); + + if (ts == NULL || ts->sync == NULL) { + pr_err("[%s] error, private_data not " + "initialized\n", __func__); + return -EINVAL; + } + + mutex_lock(ts->sync); + + buf.err = ts->err; + buf.origin = ts->origin; + + mutex_unlock(ts->sync); + + if (copy_to_user(buffer, &buf, length)) { + pr_err("[%s] error, copy_to_user failed!\n", + __func__); + return -EINVAL; + } + + return length; +} + +/* + * Called when a process writes to a dev file + */ +static int tee_write(struct file *filp, const char __user *buffer, + size_t length, loff_t *offset) +{ + struct tee_session ku_buffer; + struct tee_session *ts; + int ret = length; + + if (length != sizeof(struct tee_session)) { + pr_err("[%s] error, incorrect input length\n", + __func__); + return -EINVAL; + } + + if (copy_from_user(&ku_buffer, buffer, length)) { + pr_err("[%s] error, tee_session " + "copy_from_user failed\n", __func__); + return -EINVAL; + } + + ts = (struct tee_session *) (filp->private_data); + + if (ts == NULL || ts->sync == NULL) { + pr_err("[%s] error, private_data not " + "initialized\n", __func__); + return -EINVAL; + } + + mutex_lock(ts->sync); + + switch (ts->state) { + case TEED_STATE_OPEN_DEV: + ret = open_tee_device(ts, &ku_buffer); + break; + + case TEED_STATE_OPEN_SESSION: + switch (ku_buffer.driver_cmd) { + case TEED_INVOKE: + ret = invoke_command(ts, &ku_buffer, + (struct tee_session *)buffer); + break; + + case TEED_CLOSE_SESSION: + /* no caching implemented yet... 
*/ + if (call_sec_world(ts, TEED_CLOSE_SESSION)) + ret = -EINVAL; + + kfree(ts->ta); + ts->ta = NULL; + + reset_session(ts); + break; + + default: + set_emsg(ts, TEED_ERROR_BAD_PARAMETERS); + ret = -EINVAL; + } + break; + default: + pr_err("[%s] unknown state\n", __func__); + set_emsg(ts, TEED_ERROR_BAD_STATE); + ret = -EINVAL; + } + + /* + * We expect that ret has value zero when reaching the end here. + * If it has any other value some error must have occured. + */ + if (!ret) + ret = length; + else + ret = -EINVAL; + + mutex_unlock(ts->sync); + + return ret; +} + +static const struct file_operations tee_fops = { + .owner = THIS_MODULE, + .read = tee_read, + .write = tee_write, + .open = tee_open, + .release = tee_release, +}; + +static struct miscdevice tee_dev = { + MISC_DYNAMIC_MINOR, + TEED_NAME, + &tee_fops +}; + +static int __init tee_init(void) +{ + int err = 0; + + err = misc_register(&tee_dev); + + if (err) { + pr_err("[%s] error %d adding character device " + "TEE\n", __func__, err); + } + + return err; +} + +static void __exit tee_exit(void) +{ + misc_deregister(&tee_dev); +} + +module_init(tee_init); +module_exit(tee_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Trusted Execution Enviroment driver"); diff --git a/drivers/tee/tee_service.c b/drivers/tee/tee_service.c new file mode 100644 index 00000000000..b01e9d0ac39 --- /dev/null +++ b/drivers/tee/tee_service.c @@ -0,0 +1,17 @@ +/* + * TEE service to handle the calls to trusted applications. + * + * Copyright (C) ST-Ericsson SA 2010 + * Author: Joakim Bech + * License terms: GNU General Public License (GPL) version 2 + */ +#include +#include +#include + +int __weak call_sec_world(struct tee_session *ts, int sec_cmd) +{ + pr_info("[%s] Generic call_sec_world called!\n", __func__); + + return 0; +} diff --git a/include/linux/tee.h b/include/linux/tee.h new file mode 100644 index 00000000000..0cdec2d254a --- /dev/null +++ b/include/linux/tee.h @@ -0,0 +1,143 @@ +/* + * Trusted Execution Environment (TEE) interface for TrustZone enabled ARM CPUs. + * + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen + * Author: Martin Hovang + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef TEE_H +#define TEE_H + +/* tee_cmd id values */ +#define TEED_OPEN_SESSION 0x00000000U +#define TEED_CLOSE_SESSION 0x00000001U +#define TEED_INVOKE 0x00000002U + +/* tee_retval id values */ +#define TEED_SUCCESS 0x00000000U +#define TEED_ERROR_GENERIC 0xFFFF0000U +#define TEED_ERROR_ACCESS_DENIED 0xFFFF0001U +#define TEED_ERROR_CANCEL 0xFFFF0002U +#define TEED_ERROR_ACCESS_CONFLICT 0xFFFF0003U +#define TEED_ERROR_EXCESS_DATA 0xFFFF0004U +#define TEED_ERROR_BAD_FORMAT 0xFFFF0005U +#define TEED_ERROR_BAD_PARAMETERS 0xFFFF0006U +#define TEED_ERROR_BAD_STATE 0xFFFF0007U +#define TEED_ERROR_ITEM_NOT_FOUND 0xFFFF0008U +#define TEED_ERROR_NOT_IMPLEMENTED 0xFFFF0009U +#define TEED_ERROR_NOT_SUPPORTED 0xFFFF000AU +#define TEED_ERROR_NO_DATA 0xFFFF000BU +#define TEED_ERROR_OUT_OF_MEMORY 0xFFFF000CU +#define TEED_ERROR_BUSY 0xFFFF000DU +#define TEED_ERROR_COMMUNICATION 0xFFFF000EU +#define TEED_ERROR_SECURITY 0xFFFF000FU +#define TEED_ERROR_SHORT_BUFFER 0xFFFF0010U + +/* TEE origin codes */ +#define TEED_ORIGIN_DRIVER 0x00000002U +#define TEED_ORIGIN_TEE 0x00000003U +#define TEED_ORIGIN_TEE_APPLICATION 0x00000004U + +#define TEE_UUID_CLOCK_SIZE 8 + +#define TEEC_CONFIG_PAYLOAD_REF_COUNT 4 + +/** + * struct tee_uuid - Structure that represent an uuid. + * @timeLow: The low field of the time stamp. 
+ * @timeMid: The middle field of the time stamp. + * @timeHiAndVersion: The high field of the timestamp multiplexed + * with the version number. + * @clockSeqAndNode: The clock sequence and the node. + * + * This structure have different naming (camel case) to comply with Global + * Platforms TEE Client API spec. This type is defined in RFC4122. + */ +struct tee_uuid { + uint32_t timeLow; + uint16_t timeMid; + uint16_t timeHiAndVersion; + uint8_t clockSeqAndNode[TEE_UUID_CLOCK_SIZE]; +}; + +/** + * struct tee_sharedmemory - Shared memory block for TEE. + * @buffer: The in/out data to TEE. + * @size: The size of the data. + * @flags: Variable telling whether it is a in, out or in/out parameter. + */ +struct tee_sharedmemory { + void *buffer; + size_t size; + uint32_t flags; +}; + +/** + * struct tee_operation - Payload for sessions or invoke operation. + * @shm: Array containing the shared memory buffers. + * @flags: Tells which if memory buffers that are in use. + */ +struct tee_operation { + struct tee_sharedmemory shm[TEEC_CONFIG_PAYLOAD_REF_COUNT]; + uint32_t flags; +}; + +/** + * struct tee_session - The session of an open tee device. + * @state: The current state in the linux kernel. + * @err: Error code (as in Global Platform TEE Client API spec) + * @origin: Origin for the error code (also from spec). + * @id: Implementation defined type, 0 if not used. + * @ta: The trusted application. + * @uuid: The uuid for the trusted application. + * @cmd: The command to be executed in the trusted application. + * @driver_cmd: The command type in the driver. This is used from a client (user + * space to tell the Linux kernel whether it's a open-, + * close-session or if it is an invoke command. + * @ta_size: The size of the trusted application. + * @op: The payload for the trusted application. + * @sync: Mutex to handle multiple use of clients. + * + * This structure is mainly used in the Linux kernel as a session context for + * ongoing operations. Other than that it is also used in the communication with + * the user space. + */ +struct tee_session { + uint32_t state; + uint32_t err; + uint32_t origin; + uint32_t id; + void *ta; + struct tee_uuid *uuid; + unsigned int cmd; + unsigned int driver_cmd; + unsigned int ta_size; + struct tee_operation *op; + struct mutex *sync; +}; + +/** + * struct tee_read - Contains the error message and the origin. + * @err: Error code (as in Global Platform TEE Client API spec) + * @origin: Origin for the error code (also from spec). + * + * This is used by user space when a user space application wants to get more + * information about an error. + */ +struct tee_read { + unsigned int err; /* return value */ + unsigned int origin; /* error origin */ +}; + +/** + * Function that handles the function calls to trusted applications. + * @param ts: The session of a operation to be executed. + * @param sec_cmd: The type of command to be executed, open-, close-session, + * invoke command. + */ +int call_sec_world(struct tee_session *ts, int sec_cmd); + +#endif -- cgit v1.2.3 From 14e8ceccf703058389f1a0b6cb8ebb1e73149dc6 Mon Sep 17 00:00:00 2001 From: Robert Marklund Date: Thu, 7 Oct 2010 20:05:22 +0200 Subject: HCL driver implementation (hash). Major refactoring, checkpatch fixes, documentation of functions, structures, enums. Also moved a lot of code from hash_alg.c to hash_core.c. Fixed problem with incorrect digest when doing HMAC calculations. The problem was that the function that did message pad seems to handle padding for keys incorrectly. 
Now we do not say how many valid bits there are in the last word when it comes to the key and then the hardware seems to handle the key correctly. Fixed contextsaving so hmac(sha1) passes. There was an if statement checking the DINF bit which was never set. I have removed this and then contextsaving for hmac(sha1) is working. Code up to date with the new arch/arm/mach-ux500 folder structure. Signed-off-by: Joakim Bech ux500: switch to DMAENGINE-based DMA driver Switch all DMA clients to the DMA Engine API, and add the platform hooks for the DMA Engine-based DMA40 driver. Signed-off-by: Rabin Vincent Signed-off-by: Robert Marklund Signed-off-by: Lee Jones --- drivers/crypto/Kconfig | 11 + drivers/crypto/Makefile | 1 + drivers/crypto/ux500/Kconfig | 15 + drivers/crypto/ux500/Makefile | 11 + drivers/crypto/ux500/hash/Makefile | 9 + drivers/crypto/ux500/hash/hash_alg.h | 476 +++++++++ drivers/crypto/ux500/hash/hash_alg_p.h | 26 + drivers/crypto/ux500/hash/hash_core.c | 1756 ++++++++++++++++++++++++++++++++ 8 files changed, 2305 insertions(+) create mode 100755 drivers/crypto/ux500/Kconfig create mode 100755 drivers/crypto/ux500/Makefile create mode 100755 drivers/crypto/ux500/hash/Makefile create mode 100755 drivers/crypto/ux500/hash/hash_alg.h create mode 100755 drivers/crypto/ux500/hash/hash_alg_p.h create mode 100755 drivers/crypto/ux500/hash/hash_core.c diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index dd414d9350e..52e0bf5738e 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -296,4 +296,15 @@ config CRYPTO_DEV_TEGRA_AES To compile this driver as a module, choose M here: the module will be called tegra-aes. +config CRYPTO_DEV_UX500 + tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration" + #depends on ARCH_U8500 + select CRYPTO_ALGAPI + help + Driver for ST-Ericsson UX500 crypto engine. + +if CRYPTO_DEV_UX500 + source "drivers/crypto/ux500/Kconfig" +endif # if CRYPTO_DEV_UX500 + endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index f3e64eadd7a..8737ed1bdfe 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -14,3 +14,4 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o +obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig new file mode 100755 index 00000000000..4ac419757d0 --- /dev/null +++ b/drivers/crypto/ux500/Kconfig @@ -0,0 +1,15 @@ + +config CRYPTO_DEV_UX500_HASH + tristate "UX500 crypto driver for HASH block" + depends on ARCH_U8500 + select CRYPTO_ALGAPI + select CRYPTO_HASH + select CRYPTO_HMAC + help + This selects the UX500 hash driver for the HASH hardware. + Depends on U8500/STM DMA if running in DMA mode. + +config CRYPTO_DEV_UX500_DEBUG_INFO + tristate "Enable UX500 crypto drivers debug info" + help + This is to enable the debug info for UX500 crypto drivers. 
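For context, here is a minimal in-kernel sketch of how the shash algorithms registered by this driver ("sha1"/"sha256" and their hmac variants, driver names sha1-u8500 etc.) are consumed through the generic crypto API; example_sha256() is an illustrative helper under those assumptions, not part of the patch.

/*
 * Compute a SHA-256 digest via the shash API; the crypto core selects the
 * registered implementation (e.g. sha256-u8500) by name and priority.
 * The digest buffer must hold SHA256_DIGEST_SIZE (32) bytes.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int example_sha256(const u8 *data, unsigned int len, u8 *digest)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The descriptor carries the per-request context (struct hash_ctx). */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	/* init + update + final in one call. */
	ret = crypto_shash_digest(desc, data, len, digest);

	kfree(desc);
	crypto_free_shash(tfm);
	return ret;
}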
diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile new file mode 100755 index 00000000000..4c187857120 --- /dev/null +++ b/drivers/crypto/ux500/Makefile @@ -0,0 +1,11 @@ + +ifeq ($(CONFIG_CRYPTO_DEV_UX500_DEBUG_INFO),y) + EXTRA_CFLAGS += -D__DEBUG +else + EXTRA_CFLAGS += -D__RELEASE +endif + +obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/ + + + diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile new file mode 100755 index 00000000000..613330a4ca4 --- /dev/null +++ b/drivers/crypto/ux500/hash/Makefile @@ -0,0 +1,9 @@ + +ifeq ($(CONFIG_CRYPTO_DEV_UX500_DEBUG_INFO),y) + EXTRA_CFLAGS += -D__DEBUG +else + EXTRA_CFLAGS += -D__RELEASE +endif + +obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += u8500_hash.o +u8500_hash-objs := hash_core.o diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h new file mode 100755 index 00000000000..e1f7c2eb60b --- /dev/null +++ b/drivers/crypto/ux500/hash/hash_alg.h @@ -0,0 +1,476 @@ +#ifndef _HASH_ALG_H +#define _HASH_ALG_H +/* + * Copyright (C) 2010 ST-Ericsson. + * Copyright (C) 2010 STMicroelectronics. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* Number of bytes the message digest */ +#define HASH_MSG_DIGEST_SIZE 32 +#define HASH_BLOCK_SIZE 64 + +#define __HASH_ENHANCED + +/* Version defines */ +#define HASH_HCL_VERSION_ID 1 +#define HASH_HCL_MAJOR_ID 2 +#define HASH_HCL_MINOR_ID 1 + +#define MAX_HASH_DEVICE 2 + +/* Maximum value of the length's high word */ +#define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL + +/* Power on Reset values HASH registers */ +#define HASH_RESET_CONTROL_REG_VALUE 0x0 +#define HASH_RESET_START_REG_VALUE 0x0 + +/* Number of context swap registers */ +#define HASH_CSR_COUNT 52 + +#define HASH_RESET_CSRX_REG_VALUE 0x0 +#define HASH_RESET_CSFULL_REG_VALUE 0x0 +#define HASH_RESET_CSDATAIN_REG_VALUE 0x0 + +#define HASH_RESET_INDEX_VAL 0x0 +#define HASH_RESET_BIT_INDEX_VAL 0x0 +#define HASH_RESET_BUFFER_VAL 0x0 +#define HASH_RESET_LEN_HIGH_VAL 0x0 +#define HASH_RESET_LEN_LOW_VAL 0x0 + +/* Control register bitfields */ +#define HASH_CR_RESUME_MASK 0x11FCF + +#define HASH_CR_SWITCHON_POS 31 +#define HASH_CR_SWITCHON_MASK MASK_BIT31 + +#define HASH_CR_EMPTYMSG_POS 20 +#define HASH_CR_EMPTYMSG_MASK MASK_BIT20 + +#define HASH_CR_DINF_POS 12 +#define HASH_CR_DINF_MASK MASK_BIT12 + +#define HASH_CR_NBW_POS 8 +#define HASH_CR_NBW_MASK 0x00000F00UL + +#define HASH_CR_LKEY_POS 16 +#define HASH_CR_LKEY_MASK MASK_BIT16 + +#define HASH_CR_ALGO_POS 7 +#define HASH_CR_ALGO_MASK MASK_BIT7 + +#define HASH_CR_MODE_POS 6 +#define HASH_CR_MODE_MASK MASK_BIT6 + +#define HASH_CR_DATAFORM_POS 4 +#define HASH_CR_DATAFORM_MASK (MASK_BIT4 | MASK_BIT5) + +#define HASH_CR_DMAE_POS 3 +#define HASH_CR_DMAE_MASK MASK_BIT3 + +#define HASH_CR_INIT_POS 2 +#define HASH_CR_INIT_MASK MASK_BIT2 + +#define HASH_CR_PRIVN_POS 1 +#define HASH_CR_PRIVN_MASK MASK_BIT1 + +#define HASH_CR_SECN_POS 0 +#define HASH_CR_SECN_MASK MASK_BIT0 + +/* Start register bitfields */ +#define HASH_STR_DCAL_POS 8 +#define HASH_STR_DCAL_MASK MASK_BIT8 + +#define HASH_STR_NBLW_POS 0 +#define HASH_STR_NBLW_MASK 0x0000001FUL + +#define HASH_NBLW_MAX_VAL 0x1F + +/* PrimeCell IDs */ +#define HASH_P_ID0 0xE0 +#define HASH_P_ID1 0x05 +#define HASH_P_ID2 0x38 
+#define HASH_P_ID3 0x00 +#define HASH_CELL_ID0 0x0D +#define HASH_CELL_ID1 0xF0 +#define HASH_CELL_ID2 0x05 +#define HASH_CELL_ID3 0xB1 + +#define HASH_SET_DIN(val) HCL_WRITE_REG(g_sys_ctx.registry[hid]->din, (val)) + +#define HASH_INITIALIZE \ + HCL_WRITE_BITS( \ + g_sys_ctx.registry[hid]->cr, \ + 0x01 << HASH_CR_INIT_POS, \ + HASH_CR_INIT_MASK) + +#define HASH_SET_DATA_FORMAT(data_format) \ + HCL_WRITE_BITS( \ + g_sys_ctx.registry[hid]->cr, \ + (u32) (data_format) << HASH_CR_DATAFORM_POS, \ + HASH_CR_DATAFORM_MASK) + +#define HASH_GET_HX(pos) \ + HCL_READ_REG(g_sys_ctx.registry[hid]->hx[pos]) + +#define HASH_SET_HX(pos, val) \ + HCL_WRITE_REG(g_sys_ctx.registry[hid]->hx[pos], (val)); + +#define HASH_SET_NBLW(val) \ + HCL_WRITE_BITS( \ + g_sys_ctx.registry[hid]->str, \ + (u32) (val) << HASH_STR_NBLW_POS, \ + HASH_STR_NBLW_MASK) + +#define HASH_SET_DCAL \ + HCL_WRITE_BITS( \ + g_sys_ctx.registry[hid]->str, \ + 0x01 << HASH_STR_DCAL_POS, \ + HASH_STR_DCAL_MASK) + +/** + * struct uint64 - Structure to handle 64 bits integers. + * @high_word: Most significant bits + * @high_word: Least significant bits + * + * Used to handle 64 bits integers. + */ +struct uint64 { + u32 high_word; + u32 low_word; +}; + +/** + * struct hash_register - Contains all registers in u8500 hash hardware. + * @cr: HASH control register (0x000) + * @din: HASH data input register (0x004) + * @str: HASH start register (0x008) + * @hx: HASH digest register 0..7 (0x00c-0x01C) + * @padding0: Reserved (0x02C) + * @itcr: Integration test control register (0x080) + * @itip: Integration test input register (0x084) + * @itop: Integration test output register (0x088) + * @padding1: Reserved (0x08C) + * @csfull: HASH context full register (0x0F8) + * @csdatain: HASH context swap data input register (0x0FC) + * @csrx: HASH context swap register 0..51 (0x100-0x1CC) + * @padding2: Reserved (0x1D0) + * @periphid0: HASH peripheral identification register 0 (0xFE0) + * @periphid1: HASH peripheral identification register 1 (0xFE4) + * @periphid2: HASH peripheral identification register 2 (0xFE8) + * @periphid3: HASH peripheral identification register 3 (0xFEC) + * @cellid0: HASH PCell identification register 0 (0xFF0) + * @cellid1: HASH PCell identification register 1 (0xFF4) + * @cellid2: HASH PCell identification register 2 (0xFF8) + * @cellid3: HASH PCell identification register 3 (0xFFC) + * + * The device communicates to the HASH via 32-bit-wide control registers + * accessible via the 32-bit width AMBA rev. 2.0 AHB Bus. Below is a structure + * with the registers used. + */ +struct hash_register { + u32 cr; + u32 din; + u32 str; + u32 hx[8]; + + u32 padding0[(0x080 - 0x02C) >> 2]; + + u32 itcr; + u32 itip; + u32 itop; + + u32 padding1[(0x0F8 - 0x08C) >> 2]; + + u32 csfull; + u32 csdatain; + u32 csrx[HASH_CSR_COUNT]; + + u32 padding2[(0xFE0 - 0x1D0) >> 2]; + + u32 periphid0; + u32 periphid1; + u32 periphid2; + u32 periphid3; + + u32 cellid0; + u32 cellid1; + u32 cellid2; + u32 cellid3; +}; + +/** + * struct hash_state - Hash context state. 
+ * @temp_cr: Temporary HASH Control Register + * @str_reg: HASH Start Register + * @din_reg: HASH Data Input Register + * @csr[52]: HASH Context Swap Registers 0-39 + * @csfull: HASH Context Swap Registers 40 ie Status flags + * @csdatain: HASH Context Swap Registers 41 ie Input data + * @buffer: Working buffer for messages going to the hardware + * @length: Length of the part of the message hashed so far (floor(N/64) * 64) + * @index: Valid number of bytes in buffer (N % 64) + * @bit_index: Valid number of bits in buffer (N % 8) + * + * This structure is used between context switches, i.e. when ongoing jobs are + * interupted with new jobs. When this happens we need to store intermediate + * results in software. + * + * WARNING: "index" is the member of the structure, to be sure that "buffer" + * is aligned on a 4-bytes boundary. This is highly implementation dependent + * and MUST be checked whenever this code is ported on new platforms. + */ +struct hash_state { + u32 temp_cr; + u32 str_reg; + u32 din_reg; + u32 csr[52]; + u32 csfull; + u32 csdatain; + u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)]; + struct uint64 length; + u8 index; + u8 bit_index; +}; + +/** + * struct hash_system_context - Structure for the global system context. + * @registry: Pointer to the registry of the hash hardware + * @state: State of the hash device + */ +struct hash_system_context { + /* Pointer to HASH registers structure */ + volatile struct hash_register *registry[MAX_HASH_DEVICE]; + + /* State of HASH device */ + struct hash_state state[MAX_HASH_DEVICE]; +}; + +/** + * enum hash_device_id - HASH device ID. + * @HASH_DEVICE_ID_0: Hash hardware with ID 0 + * @HASH_DEVICE_ID_1: Hash hardware with ID 1 + */ +enum hash_device_id { + HASH_DEVICE_ID_0 = 0, + HASH_DEVICE_ID_1 = 1 +}; + +/** + * enum hash_data_format - HASH data format. + * @HASH_DATA_32_BITS: 32 bits data format + * @HASH_DATA_16_BITS: 16 bits data format + * @HASH_DATA_8_BITS: 8 bits data format + * @HASH_DATA_1_BITS: 1 bit data format + */ +enum hash_data_format { + HASH_DATA_32_BITS = 0x0, + HASH_DATA_16_BITS = 0x1, + HASH_DATA_8_BITS = 0x2, + HASH_DATA_1_BIT = 0x3 +}; + +/** + * enum hash_device_state - Device state + * @DISABLE: Disable the hash hardware + * @ENABLE: Enable the hash hardware + */ +enum hash_device_state { + DISABLE = 0, + ENABLE = 1 +}; + +/** + * struct hash_protection_config - Device protection configuration. + * @privilege_access: FIXME, add comment. + * @secure_access: FIXME, add comment. + */ +struct hash_protection_config { + int privilege_access; + int secure_access; +}; + +/** + * enum hash_input_status - Data Input flag status. + * @HASH_DIN_EMPTY: Indicates that nothing is in data registers + * @HASH_DIN_FULL: Indicates that data registers are full + */ +enum hash_input_status { + HASH_DIN_EMPTY = 0, + HASH_DIN_FULL = 1 +}; + +/** + * Number of words already pushed + */ +enum hash_nbw_pushed { + HASH_NBW_00 = 0x00, + HASH_NBW_01 = 0x01, + HASH_NBW_02 = 0x02, + HASH_NBW_03 = 0x03, + HASH_NBW_04 = 0x04, + HASH_NBW_05 = 0x05, + HASH_NBW_06 = 0x06, + HASH_NBW_07 = 0x07, + HASH_NBW_08 = 0x08, + HASH_NBW_09 = 0x09, + HASH_NBW_10 = 0x0A, + HASH_NBW_11 = 0x0B, + HASH_NBW_12 = 0x0C, + HASH_NBW_13 = 0x0D, + HASH_NBW_14 = 0x0E, + HASH_NBW_15 = 0x0F +}; + +/** + * struct hash_device_status - Device status for DINF, NBW, and NBLW bit + * fields. 
+ * @dinf_status: HASH data in full flag + * @nbw_status: Number of words already pushed + * @nblw_status: Number of Valid Bits Last Word of the Message + */ +struct hash_device_status { + int dinf_status; + int nbw_status; + u8 nblw_status; +}; + +/** + * enum hash_dma_request - Enumeration for HASH DMA request types. + */ +enum hash_dma_request { + HASH_DISABLE_DMA_REQ = 0x0, + HASH_ENABLE_DMA_REQ = 0x1 +}; + +/** + * enum hash_digest_cal - Enumeration for digest calculation. + * @HASH_DISABLE_DCAL: Indicates that DCAL bit is not set/used. + * @HASH_ENABLE_DCAL: Indicates that DCAL bit is set/used. + */ +enum hash_digest_cal { + HASH_DISABLE_DCAL = 0x0, + HASH_ENABLE_DCAL = 0x1 +}; + +/** + * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm + * @HASH_ALGO_SHA1: Indicates that SHA1 is used. + * @HASH_ALGO_SHA2: Indicates that SHA2 (SHA256) is used. + */ +enum hash_algo { + HASH_ALGO_SHA1 = 0x0, + HASH_ALGO_SHA2 = 0x1 +}; + +/** + * enum hash_op - Enumeration for selecting between HASH or HMAC mode + * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode + * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC + */ +enum hash_op { + HASH_OPER_MODE_HASH = 0x0, + HASH_OPER_MODE_HMAC = 0x1 +}; + +/** + * enum hash_key_type - Enumeration for selecting between long and short key. + * @HASH_SHORT_KEY: Key used is shorter or equal to block size (64 bytes) + * @HASH_LONG_KEY: Key used is greater than block size (64 bytes) + */ +enum hash_key_type { + HASH_SHORT_KEY = 0x0, + HASH_LONG_KEY = 0x1 +}; + +/** + * struct hash_config - Configuration data for the hardware + * @data_format: Format of data entered into the hash data in register + * @algorithm: Algorithm selection bit + * @oper_mode: Operating mode selection bit + * @hmac_key: Long key selection bit HMAC mode + */ +struct hash_config { + int data_format; + int algorithm; + int oper_mode; + int hmac_key; +}; + + +/** + * enum hash_error - Error codes for hash. 
+ */ +enum hash_error { + HASH_OK = 0, + HASH_MSG_LENGTH_OVERFLOW, + HASH_INTERNAL_ERROR, + HASH_NOT_CONFIGURED, + HASH_REQUEST_PENDING, + HASH_REQUEST_NOT_APPLICABLE, + HASH_INVALID_PARAMETER, + HASH_UNSUPPORTED_FEATURE, + HASH_UNSUPPORTED_HW +}; + +int hash_init_base_address(int hash_device_id, t_logical_address base_address); + +int HASH_GetVersion(t_version *p_version); + +int HASH_Reset(int hash_devive_id); + +int HASH_ConfigureDmaRequest(int hash_device_id, int request_state); + +int HASH_ConfigureLastValidBits(int hash_device_id, u8 nblw_val); + +int HASH_ConfigureDigestCal(int hash_device_id, int dcal_state); + +int HASH_ConfigureProtection(int hash_device_id, + struct hash_protection_config + *p_protect_config); + +int hash_setconfiguration(int hash_device_id, struct hash_config *p_config); + +int hash_begin(int hash_device_id); + +int hash_get_digest(int hash_device_id, u8 digest[HASH_MSG_DIGEST_SIZE]); + +int HASH_ClockGatingOff(int hash_device_id); + +struct hash_device_status HASH_GetDeviceStatus(int hash_device_id); + +t_bool HASH_IsDcalOngoing(int hash_device_id); + +int hash_hw_update(int hash_device_id, + const u8 *p_data_buffer, + u32 msg_length); + +int hash_end(int hash_device_id, u8 digest[HASH_MSG_DIGEST_SIZE]); + +int hash_compute(int hash_device_id, + const u8 *p_data_buffer, + u32 msg_length, + struct hash_config *p_hash_config, + u8 digest[HASH_MSG_DIGEST_SIZE]); + +int hash_end_key(int hash_device_id); + +int hash_save_state(int hash_device_id, struct hash_state *state); + +int hash_resume_state(int hash_device_id, const struct hash_state *state); + +#ifdef __cplusplus +} +#endif +#endif + diff --git a/drivers/crypto/ux500/hash/hash_alg_p.h b/drivers/crypto/ux500/hash/hash_alg_p.h new file mode 100755 index 00000000000..c85faaeba6f --- /dev/null +++ b/drivers/crypto/ux500/hash/hash_alg_p.h @@ -0,0 +1,26 @@ +/*****************************************************************************/ +/** +* � ST-Ericsson, 2009 - All rights reserved +* Reproduction and Communication of this document is strictly prohibited +* unless specifically authorized in writing by ST-Ericsson +* +* static Header file of HASH Processor +* Specification release related to this implementation: A_V2.2 +* AUTHOR : ST-Ericsson +*/ +/*****************************************************************************/ + +#ifndef _HASH_P_H_ +#define _HASH_P_H_ + +/*--------------------------------------------------------------------------* + * Includes * + *--------------------------------------------------------------------------*/ +#include "hash_alg.h" + +/*--------------------------------------------------------------------------* + * Defines * + *--------------------------------------------------------------------------*/ + +#endif /* End _HASH_P_H_ */ + diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c new file mode 100755 index 00000000000..fd5f8a870bf --- /dev/null +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -0,0 +1,1756 @@ +/* + * Cryptographic API. + * + * Support for Nomadik hardware crypto engine. + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "hash_alg.h" + +#define DRIVER_NAME "DRIVER HASH" +/* enables/disables debug msgs */ +#define DRIVER_DEBUG 1 +#define DRIVER_DEBUG_PFX DRIVER_NAME +#define DRIVER_DBG KERN_ERR + +#define MAX_HASH_DIGEST_BYTE_SIZE 32 +#define HASH_BLOCK_BYTE_SIZE 64 + +#define HASH_ACC_SYNC_CONTROL +#ifdef HASH_ACC_SYNC_CONTROL +static struct mutex hash_hw_acc_mutex; +#endif + +int debug; +static int mode; +static int contextsaving; +static struct hash_system_context g_sys_ctx; + +/** + * struct hash_driver_data - IO Base and clock. + * @base: The IO base for the block + * @clk: FIXME, add comment + */ +struct hash_driver_data { + void __iomem *base; + struct clk *clk; +}; + +/** + * struct hash_ctx - The context used for hash calculations. + * @key: The key used in the operation + * @keylen: The length of the key + * @updated: Indicates if hardware is initialized for new operations + * @state: The state of the current calculations + * @config: The current configuration + */ +struct hash_ctx { + u8 key[HASH_BLOCK_BYTE_SIZE]; + u32 keylen; + u8 updated; + struct hash_state state; + struct hash_config config; +}; + +/** + * struct hash_tfm_ctx - Transform context + * @key: The key stored in the transform context + * @keylen: The length of the key in the transform context + */ +struct hash_tfm_ctx { + u8 key[HASH_BLOCK_BYTE_SIZE]; + u32 keylen; +}; + +/* Declaration of functions */ +static void hash_messagepad(int hid, const u32 *message, u8 index_bytes); + +/** + * hexdump - Dumps buffers in hex. + * @buf: The buffer to dump + * @len: The length of the buffer + */ +static void hexdump(unsigned char *buf, unsigned int len) +{ + print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); +} + +/** + * clear_reg_str - Clear the registry hash_str. + * @hid: Hardware device ID + * + * This function will clear the dcal bit and the nblw bits. + */ +static inline void clear_reg_str(int hid) +{ + /* We will only clear the valid registers and not the reserved */ + g_sys_ctx.registry[hid]->str &= ~HASH_STR_DCAL_MASK; + g_sys_ctx.registry[hid]->str &= ~HASH_STR_NBLW_MASK; +} + +/** + * write_nblw - Writes the number of valid bytes to nblw. + * @hid: Hardware device ID + * @bytes: The number of valid bytes in last word of a message + * + * Note that this function only writes, i.e. it does not clear the registry + * before it writes the new data. + */ +static inline void write_nblw(int hid, int bytes) +{ + g_sys_ctx.registry[hid]->str |= + ((bytes * 8) & HASH_STR_NBLW_MASK); +} + +/** + * write_dcal - Write/set the dcal bit. + * @hid: Hardware device ID + */ +static inline void write_dcal(int hid) +{ + g_sys_ctx.registry[hid]->str |= (1 << HASH_STR_DCAL_POS); +} + +/** + * pad_message - Function that pads a message. + * @hid: Hardware device ID + * + * FIXME: This function should be replaced. + */ +static inline void pad_message(int hid) +{ + hash_messagepad(hid, g_sys_ctx.state[hid].buffer, + g_sys_ctx.state[hid].index); +} + +/** + * write_key - Writes the key to the hardware registries. + * @hid: Hardware device ID + * @key: The key used in the operation + * @keylen: The length of the key + * + * Note that in this function we DO NOT write to the NBLW registry even though + * the hardware reference manual says so. There must be incorrect information in + * the manual or there must be a bug in the state machine in the hardware. 
+ */ +static void write_key(int hid, const u8 *key, u32 keylen) +{ + u32 word = 0; + clear_reg_str(hid); + + while (keylen >= 4) { + word = ((u32) (key[3] & 255) << 24) | + ((u32) (key[2] & 255) << 16) | + ((u32) (key[1] & 255) << 8) | + ((u32) (key[0] & 255)); + + HASH_SET_DIN(word); + keylen -= 4; + key += 4; + } + + /* This takes care of the remaining bytes on the last word */ + if (keylen) { + word = 0; + while (keylen) { + word |= (key[keylen - 1] << (8 * (keylen - 1))); + keylen--; + } + HASH_SET_DIN(word); + } + + write_dcal(hid); +} + +/** + * init_hash_hw - Initialise the hash hardware for a new calculation. + * @desc: The hash descriptor for the job + * + * This function will enable the bits needed to clear and start a new + * calculation. + */ +static int init_hash_hw(struct shash_desc *desc) +{ + int ret = 0; + int hash_error = HASH_OK; + struct hash_ctx *ctx = shash_desc_ctx(desc); + + stm_dbg(debug, "[init_hash_hw] (ctx=0x%x)!", (u32)ctx); + + hash_error = hash_setconfiguration(HASH_DEVICE_ID_1, &ctx->config); + if (hash_error != HASH_OK) { + stm_error("hash_setconfiguration() failed!"); + ret = -1; + goto out; + } + + hash_error = hash_begin(HASH_DEVICE_ID_1); + if (hash_error != HASH_OK) { + stm_error("hash_begin() failed!"); + ret = -1; + goto out; + } + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) { + stm_dbg(debug, "[init_hash_hw] update key=0x%0x, len=%d", + (u32) ctx->key, ctx->keylen); + write_key(HASH_DEVICE_ID_1, ctx->key, ctx->keylen); + } + +out: + return ret; +} + +/** + * hash_init - Common hash init function for SHA1/SHA2 (SHA256). + * @desc: The hash descriptor for the job + * + * Initialize structures and copy the key from the transform context to the + * descriptor context if the mode is HMAC. + */ +static int hash_init(struct shash_desc *desc) +{ + struct hash_ctx *ctx = shash_desc_ctx(desc); + struct hash_tfm_ctx *tfm_ctx = crypto_tfm_ctx(&desc->tfm->base); + + stm_dbg(debug, "[hash_init]: (ctx=0x%x)!", (u32)ctx); + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) { + if (tfm_ctx->key) { + memcpy(ctx->key, tfm_ctx->key, tfm_ctx->keylen); + ctx->keylen = tfm_ctx->keylen; + } + } + + memset(&ctx->state, 0, sizeof(struct hash_state)); + ctx->updated = 0; + + return 0; +} + +/** + * hash_update - The hash update function for SHA1/SHA2 (SHA256). 
+ * @desc: The hash descriptor for the job + * @data: Message that should be hashed + * @len: The length of the message that should be hashed + */ +static int hash_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + int ret = 0; + int hash_error = HASH_OK; + struct hash_ctx *ctx = shash_desc_ctx(desc); + + stm_dbg(debug, "[hash_update]: (ctx=0x%x, data=0x%x, len=%d)!", + (u32)ctx, (u32)data, len); + +#ifdef HASH_ACC_SYNC_CONTROL + mutex_lock(&hash_hw_acc_mutex); +#endif + + if (!ctx->updated) { + ret = init_hash_hw(desc); + if (ret) { + stm_error("init_hash_hw() failed!"); + goto out; + } + } + + if (contextsaving) { + if (ctx->updated) { + hash_error = + hash_resume_state(HASH_DEVICE_ID_1, &ctx->state); + if (hash_error != HASH_OK) { + stm_error("hash_resume_state() failed!"); + ret = -1; + goto out; + } + } + } + + /* NOTE: The length of the message is in the form of number of bits */ + hash_error = hash_hw_update(HASH_DEVICE_ID_1, data, len * 8); + if (hash_error != HASH_OK) { + stm_error("hash_hw_update() failed!"); + ret = -1; + goto out; + } + + if (contextsaving) { + hash_error = + hash_save_state(HASH_DEVICE_ID_1, &ctx->state); + if (hash_error != HASH_OK) { + stm_error("hash_save_state() failed!"); + ret = -1; + goto out; + } + + } + ctx->updated = 1; + +out: +#ifdef HASH_ACC_SYNC_CONTROL + mutex_unlock(&hash_hw_acc_mutex); +#endif + return ret; +} + +/** + * hash_final - The hash final function for SHA1/SHA2 (SHA256). + * @desc: The hash descriptor for the job + * @out: Pointer for the calculated digest + */ +static int hash_final(struct shash_desc *desc, u8 *out) +{ + int ret = 0; + int hash_error = HASH_OK; + struct hash_ctx *ctx = shash_desc_ctx(desc); + + int digestsize = crypto_shash_digestsize(desc->tfm); + u8 digest[HASH_MSG_DIGEST_SIZE]; + + stm_dbg(debug, "[hash_final]: (ctx=0x%x)!", (u32) ctx); + +#ifdef HASH_ACC_SYNC_CONTROL + mutex_lock(&hash_hw_acc_mutex); +#endif + + if (contextsaving) { + hash_error = hash_resume_state(HASH_DEVICE_ID_1, &ctx->state); + + if (hash_error != HASH_OK) { + stm_error("hash_resume_state() failed!"); + ret = -1; + goto out; + } + } + + pad_message(HASH_DEVICE_ID_1); + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) + write_key(HASH_DEVICE_ID_1, ctx->key, ctx->keylen); + + hash_error = hash_get_digest(HASH_DEVICE_ID_1, digest); + + memcpy(out, digest, digestsize); + +out: +#ifdef HASH_ACC_SYNC_CONTROL + mutex_unlock(&hash_hw_acc_mutex); +#endif + + return ret; +} + +/** + * hash_setkey - The setkey function for providing the key during HMAC + * calculations. 
+ * @tfm: Pointer to the transform + * @key: The key used in the operation + * @keylen: The length of the key + * @alg: The algorithm to use in the operation + */ +static int hash_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen, int alg) +{ + int ret = 0; + int hash_error = HASH_OK; + + struct hash_tfm_ctx *ctx_tfm = crypto_shash_ctx(tfm); + + stm_dbg(debug, "[hash_setkey]: (ctx_tfm=0x%x, key=0x%x, keylen=%d)!", + (u32) ctx_tfm, (u32) key, keylen); + + /* Truncate the key to block size */ + if (keylen > HASH_BLOCK_BYTE_SIZE) { + struct hash_config config; + u8 digest[MAX_HASH_DIGEST_BYTE_SIZE]; + unsigned int digestsize = crypto_shash_digestsize(tfm); + + config.algorithm = alg; + config.data_format = HASH_DATA_8_BITS; + config.oper_mode = HASH_OPER_MODE_HASH; + +#ifdef HASH_ACC_SYNC_CONTROL + mutex_lock(&hash_hw_acc_mutex); +#endif + hash_error = hash_compute(HASH_DEVICE_ID_1, key, keylen * 8, + &config, digest); +#ifdef HASH_ACC_SYNC_CONTROL + mutex_unlock(&hash_hw_acc_mutex); +#endif + if (hash_error != HASH_OK) { + stm_error("Error: hash_compute() failed!"); + ret = -1; + goto out; + } + + memcpy(ctx_tfm->key, digest, digestsize); + ctx_tfm->keylen = digestsize; + } else { + memcpy(ctx_tfm->key, key, keylen); + ctx_tfm->keylen = keylen; + } + +out: + return ret; +} + +/** + * sha1_init - SHA1 init function. + * @desc: The hash descriptor for the job + */ +static int sha1_init(struct shash_desc *desc) +{ + struct hash_ctx *ctx = shash_desc_ctx(desc); + + stm_dbg(debug, "[sha1_init]: (ctx=0x%x)!", (u32) ctx); + + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = HASH_ALGO_SHA1; + ctx->config.oper_mode = HASH_OPER_MODE_HASH; + + return hash_init(desc); +} + +/** + * sha256_init - SHA2 (SHA256) init function. + * @desc: The hash descriptor for the job + */ +static int sha256_init(struct shash_desc *desc) +{ + struct hash_ctx *ctx = shash_desc_ctx(desc); + + stm_dbg(debug, "[sha256_init]: (ctx=0x%x)!", (u32) ctx); + + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = HASH_ALGO_SHA2; + ctx->config.oper_mode = HASH_OPER_MODE_HASH; + + return hash_init(desc); +} + +/** + * hmac_sha1_init - SHA1 HMAC init function. + * @desc: The hash descriptor for the job + */ +static int hmac_sha1_init(struct shash_desc *desc) +{ + struct hash_ctx *ctx = shash_desc_ctx(desc); + + stm_dbg(debug, "[hmac_sha1_init]: (ctx=0x%x)!", (u32) ctx); + + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = HASH_ALGO_SHA1; + ctx->config.oper_mode = HASH_OPER_MODE_HMAC; + ctx->config.hmac_key = HASH_SHORT_KEY; + + return hash_init(desc); +} + +/** + * hmac_sha256_init - SHA2 (SHA256) HMAC init function. + * @desc: The hash descriptor for the job + */ +static int hmac_sha256_init(struct shash_desc *desc) +{ + struct hash_ctx *ctx = shash_desc_ctx(desc); + + stm_dbg(debug, "[hmac_sha256_init]: (ctx=0x%x)!", (u32) ctx); + + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = HASH_ALGO_SHA2; + ctx->config.oper_mode = HASH_OPER_MODE_HMAC; + ctx->config.hmac_key = HASH_SHORT_KEY; + + return hash_init(desc); +} + +/** + * hmac_sha1_setkey - SHA1 HMAC setkey function. 
+ * @tfm: Pointer to the transform + * @key: The key used in the operation + * @keylen: The length of the key + */ +static int hmac_sha1_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) +{ + stm_dbg(debug, "[hmac_sha1_setkey]: (tfm=0x%x, key=0x%x, keylen=%d)!", + (u32) tfm, (u32) key, keylen); + + return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); +} + +/** + * hmac_sha256_setkey - SHA2 (SHA256) HMAC setkey function. + * @tfm: Pointer to the transform + * @key: The key used in the operation + * @keylen: The length of the key + */ +static int hmac_sha256_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) +{ + stm_dbg(debug, "[hmac_sha256_setkey]: (tfm=0x%x, key=0x%x, keylen=%d)!", + (u32) tfm, (u32) key, keylen); + + return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA2); +} + +static struct shash_alg sha1_alg = { + .digestsize = SHA1_DIGEST_SIZE, + .init = sha1_init, + .update = hash_update, + .final = hash_final, + .descsize = sizeof(struct hash_ctx), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-u8500", + .cra_flags = CRYPTO_ALG_TYPE_DIGEST | + CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_tfm_ctx), + .cra_module = THIS_MODULE, + } +}; + +static struct shash_alg sha256_alg = { + .digestsize = SHA256_DIGEST_SIZE, + .init = sha256_init, + .update = hash_update, + .final = hash_final, + .descsize = sizeof(struct hash_ctx), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-u8500", + .cra_flags = CRYPTO_ALG_TYPE_DIGEST | + CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_tfm_ctx), + .cra_module = THIS_MODULE, + } +}; + +static struct shash_alg hmac_sha1_alg = { + .digestsize = SHA1_DIGEST_SIZE, + .init = hmac_sha1_init, + .update = hash_update, + .final = hash_final, + .setkey = hmac_sha1_setkey, + .descsize = sizeof(struct hash_ctx), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac(sha1-u8500)", + .cra_flags = CRYPTO_ALG_TYPE_DIGEST | + CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_tfm_ctx), + .cra_module = THIS_MODULE, + } +}; + +static struct shash_alg hmac_sha256_alg = { + .digestsize = SHA256_DIGEST_SIZE, + .init = hmac_sha256_init, + .update = hash_update, + .final = hash_final, + .setkey = hmac_sha256_setkey, + .descsize = sizeof(struct hash_ctx), + .base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "hmac(sha256-u8500)", + .cra_flags = CRYPTO_ALG_TYPE_DIGEST | + CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_tfm_ctx), + .cra_module = THIS_MODULE, + } +}; + +/** + * u8500_hash_probe - Function that probes the hash hardware. 
+ * @pdev: The platform device + */ +static int u8500_hash_probe(struct platform_device *pdev) +{ + int ret = 0; + int hash_error = HASH_OK; + struct resource *res = NULL; + struct hash_driver_data *hash_drv_data; + + stm_dbg(debug, "[u8500_hash_probe]: (pdev=0x%x)", (u32) pdev); + + stm_dbg(debug, "[u8500_hash_probe]: Calling kzalloc()!"); + hash_drv_data = kzalloc(sizeof(struct hash_driver_data), GFP_KERNEL); + if (!hash_drv_data) { + stm_dbg(debug, "kzalloc() failed!"); + ret = -ENOMEM; + goto out; + } + + stm_dbg(debug, "[u8500_hash_probe]: Calling platform_get_resource()!"); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + stm_dbg(debug, "platform_get_resource() failed"); + ret = -ENODEV; + goto out_kfree; + } + + stm_dbg(debug, "[u8500_hash_probe]: Calling request_mem_region()!"); + res = request_mem_region(res->start, res->end - res->start + 1, + pdev->name); + if (res == NULL) { + stm_dbg(debug, "request_mem_region() failed"); + ret = -EBUSY; + goto out_kfree; + } + + stm_dbg(debug, "[u8500_hash_probe]: Calling ioremap()!"); + hash_drv_data->base = ioremap(res->start, res->end - res->start + 1); + if (!hash_drv_data->base) { + stm_error + ("[u8500_hash] ioremap of hash1 register memory failed!"); + ret = -ENOMEM; + goto out_free_mem; + } + + stm_dbg(debug, "[u8500_hash_probe]: Calling clk_get()!"); + /* Enable the clk for HASH1 hardware block */ + hash_drv_data->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(hash_drv_data->clk)) { + stm_error("clk_get() failed!"); + ret = PTR_ERR(hash_drv_data->clk); + goto out_unmap; + } + + stm_dbg(debug, "[u8500_hash_probe]: Calling clk_enable()!"); + ret = clk_enable(hash_drv_data->clk); + if (ret) { + stm_error("clk_enable() failed!"); + goto out_unmap; + } + + stm_dbg(debug, + "[u8500_hash_probe]: Calling hash_init_base_address()->" + "(base=0x%x,DEVICE_ID=%d)!", + (u32) hash_drv_data->base, HASH_DEVICE_ID_1); + + /* Setting base address */ + hash_error = + hash_init_base_address(HASH_DEVICE_ID_1, + (t_logical_address) hash_drv_data->base); + if (hash_error != HASH_OK) { + stm_error("hash_init_base_address() failed!"); + ret = -1; /*TODO: what error code should be used here!? 
*/ + goto out_clk; + } +#ifdef HASH_ACC_SYNC_CONTROL + stm_dbg(debug, "[u8500_hash_probe]: Calling mutex_init()!"); + mutex_init(&hash_hw_acc_mutex); +#endif + + if (mode == 0) { + stm_dbg(debug, + "[u8500_hash_probe]: To register all algorithms!"); + + ret = crypto_register_shash(&sha1_alg); + if (ret) { + stm_error("Could not register sha1_alg!"); + goto out_clk; + } + stm_dbg(debug, "[u8500_hash_probe]: sha1_alg registered!"); + + ret = crypto_register_shash(&sha256_alg); + if (ret) { + stm_error("Could not register sha256_alg!"); + goto out_unreg1; + } + stm_dbg(debug, "[u8500_hash_probe]: sha256_alg registered!"); + + ret = crypto_register_shash(&hmac_sha1_alg); + if (ret) { + stm_error("Could not register hmac_sha1_alg!"); + goto out_unreg2; + } + stm_dbg(debug, "[u8500_hash_probe]: hmac_sha1_alg registered!"); + + ret = crypto_register_shash(&hmac_sha256_alg); + if (ret) { + stm_error("Could not register hmac_sha256_alg!"); + goto out_unreg3; + } + stm_dbg(debug, + "[u8500_hash_probe]: hmac_sha256_alg registered!"); + } + + if (mode == 10) { + stm_dbg(debug, + "[u8500_hash_probe]: To register only sha1 and sha256" + " algorithms!"); + + ret = crypto_register_shash(&sha1_alg); + if (ret) { + stm_error("Could not register sha1_alg!"); + goto out_clk; + } + + ret = crypto_register_shash(&sha256_alg); + if (ret) { + stm_error("Could not register sha256_alg!"); + goto out_unreg1_tmp; + } + } + + stm_dbg(debug, "[u8500_hash_probe]: Calling platform_set_drvdata()!"); + platform_set_drvdata(pdev, hash_drv_data); + return 0; + + if (mode == 0) { +out_unreg1: + crypto_unregister_shash(&sha1_alg); +out_unreg2: + crypto_unregister_shash(&sha256_alg); +out_unreg3: + crypto_unregister_shash(&hmac_sha1_alg); + } + + if (mode == 10) { +out_unreg1_tmp: + crypto_unregister_shash(&sha1_alg); + } + +out_clk: + clk_disable(hash_drv_data->clk); + clk_put(hash_drv_data->clk); + +out_unmap: + iounmap(hash_drv_data->base); + +out_free_mem: + release_mem_region(res->start, res->end - res->start + 1); + +out_kfree: + kfree(hash_drv_data); +out: + return ret; +} + +/** + * u8500_hash_remove - Function that removes the hash device from the platform. 
+ * @pdev: The platform device + */ +static int u8500_hash_remove(struct platform_device *pdev) +{ + struct resource *res; + struct hash_driver_data *hash_drv_data; + + stm_dbg(debug, "[u8500_hash_remove]: (pdev=0x%x)", (u32) pdev); + + stm_dbg(debug, "[u8500_hash_remove]: Calling platform_get_drvdata()!"); + hash_drv_data = platform_get_drvdata(pdev); + + if (mode == 0) { + stm_dbg(debug, + "[u8500_hash_remove]: To unregister all algorithms!"); + crypto_unregister_shash(&sha1_alg); + crypto_unregister_shash(&sha256_alg); + crypto_unregister_shash(&hmac_sha1_alg); + crypto_unregister_shash(&hmac_sha256_alg); + } + + if (mode == 10) { + stm_dbg(debug, + "[u8500_hash_remove]: To unregister only sha1 and " + "sha256 algorithms!"); + crypto_unregister_shash(&sha1_alg); + crypto_unregister_shash(&sha256_alg); + } +#ifdef HASH_ACC_SYNC_CONTROL + stm_dbg(debug, "[u8500_hash_remove]: Calling mutex_destroy()!"); + mutex_destroy(&hash_hw_acc_mutex); +#endif + + stm_dbg(debug, "[u8500_hash_remove]: Calling clk_disable()!"); + clk_disable(hash_drv_data->clk); + + stm_dbg(debug, "[u8500_hash_remove]: Calling clk_put()!"); + clk_put(hash_drv_data->clk); + + stm_dbg(debug, "[u8500_hash_remove]: Calling iounmap(): base = 0x%x", + (u32) hash_drv_data->base); + iounmap(hash_drv_data->base); + + stm_dbg(debug, "[u8500_hash_remove]: Calling platform_get_resource()!"); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + stm_dbg(debug, + "[u8500_hash_remove]: Calling release_mem_region()" + "->res->start=0x%x, res->end = 0x%x!", + res->start, res->end); + release_mem_region(res->start, res->end - res->start + 1); + + stm_dbg(debug, "[u8500_hash_remove]: Calling kfree()!"); + kfree(hash_drv_data); + + return 0; +} + +static struct platform_driver hash_driver = { + .probe = u8500_hash_probe, + .remove = u8500_hash_remove, + .driver = { + .owner = THIS_MODULE, + .name = "hash1", + }, +}; + +/** + * u8500_hash_mod_init - The kernel module init function. + */ +static int __init u8500_hash_mod_init(void) +{ + stm_dbg(debug, "u8500_hash_mod_init() is called!"); + + return platform_driver_register(&hash_driver); +} + +/** + * u8500_hash_mod_fini - The kernel module exit function. + */ +static void __exit u8500_hash_mod_fini(void) +{ + stm_dbg(debug, "u8500_hash_mod_fini() is called!"); + + platform_driver_unregister(&hash_driver); + return; +} + +/** + * hash_processblock - This function processes a single block of 512 bits (64 + * bytes), word aligned, starting at message. + * @hid: Hardware device ID + * @message: Block (512 bits) of message to be written to the HASH hardware + * + * Reentrancy: Non Re-entrant. + */ +static void hash_processblock(int hid, const u32 *message) +{ + u32 count; + + HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->str, + HASH_STR_DCAL_MASK); + HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->str, + HASH_STR_NBLW_MASK); + + /* Partially unrolled loop */ + for (count = 0; count < (HASH_BLOCK_SIZE / sizeof(u32)); + count += 4) { + HASH_SET_DIN(message[0]); + HASH_SET_DIN(message[1]); + HASH_SET_DIN(message[2]); + HASH_SET_DIN(message[3]); + message += 4; + } +} + +/** + * hash_messagepad - Pads a message and write the nblw bits. + * @hid: Hardware device ID + * @message: Last word of a message + * @index_bytes: The number of bytes in the last message + * + * This function manages the final part of the digest calculation, when less + * than 512 bits (64 bytes) remain in message. This means index_bytes < 64. + * + * Reentrancy: Non Re-entrant. 
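+ *
+ * Worked example (illustrative only): with index_bytes = 5 the main
+ * loop writes one full word, the tail write pushes one more word, and
+ * NBLW is set to 8, i.e. one valid byte in the last DATAIN word.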
+ */ +static void hash_messagepad(int hid, const u32 *message, u8 index_bytes) +{ + stm_dbg(debug, "[u8500_hash_alg] hash_messagepad" + "(bytes in final msg=%d))", index_bytes); + + clear_reg_str(hid); + + /* Main loop */ + while (index_bytes >= 4) { + HASH_SET_DIN(message[0]); + index_bytes -= 4; + message++; + } + + if (index_bytes) + HASH_SET_DIN(message[0]); + + /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ + HASH_SET_NBLW(index_bytes * 8); + stm_dbg(debug, "[u8500_hash_alg] hash_messagepad -> DIN=0x%08x NBLW=%d", + g_sys_ctx.registry[hid]->din, + g_sys_ctx.registry[hid]->str); + HASH_SET_DCAL; + stm_dbg(debug, "[u8500_hash_alg] hash_messagepad d -> " + "DIN=0x%08x NBLW=%d", + g_sys_ctx.registry[hid]->din, + g_sys_ctx.registry[hid]->str); + +} + +/** + * hash_incrementlength - Increments the length of the current message. + * @hid: Hardware device ID + * @incr: Length of message processed already + * + * Overflow cannot occur, because conditions for overflow are checked in + * hash_hw_update. + */ +static void hash_incrementlength(int hid, u32 incr) +{ + g_sys_ctx.state[hid].length.low_word += incr; + + /* Check for wrap-around */ + if (g_sys_ctx.state[hid].length.low_word < incr) + g_sys_ctx.state[hid].length.high_word++; +} + +/** + * hash_setconfiguration - Sets the required configuration for the hash + * hardware. + * @hid: Hardware device ID + * @p_config: Pointer to a configuration structure + * + * Reentrancy: Non Re-entrant + * Reentrancy issues: + * 1. Global variable registry(cofiguration register, + * parameter register, divider register) is being modified + * + * Comments 1. : User need to call hash_begin API after calling this + * API i.e. the current configuration is set only when + * bit INIT is set and we set INIT bit in hash_begin. + * Changing the configuration during a computation has + * no effect so we first set configuration by calling + * this API and then set the INIT bit for the HASH + * processor and the curent configuration is taken into + * account. As reading INIT bit (with correct protection + * rights) will always return 0b so we can't make a check + * at software level. So the user has to initialize the + * device for new configuration to take in to effect. + * 2. The default value of data format is 00b ie the format + * of data entered in HASH_DIN register is 32-bit data. + * The data written in HASH_DIN is used directly by the + * HASH processing, without re ordering. 
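+ *
+ *		3. A minimal call-order sketch (illustrative only,
+ *		   return codes ignored; hash_compute() shows the
+ *		   full sequence):
+ *
+ *			struct hash_config config;
+ *
+ *			config.data_format = HASH_DATA_8_BITS;
+ *			config.algorithm = HASH_ALGO_SHA1;
+ *			config.oper_mode = HASH_OPER_MODE_HASH;
+ *			hash_setconfiguration(HASH_DEVICE_ID_1, &config);
+ *			hash_begin(HASH_DEVICE_ID_1);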
+ */ +int hash_setconfiguration(int hid, struct hash_config *p_config) +{ + int hash_error = HASH_OK; + + stm_dbg(debug, "[u8500_hash_alg] hash_setconfiguration())"); + + if (!((HASH_DEVICE_ID_0 == hid) + || (HASH_DEVICE_ID_1 == hid))) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + HASH_SET_DATA_FORMAT(p_config->data_format); + + HCL_SET_BITS(g_sys_ctx.registry[hid]->cr, + HASH_CR_EMPTYMSG_MASK); + + /* This bit selects between SHA-1 or SHA-2 algorithm */ + if (HASH_ALGO_SHA2 == p_config->algorithm) { + HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->cr, + HASH_CR_ALGO_MASK); + } else { /* SHA1 algorithm */ + + HCL_SET_BITS(g_sys_ctx.registry[hid]->cr, + HASH_CR_ALGO_MASK); + } + + /* This bit selects between HASH or HMAC mode for the selected + algorithm */ + if (HASH_OPER_MODE_HASH == p_config->oper_mode) { + HCL_CLEAR_BITS(g_sys_ctx.registry + [hid]->cr, HASH_CR_MODE_MASK); + } else { /* HMAC mode */ + + HCL_SET_BITS(g_sys_ctx.registry[hid]->cr, + HASH_CR_MODE_MASK); + + /* This bit selects between short key (<= 64 bytes) or long key + (>64 bytes) in HMAC mode */ + if (HASH_SHORT_KEY == p_config->hmac_key) { + HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->cr, + HASH_CR_LKEY_MASK); + } else { + HCL_SET_BITS(g_sys_ctx.registry[hid]->cr, + HASH_CR_LKEY_MASK); + } + } + + return hash_error; +} + +/** + * hash_begin - This routine resets some globals and initializes the hash + * hardware. + * @hid: Hardware device ID + * + * Reentrancy: Non Re-entrant + * + * Comments 1. : User need to call hash_setconfiguration API before + * calling this API i.e. the current configuration is set + * only when bit INIT is set and we set INIT bit in + * hash_begin. Changing the configuration during a + * computation has no effect so we first set + * configuration by calling this API and then set the + * INIT bit for the HASH processor and the current + * configuration is taken into account. As reading INIT + * bit (with correct protection rights) will always + * return 0b so we can't make a check at software level. + * So the user has to initialize the device for new + * configuration to take in to effect. + */ +int hash_begin(int hid) +{ + int hash_error = HASH_OK; + + /* HW and SW initializations */ + /* Note: there is no need to initialize buffer and digest members */ + + stm_dbg(debug, "[u8500_hash_alg] hash_begin())"); + + if (!((HASH_DEVICE_ID_0 == hid) + || (HASH_DEVICE_ID_1 == hid))) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + g_sys_ctx.state[hid].index = 0; + g_sys_ctx.state[hid].bit_index = 0; + g_sys_ctx.state[hid].length.high_word = 0; + g_sys_ctx.state[hid].length.low_word = 0; + + HASH_INITIALIZE; + + HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->str, + HASH_STR_DCAL_MASK); + HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->str, + HASH_STR_NBLW_MASK); + + return hash_error; +} + +/** + * hash_hw_update - Updates current HASH computation hashing another part of + * the message. 
+ * @hid: Hardware device ID + * @p_data_buffer: Byte array containing the message to be hashed (caller + * allocated) + * @msg_length: Length of message to be hashed (in bits) + * + * Reentrancy: Non Re-entrant + */ +int hash_hw_update(int hid, const u8 *p_data_buffer, u32 msg_length) +{ + int hash_error = HASH_OK; + u8 index; + u8 *p_buffer; + u32 count; + + stm_dbg(debug, "[u8500_hash_alg] hash_hw_update(msg_length=%d / %d), " + "in=%d, bin=%d))", + msg_length, + msg_length / 8, + g_sys_ctx.state[hid].index, + g_sys_ctx.state[hid].bit_index); + + if (!((HASH_DEVICE_ID_0 == hid) + || (HASH_DEVICE_ID_1 == hid))) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + index = g_sys_ctx.state[hid].index; + + p_buffer = (u8 *)g_sys_ctx.state[hid].buffer; + + /* Number of bytes in the message */ + msg_length /= 8; + + /* Check parameters */ + if (NULL == p_data_buffer) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + /* Check if g_sys_ctx.state.length + msg_length + overflows */ + if (msg_length > + (g_sys_ctx.state[hid].length.low_word + msg_length) + && HASH_HIGH_WORD_MAX_VAL == + (g_sys_ctx.state[hid].length.high_word)) { + hash_error = HASH_MSG_LENGTH_OVERFLOW; + stm_error("[u8500_hash_alg] HASH_MSG_LENGTH_OVERFLOW!"); + return hash_error; + } + + /* Main loop */ + while (0 != msg_length) { + if ((index + msg_length) < HASH_BLOCK_SIZE) { + for (count = 0; count < msg_length; count++) { + /*TODO: memcpy? */ + p_buffer[index + count] = + *(p_data_buffer + count); + } + + index += msg_length; + msg_length = 0; + } else { + /* if 'p_data_buffer' is four byte aligned and local + * buffer does not have any data, we can write data + * directly from 'p_data_buffer' to HW peripheral, + * otherwise we first copy data to a local buffer + */ + if ((0 == (((u32) p_data_buffer) % 4)) + && (0 == index)) { + hash_processblock(hid, + (const u32 *)p_data_buffer); + } else { + for (count = 0; + count < (u32)(HASH_BLOCK_SIZE - index); + count++) { + p_buffer[index + count] = + *(p_data_buffer + count); + } + + hash_processblock(hid, (const u32 *)p_buffer); + } + + hash_incrementlength(hid, HASH_BLOCK_SIZE); + p_data_buffer += (HASH_BLOCK_SIZE - index); + msg_length -= (HASH_BLOCK_SIZE - index); + index = 0; + } + } + + g_sys_ctx.state[hid].index = index; + + stm_dbg(debug, "[u8500_hash_alg] hash_hw_update END(msg_length=%d in " + "bits, in=%d, bin=%d))", + msg_length, + g_sys_ctx.state[hid].index, + g_sys_ctx.state[hid].bit_index); + + return hash_error; +} + +/** + * hash_end_key - Function that ends a message, i.e. pad and triggers the last + * calculation. + * @hid: Hardware device ID + * + * This function also clear the registries that have been involved in + * computation. + */ +int hash_end_key(int hid) +{ + int hash_error = HASH_OK; + u8 count = 0; + + stm_dbg(debug, "[u8500_hash_alg] hash_end_key(index=%d))", + g_sys_ctx.state[hid].index); + + hash_messagepad(hid, g_sys_ctx.state[hid].buffer, + g_sys_ctx.state[hid].index); + + /* Wait till the DCAL bit get cleared, So that we get the final + * message digest not intermediate value. 
+ */ + while (g_sys_ctx.registry[hid]->str & HASH_STR_DCAL_MASK) + ; + + /* Reset the HASH state */ + g_sys_ctx.state[hid].index = 0; + g_sys_ctx.state[hid].bit_index = 0; + + for (count = 0; count < HASH_BLOCK_SIZE / sizeof(u32); count++) + g_sys_ctx.state[hid].buffer[count] = 0; + + g_sys_ctx.state[hid].length.high_word = 0; + g_sys_ctx.state[hid].length.low_word = 0; + + return hash_error; +} + +/** + * hash_resume_state - Function that resumes the state of an calculation. + * @hid: Hardware device ID + * @device_state: The state to be restored in the hash hardware + * + * Reentrancy: Non Re-entrant + */ +int hash_resume_state(int hid, const struct hash_state *device_state) +{ + u32 temp_cr; + int hash_error = HASH_OK; + s32 count; + + stm_dbg(debug, "[u8500_hash_alg] hash_resume_state(state(0x%x)))", + (u32) device_state); + + if (NULL == device_state) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + if (!((HASH_DEVICE_ID_0 == hid) + || (HASH_DEVICE_ID_1 == hid))) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + /* Check correctness of index and length members */ + if (device_state->index > HASH_BLOCK_SIZE + || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + for (count = 0; count < (s32) (HASH_BLOCK_SIZE / sizeof(u32)); + count++) { + g_sys_ctx.state[hid].buffer[count] = + device_state->buffer[count]; + } + + g_sys_ctx.state[hid].index = device_state->index; + g_sys_ctx.state[hid].bit_index = device_state->bit_index; + g_sys_ctx.state[hid].length = device_state->length; + + HASH_INITIALIZE; + + temp_cr = device_state->temp_cr; + g_sys_ctx.registry[hid]->cr = + temp_cr & HASH_CR_RESUME_MASK; + + for (count = 0; count < HASH_CSR_COUNT; count++) { + if ((count >= 36) && + !(g_sys_ctx.registry[hid]->cr & + HASH_CR_MODE_MASK)) { + break; + } + g_sys_ctx.registry[hid]->csrx[count] = + device_state->csr[count]; + } + + g_sys_ctx.registry[hid]->csfull = device_state->csfull; + g_sys_ctx.registry[hid]->csdatain = device_state->csdatain; + + g_sys_ctx.registry[hid]->str = device_state->str_reg; + g_sys_ctx.registry[hid]->cr = temp_cr; + + return hash_error; +} + +/** + * hash_save_state - Function that saves the state of hardware. + * @hid: Hardware device ID + * @device_state: The strucure where the hardware state should be saved + * + * Reentrancy: Non Re-entrant + */ +int hash_save_state(int hid, struct hash_state *device_state) +{ + u32 temp_cr; + u32 count; + int hash_error = HASH_OK; + + stm_dbg(debug, "[u8500_hash_alg] hash_save_state( state(0x%x)))", + (u32) device_state); + + if (NULL == device_state) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + if (!((HASH_DEVICE_ID_0 == hid) + || (HASH_DEVICE_ID_1 == hid))) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + for (count = 0; count < HASH_BLOCK_SIZE / sizeof(u32); count++) { + device_state->buffer[count] = + g_sys_ctx.state[hid].buffer[count]; + } + + device_state->index = g_sys_ctx.state[hid].index; + device_state->bit_index = g_sys_ctx.state[hid].bit_index; + device_state->length = g_sys_ctx.state[hid].length; + + /* Write dummy value to force digest intermediate calculation. 
This + * actually makes sure that there isn't any ongoing calculation in the + * hardware. + */ + while (g_sys_ctx.registry[hid]->str & HASH_STR_DCAL_MASK) + ; + + temp_cr = g_sys_ctx.registry[hid]->cr; + + device_state->str_reg = g_sys_ctx.registry[hid]->str; + + device_state->din_reg = g_sys_ctx.registry[hid]->din; + + for (count = 0; count < HASH_CSR_COUNT; count++) { + if ((count >= 36) + && !(g_sys_ctx.registry[hid]->cr & + HASH_CR_MODE_MASK)) { + break; + } + + device_state->csr[count] = + g_sys_ctx.registry[hid]->csrx[count]; + } + + device_state->csfull = g_sys_ctx.registry[hid]->csfull; + device_state->csdatain = g_sys_ctx.registry[hid]->csdatain; + + /* end if */ + device_state->temp_cr = temp_cr; + + return hash_error; +} + +/** + * hash_end - Ends current HASH computation, passing back the hash to the user. + * @hid: Hardware device ID + * @digest: User allocated byte array for the calculated digest + * + * Reentrancy: Non Re-entrant + */ +int hash_end(int hid, u8 digest[HASH_MSG_DIGEST_SIZE]) +{ + int hash_error = HASH_OK; + u32 count; + /* Standard SHA-1 digest for null string for HASH mode */ + u8 zero_message_hash_sha1[HASH_MSG_DIGEST_SIZE] = { + 0xDA, 0x39, 0xA3, 0xEE, + 0x5E, 0x6B, 0x4B, 0x0D, + 0x32, 0x55, 0xBF, 0xEF, + 0x95, 0x60, 0x18, 0x90, + 0xAF, 0xD8, 0x07, 0x09, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 + }; + /* Standard SHA-2 digest for null string for HASH mode */ + u8 zero_message_hash_sha2[HASH_MSG_DIGEST_SIZE] = { + 0xD4, 0x1D, 0x8C, 0xD9, + 0x8F, 0x00, 0xB2, 0x04, + 0xE9, 0x80, 0x09, 0x98, + 0xEC, 0xF8, 0x42, 0x7E, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 + }; + /* Standard SHA-1 digest for null string for HMAC mode,with no key */ + u8 zero_message_hmac_sha1[HASH_MSG_DIGEST_SIZE] = { + 0xFB, 0xDB, 0x1D, 0x1B, + 0x18, 0xAA, 0x6C, 0x08, + 0x32, 0x4B, 0x7D, 0x64, + 0xB7, 0x1F, 0xB7, 0x63, + 0x70, 0x69, 0x0E, 0x1D, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 + }; + /* Standard SHA2 digest for null string for HMAC mode,with no key */ + u8 zero_message_hmac_sha2[HASH_MSG_DIGEST_SIZE] = { + 0x74, 0xE6, 0xF7, 0x29, + 0x8A, 0x9C, 0x2D, 0x16, + 0x89, 0x35, 0xF5, 0x8C, + 0x00, 0x1B, 0xAD, 0x88, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 + }; + + stm_dbg(debug, "[u8500_hash_alg] hash_end(digest array (0x%x)))", + (u32) digest); + + if (NULL == digest) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + if (!((HASH_DEVICE_ID_0 == hid) + || (HASH_DEVICE_ID_1 == hid))) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + if (0 == g_sys_ctx.state[hid].index && + 0 == g_sys_ctx.state[hid].length.high_word && + 0 == g_sys_ctx.state[hid].length.low_word) { + if (g_sys_ctx.registry[hid]->cr & HASH_CR_MODE_MASK) { + if (g_sys_ctx.registry[hid]->cr & HASH_CR_ALGO_MASK) { + /* hash of an empty message was requested */ + for (count = 0; count < HASH_MSG_DIGEST_SIZE; + count++) { + digest[count] = + zero_message_hmac_sha1[count]; + } + } else { /* SHA-2 algo */ + + /* hash of an empty message was requested */ + for (count = 0; count < HASH_MSG_DIGEST_SIZE; + count++) { + digest[count] = + zero_message_hmac_sha2[count]; + } + } + } else { /* HASH mode */ + + if (g_sys_ctx.registry[hid]->cr & HASH_CR_ALGO_MASK) { + /* hash of an empty message was 
requested */ + for (count = 0; count < HASH_MSG_DIGEST_SIZE; + count++) { + digest[count] = + zero_message_hash_sha1[count]; + } + } else { /* SHA-2 algo */ + + /* hash of an empty message was requested */ + for (count = 0; count < HASH_MSG_DIGEST_SIZE; + count++) { + digest[count] = + zero_message_hash_sha2[count]; + } + } + } + + HASH_SET_DCAL; + } else { + hash_messagepad(hid, + g_sys_ctx.state[hid].buffer, + g_sys_ctx.state[hid].index); + + /* Wait till the DCAL bit get cleared, So that we get the final + * message digest not intermediate value. */ + while (g_sys_ctx.registry[hid]->str & HASH_STR_DCAL_MASK) + ; + + hash_error = hash_get_digest(hid, digest); + + /* Reset the HASH state */ + g_sys_ctx.state[hid].index = 0; + g_sys_ctx.state[hid].bit_index = 0; + for (count = 0; count < HASH_BLOCK_SIZE / sizeof(u32); + count++) { + g_sys_ctx.state[hid].buffer[count] + = 0; + } + + g_sys_ctx.state[hid].length.high_word = 0; + g_sys_ctx.state[hid].length.low_word = 0; + } + + if (debug) + hexdump(digest, HASH_MSG_DIGEST_SIZE); + + return hash_error; +} + +/** + * hash_initialize_globals - Initialize global variables to their default reset + * value. + * @hid: Hardware device ID + * + * Reentrancy: Non Re-entrant, global structure g_sys_ctx elements are being + * modified + */ +static void hash_initialize_globals(int hid) +{ + u8 loop_count; + + /* Resetting the values of global variables except the registry */ + g_sys_ctx.state[hid].temp_cr = HASH_RESET_INDEX_VAL; + g_sys_ctx.state[hid].str_reg = HASH_RESET_INDEX_VAL; + g_sys_ctx.state[hid].din_reg = HASH_RESET_INDEX_VAL; + + for (loop_count = 0; loop_count < HASH_CSR_COUNT; loop_count++) { + g_sys_ctx.state[hid].csr[loop_count] = + HASH_RESET_CSRX_REG_VALUE; + } + + g_sys_ctx.state[hid].csfull = HASH_RESET_CSFULL_REG_VALUE; + g_sys_ctx.state[hid].csdatain = HASH_RESET_CSDATAIN_REG_VALUE; + + for (loop_count = 0; loop_count < (HASH_BLOCK_SIZE / sizeof(u32)); + loop_count++) { + g_sys_ctx.state[hid].buffer[loop_count] = + HASH_RESET_BUFFER_VAL; + } + + g_sys_ctx.state[hid].length.high_word = HASH_RESET_LEN_HIGH_VAL; + g_sys_ctx.state[hid].length.low_word = HASH_RESET_LEN_LOW_VAL; + g_sys_ctx.state[hid].index = HASH_RESET_INDEX_VAL; + g_sys_ctx.state[hid].bit_index = HASH_RESET_BIT_INDEX_VAL; +} + +/** + * hash_reset - This routine will reset the global variable to default reset + * value and HASH registers to their power on reset values. + * @hid: Hardware device ID + * + * Reentrancy: Non Re-entrant, global structure g_sys_ctx elements are being + * modified. + */ +int hash_reset(int hid) +{ + int hash_error = HASH_OK; + u8 loop_count; + + if (!((HASH_DEVICE_ID_0 == hid) + || (HASH_DEVICE_ID_1 == hid))) { + hash_error = HASH_INVALID_PARAMETER; + + return hash_error; + } + + /* Resetting the values of global variables except the registry */ + hash_initialize_globals(hid); + + /* Resetting HASH control register to power-on-reset values */ + g_sys_ctx.registry[hid]->str = HASH_RESET_START_REG_VALUE; + + for (loop_count = 0; loop_count < HASH_CSR_COUNT; loop_count++) { + g_sys_ctx.registry[hid]->csrx[loop_count] = + HASH_RESET_CSRX_REG_VALUE; + } + + g_sys_ctx.registry[hid]->csfull = HASH_RESET_CSFULL_REG_VALUE; + g_sys_ctx.registry[hid]->csdatain = + HASH_RESET_CSDATAIN_REG_VALUE; + + /* Resetting the HASH Control reg. This also reset the PRIVn and SECn + * bits and hence the device registers will not be accessed anymore and + * should be done in the last HASH register access statement. 
+ */ + g_sys_ctx.registry[hid]->cr = HASH_RESET_CONTROL_REG_VALUE; + + return hash_error; +} + +/** + * hash_init_base_address - This routine initializes hash register base + * address. It also checks for peripheral Ids and PCell Ids. + * @hid: Hardware device ID + * @base_address: Hash hardware base address + * + * Reentrancy: Non Re-entrant, global variable registry (register base address) + * is being modified. + */ +int hash_init_base_address(int hid, t_logical_address base_address) +{ + int hash_error = HASH_OK; + + stm_dbg(debug, "[u8500_hash_alg] hash_init_base_address())"); + + if (!((HASH_DEVICE_ID_0 == hid) + || (HASH_DEVICE_ID_1 == hid))) { + hash_error = HASH_INVALID_PARAMETER; + + return hash_error; + } + + if (0 != base_address) { + /*--------------------------------------* + * Initializing the registers structure * + *--------------------------------------*/ + g_sys_ctx.registry[hid] = (struct hash_register *) base_address; + + /*--------------------------* + * Checking Peripheral Ids * + *--------------------------*/ + if ((HASH_P_ID0 == + g_sys_ctx.registry[hid]->periphid0) + && (HASH_P_ID1 == + g_sys_ctx.registry[hid]->periphid1) + && (HASH_P_ID2 == + g_sys_ctx.registry[hid]->periphid2) + && (HASH_P_ID3 == + g_sys_ctx.registry[hid]->periphid3) + && (HASH_CELL_ID0 == + g_sys_ctx.registry[hid]->cellid0) + && (HASH_CELL_ID1 == + g_sys_ctx.registry[hid]->cellid1) + && (HASH_CELL_ID2 == + g_sys_ctx.registry[hid]->cellid2) + && (HASH_CELL_ID3 == + g_sys_ctx.registry[hid]->cellid3) + ) { + + /* Resetting the values of global variables except the + registry */ + hash_initialize_globals(hid); + hash_error = HASH_OK; + return hash_error; + } else { + hash_error = HASH_UNSUPPORTED_HW; + stm_error("[u8500_hash_alg] HASH_UNSUPPORTED_HW!"); + return hash_error; + } + } /* end if */ + else { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } +} + +/** + * hash_get_digest - Gets the digest. + * @hid: Hardware device ID + * @digest: User allocated byte array for the calculated digest + * + * Reentrancy: Non Re-entrant, global variable registry (hash control register) + * is being modified. + * + * Note that, if this is called before the final message has been handle it will + * return the intermediate message digest. + */ +int hash_get_digest(int hid, u8 *digest) +{ + u32 temp_hx_val, count; + int hash_error = HASH_OK; + + stm_dbg(debug, + "[u8500_hash_alg] hash_get_digest(digest array:(0x%x))", + (u32) digest); + + if (!((HASH_DEVICE_ID_0 == hid) + || (HASH_DEVICE_ID_1 == hid))) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + /* Copy result into digest array */ + for (count = 0; count < (HASH_MSG_DIGEST_SIZE / sizeof(u32)); + count++) { + temp_hx_val = HASH_GET_HX(count); + digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF); + digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF); + digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF); + digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF); + } + + return hash_error; +} + +/** + * hash_compute - Performs a complete HASH calculation on the message passed. 
+ * @hid: Hardware device ID + * @p_data_buffer: Pointer to the message to be hashed + * @msg_length: The length of the message + * @p_hash_config: Structure with configuration data for the hash hardware + * @digest: User allocated byte array for the calculated digest + * + * Reentrancy: Non Re-entrant + */ +int hash_compute(int hid, + const u8 *p_data_buffer, + u32 msg_length, + struct hash_config *p_hash_config, + u8 digest[HASH_MSG_DIGEST_SIZE]) { + int hash_error = HASH_OK; + + stm_dbg(debug, "[u8500_hash_alg] hash_compute())"); + + if (!((HASH_DEVICE_ID_0 == hid) + || (HASH_DEVICE_ID_1 == hid))) { + hash_error = HASH_INVALID_PARAMETER; + stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_error; + } + + + /* WARNING: return code must be checked if + * behaviour of hash_begin changes. + */ + hash_error = hash_setconfiguration(hid, p_hash_config); + if (HASH_OK != hash_error) { + stm_error("[u8500_hash_alg] hash_setconfiguration() failed!"); + return hash_error; + } + + hash_error = hash_begin(hid); + if (HASH_OK != hash_error) { + stm_error("[u8500_hash_alg] hash_begin() failed!"); + return hash_error; + } + + hash_error = hash_hw_update(hid, p_data_buffer, msg_length); + if (HASH_OK != hash_error) { + stm_error("[u8500_hash_alg] hash_hw_update() failed!"); + return hash_error; + } + + hash_error = hash_end(hid, digest); + if (HASH_OK != hash_error) { + stm_error("[u8500_hash_alg] hash_end() failed!"); + return hash_error; + } + + return hash_error; +} + +module_init(u8500_hash_mod_init); +module_exit(u8500_hash_mod_fini); + +module_param(mode, int, 0); +module_param(debug, int, 0); +module_param(contextsaving, int, 0); + +MODULE_DESCRIPTION("Driver for ST-Ericsson U8500 HASH engine."); +MODULE_LICENSE("GPL"); + +MODULE_ALIAS("sha1-u8500"); +MODULE_ALIAS("sha256-u8500"); +MODULE_ALIAS("hmac(sha1-u8500)"); +MODULE_ALIAS("hmac(sha256-u8500)"); -- cgit v1.2.3 From 6e66274cbeb53f22a4dd08f9c1ac119c50d34768 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Wed, 19 Oct 2011 13:20:08 +0200 Subject: crypto: ux500: Add crypto and hash acceleration Adds device driver support for crypto and hash acceleration for the u8500 chip. ST-Ericsson Linux next: Builds and boot, but fails on cryp probe ER320876. ST-Ericsson ID: AP 270734 crypto: ux500: cryp: Add power-awareness Adds power awareness to the cryp part of the device driver for accelerating crypto in u8500. ST-Ericsson ID: ER277473 crypto: ux500: cryp: Fix of NULL pointer dereference in power-save Fix of NULL pointer dereference in cryp_disable_power. ST-Ericsson ID: ER277473 crypto: ux500: cryp/hash: Power-awareness - Hash: Adds power awareness to the hash part of the device driver for accelerating hashing in u8500. - Cryp: - Removed erroneous call to cryp_enable_power() in u8500_cryp_resume(). - Added spinlocks to protect usage of current_ctx. - Corrected erroneous gotos in hw_cryp_noxts(). - Added down_interruptible()/up() in suspend/resume, to make sure the device is not allocated during suspend. ST-Ericsson ID: ER280692 crypto: ux500: AES ECB converted to ablk_cipher and supports DMA. - DMA support for AES_ECB added. - ablk_cipher support added to the driver. In this commit AES_ECB is using this asynchronous API. This is a must since you will get miscellaneous sleep warning- error-messages from the crypto testmgr which runs sanity tests when loading a module in synchrounous mode using DMA. 
Therefore DMA operations should use the ablk_cipher (asynchronous) API, - Added scatterlist walk function for ablk_cipher for the non DMA version. - Added power awarness to DMA related code in this cryp driver. - Refactored code in hw_cryp_noxts with functions calls when getting the device and when setting up the context. - Renamed registers so they corresponds to the names in the design spec. ST-Ericsson ID: AP277474 ST-Ericsson Linux next: ER320876, v-ape regulator missing. crypto: ux500: cryp: DES ECB converted to ablk_cipher. ablk_cipher support added to the driver. In this commit DES_ECB and DES3_ECB is using this asynchronous API. This removes the log printout: BUG: sleeping function called from invalid context at kernel/mutex.c:94. ST-Ericsson ID: ER322583 crypto: ux500: cryp: More algorithms converted to use ablk_cipher. In this commit AES_CBC, AES_CTR, DES_CBC and DES3_CBC is using the asynchronous API. These algorithms also support DMA, except for givciphers. ST-Ericsson ID: AP277474 crypto: ux500: cryp: Power save redesign, to improve performance - Enable and disable power moved to be called only at beginning and end of algorithm calls. - Removed compiler warnings (uninitialized variables) visible using Linux-next compiler. Note! Those warnings not visible using default compiler in the android forest. ST-Ericsson ID: AP277474 crypto: Fixes after 2.6.35 merge Signed-off-by: Berne Hebark Signed-off-by: Lee Jones Signed-off-by: Philippe Langlais Conflicts: arch/arm/mach-ux500/board-mop500.c --- arch/arm/mach-ux500/include/mach/crypto-ux500.h | 16 + drivers/crypto/Kconfig | 2 +- drivers/crypto/ux500/Kconfig | 30 +- drivers/crypto/ux500/Makefile | 15 +- drivers/crypto/ux500/cryp/Makefile | 13 + drivers/crypto/ux500/cryp/cryp.c | 556 ++++++ drivers/crypto/ux500/cryp/cryp.h | 338 ++++ drivers/crypto/ux500/cryp/cryp_core.c | 2331 +++++++++++++++++++++++ drivers/crypto/ux500/cryp/cryp_irq.c | 45 + drivers/crypto/ux500/cryp/cryp_irq.h | 31 + drivers/crypto/ux500/cryp/cryp_irqp.h | 125 ++ drivers/crypto/ux500/cryp/cryp_p.h | 113 ++ drivers/crypto/ux500/hash/Makefile | 12 +- drivers/crypto/ux500/hash/hash_alg.h | 209 +- drivers/crypto/ux500/hash/hash_core.c | 1615 ++++++---------- 15 files changed, 4203 insertions(+), 1248 deletions(-) create mode 100644 arch/arm/mach-ux500/include/mach/crypto-ux500.h mode change 100755 => 100644 drivers/crypto/ux500/Kconfig mode change 100755 => 100644 drivers/crypto/ux500/Makefile create mode 100644 drivers/crypto/ux500/cryp/Makefile create mode 100644 drivers/crypto/ux500/cryp/cryp.c create mode 100644 drivers/crypto/ux500/cryp/cryp.h create mode 100644 drivers/crypto/ux500/cryp/cryp_core.c create mode 100644 drivers/crypto/ux500/cryp/cryp_irq.c create mode 100644 drivers/crypto/ux500/cryp/cryp_irq.h create mode 100644 drivers/crypto/ux500/cryp/cryp_irqp.h create mode 100644 drivers/crypto/ux500/cryp/cryp_p.h mode change 100755 => 100644 drivers/crypto/ux500/hash/Makefile mode change 100755 => 100644 drivers/crypto/ux500/hash/hash_alg.h mode change 100755 => 100644 drivers/crypto/ux500/hash/hash_core.c diff --git a/arch/arm/mach-ux500/include/mach/crypto-ux500.h b/arch/arm/mach-ux500/include/mach/crypto-ux500.h new file mode 100644 index 00000000000..57da88398d5 --- /dev/null +++ b/arch/arm/mach-ux500/include/mach/crypto-ux500.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Author: Joakim Bech for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef _CRYPTO_UX500_H +#include +#include + +struct 
cryp_platform_data { + struct stedma40_chan_cfg mem_to_engine; + struct stedma40_chan_cfg engine_to_mem; +}; + +#endif diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 52e0bf5738e..638648816c9 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -298,7 +298,7 @@ config CRYPTO_DEV_TEGRA_AES config CRYPTO_DEV_UX500 tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration" - #depends on ARCH_U8500 + depends on ARCH_U8500 select CRYPTO_ALGAPI help Driver for ST-Ericsson UX500 crypto engine. diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig old mode 100755 new mode 100644 index 4ac419757d0..165a03d46c0 --- a/drivers/crypto/ux500/Kconfig +++ b/drivers/crypto/ux500/Kconfig @@ -1,15 +1,29 @@ +# +# Copyright (C) ST-Ericsson SA 2010 +# Author: Shujuan Chen (shujuan.chen@stericsson.com) +# License terms: GNU General Public License (GPL) version 2 +# + +config CRYPTO_DEV_UX500_CRYP + tristate "UX500 crypto driver for CRYP block" + depends on CRYPTO_DEV_UX500 + select CRYPTO_DES + help + This is the driver for the crypto block CRYP. config CRYPTO_DEV_UX500_HASH tristate "UX500 crypto driver for HASH block" - depends on ARCH_U8500 - select CRYPTO_ALGAPI + depends on CRYPTO_DEV_UX500 select CRYPTO_HASH select CRYPTO_HMAC - help - This selects the UX500 hash driver for the HASH hardware. - Depends on U8500/STM DMA if running in DMA mode. + help + This selects the UX500 hash driver for the HASH hardware. + Depends on U8500/STM DMA if running in DMA mode. -config CRYPTO_DEV_UX500_DEBUG_INFO - tristate "Enable UX500 crypto drivers debug info" +config CRYPTO_DEV_UX500_DEBUG + bool "Activate ux500 platform debug-mode for crypto and hash block" + depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH + default n help - This is to enable the debug info for UX500 crypto drivers. + Say Y if you want to add debug prints to ux500_hash and + ux500_cryp devices. diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile old mode 100755 new mode 100644 index 4c187857120..b9a365bade8 --- a/drivers/crypto/ux500/Makefile +++ b/drivers/crypto/ux500/Makefile @@ -1,11 +1,8 @@ - -ifeq ($(CONFIG_CRYPTO_DEV_UX500_DEBUG_INFO),y) - EXTRA_CFLAGS += -D__DEBUG -else - EXTRA_CFLAGS += -D__RELEASE -endif +# +# Copyright (C) ST-Ericsson SA 2010 +# Author: Shujuan Chen (shujuan.chen@stericsson.com) +# License terms: GNU General Public License (GPL) version 2 +# obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/ - - - +obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += cryp/ diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile new file mode 100644 index 00000000000..fd5e6df3861 --- /dev/null +++ b/drivers/crypto/ux500/cryp/Makefile @@ -0,0 +1,13 @@ +#/* +# * Copyright (C) ST-Ericsson SA 2010 +# * Author: shujuan.chen@stericsson.com for ST-Ericsson. +# * License terms: GNU General Public License (GPL) version 2 */ + +ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG +CFLAGS_cryp_core.o := -DDEBUG -O0 +CFLAGS_cryp.o := -DDEBUG -O0 +CFLAGS_cryp_irq.o := -DDEBUG -O0 +endif + +obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += u8500_cryp.o +u8500_cryp-objs := cryp.o cryp_irq.o cryp_core.o diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c new file mode 100644 index 00000000000..94928f7efce --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -0,0 +1,556 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. 
+ * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "cryp_p.h" +#include "cryp.h" + +/** + * cryp_wait_until_done - wait until the device logic is not busy + */ +void cryp_wait_until_done(struct cryp_device_data *device_data) +{ + while (cryp_is_logic_busy(device_data)) + cpu_relax(); +} + +/** + * cryp_check - This routine checks Peripheral and PCell Id + * @device_data: Pointer to the device data struct for base address. + */ +int cryp_check(struct cryp_device_data *device_data) +{ + if (NULL == device_data) + return -EINVAL; + + /* Check Peripheral and Pcell Id Register for CRYP */ + if ((CRYP_PERIPHERAL_ID0 == readl(&device_data->base->periphId0)) + && (CRYP_PERIPHERAL_ID1 == readl(&device_data->base->periphId1)) + && (CRYP_PERIPHERAL_ID2 == readl(&device_data->base->periphId2)) + && (CRYP_PERIPHERAL_ID3 == readl(&device_data->base->periphId3)) + && (CRYP_PCELL_ID0 == readl(&device_data->base->pcellId0)) + && (CRYP_PCELL_ID1 == readl(&device_data->base->pcellId1)) + && (CRYP_PCELL_ID2 == readl(&device_data->base->pcellId2)) + && (CRYP_PCELL_ID3 == readl(&device_data->base->pcellId3))) { + return 0; + } + + return -EPERM; +} + +/** + * cryp_reset - This routine loads the cryp register with the default values + * @device_data: Pointer to the device data struct for base address. + */ +void cryp_reset(struct cryp_device_data *device_data) +{ + writel(CRYP_DMACR_DEFAULT, &device_data->base->dmacr); + writel(CRYP_IMSC_DEFAULT, &device_data->base->imsc); + + writel(CRYP_KEY_DEFAULT, &device_data->base->key_1_l); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_1_r); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_2_l); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_2_r); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_3_l); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_3_r); + writel(CRYP_INIT_VECT_DEFAULT, &device_data->base->init_vect_0_l); + writel(CRYP_INIT_VECT_DEFAULT, &device_data->base->init_vect_0_r); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_4_l); + writel(CRYP_KEY_DEFAULT, &device_data->base->key_4_r); + writel(CRYP_INIT_VECT_DEFAULT, &device_data->base->init_vect_1_l); + writel(CRYP_INIT_VECT_DEFAULT, &device_data->base->init_vect_1_r); + + /* Last step since the protection mode bits need to be modified. */ + writel(CRYP_CR_DEFAULT | CRYP_CR_FFLUSH, &device_data->base->cr); + + /* + * CRYP_INFIFO_READY_MASK is the expected value on the status register + * when starting a new calculation, which means Input FIFO is not full + * and input FIFO is empty. + */ + while (readl(&device_data->base->status) != CRYP_INFIFO_READY_MASK) + cpu_relax(); +} + +/** + * cryp_activity - This routine enables/disable the cryptography function. + * @device_data: Pointer to the device data struct for base address. + * @cryp_activity: Enable/Disable functionality + */ +void cryp_activity(struct cryp_device_data *device_data, + enum cryp_crypen cryp_crypen) +{ + CRYP_PUT_BITS(&device_data->base->cr, + cryp_crypen, + CRYP_CRYPEN_POS, + CRYP_CRYPEN_MASK); +} + +/** + * cryp_start - starts the computation + * @device_data: Pointer to the device data struct for base address. 
+ * @cryp_start: Enable/Disable functionality + */ +void cryp_start(struct cryp_device_data *device_data) +{ + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_START_ENABLE, + CRYP_START_POS, + CRYP_START_MASK); +} + +/** + * cryp_init_signal - This routine submit the initialization values. + * @device_data: Pointer to the device data struct for base address. + * @cryp_init_bit: Enable/Disable init signal + */ +void cryp_init_signal(struct cryp_device_data *device_data, + enum cryp_init cryp_init_bit) +{ + CRYP_PUT_BITS(&device_data->base->cr, + cryp_init_bit, + CRYP_INIT_POS, + CRYP_INIT_MASK); +} + +/** + * cryp_key_preparation - This routine prepares key for decryption. + * @device_data: Pointer to the device data struct for base address. + * @cryp_prepkey: Enable/Disable + */ +void cryp_key_preparation(struct cryp_device_data *device_data, + enum cryp_key_prep cryp_prepkey) +{ + CRYP_PUT_BITS(&device_data->base->cr, + cryp_prepkey, + CRYP_KSE_POS, + CRYP_KSE_MASK); +} + +/** + * cryp_flush_inoutfifo - Resets both the input and the output FIFOs + * @device_data: Pointer to the device data struct for base address. + */ +void cryp_flush_inoutfifo(struct cryp_device_data *device_data) +{ + CRYP_SET_BITS(&device_data->base->cr, CRYP_FIFO_FLUSH_MASK); +} + +/** + * cryp_set_dir - + * @device_data: Pointer to the device data struct for base address. + * @dir: Crypto direction, encrypt/decrypt + */ +void cryp_set_dir(struct cryp_device_data *device_data, int dir) +{ + CRYP_PUT_BITS(&device_data->base->cr, + dir, + CRYP_ENC_DEC_POS, + CRYP_ENC_DEC_MASK); + + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_DATA_TYPE_8BIT_SWAP, + CRYP_DATA_TYPE_POS, + CRYP_DATA_TYPE_MASK); +} + +/** + * cryp_cen_flush - + * @device_data: Pointer to the device data struct for base address. + */ +void cryp_cen_flush(struct cryp_device_data *device_data) +{ + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_STATE_DISABLE, + CRYP_KEY_ACCESS_POS, + CRYP_KEY_ACCESS_MASK); + CRYP_SET_BITS(&device_data->base->cr, + CRYP_FIFO_FLUSH_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_CRYPEN_ENABLE, + CRYP_CRYPEN_POS, + CRYP_CRYPEN_MASK); +} + +/** + * cryp_set_configuration - This routine set the cr CRYP IP + * @device_data: Pointer to the device data struct for base address. 
+ * @p_cryp_config: Pointer to the configuration parameter + */ +int cryp_set_configuration(struct cryp_device_data *device_data, + struct cryp_config *p_cryp_config) +{ + if (NULL == device_data) + return -EINVAL; + if (NULL == p_cryp_config) + return -EINVAL; + + /* Since more than one bit is written macro put_bits is used*/ + CRYP_PUT_BITS(&device_data->base->cr, + p_cryp_config->key_access, + CRYP_KEY_ACCESS_POS, + CRYP_KEY_ACCESS_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + p_cryp_config->key_size, + CRYP_KEY_SIZE_POS, + CRYP_KEY_SIZE_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + p_cryp_config->data_type, + CRYP_DATA_TYPE_POS, + CRYP_DATA_TYPE_MASK); + + /* Prepare key for decryption */ + if ((CRYP_ALGORITHM_DECRYPT == p_cryp_config->encrypt_or_decrypt) && + ((CRYP_ALGO_AES_ECB == p_cryp_config->algo_mode) || + (CRYP_ALGO_AES_CBC == p_cryp_config->algo_mode))) { + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_ALGO_AES_ECB, + CRYP_ALGOMODE_POS, + CRYP_ALGOMODE_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_CRYPEN_ENABLE, + CRYP_CRYPEN_POS, + CRYP_CRYPEN_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + KSE_ENABLED, + CRYP_KSE_POS, + CRYP_KSE_MASK); + + cryp_wait_until_done(device_data); + + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_CRYPEN_DISABLE, + CRYP_CRYPEN_POS, + CRYP_CRYPEN_MASK); + } + + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_CRYPEN_ENABLE, + CRYP_CRYPEN_POS, + CRYP_CRYPEN_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + p_cryp_config->algo_mode, + CRYP_ALGOMODE_POS, + CRYP_ALGOMODE_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + p_cryp_config->encrypt_or_decrypt, + CRYP_ENC_DEC_POS, + CRYP_ENC_DEC_MASK); + + return 0; +} + +/** + * cryp_get_configuration - gets the parameter of the control register of IP + * @device_data: Pointer to the device data struct for base address. + * @p_cryp_config: Gets the configuration parameter from cryp ip. + */ +int cryp_get_configuration(struct cryp_device_data *device_data, + struct cryp_config *p_cryp_config) +{ + if (NULL == p_cryp_config) + return -EINVAL; + + p_cryp_config->key_access = + ((readl(&device_data->base->cr) & CRYP_KEY_ACCESS_MASK) ? + CRYP_STATE_ENABLE : + CRYP_STATE_DISABLE); + p_cryp_config->key_size = + ((readl(&device_data->base->cr) & CRYP_KEY_SIZE_MASK) >> + CRYP_KEY_SIZE_POS); + + p_cryp_config->encrypt_or_decrypt = + ((readl(&device_data->base->cr) & CRYP_ENC_DEC_MASK) ? + CRYP_ALGORITHM_DECRYPT : + CRYP_ALGORITHM_ENCRYPT); + + p_cryp_config->data_type = + ((readl(&device_data->base->cr) & CRYP_DATA_TYPE_MASK) >> + CRYP_DATA_TYPE_POS); + p_cryp_config->algo_mode = + ((readl(&device_data->base->cr) & CRYP_ALGOMODE_MASK) >> + CRYP_ALGOMODE_POS); + + return 0; +} + +/** + * cryp_configure_protection - set the protection bits in the CRYP logic. + * @device_data: Pointer to the device data struct for base address. + * @p_protect_config: Pointer to the protection mode and + * secure mode configuration + */ +int cryp_configure_protection(struct cryp_device_data *device_data, + struct cryp_protection_config *p_protect_config) +{ + if (NULL == p_protect_config) + return -EINVAL; + + CRYP_WRITE_BIT(&device_data->base->cr, + (u32) p_protect_config->secure_access, + CRYP_SECURE_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + p_protect_config->privilege_access, + CRYP_PRLG_POS, + CRYP_PRLG_MASK); + + return 0; +} + +/** + * cryp_is_logic_busy - returns the busy status of the CRYP logic + * @device_data: Pointer to the device data struct for base address. 
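+ *
+ * Callers normally poll this through cryp_wait_until_done(), e.g.:
+ *
+ *	while (cryp_is_logic_busy(device_data))
+ *		cpu_relax();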
+ */ +int cryp_is_logic_busy(struct cryp_device_data *device_data) +{ + return CRYP_TEST_BITS(&device_data->base->status, + CRYP_BUSY_STATUS_MASK); +} + +/** + * cryp_get_status - This routine returns the complete status of the cryp logic + * @device_data: Pointer to the device data struct for base address. + */ +/* +int cryp_get_status(struct cryp_device_data *device_data) +{ + return (int) readl(device_data->base->status); +} +*/ + +/** + * cryp_configure_for_dma - configures the CRYP IP for DMA operation + * @device_data: Pointer to the device data struct for base address. + * @dma_req: Specifies the DMA request type value. + */ +void cryp_configure_for_dma(struct cryp_device_data *device_data, + enum cryp_dma_req_type dma_req) +{ + CRYP_SET_BITS(&device_data->base->dmacr, + (u32) dma_req); +} + +/** + * cryp_configure_key_values - configures the key values for CRYP operations + * @device_data: Pointer to the device data struct for base address. + * @key_reg_index: Key value index register + * @key_value: The key value struct + */ +int cryp_configure_key_values(struct cryp_device_data *device_data, + enum cryp_key_reg_index key_reg_index, + struct cryp_key_value key_value) +{ + while (cryp_is_logic_busy(device_data)) + cpu_relax(); + + switch (key_reg_index) { + case CRYP_KEY_REG_1: + writel(key_value.key_value_left, + &device_data->base->key_1_l); + writel(key_value.key_value_right, + &device_data->base->key_1_r); + break; + case CRYP_KEY_REG_2: + writel(key_value.key_value_left, + &device_data->base->key_2_l); + writel(key_value.key_value_right, + &device_data->base->key_2_r); + break; + case CRYP_KEY_REG_3: + writel(key_value.key_value_left, + &device_data->base->key_3_l); + writel(key_value.key_value_right, + &device_data->base->key_3_r); + break; + case CRYP_KEY_REG_4: + writel(key_value.key_value_left, + &device_data->base->key_4_l); + writel(key_value.key_value_right, + &device_data->base->key_4_r); + break; + default: + return -EINVAL; + } + + return 0; + +} + +/** + * cryp_configure_init_vector - configures the initialization vector register + * @device_data: Pointer to the device data struct for base address. + * @init_vector_index: Specifies the index of the init vector. + * @init_vector_value: Specifies the value for the init vector. + */ +int cryp_configure_init_vector(struct cryp_device_data *device_data, + enum cryp_init_vector_index + init_vector_index, + struct cryp_init_vector_value + init_vector_value) +{ + while (cryp_is_logic_busy(device_data)) + cpu_relax(); + + switch (init_vector_index) { + case CRYP_INIT_VECTOR_INDEX_0: + writel(init_vector_value.init_value_left, + &device_data->base->init_vect_0_l); + writel(init_vector_value.init_value_right, + &device_data->base->init_vect_0_r); + break; + case CRYP_INIT_VECTOR_INDEX_1: + writel(init_vector_value.init_value_left, + &device_data->base->init_vect_1_l); + writel(init_vector_value.init_value_right, + &device_data->base->init_vect_1_r); + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * cryp_prep_ctx_mgmt - Prepares for handling the context of the block + * @device_data: Pointer to the device data struct for base address. 
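+ *
+ * Shared by the save/restore pair below. An illustrative, untested
+ * power-management sketch (a caller-owned context "ctx" is assumed):
+ *
+ *	struct cryp_device_context ctx;
+ *
+ *	cryp_save_device_context(device_data, &ctx);
+ *	... power the CRYP block off and back on ...
+ *	cryp_restore_device_context(device_data, &ctx);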
+ */ +static void cryp_prep_ctx_mgmt(struct cryp_device_data *device_data) +{ + cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH); + cryp_activity(device_data, CRYP_CRYPEN_DISABLE); + cryp_wait_until_done(device_data); +} + +/** + * cryp_save_device_context - Store hardware registers and + * other device context parameter + * @device_data: Pointer to the device data struct for base address. + * @ctx: Crypto device context + */ +void cryp_save_device_context(struct cryp_device_data *device_data, + struct cryp_device_context *ctx) +{ + struct cryp_register *src_reg = device_data->base; + + cryp_prep_ctx_mgmt(device_data); + + ctx->din = readl(&src_reg->din); + + ctx->dout = readl(&src_reg->dout); + + ctx->cr = readl(&src_reg->cr); + ctx->dmacr = readl(&src_reg->dmacr); + ctx->imsc = readl(&src_reg->imsc); + + ctx->key_1_l = readl(&src_reg->key_1_l); + ctx->key_1_r = readl(&src_reg->key_1_r); + ctx->key_2_l = readl(&src_reg->key_2_l); + ctx->key_2_r = readl(&src_reg->key_2_r); + ctx->key_3_l = readl(&src_reg->key_3_l); + ctx->key_3_r = readl(&src_reg->key_3_r); + ctx->key_4_l = readl(&src_reg->key_4_l); + ctx->key_4_r = readl(&src_reg->key_4_r); + + ctx->init_vect_0_l = readl(&src_reg->init_vect_0_l); + ctx->init_vect_0_r = readl(&src_reg->init_vect_0_r); + ctx->init_vect_1_l = readl(&src_reg->init_vect_1_l); + ctx->init_vect_1_r = readl(&src_reg->init_vect_1_r); +} + +/** + * cryp_restore_device_context - Restore hardware registers and + * other device context parameter + * @device_data: Pointer to the device data struct for base address. + * @ctx: Crypto device context + */ +void cryp_restore_device_context(struct cryp_device_data *device_data, + struct cryp_device_context *ctx) +{ + struct cryp_register *reg = device_data->base; + + cryp_prep_ctx_mgmt(device_data); + + writel(ctx->din, ®->din); + writel(ctx->dout, ®->dout); + writel(ctx->cr, ®->cr); + writel(ctx->dmacr, ®->dmacr); + writel(ctx->imsc, ®->imsc); + writel(ctx->key_1_l, ®->key_1_l); + writel(ctx->key_1_r, ®->key_1_r); + writel(ctx->key_2_l, ®->key_2_l); + writel(ctx->key_2_r, ®->key_2_r); + writel(ctx->key_3_l, ®->key_3_l); + writel(ctx->key_3_r, ®->key_3_r); + writel(ctx->key_4_l, ®->key_4_l); + writel(ctx->key_4_r, ®->key_4_r); + writel(ctx->init_vect_0_l, ®->init_vect_0_l); + writel(ctx->init_vect_0_r, ®->init_vect_0_r); + writel(ctx->init_vect_1_l, ®->init_vect_1_l); + writel(ctx->init_vect_1_r, ®->init_vect_1_r); +} + +/** + * cryp_write_indata - This routine writes 32 bit data into the data input + * register of the cryptography IP. + * @device_data: Pointer to the device data struct for base address. + * @write_data: Data word to write + */ +int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data) +{ + if (NULL == device_data) + return -EINVAL; + writel(write_data, &device_data->base->din); + + return 0; +} + +/** + * cryp_read_indata - This routine reads the 32 bit data from the data input + * register into the specified location. + * @device_data: Pointer to the device data struct for base address. + * @p_read_data: Read the data from the input FIFO. + */ +int cryp_read_indata(struct cryp_device_data *device_data, u32 *p_read_data) +{ + if (NULL == device_data) + return -EINVAL; + if (NULL == p_read_data) + return -EINVAL; + + *p_read_data = readl(&device_data->base->din); + + return 0; +} + +/** + * cryp_read_outdata - This routine reads the data from the data output + * register of the CRYP logic + * @device_data: Pointer to the device data struct for base address. 
+ * @read_data: Read the data from the output FIFO. + */ +int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data) +{ + if (NULL == device_data) + return -EINVAL; + if (NULL == read_data) + return -EINVAL; + + *read_data = readl(&device_data->base->dout); + + return 0; +} diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h new file mode 100644 index 00000000000..2d98923071c --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp.h @@ -0,0 +1,338 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef _CRYP_H_ +#define _CRYP_H_ + +#include +#include +#include +#include + +/* Module Defines */ +#define CRYP_MODULE_NAME "CRYP HCL Module" + +#define DEV_DBG_NAME "crypX crypX:" + +/* CRYP enable/disable */ +enum cryp_crypen { + CRYP_CRYPEN_DISABLE = 0, + CRYP_CRYPEN_ENABLE = 1 +}; + +/* CRYP Start Computation enable/disable */ +enum cryp_start { + CRYP_START_DISABLE = 0, + CRYP_START_ENABLE = 1 +}; + +/* CRYP Init Signal enable/disable */ +enum cryp_init { + CRYP_INIT_DISABLE = 0, + CRYP_INIT_ENABLE = 1 +}; + +/* Cryp State enable/disable */ +enum cryp_state { + CRYP_STATE_DISABLE = 0, + CRYP_STATE_ENABLE = 1 +}; + +/* Key preparation bit enable */ +enum cryp_key_prep { + KSE_DISABLED, + KSE_ENABLED +}; + +/* Key size for AES*/ +#define CRYP_KEY_SIZE_128 (0) +#define CRYP_KEY_SIZE_192 (1) +#define CRYP_KEY_SIZE_256 (2) + +/* Data type Swap */ +#define CRYP_DATA_TYPE_32BIT_SWAP (0) +#define CRYP_DATA_TYPE_16BIT_SWAP (1) +#define CRYP_DATA_TYPE_8BIT_SWAP (2) +#define CRYP_DATA_TYPE_BIT_SWAP (3) + +/* AES modes */ +enum cryp_algo_mode { + CRYP_ALGO_TDES_ECB, + CRYP_ALGO_TDES_CBC, + CRYP_ALGO_DES_ECB, + CRYP_ALGO_DES_CBC, + CRYP_ALGO_AES_ECB, + CRYP_ALGO_AES_CBC, + CRYP_ALGO_AES_CTR, + CRYP_ALGO_AES_XTS +}; + +/* Cryp Encryption or Decryption */ +enum cryp_algorithm_dir { + CRYP_ALGORITHM_ENCRYPT, + CRYP_ALGORITHM_DECRYPT +}; + +/* Hardware access method */ +enum cryp_mode { + CRYP_MODE_POLLING, + CRYP_MODE_INTERRUPT, + CRYP_MODE_DMA +}; + +/** + * struct cryp_config - + * @key_access: Cryp state enable/disable + * @key_size: Key size for AES + * @data_type: Data type Swap + * @algo_mode: AES modes + * @encrypt_or_decrypt: Cryp Encryption or Decryption + * + * CRYP configuration structure to be passed to set configuration + */ +struct cryp_config { + enum cryp_state key_access; + int key_size; + int data_type; + enum cryp_algo_mode algo_mode; + enum cryp_algorithm_dir encrypt_or_decrypt; +}; + +/** + * struct cryp_protection_config - + * @privilege_access: Privileged cryp state enable/disable + * @secure_access: Secure cryp state enable/disable + * + * Protection configuration structure for setting privilage access + */ +struct cryp_protection_config { + enum cryp_state privilege_access; + enum cryp_state secure_access; +}; + +/* Cryp status */ +enum cryp_status_id { + CRYP_STATUS_BUSY = 0x10, + CRYP_STATUS_OUTPUT_FIFO_FULL = 0x08, + CRYP_STATUS_OUTPUT_FIFO_NOT_EMPTY = 0x04, + CRYP_STATUS_INPUT_FIFO_NOT_FULL = 0x02, + CRYP_STATUS_INPUT_FIFO_EMPTY = 0x01 +}; + +/* Cryp DMA interface */ +enum cryp_dma_req_type { + CRYP_DMA_DISABLE_BOTH, + CRYP_DMA_ENABLE_IN_DATA, + CRYP_DMA_ENABLE_OUT_DATA, + CRYP_DMA_ENABLE_BOTH_DIRECTIONS +}; + +enum cryp_dma_channel { + CRYP_DMA_RX = 0, + 
CRYP_DMA_TX +}; + +/* Key registers */ +enum cryp_key_reg_index { + CRYP_KEY_REG_1, + CRYP_KEY_REG_2, + CRYP_KEY_REG_3, + CRYP_KEY_REG_4 +}; + +/* Key register left and right */ +struct cryp_key_value { + u32 key_value_left; + u32 key_value_right; +}; + +/* Cryp Initialization structure */ +enum cryp_init_vector_index { + CRYP_INIT_VECTOR_INDEX_0, + CRYP_INIT_VECTOR_INDEX_1 +}; + +/* struct cryp_init_vector_value - + * @init_value_left + * @init_value_right + * */ +struct cryp_init_vector_value { + u32 init_value_left; + u32 init_value_right; +}; + +/** + * struct cryp_device_context - structure for a cryp context. + * @cr: control register + * @dmacr: DMA control register + * @imsc: Interrupt mask set/clear register + * @key_1_l: Key 1l register + * @key_1_r: Key 1r register + * @key_2_l: Key 2l register + * @key_2_r: Key 2r register + * @key_3_l: Key 3l register + * @key_3_r: Key 3r register + * @key_4_l: Key 4l register + * @key_4_r: Key 4r register + * @init_vect_0_l: Initialization vector 0l register + * @init_vect_0_r: Initialization vector 0r register + * @init_vect_1_l: Initialization vector 1l register + * @init_vect_1_r: Initialization vector 0r register + * @din: Data in register + * @dout: Data out register + * + * CRYP power management specifc structure. + */ +struct cryp_device_context { + u32 cr; + u32 dmacr; + u32 imsc; + + u32 key_1_l; + u32 key_1_r; + u32 key_2_l; + u32 key_2_r; + u32 key_3_l; + u32 key_3_r; + u32 key_4_l; + u32 key_4_r; + + u32 init_vect_0_l; + u32 init_vect_0_r; + u32 init_vect_1_l; + u32 init_vect_1_r; + + u32 din; + u32 dout; +}; + +struct cryp_dma { + dma_cap_mask_t mask; + struct completion cryp_dma_complete; + struct dma_chan *chan_cryp2mem; + struct dma_chan *chan_mem2cryp; + struct stedma40_chan_cfg *cfg_cryp2mem; + struct stedma40_chan_cfg *cfg_mem2cryp; + int sg_src_len; + int sg_dst_len; + struct scatterlist *sg_src; + struct scatterlist *sg_dst; + int nents_src; + int nents_dst; +}; + +/** + * struct cryp_device_data - structure for a cryp device. + * @base: Pointer to the hardware base address. + * @dev: Pointer to the devices dev structure. + * @cryp_irq_complete: Pointer to an interrupt completion structure. + * @clk: Pointer to the device's clock control. + * @pwr_regulator: Pointer to the device's power control. + * @power_status: Current status of the power. + * @ctx_lock: Lock for current_ctx. + * @current_ctx: Pointer to the currently allocated context. + * @list_node: For inclusion into a klist. + * @dma: The dma structure holding channel configuration. + * @power_state: TRUE = power state on, FALSE = power state off. + * @power_state_mutex: Mutex for power_state. + * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx. 
+ */ +struct cryp_device_data { + struct cryp_register __iomem *base; + struct device *dev; + struct completion cryp_irq_complete; + struct clk *clk; + struct regulator *pwr_regulator; + int power_status; + struct spinlock ctx_lock; + struct cryp_ctx *current_ctx; + struct klist_node list_node; + struct cryp_dma dma; + bool power_state; + struct mutex power_state_mutex; + bool restore_dev_ctx; +}; + +void cryp_wait_until_done(struct cryp_device_data *device_data); + +/* Initialization functions */ + +int cryp_check(struct cryp_device_data *device_data); + +void cryp_reset(struct cryp_device_data *device_data); + +void cryp_activity(struct cryp_device_data *device_data, + enum cryp_crypen cryp_crypen); + +void cryp_start(struct cryp_device_data *device_data); + +void cryp_init_signal(struct cryp_device_data *device_data, + enum cryp_init cryp_init); + +void cryp_key_preparation(struct cryp_device_data *device_data, + enum cryp_key_prep cryp_key_prep); + +void cryp_flush_inoutfifo(struct cryp_device_data *device_data); + +void cryp_cen_flush(struct cryp_device_data *device_data); + +void cryp_set_dir(struct cryp_device_data *device_data, int dir); + +int cryp_set_configuration(struct cryp_device_data *device_data, + struct cryp_config *p_cryp_config); + +int cryp_get_configuration(struct cryp_device_data *device_data, + struct cryp_config *p_cryp_config); + +void cryp_configure_for_dma(struct cryp_device_data *device_data, + enum cryp_dma_req_type dma_req); + +int cryp_configure_key_values(struct cryp_device_data *device_data, + enum cryp_key_reg_index key_reg_index, + struct cryp_key_value key_value); + +int cryp_configure_init_vector(struct cryp_device_data *device_data, + enum cryp_init_vector_index + init_vector_index, + struct cryp_init_vector_value + init_vector_value); + +int cryp_configure_protection(struct cryp_device_data *device_data, + struct cryp_protection_config *p_protect_config); + +/* Power management funtions */ +void cryp_save_device_context(struct cryp_device_data *device_data, + struct cryp_device_context *ctx); + +void cryp_restore_device_context(struct cryp_device_data *device_data, + struct cryp_device_context *ctx); + +/* Data transfer and status bits. */ +int cryp_is_logic_busy(struct cryp_device_data *device_data); + +int cryp_get_status(struct cryp_device_data *device_data); + +/** + * cryp_write_indata - This routine writes 32 bit data into the data input + * register of the cryptography IP. + * @device_data: Pointer to the device data struct for base address. + * @write_data: Data to write. + */ +int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data); + +/** + * cryp_read_outdata - This routine reads the data from the data output + * register of the CRYP logic + * @device_data: Pointer to the device data struct for base address. + * @read_data: Read the data from the output FIFO. + */ +int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data); + +#endif /* _CRYP_H_ */ diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c new file mode 100644 index 00000000000..197bb416067 --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -0,0 +1,2331 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. 
+ * License terms: GNU General Public License (GPL) version 2 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include "cryp_p.h" +#include "cryp.h" + +#define CRYP_MAX_KEY_SIZE 32 +#define BYTES_PER_WORD 4 + +static int cryp_mode; + +static DEFINE_KLIST(cryp_device_list, NULL, NULL); + +static struct stedma40_chan_cfg *mem_to_engine; +static struct stedma40_chan_cfg *engine_to_mem; + +/** + * struct cryp_driver_data - data specific to the driver. + * + * @cryp_device_list: A list of registered devices to choose from. + * @device_allocation: A semaphore initialized with number of devices. + */ +struct cryp_driver_data { + struct klist device_list; + struct semaphore device_allocation; +}; + +/** + * struct cryp_ctx - Crypto context + * @config: Crypto mode. + * @key[CRYP_MAX_KEY_SIZE]: Key. + * @keylen: Length of key. + * @iv: Pointer to initialization vector. + * @indata: Pointer to indata. + * @outdata: Pointer to outdata. + * @datalen: Length of indata. + * @outlen: Length of outdata. + * @blocksize: Size of blocks. + * @updated: Updated flag. + * @dev_ctx: Device dependent context. + * @device: Pointer to the device. + */ +struct cryp_ctx { + struct cryp_config config; + u8 key[CRYP_MAX_KEY_SIZE]; + u32 keylen; + u8 *iv; + const u8 *indata; + u8 *outdata; + u32 datalen; + u32 outlen; + u32 blocksize; + u8 updated; + struct cryp_device_context dev_ctx; + struct cryp_device_data *device; +}; + +static struct cryp_driver_data driver_data; + +/** + * uint8p_to_uint32_be - 4*uint8 to uint32 big endian + * @in: Data to convert. + */ +static inline u32 uint8p_to_uint32_be(u8 *in) +{ + return (u32)in[0]<<24 | + ((u32)in[1]<<16) | + ((u32)in[2]<<8) | + ((u32)in[3]); +} + +/** + * uint8p_to_uint32_le - 4*uint8 to uint32 little endian + * @in: Data to convert. + */ +static inline u32 uint8p_to_uint32_le(u8 *in) +{ + return (u32)in[3]<<24 | + ((u32)in[2]<<16) | + ((u32)in[1]<<8) | + ((u32)in[0]); +} + +static inline void uint32_to_uint8p_be(u32 in, u8 *out) +{ + out[0] = (u8)(in>>24); + out[1] = (u8)(in>>16); + out[2] = (u8)(in>>8); + out[3] = (u8) in; +} + +static inline void uint32_to_uint8p_le(u32 in, u8 *out) +{ + out[3] = (u8)(in>>24); + out[2] = (u8)(in>>16); + out[1] = (u8)(in>>8); + out[0] = (u8) in; +} + +/** + * swap_bits_in_byte - mirror the bits in a byte + * @b: the byte to be mirrored + * + * The bits are swapped the following way: + * Byte b include bits 0-7, nibble 1 (n1) include bits 0-3 and + * nibble 2 (n2) bits 4-7. + * + * Nibble 1 (n1): + * (The "old" (moved) bit is replaced with a zero) + * 1. Move bit 6 and 7, 4 positions to the left. + * 2. Move bit 3 and 5, 2 positions to the left. + * 3. Move bit 1-4, 1 position to the left. + * + * Nibble 2 (n2): + * 1. Move bit 0 and 1, 4 positions to the right. + * 2. Move bit 2 and 4, 2 positions to the right. + * 3. Move bit 3-6, 1 position to the right. + * + * Combine the two nibbles to a complete and swapped byte. 
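+ *
+ * Worked example (illustrative): b = 0x2B = 0010 1011b. Mirroring every
+ * bit (bit 0 <-> bit 7, bit 1 <-> bit 6, ...) yields 1101 0100b = 0xD4,
+ * which is what this routine returns for 0x2B.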
+ */ + +static inline u8 swap_bits_in_byte(u8 b) +{ +#define R_SHIFT_4_MASK (0xc0) /* Bits 6 and 7, right shift 4 */ +#define R_SHIFT_2_MASK (0x28) /* (After right shift 4) Bits 3 and 5, + right shift 2 */ +#define R_SHIFT_1_MASK (0x1e) /* (After right shift 2) Bits 1-4, + right shift 1 */ +#define L_SHIFT_4_MASK (0x03) /* Bits 0 and 1, left shift 4 */ +#define L_SHIFT_2_MASK (0x14) /* (After left shift 4) Bits 2 and 4, + left shift 2 */ +#define L_SHIFT_1_MASK (0x78) /* (After left shift 1) Bits 3-6, + left shift 1 */ + + u8 n1; + u8 n2; + + /* Swap most significant nibble */ + /* Right shift 4, bits 6 and 7 */ + n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4)); + /* Right shift 2, bits 3 and 5 */ + n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2)); + /* Right shift 1, bits 1-4 */ + n1 = (n1 & R_SHIFT_1_MASK) >> 1; + + /* Swap least significant nibble */ + /* Left shift 4, bits 0 and 1 */ + n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4)); + /* Left shift 2, bits 2 and 4 */ + n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2)); + /* Left shift 1, bits 3-6 */ + n2 = (n2 & L_SHIFT_1_MASK) << 1; + + return n1 | n2; +} + +static inline void swap_words_in_key_and_bits_in_byte(const u8 *in, + u8 *out, u32 len) +{ + unsigned int i = 0; + int j; + int index = 0; + + j = len - BYTES_PER_WORD; + while (j >= 0) { + for (i = 0; i < BYTES_PER_WORD; i++) { + index = len - j - BYTES_PER_WORD + i; + out[j + i] = + swap_bits_in_byte(in[index]); + } + j -= BYTES_PER_WORD; + } +} + +static inline void swap_4bits_in_bytes(const u8 *in, u8 *out, u32 len) +{ + unsigned int i; + for (i = 0; i < len; i++) + out[i] = swap_bits_in_byte(in[i]); +} + +static irqreturn_t cryp_interrupt_handler(int irq, void *param) +{ + struct cryp_ctx *ctx; + int i; + struct cryp_device_data *device_data; + + if (param == NULL) { + BUG_ON(!param); + return IRQ_HANDLED; + } + + device_data = (struct cryp_device_data *)param; + + ctx = device_data->current_ctx; + + if (ctx == NULL) { + BUG_ON(!ctx); + return IRQ_HANDLED; + } + + if (cryp_pending_irq_src(device_data, + CRYP_IRQ_SRC_OUTPUT_FIFO)) { + if (ctx->outlen / ctx->blocksize > 0) { + for (i = 0; i < ctx->blocksize / 4; i++) { + cryp_read_outdata(device_data, + (u32 *)ctx->outdata); + ctx->outdata += 4; + ctx->outlen -= 4; + } + + if (ctx->outlen == 0) { + cryp_disable_irq_src(device_data, + CRYP_IRQ_SRC_OUTPUT_FIFO); + complete(&ctx->device->cryp_irq_complete); + } + } + } else if (cryp_pending_irq_src(device_data, + CRYP_IRQ_SRC_INPUT_FIFO)) { + if (ctx->datalen / ctx->blocksize > 0) { + for (i = 0 ; i < ctx->blocksize / 4; i++) { + cryp_write_indata(device_data, + *((u32 *)ctx->indata)); + ctx->indata += 4; + ctx->datalen -= 4; + } + + if (ctx->datalen == 0) + cryp_disable_irq_src(device_data, + CRYP_IRQ_SRC_INPUT_FIFO); + + if (ctx->config.algo_mode == CRYP_ALGO_AES_XTS) { + cryp_start(device_data); + cryp_wait_until_done(device_data); + } + } + } + + return IRQ_HANDLED; +} + +static int mode_is_aes(enum cryp_algo_mode mode) +{ + return (CRYP_ALGO_AES_ECB == mode) || + (CRYP_ALGO_AES_CBC == mode) || + (CRYP_ALGO_AES_CTR == mode) || + (CRYP_ALGO_AES_XTS == mode); +} + +static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right, + enum cryp_init_vector_index index) +{ + struct cryp_init_vector_value vector_value; + + dev_dbg(device_data->dev, "[%s]", __func__); + + vector_value.init_value_left = left; + vector_value.init_value_right = right; + + return cryp_configure_init_vector(device_data, + index, + 
vector_value); +} + +static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx) +{ + int i; + int status = 0; + int num_of_regs = ctx->blocksize / 8; + u32 iv[AES_BLOCK_SIZE / 4]; + + dev_dbg(device_data->dev, "[%s]", __func__); + + /* + * Since we loop on num_of_regs we need to have a check in case + * someone provides an incorrect blocksize which would force calling + * cfg_iv with i greater than 2 which is an error. + */ + if (num_of_regs > 2) { + dev_err(device_data->dev, "[%s] Incorrect blocksize %d", + __func__, ctx->blocksize); + return -EINVAL; + } + + for (i = 0; i < ctx->blocksize / 4; i++) + iv[i] = uint8p_to_uint32_be(ctx->iv + i*4); + + for (i = 0; i < num_of_regs; i++) { + status = cfg_iv(device_data, iv[i*2], iv[i*2+1], + (enum cryp_init_vector_index) i); + if (status != 0) + return status; + } + return status; +} + +static int set_key(struct cryp_device_data *device_data, + u32 left_key, + u32 right_key, + enum cryp_key_reg_index index) +{ + struct cryp_key_value key_value; + int cryp_error; + + dev_dbg(device_data->dev, "[%s]", __func__); + + key_value.key_value_left = left_key; + key_value.key_value_right = right_key; + + cryp_error = cryp_configure_key_values(device_data, + index, + key_value); + if (cryp_error != 0) + dev_err(device_data->dev, "[%s]: " + "cryp_configure_key_values() failed!", __func__); + + return cryp_error; +} + +static int cfg_keys(struct cryp_ctx *ctx) +{ + int i; + int num_of_regs = ctx->keylen / 8; + u32 swapped_key[CRYP_MAX_KEY_SIZE / 4]; + int cryp_error = 0; + + dev_dbg(ctx->device->dev, "[%s]", __func__); + + if (mode_is_aes(ctx->config.algo_mode)) { + swap_words_in_key_and_bits_in_byte((u8 *)ctx->key, + (u8 *)swapped_key, + ctx->keylen); + } else { + for (i = 0; i < ctx->keylen / 4; i++) + swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4); + } + + for (i = 0; i < num_of_regs; i++) { + cryp_error = set_key(ctx->device, + *(((u32 *)swapped_key)+i*2), + *(((u32 *)swapped_key)+i*2+1), + (enum cryp_key_reg_index) i); + + if (cryp_error != 0) { + dev_err(ctx->device->dev, "[%s]: set_key() failed!", + __func__); + return cryp_error; + } + } + return cryp_error; +} + +static int cryp_setup_context(struct cryp_ctx *ctx, + struct cryp_device_data *device_data) +{ + if (ctx->updated) + cryp_restore_device_context(device_data, &ctx->dev_ctx); + else { + cryp_activity(device_data, CRYP_CRYPEN_DISABLE); + + if (cfg_keys(ctx) != 0) { + dev_err(ctx->device->dev, "[%s]: cfg_keys failed!", + __func__); + return -EPERM; + } + + if ((ctx->iv) && + (CRYP_ALGO_AES_ECB != ctx->config.algo_mode) && + (CRYP_ALGO_DES_ECB != ctx->config.algo_mode) && + (CRYP_ALGO_TDES_ECB != ctx->config.algo_mode)) { + if (cfg_ivs(device_data, ctx) != 0) + return -EPERM; + } + + cryp_set_configuration(device_data, &ctx->config); + } + + return 0; +} + + +static int cryp_get_device_data(struct cryp_ctx *ctx, + struct cryp_device_data **device_data) +{ + int ret; + struct klist_iter device_iterator; + struct klist_node *device_node; + struct cryp_device_data *local_device_data = NULL; + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + /* Wait until a device is available */ + ret = down_interruptible(&driver_data.device_allocation); + if (ret) + return ret; /* Interrupted */ + + /* Select a device */ + klist_iter_init(&driver_data.device_list, &device_iterator); + + device_node = klist_next(&device_iterator); + while (device_node) { + local_device_data = container_of(device_node, + struct cryp_device_data, list_node); + spin_lock(&local_device_data->ctx_lock); + /* 
current_ctx allocates a device, NULL = unallocated */ + if (local_device_data->current_ctx) { + device_node = klist_next(&device_iterator); + } else { + local_device_data->current_ctx = ctx; + ctx->device = local_device_data; + spin_unlock(&local_device_data->ctx_lock); + break; + } + spin_unlock(&local_device_data->ctx_lock); + } + klist_iter_exit(&device_iterator); + + if (!device_node) { + /** + * No free device found. + * Since we allocated a device with down_interruptible, this + * should not be able to happen. + * Number of available devices, which are contained in + * device_allocation, is therefore decremented by not doing + * an up(device_allocation). + */ + return -EBUSY; + } + + *device_data = local_device_data; + + return 0; +} + +static void cryp_dma_setup_channel(struct cryp_device_data *device_data, + struct device *dev) +{ + dma_cap_zero(device_data->dma.mask); + dma_cap_set(DMA_SLAVE, device_data->dma.mask); + + device_data->dma.cfg_mem2cryp = mem_to_engine; + device_data->dma.chan_mem2cryp = + dma_request_channel(device_data->dma.mask, + stedma40_filter, + device_data->dma.cfg_mem2cryp); + + device_data->dma.cfg_cryp2mem = engine_to_mem; + device_data->dma.chan_cryp2mem = + dma_request_channel(device_data->dma.mask, + stedma40_filter, + device_data->dma.cfg_cryp2mem); + + init_completion(&device_data->dma.cryp_dma_complete); +} + +static void cryp_dma_out_callback(void *data) +{ + struct cryp_ctx *ctx = (struct cryp_ctx *) data; + dev_dbg(ctx->device->dev, "[%s]: ", __func__); + + complete(&ctx->device->dma.cryp_dma_complete); +} + +static int cryp_set_dma_transfer(struct cryp_ctx *ctx, + struct scatterlist *sg, + int len, + enum dma_data_direction direction) +{ + struct dma_async_tx_descriptor *desc; + struct dma_chan *channel = NULL; + dma_cookie_t cookie; + + dev_dbg(ctx->device->dev, "[%s]: ", __func__); + + switch (direction) { + case DMA_TO_DEVICE: + channel = ctx->device->dma.chan_mem2cryp; + ctx->device->dma.sg_src = sg; + ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev, + ctx->device->dma.sg_src, + ctx->device->dma.nents_src, + direction); + + if (!ctx->device->dma.sg_src_len) { + dev_dbg(ctx->device->dev, + "[%s]: Could not map the sg list (TO_DEVICE)", + __func__); + return -EFAULT; + } + + dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " + "(TO_DEVICE)", __func__); + + desc = channel->device->device_prep_slave_sg(channel, + ctx->device->dma.sg_src, + ctx->device->dma.sg_src_len, + direction, + DMA_CTRL_ACK); + break; + + case DMA_FROM_DEVICE: + channel = ctx->device->dma.chan_cryp2mem; + ctx->device->dma.sg_dst = sg; + + ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev, + ctx->device->dma.sg_dst, + ctx->device->dma.nents_dst, + direction); + + if (!ctx->device->dma.sg_dst_len) { + dev_dbg(ctx->device->dev, + "[%s]: Could not map the sg list " + "(FROM_DEVICE)", __func__); + return -EFAULT; + } + + dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " + "(FROM_DEVICE)", __func__); + + desc = channel->device->device_prep_slave_sg(channel, + ctx->device->dma.sg_dst, + ctx->device->dma.sg_dst_len, + direction, + DMA_CTRL_ACK | + DMA_PREP_INTERRUPT); + + desc->callback = cryp_dma_out_callback; + desc->callback_param = ctx; + break; + + default: + dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction", + __func__); + return -EFAULT; + } + + cookie = desc->tx_submit(desc); + dma_async_issue_pending(channel); + + return 0; +} + +static void cryp_dma_done(struct cryp_ctx *ctx) +{ + struct dma_chan *chan; + + 
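+	/*
+	 * Stop both channels and unmap the src/dst scatterlists that were
+	 * mapped in cryp_set_dma_transfer() for this request.
+	 */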
dev_dbg(ctx->device->dev, "[%s]: ", __func__); + + chan = ctx->device->dma.chan_mem2cryp; + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); + dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src, + ctx->device->dma.sg_src_len, DMA_TO_DEVICE); + + chan = ctx->device->dma.chan_cryp2mem; + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); + dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst, + ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE); +} + +static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg, + int len) +{ + int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); + dev_dbg(ctx->device->dev, "[%s]: ", __func__); + + if (error) { + dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() " + "failed", __func__); + return error; + } + + return len; +} + +static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len) +{ + int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE); + if (error) { + dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() " + "failed", __func__); + return error; + } + + return len; +} + +static int cryp_polling_mode(struct cryp_ctx *ctx, + struct cryp_device_data *device_data) +{ + int i; + int ret = 0; + int remaining_length = ctx->datalen; + const u8 *indata = ctx->indata; + u8 *outdata = ctx->outdata; + + cryp_activity(device_data, CRYP_CRYPEN_ENABLE); + while (remaining_length > 0) { + for (i = 0; i < ctx->blocksize / BYTES_PER_WORD; i++) { + ret = cryp_write_indata(device_data, + *((u32 *)indata)); + if (ret) + goto out; + indata += BYTES_PER_WORD; + remaining_length -= BYTES_PER_WORD; + } + cryp_wait_until_done(device_data); + for (i = 0; i < ctx->blocksize / BYTES_PER_WORD; i++) { + ret = cryp_read_outdata(device_data, + (u32 *)outdata); + if (ret) + goto out; + outdata += BYTES_PER_WORD; + } + cryp_wait_until_done(device_data); + } +out: + return ret; +} + +static int cryp_disable_power( + struct device *dev, + struct cryp_device_data *device_data, + bool save_device_context) +{ + int ret = 0; + + dev_dbg(dev, "[%s]", __func__); + + mutex_lock(&device_data->power_state_mutex); + if (!device_data->power_state) + goto out; + + spin_lock(&device_data->ctx_lock); + if (save_device_context && device_data->current_ctx) { + cryp_save_device_context(device_data, + &device_data->current_ctx->dev_ctx); + device_data->restore_dev_ctx = true; + } + spin_unlock(&device_data->ctx_lock); + + clk_disable(device_data->clk); + ret = regulator_disable(device_data->pwr_regulator); + if (ret) + dev_err(dev, "[%s]: " + "regulator_disable() failed!", + __func__); + + device_data->power_state = false; + +out: + mutex_unlock(&device_data->power_state_mutex); + + return ret; +} + +static int cryp_enable_power( + struct device *dev, + struct cryp_device_data *device_data, + bool restore_device_context) +{ + int ret = 0; + + dev_dbg(dev, "[%s]", __func__); + + mutex_lock(&device_data->power_state_mutex); + if (!device_data->power_state) { + ret = regulator_enable(device_data->pwr_regulator); + if (ret) { + dev_err(dev, "[%s]: regulator_enable() failed!", + __func__); + goto out; + } + + ret = clk_enable(device_data->clk); + if (ret) { + dev_err(dev, "[%s]: clk_enable() failed!", + __func__); + regulator_disable(device_data->pwr_regulator); + goto out; + } + device_data->power_state = true; + } + + if (device_data->restore_dev_ctx) { + spin_lock(&device_data->ctx_lock); + if (restore_device_context && device_data->current_ctx) { + device_data->restore_dev_ctx = false; + 
cryp_restore_device_context(device_data, + &device_data->current_ctx->dev_ctx); + } + spin_unlock(&device_data->ctx_lock); + } +out: + mutex_unlock(&device_data->power_state_mutex); + + return ret; +} + +static int hw_crypt_noxts(struct cryp_ctx *ctx, + struct cryp_device_data *device_data) +{ + int ret; + + const u8 *indata = ctx->indata; + u8 *outdata = ctx->outdata; + u32 datalen = ctx->datalen; + u32 outlen = datalen; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->outlen = ctx->datalen; + ctx->config.key_access = CRYP_STATE_ENABLE; + ctx->config.data_type = CRYP_DATA_TYPE_8BIT_SWAP; + + cryp_reset(device_data); + + ret = cryp_setup_context(ctx, device_data); + if (ret) + goto out; + + cryp_flush_inoutfifo(device_data); + + if (cryp_mode == CRYP_MODE_INTERRUPT) { + INIT_COMPLETION(ctx->device->cryp_irq_complete); + + cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO); + cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO); + + cryp_activity(device_data, CRYP_CRYPEN_ENABLE); + + wait_for_completion(&ctx->device->cryp_irq_complete); + } else if (cryp_mode == CRYP_MODE_POLLING || + cryp_mode == CRYP_MODE_DMA) { + /* + * The reason for having DMA in this if case is that if we are + * running cryp_mode = 2, then we separate DMA routines for + * handling cipher/plaintext > blocksize, except when + * running the normal CRYPTO_ALG_TYPE_CIPHER, then we still use + * the polling mode. Overhead of doing DMA setup eats up the + * benefits using it. + */ + ret = cryp_polling_mode(ctx, device_data); + if (ret) + goto out; + } else { + dev_err(ctx->device->dev, "[%s]: Invalid operation mode!", + __func__); + ret = -EPERM; + goto out; + } + + ret = 0; + cryp_save_device_context(device_data, &ctx->dev_ctx); + if (ctx->updated == 0) + ctx->updated = 1; + +out: + ctx->indata = indata; + ctx->outdata = outdata; + ctx->datalen = datalen; + ctx->outlen = outlen; + + return ret; +} + +static int get_nents(struct scatterlist *sg, int nbytes) +{ + int nents = 0; + + while (nbytes > 0) { + nbytes -= sg->length; + sg = scatterwalk_sg_next(sg); + nents++; + } + + return nents; +} + +static int ablk_dma_crypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct cryp_device_data *device_data; + + int bytes_written = 0; + int bytes_read = 0; + int ret; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.key_access = CRYP_STATE_ENABLE; + ctx->config.data_type = CRYP_DATA_TYPE_8BIT_SWAP; + ctx->datalen = areq->nbytes; + ctx->outlen = areq->nbytes; + + ret = cryp_get_device_data(ctx, &device_data); + if (ret) + return ret; + + ret = cryp_enable_power(device_data->dev, device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "cryp_enable_power() failed!", __func__); + goto out; + } + + cryp_reset(device_data); + + ret = cryp_setup_context(ctx, device_data); + if (ret) + goto out_power; + + /* We have the device now, so store the nents in the dma struct. */ + ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen); + ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen); + + /* Enable DMA in- and output. 
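+	 * The mem2cryp channel feeds areq->src into the input FIFO while the
+	 * cryp2mem channel drains the output FIFO into areq->dst; completion
+	 * is signalled by the callback attached to the cryp2mem descriptor.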
*/ + cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS); + + bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen); + bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written); + + wait_for_completion(&ctx->device->dma.cryp_dma_complete); + cryp_dma_done(ctx); + + cryp_save_device_context(device_data, &ctx->dev_ctx); + ctx->updated = 1; + +out_power: + if (cryp_disable_power(device_data->dev, device_data, false)) + dev_err(device_data->dev, "[%s]: " + "cryp_disable_power() failed!", __func__); + +out: + spin_lock(&device_data->ctx_lock); + device_data->current_ctx = NULL; + ctx->device = NULL; + spin_unlock(&device_data->ctx_lock); + + /* + * The down_interruptible part for this semaphore is called in + * cryp_get_device_data. + */ + up(&driver_data.device_allocation); + + if (unlikely(bytes_written != bytes_read)) + return -EPERM; + + return 0; +} + +static int ablk_crypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct ablkcipher_walk walk; + unsigned long src_paddr; + unsigned long dst_paddr; + int ret; + int nbytes; + struct cryp_device_data *device_data; + + ret = cryp_get_device_data(ctx, &device_data); + if (ret) + goto out; + + ret = cryp_enable_power(device_data->dev, device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "cryp_enable_power() failed!", __func__); + goto out_power; + } + + ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes); + ret = ablkcipher_walk_phys(areq, &walk); + + if (ret) { + pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!", + __func__); + goto out_power; + } + + while ((nbytes = walk.nbytes) > 0) { + ctx->iv = walk.iv; + src_paddr = (page_to_phys(walk.src.page) + walk.src.offset); + ctx->indata = phys_to_virt(src_paddr); + + dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset); + ctx->outdata = phys_to_virt(dst_paddr); + + ctx->datalen = nbytes - (nbytes % ctx->blocksize); + + ret = hw_crypt_noxts(ctx, device_data); + if (ret) + goto out_power; + + nbytes -= ctx->datalen; + ret = ablkcipher_walk_done(areq, &walk, nbytes); + if (ret) + goto out_power; + } + ablkcipher_walk_complete(&walk); + +out_power: + if (cryp_disable_power(device_data->dev, device_data, false)) + dev_err(device_data->dev, "[%s]: " + "cryp_disable_power() failed!", __func__); +out: + /* Release the device */ + spin_lock(&device_data->ctx_lock); + device_data->current_ctx = NULL; + ctx->device = NULL; + spin_unlock(&device_data->ctx_lock); + + /* + * The down_interruptible part for this semaphore is called in + * cryp_get_device_data. 
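+	 * The up() below is the release half of that allocation and returns
+	 * this device to the pool.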
+ */ + up(&driver_data.device_allocation); + + return ret; +} + +static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + switch (keylen) { + case AES_KEYSIZE_128: + ctx->config.key_size = CRYP_KEY_SIZE_128; + break; + + case AES_KEYSIZE_192: + ctx->config.key_size = CRYP_KEY_SIZE_192; + break; + + case AES_KEYSIZE_256: + ctx->config.key_size = CRYP_KEY_SIZE_256; + break; + + default: + pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__); + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + + return 0; +} + +static int aes_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + /* For CTR mode */ + if (keylen != AES_KEYSIZE_128 && + keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) { + + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + pr_debug(DEV_DBG_NAME " [%s] invalid keylen", __func__); + return -EINVAL; + } + + if (keylen == AES_KEYSIZE_128) + ctx->config.key_size = CRYP_KEY_SIZE_128; + else if (keylen == AES_KEYSIZE_192) + ctx->config.key_size = CRYP_KEY_SIZE_192; + else if (keylen == AES_KEYSIZE_256) + ctx->config.key_size = CRYP_KEY_SIZE_256; + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + return 0; +} + +static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + u32 tmp[DES_EXPKEY_WORDS]; + int ret; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + if (keylen != DES_KEY_SIZE) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", + __func__); + return -EINVAL; + } + + ret = des_ekey(tmp, key); + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", + __func__); + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + return 0; +} + +static int des_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; + int ret; + u32 tmp[DES_EXPKEY_WORDS]; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + if (keylen != DES_KEY_SIZE) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", + __func__); + return -EINVAL; + } + + ret = des_ekey(tmp, key); + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", + __func__); + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + return 0; +} + +static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + const u32 *K = (const u32 *)key; + u32 tmp[DES3_EDE_EXPKEY_WORDS]; + int i, ret; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + if (keylen != DES3_EDE_KEY_SIZE) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + 
pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", + __func__); + return -EINVAL; + } + + /* Checking key interdependency for weak key detection. */ + if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || + !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && + (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", + __func__); + return -EINVAL; + } + for (i = 0; i < 3; i++) { + ret = des_ekey(tmp, key + i*DES_KEY_SIZE); + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: " + "CRYPTO_TFM_REQ_WEAK_KEY", __func__); + return -EINVAL; + } + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + return 0; +} + +static int des3_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; + const u32 *K = (const u32 *)key; + u32 tmp[DES3_EDE_EXPKEY_WORDS]; + int i, ret; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + if (keylen != DES3_EDE_KEY_SIZE) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", + __func__); + return -EINVAL; + } + + if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || + !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && + (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", + __func__); + return -EINVAL; + } + + for (i = 0; i < 3; i++) { + ret = des_ekey(tmp, key + i*DES_KEY_SIZE); + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: " + "CRYPTO_TFM_REQ_WEAK_KEY", __func__); + return -EINVAL; + } + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + return 0; +} + +static int cryp_hw_calculate(struct cryp_ctx *ctx) +{ + struct cryp_device_data *device_data; + int ret; + + ret = cryp_get_device_data(ctx, &device_data); + if (ret) + goto out; + + ret = cryp_enable_power(device_data->dev, device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "cryp_enable_power() failed!", __func__); + goto out; + } + + if (hw_crypt_noxts(ctx, device_data)) + pr_err("u8500_cryp:crypX: [%s]: hw_crypt_noxts() failed!", + __func__); + +out: + if (cryp_disable_power(device_data->dev, device_data, false)) + dev_err(device_data->dev, "[%s]: " + "cryp_disable_power() failed!", __func__); + /* Release the device */ + spin_lock(&device_data->ctx_lock); + device_data->current_ctx = NULL; + ctx->device = NULL; + spin_unlock(&device_data->ctx_lock); + + /* + * The down_interruptible part for this semaphore is called in + * cryp_get_device_data. 
+ */ + up(&driver_data.device_allocation); + + return ret; +} + +static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_ECB; + + ctx->indata = in; + ctx->outdata = out; + ctx->datalen = ctx->blocksize; + + if (cryp_hw_calculate(ctx)) + pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + __func__); +} + +static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_ECB; + + ctx->indata = in; + ctx->outdata = out; + ctx->datalen = ctx->blocksize; + + if (cryp_hw_calculate(ctx)) + pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + __func__); +} + +static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_DES_ECB; + + ctx->indata = in; + ctx->outdata = out; + ctx->datalen = ctx->blocksize; + + if (cryp_hw_calculate(ctx)) + pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + __func__); +} + +static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_DES_ECB; + + ctx->indata = in; + ctx->outdata = out; + ctx->datalen = ctx->blocksize; + + if (cryp_hw_calculate(ctx)) + pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + __func__); +} + +static void des3_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_TDES_ECB; + + ctx->indata = in; + ctx->outdata = out; + ctx->datalen = ctx->blocksize; + + if (cryp_hw_calculate(ctx)) + pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + __func__); +} + +static void des3_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_TDES_ECB; + + ctx->indata = in; + ctx->outdata = out; + ctx->datalen = ctx->blocksize; + + if (cryp_hw_calculate(ctx)) + pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + __func__); +} + + +static int aes_ecb_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_ECB; + ctx->blocksize = AES_BLOCK_SIZE; + + if 
(cryp_mode == CRYP_MODE_DMA) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. */ + return ablk_crypt(areq); +} + +static int aes_ecb_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_ECB; + ctx->blocksize = AES_BLOCK_SIZE; + + if (cryp_mode == CRYP_MODE_DMA) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. */ + return ablk_crypt(areq); +} + +static int aes_cbc_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_CBC; + ctx->blocksize = AES_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. */ + return ablk_crypt(areq); +} + +static int aes_cbc_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_CBC; + ctx->blocksize = AES_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. */ + return ablk_crypt(areq); +} + +static int aes_ctr_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_CTR; + ctx->blocksize = AES_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. */ + return ablk_crypt(areq); +} + +static int aes_ctr_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_AES_CTR; + ctx->blocksize = AES_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. 
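+	 * (interrupt or polling, as selected by cryp_mode in hw_crypt_noxts()).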
*/ + return ablk_crypt(areq); +} + +static int des_ecb_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_DES_ECB; + ctx->blocksize = DES_BLOCK_SIZE; + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +static int des_ecb_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_DES_ECB; + ctx->blocksize = DES_BLOCK_SIZE; + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +static int des_cbc_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_DES_CBC; + ctx->blocksize = DES_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +static int des_cbc_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_DES_CBC; + ctx->blocksize = DES_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +static int des3_ecb_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_TDES_ECB; + ctx->blocksize = DES3_EDE_BLOCK_SIZE; + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +static int des3_ecb_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_TDES_ECB; + ctx->blocksize = DES3_EDE_BLOCK_SIZE; + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. 
+ */ + return ablk_crypt(areq); +} + +static int des3_cbc_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algo_mode = CRYP_ALGO_TDES_CBC; + ctx->blocksize = DES3_EDE_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +static int des3_cbc_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; + ctx->config.algo_mode = CRYP_ALGO_TDES_CBC; + ctx->blocksize = DES3_EDE_BLOCK_SIZE; + + /* Only DMA for ablkcipher, since givcipher not yet supported */ + if ((cryp_mode == CRYP_MODE_DMA) && + (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) + return ablk_dma_crypt(areq); + + /** + * Run the non DMA version also for DMA, since DMA is currently not + * working for DES. + */ + return ablk_crypt(areq); +} + +/** + * struct crypto_alg aes_alg + */ +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_setkey, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt + } + } +}; + +/** + * struct crypto_alg des_alg + */ +static struct crypto_alg des_alg = { + .cra_name = "des", + .cra_driver_name = "des-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = DES_KEY_SIZE, + .cia_max_keysize = DES_KEY_SIZE, + .cia_setkey = des_setkey, + .cia_encrypt = des_encrypt, + .cia_decrypt = des_decrypt + } + } +}; + +/** + * struct crypto_alg des3_alg + */ +static struct crypto_alg des3_alg = { + .cra_name = "des3_ede", + .cra_driver_name = "des3_ede-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des3_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = DES3_EDE_KEY_SIZE, + .cia_max_keysize = DES3_EDE_KEY_SIZE, + .cia_setkey = des3_setkey, + .cia_encrypt = des3_encrypt, + .cia_decrypt = des3_decrypt + } + } +}; + +/** + * struct crypto_alg aes_ecb_alg + */ +static struct crypto_alg aes_ecb_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = 
sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_ecb_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_ablkcipher_setkey, + .encrypt = aes_ecb_encrypt, + .decrypt = aes_ecb_decrypt, + } + } +}; + +/** + * struct crypto_alg aes_cbc_alg + */ +static struct crypto_alg aes_cbc_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_cbc_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_ablkcipher_setkey, + .encrypt = aes_cbc_encrypt, + .decrypt = aes_cbc_decrypt, + .ivsize = AES_BLOCK_SIZE, + } + } +}; + +/** + * struct crypto_alg aes_ctr_alg + */ +static struct crypto_alg aes_ctr_alg = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_ctr_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_ablkcipher_setkey, + .encrypt = aes_ctr_encrypt, + .decrypt = aes_ctr_decrypt, + .ivsize = AES_BLOCK_SIZE, + } + } +}; + +/** + * struct crypto_alg des_ecb_alg + */ +static struct crypto_alg des_ecb_alg = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des_ecb_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = des_ablkcipher_setkey, + .encrypt = des_ecb_encrypt, + .decrypt = des_ecb_decrypt, + } + } +}; + +/** + * struct crypto_alg des_cbc_alg + */ +static struct crypto_alg des_cbc_alg = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des_cbc_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = des_ablkcipher_setkey, + .encrypt = des_cbc_encrypt, + .decrypt = des_cbc_decrypt, + .ivsize = DES_BLOCK_SIZE, + } + } +}; + +/** + * struct crypto_alg des3_ecb_alg + */ +static struct crypto_alg des3_ecb_alg = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3_ede-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = 
LIST_HEAD_INIT(des3_ecb_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = des3_ablkcipher_setkey, + .encrypt = des3_ecb_encrypt, + .decrypt = des3_ecb_decrypt, + } + } +}; + +/** + * struct crypto_alg des3_cbc_alg + */ +static struct crypto_alg des3_cbc_alg = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3_ede-u8500", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des3_cbc_alg.cra_list), + .cra_u = { + .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = des3_ablkcipher_setkey, + .encrypt = des3_cbc_encrypt, + .decrypt = des3_cbc_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + } + } +}; + +/** + * struct crypto_alg *u8500_cryp_algs[] - + */ +static struct crypto_alg *u8500_cryp_algs[] = { + &aes_alg, + &des_alg, + &des3_alg, + &aes_ecb_alg, + &aes_cbc_alg, + &aes_ctr_alg, + &des_ecb_alg, + &des_cbc_alg, + &des3_ecb_alg, + &des3_cbc_alg +}; + +/** + * cryp_algs_register_all - + */ +static int cryp_algs_register_all(void) +{ + int ret; + int i; + int count; + + pr_debug("[%s]", __func__); + + for (i = 0; i < ARRAY_SIZE(u8500_cryp_algs); i++) { + ret = crypto_register_alg(u8500_cryp_algs[i]); + if (ret) { + count = i; + pr_err("[%s] alg registration failed", + u8500_cryp_algs[i]->cra_driver_name); + goto unreg; + } + } + return 0; +unreg: + for (i = 0; i < count; i++) + crypto_unregister_alg(u8500_cryp_algs[i]); + return ret; +} + +/** + * cryp_algs_unregister_all - + */ +static void cryp_algs_unregister_all(void) +{ + int i; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + for (i = 0; i < ARRAY_SIZE(u8500_cryp_algs); i++) + crypto_unregister_alg(u8500_cryp_algs[i]); +} + +static int u8500_cryp_probe(struct platform_device *pdev) +{ + int ret; + int cryp_error = 0; + struct resource *res = NULL; + struct resource *res_irq = NULL; + struct cryp_device_data *device_data; + struct cryp_protection_config prot = { + .privilege_access = CRYP_STATE_ENABLE + }; + struct device *dev = &pdev->dev; + + dev_dbg(dev, "[%s]", __func__); + device_data = kzalloc(sizeof(struct cryp_device_data), GFP_KERNEL); + if (!device_data) { + dev_err(dev, "[%s]: kzalloc() failed!", __func__); + ret = -ENOMEM; + goto out; + } + + device_data->dev = dev; + device_data->current_ctx = NULL; + + /* Grab the DMA configuration from platform data. 
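+	 *
+	 * This assumes the board code registers the "cryp1" platform device
+	 * with a struct cryp_platform_data roughly along these lines
+	 * (illustrative sketch only, not part of this patch):
+	 *
+	 *	static struct cryp_platform_data cryp1_platform_data = {
+	 *		.mem_to_engine = <stedma40_chan_cfg, memory -> CRYP>,
+	 *		.engine_to_mem = <stedma40_chan_cfg, CRYP -> memory>,
+	 *	};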
*/ + mem_to_engine = &((struct cryp_platform_data *) + dev->platform_data)->mem_to_engine; + engine_to_mem = &((struct cryp_platform_data *) + dev->platform_data)->engine_to_mem; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "[%s]: platform_get_resource() failed", + __func__); + ret = -ENODEV; + goto out_kfree; + } + + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (res == NULL) { + dev_err(dev, "[%s]: request_mem_region() failed", + __func__); + ret = -EBUSY; + goto out_kfree; + } + + device_data->base = ioremap(res->start, resource_size(res)); + if (!device_data->base) { + dev_err(dev, "[%s]: ioremap failed!", __func__); + ret = -ENOMEM; + goto out_free_mem; + } + + spin_lock_init(&device_data->ctx_lock); + mutex_init(&device_data->power_state_mutex); + + /* Enable power for CRYP hardware block */ + device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape"); + if (IS_ERR(device_data->pwr_regulator)) { + dev_err(dev, "[%s]: could not get cryp regulator", __func__); + ret = PTR_ERR(device_data->pwr_regulator); + device_data->pwr_regulator = NULL; + goto out_unmap; + } + + /* Enable the clk for CRYP hardware block */ + device_data->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(device_data->clk)) { + dev_err(dev, "[%s]: clk_get() failed!", __func__); + ret = PTR_ERR(device_data->clk); + goto out_regulator; + } + + /* Enable device power (and clock) */ + ret = cryp_enable_power(device_data->dev, device_data, false); + if (ret) { + dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__); + goto out_clk; + } + + cryp_error = cryp_check(device_data); + if (cryp_error != 0) { + dev_err(dev, "[%s]: cryp_init() failed!", __func__); + ret = -EINVAL; + goto out_power; + } + + cryp_error = cryp_configure_protection(device_data, &prot); + if (cryp_error != 0) { + dev_err(dev, "[%s]: cryp_configure_protection() failed!", + __func__); + ret = -EINVAL; + goto out_power; + } + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) { + dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable", + __func__); + goto out_power; + } + + ret = request_irq(res_irq->start, + cryp_interrupt_handler, + 0, + "cryp1", + device_data); + if (ret) { + dev_err(dev, "[%s]: Unable to request IRQ", __func__); + goto out_power; + } + + init_completion(&device_data->cryp_irq_complete); + + if (cryp_mode == CRYP_MODE_DMA) + cryp_dma_setup_channel(device_data, dev); + + platform_set_drvdata(pdev, device_data); + + /* Put the new device into the device list... */ + klist_add_tail(&device_data->list_node, &driver_data.device_list); + + /* ... and signal that a new device is available. 
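+	 * Each successful probe does one up() on device_allocation, so the
+	 * semaphore count equals the number of usable CRYP devices;
+	 * cryp_get_device_data() takes a device with down_interruptible().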
*/ + up(&driver_data.device_allocation); + + ret = cryp_algs_register_all(); + if (ret) { + dev_err(dev, "[%s]: cryp_algs_register_all() failed!", + __func__); + goto out_power; + } + + if (cryp_disable_power(&pdev->dev, device_data, false)) + dev_err(dev, "[%s]: cryp_disable_power() failed!", __func__); + + return 0; + +out_power: + cryp_disable_power(&pdev->dev, device_data, false); + +out_clk: + clk_put(device_data->clk); + +out_regulator: + regulator_put(device_data->pwr_regulator); + +out_unmap: + iounmap(device_data->base); + +out_free_mem: + release_mem_region(res->start, resource_size(res)); + +out_kfree: + kfree(device_data); +out: + return ret; +} + +static int u8500_cryp_remove(struct platform_device *pdev) +{ + struct resource *res = NULL; + struct resource *res_irq = NULL; + struct cryp_device_data *device_data; + + dev_dbg(&pdev->dev, "[%s]", __func__); + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", + __func__); + return -ENOMEM; + } + + /* Try to decrease the number of available devices. */ + if (down_trylock(&driver_data.device_allocation)) + return -EBUSY; + + /* Check that the device is free */ + spin_lock(&device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (device_data->current_ctx) { + /* The device is busy */ + spin_unlock(&device_data->ctx_lock); + /* Return the device to the pool. */ + up(&driver_data.device_allocation); + return -EBUSY; + } + + spin_unlock(&device_data->ctx_lock); + + /* Remove the device from the list */ + if (klist_node_attached(&device_data->list_node)) + klist_remove(&device_data->list_node); + + /* If this was the last device, remove the services */ + if (list_empty(&driver_data.device_list.k_list)) + cryp_algs_unregister_all(); + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", + __func__); + else { + disable_irq(res_irq->start); + free_irq(res_irq->start, device_data); + } + + if (cryp_disable_power(&pdev->dev, device_data, false)) + dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", + __func__); + + clk_put(device_data->clk); + regulator_put(device_data->pwr_regulator); + + iounmap(device_data->base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) + release_mem_region(res->start, res->end - res->start + 1); + + kfree(device_data); + + return 0; +} + +static void u8500_cryp_shutdown(struct platform_device *pdev) +{ + struct resource *res_irq = NULL; + struct cryp_device_data *device_data; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", + __func__); + return; + } + + /* Check that the device is free */ + spin_lock(&device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (!device_data->current_ctx) { + if (down_trylock(&driver_data.device_allocation)) + dev_dbg(&pdev->dev, "[%s]: Cryp still in use!" + "Shutting down anyway...", __func__); + /** + * (Allocate the device) + * Need to set this to non-null (dummy) value, + * to avoid usage if context switching. 
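
The driver_data.device_allocation semaphore used above implements a small device pool: it is initialised to zero (no devices), each successful probe publishes one device with up(), and remove() reclaims one with down_trylock() so that it never sleeps while a device is still busy. A sketch of the three operations; the names are illustrative, only the <linux/semaphore.h> primitives are assumed.

#include <linux/semaphore.h>

static struct semaphore example_pool;

static void example_pool_init(void)
{
	sema_init(&example_pool, 0);		/* pool starts empty */
}

static void example_publish_device(void)
{
	up(&example_pool);			/* one more device can be claimed */
}

static int example_claim_device_nonblocking(void)
{
	/* Returns non-zero if no device could be claimed without sleeping. */
	return down_trylock(&example_pool);
}
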
+ */ + device_data->current_ctx++; + } + spin_unlock(&device_data->ctx_lock); + + /* Remove the device from the list */ + if (klist_node_attached(&device_data->list_node)) + klist_remove(&device_data->list_node); + + /* If this was the last device, remove the services */ + if (list_empty(&driver_data.device_list.k_list)) + cryp_algs_unregister_all(); + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", + __func__); + else { + disable_irq(res_irq->start); + free_irq(res_irq->start, device_data); + } + + if (cryp_disable_power(&pdev->dev, device_data, false)) + dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", + __func__); + +} + +static int u8500_cryp_suspend(struct platform_device *pdev, pm_message_t state) +{ + int ret; + struct cryp_device_data *device_data; + struct resource *res_irq; + struct cryp_ctx *temp_ctx = NULL; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + /* Handle state? */ + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", + __func__); + return -ENOMEM; + } + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", + __func__); + else + disable_irq(res_irq->start); + + spin_lock(&device_data->ctx_lock); + if (!device_data->current_ctx) + device_data->current_ctx++; + spin_unlock(&device_data->ctx_lock); + + if (device_data->current_ctx == ++temp_ctx) { + if (down_interruptible(&driver_data.device_allocation)) + dev_dbg(&pdev->dev, "[%s]: down_interruptible() " + "failed", __func__); + ret = cryp_disable_power(&pdev->dev, device_data, false); + + } else + ret = cryp_disable_power(&pdev->dev, device_data, true); + + if (ret) + dev_err(&pdev->dev, "[%s]: cryp_disable_power()", __func__); + + return ret; +} + +static int u8500_cryp_resume(struct platform_device *pdev) +{ + int ret = 0; + struct cryp_device_data *device_data; + struct resource *res_irq; + struct cryp_ctx *temp_ctx = NULL; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", + __func__); + return -ENOMEM; + } + + spin_lock(&device_data->ctx_lock); + if (device_data->current_ctx == ++temp_ctx) + device_data->current_ctx = NULL; + spin_unlock(&device_data->ctx_lock); + + + if (!device_data->current_ctx) + up(&driver_data.device_allocation); + else + ret = cryp_enable_power(&pdev->dev, device_data, true); + + if (ret) + dev_err(&pdev->dev, "[%s]: cryp_enable_power() failed!", + __func__); + else { + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (res_irq) + enable_irq(res_irq->start); + } + + return ret; +} + +static struct platform_driver cryp_driver = { + .probe = u8500_cryp_probe, + .remove = u8500_cryp_remove, + .shutdown = u8500_cryp_shutdown, + .suspend = u8500_cryp_suspend, + .resume = u8500_cryp_resume, + .driver = { + .owner = THIS_MODULE, + .name = "cryp1" + } +}; + +static int __init u8500_cryp_mod_init(void) +{ + pr_debug("[%s] is called!", __func__); + + klist_init(&driver_data.device_list, NULL, NULL); + /* Initialize the semaphore to 0 devices (locked state) */ + sema_init(&driver_data.device_allocation, 0); + return platform_driver_register(&cryp_driver); +} + +static void __exit u8500_cryp_mod_fini(void) +{ + pr_debug("[%s] is called!", __func__); + platform_driver_unregister(&cryp_driver); + return; +} + 
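
In u8500_cryp_shutdown() and u8500_cryp_suspend() above, an idle device is marked as claimed by bumping current_ctx from NULL to a dummy non-NULL value (current_ctx++), and suspend later recognises that dummy value by comparing against ++temp_ctx computed from a NULL pointer. The intent is simply "reserve the device with a sentinel that is not a real context"; an equivalent, more explicit spelling could look like the sketch below, where the sentinel object and helper are illustrative and struct cryp_ctx / struct cryp_device_data are the driver's own types.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Never dereferenced; only its address is used as a "reserved" marker. */
static struct cryp_ctx example_reserved_ctx;

static bool example_reserve_if_idle(struct cryp_device_data *device_data)
{
	bool reserved = false;

	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx) {
		device_data->current_ctx = &example_reserved_ctx;
		reserved = true;
	}
	spin_unlock(&device_data->ctx_lock);

	return reserved;
}
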
+module_init(u8500_cryp_mod_init); +module_exit(u8500_cryp_mod_fini); + +module_param(cryp_mode, int, 0); + +MODULE_DESCRIPTION("Driver for ST-Ericsson U8500 CRYP crypto engine."); + +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c new file mode 100644 index 00000000000..eacff226aa8 --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_irq.c @@ -0,0 +1,45 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. + */ + +#include +#include +#include + +#include "cryp.h" +#include "cryp_p.h" +#include "cryp_irq.h" +#include "cryp_irqp.h" + +void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src) +{ + u32 i; + + dev_dbg(device_data->dev, "[%s]", __func__); + + i = readl(&device_data->base->imsc); + set_bit(irq_src, (void *)&i); + writel(i, &device_data->base->imsc); +} + +void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src) +{ + u32 i; + + dev_dbg(device_data->dev, "[%s]", __func__); + + i = readl(&device_data->base->imsc); + clear_bit(irq_src, (void *)&i); + writel(i, &device_data->base->imsc); +} + +bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src) +{ + return (readl(&device_data->base->mis) & irq_src) > 0; +} diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h new file mode 100644 index 00000000000..5a7837f1b8f --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_irq.h @@ -0,0 +1,31 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef _CRYP_IRQ_H_ +#define _CRYP_IRQ_H_ + +#include "cryp.h" + +enum cryp_irq_src_id { + CRYP_IRQ_SRC_INPUT_FIFO = 0x1, + CRYP_IRQ_SRC_OUTPUT_FIFO = 0x2, + CRYP_IRQ_SRC_ALL = 0x3 +}; + +/** + * M0 Funtions + */ +void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src); + +void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src); + +bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src); + +#endif /* _CRYP_IRQ_H_ */ diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h new file mode 100644 index 00000000000..5b60f887d02 --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_irqp.h @@ -0,0 +1,125 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. 
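
cryp_enable_irq_src()/cryp_disable_irq_src() above do a read-modify-write of the IMSC register through readl()/writel(), using set_bit()/clear_bit() on a stack copy. Note that set_bit() takes a bit number, while the enum cryp_irq_src_id values (0x1, 0x2, 0x3) and the pending check in cryp_pending_irq_src() treat irq_src as a bit mask; a mask-oriented version of the same read-modify-write is sketched below, assuming device_data->base points at struct cryp_register as declared in cryp_irqp.h.

#include <linux/io.h>

static void example_enable_irq_src(struct cryp_device_data *device_data,
				   u32 irq_src)
{
	u32 imsc = readl(&device_data->base->imsc);

	imsc |= irq_src;			/* unmask the requested source(s) */
	writel(imsc, &device_data->base->imsc);
}

static void example_disable_irq_src(struct cryp_device_data *device_data,
				    u32 irq_src)
{
	u32 imsc = readl(&device_data->base->imsc);

	imsc &= ~irq_src;			/* mask the requested source(s) */
	writel(imsc, &device_data->base->imsc);
}
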
+ * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef __CRYP_IRQP_H_ +#define __CRYP_IRQP_H_ + +#include "cryp_irq.h" + +/** + * + * CRYP Registers - Offset mapping + * +-----------------+ + * 00h | CRYP_CR | Configuration register + * +-----------------+ + * 04h | CRYP_SR | Status register + * +-----------------+ + * 08h | CRYP_DIN | Data In register + * +-----------------+ + * 0ch | CRYP_DOUT | Data out register + * +-----------------+ + * 10h | CRYP_DMACR | DMA control register + * +-----------------+ + * 14h | CRYP_IMSC | IMSC + * +-----------------+ + * 18h | CRYP_RIS | Raw interrupt status + * +-----------------+ + * 1ch | CRYP_MIS | Masked interrupt status. + * +-----------------+ + * Key registers + * IVR registers + * Peripheral + * Cell IDs + * + * Refer data structure for other register map + */ + +/** + * struct cryp_register + * @cr - Configuration register + * @status - Status register + * @din - Data input register + * @din_size - Data input size register + * @dout - Data output register + * @dout_size - Data output size register + * @dmacr - Dma control register + * @imsc - Interrupt mask set/clear register + * @ris - Raw interrupt status + * @mis - Masked interrupt statu register + * @key_1_l - Key register 1 L + * @key_1_r - Key register 1 R + * @key_2_l - Key register 2 L + * @key_2_r - Key register 2 R + * @key_3_l - Key register 3 L + * @key_3_r - Key register 3 R + * @key_4_l - Key register 4 L + * @key_4_r - Key register 4 R + * @init_vect_0_l - init vector 0 L + * @init_vect_0_r - init vector 0 R + * @init_vect_1_l - init vector 1 L + * @init_vect_1_r - init vector 1 R + * @cryp_unused1 - unused registers + * @itcr - Integration test control register + * @itip - Integration test input register + * @itop - Integration test output register + * @cryp_unused2 - unused registers + * @periphId0 - FE0 CRYP Peripheral Identication Register + * @periphId1 - FE4 + * @periphId2 - FE8 + * @periphId3 - FEC + * @pcellId0 - FF0 CRYP PCell Identication Register + * @pcellId1 - FF4 + * @pcellId2 - FF8 + * @pcellId3 - FFC + */ +struct cryp_register { + u32 cr; /* Configuration register */ + u32 status; /* Status register */ + u32 din; /* Data input register */ + u32 din_size; /* Data input size register */ + u32 dout; /* Data output register */ + u32 dout_size; /* Data output size register */ + u32 dmacr; /* Dma control register */ + u32 imsc; /* Interrupt mask set/clear register */ + u32 ris; /* Raw interrupt status */ + u32 mis; /* Masked interrupt statu register */ + + u32 key_1_l; /*Key register 1 L */ + u32 key_1_r; /*Key register 1 R */ + u32 key_2_l; /*Key register 2 L */ + u32 key_2_r; /*Key register 2 R */ + u32 key_3_l; /*Key register 3 L */ + u32 key_3_r; /*Key register 3 R */ + u32 key_4_l; /*Key register 4 L */ + u32 key_4_r; /*Key register 4 R */ + + u32 init_vect_0_l; /*init vector 0 L */ + u32 init_vect_0_r; /*init vector 0 R */ + u32 init_vect_1_l; /*init vector 1 L */ + u32 init_vect_1_r; /*init vector 1 R */ + + u32 cryp_unused1[(0x80 - 0x58) / sizeof(u32)]; /* unused registers */ + u32 itcr; /*Integration test control register */ + u32 itip; /*Integration test input register */ + u32 itop; /*Integration test output register */ + u32 cryp_unused2[(0xFE0 - 0x8C) / sizeof(u32)]; /* unused registers */ + + u32 periphId0; /* FE0 CRYP Peripheral Identication Register */ + u32 periphId1; /* FE4 */ + u32 periphId2; /* FE8 */ + u32 periphId3; /* FEC */ + + u32 pcellId0; /* FF0 CRYP PCell Identication Register */ + u32 pcellId1; /* FF4 */ + u32 
pcellId2; /* FF8 */ + u32 pcellId3; /* FFC */ +}; + +#endif diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h new file mode 100644 index 00000000000..966de4633cc --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_p.h @@ -0,0 +1,113 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson. + * Author: Jonas Linde for ST-Ericsson. + * Author: Joakim Bech for ST-Ericsson. + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef _CRYP_P_H_ +#define _CRYP_P_H_ + +#include +#include + +#include "cryp.h" +#include "cryp_irqp.h" + +/** + * Generic Macros + */ +#define CRYP_SET_BITS(reg_name, mask) \ + writel((readl(reg_name) | mask), reg_name) + +#define CRYP_WRITE_BIT(reg_name, val, mask) \ + writel(((readl(reg_name) & ~(mask)) | ((val) & (mask))), reg_name) + +#define CRYP_TEST_BITS(reg_name, val) \ + (readl(reg_name) & (val)) + +#define CRYP_PUT_BITS(reg, val, shift, mask) \ + writel(((readl(reg) & ~(mask)) | \ + (((u32)val << shift) & (mask))), reg) + +/** + * CRYP specific Macros + */ +#define CRYP_PERIPHERAL_ID0 0xE3 +#define CRYP_PERIPHERAL_ID1 0x05 +#define CRYP_PERIPHERAL_ID2 0x28 +#define CRYP_PERIPHERAL_ID3 0x00 + +#define CRYP_PCELL_ID0 0x0D +#define CRYP_PCELL_ID1 0xF0 +#define CRYP_PCELL_ID2 0x05 +#define CRYP_PCELL_ID3 0xB1 + +/** + * CRYP register default values + */ +#define MAX_DEVICE_SUPPORT 2 +#define CRYP_CR_DEFAULT 0x0002 +#define CRYP_CR_FFLUSH BIT(14) +#define CRYP_DMACR_DEFAULT 0x0 +#define CRYP_IMSC_DEFAULT 0x0 +#define CRYP_DIN_DEFAULT 0x0 +#define CRYP_DOUT_DEFAULT 0x0 +#define CRYP_KEY_DEFAULT 0x0 +#define CRYP_INIT_VECT_DEFAULT 0x0 + +/** + * CRYP Control register specific mask + */ +#define CRYP_SECURE_MASK BIT(0) +#define CRYP_PRLG_MASK BIT(1) +#define CRYP_ENC_DEC_MASK BIT(2) +#define CRYP_SR_BUSY_MASK BIT(4) +#define CRYP_KEY_ACCESS_MASK BIT(10) +#define CRYP_KSE_MASK BIT(11) +#define CRYP_START_MASK BIT(12) +#define CRYP_INIT_MASK BIT(13) +#define CRYP_FIFO_FLUSH_MASK BIT(14) +#define CRYP_CRYPEN_MASK BIT(15) +#define CRYP_INFIFO_READY_MASK (BIT(0) | BIT(1)) +#define CRYP_ALGOMODE_MASK (BIT(5) | BIT(4) | BIT(3)) +#define CRYP_DATA_TYPE_MASK (BIT(7) | BIT(6)) +#define CRYP_KEY_SIZE_MASK (BIT(9) | BIT(8)) + +/** + * Bit position used while setting bits in register + */ +#define CRYP_PRLG_POS 1 +#define CRYP_ENC_DEC_POS 2 +#define CRYP_ALGOMODE_POS 3 +#define CRYP_SR_BUSY_POS 4 +#define CRYP_DATA_TYPE_POS 6 +#define CRYP_KEY_SIZE_POS 8 +#define CRYP_KEY_ACCESS_POS 10 +#define CRYP_KSE_POS 11 +#define CRYP_START_POS 12 +#define CRYP_INIT_POS 13 +#define CRYP_CRYPEN_POS 15 + +/** + * CRYP Status register + */ +#define CRYP_BUSY_STATUS_MASK BIT(4) + +/** + * CRYP PCRs------PC_NAND control register + * BIT_MASK + */ +#define CRYP_DMA_REQ_MASK (BIT(1) | BIT(0)) +#define CRYP_DMA_REQ_MASK_POS 0 + + +struct cryp_system_context { + /* CRYP Register structure */ + struct cryp_register *p_cryp_reg[MAX_DEVICE_SUPPORT]; +}; + +#endif diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile old mode 100755 new mode 100644 index 613330a4ca4..6194da8eec8 --- a/drivers/crypto/ux500/hash/Makefile +++ b/drivers/crypto/ux500/hash/Makefile @@ -1,8 +1,10 @@ - -ifeq ($(CONFIG_CRYPTO_DEV_UX500_DEBUG_INFO),y) - EXTRA_CFLAGS += -D__DEBUG -else - EXTRA_CFLAGS += -D__RELEASE +# +# Copyright (C) ST-Ericsson SA 2010 +# Author: Shujuan Chen (shujuan.chen@stericsson.com) +# License terms: GNU 
General Public License (GPL) version 2 +# +ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG +CFLAGS_hash_core.o := -DDEBUG endif obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += u8500_hash.o diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h old mode 100755 new mode 100644 index e1f7c2eb60b..1c3dd5705fb --- a/drivers/crypto/ux500/hash/hash_alg.h +++ b/drivers/crypto/ux500/hash/hash_alg.h @@ -1,25 +1,19 @@ -#ifndef _HASH_ALG_H -#define _HASH_ALG_H /* - * Copyright (C) 2010 ST-Ericsson. - * Copyright (C) 2010 STMicroelectronics. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen (shujuan.chen@stericsson.com) + * Author: Joakim Bech (joakim.xx.bech@stericsson.com) + * License terms: GNU General Public License (GPL) version 2 */ -#ifdef __cplusplus -extern "C" { -#endif +#ifndef _HASH_ALG_H +#define _HASH_ALG_H #include /* Number of bytes the message digest */ #define HASH_MSG_DIGEST_SIZE 32 #define HASH_BLOCK_SIZE 64 - -#define __HASH_ENHANCED +#define HASH_SHA1_DIGEST_SIZE 20 +#define HASH_SHA2_DIGEST_SIZE 32 /* Version defines */ #define HASH_HCL_VERSION_ID 1 @@ -106,42 +100,42 @@ extern "C" { #define HASH_CELL_ID2 0x05 #define HASH_CELL_ID3 0xB1 -#define HASH_SET_DIN(val) HCL_WRITE_REG(g_sys_ctx.registry[hid]->din, (val)) +#define HASH_SET_DIN(val) HCL_WRITE_REG( \ + sys_ctx_g.registry[HASH_DEVICE_ID_1]->din, (val)) #define HASH_INITIALIZE \ HCL_WRITE_BITS( \ - g_sys_ctx.registry[hid]->cr, \ + sys_ctx_g.registry[HASH_DEVICE_ID_1]->cr, \ 0x01 << HASH_CR_INIT_POS, \ HASH_CR_INIT_MASK) #define HASH_SET_DATA_FORMAT(data_format) \ HCL_WRITE_BITS( \ - g_sys_ctx.registry[hid]->cr, \ + sys_ctx_g.registry[HASH_DEVICE_ID_1]->cr, \ (u32) (data_format) << HASH_CR_DATAFORM_POS, \ HASH_CR_DATAFORM_MASK) #define HASH_GET_HX(pos) \ - HCL_READ_REG(g_sys_ctx.registry[hid]->hx[pos]) - -#define HASH_SET_HX(pos, val) \ - HCL_WRITE_REG(g_sys_ctx.registry[hid]->hx[pos], (val)); + HCL_READ_REG(sys_ctx_g.registry[HASH_DEVICE_ID_1]->hx[pos]) #define HASH_SET_NBLW(val) \ HCL_WRITE_BITS( \ - g_sys_ctx.registry[hid]->str, \ + sys_ctx_g.registry[HASH_DEVICE_ID_1]->str, \ (u32) (val) << HASH_STR_NBLW_POS, \ HASH_STR_NBLW_MASK) #define HASH_SET_DCAL \ HCL_WRITE_BITS( \ - g_sys_ctx.registry[hid]->str, \ + sys_ctx_g.registry[HASH_DEVICE_ID_1]->str, \ 0x01 << HASH_STR_DCAL_POS, \ HASH_STR_DCAL_MASK) +#define HASH_BLOCK_BYTE_SIZE 64 + /** * struct uint64 - Structure to handle 64 bits integers. * @high_word: Most significant bits - * @high_word: Least significant bits + * @low_word: Least significant bits * * Used to handle 64 bits integers. */ @@ -184,19 +178,19 @@ struct hash_register { u32 str; u32 hx[8]; - u32 padding0[(0x080 - 0x02C) >> 2]; + u32 padding0[(0x080 - 0x02C) / sizeof(u32)]; u32 itcr; u32 itip; u32 itop; - u32 padding1[(0x0F8 - 0x08C) >> 2]; + u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)]; u32 csfull; u32 csdatain; u32 csrx[HASH_CSR_COUNT]; - u32 padding2[(0xFE0 - 0x1D0) >> 2]; + u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)]; u32 periphid0; u32 periphid1; @@ -249,7 +243,14 @@ struct hash_state { * @state: State of the hash device */ struct hash_system_context { - /* Pointer to HASH registers structure */ + /* + * Pointer to HASH registers structure. 
We know that this gives a + * checkpatch warning and in the current design it needs to be a + * volatile. We will change it when we will rewrite the driver similar + * to how we have done in cryp-part. We have also read + * Documentation/volatile-considered-harmful.txt as checkpatch tell + * us to do. + */ volatile struct hash_register *registry[MAX_HASH_DEVICE]; /* State of HASH device */ @@ -280,16 +281,6 @@ enum hash_data_format { HASH_DATA_1_BIT = 0x3 }; -/** - * enum hash_device_state - Device state - * @DISABLE: Disable the hash hardware - * @ENABLE: Enable the hash hardware - */ -enum hash_device_state { - DISABLE = 0, - ENABLE = 1 -}; - /** * struct hash_protection_config - Device protection configuration. * @privilege_access: FIXME, add comment. @@ -300,69 +291,6 @@ struct hash_protection_config { int secure_access; }; -/** - * enum hash_input_status - Data Input flag status. - * @HASH_DIN_EMPTY: Indicates that nothing is in data registers - * @HASH_DIN_FULL: Indicates that data registers are full - */ -enum hash_input_status { - HASH_DIN_EMPTY = 0, - HASH_DIN_FULL = 1 -}; - -/** - * Number of words already pushed - */ -enum hash_nbw_pushed { - HASH_NBW_00 = 0x00, - HASH_NBW_01 = 0x01, - HASH_NBW_02 = 0x02, - HASH_NBW_03 = 0x03, - HASH_NBW_04 = 0x04, - HASH_NBW_05 = 0x05, - HASH_NBW_06 = 0x06, - HASH_NBW_07 = 0x07, - HASH_NBW_08 = 0x08, - HASH_NBW_09 = 0x09, - HASH_NBW_10 = 0x0A, - HASH_NBW_11 = 0x0B, - HASH_NBW_12 = 0x0C, - HASH_NBW_13 = 0x0D, - HASH_NBW_14 = 0x0E, - HASH_NBW_15 = 0x0F -}; - -/** - * struct hash_device_status - Device status for DINF, NBW, and NBLW bit - * fields. - * @dinf_status: HASH data in full flag - * @nbw_status: Number of words already pushed - * @nblw_status: Number of Valid Bits Last Word of the Message - */ -struct hash_device_status { - int dinf_status; - int nbw_status; - u8 nblw_status; -}; - -/** - * enum hash_dma_request - Enumeration for HASH DMA request types. - */ -enum hash_dma_request { - HASH_DISABLE_DMA_REQ = 0x0, - HASH_ENABLE_DMA_REQ = 0x1 -}; - -/** - * enum hash_digest_cal - Enumeration for digest calculation. - * @HASH_DISABLE_DCAL: Indicates that DCAL bit is not set/used. - * @HASH_ENABLE_DCAL: Indicates that DCAL bit is set/used. - */ -enum hash_digest_cal { - HASH_DISABLE_DCAL = 0x0, - HASH_ENABLE_DCAL = 0x1 -}; - /** * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm * @HASH_ALGO_SHA1: Indicates that SHA1 is used. @@ -383,94 +311,61 @@ enum hash_op { HASH_OPER_MODE_HMAC = 0x1 }; -/** - * enum hash_key_type - Enumeration for selecting between long and short key. - * @HASH_SHORT_KEY: Key used is shorter or equal to block size (64 bytes) - * @HASH_LONG_KEY: Key used is greater than block size (64 bytes) - */ -enum hash_key_type { - HASH_SHORT_KEY = 0x0, - HASH_LONG_KEY = 0x1 -}; - /** * struct hash_config - Configuration data for the hardware * @data_format: Format of data entered into the hash data in register * @algorithm: Algorithm selection bit * @oper_mode: Operating mode selection bit - * @hmac_key: Long key selection bit HMAC mode */ struct hash_config { int data_format; int algorithm; int oper_mode; - int hmac_key; }; - /** - * enum hash_error - Error codes for hash. + * enum hash_rv - Return values / error codes for hash. 
*/ -enum hash_error { +enum hash_rv { HASH_OK = 0, HASH_MSG_LENGTH_OVERFLOW, - HASH_INTERNAL_ERROR, - HASH_NOT_CONFIGURED, - HASH_REQUEST_PENDING, - HASH_REQUEST_NOT_APPLICABLE, HASH_INVALID_PARAMETER, - HASH_UNSUPPORTED_FEATURE, HASH_UNSUPPORTED_HW }; -int hash_init_base_address(int hash_device_id, t_logical_address base_address); - -int HASH_GetVersion(t_version *p_version); - -int HASH_Reset(int hash_devive_id); - -int HASH_ConfigureDmaRequest(int hash_device_id, int request_state); - -int HASH_ConfigureLastValidBits(int hash_device_id, u8 nblw_val); - -int HASH_ConfigureDigestCal(int hash_device_id, int dcal_state); +/** + * struct hash_ctx - The context used for hash calculations. + * @key: The key used in the operation + * @keylen: The length of the key + * @updated: Indicates if hardware is initialized for new operations + * @state: The state of the current calculations + * @config: The current configuration + */ +struct hash_ctx { + u8 key[HASH_BLOCK_BYTE_SIZE]; + u32 keylen; + u8 updated; + struct hash_state state; + struct hash_config config; +}; -int HASH_ConfigureProtection(int hash_device_id, - struct hash_protection_config - *p_protect_config); +int hash_init_base_address(int hash_device_id, t_logical_address base_address); int hash_setconfiguration(int hash_device_id, struct hash_config *p_config); -int hash_begin(int hash_device_id); - -int hash_get_digest(int hash_device_id, u8 digest[HASH_MSG_DIGEST_SIZE]); +void hash_begin(struct hash_ctx *ctx); -int HASH_ClockGatingOff(int hash_device_id); +void hash_get_digest(int hid, u8 *digest, int algorithm); -struct hash_device_status HASH_GetDeviceStatus(int hash_device_id); - -t_bool HASH_IsDcalOngoing(int hash_device_id); - -int hash_hw_update(int hash_device_id, +int hash_hw_update(struct shash_desc *desc, + int hash_device_id, const u8 *p_data_buffer, u32 msg_length); -int hash_end(int hash_device_id, u8 digest[HASH_MSG_DIGEST_SIZE]); - -int hash_compute(int hash_device_id, - const u8 *p_data_buffer, - u32 msg_length, - struct hash_config *p_hash_config, - u8 digest[HASH_MSG_DIGEST_SIZE]); - -int hash_end_key(int hash_device_id); +int hash_end(struct hash_ctx *ctx, u8 digest[HASH_MSG_DIGEST_SIZE]); int hash_save_state(int hash_device_id, struct hash_state *state); int hash_resume_state(int hash_device_id, const struct hash_state *state); -#ifdef __cplusplus -} -#endif #endif - diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c old mode 100755 new mode 100644 index fd5f8a870bf..a2e4ebd8ac1 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -1,14 +1,13 @@ /* * Cryptographic API. - * * Support for Nomadik hardware crypto engine. - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * + + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen for ST-Ericsson + * Author: Joakim Bech for ST-Ericsson + * Author: Berne Hebark for ST-Ericsson. + * Author: Niklas Hernaeus for ST-Ericsson. 
+ * License terms: GNU General Public License (GPL) version 2 */ #include @@ -20,85 +19,53 @@ #include #include #include - #include +#include +#include + #include #include -#include #include #include "hash_alg.h" #define DRIVER_NAME "DRIVER HASH" -/* enables/disables debug msgs */ +/* Enable/Disables debug msgs */ #define DRIVER_DEBUG 1 #define DRIVER_DEBUG_PFX DRIVER_NAME -#define DRIVER_DBG KERN_ERR +#define DRIVER_DBG KERN_DEBUG #define MAX_HASH_DIGEST_BYTE_SIZE 32 -#define HASH_BLOCK_BYTE_SIZE 64 -#define HASH_ACC_SYNC_CONTROL -#ifdef HASH_ACC_SYNC_CONTROL static struct mutex hash_hw_acc_mutex; -#endif -int debug; -static int mode; -static int contextsaving; -static struct hash_system_context g_sys_ctx; +static int debug; +static struct hash_system_context sys_ctx_g; +static struct hash_driver_data *internal_drv_data; /** * struct hash_driver_data - IO Base and clock. - * @base: The IO base for the block - * @clk: FIXME, add comment + * @base: The IO base for the block. + * @clk: The clock. + * @regulator: The current regulator. + * @power_state: TRUE = power state on, FALSE = power state off. + * @power_state_mutex: Mutex for power_state. + * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx. */ struct hash_driver_data { void __iomem *base; + struct device *dev; struct clk *clk; -}; - -/** - * struct hash_ctx - The context used for hash calculations. - * @key: The key used in the operation - * @keylen: The length of the key - * @updated: Indicates if hardware is initialized for new operations - * @state: The state of the current calculations - * @config: The current configuration - */ -struct hash_ctx { - u8 key[HASH_BLOCK_BYTE_SIZE]; - u32 keylen; - u8 updated; - struct hash_state state; - struct hash_config config; -}; - -/** - * struct hash_tfm_ctx - Transform context - * @key: The key stored in the transform context - * @keylen: The length of the key in the transform context - */ -struct hash_tfm_ctx { - u8 key[HASH_BLOCK_BYTE_SIZE]; - u32 keylen; + struct regulator *regulator; + bool power_state; + struct mutex power_state_mutex; + bool restore_dev_state; }; /* Declaration of functions */ static void hash_messagepad(int hid, const u32 *message, u8 index_bytes); -/** - * hexdump - Dumps buffers in hex. - * @buf: The buffer to dump - * @len: The length of the buffer - */ -static void hexdump(unsigned char *buf, unsigned int len) -{ - print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, - 16, 1, buf, len, false); -} - /** * clear_reg_str - Clear the registry hash_str. * @hid: Hardware device ID @@ -107,83 +74,86 @@ static void hexdump(unsigned char *buf, unsigned int len) */ static inline void clear_reg_str(int hid) { - /* We will only clear the valid registers and not the reserved */ - g_sys_ctx.registry[hid]->str &= ~HASH_STR_DCAL_MASK; - g_sys_ctx.registry[hid]->str &= ~HASH_STR_NBLW_MASK; + /* + * We will only clear NBLW since writing 0 to DCAL is done by the + * hardware + */ + sys_ctx_g.registry[hid]->str &= ~HASH_STR_NBLW_MASK; } -/** - * write_nblw - Writes the number of valid bytes to nblw. - * @hid: Hardware device ID - * @bytes: The number of valid bytes in last word of a message - * - * Note that this function only writes, i.e. it does not clear the registry - * before it writes the new data. 
- */ -static inline void write_nblw(int hid, int bytes) +static int hash_disable_power( + struct device *dev, + struct hash_driver_data *device_data, + bool save_device_state) { - g_sys_ctx.registry[hid]->str |= - ((bytes * 8) & HASH_STR_NBLW_MASK); -} + int ret = 0; -/** - * write_dcal - Write/set the dcal bit. - * @hid: Hardware device ID - */ -static inline void write_dcal(int hid) -{ - g_sys_ctx.registry[hid]->str |= (1 << HASH_STR_DCAL_POS); -} + dev_dbg(dev, "[%s]", __func__); -/** - * pad_message - Function that pads a message. - * @hid: Hardware device ID - * - * FIXME: This function should be replaced. - */ -static inline void pad_message(int hid) -{ - hash_messagepad(hid, g_sys_ctx.state[hid].buffer, - g_sys_ctx.state[hid].index); + mutex_lock(&device_data->power_state_mutex); + if (!device_data->power_state) + goto out; + + if (save_device_state) { + hash_save_state(HASH_DEVICE_ID_1, + &sys_ctx_g.state[HASH_DEVICE_ID_1]); + device_data->restore_dev_state = true; + } + + clk_disable(device_data->clk); + ret = regulator_disable(device_data->regulator); + if (ret) + dev_err(dev, "[%s]: " + "regulator_disable() failed!", + __func__); + + device_data->power_state = false; + +out: + mutex_unlock(&device_data->power_state_mutex); + + return ret; } -/** - * write_key - Writes the key to the hardware registries. - * @hid: Hardware device ID - * @key: The key used in the operation - * @keylen: The length of the key - * - * Note that in this function we DO NOT write to the NBLW registry even though - * the hardware reference manual says so. There must be incorrect information in - * the manual or there must be a bug in the state machine in the hardware. - */ -static void write_key(int hid, const u8 *key, u32 keylen) +static int hash_enable_power( + struct device *dev, + struct hash_driver_data *device_data, + bool restore_device_state) { - u32 word = 0; - clear_reg_str(hid); + int ret = 0; + + dev_dbg(dev, "[%s]", __func__); - while (keylen >= 4) { - word = ((u32) (key[3] & 255) << 24) | - ((u32) (key[2] & 255) << 16) | - ((u32) (key[1] & 255) << 8) | - ((u32) (key[0] & 255)); + mutex_lock(&device_data->power_state_mutex); + if (!device_data->power_state) { + ret = regulator_enable(device_data->regulator); + if (ret) { + dev_err(dev, "[%s]: regulator_enable() failed!", + __func__); + goto out; + } - HASH_SET_DIN(word); - keylen -= 4; - key += 4; + ret = clk_enable(device_data->clk); + if (ret) { + dev_err(dev, "[%s]: clk_enable() failed!", + __func__); + regulator_disable(device_data->regulator); + goto out; + } + device_data->power_state = true; } - /* This takes care of the remaining bytes on the last word */ - if (keylen) { - word = 0; - while (keylen) { - word |= (key[keylen - 1] << (8 * (keylen - 1))); - keylen--; + if (device_data->restore_dev_state) { + if (restore_device_state) { + device_data->restore_dev_state = false; + hash_resume_state(HASH_DEVICE_ID_1, + &sys_ctx_g.state[HASH_DEVICE_ID_1]); } - HASH_SET_DIN(word); } +out: + mutex_unlock(&device_data->power_state_mutex); - write_dcal(hid); + return ret; } /** @@ -196,32 +166,20 @@ static void write_key(int hid, const u8 *key, u32 keylen) static int init_hash_hw(struct shash_desc *desc) { int ret = 0; - int hash_error = HASH_OK; + int hash_rv; struct hash_ctx *ctx = shash_desc_ctx(desc); - stm_dbg(debug, "[init_hash_hw] (ctx=0x%x)!", (u32)ctx); + pr_debug("[init_hash_hw] (ctx=0x%x)!", (u32)ctx); - hash_error = hash_setconfiguration(HASH_DEVICE_ID_1, &ctx->config); - if (hash_error != HASH_OK) { - 
stm_error("hash_setconfiguration() failed!"); - ret = -1; - goto out; + hash_rv = hash_setconfiguration(HASH_DEVICE_ID_1, &ctx->config); + if (hash_rv != HASH_OK) { + pr_err("hash_setconfiguration() failed!"); + ret = -EPERM; + return ret; } - hash_error = hash_begin(HASH_DEVICE_ID_1); - if (hash_error != HASH_OK) { - stm_error("hash_begin() failed!"); - ret = -1; - goto out; - } - - if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) { - stm_dbg(debug, "[init_hash_hw] update key=0x%0x, len=%d", - (u32) ctx->key, ctx->keylen); - write_key(HASH_DEVICE_ID_1, ctx->key, ctx->keylen); - } + hash_begin(ctx); -out: return ret; } @@ -229,22 +187,13 @@ out: * hash_init - Common hash init function for SHA1/SHA2 (SHA256). * @desc: The hash descriptor for the job * - * Initialize structures and copy the key from the transform context to the - * descriptor context if the mode is HMAC. + * Initialize structures. */ static int hash_init(struct shash_desc *desc) { struct hash_ctx *ctx = shash_desc_ctx(desc); - struct hash_tfm_ctx *tfm_ctx = crypto_tfm_ctx(&desc->tfm->base); - - stm_dbg(debug, "[hash_init]: (ctx=0x%x)!", (u32)ctx); - if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) { - if (tfm_ctx->key) { - memcpy(ctx->key, tfm_ctx->key, tfm_ctx->keylen); - ctx->keylen = tfm_ctx->keylen; - } - } + pr_debug("[hash_init]: (ctx=0x%x)!", (u32)ctx); memset(&ctx->state, 0, sizeof(struct hash_state)); ctx->updated = 0; @@ -262,60 +211,23 @@ static int hash_update(struct shash_desc *desc, const u8 *data, unsigned int len) { int ret = 0; - int hash_error = HASH_OK; - struct hash_ctx *ctx = shash_desc_ctx(desc); + int hash_rv = HASH_OK; - stm_dbg(debug, "[hash_update]: (ctx=0x%x, data=0x%x, len=%d)!", - (u32)ctx, (u32)data, len); + pr_debug("[hash_update]: (data=0x%x, len=%d)!", + (u32)data, len); -#ifdef HASH_ACC_SYNC_CONTROL mutex_lock(&hash_hw_acc_mutex); -#endif - - if (!ctx->updated) { - ret = init_hash_hw(desc); - if (ret) { - stm_error("init_hash_hw() failed!"); - goto out; - } - } - - if (contextsaving) { - if (ctx->updated) { - hash_error = - hash_resume_state(HASH_DEVICE_ID_1, &ctx->state); - if (hash_error != HASH_OK) { - stm_error("hash_resume_state() failed!"); - ret = -1; - goto out; - } - } - } /* NOTE: The length of the message is in the form of number of bits */ - hash_error = hash_hw_update(HASH_DEVICE_ID_1, data, len * 8); - if (hash_error != HASH_OK) { - stm_error("hash_hw_update() failed!"); - ret = -1; + hash_rv = hash_hw_update(desc, HASH_DEVICE_ID_1, data, len * 8); + if (hash_rv != HASH_OK) { + pr_err("hash_hw_update() failed!"); + ret = -EPERM; goto out; } - if (contextsaving) { - hash_error = - hash_save_state(HASH_DEVICE_ID_1, &ctx->state); - if (hash_error != HASH_OK) { - stm_error("hash_save_state() failed!"); - ret = -1; - goto out; - } - - } - ctx->updated = 1; - out: -#ifdef HASH_ACC_SYNC_CONTROL mutex_unlock(&hash_hw_acc_mutex); -#endif return ret; } @@ -327,99 +239,60 @@ out: static int hash_final(struct shash_desc *desc, u8 *out) { int ret = 0; - int hash_error = HASH_OK; + int hash_rv = HASH_OK; struct hash_ctx *ctx = shash_desc_ctx(desc); + struct hash_driver_data *device_data = internal_drv_data; int digestsize = crypto_shash_digestsize(desc->tfm); u8 digest[HASH_MSG_DIGEST_SIZE]; - stm_dbg(debug, "[hash_final]: (ctx=0x%x)!", (u32) ctx); + pr_debug("[hash_final]: (ctx=0x%x)!", (u32) ctx); -#ifdef HASH_ACC_SYNC_CONTROL mutex_lock(&hash_hw_acc_mutex); -#endif - if (contextsaving) { - hash_error = hash_resume_state(HASH_DEVICE_ID_1, &ctx->state); + /* Enable device power (and 
clock) */ + ret = hash_enable_power(device_data->dev, device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "hash_enable_power() failed!", __func__); + goto out; + } - if (hash_error != HASH_OK) { - stm_error("hash_resume_state() failed!"); - ret = -1; - goto out; + if (!ctx->updated) { + ret = init_hash_hw(desc); + if (ret) { + pr_err("init_hash_hw() failed!"); + goto out_power; } - } + } else { + hash_rv = hash_resume_state(HASH_DEVICE_ID_1, &ctx->state); - pad_message(HASH_DEVICE_ID_1); + if (hash_rv != HASH_OK) { + pr_err("hash_resume_state() failed!"); + ret = -EPERM; + goto out_power; + } + } - if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) - write_key(HASH_DEVICE_ID_1, ctx->key, ctx->keylen); + hash_messagepad(HASH_DEVICE_ID_1, ctx->state.buffer, + ctx->state.index); - hash_error = hash_get_digest(HASH_DEVICE_ID_1, digest); + hash_get_digest(HASH_DEVICE_ID_1, digest, ctx->config.algorithm); memcpy(out, digest, digestsize); +out_power: + /* Disable power (and clock) */ + if (hash_disable_power(device_data->dev, device_data, false)) + dev_err(device_data->dev, "[%s]: " + "hash_disable_power() failed!", __func__); + out: -#ifdef HASH_ACC_SYNC_CONTROL mutex_unlock(&hash_hw_acc_mutex); -#endif return ret; } -/** - * hash_setkey - The setkey function for providing the key during HMAC - * calculations. - * @tfm: Pointer to the transform - * @key: The key used in the operation - * @keylen: The length of the key - * @alg: The algorithm to use in the operation - */ -static int hash_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen, int alg) -{ - int ret = 0; - int hash_error = HASH_OK; - - struct hash_tfm_ctx *ctx_tfm = crypto_shash_ctx(tfm); - - stm_dbg(debug, "[hash_setkey]: (ctx_tfm=0x%x, key=0x%x, keylen=%d)!", - (u32) ctx_tfm, (u32) key, keylen); - - /* Truncate the key to block size */ - if (keylen > HASH_BLOCK_BYTE_SIZE) { - struct hash_config config; - u8 digest[MAX_HASH_DIGEST_BYTE_SIZE]; - unsigned int digestsize = crypto_shash_digestsize(tfm); - - config.algorithm = alg; - config.data_format = HASH_DATA_8_BITS; - config.oper_mode = HASH_OPER_MODE_HASH; - -#ifdef HASH_ACC_SYNC_CONTROL - mutex_lock(&hash_hw_acc_mutex); -#endif - hash_error = hash_compute(HASH_DEVICE_ID_1, key, keylen * 8, - &config, digest); -#ifdef HASH_ACC_SYNC_CONTROL - mutex_unlock(&hash_hw_acc_mutex); -#endif - if (hash_error != HASH_OK) { - stm_error("Error: hash_compute() failed!"); - ret = -1; - goto out; - } - - memcpy(ctx_tfm->key, digest, digestsize); - ctx_tfm->keylen = digestsize; - } else { - memcpy(ctx_tfm->key, key, keylen); - ctx_tfm->keylen = keylen; - } - -out: - return ret; -} - /** * sha1_init - SHA1 init function. * @desc: The hash descriptor for the job @@ -428,7 +301,7 @@ static int sha1_init(struct shash_desc *desc) { struct hash_ctx *ctx = shash_desc_ctx(desc); - stm_dbg(debug, "[sha1_init]: (ctx=0x%x)!", (u32) ctx); + pr_debug("[sha1_init]: (ctx=0x%x)!", (u32) ctx); ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA1; @@ -445,7 +318,7 @@ static int sha256_init(struct shash_desc *desc) { struct hash_ctx *ctx = shash_desc_ctx(desc); - stm_dbg(debug, "[sha256_init]: (ctx=0x%x)!", (u32) ctx); + pr_debug("[sha256_init]: (ctx=0x%x)!", (u32) ctx); ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA2; @@ -454,70 +327,24 @@ static int sha256_init(struct shash_desc *desc) return hash_init(desc); } -/** - * hmac_sha1_init - SHA1 HMAC init function. 
- * @desc: The hash descriptor for the job - */ -static int hmac_sha1_init(struct shash_desc *desc) +static int hash_export(struct shash_desc *desc, void *out) { struct hash_ctx *ctx = shash_desc_ctx(desc); - stm_dbg(debug, "[hmac_sha1_init]: (ctx=0x%x)!", (u32) ctx); - - ctx->config.data_format = HASH_DATA_8_BITS; - ctx->config.algorithm = HASH_ALGO_SHA1; - ctx->config.oper_mode = HASH_OPER_MODE_HMAC; - ctx->config.hmac_key = HASH_SHORT_KEY; - - return hash_init(desc); + pr_debug("[hash_export]: (ctx=0x%X) (out=0x%X)", + (u32) ctx, (u32) out); + memcpy(out, ctx, sizeof(*ctx)); + return 0; } -/** - * hmac_sha256_init - SHA2 (SHA256) HMAC init function. - * @desc: The hash descriptor for the job - */ -static int hmac_sha256_init(struct shash_desc *desc) +static int hash_import(struct shash_desc *desc, const void *in) { struct hash_ctx *ctx = shash_desc_ctx(desc); - stm_dbg(debug, "[hmac_sha256_init]: (ctx=0x%x)!", (u32) ctx); - - ctx->config.data_format = HASH_DATA_8_BITS; - ctx->config.algorithm = HASH_ALGO_SHA2; - ctx->config.oper_mode = HASH_OPER_MODE_HMAC; - ctx->config.hmac_key = HASH_SHORT_KEY; - - return hash_init(desc); -} - -/** - * hmac_sha1_setkey - SHA1 HMAC setkey function. - * @tfm: Pointer to the transform - * @key: The key used in the operation - * @keylen: The length of the key - */ -static int hmac_sha1_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) -{ - stm_dbg(debug, "[hmac_sha1_setkey]: (tfm=0x%x, key=0x%x, keylen=%d)!", - (u32) tfm, (u32) key, keylen); - - return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); -} - -/** - * hmac_sha256_setkey - SHA2 (SHA256) HMAC setkey function. - * @tfm: Pointer to the transform - * @key: The key used in the operation - * @keylen: The length of the key - */ -static int hmac_sha256_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) -{ - stm_dbg(debug, "[hmac_sha256_setkey]: (tfm=0x%x, key=0x%x, keylen=%d)!", - (u32) tfm, (u32) key, keylen); - - return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA2); + pr_debug("[hash_import]: (ctx=0x%x) (in =0x%X)", + (u32) ctx, (u32) in); + memcpy(ctx, in, sizeof(*ctx)); + return 0; } static struct shash_alg sha1_alg = { @@ -525,16 +352,17 @@ static struct shash_alg sha1_alg = { .init = sha1_init, .update = hash_update, .final = hash_final, + .export = hash_export, + .import = hash_import, .descsize = sizeof(struct hash_ctx), + .statesize = sizeof(struct hash_ctx), .base = { - .cra_name = "sha1", - .cra_driver_name = "sha1-u8500", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST | - CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct hash_tfm_ctx), - .cra_module = THIS_MODULE, - } + .cra_name = "sha1", + .cra_driver_name = "sha1-u8500", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static struct shash_alg sha256_alg = { @@ -542,52 +370,17 @@ static struct shash_alg sha256_alg = { .init = sha256_init, .update = hash_update, .final = hash_final, + .export = hash_export, + .import = hash_import, .descsize = sizeof(struct hash_ctx), + .statesize = sizeof(struct hash_ctx), .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256-u8500", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST | - CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct hash_tfm_ctx), - .cra_module = THIS_MODULE, - } -}; - -static struct shash_alg hmac_sha1_alg = { - .digestsize = SHA1_DIGEST_SIZE, - .init = hmac_sha1_init, - .update = hash_update, - .final = hash_final, 
- .setkey = hmac_sha1_setkey, - .descsize = sizeof(struct hash_ctx), - .base = { - .cra_name = "hmac(sha1)", - .cra_driver_name = "hmac(sha1-u8500)", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST | - CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct hash_tfm_ctx), - .cra_module = THIS_MODULE, - } -}; - -static struct shash_alg hmac_sha256_alg = { - .digestsize = SHA256_DIGEST_SIZE, - .init = hmac_sha256_init, - .update = hash_update, - .final = hash_final, - .setkey = hmac_sha256_setkey, - .descsize = sizeof(struct hash_ctx), - .base = { - .cra_name = "hmac(sha256)", - .cra_driver_name = "hmac(sha256-u8500)", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST | - CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct hash_tfm_ctx), - .cra_module = THIS_MODULE, - } + .cra_name = "sha256", + .cra_driver_name = "sha256-u8500", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; /** @@ -597,155 +390,129 @@ static struct shash_alg hmac_sha256_alg = { static int u8500_hash_probe(struct platform_device *pdev) { int ret = 0; - int hash_error = HASH_OK; + int hash_rv = HASH_OK; struct resource *res = NULL; struct hash_driver_data *hash_drv_data; - stm_dbg(debug, "[u8500_hash_probe]: (pdev=0x%x)", (u32) pdev); + pr_debug("[u8500_hash_probe]: (pdev=0x%x)", (u32) pdev); - stm_dbg(debug, "[u8500_hash_probe]: Calling kzalloc()!"); + pr_debug("[u8500_hash_probe]: Calling kzalloc()!"); hash_drv_data = kzalloc(sizeof(struct hash_driver_data), GFP_KERNEL); if (!hash_drv_data) { - stm_dbg(debug, "kzalloc() failed!"); + pr_debug("kzalloc() failed!"); ret = -ENOMEM; goto out; } - stm_dbg(debug, "[u8500_hash_probe]: Calling platform_get_resource()!"); + hash_drv_data->dev = &pdev->dev; + + pr_debug("[u8500_hash_probe]: Calling platform_get_resource()!"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { - stm_dbg(debug, "platform_get_resource() failed"); + pr_debug("platform_get_resource() failed"); ret = -ENODEV; goto out_kfree; } - stm_dbg(debug, "[u8500_hash_probe]: Calling request_mem_region()!"); - res = request_mem_region(res->start, res->end - res->start + 1, - pdev->name); + pr_debug("[u8500_hash_probe]: Calling request_mem_region()!"); + res = request_mem_region(res->start, resource_size(res), pdev->name); if (res == NULL) { - stm_dbg(debug, "request_mem_region() failed"); + pr_debug("request_mem_region() failed"); ret = -EBUSY; goto out_kfree; } - stm_dbg(debug, "[u8500_hash_probe]: Calling ioremap()!"); - hash_drv_data->base = ioremap(res->start, res->end - res->start + 1); + pr_debug("[u8500_hash_probe]: Calling ioremap()!"); + hash_drv_data->base = ioremap(res->start, resource_size(res)); if (!hash_drv_data->base) { - stm_error - ("[u8500_hash] ioremap of hash1 register memory failed!"); + pr_err("[u8500_hash] " + "ioremap of hash1 register memory failed!"); ret = -ENOMEM; goto out_free_mem; } + mutex_init(&hash_drv_data->power_state_mutex); + + /* Enable power for HASH hardware block */ + hash_drv_data->regulator = regulator_get(&pdev->dev, "v-ape"); + if (IS_ERR(hash_drv_data->regulator)) { + dev_err(&pdev->dev, "[u8500_hash] " + "could not get hash regulator\n"); + ret = PTR_ERR(hash_drv_data->regulator); + hash_drv_data->regulator = NULL; + goto out_unmap; + } - stm_dbg(debug, "[u8500_hash_probe]: Calling clk_get()!"); + pr_debug("[u8500_hash_probe]: Calling clk_get()!"); /* Enable the clk for HASH1 hardware block */ hash_drv_data->clk = clk_get(&pdev->dev, NULL); 
if (IS_ERR(hash_drv_data->clk)) { - stm_error("clk_get() failed!"); + pr_err("clk_get() failed!"); ret = PTR_ERR(hash_drv_data->clk); - goto out_unmap; + goto out_regulator; } - stm_dbg(debug, "[u8500_hash_probe]: Calling clk_enable()!"); - ret = clk_enable(hash_drv_data->clk); + /* Enable device power (and clock) */ + ret = hash_enable_power(&pdev->dev, hash_drv_data, false); if (ret) { - stm_error("clk_enable() failed!"); - goto out_unmap; + dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!", + __func__); + goto out_clk; } - stm_dbg(debug, - "[u8500_hash_probe]: Calling hash_init_base_address()->" - "(base=0x%x,DEVICE_ID=%d)!", - (u32) hash_drv_data->base, HASH_DEVICE_ID_1); + pr_debug("[u8500_hash_probe]: Calling hash_init_base_address()->" + "(base=0x%x,DEVICE_ID=%d)!", + (u32) hash_drv_data->base, HASH_DEVICE_ID_1); /* Setting base address */ - hash_error = + hash_rv = hash_init_base_address(HASH_DEVICE_ID_1, (t_logical_address) hash_drv_data->base); - if (hash_error != HASH_OK) { - stm_error("hash_init_base_address() failed!"); - ret = -1; /*TODO: what error code should be used here!? */ - goto out_clk; + if (hash_rv != HASH_OK) { + pr_err("hash_init_base_address() failed!"); + ret = -EPERM; + goto out_power; } -#ifdef HASH_ACC_SYNC_CONTROL - stm_dbg(debug, "[u8500_hash_probe]: Calling mutex_init()!"); + pr_debug("[u8500_hash_probe]: Calling mutex_init()!"); mutex_init(&hash_hw_acc_mutex); -#endif - if (mode == 0) { - stm_dbg(debug, - "[u8500_hash_probe]: To register all algorithms!"); - - ret = crypto_register_shash(&sha1_alg); - if (ret) { - stm_error("Could not register sha1_alg!"); - goto out_clk; - } - stm_dbg(debug, "[u8500_hash_probe]: sha1_alg registered!"); - - ret = crypto_register_shash(&sha256_alg); - if (ret) { - stm_error("Could not register sha256_alg!"); - goto out_unreg1; - } - stm_dbg(debug, "[u8500_hash_probe]: sha256_alg registered!"); + pr_debug("[u8500_hash_probe]: To register only sha1 and sha256" + " algorithms!"); + internal_drv_data = hash_drv_data; - ret = crypto_register_shash(&hmac_sha1_alg); - if (ret) { - stm_error("Could not register hmac_sha1_alg!"); - goto out_unreg2; - } - stm_dbg(debug, "[u8500_hash_probe]: hmac_sha1_alg registered!"); - - ret = crypto_register_shash(&hmac_sha256_alg); - if (ret) { - stm_error("Could not register hmac_sha256_alg!"); - goto out_unreg3; - } - stm_dbg(debug, - "[u8500_hash_probe]: hmac_sha256_alg registered!"); + ret = crypto_register_shash(&sha1_alg); + if (ret) { + pr_err("Could not register sha1_alg!"); + goto out_power; } + pr_debug("[u8500_hash_probe]: sha1_alg registered!"); - if (mode == 10) { - stm_dbg(debug, - "[u8500_hash_probe]: To register only sha1 and sha256" - " algorithms!"); - - ret = crypto_register_shash(&sha1_alg); - if (ret) { - stm_error("Could not register sha1_alg!"); - goto out_clk; - } - - ret = crypto_register_shash(&sha256_alg); - if (ret) { - stm_error("Could not register sha256_alg!"); - goto out_unreg1_tmp; - } + ret = crypto_register_shash(&sha256_alg); + if (ret) { + pr_err("Could not register sha256_alg!"); + goto out_unreg1_tmp; } - stm_dbg(debug, "[u8500_hash_probe]: Calling platform_set_drvdata()!"); + pr_debug("[u8500_hash_probe]: Calling platform_set_drvdata()!"); platform_set_drvdata(pdev, hash_drv_data); - return 0; - if (mode == 0) { -out_unreg1: - crypto_unregister_shash(&sha1_alg); -out_unreg2: - crypto_unregister_shash(&sha256_alg); -out_unreg3: - crypto_unregister_shash(&hmac_sha1_alg); - } + if (hash_disable_power(&pdev->dev, hash_drv_data, false)) + 
dev_err(&pdev->dev, "[%s]: hash_disable_power()" + " failed!", __func__); + + return 0; - if (mode == 10) { out_unreg1_tmp: - crypto_unregister_shash(&sha1_alg); - } + crypto_unregister_shash(&sha1_alg); + +out_power: + hash_disable_power(&pdev->dev, hash_drv_data, false); out_clk: - clk_disable(hash_drv_data->clk); clk_put(hash_drv_data->clk); +out_regulator: + regulator_put(hash_drv_data->regulator); + out_unmap: iounmap(hash_drv_data->base); @@ -767,60 +534,133 @@ static int u8500_hash_remove(struct platform_device *pdev) struct resource *res; struct hash_driver_data *hash_drv_data; - stm_dbg(debug, "[u8500_hash_remove]: (pdev=0x%x)", (u32) pdev); + pr_debug("[u8500_hash_remove]: (pdev=0x%x)", (u32) pdev); - stm_dbg(debug, "[u8500_hash_remove]: Calling platform_get_drvdata()!"); + pr_debug("[u8500_hash_remove]: Calling platform_get_drvdata()!"); hash_drv_data = platform_get_drvdata(pdev); - if (mode == 0) { - stm_dbg(debug, - "[u8500_hash_remove]: To unregister all algorithms!"); - crypto_unregister_shash(&sha1_alg); - crypto_unregister_shash(&sha256_alg); - crypto_unregister_shash(&hmac_sha1_alg); - crypto_unregister_shash(&hmac_sha256_alg); - } + pr_debug("[u8500_hash_remove]: To unregister only sha1 and " + "sha256 algorithms!"); + crypto_unregister_shash(&sha1_alg); + crypto_unregister_shash(&sha256_alg); - if (mode == 10) { - stm_dbg(debug, - "[u8500_hash_remove]: To unregister only sha1 and " - "sha256 algorithms!"); - crypto_unregister_shash(&sha1_alg); - crypto_unregister_shash(&sha256_alg); - } -#ifdef HASH_ACC_SYNC_CONTROL - stm_dbg(debug, "[u8500_hash_remove]: Calling mutex_destroy()!"); + pr_debug("[u8500_hash_remove]: Calling mutex_destroy()!"); mutex_destroy(&hash_hw_acc_mutex); -#endif - stm_dbg(debug, "[u8500_hash_remove]: Calling clk_disable()!"); + pr_debug("[u8500_hash_remove]: Calling clk_disable()!"); clk_disable(hash_drv_data->clk); - stm_dbg(debug, "[u8500_hash_remove]: Calling clk_put()!"); + pr_debug("[u8500_hash_remove]: Calling clk_put()!"); clk_put(hash_drv_data->clk); - stm_dbg(debug, "[u8500_hash_remove]: Calling iounmap(): base = 0x%x", - (u32) hash_drv_data->base); + pr_debug("[u8500_hash_remove]: Calling regulator_disable()!"); + regulator_disable(hash_drv_data->regulator); + + pr_debug("[u8500_hash_remove]: Calling iounmap(): base = 0x%x", + (u32) hash_drv_data->base); iounmap(hash_drv_data->base); - stm_dbg(debug, "[u8500_hash_remove]: Calling platform_get_resource()!"); + pr_debug("[u8500_hash_remove]: Calling platform_get_resource()!"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - stm_dbg(debug, - "[u8500_hash_remove]: Calling release_mem_region()" - "->res->start=0x%x, res->end = 0x%x!", + pr_debug("[u8500_hash_remove]: Calling release_mem_region()" + "->res->start=0x%x, res->end = 0x%x!", res->start, res->end); release_mem_region(res->start, res->end - res->start + 1); - stm_dbg(debug, "[u8500_hash_remove]: Calling kfree()!"); + pr_debug("[u8500_hash_remove]: Calling kfree()!"); kfree(hash_drv_data); return 0; } +static void u8500_hash_shutdown(struct platform_device *pdev) +{ + struct resource *res = NULL; + struct hash_driver_data *hash_drv_data; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + hash_drv_data = platform_get_drvdata(pdev); + if (!hash_drv_data) { + dev_err(&pdev->dev, "[%s]: " + "platform_get_drvdata() failed!", __func__); + return; + } + + crypto_unregister_shash(&sha1_alg); + crypto_unregister_shash(&sha256_alg); + + mutex_destroy(&hash_hw_acc_mutex); + + iounmap(hash_drv_data->base); + + res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) + release_mem_region(res->start, resource_size(res)); + + if (hash_disable_power(&pdev->dev, hash_drv_data, false)) + dev_err(&pdev->dev, "[%s]: " + "hash_disable_power() failed", __func__); + + clk_put(hash_drv_data->clk); + regulator_put(hash_drv_data->regulator); +} + +static int u8500_hash_suspend(struct platform_device *pdev, pm_message_t state) +{ + int ret; + struct hash_driver_data *hash_drv_data; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + /* Handle state? */ + hash_drv_data = platform_get_drvdata(pdev); + if (!hash_drv_data) { + dev_err(&pdev->dev, "[%s]: " + "platform_get_drvdata() failed!", __func__); + return -ENOMEM; + } + + ret = hash_disable_power(&pdev->dev, hash_drv_data, true); + if (ret) + dev_err(&pdev->dev, "[%s]: " + "hash_disable_power()", __func__); + + return ret; +} + +static int u8500_hash_resume(struct platform_device *pdev) +{ + int ret = 0; + struct hash_driver_data *hash_drv_data; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + hash_drv_data = platform_get_drvdata(pdev); + if (!hash_drv_data) { + dev_err(&pdev->dev, "[%s]: " + "platform_get_drvdata() failed!", __func__); + return -ENOMEM; + } + + if (hash_drv_data->restore_dev_state) { + ret = hash_enable_power(&pdev->dev, hash_drv_data, true); + if (ret) + dev_err(&pdev->dev, "[%s]: " + "hash_enable_power() failed!", __func__); + } + + return ret; +} + + static struct platform_driver hash_driver = { .probe = u8500_hash_probe, .remove = u8500_hash_remove, + .shutdown = u8500_hash_shutdown, + .suspend = u8500_hash_suspend, + .resume = u8500_hash_resume, .driver = { .owner = THIS_MODULE, .name = "hash1", @@ -832,7 +672,7 @@ static struct platform_driver hash_driver = { */ static int __init u8500_hash_mod_init(void) { - stm_dbg(debug, "u8500_hash_mod_init() is called!"); + pr_debug("u8500_hash_mod_init() is called!"); return platform_driver_register(&hash_driver); } @@ -842,7 +682,7 @@ static int __init u8500_hash_mod_init(void) */ static void __exit u8500_hash_mod_fini(void) { - stm_dbg(debug, "u8500_hash_mod_fini() is called!"); + pr_debug("u8500_hash_mod_fini() is called!"); platform_driver_unregister(&hash_driver); return; @@ -860,14 +700,10 @@ static void hash_processblock(int hid, const u32 *message) { u32 count; - HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->str, - HASH_STR_DCAL_MASK); - HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->str, - HASH_STR_NBLW_MASK); + clear_bit(HASH_STR_NBLW_MASK, (void *)sys_ctx_g.registry[hid]->str); /* Partially unrolled loop */ - for (count = 0; count < (HASH_BLOCK_SIZE / sizeof(u32)); - count += 4) { + for (count = 0; count < (HASH_BLOCK_SIZE / sizeof(u32)); count += 4) { HASH_SET_DIN(message[0]); HASH_SET_DIN(message[1]); HASH_SET_DIN(message[2]); @@ -889,8 +725,8 @@ static void hash_processblock(int hid, const u32 *message) */ static void hash_messagepad(int hid, const u32 *message, u8 index_bytes) { - stm_dbg(debug, "[u8500_hash_alg] hash_messagepad" - "(bytes in final msg=%d))", index_bytes); + pr_debug("[u8500_hash_alg] hash_messagepad" + "(bytes in final msg=%d))", index_bytes); clear_reg_str(hid); @@ -904,34 +740,39 @@ static void hash_messagepad(int hid, const u32 *message, u8 index_bytes) if (index_bytes) HASH_SET_DIN(message[0]); + while (sys_ctx_g.registry[hid]->str & HASH_STR_DCAL_MASK) + cpu_relax(); + /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ HASH_SET_NBLW(index_bytes * 8); - stm_dbg(debug, "[u8500_hash_alg] hash_messagepad -> DIN=0x%08x NBLW=%d", - g_sys_ctx.registry[hid]->din, - 
g_sys_ctx.registry[hid]->str); + pr_debug("[u8500_hash_alg] hash_messagepad -> DIN=0x%08x NBLW=%d", + sys_ctx_g.registry[hid]->din, + sys_ctx_g.registry[hid]->str); HASH_SET_DCAL; - stm_dbg(debug, "[u8500_hash_alg] hash_messagepad d -> " - "DIN=0x%08x NBLW=%d", - g_sys_ctx.registry[hid]->din, - g_sys_ctx.registry[hid]->str); + pr_debug("[u8500_hash_alg] hash_messagepad after dcal -> " + "DIN=0x%08x NBLW=%d", + sys_ctx_g.registry[hid]->din, + sys_ctx_g.registry[hid]->str); + while (sys_ctx_g.registry[hid]->str & HASH_STR_DCAL_MASK) + cpu_relax(); } /** * hash_incrementlength - Increments the length of the current message. - * @hid: Hardware device ID + * @ctx: Hash context * @incr: Length of message processed already * * Overflow cannot occur, because conditions for overflow are checked in * hash_hw_update. */ -static void hash_incrementlength(int hid, u32 incr) +static void hash_incrementlength(struct hash_ctx *ctx, u32 incr) { - g_sys_ctx.state[hid].length.low_word += incr; + ctx->state.length.low_word += incr; /* Check for wrap-around */ - if (g_sys_ctx.state[hid].length.low_word < incr) - g_sys_ctx.state[hid].length.high_word++; + if (ctx->state.length.low_word < incr) + ctx->state.length.high_word++; } /** @@ -963,60 +804,49 @@ static void hash_incrementlength(int hid, u32 incr) */ int hash_setconfiguration(int hid, struct hash_config *p_config) { - int hash_error = HASH_OK; + int hash_rv = HASH_OK; - stm_dbg(debug, "[u8500_hash_alg] hash_setconfiguration())"); + pr_debug("[u8500_hash_alg] hash_setconfiguration())"); - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } + if (p_config->algorithm != HASH_ALGO_SHA1 && + p_config->algorithm != HASH_ALGO_SHA2) + return HASH_INVALID_PARAMETER; HASH_SET_DATA_FORMAT(p_config->data_format); - HCL_SET_BITS(g_sys_ctx.registry[hid]->cr, - HASH_CR_EMPTYMSG_MASK); + HCL_SET_BITS(sys_ctx_g.registry[hid]->cr, HASH_CR_EMPTYMSG_MASK); - /* This bit selects between SHA-1 or SHA-2 algorithm */ - if (HASH_ALGO_SHA2 == p_config->algorithm) { - HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->cr, - HASH_CR_ALGO_MASK); - } else { /* SHA1 algorithm */ + switch (p_config->algorithm) { + case HASH_ALGO_SHA1: + HCL_SET_BITS(sys_ctx_g.registry[hid]->cr, HASH_CR_ALGO_MASK); + break; - HCL_SET_BITS(g_sys_ctx.registry[hid]->cr, - HASH_CR_ALGO_MASK); + case HASH_ALGO_SHA2: + HCL_CLEAR_BITS(sys_ctx_g.registry[hid]->cr, HASH_CR_ALGO_MASK); + break; + + default: + pr_debug("[u8500_hash_alg] Incorrect algorithm."); + return HASH_INVALID_PARAMETER; } /* This bit selects between HASH or HMAC mode for the selected algorithm */ if (HASH_OPER_MODE_HASH == p_config->oper_mode) { - HCL_CLEAR_BITS(g_sys_ctx.registry + HCL_CLEAR_BITS(sys_ctx_g.registry [hid]->cr, HASH_CR_MODE_MASK); - } else { /* HMAC mode */ - - HCL_SET_BITS(g_sys_ctx.registry[hid]->cr, - HASH_CR_MODE_MASK); - - /* This bit selects between short key (<= 64 bytes) or long key - (>64 bytes) in HMAC mode */ - if (HASH_SHORT_KEY == p_config->hmac_key) { - HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->cr, - HASH_CR_LKEY_MASK); - } else { - HCL_SET_BITS(g_sys_ctx.registry[hid]->cr, - HASH_CR_LKEY_MASK); - } + } else { /* HMAC mode or wrong hash mode */ + hash_rv = HASH_INVALID_PARAMETER; + pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); } - return hash_error; + return hash_rv; } /** * hash_begin - This routine resets some globals and initializes the hash * hardware. 
- * @hid: Hardware device ID + * @ctx: Hash context * * Reentrancy: Non Re-entrant * @@ -1033,35 +863,20 @@ int hash_setconfiguration(int hid, struct hash_config *p_config) * So the user has to initialize the device for new * configuration to take in to effect. */ -int hash_begin(int hid) +void hash_begin(struct hash_ctx *ctx) { - int hash_error = HASH_OK; - /* HW and SW initializations */ /* Note: there is no need to initialize buffer and digest members */ - stm_dbg(debug, "[u8500_hash_alg] hash_begin())"); - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } + pr_debug("[u8500_hash_alg] hash_begin())"); - g_sys_ctx.state[hid].index = 0; - g_sys_ctx.state[hid].bit_index = 0; - g_sys_ctx.state[hid].length.high_word = 0; - g_sys_ctx.state[hid].length.low_word = 0; + while (sys_ctx_g.registry[HASH_DEVICE_ID_1]->str & HASH_STR_DCAL_MASK) + cpu_relax(); HASH_INITIALIZE; - HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->str, - HASH_STR_DCAL_MASK); - HCL_CLEAR_BITS(g_sys_ctx.registry[hid]->str, + HCL_CLEAR_BITS(sys_ctx_g.registry[HASH_DEVICE_ID_1]->str, HASH_STR_NBLW_MASK); - - return hash_error; } /** @@ -1074,57 +889,62 @@ int hash_begin(int hid) * * Reentrancy: Non Re-entrant */ -int hash_hw_update(int hid, const u8 *p_data_buffer, u32 msg_length) +int hash_hw_update(struct shash_desc *desc, + int hid, + const u8 *p_data_buffer, + u32 msg_length) { - int hash_error = HASH_OK; + int hash_rv = HASH_OK; u8 index; u8 *p_buffer; u32 count; + struct hash_ctx *ctx = shash_desc_ctx(desc); + struct hash_driver_data *device_data = internal_drv_data; - stm_dbg(debug, "[u8500_hash_alg] hash_hw_update(msg_length=%d / %d), " - "in=%d, bin=%d))", + pr_debug("[u8500_hash_alg] hash_hw_update(msg_length=%d / %d), " + "in=%d, bin=%d))", msg_length, msg_length / 8, - g_sys_ctx.state[hid].index, - g_sys_ctx.state[hid].bit_index); - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } + ctx->state.index, + ctx->state.bit_index); - index = g_sys_ctx.state[hid].index; + index = ctx->state.index; - p_buffer = (u8 *)g_sys_ctx.state[hid].buffer; + p_buffer = (u8 *)ctx->state.buffer; /* Number of bytes in the message */ msg_length /= 8; /* Check parameters */ if (NULL == p_data_buffer) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; + hash_rv = HASH_INVALID_PARAMETER; + pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_rv; } - /* Check if g_sys_ctx.state.length + msg_length + /* Check if ctx->state.length + msg_length overflows */ if (msg_length > - (g_sys_ctx.state[hid].length.low_word + msg_length) + (ctx->state.length.low_word + msg_length) && HASH_HIGH_WORD_MAX_VAL == - (g_sys_ctx.state[hid].length.high_word)) { - hash_error = HASH_MSG_LENGTH_OVERFLOW; - stm_error("[u8500_hash_alg] HASH_MSG_LENGTH_OVERFLOW!"); - return hash_error; + (ctx->state.length.high_word)) { + hash_rv = HASH_MSG_LENGTH_OVERFLOW; + pr_err("[u8500_hash_alg] HASH_MSG_LENGTH_OVERFLOW!"); + return hash_rv; + } + + /* Enable device power (and clock) */ + hash_rv = hash_enable_power(device_data->dev, device_data, false); + if (hash_rv) { + dev_err(device_data->dev, "[%s]: " + "hash_enable_power() failed!", __func__); + goto out; } /* Main loop */ while (0 != msg_length) { if ((index + msg_length) < 
HASH_BLOCK_SIZE) { for (count = 0; count < msg_length; count++) { - /*TODO: memcpy? */ p_buffer[index + count] = *(p_data_buffer + count); } @@ -1132,7 +952,26 @@ int hash_hw_update(int hid, const u8 *p_data_buffer, u32 msg_length) index += msg_length; msg_length = 0; } else { - /* if 'p_data_buffer' is four byte aligned and local + if (!ctx->updated) { + hash_rv = init_hash_hw(desc); + if (hash_rv != HASH_OK) { + pr_err("init_hash_hw() failed!"); + goto out; + } + ctx->updated = 1; + } else { + hash_rv = + hash_resume_state(HASH_DEVICE_ID_1, + &ctx->state); + if (hash_rv != HASH_OK) { + pr_err("hash_resume_state()" + " failed!"); + goto out_power; + } + } + + /* + * If 'p_data_buffer' is four byte aligned and local * buffer does not have any data, we can write data * directly from 'p_data_buffer' to HW peripheral, * otherwise we first copy data to a local buffer @@ -1152,60 +991,34 @@ int hash_hw_update(int hid, const u8 *p_data_buffer, u32 msg_length) hash_processblock(hid, (const u32 *)p_buffer); } - hash_incrementlength(hid, HASH_BLOCK_SIZE); + hash_incrementlength(ctx, HASH_BLOCK_SIZE); p_data_buffer += (HASH_BLOCK_SIZE - index); msg_length -= (HASH_BLOCK_SIZE - index); index = 0; + + hash_rv = + hash_save_state(HASH_DEVICE_ID_1, &ctx->state); + if (hash_rv != HASH_OK) { + pr_err("hash_save_state() failed!"); + goto out_power; + } } } - g_sys_ctx.state[hid].index = index; + ctx->state.index = index; - stm_dbg(debug, "[u8500_hash_alg] hash_hw_update END(msg_length=%d in " - "bits, in=%d, bin=%d))", + pr_debug("[u8500_hash_alg] hash_hw_update END(msg_length=%d in " + "bits, in=%d, bin=%d))", msg_length, - g_sys_ctx.state[hid].index, - g_sys_ctx.state[hid].bit_index); - - return hash_error; -} - -/** - * hash_end_key - Function that ends a message, i.e. pad and triggers the last - * calculation. - * @hid: Hardware device ID - * - * This function also clear the registries that have been involved in - * computation. - */ -int hash_end_key(int hid) -{ - int hash_error = HASH_OK; - u8 count = 0; - - stm_dbg(debug, "[u8500_hash_alg] hash_end_key(index=%d))", - g_sys_ctx.state[hid].index); - - hash_messagepad(hid, g_sys_ctx.state[hid].buffer, - g_sys_ctx.state[hid].index); - - /* Wait till the DCAL bit get cleared, So that we get the final - * message digest not intermediate value. 
- */ - while (g_sys_ctx.registry[hid]->str & HASH_STR_DCAL_MASK) - ; - - /* Reset the HASH state */ - g_sys_ctx.state[hid].index = 0; - g_sys_ctx.state[hid].bit_index = 0; - - for (count = 0; count < HASH_BLOCK_SIZE / sizeof(u32); count++) - g_sys_ctx.state[hid].buffer[count] = 0; - - g_sys_ctx.state[hid].length.high_word = 0; - g_sys_ctx.state[hid].length.low_word = 0; - - return hash_error; + ctx->state.index, + ctx->state.bit_index); +out_power: + /* Disable power (and clock) */ + if (hash_disable_power(device_data->dev, device_data, false)) + dev_err(device_data->dev, "[%s]: " + "hash_disable_power() failed!", __func__); +out: + return hash_rv; } /** @@ -1218,66 +1031,53 @@ int hash_end_key(int hid) int hash_resume_state(int hid, const struct hash_state *device_state) { u32 temp_cr; - int hash_error = HASH_OK; + int hash_rv = HASH_OK; s32 count; + int hash_mode = HASH_OPER_MODE_HASH; - stm_dbg(debug, "[u8500_hash_alg] hash_resume_state(state(0x%x)))", - (u32) device_state); + pr_debug("[u8500_hash_alg] hash_resume_state(state(0x%x)))", + (u32) device_state); if (NULL == device_state) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; + hash_rv = HASH_INVALID_PARAMETER; + pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_rv; } /* Check correctness of index and length members */ if (device_state->index > HASH_BLOCK_SIZE || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - for (count = 0; count < (s32) (HASH_BLOCK_SIZE / sizeof(u32)); - count++) { - g_sys_ctx.state[hid].buffer[count] = - device_state->buffer[count]; + hash_rv = HASH_INVALID_PARAMETER; + pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_rv; } - g_sys_ctx.state[hid].index = device_state->index; - g_sys_ctx.state[hid].bit_index = device_state->bit_index; - g_sys_ctx.state[hid].length = device_state->length; - HASH_INITIALIZE; temp_cr = device_state->temp_cr; - g_sys_ctx.registry[hid]->cr = + sys_ctx_g.registry[hid]->cr = temp_cr & HASH_CR_RESUME_MASK; + if (sys_ctx_g.registry[hid]->cr & HASH_CR_MODE_MASK) + hash_mode = HASH_OPER_MODE_HMAC; + else + hash_mode = HASH_OPER_MODE_HASH; + for (count = 0; count < HASH_CSR_COUNT; count++) { - if ((count >= 36) && - !(g_sys_ctx.registry[hid]->cr & - HASH_CR_MODE_MASK)) { + if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) break; - } - g_sys_ctx.registry[hid]->csrx[count] = + + sys_ctx_g.registry[hid]->csrx[count] = device_state->csr[count]; } - g_sys_ctx.registry[hid]->csfull = device_state->csfull; - g_sys_ctx.registry[hid]->csdatain = device_state->csdatain; + sys_ctx_g.registry[hid]->csfull = device_state->csfull; + sys_ctx_g.registry[hid]->csdatain = device_state->csdatain; - g_sys_ctx.registry[hid]->str = device_state->str_reg; - g_sys_ctx.registry[hid]->cr = temp_cr; + sys_ctx_g.registry[hid]->str = device_state->str_reg; + sys_ctx_g.registry[hid]->cr = temp_cr; - return hash_error; + return hash_rv; } /** @@ -1291,289 +1091,50 @@ int hash_save_state(int hid, struct hash_state *device_state) { u32 temp_cr; u32 count; - int hash_error = HASH_OK; + int hash_rv = HASH_OK; + int hash_mode = HASH_OPER_MODE_HASH; - stm_dbg(debug, "[u8500_hash_alg] 
hash_save_state( state(0x%x)))", - (u32) device_state); + pr_debug("[u8500_hash_alg] hash_save_state( state(0x%x)))", + (u32) device_state); if (NULL == device_state) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - for (count = 0; count < HASH_BLOCK_SIZE / sizeof(u32); count++) { - device_state->buffer[count] = - g_sys_ctx.state[hid].buffer[count]; + hash_rv = HASH_INVALID_PARAMETER; + pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_rv; } - device_state->index = g_sys_ctx.state[hid].index; - device_state->bit_index = g_sys_ctx.state[hid].bit_index; - device_state->length = g_sys_ctx.state[hid].length; - /* Write dummy value to force digest intermediate calculation. This * actually makes sure that there isn't any ongoing calculation in the * hardware. */ - while (g_sys_ctx.registry[hid]->str & HASH_STR_DCAL_MASK) - ; + while (sys_ctx_g.registry[hid]->str & HASH_STR_DCAL_MASK) + cpu_relax(); - temp_cr = g_sys_ctx.registry[hid]->cr; + temp_cr = sys_ctx_g.registry[hid]->cr; - device_state->str_reg = g_sys_ctx.registry[hid]->str; + device_state->str_reg = sys_ctx_g.registry[hid]->str; - device_state->din_reg = g_sys_ctx.registry[hid]->din; + device_state->din_reg = sys_ctx_g.registry[hid]->din; + + if (sys_ctx_g.registry[hid]->cr & HASH_CR_MODE_MASK) + hash_mode = HASH_OPER_MODE_HMAC; + else + hash_mode = HASH_OPER_MODE_HASH; for (count = 0; count < HASH_CSR_COUNT; count++) { - if ((count >= 36) - && !(g_sys_ctx.registry[hid]->cr & - HASH_CR_MODE_MASK)) { + if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) break; - } device_state->csr[count] = - g_sys_ctx.registry[hid]->csrx[count]; + sys_ctx_g.registry[hid]->csrx[count]; } - device_state->csfull = g_sys_ctx.registry[hid]->csfull; - device_state->csdatain = g_sys_ctx.registry[hid]->csdatain; + device_state->csfull = sys_ctx_g.registry[hid]->csfull; + device_state->csdatain = sys_ctx_g.registry[hid]->csdatain; - /* end if */ device_state->temp_cr = temp_cr; - return hash_error; -} - -/** - * hash_end - Ends current HASH computation, passing back the hash to the user. 
- * @hid: Hardware device ID - * @digest: User allocated byte array for the calculated digest - * - * Reentrancy: Non Re-entrant - */ -int hash_end(int hid, u8 digest[HASH_MSG_DIGEST_SIZE]) -{ - int hash_error = HASH_OK; - u32 count; - /* Standard SHA-1 digest for null string for HASH mode */ - u8 zero_message_hash_sha1[HASH_MSG_DIGEST_SIZE] = { - 0xDA, 0x39, 0xA3, 0xEE, - 0x5E, 0x6B, 0x4B, 0x0D, - 0x32, 0x55, 0xBF, 0xEF, - 0x95, 0x60, 0x18, 0x90, - 0xAF, 0xD8, 0x07, 0x09, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00 - }; - /* Standard SHA-2 digest for null string for HASH mode */ - u8 zero_message_hash_sha2[HASH_MSG_DIGEST_SIZE] = { - 0xD4, 0x1D, 0x8C, 0xD9, - 0x8F, 0x00, 0xB2, 0x04, - 0xE9, 0x80, 0x09, 0x98, - 0xEC, 0xF8, 0x42, 0x7E, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00 - }; - /* Standard SHA-1 digest for null string for HMAC mode,with no key */ - u8 zero_message_hmac_sha1[HASH_MSG_DIGEST_SIZE] = { - 0xFB, 0xDB, 0x1D, 0x1B, - 0x18, 0xAA, 0x6C, 0x08, - 0x32, 0x4B, 0x7D, 0x64, - 0xB7, 0x1F, 0xB7, 0x63, - 0x70, 0x69, 0x0E, 0x1D, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00 - }; - /* Standard SHA2 digest for null string for HMAC mode,with no key */ - u8 zero_message_hmac_sha2[HASH_MSG_DIGEST_SIZE] = { - 0x74, 0xE6, 0xF7, 0x29, - 0x8A, 0x9C, 0x2D, 0x16, - 0x89, 0x35, 0xF5, 0x8C, - 0x00, 0x1B, 0xAD, 0x88, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00 - }; - - stm_dbg(debug, "[u8500_hash_alg] hash_end(digest array (0x%x)))", - (u32) digest); - - if (NULL == digest) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - if (0 == g_sys_ctx.state[hid].index && - 0 == g_sys_ctx.state[hid].length.high_word && - 0 == g_sys_ctx.state[hid].length.low_word) { - if (g_sys_ctx.registry[hid]->cr & HASH_CR_MODE_MASK) { - if (g_sys_ctx.registry[hid]->cr & HASH_CR_ALGO_MASK) { - /* hash of an empty message was requested */ - for (count = 0; count < HASH_MSG_DIGEST_SIZE; - count++) { - digest[count] = - zero_message_hmac_sha1[count]; - } - } else { /* SHA-2 algo */ - - /* hash of an empty message was requested */ - for (count = 0; count < HASH_MSG_DIGEST_SIZE; - count++) { - digest[count] = - zero_message_hmac_sha2[count]; - } - } - } else { /* HASH mode */ - - if (g_sys_ctx.registry[hid]->cr & HASH_CR_ALGO_MASK) { - /* hash of an empty message was requested */ - for (count = 0; count < HASH_MSG_DIGEST_SIZE; - count++) { - digest[count] = - zero_message_hash_sha1[count]; - } - } else { /* SHA-2 algo */ - - /* hash of an empty message was requested */ - for (count = 0; count < HASH_MSG_DIGEST_SIZE; - count++) { - digest[count] = - zero_message_hash_sha2[count]; - } - } - } - - HASH_SET_DCAL; - } else { - hash_messagepad(hid, - g_sys_ctx.state[hid].buffer, - g_sys_ctx.state[hid].index); - - /* Wait till the DCAL bit get cleared, So that we get the final - * message digest not intermediate value. 
*/ - while (g_sys_ctx.registry[hid]->str & HASH_STR_DCAL_MASK) - ; - - hash_error = hash_get_digest(hid, digest); - - /* Reset the HASH state */ - g_sys_ctx.state[hid].index = 0; - g_sys_ctx.state[hid].bit_index = 0; - for (count = 0; count < HASH_BLOCK_SIZE / sizeof(u32); - count++) { - g_sys_ctx.state[hid].buffer[count] - = 0; - } - - g_sys_ctx.state[hid].length.high_word = 0; - g_sys_ctx.state[hid].length.low_word = 0; - } - - if (debug) - hexdump(digest, HASH_MSG_DIGEST_SIZE); - - return hash_error; -} - -/** - * hash_initialize_globals - Initialize global variables to their default reset - * value. - * @hid: Hardware device ID - * - * Reentrancy: Non Re-entrant, global structure g_sys_ctx elements are being - * modified - */ -static void hash_initialize_globals(int hid) -{ - u8 loop_count; - - /* Resetting the values of global variables except the registry */ - g_sys_ctx.state[hid].temp_cr = HASH_RESET_INDEX_VAL; - g_sys_ctx.state[hid].str_reg = HASH_RESET_INDEX_VAL; - g_sys_ctx.state[hid].din_reg = HASH_RESET_INDEX_VAL; - - for (loop_count = 0; loop_count < HASH_CSR_COUNT; loop_count++) { - g_sys_ctx.state[hid].csr[loop_count] = - HASH_RESET_CSRX_REG_VALUE; - } - - g_sys_ctx.state[hid].csfull = HASH_RESET_CSFULL_REG_VALUE; - g_sys_ctx.state[hid].csdatain = HASH_RESET_CSDATAIN_REG_VALUE; - - for (loop_count = 0; loop_count < (HASH_BLOCK_SIZE / sizeof(u32)); - loop_count++) { - g_sys_ctx.state[hid].buffer[loop_count] = - HASH_RESET_BUFFER_VAL; - } - - g_sys_ctx.state[hid].length.high_word = HASH_RESET_LEN_HIGH_VAL; - g_sys_ctx.state[hid].length.low_word = HASH_RESET_LEN_LOW_VAL; - g_sys_ctx.state[hid].index = HASH_RESET_INDEX_VAL; - g_sys_ctx.state[hid].bit_index = HASH_RESET_BIT_INDEX_VAL; -} - -/** - * hash_reset - This routine will reset the global variable to default reset - * value and HASH registers to their power on reset values. - * @hid: Hardware device ID - * - * Reentrancy: Non Re-entrant, global structure g_sys_ctx elements are being - * modified. - */ -int hash_reset(int hid) -{ - int hash_error = HASH_OK; - u8 loop_count; - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - - return hash_error; - } - - /* Resetting the values of global variables except the registry */ - hash_initialize_globals(hid); - - /* Resetting HASH control register to power-on-reset values */ - g_sys_ctx.registry[hid]->str = HASH_RESET_START_REG_VALUE; - - for (loop_count = 0; loop_count < HASH_CSR_COUNT; loop_count++) { - g_sys_ctx.registry[hid]->csrx[loop_count] = - HASH_RESET_CSRX_REG_VALUE; - } - - g_sys_ctx.registry[hid]->csfull = HASH_RESET_CSFULL_REG_VALUE; - g_sys_ctx.registry[hid]->csdatain = - HASH_RESET_CSDATAIN_REG_VALUE; - - /* Resetting the HASH Control reg. This also reset the PRIVn and SECn - * bits and hence the device registers will not be accessed anymore and - * should be done in the last HASH register access statement. 
- */ - g_sys_ctx.registry[hid]->cr = HASH_RESET_CONTROL_REG_VALUE; - - return hash_error; + return hash_rv; } /** @@ -1587,59 +1148,37 @@ int hash_reset(int hid) */ int hash_init_base_address(int hid, t_logical_address base_address) { - int hash_error = HASH_OK; - - stm_dbg(debug, "[u8500_hash_alg] hash_init_base_address())"); + int hash_rv = HASH_OK; - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - - return hash_error; - } + pr_debug("[u8500_hash_alg] hash_init_base_address())"); if (0 != base_address) { - /*--------------------------------------* - * Initializing the registers structure * - *--------------------------------------*/ - g_sys_ctx.registry[hid] = (struct hash_register *) base_address; - - /*--------------------------* - * Checking Peripheral Ids * - *--------------------------*/ - if ((HASH_P_ID0 == - g_sys_ctx.registry[hid]->periphid0) - && (HASH_P_ID1 == - g_sys_ctx.registry[hid]->periphid1) - && (HASH_P_ID2 == - g_sys_ctx.registry[hid]->periphid2) - && (HASH_P_ID3 == - g_sys_ctx.registry[hid]->periphid3) - && (HASH_CELL_ID0 == - g_sys_ctx.registry[hid]->cellid0) - && (HASH_CELL_ID1 == - g_sys_ctx.registry[hid]->cellid1) - && (HASH_CELL_ID2 == - g_sys_ctx.registry[hid]->cellid2) - && (HASH_CELL_ID3 == - g_sys_ctx.registry[hid]->cellid3) + /* Initializing the registers structure */ + sys_ctx_g.registry[hid] = + (struct hash_register *) base_address; + + /* Checking Peripheral Ids */ + if ((HASH_P_ID0 == sys_ctx_g.registry[hid]->periphid0) + && (HASH_P_ID1 == sys_ctx_g.registry[hid]->periphid1) + && (HASH_P_ID2 == sys_ctx_g.registry[hid]->periphid2) + && (HASH_P_ID3 == sys_ctx_g.registry[hid]->periphid3) + && (HASH_CELL_ID0 == sys_ctx_g.registry[hid]->cellid0) + && (HASH_CELL_ID1 == sys_ctx_g.registry[hid]->cellid1) + && (HASH_CELL_ID2 == sys_ctx_g.registry[hid]->cellid2) + && (HASH_CELL_ID3 == sys_ctx_g.registry[hid]->cellid3) ) { - - /* Resetting the values of global variables except the - registry */ - hash_initialize_globals(hid); - hash_error = HASH_OK; - return hash_error; + hash_rv = HASH_OK; + return hash_rv; } else { - hash_error = HASH_UNSUPPORTED_HW; - stm_error("[u8500_hash_alg] HASH_UNSUPPORTED_HW!"); - return hash_error; + hash_rv = HASH_UNSUPPORTED_HW; + pr_err("[u8500_hash_alg] HASH_UNSUPPORTED_HW!"); + return hash_rv; } } /* end if */ else { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; + hash_rv = HASH_INVALID_PARAMETER; + pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); + return hash_rv; } } @@ -1647,110 +1186,50 @@ int hash_init_base_address(int hid, t_logical_address base_address) * hash_get_digest - Gets the digest. * @hid: Hardware device ID * @digest: User allocated byte array for the calculated digest + * @algorithm: The algorithm in use. * * Reentrancy: Non Re-entrant, global variable registry (hash control register) * is being modified. * - * Note that, if this is called before the final message has been handle it will - * return the intermediate message digest. + * Note that, if this is called before the final message has been handle it + * will return the intermediate message digest. 
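+ *
+ * The digest buffer must be large enough for the algorithm selected by
+ * @algorithm: HASH_SHA1_DIGEST_SIZE bytes for SHA-1 and HASH_SHA2_DIGEST_SIZE
+ * bytes for SHA-2, since that many bytes are written to @digest below.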
*/ -int hash_get_digest(int hid, u8 *digest) +void hash_get_digest(int hid, u8 *digest, int algorithm) { u32 temp_hx_val, count; - int hash_error = HASH_OK; - - stm_dbg(debug, - "[u8500_hash_alg] hash_get_digest(digest array:(0x%x))", - (u32) digest); + int loop_ctr; - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; + if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA2) { + pr_err("[hash_get_digest] Incorrect algorithm %d", algorithm); + return; } + if (algorithm == HASH_ALGO_SHA1) + loop_ctr = HASH_SHA1_DIGEST_SIZE / sizeof(u32); + else + loop_ctr = HASH_SHA2_DIGEST_SIZE / sizeof(u32); + + pr_debug("[u8500_hash_alg] hash_get_digest(digest array:(0x%x))", + (u32) digest); + /* Copy result into digest array */ - for (count = 0; count < (HASH_MSG_DIGEST_SIZE / sizeof(u32)); - count++) { + for (count = 0; count < loop_ctr; count++) { temp_hx_val = HASH_GET_HX(count); digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF); digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF); digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF); digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF); } - - return hash_error; } -/** - * hash_compute - Performs a complete HASH calculation on the message passed. - * @hid: Hardware device ID - * @p_data_buffer: Pointer to the message to be hashed - * @msg_length: The length of the message - * @p_hash_config: Structure with configuration data for the hash hardware - * @digest: User allocated byte array for the calculated digest - * - * Reentrancy: Non Re-entrant - */ -int hash_compute(int hid, - const u8 *p_data_buffer, - u32 msg_length, - struct hash_config *p_hash_config, - u8 digest[HASH_MSG_DIGEST_SIZE]) { - int hash_error = HASH_OK; - - stm_dbg(debug, "[u8500_hash_alg] hash_compute())"); - - if (!((HASH_DEVICE_ID_0 == hid) - || (HASH_DEVICE_ID_1 == hid))) { - hash_error = HASH_INVALID_PARAMETER; - stm_error("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_error; - } - - - /* WARNING: return code must be checked if - * behaviour of hash_begin changes. - */ - hash_error = hash_setconfiguration(hid, p_hash_config); - if (HASH_OK != hash_error) { - stm_error("[u8500_hash_alg] hash_setconfiguration() failed!"); - return hash_error; - } - - hash_error = hash_begin(hid); - if (HASH_OK != hash_error) { - stm_error("[u8500_hash_alg] hash_begin() failed!"); - return hash_error; - } - - hash_error = hash_hw_update(hid, p_data_buffer, msg_length); - if (HASH_OK != hash_error) { - stm_error("[u8500_hash_alg] hash_hw_update() failed!"); - return hash_error; - } - - hash_error = hash_end(hid, digest); - if (HASH_OK != hash_error) { - stm_error("[u8500_hash_alg] hash_end() failed!"); - return hash_error; - } - - return hash_error; -} module_init(u8500_hash_mod_init); module_exit(u8500_hash_mod_fini); -module_param(mode, int, 0); module_param(debug, int, 0); -module_param(contextsaving, int, 0); MODULE_DESCRIPTION("Driver for ST-Ericsson U8500 HASH engine."); MODULE_LICENSE("GPL"); MODULE_ALIAS("sha1-u8500"); MODULE_ALIAS("sha256-u8500"); -MODULE_ALIAS("hmac(sha1-u8500)"); -MODULE_ALIAS("hmac(sha256-u8500)"); -- cgit v1.2.3 From 4c93613a4a5f25b739fa32f86ae67bfe033ee0d3 Mon Sep 17 00:00:00 2001 From: Shujuan Chen Date: Tue, 31 Aug 2010 17:14:54 +0200 Subject: ux500: tee bug fix for returning wrong values - Fix the missing the ret argument sent to secure world. 
- Fix the mismatch return btw kernel and secure world in closesession. Dependencies: WP269815 ST-Ericsson ID: ER270978 Change-Id: I86b4af518660987663632d04d11d5d4967878bca Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/4545 Reviewed-by: Linus WALLEIJ Tested-by: Shujuan CHEN Reviewed-by: Fredric MORENIUS --- arch/arm/mach-ux500/tee_ux500.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c index ab3782a323c..c009afe26ae 100644 --- a/arch/arm/mach-ux500/tee_ux500.c +++ b/arch/arm/mach-ux500/tee_ux500.c @@ -52,6 +52,7 @@ int call_sec_world(struct tee_session *ts, int sec_cmd) virt_to_phys(ts->ta), ts->cmd, virt_to_phys((void *)(ts->op)), + virt_to_phys((void *)(&ts->err)), virt_to_phys((void *)(&ts->origin))); } else { call_sec_rom_bridge(ISSWAPI_EXECUTE_TA, @@ -61,6 +62,7 @@ int call_sec_world(struct tee_session *ts, int sec_cmd) virt_to_phys(ts->ta), ts->cmd, virt_to_phys((void *)(ts->op)), + virt_to_phys((void *)(&ts->err)), virt_to_phys((void *)(&ts->origin))); } break; @@ -71,7 +73,16 @@ int call_sec_world(struct tee_session *ts, int sec_cmd) ts->id, NULL, virt_to_phys(ts->ta), - virt_to_phys((void *)(&ts->origin))); + virt_to_phys((void *)(&ts->err))); + + /* Since the TEE Client API does NOT take care of + * the return value, we print a warning here if + * something went wrong in secure world. + */ + if (ts->err != TEED_SUCCESS) + pr_warning("[%s] failed in secure world\n", + __func__); + break; } -- cgit v1.2.3 From 630355b7a64d4c4d42d217b643382ea95135d7a5 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Tue, 5 Apr 2011 13:53:10 +0200 Subject: ux500: add u5500 specific macros This patch is based on similar patch from Rickard Evertsson Although this patch is fixing fewer files. Signed-off-by: Mian Yousaf Kaukab Conflicts: arch/arm/mach-ux500/clock.c --- arch/arm/mach-ux500/tee_ux500.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c index c009afe26ae..2e6a2e89f0d 100644 --- a/arch/arm/mach-ux500/tee_ux500.c +++ b/arch/arm/mach-ux500/tee_ux500.c @@ -8,11 +8,10 @@ #include #include #include +#include #include -#define BOOT_BRIDGE_FUNC (U8500_BOOT_ROM_BASE + 0x18300) - #define ISSWAPI_EXECUTE_TA 0x11000001 #define ISSWAPI_CLOSE_TA 0x11000002 @@ -25,8 +24,16 @@ static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...) 
va_list ap; u32 ret; - hw_sec_rom_pub_bridge = - (bridge_func)((u32)IO_ADDRESS(BOOT_BRIDGE_FUNC)); + if (cpu_is_u8500()) + hw_sec_rom_pub_bridge = (bridge_func) + ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x18300)); + else if (cpu_is_u5500()) + hw_sec_rom_pub_bridge = (bridge_func) + ((u32)IO_ADDRESS(U5500_BOOT_ROM_BASE + 0x18300)); + else { + pr_err("tee-ux500: Unknown DB Asic!\n"); + return -EIO; + } va_start(ap, cfg); ret = hw_sec_rom_pub_bridge(service_id, cfg, ap); -- cgit v1.2.3 From ace22919ce9616453b97737e55825fb5ae274136 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 15 Oct 2010 15:07:04 +0200 Subject: V2 fix for TEE ST-Ericsson ID: WP270298 Change-Id: I0eb63eba30ed319ff601beb7cd4ac9c307e7414c Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/6620 Reviewed-by: Jens WIKLANDER Tested-by: Jens WIKLANDER --- arch/arm/mach-ux500/tee_ux500.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c index 2e6a2e89f0d..707e91284a1 100644 --- a/arch/arm/mach-ux500/tee_ux500.c +++ b/arch/arm/mach-ux500/tee_ux500.c @@ -20,11 +20,14 @@ static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...) { typedef u32 (*bridge_func)(u32, u32, va_list); - static bridge_func hw_sec_rom_pub_bridge; + bridge_func hw_sec_rom_pub_bridge; va_list ap; u32 ret; - if (cpu_is_u8500()) + if (cpu_is_u8500v2()) + hw_sec_rom_pub_bridge = (bridge_func) + ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x17300)); + else if (cpu_is_u8500v1()) hw_sec_rom_pub_bridge = (bridge_func) ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x18300)); else if (cpu_is_u5500()) -- cgit v1.2.3 From 845dda7f67651a93e0b8d09f39f17c332c22dc63 Mon Sep 17 00:00:00 2001 From: Martin Hovang Date: Mon, 9 May 2011 11:57:00 +0200 Subject: TEE: Added TEE kernel interface ST-Ericsson ID: 337171 ST Ericsson FOSS-OUT ID: Trivial ST Ericsson Linux next: Not tested Change-Id: I4c87bd550547a1fd90f1fef8c3c553246940b31e Signed-off-by: Martin Hovang Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/22666 Reviewed-by: QATEST Reviewed-by: Joakim BECH Reviewed-by: Jonas ABERG --- drivers/tee/tee_driver.c | 195 ++++++++++++++++++++++++++++++++++++++++++----- include/linux/tee.h | 135 +++++++++++++++++++++++++++++++- 2 files changed, 311 insertions(+), 19 deletions(-) diff --git a/drivers/tee/tee_driver.c b/drivers/tee/tee_driver.c index 551c92cc054..73c62871ddc 100644 --- a/drivers/tee/tee_driver.c +++ b/drivers/tee/tee_driver.c @@ -21,8 +21,7 @@ #define TEED_STATE_OPEN_DEV 0 #define TEED_STATE_OPEN_SESSION 1 -#define TEEC_MEM_INPUT 0x00000001 -#define TEEC_MEM_OUTPUT 0x00000002 +static struct mutex sync; static int tee_open(struct inode *inode, struct file *file); static int tee_release(struct inode *inode, struct file *file); @@ -281,13 +280,6 @@ static int tee_open(struct inode *inode, struct file *filp) reset_session(ts); - ts->sync = kmalloc(sizeof(struct mutex), GFP_KERNEL); - - if (!ts->sync) - return -ENOMEM; - - mutex_init(ts->sync); - return 0; } @@ -311,9 +303,6 @@ static int tee_release(struct inode *inode, struct file *filp) kfree(ts->op); ts->op = NULL; - kfree(ts->sync); - ts->sync = NULL; - kfree(ts->ta); ts->ta = NULL; @@ -342,18 +331,18 @@ static int tee_read(struct file *filp, char __user *buffer, ts = (struct tee_session *) (filp->private_data); - if (ts == NULL || ts->sync == NULL) { + if (ts == NULL) { pr_err("[%s] error, private_data not " "initialized\n", __func__); return -EINVAL; } - mutex_lock(ts->sync); + mutex_lock(&sync); buf.err = ts->err; buf.origin = 
ts->origin; - mutex_unlock(ts->sync); + mutex_unlock(&sync); if (copy_to_user(buffer, &buf, length)) { pr_err("[%s] error, copy_to_user failed!\n", @@ -388,13 +377,13 @@ static int tee_write(struct file *filp, const char __user *buffer, ts = (struct tee_session *) (filp->private_data); - if (ts == NULL || ts->sync == NULL) { + if (ts == NULL) { pr_err("[%s] error, private_data not " "initialized\n", __func__); return -EINVAL; } - mutex_lock(ts->sync); + mutex_lock(&sync); switch (ts->state) { case TEED_STATE_OPEN_DEV: @@ -439,11 +428,179 @@ static int tee_write(struct file *filp, const char __user *buffer, else ret = -EINVAL; - mutex_unlock(ts->sync); + mutex_unlock(&sync); return ret; } +int teec_initialize_context(const char *name, struct tee_context *context) +{ + return TEED_SUCCESS; +} +EXPORT_SYMBOL(teec_initialize_context); + +int teec_finalize_context(struct tee_context *context) +{ + return TEED_SUCCESS; +} +EXPORT_SYMBOL(teec_finalize_context); + +int teec_open_session(struct tee_context *context, + struct tee_session *session, + const struct tee_uuid *destination, + unsigned int connection_method, + void *connection_data, struct tee_operation *operation, + unsigned int *error_origin) +{ + int res = TEED_SUCCESS; + + if (session == NULL || destination == NULL) { + pr_err("[%s] session or destination == NULL\n", __func__); + if (error_origin != NULL) + *error_origin = TEED_ORIGIN_DRIVER; + res = TEED_ERROR_BAD_PARAMETERS; + goto exit; + } + + reset_session(session); + + /* + * Open a session towards an application already loaded inside + * the TEE + */ + session->uuid = kmalloc(sizeof(struct tee_uuid), GFP_KERNEL); + + if (session->uuid == NULL) { + pr_err("[%s] error, out of memory (uuid)\n", + __func__); + if (error_origin != NULL) + *error_origin = TEED_ORIGIN_DRIVER; + res = TEED_ERROR_OUT_OF_MEMORY; + goto exit; + } + + memcpy(session->uuid, destination, sizeof(struct tee_uuid)); + + session->ta = NULL; + session->id = 0; + +exit: + return res; +} +EXPORT_SYMBOL(teec_open_session); + +int teec_close_session(struct tee_session *session) +{ + int res = TEED_SUCCESS; + + mutex_lock(&sync); + + if (session == NULL) { + pr_err("[%s] error, session == NULL\n", __func__); + res = TEED_ERROR_BAD_PARAMETERS; + goto exit; + } + + if (call_sec_world(session, TEED_CLOSE_SESSION)) { + pr_err("[%s] error, call_sec_world failed\n", __func__); + res = TEED_ERROR_GENERIC; + goto exit; + } + +exit: + if (session != NULL) { + kfree(session->uuid); + session->uuid = NULL; + } + + mutex_unlock(&sync); + return res; +} +EXPORT_SYMBOL(teec_close_session); + +int teec_invoke_command( + struct tee_session *session, unsigned int command_id, + struct tee_operation *operation, + unsigned int *error_origin) +{ + int res = TEED_SUCCESS; + int i; + + mutex_lock(&sync); + + if (session == NULL || operation == NULL || error_origin == NULL) { + pr_err("[%s] error, input parameters == NULL\n", __func__); + if (error_origin != NULL) + *error_origin = TEED_ORIGIN_DRIVER; + res = TEED_ERROR_BAD_PARAMETERS; + goto exit; + } + + for (i = 0; i < 4; ++i) { + /* We only want to translate memrefs in use. 
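Secure world works on physical addresses, so each buffer pointer that is in use is translated with virt_to_phys() here and translated back with memrefs_phys_to_virt() once the call has returned.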
*/ + if (operation->flags & (1 << i)) { + operation->shm[i].buffer = + (void *)virt_to_phys( + operation->shm[i].buffer); + } + } + session->op = operation; + session->cmd = command_id; + + /* + * Call secure world + */ + if (call_sec_world(session, TEED_INVOKE)) { + pr_err("[%s] error, call_sec_world failed\n", __func__); + if (error_origin != NULL) + *error_origin = TEED_ORIGIN_DRIVER; + res = TEED_ERROR_GENERIC; + } + if (session->err != TEED_SUCCESS) { + pr_err("[%s] error, call_sec_world failed\n", __func__); + if (error_origin != NULL) + *error_origin = session->origin; + res = session->err; + } + + memrefs_phys_to_virt(session); + session->op = NULL; + +exit: + mutex_unlock(&sync); + return res; +} +EXPORT_SYMBOL(teec_invoke_command); + +int teec_allocate_shared_memory(struct tee_context *context, + struct tee_sharedmemory *shared_memory) +{ + int res = TEED_SUCCESS; + + if (shared_memory == NULL) { + res = TEED_ERROR_BAD_PARAMETERS; + goto exit; + } + + shared_memory->buffer = kmalloc(shared_memory->size, + GFP_KERNEL); + + if (shared_memory->buffer == NULL) { + res = TEED_ERROR_OUT_OF_MEMORY; + goto exit; + } + +exit: + return res; +} +EXPORT_SYMBOL(teec_allocate_shared_memory); + +void teec_release_shared_memory(struct tee_sharedmemory *shared_memory) +{ + kfree(shared_memory->buffer); +} +EXPORT_SYMBOL(teec_release_shared_memory); + static const struct file_operations tee_fops = { .owner = THIS_MODULE, .read = tee_read, @@ -469,6 +626,8 @@ static int __init tee_init(void) "TEE\n", __func__, err); } + mutex_init(&sync); + return err; } diff --git a/include/linux/tee.h b/include/linux/tee.h index 0cdec2d254a..4928e4dca1f 100644 --- a/include/linux/tee.h +++ b/include/linux/tee.h @@ -3,7 +3,7 @@ * * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen - * Author: Martin Hovang * Author: Joakim Bech * License terms: GNU General Public License (GPL) version 2 */ @@ -45,6 +45,37 @@ #define TEEC_CONFIG_PAYLOAD_REF_COUNT 4 +/* + * Flag constants indicating which of the memory references in an open session + * or invoke command operation payload (TEEC_Operation) that are used. + */ +#define TEEC_MEMREF_0_USED 0x00000001 +#define TEEC_MEMREF_1_USED 0x00000002 +#define TEEC_MEMREF_2_USED 0x00000004 +#define TEEC_MEMREF_3_USED 0x00000008 + +/* + * Flag constants indicating the data transfer direction of memory in + * TEEC_SharedMemory and TEEC_MemoryReference. TEEC_MEM_INPUT signifies data + * transfer direction from the client application to the TEE. TEEC_MEM_OUTPUT + * signifies data transfer direction from the TEE to the client application. + */ +#define TEEC_MEM_INPUT 0x00000001 +#define TEEC_MEM_OUTPUT 0x00000002 + +/* + * Session login methods, for use in TEEC_OpenSession() as parameter + * connectionMethod. Type is t_uint32. + * + * TEEC_LOGIN_PUBLIC No login data is provided. + */ +#define TEEC_LOGIN_PUBLIC 0x0 + +/* + * Exposed functions (command_id) in the static TA + */ +#define TEE_STA_SET_L2CC_PREFETCH_CTRL_REGISTER 11 + /** * struct tee_uuid - Structure that represent an uuid. * @timeLow: The low field of the time stamp. @@ -85,6 +116,8 @@ struct tee_operation { uint32_t flags; }; +struct tee_context {}; + /** * struct tee_session - The session of an open tee device. * @state: The current state in the linux kernel. @@ -140,4 +173,104 @@ struct tee_read { */ int call_sec_world(struct tee_session *ts, int sec_cmd); + +/** + * teec_initialize_context() - Initializes a context holding connection + * information on the specific TEE. 
+ * @param name: A zero-terminated string identifying the TEE to connect to. + * If name is set to NULL, the default TEE is connected to. + * NULL is the only supported value in this version of the + * API implementation. + * @param context: The context structure which is to be initialized. + * + * Initializes a context holding connection information between the calling + * client application and the TEE designated by the name string. + */ +int teec_initialize_context(const char *name, struct tee_context *context); + +/** + * teec_finalize_context() - Destroys a context holding connection information + * on the specific TEE. + * @param context: The context to be destroyed. + * + * This function destroys an initialized TEE context, closing the connection + * between the client application and the TEE. This function must only be + * called when all sessions related to this TEE context have been closed and + * all shared memory blocks have been released. + */ +int teec_finalize_context(struct tee_context *context); + +/** + * teec_open_session() - Opens a new session with the specified trusted + * application. + * @param context: The initialized TEE context structure in which scope to + * open the session. + * @param session: The session to initialize. + * @param destination: A structure identifying the trusted application with + * which to open a session. If this is set to NULL the + * operation TEEC_MEMREF_0 is expected to contain the blob + * which holds the Trusted Application. + * @param connection_method: The connection method to use. + * @param connection_data: Any data necessary to connect with the chosen + * connection method. Not supported should be set to + * NULL. + * @param operation: An operation structure to use in the session. May be + * set to NULL to signify no operation structure needed. + * If destination is set to NULL, TEEC_MEMREF_0 is + * expected to hold the TA binary as described above. + * @param error_origin: A parameter which will hold the error origin if this + * function returns any value other than TEEC_SUCCESS. + * + * Opens a new session with the specified trusted application. Only + * connectionMethod == TEEC_LOGIN_PUBLIC is supported. connectionData and + * operation shall be set to NULL. + */ +int teec_open_session(struct tee_context *context, struct tee_session *session, + const struct tee_uuid *destination, + unsigned int connection_method, + void *connection_data, struct tee_operation *operation, + unsigned int *error_origin); + +/** + * teec_close_session() - Closes the session which has been opened with the + * specific trusted application. + * @param session: The opened session to close. + * + * Closes the session which has been opened with the specific trusted + * application. + */ +int teec_close_session(struct tee_session *session); + +/** + * teec_invoke_command() - Executes a command in the specified trusted + * application. + * @param destination: A structure identifying the trusted application. + * @param command_id: Identifier of the command in the trusted application to + * invoke. + * @param operation: An operation structure to use in the invoke command. May + * be set to NULL to signify no operation structure needed. + * @param error_origin: A parameter which will hold the error origin if this + * function returns any value other than TEEC_SUCCESS. + * + * Executes a command in the specified trusted application. 
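+ *
+ * A minimal usage sketch for a kernel client that has already opened a
+ * session (illustrative only; buf, len and cmd_id are hypothetical
+ * placeholders, not part of this API):
+ *
+ *	struct tee_operation op = { 0 };
+ *	unsigned int origin;
+ *
+ *	op.shm[0].buffer = buf;
+ *	op.shm[0].size = len;
+ *	op.flags = TEEC_MEMREF_0_USED;
+ *	if (teec_invoke_command(session, cmd_id, &op, &origin) != TEED_SUCCESS)
+ *		pr_err("tee invoke failed, origin %u\n", origin);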
+ */ +int teec_invoke_command(struct tee_session *session, unsigned int command_id, + struct tee_operation *operation, + unsigned int *error_origin); + +/** + * teec_allocate_shared_memory() - Allocate shared memory for TEE. + * @param context: The initialized TEE context structure in which scope to + * open the session. + * @param shared_memory: Pointer to the allocated shared memory. + */ +int teec_allocate_shared_memory(struct tee_context *context, + struct tee_sharedmemory *shared_memory); + +/** + * teec_release_shared_memory() - Free the shared memory. + * @param shared_memory: Pointer to the shared memory to be freed. + */ +void teec_release_shared_memory(struct tee_sharedmemory *shared_memory); + #endif -- cgit v1.2.3 From 7ef928c63bff257c900f7c3d9540cc2f18bd688b Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Fri, 14 Oct 2011 11:08:56 +0200 Subject: cryp: Updates according to corrected design spec - Change names of context registers so they corresponds to the name in the DS. - Follow the design specification strictly and with the additional missing steps that we have got from IP developers. - Remove unused functions from u8500. - Call atomic version of power enable/disable functions to get rid of sleeping while atomic BUG prints. - Replace mutex with spinlock in the crypto context to get rid of sleeping while atomic BUG prints. - Replace completion in interrupt mode with polling for the remaining data length of data to get rid of sleeping in invalid context BUG print. - Correct optimization bug which occured when building without debug information (the compiler optimized it incorrectly). - Update of irq, fixed interrupt mask handling. - Correct bug regarding keysize when doing context save. - BUG! DES hangs when encrypting data not modulo 16 using DMA. The reason for this is that the CRYP IP only supports burst size of 4 words. Due to this reason DMA for DES have been replaced by CPU mode instead. ST-Ericsson ID: 283399, 340779 ST-Ericsson Linux next: Not tested, ER 320876 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: I23dbc123dd2fb7e47f5713025ed71423efbb5b18 Signed-off-by: Joakim Bech Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/23297 Reviewed-by: QATEST Reviewed-by: Berne HEBARK --- drivers/crypto/ux500/cryp/cryp.c | 392 ++++++++++++---------------------- drivers/crypto/ux500/cryp/cryp.h | 54 ++--- drivers/crypto/ux500/cryp/cryp_core.c | 322 +++++++++++++--------------- drivers/crypto/ux500/cryp/cryp_irq.c | 4 +- drivers/crypto/ux500/cryp/cryp_irqp.h | 2 +- drivers/crypto/ux500/cryp/cryp_p.h | 64 +++--- 6 files changed, 337 insertions(+), 501 deletions(-) diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index 94928f7efce..ed9eeccf1d3 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -52,40 +52,6 @@ int cryp_check(struct cryp_device_data *device_data) return -EPERM; } -/** - * cryp_reset - This routine loads the cryp register with the default values - * @device_data: Pointer to the device data struct for base address. 
- */ -void cryp_reset(struct cryp_device_data *device_data) -{ - writel(CRYP_DMACR_DEFAULT, &device_data->base->dmacr); - writel(CRYP_IMSC_DEFAULT, &device_data->base->imsc); - - writel(CRYP_KEY_DEFAULT, &device_data->base->key_1_l); - writel(CRYP_KEY_DEFAULT, &device_data->base->key_1_r); - writel(CRYP_KEY_DEFAULT, &device_data->base->key_2_l); - writel(CRYP_KEY_DEFAULT, &device_data->base->key_2_r); - writel(CRYP_KEY_DEFAULT, &device_data->base->key_3_l); - writel(CRYP_KEY_DEFAULT, &device_data->base->key_3_r); - writel(CRYP_INIT_VECT_DEFAULT, &device_data->base->init_vect_0_l); - writel(CRYP_INIT_VECT_DEFAULT, &device_data->base->init_vect_0_r); - writel(CRYP_KEY_DEFAULT, &device_data->base->key_4_l); - writel(CRYP_KEY_DEFAULT, &device_data->base->key_4_r); - writel(CRYP_INIT_VECT_DEFAULT, &device_data->base->init_vect_1_l); - writel(CRYP_INIT_VECT_DEFAULT, &device_data->base->init_vect_1_r); - - /* Last step since the protection mode bits need to be modified. */ - writel(CRYP_CR_DEFAULT | CRYP_CR_FFLUSH, &device_data->base->cr); - - /* - * CRYP_INFIFO_READY_MASK is the expected value on the status register - * when starting a new calculation, which means Input FIFO is not full - * and input FIFO is empty. - */ - while (readl(&device_data->base->status) != CRYP_INFIFO_READY_MASK) - cpu_relax(); -} - /** * cryp_activity - This routine enables/disable the cryptography function. * @device_data: Pointer to the device data struct for base address. @@ -96,49 +62,8 @@ void cryp_activity(struct cryp_device_data *device_data, { CRYP_PUT_BITS(&device_data->base->cr, cryp_crypen, - CRYP_CRYPEN_POS, - CRYP_CRYPEN_MASK); -} - -/** - * cryp_start - starts the computation - * @device_data: Pointer to the device data struct for base address. - * @cryp_start: Enable/Disable functionality - */ -void cryp_start(struct cryp_device_data *device_data) -{ - CRYP_PUT_BITS(&device_data->base->cr, - CRYP_START_ENABLE, - CRYP_START_POS, - CRYP_START_MASK); -} - -/** - * cryp_init_signal - This routine submit the initialization values. - * @device_data: Pointer to the device data struct for base address. - * @cryp_init_bit: Enable/Disable init signal - */ -void cryp_init_signal(struct cryp_device_data *device_data, - enum cryp_init cryp_init_bit) -{ - CRYP_PUT_BITS(&device_data->base->cr, - cryp_init_bit, - CRYP_INIT_POS, - CRYP_INIT_MASK); -} - -/** - * cryp_key_preparation - This routine prepares key for decryption. - * @device_data: Pointer to the device data struct for base address. - * @cryp_prepkey: Enable/Disable - */ -void cryp_key_preparation(struct cryp_device_data *device_data, - enum cryp_key_prep cryp_prepkey) -{ - CRYP_PUT_BITS(&device_data->base->cr, - cryp_prepkey, - CRYP_KSE_POS, - CRYP_KSE_MASK); + CRYP_CR_CRYPEN_POS, + CRYP_CR_CRYPEN_MASK); } /** @@ -147,43 +72,23 @@ void cryp_key_preparation(struct cryp_device_data *device_data, */ void cryp_flush_inoutfifo(struct cryp_device_data *device_data) { - CRYP_SET_BITS(&device_data->base->cr, CRYP_FIFO_FLUSH_MASK); -} - -/** - * cryp_set_dir - - * @device_data: Pointer to the device data struct for base address. - * @dir: Crypto direction, encrypt/decrypt - */ -void cryp_set_dir(struct cryp_device_data *device_data, int dir) -{ - CRYP_PUT_BITS(&device_data->base->cr, - dir, - CRYP_ENC_DEC_POS, - CRYP_ENC_DEC_MASK); - - CRYP_PUT_BITS(&device_data->base->cr, - CRYP_DATA_TYPE_8BIT_SWAP, - CRYP_DATA_TYPE_POS, - CRYP_DATA_TYPE_MASK); -} + /* + * We always need to disble the hardware before trying to flush the + * FIFO. 
This is something that isn't written in the design + * specification, but we have been informed by the hardware designers + * that this must be done. + */ + cryp_activity(device_data, CRYP_CRYPEN_DISABLE); + cryp_wait_until_done(device_data); -/** - * cryp_cen_flush - - * @device_data: Pointer to the device data struct for base address. - */ -void cryp_cen_flush(struct cryp_device_data *device_data) -{ - CRYP_PUT_BITS(&device_data->base->cr, - CRYP_STATE_DISABLE, - CRYP_KEY_ACCESS_POS, - CRYP_KEY_ACCESS_MASK); - CRYP_SET_BITS(&device_data->base->cr, - CRYP_FIFO_FLUSH_MASK); - CRYP_PUT_BITS(&device_data->base->cr, - CRYP_CRYPEN_ENABLE, - CRYP_CRYPEN_POS, - CRYP_CRYPEN_MASK); + CRYP_SET_BITS(&device_data->base->cr, CRYP_CR_FFLUSH_MASK); + /* + * CRYP_SR_INFIFO_READY_MASK is the expected value on the status + * register when starting a new calculation, which means Input FIFO is + * not full and input FIFO is empty. + */ + while (readl(&device_data->base->sr) != CRYP_SR_INFIFO_READY_MASK) + cpu_relax(); } /** @@ -194,96 +99,68 @@ void cryp_cen_flush(struct cryp_device_data *device_data) int cryp_set_configuration(struct cryp_device_data *device_data, struct cryp_config *p_cryp_config) { - if (NULL == device_data) - return -EINVAL; - if (NULL == p_cryp_config) + if (NULL == device_data || NULL == p_cryp_config) return -EINVAL; - /* Since more than one bit is written macro put_bits is used*/ CRYP_PUT_BITS(&device_data->base->cr, - p_cryp_config->key_access, - CRYP_KEY_ACCESS_POS, - CRYP_KEY_ACCESS_MASK); - CRYP_PUT_BITS(&device_data->base->cr, - p_cryp_config->key_size, - CRYP_KEY_SIZE_POS, - CRYP_KEY_SIZE_MASK); - CRYP_PUT_BITS(&device_data->base->cr, - p_cryp_config->data_type, - CRYP_DATA_TYPE_POS, - CRYP_DATA_TYPE_MASK); - - /* Prepare key for decryption */ - if ((CRYP_ALGORITHM_DECRYPT == p_cryp_config->encrypt_or_decrypt) && - ((CRYP_ALGO_AES_ECB == p_cryp_config->algo_mode) || - (CRYP_ALGO_AES_CBC == p_cryp_config->algo_mode))) { + p_cryp_config->keysize, + CRYP_CR_KEYSIZE_POS, + CRYP_CR_KEYSIZE_MASK); + + /* Prepare key for decryption in AES_ECB and AES_CBC mode. */ + if ((CRYP_ALGORITHM_DECRYPT == p_cryp_config->algodir) && + ((CRYP_ALGO_AES_ECB == p_cryp_config->algomode) || + (CRYP_ALGO_AES_CBC == p_cryp_config->algomode))) { + /* + * This seems a bit odd, but it is indeed needed to set this to + * encrypt even though it is a decryption that we are doing. It + * also mentioned in the design spec that you need to do this. + * After the keyprepartion for decrypting is done you should set + * algodir back to decryption, which is done outside this if + * statement. + */ + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_ALGORITHM_ENCRYPT, + CRYP_CR_ALGODIR_POS, + CRYP_CR_ALGODIR_MASK); + + /* + * According to design specification we should set mode ECB + * during key preparation even though we might be running CBC + * when enter this function. + */ CRYP_PUT_BITS(&device_data->base->cr, CRYP_ALGO_AES_ECB, - CRYP_ALGOMODE_POS, - CRYP_ALGOMODE_MASK); + CRYP_CR_ALGOMODE_POS, + CRYP_CR_ALGOMODE_MASK); + CRYP_PUT_BITS(&device_data->base->cr, CRYP_CRYPEN_ENABLE, - CRYP_CRYPEN_POS, - CRYP_CRYPEN_MASK); + CRYP_CR_CRYPEN_POS, + CRYP_CR_CRYPEN_MASK); + + /* + * Writing to KSE_ENABLED will drop CRYPEN when key preparation + * is done. Therefore we need to set CRYPEN again outside this + * if statement when running decryption. 
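+ *
+ * The block below therefore runs the key preparation with AES-ECB and the
+ * encrypt direction while CRYPEN is set, waits for it to complete, and then
+ * relies on the writes further down to restore the requested algomode and
+ * algodir.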
+ */ CRYP_PUT_BITS(&device_data->base->cr, KSE_ENABLED, - CRYP_KSE_POS, - CRYP_KSE_MASK); + CRYP_CR_KSE_POS, + CRYP_CR_KSE_MASK); cryp_wait_until_done(device_data); - - CRYP_PUT_BITS(&device_data->base->cr, - CRYP_CRYPEN_DISABLE, - CRYP_CRYPEN_POS, - CRYP_CRYPEN_MASK); } CRYP_PUT_BITS(&device_data->base->cr, - CRYP_CRYPEN_ENABLE, - CRYP_CRYPEN_POS, - CRYP_CRYPEN_MASK); - CRYP_PUT_BITS(&device_data->base->cr, - p_cryp_config->algo_mode, - CRYP_ALGOMODE_POS, - CRYP_ALGOMODE_MASK); - CRYP_PUT_BITS(&device_data->base->cr, - p_cryp_config->encrypt_or_decrypt, - CRYP_ENC_DEC_POS, - CRYP_ENC_DEC_MASK); - - return 0; -} - -/** - * cryp_get_configuration - gets the parameter of the control register of IP - * @device_data: Pointer to the device data struct for base address. - * @p_cryp_config: Gets the configuration parameter from cryp ip. - */ -int cryp_get_configuration(struct cryp_device_data *device_data, - struct cryp_config *p_cryp_config) -{ - if (NULL == p_cryp_config) - return -EINVAL; + p_cryp_config->algomode, + CRYP_CR_ALGOMODE_POS, + CRYP_CR_ALGOMODE_MASK); - p_cryp_config->key_access = - ((readl(&device_data->base->cr) & CRYP_KEY_ACCESS_MASK) ? - CRYP_STATE_ENABLE : - CRYP_STATE_DISABLE); - p_cryp_config->key_size = - ((readl(&device_data->base->cr) & CRYP_KEY_SIZE_MASK) >> - CRYP_KEY_SIZE_POS); - - p_cryp_config->encrypt_or_decrypt = - ((readl(&device_data->base->cr) & CRYP_ENC_DEC_MASK) ? - CRYP_ALGORITHM_DECRYPT : - CRYP_ALGORITHM_ENCRYPT); - - p_cryp_config->data_type = - ((readl(&device_data->base->cr) & CRYP_DATA_TYPE_MASK) >> - CRYP_DATA_TYPE_POS); - p_cryp_config->algo_mode = - ((readl(&device_data->base->cr) & CRYP_ALGOMODE_MASK) >> - CRYP_ALGOMODE_POS); + CRYP_PUT_BITS(&device_data->base->cr, + p_cryp_config->algodir, + CRYP_CR_ALGODIR_POS, + CRYP_CR_ALGODIR_MASK); return 0; } @@ -302,11 +179,11 @@ int cryp_configure_protection(struct cryp_device_data *device_data, CRYP_WRITE_BIT(&device_data->base->cr, (u32) p_protect_config->secure_access, - CRYP_SECURE_MASK); + CRYP_CR_SECURE_MASK); CRYP_PUT_BITS(&device_data->base->cr, p_protect_config->privilege_access, - CRYP_PRLG_POS, - CRYP_PRLG_MASK); + CRYP_CR_PRLG_POS, + CRYP_CR_PRLG_MASK); return 0; } @@ -317,20 +194,9 @@ int cryp_configure_protection(struct cryp_device_data *device_data, */ int cryp_is_logic_busy(struct cryp_device_data *device_data) { - return CRYP_TEST_BITS(&device_data->base->status, - CRYP_BUSY_STATUS_MASK); -} - -/** - * cryp_get_status - This routine returns the complete status of the cryp logic - * @device_data: Pointer to the device data struct for base address. - */ -/* -int cryp_get_status(struct cryp_device_data *device_data) -{ - return (int) readl(device_data->base->status); + return CRYP_TEST_BITS(&device_data->base->sr, + CRYP_SR_BUSY_MASK); } -*/ /** * cryp_configure_for_dma - configures the CRYP IP for DMA operation @@ -425,17 +291,6 @@ int cryp_configure_init_vector(struct cryp_device_data *device_data, return 0; } -/** - * cryp_prep_ctx_mgmt - Prepares for handling the context of the block - * @device_data: Pointer to the device data struct for base address. 
- */ -static void cryp_prep_ctx_mgmt(struct cryp_device_data *device_data) -{ - cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH); - cryp_activity(device_data, CRYP_CRYPEN_DISABLE); - cryp_wait_until_done(device_data); -} - /** * cryp_save_device_context - Store hardware registers and * other device context parameter @@ -447,15 +302,21 @@ void cryp_save_device_context(struct cryp_device_data *device_data, { struct cryp_register *src_reg = device_data->base; - cryp_prep_ctx_mgmt(device_data); + /* + * Always start by disabling the hardware and wait for it to finish the + * ongoing calculations before trying to reprogram it. + */ + cryp_activity(device_data, CRYP_CRYPEN_DISABLE); + cryp_wait_until_done(device_data); + cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH); - ctx->din = readl(&src_reg->din); + if (CRYP_TEST_BITS(&src_reg->sr, CRYP_SR_IFEM_MASK) == 0) + ctx->din = readl(&src_reg->din); - ctx->dout = readl(&src_reg->dout); + ctx->cr = readl(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK; - ctx->cr = readl(&src_reg->cr); - ctx->dmacr = readl(&src_reg->dmacr); - ctx->imsc = readl(&src_reg->imsc); + CRYP_PUT_BITS(&src_reg->cr, 1, CRYP_CR_KEYRDEN_POS, + CRYP_CR_KEYRDEN_MASK); ctx->key_1_l = readl(&src_reg->key_1_l); ctx->key_1_r = readl(&src_reg->key_1_r); @@ -466,10 +327,21 @@ void cryp_save_device_context(struct cryp_device_data *device_data, ctx->key_4_l = readl(&src_reg->key_4_l); ctx->key_4_r = readl(&src_reg->key_4_r); - ctx->init_vect_0_l = readl(&src_reg->init_vect_0_l); - ctx->init_vect_0_r = readl(&src_reg->init_vect_0_r); - ctx->init_vect_1_l = readl(&src_reg->init_vect_1_l); - ctx->init_vect_1_r = readl(&src_reg->init_vect_1_r); + CRYP_PUT_BITS(&src_reg->cr, 0, CRYP_CR_KEYRDEN_POS, + CRYP_CR_KEYRDEN_MASK); + + /* Save IV for CBC mode for both AES and DES. */ + if (CRYP_TEST_BITS(&src_reg->cr, CRYP_CR_ALGOMODE_POS) == + CRYP_ALGO_TDES_CBC || + CRYP_TEST_BITS(&src_reg->cr, CRYP_CR_ALGOMODE_POS) == + CRYP_ALGO_DES_CBC || + CRYP_TEST_BITS(&src_reg->cr, CRYP_CR_ALGOMODE_POS) == + CRYP_ALGO_AES_CBC) { + ctx->init_vect_0_l = readl(&src_reg->init_vect_0_l); + ctx->init_vect_0_r = readl(&src_reg->init_vect_0_r); + ctx->init_vect_1_l = readl(&src_reg->init_vect_1_l); + ctx->init_vect_1_r = readl(&src_reg->init_vect_1_r); + } } /** @@ -482,26 +354,44 @@ void cryp_restore_device_context(struct cryp_device_data *device_data, struct cryp_device_context *ctx) { struct cryp_register *reg = device_data->base; + struct cryp_config *config = + (struct cryp_config *)device_data->current_ctx; + - cryp_prep_ctx_mgmt(device_data); + /* + * Fall through for all items in switch statement. DES is captured in + * the default. + */ + switch (config->keysize) { + case CRYP_KEY_SIZE_256: + writel(ctx->key_4_l, &reg->key_4_l); + writel(ctx->key_4_r, &reg->key_4_r); + + case CRYP_KEY_SIZE_192: + writel(ctx->key_3_l, &reg->key_3_l); + writel(ctx->key_3_r, &reg->key_3_r); + + case CRYP_KEY_SIZE_128: + writel(ctx->key_2_l, &reg->key_2_l); + writel(ctx->key_2_r, &reg->key_2_r); + + default: + writel(ctx->key_1_l, &reg->key_1_l); + writel(ctx->key_1_r, &reg->key_1_r); + } + + /* Restore IV for CBC mode for AES and DES.
*/ + if (config->algomode == CRYP_ALGO_TDES_CBC || + config->algomode == CRYP_ALGO_DES_CBC || + config->algomode == CRYP_ALGO_AES_CBC) { + writel(ctx->init_vect_0_l, &reg->init_vect_0_l); + writel(ctx->init_vect_0_r, &reg->init_vect_0_r); + writel(ctx->init_vect_1_l, &reg->init_vect_1_l); + writel(ctx->init_vect_1_r, &reg->init_vect_1_r); + } - writel(ctx->din, &reg->din); - writel(ctx->dout, &reg->dout); writel(ctx->cr, &reg->cr); - writel(ctx->dmacr, &reg->dmacr); - writel(ctx->imsc, &reg->imsc); - writel(ctx->key_1_l, &reg->key_1_l); - writel(ctx->key_1_r, &reg->key_1_r); - writel(ctx->key_2_l, &reg->key_2_l); - writel(ctx->key_2_r, &reg->key_2_r); - writel(ctx->key_3_l, &reg->key_3_l); - writel(ctx->key_3_r, &reg->key_3_r); - writel(ctx->key_4_l, &reg->key_4_l); - writel(ctx->key_4_r, &reg->key_4_r); - writel(ctx->init_vect_0_l, &reg->init_vect_0_l); - writel(ctx->init_vect_0_r, &reg->init_vect_0_r); - writel(ctx->init_vect_1_l, &reg->init_vect_1_l); - writel(ctx->init_vect_1_r, &reg->init_vect_1_r); + cryp_activity(device_data, CRYP_CRYPEN_ENABLE); } /** @@ -519,24 +409,6 @@ int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data) return 0; } -/** - * cryp_read_indata - This routine reads the 32 bit data from the data input - * register into the specified location. - * @device_data: Pointer to the device data struct for base address. - * @p_read_data: Read the data from the input FIFO. - */ -int cryp_read_indata(struct cryp_device_data *device_data, u32 *p_read_data) -{ - if (NULL == device_data) - return -EINVAL; - if (NULL == p_read_data) - return -EINVAL; - - *p_read_data = readl(&device_data->base->din); - - return 0; -} - /** * cryp_read_outdata - This routine reads the data from the data output * register of the CRYP logic diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h index 2d98923071c..ee7aee3dcb1 100644 --- a/drivers/crypto/ux500/cryp/cryp.h +++ b/drivers/crypto/ux500/cryp/cryp.h @@ -16,9 +16,6 @@ #include #include -/* Module Defines */ -#define CRYP_MODULE_NAME "CRYP HCL Module" - #define DEV_DBG_NAME "crypX crypX:" /* CRYP enable/disable */ @@ -47,11 +44,11 @@ enum cryp_state { /* Key preparation bit enable */ enum cryp_key_prep { - KSE_DISABLED, - KSE_ENABLED + KSE_DISABLED = 0, + KSE_ENABLED = 1 }; -/* Key size for AES*/ +/* Key size for AES */ #define CRYP_KEY_SIZE_128 (0) #define CRYP_KEY_SIZE_192 (1) #define CRYP_KEY_SIZE_256 (2) @@ -89,20 +86,20 @@ enum cryp_mode { /** * struct cryp_config - - * @key_access: Cryp state enable/disable - * @key_size: Key size for AES - * @data_type: Data type Swap - * @algo_mode: AES modes - * @encrypt_or_decrypt: Cryp Encryption or Decryption + * @keyrden: Cryp state enable/disable + * @keysize: Key size for AES + * @datatype: Data type Swap + * @algomode: AES modes + * @algodir: Cryp Encryption or Decryption * * CRYP configuration structure to be passed to set configuration */ struct cryp_config { - enum cryp_state key_access; - int key_size; - int data_type; - enum cryp_algo_mode algo_mode; - enum cryp_algorithm_dir encrypt_or_decrypt; + enum cryp_state keyrden; + int keysize; + int datatype; + enum cryp_algo_mode algomode; + enum cryp_algorithm_dir algodir; }; /** @@ -232,7 +229,6 @@ struct cryp_dma { * struct cryp_device_data - structure for a cryp device. * @base: Pointer to the hardware base address. * @dev: Pointer to the devices dev structure. - * @cryp_irq_complete: Pointer to an interrupt completion structure. * @clk: Pointer to the device's clock control. * @pwr_regulator: Pointer to the device's power control.
* @power_status: Current status of the power. @@ -241,22 +237,21 @@ struct cryp_dma { * @list_node: For inclusion into a klist. * @dma: The dma structure holding channel configuration. * @power_state: TRUE = power state on, FALSE = power state off. - * @power_state_mutex: Mutex for power_state. + * @power_state_spinlock: Spinlock for power_state. * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx. */ struct cryp_device_data { struct cryp_register __iomem *base; struct device *dev; - struct completion cryp_irq_complete; struct clk *clk; - struct regulator *pwr_regulator; + struct ux500_regulator *pwr_regulator; int power_status; struct spinlock ctx_lock; struct cryp_ctx *current_ctx; struct klist_node list_node; struct cryp_dma dma; bool power_state; - struct mutex power_state_mutex; + struct spinlock power_state_spinlock; bool restore_dev_ctx; }; @@ -266,31 +261,14 @@ void cryp_wait_until_done(struct cryp_device_data *device_data); int cryp_check(struct cryp_device_data *device_data); -void cryp_reset(struct cryp_device_data *device_data); - void cryp_activity(struct cryp_device_data *device_data, enum cryp_crypen cryp_crypen); -void cryp_start(struct cryp_device_data *device_data); - -void cryp_init_signal(struct cryp_device_data *device_data, - enum cryp_init cryp_init); - -void cryp_key_preparation(struct cryp_device_data *device_data, - enum cryp_key_prep cryp_key_prep); - void cryp_flush_inoutfifo(struct cryp_device_data *device_data); -void cryp_cen_flush(struct cryp_device_data *device_data); - -void cryp_set_dir(struct cryp_device_data *device_data, int dir); - int cryp_set_configuration(struct cryp_device_data *device_data, struct cryp_config *p_cryp_config); -int cryp_get_configuration(struct cryp_device_data *device_data, - struct cryp_config *p_cryp_config); - void cryp_configure_for_dma(struct cryp_device_data *device_data, enum cryp_dma_req_type dma_req); diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 197bb416067..f67577c386e 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include @@ -43,15 +43,13 @@ static int cryp_mode; -static DEFINE_KLIST(cryp_device_list, NULL, NULL); - static struct stedma40_chan_cfg *mem_to_engine; static struct stedma40_chan_cfg *engine_to_mem; /** * struct cryp_driver_data - data specific to the driver. * - * @cryp_device_list: A list of registered devices to choose from. + * @device_list: A list of registered devices to choose from. * @device_allocation: A semaphore initialized with number of devices. */ struct cryp_driver_data { @@ -103,34 +101,6 @@ static inline u32 uint8p_to_uint32_be(u8 *in) ((u32)in[3]); } -/** - * uint8p_to_uint32_le - 4*uint8 to uint32 little endian - * @in: Data to convert. 
- */ -static inline u32 uint8p_to_uint32_le(u8 *in) -{ - return (u32)in[3]<<24 | - ((u32)in[2]<<16) | - ((u32)in[1]<<8) | - ((u32)in[0]); -} - -static inline void uint32_to_uint8p_be(u32 in, u8 *out) -{ - out[0] = (u8)(in>>24); - out[1] = (u8)(in>>16); - out[2] = (u8)(in>>8); - out[3] = (u8) in; -} - -static inline void uint32_to_uint8p_le(u32 in, u8 *out) -{ - out[3] = (u8)(in>>24); - out[2] = (u8)(in>>16); - out[1] = (u8)(in>>8); - out[0] = (u8) in; -} - /** * swap_bits_in_byte - mirror the bits in a byte * @b: the byte to be mirrored @@ -206,13 +176,6 @@ static inline void swap_words_in_key_and_bits_in_byte(const u8 *in, } } -static inline void swap_4bits_in_bytes(const u8 *in, u8 *out, u32 len) -{ - unsigned int i; - for (i = 0; i < len; i++) - out[i] = swap_bits_in_byte(in[i]); -} - static irqreturn_t cryp_interrupt_handler(int irq, void *param) { struct cryp_ctx *ctx; @@ -224,6 +187,7 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) return IRQ_HANDLED; } + /* The device is coming from the one found in hw_crypt_noxts. */ device_data = (struct cryp_device_data *)param; ctx = device_data->current_ctx; @@ -233,6 +197,10 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) return IRQ_HANDLED; } + dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen, + cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ? + "out" : "in"); + if (cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO)) { if (ctx->outlen / ctx->blocksize > 0) { @@ -246,7 +214,6 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) if (ctx->outlen == 0) { cryp_disable_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO); - complete(&ctx->device->cryp_irq_complete); } } } else if (cryp_pending_irq_src(device_data, @@ -263,8 +230,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) cryp_disable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO); - if (ctx->config.algo_mode == CRYP_ALGO_AES_XTS) { - cryp_start(device_data); + if (ctx->config.algomode == CRYP_ALGO_AES_XTS) { + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_START_ENABLE, + CRYP_CR_START_POS, + CRYP_CR_START_MASK); + cryp_wait_until_done(device_data); } } @@ -360,7 +331,7 @@ static int cfg_keys(struct cryp_ctx *ctx) dev_dbg(ctx->device->dev, "[%s]", __func__); - if (mode_is_aes(ctx->config.algo_mode)) { + if (mode_is_aes(ctx->config.algomode)) { swap_words_in_key_and_bits_in_byte((u8 *)ctx->key, (u8 *)swapped_key, ctx->keylen); @@ -387,11 +358,29 @@ static int cfg_keys(struct cryp_ctx *ctx) static int cryp_setup_context(struct cryp_ctx *ctx, struct cryp_device_data *device_data) { + cryp_flush_inoutfifo(device_data); + + CRYP_PUT_BITS(&device_data->base->cr, + ctx->config.datatype, + CRYP_CR_DATATYPE_POS, + CRYP_CR_DATATYPE_MASK); + + switch (cryp_mode) { + case CRYP_MODE_INTERRUPT: + writel(CRYP_IMSC_DEFAULT, &device_data->base->imsc); + break; + + case CRYP_MODE_DMA: + writel(CRYP_DMACR_DEFAULT, &device_data->base->dmacr); + break; + + default: + break; + } + if (ctx->updated) cryp_restore_device_context(device_data, &ctx->dev_ctx); else { - cryp_activity(device_data, CRYP_CRYPEN_DISABLE); - if (cfg_keys(ctx) != 0) { dev_err(ctx->device->dev, "[%s]: cfg_keys failed!", __func__); @@ -399,9 +388,9 @@ static int cryp_setup_context(struct cryp_ctx *ctx, } if ((ctx->iv) && - (CRYP_ALGO_AES_ECB != ctx->config.algo_mode) && - (CRYP_ALGO_DES_ECB != ctx->config.algo_mode) && - (CRYP_ALGO_TDES_ECB != ctx->config.algo_mode)) { + (CRYP_ALGO_AES_ECB != ctx->config.algomode) && + (CRYP_ALGO_DES_ECB 
!= ctx->config.algomode) && + (CRYP_ALGO_TDES_ECB != ctx->config.algomode)) { if (cfg_ivs(device_data, ctx) != 0) return -EPERM; } @@ -409,10 +398,11 @@ static int cryp_setup_context(struct cryp_ctx *ctx, cryp_set_configuration(device_data, &ctx->config); } + cryp_activity(device_data, CRYP_CRYPEN_ENABLE); + return 0; } - static int cryp_get_device_data(struct cryp_ctx *ctx, struct cryp_device_data **device_data) { @@ -505,6 +495,12 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, dev_dbg(ctx->device->dev, "[%s]: ", __func__); + if (unlikely(!IS_ALIGNED((u32)sg, 4))) { + dev_err(ctx->device->dev, "[%s]: Data in sg list isn't " + "aligned! Addr: 0x%08x", __func__, (u32)sg); + return -EFAULT; + } + switch (direction) { case DMA_TO_DEVICE: channel = ctx->device->dma.chan_mem2cryp; @@ -534,7 +530,6 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, case DMA_FROM_DEVICE: channel = ctx->device->dma.chan_cryp2mem; ctx->device->dma.sg_dst = sg; - ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev, ctx->device->dma.sg_dst, ctx->device->dma.nents_dst, @@ -618,7 +613,7 @@ static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len) } static int cryp_polling_mode(struct cryp_ctx *ctx, - struct cryp_device_data *device_data) + struct cryp_device_data *device_data) { int i; int ret = 0; @@ -650,16 +645,15 @@ out: return ret; } -static int cryp_disable_power( - struct device *dev, - struct cryp_device_data *device_data, - bool save_device_context) +static int cryp_disable_power(struct device *dev, + struct cryp_device_data *device_data, + bool save_device_context) { int ret = 0; dev_dbg(dev, "[%s]", __func__); - mutex_lock(&device_data->power_state_mutex); + spin_lock(&device_data->power_state_spinlock); if (!device_data->power_state) goto out; @@ -672,7 +666,7 @@ static int cryp_disable_power( spin_unlock(&device_data->ctx_lock); clk_disable(device_data->clk); - ret = regulator_disable(device_data->pwr_regulator); + ret = ux500_regulator_atomic_disable(device_data->pwr_regulator); if (ret) dev_err(dev, "[%s]: " "regulator_disable() failed!", @@ -681,7 +675,7 @@ static int cryp_disable_power( device_data->power_state = false; out: - mutex_unlock(&device_data->power_state_mutex); + spin_unlock(&device_data->power_state_spinlock); return ret; } @@ -695,9 +689,9 @@ static int cryp_enable_power( dev_dbg(dev, "[%s]", __func__); - mutex_lock(&device_data->power_state_mutex); + spin_lock(&device_data->power_state_spinlock); if (!device_data->power_state) { - ret = regulator_enable(device_data->pwr_regulator); + ret = ux500_regulator_atomic_enable(device_data->pwr_regulator); if (ret) { dev_err(dev, "[%s]: regulator_enable() failed!", __func__); @@ -708,7 +702,8 @@ static int cryp_enable_power( if (ret) { dev_err(dev, "[%s]: clk_enable() failed!", __func__); - regulator_disable(device_data->pwr_regulator); + ux500_regulator_atomic_disable( + device_data->pwr_regulator); goto out; } device_data->power_state = true; @@ -724,13 +719,13 @@ static int cryp_enable_power( spin_unlock(&device_data->ctx_lock); } out: - mutex_unlock(&device_data->power_state_mutex); + spin_unlock(&device_data->power_state_spinlock); return ret; } static int hw_crypt_noxts(struct cryp_ctx *ctx, - struct cryp_device_data *device_data) + struct cryp_device_data *device_data) { int ret; @@ -742,26 +737,30 @@ static int hw_crypt_noxts(struct cryp_ctx *ctx, pr_debug(DEV_DBG_NAME " [%s]", __func__); ctx->outlen = ctx->datalen; - ctx->config.key_access = CRYP_STATE_ENABLE; - ctx->config.data_type = 
CRYP_DATA_TYPE_8BIT_SWAP; + ctx->config.keyrden = CRYP_STATE_ENABLE; + ctx->config.datatype = CRYP_DATA_TYPE_8BIT_SWAP; - cryp_reset(device_data); + if (unlikely(!IS_ALIGNED((u32)indata, 4))) { + pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: " + "0x%08x", __func__, (u32)indata); + return -EINVAL; + } ret = cryp_setup_context(ctx, device_data); if (ret) goto out; - cryp_flush_inoutfifo(device_data); - if (cryp_mode == CRYP_MODE_INTERRUPT) { - INIT_COMPLETION(ctx->device->cryp_irq_complete); - - cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO); - cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO); + cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO | + CRYP_IRQ_SRC_OUTPUT_FIFO); - cryp_activity(device_data, CRYP_CRYPEN_ENABLE); - - wait_for_completion(&ctx->device->cryp_irq_complete); + /* + * ctx->outlen is decremented in the cryp_interrupt_handler + * function. We had to add cpu_relax() (barrier) to make sure + * that gcc didn't optimze away this variable. + */ + while (ctx->outlen > 0) + cpu_relax(); } else if (cryp_mode == CRYP_MODE_POLLING || cryp_mode == CRYP_MODE_DMA) { /* @@ -821,8 +820,8 @@ static int ablk_dma_crypt(struct ablkcipher_request *areq) pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.key_access = CRYP_STATE_ENABLE; - ctx->config.data_type = CRYP_DATA_TYPE_8BIT_SWAP; + ctx->config.keyrden = CRYP_STATE_ENABLE; + ctx->config.datatype = CRYP_DATA_TYPE_8BIT_SWAP; ctx->datalen = areq->nbytes; ctx->outlen = areq->nbytes; @@ -837,8 +836,6 @@ static int ablk_dma_crypt(struct ablkcipher_request *areq) goto out; } - cryp_reset(device_data); - ret = cryp_setup_context(ctx, device_data); if (ret) goto out_power; @@ -884,14 +881,16 @@ out: static int ablk_crypt(struct ablkcipher_request *areq) { + struct ablkcipher_walk walk; struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); - struct ablkcipher_walk walk; + struct cryp_device_data *device_data; unsigned long src_paddr; unsigned long dst_paddr; int ret; int nbytes; - struct cryp_device_data *device_data; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); ret = cryp_get_device_data(ctx, &device_data); if (ret) @@ -955,7 +954,7 @@ out: } static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher, - const u8 *key, unsigned int keylen) + const u8 *key, unsigned int keylen) { struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); u32 *flags = &cipher->base.crt_flags; @@ -964,15 +963,15 @@ static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher, switch (keylen) { case AES_KEYSIZE_128: - ctx->config.key_size = CRYP_KEY_SIZE_128; + ctx->config.keysize = CRYP_KEY_SIZE_128; break; case AES_KEYSIZE_192: - ctx->config.key_size = CRYP_KEY_SIZE_192; + ctx->config.keysize = CRYP_KEY_SIZE_192; break; case AES_KEYSIZE_256: - ctx->config.key_size = CRYP_KEY_SIZE_256; + ctx->config.keysize = CRYP_KEY_SIZE_256; break; default: @@ -997,6 +996,12 @@ static int aes_setkey(struct crypto_tfm *tfm, const u8 *key, pr_debug(DEV_DBG_NAME " [%s]", __func__); + if (unlikely(!IS_ALIGNED((u32)key, 4))) { + dev_err(ctx->device->dev, "[%s]: key isn't aligned! 
Addr: " + "0x%08x", __func__, (u32)key); + return -EFAULT; + } + /* For CTR mode */ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && @@ -1008,11 +1013,11 @@ static int aes_setkey(struct crypto_tfm *tfm, const u8 *key, } if (keylen == AES_KEYSIZE_128) - ctx->config.key_size = CRYP_KEY_SIZE_128; + ctx->config.keysize = CRYP_KEY_SIZE_128; else if (keylen == AES_KEYSIZE_192) - ctx->config.key_size = CRYP_KEY_SIZE_192; + ctx->config.keysize = CRYP_KEY_SIZE_192; else if (keylen == AES_KEYSIZE_256) - ctx->config.key_size = CRYP_KEY_SIZE_256; + ctx->config.keysize = CRYP_KEY_SIZE_256; memcpy(ctx->key, key, keylen); ctx->keylen = keylen; @@ -1022,7 +1027,7 @@ static int aes_setkey(struct crypto_tfm *tfm, const u8 *key, } static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher, - const u8 *key, unsigned int keylen) + const u8 *key, unsigned int keylen) { struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); u32 *flags = &cipher->base.crt_flags; @@ -1085,7 +1090,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key, } static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher, - const u8 *key, unsigned int keylen) + const u8 *key, unsigned int keylen) { struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); u32 *flags = &cipher->base.crt_flags; @@ -1218,8 +1223,8 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->blocksize = crypto_tfm_alg_blocksize(tfm); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; - ctx->config.algo_mode = CRYP_ALGO_AES_ECB; + ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algomode = CRYP_ALGO_AES_ECB; ctx->indata = in; ctx->outdata = out; @@ -1238,8 +1243,8 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->blocksize = crypto_tfm_alg_blocksize(tfm); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; - ctx->config.algo_mode = CRYP_ALGO_AES_ECB; + ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; + ctx->config.algomode = CRYP_ALGO_AES_ECB; ctx->indata = in; ctx->outdata = out; @@ -1258,8 +1263,8 @@ static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->blocksize = crypto_tfm_alg_blocksize(tfm); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; - ctx->config.algo_mode = CRYP_ALGO_DES_ECB; + ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algomode = CRYP_ALGO_DES_ECB; ctx->indata = in; ctx->outdata = out; @@ -1278,8 +1283,8 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->blocksize = crypto_tfm_alg_blocksize(tfm); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; - ctx->config.algo_mode = CRYP_ALGO_DES_ECB; + ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; + ctx->config.algomode = CRYP_ALGO_DES_ECB; ctx->indata = in; ctx->outdata = out; @@ -1298,8 +1303,8 @@ static void des3_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->blocksize = crypto_tfm_alg_blocksize(tfm); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; - ctx->config.algo_mode = CRYP_ALGO_TDES_ECB; + ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algomode = CRYP_ALGO_TDES_ECB; ctx->indata = in; ctx->outdata = out; @@ -1318,8 +1323,8 @@ static void des3_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->blocksize = crypto_tfm_alg_blocksize(tfm); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; - ctx->config.algo_mode = CRYP_ALGO_TDES_ECB; + ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; + ctx->config.algomode = CRYP_ALGO_TDES_ECB; ctx->indata = in; ctx->outdata = out; @@ 
-1330,7 +1335,6 @@ static void des3_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) __func__); } - static int aes_ecb_encrypt(struct ablkcipher_request *areq) { struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); @@ -1338,8 +1342,8 @@ static int aes_ecb_encrypt(struct ablkcipher_request *areq) pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; - ctx->config.algo_mode = CRYP_ALGO_AES_ECB; + ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algomode = CRYP_ALGO_AES_ECB; ctx->blocksize = AES_BLOCK_SIZE; if (cryp_mode == CRYP_MODE_DMA) @@ -1356,8 +1360,8 @@ static int aes_ecb_decrypt(struct ablkcipher_request *areq) pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; - ctx->config.algo_mode = CRYP_ALGO_AES_ECB; + ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; + ctx->config.algomode = CRYP_ALGO_AES_ECB; ctx->blocksize = AES_BLOCK_SIZE; if (cryp_mode == CRYP_MODE_DMA) @@ -1375,8 +1379,8 @@ static int aes_cbc_encrypt(struct ablkcipher_request *areq) pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; - ctx->config.algo_mode = CRYP_ALGO_AES_CBC; + ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algomode = CRYP_ALGO_AES_CBC; ctx->blocksize = AES_BLOCK_SIZE; /* Only DMA for ablkcipher, since givcipher not yet supported */ @@ -1396,8 +1400,8 @@ static int aes_cbc_decrypt(struct ablkcipher_request *areq) pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; - ctx->config.algo_mode = CRYP_ALGO_AES_CBC; + ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; + ctx->config.algomode = CRYP_ALGO_AES_CBC; ctx->blocksize = AES_BLOCK_SIZE; /* Only DMA for ablkcipher, since givcipher not yet supported */ @@ -1417,8 +1421,8 @@ static int aes_ctr_encrypt(struct ablkcipher_request *areq) pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; - ctx->config.algo_mode = CRYP_ALGO_AES_CTR; + ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algomode = CRYP_ALGO_AES_CTR; ctx->blocksize = AES_BLOCK_SIZE; /* Only DMA for ablkcipher, since givcipher not yet supported */ @@ -1438,8 +1442,8 @@ static int aes_ctr_decrypt(struct ablkcipher_request *areq) pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; - ctx->config.algo_mode = CRYP_ALGO_AES_CTR; + ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; + ctx->config.algomode = CRYP_ALGO_AES_CTR; ctx->blocksize = AES_BLOCK_SIZE; /* Only DMA for ablkcipher, since givcipher not yet supported */ @@ -1458,11 +1462,11 @@ static int des_ecb_encrypt(struct ablkcipher_request *areq) pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; - ctx->config.algo_mode = CRYP_ALGO_DES_ECB; + ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algomode = CRYP_ALGO_DES_ECB; ctx->blocksize = DES_BLOCK_SIZE; - /** + /* * Run the non DMA version also for DMA, since DMA is currently not * working for DES. 
*/ @@ -1476,11 +1480,11 @@ static int des_ecb_decrypt(struct ablkcipher_request *areq) pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; - ctx->config.algo_mode = CRYP_ALGO_DES_ECB; + ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; + ctx->config.algomode = CRYP_ALGO_DES_ECB; ctx->blocksize = DES_BLOCK_SIZE; - /** + /* * Run the non DMA version also for DMA, since DMA is currently not * working for DES. */ @@ -1491,20 +1495,14 @@ static int des_cbc_encrypt(struct ablkcipher_request *areq) { struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); - u32 *flags = &cipher->base.crt_flags; pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; - ctx->config.algo_mode = CRYP_ALGO_DES_CBC; + ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algomode = CRYP_ALGO_DES_CBC; ctx->blocksize = DES_BLOCK_SIZE; - /* Only DMA for ablkcipher, since givcipher not yet supported */ - if ((cryp_mode == CRYP_MODE_DMA) && - (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) - return ablk_dma_crypt(areq); - - /** + /* * Run the non DMA version also for DMA, since DMA is currently not * working for DES. */ @@ -1515,20 +1513,14 @@ static int des_cbc_decrypt(struct ablkcipher_request *areq) { struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); - u32 *flags = &cipher->base.crt_flags; pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; - ctx->config.algo_mode = CRYP_ALGO_DES_CBC; + ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; + ctx->config.algomode = CRYP_ALGO_DES_CBC; ctx->blocksize = DES_BLOCK_SIZE; - /* Only DMA for ablkcipher, since givcipher not yet supported */ - if ((cryp_mode == CRYP_MODE_DMA) && - (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) - return ablk_dma_crypt(areq); - - /** + /* * Run the non DMA version also for DMA, since DMA is currently not * working for DES. */ @@ -1542,11 +1534,11 @@ static int des3_ecb_encrypt(struct ablkcipher_request *areq) pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; - ctx->config.algo_mode = CRYP_ALGO_TDES_ECB; + ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algomode = CRYP_ALGO_TDES_ECB; ctx->blocksize = DES3_EDE_BLOCK_SIZE; - /** + /* * Run the non DMA version also for DMA, since DMA is currently not * working for DES. */ @@ -1560,11 +1552,11 @@ static int des3_ecb_decrypt(struct ablkcipher_request *areq) pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; - ctx->config.algo_mode = CRYP_ALGO_TDES_ECB; + ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; + ctx->config.algomode = CRYP_ALGO_TDES_ECB; ctx->blocksize = DES3_EDE_BLOCK_SIZE; - /** + /* * Run the non DMA version also for DMA, since DMA is currently not * working for DES. 
*/ @@ -1575,20 +1567,14 @@ static int des3_cbc_encrypt(struct ablkcipher_request *areq) { struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); - u32 *flags = &cipher->base.crt_flags; pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_ENCRYPT; - ctx->config.algo_mode = CRYP_ALGO_TDES_CBC; + ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; + ctx->config.algomode = CRYP_ALGO_TDES_CBC; ctx->blocksize = DES3_EDE_BLOCK_SIZE; - /* Only DMA for ablkcipher, since givcipher not yet supported */ - if ((cryp_mode == CRYP_MODE_DMA) && - (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) - return ablk_dma_crypt(areq); - - /** + /* * Run the non DMA version also for DMA, since DMA is currently not * working for DES. */ @@ -1599,20 +1585,14 @@ static int des3_cbc_decrypt(struct ablkcipher_request *areq) { struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); - u32 *flags = &cipher->base.crt_flags; pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.encrypt_or_decrypt = CRYP_ALGORITHM_DECRYPT; - ctx->config.algo_mode = CRYP_ALGO_TDES_CBC; + ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; + ctx->config.algomode = CRYP_ALGO_TDES_CBC; ctx->blocksize = DES3_EDE_BLOCK_SIZE; - /* Only DMA for ablkcipher, since givcipher not yet supported */ - if ((cryp_mode == CRYP_MODE_DMA) && - (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER)) - return ablk_dma_crypt(areq); - - /** + /* * Run the non DMA version also for DMA, since DMA is currently not * working for DES. */ @@ -1890,7 +1870,7 @@ static struct crypto_alg *u8500_cryp_algs[] = { &des_ecb_alg, &des_cbc_alg, &des3_ecb_alg, - &des3_cbc_alg + &des3_cbc_alg, }; /** @@ -1946,7 +1926,7 @@ static int u8500_cryp_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; dev_dbg(dev, "[%s]", __func__); - device_data = kzalloc(sizeof(struct cryp_device_data), GFP_KERNEL); + device_data = kzalloc(sizeof(struct cryp_device_data), GFP_ATOMIC); if (!device_data) { dev_err(dev, "[%s]: kzalloc() failed!", __func__); ret = -ENOMEM; @@ -1986,10 +1966,10 @@ static int u8500_cryp_probe(struct platform_device *pdev) } spin_lock_init(&device_data->ctx_lock); - mutex_init(&device_data->power_state_mutex); + spin_lock_init(&device_data->power_state_spinlock); /* Enable power for CRYP hardware block */ - device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape"); + device_data->pwr_regulator = ux500_regulator_get(&pdev->dev); if (IS_ERR(device_data->pwr_regulator)) { dev_err(dev, "[%s]: could not get cryp regulator", __func__); ret = PTR_ERR(device_data->pwr_regulator); @@ -2044,8 +2024,6 @@ static int u8500_cryp_probe(struct platform_device *pdev) goto out_power; } - init_completion(&device_data->cryp_irq_complete); - if (cryp_mode == CRYP_MODE_DMA) cryp_dma_setup_channel(device_data, dev); @@ -2076,7 +2054,7 @@ out_clk: clk_put(device_data->clk); out_regulator: - regulator_put(device_data->pwr_regulator); + ux500_regulator_put(device_data->pwr_regulator); out_unmap: iounmap(device_data->base); @@ -2143,7 +2121,7 @@ static int u8500_cryp_remove(struct platform_device *pdev) __func__); clk_put(device_data->clk); - regulator_put(device_data->pwr_regulator); + ux500_regulator_put(device_data->pwr_regulator); iounmap(device_data->base); @@ -2327,5 +2305,7 @@ module_exit(u8500_cryp_mod_fini); module_param(cryp_mode, int, 0); MODULE_DESCRIPTION("Driver for ST-Ericsson U8500 CRYP crypto engine."); +MODULE_ALIAS("aes-all"); 
+MODULE_ALIAS("des-all"); MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c index eacff226aa8..8814acc05d7 100644 --- a/drivers/crypto/ux500/cryp/cryp_irq.c +++ b/drivers/crypto/ux500/cryp/cryp_irq.c @@ -24,7 +24,7 @@ void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src) dev_dbg(device_data->dev, "[%s]", __func__); i = readl(&device_data->base->imsc); - set_bit(irq_src, (void *)&i); + i = i | irq_src; writel(i, &device_data->base->imsc); } @@ -35,7 +35,7 @@ void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src) dev_dbg(device_data->dev, "[%s]", __func__); i = readl(&device_data->base->imsc); - clear_bit(irq_src, (void *)&i); + i = i & ~irq_src; writel(i, &device_data->base->imsc); } diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h index 5b60f887d02..8b339cc34bf 100644 --- a/drivers/crypto/ux500/cryp/cryp_irqp.h +++ b/drivers/crypto/ux500/cryp/cryp_irqp.h @@ -81,7 +81,7 @@ */ struct cryp_register { u32 cr; /* Configuration register */ - u32 status; /* Status register */ + u32 sr; /* Status register */ u32 din; /* Data input register */ u32 din_size; /* Data input size register */ u32 dout; /* Data output register */ diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h index 966de4633cc..adc95457499 100644 --- a/drivers/crypto/ux500/cryp/cryp_p.h +++ b/drivers/crypto/ux500/cryp/cryp_p.h @@ -51,7 +51,6 @@ */ #define MAX_DEVICE_SUPPORT 2 #define CRYP_CR_DEFAULT 0x0002 -#define CRYP_CR_FFLUSH BIT(14) #define CRYP_DMACR_DEFAULT 0x0 #define CRYP_IMSC_DEFAULT 0x0 #define CRYP_DIN_DEFAULT 0x0 @@ -62,40 +61,47 @@ /** * CRYP Control register specific mask */ -#define CRYP_SECURE_MASK BIT(0) -#define CRYP_PRLG_MASK BIT(1) -#define CRYP_ENC_DEC_MASK BIT(2) +#define CRYP_CR_SECURE_MASK BIT(0) +#define CRYP_CR_PRLG_MASK BIT(1) +#define CRYP_CR_ALGODIR_MASK BIT(2) +#define CRYP_CR_ALGOMODE_MASK (BIT(5) | BIT(4) | BIT(3)) +#define CRYP_CR_DATATYPE_MASK (BIT(7) | BIT(6)) +#define CRYP_CR_KEYSIZE_MASK (BIT(9) | BIT(8)) +#define CRYP_CR_KEYRDEN_MASK BIT(10) +#define CRYP_CR_KSE_MASK BIT(11) +#define CRYP_CR_START_MASK BIT(12) +#define CRYP_CR_INIT_MASK BIT(13) +#define CRYP_CR_FFLUSH_MASK BIT(14) +#define CRYP_CR_CRYPEN_MASK BIT(15) +#define CRYP_CR_CONTEXT_SAVE_MASK (CRYP_CR_SECURE_MASK |\ + CRYP_CR_PRLG_MASK |\ + CRYP_CR_ALGODIR_MASK |\ + CRYP_CR_ALGOMODE_MASK |\ + CRYP_CR_DATATYPE_MASK |\ + CRYP_CR_KEYSIZE_MASK |\ + CRYP_CR_KEYRDEN_MASK |\ + CRYP_CR_DATATYPE_MASK) + + +#define CRYP_SR_INFIFO_READY_MASK (BIT(0) | BIT(1)) +#define CRYP_SR_IFEM_MASK BIT(0) #define CRYP_SR_BUSY_MASK BIT(4) -#define CRYP_KEY_ACCESS_MASK BIT(10) -#define CRYP_KSE_MASK BIT(11) -#define CRYP_START_MASK BIT(12) -#define CRYP_INIT_MASK BIT(13) -#define CRYP_FIFO_FLUSH_MASK BIT(14) -#define CRYP_CRYPEN_MASK BIT(15) -#define CRYP_INFIFO_READY_MASK (BIT(0) | BIT(1)) -#define CRYP_ALGOMODE_MASK (BIT(5) | BIT(4) | BIT(3)) -#define CRYP_DATA_TYPE_MASK (BIT(7) | BIT(6)) -#define CRYP_KEY_SIZE_MASK (BIT(9) | BIT(8)) /** * Bit position used while setting bits in register */ -#define CRYP_PRLG_POS 1 -#define CRYP_ENC_DEC_POS 2 -#define CRYP_ALGOMODE_POS 3 -#define CRYP_SR_BUSY_POS 4 -#define CRYP_DATA_TYPE_POS 6 -#define CRYP_KEY_SIZE_POS 8 -#define CRYP_KEY_ACCESS_POS 10 -#define CRYP_KSE_POS 11 -#define CRYP_START_POS 12 -#define CRYP_INIT_POS 13 -#define CRYP_CRYPEN_POS 15 +#define CRYP_CR_PRLG_POS 1 +#define CRYP_CR_ALGODIR_POS 2 +#define CRYP_CR_ALGOMODE_POS 3 
+#define CRYP_CR_DATATYPE_POS 6 +#define CRYP_CR_KEYSIZE_POS 8 +#define CRYP_CR_KEYRDEN_POS 10 +#define CRYP_CR_KSE_POS 11 +#define CRYP_CR_START_POS 12 +#define CRYP_CR_INIT_POS 13 +#define CRYP_CR_CRYPEN_POS 15 -/** - * CRYP Status register - */ -#define CRYP_BUSY_STATUS_MASK BIT(4) +#define CRYP_SR_BUSY_POS 4 /** * CRYP PCRs------PC_NAND control register -- cgit v1.2.3 From eebb14cb2df3fb6071b217e1b1f571ad2b9a79ad Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Wed, 19 Oct 2011 13:21:40 +0200 Subject: crypto: ux500: hash: converted to ahash. - Removed cryp1 and hash1 from u8500_v2_prcc_clocks, added comment that they already are included in u8500_v1_v2_prcc_clocks. - Re-activated hash1 accelerator in u8500__defconfig. - Updated clock-db8500.c with correct clk for hash1. - Added -O0 in hash Makefile, to remove optimization in debug mode. - Converted to ahash (from shash). - Updated infrastructure (klist functionality). - Dependencies to hcl_defs removed. - Changed power_state_mutex to spinlock and also use the atomic regulator interfaces, which removes the kernel printout BUG: sleeping function... - Removed the clear_bit function call and instead use the HASH_CLEAR_BITS macro. - Re-arranged the code to collect the external functions in the bottom of the file. - Removed page_to_phys/phys_to_virt calls in hash_hw_update, data directly accessed in walk.data. - Removed unused defines. ST-Ericsson ID: 319847, 280690 ST-Ericsson Linux next: Not tested, ER 320876 ST-Ericsson FOSS-OUT ID: NA Change-Id: I732b7320cd8302d1dc86e4acb1954880e175773a Signed-off-by: Berne Hebark Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/23294 Reviewed-by: Joakim BECH --- drivers/crypto/ux500/hash/Makefile | 2 +- drivers/crypto/ux500/hash/hash_alg.h | 404 ++++--- drivers/crypto/ux500/hash/hash_core.c | 1911 ++++++++++++++++++--------------- 3 files changed, 1242 insertions(+), 1075 deletions(-) diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile index 6194da8eec8..aaa5f56a2c2 100644 --- a/drivers/crypto/ux500/hash/Makefile +++ b/drivers/crypto/ux500/hash/Makefile @@ -4,7 +4,7 @@ # License terms: GNU General Public License (GPL) version 2 # ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG -CFLAGS_hash_core.o := -DDEBUG +CFLAGS_hash_core.o := -DDEBUG -O0 endif obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += u8500_hash.o diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h index 1c3dd5705fb..3bf1354ea03 100644 --- a/drivers/crypto/ux500/hash/hash_alg.h +++ b/drivers/crypto/ux500/hash/hash_alg.h @@ -2,140 +2,137 @@ * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen (shujuan.chen@stericsson.com) * Author: Joakim Bech (joakim.xx.bech@stericsson.com) + * Author: Berne Hebark (berne.hebark@stericsson.com)) * License terms: GNU General Public License (GPL) version 2 */ #ifndef _HASH_ALG_H #define _HASH_ALG_H -#include +#include /* Number of bytes the message digest */ -#define HASH_MSG_DIGEST_SIZE 32 -#define HASH_BLOCK_SIZE 64 +#define HASH_MSG_DIGEST_SIZE 32 +#define HASH_BLOCK_SIZE 64 #define HASH_SHA1_DIGEST_SIZE 20 -#define HASH_SHA2_DIGEST_SIZE 32 - -/* Version defines */ -#define HASH_HCL_VERSION_ID 1 -#define HASH_HCL_MAJOR_ID 2 -#define HASH_HCL_MINOR_ID 1 - -#define MAX_HASH_DEVICE 2 +#define HASH_SHA2_DIGEST_SIZE 32 /* Maximum value of the length's high word */ -#define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL +#define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL /* Power on Reset values HASH registers */ -#define HASH_RESET_CONTROL_REG_VALUE 0x0 -#define 
HASH_RESET_START_REG_VALUE 0x0 +#define HASH_RESET_CR_VALUE 0x0 +#define HASH_RESET_STR_VALUE 0x0 /* Number of context swap registers */ -#define HASH_CSR_COUNT 52 +#define HASH_CSR_COUNT 52 -#define HASH_RESET_CSRX_REG_VALUE 0x0 -#define HASH_RESET_CSFULL_REG_VALUE 0x0 -#define HASH_RESET_CSDATAIN_REG_VALUE 0x0 +#define HASH_RESET_CSRX_REG_VALUE 0x0 +#define HASH_RESET_CSFULL_REG_VALUE 0x0 +#define HASH_RESET_CSDATAIN_REG_VALUE 0x0 -#define HASH_RESET_INDEX_VAL 0x0 -#define HASH_RESET_BIT_INDEX_VAL 0x0 -#define HASH_RESET_BUFFER_VAL 0x0 -#define HASH_RESET_LEN_HIGH_VAL 0x0 -#define HASH_RESET_LEN_LOW_VAL 0x0 +#define HASH_RESET_INDEX_VAL 0x0 +#define HASH_RESET_BIT_INDEX_VAL 0x0 +#define HASH_RESET_BUFFER_VAL 0x0 +#define HASH_RESET_LEN_HIGH_VAL 0x0 +#define HASH_RESET_LEN_LOW_VAL 0x0 /* Control register bitfields */ -#define HASH_CR_RESUME_MASK 0x11FCF +#define HASH_CR_RESUME_MASK 0x11FCF -#define HASH_CR_SWITCHON_POS 31 -#define HASH_CR_SWITCHON_MASK MASK_BIT31 +#define HASH_CR_SWITCHON_POS 31 +#define HASH_CR_SWITCHON_MASK BIT(31) -#define HASH_CR_EMPTYMSG_POS 20 -#define HASH_CR_EMPTYMSG_MASK MASK_BIT20 +#define HASH_CR_EMPTYMSG_POS 20 +#define HASH_CR_EMPTYMSG_MASK BIT(20) -#define HASH_CR_DINF_POS 12 -#define HASH_CR_DINF_MASK MASK_BIT12 +#define HASH_CR_DINF_POS 12 +#define HASH_CR_DINF_MASK BIT(12) -#define HASH_CR_NBW_POS 8 -#define HASH_CR_NBW_MASK 0x00000F00UL +#define HASH_CR_NBW_POS 8 +#define HASH_CR_NBW_MASK 0x00000F00UL -#define HASH_CR_LKEY_POS 16 -#define HASH_CR_LKEY_MASK MASK_BIT16 +#define HASH_CR_LKEY_POS 16 +#define HASH_CR_LKEY_MASK BIT(16) -#define HASH_CR_ALGO_POS 7 -#define HASH_CR_ALGO_MASK MASK_BIT7 +#define HASH_CR_ALGO_POS 7 +#define HASH_CR_ALGO_MASK BIT(7) -#define HASH_CR_MODE_POS 6 -#define HASH_CR_MODE_MASK MASK_BIT6 +#define HASH_CR_MODE_POS 6 +#define HASH_CR_MODE_MASK BIT(6) -#define HASH_CR_DATAFORM_POS 4 -#define HASH_CR_DATAFORM_MASK (MASK_BIT4 | MASK_BIT5) +#define HASH_CR_DATAFORM_POS 4 +#define HASH_CR_DATAFORM_MASK (BIT(4) | BIT(5)) -#define HASH_CR_DMAE_POS 3 -#define HASH_CR_DMAE_MASK MASK_BIT3 +#define HASH_CR_DMAE_POS 3 +#define HASH_CR_DMAE_MASK BIT(3) -#define HASH_CR_INIT_POS 2 -#define HASH_CR_INIT_MASK MASK_BIT2 +#define HASH_CR_INIT_POS 2 +#define HASH_CR_INIT_MASK BIT(2) -#define HASH_CR_PRIVN_POS 1 -#define HASH_CR_PRIVN_MASK MASK_BIT1 +#define HASH_CR_PRIVN_POS 1 +#define HASH_CR_PRIVN_MASK BIT(1) -#define HASH_CR_SECN_POS 0 -#define HASH_CR_SECN_MASK MASK_BIT0 +#define HASH_CR_SECN_POS 0 +#define HASH_CR_SECN_MASK BIT(0) /* Start register bitfields */ -#define HASH_STR_DCAL_POS 8 -#define HASH_STR_DCAL_MASK MASK_BIT8 +#define HASH_STR_DCAL_POS 8 +#define HASH_STR_DCAL_MASK BIT(8) +#define HASH_STR_DEFAULT 0x0 -#define HASH_STR_NBLW_POS 0 -#define HASH_STR_NBLW_MASK 0x0000001FUL +#define HASH_STR_NBLW_POS 0 +#define HASH_STR_NBLW_MASK 0x0000001FUL -#define HASH_NBLW_MAX_VAL 0x1F +#define HASH_NBLW_MAX_VAL 0x1F /* PrimeCell IDs */ -#define HASH_P_ID0 0xE0 -#define HASH_P_ID1 0x05 -#define HASH_P_ID2 0x38 -#define HASH_P_ID3 0x00 -#define HASH_CELL_ID0 0x0D -#define HASH_CELL_ID1 0xF0 -#define HASH_CELL_ID2 0x05 -#define HASH_CELL_ID3 0xB1 - -#define HASH_SET_DIN(val) HCL_WRITE_REG( \ - sys_ctx_g.registry[HASH_DEVICE_ID_1]->din, (val)) - -#define HASH_INITIALIZE \ - HCL_WRITE_BITS( \ - sys_ctx_g.registry[HASH_DEVICE_ID_1]->cr, \ - 0x01 << HASH_CR_INIT_POS, \ +#define HASH_P_ID0 0xE0 +#define HASH_P_ID1 0x05 +#define HASH_P_ID2 0x38 +#define HASH_P_ID3 0x00 +#define HASH_CELL_ID0 0x0D +#define HASH_CELL_ID1 0xF0 +#define 
HASH_CELL_ID2 0x05 +#define HASH_CELL_ID3 0xB1 + +#define HASH_SET_BITS(reg_name, mask) \ + writel((readl(reg_name) | mask), reg_name) + +#define HASH_CLEAR_BITS(reg_name, mask) \ + writel((readl(reg_name) & ~mask), reg_name) + +#define HASH_PUT_BITS(reg, val, shift, mask) \ + writel(((readl(reg) & ~(mask)) | \ + (((u32)val << shift) & (mask))), reg) + +#define HASH_SET_DIN(val) writel((val), &device_data->base->din) + +#define HASH_INITIALIZE \ + HASH_PUT_BITS( \ + &device_data->base->cr, \ + 0x01, HASH_CR_INIT_POS, \ HASH_CR_INIT_MASK) -#define HASH_SET_DATA_FORMAT(data_format) \ - HCL_WRITE_BITS( \ - sys_ctx_g.registry[HASH_DEVICE_ID_1]->cr, \ - (u32) (data_format) << HASH_CR_DATAFORM_POS, \ +#define HASH_SET_DATA_FORMAT(data_format) \ + HASH_PUT_BITS( \ + &device_data->base->cr, \ + (u32) (data_format), HASH_CR_DATAFORM_POS, \ HASH_CR_DATAFORM_MASK) - -#define HASH_GET_HX(pos) \ - HCL_READ_REG(sys_ctx_g.registry[HASH_DEVICE_ID_1]->hx[pos]) - -#define HASH_SET_NBLW(val) \ - HCL_WRITE_BITS( \ - sys_ctx_g.registry[HASH_DEVICE_ID_1]->str, \ - (u32) (val) << HASH_STR_NBLW_POS, \ +#define HASH_SET_NBLW(val) \ + HASH_PUT_BITS( \ + &device_data->base->str, \ + (u32) (val), HASH_STR_NBLW_POS, \ HASH_STR_NBLW_MASK) - -#define HASH_SET_DCAL \ - HCL_WRITE_BITS( \ - sys_ctx_g.registry[HASH_DEVICE_ID_1]->str, \ - 0x01 << HASH_STR_DCAL_POS, \ +#define HASH_SET_DCAL \ + HASH_PUT_BITS( \ + &device_data->base->str, \ + 0x01, HASH_STR_DCAL_POS, \ HASH_STR_DCAL_MASK) -#define HASH_BLOCK_BYTE_SIZE 64 - /** * struct uint64 - Structure to handle 64 bits integers. - * @high_word: Most significant bits - * @low_word: Least significant bits + * @high_word: Most significant bits. + * @low_word: Least significant bits. * * Used to handle 64 bits integers. */ @@ -146,27 +143,27 @@ struct uint64 { /** * struct hash_register - Contains all registers in u8500 hash hardware. - * @cr: HASH control register (0x000) - * @din: HASH data input register (0x004) - * @str: HASH start register (0x008) - * @hx: HASH digest register 0..7 (0x00c-0x01C) - * @padding0: Reserved (0x02C) - * @itcr: Integration test control register (0x080) - * @itip: Integration test input register (0x084) - * @itop: Integration test output register (0x088) - * @padding1: Reserved (0x08C) - * @csfull: HASH context full register (0x0F8) - * @csdatain: HASH context swap data input register (0x0FC) - * @csrx: HASH context swap register 0..51 (0x100-0x1CC) - * @padding2: Reserved (0x1D0) - * @periphid0: HASH peripheral identification register 0 (0xFE0) - * @periphid1: HASH peripheral identification register 1 (0xFE4) - * @periphid2: HASH peripheral identification register 2 (0xFE8) - * @periphid3: HASH peripheral identification register 3 (0xFEC) - * @cellid0: HASH PCell identification register 0 (0xFF0) - * @cellid1: HASH PCell identification register 1 (0xFF4) - * @cellid2: HASH PCell identification register 2 (0xFF8) - * @cellid3: HASH PCell identification register 3 (0xFFC) + * @cr: HASH control register (0x000). + * @din: HASH data input register (0x004). + * @str: HASH start register (0x008). + * @hx: HASH digest register 0..7 (0x00c-0x01C). + * @padding0: Reserved (0x02C). + * @itcr: Integration test control register (0x080). + * @itip: Integration test input register (0x084). + * @itop: Integration test output register (0x088). + * @padding1: Reserved (0x08C). + * @csfull: HASH context full register (0x0F8). + * @csdatain: HASH context swap data input register (0x0FC). + * @csrx: HASH context swap register 0..51 (0x100-0x1CC). 
+ * @padding2: Reserved (0x1D0). + * @periphid0: HASH peripheral identification register 0 (0xFE0). + * @periphid1: HASH peripheral identification register 1 (0xFE4). + * @periphid2: HASH peripheral identification register 2 (0xFE8). + * @periphid3: HASH peripheral identification register 3 (0xFEC). + * @cellid0: HASH PCell identification register 0 (0xFF0). + * @cellid1: HASH PCell identification register 1 (0xFF4). + * @cellid2: HASH PCell identification register 2 (0xFF8). + * @cellid3: HASH PCell identification register 3 (0xFFC). * * The device communicates to the HASH via 32-bit-wide control registers * accessible via the 32-bit width AMBA rev. 2.0 AHB Bus. Below is a structure @@ -205,16 +202,16 @@ struct hash_register { /** * struct hash_state - Hash context state. - * @temp_cr: Temporary HASH Control Register - * @str_reg: HASH Start Register - * @din_reg: HASH Data Input Register - * @csr[52]: HASH Context Swap Registers 0-39 - * @csfull: HASH Context Swap Registers 40 ie Status flags - * @csdatain: HASH Context Swap Registers 41 ie Input data - * @buffer: Working buffer for messages going to the hardware - * @length: Length of the part of the message hashed so far (floor(N/64) * 64) - * @index: Valid number of bytes in buffer (N % 64) - * @bit_index: Valid number of bits in buffer (N % 8) + * @temp_cr: Temporary HASH Control Register. + * @str_reg: HASH Start Register. + * @din_reg: HASH Data Input Register. + * @csr[52]: HASH Context Swap Registers 0-39. + * @csfull: HASH Context Swap Registers 40 ie Status flags. + * @csdatain: HASH Context Swap Registers 41 ie Input data. + * @buffer: Working buffer for messages going to the hardware. + * @length: Length of the part of message hashed so far (floor(N/64) * 64). + * @index: Valid number of bytes in buffer (N % 64). + * @bit_index: Valid number of bits in buffer (N % 8). * * This structure is used between context switches, i.e. when ongoing jobs are * interupted with new jobs. When this happens we need to store intermediate @@ -225,36 +222,16 @@ struct hash_register { * and MUST be checked whenever this code is ported on new platforms. */ struct hash_state { - u32 temp_cr; - u32 str_reg; - u32 din_reg; - u32 csr[52]; - u32 csfull; - u32 csdatain; - u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)]; - struct uint64 length; - u8 index; - u8 bit_index; -}; - -/** - * struct hash_system_context - Structure for the global system context. - * @registry: Pointer to the registry of the hash hardware - * @state: State of the hash device - */ -struct hash_system_context { - /* - * Pointer to HASH registers structure. We know that this gives a - * checkpatch warning and in the current design it needs to be a - * volatile. We will change it when we will rewrite the driver similar - * to how we have done in cryp-part. We have also read - * Documentation/volatile-considered-harmful.txt as checkpatch tell - * us to do. - */ - volatile struct hash_register *registry[MAX_HASH_DEVICE]; - - /* State of HASH device */ - struct hash_state state[MAX_HASH_DEVICE]; + u32 temp_cr; + u32 str_reg; + u32 din_reg; + u32 csr[52]; + u32 csfull; + u32 csdatain; + u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)]; + struct uint64 length; + u8 index; + u8 bit_index; }; /** @@ -269,42 +246,32 @@ enum hash_device_id { /** * enum hash_data_format - HASH data format. 
- * @HASH_DATA_32_BITS: 32 bits data format - * @HASH_DATA_16_BITS: 16 bits data format - * @HASH_DATA_8_BITS: 8 bits data format - * @HASH_DATA_1_BITS: 1 bit data format + * @HASH_DATA_32_BITS: 32 bits data format + * @HASH_DATA_16_BITS: 16 bits data format + * @HASH_DATA_8_BITS: 8 bits data format. + * @HASH_DATA_1_BITS: 1 bit data format. */ enum hash_data_format { - HASH_DATA_32_BITS = 0x0, - HASH_DATA_16_BITS = 0x1, - HASH_DATA_8_BITS = 0x2, - HASH_DATA_1_BIT = 0x3 -}; - -/** - * struct hash_protection_config - Device protection configuration. - * @privilege_access: FIXME, add comment. - * @secure_access: FIXME, add comment. - */ -struct hash_protection_config { - int privilege_access; - int secure_access; + HASH_DATA_32_BITS = 0x0, + HASH_DATA_16_BITS = 0x1, + HASH_DATA_8_BITS = 0x2, + HASH_DATA_1_BIT = 0x3 }; /** - * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm + * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm. * @HASH_ALGO_SHA1: Indicates that SHA1 is used. * @HASH_ALGO_SHA2: Indicates that SHA2 (SHA256) is used. */ enum hash_algo { - HASH_ALGO_SHA1 = 0x0, - HASH_ALGO_SHA2 = 0x1 + HASH_ALGO_SHA1 = 0x0, + HASH_ALGO_SHA256 = 0x1 }; /** - * enum hash_op - Enumeration for selecting between HASH or HMAC mode - * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode - * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC + * enum hash_op - Enumeration for selecting between HASH or HMAC mode. + * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode. + * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC. */ enum hash_op { HASH_OPER_MODE_HASH = 0x0, @@ -312,10 +279,10 @@ enum hash_op { }; /** - * struct hash_config - Configuration data for the hardware - * @data_format: Format of data entered into the hash data in register - * @algorithm: Algorithm selection bit - * @oper_mode: Operating mode selection bit + * struct hash_config - Configuration data for the hardware. + * @data_format: Format of data entered into the hash data in register. + * @algorithm: Algorithm selection bit. + * @oper_mode: Operating mode selection bit. */ struct hash_config { int data_format; @@ -324,48 +291,67 @@ struct hash_config { }; /** - * enum hash_rv - Return values / error codes for hash. + * struct hash_ctx - The context used for hash calculations. + * @key: The key used in the operation. + * @keylen: The length of the key. + * @updated: Indicates if hardware is initialized for new operations. + * @state: The state of the current calculations. + * @config: The current configuration. + * @digestsize The size of current digest. + * @device Pointer to the device structure. */ -enum hash_rv { - HASH_OK = 0, - HASH_MSG_LENGTH_OVERFLOW, - HASH_INVALID_PARAMETER, - HASH_UNSUPPORTED_HW +struct hash_ctx { + u8 key[HASH_BLOCK_SIZE]; + u32 keylen; + u8 updated; + struct hash_state state; + struct hash_config config; + int digestsize; + struct hash_device_data *device; }; /** - * struct hash_ctx - The context used for hash calculations. - * @key: The key used in the operation - * @keylen: The length of the key - * @updated: Indicates if hardware is initialized for new operations - * @state: The state of the current calculations - * @config: The current configuration + * struct hash_device_data - structure for a hash device. + * @base: Pointer to the hardware base address. + * @list_node: For inclusion in klist. + * @dev: Pointer to the device dev structure. + * @ctx_lock: Spinlock for current_ctx. + * @current_ctx: Pointer to the currently allocated context. 
+ * @power_state: TRUE = power state on, FALSE = power state off. + * @power_state_lock: Spinlock for power_state. + * @regulator: Pointer to the device's power control. + * @clk: Pointer to the device's clock control. + * @restore_dev_state: TRUE = saved state, FALSE = no saved state. */ -struct hash_ctx { - u8 key[HASH_BLOCK_BYTE_SIZE]; - u32 keylen; - u8 updated; - struct hash_state state; - struct hash_config config; +struct hash_device_data { + struct hash_register __iomem *base; + struct klist_node list_node; + struct device *dev; + struct spinlock ctx_lock; + struct hash_ctx *current_ctx; + bool power_state; + struct spinlock power_state_lock; + struct ux500_regulator *regulator; + struct clk *clk; + bool restore_dev_state; }; -int hash_init_base_address(int hash_device_id, t_logical_address base_address); - -int hash_setconfiguration(int hash_device_id, struct hash_config *p_config); +int hash_check_hw(struct hash_device_data *device_data); -void hash_begin(struct hash_ctx *ctx); +int hash_setconfiguration(struct hash_device_data *device_data, + struct hash_config *config); -void hash_get_digest(int hid, u8 *digest, int algorithm); +void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx); -int hash_hw_update(struct shash_desc *desc, - int hash_device_id, - const u8 *p_data_buffer, - u32 msg_length); +void hash_get_digest(struct hash_device_data *device_data, + u8 *digest, int algorithm); -int hash_end(struct hash_ctx *ctx, u8 digest[HASH_MSG_DIGEST_SIZE]); +int hash_hw_update(struct ahash_request *req); -int hash_save_state(int hash_device_id, struct hash_state *state); +int hash_save_state(struct hash_device_data *device_data, + struct hash_state *state); -int hash_resume_state(int hash_device_id, const struct hash_state *state); +int hash_resume_state(struct hash_device_data *device_data, + const struct hash_state *state); #endif diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index a2e4ebd8ac1..3b472d0bfaa 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -15,129 +15,124 @@ #include #include #include +#include #include #include -#include #include #include -#include + +#include #include #include #include +#include +#include #include #include "hash_alg.h" -#define DRIVER_NAME "DRIVER HASH" -/* Enable/Disables debug msgs */ -#define DRIVER_DEBUG 1 -#define DRIVER_DEBUG_PFX DRIVER_NAME -#define DRIVER_DBG KERN_DEBUG - -#define MAX_HASH_DIGEST_BYTE_SIZE 32 - -static struct mutex hash_hw_acc_mutex; - -static int debug; -static struct hash_system_context sys_ctx_g; -static struct hash_driver_data *internal_drv_data; +#define DEV_DBG_NAME "hashX hashX:" /** - * struct hash_driver_data - IO Base and clock. - * @base: The IO base for the block. - * @clk: The clock. - * @regulator: The current regulator. - * @power_state: TRUE = power state on, FALSE = power state off. - * @power_state_mutex: Mutex for power_state. - * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx. + * struct hash_driver_data - data specific to the driver. + * + * @device_list: A list of registered devices to choose from. + * @device_allocation: A semaphore initialized with number of devices. 
*/ struct hash_driver_data { - void __iomem *base; - struct device *dev; - struct clk *clk; - struct regulator *regulator; - bool power_state; - struct mutex power_state_mutex; - bool restore_dev_state; + struct klist device_list; + struct semaphore device_allocation; }; -/* Declaration of functions */ -static void hash_messagepad(int hid, const u32 *message, u8 index_bytes); +static struct hash_driver_data driver_data; +/* Declaration of functions */ /** - * clear_reg_str - Clear the registry hash_str. - * @hid: Hardware device ID + * hash_messagepad - Pads a message and write the nblw bits. + * @device_data: Structure for the hash device. + * @message: Last word of a message + * @index_bytes: The number of bytes in the last message * - * This function will clear the dcal bit and the nblw bits. + * This function manages the final part of the digest calculation, when less + * than 512 bits (64 bytes) remain in message. This means index_bytes < 64. + * + * Reentrancy: Non Re-entrant. */ -static inline void clear_reg_str(int hid) -{ - /* - * We will only clear NBLW since writing 0 to DCAL is done by the - * hardware - */ - sys_ctx_g.registry[hid]->str &= ~HASH_STR_NBLW_MASK; -} +static void hash_messagepad(struct hash_device_data *device_data, + const u32 *message, u8 index_bytes); +/** + * hash_disable_power - Request to disable power and clock. + * @device_data: Structure for the hash device. + * @save_device_state: If true, saves the current hw state. + * + * This function request for disabling power (regulator) and clock, + * and could also save current hw state. + */ static int hash_disable_power( - struct device *dev, - struct hash_driver_data *device_data, - bool save_device_state) + struct hash_device_data *device_data, + bool save_device_state) { int ret = 0; + struct device *dev = device_data->dev; dev_dbg(dev, "[%s]", __func__); - mutex_lock(&device_data->power_state_mutex); + spin_lock(&device_data->power_state_lock); if (!device_data->power_state) goto out; - if (save_device_state) { - hash_save_state(HASH_DEVICE_ID_1, - &sys_ctx_g.state[HASH_DEVICE_ID_1]); + if (save_device_state && device_data->current_ctx) { + hash_save_state(device_data, + &device_data->current_ctx->state); device_data->restore_dev_state = true; } clk_disable(device_data->clk); - ret = regulator_disable(device_data->regulator); + ret = ux500_regulator_atomic_disable(device_data->regulator); if (ret) - dev_err(dev, "[%s]: " - "regulator_disable() failed!", - __func__); + dev_err(dev, "[%s] regulator_disable() failed!", __func__); device_data->power_state = false; out: - mutex_unlock(&device_data->power_state_mutex); + spin_unlock(&device_data->power_state_lock); return ret; } +/** + * hash_enable_power - Request to enable power and clock. + * @device_data: Structure for the hash device. + * @restore_device_state: If true, restores a previous saved hw state. + * + * This function request for enabling power (regulator) and clock, + * and could also restore a previously saved hw state. 
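+ *
+ * Note: this runs with power_state_lock (a spinlock) held, which is why the
+ * atomic ux500 regulator interface is used here instead of regulator_enable().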
+ */ static int hash_enable_power( - struct device *dev, - struct hash_driver_data *device_data, - bool restore_device_state) + struct hash_device_data *device_data, + bool restore_device_state) { int ret = 0; - + struct device *dev = device_data->dev; dev_dbg(dev, "[%s]", __func__); - mutex_lock(&device_data->power_state_mutex); + spin_lock(&device_data->power_state_lock); if (!device_data->power_state) { - ret = regulator_enable(device_data->regulator); + ret = ux500_regulator_atomic_enable(device_data->regulator); if (ret) { dev_err(dev, "[%s]: regulator_enable() failed!", __func__); goto out; } - ret = clk_enable(device_data->clk); if (ret) { dev_err(dev, "[%s]: clk_enable() failed!", __func__); - regulator_disable(device_data->regulator); + ret = ux500_regulator_atomic_disable( + device_data->regulator); goto out; } device_data->power_state = true; @@ -146,1088 +141,1274 @@ static int hash_enable_power( if (device_data->restore_dev_state) { if (restore_device_state) { device_data->restore_dev_state = false; - hash_resume_state(HASH_DEVICE_ID_1, - &sys_ctx_g.state[HASH_DEVICE_ID_1]); + hash_resume_state(device_data, + &device_data->current_ctx->state); } } out: - mutex_unlock(&device_data->power_state_mutex); + spin_unlock(&device_data->power_state_lock); return ret; } +/** + * hash_get_device_data - Checks for an available hash device and return it. + * @hash_ctx: Structure for the hash context. + * @device_data: Structure for the hash device. + * + * This function check for an available hash device and return it to + * the caller. + * Note! Caller need to release the device, calling up(). + */ +static int hash_get_device_data(struct hash_ctx *ctx, + struct hash_device_data **device_data) +{ + int ret; + struct klist_iter device_iterator; + struct klist_node *device_node; + struct hash_device_data *local_device_data = NULL; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + /* Wait until a device is available */ + ret = down_interruptible(&driver_data.device_allocation); + if (ret) + return ret; /* Interrupted */ + + /* Select a device */ + klist_iter_init(&driver_data.device_list, &device_iterator); + device_node = klist_next(&device_iterator); + while (device_node) { + local_device_data = container_of(device_node, + struct hash_device_data, list_node); + spin_lock(&local_device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (local_device_data->current_ctx) { + device_node = klist_next(&device_iterator); + } else { + local_device_data->current_ctx = ctx; + ctx->device = local_device_data; + spin_unlock(&local_device_data->ctx_lock); + break; + } + spin_unlock(&local_device_data->ctx_lock); + } + klist_iter_exit(&device_iterator); + + if (!device_node) { + /** + * No free device found. + * Since we allocated a device with down_interruptible, this + * should not be able to happen. + * Number of available devices, which are contained in + * device_allocation, is therefore decremented by not doing + * an up(device_allocation). + */ + return -EBUSY; + } + + *device_data = local_device_data; + + return 0; +} + /** * init_hash_hw - Initialise the hash hardware for a new calculation. - * @desc: The hash descriptor for the job + * @device_data: Structure for the hash device. + * @req: The hash request for the job. * * This function will enable the bits needed to clear and start a new * calculation. 
*/ -static int init_hash_hw(struct shash_desc *desc) +static int init_hash_hw(struct hash_device_data *device_data, + struct ahash_request *req) { int ret = 0; - int hash_rv; - struct hash_ctx *ctx = shash_desc_ctx(desc); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug("[init_hash_hw] (ctx=0x%x)!", (u32)ctx); + dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32)ctx); - hash_rv = hash_setconfiguration(HASH_DEVICE_ID_1, &ctx->config); - if (hash_rv != HASH_OK) { - pr_err("hash_setconfiguration() failed!"); - ret = -EPERM; + ret = hash_setconfiguration(device_data, &ctx->config); + if (ret) { + dev_err(device_data->dev, "[%s] hash_setconfiguration() " + "failed!", __func__); return ret; } - hash_begin(ctx); + hash_begin(device_data, ctx); return ret; } /** * hash_init - Common hash init function for SHA1/SHA2 (SHA256). - * @desc: The hash descriptor for the job + * @req: The hash request for the job. * * Initialize structures. */ -static int hash_init(struct shash_desc *desc) +static int hash_init(struct ahash_request *req) { - struct hash_ctx *ctx = shash_desc_ctx(desc); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug("[hash_init]: (ctx=0x%x)!", (u32)ctx); + pr_debug(DEV_DBG_NAME "[%s] (ctx=0x%x)!", __func__, (u32)ctx); memset(&ctx->state, 0, sizeof(struct hash_state)); ctx->updated = 0; - return 0; } /** - * hash_update - The hash update function for SHA1/SHA2 (SHA256). - * @desc: The hash descriptor for the job - * @data: Message that should be hashed - * @len: The length of the message that should be hashed + * hash_processblock - This function processes a single block of 512 bits (64 + * bytes), word aligned, starting at message. + * @device_data: Structure for the hash device. + * @message: Block (512 bits) of message to be written to + * the HASH hardware. + * + * Reentrancy: Non Re-entrant. */ -static int hash_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static void hash_processblock( + struct hash_device_data *device_data, + const u32 *message) { - int ret = 0; - int hash_rv = HASH_OK; - - pr_debug("[hash_update]: (data=0x%x, len=%d)!", - (u32)data, len); + u32 count; - mutex_lock(&hash_hw_acc_mutex); + /* + * NBLW bits. Reset the number of bits in last word (NBLW). + */ + HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); - /* NOTE: The length of the message is in the form of number of bits */ - hash_rv = hash_hw_update(desc, HASH_DEVICE_ID_1, data, len * 8); - if (hash_rv != HASH_OK) { - pr_err("hash_hw_update() failed!"); - ret = -EPERM; - goto out; + /* + * Write message data to the HASH_DIN register. + */ + for (count = 0; count < (HASH_BLOCK_SIZE / sizeof(u32)); count += 4) { + HASH_SET_DIN(message[0]); + HASH_SET_DIN(message[1]); + HASH_SET_DIN(message[2]); + HASH_SET_DIN(message[3]); + message += 4; } - -out: - mutex_unlock(&hash_hw_acc_mutex); - return ret; } /** - * hash_final - The hash final function for SHA1/SHA2 (SHA256). - * @desc: The hash descriptor for the job - * @out: Pointer for the calculated digest + * hash_messagepad - Pads a message and write the nblw bits. + * @device_data: Structure for the hash device. + * @message: Last word of a message. + * @index_bytes: The number of bytes in the last message. + * + * This function manages the final part of the digest calculation, when less + * than 512 bits (64 bytes) remain in message. This means index_bytes < 64. 
+ * + * Reentrancy: Non Re-entrant. */ -static int hash_final(struct shash_desc *desc, u8 *out) +static void hash_messagepad(struct hash_device_data *device_data, + const u32 *message, u8 index_bytes) { - int ret = 0; - int hash_rv = HASH_OK; - struct hash_ctx *ctx = shash_desc_ctx(desc); - struct hash_driver_data *device_data = internal_drv_data; - - int digestsize = crypto_shash_digestsize(desc->tfm); - u8 digest[HASH_MSG_DIGEST_SIZE]; - - pr_debug("[hash_final]: (ctx=0x%x)!", (u32) ctx); - - mutex_lock(&hash_hw_acc_mutex); - - /* Enable device power (and clock) */ - ret = hash_enable_power(device_data->dev, device_data, false); - if (ret) { - dev_err(device_data->dev, "[%s]: " - "hash_enable_power() failed!", __func__); - goto out; - } - - if (!ctx->updated) { - ret = init_hash_hw(desc); - if (ret) { - pr_err("init_hash_hw() failed!"); - goto out_power; - } - } else { - hash_rv = hash_resume_state(HASH_DEVICE_ID_1, &ctx->state); + dev_dbg(device_data->dev, "[%s] (bytes in final msg=%d))", + __func__, index_bytes); + /* + * Clear hash str register, only clear NBLW + * since DCAL will be reset by hardware. + */ + writel((readl(&device_data->base->str) & ~HASH_STR_NBLW_MASK), + &device_data->base->str); - if (hash_rv != HASH_OK) { - pr_err("hash_resume_state() failed!"); - ret = -EPERM; - goto out_power; - } + /* Main loop */ + while (index_bytes >= 4) { + HASH_SET_DIN(message[0]); + index_bytes -= 4; + message++; } - hash_messagepad(HASH_DEVICE_ID_1, ctx->state.buffer, - ctx->state.index); - - hash_get_digest(HASH_DEVICE_ID_1, digest, ctx->config.algorithm); - - memcpy(out, digest, digestsize); + if (index_bytes) + HASH_SET_DIN(message[0]); -out_power: - /* Disable power (and clock) */ - if (hash_disable_power(device_data->dev, device_data, false)) - dev_err(device_data->dev, "[%s]: " - "hash_disable_power() failed!", __func__); + while (device_data->base->str & HASH_STR_DCAL_MASK) + cpu_relax(); -out: - mutex_unlock(&hash_hw_acc_mutex); + /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ + HASH_SET_NBLW(index_bytes * 8); + dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__, + readl(&device_data->base->din), + readl(&device_data->base->str)); + HASH_SET_DCAL; + dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d", + __func__, readl(&device_data->base->din), + readl(&device_data->base->str)); - return ret; + while (device_data->base->str & HASH_STR_DCAL_MASK) + cpu_relax(); } /** - * sha1_init - SHA1 init function. - * @desc: The hash descriptor for the job + * hash_incrementlength - Increments the length of the current message. + * @ctx: Hash context + * @incr: Length of message processed already + * + * Overflow cannot occur, because conditions for overflow are checked in + * hash_hw_update. */ -static int sha1_init(struct shash_desc *desc) +static void hash_incrementlength(struct hash_ctx *ctx, u32 incr) { - struct hash_ctx *ctx = shash_desc_ctx(desc); - - pr_debug("[sha1_init]: (ctx=0x%x)!", (u32) ctx); - - ctx->config.data_format = HASH_DATA_8_BITS; - ctx->config.algorithm = HASH_ALGO_SHA1; - ctx->config.oper_mode = HASH_OPER_MODE_HASH; + ctx->state.length.low_word += incr; - return hash_init(desc); + /* Check for wrap-around */ + if (ctx->state.length.low_word < incr) + ctx->state.length.high_word++; } /** - * sha256_init - SHA2 (SHA256) init function. - * @desc: The hash descriptor for the job + * hash_setconfiguration - Sets the required configuration for the hash + * hardware. + * @device_data: Structure for the hash device. 
+ * @config: Pointer to a configuration structure. + * + * Reentrancy: Non Re-entrant + * Reentrancy issues: + * 1. Global variable registry(cofiguration register, + * parameter register, divider register) is being modified + * + * Comments 1. : User need to call hash_begin API after calling this + * API i.e. the current configuration is set only when + * bit INIT is set and we set INIT bit in hash_begin. + * Changing the configuration during a computation has + * no effect so we first set configuration by calling + * this API and then set the INIT bit for the HASH + * processor and the curent configuration is taken into + * account. As reading INIT bit (with correct protection + * rights) will always return 0b so we can't make a check + * at software level. So the user has to initialize the + * device for new configuration to take in to effect. + * 2. The default value of data format is 00b ie the format + * of data entered in HASH_DIN register is 32-bit data. + * The data written in HASH_DIN is used directly by the + * HASH processing, without re ordering. */ -static int sha256_init(struct shash_desc *desc) +int hash_setconfiguration(struct hash_device_data *device_data, + struct hash_config *config) { - struct hash_ctx *ctx = shash_desc_ctx(desc); + int ret = 0; + dev_dbg(device_data->dev, "[%s] ", __func__); - pr_debug("[sha256_init]: (ctx=0x%x)!", (u32) ctx); + if (config->algorithm != HASH_ALGO_SHA1 && + config->algorithm != HASH_ALGO_SHA256) + return -EPERM; - ctx->config.data_format = HASH_DATA_8_BITS; - ctx->config.algorithm = HASH_ALGO_SHA2; - ctx->config.oper_mode = HASH_OPER_MODE_HASH; + /* + * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data + * to be written to HASH_DIN is considered as 32 bits. + */ + HASH_SET_DATA_FORMAT(config->data_format); - return hash_init(desc); -} + /* + * Empty message bit. This bit is needed when the hash input data + * contain the empty message. Always set in current impl. but with + * no impact on data different than empty message. + */ + HASH_SET_BITS(&device_data->base->cr, HASH_CR_EMPTYMSG_MASK); -static int hash_export(struct shash_desc *desc, void *out) -{ - struct hash_ctx *ctx = shash_desc_ctx(desc); + /* + * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256 + */ + switch (config->algorithm) { + case HASH_ALGO_SHA1: + HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK); + break; - pr_debug("[hash_export]: (ctx=0x%X) (out=0x%X)", - (u32) ctx, (u32) out); - memcpy(out, ctx, sizeof(*ctx)); - return 0; + case HASH_ALGO_SHA256: + HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK); + break; + + default: + dev_err(device_data->dev, "[%s] Incorrect algorithm.", + __func__); + return -EPERM; + } + + /* + * MODE bit. This bit selects between HASH or HMAC mode for the + * selected algorithm. 0b0 = HASH and 0b1 = HMAC. + */ + if (HASH_OPER_MODE_HASH == config->oper_mode) { + HASH_CLEAR_BITS(&device_data->base->cr, + HASH_CR_MODE_MASK); + } else { /* HMAC mode or wrong hash mode */ + ret = -EPERM; + dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", + __func__); + } + return ret; } -static int hash_import(struct shash_desc *desc, const void *in) +/** + * hash_begin - This routine resets some globals and initializes the hash + * hardware. + * @device_data: Structure for the hash device. + * @ctx: Hash context. + * + * Reentrancy: Non Re-entrant + * + * Comments 1. : User need to call hash_setconfiguration API before + * calling this API i.e. 
the current configuration is set + * only when bit INIT is set and we set INIT bit in + * hash_begin. Changing the configuration during a + * computation has no effect so we first set + * configuration by calling this API and then set the + * INIT bit for the HASH processor and the current + * configuration is taken into account. As reading INIT + * bit (with correct protection rights) will always + * return 0b so we can't make a check at software level. + * So the user has to initialize the device for new + * configuration to take in to effect. + */ +void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) { - struct hash_ctx *ctx = shash_desc_ctx(desc); + /* HW and SW initializations */ + /* Note: there is no need to initialize buffer and digest members */ + dev_dbg(device_data->dev, "[%s] ", __func__); - pr_debug("[hash_import]: (ctx=0x%x) (in =0x%X)", - (u32) ctx, (u32) in); - memcpy(ctx, in, sizeof(*ctx)); - return 0; -} + while (device_data->base->str & HASH_STR_DCAL_MASK) + cpu_relax(); -static struct shash_alg sha1_alg = { - .digestsize = SHA1_DIGEST_SIZE, - .init = sha1_init, - .update = hash_update, - .final = hash_final, - .export = hash_export, - .import = hash_import, - .descsize = sizeof(struct hash_ctx), - .statesize = sizeof(struct hash_ctx), - .base = { - .cra_name = "sha1", - .cra_driver_name = "sha1-u8500", - .cra_flags = CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; + /* + * INIT bit. Set this bit to 0b1 to reset the HASH processor core and + * prepare the initialize the HASH accelerator to compute the message + * digest of a new message. + */ + HASH_INITIALIZE; -static struct shash_alg sha256_alg = { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_init, - .update = hash_update, - .final = hash_final, - .export = hash_export, - .import = hash_import, - .descsize = sizeof(struct hash_ctx), - .statesize = sizeof(struct hash_ctx), - .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256-u8500", - .cra_flags = CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; + /* + * NBLW bits. Reset the number of bits in last word (NBLW). + */ + HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); +} /** - * u8500_hash_probe - Function that probes the hash hardware. - * @pdev: The platform device + * hash_hw_update - Updates current HASH computation hashing another part of + * the message. + * @req: Byte array containing the message to be hashed (caller + * allocated). 
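+ *
+ * Complete 512-bit blocks are written to the hardware as they accumulate;
+ * any remainder is buffered in the context until more data arrives or the
+ * final operation pads and writes it.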
+ * + * Reentrancy: Non Re-entrant */ -static int u8500_hash_probe(struct platform_device *pdev) +int hash_hw_update(struct ahash_request *req) { int ret = 0; - int hash_rv = HASH_OK; - struct resource *res = NULL; - struct hash_driver_data *hash_drv_data; + u8 index; + u32 count; + u8 *p_buffer; + struct hash_device_data *device_data; + u8 *p_data_buffer; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct crypto_hash_walk walk; + int msg_length = crypto_hash_walk_first(req, &walk); - pr_debug("[u8500_hash_probe]: (pdev=0x%x)", (u32) pdev); + pr_debug(DEV_DBG_NAME "[%s] ", __func__); - pr_debug("[u8500_hash_probe]: Calling kzalloc()!"); - hash_drv_data = kzalloc(sizeof(struct hash_driver_data), GFP_KERNEL); - if (!hash_drv_data) { - pr_debug("kzalloc() failed!"); - ret = -ENOMEM; - goto out; - } + if (msg_length == 0) + return -EPERM; - hash_drv_data->dev = &pdev->dev; + index = ctx->state.index; + p_buffer = (u8 *)ctx->state.buffer; - pr_debug("[u8500_hash_probe]: Calling platform_get_resource()!"); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - pr_debug("platform_get_resource() failed"); - ret = -ENODEV; - goto out_kfree; + /* Check if ctx->state.length + msg_length + overflows */ + if (msg_length > + (ctx->state.length.low_word + msg_length) + && HASH_HIGH_WORD_MAX_VAL == + (ctx->state.length.high_word)) { + dev_err(device_data->dev, "[%s] HASH_MSG_LENGTH_OVERFLOW!", + __func__); + return -EPERM; } - pr_debug("[u8500_hash_probe]: Calling request_mem_region()!"); - res = request_mem_region(res->start, resource_size(res), pdev->name); - if (res == NULL) { - pr_debug("request_mem_region() failed"); - ret = -EBUSY; - goto out_kfree; + ret = hash_get_device_data(ctx, &device_data); + if (ret) + return ret; + + /* Enable device power (and clock) */ + ret = hash_enable_power(device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "hash_enable_power() failed!", __func__); + goto out; } - pr_debug("[u8500_hash_probe]: Calling ioremap()!"); - hash_drv_data->base = ioremap(res->start, resource_size(res)); - if (!hash_drv_data->base) { - pr_err("[u8500_hash] " - "ioremap of hash1 register memory failed!"); - ret = -ENOMEM; - goto out_free_mem; - } - mutex_init(&hash_drv_data->power_state_mutex); - - /* Enable power for HASH hardware block */ - hash_drv_data->regulator = regulator_get(&pdev->dev, "v-ape"); - if (IS_ERR(hash_drv_data->regulator)) { - dev_err(&pdev->dev, "[u8500_hash] " - "could not get hash regulator\n"); - ret = PTR_ERR(hash_drv_data->regulator); - hash_drv_data->regulator = NULL; - goto out_unmap; - } - - pr_debug("[u8500_hash_probe]: Calling clk_get()!"); - /* Enable the clk for HASH1 hardware block */ - hash_drv_data->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(hash_drv_data->clk)) { - pr_err("clk_get() failed!"); - ret = PTR_ERR(hash_drv_data->clk); - goto out_regulator; - } - - /* Enable device power (and clock) */ - ret = hash_enable_power(&pdev->dev, hash_drv_data, false); - if (ret) { - dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!", - __func__); - goto out_clk; - } + /* Main loop */ + while (0 != msg_length) { + p_data_buffer = walk.data; + if ((index + msg_length) < HASH_BLOCK_SIZE) { + for (count = 0; count < msg_length; count++) { + p_buffer[index + count] = + *(p_data_buffer + count); + } - pr_debug("[u8500_hash_probe]: Calling hash_init_base_address()->" - "(base=0x%x,DEVICE_ID=%d)!", - (u32) hash_drv_data->base, HASH_DEVICE_ID_1); + index += msg_length; + 
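+			/*
+			 * Less than a full block: the data has only been
+			 * buffered at this point. It is written to the
+			 * hardware on a later update, or padded and written
+			 * by hash_messagepad() when the digest is finalized.
+			 */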
} else { + if (!ctx->updated) { + ret = init_hash_hw(device_data, req); + if (ret) { + dev_err(device_data->dev, "[%s] " + "init_hash_hw() failed!", + __func__); + goto out; + } + ctx->updated = 1; + } else { + ret = hash_resume_state(device_data, + &ctx->state); + if (ret) { + dev_err(device_data->dev, "[%s] " + "hash_resume_state() failed!", + __func__); + goto out_power; + } + } - /* Setting base address */ - hash_rv = - hash_init_base_address(HASH_DEVICE_ID_1, - (t_logical_address) hash_drv_data->base); - if (hash_rv != HASH_OK) { - pr_err("hash_init_base_address() failed!"); - ret = -EPERM; - goto out_power; - } - pr_debug("[u8500_hash_probe]: Calling mutex_init()!"); - mutex_init(&hash_hw_acc_mutex); + /* + * If 'p_data_buffer' is four byte aligned and local + * buffer does not have any data, we can write data + * directly from 'p_data_buffer' to HW peripheral, + * otherwise we first copy data to a local buffer + */ + if ((0 == (((u32) p_data_buffer) % 4)) + && (0 == index)) { + hash_processblock(device_data, + (const u32 *)p_data_buffer); + } else { + for (count = 0; + count < (u32)(HASH_BLOCK_SIZE - index); + count++) { + p_buffer[index + count] = + *(p_data_buffer + count); + } - pr_debug("[u8500_hash_probe]: To register only sha1 and sha256" - " algorithms!"); - internal_drv_data = hash_drv_data; + hash_processblock(device_data, + (const u32 *)p_buffer); + } - ret = crypto_register_shash(&sha1_alg); - if (ret) { - pr_err("Could not register sha1_alg!"); - goto out_power; - } - pr_debug("[u8500_hash_probe]: sha1_alg registered!"); + hash_incrementlength(ctx, HASH_BLOCK_SIZE); + index = 0; - ret = crypto_register_shash(&sha256_alg); - if (ret) { - pr_err("Could not register sha256_alg!"); - goto out_unreg1_tmp; + ret = hash_save_state(device_data, &ctx->state); + if (ret) { + dev_err(device_data->dev, "[%s] " + "hash_save_state() failed!", __func__); + goto out_power; + } + } + msg_length = crypto_hash_walk_done(&walk, 0); } - pr_debug("[u8500_hash_probe]: Calling platform_set_drvdata()!"); - platform_set_drvdata(pdev, hash_drv_data); - - if (hash_disable_power(&pdev->dev, hash_drv_data, false)) - dev_err(&pdev->dev, "[%s]: hash_disable_power()" - " failed!", __func__); - - return 0; - -out_unreg1_tmp: - crypto_unregister_shash(&sha1_alg); + ctx->state.index = index; + dev_dbg(device_data->dev, "[%s] END(msg_length=%d in bits, in=%d, " + "bin=%d))", __func__, msg_length, ctx->state.index, + ctx->state.bit_index); out_power: - hash_disable_power(&pdev->dev, hash_drv_data, false); - -out_clk: - clk_put(hash_drv_data->clk); - -out_regulator: - regulator_put(hash_drv_data->regulator); - -out_unmap: - iounmap(hash_drv_data->base); + /* Disable power (and clock) */ + if (hash_disable_power(device_data, false)) + dev_err(device_data->dev, "[%s]: " + "hash_disable_power() failed!", __func__); +out: + spin_lock(&device_data->ctx_lock); + device_data->current_ctx = NULL; + ctx->device = NULL; + spin_unlock(&device_data->ctx_lock); -out_free_mem: - release_mem_region(res->start, res->end - res->start + 1); + /* + * The down_interruptible part for this semaphore is called in + * cryp_get_device_data. + */ + up(&driver_data.device_allocation); -out_kfree: - kfree(hash_drv_data); -out: return ret; } /** - * u8500_hash_remove - Function that removes the hash device from the platform. - * @pdev: The platform device + * hash_resume_state - Function that resumes the state of an calculation. + * @device_data: Pointer to the device structure. 
+ * @device_state: The state to be restored in the hash hardware + * + * Reentrancy: Non Re-entrant */ -static int u8500_hash_remove(struct platform_device *pdev) +int hash_resume_state(struct hash_device_data *device_data, + const struct hash_state *device_state) { - struct resource *res; - struct hash_driver_data *hash_drv_data; - - pr_debug("[u8500_hash_remove]: (pdev=0x%x)", (u32) pdev); + u32 temp_cr; + s32 count; + int hash_mode = HASH_OPER_MODE_HASH; - pr_debug("[u8500_hash_remove]: Calling platform_get_drvdata()!"); - hash_drv_data = platform_get_drvdata(pdev); + dev_dbg(device_data->dev, "[%s] (state(0x%x)))", + __func__, (u32) device_state); - pr_debug("[u8500_hash_remove]: To unregister only sha1 and " - "sha256 algorithms!"); - crypto_unregister_shash(&sha1_alg); - crypto_unregister_shash(&sha256_alg); + if (NULL == device_state) { + dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", + __func__); + return -EPERM; + } - pr_debug("[u8500_hash_remove]: Calling mutex_destroy()!"); - mutex_destroy(&hash_hw_acc_mutex); + /* Check correctness of index and length members */ + if (device_state->index > HASH_BLOCK_SIZE + || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { + dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", + __func__); + return -EPERM; + } - pr_debug("[u8500_hash_remove]: Calling clk_disable()!"); - clk_disable(hash_drv_data->clk); + /* + * INIT bit. Set this bit to 0b1 to reset the HASH processor core and + * prepare the initialize the HASH accelerator to compute the message + * digest of a new message. + */ + HASH_INITIALIZE; - pr_debug("[u8500_hash_remove]: Calling clk_put()!"); - clk_put(hash_drv_data->clk); + temp_cr = device_state->temp_cr; + writel(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr); - pr_debug("[u8500_hash_remove]: Calling regulator_disable()!"); - regulator_disable(hash_drv_data->regulator); + if (device_data->base->cr & HASH_CR_MODE_MASK) + hash_mode = HASH_OPER_MODE_HMAC; + else + hash_mode = HASH_OPER_MODE_HASH; - pr_debug("[u8500_hash_remove]: Calling iounmap(): base = 0x%x", - (u32) hash_drv_data->base); - iounmap(hash_drv_data->base); + for (count = 0; count < HASH_CSR_COUNT; count++) { + if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) + break; - pr_debug("[u8500_hash_remove]: Calling platform_get_resource()!"); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + writel(device_state->csr[count], + &device_data->base->csrx[count]); + } - pr_debug("[u8500_hash_remove]: Calling release_mem_region()" - "->res->start=0x%x, res->end = 0x%x!", - res->start, res->end); - release_mem_region(res->start, res->end - res->start + 1); + writel(device_state->csfull, &device_data->base->csfull); + writel(device_state->csdatain, &device_data->base->csdatain); - pr_debug("[u8500_hash_remove]: Calling kfree()!"); - kfree(hash_drv_data); + writel(device_state->str_reg, &device_data->base->str); + writel(temp_cr, &device_data->base->cr); return 0; } -static void u8500_hash_shutdown(struct platform_device *pdev) +/** + * hash_save_state - Function that saves the state of hardware. + * @device_data: Pointer to the device structure. + * @device_state: The strucure where the hardware state should be saved. 
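+ *
+ * Waits for any ongoing digest calculation to complete, then copies the
+ * control, status, data input and context switch registers into
+ * @device_state.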
+ * + * Reentrancy: Non Re-entrant + */ +int hash_save_state(struct hash_device_data *device_data, + struct hash_state *device_state) { - struct resource *res = NULL; - struct hash_driver_data *hash_drv_data; + u32 temp_cr; + u32 count; + int hash_mode = HASH_OPER_MODE_HASH; - dev_dbg(&pdev->dev, "[%s]", __func__); + dev_dbg(device_data->dev, "[%s] state(0x%x)))", + __func__, (u32) device_state); - hash_drv_data = platform_get_drvdata(pdev); - if (!hash_drv_data) { - dev_err(&pdev->dev, "[%s]: " - "platform_get_drvdata() failed!", __func__); - return; + if (NULL == device_state) { + dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", + __func__); + return -EPERM; } - crypto_unregister_shash(&sha1_alg); - crypto_unregister_shash(&sha256_alg); - - mutex_destroy(&hash_hw_acc_mutex); - - iounmap(hash_drv_data->base); + /* Write dummy value to force digest intermediate calculation. This + * actually makes sure that there isn't any ongoing calculation in the + * hardware. + */ + while (device_data->base->str & HASH_STR_DCAL_MASK) + cpu_relax(); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res) - release_mem_region(res->start, resource_size(res)); + temp_cr = readl(&device_data->base->cr); - if (hash_disable_power(&pdev->dev, hash_drv_data, false)) - dev_err(&pdev->dev, "[%s]: " - "hash_disable_power() failed", __func__); + device_state->str_reg = readl(&device_data->base->str); - clk_put(hash_drv_data->clk); - regulator_put(hash_drv_data->regulator); -} + device_state->din_reg = readl(&device_data->base->din); -static int u8500_hash_suspend(struct platform_device *pdev, pm_message_t state) -{ - int ret; - struct hash_driver_data *hash_drv_data; + if (device_data->base->cr & HASH_CR_MODE_MASK) + hash_mode = HASH_OPER_MODE_HMAC; + else + hash_mode = HASH_OPER_MODE_HASH; - dev_dbg(&pdev->dev, "[%s]", __func__); + for (count = 0; count < HASH_CSR_COUNT; count++) { + if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) + break; - /* Handle state? */ - hash_drv_data = platform_get_drvdata(pdev); - if (!hash_drv_data) { - dev_err(&pdev->dev, "[%s]: " - "platform_get_drvdata() failed!", __func__); - return -ENOMEM; + device_state->csr[count] = + readl(&device_data->base->csrx[count]); } - ret = hash_disable_power(&pdev->dev, hash_drv_data, true); - if (ret) - dev_err(&pdev->dev, "[%s]: " - "hash_disable_power()", __func__); + device_state->csfull = readl(&device_data->base->csfull); + device_state->csdatain = readl(&device_data->base->csdatain); - return ret; + device_state->temp_cr = temp_cr; + + return 0; } -static int u8500_hash_resume(struct platform_device *pdev) +/** + * hash_check_hw - This routine checks for peripheral Ids and PCell Ids. 
+ * @device_data: + * + */ +int hash_check_hw(struct hash_device_data *device_data) { int ret = 0; - struct hash_driver_data *hash_drv_data; - dev_dbg(&pdev->dev, "[%s]", __func__); + dev_dbg(device_data->dev, "[%s] ", __func__); - hash_drv_data = platform_get_drvdata(pdev); - if (!hash_drv_data) { - dev_err(&pdev->dev, "[%s]: " - "platform_get_drvdata() failed!", __func__); - return -ENOMEM; + if (NULL == device_data) { + ret = -EPERM; + dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", + __func__); + goto out; } - if (hash_drv_data->restore_dev_state) { - ret = hash_enable_power(&pdev->dev, hash_drv_data, true); - if (ret) - dev_err(&pdev->dev, "[%s]: " - "hash_enable_power() failed!", __func__); + /* Checking Peripheral Ids */ + if ((HASH_P_ID0 == readl(&device_data->base->periphid0)) + && (HASH_P_ID1 == readl(&device_data->base->periphid1)) + && (HASH_P_ID2 == readl(&device_data->base->periphid2)) + && (HASH_P_ID3 == readl(&device_data->base->periphid3)) + && (HASH_CELL_ID0 == readl(&device_data->base->cellid0)) + && (HASH_CELL_ID1 == readl(&device_data->base->cellid1)) + && (HASH_CELL_ID2 == readl(&device_data->base->cellid2)) + && (HASH_CELL_ID3 == readl(&device_data->base->cellid3)) + ) { + ret = 0; + goto out;; + } else { + ret = -EPERM; + dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!", + __func__); + goto out; } - +out: return ret; } - -static struct platform_driver hash_driver = { - .probe = u8500_hash_probe, - .remove = u8500_hash_remove, - .shutdown = u8500_hash_shutdown, - .suspend = u8500_hash_suspend, - .resume = u8500_hash_resume, - .driver = { - .owner = THIS_MODULE, - .name = "hash1", - }, -}; - /** - * u8500_hash_mod_init - The kernel module init function. + * hash_get_digest - Gets the digest. + * @device_data: Pointer to the device structure. + * @digest: User allocated byte array for the calculated digest. + * @algorithm: The algorithm in use. + * + * Reentrancy: Non Re-entrant, global variable registry (hash control register) + * is being modified. + * + * Note that, if this is called before the final message has been handle it + * will return the intermediate message digest. */ -static int __init u8500_hash_mod_init(void) +void hash_get_digest(struct hash_device_data *device_data, + u8 *digest, int algorithm) { - pr_debug("u8500_hash_mod_init() is called!"); + u32 temp_hx_val, count; + int loop_ctr; - return platform_driver_register(&hash_driver); -} + if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) { + dev_err(device_data->dev, "[%s] Incorrect algorithm %d", + __func__, algorithm); + return; + } -/** - * u8500_hash_mod_fini - The kernel module exit function. 
- */ -static void __exit u8500_hash_mod_fini(void) -{ - pr_debug("u8500_hash_mod_fini() is called!"); + if (algorithm == HASH_ALGO_SHA1) + loop_ctr = HASH_SHA1_DIGEST_SIZE / sizeof(u32); + else + loop_ctr = HASH_SHA2_DIGEST_SIZE / sizeof(u32); - platform_driver_unregister(&hash_driver); - return; + dev_dbg(device_data->dev, "[%s] digest array:(0x%x)", + __func__, (u32) digest); + + /* Copy result into digest array */ + for (count = 0; count < loop_ctr; count++) { + temp_hx_val = readl(&device_data->base->hx[count]); + digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF); + digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF); + digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF); + digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF); + } } /** - * hash_processblock - This function processes a single block of 512 bits (64 - * bytes), word aligned, starting at message. - * @hid: Hardware device ID - * @message: Block (512 bits) of message to be written to the HASH hardware - * - * Reentrancy: Non Re-entrant. + * hash_update - The hash update function for SHA1/SHA2 (SHA256). + * @req: The hash request for the job. */ -static void hash_processblock(int hid, const u32 *message) +static int ahash_update(struct ahash_request *req) { - u32 count; + int ret = 0; - clear_bit(HASH_STR_NBLW_MASK, (void *)sys_ctx_g.registry[hid]->str); + pr_debug(DEV_DBG_NAME "[%s] ", __func__); - /* Partially unrolled loop */ - for (count = 0; count < (HASH_BLOCK_SIZE / sizeof(u32)); count += 4) { - HASH_SET_DIN(message[0]); - HASH_SET_DIN(message[1]); - HASH_SET_DIN(message[2]); - HASH_SET_DIN(message[3]); - message += 4; + ret = hash_hw_update(req); + if (ret) { + pr_err(DEV_DBG_NAME "[%s] hash_hw_update() failed!", __func__); + goto out; } + +out: + return ret; } /** - * hash_messagepad - Pads a message and write the nblw bits. - * @hid: Hardware device ID - * @message: Last word of a message - * @index_bytes: The number of bytes in the last message - * - * This function manages the final part of the digest calculation, when less - * than 512 bits (64 bytes) remain in message. This means index_bytes < 64. - * - * Reentrancy: Non Re-entrant. + * hash_final - The hash final function for SHA1/SHA2 (SHA256). + * @req: The hash request for the job. 
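+ *
+ * Pads the data still buffered in the request context, reads out the
+ * resulting digest into req->result and releases the hash device again.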
*/ -static void hash_messagepad(int hid, const u32 *message, u8 index_bytes) +static int ahash_final(struct ahash_request *req) { - pr_debug("[u8500_hash_alg] hash_messagepad" - "(bytes in final msg=%d))", index_bytes); + int ret = 0; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct hash_device_data *device_data; + u8 digest[HASH_MSG_DIGEST_SIZE]; - clear_reg_str(hid); + pr_debug(DEV_DBG_NAME "[%s] ", __func__); + ret = hash_get_device_data(ctx, &device_data); + if (ret) + return ret; - /* Main loop */ - while (index_bytes >= 4) { - HASH_SET_DIN(message[0]); - index_bytes -= 4; - message++; + dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); + + /* Enable device power (and clock) */ + ret = hash_enable_power(device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "hash_enable_power() failed!", __func__); + goto out; } - if (index_bytes) - HASH_SET_DIN(message[0]); + if (!ctx->updated) { + ret = init_hash_hw(device_data, req); + if (ret) { + dev_err(device_data->dev, "[%s] init_hash_hw() " + "failed!", __func__); + goto out_power; + } + } else { + ret = hash_resume_state(device_data, &ctx->state); - while (sys_ctx_g.registry[hid]->str & HASH_STR_DCAL_MASK) - cpu_relax(); + if (ret) { + dev_err(device_data->dev, "[%s] hash_resume_state() " + "failed!", __func__); + goto out_power; + } + } - /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ - HASH_SET_NBLW(index_bytes * 8); - pr_debug("[u8500_hash_alg] hash_messagepad -> DIN=0x%08x NBLW=%d", - sys_ctx_g.registry[hid]->din, - sys_ctx_g.registry[hid]->str); - HASH_SET_DCAL; - pr_debug("[u8500_hash_alg] hash_messagepad after dcal -> " - "DIN=0x%08x NBLW=%d", - sys_ctx_g.registry[hid]->din, - sys_ctx_g.registry[hid]->str); + hash_messagepad(device_data, ctx->state.buffer, + ctx->state.index); - while (sys_ctx_g.registry[hid]->str & HASH_STR_DCAL_MASK) - cpu_relax(); + hash_get_digest(device_data, digest, ctx->config.algorithm); + memcpy(req->result, digest, ctx->digestsize); + +out_power: + /* Disable power (and clock) */ + if (hash_disable_power(device_data, false)) + dev_err(device_data->dev, "[%s] hash_disable_power() failed!", + __func__); + +out: + spin_lock(&device_data->ctx_lock); + device_data->current_ctx = NULL; + ctx->device = NULL; + spin_unlock(&device_data->ctx_lock); + + /* + * The down_interruptible part for this semaphore is called in + * cryp_get_device_data. + */ + up(&driver_data.device_allocation); + + return ret; } -/** - * hash_incrementlength - Increments the length of the current message. - * @ctx: Hash context - * @incr: Length of message processed already - * - * Overflow cannot occur, because conditions for overflow are checked in - * hash_hw_update. - */ -static void hash_incrementlength(struct hash_ctx *ctx, u32 incr) +static int ahash_sha1_init(struct ahash_request *req) { - ctx->state.length.low_word += incr; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - /* Check for wrap-around */ - if (ctx->state.length.low_word < incr) - ctx->state.length.high_word++; + pr_debug(DEV_DBG_NAME "[%s]: (ctx=0x%x)!", __func__, (u32) ctx); + + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = HASH_ALGO_SHA1; + ctx->config.oper_mode = HASH_OPER_MODE_HASH; + ctx->digestsize = SHA1_DIGEST_SIZE; + + return hash_init(req); } -/** - * hash_setconfiguration - Sets the required configuration for the hash - * hardware. 
- * @hid: Hardware device ID - * @p_config: Pointer to a configuration structure - * - * Reentrancy: Non Re-entrant - * Reentrancy issues: - * 1. Global variable registry(cofiguration register, - * parameter register, divider register) is being modified - * - * Comments 1. : User need to call hash_begin API after calling this - * API i.e. the current configuration is set only when - * bit INIT is set and we set INIT bit in hash_begin. - * Changing the configuration during a computation has - * no effect so we first set configuration by calling - * this API and then set the INIT bit for the HASH - * processor and the curent configuration is taken into - * account. As reading INIT bit (with correct protection - * rights) will always return 0b so we can't make a check - * at software level. So the user has to initialize the - * device for new configuration to take in to effect. - * 2. The default value of data format is 00b ie the format - * of data entered in HASH_DIN register is 32-bit data. - * The data written in HASH_DIN is used directly by the - * HASH processing, without re ordering. - */ -int hash_setconfiguration(int hid, struct hash_config *p_config) +static int ahash_sha256_init(struct ahash_request *req) { - int hash_rv = HASH_OK; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug("[u8500_hash_alg] hash_setconfiguration())"); + pr_debug(DEV_DBG_NAME "[%s]: (ctx=0x%x)!", __func__, (u32) ctx); - if (p_config->algorithm != HASH_ALGO_SHA1 && - p_config->algorithm != HASH_ALGO_SHA2) - return HASH_INVALID_PARAMETER; + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = HASH_ALGO_SHA256; + ctx->config.oper_mode = HASH_OPER_MODE_HASH; + ctx->digestsize = SHA256_DIGEST_SIZE; - HASH_SET_DATA_FORMAT(p_config->data_format); + return hash_init(req); +} - HCL_SET_BITS(sys_ctx_g.registry[hid]->cr, HASH_CR_EMPTYMSG_MASK); +static int ahash_sha1_digest(struct ahash_request *req) +{ + int ret2, ret1 = ahash_sha1_init(req); - switch (p_config->algorithm) { - case HASH_ALGO_SHA1: - HCL_SET_BITS(sys_ctx_g.registry[hid]->cr, HASH_CR_ALGO_MASK); - break; + if (ret1) + goto out; - case HASH_ALGO_SHA2: - HCL_CLEAR_BITS(sys_ctx_g.registry[hid]->cr, HASH_CR_ALGO_MASK); - break; + ret1 = ahash_update(req); + ret2 = ahash_final(req); - default: - pr_debug("[u8500_hash_alg] Incorrect algorithm."); - return HASH_INVALID_PARAMETER; +out: + return ret1 ? ret1 : ret2; +} + +static int ahash_sha256_digest(struct ahash_request *req) +{ + int ret2, ret1 = ahash_sha256_init(req); + + if (ret1) + goto out; + + ret1 = ahash_update(req); + ret2 = ahash_final(req); + +out: + return ret1 ? 
ret1 : ret2; +} + +static struct ahash_alg ahash_sha1_alg = { + .init = ahash_sha1_init, + .update = ahash_update, + .final = ahash_final, + .digest = ahash_sha1_digest, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct hash_ctx), + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-u8500", + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_ctx), + .cra_module = THIS_MODULE, } +}; - /* This bit selects between HASH or HMAC mode for the selected - algorithm */ - if (HASH_OPER_MODE_HASH == p_config->oper_mode) { - HCL_CLEAR_BITS(sys_ctx_g.registry - [hid]->cr, HASH_CR_MODE_MASK); - } else { /* HMAC mode or wrong hash mode */ - hash_rv = HASH_INVALID_PARAMETER; - pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); +static struct ahash_alg ahash_sha256_alg = { + .init = ahash_sha256_init, + .update = ahash_update, + .final = ahash_final, + .digest = ahash_sha256_digest, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct hash_ctx), + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-u8500", + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_ctx), + .cra_type = &crypto_ahash_type, + .cra_module = THIS_MODULE, } +}; - return hash_rv; -} +/** + * struct hash_alg *u8500_hash_algs[] - + */ +static struct ahash_alg *u8500_ahash_algs[] = { + &ahash_sha1_alg, + &ahash_sha256_alg +}; /** - * hash_begin - This routine resets some globals and initializes the hash - * hardware. - * @ctx: Hash context - * - * Reentrancy: Non Re-entrant - * - * Comments 1. : User need to call hash_setconfiguration API before - * calling this API i.e. the current configuration is set - * only when bit INIT is set and we set INIT bit in - * hash_begin. Changing the configuration during a - * computation has no effect so we first set - * configuration by calling this API and then set the - * INIT bit for the HASH processor and the current - * configuration is taken into account. As reading INIT - * bit (with correct protection rights) will always - * return 0b so we can't make a check at software level. - * So the user has to initialize the device for new - * configuration to take in to effect. 
+ * hash_algs_register_all - */ -void hash_begin(struct hash_ctx *ctx) +static int ahash_algs_register_all(void) { - /* HW and SW initializations */ - /* Note: there is no need to initialize buffer and digest members */ + int ret; + int i; + int count; - pr_debug("[u8500_hash_alg] hash_begin())"); + pr_debug("[%s]", __func__); - while (sys_ctx_g.registry[HASH_DEVICE_ID_1]->str & HASH_STR_DCAL_MASK) - cpu_relax(); + for (i = 0; i < ARRAY_SIZE(u8500_ahash_algs); i++) { + ret = crypto_register_ahash(u8500_ahash_algs[i]); + if (ret) { + count = i; + pr_err("[%s] alg registration failed", + u8500_ahash_algs[i]->halg.base.cra_driver_name); + goto unreg; + } + } + return 0; +unreg: + for (i = 0; i < count; i++) + crypto_unregister_ahash(u8500_ahash_algs[i]); + return ret; +} - HASH_INITIALIZE; +/** + * hash_algs_unregister_all - + */ +static void ahash_algs_unregister_all(void) +{ + int i; - HCL_CLEAR_BITS(sys_ctx_g.registry[HASH_DEVICE_ID_1]->str, - HASH_STR_NBLW_MASK); + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + for (i = 0; i < ARRAY_SIZE(u8500_ahash_algs); i++) + crypto_unregister_ahash(u8500_ahash_algs[i]); } /** - * hash_hw_update - Updates current HASH computation hashing another part of - * the message. - * @hid: Hardware device ID - * @p_data_buffer: Byte array containing the message to be hashed (caller - * allocated) - * @msg_length: Length of message to be hashed (in bits) - * - * Reentrancy: Non Re-entrant + * u8500_hash_probe - Function that probes the hash hardware. + * @pdev: The platform device. */ -int hash_hw_update(struct shash_desc *desc, - int hid, - const u8 *p_data_buffer, - u32 msg_length) +static int u8500_hash_probe(struct platform_device *pdev) { - int hash_rv = HASH_OK; - u8 index; - u8 *p_buffer; - u32 count; - struct hash_ctx *ctx = shash_desc_ctx(desc); - struct hash_driver_data *device_data = internal_drv_data; + int ret = 0; + struct resource *res = NULL; + struct hash_device_data *device_data; + struct device *dev = &pdev->dev; + + dev_dbg(dev, "[%s] (pdev=0x%x)", __func__, (u32) pdev); + device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC); + if (!device_data) { + dev_dbg(dev, "[%s] kzalloc() failed!", __func__); + ret = -ENOMEM; + goto out; + } - pr_debug("[u8500_hash_alg] hash_hw_update(msg_length=%d / %d), " - "in=%d, bin=%d))", - msg_length, - msg_length / 8, - ctx->state.index, - ctx->state.bit_index); + device_data->dev = dev; + device_data->current_ctx = NULL; - index = ctx->state.index; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__); + ret = -ENODEV; + goto out_kfree; + } - p_buffer = (u8 *)ctx->state.buffer; + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (res == NULL) { + dev_dbg(dev, "[%s] request_mem_region() failed!", __func__); + ret = -EBUSY; + goto out_kfree; + } + + device_data->base = ioremap(res->start, resource_size(res)); + if (!device_data->base) { + dev_err(dev, "[%s] ioremap() failed!", + __func__); + ret = -ENOMEM; + goto out_free_mem; + } + spin_lock_init(&device_data->ctx_lock); + spin_lock_init(&device_data->power_state_lock); - /* Number of bytes in the message */ - msg_length /= 8; + /* Enable power for HASH1 hardware block */ + device_data->regulator = ux500_regulator_get(dev); - /* Check parameters */ - if (NULL == p_data_buffer) { - hash_rv = HASH_INVALID_PARAMETER; - pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_rv; + if (IS_ERR(device_data->regulator)) { + dev_err(dev, "[%s] 
regulator_get() failed!", __func__); + ret = PTR_ERR(device_data->regulator); + device_data->regulator = NULL; + goto out_unmap; } - /* Check if ctx->state.length + msg_length - overflows */ - if (msg_length > - (ctx->state.length.low_word + msg_length) - && HASH_HIGH_WORD_MAX_VAL == - (ctx->state.length.high_word)) { - hash_rv = HASH_MSG_LENGTH_OVERFLOW; - pr_err("[u8500_hash_alg] HASH_MSG_LENGTH_OVERFLOW!"); - return hash_rv; + /* Enable the clock for HASH1 hardware block */ + device_data->clk = clk_get(dev, NULL); + if (IS_ERR(device_data->clk)) { + dev_err(dev, "[%s] clk_get() failed!", __func__); + ret = PTR_ERR(device_data->clk); + goto out_regulator; } /* Enable device power (and clock) */ - hash_rv = hash_enable_power(device_data->dev, device_data, false); - if (hash_rv) { - dev_err(device_data->dev, "[%s]: " - "hash_enable_power() failed!", __func__); - goto out; + ret = hash_enable_power(device_data, false); + if (ret) { + dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); + goto out_clk; } - /* Main loop */ - while (0 != msg_length) { - if ((index + msg_length) < HASH_BLOCK_SIZE) { - for (count = 0; count < msg_length; count++) { - p_buffer[index + count] = - *(p_data_buffer + count); - } - - index += msg_length; - msg_length = 0; - } else { - if (!ctx->updated) { - hash_rv = init_hash_hw(desc); - if (hash_rv != HASH_OK) { - pr_err("init_hash_hw() failed!"); - goto out; - } - ctx->updated = 1; - } else { - hash_rv = - hash_resume_state(HASH_DEVICE_ID_1, - &ctx->state); - if (hash_rv != HASH_OK) { - pr_err("hash_resume_state()" - " failed!"); - goto out_power; - } - } - - /* - * If 'p_data_buffer' is four byte aligned and local - * buffer does not have any data, we can write data - * directly from 'p_data_buffer' to HW peripheral, - * otherwise we first copy data to a local buffer - */ - if ((0 == (((u32) p_data_buffer) % 4)) - && (0 == index)) { - hash_processblock(hid, - (const u32 *)p_data_buffer); - } else { - for (count = 0; - count < (u32)(HASH_BLOCK_SIZE - index); - count++) { - p_buffer[index + count] = - *(p_data_buffer + count); - } + ret = hash_check_hw(device_data); + if (ret) { + dev_err(dev, "[%s] hash_check_hw() failed!", __func__); + goto out_power; + } - hash_processblock(hid, (const u32 *)p_buffer); - } + platform_set_drvdata(pdev, device_data); - hash_incrementlength(ctx, HASH_BLOCK_SIZE); - p_data_buffer += (HASH_BLOCK_SIZE - index); - msg_length -= (HASH_BLOCK_SIZE - index); - index = 0; + /* Put the new device into the device list... */ + klist_add_tail(&device_data->list_node, &driver_data.device_list); + /* ... and signal that a new device is available. 
*/ + up(&driver_data.device_allocation); - hash_rv = - hash_save_state(HASH_DEVICE_ID_1, &ctx->state); - if (hash_rv != HASH_OK) { - pr_err("hash_save_state() failed!"); - goto out_power; - } - } + ret = ahash_algs_register_all(); + if (ret) { + dev_err(dev, "[%s] ahash_algs_register_all() " + "failed!", __func__); + goto out_power; } - ctx->state.index = index; + if (hash_disable_power(device_data, false)) + dev_err(dev, "[%s]: hash_disable_power() failed!", __func__); + + dev_info(dev, "[%s] successfully probed", __func__); + return 0; - pr_debug("[u8500_hash_alg] hash_hw_update END(msg_length=%d in " - "bits, in=%d, bin=%d))", - msg_length, - ctx->state.index, - ctx->state.bit_index); out_power: - /* Disable power (and clock) */ - if (hash_disable_power(device_data->dev, device_data, false)) - dev_err(device_data->dev, "[%s]: " - "hash_disable_power() failed!", __func__); + hash_disable_power(device_data, false); + +out_clk: + clk_put(device_data->clk); + +out_regulator: + ux500_regulator_put(device_data->regulator); + +out_unmap: + iounmap(device_data->base); + +out_free_mem: + release_mem_region(res->start, resource_size(res)); + +out_kfree: + kfree(device_data); out: - return hash_rv; + return ret; } /** - * hash_resume_state - Function that resumes the state of an calculation. - * @hid: Hardware device ID - * @device_state: The state to be restored in the hash hardware - * - * Reentrancy: Non Re-entrant + * u8500_hash_remove - Function that removes the hash device from the platform. + * @pdev: The platform device. */ -int hash_resume_state(int hid, const struct hash_state *device_state) +static int u8500_hash_remove(struct platform_device *pdev) { - u32 temp_cr; - int hash_rv = HASH_OK; - s32 count; - int hash_mode = HASH_OPER_MODE_HASH; + struct resource *res; + struct hash_device_data *device_data; + struct device *dev = &pdev->dev; - pr_debug("[u8500_hash_alg] hash_resume_state(state(0x%x)))", - (u32) device_state); + dev_dbg(dev, "[%s] (pdev=0x%x)", __func__, (u32) pdev); - if (NULL == device_state) { - hash_rv = HASH_INVALID_PARAMETER; - pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_rv; + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(dev, "[%s]: platform_get_drvdata() failed!", + __func__); + return -ENOMEM; } - /* Check correctness of index and length members */ - if (device_state->index > HASH_BLOCK_SIZE - || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { - hash_rv = HASH_INVALID_PARAMETER; - pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_rv; + /* Try to decrease the number of available devices. */ + if (down_trylock(&driver_data.device_allocation)) + return -EBUSY; + + /* Check that the device is free */ + spin_lock(&device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (device_data->current_ctx) { + /* The device is busy */ + spin_unlock(&device_data->ctx_lock); + /* Return the device to the pool. 
*/ + up(&driver_data.device_allocation); + return -EBUSY; } - HASH_INITIALIZE; + spin_unlock(&device_data->ctx_lock); - temp_cr = device_state->temp_cr; - sys_ctx_g.registry[hid]->cr = - temp_cr & HASH_CR_RESUME_MASK; + /* Remove the device from the list */ + if (klist_node_attached(&device_data->list_node)) + klist_remove(&device_data->list_node); - if (sys_ctx_g.registry[hid]->cr & HASH_CR_MODE_MASK) - hash_mode = HASH_OPER_MODE_HMAC; - else - hash_mode = HASH_OPER_MODE_HASH; + /* If this was the last device, remove the services */ + if (list_empty(&driver_data.device_list.k_list)) + ahash_algs_unregister_all(); - for (count = 0; count < HASH_CSR_COUNT; count++) { - if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) - break; + if (hash_disable_power(device_data, false)) + dev_err(dev, "[%s]: hash_disable_power() failed", + __func__); - sys_ctx_g.registry[hid]->csrx[count] = - device_state->csr[count]; - } + clk_put(device_data->clk); + ux500_regulator_put(device_data->regulator); - sys_ctx_g.registry[hid]->csfull = device_state->csfull; - sys_ctx_g.registry[hid]->csdatain = device_state->csdatain; + iounmap(device_data->base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) + release_mem_region(res->start, resource_size(res)); - sys_ctx_g.registry[hid]->str = device_state->str_reg; - sys_ctx_g.registry[hid]->cr = temp_cr; + kfree(device_data); - return hash_rv; + return 0; } /** - * hash_save_state - Function that saves the state of hardware. - * @hid: Hardware device ID - * @device_state: The strucure where the hardware state should be saved - * - * Reentrancy: Non Re-entrant + * u8500_hash_shutdown - Function that shutdown the hash device. + * @pdev: The platform device */ -int hash_save_state(int hid, struct hash_state *device_state) +static void u8500_hash_shutdown(struct platform_device *pdev) { - u32 temp_cr; - u32 count; - int hash_rv = HASH_OK; - int hash_mode = HASH_OPER_MODE_HASH; + struct resource *res = NULL; + struct hash_device_data *device_data; - pr_debug("[u8500_hash_alg] hash_save_state( state(0x%x)))", - (u32) device_state); + dev_dbg(&pdev->dev, "[%s]", __func__); - if (NULL == device_state) { - hash_rv = HASH_INVALID_PARAMETER; - pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_rv; + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", + __func__); + return; } - /* Write dummy value to force digest intermediate calculation. This - * actually makes sure that there isn't any ongoing calculation in the - * hardware. - */ - while (sys_ctx_g.registry[hid]->str & HASH_STR_DCAL_MASK) - cpu_relax(); - - temp_cr = sys_ctx_g.registry[hid]->cr; - - device_state->str_reg = sys_ctx_g.registry[hid]->str; - - device_state->din_reg = sys_ctx_g.registry[hid]->din; - - if (sys_ctx_g.registry[hid]->cr & HASH_CR_MODE_MASK) - hash_mode = HASH_OPER_MODE_HMAC; - else - hash_mode = HASH_OPER_MODE_HASH; + /* Check that the device is free */ + spin_lock(&device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (!device_data->current_ctx) { + if (down_trylock(&driver_data.device_allocation)) + dev_dbg(&pdev->dev, "[%s]: Cryp still in use!" + "Shutting down anyway...", __func__); + /** + * (Allocate the device) + * Need to set this to non-null (dummy) value, + * to avoid usage if context switching. 
+ */ + device_data->current_ctx++; + } + spin_unlock(&device_data->ctx_lock); - for (count = 0; count < HASH_CSR_COUNT; count++) { - if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) - break; + /* Remove the device from the list */ + if (klist_node_attached(&device_data->list_node)) + klist_remove(&device_data->list_node); - device_state->csr[count] = - sys_ctx_g.registry[hid]->csrx[count]; - } + /* If this was the last device, remove the services */ + if (list_empty(&driver_data.device_list.k_list)) + ahash_algs_unregister_all(); - device_state->csfull = sys_ctx_g.registry[hid]->csfull; - device_state->csdatain = sys_ctx_g.registry[hid]->csdatain; + iounmap(device_data->base); - device_state->temp_cr = temp_cr; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) + release_mem_region(res->start, resource_size(res)); - return hash_rv; + if (hash_disable_power(device_data, false)) + dev_err(&pdev->dev, "[%s] hash_disable_power() failed", + __func__); } /** - * hash_init_base_address - This routine initializes hash register base - * address. It also checks for peripheral Ids and PCell Ids. - * @hid: Hardware device ID - * @base_address: Hash hardware base address - * - * Reentrancy: Non Re-entrant, global variable registry (register base address) - * is being modified. + * u8500_hash_suspend - Function that suspends the hash device. + * @pdev: The platform device. + * @state: - */ -int hash_init_base_address(int hid, t_logical_address base_address) +static int u8500_hash_suspend(struct platform_device *pdev, pm_message_t state) { - int hash_rv = HASH_OK; - - pr_debug("[u8500_hash_alg] hash_init_base_address())"); - - if (0 != base_address) { - /* Initializing the registers structure */ - sys_ctx_g.registry[hid] = - (struct hash_register *) base_address; - - /* Checking Peripheral Ids */ - if ((HASH_P_ID0 == sys_ctx_g.registry[hid]->periphid0) - && (HASH_P_ID1 == sys_ctx_g.registry[hid]->periphid1) - && (HASH_P_ID2 == sys_ctx_g.registry[hid]->periphid2) - && (HASH_P_ID3 == sys_ctx_g.registry[hid]->periphid3) - && (HASH_CELL_ID0 == sys_ctx_g.registry[hid]->cellid0) - && (HASH_CELL_ID1 == sys_ctx_g.registry[hid]->cellid1) - && (HASH_CELL_ID2 == sys_ctx_g.registry[hid]->cellid2) - && (HASH_CELL_ID3 == sys_ctx_g.registry[hid]->cellid3) - ) { - hash_rv = HASH_OK; - return hash_rv; - } else { - hash_rv = HASH_UNSUPPORTED_HW; - pr_err("[u8500_hash_alg] HASH_UNSUPPORTED_HW!"); - return hash_rv; - } - } /* end if */ - else { - hash_rv = HASH_INVALID_PARAMETER; - pr_err("[u8500_hash_alg] HASH_INVALID_PARAMETER!"); - return hash_rv; + int ret; + struct hash_device_data *device_data; + struct hash_ctx *temp_ctx = NULL; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", + __func__); + return -ENOMEM; } + + spin_lock(&device_data->ctx_lock); + if (!device_data->current_ctx) + device_data->current_ctx++; + spin_unlock(&device_data->ctx_lock); + + if (device_data->current_ctx == ++temp_ctx) { + if (down_interruptible(&driver_data.device_allocation)) + dev_dbg(&pdev->dev, "[%s]: down_interruptible() " + "failed", __func__); + ret = hash_disable_power(device_data, false); + + } else + ret = hash_disable_power(device_data, true); + + if (ret) + dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__); + + return ret; } /** - * hash_get_digest - Gets the digest. 
- * @hid: Hardware device ID - * @digest: User allocated byte array for the calculated digest - * @algorithm: The algorithm in use. - * - * Reentrancy: Non Re-entrant, global variable registry (hash control register) - * is being modified. - * - * Note that, if this is called before the final message has been handle it - * will return the intermediate message digest. + * u8500_hash_resume - Function that resume the hash device. + * @pdev: The platform device. */ -void hash_get_digest(int hid, u8 *digest, int algorithm) +static int u8500_hash_resume(struct platform_device *pdev) { - u32 temp_hx_val, count; - int loop_ctr; + int ret = 0; + struct hash_device_data *device_data; + struct hash_ctx *temp_ctx = NULL; - if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA2) { - pr_err("[hash_get_digest] Incorrect algorithm %d", algorithm); - return; + dev_dbg(&pdev->dev, "[%s]", __func__); + + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", + __func__); + return -ENOMEM; } - if (algorithm == HASH_ALGO_SHA1) - loop_ctr = HASH_SHA1_DIGEST_SIZE / sizeof(u32); + spin_lock(&device_data->ctx_lock); + if (device_data->current_ctx == ++temp_ctx) + device_data->current_ctx = NULL; + spin_unlock(&device_data->ctx_lock); + + if (!device_data->current_ctx) + up(&driver_data.device_allocation); else - loop_ctr = HASH_SHA2_DIGEST_SIZE / sizeof(u32); + ret = hash_enable_power(device_data, true); - pr_debug("[u8500_hash_alg] hash_get_digest(digest array:(0x%x))", - (u32) digest); + if (ret) + dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!", + __func__); - /* Copy result into digest array */ - for (count = 0; count < loop_ctr; count++) { - temp_hx_val = HASH_GET_HX(count); - digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF); - digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF); - digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF); - digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF); + return ret; +} + +static struct platform_driver hash_driver = { + .probe = u8500_hash_probe, + .remove = u8500_hash_remove, + .shutdown = u8500_hash_shutdown, + .suspend = u8500_hash_suspend, + .resume = u8500_hash_resume, + .driver = { + .owner = THIS_MODULE, + .name = "hash1", } +}; + +/** + * u8500_hash_mod_init - The kernel module init function. + */ +static int __init u8500_hash_mod_init(void) +{ + pr_debug("[%s] is called!", __func__); + + klist_init(&driver_data.device_list, NULL, NULL); + /* Initialize the semaphore to 0 devices (locked state) */ + sema_init(&driver_data.device_allocation, 0); + + return platform_driver_register(&hash_driver); } +/** + * u8500_hash_mod_fini - The kernel module exit function. + */ +static void __exit u8500_hash_mod_fini(void) +{ + pr_debug("[%s] is called!", __func__); + platform_driver_unregister(&hash_driver); + return; +} module_init(u8500_hash_mod_init); module_exit(u8500_hash_mod_fini); -module_param(debug, int, 0); - MODULE_DESCRIPTION("Driver for ST-Ericsson U8500 HASH engine."); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 8645233e6375a93e4054ad1568b79bed8271d50c Mon Sep 17 00:00:00 2001 From: Joakim Bech Date: Mon, 23 May 2011 16:23:56 +0200 Subject: cryp: CPU mode register read/write optimizations - Minimize the number of read/write to the cryp hardware. - Write direct to hardware instead of going through functions that aren't inlined to get rid of function prologues/epilogues. 
- Remove NULL checks where performance is needed and the check itself is unnecessary since it is catched earlier in the callstack. - Remove unused defines/includes that are not used any longer due to above optimizations. ST-Ericsson ID: 341921 ST-Ericsson Linux next: Not tested, ER 320876 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: I9efff1714f851672f8cb04e0c946a753a4b3f4b8 Signed-off-by: Joakim Bech Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/23719 Reviewed-by: Berne HEBARK Reviewed-by: QATEST Reviewed-by: Jonas ABERG --- drivers/crypto/ux500/cryp/cryp.c | 119 +++++++++++++--------------------- drivers/crypto/ux500/cryp/cryp.h | 11 +--- drivers/crypto/ux500/cryp/cryp_core.c | 58 ++++++----------- drivers/crypto/ux500/cryp/cryp_p.h | 4 +- 4 files changed, 69 insertions(+), 123 deletions(-) diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index ed9eeccf1d3..f627c57d9bd 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -8,12 +8,8 @@ * License terms: GNU General Public License (GPL) version 2 */ -#include -#include #include -#include #include -#include #include #include "cryp_p.h" @@ -94,23 +90,25 @@ void cryp_flush_inoutfifo(struct cryp_device_data *device_data) /** * cryp_set_configuration - This routine set the cr CRYP IP * @device_data: Pointer to the device data struct for base address. - * @p_cryp_config: Pointer to the configuration parameter + * @cryp_config: Pointer to the configuration parameter + * @control_register: The control register to be written later on. */ int cryp_set_configuration(struct cryp_device_data *device_data, - struct cryp_config *p_cryp_config) + struct cryp_config *cryp_config, + u32 *control_register) { - if (NULL == device_data || NULL == p_cryp_config) + u32 cr_for_kse; + + if (NULL == device_data || NULL == cryp_config) return -EINVAL; - CRYP_PUT_BITS(&device_data->base->cr, - p_cryp_config->keysize, - CRYP_CR_KEYSIZE_POS, - CRYP_CR_KEYSIZE_MASK); + *control_register |= (cryp_config->keysize << CRYP_CR_KEYSIZE_POS); /* Prepare key for decryption in AES_ECB and AES_CBC mode. */ - if ((CRYP_ALGORITHM_DECRYPT == p_cryp_config->algodir) && - ((CRYP_ALGO_AES_ECB == p_cryp_config->algomode) || - (CRYP_ALGO_AES_CBC == p_cryp_config->algomode))) { + if ((CRYP_ALGORITHM_DECRYPT == cryp_config->algodir) && + ((CRYP_ALGO_AES_ECB == cryp_config->algomode) || + (CRYP_ALGO_AES_CBC == cryp_config->algomode))) { + cr_for_kse = *control_register; /* * This seems a bit odd, but it is indeed needed to set this to * encrypt even though it is a decryption that we are doing. It @@ -118,49 +116,27 @@ int cryp_set_configuration(struct cryp_device_data *device_data, * After the keyprepartion for decrypting is done you should set * algodir back to decryption, which is done outside this if * statement. - */ - CRYP_PUT_BITS(&device_data->base->cr, - CRYP_ALGORITHM_ENCRYPT, - CRYP_CR_ALGODIR_POS, - CRYP_CR_ALGODIR_MASK); - - /* + * * According to design specification we should set mode ECB * during key preparation even though we might be running CBC * when enter this function. - */ - CRYP_PUT_BITS(&device_data->base->cr, - CRYP_ALGO_AES_ECB, - CRYP_CR_ALGOMODE_POS, - CRYP_CR_ALGOMODE_MASK); - - CRYP_PUT_BITS(&device_data->base->cr, - CRYP_CRYPEN_ENABLE, - CRYP_CR_CRYPEN_POS, - CRYP_CR_CRYPEN_MASK); - - /* + * * Writing to KSE_ENABLED will drop CRYPEN when key preparation * is done. Therefore we need to set CRYPEN again outside this * if statement when running decryption. 
*/ - CRYP_PUT_BITS(&device_data->base->cr, - KSE_ENABLED, - CRYP_CR_KSE_POS, - CRYP_CR_KSE_MASK); + cr_for_kse |= ((CRYP_ALGORITHM_ENCRYPT << CRYP_CR_ALGODIR_POS) | + (CRYP_ALGO_AES_ECB << CRYP_CR_ALGOMODE_POS) | + (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS) | + (KSE_ENABLED << CRYP_CR_KSE_POS)); + writel(cr_for_kse, &device_data->base->cr); cryp_wait_until_done(device_data); } - CRYP_PUT_BITS(&device_data->base->cr, - p_cryp_config->algomode, - CRYP_CR_ALGOMODE_POS, - CRYP_CR_ALGOMODE_MASK); - - CRYP_PUT_BITS(&device_data->base->cr, - p_cryp_config->algodir, - CRYP_CR_ALGODIR_POS, - CRYP_CR_ALGODIR_MASK); + *control_register |= + ((cryp_config->algomode << CRYP_CR_ALGOMODE_POS) | + (cryp_config->algodir << CRYP_CR_ALGODIR_POS)); return 0; } @@ -300,7 +276,10 @@ int cryp_configure_init_vector(struct cryp_device_data *device_data, void cryp_save_device_context(struct cryp_device_data *device_data, struct cryp_device_context *ctx) { + enum cryp_algo_mode algomode; struct cryp_register *src_reg = device_data->base; + struct cryp_config *config = + (struct cryp_config *)device_data->current_ctx; /* * Always start by disable the hardware and wait for it to finish the @@ -315,28 +294,29 @@ void cryp_save_device_context(struct cryp_device_data *device_data, ctx->cr = readl(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK; - CRYP_PUT_BITS(&src_reg->cr, 1, CRYP_CR_KEYRDEN_POS, - CRYP_CR_KEYRDEN_MASK); + switch (config->keysize) { + case CRYP_KEY_SIZE_256: + ctx->key_4_l = readl(&src_reg->key_4_l); + ctx->key_4_r = readl(&src_reg->key_4_r); - ctx->key_1_l = readl(&src_reg->key_1_l); - ctx->key_1_r = readl(&src_reg->key_1_r); - ctx->key_2_l = readl(&src_reg->key_2_l); - ctx->key_2_r = readl(&src_reg->key_2_r); - ctx->key_3_l = readl(&src_reg->key_3_l); - ctx->key_3_r = readl(&src_reg->key_3_r); - ctx->key_4_l = readl(&src_reg->key_4_l); - ctx->key_4_r = readl(&src_reg->key_4_r); + case CRYP_KEY_SIZE_192: + ctx->key_3_l = readl(&src_reg->key_3_l); + ctx->key_3_r = readl(&src_reg->key_3_r); - CRYP_PUT_BITS(&src_reg->cr, 0, CRYP_CR_KEYRDEN_POS, - CRYP_CR_KEYRDEN_MASK); + case CRYP_KEY_SIZE_128: + ctx->key_2_l = readl(&src_reg->key_2_l); + ctx->key_2_r = readl(&src_reg->key_2_r); + + default: + ctx->key_1_l = readl(&src_reg->key_1_l); + ctx->key_1_r = readl(&src_reg->key_1_r); + } /* Save IV for CBC mode for both AES and DES. */ - if (CRYP_TEST_BITS(&src_reg->cr, CRYP_CR_ALGOMODE_POS) == - CRYP_ALGO_TDES_CBC || - CRYP_TEST_BITS(&src_reg->cr, CRYP_CR_ALGOMODE_POS) == - CRYP_ALGO_DES_CBC || - CRYP_TEST_BITS(&src_reg->cr, CRYP_CR_ALGOMODE_POS) == - CRYP_ALGO_AES_CBC) { + algomode = ((ctx->cr & CRYP_CR_ALGOMODE_MASK) >> CRYP_CR_ALGOMODE_POS); + if (algomode == CRYP_ALGO_TDES_CBC || + algomode == CRYP_ALGO_DES_CBC || + algomode == CRYP_ALGO_AES_CBC) { ctx->init_vect_0_l = readl(&src_reg->init_vect_0_l); ctx->init_vect_0_r = readl(&src_reg->init_vect_0_r); ctx->init_vect_1_l = readl(&src_reg->init_vect_1_l); @@ -357,7 +337,6 @@ void cryp_restore_device_context(struct cryp_device_data *device_data, struct cryp_config *config = (struct cryp_config *)device_data->current_ctx; - /* * Fall through for all items in switch statement. DES is captured in * the default. 
@@ -389,9 +368,6 @@ void cryp_restore_device_context(struct cryp_device_data *device_data, writel(ctx->init_vect_1_l, ®->init_vect_1_l); writel(ctx->init_vect_1_r, ®->init_vect_1_r); } - - writel(ctx->cr, ®->cr); - cryp_activity(device_data, CRYP_CRYPEN_ENABLE); } /** @@ -402,8 +378,6 @@ void cryp_restore_device_context(struct cryp_device_data *device_data, */ int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data) { - if (NULL == device_data) - return -EINVAL; writel(write_data, &device_data->base->din); return 0; @@ -417,11 +391,6 @@ int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data) */ int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data) { - if (NULL == device_data) - return -EINVAL; - if (NULL == read_data) - return -EINVAL; - *read_data = readl(&device_data->base->dout); return 0; diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h index ee7aee3dcb1..bfa2beb694f 100644 --- a/drivers/crypto/ux500/cryp/cryp.h +++ b/drivers/crypto/ux500/cryp/cryp.h @@ -53,12 +53,6 @@ enum cryp_key_prep { #define CRYP_KEY_SIZE_192 (1) #define CRYP_KEY_SIZE_256 (2) -/* Data type Swap */ -#define CRYP_DATA_TYPE_32BIT_SWAP (0) -#define CRYP_DATA_TYPE_16BIT_SWAP (1) -#define CRYP_DATA_TYPE_8BIT_SWAP (2) -#define CRYP_DATA_TYPE_BIT_SWAP (3) - /* AES modes */ enum cryp_algo_mode { CRYP_ALGO_TDES_ECB, @@ -88,7 +82,6 @@ enum cryp_mode { * struct cryp_config - * @keyrden: Cryp state enable/disable * @keysize: Key size for AES - * @datatype: Data type Swap * @algomode: AES modes * @algodir: Cryp Encryption or Decryption * @@ -97,7 +90,6 @@ enum cryp_mode { struct cryp_config { enum cryp_state keyrden; int keysize; - int datatype; enum cryp_algo_mode algomode; enum cryp_algorithm_dir algodir; }; @@ -267,7 +259,8 @@ void cryp_activity(struct cryp_device_data *device_data, void cryp_flush_inoutfifo(struct cryp_device_data *device_data); int cryp_set_configuration(struct cryp_device_data *device_data, - struct cryp_config *p_cryp_config); + struct cryp_config *cryp_config, + u32 *control_register); void cryp_configure_for_dma(struct cryp_device_data *device_data, enum cryp_dma_req_type dma_req); diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index f67577c386e..051874bff03 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -358,13 +358,9 @@ static int cfg_keys(struct cryp_ctx *ctx) static int cryp_setup_context(struct cryp_ctx *ctx, struct cryp_device_data *device_data) { + u32 control_register = CRYP_CR_DEFAULT; cryp_flush_inoutfifo(device_data); - CRYP_PUT_BITS(&device_data->base->cr, - ctx->config.datatype, - CRYP_CR_DATATYPE_POS, - CRYP_CR_DATATYPE_MASK); - switch (cryp_mode) { case CRYP_MODE_INTERRUPT: writel(CRYP_IMSC_DEFAULT, &device_data->base->imsc); @@ -378,9 +374,10 @@ static int cryp_setup_context(struct cryp_ctx *ctx, break; } - if (ctx->updated) + if (ctx->updated) { cryp_restore_device_context(device_data, &ctx->dev_ctx); - else { + control_register = ctx->dev_ctx.cr; + } else { if (cfg_keys(ctx) != 0) { dev_err(ctx->device->dev, "[%s]: cfg_keys failed!", __func__); @@ -395,10 +392,12 @@ static int cryp_setup_context(struct cryp_ctx *ctx, return -EPERM; } - cryp_set_configuration(device_data, &ctx->config); + cryp_set_configuration(device_data, &ctx->config, + &control_register); } - cryp_activity(device_data, CRYP_CRYPEN_ENABLE); + writel(control_register | (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS), + 
&device_data->base->cr); return 0; } @@ -612,37 +611,28 @@ static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len) return len; } -static int cryp_polling_mode(struct cryp_ctx *ctx, - struct cryp_device_data *device_data) +static void cryp_polling_mode(struct cryp_ctx *ctx, + struct cryp_device_data *device_data) { int i; - int ret = 0; int remaining_length = ctx->datalen; - const u8 *indata = ctx->indata; - u8 *outdata = ctx->outdata; + u32 *indata = (u32 *)ctx->indata; + u32 *outdata = (u32 *)ctx->outdata; - cryp_activity(device_data, CRYP_CRYPEN_ENABLE); while (remaining_length > 0) { for (i = 0; i < ctx->blocksize / BYTES_PER_WORD; i++) { - ret = cryp_write_indata(device_data, - *((u32 *)indata)); - if (ret) - goto out; - indata += BYTES_PER_WORD; + writel(*indata, &device_data->base->din); + ++indata; remaining_length -= BYTES_PER_WORD; } cryp_wait_until_done(device_data); + for (i = 0; i < ctx->blocksize / BYTES_PER_WORD; i++) { - ret = cryp_read_outdata(device_data, - (u32 *)outdata); - if (ret) - goto out; - outdata += BYTES_PER_WORD; + *outdata = readl(&device_data->base->dout); + ++outdata; } cryp_wait_until_done(device_data); } -out: - return ret; } static int cryp_disable_power(struct device *dev, @@ -727,7 +717,7 @@ out: static int hw_crypt_noxts(struct cryp_ctx *ctx, struct cryp_device_data *device_data) { - int ret; + int ret = 0; const u8 *indata = ctx->indata; u8 *outdata = ctx->outdata; @@ -737,8 +727,6 @@ static int hw_crypt_noxts(struct cryp_ctx *ctx, pr_debug(DEV_DBG_NAME " [%s]", __func__); ctx->outlen = ctx->datalen; - ctx->config.keyrden = CRYP_STATE_ENABLE; - ctx->config.datatype = CRYP_DATA_TYPE_8BIT_SWAP; if (unlikely(!IS_ALIGNED((u32)indata, 4))) { pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: " @@ -771,9 +759,7 @@ static int hw_crypt_noxts(struct cryp_ctx *ctx, * the polling mode. Overhead of doing DMA setup eats up the * benefits using it. */ - ret = cryp_polling_mode(ctx, device_data); - if (ret) - goto out; + cryp_polling_mode(ctx, device_data); } else { dev_err(ctx->device->dev, "[%s]: Invalid operation mode!", __func__); @@ -781,10 +767,8 @@ static int hw_crypt_noxts(struct cryp_ctx *ctx, goto out; } - ret = 0; cryp_save_device_context(device_data, &ctx->dev_ctx); - if (ctx->updated == 0) - ctx->updated = 1; + ctx->updated = 1; out: ctx->indata = indata; @@ -820,8 +804,6 @@ static int ablk_dma_crypt(struct ablkcipher_request *areq) pr_debug(DEV_DBG_NAME " [%s]", __func__); - ctx->config.keyrden = CRYP_STATE_ENABLE; - ctx->config.datatype = CRYP_DATA_TYPE_8BIT_SWAP; ctx->datalen = areq->nbytes; ctx->outlen = areq->nbytes; diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h index adc95457499..5171ee10f71 100644 --- a/drivers/crypto/ux500/cryp/cryp_p.h +++ b/drivers/crypto/ux500/cryp/cryp_p.h @@ -50,7 +50,9 @@ * CRYP register default values */ #define MAX_DEVICE_SUPPORT 2 -#define CRYP_CR_DEFAULT 0x0002 + +/* Priv set, keyrden set and datatype 8bits swapped set as default. */ +#define CRYP_CR_DEFAULT 0x0482 #define CRYP_DMACR_DEFAULT 0x0 #define CRYP_IMSC_DEFAULT 0x0 #define CRYP_DIN_DEFAULT 0x0 -- cgit v1.2.3 From 1a2d5912e3212adcf88c7d51bd6d16e7c6389aef Mon Sep 17 00:00:00 2001 From: Joakim Bech Date: Fri, 27 May 2011 09:50:33 +0200 Subject: cryp: Session handling for software context - Add a session id to the software crypto context in ux500 crypto driver. 
- Add a static global session id which is a counter that increases when a new job is started (it is ok that this variable loop back to zero when it reach the maximum value). - These session id's will be used to decide if the hardware registers needs be restored or not for continous jobs. As a result we gain performance when we don't need to restore hardware registers. ST-Ericsson ID: 342659 ST-Ericsson Linux next: Not tested, ER 320876 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: I1a1f508e9969e438204d1b4a803f1d637c1e0d60 Signed-off-by: Joakim Bech Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/24019 Reviewed-by: QATOOLS Reviewed-by: QATEST Reviewed-by: Berne HEBARK --- drivers/crypto/ux500/cryp/cryp.h | 2 -- drivers/crypto/ux500/cryp/cryp_core.c | 37 ++++++++++++++++++++++++++++------- 2 files changed, 30 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h index bfa2beb694f..36835a3982e 100644 --- a/drivers/crypto/ux500/cryp/cryp.h +++ b/drivers/crypto/ux500/cryp/cryp.h @@ -80,7 +80,6 @@ enum cryp_mode { /** * struct cryp_config - - * @keyrden: Cryp state enable/disable * @keysize: Key size for AES * @algomode: AES modes * @algodir: Cryp Encryption or Decryption @@ -88,7 +87,6 @@ enum cryp_mode { * CRYP configuration structure to be passed to set configuration */ struct cryp_config { - enum cryp_state keyrden; int keysize; enum cryp_algo_mode algomode; enum cryp_algorithm_dir algodir; diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 051874bff03..9cbfc63ba2b 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -42,6 +42,7 @@ #define BYTES_PER_WORD 4 static int cryp_mode; +static atomic_t session_id; static struct stedma40_chan_cfg *mem_to_engine; static struct stedma40_chan_cfg *engine_to_mem; @@ -85,6 +86,7 @@ struct cryp_ctx { u8 updated; struct cryp_device_context dev_ctx; struct cryp_device_data *device; + u32 session_id; }; static struct cryp_driver_data driver_data; @@ -176,6 +178,18 @@ static inline void swap_words_in_key_and_bits_in_byte(const u8 *in, } } +static void add_session_id(struct cryp_ctx *ctx) +{ + /* + * We never want 0 to be a valid value, since this is the default value + * for the software context. 
+ */ + if (unlikely(atomic_inc_and_test(&session_id))) + atomic_inc(&session_id); + + ctx->session_id = atomic_read(&session_id); +} + static irqreturn_t cryp_interrupt_handler(int irq, void *param) { struct cryp_ctx *ctx; @@ -359,11 +373,10 @@ static int cryp_setup_context(struct cryp_ctx *ctx, struct cryp_device_data *device_data) { u32 control_register = CRYP_CR_DEFAULT; - cryp_flush_inoutfifo(device_data); switch (cryp_mode) { case CRYP_MODE_INTERRUPT: - writel(CRYP_IMSC_DEFAULT, &device_data->base->imsc); + writel(CRYP_IMSC_DEFAULT, &device_data->base->imsc); break; case CRYP_MODE_DMA: @@ -374,10 +387,8 @@ static int cryp_setup_context(struct cryp_ctx *ctx, break; } - if (ctx->updated) { - cryp_restore_device_context(device_data, &ctx->dev_ctx); - control_register = ctx->dev_ctx.cr; - } else { + if (ctx->updated == 0) { + cryp_flush_inoutfifo(device_data); if (cfg_keys(ctx) != 0) { dev_err(ctx->device->dev, "[%s]: cfg_keys failed!", __func__); @@ -394,7 +405,16 @@ static int cryp_setup_context(struct cryp_ctx *ctx, cryp_set_configuration(device_data, &ctx->config, &control_register); - } + add_session_id(ctx); + } else if (ctx->updated == 1 && + ctx->session_id != atomic_read(&session_id)) { + cryp_flush_inoutfifo(device_data); + cryp_restore_device_context(device_data, &ctx->dev_ctx); + + add_session_id(ctx); + control_register = ctx->dev_ctx.cr; + } else + control_register = ctx->dev_ctx.cr; writel(control_register | (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS), &device_data->base->cr); @@ -735,6 +755,7 @@ static int hw_crypt_noxts(struct cryp_ctx *ctx, } ret = cryp_setup_context(ctx, device_data); + if (ret) goto out; @@ -2017,6 +2038,8 @@ static int u8500_cryp_probe(struct platform_device *pdev) /* ... and signal that a new device is available. */ up(&driver_data.device_allocation); + atomic_set(&session_id, 1); + ret = cryp_algs_register_all(); if (ret) { dev_err(dev, "[%s]: cryp_algs_register_all() failed!", -- cgit v1.2.3 From 2632694663e15811bdda9389dc9c38e3dd249879 Mon Sep 17 00:00:00 2001 From: Joakim Bech Date: Mon, 30 May 2011 12:32:36 +0200 Subject: ux500: cryp: Remove register access Remove unnecessary call to function cryp_configure_for_dma when running in CPU mode only (we will gain performance by not calling this function, since it read and write to registers in the crypto hardware). 
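In short: the context-save path is told which mode the job ran in and only programs the DMA control register when DMA was actually used. A condensed sketch of that shape (function body abbreviated to comments; the symbols are the driver's existing ones and the real hunks follow in the diff below):

void cryp_save_device_context(struct cryp_device_data *device_data,
			      struct cryp_device_context *ctx,
			      int cryp_mode)
{
	/* ... disable CRYPEN and wait for the hardware to go idle ... */

	/*
	 * cryp_configure_for_dma() does MMIO reads and writes of its own,
	 * so only call it when this job actually used DMA.
	 */
	if (cryp_mode == CRYP_MODE_DMA)
		cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH);

	/* ... save cr, the key registers and (for CBC) the IVs into *ctx ... */
}

Jobs running in interrupt or polling mode therefore never touch the DMA control register on the save path, which is where the performance gain comes from.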
ST-Ericsson ID: 343001 ST-Ericsson Linux next: Not tested, ER 320876 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: Ie5e99d2f96e6c9218ec767a76eb65a6683ad17dd Signed-off-by: Joakim Bech Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/24150 Reviewed-by: QATEST Reviewed-by: QATOOLS Reviewed-by: Berne HEBARK --- drivers/crypto/ux500/cryp/cryp.c | 7 +++++-- drivers/crypto/ux500/cryp/cryp.h | 3 ++- drivers/crypto/ux500/cryp/cryp_core.c | 7 ++++--- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index f627c57d9bd..30318817b18 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -274,7 +274,8 @@ int cryp_configure_init_vector(struct cryp_device_data *device_data, * @ctx: Crypto device context */ void cryp_save_device_context(struct cryp_device_data *device_data, - struct cryp_device_context *ctx) + struct cryp_device_context *ctx, + int cryp_mode) { enum cryp_algo_mode algomode; struct cryp_register *src_reg = device_data->base; @@ -287,7 +288,9 @@ void cryp_save_device_context(struct cryp_device_data *device_data, */ cryp_activity(device_data, CRYP_CRYPEN_DISABLE); cryp_wait_until_done(device_data); - cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH); + + if (cryp_mode == CRYP_MODE_DMA) + cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH); if (CRYP_TEST_BITS(&src_reg->sr, CRYP_SR_IFEM_MASK) == 0) ctx->din = readl(&src_reg->din); diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h index 36835a3982e..df2e25d4671 100644 --- a/drivers/crypto/ux500/cryp/cryp.h +++ b/drivers/crypto/ux500/cryp/cryp.h @@ -278,7 +278,8 @@ int cryp_configure_protection(struct cryp_device_data *device_data, /* Power management funtions */ void cryp_save_device_context(struct cryp_device_data *device_data, - struct cryp_device_context *ctx); + struct cryp_device_context *ctx, + int cryp_mode); void cryp_restore_device_context(struct cryp_device_data *device_data, struct cryp_device_context *ctx); diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 9cbfc63ba2b..0276501831a 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -670,7 +670,8 @@ static int cryp_disable_power(struct device *dev, spin_lock(&device_data->ctx_lock); if (save_device_context && device_data->current_ctx) { cryp_save_device_context(device_data, - &device_data->current_ctx->dev_ctx); + &device_data->current_ctx->dev_ctx, + cryp_mode); device_data->restore_dev_ctx = true; } spin_unlock(&device_data->ctx_lock); @@ -788,7 +789,7 @@ static int hw_crypt_noxts(struct cryp_ctx *ctx, goto out; } - cryp_save_device_context(device_data, &ctx->dev_ctx); + cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode); ctx->updated = 1; out: @@ -856,7 +857,7 @@ static int ablk_dma_crypt(struct ablkcipher_request *areq) wait_for_completion(&ctx->device->dma.cryp_dma_complete); cryp_dma_done(ctx); - cryp_save_device_context(device_data, &ctx->dev_ctx); + cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode); ctx->updated = 1; out_power: -- cgit v1.2.3 From ac58ec89186c7270936a6d0e122a33b06e78b78e Mon Sep 17 00:00:00 2001 From: Joakim Bech Date: Mon, 30 May 2011 08:58:22 +0200 Subject: ux500: cryp: Use relaxed versions of writel/readl - Use readl_relaxed instead of readl. - Use writel_relaxed instead of writel. - Use writesl where a stream of data is written to the crypto hardware. 
- Use readsl where a stream of data is read from the crypto hardware. The above changes will increase performance when doing cryptographic operations. We noticed about 30%-50% better throughput when running the cryptographic test module tcrypt. ST-Ericsson ID: 343394 ST-Ericsson Linux next: Not tested, ER 320876 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: I32d9229f93a8b86257d5a9ab9729cfc4337caa52 Signed-off-by: Joakim Bech Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/24275 Reviewed-by: QATEST Reviewed-by: Berne HEBARK Reviewed-by: Linus WALLEIJ --- drivers/crypto/ux500/cryp/cryp.c | 109 ++++++++++++++++++---------------- drivers/crypto/ux500/cryp/cryp_core.c | 23 ++++--- drivers/crypto/ux500/cryp/cryp_irq.c | 10 ++-- drivers/crypto/ux500/cryp/cryp_p.h | 9 +-- 4 files changed, 79 insertions(+), 72 deletions(-) diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index 30318817b18..ae4fe318528 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -34,14 +34,22 @@ int cryp_check(struct cryp_device_data *device_data) return -EINVAL; /* Check Peripheral and Pcell Id Register for CRYP */ - if ((CRYP_PERIPHERAL_ID0 == readl(&device_data->base->periphId0)) - && (CRYP_PERIPHERAL_ID1 == readl(&device_data->base->periphId1)) - && (CRYP_PERIPHERAL_ID2 == readl(&device_data->base->periphId2)) - && (CRYP_PERIPHERAL_ID3 == readl(&device_data->base->periphId3)) - && (CRYP_PCELL_ID0 == readl(&device_data->base->pcellId0)) - && (CRYP_PCELL_ID1 == readl(&device_data->base->pcellId1)) - && (CRYP_PCELL_ID2 == readl(&device_data->base->pcellId2)) - && (CRYP_PCELL_ID3 == readl(&device_data->base->pcellId3))) { + if ((CRYP_PERIPHERAL_ID0 == + readl_relaxed(&device_data->base->periphId0)) + && (CRYP_PERIPHERAL_ID1 == + readl_relaxed(&device_data->base->periphId1)) + && (CRYP_PERIPHERAL_ID2 == + readl_relaxed(&device_data->base->periphId2)) + && (CRYP_PERIPHERAL_ID3 == + readl_relaxed(&device_data->base->periphId3)) + && (CRYP_PCELL_ID0 == + readl_relaxed(&device_data->base->pcellId0)) + && (CRYP_PCELL_ID1 == + readl_relaxed(&device_data->base->pcellId1)) + && (CRYP_PCELL_ID2 == + readl_relaxed(&device_data->base->pcellId2)) + && (CRYP_PCELL_ID3 == + readl_relaxed(&device_data->base->pcellId3))) { return 0; } @@ -83,7 +91,8 @@ void cryp_flush_inoutfifo(struct cryp_device_data *device_data) * register when starting a new calculation, which means Input FIFO is * not full and input FIFO is empty. 
*/ - while (readl(&device_data->base->sr) != CRYP_SR_INFIFO_READY_MASK) + while (readl_relaxed(&device_data->base->sr) != + CRYP_SR_INFIFO_READY_MASK) cpu_relax(); } @@ -130,7 +139,7 @@ int cryp_set_configuration(struct cryp_device_data *device_data, (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS) | (KSE_ENABLED << CRYP_CR_KSE_POS)); - writel(cr_for_kse, &device_data->base->cr); + writel_relaxed(cr_for_kse, &device_data->base->cr); cryp_wait_until_done(device_data); } @@ -201,27 +210,27 @@ int cryp_configure_key_values(struct cryp_device_data *device_data, switch (key_reg_index) { case CRYP_KEY_REG_1: - writel(key_value.key_value_left, + writel_relaxed(key_value.key_value_left, &device_data->base->key_1_l); - writel(key_value.key_value_right, + writel_relaxed(key_value.key_value_right, &device_data->base->key_1_r); break; case CRYP_KEY_REG_2: - writel(key_value.key_value_left, + writel_relaxed(key_value.key_value_left, &device_data->base->key_2_l); - writel(key_value.key_value_right, + writel_relaxed(key_value.key_value_right, &device_data->base->key_2_r); break; case CRYP_KEY_REG_3: - writel(key_value.key_value_left, + writel_relaxed(key_value.key_value_left, &device_data->base->key_3_l); - writel(key_value.key_value_right, + writel_relaxed(key_value.key_value_right, &device_data->base->key_3_r); break; case CRYP_KEY_REG_4: - writel(key_value.key_value_left, + writel_relaxed(key_value.key_value_left, &device_data->base->key_4_l); - writel(key_value.key_value_right, + writel_relaxed(key_value.key_value_right, &device_data->base->key_4_r); break; default: @@ -249,15 +258,15 @@ int cryp_configure_init_vector(struct cryp_device_data *device_data, switch (init_vector_index) { case CRYP_INIT_VECTOR_INDEX_0: - writel(init_vector_value.init_value_left, + writel_relaxed(init_vector_value.init_value_left, &device_data->base->init_vect_0_l); - writel(init_vector_value.init_value_right, + writel_relaxed(init_vector_value.init_value_right, &device_data->base->init_vect_0_r); break; case CRYP_INIT_VECTOR_INDEX_1: - writel(init_vector_value.init_value_left, + writel_relaxed(init_vector_value.init_value_left, &device_data->base->init_vect_1_l); - writel(init_vector_value.init_value_right, + writel_relaxed(init_vector_value.init_value_right, &device_data->base->init_vect_1_r); break; default: @@ -293,26 +302,26 @@ void cryp_save_device_context(struct cryp_device_data *device_data, cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH); if (CRYP_TEST_BITS(&src_reg->sr, CRYP_SR_IFEM_MASK) == 0) - ctx->din = readl(&src_reg->din); + ctx->din = readl_relaxed(&src_reg->din); - ctx->cr = readl(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK; + ctx->cr = readl_relaxed(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK; switch (config->keysize) { case CRYP_KEY_SIZE_256: - ctx->key_4_l = readl(&src_reg->key_4_l); - ctx->key_4_r = readl(&src_reg->key_4_r); + ctx->key_4_l = readl_relaxed(&src_reg->key_4_l); + ctx->key_4_r = readl_relaxed(&src_reg->key_4_r); case CRYP_KEY_SIZE_192: - ctx->key_3_l = readl(&src_reg->key_3_l); - ctx->key_3_r = readl(&src_reg->key_3_r); + ctx->key_3_l = readl_relaxed(&src_reg->key_3_l); + ctx->key_3_r = readl_relaxed(&src_reg->key_3_r); case CRYP_KEY_SIZE_128: - ctx->key_2_l = readl(&src_reg->key_2_l); - ctx->key_2_r = readl(&src_reg->key_2_r); + ctx->key_2_l = readl_relaxed(&src_reg->key_2_l); + ctx->key_2_r = readl_relaxed(&src_reg->key_2_r); default: - ctx->key_1_l = readl(&src_reg->key_1_l); - ctx->key_1_r = readl(&src_reg->key_1_r); + ctx->key_1_l = readl_relaxed(&src_reg->key_1_l); + ctx->key_1_r = 
readl_relaxed(&src_reg->key_1_r); } /* Save IV for CBC mode for both AES and DES. */ @@ -320,10 +329,10 @@ void cryp_save_device_context(struct cryp_device_data *device_data, if (algomode == CRYP_ALGO_TDES_CBC || algomode == CRYP_ALGO_DES_CBC || algomode == CRYP_ALGO_AES_CBC) { - ctx->init_vect_0_l = readl(&src_reg->init_vect_0_l); - ctx->init_vect_0_r = readl(&src_reg->init_vect_0_r); - ctx->init_vect_1_l = readl(&src_reg->init_vect_1_l); - ctx->init_vect_1_r = readl(&src_reg->init_vect_1_r); + ctx->init_vect_0_l = readl_relaxed(&src_reg->init_vect_0_l); + ctx->init_vect_0_r = readl_relaxed(&src_reg->init_vect_0_r); + ctx->init_vect_1_l = readl_relaxed(&src_reg->init_vect_1_l); + ctx->init_vect_1_r = readl_relaxed(&src_reg->init_vect_1_r); } } @@ -346,30 +355,30 @@ void cryp_restore_device_context(struct cryp_device_data *device_data, */ switch (config->keysize) { case CRYP_KEY_SIZE_256: - writel(ctx->key_4_l, ®->key_4_l); - writel(ctx->key_4_r, ®->key_4_r); + writel_relaxed(ctx->key_4_l, ®->key_4_l); + writel_relaxed(ctx->key_4_r, ®->key_4_r); case CRYP_KEY_SIZE_192: - writel(ctx->key_3_l, ®->key_3_l); - writel(ctx->key_3_r, ®->key_3_r); + writel_relaxed(ctx->key_3_l, ®->key_3_l); + writel_relaxed(ctx->key_3_r, ®->key_3_r); case CRYP_KEY_SIZE_128: - writel(ctx->key_2_l, ®->key_2_l); - writel(ctx->key_2_r, ®->key_2_r); + writel_relaxed(ctx->key_2_l, ®->key_2_l); + writel_relaxed(ctx->key_2_r, ®->key_2_r); default: - writel(ctx->key_1_l, ®->key_1_l); - writel(ctx->key_1_r, ®->key_1_r); + writel_relaxed(ctx->key_1_l, ®->key_1_l); + writel_relaxed(ctx->key_1_r, ®->key_1_r); } /* Restore IV for CBC mode for AES and DES. */ if (config->algomode == CRYP_ALGO_TDES_CBC || config->algomode == CRYP_ALGO_DES_CBC || config->algomode == CRYP_ALGO_AES_CBC) { - writel(ctx->init_vect_0_l, ®->init_vect_0_l); - writel(ctx->init_vect_0_r, ®->init_vect_0_r); - writel(ctx->init_vect_1_l, ®->init_vect_1_l); - writel(ctx->init_vect_1_r, ®->init_vect_1_r); + writel_relaxed(ctx->init_vect_0_l, ®->init_vect_0_l); + writel_relaxed(ctx->init_vect_0_r, ®->init_vect_0_r); + writel_relaxed(ctx->init_vect_1_l, ®->init_vect_1_l); + writel_relaxed(ctx->init_vect_1_r, ®->init_vect_1_r); } } @@ -381,7 +390,7 @@ void cryp_restore_device_context(struct cryp_device_data *device_data, */ int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data) { - writel(write_data, &device_data->base->din); + writel_relaxed(write_data, &device_data->base->din); return 0; } @@ -394,7 +403,7 @@ int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data) */ int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data) { - *read_data = readl(&device_data->base->dout); + *read_data = readl_relaxed(&device_data->base->dout); return 0; } diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 0276501831a..da168cb57f6 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -376,11 +376,11 @@ static int cryp_setup_context(struct cryp_ctx *ctx, switch (cryp_mode) { case CRYP_MODE_INTERRUPT: - writel(CRYP_IMSC_DEFAULT, &device_data->base->imsc); + writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc); break; case CRYP_MODE_DMA: - writel(CRYP_DMACR_DEFAULT, &device_data->base->dmacr); + writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr); break; default: @@ -416,7 +416,8 @@ static int cryp_setup_context(struct cryp_ctx *ctx, } else control_register = ctx->dev_ctx.cr; - writel(control_register | 
(CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS), + writel(control_register | + (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS), &device_data->base->cr); return 0; @@ -634,23 +635,19 @@ static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len) static void cryp_polling_mode(struct cryp_ctx *ctx, struct cryp_device_data *device_data) { - int i; + int len = ctx->blocksize / BYTES_PER_WORD; int remaining_length = ctx->datalen; u32 *indata = (u32 *)ctx->indata; u32 *outdata = (u32 *)ctx->outdata; while (remaining_length > 0) { - for (i = 0; i < ctx->blocksize / BYTES_PER_WORD; i++) { - writel(*indata, &device_data->base->din); - ++indata; - remaining_length -= BYTES_PER_WORD; - } + writesl(&device_data->base->din, indata, len); + indata += len; + remaining_length -= (len * BYTES_PER_WORD); cryp_wait_until_done(device_data); - for (i = 0; i < ctx->blocksize / BYTES_PER_WORD; i++) { - *outdata = readl(&device_data->base->dout); - ++outdata; - } + readsl(&device_data->base->dout, outdata, len); + outdata += len; cryp_wait_until_done(device_data); } } diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c index 8814acc05d7..08d291cdbe6 100644 --- a/drivers/crypto/ux500/cryp/cryp_irq.c +++ b/drivers/crypto/ux500/cryp/cryp_irq.c @@ -23,9 +23,9 @@ void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src) dev_dbg(device_data->dev, "[%s]", __func__); - i = readl(&device_data->base->imsc); + i = readl_relaxed(&device_data->base->imsc); i = i | irq_src; - writel(i, &device_data->base->imsc); + writel_relaxed(i, &device_data->base->imsc); } void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src) @@ -34,12 +34,12 @@ void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src) dev_dbg(device_data->dev, "[%s]", __func__); - i = readl(&device_data->base->imsc); + i = readl_relaxed(&device_data->base->imsc); i = i & ~irq_src; - writel(i, &device_data->base->imsc); + writel_relaxed(i, &device_data->base->imsc); } bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src) { - return (readl(&device_data->base->mis) & irq_src) > 0; + return (readl_relaxed(&device_data->base->mis) & irq_src) > 0; } diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h index 5171ee10f71..4b615a33fe9 100644 --- a/drivers/crypto/ux500/cryp/cryp_p.h +++ b/drivers/crypto/ux500/cryp/cryp_p.h @@ -21,16 +21,17 @@ * Generic Macros */ #define CRYP_SET_BITS(reg_name, mask) \ - writel((readl(reg_name) | mask), reg_name) + writel_relaxed((readl_relaxed(reg_name) | mask), reg_name) #define CRYP_WRITE_BIT(reg_name, val, mask) \ - writel(((readl(reg_name) & ~(mask)) | ((val) & (mask))), reg_name) + writel_relaxed(((readl_relaxed(reg_name) & ~(mask)) |\ + ((val) & (mask))), reg_name) #define CRYP_TEST_BITS(reg_name, val) \ - (readl(reg_name) & (val)) + (readl_relaxed(reg_name) & (val)) #define CRYP_PUT_BITS(reg, val, shift, mask) \ - writel(((readl(reg) & ~(mask)) | \ + writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \ (((u32)val << shift) & (mask))), reg) /** -- cgit v1.2.3 From 5b40311eb26ec74d23f00ed5ecbb17982ed9d897 Mon Sep 17 00:00:00 2001 From: Jonas Aaberg Date: Wed, 1 Jun 2011 08:26:59 +0200 Subject: ARM: ux500: tee: Remove u8500 v1 support ST-Ericsson Linux next: Not tested, ask SSM for ER ST-Ericsson ID: 342987 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: Ia8afe98cdafbc5f11c115a061e5be75d9bd7ece0 Signed-off-by: Jonas Aaberg Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/24284 
Reviewed-by: Joakim BECH Reviewed-by: QATEST --- arch/arm/mach-ux500/tee_ux500.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c index 707e91284a1..160ca529261 100644 --- a/arch/arm/mach-ux500/tee_ux500.c +++ b/arch/arm/mach-ux500/tee_ux500.c @@ -24,19 +24,14 @@ static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...) va_list ap; u32 ret; - if (cpu_is_u8500v2()) + if (cpu_is_u8500v20_or_later()) hw_sec_rom_pub_bridge = (bridge_func) ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x17300)); - else if (cpu_is_u8500v1()) - hw_sec_rom_pub_bridge = (bridge_func) - ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x18300)); else if (cpu_is_u5500()) hw_sec_rom_pub_bridge = (bridge_func) ((u32)IO_ADDRESS(U5500_BOOT_ROM_BASE + 0x18300)); - else { - pr_err("tee-ux500: Unknown DB Asic!\n"); - return -EIO; - } + else + ux500_unknown_soc(); va_start(ap, cfg); ret = hw_sec_rom_pub_bridge(service_id, cfg, ap); -- cgit v1.2.3 From a5fb564ad8dc07c0da67fd56b11e8568d87a0b8f Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Fri, 14 Oct 2011 16:22:31 +0200 Subject: crypto: align u5500 PRCMU & CPUFREQ management with u8500 (multiple commits in one) Signed-off-by: Philippe Langlais Merge of following commits too: u5500: add support for sysclk basic sysclk support added in PRCMU driver and clock framework driver updated. Signed-off-by: Shreshtha Kumar Sahu U5500: Support for ESRAM12 EPOD in PRCMU driver Signed-off-by: Vijaya Kumar Kilari ux500: regulator: handle different base offset of ePOD ID 5500 ePOD ids are offseted for some reason in the PRCMU driver. Adjust the ids to index the local arrays to avoid memory corruption. Signed-off-by: Rabin Vincent ux500: pm: support PRCMU status check on 5500 This also removes unused 8500v1 code. Signed-off-by: Rabin Vincent arm: ux500: prcmu_ac_wake_req workaround This patch adds a check in prcmu_ac_wake_req that the modem is awake (in terms of the value in the PRCM_MOD_AWAKE_STATUS register) after the AC_WAKE_ACK has been received from the PRCMU FW. If the check fails, a retry is made. This seems to be necessary, since the modem can generate an AC_WAKE_ACK, and then still go to sleep. Signed-off-by: Mattias Nilsson U5500: Add support for PRCMU Mailbox0 Add PRCMU mailbox 0 support for irq wakeup enable and disable Signed-off-by: Vijaya Kumar K U5500: Add support for power state transition PRCMU driver is updated to provide API for system power state transition Signed-off-by: Vijaya Kumar K ARM: ux500: prcmu: Add A9 watchdog interface Signed-off-by: Jonas Aaberg U5500 : ab5500 core interrupt hander update AB5500 interrupts will be now handled by PRCMU and then routed to AB5500 core driver.AB5500 irq handler will no more read the latch registers to find the interrupt reason.Instead PRCMU will read the latch registers and provide the values to core driver. Signed-off-by: Bibek Basu ARM: ux500: prcmu-dbg: Tiny code clean-up Signed-off-by: Jonas Aaberg u5500: add mailbox1 and related function support Add cpufreq-db5500.c file for db5500 CPUfreq support. PRCMU mailbox1 and related functions' support is added. List of functions implemented: - prcmu_get/set_arm_opp - read_mailbox_1 Signed-off-by: Shreshtha Kumar Sahu Fix for PRCMU u5500: PRCMU IRQ should be NO_SUSPEND As on 8500. Signed-off-by: Rabin Vincent ARM: u5500: PRCMU reset API Added API for rebooting the board and for getting the last reboot code. 
Signed-off-by: Pawel Szyszuk Conflicts: arch/arm/mach-ux500/board-u5500-regulators.c arch/arm/mach-ux500/board-u5500.c arch/arm/mach-ux500/clock-db5500.c arch/arm/mach-ux500/cpu.c arch/arm/mach-ux500/devices-db5500.c arch/arm/mach-ux500/include/mach/prcmu.h arch/arm/mach-ux500/pm/Kconfig arch/arm/mach-ux500/pm/pm.c arch/arm/mach-ux500/pm/runtime.c arch/arm/mach-ux500/pm/suspend.c arch/arm/mach-ux500/prcmu-debug.c drivers/cpufreq/db8500-cpufreq.c drivers/mfd/ab5500-core.c drivers/mfd/db5500-prcmu.c drivers/mfd/db8500-prcmu.c drivers/regulator/db5500-prcmu.c drivers/regulator/dbx500-prcmu.c include/linux/mfd/db5500-prcmu.h include/linux/mfd/db8500-prcmu.h include/linux/regulator/db5500-prcmu.h --- drivers/crypto/ux500/cryp/cryp_core.c | 2 +- drivers/crypto/ux500/hash/hash_core.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index da168cb57f6..f0aed67f29a 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 3b472d0bfaa..aec6bcf5e1d 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -21,7 +21,7 @@ #include #include -#include +#include #include #include -- cgit v1.2.3 From 270c0136433a8bae098a69914ee466f464046003 Mon Sep 17 00:00:00 2001 From: Berne Hebark Date: Thu, 9 Jun 2011 13:08:25 +0200 Subject: crypto: ux500: hash: Empty message support Added support for empty message (pre-calculated) digests. Support for data > blocksize, bug introduced in commit using ahash. ST-Ericsson ID: 344561 ST-Ericsson Linux next: Not tested, ER 320876 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: I7d9615b074438d1e072bd6c291d392e4ff5e7768 Signed-off-by: Berne Hebark Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/24825 Reviewed-by: QATEST Reviewed-by: QATOOLS --- drivers/crypto/ux500/hash/hash_alg.h | 6 +- drivers/crypto/ux500/hash/hash_core.c | 369 ++++++++++++++++++++++------------ 2 files changed, 245 insertions(+), 130 deletions(-) diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h index 3bf1354ea03..2895c2ffe5e 100644 --- a/drivers/crypto/ux500/hash/hash_alg.h +++ b/drivers/crypto/ux500/hash/hash_alg.h @@ -10,11 +10,7 @@ #include -/* Number of bytes the message digest */ -#define HASH_MSG_DIGEST_SIZE 32 -#define HASH_BLOCK_SIZE 64 -#define HASH_SHA1_DIGEST_SIZE 20 -#define HASH_SHA2_DIGEST_SIZE 32 +#define HASH_BLOCK_SIZE 64 /* Maximum value of the length's high word */ #define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index aec6bcf5e1d..9d21b5393c6 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -35,6 +35,22 @@ #define DEV_DBG_NAME "hashX hashX:" +/** + * Pre-calculated empty message digests. 
+ */ +static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { + 0xDA, 0x39, 0xA3, 0xEE, 0x5E, 0x6B, 0x4B, 0x0D, + 0x32, 0x55, 0xBF, 0xEF, 0x95, 0x60, 0x18, 0x90, + 0xAF, 0xD8, 0x07, 0x09 +}; + +static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, + 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, + 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 +}; + /** * struct hash_driver_data - data specific to the driver. * @@ -63,6 +79,69 @@ static struct hash_driver_data driver_data; static void hash_messagepad(struct hash_device_data *device_data, const u32 *message, u8 index_bytes); +/** + * release_hash_device - Releases a previously allocated hash device. + * @device_data: Structure for the hash device. + * + */ +static void release_hash_device(struct hash_device_data *device_data) +{ + spin_lock(&device_data->ctx_lock); + device_data->current_ctx->device = NULL; + device_data->current_ctx = NULL; + spin_unlock(&device_data->ctx_lock); + + /* + * The down_interruptible part for this semaphore is called in + * cryp_get_device_data. + */ + up(&driver_data.device_allocation); +} + +/** + * get_empty_message_digest - Returns a pre-calculated digest for + * the empty message. + * @device_data: Structure for the hash device. + * @zero_hash: Buffer to return the empty message digest. + * @zero_hash_size: Hash size of the empty message digest. + */ +static int get_empty_message_digest( + struct hash_device_data *device_data, + u8 *zero_hash, u32 *zero_hash_size) +{ + int ret = 0; + struct hash_ctx *ctx = device_data->current_ctx; + + /** + * Caller responsible for ctx != NULL. + */ + + if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { + if (HASH_ALGO_SHA1 == ctx->config.algorithm) { + zero_hash = zero_message_hash_sha1; + *zero_hash_size = SHA1_DIGEST_SIZE; + } else if (HASH_ALGO_SHA256 == + ctx->config.algorithm) { + zero_hash = zero_message_hash_sha256; + *zero_hash_size = SHA256_DIGEST_SIZE; + } else { + dev_err(device_data->dev, "[%s] " + "Incorrect algorithm!" + , __func__); + ret = -EINVAL; + goto out; + } + } else { + dev_err(device_data->dev, "[%s] " + "Incorrect hash mode!" + , __func__); + ret = -EINVAL; + goto out; + } +out: + return ret; +} + /** * hash_disable_power - Request to disable power and clock. * @device_data: Structure for the hash device. @@ -215,17 +294,15 @@ static int hash_get_device_data(struct hash_ctx *ctx, /** * init_hash_hw - Initialise the hash hardware for a new calculation. * @device_data: Structure for the hash device. - * @req: The hash request for the job. + * @ctx: The hash context. * * This function will enable the bits needed to clear and start a new * calculation. 
*/ static int init_hash_hw(struct hash_device_data *device_data, - struct ahash_request *req) + struct hash_ctx *ctx) { int ret = 0; - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_ctx *ctx = crypto_ahash_ctx(tfm); dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32)ctx); @@ -252,7 +329,7 @@ static int hash_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME "[%s] (ctx=0x%x)!", __func__, (u32)ctx); + pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes); memset(&ctx->state, 0, sizeof(struct hash_state)); ctx->updated = 0; @@ -331,11 +408,13 @@ static void hash_messagepad(struct hash_device_data *device_data, HASH_SET_NBLW(index_bytes * 8); dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__, readl(&device_data->base->din), - readl(&device_data->base->str)); + (int)(readl(&device_data->base->str) & + HASH_STR_NBLW_MASK)); HASH_SET_DCAL; dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d", __func__, readl(&device_data->base->din), - readl(&device_data->base->str)); + (int)(readl(&device_data->base->str) & + HASH_STR_NBLW_MASK)); while (device_data->base->str & HASH_STR_DCAL_MASK) cpu_relax(); @@ -484,6 +563,86 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); } +int hash_process_data( + struct hash_device_data *device_data, + struct hash_ctx *ctx, int msg_length, u8 *data_buffer, + u8 *buffer, u8 *index) +{ + int ret = 0; + u32 count; + + do { + if ((*index + msg_length) < HASH_BLOCK_SIZE) { + for (count = 0; count < msg_length; count++) { + buffer[*index + count] = + *(data_buffer + count); + } + *index += msg_length; + msg_length = 0; + } else { + if (ctx->updated) { + ret = hash_resume_state(device_data, + &ctx->state); + if (ret) { + dev_err(device_data->dev, "[%s] " + "hash_resume_state()" + " failed!", __func__); + goto out; + } + + } else { + ret = init_hash_hw(device_data, ctx); + if (ret) { + dev_err(device_data->dev, "[%s] " + "init_hash_hw()" + " failed!", __func__); + goto out; + } + ctx->updated = 1; + } + /* + * If 'data_buffer' is four byte aligned and + * local buffer does not have any data, we can + * write data directly from 'data_buffer' to + * HW peripheral, otherwise we first copy data + * to a local buffer + */ + if ((0 == (((u32)data_buffer) % 4)) + && (0 == *index)) + hash_processblock(device_data, + (const u32 *) + data_buffer); + else { + for (count = 0; count < + (u32)(HASH_BLOCK_SIZE - + *index); + count++) { + buffer[*index + count] = + *(data_buffer + count); + } + hash_processblock(device_data, + (const u32 *)buffer); + } + hash_incrementlength(ctx, HASH_BLOCK_SIZE); + data_buffer += (HASH_BLOCK_SIZE - *index); + msg_length -= (HASH_BLOCK_SIZE - *index); + *index = 0; + + ret = hash_save_state(device_data, + &ctx->state); + if (ret) { + dev_err(device_data->dev, "[%s] " + "hash_save_state()" + " failed!", __func__); + goto out; + } + } + } while (msg_length != 0); +out: + + return ret; +} + /** * hash_hw_update - Updates current HASH computation hashing another part of * the message. 
@@ -496,22 +655,22 @@ int hash_hw_update(struct ahash_request *req) { int ret = 0; u8 index; - u32 count; - u8 *p_buffer; + u8 *buffer; struct hash_device_data *device_data; - u8 *p_data_buffer; + u8 *data_buffer; struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); struct crypto_hash_walk walk; int msg_length = crypto_hash_walk_first(req, &walk); - pr_debug(DEV_DBG_NAME "[%s] ", __func__); + pr_debug(DEV_DBG_NAME " [%s] datalength: %d", __func__, msg_length); + /* Empty message ("") is correct indata */ if (msg_length == 0) - return -EPERM; + return ret; index = ctx->state.index; - p_buffer = (u8 *)ctx->state.buffer; + buffer = (u8 *)ctx->state.buffer; /* Check if ctx->state.length + msg_length overflows */ @@ -538,91 +697,29 @@ int hash_hw_update(struct ahash_request *req) /* Main loop */ while (0 != msg_length) { - p_data_buffer = walk.data; - if ((index + msg_length) < HASH_BLOCK_SIZE) { - for (count = 0; count < msg_length; count++) { - p_buffer[index + count] = - *(p_data_buffer + count); - } - - index += msg_length; - } else { - if (!ctx->updated) { - ret = init_hash_hw(device_data, req); - if (ret) { - dev_err(device_data->dev, "[%s] " - "init_hash_hw() failed!", - __func__); - goto out; - } - ctx->updated = 1; - } else { - ret = hash_resume_state(device_data, - &ctx->state); - if (ret) { - dev_err(device_data->dev, "[%s] " - "hash_resume_state() failed!", - __func__); - goto out_power; - } - } - - /* - * If 'p_data_buffer' is four byte aligned and local - * buffer does not have any data, we can write data - * directly from 'p_data_buffer' to HW peripheral, - * otherwise we first copy data to a local buffer - */ - if ((0 == (((u32) p_data_buffer) % 4)) - && (0 == index)) { - hash_processblock(device_data, - (const u32 *)p_data_buffer); - } else { - for (count = 0; - count < (u32)(HASH_BLOCK_SIZE - index); - count++) { - p_buffer[index + count] = - *(p_data_buffer + count); - } - - hash_processblock(device_data, - (const u32 *)p_buffer); - } - - hash_incrementlength(ctx, HASH_BLOCK_SIZE); - index = 0; - - ret = hash_save_state(device_data, &ctx->state); - if (ret) { - dev_err(device_data->dev, "[%s] " - "hash_save_state() failed!", __func__); - goto out_power; - } + data_buffer = walk.data; + ret = hash_process_data(device_data, ctx, + msg_length, data_buffer, buffer, &index); + if (ret) { + dev_err(device_data->dev, "[%s] hash_internal_hw_" + "update() failed!", __func__); + goto out_power; } + msg_length = crypto_hash_walk_done(&walk, 0); } ctx->state.index = index; - dev_dbg(device_data->dev, "[%s] END(msg_length=%d in bits, in=%d, " - "bin=%d))", __func__, msg_length, ctx->state.index, - ctx->state.bit_index); + dev_dbg(device_data->dev, "[%s] indata length=%d, " + "bin=%d))", __func__, ctx->state.index, ctx->state.bit_index); out_power: /* Disable power (and clock) */ if (hash_disable_power(device_data, false)) dev_err(device_data->dev, "[%s]: " "hash_disable_power() failed!", __func__); out: - spin_lock(&device_data->ctx_lock); - device_data->current_ctx = NULL; - ctx->device = NULL; - spin_unlock(&device_data->ctx_lock); - - /* - * The down_interruptible part for this semaphore is called in - * cryp_get_device_data. 
- */ - up(&driver_data.device_allocation); + release_hash_device(device_data); return ret; } @@ -812,9 +909,9 @@ void hash_get_digest(struct hash_device_data *device_data, } if (algorithm == HASH_ALGO_SHA1) - loop_ctr = HASH_SHA1_DIGEST_SIZE / sizeof(u32); + loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32); else - loop_ctr = HASH_SHA2_DIGEST_SIZE / sizeof(u32); + loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); dev_dbg(device_data->dev, "[%s] digest array:(0x%x)", __func__, (u32) digest); @@ -837,11 +934,12 @@ static int ahash_update(struct ahash_request *req) { int ret = 0; - pr_debug(DEV_DBG_NAME "[%s] ", __func__); + pr_debug(DEV_DBG_NAME " [%s] ", __func__); ret = hash_hw_update(req); if (ret) { - pr_err(DEV_DBG_NAME "[%s] hash_hw_update() failed!", __func__); + pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!", + __func__); goto out; } @@ -859,9 +957,10 @@ static int ahash_final(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); struct hash_device_data *device_data; - u8 digest[HASH_MSG_DIGEST_SIZE]; + u8 digest[SHA256_DIGEST_SIZE]; pr_debug(DEV_DBG_NAME "[%s] ", __func__); + ret = hash_get_device_data(ctx, &device_data); if (ret) return ret; @@ -876,18 +975,40 @@ static int ahash_final(struct ahash_request *req) goto out; } - if (!ctx->updated) { - ret = init_hash_hw(device_data, req); + if (ctx->updated) { + ret = hash_resume_state(device_data, &ctx->state); + if (ret) { - dev_err(device_data->dev, "[%s] init_hash_hw() " + dev_err(device_data->dev, "[%s] hash_resume_state() " "failed!", __func__); goto out_power; } - } else { - ret = hash_resume_state(device_data, &ctx->state); + } else if (!ctx->state.index) { + u8 zero_hash[SHA256_DIGEST_SIZE]; + u32 zero_hash_size = 0; + /** + * Use a pre-calculated empty message digest + * (workaround since hw return zeroes, hw bug!?) + */ + ret = get_empty_message_digest(device_data, + &zero_hash[0], &zero_hash_size); + if (!ret && likely(zero_hash_size == ctx->digestsize)) + memcpy(req->result, &zero_hash[0], + ctx->digestsize); + else + dev_err(device_data->dev, "[%s] ret=%d, or wrong " + "digest size? %s", __func__, ret, + (zero_hash_size == ctx->digestsize) ? + "true" : "false"); + /** + * Empty message digest copied to req->result, or return error + */ + goto out_power; + } else { + ret = init_hash_hw(device_data, ctx); if (ret) { - dev_err(device_data->dev, "[%s] hash_resume_state() " + dev_err(device_data->dev, "[%s] init_hash_hw() " "failed!", __func__); goto out_power; } @@ -895,7 +1016,6 @@ static int ahash_final(struct ahash_request *req) hash_messagepad(device_data, ctx->state.buffer, ctx->state.index); - hash_get_digest(device_data, digest, ctx->config.algorithm); memcpy(req->result, digest, ctx->digestsize); @@ -906,16 +1026,7 @@ out_power: __func__); out: - spin_lock(&device_data->ctx_lock); - device_data->current_ctx = NULL; - ctx->device = NULL; - spin_unlock(&device_data->ctx_lock); - - /* - * The down_interruptible part for this semaphore is called in - * cryp_get_device_data. 
- */ - up(&driver_data.device_allocation); + release_hash_device(device_data); return ret; } @@ -925,7 +1036,7 @@ static int ahash_sha1_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME "[%s]: (ctx=0x%x)!", __func__, (u32) ctx); + pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx); ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA1; @@ -940,7 +1051,7 @@ static int ahash_sha256_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME "[%s]: (ctx=0x%x)!", __func__, (u32) ctx); + pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx); ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA256; @@ -952,8 +1063,11 @@ static int ahash_sha256_init(struct ahash_request *req) static int ahash_sha1_digest(struct ahash_request *req) { - int ret2, ret1 = ahash_sha1_init(req); + int ret2, ret1; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + ret1 = ahash_sha1_init(req); if (ret1) goto out; @@ -966,8 +1080,11 @@ out: static int ahash_sha256_digest(struct ahash_request *req) { - int ret2, ret1 = ahash_sha256_init(req); + int ret2, ret1; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + ret1 = ahash_sha256_init(req); if (ret1) goto out; @@ -1024,19 +1141,20 @@ static struct ahash_alg *u8500_ahash_algs[] = { /** * hash_algs_register_all - */ -static int ahash_algs_register_all(void) +static int ahash_algs_register_all(struct hash_device_data *device_data) { int ret; int i; int count; - pr_debug("[%s]", __func__); + dev_dbg(device_data->dev, "[%s]", __func__); for (i = 0; i < ARRAY_SIZE(u8500_ahash_algs); i++) { ret = crypto_register_ahash(u8500_ahash_algs[i]); if (ret) { count = i; - pr_err("[%s] alg registration failed", + dev_err(device_data->dev, "[%s] alg registration" + " failed", u8500_ahash_algs[i]->halg.base.cra_driver_name); goto unreg; } @@ -1051,11 +1169,11 @@ unreg: /** * hash_algs_unregister_all - */ -static void ahash_algs_unregister_all(void) +static void ahash_algs_unregister_all(struct hash_device_data *device_data) { int i; - pr_debug(DEV_DBG_NAME " [%s]", __func__); + dev_dbg(device_data->dev, "[%s]", __func__); for (i = 0; i < ARRAY_SIZE(u8500_ahash_algs); i++) crypto_unregister_ahash(u8500_ahash_algs[i]); @@ -1145,7 +1263,7 @@ static int u8500_hash_probe(struct platform_device *pdev) /* ... and signal that a new device is available. 
*/ up(&driver_data.device_allocation); - ret = ahash_algs_register_all(); + ret = ahash_algs_register_all(device_data); if (ret) { dev_err(dev, "[%s] ahash_algs_register_all() " "failed!", __func__); @@ -1155,7 +1273,7 @@ static int u8500_hash_probe(struct platform_device *pdev) if (hash_disable_power(device_data, false)) dev_err(dev, "[%s]: hash_disable_power() failed!", __func__); - dev_info(dev, "[%s] successfully probed", __func__); + dev_info(dev, "[%s] successfully probed\n", __func__); return 0; out_power: @@ -1221,7 +1339,7 @@ static int u8500_hash_remove(struct platform_device *pdev) /* If this was the last device, remove the services */ if (list_empty(&driver_data.device_list.k_list)) - ahash_algs_unregister_all(); + ahash_algs_unregister_all(device_data); if (hash_disable_power(device_data, false)) dev_err(dev, "[%s]: hash_disable_power() failed", @@ -1281,7 +1399,7 @@ static void u8500_hash_shutdown(struct platform_device *pdev) /* If this was the last device, remove the services */ if (list_empty(&driver_data.device_list.k_list)) - ahash_algs_unregister_all(); + ahash_algs_unregister_all(device_data); iounmap(device_data->base); @@ -1387,7 +1505,7 @@ static struct platform_driver hash_driver = { */ static int __init u8500_hash_mod_init(void) { - pr_debug("[%s] is called!", __func__); + pr_debug(DEV_DBG_NAME " [%s] is called!", __func__); klist_init(&driver_data.device_list, NULL, NULL); /* Initialize the semaphore to 0 devices (locked state) */ @@ -1401,7 +1519,8 @@ static int __init u8500_hash_mod_init(void) */ static void __exit u8500_hash_mod_fini(void) { - pr_debug("[%s] is called!", __func__); + pr_debug(DEV_DBG_NAME " [%s] is called!", __func__); + platform_driver_unregister(&hash_driver); return; } @@ -1412,5 +1531,5 @@ module_exit(u8500_hash_mod_fini); MODULE_DESCRIPTION("Driver for ST-Ericsson U8500 HASH engine."); MODULE_LICENSE("GPL"); -MODULE_ALIAS("sha1-u8500"); -MODULE_ALIAS("sha256-u8500"); +MODULE_ALIAS("sha1-all"); +MODULE_ALIAS("sha256-all"); -- cgit v1.2.3 From 1aa196c780eafb6bc5d26c80638ad0353d60e1fd Mon Sep 17 00:00:00 2001 From: Berne Hebark Date: Tue, 31 May 2011 08:57:30 +0200 Subject: crypto: ux500: hash: HMAC (sha1/sha256) support - HMAC (sha1/sha256) support to the hash driver. - Empty message fix ST-Ericsson ID: 348475 ST-Ericsson Linux next: Not tested, ER 320876 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: I38961f4e8c1af927ad650743a74ebcf1e8ab4ad4 Signed-off-by: Berne Hebark Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/25652 Reviewed-by: QATOOLS Reviewed-by: QATEST Reviewed-by: Jonas ABERG --- drivers/crypto/ux500/hash/hash_alg.h | 2 +- drivers/crypto/ux500/hash/hash_core.c | 337 ++++++++++++++++++++++++++++++---- 2 files changed, 307 insertions(+), 32 deletions(-) diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h index 2895c2ffe5e..299f0bacc2c 100644 --- a/drivers/crypto/ux500/hash/hash_alg.h +++ b/drivers/crypto/ux500/hash/hash_alg.h @@ -297,7 +297,7 @@ struct hash_config { * @device Pointer to the device structure. */ struct hash_ctx { - u8 key[HASH_BLOCK_SIZE]; + u8 *key; u32 keylen; u8 updated; struct hash_state state; diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 9d21b5393c6..b042808496c 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -39,9 +39,9 @@ * Pre-calculated empty message digests. 
*/ static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { - 0xDA, 0x39, 0xA3, 0xEE, 0x5E, 0x6B, 0x4B, 0x0D, - 0x32, 0x55, 0xBF, 0xEF, 0x95, 0x60, 0x18, 0x90, - 0xAF, 0xD8, 0x07, 0x09 + 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, + 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, + 0xaf, 0xd8, 0x07, 0x09 }; static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { @@ -51,6 +51,21 @@ static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 }; +/* HMAC-SHA1, no key */ +static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { + 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, + 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63, + 0x70, 0x69, 0x0e, 0x1d +}; + +/* HMAC-SHA256, no key */ +static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { + 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec, + 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5, + 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53, + 0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad +}; + /** * struct hash_driver_data - data specific to the driver. * @@ -104,13 +119,15 @@ static void release_hash_device(struct hash_device_data *device_data) * @device_data: Structure for the hash device. * @zero_hash: Buffer to return the empty message digest. * @zero_hash_size: Hash size of the empty message digest. + * @zero_digest: True if zero_digest returned. */ static int get_empty_message_digest( struct hash_device_data *device_data, - u8 *zero_hash, u32 *zero_hash_size) + u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest) { int ret = 0; struct hash_ctx *ctx = device_data->current_ctx; + *zero_digest = false; /** * Caller responsible for ctx != NULL. @@ -118,12 +135,16 @@ static int get_empty_message_digest( if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { if (HASH_ALGO_SHA1 == ctx->config.algorithm) { - zero_hash = zero_message_hash_sha1; + memcpy(zero_hash, &zero_message_hash_sha1[0], + SHA1_DIGEST_SIZE); *zero_hash_size = SHA1_DIGEST_SIZE; + *zero_digest = true; } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { - zero_hash = zero_message_hash_sha256; + memcpy(zero_hash, &zero_message_hash_sha256[0], + SHA256_DIGEST_SIZE); *zero_hash_size = SHA256_DIGEST_SIZE; + *zero_digest = true; } else { dev_err(device_data->dev, "[%s] " "Incorrect algorithm!" @@ -131,14 +152,33 @@ static int get_empty_message_digest( ret = -EINVAL; goto out; } - } else { - dev_err(device_data->dev, "[%s] " - "Incorrect hash mode!" - , __func__); - ret = -EINVAL; - goto out; + } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) { + if (!ctx->keylen) { + if (HASH_ALGO_SHA1 == ctx->config.algorithm) { + memcpy(zero_hash, &zero_message_hmac_sha1[0], + SHA1_DIGEST_SIZE); + *zero_hash_size = SHA1_DIGEST_SIZE; + *zero_digest = true; + } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { + memcpy(zero_hash, &zero_message_hmac_sha256[0], + SHA256_DIGEST_SIZE); + *zero_hash_size = SHA256_DIGEST_SIZE; + *zero_digest = true; + } else { + dev_err(device_data->dev, "[%s] " + "Incorrect algorithm!" + , __func__); + ret = -EINVAL; + goto out; + } + } else { + dev_dbg(device_data->dev, "[%s] Continue hash " + "calculation, since hmac key avalable", + __func__); + } } out: + return ret; } @@ -291,6 +331,52 @@ static int hash_get_device_data(struct hash_ctx *ctx, return 0; } +/** + * hash_hw_write_key - Writes the key to the hardware registries. + * + * @device_data: Structure for the hash device. + * @key: Key to be written. + * @keylen: The lengt of the key. + * + * Note! 
This function DOES NOT write to the NBLW registry, even though + * specified in the the hw design spec. Either due to incorrect info in the + * spec or due to a bug in the hw. + */ +static void hash_hw_write_key(struct hash_device_data *device_data, + const u8 *key, unsigned int keylen) +{ + u32 word = 0; + + HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); + while (keylen >= 4) { + word = ((u32) (key[3] & 0xff) << 24) | + ((u32) (key[2] & 0xff) << 16) | + ((u32) (key[1] & 0xff) << 8) | + ((u32) (key[0] & 0xff)); + + HASH_SET_DIN(word); + keylen -= 4; + key += 4; + } + + /* Take care of the remaining bytes in the last word */ + if (keylen) { + word = 0; + while (keylen) { + word |= (key[keylen - 1] << (8 * (keylen - 1))); + keylen--; + } + HASH_SET_DIN(word); + } + while (device_data->base->str & HASH_STR_DCAL_MASK) + cpu_relax(); + + HASH_SET_DCAL; + + while (device_data->base->str & HASH_STR_DCAL_MASK) + cpu_relax(); +} + /** * init_hash_hw - Initialise the hash hardware for a new calculation. * @device_data: Structure for the hash device. @@ -315,6 +401,9 @@ static int init_hash_hw(struct hash_device_data *device_data, hash_begin(device_data, ctx); + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) + hash_hw_write_key(device_data, ctx->key, ctx->keylen); + return ret; } @@ -331,6 +420,9 @@ static int hash_init(struct ahash_request *req) pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes); + if (!ctx->key) + ctx->keylen = 0; + memset(&ctx->state, 0, sizeof(struct hash_state)); ctx->updated = 0; return 0; @@ -384,6 +476,7 @@ static void hash_messagepad(struct hash_device_data *device_data, { dev_dbg(device_data->dev, "[%s] (bytes in final msg=%d))", __func__, index_bytes); + /* * Clear hash str register, only clear NBLW * since DCAL will be reset by hardware. @@ -509,10 +602,24 @@ int hash_setconfiguration(struct hash_device_data *device_data, * MODE bit. This bit selects between HASH or HMAC mode for the * selected algorithm. 0b0 = HASH and 0b1 = HMAC. 
*/ - if (HASH_OPER_MODE_HASH == config->oper_mode) { + if (HASH_OPER_MODE_HASH == config->oper_mode) HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_MODE_MASK); - } else { /* HMAC mode or wrong hash mode */ + else if (HASH_OPER_MODE_HMAC == config->oper_mode) { + HASH_SET_BITS(&device_data->base->cr, + HASH_CR_MODE_MASK); + if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) { + /* Truncate key to blocksize */ + dev_dbg(device_data->dev, "[%s] LKEY set", __func__); + HASH_SET_BITS(&device_data->base->cr, + HASH_CR_LKEY_MASK); + } else { + dev_dbg(device_data->dev, "[%s] LKEY cleared", + __func__); + HASH_CLEAR_BITS(&device_data->base->cr, + HASH_CR_LKEY_MASK); + } + } else { /* Wrong hash mode */ ret = -EPERM; dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", __func__); @@ -700,6 +807,7 @@ int hash_hw_update(struct ahash_request *req) data_buffer = walk.data; ret = hash_process_data(device_data, ctx, msg_length, data_buffer, buffer, &index); + if (ret) { dev_err(device_data->dev, "[%s] hash_internal_hw_" "update() failed!", __func__); @@ -959,7 +1067,7 @@ static int ahash_final(struct ahash_request *req) struct hash_device_data *device_data; u8 digest[SHA256_DIGEST_SIZE]; - pr_debug(DEV_DBG_NAME "[%s] ", __func__); + pr_debug(DEV_DBG_NAME " [%s] ", __func__); ret = hash_get_device_data(ctx, &device_data); if (ret) @@ -983,29 +1091,34 @@ static int ahash_final(struct ahash_request *req) "failed!", __func__); goto out_power; } - } else if (!ctx->state.index) { + } else if (req->nbytes == 0 && ctx->keylen == 0) { u8 zero_hash[SHA256_DIGEST_SIZE]; u32 zero_hash_size = 0; - + bool zero_digest = false; /** * Use a pre-calculated empty message digest * (workaround since hw return zeroes, hw bug!?) */ - ret = get_empty_message_digest(device_data, - &zero_hash[0], &zero_hash_size); - if (!ret && likely(zero_hash_size == ctx->digestsize)) - memcpy(req->result, &zero_hash[0], - ctx->digestsize); - else + ret = get_empty_message_digest(device_data, &zero_hash[0], + &zero_hash_size, &zero_digest); + if (!ret && likely(zero_hash_size == ctx->digestsize) && + zero_digest) { + memcpy(req->result, &zero_hash[0], ctx->digestsize); + goto out_power; + } else if (!ret && !zero_digest) { + dev_dbg(device_data->dev, "[%s] HMAC zero msg with " + "key, continue...", __func__); + } else { dev_err(device_data->dev, "[%s] ret=%d, or wrong " "digest size? %s", __func__, ret, (zero_hash_size == ctx->digestsize) ? "true" : "false"); - /** - * Empty message digest copied to req->result, or return error - */ - goto out_power; - } else { + /* Return error */ + goto out_power; + } + } + + if (!ctx->updated) { ret = init_hash_hw(device_data, ctx); if (ret) { dev_err(device_data->dev, "[%s] init_hash_hw() " @@ -1014,8 +1127,19 @@ static int ahash_final(struct ahash_request *req) } } - hash_messagepad(device_data, ctx->state.buffer, - ctx->state.index); + if (ctx->state.index) + hash_messagepad(device_data, ctx->state.buffer, + ctx->state.index); + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { + unsigned int keylen = ctx->keylen; + u8 *key = ctx->key; + + dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, + ctx->keylen); + hash_hw_write_key(device_data, key, keylen); + } + hash_get_digest(device_data, digest, ctx->config.algorithm); memcpy(req->result, digest, ctx->digestsize); @@ -1028,9 +1152,38 @@ out_power: out: release_hash_device(device_data); + /** + * Allocated in setkey, and only used in HMAC. 
+ */ + kfree(ctx->key); + return ret; } +static int hash_setkey(struct crypto_ahash *tfm, + const u8 *key, unsigned int keylen, int alg) +{ + int ret = 0; + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s] keylen: %d", __func__, keylen); + + /** + * Freed in final. + */ + ctx->key = kmalloc(keylen, GFP_KERNEL); + if (!ctx->key) { + pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key " + "for %d\n", __func__, alg); + return -ENOMEM; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + return ret; + } + static int ahash_sha1_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -1095,6 +1248,86 @@ out: return ret1 ? ret1 : ret2; } +static int hmac_sha1_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx); + + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = HASH_ALGO_SHA1; + ctx->config.oper_mode = HASH_OPER_MODE_HMAC; + ctx->digestsize = SHA1_DIGEST_SIZE; + + return hash_init(req); +} + +static int hmac_sha256_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + + pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx); + + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = HASH_ALGO_SHA256; + ctx->config.oper_mode = HASH_OPER_MODE_HMAC; + ctx->digestsize = SHA256_DIGEST_SIZE; + + return hash_init(req); +} + +static int hmac_sha1_digest(struct ahash_request *req) +{ + int ret2, ret1; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ret1 = hmac_sha1_init(req); + if (ret1) + goto out; + + ret1 = ahash_update(req); + ret2 = ahash_final(req); + +out: + return ret1 ? ret1 : ret2; +} + +static int hmac_sha256_digest(struct ahash_request *req) +{ + int ret2, ret1; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ret1 = hmac_sha256_init(req); + if (ret1) + goto out; + + ret1 = ahash_update(req); + ret2 = ahash_final(req); + +out: + return ret1 ? 
ret1 : ret2; +} + +static int hmac_sha1_setkey(struct crypto_ahash *tfm, + const u8 *key, unsigned int keylen) +{ + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); +} + +static int hmac_sha256_setkey(struct crypto_ahash *tfm, + const u8 *key, unsigned int keylen) +{ + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256); +} + static struct ahash_alg ahash_sha1_alg = { .init = ahash_sha1_init, .update = ahash_update, @@ -1130,12 +1363,52 @@ static struct ahash_alg ahash_sha256_alg = { } }; +static struct ahash_alg hmac_sha1_alg = { + .init = hmac_sha1_init, + .update = ahash_update, + .final = ahash_final, + .digest = hmac_sha1_digest, + .setkey = hmac_sha1_setkey, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct hash_ctx), + .halg.base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac-sha1-u8500", + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_ctx), + .cra_type = &crypto_ahash_type, + .cra_module = THIS_MODULE, + } +}; + +static struct ahash_alg hmac_sha256_alg = { + .init = hmac_sha256_init, + .update = ahash_update, + .final = ahash_final, + .digest = hmac_sha256_digest, + .setkey = hmac_sha256_setkey, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct hash_ctx), + .halg.base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "hmac-sha256-u8500", + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_ctx), + .cra_type = &crypto_ahash_type, + .cra_module = THIS_MODULE, + } +}; + /** * struct hash_alg *u8500_hash_algs[] - */ static struct ahash_alg *u8500_ahash_algs[] = { &ahash_sha1_alg, - &ahash_sha256_alg + &ahash_sha256_alg, + &hmac_sha1_alg, + &hmac_sha256_alg }; /** @@ -1533,3 +1806,5 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS("sha1-all"); MODULE_ALIAS("sha256-all"); +MODULE_ALIAS("hmac-sha1-all"); +MODULE_ALIAS("hmac-sha256-all"); -- cgit v1.2.3 From 974980454bf6f12b14f8431e05713cea53444278 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Sun, 16 Oct 2011 11:03:52 +0200 Subject: ux500: security: update commit 1d3e28fa4e85d54434614575f37a4171e32dfe0c Author: Mian Yousaf Kaukab Date: Tue Aug 23 12:28:26 2011 +0200 [Android]: pm: usecase: limit cpu frequency to 400MHz during voice-call ST Ericsson ID: 357594 commit 70f52392190a111758836d5fca7fb533ad8b4453 Author: Rickard Andersson Date: Tue Aug 23 11:05:58 2011 +0200 ux500: cpuidle: Correct PRCMU error messages Show correct error messages when PRCMU requested sleep fails. ST-Ericsson ID: - commit 912e987c1e46815c8cf128bce3d7077d3b05e7d6 Author: Rickard Andersson Date: Tue Aug 23 11:00:01 2011 +0200 ux500: cpuidle: Optimize stack sizes Optimze the stack sizes for arm registers and CP15 registers ST-Ericsson ID: - commit df0c08abadbdbe19c8c00ca09c3197f3fb0ead29 Author: Rickard Andersson Date: Mon Aug 22 11:19:13 2011 +0200 ux500: cpuidle: GIC freeze delay optimized GIC freeze delay was previously unnecessary long. 
ST-Ericsson ID: - commit a28fec98e99d5115b62d3d704452f427ca2a20ac Author: Rajagopala V Date: Wed Aug 24 13:16:39 2011 +0530 u5500: cpuidle: fix coverity warning check for clockevents_program_event return value in cpuidle driver during wakeup ST-Ericsson ID: ER356883 commit b716ed3140664c6b4b80381b18945032b792b853 Author: Rabin Vincent Date: Fri Aug 19 14:32:29 2011 +0530 u5500: enable PRCMU QoS and debugging Dummy implementations are provided to get it to build. ST-Ericsson ID: 348762 commit 34cd29c2f8cd1ce8fa51be6d33624b441416d478 Author: om prakash Date: Tue Aug 23 14:08:52 2011 +0530 cpuidle:Removed the CHECKED_RETURN error Removed the CHECKED_RETURN coverity error in cpuidle. ST-Ericsson ID: 354434 commit d9d500b6742ff1fcb12747cb55d7f26c89ef5c96 Author: Rabin Vincent Date: Mon Aug 22 10:23:36 2011 +0530 u5500: support cpufreq only on v2 Support cpufreq only on 5500v2+ only, since that is where the other power management features will be enabled. ST-Ericsson ID: 355981 commit 74cf0e658bb9abe240d04427d9043f145dd0505b Author: Hemanth Puranik Date: Wed Aug 3 10:29:10 2011 +0530 U5500: Print PRCMU firware version ST-Ericsson ID: WP332193 commit bb04cadb6948a3a68f409fa7828457ce64540172 Author: Vijaya Kumar Kilari Date: Thu Aug 11 11:49:27 2011 +0530 U5500: Add MSP1 and Cable detect clock support MSP1 and CD clocks are managed by PRCMU FW so special handling for these clocks are required ST-Ericsson ID: 332193 commit 0d9b20560a729b4674d9f55c77572db04272edbd Author: Rajagopala V Date: Fri Aug 5 16:52:41 2011 +0530 u5500: prcmu: add irqs for db5500 temperature sensor add irqs to support db5500 temp sensor high and low interrupts so as to clearly distinguish whether sensor temperature has crossed min/max values ST-Ericsson ID: WP257616 commit 63877225cb09c38d0fbea2dcf01eb6670eb05549 Author: Jonas Aaberg Date: Wed Jul 13 09:01:22 2011 +0200 ARM: u8500: pm: Do not touch TPIU registers if JTAG disabled If JTAG is disabled, the Linux cannot touch the TPIU registers. ST-Ericsson ID: 349265 commit 9fcee5876ff138e85356b63d392b2a050a68601e Author: Jonas Aaberg Date: Mon Jul 11 15:27:49 2011 +0200 ARM: ux500: prcmu-debug: Add ARM opp to debugfs Add arm opp to debugfs interface, plus some code clean-up/simplification. ST-Ericsson ID: - commit 0e1f18385d2416e03af308cac1e38997f6bbf044 Author: Jonas Aaberg Date: Thu Jul 14 12:49:30 2011 +0200 ARM: ux500: context: Do not touch TPIU when not clocked ST-Ericsson ID: 352300 commit 0ebdb6b6b2a7f9924bd2785654f0abfb1473dda0 Author: Jayarami Reddy Date: Thu Jul 14 14:39:07 2011 +0530 u5500: fix to boot the kernel in DB5500 commit a20f6a9088ac5d34634cd26207dd5a072f04c37b Author: Jonas Aaberg Date: Mon Jul 11 15:26:40 2011 +0200 ARM: ux500: prcmu-qos: Add ARM OPP qos Make it possible to request lowest ARM OPP. Must be bound to cpufreq to actually do something. ST-Ericsson ID: - commit be6842df3ee21a9aa00216c84e93a643b4ddeabe Author: Jonas Aaberg Date: Wed Jul 13 09:00:30 2011 +0200 ARM: ux500: Read product settings at boot ST-Ericsson ID: 349265 commit 94843c6131bac39aa8e7d0ec4c9c2b34e2819c92 Author: Jonas Aaberg Date: Wed Jul 13 08:56:38 2011 +0200 drivers: tee: Update with product id configuration Add structs and defines needed to detect product settings. ST-Ericsson ID: 349265 commit 4c70d615b782965c064b3f97963eff5a290acf68 Author: Jonas Aaberg Date: Wed Jul 6 08:26:41 2011 +0200 ARM: ux500: prcmu-wdog: Remove check for fw bug In prcmu fw version 3.4.4 the issue with longer intervalls than 131 s was fixed. 
ST-Ericsson ID: - commit f6ccbf262ddebe39584b4a4c01cfa16af1fbfac6 Author: Jonas Aaberg Date: Tue Jul 12 08:53:42 2011 +0200 ARM: ux500: cpuidle: Remove duplicated ApIdle state The ARM PLL is handled automatic by the prcmu fw and cpuidle cannot affect it. Remove duplicated ApIdle state that does the same as the other ApIdle state. ST-Ericsson ID: - commit bdf3c3dd8c22ffe0fbd8c674e8e6bdab83a5ec01 Author: Rabin Vincent Date: Fri Jul 8 14:33:23 2011 +0530 ux500: support ApDeepSleep on 5500 ST-Ericsson ID: 332193 commit 5326d7744cd226e67253774f30a2bb57c9b2badc Author: Rabin Vincent Date: Mon Jul 11 11:27:45 2011 +0530 u5500: fix prcmu_get_arm_opp() prcmu_set_arm_opp() maps between logical ARM OPP values and PRCMU firmware values, so prcmu_get_arm_opp() should do it too. ST-Ericsson ID: 332226 commit 6422856927e1230197ae674795eff6538df885e5 Author: Jonas Aaberg Date: Thu Jul 7 08:33:59 2011 +0200 ARM: u8500: prcmu-dbg: display AVS settings Add debugfs node showing AVS settings. root@ME:/ cat /debugfs/prcmu/avs VBB_RET : 0x 0 VBB_MAX_OPP : 0xdb VBB_100_OPP : 0xdb VBB_50_OPP : 0xdb VARM_MAX_OPP : 0x2f VARM_100_OPP : 0x2e VARM_50_OPP : 0x1d VARM_RET : 0x 0 VAPE_100_OPP : 0x2a VAPE_50_OPP : 0x1a VMOD_100_OPP : 0x29 VMOD_50_OPP : 0x1a VSAFE : 0x29 ST-Ericsson ID: - commit 3184873f10bff0c7c54db75d9c2694e21ebc40b0 Author: Pawel Szyszuk Date: Wed Jun 22 16:03:29 2011 +0200 ARM: U5500: PRCMU CLKOUTx configuration API U5500 API for setting the programmable CLKOUTx source and divisor. New API used for setting the sources of camera clocks. ST-Ericsson ID: - commit 98e9cfc32a25f6cf3b5d4c3456bb6ff6a34ff9cb Author: Mian Yousaf Kaukab Date: Thu Jun 23 16:07:48 2011 +0200 [ANDROID]: pm: usecase: add sysfs interface to disable the governor Move all the code to control the state of the governor in a separate function. ST-Ericsson ID: CR339643 commit 6e9ab8ab3013d0b372a51d94d617d24fd8b38664 Author: Rickard Andersson Date: Tue Jun 7 13:04:08 2011 +0200 ARM: ux500: pm: Turn off unnecessary GIC IRQs in deep sleep In the sleep state ApDeepSleep turn off all IRQs in the GIC except for the PRCMU IRQs ST-Ericsson ID: ER338876 commit ba5f28731b27386cb90cf87c4e7b1910e09474a9 Author: Rickard Andersson Date: Fri May 27 08:56:47 2011 +0200 ARM: ux500: pm: Deepsleep bugfix and optimization Fixing deep sleep sync problems. Also cache clean and saves to backup RAM is now only done when really needed. ST-Ericsson ID: ER338876 commit 0415c755958de0d613a9bd52f73b820cb3a2b916 Author: Rabin Vincent Date: Tue Jun 28 18:18:36 2011 +0530 u5500: handle SDMMC0 clock change on DB5500v2 On DB5500v2, SDMMC0 is parented to SPARE1CLK instead of SDMMCCLK. Also, correct the PRCM_IRDACLK_MGT register address which is wrong even for v1. ST-Ericsson ID: 349062 commit 2036360d62f3c3f0cd722d751ba90a8739034c0f Author: Rabin Vincent Date: Mon Jun 27 20:25:06 2011 +0530 u5500: cpufreq: DB5500v2 support ST-Ericsson ID: 349062 commit 190b11834463e835de2d792116dfd8673d775752 Author: Rabin Vincent Date: Wed Jun 22 07:45:00 2011 +0530 u5500: allow SUSPEND_STANDBY and CPUIDLE to be enabled ST-Ericsson ID: 332226 commit 7c4906d6ee888df46baa64b690dfdfaf44502d86 Author: Pawel Szyszuk Date: Mon Jun 20 15:28:34 2011 +0200 ARM: u5500: PRCMU reset API Added API for rebooting the board and for getting the last reboot code. 
ST-Ericsson ID: 341245 Change-Id: Ibbcd9e3528cd605c724b9c2c88ae3b41a27f2f1c --- drivers/tee/tee_driver.c | 2 +- include/linux/tee.h | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/drivers/tee/tee_driver.c b/drivers/tee/tee_driver.c index 73c62871ddc..5ae0cc9508b 100644 --- a/drivers/tee/tee_driver.c +++ b/drivers/tee/tee_driver.c @@ -636,7 +636,7 @@ static void __exit tee_exit(void) misc_deregister(&tee_dev); } -module_init(tee_init); +subsys_initcall(tee_init); module_exit(tee_exit); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/include/linux/tee.h b/include/linux/tee.h index 4928e4dca1f..8b71224ac77 100644 --- a/include/linux/tee.h +++ b/include/linux/tee.h @@ -74,8 +74,44 @@ /* * Exposed functions (command_id) in the static TA */ +#define TEE_STA_GET_PRODUCT_CONFIG 10 #define TEE_STA_SET_L2CC_PREFETCH_CTRL_REGISTER 11 +/* Flags indicating run-time environment */ +#define TEE_RT_FLAGS_NORMAL 0x00000000 +#define TEE_RT_FLAGS_MASK_ITP_PROD 0x00000001 +#define TEE_RT_FLAGS_MODEM_DEBUG 0x00000002 +#define TEE_RT_FLAGS_RNG_REG_PUBLIC 0x00000004 +#define TEE_RT_FLAGS_JTAG_ENABLED 0x00000008 + +/* + * Product id numbers + */ +#define TEE_PRODUCT_ID_UNKNOWN 0 +#define TEE_PRODUCT_ID_8400 1 +#define TEE_PRODUCT_ID_8500 2 +#define TEE_PRODUCT_ID_9500 3 +#define TEE_PRODUCT_ID_5500 4 +#define TEE_PRODUCT_ID_7400 5 +#define TEE_PRODUCT_ID_8500C 6 + +/* Flags indicating fuses */ +#define TEE_FUSE_FLAGS_MODEM_DISABLE 0x00000001 + +/** + * struct tee_product_config - System configuration structure. + * + * @product_id: Product identification. + * @rt_flags: Runtime configuration flags. + * @fuse_flags: Fuse flags. + * + */ +struct tee_product_config { + uint32_t product_id; + uint32_t rt_flags; + uint32_t fuse_flags; +}; + /** * struct tee_uuid - Structure that represent an uuid. * @timeLow: The low field of the time stamp. -- cgit v1.2.3 From f4ec907012bf3f02a66c6c9dcb39bef6b413b89f Mon Sep 17 00:00:00 2001 From: Berne Hebark Date: Thu, 8 Sep 2011 14:23:57 +0530 Subject: ux500: cryp/hash: Updated for both u8500 & u5500 cryp1 & hash1 updated to be compatible on both u8500 and u5500: - added to u5500_defconfig. - settings from devices.c to board-mop500.c & board-u5500.c. - dynamic driver registration in board-mop500.c & board-u5500.c. - added cryp1 to clock-db5500.c and renamed cryp to cryp0. - added function dbx500_add_platform_device_noirq to devices-common.c. - added cryp1 and hash1 inline functions to devices-common.h (dbx500_add_cryp1). - defines added to devices-db5500.h and devices-db8500.h. - u8500_cryp/hash changed to ux500_cryp/hash. - update to handle different value for CRYP_PERIPHERAL_ID2 between u8500 and u5500 (more info in ER336742). 
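With the hmac(sha1)/hmac(sha256) transforms from the previous patch and the ux500-wide registration below, an in-kernel consumer reaches the hardware through the ordinary ahash API. A minimal sketch of such a caller (the helper and its completion wrapper are hypothetical, not part of these patches; buffers handed to the scatterlist should be kmalloc'd memory, not stack memory, and 'out' must hold crypto_ahash_digestsize(tfm) bytes):

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct hmac_result {
        struct completion completion;
        int err;
};

static void hmac_complete(struct crypto_async_request *req, int err)
{
        struct hmac_result *res = req->data;

        if (err == -EINPROGRESS)
                return;
        res->err = err;
        complete(&res->completion);
}

static int hmac_sha256_once(const u8 *key, unsigned int keylen,
                            const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        struct hmac_result res;
        int ret;

        tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        init_completion(&res.completion);

        ret = crypto_ahash_setkey(tfm, key, keylen);
        if (ret)
                goto out_free_tfm;

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   hmac_complete, &res);
        sg_init_one(&sg, data, len);
        ahash_request_set_crypt(req, &sg, out, len);

        ret = crypto_ahash_digest(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                wait_for_completion(&res.completion);
                ret = res.err;
        }

        ahash_request_free(req);
out_free_tfm:
        crypto_free_ahash(tfm);
        return ret;
}

Userspace can reach the same transforms through the AF_ALG socket interface using the "hmac(sha1)"/"hmac(sha256)" names; only the cra_driver_name changes with the u8500 to ux500 rename below.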
ST-Ericsson ID: 257104 ST-Ericsson Linux next: NA ST-Ericsson FOSS-OUT ID: Trivial Ref: Commit-id: Ibe72c72d8f9d781008164f1bf24ceafa82ac9083 Signed-off-by: Avinash A Change-Id: I08a8f71acb89be99cbf8b54390be569e2369c73b Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/30437 Reviewed-by: Avinash A Tested-by: Avinash A --- arch/arm/mach-ux500/include/mach/crypto-ux500.h | 5 +- drivers/crypto/ux500/cryp/Makefile | 4 +- drivers/crypto/ux500/cryp/cryp.c | 27 +++++--- drivers/crypto/ux500/cryp/cryp_core.c | 83 ++++++++++++------------- drivers/crypto/ux500/cryp/cryp_p.h | 4 +- drivers/crypto/ux500/hash/Makefile | 4 +- drivers/crypto/ux500/hash/hash_core.c | 69 ++++++++++---------- 7 files changed, 104 insertions(+), 92 deletions(-) diff --git a/arch/arm/mach-ux500/include/mach/crypto-ux500.h b/arch/arm/mach-ux500/include/mach/crypto-ux500.h index 57da88398d5..9d1e1c52c13 100644 --- a/arch/arm/mach-ux500/include/mach/crypto-ux500.h +++ b/arch/arm/mach-ux500/include/mach/crypto-ux500.h @@ -6,11 +6,14 @@ */ #ifndef _CRYPTO_UX500_H #include -#include struct cryp_platform_data { struct stedma40_chan_cfg mem_to_engine; struct stedma40_chan_cfg engine_to_mem; }; +struct hash_platform_data { + struct stedma40_chan_cfg mem_to_engine; +}; + #endif diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile index fd5e6df3861..e5d362a6f68 100644 --- a/drivers/crypto/ux500/cryp/Makefile +++ b/drivers/crypto/ux500/cryp/Makefile @@ -9,5 +9,5 @@ CFLAGS_cryp.o := -DDEBUG -O0 CFLAGS_cryp_irq.o := -DDEBUG -O0 endif -obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += u8500_cryp.o -u8500_cryp-objs := cryp.o cryp_irq.o cryp_core.o +obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o +ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index ae4fe318528..211200fed34 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -12,6 +12,8 @@ #include #include +#include + #include "cryp_p.h" #include "cryp.h" @@ -30,26 +32,33 @@ void cryp_wait_until_done(struct cryp_device_data *device_data) */ int cryp_check(struct cryp_device_data *device_data) { + int peripheralID2 = 0; + if (NULL == device_data) return -EINVAL; + if (cpu_is_u8500()) + peripheralID2 = CRYP_PERIPHERAL_ID2_DB8500; + else if (cpu_is_u5500()) + peripheralID2 = CRYP_PERIPHERAL_ID2_DB5500; + /* Check Peripheral and Pcell Id Register for CRYP */ if ((CRYP_PERIPHERAL_ID0 == - readl_relaxed(&device_data->base->periphId0)) + readl_relaxed(&device_data->base->periphId0)) && (CRYP_PERIPHERAL_ID1 == - readl_relaxed(&device_data->base->periphId1)) - && (CRYP_PERIPHERAL_ID2 == - readl_relaxed(&device_data->base->periphId2)) + readl_relaxed(&device_data->base->periphId1)) + && (peripheralID2 == + readl_relaxed(&device_data->base->periphId2)) && (CRYP_PERIPHERAL_ID3 == - readl_relaxed(&device_data->base->periphId3)) + readl_relaxed(&device_data->base->periphId3)) && (CRYP_PCELL_ID0 == - readl_relaxed(&device_data->base->pcellId0)) + readl_relaxed(&device_data->base->pcellId0)) && (CRYP_PCELL_ID1 == - readl_relaxed(&device_data->base->pcellId1)) + readl_relaxed(&device_data->base->pcellId1)) && (CRYP_PCELL_ID2 == - readl_relaxed(&device_data->base->pcellId2)) + readl_relaxed(&device_data->base->pcellId2)) && (CRYP_PCELL_ID3 == - readl_relaxed(&device_data->base->pcellId3))) { + readl_relaxed(&device_data->base->pcellId3))) { return 0; } diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 
f0aed67f29a..5893abb57dc 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -33,7 +33,6 @@ #include #include -#include #include "cryp_p.h" #include "cryp.h" @@ -1194,13 +1193,14 @@ static int cryp_hw_calculate(struct cryp_ctx *ctx) } if (hw_crypt_noxts(ctx, device_data)) - pr_err("u8500_cryp:crypX: [%s]: hw_crypt_noxts() failed!", + dev_err(device_data->dev, "[%s]: hw_crypt_noxts() failed!", __func__); out: if (cryp_disable_power(device_data->dev, device_data, false)) dev_err(device_data->dev, "[%s]: " "cryp_disable_power() failed!", __func__); + /* Release the device */ spin_lock(&device_data->ctx_lock); device_data->current_ctx = NULL; @@ -1232,7 +1232,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->datalen = ctx->blocksize; if (cryp_hw_calculate(ctx)) - pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", __func__); } @@ -1252,7 +1252,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->datalen = ctx->blocksize; if (cryp_hw_calculate(ctx)) - pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", __func__); } @@ -1272,7 +1272,7 @@ static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->datalen = ctx->blocksize; if (cryp_hw_calculate(ctx)) - pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", __func__); } @@ -1292,7 +1292,7 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->datalen = ctx->blocksize; if (cryp_hw_calculate(ctx)) - pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", __func__); } @@ -1312,7 +1312,7 @@ static void des3_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->datalen = ctx->blocksize; if (cryp_hw_calculate(ctx)) - pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", __func__); } @@ -1332,7 +1332,7 @@ static void des3_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ctx->datalen = ctx->blocksize; if (cryp_hw_calculate(ctx)) - pr_err("u8500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", + pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!", __func__); } @@ -1605,7 +1605,7 @@ static int des3_cbc_decrypt(struct ablkcipher_request *areq) */ static struct crypto_alg aes_alg = { .cra_name = "aes", - .cra_driver_name = "aes-u8500", + .cra_driver_name = "aes-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, @@ -1629,7 +1629,7 @@ static struct crypto_alg aes_alg = { */ static struct crypto_alg des_alg = { .cra_name = "des", - .cra_driver_name = "des-u8500", + .cra_driver_name = "des-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES_BLOCK_SIZE, @@ -1653,7 +1653,7 @@ static struct crypto_alg des_alg = { */ static struct crypto_alg des3_alg = { .cra_name = "des3_ede", - .cra_driver_name = "des3_ede-u8500", + .cra_driver_name = "des3_ede-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1677,7 +1677,7 @@ static struct crypto_alg des3_alg = { */ static struct crypto_alg aes_ecb_alg = { .cra_name = "ecb(aes)", - .cra_driver_name = "ecb-aes-u8500", + .cra_driver_name = "ecb-aes-ux500", .cra_priority = 100, 
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1703,7 +1703,7 @@ static struct crypto_alg aes_ecb_alg = { */ static struct crypto_alg aes_cbc_alg = { .cra_name = "cbc(aes)", - .cra_driver_name = "cbc-aes-u8500", + .cra_driver_name = "cbc-aes-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1730,7 +1730,7 @@ static struct crypto_alg aes_cbc_alg = { */ static struct crypto_alg aes_ctr_alg = { .cra_name = "ctr(aes)", - .cra_driver_name = "ctr-aes-u8500", + .cra_driver_name = "ctr-aes-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1757,7 +1757,7 @@ static struct crypto_alg aes_ctr_alg = { */ static struct crypto_alg des_ecb_alg = { .cra_name = "ecb(des)", - .cra_driver_name = "ecb-des-u8500", + .cra_driver_name = "ecb-des-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1783,7 +1783,7 @@ static struct crypto_alg des_ecb_alg = { */ static struct crypto_alg des_cbc_alg = { .cra_name = "cbc(des)", - .cra_driver_name = "cbc-des-u8500", + .cra_driver_name = "cbc-des-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1810,7 +1810,7 @@ static struct crypto_alg des_cbc_alg = { */ static struct crypto_alg des3_ecb_alg = { .cra_name = "ecb(des3_ede)", - .cra_driver_name = "ecb-des3_ede-u8500", + .cra_driver_name = "ecb-des3_ede-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1836,7 +1836,7 @@ static struct crypto_alg des3_ecb_alg = { */ static struct crypto_alg des3_cbc_alg = { .cra_name = "cbc(des3_ede)", - .cra_driver_name = "cbc-des3_ede-u8500", + .cra_driver_name = "cbc-des3_ede-ux500", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, @@ -1859,9 +1859,9 @@ static struct crypto_alg des3_cbc_alg = { }; /** - * struct crypto_alg *u8500_cryp_algs[] - + * struct crypto_alg *ux500_cryp_algs[] - */ -static struct crypto_alg *u8500_cryp_algs[] = { +static struct crypto_alg *ux500_cryp_algs[] = { &aes_alg, &des_alg, &des3_alg, @@ -1885,19 +1885,19 @@ static int cryp_algs_register_all(void) pr_debug("[%s]", __func__); - for (i = 0; i < ARRAY_SIZE(u8500_cryp_algs); i++) { - ret = crypto_register_alg(u8500_cryp_algs[i]); + for (i = 0; i < ARRAY_SIZE(ux500_cryp_algs); i++) { + ret = crypto_register_alg(ux500_cryp_algs[i]); if (ret) { count = i; pr_err("[%s] alg registration failed", - u8500_cryp_algs[i]->cra_driver_name); + ux500_cryp_algs[i]->cra_driver_name); goto unreg; } } return 0; unreg: for (i = 0; i < count; i++) - crypto_unregister_alg(u8500_cryp_algs[i]); + crypto_unregister_alg(ux500_cryp_algs[i]); return ret; } @@ -1910,11 +1910,11 @@ static void cryp_algs_unregister_all(void) pr_debug(DEV_DBG_NAME " [%s]", __func__); - for (i = 0; i < ARRAY_SIZE(u8500_cryp_algs); i++) - crypto_unregister_alg(u8500_cryp_algs[i]); + for (i = 0; i < ARRAY_SIZE(ux500_cryp_algs); i++) + crypto_unregister_alg(ux500_cryp_algs[i]); } -static int u8500_cryp_probe(struct platform_device *pdev) +static int ux500_cryp_probe(struct platform_device *pdev) { int ret; int cryp_error = 0; @@ -2071,7 +2071,7 @@ out: return ret; } -static int u8500_cryp_remove(struct platform_device *pdev) +static int ux500_cryp_remove(struct platform_device *pdev) { struct resource *res = NULL; struct resource *res_irq = NULL; @@ -2137,7 +2137,7 @@ static int u8500_cryp_remove(struct platform_device *pdev) return 0; } -static void u8500_cryp_shutdown(struct platform_device *pdev) 
+static void ux500_cryp_shutdown(struct platform_device *pdev) { struct resource *res_irq = NULL; struct cryp_device_data *device_data; @@ -2190,7 +2190,7 @@ static void u8500_cryp_shutdown(struct platform_device *pdev) } -static int u8500_cryp_suspend(struct platform_device *pdev, pm_message_t state) +static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state) { int ret; struct cryp_device_data *device_data; @@ -2234,7 +2234,7 @@ static int u8500_cryp_suspend(struct platform_device *pdev, pm_message_t state) return ret; } -static int u8500_cryp_resume(struct platform_device *pdev) +static int ux500_cryp_resume(struct platform_device *pdev) { int ret = 0; struct cryp_device_data *device_data; @@ -2274,40 +2274,39 @@ static int u8500_cryp_resume(struct platform_device *pdev) } static struct platform_driver cryp_driver = { - .probe = u8500_cryp_probe, - .remove = u8500_cryp_remove, - .shutdown = u8500_cryp_shutdown, - .suspend = u8500_cryp_suspend, - .resume = u8500_cryp_resume, + .probe = ux500_cryp_probe, + .remove = ux500_cryp_remove, + .shutdown = ux500_cryp_shutdown, + .suspend = ux500_cryp_suspend, + .resume = ux500_cryp_resume, .driver = { .owner = THIS_MODULE, .name = "cryp1" } }; -static int __init u8500_cryp_mod_init(void) +static int __init ux500_cryp_mod_init(void) { pr_debug("[%s] is called!", __func__); - klist_init(&driver_data.device_list, NULL, NULL); /* Initialize the semaphore to 0 devices (locked state) */ sema_init(&driver_data.device_allocation, 0); return platform_driver_register(&cryp_driver); } -static void __exit u8500_cryp_mod_fini(void) +static void __exit ux500_cryp_mod_fini(void) { pr_debug("[%s] is called!", __func__); platform_driver_unregister(&cryp_driver); return; } -module_init(u8500_cryp_mod_init); -module_exit(u8500_cryp_mod_fini); +module_init(ux500_cryp_mod_init); +module_exit(ux500_cryp_mod_fini); module_param(cryp_mode, int, 0); -MODULE_DESCRIPTION("Driver for ST-Ericsson U8500 CRYP crypto engine."); +MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine."); MODULE_ALIAS("aes-all"); MODULE_ALIAS("des-all"); diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h index 4b615a33fe9..0e070829edc 100644 --- a/drivers/crypto/ux500/cryp/cryp_p.h +++ b/drivers/crypto/ux500/cryp/cryp_p.h @@ -39,7 +39,9 @@ */ #define CRYP_PERIPHERAL_ID0 0xE3 #define CRYP_PERIPHERAL_ID1 0x05 -#define CRYP_PERIPHERAL_ID2 0x28 + +#define CRYP_PERIPHERAL_ID2_DB8500 0x28 +#define CRYP_PERIPHERAL_ID2_DB5500 0x29 #define CRYP_PERIPHERAL_ID3 0x00 #define CRYP_PCELL_ID0 0x0D diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile index aaa5f56a2c2..b2f90d9bac7 100644 --- a/drivers/crypto/ux500/hash/Makefile +++ b/drivers/crypto/ux500/hash/Makefile @@ -7,5 +7,5 @@ ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG CFLAGS_hash_core.o := -DDEBUG -O0 endif -obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += u8500_hash.o -u8500_hash-objs := hash_core.o +obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o +ux500_hash-objs := hash_core.o diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index b042808496c..ce2c9d645fa 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -1337,7 +1337,7 @@ static struct ahash_alg ahash_sha1_alg = { .halg.statesize = sizeof(struct hash_ctx), .halg.base = { .cra_name = "sha1", - .cra_driver_name = "sha1-u8500", + .cra_driver_name = "sha1-ux500", .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize 
= SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct hash_ctx), @@ -1354,7 +1354,7 @@ static struct ahash_alg ahash_sha256_alg = { .halg.statesize = sizeof(struct hash_ctx), .halg.base = { .cra_name = "sha256", - .cra_driver_name = "sha256-u8500", + .cra_driver_name = "sha256-ux500", .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct hash_ctx), @@ -1373,7 +1373,7 @@ static struct ahash_alg hmac_sha1_alg = { .halg.statesize = sizeof(struct hash_ctx), .halg.base = { .cra_name = "hmac(sha1)", - .cra_driver_name = "hmac-sha1-u8500", + .cra_driver_name = "hmac-sha1-ux500", .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct hash_ctx), @@ -1392,7 +1392,7 @@ static struct ahash_alg hmac_sha256_alg = { .halg.statesize = sizeof(struct hash_ctx), .halg.base = { .cra_name = "hmac(sha256)", - .cra_driver_name = "hmac-sha256-u8500", + .cra_driver_name = "hmac-sha256-ux500", .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct hash_ctx), @@ -1402,9 +1402,9 @@ static struct ahash_alg hmac_sha256_alg = { }; /** - * struct hash_alg *u8500_hash_algs[] - + * struct hash_alg *ux500_hash_algs[] - */ -static struct ahash_alg *u8500_ahash_algs[] = { +static struct ahash_alg *ux500_ahash_algs[] = { &ahash_sha1_alg, &ahash_sha256_alg, &hmac_sha1_alg, @@ -1422,20 +1422,20 @@ static int ahash_algs_register_all(struct hash_device_data *device_data) dev_dbg(device_data->dev, "[%s]", __func__); - for (i = 0; i < ARRAY_SIZE(u8500_ahash_algs); i++) { - ret = crypto_register_ahash(u8500_ahash_algs[i]); + for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++) { + ret = crypto_register_ahash(ux500_ahash_algs[i]); if (ret) { count = i; dev_err(device_data->dev, "[%s] alg registration" " failed", - u8500_ahash_algs[i]->halg.base.cra_driver_name); + ux500_ahash_algs[i]->halg.base.cra_driver_name); goto unreg; } } return 0; unreg: for (i = 0; i < count; i++) - crypto_unregister_ahash(u8500_ahash_algs[i]); + crypto_unregister_ahash(ux500_ahash_algs[i]); return ret; } @@ -1448,15 +1448,15 @@ static void ahash_algs_unregister_all(struct hash_device_data *device_data) dev_dbg(device_data->dev, "[%s]", __func__); - for (i = 0; i < ARRAY_SIZE(u8500_ahash_algs); i++) - crypto_unregister_ahash(u8500_ahash_algs[i]); + for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++) + crypto_unregister_ahash(ux500_ahash_algs[i]); } /** - * u8500_hash_probe - Function that probes the hash hardware. + * ux500_hash_probe - Function that probes the hash hardware. * @pdev: The platform device. */ -static int u8500_hash_probe(struct platform_device *pdev) +static int ux500_hash_probe(struct platform_device *pdev) { int ret = 0; struct resource *res = NULL; @@ -1571,10 +1571,10 @@ out: } /** - * u8500_hash_remove - Function that removes the hash device from the platform. + * ux500_hash_remove - Function that removes the hash device from the platform. * @pdev: The platform device. */ -static int u8500_hash_remove(struct platform_device *pdev) +static int ux500_hash_remove(struct platform_device *pdev) { struct resource *res; struct hash_device_data *device_data; @@ -1633,10 +1633,10 @@ static int u8500_hash_remove(struct platform_device *pdev) } /** - * u8500_hash_shutdown - Function that shutdown the hash device. + * ux500_hash_shutdown - Function that shutdown the hash device. 
* @pdev: The platform device */ -static void u8500_hash_shutdown(struct platform_device *pdev) +static void ux500_hash_shutdown(struct platform_device *pdev) { struct resource *res = NULL; struct hash_device_data *device_data; @@ -1686,11 +1686,11 @@ static void u8500_hash_shutdown(struct platform_device *pdev) } /** - * u8500_hash_suspend - Function that suspends the hash device. + * ux500_hash_suspend - Function that suspends the hash device. * @pdev: The platform device. * @state: - */ -static int u8500_hash_suspend(struct platform_device *pdev, pm_message_t state) +static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state) { int ret; struct hash_device_data *device_data; @@ -1726,10 +1726,10 @@ static int u8500_hash_suspend(struct platform_device *pdev, pm_message_t state) } /** - * u8500_hash_resume - Function that resume the hash device. + * ux500_hash_resume - Function that resume the hash device. * @pdev: The platform device. */ -static int u8500_hash_resume(struct platform_device *pdev) +static int ux500_hash_resume(struct platform_device *pdev) { int ret = 0; struct hash_device_data *device_data; @@ -1762,11 +1762,11 @@ static int u8500_hash_resume(struct platform_device *pdev) } static struct platform_driver hash_driver = { - .probe = u8500_hash_probe, - .remove = u8500_hash_remove, - .shutdown = u8500_hash_shutdown, - .suspend = u8500_hash_suspend, - .resume = u8500_hash_resume, + .probe = ux500_hash_probe, + .remove = ux500_hash_remove, + .shutdown = ux500_hash_shutdown, + .suspend = ux500_hash_suspend, + .resume = ux500_hash_resume, .driver = { .owner = THIS_MODULE, .name = "hash1", @@ -1774,12 +1774,11 @@ static struct platform_driver hash_driver = { }; /** - * u8500_hash_mod_init - The kernel module init function. + * ux500_hash_mod_init - The kernel module init function. */ -static int __init u8500_hash_mod_init(void) +static int __init ux500_hash_mod_init(void) { pr_debug(DEV_DBG_NAME " [%s] is called!", __func__); - klist_init(&driver_data.device_list, NULL, NULL); /* Initialize the semaphore to 0 devices (locked state) */ sema_init(&driver_data.device_allocation, 0); @@ -1788,9 +1787,9 @@ static int __init u8500_hash_mod_init(void) } /** - * u8500_hash_mod_fini - The kernel module exit function. + * ux500_hash_mod_fini - The kernel module exit function. */ -static void __exit u8500_hash_mod_fini(void) +static void __exit ux500_hash_mod_fini(void) { pr_debug(DEV_DBG_NAME " [%s] is called!", __func__); @@ -1798,10 +1797,10 @@ static void __exit u8500_hash_mod_fini(void) return; } -module_init(u8500_hash_mod_init); -module_exit(u8500_hash_mod_fini); +module_init(ux500_hash_mod_init); +module_exit(ux500_hash_mod_fini); -MODULE_DESCRIPTION("Driver for ST-Ericsson U8500 HASH engine."); +MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine."); MODULE_LICENSE("GPL"); MODULE_ALIAS("sha1-all"); -- cgit v1.2.3 From 9cec987f6f34623761e7adaad0e747b4f47e9c10 Mon Sep 17 00:00:00 2001 From: Berne Hebark Date: Thu, 8 Sep 2011 16:06:15 +0530 Subject: crypto: ux500: hash: Block size data fix. - Include fix for block size data (64 byte). 
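The case this fixes is a final() call where the message length is an exact multiple of the 64-byte block size: ctx->state.index is 0, so there is no residual data to pad, yet DCAL must still be set for the hardware to produce the digest. A small userspace check that exercises exactly that path through the AF_ALG socket interface (whether this driver or the generic C implementation services the request depends on algorithm priorities; error handling kept minimal):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "hash",
                .salg_name   = "sha1",
        };
        unsigned char msg[64];          /* exactly one HASH_BLOCK_SIZE */
        unsigned char digest[20];       /* SHA1_DIGEST_SIZE */
        int tfmfd, opfd, i;

        memset(msg, 0xab, sizeof(msg));

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                return 1;
        opfd = accept(tfmfd, NULL, 0);
        if (opfd < 0)
                return 1;

        /* 64 bytes leaves state.index == 0 at final: the path fixed above */
        write(opfd, msg, sizeof(msg));
        read(opfd, digest, sizeof(digest));

        for (i = 0; i < (int)sizeof(digest); i++)
                printf("%02x", digest[i]);
        printf("\n");

        close(opfd);
        close(tfmfd);
        return 0;
}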
ST-Ericsson ID: 352122 ST-Ericsson Linux next: Not tested, ER 320876 ST-Ericsson FOSS-OUT ID: Trivial Ref: Commit-id: I069aba31d8620e01e74e2b1a5987a0642e74af01 Signed-off-by: Avinash A Change-Id: Idea6cc1e34d6263066188b99d97fafae0f1225aa Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/30440 Reviewed-by: Avinash A Tested-by: Avinash A --- drivers/crypto/ux500/hash/hash_core.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index ce2c9d645fa..08a89eeb601 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -1127,9 +1127,14 @@ static int ahash_final(struct ahash_request *req) } } - if (ctx->state.index) + if (ctx->state.index) { hash_messagepad(device_data, ctx->state.buffer, ctx->state.index); + } else { + HASH_SET_DCAL; + while (device_data->base->str & HASH_STR_DCAL_MASK) + cpu_relax(); + } if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { unsigned int keylen = ctx->keylen; -- cgit v1.2.3 From 247a5c4671e7a9671c5800663db606486e3973b1 Mon Sep 17 00:00:00 2001 From: Mian Yousaf Kaukab Date: Tue, 20 Sep 2011 13:33:46 +0200 Subject: crypto: ux500: update prcmu header file path File path updated according to the mainline version of prcmu driver. Change-Id: Iddf4b63a209f1191d0c60d5c5f20b8c663fba34c Signed-off-by: Mian Yousaf Kaukab Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/32118 Reviewed-by: Jonas ABERG Tested-by: Jonas ABERG --- drivers/crypto/ux500/cryp/cryp_core.c | 2 +- drivers/crypto/ux500/hash/hash_core.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 5893abb57dc..f778c6d143f 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 08a89eeb601..a5325943ecc 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -21,7 +21,7 @@ #include #include -#include +#include #include #include -- cgit v1.2.3 From b9b55561dbf7fb0612bfe3473c43bd2d55af8f91 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Mon, 17 Oct 2011 14:02:10 +0200 Subject: crypto: Move atomic regulator header file Atomic regulators should be placed in a regulator include file, not in mfd. 
Signed-off-by: Jonas Aaberg Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/32136 --- drivers/crypto/ux500/cryp/cryp_core.c | 2 +- drivers/crypto/ux500/hash/hash_core.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index f778c6d143f..5893abb57dc 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index a5325943ecc..08a89eeb601 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -21,7 +21,7 @@ #include #include -#include +#include #include #include -- cgit v1.2.3 From 43d77c96d9d50d2868ef1a8f14b0fb0cd478e3cc Mon Sep 17 00:00:00 2001 From: Berne Hebark Date: Thu, 27 Oct 2011 14:08:33 +0200 Subject: TEE driver: NULL variable usage Return if error in kmalloc, instead of goto err and call function using NULL variable. ST-Ericsson ID: 369796 ST-Ericsson Linux next: NA ST-Ericsson FOSS-OUT ID: Trivial Change-Id: Ib72acb6366f36febe3ea5cb7c9ceb275cfb1f038 Signed-off-by: Berne Hebark Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/35493 Reviewed-by: Joakim BECH Reviewed-by: Jonas ABERG Reviewed-by: QATOOLS Reviewed-by: QABUILD --- drivers/tee/tee_driver.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/tee/tee_driver.c b/drivers/tee/tee_driver.c index 5ae0cc9508b..8bafcf1e755 100644 --- a/drivers/tee/tee_driver.c +++ b/drivers/tee/tee_driver.c @@ -216,8 +216,7 @@ static int invoke_command(struct tee_session *ts, pr_err("[%s] error, out of memory " "(op)\n", __func__); set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY); - ret = -ENOMEM; - goto err; + return -ENOMEM; } } -- cgit v1.2.3 From ea52e07a9554e72ac8a6ff26eb529f14b3e66248 Mon Sep 17 00:00:00 2001 From: Berne Hebark Date: Wed, 25 May 2011 14:27:51 +0200 Subject: crypto: ux500: hash: DMA support - Support for DMA. - Direct to CPU mode for data size < 4 byte data. - Workaround to handle data sizes not modulo wordsize. - Error message/check for HMAC empty message with keysize > 0. - Error message/check for HMAC DMA for u5500, since not working. - Additional, update cryp driver dma code according to this patch and make minor adjustments to comply with mainline code and design are covered by AP370178. 
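The size threshold described above can be summarised as a small predicate. A freestanding, illustrative sketch (not lifted from the driver): HASH_DMA_ALIGN_SIZE and enum hash_mode are the definitions this patch adds to hash_alg.h, 'hash_mode' mirrors the new module parameter, and 'ctx_dma_ok' stands in for the per-context dma_mode workaround flag:

#include <stdbool.h>

#define HASH_DMA_ALIGN_SIZE 4

enum hash_mode {
        HASH_MODE_CPU,
        HASH_MODE_DMA
};

/*
 * DMA is only used when the driver was loaded with hash_mode=1, the request
 * is at least one 32-bit word long and the context has not been forced back
 * to CPU mode by a workaround.
 */
static bool use_dma_for_request(int hash_mode, unsigned int nbytes,
                                bool ctx_dma_ok)
{
        if (hash_mode != HASH_MODE_DMA)
                return false;           /* CPU (register write) mode */
        if (nbytes < HASH_DMA_ALIGN_SIZE)
                return false;           /* < 4 bytes: direct-to-CPU path */
        return ctx_dma_ok;              /* per-context dma_mode flag */
}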
ST-Ericsson ID: 280691 ST-Ericsson Linux next: Not tested, ER 320876 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: I14d64d1577f007969b372ed4ef04556eca8bc0d6 Signed-off-by: Berne Hebark Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/33983 Reviewed-by: Per FORLIN Reviewed-by: QATOOLS Reviewed-by: QABUILD --- arch/arm/mach-ux500/include/mach/crypto-ux500.h | 4 +- drivers/crypto/ux500/hash/hash_alg.h | 36 +- drivers/crypto/ux500/hash/hash_core.c | 602 +++++++++++++++++------- 3 files changed, 470 insertions(+), 172 deletions(-) diff --git a/arch/arm/mach-ux500/include/mach/crypto-ux500.h b/arch/arm/mach-ux500/include/mach/crypto-ux500.h index 9d1e1c52c13..80c4620d633 100644 --- a/arch/arm/mach-ux500/include/mach/crypto-ux500.h +++ b/arch/arm/mach-ux500/include/mach/crypto-ux500.h @@ -5,6 +5,7 @@ * License terms: GNU General Public License (GPL) version 2 */ #ifndef _CRYPTO_UX500_H +#include #include struct cryp_platform_data { @@ -13,7 +14,8 @@ struct cryp_platform_data { }; struct hash_platform_data { - struct stedma40_chan_cfg mem_to_engine; + void *mem_to_engine; + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); }; #endif diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h index 299f0bacc2c..61db5b511b6 100644 --- a/drivers/crypto/ux500/hash/hash_alg.h +++ b/drivers/crypto/ux500/hash/hash_alg.h @@ -11,6 +11,7 @@ #include #define HASH_BLOCK_SIZE 64 +#define HASH_DMA_ALIGN_SIZE 4 /* Maximum value of the length's high word */ #define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL @@ -125,6 +126,12 @@ 0x01, HASH_STR_DCAL_POS, \ HASH_STR_DCAL_MASK) +/* Hardware access method */ +enum hash_mode { + HASH_MODE_CPU, + HASH_MODE_DMA +}; + /** * struct uint64 - Structure to handle 64 bits integers. * @high_word: Most significant bits. @@ -286,6 +293,26 @@ struct hash_config { int oper_mode; }; +/** + * struct hash_dma - Structure used for dma. + * @mask: DMA capabilities bitmap mask. + * @complete: Used to maintain state for a "completion". + * @chan_mem2hash: DMA channel. + * @cfg_mem2hash: DMA channel configuration. + * @sg_len: Scatterlist length. + * @sg: Scatterlist. + * @nents: Number of sg entries. + */ +struct hash_dma { + dma_cap_mask_t mask; + struct completion complete; + struct dma_chan *chan_mem2hash; + void *cfg_mem2hash; + int sg_len; + struct scatterlist *sg; + int nents; +}; + /** * struct hash_ctx - The context used for hash calculations. * @key: The key used in the operation. @@ -293,8 +320,10 @@ struct hash_config { * @updated: Indicates if hardware is initialized for new operations. * @state: The state of the current calculations. * @config: The current configuration. - * @digestsize The size of current digest. - * @device Pointer to the device structure. + * @digestsize: The size of current digest. + * @device: Pointer to the device structure. + * @dma_mode: Used in special cases (workaround), e.g. need to change to + * cpu mode, if not supported/working in dma mode. */ struct hash_ctx { u8 *key; @@ -304,6 +333,7 @@ struct hash_ctx { struct hash_config config; int digestsize; struct hash_device_data *device; + bool dma_mode; }; /** @@ -318,6 +348,7 @@ struct hash_ctx { * @regulator: Pointer to the device's power control. * @clk: Pointer to the device's clock control. * @restore_dev_state: TRUE = saved state, FALSE = no saved state. + * @dma: Structure used for dma. 
*/ struct hash_device_data { struct hash_register __iomem *base; @@ -330,6 +361,7 @@ struct hash_device_data { struct ux500_regulator *regulator; struct clk *clk; bool restore_dev_state; + struct hash_dma dma; }; int hash_check_hw(struct hash_device_data *device_data); diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 08a89eeb601..b2a58dccf76 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -22,6 +22,7 @@ #include #include +#include #include #include @@ -29,12 +30,17 @@ #include #include +#include #include #include "hash_alg.h" #define DEV_DBG_NAME "hashX hashX:" +static int hash_mode; +module_param(hash_mode, int, 0); +MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); + /** * Pre-calculated empty message digests. */ @@ -113,6 +119,101 @@ static void release_hash_device(struct hash_device_data *device_data) up(&driver_data.device_allocation); } +static void hash_dma_setup_channel(struct hash_device_data *device_data, + struct device *dev) +{ + struct hash_platform_data *platform_data = dev->platform_data; + dma_cap_zero(device_data->dma.mask); + dma_cap_set(DMA_SLAVE, device_data->dma.mask); + + device_data->dma.cfg_mem2hash = platform_data->mem_to_engine; + device_data->dma.chan_mem2hash = + dma_request_channel(device_data->dma.mask, + platform_data->dma_filter, + device_data->dma.cfg_mem2hash); + + init_completion(&device_data->dma.complete); +} + +static void hash_dma_callback(void *data) +{ + struct hash_ctx *ctx = (struct hash_ctx *) data; + + complete(&ctx->device->dma.complete); +} + +static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, + int len, enum dma_data_direction direction) +{ + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *channel = NULL; + dma_cookie_t cookie; + + if (direction != DMA_TO_DEVICE) { + dev_err(ctx->device->dev, "[%s] Invalid DMA direction", + __func__); + return -EFAULT; + } + + sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE); + + channel = ctx->device->dma.chan_mem2hash; + ctx->device->dma.sg = sg; + ctx->device->dma.sg_len = dma_map_sg(channel->device->dev, + ctx->device->dma.sg, ctx->device->dma.nents, + direction); + + if (!ctx->device->dma.sg_len) { + dev_err(ctx->device->dev, + "[%s]: Could not map the sg list (TO_DEVICE)", + __func__); + return -EFAULT; + } + + dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " + "(TO_DEVICE)", __func__); + desc = channel->device->device_prep_slave_sg(channel, + ctx->device->dma.sg, ctx->device->dma.sg_len, + direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(ctx->device->dev, + "[%s]: device_prep_slave_sg() failed!", __func__); + return -EFAULT; + } + + desc->callback = hash_dma_callback; + desc->callback_param = ctx; + + cookie = desc->tx_submit(desc); + dma_async_issue_pending(channel); + + return 0; +} + +static void hash_dma_done(struct hash_ctx *ctx) +{ + struct dma_chan *chan; + + chan = ctx->device->dma.chan_mem2hash; + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); + dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, + ctx->device->dma.sg_len, DMA_TO_DEVICE); + +} + +static int hash_dma_write(struct hash_ctx *ctx, + struct scatterlist *sg, int len) +{ + int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); + if (error) { + dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() " + "failed", __func__); + return error; + } + + return len; +} + /** * get_empty_message_digest - Returns a 
pre-calculated digest for * the empty message. @@ -197,8 +298,6 @@ static int hash_disable_power( int ret = 0; struct device *dev = device_data->dev; - dev_dbg(dev, "[%s]", __func__); - spin_lock(&device_data->power_state_lock); if (!device_data->power_state) goto out; @@ -236,7 +335,6 @@ static int hash_enable_power( { int ret = 0; struct device *dev = device_data->dev; - dev_dbg(dev, "[%s]", __func__); spin_lock(&device_data->power_state_lock); if (!device_data->power_state) { @@ -287,8 +385,6 @@ static int hash_get_device_data(struct hash_ctx *ctx, struct klist_node *device_node; struct hash_device_data *local_device_data = NULL; - pr_debug(DEV_DBG_NAME " [%s]", __func__); - /* Wait until a device is available */ ret = down_interruptible(&driver_data.device_allocation); if (ret) @@ -390,8 +486,6 @@ static int init_hash_hw(struct hash_device_data *device_data, { int ret = 0; - dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32)ctx); - ret = hash_setconfiguration(device_data, &ctx->config); if (ret) { dev_err(device_data->dev, "[%s] hash_setconfiguration() " @@ -407,6 +501,61 @@ static int init_hash_hw(struct hash_device_data *device_data, return ret; } +/** + * hash_get_nents - Return number of entries (nents) in scatterlist (sg). + * + * @sg: Scatterlist. + * @size: Size in bytes. + * @aligned: True if sg data aligned to work in DMA mode. + * + * Reentrancy: Non Re-entrant + */ +static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned) +{ + int nents = 0; + bool aligned_data = true; + + while (size > 0 && sg) { + nents++; + size -= sg->length; + + /* hash_set_dma_transfer will align last nent */ + if (aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE) || + (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && + size > 0)) + aligned_data = false; + + sg = sg_next(sg); + } + + if (aligned) + *aligned = aligned_data; + + if (size != 0) + return -EFAULT; + + return nents; +} + +/** + * hash_dma_valid_data - checks for dma valid sg data. + * @sg: Scatterlist. + * @datasize: Datasize in bytes. + * + * NOTE! This function checks for dma valid sg data, since dma + * only accept datasizes of even wordsize. + */ +static bool hash_dma_valid_data(struct scatterlist *sg, int datasize) +{ + bool aligned; + + /* Need to include at least one nent, else error */ + if (hash_get_nents(sg, datasize, &aligned) < 1) + return false; + + return aligned; +} + /** * hash_init - Common hash init function for SHA1/SHA2 (SHA256). * @req: The hash request for the job. 
@@ -418,13 +567,39 @@ static int hash_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes); - if (!ctx->key) ctx->keylen = 0; memset(&ctx->state, 0, sizeof(struct hash_state)); ctx->updated = 0; + if (hash_mode == HASH_MODE_DMA) { + if ((ctx->config.oper_mode == HASH_OPER_MODE_HMAC) && + cpu_is_u5500()) { + pr_debug(DEV_DBG_NAME " [%s] HMAC and DMA not working " + "on u5500, directing to CPU mode.", + __func__); + ctx->dma_mode = false; /* Don't use DMA in this case */ + goto out; + } + + if (req->nbytes < HASH_DMA_ALIGN_SIZE) { + ctx->dma_mode = false; /* Don't use DMA in this case */ + + pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct " + "to CPU mode for data size < %d", + __func__, HASH_DMA_ALIGN_SIZE); + } else { + if (hash_dma_valid_data(req->src, req->nbytes)) { + ctx->dma_mode = true; + } else { + ctx->dma_mode = false; + pr_debug(DEV_DBG_NAME " [%s] DMA mode, but " + "direct to CPU mode for " + "non-aligned data", __func__); + } + } + } +out: return 0; } @@ -474,9 +649,6 @@ static void hash_processblock( static void hash_messagepad(struct hash_device_data *device_data, const u32 *message, u8 index_bytes) { - dev_dbg(device_data->dev, "[%s] (bytes in final msg=%d))", - __func__, index_bytes); - /* * Clear hash str register, only clear NBLW * since DCAL will be reset by hardware. @@ -561,7 +733,6 @@ int hash_setconfiguration(struct hash_device_data *device_data, struct hash_config *config) { int ret = 0; - dev_dbg(device_data->dev, "[%s] ", __func__); if (config->algorithm != HASH_ALGO_SHA1 && config->algorithm != HASH_ALGO_SHA256) @@ -573,13 +744,6 @@ int hash_setconfiguration(struct hash_device_data *device_data, */ HASH_SET_DATA_FORMAT(config->data_format); - /* - * Empty message bit. This bit is needed when the hash input data - * contain the empty message. Always set in current impl. but with - * no impact on data different than empty message. - */ - HASH_SET_BITS(&device_data->base->cr, HASH_CR_EMPTYMSG_MASK); - /* * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256 */ @@ -652,7 +816,6 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) { /* HW and SW initializations */ /* Note: there is no need to initialize buffer and digest members */ - dev_dbg(device_data->dev, "[%s] ", __func__); while (device_data->base->str & HASH_STR_DCAL_MASK) cpu_relax(); @@ -688,6 +851,7 @@ int hash_process_data( msg_length = 0; } else { if (ctx->updated) { + ret = hash_resume_state(device_data, &ctx->state); if (ret) { @@ -696,7 +860,6 @@ int hash_process_data( " failed!", __func__); goto out; } - } else { ret = init_hash_hw(device_data, ctx); if (ret) { @@ -732,6 +895,7 @@ int hash_process_data( } hash_incrementlength(ctx, HASH_BLOCK_SIZE); data_buffer += (HASH_BLOCK_SIZE - *index); + msg_length -= (HASH_BLOCK_SIZE - *index); *index = 0; @@ -750,6 +914,236 @@ out: return ret; } +/** + * hash_dma_final - The hash dma final function for SHA1/SHA256. + * @req: The hash request for the job. 
+ */ +static int hash_dma_final(struct ahash_request *req) +{ + int ret = 0; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct hash_device_data *device_data; + u8 digest[SHA256_DIGEST_SIZE]; + int bytes_written = 0; + + ret = hash_get_device_data(ctx, &device_data); + if (ret) + return ret; + + dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); + + /* Enable device power (and clock) */ + ret = hash_enable_power(device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "hash_enable_power() failed!", __func__); + goto out; + } + + if (ctx->updated) { + ret = hash_resume_state(device_data, &ctx->state); + + if (ret) { + dev_err(device_data->dev, "[%s] hash_resume_state() " + "failed!", __func__); + goto out_power; + } + + } + + if (!ctx->updated) { + ret = hash_setconfiguration(device_data, &ctx->config); + if (ret) { + dev_err(device_data->dev, "[%s] " + "hash_setconfiguration() failed!", + __func__); + goto out_power; + } + + /* Enable DMA input */ + if (hash_mode != HASH_MODE_DMA || !ctx->dma_mode) { + HASH_CLEAR_BITS(&device_data->base->cr, + HASH_CR_DMAE_MASK); + } else { + HASH_SET_BITS(&device_data->base->cr, + HASH_CR_DMAE_MASK); + HASH_SET_BITS(&device_data->base->cr, + HASH_CR_PRIVN_MASK); + } + + HASH_INITIALIZE; + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) + hash_hw_write_key(device_data, ctx->key, ctx->keylen); + + /* Number of bits in last word = (nbytes * 8) % 32 */ + HASH_SET_NBLW((req->nbytes * 8) % 32); + ctx->updated = 1; + } + + /* Store the nents in the dma struct. */ + ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); + if (!ctx->device->dma.nents) { + dev_err(device_data->dev, "[%s] " + "ctx->device->dma.nents = 0", __func__); + goto out_power; + } + + bytes_written = hash_dma_write(ctx, req->src, req->nbytes); + if (bytes_written != req->nbytes) { + dev_err(device_data->dev, "[%s] " + "hash_dma_write() failed!", __func__); + goto out_power; + } + + wait_for_completion(&ctx->device->dma.complete); + hash_dma_done(ctx); + + while (device_data->base->str & HASH_STR_DCAL_MASK) + cpu_relax(); + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { + unsigned int keylen = ctx->keylen; + u8 *key = ctx->key; + + dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, + ctx->keylen); + hash_hw_write_key(device_data, key, keylen); + } + + hash_get_digest(device_data, digest, ctx->config.algorithm); + memcpy(req->result, digest, ctx->digestsize); + +out_power: + /* Disable power (and clock) */ + if (hash_disable_power(device_data, false)) + dev_err(device_data->dev, "[%s] hash_disable_power() failed!", + __func__); + +out: + release_hash_device(device_data); + + /** + * Allocated in setkey, and only used in HMAC. + */ + kfree(ctx->key); + + return ret; +} + +/** + * hash_hw_final - The final hash calculation function + * @req: The hash request for the job. 
+ */ +int hash_hw_final(struct ahash_request *req) +{ + int ret = 0; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct hash_device_data *device_data; + u8 digest[SHA256_DIGEST_SIZE]; + + ret = hash_get_device_data(ctx, &device_data); + if (ret) + return ret; + + dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); + + /* Enable device power (and clock) */ + ret = hash_enable_power(device_data, false); + if (ret) { + dev_err(device_data->dev, "[%s]: " + "hash_enable_power() failed!", __func__); + goto out; + } + + if (ctx->updated) { + ret = hash_resume_state(device_data, &ctx->state); + + if (ret) { + dev_err(device_data->dev, "[%s] hash_resume_state() " + "failed!", __func__); + goto out_power; + } + } else if (req->nbytes == 0 && ctx->keylen == 0) { + u8 zero_hash[SHA256_DIGEST_SIZE]; + u32 zero_hash_size = 0; + bool zero_digest = false; + /** + * Use a pre-calculated empty message digest + * (workaround since hw return zeroes, hw bug!?) + */ + ret = get_empty_message_digest(device_data, &zero_hash[0], + &zero_hash_size, &zero_digest); + if (!ret && likely(zero_hash_size == ctx->digestsize) && + zero_digest) { + memcpy(req->result, &zero_hash[0], ctx->digestsize); + goto out_power; + } else if (!ret && !zero_digest) { + dev_dbg(device_data->dev, "[%s] HMAC zero msg with " + "key, continue...", __func__); + } else { + dev_err(device_data->dev, "[%s] ret=%d, or wrong " + "digest size? %s", __func__, ret, + (zero_hash_size == ctx->digestsize) ? + "true" : "false"); + /* Return error */ + goto out_power; + } + } else if (req->nbytes == 0 && ctx->keylen > 0) { + dev_err(device_data->dev, "[%s] Empty message with " + "keylength > 0, NOT supported.", __func__); + goto out_power; + } + + if (!ctx->updated) { + ret = init_hash_hw(device_data, ctx); + if (ret) { + dev_err(device_data->dev, "[%s] init_hash_hw() " + "failed!", __func__); + goto out_power; + } + } + + if (ctx->state.index) { + hash_messagepad(device_data, ctx->state.buffer, + ctx->state.index); + } else { + HASH_SET_DCAL; + while (device_data->base->str & HASH_STR_DCAL_MASK) + cpu_relax(); + } + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { + unsigned int keylen = ctx->keylen; + u8 *key = ctx->key; + + dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, + ctx->keylen); + hash_hw_write_key(device_data, key, keylen); + } + + hash_get_digest(device_data, digest, ctx->config.algorithm); + memcpy(req->result, digest, ctx->digestsize); + +out_power: + /* Disable power (and clock) */ + if (hash_disable_power(device_data, false)) + dev_err(device_data->dev, "[%s] hash_disable_power() failed!", + __func__); + +out: + release_hash_device(device_data); + + /** + * Allocated in setkey, and only used in HMAC. + */ + kfree(ctx->key); + + return ret; +} + /** * hash_hw_update - Updates current HASH computation hashing another part of * the message. 
@@ -770,8 +1164,6 @@ int hash_hw_update(struct ahash_request *req) struct crypto_hash_walk walk; int msg_length = crypto_hash_walk_first(req, &walk); - pr_debug(DEV_DBG_NAME " [%s] datalength: %d", __func__, msg_length); - /* Empty message ("") is correct indata */ if (msg_length == 0) return ret; @@ -818,9 +1210,9 @@ int hash_hw_update(struct ahash_request *req) } ctx->state.index = index; - dev_dbg(device_data->dev, "[%s] indata length=%d, " - "bin=%d))", __func__, ctx->state.index, ctx->state.bit_index); + "bin=%d))", __func__, ctx->state.index, ctx->state.bit_index); + out_power: /* Disable power (and clock) */ if (hash_disable_power(device_data, false)) @@ -846,9 +1238,6 @@ int hash_resume_state(struct hash_device_data *device_data, s32 count; int hash_mode = HASH_OPER_MODE_HASH; - dev_dbg(device_data->dev, "[%s] (state(0x%x)))", - __func__, (u32) device_state); - if (NULL == device_state) { dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", __func__); @@ -909,9 +1298,6 @@ int hash_save_state(struct hash_device_data *device_data, u32 count; int hash_mode = HASH_OPER_MODE_HASH; - dev_dbg(device_data->dev, "[%s] state(0x%x)))", - __func__, (u32) device_state); - if (NULL == device_state) { dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", __func__); @@ -961,8 +1347,6 @@ int hash_check_hw(struct hash_device_data *device_data) { int ret = 0; - dev_dbg(device_data->dev, "[%s] ", __func__); - if (NULL == device_data) { ret = -EPERM; dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", @@ -1041,17 +1425,18 @@ void hash_get_digest(struct hash_device_data *device_data, static int ahash_update(struct ahash_request *req) { int ret = 0; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s] ", __func__); + if (hash_mode != HASH_MODE_DMA || !ctx->dma_mode) + ret = hash_hw_update(req); + /* Skip update for DMA, all data will be passed to DMA in final */ - ret = hash_hw_update(req); if (ret) { pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!", __func__); - goto out; } -out: return ret; } @@ -1064,103 +1449,18 @@ static int ahash_final(struct ahash_request *req) int ret = 0; struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - struct hash_device_data *device_data; - u8 digest[SHA256_DIGEST_SIZE]; - - pr_debug(DEV_DBG_NAME " [%s] ", __func__); - ret = hash_get_device_data(ctx, &device_data); - if (ret) - return ret; + pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes); - dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); + if ((hash_mode == HASH_MODE_DMA) && ctx->dma_mode) + ret = hash_dma_final(req); + else + ret = hash_hw_final(req); - /* Enable device power (and clock) */ - ret = hash_enable_power(device_data, false); if (ret) { - dev_err(device_data->dev, "[%s]: " - "hash_enable_power() failed!", __func__); - goto out; - } - - if (ctx->updated) { - ret = hash_resume_state(device_data, &ctx->state); - - if (ret) { - dev_err(device_data->dev, "[%s] hash_resume_state() " - "failed!", __func__); - goto out_power; - } - } else if (req->nbytes == 0 && ctx->keylen == 0) { - u8 zero_hash[SHA256_DIGEST_SIZE]; - u32 zero_hash_size = 0; - bool zero_digest = false; - /** - * Use a pre-calculated empty message digest - * (workaround since hw return zeroes, hw bug!?) 
- */ - ret = get_empty_message_digest(device_data, &zero_hash[0], - &zero_hash_size, &zero_digest); - if (!ret && likely(zero_hash_size == ctx->digestsize) && - zero_digest) { - memcpy(req->result, &zero_hash[0], ctx->digestsize); - goto out_power; - } else if (!ret && !zero_digest) { - dev_dbg(device_data->dev, "[%s] HMAC zero msg with " - "key, continue...", __func__); - } else { - dev_err(device_data->dev, "[%s] ret=%d, or wrong " - "digest size? %s", __func__, ret, - (zero_hash_size == ctx->digestsize) ? - "true" : "false"); - /* Return error */ - goto out_power; - } - } - - if (!ctx->updated) { - ret = init_hash_hw(device_data, ctx); - if (ret) { - dev_err(device_data->dev, "[%s] init_hash_hw() " - "failed!", __func__); - goto out_power; - } - } - - if (ctx->state.index) { - hash_messagepad(device_data, ctx->state.buffer, - ctx->state.index); - } else { - HASH_SET_DCAL; - while (device_data->base->str & HASH_STR_DCAL_MASK) - cpu_relax(); - } - - if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { - unsigned int keylen = ctx->keylen; - u8 *key = ctx->key; - - dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, - ctx->keylen); - hash_hw_write_key(device_data, key, keylen); - } - - hash_get_digest(device_data, digest, ctx->config.algorithm); - memcpy(req->result, digest, ctx->digestsize); - -out_power: - /* Disable power (and clock) */ - if (hash_disable_power(device_data, false)) - dev_err(device_data->dev, "[%s] hash_disable_power() failed!", + pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed", __func__); - -out: - release_hash_device(device_data); - - /** - * Allocated in setkey, and only used in HMAC. - */ - kfree(ctx->key); + } return ret; } @@ -1171,8 +1471,6 @@ static int hash_setkey(struct crypto_ahash *tfm, int ret = 0; struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s] keylen: %d", __func__, keylen); - /** * Freed in final. 
*/ @@ -1194,8 +1492,6 @@ static int ahash_sha1_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx); - ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA1; ctx->config.oper_mode = HASH_OPER_MODE_HASH; @@ -1209,8 +1505,6 @@ static int ahash_sha256_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx); - ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA256; ctx->config.oper_mode = HASH_OPER_MODE_HASH; @@ -1223,8 +1517,6 @@ static int ahash_sha1_digest(struct ahash_request *req) { int ret2, ret1; - pr_debug(DEV_DBG_NAME " [%s]", __func__); - ret1 = ahash_sha1_init(req); if (ret1) goto out; @@ -1240,8 +1532,6 @@ static int ahash_sha256_digest(struct ahash_request *req) { int ret2, ret1; - pr_debug(DEV_DBG_NAME " [%s]", __func__); - ret1 = ahash_sha256_init(req); if (ret1) goto out; @@ -1258,8 +1548,6 @@ static int hmac_sha1_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx); - ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA1; ctx->config.oper_mode = HASH_OPER_MODE_HMAC; @@ -1273,8 +1561,6 @@ static int hmac_sha256_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx); - ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = HASH_ALGO_SHA256; ctx->config.oper_mode = HASH_OPER_MODE_HMAC; @@ -1287,8 +1573,6 @@ static int hmac_sha1_digest(struct ahash_request *req) { int ret2, ret1; - pr_debug(DEV_DBG_NAME " [%s]", __func__); - ret1 = hmac_sha1_init(req); if (ret1) goto out; @@ -1304,8 +1588,6 @@ static int hmac_sha256_digest(struct ahash_request *req) { int ret2, ret1; - pr_debug(DEV_DBG_NAME " [%s]", __func__); - ret1 = hmac_sha256_init(req); if (ret1) goto out; @@ -1320,16 +1602,12 @@ out: static int hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { - pr_debug(DEV_DBG_NAME " [%s]", __func__); - return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); } static int hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { - pr_debug(DEV_DBG_NAME " [%s]", __func__); - return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256); } @@ -1425,8 +1703,6 @@ static int ahash_algs_register_all(struct hash_device_data *device_data) int i; int count; - dev_dbg(device_data->dev, "[%s]", __func__); - for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++) { ret = crypto_register_ahash(ux500_ahash_algs[i]); if (ret) { @@ -1451,8 +1727,6 @@ static void ahash_algs_unregister_all(struct hash_device_data *device_data) { int i; - dev_dbg(device_data->dev, "[%s]", __func__); - for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++) crypto_unregister_ahash(ux500_ahash_algs[i]); } @@ -1468,7 +1742,6 @@ static int ux500_hash_probe(struct platform_device *pdev) struct hash_device_data *device_data; struct device *dev = &pdev->dev; - dev_dbg(dev, "[%s] (pdev=0x%x)", __func__, (u32) pdev); device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC); if (!device_data) { dev_dbg(dev, "[%s] kzalloc() 
failed!", __func__); @@ -1505,7 +1778,6 @@ static int ux500_hash_probe(struct platform_device *pdev) /* Enable power for HASH1 hardware block */ device_data->regulator = ux500_regulator_get(dev); - if (IS_ERR(device_data->regulator)) { dev_err(dev, "[%s] regulator_get() failed!", __func__); ret = PTR_ERR(device_data->regulator); @@ -1534,6 +1806,9 @@ static int ux500_hash_probe(struct platform_device *pdev) goto out_power; } + if (hash_mode == HASH_MODE_DMA) + hash_dma_setup_channel(device_data, dev); + platform_set_drvdata(pdev, device_data); /* Put the new device into the device list... */ @@ -1585,8 +1860,6 @@ static int ux500_hash_remove(struct platform_device *pdev) struct hash_device_data *device_data; struct device *dev = &pdev->dev; - dev_dbg(dev, "[%s] (pdev=0x%x)", __func__, (u32) pdev); - device_data = platform_get_drvdata(pdev); if (!device_data) { dev_err(dev, "[%s]: platform_get_drvdata() failed!", @@ -1646,8 +1919,6 @@ static void ux500_hash_shutdown(struct platform_device *pdev) struct resource *res = NULL; struct hash_device_data *device_data; - dev_dbg(&pdev->dev, "[%s]", __func__); - device_data = platform_get_drvdata(pdev); if (!device_data) { dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", @@ -1701,8 +1972,6 @@ static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state) struct hash_device_data *device_data; struct hash_ctx *temp_ctx = NULL; - dev_dbg(&pdev->dev, "[%s]", __func__); - device_data = platform_get_drvdata(pdev); if (!device_data) { dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", @@ -1740,8 +2009,6 @@ static int ux500_hash_resume(struct platform_device *pdev) struct hash_device_data *device_data; struct hash_ctx *temp_ctx = NULL; - dev_dbg(&pdev->dev, "[%s]", __func__); - device_data = platform_get_drvdata(pdev); if (!device_data) { dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", @@ -1783,7 +2050,6 @@ static struct platform_driver hash_driver = { */ static int __init ux500_hash_mod_init(void) { - pr_debug(DEV_DBG_NAME " [%s] is called!", __func__); klist_init(&driver_data.device_list, NULL, NULL); /* Initialize the semaphore to 0 devices (locked state) */ sema_init(&driver_data.device_allocation, 0); @@ -1796,8 +2062,6 @@ static int __init ux500_hash_mod_init(void) */ static void __exit ux500_hash_mod_fini(void) { - pr_debug(DEV_DBG_NAME " [%s] is called!", __func__); - platform_driver_unregister(&hash_driver); return; } -- cgit v1.2.3 From 9180fb4c4907f1476bc8ee1459d2954a7a93371c Mon Sep 17 00:00:00 2001 From: Berne Hebark Date: Tue, 25 Oct 2011 14:17:01 +0200 Subject: crypto: ux500: hash: Coverity correction Coverity found invalid usage of device_data->dev. 
ST-Ericsson ID: 361610 ST-Ericsson Linux next: NA ST-Ericsson FOSS-OUT ID: Trivial Change-Id: I78e756eb36af833e6620d0b02eda25c36741c94a Signed-off-by: Berne Hebark Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/36480 Reviewed-by: QATOOLS Reviewed-by: QABUILD --- drivers/crypto/ux500/hash/hash_core.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index b2a58dccf76..04546ddbbd1 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -1173,11 +1173,10 @@ int hash_hw_update(struct ahash_request *req) /* Check if ctx->state.length + msg_length overflows */ - if (msg_length > - (ctx->state.length.low_word + msg_length) - && HASH_HIGH_WORD_MAX_VAL == - (ctx->state.length.high_word)) { - dev_err(device_data->dev, "[%s] HASH_MSG_LENGTH_OVERFLOW!", + if (msg_length > (ctx->state.length.low_word + msg_length) && + HASH_HIGH_WORD_MAX_VAL == + ctx->state.length.high_word) { + pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!", __func__); return -EPERM; } @@ -1349,7 +1348,7 @@ int hash_check_hw(struct hash_device_data *device_data) if (NULL == device_data) { ret = -EPERM; - dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", + pr_err(DEV_DBG_NAME " [%s] HASH_INVALID_PARAMETER!", __func__); goto out; } -- cgit v1.2.3 From 18c245732b85fb56457cb70fcdf34878dade3228 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Thu, 12 Jan 2012 13:14:22 +0100 Subject: security: ux500: Coding style fixes Fix the most obvious violations of the kernel coding style Signed-off-by: Jonas Aaberg --- arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h b/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h index 6978b7314c5..2ac88edfe71 100644 --- a/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h +++ b/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h @@ -45,4 +45,3 @@ struct tee_ta_start_modem { int tee_ta_start_modem(struct tee_ta_start_modem *data); #endif - -- cgit v1.2.3 From 356d0c567bcdd463fb19971841a46fb17b776f61 Mon Sep 17 00:00:00 2001 From: Berne Hebark Date: Wed, 2 Nov 2011 10:23:53 +0100 Subject: crypto: ux500: hash: Performance improvements - writel and readl changed to writel/readl_relaxed. - changed to writesl in HASH_SET_DIN macro. - DMA limitation to use CPU mode for datasize < 1kB. 
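As a rough illustration of the first two points above (not taken from the driver itself): readl()/writel() carry I/O barriers on every access, while the _relaxed variants and writesl() do not, so register-heavy paths get cheaper by using relaxed accesses for ordinary register traffic and keeping one ordered access where it matters. The register names in this sketch are hypothetical and only loosely mirror the HASH block.

#include <linux/types.h>
#include <linux/io.h>

/*
 * Illustrative only: "din" stands for a FIFO data register and "str"
 * for a status/start register.
 */
static u32 example_feed_words(void __iomem *din, void __iomem *str,
                              const u32 *words, unsigned int nwords,
                              u32 start_mask)
{
        /* One call issues nwords back-to-back 32-bit writes to the
         * same register, instead of nwords barrier-laden writel()s. */
        writesl(din, words, nwords);

        /* Keep the plain writel() for the kick-off so earlier stores
         * to normal memory (the words[] buffer) are ordered before it. */
        writel(readl_relaxed(str) | start_mask, str);

        /* A relaxed read is enough for a simple status check. */
        return readl_relaxed(str) & start_mask;
}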
ST-Ericsson ID: 371579 ST-Ericsson Linux next: Not tested, ER 320876 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: I111351fbdbc874a35dbc5bce2dd9dbe3f4411253 Signed-off-by: Berne Hebark Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/36618 Reviewed-by: QATOOLS Reviewed-by: QABUILD Reviewed-by: Per FORLIN --- drivers/crypto/ux500/hash/hash_alg.h | 10 ++-- drivers/crypto/ux500/hash/hash_core.c | 105 +++++++++++++++++----------------- 2 files changed, 60 insertions(+), 55 deletions(-) diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h index 61db5b511b6..b8619ea4a27 100644 --- a/drivers/crypto/ux500/hash/hash_alg.h +++ b/drivers/crypto/ux500/hash/hash_alg.h @@ -12,6 +12,8 @@ #define HASH_BLOCK_SIZE 64 #define HASH_DMA_ALIGN_SIZE 4 +#define HASH_DMA_PERFORMANCE_MIN_SIZE 1024 +#define HASH_BYTES_PER_WORD 4 /* Maximum value of the length's high word */ #define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL @@ -93,16 +95,16 @@ #define HASH_CELL_ID3 0xB1 #define HASH_SET_BITS(reg_name, mask) \ - writel((readl(reg_name) | mask), reg_name) + writel_relaxed((readl_relaxed(reg_name) | mask), reg_name) #define HASH_CLEAR_BITS(reg_name, mask) \ - writel((readl(reg_name) & ~mask), reg_name) + writel_relaxed((readl_relaxed(reg_name) & ~mask), reg_name) #define HASH_PUT_BITS(reg, val, shift, mask) \ - writel(((readl(reg) & ~(mask)) | \ + writel_relaxed(((readl(reg) & ~(mask)) | \ (((u32)val << shift) & (mask))), reg) -#define HASH_SET_DIN(val) writel((val), &device_data->base->din) +#define HASH_SET_DIN(val, len) writesl(&device_data->base->din, (val), (len)) #define HASH_INITIALIZE \ HASH_PUT_BITS( \ diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 04546ddbbd1..9cc50e91c9e 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -442,15 +442,17 @@ static void hash_hw_write_key(struct hash_device_data *device_data, const u8 *key, unsigned int keylen) { u32 word = 0; + int nwords = 1; HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); + while (keylen >= 4) { word = ((u32) (key[3] & 0xff) << 24) | ((u32) (key[2] & 0xff) << 16) | ((u32) (key[1] & 0xff) << 8) | ((u32) (key[0] & 0xff)); - HASH_SET_DIN(word); + HASH_SET_DIN(&word, nwords); keylen -= 4; key += 4; } @@ -462,8 +464,10 @@ static void hash_hw_write_key(struct hash_device_data *device_data, word |= (key[keylen - 1] << (8 * (keylen - 1))); keylen--; } - HASH_SET_DIN(word); + + HASH_SET_DIN(&word, nwords); } + while (device_data->base->str & HASH_STR_DCAL_MASK) cpu_relax(); @@ -520,9 +524,9 @@ static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned) size -= sg->length; /* hash_set_dma_transfer will align last nent */ - if (aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE) || - (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && - size > 0)) + if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) + || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && + size > 0)) aligned_data = false; sg = sg_next(sg); @@ -589,13 +593,17 @@ static int hash_init(struct ahash_request *req) "to CPU mode for data size < %d", __func__, HASH_DMA_ALIGN_SIZE); } else { - if (hash_dma_valid_data(req->src, req->nbytes)) { + if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE && + hash_dma_valid_data(req->src, + req->nbytes)) { ctx->dma_mode = true; } else { ctx->dma_mode = false; - pr_debug(DEV_DBG_NAME " [%s] DMA mode, but " - "direct to CPU mode for " - "non-aligned data", __func__); + pr_debug(DEV_DBG_NAME " [%s] DMA mode, but 
use" + " CPU mode for datalength < %d" + " or non-aligned data, except " + "in last nent", __func__, + HASH_DMA_PERFORMANCE_MIN_SIZE); } } } @@ -614,10 +622,9 @@ out: */ static void hash_processblock( struct hash_device_data *device_data, - const u32 *message) + const u32 *message, int length) { - u32 count; - + int len = length / HASH_BYTES_PER_WORD; /* * NBLW bits. Reset the number of bits in last word (NBLW). */ @@ -626,13 +633,7 @@ static void hash_processblock( /* * Write message data to the HASH_DIN register. */ - for (count = 0; count < (HASH_BLOCK_SIZE / sizeof(u32)); count += 4) { - HASH_SET_DIN(message[0]); - HASH_SET_DIN(message[1]); - HASH_SET_DIN(message[2]); - HASH_SET_DIN(message[3]); - message += 4; - } + HASH_SET_DIN(message, len); } /** @@ -649,22 +650,23 @@ static void hash_processblock( static void hash_messagepad(struct hash_device_data *device_data, const u32 *message, u8 index_bytes) { + int nwords = 1; + /* * Clear hash str register, only clear NBLW * since DCAL will be reset by hardware. */ - writel((readl(&device_data->base->str) & ~HASH_STR_NBLW_MASK), - &device_data->base->str); + HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); /* Main loop */ while (index_bytes >= 4) { - HASH_SET_DIN(message[0]); + HASH_SET_DIN(message, nwords); index_bytes -= 4; message++; } if (index_bytes) - HASH_SET_DIN(message[0]); + HASH_SET_DIN(message, nwords); while (device_data->base->str & HASH_STR_DCAL_MASK) cpu_relax(); @@ -672,13 +674,13 @@ static void hash_messagepad(struct hash_device_data *device_data, /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ HASH_SET_NBLW(index_bytes * 8); dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__, - readl(&device_data->base->din), - (int)(readl(&device_data->base->str) & + readl_relaxed(&device_data->base->din), + (int)(readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK)); HASH_SET_DCAL; dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d", - __func__, readl(&device_data->base->din), - (int)(readl(&device_data->base->str) & + __func__, readl_relaxed(&device_data->base->din), + (int)(readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK)); while (device_data->base->str & HASH_STR_DCAL_MASK) @@ -881,7 +883,7 @@ int hash_process_data( && (0 == *index)) hash_processblock(device_data, (const u32 *) - data_buffer); + data_buffer, HASH_BLOCK_SIZE); else { for (count = 0; count < (u32)(HASH_BLOCK_SIZE - @@ -891,7 +893,8 @@ int hash_process_data( *(data_buffer + count); } hash_processblock(device_data, - (const u32 *)buffer); + (const u32 *)buffer, + HASH_BLOCK_SIZE); } hash_incrementlength(ctx, HASH_BLOCK_SIZE); data_buffer += (HASH_BLOCK_SIZE - *index); @@ -1155,7 +1158,7 @@ out: int hash_hw_update(struct ahash_request *req) { int ret = 0; - u8 index; + u8 index = 0; u8 *buffer; struct hash_device_data *device_data; u8 *data_buffer; @@ -1259,7 +1262,7 @@ int hash_resume_state(struct hash_device_data *device_data, HASH_INITIALIZE; temp_cr = device_state->temp_cr; - writel(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr); + writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr); if (device_data->base->cr & HASH_CR_MODE_MASK) hash_mode = HASH_OPER_MODE_HMAC; @@ -1270,15 +1273,15 @@ int hash_resume_state(struct hash_device_data *device_data, if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) break; - writel(device_state->csr[count], + writel_relaxed(device_state->csr[count], &device_data->base->csrx[count]); } - writel(device_state->csfull, 
&device_data->base->csfull); - writel(device_state->csdatain, &device_data->base->csdatain); + writel_relaxed(device_state->csfull, &device_data->base->csfull); + writel_relaxed(device_state->csdatain, &device_data->base->csdatain); - writel(device_state->str_reg, &device_data->base->str); - writel(temp_cr, &device_data->base->cr); + writel_relaxed(device_state->str_reg, &device_data->base->str); + writel_relaxed(temp_cr, &device_data->base->cr); return 0; } @@ -1310,11 +1313,11 @@ int hash_save_state(struct hash_device_data *device_data, while (device_data->base->str & HASH_STR_DCAL_MASK) cpu_relax(); - temp_cr = readl(&device_data->base->cr); + temp_cr = readl_relaxed(&device_data->base->cr); - device_state->str_reg = readl(&device_data->base->str); + device_state->str_reg = readl_relaxed(&device_data->base->str); - device_state->din_reg = readl(&device_data->base->din); + device_state->din_reg = readl_relaxed(&device_data->base->din); if (device_data->base->cr & HASH_CR_MODE_MASK) hash_mode = HASH_OPER_MODE_HMAC; @@ -1326,11 +1329,11 @@ int hash_save_state(struct hash_device_data *device_data, break; device_state->csr[count] = - readl(&device_data->base->csrx[count]); + readl_relaxed(&device_data->base->csrx[count]); } - device_state->csfull = readl(&device_data->base->csfull); - device_state->csdatain = readl(&device_data->base->csdatain); + device_state->csfull = readl_relaxed(&device_data->base->csfull); + device_state->csdatain = readl_relaxed(&device_data->base->csdatain); device_state->temp_cr = temp_cr; @@ -1354,14 +1357,14 @@ int hash_check_hw(struct hash_device_data *device_data) } /* Checking Peripheral Ids */ - if ((HASH_P_ID0 == readl(&device_data->base->periphid0)) - && (HASH_P_ID1 == readl(&device_data->base->periphid1)) - && (HASH_P_ID2 == readl(&device_data->base->periphid2)) - && (HASH_P_ID3 == readl(&device_data->base->periphid3)) - && (HASH_CELL_ID0 == readl(&device_data->base->cellid0)) - && (HASH_CELL_ID1 == readl(&device_data->base->cellid1)) - && (HASH_CELL_ID2 == readl(&device_data->base->cellid2)) - && (HASH_CELL_ID3 == readl(&device_data->base->cellid3)) + if ((HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0)) + && (HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1)) + && (HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2)) + && (HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3)) + && (HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0)) + && (HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1)) + && (HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2)) + && (HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) ) { ret = 0; goto out;; @@ -1409,7 +1412,7 @@ void hash_get_digest(struct hash_device_data *device_data, /* Copy result into digest array */ for (count = 0; count < loop_ctr; count++) { - temp_hx_val = readl(&device_data->base->hx[count]); + temp_hx_val = readl_relaxed(&device_data->base->hx[count]); digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF); digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF); digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF); -- cgit v1.2.3 From 76a9eb20c263866946d74df422a879fb21af54f3 Mon Sep 17 00:00:00 2001 From: Joakim Bech Date: Fri, 25 Nov 2011 14:56:06 +0100 Subject: ux500: TEE: Moved conversion of virt_to_phys If memory allocation failed when trying to copy memrefs from user space to kernel space we jumped to error handling code which tried to free the memrefs that had been allocated previously. 
The problem was that after each allocation we converted the allocated buffers to physical addresses, hence if any of the allocations except the first one failed we ended up trying to free a physical address. In this patch we have moved the translation from virtual to physical addresses to be done after all allocations have succeeded. ST-Ericsson ID: 374810, 374493, 374920 ST-Ericsson FOSS-OUT ID: NA ST-Ericsson Linux next: NA Change-Id: I3a109a9ebb46f74a2089916fa300bf6f4347501b Signed-off-by: Joakim Bech Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/39876 Reviewed-by: QATOOLS Reviewed-by: QABUILD Reviewed-by: QATEST Reviewed-by: Jonas ABERG --- drivers/tee/tee_driver.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/tee/tee_driver.c b/drivers/tee/tee_driver.c index 8bafcf1e755..feabe3615c1 100644 --- a/drivers/tee/tee_driver.c +++ b/drivers/tee/tee_driver.c @@ -110,6 +110,18 @@ static inline void memrefs_phys_to_virt(struct tee_session *ts) } } +static inline void memrefs_virt_to_phys(struct tee_session *ts) +{ + int i; + + for (i = 0; i < 4; ++i) { + if (ts->op->flags & (1 << i)) { + ts->op->shm[i].buffer = + (void *)virt_to_phys(ts->op->shm[i].buffer); + } + } +} + static int copy_memref_to_user(struct tee_operation *op, struct tee_operation *ubuf_op, int memref) @@ -170,9 +182,6 @@ static int copy_memref_to_kernel(struct tee_operation *op, op->shm[memref].size = kbuf_op->shm[memref].size; op->shm[memref].flags = kbuf_op->shm[memref].flags; - /* Secure world expects physical addresses. */ - op->shm[memref].buffer = (void *)virt_to_phys(op->shm[memref].buffer); - return 0; } @@ -238,7 +247,9 @@ static int invoke_command(struct tee_session *ts, } } - /* To call secure world */ + /* Secure world expects physical addresses. */ + memrefs_virt_to_phys(ts); + if (call_sec_world(ts, TEED_INVOKE)) { ret = -EINVAL; goto err; -- cgit v1.2.3 From 63cbfc54c34637e71604db1bad3288eeadf5338c Mon Sep 17 00:00:00 2001 From: Joakim Bech Date: Fri, 2 Dec 2011 15:37:04 +0100 Subject: ux500: Tee: Use hwmem for memrefs - Instead of kmalloc, hwmem is used when copying memrefs from user space to kernel space. - Fix for a invalid free when calls to secure world fails. - Remove free to memrefs etc in tee_release since they should be handled completely in tee_write. 
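For reference, the per-memref flow that replaces the old kmalloc copy can be condensed as below. It uses only the hwmem calls that appear in the diff (hwmem_alloc, hwmem_pin, hwmem_kmap and the kunmap/unpin/release teardown); the function name, header path and bare-bones error handling are illustrative. The physical address comes from the pinned chunk, and the kernel virtual address is kept separately because phys_to_virt() cannot be used on hwmem memory.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/hwmem.h>        /* ST-Ericsson hwmem allocator (assumed path) */

/* Allocate, pin and map one shared-memory buffer; illustrative only. */
static int example_setup_memref(size_t size, struct hwmem_alloc **out_alloc,
                                void **out_vaddr, void **out_pbuf)
{
        struct hwmem_mem_chunk chunk;
        size_t nchunks = 1;
        struct hwmem_alloc *alloc;
        void *vaddr;
        int ret;

        alloc = hwmem_alloc(size,
                            HWMEM_ALLOC_HINT_WRITE_COMBINE |
                            HWMEM_ALLOC_HINT_CACHED |
                            HWMEM_ALLOC_HINT_CACHE_WB |
                            HWMEM_ALLOC_HINT_CACHE_AOW |
                            HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE,
                            HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE |
                            HWMEM_ACCESS_IMPORT,
                            HWMEM_MEM_CONTIGUOUS_SYS);
        if (IS_ERR(alloc))
                return PTR_ERR(alloc);

        /* Pin the allocation; a contiguous buffer yields one mem chunk. */
        ret = hwmem_pin(alloc, &chunk, &nchunks);
        if (ret)
                goto release;

        vaddr = hwmem_kmap(alloc);
        if (!vaddr) {
                ret = -ENOMEM;
                goto unpin;
        }

        *out_alloc = alloc;
        *out_vaddr = vaddr;
        *out_pbuf = (void *)chunk.paddr; /* physical address for secure world */
        return 0;

unpin:
        hwmem_unpin(alloc);
release:
        hwmem_release(alloc);
        return ret;
}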
ST-Ericsson ID: 375595 ST-Ericsson FOSS-OUT ID: NA ST-Ericsson Linux next: NA Depends-On: I2067c34223ce49515c6b7ee8fcc4dcecb9119300 Change-Id: Ibab9edd618d3efe6d6d6302cbb9fb4bde987b99a Signed-off-by: Joakim Bech Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/42167 Reviewed-by: QATOOLS Reviewed-by: Jonas ABERG Reviewed-by: Berne HEBARK --- drivers/tee/tee_driver.c | 275 +++++++++++++++++++++++++++-------------------- include/linux/tee.h | 2 + 2 files changed, 159 insertions(+), 118 deletions(-) diff --git a/drivers/tee/tee_driver.c b/drivers/tee/tee_driver.c index feabe3615c1..442dec5fe06 100644 --- a/drivers/tee/tee_driver.c +++ b/drivers/tee/tee_driver.c @@ -15,8 +15,10 @@ #include #include #include +#include #define TEED_NAME "tee" +#define TEED_PFX "TEE: " #define TEED_STATE_OPEN_DEV 0 #define TEED_STATE_OPEN_SESSION 1 @@ -30,18 +32,23 @@ static int tee_read(struct file *filp, char __user *buffer, static int tee_write(struct file *filp, const char __user *buffer, size_t length, loff_t *offset); -static inline void set_emsg(struct tee_session *ts, u32 msg) +static inline void set_emsg(struct tee_session *ts, u32 msg, int line) { + pr_err(TEED_PFX "msg: 0x%08x at line: %d\n", msg, line); ts->err = msg; ts->origin = TEED_ORIGIN_DRIVER; } static void reset_session(struct tee_session *ts) { + int i; + ts->state = TEED_STATE_OPEN_DEV; ts->err = TEED_SUCCESS; ts->origin = TEED_ORIGIN_DRIVER; ts->id = 0; + for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; i++) + ts->vaddr[i] = NULL; ts->ta = NULL; ts->uuid = NULL; ts->cmd = 0; @@ -55,9 +62,9 @@ static int copy_ta(struct tee_session *ts, { ts->ta = kmalloc(ku_buffer->ta_size, GFP_KERNEL); if (ts->ta == NULL) { - pr_err("[%s] error, out of memory (ta)\n", + pr_err(TEED_PFX "[%s] error, out of memory (ta)\n", __func__); - set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY); + set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY, __LINE__); return -ENOMEM; } @@ -73,9 +80,9 @@ static int copy_uuid(struct tee_session *ts, ts->uuid = kmalloc(sizeof(struct tee_uuid), GFP_KERNEL); if (ts->uuid == NULL) { - pr_err("[%s] error, out of memory (uuid)\n", + pr_err(TEED_PFX "[%s] error, out of memory (uuid)\n", __func__); - set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY); + set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY, __LINE__); return -ENOMEM; } @@ -84,13 +91,22 @@ static int copy_uuid(struct tee_session *ts, return 0; } -static inline void free_operation(struct tee_session *ts) +static inline void free_operation(struct tee_session *ts, + struct hwmem_alloc **alloc, + int memrefs_allocated) { int i; - for (i = 0; i < 4; ++i) { - kfree(ts->op->shm[i].buffer); - ts->op->shm[i].buffer = NULL; + for (i = 0; i < memrefs_allocated; ++i) { + if (ts->op->shm[i].buffer) { + hwmem_kunmap(alloc[i]); + hwmem_unpin(alloc[i]); + hwmem_release(alloc[i]); + ts->op->shm[i].buffer = NULL; + } + + if (ts->vaddr[i]) + ts->vaddr[i] = NULL; } kfree(ts->op); @@ -101,7 +117,7 @@ static inline void memrefs_phys_to_virt(struct tee_session *ts) { int i; - for (i = 0; i < 4; ++i) { + for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++i) { if (ts->op->flags & (1 << i)) { ts->op->shm[i].buffer = phys_to_virt((unsigned long) @@ -114,7 +130,7 @@ static inline void memrefs_virt_to_phys(struct tee_session *ts) { int i; - for (i = 0; i < 4; ++i) { + for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++i) { if (ts->op->flags & (1 << i)) { ts->op->shm[i].buffer = (void *)virt_to_phys(ts->op->shm[i].buffer); @@ -122,34 +138,35 @@ static inline void memrefs_virt_to_phys(struct tee_session *ts) } } -static int copy_memref_to_user(struct 
tee_operation *op, - struct tee_operation *ubuf_op, +static int copy_memref_to_user(struct tee_session *ts, + struct tee_operation __user *ubuf_op, int memref) { unsigned long bytes_left; bytes_left = copy_to_user(ubuf_op->shm[memref].buffer, - op->shm[memref].buffer, - op->shm[memref].size); + ts->vaddr[memref], + ts->op->shm[memref].size); if (bytes_left != 0) { - pr_err("[%s] Failed to copy result to user space (%lu " + pr_err(TEED_PFX "[%s] failed to copy result to user space (%lu " "bytes left of buffer).\n", __func__, bytes_left); return bytes_left; } - bytes_left = put_user(op->shm[memref].size, &ubuf_op->shm[memref].size); + bytes_left = put_user(ts->op->shm[memref].size, + &ubuf_op->shm[memref].size); if (bytes_left != 0) { - pr_err("[%s] Failed to copy result to user space (%lu " + pr_err(TEED_PFX "[%s] failed to copy result to user space (%lu " "bytes left of size).\n", __func__, bytes_left); return -EINVAL; } - bytes_left = put_user(op->shm[memref].flags, + bytes_left = put_user(ts->op->shm[memref].flags, &ubuf_op->shm[memref].flags); if (bytes_left != 0) { - pr_err("[%s] Failed to copy result to user space (%lu " + pr_err(TEED_PFX "[%s] failed to copy result to user space (%lu " "bytes left of flags).\n", __func__, bytes_left); return -EINVAL; } @@ -157,30 +174,66 @@ static int copy_memref_to_user(struct tee_operation *op, return 0; } -static int copy_memref_to_kernel(struct tee_operation *op, - struct tee_operation *kbuf_op, +static int copy_memref_to_kernel(struct tee_session *ts, + struct tee_session *ku_buffer, + struct hwmem_alloc **alloc, int memref) { - /* Buffer freed in invoke_command if this function fails */ - op->shm[memref].buffer = kmalloc(kbuf_op->shm[memref].size, GFP_KERNEL); + int ret = -EINVAL; + size_t mem_chunks_length = 1; + struct hwmem_mem_chunk mem_chunks; - if (!op->shm[memref].buffer) { - pr_err("[%s] out of memory\n", __func__); - return -ENOMEM; + if (ku_buffer->op->shm[memref].size == 0) { + pr_err(TEED_PFX "[%s] error, size of memref is zero " + "(memref: %d)\n", __func__, memref); + return ret; + } + + alloc[memref] = hwmem_alloc(ku_buffer->op->shm[memref].size, + (HWMEM_ALLOC_HINT_WRITE_COMBINE | + HWMEM_ALLOC_HINT_CACHED | + HWMEM_ALLOC_HINT_CACHE_WB | + HWMEM_ALLOC_HINT_CACHE_AOW | + HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE), + (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE | + HWMEM_ACCESS_IMPORT), + HWMEM_MEM_CONTIGUOUS_SYS); + + if (IS_ERR(alloc[memref])) { + pr_err(TEED_PFX "[%s] couldn't alloc hwmem_alloc (memref: %d)" + "\n", __func__, memref); + return PTR_ERR(alloc[memref]); + } + + ret = hwmem_pin(alloc[memref], &mem_chunks, &mem_chunks_length); + if (ret) { + pr_err(TEED_PFX "[%s] couldn't pin buffer (memref: %d)\n", + __func__, memref); + return ret; } /* - * Copy shared memory operations to a local kernel - * buffer if they are of type input. + * Since phys_to_virt is not working for hwmem memory we are storing the + * virtual addresses in separate array in tee_session and we keep the + * address of the physical pointers in the memref buffer. */ - if (kbuf_op->shm[memref].flags & TEEC_MEM_INPUT) { - memcpy(op->shm[memref].buffer, - kbuf_op->shm[memref].buffer, - kbuf_op->shm[memref].size); + ts->op->shm[memref].buffer = (void *)mem_chunks.paddr; + ts->vaddr[memref] = hwmem_kmap(alloc[memref]); + + /* Buffer unmapped/freed in invoke_command if this function fails. 
*/ + if (!ts->op->shm[memref].buffer || !ts->vaddr[memref]) { + pr_err(TEED_PFX "[%s] out of memory (memref: %d)\n", + __func__, memref); + return -ENOMEM; } - op->shm[memref].size = kbuf_op->shm[memref].size; - op->shm[memref].flags = kbuf_op->shm[memref].flags; + if (ku_buffer->op->shm[memref].flags & TEEC_MEM_INPUT) + memcpy(ts->vaddr[memref], + ku_buffer->op->shm[memref].buffer, + ku_buffer->op->shm[memref].size); + + ts->op->shm[memref].size = ku_buffer->op->shm[memref].size; + ts->op->shm[memref].flags = ku_buffer->op->shm[memref].flags; return 0; } @@ -191,7 +244,7 @@ static int open_tee_device(struct tee_session *ts, int ret; if (ku_buffer->driver_cmd != TEED_OPEN_SESSION) { - set_emsg(ts, TEED_ERROR_BAD_STATE); + set_emsg(ts, TEED_ERROR_BAD_STATE, __LINE__); return -EINVAL; } @@ -200,7 +253,7 @@ static int open_tee_device(struct tee_session *ts, } else if (ku_buffer->uuid) { ret = copy_uuid(ts, ku_buffer); } else { - set_emsg(ts, TEED_ERROR_COMMUNICATION); + set_emsg(ts, TEED_ERROR_COMMUNICATION, __LINE__); return -EINVAL; } @@ -215,63 +268,61 @@ static int invoke_command(struct tee_session *ts, { int i; int ret = 0; - struct tee_operation *kbuf_op = - (struct tee_operation *)ku_buffer->op; + /* To keep track of which memrefs to free when failure occurs. */ + int memrefs_allocated = 0; + struct hwmem_alloc *alloc[TEEC_CONFIG_PAYLOAD_REF_COUNT]; ts->op = kmalloc(sizeof(struct tee_operation), GFP_KERNEL); if (!ts->op) { if (ts->op == NULL) { - pr_err("[%s] error, out of memory " + pr_err(TEED_PFX "[%s] error, out of memory " "(op)\n", __func__); - set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY); + set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY, __LINE__); return -ENOMEM; } } - /* Copy memrefs to kernel space. */ - ts->op->flags = kbuf_op->flags; + ts->op->flags = ku_buffer->op->flags; ts->cmd = ku_buffer->cmd; - for (i = 0; i < 4; ++i) { - /* We only want to copy memrefs in use. */ - if (kbuf_op->flags & (1 << i)) { - ret = copy_memref_to_kernel(ts->op, kbuf_op, i); - - if (ret) + for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++i) { + ts->op->shm[i].buffer = NULL; + memrefs_allocated++; + + /* We only want to copy memrefs in use to kernel space. */ + if (ku_buffer->op->flags & (1 << i)) { + ret = copy_memref_to_kernel(ts, ku_buffer, alloc, i); + if (ret) { + pr_err(TEED_PFX "[%s] failed copy memref[%d] " + "to kernel", __func__, i); goto err; + } } else { - ts->op->shm[i].buffer = NULL; ts->op->shm[i].size = 0; ts->op->shm[i].flags = 0; } } - /* Secure world expects physical addresses. */ - memrefs_virt_to_phys(ts); - if (call_sec_world(ts, TEED_INVOKE)) { + set_emsg(ts, TEED_ERROR_COMMUNICATION, __LINE__); ret = -EINVAL; goto err; } - /* - * Convert physical addresses back to virtual address so the - * kernel can free the buffers when closing the session. 
- */ - memrefs_phys_to_virt(ts); - - for (i = 0; i < 4; ++i) { - if ((kbuf_op->flags & (1 << i)) && - (kbuf_op->shm[i].flags & TEEC_MEM_OUTPUT)) { - struct tee_operation *ubuf_op = - (struct tee_operation *)u_buffer->op; - - ret = copy_memref_to_user(ts->op, ubuf_op, i); + for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++i) { + if ((ku_buffer->op->flags & (1 << i)) && + (ku_buffer->op->shm[i].flags & TEEC_MEM_OUTPUT)) { + ret = copy_memref_to_user(ts, u_buffer->op, i); + if (ret) { + pr_err(TEED_PFX "[%s] failed copy memref[%d] " + "to user", __func__, i); + goto err; + } } } err: - free_operation(ts); + free_operation(ts, alloc, memrefs_allocated); return ret; } @@ -279,15 +330,15 @@ err: static int tee_open(struct inode *inode, struct file *filp) { struct tee_session *ts; - filp->private_data = kmalloc(sizeof(struct tee_session), GFP_KERNEL); - if (filp->private_data == NULL) + if (filp->private_data == NULL) { + pr_err(TEED_PFX "[%s] allocation failed", __func__); return -ENOMEM; + } - ts = (struct tee_session *) (filp->private_data); - + ts = (struct tee_session *)(filp->private_data); reset_session(ts); return 0; @@ -295,28 +346,6 @@ static int tee_open(struct inode *inode, struct file *filp) static int tee_release(struct inode *inode, struct file *filp) { - struct tee_session *ts; - int i; - - ts = (struct tee_session *) (filp->private_data); - - if (ts == NULL) - goto no_ts; - - if (ts->op) { - for (i = 0; i < 4; ++i) { - kfree(ts->op->shm[i].buffer); - ts->op->shm[i].buffer = NULL; - } - } - - kfree(ts->op); - ts->op = NULL; - - kfree(ts->ta); - ts->ta = NULL; - -no_ts: kfree(filp->private_data); filp->private_data = NULL; @@ -334,15 +363,15 @@ static int tee_read(struct file *filp, char __user *buffer, struct tee_session *ts; if (length != sizeof(struct tee_read)) { - pr_err("[%s] error, incorrect input length\n", + pr_err(TEED_PFX "[%s] error, incorrect input length\n", __func__); return -EINVAL; } - ts = (struct tee_session *) (filp->private_data); + ts = (struct tee_session *)(filp->private_data); if (ts == NULL) { - pr_err("[%s] error, private_data not " + pr_err(TEED_PFX "[%s] error, private_data not " "initialized\n", __func__); return -EINVAL; } @@ -355,7 +384,7 @@ static int tee_read(struct file *filp, char __user *buffer, mutex_unlock(&sync); if (copy_to_user(buffer, &buf, length)) { - pr_err("[%s] error, copy_to_user failed!\n", + pr_err(TEED_PFX "[%s] error, copy_to_user failed!\n", __func__); return -EINVAL; } @@ -364,31 +393,31 @@ static int tee_read(struct file *filp, char __user *buffer, } /* - * Called when a process writes to a dev file + * Called when a process writes to a dev file. 
  */
 static int tee_write(struct file *filp, const char __user *buffer,
 		     size_t length, loff_t *offset)
 {
 	struct tee_session ku_buffer;
 	struct tee_session *ts;
-	int ret = length;
+	int ret = 0;
 
 	if (length != sizeof(struct tee_session)) {
-		pr_err("[%s] error, incorrect input length\n",
+		pr_err(TEED_PFX "[%s] error, incorrect input length\n",
 			__func__);
 		return -EINVAL;
 	}
 
 	if (copy_from_user(&ku_buffer, buffer, length)) {
-		pr_err("[%s] error, tee_session "
+		pr_err(TEED_PFX "[%s] error, tee_session "
 		       "copy_from_user failed\n", __func__);
 		return -EINVAL;
 	}
 
-	ts = (struct tee_session *) (filp->private_data);
+	ts = (struct tee_session *)(filp->private_data);
 
 	if (ts == NULL) {
-		pr_err("[%s] error, private_data not "
+		pr_err(TEED_PFX "[%s] error, private_data not "
 		       "initialized\n", __func__);
 		return -EINVAL;
 	}
@@ -409,8 +438,11 @@ static int tee_write(struct file *filp, const char __user *buffer,
 
 		case TEED_CLOSE_SESSION:
 			/* no caching implemented yet... */
-			if (call_sec_world(ts, TEED_CLOSE_SESSION))
+			if (call_sec_world(ts, TEED_CLOSE_SESSION)) {
+				set_emsg(ts, TEED_ERROR_COMMUNICATION,
+					 __LINE__);
 				ret = -EINVAL;
+			}
 
 			kfree(ts->ta);
 			ts->ta = NULL;
@@ -419,13 +451,13 @@ static int tee_write(struct file *filp, const char __user *buffer,
 			break;
 
 		default:
-			set_emsg(ts, TEED_ERROR_BAD_PARAMETERS);
+			set_emsg(ts, TEED_ERROR_BAD_PARAMETERS, __LINE__);
 			ret = -EINVAL;
 		}
 		break;
 
 	default:
-		pr_err("[%s] unknown state\n", __func__);
-		set_emsg(ts, TEED_ERROR_BAD_STATE);
+		pr_err(TEED_PFX "[%s] unknown state\n", __func__);
+		set_emsg(ts, TEED_ERROR_BAD_STATE, __LINE__);
 		ret = -EINVAL;
 	}
 
@@ -433,10 +465,12 @@ static int tee_write(struct file *filp, const char __user *buffer,
 	 * We expect that ret has value zero when reaching the end here.
 	 * If it has any other value some error must have occured.
 	 */
-	if (!ret)
+	if (!ret) {
 		ret = length;
-	else
+	} else {
+		pr_err(TEED_PFX "[%s], forcing error to -EINVAL\n", __func__);
 		ret = -EINVAL;
+	}
 
 	mutex_unlock(&sync);
 
@@ -465,7 +499,8 @@ int teec_open_session(struct tee_context *context,
 	int res = TEED_SUCCESS;
 
 	if (session == NULL || destination == NULL) {
-		pr_err("[%s] session or destination == NULL\n", __func__);
+		pr_err(TEED_PFX "[%s] session or destination == NULL\n",
+		       __func__);
 		if (error_origin != NULL)
 			*error_origin = TEED_ORIGIN_DRIVER;
 		res = TEED_ERROR_BAD_PARAMETERS;
@@ -476,12 +511,12 @@ int teec_open_session(struct tee_context *context,
 
 	/*
 	 * Open a session towards an application already loaded inside
-	 * the TEE
+	 * the TEE.
 	 */
 	session->uuid = kmalloc(sizeof(struct tee_uuid), GFP_KERNEL);
 
 	if (session->uuid == NULL) {
-		pr_err("[%s] error, out of memory (uuid)\n",
+		pr_err(TEED_PFX "[%s] error, out of memory (uuid)\n",
 		       __func__);
 		if (error_origin != NULL)
 			*error_origin = TEED_ORIGIN_DRIVER;
@@ -506,13 +541,14 @@ int teec_close_session(struct tee_session *session)
 	mutex_lock(&sync);
 
 	if (session == NULL) {
-		pr_err("[%s] error, session == NULL\n", __func__);
+		pr_err(TEED_PFX "[%s] error, session == NULL\n", __func__);
 		res = TEED_ERROR_BAD_PARAMETERS;
 		goto exit;
 	}
 
 	if (call_sec_world(session, TEED_CLOSE_SESSION)) {
-		pr_err("[%s] error, call_sec_world failed\n", __func__);
+		pr_err(TEED_PFX "[%s] error, call_sec_world failed\n",
+		       __func__);
 		res = TEED_ERROR_GENERIC;
 		goto exit;
 	}
@@ -539,7 +575,8 @@ int teec_invoke_command(
 	mutex_lock(&sync);
 
 	if (session == NULL || operation == NULL || error_origin == NULL) {
-		pr_err("[%s] error, input parameters == NULL\n", __func__);
+		pr_err(TEED_PFX "[%s] error, input parameters == NULL\n",
+		       __func__);
 		if (error_origin != NULL)
 			*error_origin = TEED_ORIGIN_DRIVER;
 		res = TEED_ERROR_BAD_PARAMETERS;
@@ -561,13 +598,15 @@ int teec_invoke_command(
 	 * Call secure world
 	 */
 	if (call_sec_world(session, TEED_INVOKE)) {
-		pr_err("[%s] error, call_sec_world failed\n", __func__);
+		pr_err(TEED_PFX "[%s] error, call_sec_world failed\n",
+		       __func__);
 		if (error_origin != NULL)
 			*error_origin = TEED_ORIGIN_DRIVER;
 		res = TEED_ERROR_GENERIC;
 	}
 
 	if (session->err != TEED_SUCCESS) {
-		pr_err("[%s] error, call_sec_world failed\n", __func__);
+		pr_err(TEED_PFX "[%s] error, call_sec_world failed\n",
+		       __func__);
 		if (error_origin != NULL)
 			*error_origin = session->origin;
 		res = session->err;
@@ -632,7 +671,7 @@ static int __init tee_init(void)
 	err = misc_register(&tee_dev);
 
 	if (err) {
-		pr_err("[%s] error %d adding character device "
+		pr_err(TEED_PFX "[%s] error %d adding character device "
 		       "TEE\n", __func__, err);
 	}
diff --git a/include/linux/tee.h b/include/linux/tee.h
index 8b71224ac77..c0f8f11d58d 100644
--- a/include/linux/tee.h
+++ b/include/linux/tee.h
@@ -160,6 +160,7 @@ struct tee_context {};
  * @err: Error code (as in Global Platform TEE Client API spec)
  * @origin: Origin for the error code (also from spec).
  * @id: Implementation defined type, 0 if not used.
+ * @vaddr: Virtual address for the memrefs.
  * @ta: The trusted application.
  * @uuid: The uuid for the trusted application.
  * @cmd: The command to be executed in the trusted application.
@@ -179,6 +180,7 @@ struct tee_session {
 	uint32_t err;
 	uint32_t origin;
 	uint32_t id;
+	uint32_t *vaddr[TEEC_CONFIG_PAYLOAD_REF_COUNT];
 	void *ta;
 	struct tee_uuid *uuid;
 	unsigned int cmd;
-- 
cgit v1.2.3


From 3ee97c4b21460d30f853e936808e4074fc86da14 Mon Sep 17 00:00:00 2001
From: Michel JAOUEN
Date: Thu, 19 Jan 2012 18:21:50 +0100
Subject: mach-ux500, drivers: u9540 security fix

ST-Ericsson ID: 409625
ST-Ericsson FOSS-OUT ID: trivial
ST-Ericsson Linux next: NA
Depends-On: Iff4121811d2afbf581eec0905077c58bff96ce09

Change-Id: I43d5d593a4b6183d39322851db930e687177eead
Signed-off-by: Michel JAOUEN
Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/45317
Reviewed-by: QATOOLS
Reviewed-by: QABUILD
Reviewed-by: Srinidhi KASAGAR
Reviewed-by: Berne HEBARK
Reviewed-by: Linus WALLEIJ
---
 arch/arm/mach-ux500/tee_ux500.c  | 6 +++++-
 drivers/crypto/ux500/cryp/cryp.c | 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c
index 160ca529261..0fc10c0a744 100644
--- a/arch/arm/mach-ux500/tee_ux500.c
+++ b/arch/arm/mach-ux500/tee_ux500.c
@@ -24,7 +24,11 @@ static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...)
 	va_list ap;
 	u32 ret;
 
-	if (cpu_is_u8500v20_or_later())
+	if (cpu_is_u9540())
+		hw_sec_rom_pub_bridge = (bridge_func)
+			((u32)IO_ADDRESS_DB9540_ROM
+			(U9540_BOOT_ROM_BASE + 0x17300));
+	else if (cpu_is_u8500v20_or_later())
 		hw_sec_rom_pub_bridge = (bridge_func)
 			((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x17300));
 	else if (cpu_is_u5500())
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index 211200fed34..cec92af2f73 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -37,7 +37,7 @@ int cryp_check(struct cryp_device_data *device_data)
 	if (NULL == device_data)
 		return -EINVAL;
 
-	if (cpu_is_u8500())
+	if (cpu_is_u8500() || cpu_is_u9540())
 		peripheralID2 = CRYP_PERIPHERAL_ID2_DB8500;
 	else if (cpu_is_u5500())
 		peripheralID2 = CRYP_PERIPHERAL_ID2_DB5500;
-- 
cgit v1.2.3


From aeb2742534db86281ed3a6dfc295888c4893f4a2 Mon Sep 17 00:00:00 2001
From: Jonas Aaberg
Date: Tue, 20 Dec 2011 08:24:42 +0100
Subject: ARM: ux500: tee: Always assume v2.0 or later

Remove check for pre v2.0 db8500 since pre v2.0 hardware is no longer
supported.

ST-Ericsson Linux next: -
ST-Ericsson ID: 370799
ST-Ericsson FOSS-OUT ID: Trivial

Change-Id: I5d1ab944c6d85cc39eb748a9bc585c2c6ca5e5ac
Signed-off-by: Jonas Aaberg
Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/43189
Reviewed-by: QABUILD
Reviewed-by: Joakim BECH
Reviewed-by: QATEST
---
 arch/arm/mach-ux500/tee_ux500.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c
index 0fc10c0a744..9fa985a48c8 100644
--- a/arch/arm/mach-ux500/tee_ux500.c
+++ b/arch/arm/mach-ux500/tee_ux500.c
@@ -28,7 +28,7 @@ static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...)
 		hw_sec_rom_pub_bridge = (bridge_func)
 			((u32)IO_ADDRESS_DB9540_ROM
 			(U9540_BOOT_ROM_BASE + 0x17300));
-	else if (cpu_is_u8500v20_or_later())
+	else if (cpu_is_u8500())
 		hw_sec_rom_pub_bridge = (bridge_func)
 			((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x17300));
 	else if (cpu_is_u5500())
-- 
cgit v1.2.3


From 7ab5ecfab5802502955e1a9789eeb8e2fc3a985f Mon Sep 17 00:00:00 2001
From: Philippe Langlais
Date: Thu, 19 Apr 2012 14:50:07 +0200
Subject: crypto: ux500: hash: device_prep_slave_sg() has a new context param in 3.4

Signed-off-by: Philippe Langlais
---
 drivers/crypto/ux500/hash/hash_core.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 9cc50e91c9e..05a06b00ca0 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -174,7 +174,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
 			"(TO_DEVICE)", __func__);
 	desc = channel->device->device_prep_slave_sg(channel,
 			ctx->device->dma.sg, ctx->device->dma.sg_len,
-			direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+			direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT, ctx);
 	if (!desc) {
 		dev_err(ctx->device->dev,
 			"[%s]: device_prep_slave_sg() failed!", __func__);
-- 
cgit v1.2.3
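
Background on the last hunk: from Linux 3.4 the dmaengine device_prep_slave_sg()
callback takes a trailing context pointer, which is why hash_set_dma_transfer()
now passes ctx as the final argument. A minimal sketch of a caller against that
3.4-era prototype follows; the helper name prep_tx and the use of a
driver-private ctx pointer are illustrative assumptions, not taken from the
patches above.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Sketch: prepare a memory-to-device slave transfer with the 3.4 dmaengine
 * API, whose device_prep_slave_sg() ends with a void *context argument.
 */
static int prep_tx(struct dma_chan *chan, struct scatterlist *sg,
		   unsigned int sg_len, void *ctx)
{
	struct dma_async_tx_descriptor *desc;

	desc = chan->device->device_prep_slave_sg(chan, sg, sg_len,
			DMA_MEM_TO_DEV,
			DMA_CTRL_ACK | DMA_PREP_INTERRUPT, ctx);
	if (!desc)
		return -ENOMEM;	/* descriptor could not be prepared */

	return 0;
}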