Diffstat (limited to 'drivers/crypto/ux500/hash/hash_core.c')
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c  |  602
1 file changed, 433 insertions(+), 169 deletions(-)
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 08a89eeb601..b2a58dccf76 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -22,6 +22,7 @@
#include <linux/crypto.h>
#include <linux/regulator/dbx500-prcmu.h>
+#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <crypto/internal/hash.h>
@@ -29,12 +30,17 @@
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
+#include <mach/crypto-ux500.h>
#include <mach/hardware.h>
#include "hash_alg.h"
#define DEV_DBG_NAME "hashX hashX:"
+static int hash_mode;
+module_param(hash_mode, int, 0);
+MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
+
/**
* Pre-calculated empty message digests.
*/
@@ -113,6 +119,101 @@ static void release_hash_device(struct hash_device_data *device_data)
up(&driver_data.device_allocation);
}
+static void hash_dma_setup_channel(struct hash_device_data *device_data,
+ struct device *dev)
+{
+ struct hash_platform_data *platform_data = dev->platform_data;
+ dma_cap_zero(device_data->dma.mask);
+ dma_cap_set(DMA_SLAVE, device_data->dma.mask);
+
+ device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
+ device_data->dma.chan_mem2hash =
+ dma_request_channel(device_data->dma.mask,
+ platform_data->dma_filter,
+ device_data->dma.cfg_mem2hash);
+
+ init_completion(&device_data->dma.complete);
+}
+
+static void hash_dma_callback(void *data)
+{
+ struct hash_ctx *ctx = (struct hash_ctx *) data;
+
+ complete(&ctx->device->dma.complete);
+}
+
+static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
+ int len, enum dma_data_direction direction)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *channel = NULL;
+ dma_cookie_t cookie;
+
+ if (direction != DMA_TO_DEVICE) {
+ dev_err(ctx->device->dev, "[%s] Invalid DMA direction",
+ __func__);
+ return -EFAULT;
+ }
+
+ sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
+
+ channel = ctx->device->dma.chan_mem2hash;
+ ctx->device->dma.sg = sg;
+ ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
+ ctx->device->dma.sg, ctx->device->dma.nents,
+ direction);
+
+ if (!ctx->device->dma.sg_len) {
+ dev_err(ctx->device->dev,
+ "[%s]: Could not map the sg list (TO_DEVICE)",
+ __func__);
+ return -EFAULT;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+ "(TO_DEVICE)", __func__);
+ desc = channel->device->device_prep_slave_sg(channel,
+ ctx->device->dma.sg, ctx->device->dma.sg_len,
+ direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(ctx->device->dev,
+ "[%s]: device_prep_slave_sg() failed!", __func__);
+ return -EFAULT;
+ }
+
+ desc->callback = hash_dma_callback;
+ desc->callback_param = ctx;
+
+ cookie = desc->tx_submit(desc);
+ dma_async_issue_pending(channel);
+
+ return 0;
+}
+
+static void hash_dma_done(struct hash_ctx *ctx)
+{
+ struct dma_chan *chan;
+
+ chan = ctx->device->dma.chan_mem2hash;
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
+ ctx->device->dma.sg_len, DMA_TO_DEVICE);
+
+}
+
+static int hash_dma_write(struct hash_ctx *ctx,
+ struct scatterlist *sg, int len)
+{
+ int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
+ if (error) {
+ dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() "
+ "failed", __func__);
+ return error;
+ }
+
+ return len;
+}
+
/**
* get_empty_message_digest - Returns a pre-calculated digest for
* the empty message.
@@ -197,8 +298,6 @@ static int hash_disable_power(
int ret = 0;
struct device *dev = device_data->dev;
- dev_dbg(dev, "[%s]", __func__);
-
spin_lock(&device_data->power_state_lock);
if (!device_data->power_state)
goto out;
@@ -236,7 +335,6 @@ static int hash_enable_power(
{
int ret = 0;
struct device *dev = device_data->dev;
- dev_dbg(dev, "[%s]", __func__);
spin_lock(&device_data->power_state_lock);
if (!device_data->power_state) {
@@ -287,8 +385,6 @@ static int hash_get_device_data(struct hash_ctx *ctx,
struct klist_node *device_node;
struct hash_device_data *local_device_data = NULL;
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
/* Wait until a device is available */
ret = down_interruptible(&driver_data.device_allocation);
if (ret)
@@ -390,8 +486,6 @@ static int init_hash_hw(struct hash_device_data *device_data,
{
int ret = 0;
- dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32)ctx);
-
ret = hash_setconfiguration(device_data, &ctx->config);
if (ret) {
dev_err(device_data->dev, "[%s] hash_setconfiguration() "
@@ -408,6 +502,61 @@ static int init_hash_hw(struct hash_device_data *device_data,
}
/**
+ * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
+ *
+ * @sg: Scatterlist.
+ * @size: Size in bytes.
+ * @aligned: True if sg data aligned to work in DMA mode.
+ *
+ * Reentrancy: Non Re-entrant
+ */
+static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
+{
+ int nents = 0;
+ bool aligned_data = true;
+
+ while (size > 0 && sg) {
+ nents++;
+ size -= sg->length;
+
+ /* hash_set_dma_transfer will align last nent */
+ if (aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE) ||
+ (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) &&
+ size > 0))
+ aligned_data = false;
+
+ sg = sg_next(sg);
+ }
+
+ if (aligned)
+ *aligned = aligned_data;
+
+ if (size != 0)
+ return -EFAULT;
+
+ return nents;
+}
+
+/**
+ * hash_dma_valid_data - checks for dma valid sg data.
+ * @sg: Scatterlist.
+ * @datasize: Datasize in bytes.
+ *
+ * NOTE! This function checks for dma valid sg data, since dma
+ * only accept datasizes of even wordsize.
+ */
+static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
+{
+ bool aligned;
+
+ /* Need to include at least one nent, else error */
+ if (hash_get_nents(sg, datasize, &aligned) < 1)
+ return false;
+
+ return aligned;
+}
+
+/**
* hash_init - Common hash init function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*
@@ -418,13 +567,39 @@ static int hash_init(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes);
-
if (!ctx->key)
ctx->keylen = 0;
memset(&ctx->state, 0, sizeof(struct hash_state));
ctx->updated = 0;
+ if (hash_mode == HASH_MODE_DMA) {
+ if ((ctx->config.oper_mode == HASH_OPER_MODE_HMAC) &&
+ cpu_is_u5500()) {
+ pr_debug(DEV_DBG_NAME " [%s] HMAC and DMA not working "
+ "on u5500, directing to CPU mode.",
+ __func__);
+ ctx->dma_mode = false; /* Don't use DMA in this case */
+ goto out;
+ }
+
+ if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
+ ctx->dma_mode = false; /* Don't use DMA in this case */
+
+ pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct "
+ "to CPU mode for data size < %d",
+ __func__, HASH_DMA_ALIGN_SIZE);
+ } else {
+ if (hash_dma_valid_data(req->src, req->nbytes)) {
+ ctx->dma_mode = true;
+ } else {
+ ctx->dma_mode = false;
+ pr_debug(DEV_DBG_NAME " [%s] DMA mode, but "
+ "direct to CPU mode for "
+ "non-aligned data", __func__);
+ }
+ }
+ }
+out:
return 0;
}
@@ -474,9 +649,6 @@ static void hash_processblock(
static void hash_messagepad(struct hash_device_data *device_data,
const u32 *message, u8 index_bytes)
{
- dev_dbg(device_data->dev, "[%s] (bytes in final msg=%d))",
- __func__, index_bytes);
-
/*
* Clear hash str register, only clear NBLW
* since DCAL will be reset by hardware.
@@ -561,7 +733,6 @@ int hash_setconfiguration(struct hash_device_data *device_data,
struct hash_config *config)
{
int ret = 0;
- dev_dbg(device_data->dev, "[%s] ", __func__);
if (config->algorithm != HASH_ALGO_SHA1 &&
config->algorithm != HASH_ALGO_SHA256)
@@ -574,13 +745,6 @@ int hash_setconfiguration(struct hash_device_data *device_data,
HASH_SET_DATA_FORMAT(config->data_format);
/*
- * Empty message bit. This bit is needed when the hash input data
- * contain the empty message. Always set in current impl. but with
- * no impact on data different than empty message.
- */
- HASH_SET_BITS(&device_data->base->cr, HASH_CR_EMPTYMSG_MASK);
-
- /*
* ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
*/
switch (config->algorithm) {
@@ -652,7 +816,6 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
/* HW and SW initializations */
/* Note: there is no need to initialize buffer and digest members */
- dev_dbg(device_data->dev, "[%s] ", __func__);
while (device_data->base->str & HASH_STR_DCAL_MASK)
cpu_relax();
@@ -688,6 +851,7 @@ int hash_process_data(
msg_length = 0;
} else {
if (ctx->updated) {
+
ret = hash_resume_state(device_data,
&ctx->state);
if (ret) {
@@ -696,7 +860,6 @@ int hash_process_data(
" failed!", __func__);
goto out;
}
-
} else {
ret = init_hash_hw(device_data, ctx);
if (ret) {
@@ -732,6 +895,7 @@ int hash_process_data(
}
hash_incrementlength(ctx, HASH_BLOCK_SIZE);
data_buffer += (HASH_BLOCK_SIZE - *index);
+
msg_length -= (HASH_BLOCK_SIZE - *index);
*index = 0;
@@ -751,6 +915,236 @@ out:
}
/**
+ * hash_dma_final - The hash dma final function for SHA1/SHA256.
+ * @req: The hash request for the job.
+ */
+static int hash_dma_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct hash_device_data *device_data;
+ u8 digest[SHA256_DIGEST_SIZE];
+ int bytes_written = 0;
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "hash_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ if (ctx->updated) {
+ ret = hash_resume_state(device_data, &ctx->state);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_resume_state() "
+ "failed!", __func__);
+ goto out_power;
+ }
+
+ }
+
+ if (!ctx->updated) {
+ ret = hash_setconfiguration(device_data, &ctx->config);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_setconfiguration() failed!",
+ __func__);
+ goto out_power;
+ }
+
+ /* Enable DMA input */
+ if (hash_mode != HASH_MODE_DMA || !ctx->dma_mode) {
+ HASH_CLEAR_BITS(&device_data->base->cr,
+ HASH_CR_DMAE_MASK);
+ } else {
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_DMAE_MASK);
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_PRIVN_MASK);
+ }
+
+ HASH_INITIALIZE;
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
+ hash_hw_write_key(device_data, ctx->key, ctx->keylen);
+
+ /* Number of bits in last word = (nbytes * 8) % 32 */
+ HASH_SET_NBLW((req->nbytes * 8) % 32);
+ ctx->updated = 1;
+ }
+
+ /* Store the nents in the dma struct. */
+ ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
+ if (!ctx->device->dma.nents) {
+ dev_err(device_data->dev, "[%s] "
+ "ctx->device->dma.nents = 0", __func__);
+ goto out_power;
+ }
+
+ bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
+ if (bytes_written != req->nbytes) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_dma_write() failed!", __func__);
+ goto out_power;
+ }
+
+ wait_for_completion(&ctx->device->dma.complete);
+ hash_dma_done(ctx);
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
+ unsigned int keylen = ctx->keylen;
+ u8 *key = ctx->key;
+
+ dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
+ ctx->keylen);
+ hash_hw_write_key(device_data, key, keylen);
+ }
+
+ hash_get_digest(device_data, digest, ctx->config.algorithm);
+ memcpy(req->result, digest, ctx->digestsize);
+
+out_power:
+ /* Disable power (and clock) */
+ if (hash_disable_power(device_data, false))
+ dev_err(device_data->dev, "[%s] hash_disable_power() failed!",
+ __func__);
+
+out:
+ release_hash_device(device_data);
+
+ /**
+ * Allocated in setkey, and only used in HMAC.
+ */
+ kfree(ctx->key);
+
+ return ret;
+}
+
+/**
+ * hash_hw_final - The final hash calculation function
+ * @req: The hash request for the job.
+ */
+int hash_hw_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct hash_device_data *device_data;
+ u8 digest[SHA256_DIGEST_SIZE];
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "hash_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ if (ctx->updated) {
+ ret = hash_resume_state(device_data, &ctx->state);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_resume_state() "
+ "failed!", __func__);
+ goto out_power;
+ }
+ } else if (req->nbytes == 0 && ctx->keylen == 0) {
+ u8 zero_hash[SHA256_DIGEST_SIZE];
+ u32 zero_hash_size = 0;
+ bool zero_digest = false;
+ /**
+ * Use a pre-calculated empty message digest
+ * (workaround since hw return zeroes, hw bug!?)
+ */
+ ret = get_empty_message_digest(device_data, &zero_hash[0],
+ &zero_hash_size, &zero_digest);
+ if (!ret && likely(zero_hash_size == ctx->digestsize) &&
+ zero_digest) {
+ memcpy(req->result, &zero_hash[0], ctx->digestsize);
+ goto out_power;
+ } else if (!ret && !zero_digest) {
+ dev_dbg(device_data->dev, "[%s] HMAC zero msg with "
+ "key, continue...", __func__);
+ } else {
+ dev_err(device_data->dev, "[%s] ret=%d, or wrong "
+ "digest size? %s", __func__, ret,
+ (zero_hash_size == ctx->digestsize) ?
+ "true" : "false");
+ /* Return error */
+ goto out_power;
+ }
+ } else if (req->nbytes == 0 && ctx->keylen > 0) {
+ dev_err(device_data->dev, "[%s] Empty message with "
+ "keylength > 0, NOT supported.", __func__);
+ goto out_power;
+ }
+
+ if (!ctx->updated) {
+ ret = init_hash_hw(device_data, ctx);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] init_hash_hw() "
+ "failed!", __func__);
+ goto out_power;
+ }
+ }
+
+ if (ctx->state.index) {
+ hash_messagepad(device_data, ctx->state.buffer,
+ ctx->state.index);
+ } else {
+ HASH_SET_DCAL;
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+ }
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
+ unsigned int keylen = ctx->keylen;
+ u8 *key = ctx->key;
+
+ dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
+ ctx->keylen);
+ hash_hw_write_key(device_data, key, keylen);
+ }
+
+ hash_get_digest(device_data, digest, ctx->config.algorithm);
+ memcpy(req->result, digest, ctx->digestsize);
+
+out_power:
+ /* Disable power (and clock) */
+ if (hash_disable_power(device_data, false))
+ dev_err(device_data->dev, "[%s] hash_disable_power() failed!",
+ __func__);
+
+out:
+ release_hash_device(device_data);
+
+ /**
+ * Allocated in setkey, and only used in HMAC.
+ */
+ kfree(ctx->key);
+
+ return ret;
+}
+
+/**
* hash_hw_update - Updates current HASH computation hashing another part of
* the message.
* @req: Byte array containing the message to be hashed (caller
@@ -770,8 +1164,6 @@ int hash_hw_update(struct ahash_request *req)
struct crypto_hash_walk walk;
int msg_length = crypto_hash_walk_first(req, &walk);
- pr_debug(DEV_DBG_NAME " [%s] datalength: %d", __func__, msg_length);
-
/* Empty message ("") is correct indata */
if (msg_length == 0)
return ret;
@@ -818,9 +1210,9 @@ int hash_hw_update(struct ahash_request *req)
}
ctx->state.index = index;
-
dev_dbg(device_data->dev, "[%s] indata length=%d, "
- "bin=%d))", __func__, ctx->state.index, ctx->state.bit_index);
+ "bin=%d))", __func__, ctx->state.index, ctx->state.bit_index);
+
out_power:
/* Disable power (and clock) */
if (hash_disable_power(device_data, false))
@@ -846,9 +1238,6 @@ int hash_resume_state(struct hash_device_data *device_data,
s32 count;
int hash_mode = HASH_OPER_MODE_HASH;
- dev_dbg(device_data->dev, "[%s] (state(0x%x)))",
- __func__, (u32) device_state);
-
if (NULL == device_state) {
dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
__func__);
@@ -909,9 +1298,6 @@ int hash_save_state(struct hash_device_data *device_data,
u32 count;
int hash_mode = HASH_OPER_MODE_HASH;
- dev_dbg(device_data->dev, "[%s] state(0x%x)))",
- __func__, (u32) device_state);
-
if (NULL == device_state) {
dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
__func__);
@@ -961,8 +1347,6 @@ int hash_check_hw(struct hash_device_data *device_data)
{
int ret = 0;
- dev_dbg(device_data->dev, "[%s] ", __func__);
-
if (NULL == device_data) {
ret = -EPERM;
dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
@@ -1041,17 +1425,18 @@ void hash_get_digest(struct hash_device_data *device_data,
static int ahash_update(struct ahash_request *req)
{
int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- pr_debug(DEV_DBG_NAME " [%s] ", __func__);
+ if (hash_mode != HASH_MODE_DMA || !ctx->dma_mode)
+ ret = hash_hw_update(req);
+ /* Skip update for DMA, all data will be passed to DMA in final */
- ret = hash_hw_update(req);
if (ret) {
pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!",
__func__);
- goto out;
}
-out:
return ret;
}
@@ -1064,103 +1449,18 @@ static int ahash_final(struct ahash_request *req)
int ret = 0;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct hash_device_data *device_data;
- u8 digest[SHA256_DIGEST_SIZE];
-
- pr_debug(DEV_DBG_NAME " [%s] ", __func__);
- ret = hash_get_device_data(ctx, &device_data);
- if (ret)
- return ret;
+ pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes);
- dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);
+ if ((hash_mode == HASH_MODE_DMA) && ctx->dma_mode)
+ ret = hash_dma_final(req);
+ else
+ ret = hash_hw_final(req);
- /* Enable device power (and clock) */
- ret = hash_enable_power(device_data, false);
if (ret) {
- dev_err(device_data->dev, "[%s]: "
- "hash_enable_power() failed!", __func__);
- goto out;
- }
-
- if (ctx->updated) {
- ret = hash_resume_state(device_data, &ctx->state);
-
- if (ret) {
- dev_err(device_data->dev, "[%s] hash_resume_state() "
- "failed!", __func__);
- goto out_power;
- }
- } else if (req->nbytes == 0 && ctx->keylen == 0) {
- u8 zero_hash[SHA256_DIGEST_SIZE];
- u32 zero_hash_size = 0;
- bool zero_digest = false;
- /**
- * Use a pre-calculated empty message digest
- * (workaround since hw return zeroes, hw bug!?)
- */
- ret = get_empty_message_digest(device_data, &zero_hash[0],
- &zero_hash_size, &zero_digest);
- if (!ret && likely(zero_hash_size == ctx->digestsize) &&
- zero_digest) {
- memcpy(req->result, &zero_hash[0], ctx->digestsize);
- goto out_power;
- } else if (!ret && !zero_digest) {
- dev_dbg(device_data->dev, "[%s] HMAC zero msg with "
- "key, continue...", __func__);
- } else {
- dev_err(device_data->dev, "[%s] ret=%d, or wrong "
- "digest size? %s", __func__, ret,
- (zero_hash_size == ctx->digestsize) ?
- "true" : "false");
- /* Return error */
- goto out_power;
- }
- }
-
- if (!ctx->updated) {
- ret = init_hash_hw(device_data, ctx);
- if (ret) {
- dev_err(device_data->dev, "[%s] init_hash_hw() "
- "failed!", __func__);
- goto out_power;
- }
- }
-
- if (ctx->state.index) {
- hash_messagepad(device_data, ctx->state.buffer,
- ctx->state.index);
- } else {
- HASH_SET_DCAL;
- while (device_data->base->str & HASH_STR_DCAL_MASK)
- cpu_relax();
- }
-
- if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
- unsigned int keylen = ctx->keylen;
- u8 *key = ctx->key;
-
- dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
- ctx->keylen);
- hash_hw_write_key(device_data, key, keylen);
- }
-
- hash_get_digest(device_data, digest, ctx->config.algorithm);
- memcpy(req->result, digest, ctx->digestsize);
-
-out_power:
- /* Disable power (and clock) */
- if (hash_disable_power(device_data, false))
- dev_err(device_data->dev, "[%s] hash_disable_power() failed!",
+ pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed",
__func__);
-
-out:
- release_hash_device(device_data);
-
- /**
- * Allocated in setkey, and only used in HMAC.
- */
- kfree(ctx->key);
+ }
return ret;
}
@@ -1171,8 +1471,6 @@ static int hash_setkey(struct crypto_ahash *tfm,
int ret = 0;
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- pr_debug(DEV_DBG_NAME " [%s] keylen: %d", __func__, keylen);
-
/**
* Freed in final.
*/
@@ -1194,8 +1492,6 @@ static int ahash_sha1_init(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx);
-
ctx->config.data_format = HASH_DATA_8_BITS;
ctx->config.algorithm = HASH_ALGO_SHA1;
ctx->config.oper_mode = HASH_OPER_MODE_HASH;
@@ -1209,8 +1505,6 @@ static int ahash_sha256_init(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx);
-
ctx->config.data_format = HASH_DATA_8_BITS;
ctx->config.algorithm = HASH_ALGO_SHA256;
ctx->config.oper_mode = HASH_OPER_MODE_HASH;
@@ -1223,8 +1517,6 @@ static int ahash_sha1_digest(struct ahash_request *req)
{
int ret2, ret1;
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
ret1 = ahash_sha1_init(req);
if (ret1)
goto out;
@@ -1240,8 +1532,6 @@ static int ahash_sha256_digest(struct ahash_request *req)
{
int ret2, ret1;
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
ret1 = ahash_sha256_init(req);
if (ret1)
goto out;
@@ -1258,8 +1548,6 @@ static int hmac_sha1_init(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx);
-
ctx->config.data_format = HASH_DATA_8_BITS;
ctx->config.algorithm = HASH_ALGO_SHA1;
ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
@@ -1273,8 +1561,6 @@ static int hmac_sha256_init(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx);
-
ctx->config.data_format = HASH_DATA_8_BITS;
ctx->config.algorithm = HASH_ALGO_SHA256;
ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
@@ -1287,8 +1573,6 @@ static int hmac_sha1_digest(struct ahash_request *req)
{
int ret2, ret1;
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
ret1 = hmac_sha1_init(req);
if (ret1)
goto out;
@@ -1304,8 +1588,6 @@ static int hmac_sha256_digest(struct ahash_request *req)
{
int ret2, ret1;
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
ret1 = hmac_sha256_init(req);
if (ret1)
goto out;
@@ -1320,16 +1602,12 @@ out:
static int hmac_sha1_setkey(struct crypto_ahash *tfm,
const u8 *key, unsigned int keylen)
{
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}
static int hmac_sha256_setkey(struct crypto_ahash *tfm,
const u8 *key, unsigned int keylen)
{
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}
@@ -1425,8 +1703,6 @@ static int ahash_algs_register_all(struct hash_device_data *device_data)
int i;
int count;
- dev_dbg(device_data->dev, "[%s]", __func__);
-
for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++) {
ret = crypto_register_ahash(ux500_ahash_algs[i]);
if (ret) {
@@ -1451,8 +1727,6 @@ static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
int i;
- dev_dbg(device_data->dev, "[%s]", __func__);
-
for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++)
crypto_unregister_ahash(ux500_ahash_algs[i]);
}
@@ -1468,7 +1742,6 @@ static int ux500_hash_probe(struct platform_device *pdev)
struct hash_device_data *device_data;
struct device *dev = &pdev->dev;
- dev_dbg(dev, "[%s] (pdev=0x%x)", __func__, (u32) pdev);
device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC);
if (!device_data) {
dev_dbg(dev, "[%s] kzalloc() failed!", __func__);
@@ -1505,7 +1778,6 @@ static int ux500_hash_probe(struct platform_device *pdev)
/* Enable power for HASH1 hardware block */
device_data->regulator = ux500_regulator_get(dev);
-
if (IS_ERR(device_data->regulator)) {
dev_err(dev, "[%s] regulator_get() failed!", __func__);
ret = PTR_ERR(device_data->regulator);
@@ -1534,6 +1806,9 @@ static int ux500_hash_probe(struct platform_device *pdev)
goto out_power;
}
+ if (hash_mode == HASH_MODE_DMA)
+ hash_dma_setup_channel(device_data, dev);
+
platform_set_drvdata(pdev, device_data);
/* Put the new device into the device list... */
@@ -1585,8 +1860,6 @@ static int ux500_hash_remove(struct platform_device *pdev)
struct hash_device_data *device_data;
struct device *dev = &pdev->dev;
- dev_dbg(dev, "[%s] (pdev=0x%x)", __func__, (u32) pdev);
-
device_data = platform_get_drvdata(pdev);
if (!device_data) {
dev_err(dev, "[%s]: platform_get_drvdata() failed!",
@@ -1646,8 +1919,6 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
struct resource *res = NULL;
struct hash_device_data *device_data;
- dev_dbg(&pdev->dev, "[%s]", __func__);
-
device_data = platform_get_drvdata(pdev);
if (!device_data) {
dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
@@ -1701,8 +1972,6 @@ static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
struct hash_device_data *device_data;
struct hash_ctx *temp_ctx = NULL;
- dev_dbg(&pdev->dev, "[%s]", __func__);
-
device_data = platform_get_drvdata(pdev);
if (!device_data) {
dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
@@ -1740,8 +2009,6 @@ static int ux500_hash_resume(struct platform_device *pdev)
struct hash_device_data *device_data;
struct hash_ctx *temp_ctx = NULL;
- dev_dbg(&pdev->dev, "[%s]", __func__);
-
device_data = platform_get_drvdata(pdev);
if (!device_data) {
dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
@@ -1783,7 +2050,6 @@ static struct platform_driver hash_driver = {
*/
static int __init ux500_hash_mod_init(void)
{
- pr_debug(DEV_DBG_NAME " [%s] is called!", __func__);
klist_init(&driver_data.device_list, NULL, NULL);
/* Initialize the semaphore to 0 devices (locked state) */
sema_init(&driver_data.device_allocation, 0);
@@ -1796,8 +2062,6 @@ static int __init ux500_hash_mod_init(void)
*/
static void __exit ux500_hash_mod_fini(void)
{
- pr_debug(DEV_DBG_NAME " [%s] is called!", __func__);
-
platform_driver_unregister(&hash_driver);
return;
}
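
For reference, the mem-to-device dmaengine flow this patch wires up (request a DMA_SLAVE channel, map the scatterlist, prepare a slave-sg descriptor, submit it, and wait for the descriptor callback to signal a completion) boils down to the sequence sketched below. This is a minimal illustration, not part of the patch: my_dma_to_device(), my_dma_done() and the filter/filter_param arguments are hypothetical placeholders, and it mirrors the older device_prep_slave_sg()/DMA_TO_DEVICE API used by the driver at this point in time.

/*
 * Minimal sketch of a one-shot mem-to-device slave DMA transfer using the
 * dmaengine API as exercised by this patch.  Helper names are hypothetical.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>

static void my_dma_done(void *data)
{
	/* Runs from the DMA driver when the descriptor completes. */
	complete((struct completion *)data);
}

static int my_dma_to_device(dma_filter_fn filter, void *filter_param,
			    struct scatterlist *sg, int nents)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int sg_len;

	/* Ask the dmaengine core for a channel capable of slave transfers. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, filter, filter_param);
	if (!chan)
		return -ENODEV;

	/* Map the scatterlist for device-bound DMA. */
	sg_len = dma_map_sg(chan->device->dev, sg, nents, DMA_TO_DEVICE);
	if (!sg_len) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	/* Prepare, hook up the callback, submit and kick the channel. */
	desc = chan->device->device_prep_slave_sg(chan, sg, sg_len,
						  DMA_TO_DEVICE,
						  DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc)
		goto unmap;
	desc->callback = my_dma_done;
	desc->callback_param = &done;
	desc->tx_submit(desc);
	dma_async_issue_pending(chan);

	/* Block until my_dma_done() fires. */
	wait_for_completion(&done);

unmap:
	dma_unmap_sg(chan->device->dev, sg, sg_len, DMA_TO_DEVICE);
	dma_release_channel(chan);
	return desc ? 0 : -EIO;
}

In the driver itself the channel is requested once at probe time (hash_dma_setup_channel) with the filter function and channel configuration supplied through platform data, and the prepare/submit/wait steps are split across hash_set_dma_transfer(), hash_dma_callback() and hash_dma_final().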