path: root/drivers/crypto/ccp/ccp-ops.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2014-04-03 09:28:16 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-04-03 09:28:16 -0700
commit    59ecc26004e77e100c700b1d0da7502b0fdadb46 (patch)
tree      1faec47bda8439cc2cbe3bd9bf15756e67808e63 /drivers/crypto/ccp/ccp-ops.c
parent    bea803183e12a1c78a12ec70907174d13d958333 (diff)
parent    8ceee72808d1ae3fb191284afc2257a2be964725 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 3.15:
   - Added 3DES driver for OMAP4/AM43xx
   - Added AVX2 acceleration for SHA
   - Added hash-only AEAD algorithms in caam
   - Removed tegra driver as it is not functioning and the hardware is too slow
   - Allow blkcipher walks over AEAD (needed for ARM)
   - Fixed unprotected FPU/SSE access in ghash-clmulni-intel
   - Fixed highmem crash in omap-sham
   - Add (zero entropy) randomness when initialising hardware RNGs
   - Fixed unaligned ahash completion functions
   - Added soft module dependency for crc32c for initrds that use crc32c"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (60 commits)
  crypto: ghash-clmulni-intel - use C implementation for setkey()
  crypto: x86/sha1 - reduce size of the AVX2 asm implementation
  crypto: x86/sha1 - fix stack alignment of AVX2 variant
  crypto: x86/sha1 - re-enable the AVX variant
  crypto: sha - SHA1 transform x86_64 AVX2
  crypto: crypto_wq - Fix late crypto work queue initialization
  crypto: caam - add missing key_dma unmap
  crypto: caam - add support for aead null encryption
  crypto: testmgr - add aead null encryption test vectors
  crypto: export NULL algorithms defines
  crypto: caam - remove error propagation handling
  crypto: hash - Simplify the ahash_finup implementation
  crypto: hash - Pull out the functions to save/restore request
  crypto: hash - Fix the pointer voodoo in unaligned ahash
  crypto: caam - Fix first parameter to caam_init_rng
  crypto: omap-sham - Map SG pages if they are HIGHMEM before accessing
  crypto: caam - Dynamic memory allocation for caam_rng_ctx object
  crypto: allow blkcipher walks over AEAD data
  crypto: remove direct blkcipher_walk dependency on transform
  hwrng: add randomness to system from rng sources
  ...
Diffstat (limited to 'drivers/crypto/ccp/ccp-ops.c')
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 108
1 file changed, 105 insertions, 3 deletions
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 71ed3ade7e12..9ae006d69df4 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -23,6 +23,7 @@
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
#include "ccp-dev.h"
@@ -132,6 +133,27 @@ struct ccp_op {
} u;
};
+/* SHA initial context values */
+static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+ cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
+ cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
+ cpu_to_be32(SHA1_H4), 0, 0, 0,
+};
+
+static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+ cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
+ cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
+ cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
+ cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
+};
+
+static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+ cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
+ cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
+ cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
+ cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
+};
+
/* The CCP cannot perform zero-length sha operations so the caller
* is required to buffer data for the final operation. However, a
* sha operation for a message with a total length of zero is valid
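Aside: the constraint described in this comment matters because a hash of a zero-length message is still well defined, so a driver that cannot issue a zero-length hardware operation has to return the precomputed empty-message digest instead of running the engine. A quick user-space illustration of that fixed value, assuming OpenSSL is available (not the kernel crypto API):

#include <stdio.h>
#include <openssl/sha.h>	/* assumption: OpenSSL is available */

int main(void)
{
	unsigned char md[SHA256_DIGEST_LENGTH];
	int i;

	/* SHA-256 of the empty message is a fixed, well-known value
	 * (e3b0c442...); hardware that rejects zero-length operations
	 * must supply such a constant rather than run the engine.
	 */
	SHA256((const unsigned char *)"", 0, md);
	for (i = 0; i < SHA256_DIGEST_LENGTH; i++)
		printf("%02x", md[i]);
	printf("\n");
	return 0;
}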
@@ -1411,7 +1433,27 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (ret)
return ret;
- ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+ if (sha->first) {
+ const __be32 *init;
+
+ switch (sha->type) {
+ case CCP_SHA_TYPE_1:
+ init = ccp_sha1_init;
+ break;
+ case CCP_SHA_TYPE_224:
+ init = ccp_sha224_init;
+ break;
+ case CCP_SHA_TYPE_256:
+ init = ccp_sha256_init;
+ break;
+ default:
+ ret = -EINVAL;
+ goto e_ctx;
+ }
+ memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
+ } else
+ ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+
ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -1451,6 +1493,66 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+ if (sha->final && sha->opad) {
+ /* HMAC operation, recursively perform final SHA */
+ struct ccp_cmd hmac_cmd;
+ struct scatterlist sg;
+ u64 block_size, digest_size;
+ u8 *hmac_buf;
+
+ switch (sha->type) {
+ case CCP_SHA_TYPE_1:
+ block_size = SHA1_BLOCK_SIZE;
+ digest_size = SHA1_DIGEST_SIZE;
+ break;
+ case CCP_SHA_TYPE_224:
+ block_size = SHA224_BLOCK_SIZE;
+ digest_size = SHA224_DIGEST_SIZE;
+ break;
+ case CCP_SHA_TYPE_256:
+ block_size = SHA256_BLOCK_SIZE;
+ digest_size = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ ret = -EINVAL;
+ goto e_data;
+ }
+
+ if (sha->opad_len != block_size) {
+ ret = -EINVAL;
+ goto e_data;
+ }
+
+ hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
+ if (!hmac_buf) {
+ ret = -ENOMEM;
+ goto e_data;
+ }
+ sg_init_one(&sg, hmac_buf, block_size + digest_size);
+
+ scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
+ memcpy(hmac_buf + block_size, ctx.address, digest_size);
+
+ memset(&hmac_cmd, 0, sizeof(hmac_cmd));
+ hmac_cmd.engine = CCP_ENGINE_SHA;
+ hmac_cmd.u.sha.type = sha->type;
+ hmac_cmd.u.sha.ctx = sha->ctx;
+ hmac_cmd.u.sha.ctx_len = sha->ctx_len;
+ hmac_cmd.u.sha.src = &sg;
+ hmac_cmd.u.sha.src_len = block_size + digest_size;
+ hmac_cmd.u.sha.opad = NULL;
+ hmac_cmd.u.sha.opad_len = 0;
+ hmac_cmd.u.sha.first = 1;
+ hmac_cmd.u.sha.final = 1;
+ hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;
+
+ ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
+ if (ret)
+ cmd->engine_error = hmac_cmd.engine_error;
+
+ kfree(hmac_buf);
+ }
+
e_data:
ccp_free_data(&src, cmd_q);
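For reference, the HMAC finalization added in this hunk is the standard outer-hash step, HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)): the caller-supplied opad block is copied in front of the freshly computed inner digest and one more SHA pass is run over the combined buffer. A minimal user-space sketch of the same step, assuming OpenSSL's SHA256() is available; the helper name is illustrative, not the kernel API:

#include <string.h>
#include <openssl/sha.h>	/* assumption: OpenSSL, not the kernel crypto API */

/* Outer HMAC step: out = SHA256(opad_block || inner_digest).
 * opad_block is the caller-prepared (key XOR opad) block and inner_digest
 * is H((key XOR ipad) || message), which matches the hmac_buf layout the
 * CCP code builds before issuing its final SHA pass.
 */
static void hmac_sha256_outer(const unsigned char opad_block[SHA256_CBLOCK],
			      const unsigned char inner_digest[SHA256_DIGEST_LENGTH],
			      unsigned char out[SHA256_DIGEST_LENGTH])
{
	unsigned char buf[SHA256_CBLOCK + SHA256_DIGEST_LENGTH];

	memcpy(buf, opad_block, SHA256_CBLOCK);
	memcpy(buf + SHA256_CBLOCK, inner_digest, SHA256_DIGEST_LENGTH);
	SHA256(buf, sizeof(buf), out);
}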
@@ -1666,8 +1768,8 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
op.dst.type = CCP_MEMTYPE_SYSTEM;
op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
- op.src.u.dma.offset = dst.sg_wa.sg_used;
- op.src.u.dma.length = op.src.u.dma.length;
+ op.dst.u.dma.offset = dst.sg_wa.sg_used;
+ op.dst.u.dma.length = op.src.u.dma.length;
ret = ccp_perform_passthru(&op);
if (ret) {