crypto: arm64/sha256 - implement library instead of shash
Instead of providing crypto_shash algorithms for the arch-optimized SHA-256 code, implement the SHA-256 library. This is much simpler, it makes the SHA-256 library functions arch-optimized, and it fixes the longstanding issue where the arch-optimized SHA-256 was disabled by default. SHA-256 still remains available through crypto_shash, but individual architectures no longer need to handle it.

Remove support for SHA-256 finalization from the ARMv8 CE assembly code, since the library does not yet support architecture-specific overrides of the finalization. (Support for that has been omitted for now, for simplicity and because it usually isn't performance-critical.)

To match sha256_blocks_arch(), change the type of the nblocks parameter of the assembly functions from int or 'unsigned int' to size_t. Update the ARMv8 CE assembly function accordingly. The scalar and NEON assembly functions already treated it as size_t.

While renaming the assembly files, also fix the naming quirks where "sha2" meant sha256, and "sha512" meant both sha256 and sha512.

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent 642cfc0680
commit 6e36be511d
@@ -1737,7 +1737,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_CHACHA20_NEON=m
 CONFIG_CRYPTO_GHASH_ARM64_CE=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
-CONFIG_CRYPTO_SHA2_ARM64_CE=y
 CONFIG_CRYPTO_SHA512_ARM64_CE=m
 CONFIG_CRYPTO_SHA3_ARM64=m
 CONFIG_CRYPTO_SM3_ARM64_CE=m

@@ -36,25 +36,6 @@ config CRYPTO_SHA1_ARM64_CE
 	  Architecture: arm64 using:
 	  - ARMv8 Crypto Extensions
 
-config CRYPTO_SHA256_ARM64
-	tristate "Hash functions: SHA-224 and SHA-256"
-	select CRYPTO_HASH
-	help
-	  SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
-	  Architecture: arm64
-
-config CRYPTO_SHA2_ARM64_CE
-	tristate "Hash functions: SHA-224 and SHA-256 (ARMv8 Crypto Extensions)"
-	depends on KERNEL_MODE_NEON
-	select CRYPTO_HASH
-	select CRYPTO_SHA256_ARM64
-	help
-	  SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
-	  Architecture: arm64 using:
-	  - ARMv8 Crypto Extensions
-
 config CRYPTO_SHA512_ARM64
 	tristate "Hash functions: SHA-384 and SHA-512"
 	select CRYPTO_HASH

@@ -8,9 +8,6 @@
 obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o
 sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
 
-obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o
-sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
-
 obj-$(CONFIG_CRYPTO_SHA512_ARM64_CE) += sha512-ce.o
 sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o
 

@@ -56,9 +53,6 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o
 obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
 aes-neon-blk-y := aes-glue-neon.o aes-neon.o
 
-obj-$(CONFIG_CRYPTO_SHA256_ARM64) += sha256-arm64.o
-sha256-arm64-y := sha256-glue.o sha256-core.o
-
 obj-$(CONFIG_CRYPTO_SHA512_ARM64) += sha512-arm64.o
 sha512-arm64-y := sha512-glue.o sha512-core.o
 

@@ -74,10 +68,7 @@ aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $(<) void $(@)
 
-$(obj)/%-core.S: $(src)/%-armv8.pl
+$(obj)/sha512-core.S: $(src)/../lib/crypto/sha2-armv8.pl
 	$(call cmd,perlasm)
 
-$(obj)/sha256-core.S: $(src)/sha512-armv8.pl
-	$(call cmd,perlasm)
-
-clean-files += sha256-core.S sha512-core.S
+clean-files += sha512-core.S

@@ -1,138 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
- *
- * Copyright (C) 2014 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#include <asm/neon.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <linux/cpufeature.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_ALIAS_CRYPTO("sha256");
-
-struct sha256_ce_state {
-	struct crypto_sha256_state sst;
-	u32			finalize;
-};
-
-extern const u32 sha256_ce_offsetof_count;
-extern const u32 sha256_ce_offsetof_finalize;
-
-asmlinkage int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
-				     int blocks);
-
-static void sha256_ce_transform(struct crypto_sha256_state *sst, u8 const *src,
-				int blocks)
-{
-	while (blocks) {
-		int rem;
-
-		kernel_neon_begin();
-		rem = __sha256_ce_transform(container_of(sst,
-							 struct sha256_ce_state,
-							 sst), src, blocks);
-		kernel_neon_end();
-		src += (blocks - rem) * SHA256_BLOCK_SIZE;
-		blocks = rem;
-	}
-}
-
-const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
-					      sst.count);
-const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
-						 finalize);
-
-static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
-			    unsigned int len)
-{
-	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
-
-	sctx->finalize = 0;
-	return sha256_base_do_update_blocks(desc, data, len,
-					    sha256_ce_transform);
-}
-
-static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
-			   unsigned int len, u8 *out)
-{
-	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
-	bool finalize = !(len % SHA256_BLOCK_SIZE) && len;
-
-	/*
-	 * Allow the asm code to perform the finalization if there is no
-	 * partial data and the input is a round multiple of the block size.
-	 */
-	sctx->finalize = finalize;
-
-	if (finalize)
-		sha256_base_do_update_blocks(desc, data, len,
-					     sha256_ce_transform);
-	else
-		sha256_base_do_finup(desc, data, len, sha256_ce_transform);
-	return sha256_base_finish(desc, out);
-}
-
-static int sha256_ce_digest(struct shash_desc *desc, const u8 *data,
-			    unsigned int len, u8 *out)
-{
-	sha256_base_init(desc);
-	return sha256_ce_finup(desc, data, len, out);
-}
-
-static struct shash_alg algs[] = { {
-	.init			= sha224_base_init,
-	.update			= sha256_ce_update,
-	.finup			= sha256_ce_finup,
-	.descsize		= sizeof(struct sha256_ce_state),
-	.statesize		= sizeof(struct crypto_sha256_state),
-	.digestsize		= SHA224_DIGEST_SIZE,
-	.base			= {
-		.cra_name		= "sha224",
-		.cra_driver_name	= "sha224-ce",
-		.cra_priority		= 200,
-		.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
-					  CRYPTO_AHASH_ALG_FINUP_MAX,
-		.cra_blocksize		= SHA256_BLOCK_SIZE,
-		.cra_module		= THIS_MODULE,
-	}
-}, {
-	.init			= sha256_base_init,
-	.update			= sha256_ce_update,
-	.finup			= sha256_ce_finup,
-	.digest			= sha256_ce_digest,
-	.descsize		= sizeof(struct sha256_ce_state),
-	.statesize		= sizeof(struct crypto_sha256_state),
-	.digestsize		= SHA256_DIGEST_SIZE,
-	.base			= {
-		.cra_name		= "sha256",
-		.cra_driver_name	= "sha256-ce",
-		.cra_priority		= 200,
-		.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
-					  CRYPTO_AHASH_ALG_FINUP_MAX,
-		.cra_blocksize		= SHA256_BLOCK_SIZE,
-		.cra_module		= THIS_MODULE,
-	}
-} };
-
-static int __init sha2_ce_mod_init(void)
-{
-	return crypto_register_shashes(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit sha2_ce_mod_fini(void)
-{
-	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-}
-
-module_cpu_feature_match(SHA2, sha2_ce_mod_init);
-module_exit(sha2_ce_mod_fini);

@@ -1,156 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64
- *
- * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
- */
-
-#include <asm/neon.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <linux/cpufeature.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
-MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_ALIAS_CRYPTO("sha256");
-
-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
-					unsigned int num_blks);
-EXPORT_SYMBOL(sha256_block_data_order);
-
-static void sha256_arm64_transform(struct crypto_sha256_state *sst,
-				   u8 const *src, int blocks)
-{
-	sha256_block_data_order(sst->state, src, blocks);
-}
-
-asmlinkage void sha256_block_neon(u32 *digest, const void *data,
-				  unsigned int num_blks);
-
-static void sha256_neon_transform(struct crypto_sha256_state *sst,
-				  u8 const *src, int blocks)
-{
-	kernel_neon_begin();
-	sha256_block_neon(sst->state, src, blocks);
-	kernel_neon_end();
-}
-
-static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
-				      unsigned int len)
-{
-	return sha256_base_do_update_blocks(desc, data, len,
-					    sha256_arm64_transform);
-}
-
-static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
-				     unsigned int len, u8 *out)
-{
-	sha256_base_do_finup(desc, data, len, sha256_arm64_transform);
-	return sha256_base_finish(desc, out);
-}
-
-static struct shash_alg algs[] = { {
-	.digestsize		= SHA256_DIGEST_SIZE,
-	.init			= sha256_base_init,
-	.update			= crypto_sha256_arm64_update,
-	.finup			= crypto_sha256_arm64_finup,
-	.descsize		= sizeof(struct crypto_sha256_state),
-	.base.cra_name		= "sha256",
-	.base.cra_driver_name	= "sha256-arm64",
-	.base.cra_priority	= 125,
-	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
-				  CRYPTO_AHASH_ALG_FINUP_MAX,
-	.base.cra_blocksize	= SHA256_BLOCK_SIZE,
-	.base.cra_module	= THIS_MODULE,
-}, {
-	.digestsize		= SHA224_DIGEST_SIZE,
-	.init			= sha224_base_init,
-	.update			= crypto_sha256_arm64_update,
-	.finup			= crypto_sha256_arm64_finup,
-	.descsize		= sizeof(struct crypto_sha256_state),
-	.base.cra_name		= "sha224",
-	.base.cra_driver_name	= "sha224-arm64",
-	.base.cra_priority	= 125,
-	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
-				  CRYPTO_AHASH_ALG_FINUP_MAX,
-	.base.cra_blocksize	= SHA224_BLOCK_SIZE,
-	.base.cra_module	= THIS_MODULE,
-} };
-
-static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
-			      unsigned int len)
-{
-	return sha256_base_do_update_blocks(desc, data, len,
-					    sha256_neon_transform);
-}
-
-static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
-			     unsigned int len, u8 *out)
-{
-	if (len >= SHA256_BLOCK_SIZE) {
-		int remain = sha256_update_neon(desc, data, len);
-
-		data += len - remain;
-		len = remain;
-	}
-	sha256_base_do_finup(desc, data, len, sha256_neon_transform);
-	return sha256_base_finish(desc, out);
-}
-
-static struct shash_alg neon_algs[] = { {
-	.digestsize		= SHA256_DIGEST_SIZE,
-	.init			= sha256_base_init,
-	.update			= sha256_update_neon,
-	.finup			= sha256_finup_neon,
-	.descsize		= sizeof(struct crypto_sha256_state),
-	.base.cra_name		= "sha256",
-	.base.cra_driver_name	= "sha256-arm64-neon",
-	.base.cra_priority	= 150,
-	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
-				  CRYPTO_AHASH_ALG_FINUP_MAX,
-	.base.cra_blocksize	= SHA256_BLOCK_SIZE,
-	.base.cra_module	= THIS_MODULE,
-}, {
-	.digestsize		= SHA224_DIGEST_SIZE,
-	.init			= sha224_base_init,
-	.update			= sha256_update_neon,
-	.finup			= sha256_finup_neon,
-	.descsize		= sizeof(struct crypto_sha256_state),
-	.base.cra_name		= "sha224",
-	.base.cra_driver_name	= "sha224-arm64-neon",
-	.base.cra_priority	= 150,
-	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
-				  CRYPTO_AHASH_ALG_FINUP_MAX,
-	.base.cra_blocksize	= SHA224_BLOCK_SIZE,
-	.base.cra_module	= THIS_MODULE,
-} };
-
-static int __init sha256_mod_init(void)
-{
-	int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
-
-	if (ret)
-		return ret;
-
-	if (cpu_have_named_feature(ASIMD)) {
-		ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
-		if (ret)
-			crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-	}
-	return ret;
-}
-
-static void __exit sha256_mod_fini(void)
-{
-	if (cpu_have_named_feature(ASIMD))
-		crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
-	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-}
-
-module_init(sha256_mod_init);
-module_exit(sha256_mod_fini);

@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 poly1305-core.S
+sha256-core.S

@@ -12,3 +12,8 @@ config CRYPTO_POLY1305_NEON
 	depends on KERNEL_MODE_NEON
 	default CRYPTO_LIB_POLY1305
 	select CRYPTO_ARCH_HAVE_LIB_POLY1305
+
+config CRYPTO_SHA256_ARM64
+	tristate
+	default CRYPTO_LIB_SHA256
+	select CRYPTO_ARCH_HAVE_LIB_SHA256

@@ -8,10 +8,17 @@ poly1305-neon-y := poly1305-core.o poly1305-glue.o
 AFLAGS_poly1305-core.o += -Dpoly1305_init=poly1305_block_init_arch
 AFLAGS_poly1305-core.o += -Dpoly1305_emit=poly1305_emit_arch
 
+obj-$(CONFIG_CRYPTO_SHA256_ARM64) += sha256-arm64.o
+sha256-arm64-y := sha256.o sha256-core.o
+sha256-arm64-$(CONFIG_KERNEL_MODE_NEON) += sha256-ce.o
+
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $(<) void $(@)
 
 $(obj)/%-core.S: $(src)/%-armv8.pl
 	$(call cmd,perlasm)
 
-clean-files += poly1305-core.S
+$(obj)/sha256-core.S: $(src)/sha2-armv8.pl
+	$(call cmd,perlasm)
+
+clean-files += poly1305-core.S sha256-core.S

@@ -71,8 +71,8 @@
 	.word		0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
 
 	/*
-	 * int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
-	 *			     int blocks)
+	 * size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
+	 *				const u8 *data, size_t nblocks);
 	 */
 	.text
 SYM_FUNC_START(__sha256_ce_transform)

@@ -86,20 +86,16 @@ SYM_FUNC_START(__sha256_ce_transform)
 	/* load state */
 	ld1		{dgav.4s, dgbv.4s}, [x0]
 
-	/* load sha256_ce_state::finalize */
-	ldr_l		w4, sha256_ce_offsetof_finalize, x4
-	ldr		w4, [x0, x4]
-
 	/* load input */
 0:	ld1		{v16.4s-v19.4s}, [x1], #64
-	sub		w2, w2, #1
+	sub		x2, x2, #1
 
 CPU_LE(	rev32		v16.16b, v16.16b	)
 CPU_LE(	rev32		v17.16b, v17.16b	)
 CPU_LE(	rev32		v18.16b, v18.16b	)
 CPU_LE(	rev32		v19.16b, v19.16b	)
 
-1:	add		t0.4s, v16.4s, v0.4s
+	add		t0.4s, v16.4s, v0.4s
 	mov		dg0v.16b, dgav.16b
 	mov		dg1v.16b, dgbv.16b
 

@@ -128,30 +124,12 @@ CPU_LE(	rev32		v19.16b, v19.16b	)
 	add		dgbv.4s, dgbv.4s, dg1v.4s
 
 	/* handled all input blocks? */
-	cbz		w2, 2f
-	cond_yield	3f, x5, x6
+	cbz		x2, 1f
+	cond_yield	1f, x5, x6
 	b		0b
 
-	/*
-	 * Final block: add padding and total bit count.
-	 * Skip if the input size was not a round multiple of the block size,
-	 * the padding is handled by the C code in that case.
-	 */
-2:	cbz		x4, 3f
-	ldr_l		w4, sha256_ce_offsetof_count, x4
-	ldr		x4, [x0, x4]
-	movi		v17.2d, #0
-	mov		x8, #0x80000000
-	movi		v18.2d, #0
-	ror		x7, x4, #29		// ror(lsl(x4, 3), 32)
-	fmov		d16, x8
-	mov		x4, #0
-	mov		v19.d[0], xzr
-	mov		v19.d[1], x7
-	b		1b
-
 	/* store new state */
-3:	st1		{dgav.4s, dgbv.4s}, [x0]
-	mov		w0, w2
+1:	st1		{dgav.4s, dgbv.4s}, [x0]
+	mov		x0, x2
 	ret
 SYM_FUNC_END(__sha256_ce_transform)

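With finalization removed from the assembly, padding of the final block is handled by the library's generic C layer before the last call into the block function. A simplified sketch of that step (illustrative only, not the kernel's exact code; buf is assumed to hold the partial final block):

	#include <crypto/internal/sha2.h>
	#include <linux/string.h>

	/* SHA-256 finalization sketch: append 0x80, zero-pad until exactly
	 * 8 bytes remain in the final block, then append the total message
	 * length in bits as a big-endian 64-bit value. */
	static void sha256_finalize_sketch(u32 state[SHA256_STATE_WORDS],
					   u8 buf[SHA256_BLOCK_SIZE],
					   size_t partial, u64 total_len)
	{
		__be64 bits = cpu_to_be64(total_len << 3);

		buf[partial++] = 0x80;
		if (partial > SHA256_BLOCK_SIZE - 8) {
			/* No room left for the length: flush a padding-only block. */
			memset(buf + partial, 0, SHA256_BLOCK_SIZE - partial);
			sha256_blocks_arch(state, buf, 1);
			partial = 0;
		}
		memset(buf + partial, 0, SHA256_BLOCK_SIZE - 8 - partial);
		memcpy(buf + SHA256_BLOCK_SIZE - 8, &bits, sizeof(bits));
		sha256_blocks_arch(state, buf, 1);
	}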
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256 optimized for ARM64
+ *
+ * Copyright 2025 Google LLC
+ */
+#include <asm/neon.h>
+#include <crypto/internal/sha2.h>
+#include <crypto/internal/simd.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+asmlinkage void sha256_block_data_order(u32 state[SHA256_STATE_WORDS],
+					const u8 *data, size_t nblocks);
+asmlinkage void sha256_block_neon(u32 state[SHA256_STATE_WORDS],
+				  const u8 *data, size_t nblocks);
+asmlinkage size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
+					const u8 *data, size_t nblocks);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
+
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+			const u8 *data, size_t nblocks)
+{
+	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
+	    static_branch_likely(&have_neon) && crypto_simd_usable()) {
+		if (static_branch_likely(&have_ce)) {
+			do {
+				size_t rem;
+
+				kernel_neon_begin();
+				rem = __sha256_ce_transform(state,
+							    data, nblocks);
+				kernel_neon_end();
+				data += (nblocks - rem) * SHA256_BLOCK_SIZE;
+				nblocks = rem;
+			} while (nblocks);
+		} else {
+			kernel_neon_begin();
+			sha256_block_neon(state, data, nblocks);
+			kernel_neon_end();
+		}
+	} else {
+		sha256_block_data_order(state, data, nblocks);
+	}
+}
+EXPORT_SYMBOL(sha256_blocks_arch);
+
+bool sha256_is_arch_optimized(void)
+{
+	/* We always can use at least the ARM64 scalar implementation. */
+	return true;
+}
+EXPORT_SYMBOL(sha256_is_arch_optimized);
+
+static int __init sha256_arm64_mod_init(void)
+{
+	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
+	    cpu_have_named_feature(ASIMD)) {
+		static_branch_enable(&have_neon);
+		if (cpu_have_named_feature(SHA2))
+			static_branch_enable(&have_ce);
+	}
+	return 0;
+}
+arch_initcall(sha256_arm64_mod_init);
+
+static void __exit sha256_arm64_mod_exit(void)
+{
+}
+module_exit(sha256_arm64_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-256 optimized for ARM64");
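From a consumer's point of view nothing changes with this conversion; the existing library API simply becomes faster on arm64. A sketch of incremental use (the caller function is hypothetical, and sha256_init()/sha256_update()/sha256_final() are assumed to keep their existing <crypto/sha2.h> signatures):

	#include <crypto/sha2.h>

	/* Incremental hashing; the full-block processing behind
	 * sha256_update() and sha256_final() is what the exported
	 * sha256_blocks_arch() above accelerates. */
	static void example_digest(const u8 *hdr, unsigned int hdr_len,
				   const u8 *body, unsigned int body_len,
				   u8 out[SHA256_DIGEST_SIZE])
	{
		struct sha256_state s;

		sha256_init(&s);
		sha256_update(&s, hdr, hdr_len);
		sha256_update(&s, body, body_len);
		sha256_final(&s, out);
	}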