Crypto library updates for 6.18

Due to interdependencies between test and non-test code, both are
included in this pull request. The broken-down diffstat is as follows:

    Tests:            735 insertions(+), 1917 deletions(-)
    RISC-V Poly1305:  877 insertions(+), 1 deletion(-)
    Other:           1777 insertions(+), 3117 deletions(-)

Aside from the new RISC-V code, which is purely an addition, there are
quite a few simplifications, owing to the improved code organization for
multiple algorithms, the removal of the unused crypto_kpp API for
Curve25519 and of redundant tests, and the redesign of the BLAKE2s test.

Merge tag 'libcrypto-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux

Pull crypto library updates from Eric Biggers:

 - Add a RISC-V optimized implementation of Poly1305. This code was
   written by Andy Polyakov and contributed by Zhihang Shao.

 - Migrate the MD5 code into lib/crypto/, and add KUnit tests for MD5.

   Yes, it's still the 90s, and several kernel subsystems are still
   using MD5 for legacy use cases. As long as that remains the case,
   it's helpful to clean it up in the same way as I've been doing for
   other algorithms.

   Later, I plan to convert most of these users of MD5 to use the new
   MD5 library API instead of the generic crypto API (a rough
   before/after sketch of such a conversion follows the commit list
   below).

 - Simplify the organization of the ChaCha, Poly1305, BLAKE2s, and
   Curve25519 code.

   Consolidate these into one module per algorithm, and centralize the
   configuration and build process. This is the same reorganization that
   has already been successful for SHA-1 and SHA-2.

 - Remove the unused crypto_kpp API for Curve25519.

 - Migrate the BLAKE2s and Curve25519 self-tests to KUnit.

 - Always enable the architecture-optimized BLAKE2s code.

* tag 'libcrypto-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux: (38 commits)
  crypto: md5 - Implement export_core() and import_core()
  wireguard: kconfig: simplify crypto kconfig selections
  lib/crypto: tests: Enable Curve25519 test when CRYPTO_SELFTESTS
  lib/crypto: curve25519: Consolidate into single module
  lib/crypto: curve25519: Move a couple functions out-of-line
  lib/crypto: tests: Add Curve25519 benchmark
  lib/crypto: tests: Migrate Curve25519 self-test to KUnit
  crypto: curve25519 - Remove unused kpp support
  crypto: testmgr - Remove curve25519 kpp tests
  crypto: x86/curve25519 - Remove unused kpp support
  crypto: powerpc/curve25519 - Remove unused kpp support
  crypto: arm/curve25519 - Remove unused kpp support
  crypto: hisilicon/hpre - Remove unused curve25519 kpp support
  lib/crypto: tests: Add KUnit tests for BLAKE2s
  lib/crypto: blake2s: Consolidate into single C translation unit
  lib/crypto: blake2s: Move generic code into blake2s.c
  lib/crypto: blake2s: Always enable arch-optimized BLAKE2s code
  lib/crypto: blake2s: Remove obsolete self-test
  lib/crypto: x86/blake2s: Reduce size of BLAKE2S_SIGMA2
  lib/crypto: chacha: Consolidate into single module
  ...
Linus Torvalds 2025-09-29 15:48:56 -07:00
commit d8768fb12a
143 changed files with 3395 additions and 5041 deletions
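
As a rough sketch of the MD5 conversion plan mentioned above (illustrative
only; it assumes the md5() one-shot helper and MD5_DIGEST_SIZE from
<crypto/md5.h>, which the new crypto/md5.c shown further down is built on,
and the example_* wrapper names are hypothetical):

#include <crypto/hash.h>        /* generic crypto API (old path) */
#include <crypto/md5.h>         /* MD5 library API (new path) */
#include <linux/err.h>

/* Old path: look up an "md5" shash transform and digest through it. */
static int example_md5_old(const u8 *data, unsigned int len,
                           u8 out[MD5_DIGEST_SIZE])
{
        struct crypto_shash *tfm = crypto_alloc_shash("md5", 0, 0);
        int err;

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        err = crypto_shash_tfm_digest(tfm, data, len, out);
        crypto_free_shash(tfm);
        return err;
}

/* New path: call the library directly; no allocation, no failure path. */
static void example_md5_new(const u8 *data, size_t len,
                            u8 out[MD5_DIGEST_SIZE])
{
        md5(data, len, out);
}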


@ -364,7 +364,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_CHACHA20_NEON=m
CONFIG_CRYPTO_DEV_EXYNOS_RNG=y
CONFIG_CRYPTO_DEV_S5P=y
CONFIG_DMA_CMA=y


@ -101,7 +101,6 @@ CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_AES_ARM_CE=m
CONFIG_CRYPTO_CHACHA20_NEON=m
# CONFIG_CRYPTO_HW is not set
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=64


@ -1291,7 +1291,6 @@ CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_AES_ARM_CE=m
CONFIG_CRYPTO_CHACHA20_NEON=m
CONFIG_CRYPTO_DEV_SUN4I_SS=m
CONFIG_CRYPTO_DEV_FSL_CAAM=m
CONFIG_CRYPTO_DEV_EXYNOS_RNG=m


@ -708,7 +708,6 @@ CONFIG_CRYPTO_MICHAEL_MIC=y
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_CHACHA20_NEON=m
CONFIG_CRYPTO_DEV_OMAP=m
CONFIG_CRYPTO_DEV_OMAP_SHAM=m
CONFIG_CRYPTO_DEV_OMAP_AES=m


@ -2,19 +2,6 @@
menu "Accelerated Cryptographic Algorithms for CPU (arm)"
config CRYPTO_CURVE25519_NEON
tristate
depends on KERNEL_MODE_NEON
select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CURVE25519
default CRYPTO_LIB_CURVE25519_INTERNAL
help
Curve25519 algorithm
Architecture: arm with
- NEON (Advanced SIMD) extensions
config CRYPTO_GHASH_ARM_CE
tristate "Hash functions: GHASH (PMULL/NEON/ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON


@ -7,7 +7,6 @@ obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
obj-$(CONFIG_CRYPTO_CURVE25519_NEON) += curve25519-neon.o
obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
@ -18,4 +17,3 @@ blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o
aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
curve25519-neon-y := curve25519-core.o curve25519-glue.o


@ -1,137 +0,0 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* Based on public domain code from Daniel J. Bernstein and Peter Schwabe. This
* began from SUPERCOP's curve25519/neon2/scalarmult.s, but has subsequently been
* manually reworked for use in kernel space.
*/
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/simd.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/scatterlist.h>
#include <crypto/curve25519.h>
asmlinkage void curve25519_neon(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
const u8 basepoint[CURVE25519_KEY_SIZE]);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
const u8 scalar[CURVE25519_KEY_SIZE],
const u8 point[CURVE25519_KEY_SIZE])
{
if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
kernel_neon_begin();
curve25519_neon(out, scalar, point);
kernel_neon_end();
} else {
curve25519_generic(out, scalar, point);
}
}
EXPORT_SYMBOL(curve25519_arch);
void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE])
{
return curve25519_arch(pub, secret, curve25519_base_point);
}
EXPORT_SYMBOL(curve25519_base_arch);
static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
u8 *secret = kpp_tfm_ctx(tfm);
if (!len)
curve25519_generate_secret(secret);
else if (len == CURVE25519_KEY_SIZE &&
crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE))
memcpy(secret, buf, CURVE25519_KEY_SIZE);
else
return -EINVAL;
return 0;
}
static int curve25519_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
const u8 *secret = kpp_tfm_ctx(tfm);
u8 public_key[CURVE25519_KEY_SIZE];
u8 buf[CURVE25519_KEY_SIZE];
int copied, nbytes;
u8 const *bp;
if (req->src) {
copied = sg_copy_to_buffer(req->src,
sg_nents_for_len(req->src,
CURVE25519_KEY_SIZE),
public_key, CURVE25519_KEY_SIZE);
if (copied != CURVE25519_KEY_SIZE)
return -EINVAL;
bp = public_key;
} else {
bp = curve25519_base_point;
}
curve25519_arch(buf, secret, bp);
/* might want less than we've got */
nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
nbytes),
buf, nbytes);
if (copied != nbytes)
return -EINVAL;
return 0;
}
static unsigned int curve25519_max_size(struct crypto_kpp *tfm)
{
return CURVE25519_KEY_SIZE;
}
static struct kpp_alg curve25519_alg = {
.base.cra_name = "curve25519",
.base.cra_driver_name = "curve25519-neon",
.base.cra_priority = 200,
.base.cra_module = THIS_MODULE,
.base.cra_ctxsize = CURVE25519_KEY_SIZE,
.set_secret = curve25519_set_secret,
.generate_public_key = curve25519_compute_value,
.compute_shared_secret = curve25519_compute_value,
.max_size = curve25519_max_size,
};
static int __init arm_curve25519_init(void)
{
if (elf_hwcap & HWCAP_NEON) {
static_branch_enable(&have_neon);
return IS_REACHABLE(CONFIG_CRYPTO_KPP) ?
crypto_register_kpp(&curve25519_alg) : 0;
}
return 0;
}
static void __exit arm_curve25519_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_KPP) && elf_hwcap & HWCAP_NEON)
crypto_unregister_kpp(&curve25519_alg);
}
module_init(arm_curve25519_init);
module_exit(arm_curve25519_exit);
MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-neon");
MODULE_DESCRIPTION("Public key crypto: Curve25519 (NEON-accelerated)");
MODULE_LICENSE("GPL v2");

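With the kpp glue above removed, in-kernel users reach Curve25519 through the
library interface instead. A minimal sketch of that usage, assuming the
curve25519_generate_secret(), curve25519_generate_public() and curve25519()
helpers declared in <crypto/curve25519.h> (the example_* function is
hypothetical):

#include <crypto/curve25519.h>
#include <linux/errno.h>
#include <linux/string.h>

/* Generate a key pair and derive a shared secret with a peer's public key. */
static int example_curve25519_shared(u8 shared[CURVE25519_KEY_SIZE],
                                     u8 my_public[CURVE25519_KEY_SIZE],
                                     const u8 peer_public[CURVE25519_KEY_SIZE])
{
        u8 secret[CURVE25519_KEY_SIZE];
        int err = -EINVAL;

        curve25519_generate_secret(secret);     /* random, clamped secret */

        /* Both helpers are assumed to reject degenerate (all-zero) results. */
        if (curve25519_generate_public(my_public, secret) &&
            curve25519(shared, secret, peer_public))
                err = 0;

        memzero_explicit(secret, sizeof(secret));
        return err;
}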

@ -559,7 +559,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m


@ -516,7 +516,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m


@ -536,7 +536,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m


@ -508,7 +508,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m


@ -518,7 +518,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m


@ -535,7 +535,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m


@ -622,7 +622,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m


@ -508,7 +508,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m


@ -509,7 +509,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m


@ -525,7 +525,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m


@ -506,7 +506,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m


@ -506,7 +506,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m


@ -11,9 +11,9 @@
obj-y := cpu.o setup.o octeon-platform.o octeon-irq.o csrc-octeon.o
obj-y += dma-octeon.o
obj-y += octeon-crypto.o
obj-y += octeon-memcpy.o
obj-y += executive/
obj-y += crypto/
obj-$(CONFIG_MTD) += flash_setup.o
obj-$(CONFIG_SMP) += smp.o


@ -1,8 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# OCTEON-specific crypto modules.
#
obj-y += octeon-crypto.o
obj-$(CONFIG_CRYPTO_MD5_OCTEON) += octeon-md5.o


@ -1,214 +0,0 @@
/*
* Cryptographic API.
*
* MD5 Message Digest Algorithm (RFC1321).
*
* Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
*
* Based on crypto/md5.c, which is:
*
* Derived from cryptoapi implementation, originally based on the
* public domain implementation written by Colin Plumb in 1993.
*
* Copyright (c) Cryptoapi developers.
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <asm/octeon/crypto.h>
#include <asm/octeon/octeon.h>
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/unaligned.h>
struct octeon_md5_state {
__le32 hash[MD5_HASH_WORDS];
u64 byte_count;
};
/*
* We pass everything as 64-bit. OCTEON can handle misaligned data.
*/
static void octeon_md5_store_hash(struct octeon_md5_state *ctx)
{
u64 *hash = (u64 *)ctx->hash;
write_octeon_64bit_hash_dword(hash[0], 0);
write_octeon_64bit_hash_dword(hash[1], 1);
}
static void octeon_md5_read_hash(struct octeon_md5_state *ctx)
{
u64 *hash = (u64 *)ctx->hash;
hash[0] = read_octeon_64bit_hash_dword(0);
hash[1] = read_octeon_64bit_hash_dword(1);
}
static void octeon_md5_transform(const void *_block)
{
const u64 *block = _block;
write_octeon_64bit_block_dword(block[0], 0);
write_octeon_64bit_block_dword(block[1], 1);
write_octeon_64bit_block_dword(block[2], 2);
write_octeon_64bit_block_dword(block[3], 3);
write_octeon_64bit_block_dword(block[4], 4);
write_octeon_64bit_block_dword(block[5], 5);
write_octeon_64bit_block_dword(block[6], 6);
octeon_md5_start(block[7]);
}
static int octeon_md5_init(struct shash_desc *desc)
{
struct octeon_md5_state *mctx = shash_desc_ctx(desc);
mctx->hash[0] = cpu_to_le32(MD5_H0);
mctx->hash[1] = cpu_to_le32(MD5_H1);
mctx->hash[2] = cpu_to_le32(MD5_H2);
mctx->hash[3] = cpu_to_le32(MD5_H3);
mctx->byte_count = 0;
return 0;
}
static int octeon_md5_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct octeon_md5_state *mctx = shash_desc_ctx(desc);
struct octeon_cop2_state state;
unsigned long flags;
mctx->byte_count += len;
flags = octeon_crypto_enable(&state);
octeon_md5_store_hash(mctx);
do {
octeon_md5_transform(data);
data += MD5_HMAC_BLOCK_SIZE;
len -= MD5_HMAC_BLOCK_SIZE;
} while (len >= MD5_HMAC_BLOCK_SIZE);
octeon_md5_read_hash(mctx);
octeon_crypto_disable(&state, flags);
mctx->byte_count -= len;
return len;
}
static int octeon_md5_finup(struct shash_desc *desc, const u8 *src,
unsigned int offset, u8 *out)
{
struct octeon_md5_state *mctx = shash_desc_ctx(desc);
int padding = 56 - (offset + 1);
struct octeon_cop2_state state;
u32 block[MD5_BLOCK_WORDS];
unsigned long flags;
char *p;
p = memcpy(block, src, offset);
p += offset;
*p++ = 0x80;
flags = octeon_crypto_enable(&state);
octeon_md5_store_hash(mctx);
if (padding < 0) {
memset(p, 0x00, padding + sizeof(u64));
octeon_md5_transform(block);
p = (char *)block;
padding = 56;
}
memset(p, 0, padding);
mctx->byte_count += offset;
block[14] = mctx->byte_count << 3;
block[15] = mctx->byte_count >> 29;
cpu_to_le32_array(block + 14, 2);
octeon_md5_transform(block);
octeon_md5_read_hash(mctx);
octeon_crypto_disable(&state, flags);
memzero_explicit(block, sizeof(block));
memcpy(out, mctx->hash, sizeof(mctx->hash));
return 0;
}
static int octeon_md5_export(struct shash_desc *desc, void *out)
{
struct octeon_md5_state *ctx = shash_desc_ctx(desc);
union {
u8 *u8;
u32 *u32;
u64 *u64;
} p = { .u8 = out };
int i;
for (i = 0; i < MD5_HASH_WORDS; i++)
put_unaligned(le32_to_cpu(ctx->hash[i]), p.u32++);
put_unaligned(ctx->byte_count, p.u64);
return 0;
}
static int octeon_md5_import(struct shash_desc *desc, const void *in)
{
struct octeon_md5_state *ctx = shash_desc_ctx(desc);
union {
const u8 *u8;
const u32 *u32;
const u64 *u64;
} p = { .u8 = in };
int i;
for (i = 0; i < MD5_HASH_WORDS; i++)
ctx->hash[i] = cpu_to_le32(get_unaligned(p.u32++));
ctx->byte_count = get_unaligned(p.u64);
return 0;
}
static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = octeon_md5_init,
.update = octeon_md5_update,
.finup = octeon_md5_finup,
.export = octeon_md5_export,
.import = octeon_md5_import,
.statesize = MD5_STATE_SIZE,
.descsize = sizeof(struct octeon_md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name= "octeon-md5",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init md5_mod_init(void)
{
if (!octeon_has_crypto())
return -ENOTSUPP;
return crypto_register_shash(&alg);
}
static void __exit md5_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(md5_mod_init);
module_exit(md5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Message Digest Algorithm (OCTEON)");
MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");


@ -155,7 +155,6 @@ CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5_OCTEON=y
CONFIG_CRYPTO_DES=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_FS=y


@ -2,14 +2,4 @@
menu "Accelerated Cryptographic Algorithms for CPU (mips)"
config CRYPTO_MD5_OCTEON
tristate "Digests: MD5 (OCTEON)"
depends on CPU_CAVIUM_OCTEON
select CRYPTO_MD5
select CRYPTO_HASH
help
MD5 message digest algorithm (RFC1321)
Architecture: mips OCTEON using crypto instructions, when available
endmenu


@ -320,7 +320,6 @@ CONFIG_XMON=y
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_WP512=m


@ -387,7 +387,6 @@ CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_AES_GCM_P10=m
CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m


@ -2,27 +2,6 @@
menu "Accelerated Cryptographic Algorithms for CPU (powerpc)"
config CRYPTO_CURVE25519_PPC64
tristate
depends on PPC64 && CPU_LITTLE_ENDIAN
select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CURVE25519
default CRYPTO_LIB_CURVE25519_INTERNAL
help
Curve25519 algorithm
Architecture: PowerPC64
- Little-endian
config CRYPTO_MD5_PPC
tristate "Digests: MD5"
select CRYPTO_HASH
help
MD5 message digest algorithm (RFC1321)
Architecture: powerpc
config CRYPTO_AES_PPC_SPE
tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (SPE)"
depends on SPE


@ -6,16 +6,12 @@
#
obj-$(CONFIG_CRYPTO_AES_PPC_SPE) += aes-ppc-spe.o
obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o
obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
obj-$(CONFIG_CRYPTO_CURVE25519_PPC64) += curve25519-ppc64le.o
aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
md5-ppc-y := md5-asm.o md5-glue.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
curve25519-ppc64le-y := curve25519-ppc64le-core.o curve25519-ppc64le_asm.o
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
override flavour := linux-ppc64le


@ -1,99 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue code for MD5 implementation for PPC assembler
*
* Based on generic implementation.
*
* Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
*/
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
extern void ppc_md5_transform(u32 *state, const u8 *src, u32 blocks);
static int ppc_md5_init(struct shash_desc *desc)
{
struct md5_state *sctx = shash_desc_ctx(desc);
sctx->hash[0] = MD5_H0;
sctx->hash[1] = MD5_H1;
sctx->hash[2] = MD5_H2;
sctx->hash[3] = MD5_H3;
sctx->byte_count = 0;
return 0;
}
static int ppc_md5_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct md5_state *sctx = shash_desc_ctx(desc);
sctx->byte_count += round_down(len, MD5_HMAC_BLOCK_SIZE);
ppc_md5_transform(sctx->hash, data, len >> 6);
return len - round_down(len, MD5_HMAC_BLOCK_SIZE);
}
static int ppc_md5_finup(struct shash_desc *desc, const u8 *src,
unsigned int offset, u8 *out)
{
struct md5_state *sctx = shash_desc_ctx(desc);
__le64 block[MD5_BLOCK_WORDS] = {};
u8 *p = memcpy(block, src, offset);
__le32 *dst = (__le32 *)out;
__le64 *pbits;
src = p;
p += offset;
*p++ = 0x80;
sctx->byte_count += offset;
pbits = &block[(MD5_BLOCK_WORDS / (offset > 55 ? 1 : 2)) - 1];
*pbits = cpu_to_le64(sctx->byte_count << 3);
ppc_md5_transform(sctx->hash, src, (pbits - block + 1) / 8);
memzero_explicit(block, sizeof(block));
dst[0] = cpu_to_le32(sctx->hash[0]);
dst[1] = cpu_to_le32(sctx->hash[1]);
dst[2] = cpu_to_le32(sctx->hash[2]);
dst[3] = cpu_to_le32(sctx->hash[3]);
return 0;
}
static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = ppc_md5_init,
.update = ppc_md5_update,
.finup = ppc_md5_finup,
.descsize = MD5_STATE_SIZE,
.base = {
.cra_name = "md5",
.cra_driver_name= "md5-ppc",
.cra_priority = 200,
.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init ppc_md5_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit ppc_md5_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(ppc_md5_mod_init);
module_exit(ppc_md5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, PPC assembler");
MODULE_ALIAS_CRYPTO("md5");
MODULE_ALIAS_CRYPTO("md5-ppc");


@ -758,7 +758,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m


@ -742,7 +742,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m


@ -16,16 +16,6 @@ config CRYPTO_DES_SPARC64
Architecture: sparc64
config CRYPTO_MD5_SPARC64
tristate "Digests: MD5"
depends on SPARC64
select CRYPTO_MD5
select CRYPTO_HASH
help
MD5 message digest algorithm (RFC1321)
Architecture: sparc64 using crypto instructions, when available
config CRYPTO_AES_SPARC64
tristate "Ciphers: AES, modes: ECB, CBC, CTR"
depends on SPARC64


@ -3,14 +3,10 @@
# Arch-specific CryptoAPI modules.
#
obj-$(CONFIG_CRYPTO_MD5_SPARC64) += md5-sparc64.o
obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
md5-sparc64-y := md5_asm.o md5_glue.o
aes-sparc64-y := aes_asm.o aes_glue.o
des-sparc64-y := des_asm.o des_glue.o
camellia-sparc64-y := camellia_asm.o camellia_glue.o


@ -1,174 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Glue code for MD5 hashing optimized for sparc64 crypto opcodes.
*
* This is based largely upon arch/x86/crypto/sha1_ssse3_glue.c
* and crypto/md5.c which are:
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) Mathias Krause <minipli@googlemail.com>
* Copyright (c) Cryptoapi developers.
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/elf.h>
#include <asm/opcodes.h>
#include <asm/pstate.h>
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/unaligned.h>
struct sparc_md5_state {
__le32 hash[MD5_HASH_WORDS];
u64 byte_count;
};
asmlinkage void md5_sparc64_transform(__le32 *digest, const char *data,
unsigned int rounds);
static int md5_sparc64_init(struct shash_desc *desc)
{
struct sparc_md5_state *mctx = shash_desc_ctx(desc);
mctx->hash[0] = cpu_to_le32(MD5_H0);
mctx->hash[1] = cpu_to_le32(MD5_H1);
mctx->hash[2] = cpu_to_le32(MD5_H2);
mctx->hash[3] = cpu_to_le32(MD5_H3);
mctx->byte_count = 0;
return 0;
}
static int md5_sparc64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sparc_md5_state *sctx = shash_desc_ctx(desc);
sctx->byte_count += round_down(len, MD5_HMAC_BLOCK_SIZE);
md5_sparc64_transform(sctx->hash, data, len / MD5_HMAC_BLOCK_SIZE);
return len - round_down(len, MD5_HMAC_BLOCK_SIZE);
}
/* Add padding and return the message digest. */
static int md5_sparc64_finup(struct shash_desc *desc, const u8 *src,
unsigned int offset, u8 *out)
{
struct sparc_md5_state *sctx = shash_desc_ctx(desc);
__le64 block[MD5_BLOCK_WORDS] = {};
u8 *p = memcpy(block, src, offset);
__le32 *dst = (__le32 *)out;
__le64 *pbits;
int i;
src = p;
p += offset;
*p++ = 0x80;
sctx->byte_count += offset;
pbits = &block[(MD5_BLOCK_WORDS / (offset > 55 ? 1 : 2)) - 1];
*pbits = cpu_to_le64(sctx->byte_count << 3);
md5_sparc64_transform(sctx->hash, src, (pbits - block + 1) / 8);
memzero_explicit(block, sizeof(block));
/* Store state in digest */
for (i = 0; i < MD5_HASH_WORDS; i++)
dst[i] = sctx->hash[i];
return 0;
}
static int md5_sparc64_export(struct shash_desc *desc, void *out)
{
struct sparc_md5_state *sctx = shash_desc_ctx(desc);
union {
u8 *u8;
u32 *u32;
u64 *u64;
} p = { .u8 = out };
int i;
for (i = 0; i < MD5_HASH_WORDS; i++)
put_unaligned(le32_to_cpu(sctx->hash[i]), p.u32++);
put_unaligned(sctx->byte_count, p.u64);
return 0;
}
static int md5_sparc64_import(struct shash_desc *desc, const void *in)
{
struct sparc_md5_state *sctx = shash_desc_ctx(desc);
union {
const u8 *u8;
const u32 *u32;
const u64 *u64;
} p = { .u8 = in };
int i;
for (i = 0; i < MD5_HASH_WORDS; i++)
sctx->hash[i] = cpu_to_le32(get_unaligned(p.u32++));
sctx->byte_count = get_unaligned(p.u64);
return 0;
}
static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = md5_sparc64_init,
.update = md5_sparc64_update,
.finup = md5_sparc64_finup,
.export = md5_sparc64_export,
.import = md5_sparc64_import,
.descsize = sizeof(struct sparc_md5_state),
.statesize = sizeof(struct sparc_md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name= "md5-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static bool __init sparc64_has_md5_opcode(void)
{
unsigned long cfr;
if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
return false;
__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
if (!(cfr & CFR_MD5))
return false;
return true;
}
static int __init md5_sparc64_mod_init(void)
{
if (sparc64_has_md5_opcode()) {
pr_info("Using sparc64 md5 opcode optimized MD5 implementation\n");
return crypto_register_shash(&alg);
}
pr_info("sparc64 md5 opcode not available.\n");
return -ENODEV;
}
static void __exit md5_sparc64_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(md5_sparc64_mod_init);
module_exit(md5_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Message Digest Algorithm, sparc64 md5 opcode accelerated");
MODULE_ALIAS_CRYPTO("md5");
#include "crop_devid.c"


@ -2,19 +2,6 @@
menu "Accelerated Cryptographic Algorithms for CPU (x86)"
config CRYPTO_CURVE25519_X86
tristate
depends on 64BIT
select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CURVE25519
default CRYPTO_LIB_CURVE25519_INTERNAL
help
Curve25519 algorithm
Architecture: x86_64 using:
- ADX (large integer arithmetic)
config CRYPTO_AES_NI_INTEL
tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XCTR, XTS, GCM (AES-NI/VAES)"
select CRYPTO_AEAD


@ -62,8 +62,6 @@ nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o
obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
obj-$(CONFIG_CRYPTO_SM3_AVX_X86_64) += sm3-avx-x86_64.o
sm3-avx-x86_64-y := sm3-avx-asm_64.o sm3_avx_glue.o
@ -81,6 +79,3 @@ aria-aesni-avx2-x86_64-y := aria-aesni-avx2-asm_64.o aria_aesni_avx2_glue.o
obj-$(CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64) += aria-gfni-avx512-x86_64.o
aria-gfni-avx512-x86_64-y := aria-gfni-avx512-asm_64.o aria_gfni_avx512_glue.o
# Disable GCOV in odd or sensitive code
GCOV_PROFILE_curve25519-x86_64.o := n


@ -344,14 +344,6 @@ config CRYPTO_ECRDSA
One of the Russian cryptographic standard algorithms (called GOST
algorithms). Only signature verification is implemented.
config CRYPTO_CURVE25519
tristate "Curve25519"
select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
select CRYPTO_LIB_CURVE25519_INTERNAL
help
Curve25519 elliptic curve (RFC7748)
endmenu
menu "Block ciphers"
@ -609,6 +601,7 @@ menu "Length-preserving ciphers and modes"
config CRYPTO_ADIANTUM
tristate "Adiantum"
select CRYPTO_CHACHA20
select CRYPTO_LIB_POLY1305
select CRYPTO_LIB_POLY1305_GENERIC
select CRYPTO_NHPOLY1305
select CRYPTO_MANAGER
@ -647,7 +640,6 @@ config CRYPTO_ARC4
config CRYPTO_CHACHA20
tristate "ChaCha"
select CRYPTO_LIB_CHACHA
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_SKCIPHER
help
The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms
@ -770,6 +762,7 @@ config CRYPTO_XTS
config CRYPTO_NHPOLY1305
tristate
select CRYPTO_HASH
select CRYPTO_LIB_POLY1305
select CRYPTO_LIB_POLY1305_GENERIC
endmenu
@ -938,8 +931,9 @@ config CRYPTO_MD4
config CRYPTO_MD5
tristate "MD5"
select CRYPTO_HASH
select CRYPTO_LIB_MD5
help
MD5 message digest algorithm (RFC1321)
MD5 message digest algorithm (RFC1321), including HMAC support.
config CRYPTO_MICHAEL_MIC
tristate "Michael MIC"


@ -182,7 +182,6 @@ obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
obj-$(CONFIG_CRYPTO_ECC) += ecc.o
obj-$(CONFIG_CRYPTO_ESSIV) += essiv.o
obj-$(CONFIG_CRYPTO_CURVE25519) += curve25519-generic.o
ecdh_generic-y += ecdh.o
ecdh_generic-y += ecdh_helper.o


@ -47,7 +47,7 @@ static int chacha12_setkey(struct crypto_skcipher *tfm,
static int chacha_stream_xor(struct skcipher_request *req,
const struct chacha_ctx *ctx,
const u8 iv[CHACHA_IV_SIZE], bool arch)
const u8 iv[CHACHA_IV_SIZE])
{
struct skcipher_walk walk;
struct chacha_state state;
@ -63,36 +63,23 @@ static int chacha_stream_xor(struct skcipher_request *req,
if (nbytes < walk.total)
nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
if (arch)
chacha_crypt(&state, walk.dst.virt.addr,
walk.src.virt.addr, nbytes, ctx->nrounds);
else
chacha_crypt_generic(&state, walk.dst.virt.addr,
walk.src.virt.addr, nbytes,
ctx->nrounds);
chacha_crypt(&state, walk.dst.virt.addr, walk.src.virt.addr,
nbytes, ctx->nrounds);
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
static int crypto_chacha_crypt_generic(struct skcipher_request *req)
static int crypto_chacha_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
return chacha_stream_xor(req, ctx, req->iv, false);
return chacha_stream_xor(req, ctx, req->iv);
}
static int crypto_chacha_crypt_arch(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
return chacha_stream_xor(req, ctx, req->iv, true);
}
static int crypto_xchacha_crypt(struct skcipher_request *req, bool arch)
static int crypto_xchacha_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
@ -102,10 +89,7 @@ static int crypto_xchacha_crypt(struct skcipher_request *req, bool arch)
/* Compute the subkey given the original key and first 128 nonce bits */
chacha_init(&state, ctx->key, req->iv);
if (arch)
hchacha_block(&state, subctx.key, ctx->nrounds);
else
hchacha_block_generic(&state, subctx.key, ctx->nrounds);
hchacha_block(&state, subctx.key, ctx->nrounds);
subctx.nrounds = ctx->nrounds;
/* Build the real IV */
@ -113,71 +97,13 @@ static int crypto_xchacha_crypt(struct skcipher_request *req, bool arch)
memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
/* Generate the stream and XOR it with the data */
return chacha_stream_xor(req, &subctx, real_iv, arch);
}
static int crypto_xchacha_crypt_generic(struct skcipher_request *req)
{
return crypto_xchacha_crypt(req, false);
}
static int crypto_xchacha_crypt_arch(struct skcipher_request *req)
{
return crypto_xchacha_crypt(req, true);
return chacha_stream_xor(req, &subctx, real_iv);
}
static struct skcipher_alg algs[] = {
{
.base.cra_name = "chacha20",
.base.cra_driver_name = "chacha20-generic",
.base.cra_priority = 100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = CHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha20_setkey,
.encrypt = crypto_chacha_crypt_generic,
.decrypt = crypto_chacha_crypt_generic,
},
{
.base.cra_name = "xchacha20",
.base.cra_driver_name = "xchacha20-generic",
.base.cra_priority = 100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha20_setkey,
.encrypt = crypto_xchacha_crypt_generic,
.decrypt = crypto_xchacha_crypt_generic,
},
{
.base.cra_name = "xchacha12",
.base.cra_driver_name = "xchacha12-generic",
.base.cra_priority = 100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha12_setkey,
.encrypt = crypto_xchacha_crypt_generic,
.decrypt = crypto_xchacha_crypt_generic,
},
{
.base.cra_name = "chacha20",
.base.cra_driver_name = "chacha20-" __stringify(ARCH),
.base.cra_driver_name = "chacha20-lib",
.base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
@ -188,12 +114,12 @@ static struct skcipher_alg algs[] = {
.ivsize = CHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha20_setkey,
.encrypt = crypto_chacha_crypt_arch,
.decrypt = crypto_chacha_crypt_arch,
.encrypt = crypto_chacha_crypt,
.decrypt = crypto_chacha_crypt,
},
{
.base.cra_name = "xchacha20",
.base.cra_driver_name = "xchacha20-" __stringify(ARCH),
.base.cra_driver_name = "xchacha20-lib",
.base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
@ -204,12 +130,12 @@ static struct skcipher_alg algs[] = {
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha20_setkey,
.encrypt = crypto_xchacha_crypt_arch,
.decrypt = crypto_xchacha_crypt_arch,
.encrypt = crypto_xchacha_crypt,
.decrypt = crypto_xchacha_crypt,
},
{
.base.cra_name = "xchacha12",
.base.cra_driver_name = "xchacha12-" __stringify(ARCH),
.base.cra_driver_name = "xchacha12-lib",
.base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
@ -220,27 +146,19 @@ static struct skcipher_alg algs[] = {
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha12_setkey,
.encrypt = crypto_xchacha_crypt_arch,
.decrypt = crypto_xchacha_crypt_arch,
.encrypt = crypto_xchacha_crypt,
.decrypt = crypto_xchacha_crypt,
}
};
static unsigned int num_algs;
static int __init crypto_chacha_mod_init(void)
{
/* register the arch flavours only if they differ from generic */
num_algs = ARRAY_SIZE(algs);
BUILD_BUG_ON(ARRAY_SIZE(algs) % 2 != 0);
if (!chacha_is_arch_optimized())
num_algs /= 2;
return crypto_register_skciphers(algs, num_algs);
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}
static void __exit crypto_chacha_mod_fini(void)
{
crypto_unregister_skciphers(algs, num_algs);
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
module_init(crypto_chacha_mod_init);
@ -250,11 +168,8 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
MODULE_DESCRIPTION("Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-generic");
MODULE_ALIAS_CRYPTO("chacha20-" __stringify(ARCH));
MODULE_ALIAS_CRYPTO("chacha20-lib");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-generic");
MODULE_ALIAS_CRYPTO("xchacha20-" __stringify(ARCH));
MODULE_ALIAS_CRYPTO("xchacha20-lib");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-generic");
MODULE_ALIAS_CRYPTO("xchacha12-" __stringify(ARCH));
MODULE_ALIAS_CRYPTO("xchacha12-lib");

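The wrapper above now routes everything through the ChaCha library calls
(chacha_init(), chacha_crypt(), hchacha_block()), which dispatch to an
arch-optimized implementation when one is available. A minimal sketch of
driving that library interface directly on a flat buffer, assuming the
helpers from <crypto/chacha.h> behave as they are called in the code above
(the example_* function is hypothetical):

#include <crypto/chacha.h>
#include <linux/string.h>

/* XOR a contiguous buffer with a ChaCha20 keystream (20 rounds). */
static void example_chacha20_crypt(u8 *dst, const u8 *src, unsigned int len,
                                   const u32 key[8],
                                   const u8 iv[CHACHA_IV_SIZE])
{
        struct chacha_state state;

        chacha_init(&state, key, iv);            /* key: 8 LE words; iv: counter || nonce */
        chacha_crypt(&state, dst, src, len, 20);
        memzero_explicit(&state, sizeof(state)); /* don't leave key material behind */
}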

@ -1,91 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <crypto/curve25519.h>
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
u8 *secret = kpp_tfm_ctx(tfm);
if (!len)
curve25519_generate_secret(secret);
else if (len == CURVE25519_KEY_SIZE &&
crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE))
memcpy(secret, buf, CURVE25519_KEY_SIZE);
else
return -EINVAL;
return 0;
}
static int curve25519_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
const u8 *secret = kpp_tfm_ctx(tfm);
u8 public_key[CURVE25519_KEY_SIZE];
u8 buf[CURVE25519_KEY_SIZE];
int copied, nbytes;
u8 const *bp;
if (req->src) {
copied = sg_copy_to_buffer(req->src,
sg_nents_for_len(req->src,
CURVE25519_KEY_SIZE),
public_key, CURVE25519_KEY_SIZE);
if (copied != CURVE25519_KEY_SIZE)
return -EINVAL;
bp = public_key;
} else {
bp = curve25519_base_point;
}
curve25519_generic(buf, secret, bp);
/* might want less than we've got */
nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
nbytes),
buf, nbytes);
if (copied != nbytes)
return -EINVAL;
return 0;
}
static unsigned int curve25519_max_size(struct crypto_kpp *tfm)
{
return CURVE25519_KEY_SIZE;
}
static struct kpp_alg curve25519_alg = {
.base.cra_name = "curve25519",
.base.cra_driver_name = "curve25519-generic",
.base.cra_priority = 100,
.base.cra_module = THIS_MODULE,
.base.cra_ctxsize = CURVE25519_KEY_SIZE,
.set_secret = curve25519_set_secret,
.generate_public_key = curve25519_compute_value,
.compute_shared_secret = curve25519_compute_value,
.max_size = curve25519_max_size,
};
static int __init curve25519_init(void)
{
return crypto_register_kpp(&curve25519_alg);
}
static void __exit curve25519_exit(void)
{
crypto_unregister_kpp(&curve25519_alg);
}
module_init(curve25519_init);
module_exit(curve25519_exit);
MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-generic");
MODULE_DESCRIPTION("Curve25519 elliptic curve (RFC7748)");
MODULE_LICENSE("GPL");


@ -1,25 +1,62 @@
/*
* Cryptographic API.
*
* MD5 Message Digest Algorithm (RFC1321).
*
* Derived from cryptoapi implementation, originally based on the
* public domain implementation written by Colin Plumb in 1993.
*
* Copyright (c) Cryptoapi developers.
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Crypto API support for MD5 and HMAC-MD5
*
* Copyright 2025 Google LLC
*/
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
/*
* Export and import functions. crypto_shash wants a particular format that
* matches that used by some legacy drivers. It currently is the same as the
* library MD5 context, except the value in bytecount must be block-aligned and
* the remainder must be stored in an extra u8 appended to the struct.
*/
#define MD5_SHASH_STATE_SIZE (sizeof(struct md5_ctx) + 1)
static_assert(sizeof(struct md5_ctx) == sizeof(struct md5_state));
static_assert(offsetof(struct md5_ctx, state) == offsetof(struct md5_state, hash));
static_assert(offsetof(struct md5_ctx, bytecount) == offsetof(struct md5_state, byte_count));
static_assert(offsetof(struct md5_ctx, buf) == offsetof(struct md5_state, block));
static int __crypto_md5_export(const struct md5_ctx *ctx0, void *out)
{
struct md5_ctx ctx = *ctx0;
unsigned int partial;
u8 *p = out;
partial = ctx.bytecount % MD5_BLOCK_SIZE;
ctx.bytecount -= partial;
memcpy(p, &ctx, sizeof(ctx));
p += sizeof(ctx);
*p = partial;
return 0;
}
static int __crypto_md5_import(struct md5_ctx *ctx, const void *in)
{
const u8 *p = in;
memcpy(ctx, p, sizeof(*ctx));
p += sizeof(*ctx);
ctx->bytecount += *p;
return 0;
}
static int __crypto_md5_export_core(const struct md5_ctx *ctx, void *out)
{
memcpy(out, ctx, offsetof(struct md5_ctx, buf));
return 0;
}
static int __crypto_md5_import_core(struct md5_ctx *ctx, const void *in)
{
memcpy(ctx, in, offsetof(struct md5_ctx, buf));
return 0;
}
const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
@ -27,198 +64,173 @@ const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
};
EXPORT_SYMBOL_GPL(md5_zero_message_hash);
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
#define F4(x, y, z) (y ^ (x | ~z))
#define MD5_CTX(desc) ((struct md5_ctx *)shash_desc_ctx(desc))
#define MD5STEP(f, w, x, y, z, in, s) \
(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
static void md5_transform(__u32 *hash, __u32 const *in)
static int crypto_md5_init(struct shash_desc *desc)
{
u32 a, b, c, d;
a = hash[0];
b = hash[1];
c = hash[2];
d = hash[3];
MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
hash[0] += a;
hash[1] += b;
hash[2] += c;
hash[3] += d;
}
static inline void md5_transform_helper(struct md5_state *ctx,
u32 block[MD5_BLOCK_WORDS])
{
le32_to_cpu_array(block, MD5_BLOCK_WORDS);
md5_transform(ctx->hash, block);
}
static int md5_init(struct shash_desc *desc)
{
struct md5_state *mctx = shash_desc_ctx(desc);
mctx->hash[0] = MD5_H0;
mctx->hash[1] = MD5_H1;
mctx->hash[2] = MD5_H2;
mctx->hash[3] = MD5_H3;
mctx->byte_count = 0;
md5_init(MD5_CTX(desc));
return 0;
}
static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
static int crypto_md5_update(struct shash_desc *desc,
const u8 *data, unsigned int len)
{
struct md5_state *mctx = shash_desc_ctx(desc);
u32 block[MD5_BLOCK_WORDS];
mctx->byte_count += len;
do {
memcpy(block, data, sizeof(block));
md5_transform_helper(mctx, block);
data += sizeof(block);
len -= sizeof(block);
} while (len >= sizeof(block));
memzero_explicit(block, sizeof(block));
mctx->byte_count -= len;
return len;
}
static int md5_finup(struct shash_desc *desc, const u8 *data, unsigned int len,
u8 *out)
{
struct md5_state *mctx = shash_desc_ctx(desc);
u32 block[MD5_BLOCK_WORDS];
unsigned int offset;
int padding;
char *p;
memcpy(block, data, len);
offset = len;
p = (char *)block + offset;
padding = 56 - (offset + 1);
*p++ = 0x80;
if (padding < 0) {
memset(p, 0x00, padding + sizeof (u64));
md5_transform_helper(mctx, block);
p = (char *)block;
padding = 56;
}
memset(p, 0, padding);
mctx->byte_count += len;
block[14] = mctx->byte_count << 3;
block[15] = mctx->byte_count >> 29;
le32_to_cpu_array(block, (sizeof(block) - sizeof(u64)) / sizeof(u32));
md5_transform(mctx->hash, block);
memzero_explicit(block, sizeof(block));
cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
memcpy(out, mctx->hash, sizeof(mctx->hash));
md5_update(MD5_CTX(desc), data, len);
return 0;
}
static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = md5_init,
.update = md5_update,
.finup = md5_finup,
.descsize = MD5_STATE_SIZE,
.base = {
.cra_name = "md5",
.cra_driver_name = "md5-generic",
.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
static int crypto_md5_final(struct shash_desc *desc, u8 *out)
{
md5_final(MD5_CTX(desc), out);
return 0;
}
static int crypto_md5_digest(struct shash_desc *desc,
const u8 *data, unsigned int len, u8 *out)
{
md5(data, len, out);
return 0;
}
static int crypto_md5_export(struct shash_desc *desc, void *out)
{
return __crypto_md5_export(MD5_CTX(desc), out);
}
static int crypto_md5_import(struct shash_desc *desc, const void *in)
{
return __crypto_md5_import(MD5_CTX(desc), in);
}
static int crypto_md5_export_core(struct shash_desc *desc, void *out)
{
return __crypto_md5_export_core(MD5_CTX(desc), out);
}
static int crypto_md5_import_core(struct shash_desc *desc, const void *in)
{
return __crypto_md5_import_core(MD5_CTX(desc), in);
}
#define HMAC_MD5_KEY(tfm) ((struct hmac_md5_key *)crypto_shash_ctx(tfm))
#define HMAC_MD5_CTX(desc) ((struct hmac_md5_ctx *)shash_desc_ctx(desc))
static int crypto_hmac_md5_setkey(struct crypto_shash *tfm,
const u8 *raw_key, unsigned int keylen)
{
hmac_md5_preparekey(HMAC_MD5_KEY(tfm), raw_key, keylen);
return 0;
}
static int crypto_hmac_md5_init(struct shash_desc *desc)
{
hmac_md5_init(HMAC_MD5_CTX(desc), HMAC_MD5_KEY(desc->tfm));
return 0;
}
static int crypto_hmac_md5_update(struct shash_desc *desc,
const u8 *data, unsigned int len)
{
hmac_md5_update(HMAC_MD5_CTX(desc), data, len);
return 0;
}
static int crypto_hmac_md5_final(struct shash_desc *desc, u8 *out)
{
hmac_md5_final(HMAC_MD5_CTX(desc), out);
return 0;
}
static int crypto_hmac_md5_digest(struct shash_desc *desc,
const u8 *data, unsigned int len, u8 *out)
{
hmac_md5(HMAC_MD5_KEY(desc->tfm), data, len, out);
return 0;
}
static int crypto_hmac_md5_export(struct shash_desc *desc, void *out)
{
return __crypto_md5_export(&HMAC_MD5_CTX(desc)->hash_ctx, out);
}
static int crypto_hmac_md5_import(struct shash_desc *desc, const void *in)
{
struct hmac_md5_ctx *ctx = HMAC_MD5_CTX(desc);
ctx->ostate = HMAC_MD5_KEY(desc->tfm)->ostate;
return __crypto_md5_import(&ctx->hash_ctx, in);
}
static int crypto_hmac_md5_export_core(struct shash_desc *desc, void *out)
{
return __crypto_md5_export_core(&HMAC_MD5_CTX(desc)->hash_ctx, out);
}
static int crypto_hmac_md5_import_core(struct shash_desc *desc, const void *in)
{
struct hmac_md5_ctx *ctx = HMAC_MD5_CTX(desc);
ctx->ostate = HMAC_MD5_KEY(desc->tfm)->ostate;
return __crypto_md5_import_core(&ctx->hash_ctx, in);
}
static struct shash_alg algs[] = {
{
.base.cra_name = "md5",
.base.cra_driver_name = "md5-lib",
.base.cra_priority = 300,
.base.cra_blocksize = MD5_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.digestsize = MD5_DIGEST_SIZE,
.init = crypto_md5_init,
.update = crypto_md5_update,
.final = crypto_md5_final,
.digest = crypto_md5_digest,
.export = crypto_md5_export,
.import = crypto_md5_import,
.export_core = crypto_md5_export_core,
.import_core = crypto_md5_import_core,
.descsize = sizeof(struct md5_ctx),
.statesize = MD5_SHASH_STATE_SIZE,
},
{
.base.cra_name = "hmac(md5)",
.base.cra_driver_name = "hmac-md5-lib",
.base.cra_priority = 300,
.base.cra_blocksize = MD5_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct hmac_md5_key),
.base.cra_module = THIS_MODULE,
.digestsize = MD5_DIGEST_SIZE,
.setkey = crypto_hmac_md5_setkey,
.init = crypto_hmac_md5_init,
.update = crypto_hmac_md5_update,
.final = crypto_hmac_md5_final,
.digest = crypto_hmac_md5_digest,
.export = crypto_hmac_md5_export,
.import = crypto_hmac_md5_import,
.export_core = crypto_hmac_md5_export_core,
.import_core = crypto_hmac_md5_import_core,
.descsize = sizeof(struct hmac_md5_ctx),
.statesize = MD5_SHASH_STATE_SIZE,
},
};
static int __init md5_mod_init(void)
static int __init crypto_md5_mod_init(void)
{
return crypto_register_shash(&alg);
return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}
module_init(crypto_md5_mod_init);
static void __exit md5_mod_fini(void)
static void __exit crypto_md5_mod_exit(void)
{
crypto_unregister_shash(&alg);
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
module_init(md5_mod_init);
module_exit(md5_mod_fini);
module_exit(crypto_md5_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
MODULE_DESCRIPTION("Crypto API support for MD5 and HMAC-MD5");
MODULE_ALIAS_CRYPTO("md5");
MODULE_ALIAS_CRYPTO("md5-lib");
MODULE_ALIAS_CRYPTO("hmac(md5)");
MODULE_ALIAS_CRYPTO("hmac-md5-lib");

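The rewritten crypto/md5.c above is a thin shash wrapper over the lib/crypto
MD5 code. Callers that do not need algorithm lookup through the crypto API
can use the library helpers it is built on directly; a minimal sketch,
assuming the struct md5_ctx and hmac_md5_* interfaces invoked by the wrapper
(the example_* functions are hypothetical):

#include <crypto/md5.h>

/* Incremental hashing over two fragments. */
static void example_md5_fragments(const u8 *hdr, size_t hdr_len,
                                  const u8 *body, size_t body_len,
                                  u8 digest[MD5_DIGEST_SIZE])
{
        struct md5_ctx ctx;

        md5_init(&ctx);
        md5_update(&ctx, hdr, hdr_len);
        md5_update(&ctx, body, body_len);
        md5_final(&ctx, digest);
}

/* One-shot HMAC-MD5 using a prepared key. */
static void example_hmac_md5(const u8 *raw_key, size_t key_len,
                             const u8 *msg, size_t msg_len,
                             u8 mac[MD5_DIGEST_SIZE])
{
        struct hmac_md5_key key;

        hmac_md5_preparekey(&key, raw_key, key_len);
        hmac_md5(&key, msg, msg_len, mac);
}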

@ -4152,14 +4152,14 @@ static int alg_test_null(const struct alg_test_desc *desc,
static const struct alg_test_desc alg_test_descs[] = {
{
.alg = "adiantum(xchacha12,aes)",
.generic_driver = "adiantum(xchacha12-generic,aes-generic,nhpoly1305-generic)",
.generic_driver = "adiantum(xchacha12-lib,aes-generic,nhpoly1305-generic)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(adiantum_xchacha12_aes_tv_template)
},
}, {
.alg = "adiantum(xchacha20,aes)",
.generic_driver = "adiantum(xchacha20-generic,aes-generic,nhpoly1305-generic)",
.generic_driver = "adiantum(xchacha20-lib,aes-generic,nhpoly1305-generic)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(adiantum_xchacha20_aes_tv_template)
@ -4178,6 +4178,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(md5),ecb(cipher_null))",
.generic_driver = "authenc(hmac-md5-lib,ecb-cipher_null)",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_md5_ecb_cipher_null_tv_template)
@ -4484,6 +4485,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "chacha20",
.generic_driver = "chacha20-lib",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(chacha20_tv_template)
@ -4639,12 +4641,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.cipher = __VECS(sm4_cts_tv_template)
}
}, {
.alg = "curve25519",
.test = alg_test_kpp,
.suite = {
.kpp = __VECS(curve25519_tv_template)
}
}, {
.alg = "deflate",
.test = alg_test_comp,
@ -5064,6 +5060,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(md5)",
.generic_driver = "hmac-md5-lib",
.test = alg_test_hash,
.suite = {
.hash = __VECS(hmac_md5_tv_template)
@ -5250,6 +5247,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "md5",
.generic_driver = "md5-lib",
.test = alg_test_hash,
.suite = {
.hash = __VECS(md5_tv_template)
@ -5417,12 +5415,14 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc7539(chacha20,poly1305)",
.generic_driver = "rfc7539(chacha20-lib,poly1305-generic)",
.test = alg_test_aead,
.suite = {
.aead = __VECS(rfc7539_tv_template)
}
}, {
.alg = "rfc7539esp(chacha20,poly1305)",
.generic_driver = "rfc7539esp(chacha20-lib,poly1305-generic)",
.test = alg_test_aead,
.suite = {
.aead = {
@ -5588,12 +5588,14 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "xchacha12",
.generic_driver = "xchacha12-lib",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(xchacha12_tv_template)
},
}, {
.alg = "xchacha20",
.generic_driver = "xchacha20-lib",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(xchacha20_tv_template)

(File diff suppressed because it is too large.)


@ -69,7 +69,6 @@ config CRYPTO_DEV_HISI_HPRE
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
select CRYPTO_CURVE25519
select CRYPTO_ECDH
help
Support for HiSilicon HPRE(High Performance RSA Engine)


@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/curve25519.h>
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
@ -106,16 +105,6 @@ struct hpre_ecdh_ctx {
dma_addr_t dma_g;
};
struct hpre_curve25519_ctx {
/* low address: p->a->k */
unsigned char *p;
dma_addr_t dma_p;
/* gx coordinate */
unsigned char *g;
dma_addr_t dma_g;
};
struct hpre_ctx {
struct hisi_qp *qp;
struct device *dev;
@ -129,7 +118,6 @@ struct hpre_ctx {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
struct hpre_ecdh_ctx ecdh;
struct hpre_curve25519_ctx curve25519;
};
/* for ecc algorithms */
unsigned int curve_id;
@ -146,7 +134,6 @@ struct hpre_asym_request {
struct akcipher_request *rsa;
struct kpp_request *dh;
struct kpp_request *ecdh;
struct kpp_request *curve25519;
} areq;
int err;
int req_id;
@ -1214,8 +1201,7 @@ static void hpre_key_to_big_end(u8 *data, int len)
}
}
static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
bool is_ecdh)
static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
@ -1224,17 +1210,11 @@ static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
if (is_clear_all)
hisi_qm_stop_qp(ctx->qp);
if (is_ecdh && ctx->ecdh.p) {
if (ctx->ecdh.p) {
/* ecdh: p->a->k->b */
memzero_explicit(ctx->ecdh.p + shift, sz);
dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
ctx->ecdh.p = NULL;
} else if (!is_ecdh && ctx->curve25519.p) {
/* curve25519: p->a->k */
memzero_explicit(ctx->curve25519.p + shift, sz);
dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
ctx->curve25519.dma_p);
ctx->curve25519.p = NULL;
}
hpre_ctx_clear(ctx, is_clear_all);
@ -1432,7 +1412,7 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
hpre_ecc_clear_ctx(ctx, false, true);
hpre_ecc_clear_ctx(ctx, false);
ret = hpre_ecdh_set_param(ctx, &params);
if (ret < 0) {
@ -1683,337 +1663,7 @@ static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
hpre_ecc_clear_ctx(ctx, true, true);
}
static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
unsigned int len)
{
u8 secret[CURVE25519_KEY_SIZE] = { 0 };
unsigned int sz = ctx->key_sz;
const struct ecc_curve *curve;
unsigned int shift = sz << 1;
void *p;
/*
* The key from 'buf' is in little-endian, we should preprocess it as
* the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
* then convert it to big endian. Only in this way, the result can be
* the same as the software curve-25519 that exists in crypto.
*/
memcpy(secret, buf, len);
curve25519_clamp_secret(secret);
hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
p = ctx->curve25519.p + sz - len;
curve = ecc_get_curve25519();
/* fill curve parameters */
fill_curve_param(p, curve->p, len, curve->g.ndigits);
fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
memcpy(p + shift, secret, len);
fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
memzero_explicit(secret, CURVE25519_KEY_SIZE);
}
static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
unsigned int len)
{
struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
unsigned int shift = sz << 1;
/* p->a->k->gx */
if (!ctx->curve25519.p) {
ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
&ctx->curve25519.dma_p,
GFP_KERNEL);
if (!ctx->curve25519.p)
return -ENOMEM;
}
ctx->curve25519.g = ctx->curve25519.p + shift + sz;
ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
hpre_curve25519_fill_curve(ctx, buf, len);
return 0;
}
static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
struct device *dev = ctx->dev;
int ret = -EINVAL;
if (len != CURVE25519_KEY_SIZE ||
!crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
dev_err(dev, "key is null or key len is not 32bytes!\n");
return ret;
}
/* Free old secret if any */
hpre_ecc_clear_ctx(ctx, false, false);
ctx->key_sz = CURVE25519_KEY_SIZE;
ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
if (ret) {
dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
hpre_ecc_clear_ctx(ctx, false, false);
return ret;
}
return 0;
}
static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
struct hpre_asym_request *req,
struct scatterlist *dst,
struct scatterlist *src)
{
struct device *dev = ctx->dev;
struct hpre_sqe *sqe = &req->req;
dma_addr_t dma;
dma = le64_to_cpu(sqe->in);
if (unlikely(dma_mapping_error(dev, dma)))
return;
if (src && req->src)
dma_free_coherent(dev, ctx->key_sz, req->src, dma);
dma = le64_to_cpu(sqe->out);
if (unlikely(dma_mapping_error(dev, dma)))
return;
if (req->dst)
dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
if (dst)
dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
}
static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
{
struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req = NULL;
struct kpp_request *areq;
u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
areq = req->areq.curve25519;
areq->dst_len = ctx->key_sz;
overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
/* Do unmap before data processing */
hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
kpp_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
struct kpp_request *req)
{
struct hpre_asym_request *h_req;
struct hpre_sqe *msg;
int req_id;
void *tmp;
if (unlikely(req->dst_len < ctx->key_sz)) {
req->dst_len = ctx->key_sz;
return -EINVAL;
}
tmp = kpp_request_ctx(req);
h_req = PTR_ALIGN(tmp, hpre_align_sz());
h_req->cb = hpre_curve25519_cb;
h_req->areq.curve25519 = req;
msg = &h_req->req;
memset(msg, 0, sizeof(*msg));
msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
msg->key = cpu_to_le64(ctx->curve25519.dma_p);
msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
h_req->ctx = ctx;
req_id = hpre_add_req_to_ctx(h_req);
if (req_id < 0)
return -EBUSY;
msg->tag = cpu_to_le16((u16)req_id);
return 0;
}
static void hpre_curve25519_src_modulo_p(u8 *ptr)
{
int i;
for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
ptr[i] = 0;
/* The modulus is ptr's last byte minus '0xed'(last byte of p) */
ptr[i] -= 0xed;
}
static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
struct scatterlist *data, unsigned int len)
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = ctx->dev;
u8 p[CURVE25519_KEY_SIZE] = { 0 };
const struct ecc_curve *curve;
dma_addr_t dma = 0;
u8 *ptr;
if (len != CURVE25519_KEY_SIZE) {
dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
return -EINVAL;
}
ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
if (unlikely(!ptr))
return -ENOMEM;
scatterwalk_map_and_copy(ptr, data, 0, len, 0);
if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
dev_err(dev, "gx is null!\n");
goto err;
}
/*
* Src_data(gx) is in little-endian order, MSB in the final byte should
* be masked as described in RFC7748, then transform it to big-endian
* form, then hisi_hpre can use the data.
*/
ptr[31] &= 0x7f;
hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
curve = ecc_get_curve25519();
fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
/*
* When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of p,
* we get its modulus to p, and then use it.
*/
if (memcmp(ptr, p, ctx->key_sz) == 0) {
dev_err(dev, "gx is p!\n");
goto err;
} else if (memcmp(ptr, p, ctx->key_sz) > 0) {
hpre_curve25519_src_modulo_p(ptr);
}
hpre_req->src = ptr;
msg->in = cpu_to_le64(dma);
return 0;
err:
dma_free_coherent(dev, ctx->key_sz, ptr, dma);
return -EINVAL;
}
static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
struct scatterlist *data, unsigned int len)
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = ctx->dev;
dma_addr_t dma;
if (!data || !sg_is_last(data) || len != ctx->key_sz) {
dev_err(dev, "data or data length is illegal!\n");
return -EINVAL;
}
hpre_req->dst = NULL;
dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, dma))) {
dev_err(dev, "dma map data err!\n");
return -ENOMEM;
}
msg->out = cpu_to_le64(dma);
return 0;
}
static int hpre_curve25519_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
struct device *dev = ctx->dev;
void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
struct hpre_sqe *msg = &hpre_req->req;
int ret;
ret = hpre_curve25519_msg_request_set(ctx, req);
if (unlikely(ret)) {
dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
return ret;
}
if (req->src) {
ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
if (unlikely(ret)) {
dev_err(dev, "failed to init src data, ret = %d!\n",
ret);
goto clear_all;
}
} else {
msg->in = cpu_to_le64(ctx->curve25519.dma_g);
}
ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
if (unlikely(ret)) {
dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
goto clear_all;
}
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
clear_all:
hpre_rm_req_from_ctx(hpre_req);
hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
}
static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
return ctx->key_sz;
}
static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}
static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
hpre_ecc_clear_ctx(ctx, true, false);
hpre_ecc_clear_ctx(ctx, true);
}
static struct akcipher_alg rsa = {
@ -2095,22 +1745,6 @@ static struct kpp_alg ecdh_curves[] = {
}
};
static struct kpp_alg curve25519_alg = {
.set_secret = hpre_curve25519_set_secret,
.generate_public_key = hpre_curve25519_compute_value,
.compute_shared_secret = hpre_curve25519_compute_value,
.max_size = hpre_curve25519_max_size,
.init = hpre_curve25519_init_tfm,
.exit = hpre_curve25519_exit_tfm,
.base = {
.cra_ctxsize = sizeof(struct hpre_ctx),
.cra_priority = HPRE_CRYPTO_ALG_PRI,
.cra_name = "curve25519",
.cra_driver_name = "hpre-curve25519",
.cra_module = THIS_MODULE,
},
};
static int hpre_register_rsa(struct hisi_qm *qm)
{
int ret;
@ -2192,28 +1826,6 @@ static void hpre_unregister_ecdh(struct hisi_qm *qm)
crypto_unregister_kpp(&ecdh_curves[i]);
}
static int hpre_register_x25519(struct hisi_qm *qm)
{
int ret;
if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
return 0;
ret = crypto_register_kpp(&curve25519_alg);
if (ret)
dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
return ret;
}
static void hpre_unregister_x25519(struct hisi_qm *qm)
{
if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
return;
crypto_unregister_kpp(&curve25519_alg);
}
int hpre_algs_register(struct hisi_qm *qm)
{
int ret = 0;
@ -2236,17 +1848,11 @@ int hpre_algs_register(struct hisi_qm *qm)
if (ret)
goto unreg_dh;
ret = hpre_register_x25519(qm);
if (ret)
goto unreg_ecdh;
hpre_available_devs++;
mutex_unlock(&hpre_algs_lock);
return ret;
unreg_ecdh:
hpre_unregister_ecdh(qm);
unreg_dh:
hpre_unregister_dh(qm);
unreg_rsa:
@ -2262,7 +1868,6 @@ void hpre_algs_unregister(struct hisi_qm *qm)
if (--hpre_available_devs)
goto unlock;
hpre_unregister_x25519(qm);
hpre_unregister_ecdh(qm);
hpre_unregister_dh(qm);
hpre_unregister_rsa(qm);


@ -700,7 +700,7 @@ static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
return img_hash_cra_init(tfm, "md5-generic");
return img_hash_cra_init(tfm, "md5-lib");
}
static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)


@ -76,24 +76,11 @@ config WIREGUARD
tristate "WireGuard secure network tunnel"
depends on NET && INET
depends on IPV6 || !IPV6
depends on !KMSAN # KMSAN doesn't support the crypto configs below
select NET_UDP_TUNNEL
select DST_CACHE
select CRYPTO
select CRYPTO_LIB_CURVE25519
select CRYPTO_LIB_CHACHA20POLY1305
select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
select CRYPTO_CURVE25519_X86 if X86 && 64BIT
select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
select CRYPTO_POLY1305_ARM if ARM
select CRYPTO_BLAKE2S_ARM if ARM
select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
select CRYPTO_POLY1305_MIPS if MIPS
select CRYPTO_CHACHA_S390 if S390
select CRYPTO_LIB_UTILS
help
WireGuard is a secure, fast, and easy to use replacement for IPSec
that uses modern cryptography and clever networking tricks. It's


@ -45,19 +45,11 @@ static inline void chacha20_block(struct chacha_state *state,
chacha_block_generic(state, out, 20);
}
void hchacha_block_arch(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds);
void hchacha_block_generic(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds);
static inline void hchacha_block(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds)
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
hchacha_block_arch(state, out, nrounds);
else
hchacha_block_generic(state, out, nrounds);
}
void hchacha_block(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds);
enum chacha_constants { /* expand 32-byte k */
CHACHA_CONSTANT_EXPA = 0x61707865U,
@ -93,20 +85,8 @@ static inline void chacha_init(struct chacha_state *state,
state->x[15] = get_unaligned_le32(iv + 12);
}
void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
void chacha_crypt_generic(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
static inline void chacha_crypt(struct chacha_state *state,
u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
chacha_crypt_arch(state, dst, src, bytes, nrounds);
else
chacha_crypt_generic(state, dst, src, bytes, nrounds);
}
void chacha_crypt(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
static inline void chacha20_crypt(struct chacha_state *state,
u8 *dst, const u8 *src, unsigned int bytes)
@ -119,13 +99,4 @@ static inline void chacha_zeroize_state(struct chacha_state *state)
memzero_explicit(state, sizeof(*state));
}
#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)
bool chacha_is_arch_optimized(void);
#else
static inline bool chacha_is_arch_optimized(void)
{
return false;
}
#endif
#endif /* _CRYPTO_CHACHA_H */
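Example (not part of the diff): a minimal usage sketch of the simplified ChaCha library interface above. Only chacha20_crypt() and chacha_zeroize_state() appear in the hunks shown here; the chacha_init() prototype and the key/IV sizes are assumptions about the rest of <crypto/chacha.h>, and the helper name is invented.

#include <crypto/chacha.h>

/* Encrypt a buffer in place with ChaCha20, as a library user would. */
static void chacha20_crypt_example(u8 *buf, unsigned int len,
                                   const u32 key[8], const u8 iv[16])
{
        struct chacha_state state;

        chacha_init(&state, key, iv);           /* assumed prototype */
        chacha20_crypt(&state, buf, buf, len);  /* 20-round variant */
        chacha_zeroize_state(&state);           /* wipe the cipher state */
}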


@ -6,7 +6,6 @@
#ifndef CURVE25519_H
#define CURVE25519_H
#include <crypto/algapi.h> // For crypto_memneq.
#include <linux/types.h>
#include <linux/random.h>
@ -14,49 +13,16 @@ enum curve25519_lengths {
CURVE25519_KEY_SIZE = 32
};
extern const u8 curve25519_null_point[];
extern const u8 curve25519_base_point[];
void curve25519_generic(u8 out[CURVE25519_KEY_SIZE],
const u8 scalar[CURVE25519_KEY_SIZE],
const u8 point[CURVE25519_KEY_SIZE]);
void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
const u8 scalar[CURVE25519_KEY_SIZE],
const u8 point[CURVE25519_KEY_SIZE]);
void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE]);
bool curve25519_selftest(void);
static inline
bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
const u8 basepoint[CURVE25519_KEY_SIZE])
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
curve25519_arch(mypublic, secret, basepoint);
else
curve25519_generic(mypublic, secret, basepoint);
return crypto_memneq(mypublic, curve25519_null_point,
CURVE25519_KEY_SIZE);
}
const u8 basepoint[CURVE25519_KEY_SIZE]);
static inline bool
__must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE])
{
if (unlikely(!crypto_memneq(secret, curve25519_null_point,
CURVE25519_KEY_SIZE)))
return false;
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
curve25519_base_arch(pub, secret);
else
curve25519_generic(pub, secret, curve25519_base_point);
return crypto_memneq(pub, curve25519_null_point, CURVE25519_KEY_SIZE);
}
bool __must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE]);
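Example (not part of the diff): a minimal sketch of how a caller uses the out-of-line Curve25519 API declared above. Only functions and constants from this header are used; the wrapper and variable names are invented for illustration, and the clamp helper is the inline that follows.

#include <crypto/curve25519.h>

/* Derive a public key and a shared secret for a hypothetical caller. */
static bool __must_check curve25519_example(u8 shared[CURVE25519_KEY_SIZE],
                                            u8 my_public[CURVE25519_KEY_SIZE],
                                            u8 my_secret[CURVE25519_KEY_SIZE],
                                            const u8 their_public[CURVE25519_KEY_SIZE])
{
        curve25519_clamp_secret(my_secret);
        if (!curve25519_generate_public(my_public, my_secret))
                return false;           /* secret was the all-zero point */
        /* Returns false if the resulting shared secret is all zeroes. */
        return curve25519(shared, my_secret, their_public);
}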
static inline void curve25519_clamp_secret(u8 secret[CURVE25519_KEY_SIZE])
{


@ -1,21 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Helper functions for BLAKE2s implementations.
* Keep this in sync with the corresponding BLAKE2b header.
*/
#ifndef _CRYPTO_INTERNAL_BLAKE2S_H
#define _CRYPTO_INTERNAL_BLAKE2S_H
#include <crypto/blake2s.h>
#include <linux/string.h>
void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc);
void blake2s_compress(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc);
bool blake2s_selftest(void);
#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */


@ -30,12 +30,13 @@ void poly1305_core_blocks(struct poly1305_state *state,
void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
void *dst);
void poly1305_block_init_arch(struct poly1305_block_state *state,
const u8 raw_key[POLY1305_BLOCK_SIZE]);
void poly1305_block_init_generic(struct poly1305_block_state *state,
const u8 raw_key[POLY1305_BLOCK_SIZE]);
void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
unsigned int len, u32 padbit);
static inline void
poly1305_block_init_generic(struct poly1305_block_state *desc,
const u8 raw_key[POLY1305_BLOCK_SIZE])
{
poly1305_core_init(&desc->h);
poly1305_core_setkey(&desc->core_r, raw_key);
}
static inline void poly1305_blocks_generic(struct poly1305_block_state *state,
const u8 *src, unsigned int len,
@ -45,9 +46,6 @@ static inline void poly1305_blocks_generic(struct poly1305_block_state *state,
len / POLY1305_BLOCK_SIZE, padbit);
}
void poly1305_emit_arch(const struct poly1305_state *state,
u8 digest[POLY1305_DIGEST_SIZE], const u32 nonce[4]);
static inline void poly1305_emit_generic(const struct poly1305_state *state,
u8 digest[POLY1305_DIGEST_SIZE],
const u32 nonce[4])


@ -7,6 +7,7 @@
#define MD5_DIGEST_SIZE 16
#define MD5_HMAC_BLOCK_SIZE 64
#define MD5_BLOCK_SIZE 64
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
#define MD5_STATE_SIZE 24
@ -27,4 +28,182 @@ struct md5_state {
u32 block[MD5_BLOCK_WORDS];
};
#endif
/* State for the MD5 compression function */
struct md5_block_state {
u32 h[MD5_HASH_WORDS];
};
/**
* struct md5_ctx - Context for hashing a message with MD5
* @state: the compression function state
* @bytecount: number of bytes processed so far
* @buf: partial block buffer; bytecount % MD5_BLOCK_SIZE bytes are valid
*/
struct md5_ctx {
struct md5_block_state state;
u64 bytecount;
u8 buf[MD5_BLOCK_SIZE] __aligned(__alignof__(__le64));
};
/**
* md5_init() - Initialize an MD5 context for a new message
* @ctx: the context to initialize
*
* If you don't need incremental computation, consider md5() instead.
*
* Context: Any context.
*/
void md5_init(struct md5_ctx *ctx);
/**
* md5_update() - Update an MD5 context with message data
* @ctx: the context to update; must have been initialized
* @data: the message data
* @len: the data length in bytes
*
* This can be called any number of times.
*
* Context: Any context.
*/
void md5_update(struct md5_ctx *ctx, const u8 *data, size_t len);
/**
* md5_final() - Finish computing an MD5 message digest
* @ctx: the context to finalize; must have been initialized
* @out: (output) the resulting MD5 message digest
*
* After finishing, this zeroizes @ctx. So the caller does not need to do it.
*
* Context: Any context.
*/
void md5_final(struct md5_ctx *ctx, u8 out[MD5_DIGEST_SIZE]);
/**
* md5() - Compute MD5 message digest in one shot
* @data: the message data
* @len: the data length in bytes
* @out: (output) the resulting MD5 message digest
*
* Context: Any context.
*/
void md5(const u8 *data, size_t len, u8 out[MD5_DIGEST_SIZE]);
/**
* struct hmac_md5_key - Prepared key for HMAC-MD5
* @istate: private
* @ostate: private
*/
struct hmac_md5_key {
struct md5_block_state istate;
struct md5_block_state ostate;
};
/**
* struct hmac_md5_ctx - Context for computing HMAC-MD5 of a message
* @hash_ctx: private
* @ostate: private
*/
struct hmac_md5_ctx {
struct md5_ctx hash_ctx;
struct md5_block_state ostate;
};
/**
* hmac_md5_preparekey() - Prepare a key for HMAC-MD5
* @key: (output) the key structure to initialize
* @raw_key: the raw HMAC-MD5 key
* @raw_key_len: the key length in bytes. All key lengths are supported.
*
* Note: the caller is responsible for zeroizing both the struct hmac_md5_key
* and the raw key once they are no longer needed.
*
* Context: Any context.
*/
void hmac_md5_preparekey(struct hmac_md5_key *key,
const u8 *raw_key, size_t raw_key_len);
/**
* hmac_md5_init() - Initialize an HMAC-MD5 context for a new message
* @ctx: (output) the HMAC context to initialize
* @key: the prepared HMAC key
*
* If you don't need incremental computation, consider hmac_md5() instead.
*
* Context: Any context.
*/
void hmac_md5_init(struct hmac_md5_ctx *ctx, const struct hmac_md5_key *key);
/**
* hmac_md5_init_usingrawkey() - Initialize an HMAC-MD5 context for a new
* message, using a raw key
* @ctx: (output) the HMAC context to initialize
* @raw_key: the raw HMAC-MD5 key
* @raw_key_len: the key length in bytes. All key lengths are supported.
*
* If you don't need incremental computation, consider hmac_md5_usingrawkey()
* instead.
*
* Context: Any context.
*/
void hmac_md5_init_usingrawkey(struct hmac_md5_ctx *ctx,
const u8 *raw_key, size_t raw_key_len);
/**
* hmac_md5_update() - Update an HMAC-MD5 context with message data
* @ctx: the HMAC context to update; must have been initialized
* @data: the message data
* @data_len: the data length in bytes
*
* This can be called any number of times.
*
* Context: Any context.
*/
static inline void hmac_md5_update(struct hmac_md5_ctx *ctx,
const u8 *data, size_t data_len)
{
md5_update(&ctx->hash_ctx, data, data_len);
}
/**
* hmac_md5_final() - Finish computing an HMAC-MD5 value
* @ctx: the HMAC context to finalize; must have been initialized
* @out: (output) the resulting HMAC-MD5 value
*
* After finishing, this zeroizes @ctx. So the caller does not need to do it.
*
* Context: Any context.
*/
void hmac_md5_final(struct hmac_md5_ctx *ctx, u8 out[MD5_DIGEST_SIZE]);
/**
* hmac_md5() - Compute HMAC-MD5 in one shot, using a prepared key
* @key: the prepared HMAC key
* @data: the message data
* @data_len: the data length in bytes
* @out: (output) the resulting HMAC-MD5 value
*
* If you're using the key only once, consider using hmac_md5_usingrawkey().
*
* Context: Any context.
*/
void hmac_md5(const struct hmac_md5_key *key,
const u8 *data, size_t data_len, u8 out[MD5_DIGEST_SIZE]);
/**
* hmac_md5_usingrawkey() - Compute HMAC-MD5 in one shot, using a raw key
* @raw_key: the raw HMAC-MD5 key
* @raw_key_len: the key length in bytes. All key lengths are supported.
* @data: the message data
* @data_len: the data length in bytes
* @out: (output) the resulting HMAC-MD5 value
*
* If you're using the key multiple times, prefer to use hmac_md5_preparekey()
* followed by multiple calls to hmac_md5() instead.
*
* Context: Any context.
*/
void hmac_md5_usingrawkey(const u8 *raw_key, size_t raw_key_len,
const u8 *data, size_t data_len,
u8 out[MD5_DIGEST_SIZE]);
#endif /* _CRYPTO_MD5_H */
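Example (not part of the diff): a brief usage sketch of the MD5 library API documented above. The helper and buffers are hypothetical, but every md5_*() and hmac_md5_*() call matches the declarations in this header.

#include <crypto/md5.h>
#include <linux/types.h>

static void md5_lib_example(const u8 *msg, size_t msg_len,
                            const u8 *key, size_t key_len)
{
        struct md5_ctx ctx;
        struct hmac_md5_key hkey;
        u8 digest[MD5_DIGEST_SIZE];
        u8 mac[MD5_DIGEST_SIZE];

        /* One-shot hash. */
        md5(msg, msg_len, digest);

        /* Equivalent incremental computation. */
        md5_init(&ctx);
        md5_update(&ctx, msg, msg_len);
        md5_final(&ctx, digest);        /* also zeroizes ctx */

        /* HMAC-MD5 with a raw key used only once. */
        hmac_md5_usingrawkey(key, key_len, msg, msg_len, mac);

        /* HMAC-MD5 with a prepared key that can be reused. */
        hmac_md5_preparekey(&hkey, key, key_len);
        hmac_md5(&hkey, msg, msg_len, mac);
}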


@ -64,13 +64,4 @@ void poly1305_update(struct poly1305_desc_ctx *desc,
const u8 *src, unsigned int nbytes);
void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest);
#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)
bool poly1305_is_arch_optimized(void);
#else
static inline bool poly1305_is_arch_optimized(void)
{
return false;
}
#endif
#endif
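Example (not part of the diff): a minimal sketch of computing a one-shot Poly1305 MAC through this library interface. poly1305_update() and poly1305_final() are declared in the hunk above; poly1305_init() and the POLY1305_* size macros are assumed to come from the rest of <crypto/poly1305.h>, and the helper name is invented.

#include <crypto/poly1305.h>

static void poly1305_mac_example(const u8 key[POLY1305_KEY_SIZE],
                                 const u8 *msg, unsigned int len,
                                 u8 mac[POLY1305_DIGEST_SIZE])
{
        struct poly1305_desc_ctx desc;

        /* Note: a Poly1305 (r, s) key must authenticate only one message. */
        poly1305_init(&desc, key);      /* assumed prototype */
        poly1305_update(&desc, msg, len);
        poly1305_final(&desc, mac);     /* writes the 16-byte tag */
}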


@ -28,108 +28,101 @@ config CRYPTO_LIB_ARC4
config CRYPTO_LIB_GF128MUL
tristate
config CRYPTO_ARCH_HAVE_LIB_BLAKE2S
# BLAKE2s support is always built-in, so there's no CRYPTO_LIB_BLAKE2S option.
config CRYPTO_LIB_BLAKE2S_ARCH
bool
help
Declares whether the architecture provides an arch-specific
accelerated implementation of the Blake2s library interface,
either builtin or as a module.
config CRYPTO_LIB_BLAKE2S_GENERIC
def_bool !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
help
This symbol can be depended upon by arch implementations of the
Blake2s library interface that require the generic code as a
fallback, e.g., for SIMD implementations. If no arch specific
implementation is enabled, this implementation serves the users
of CRYPTO_LIB_BLAKE2S.
config CRYPTO_ARCH_HAVE_LIB_CHACHA
bool
help
Declares whether the architecture provides an arch-specific
accelerated implementation of the ChaCha library interface,
either builtin or as a module.
config CRYPTO_LIB_CHACHA_GENERIC
tristate
default CRYPTO_LIB_CHACHA if !CRYPTO_ARCH_HAVE_LIB_CHACHA
select CRYPTO_LIB_UTILS
help
This symbol can be selected by arch implementations of the ChaCha
library interface that require the generic code as a fallback, e.g.,
for SIMD implementations. If no arch specific implementation is
enabled, this implementation serves the users of CRYPTO_LIB_CHACHA.
depends on !UML
default y if ARM
default y if X86_64
config CRYPTO_LIB_CHACHA
tristate
help
Enable the ChaCha library interface. This interface may be fulfilled
by either the generic implementation or an arch-specific one, if one
is available and enabled.
config CRYPTO_ARCH_HAVE_LIB_CURVE25519
bool
help
Declares whether the architecture provides an arch-specific
accelerated implementation of the Curve25519 library interface,
either builtin or as a module.
config CRYPTO_LIB_CURVE25519_GENERIC
tristate
select CRYPTO_LIB_UTILS
help
This symbol can be depended upon by arch implementations of the
Curve25519 library interface that require the generic code as a
fallback, e.g., for SIMD implementations. If no arch specific
implementation is enabled, this implementation serves the users
of CRYPTO_LIB_CURVE25519.
Enable the ChaCha library interface. Select this if your module uses
chacha_crypt() or hchacha_block().
config CRYPTO_LIB_CURVE25519_INTERNAL
tristate
select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
config CRYPTO_LIB_CHACHA_ARCH
bool
depends on CRYPTO_LIB_CHACHA && !UML && !KMSAN
default y if ARM
default y if ARM64 && KERNEL_MODE_NEON
default y if MIPS && CPU_MIPS32_R2
default y if PPC64 && CPU_LITTLE_ENDIAN && VSX
default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
default y if S390
default y if X86_64
config CRYPTO_LIB_CURVE25519
tristate
select CRYPTO
select CRYPTO_LIB_CURVE25519_INTERNAL
select CRYPTO_LIB_UTILS
help
Enable the Curve25519 library interface. This interface may be
fulfilled by either the generic implementation or an arch-specific
one, if one is available and enabled.
The Curve25519 library functions. Select this if your module uses any
of the functions from <crypto/curve25519.h>.
config CRYPTO_LIB_CURVE25519_ARCH
bool
depends on CRYPTO_LIB_CURVE25519 && !UML && !KMSAN
default y if ARM && KERNEL_MODE_NEON
default y if PPC64 && CPU_LITTLE_ENDIAN
default y if X86_64
config CRYPTO_LIB_CURVE25519_GENERIC
bool
depends on CRYPTO_LIB_CURVE25519
default y if !CRYPTO_LIB_CURVE25519_ARCH || ARM || X86_64
config CRYPTO_LIB_DES
tristate
config CRYPTO_LIB_POLY1305_RSIZE
int
default 2 if MIPS
default 11 if X86_64
default 9 if ARM || ARM64
default 1
config CRYPTO_ARCH_HAVE_LIB_POLY1305
bool
help
Declares whether the architecture provides an arch-specific
accelerated implementation of the Poly1305 library interface,
either builtin or as a module.
config CRYPTO_LIB_POLY1305_GENERIC
config CRYPTO_LIB_MD5
tristate
default CRYPTO_LIB_POLY1305 if !CRYPTO_ARCH_HAVE_LIB_POLY1305
help
This symbol can be selected by arch implementations of the Poly1305
library interface that require the generic code as a fallback, e.g.,
for SIMD implementations. If no arch specific implementation is
enabled, this implementation serves the users of CRYPTO_LIB_POLY1305.
The MD5 and HMAC-MD5 library functions. Select this if your module
uses any of the functions from <crypto/md5.h>.
config CRYPTO_LIB_MD5_ARCH
bool
depends on CRYPTO_LIB_MD5 && !UML
default y if MIPS && CPU_CAVIUM_OCTEON
default y if PPC
default y if SPARC64
config CRYPTO_LIB_POLY1305
tristate
help
Enable the Poly1305 library interface. This interface may be fulfilled
by either the generic implementation or an arch-specific one, if one
is available and enabled.
The Poly1305 library functions. Select this if your module uses any
of the functions from <crypto/poly1305.h>.
config CRYPTO_LIB_POLY1305_ARCH
bool
depends on CRYPTO_LIB_POLY1305 && !UML
default y if ARM
default y if ARM64 && KERNEL_MODE_NEON
default y if MIPS
# The PPC64 code needs to be fixed to work in softirq context.
default y if PPC64 && CPU_LITTLE_ENDIAN && VSX && BROKEN
default y if RISCV
default y if X86_64
# This symbol controls the inclusion of the Poly1305 generic code. This differs
# from most of the other algorithms, which handle the generic code
# "automatically" via __maybe_unused. This is needed so that the Adiantum code,
# which calls the poly1305_core_*() functions directly, can enable them.
config CRYPTO_LIB_POLY1305_GENERIC
bool
depends on CRYPTO_LIB_POLY1305
# Enable if there's no arch impl or the arch impl requires the generic
# impl as a fallback. (Or if selected explicitly.)
default y if !CRYPTO_LIB_POLY1305_ARCH || PPC64
config CRYPTO_LIB_POLY1305_RSIZE
int
default 2 if MIPS || RISCV
default 11 if X86_64
default 9 if ARM || ARM64
default 1
config CRYPTO_LIB_CHACHA20POLY1305
tristate
@ -196,28 +189,4 @@ config CRYPTO_LIB_SM3
source "lib/crypto/tests/Kconfig"
if !KMSAN # avoid false positives from assembly
if ARM
source "lib/crypto/arm/Kconfig"
endif
if ARM64
source "lib/crypto/arm64/Kconfig"
endif
if MIPS
source "lib/crypto/mips/Kconfig"
endif
if PPC
source "lib/crypto/powerpc/Kconfig"
endif
if RISCV
source "lib/crypto/riscv/Kconfig"
endif
if S390
source "lib/crypto/s390/Kconfig"
endif
if X86
source "lib/crypto/x86/Kconfig"
endif
endif
endmenu


@ -15,10 +15,6 @@ obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
obj-$(CONFIG_CRYPTO_LIB_UTILS) += libcryptoutils.o
libcryptoutils-y := memneq.o utils.o
# chacha is used by the /dev/random driver which is always builtin
obj-y += chacha.o
obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o
obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o
libaes-y := aes.o
@ -33,39 +29,162 @@ libarc4-y := arc4.o
obj-$(CONFIG_CRYPTO_LIB_GF128MUL) += gf128mul.o
################################################################################
# blake2s is used by the /dev/random driver which is always builtin
obj-y += libblake2s.o
libblake2s-y := blake2s.o
libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += blake2s-generic.o
libblake2s-$(CONFIG_CRYPTO_SELFTESTS) += blake2s-selftest.o
obj-y += blake2s.o
ifeq ($(CONFIG_CRYPTO_LIB_BLAKE2S_ARCH),y)
CFLAGS_blake2s.o += -I$(src)/$(SRCARCH)
obj-$(CONFIG_ARM) += arm/blake2s-core.o
obj-$(CONFIG_X86) += x86/blake2s-core.o
endif
################################################################################
# chacha20_block() is used by the /dev/random driver which is always builtin
obj-y += chacha-block-generic.o
obj-$(CONFIG_CRYPTO_LIB_CHACHA) += libchacha.o
libchacha-y := chacha.o
ifeq ($(CONFIG_CRYPTO_LIB_CHACHA_ARCH),y)
CFLAGS_chacha.o += -I$(src)/$(SRCARCH)
ifeq ($(CONFIG_ARM),y)
libchacha-y += arm/chacha-scalar-core.o
libchacha-$(CONFIG_KERNEL_MODE_NEON) += arm/chacha-neon-core.o
endif
libchacha-$(CONFIG_ARM64) += arm64/chacha-neon-core.o
ifeq ($(CONFIG_MIPS),y)
libchacha-y += mips/chacha-core.o
AFLAGS_mips/chacha-core.o += -O2 # needed to fill branch delay slots
endif
libchacha-$(CONFIG_PPC) += powerpc/chacha-p10le-8x.o
libchacha-$(CONFIG_RISCV) += riscv/chacha-riscv64-zvkb.o
libchacha-$(CONFIG_S390) += s390/chacha-s390.o
libchacha-$(CONFIG_X86) += x86/chacha-ssse3-x86_64.o \
x86/chacha-avx2-x86_64.o \
x86/chacha-avx512vl-x86_64.o
endif # CONFIG_CRYPTO_LIB_CHACHA_ARCH
################################################################################
obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o
libchacha20poly1305-y += chacha20poly1305.o
libchacha20poly1305-$(CONFIG_CRYPTO_SELFTESTS) += chacha20poly1305-selftest.o
obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519-generic.o
libcurve25519-generic-y := curve25519-fiat32.o
libcurve25519-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o
libcurve25519-generic-y += curve25519-generic.o
################################################################################
obj-$(CONFIG_CRYPTO_LIB_CURVE25519) += libcurve25519.o
libcurve25519-y := curve25519.o
# Disable GCOV in odd or sensitive code
GCOV_PROFILE_curve25519.o := n
ifeq ($(CONFIG_ARCH_SUPPORTS_INT128),y)
libcurve25519-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += curve25519-hacl64.o
else
libcurve25519-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += curve25519-fiat32.o
endif
# clang versions prior to 18 may blow out the stack with KASAN
ifeq ($(call clang-min-version, 180000),)
KASAN_SANITIZE_curve25519-hacl64.o := n
endif
obj-$(CONFIG_CRYPTO_LIB_CURVE25519) += libcurve25519.o
libcurve25519-y += curve25519.o
libcurve25519-$(CONFIG_CRYPTO_SELFTESTS) += curve25519-selftest.o
ifeq ($(CONFIG_CRYPTO_LIB_CURVE25519_ARCH),y)
CFLAGS_curve25519.o += -I$(src)/$(SRCARCH)
libcurve25519-$(CONFIG_ARM) += arm/curve25519-core.o
libcurve25519-$(CONFIG_PPC) += powerpc/curve25519-ppc64le_asm.o
endif
################################################################################
obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o
libdes-y := des.o
obj-$(CONFIG_CRYPTO_LIB_POLY1305) += libpoly1305.o
libpoly1305-y += poly1305.o
################################################################################
obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305-generic.o
libpoly1305-generic-y := poly1305-donna32.o
libpoly1305-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o
libpoly1305-generic-y += poly1305-generic.o
obj-$(CONFIG_CRYPTO_LIB_MD5) += libmd5.o
libmd5-y := md5.o
ifeq ($(CONFIG_CRYPTO_LIB_MD5_ARCH),y)
CFLAGS_md5.o += -I$(src)/$(SRCARCH)
libmd5-$(CONFIG_PPC) += powerpc/md5-asm.o
libmd5-$(CONFIG_SPARC) += sparc/md5_asm.o
endif # CONFIG_CRYPTO_LIB_MD5_ARCH
################################################################################
obj-$(CONFIG_CRYPTO_LIB_POLY1305) += libpoly1305.o
libpoly1305-y := poly1305.o
ifeq ($(CONFIG_ARCH_SUPPORTS_INT128),y)
libpoly1305-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += poly1305-donna64.o
else
libpoly1305-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += poly1305-donna32.o
endif
ifeq ($(CONFIG_CRYPTO_LIB_POLY1305_ARCH),y)
CFLAGS_poly1305.o += -I$(src)/$(SRCARCH)
ifeq ($(CONFIG_ARM),y)
libpoly1305-y += arm/poly1305-core.o
$(obj)/arm/poly1305-core.S: $(src)/arm/poly1305-armv4.pl
$(call cmd,perlasm)
# massage the perlasm code a bit so we only get the NEON routine if we need it
poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5
poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7
AFLAGS_arm/poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y)
endif
ifeq ($(CONFIG_ARM64),y)
libpoly1305-y += arm64/poly1305-core.o
$(obj)/arm64/poly1305-core.S: $(src)/arm64/poly1305-armv8.pl
$(call cmd,perlasm_with_args)
endif
ifeq ($(CONFIG_MIPS),y)
libpoly1305-y += mips/poly1305-core.o
poly1305-perlasm-flavour-$(CONFIG_32BIT) := o32
poly1305-perlasm-flavour-$(CONFIG_64BIT) := 64
quiet_cmd_perlasm_poly1305 = PERLASM $@
cmd_perlasm_poly1305 = $(PERL) $< $(poly1305-perlasm-flavour-y) $@
# Use if_changed instead of cmd, in case the flavour changed.
$(obj)/mips/poly1305-core.S: $(src)/mips/poly1305-mips.pl FORCE
$(call if_changed,perlasm_poly1305)
targets += mips/poly1305-core.S
endif
libpoly1305-$(CONFIG_PPC) += powerpc/poly1305-p10le_64.o
ifeq ($(CONFIG_RISCV),y)
libpoly1305-y += riscv/poly1305-core.o
poly1305-perlasm-flavour-$(CONFIG_32BIT) := 32
poly1305-perlasm-flavour-$(CONFIG_64BIT) := 64
quiet_cmd_perlasm_poly1305 = PERLASM $@
cmd_perlasm_poly1305 = $(PERL) $< $(poly1305-perlasm-flavour-y) $@
# Use if_changed instead of cmd, in case the flavour changed.
$(obj)/riscv/poly1305-core.S: $(src)/riscv/poly1305-riscv.pl FORCE
$(call if_changed,perlasm_poly1305)
targets += riscv/poly1305-core.S
AFLAGS_riscv/poly1305-core.o += -Dpoly1305_init=poly1305_block_init
endif
ifeq ($(CONFIG_X86),y)
libpoly1305-y += x86/poly1305-x86_64-cryptogams.o
$(obj)/x86/poly1305-x86_64-cryptogams.S: $(src)/x86/poly1305-x86_64-cryptogams.pl
$(call cmd,perlasm)
endif
endif # CONFIG_CRYPTO_LIB_POLY1305_ARCH
# clean-files must be defined unconditionally
clean-files += arm/poly1305-core.S \
arm64/poly1305-core.S \
mips/poly1305-core.S \
riscv/poly1305-core.S \
x86/poly1305-x86_64-cryptogams.S
################################################################################
@ -156,14 +275,6 @@ obj-$(CONFIG_CRYPTO_SELFTESTS_FULL) += simd.o
obj-$(CONFIG_CRYPTO_LIB_SM3) += libsm3.o
libsm3-y := sm3.o
obj-$(CONFIG_ARM) += arm/
obj-$(CONFIG_ARM64) += arm64/
obj-$(CONFIG_MIPS) += mips/
obj-$(CONFIG_PPC) += powerpc/
obj-$(CONFIG_RISCV) += riscv/
obj-$(CONFIG_S390) += s390/
obj-$(CONFIG_X86) += x86/
# clean-files must be defined unconditionally
clean-files += arm/sha256-core.S arm/sha512-core.S
clean-files += arm64/sha256-core.S arm64/sha512-core.S


@ -1,24 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
config CRYPTO_BLAKE2S_ARM
bool "Hash functions: BLAKE2s"
select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
help
BLAKE2s cryptographic hash function (RFC 7693)
Architecture: arm
This is faster than the generic implementations of BLAKE2s and
BLAKE2b, but slower than the NEON implementation of BLAKE2b.
There is no NEON implementation of BLAKE2s, since NEON doesn't
really help with it.
config CRYPTO_CHACHA20_NEON
tristate
default CRYPTO_LIB_CHACHA
select CRYPTO_ARCH_HAVE_LIB_CHACHA
config CRYPTO_POLY1305_ARM
tristate
default CRYPTO_LIB_POLY1305
select CRYPTO_ARCH_HAVE_LIB_POLY1305


@ -1,26 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += libblake2s-arm.o
libblake2s-arm-y := blake2s-core.o blake2s-glue.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
chacha-neon-y := chacha-scalar-core.o chacha-glue.o
chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o
obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o
poly1305-arm-y := poly1305-core.o poly1305-glue.o
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) > $(@)
$(obj)/%-core.S: $(src)/%-armv4.pl
$(call cmd,perl)
clean-files += poly1305-core.S
aflags-thumb2-$(CONFIG_THUMB2_KERNEL) := -U__thumb2__ -D__thumb2__=1
# massage the perlasm code a bit so we only get the NEON routine if we need it
poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5
poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7
AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y)


@ -1,6 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* BLAKE2s digest algorithm, ARM scalar implementation
* BLAKE2s digest algorithm, ARM scalar implementation. This is faster
* than the generic implementations of BLAKE2s and BLAKE2b, but slower
* than the NEON implementation of BLAKE2b. There is no NEON
* implementation of BLAKE2s, since NEON doesn't really help with it.
*
* Copyright 2020 Google LLC
*


@ -1,7 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <crypto/internal/blake2s.h>
#include <linux/module.h>
/* defined in blake2s-core.S */
EXPORT_SYMBOL(blake2s_compress);

lib/crypto/arm/blake2s.h (new file)

@ -0,0 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* defined in blake2s-core.S */
void blake2s_compress(struct blake2s_state *state, const u8 *block,
size_t nblocks, u32 inc);


@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/* SPDX-License-Identifier: GPL-2.0 */
/*
* ChaCha and HChaCha functions (ARM optimized)
*
@ -6,11 +6,9 @@
* Copyright (C) 2015 Martin Willi
*/
#include <crypto/chacha.h>
#include <crypto/internal/simd.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/cputype.h>
#include <asm/hwcap.h>
@ -64,8 +62,8 @@ static void chacha_doneon(struct chacha_state *state, u8 *dst, const u8 *src,
}
}
void hchacha_block_arch(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds)
static void hchacha_block_arch(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds)
{
if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) {
hchacha_block_arm(state, out, nrounds);
@ -75,10 +73,9 @@ void hchacha_block_arch(const struct chacha_state *state,
kernel_neon_end();
}
}
EXPORT_SYMBOL(hchacha_block_arch);
void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
static void chacha_crypt_arch(struct chacha_state *state, u8 *dst,
const u8 *src, unsigned int bytes, int nrounds)
{
if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable() ||
bytes <= CHACHA_BLOCK_SIZE) {
@ -99,16 +96,9 @@ void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
dst += todo;
} while (bytes);
}
EXPORT_SYMBOL(chacha_crypt_arch);
bool chacha_is_arch_optimized(void)
{
/* We always can use at least the ARM scalar implementation. */
return true;
}
EXPORT_SYMBOL(chacha_is_arch_optimized);
static int __init chacha_arm_mod_init(void)
#define chacha_mod_init_arch chacha_mod_init_arch
static void chacha_mod_init_arch(void)
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) {
switch (read_cpuid_part()) {
@ -124,15 +114,4 @@ static int __init chacha_arm_mod_init(void)
static_branch_enable(&use_neon);
}
}
return 0;
}
subsys_initcall(chacha_arm_mod_init);
static void __exit chacha_arm_mod_exit(void)
{
}
module_exit(chacha_arm_mod_exit);
MODULE_DESCRIPTION("ChaCha and HChaCha functions (ARM optimized)");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");


@ -0,0 +1,47 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* Based on public domain code from Daniel J. Bernstein and Peter Schwabe. This
* began from SUPERCOP's curve25519/neon2/scalarmult.s, but has subsequently been
* manually reworked for use in kernel space.
*/
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/simd.h>
#include <linux/types.h>
#include <linux/jump_label.h>
asmlinkage void curve25519_neon(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
const u8 basepoint[CURVE25519_KEY_SIZE]);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
const u8 scalar[CURVE25519_KEY_SIZE],
const u8 point[CURVE25519_KEY_SIZE])
{
if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
kernel_neon_begin();
curve25519_neon(out, scalar, point);
kernel_neon_end();
} else {
curve25519_generic(out, scalar, point);
}
}
static void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE])
{
curve25519_arch(pub, secret, curve25519_base_point);
}
#define curve25519_mod_init_arch curve25519_mod_init_arch
static void curve25519_mod_init_arch(void)
{
if (elf_hwcap & HWCAP_NEON)
static_branch_enable(&have_neon);
}


@ -43,9 +43,8 @@ $code.=<<___;
#else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__
# define poly1305_init poly1305_block_init_arch
# define poly1305_init poly1305_block_init
# define poly1305_blocks poly1305_blocks_arm
# define poly1305_emit poly1305_emit_arch
#endif
#if defined(__thumb2__)


@ -1,76 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* OpenSSL/Cryptogams accelerated Poly1305 transform for ARM
*
* Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/poly1305.h>
#include <linux/cpufeature.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/unaligned.h>
asmlinkage void poly1305_block_init_arch(
struct poly1305_block_state *state,
const u8 raw_key[POLY1305_BLOCK_SIZE]);
EXPORT_SYMBOL_GPL(poly1305_block_init_arch);
asmlinkage void poly1305_blocks_arm(struct poly1305_block_state *state,
const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_blocks_neon(struct poly1305_block_state *state,
const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_emit_arch(const struct poly1305_state *state,
u8 digest[POLY1305_DIGEST_SIZE],
const u32 nonce[4]);
EXPORT_SYMBOL_GPL(poly1305_emit_arch);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
unsigned int len, u32 padbit)
{
len = round_down(len, POLY1305_BLOCK_SIZE);
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
static_branch_likely(&have_neon) && likely(may_use_simd())) {
do {
unsigned int todo = min_t(unsigned int, len, SZ_4K);
kernel_neon_begin();
poly1305_blocks_neon(state, src, todo, padbit);
kernel_neon_end();
len -= todo;
src += todo;
} while (len);
} else
poly1305_blocks_arm(state, src, len, padbit);
}
EXPORT_SYMBOL_GPL(poly1305_blocks_arch);
bool poly1305_is_arch_optimized(void)
{
/* We always can use at least the ARM scalar implementation. */
return true;
}
EXPORT_SYMBOL(poly1305_is_arch_optimized);
static int __init arm_poly1305_mod_init(void)
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
(elf_hwcap & HWCAP_NEON))
static_branch_enable(&have_neon);
return 0;
}
subsys_initcall(arm_poly1305_mod_init);
static void __exit arm_poly1305_mod_exit(void)
{
}
module_exit(arm_poly1305_mod_exit);
MODULE_DESCRIPTION("Accelerated Poly1305 transform for ARM");
MODULE_LICENSE("GPL v2");

lib/crypto/arm/poly1305.h (new file)

@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* OpenSSL/Cryptogams accelerated Poly1305 transform for ARM
*
* Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <linux/cpufeature.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
asmlinkage void poly1305_block_init(struct poly1305_block_state *state,
const u8 raw_key[POLY1305_BLOCK_SIZE]);
asmlinkage void poly1305_blocks_arm(struct poly1305_block_state *state,
const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_blocks_neon(struct poly1305_block_state *state,
const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_emit(const struct poly1305_state *state,
u8 digest[POLY1305_DIGEST_SIZE],
const u32 nonce[4]);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static void poly1305_blocks(struct poly1305_block_state *state, const u8 *src,
unsigned int len, u32 padbit)
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
static_branch_likely(&have_neon) && likely(may_use_simd())) {
do {
unsigned int todo = min_t(unsigned int, len, SZ_4K);
kernel_neon_begin();
poly1305_blocks_neon(state, src, todo, padbit);
kernel_neon_end();
len -= todo;
src += todo;
} while (len);
} else
poly1305_blocks_arm(state, src, len, padbit);
}
#ifdef CONFIG_KERNEL_MODE_NEON
#define poly1305_mod_init_arch poly1305_mod_init_arch
static void poly1305_mod_init_arch(void)
{
if (elf_hwcap & HWCAP_NEON)
static_branch_enable(&have_neon);
}
#endif /* CONFIG_KERNEL_MODE_NEON */


@ -35,7 +35,7 @@ static void sha1_blocks(struct sha1_block_state *state,
#ifdef CONFIG_KERNEL_MODE_NEON
#define sha1_mod_init_arch sha1_mod_init_arch
static inline void sha1_mod_init_arch(void)
static void sha1_mod_init_arch(void)
{
if (elf_hwcap & HWCAP_NEON) {
static_branch_enable(&have_neon);


@ -5,7 +5,10 @@
* Copyright 2025 Google LLC
*/
#include <asm/neon.h>
#include <crypto/internal/simd.h>
#include <asm/simd.h>
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
asmlinkage void sha256_block_data_order(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
@ -14,14 +17,11 @@ asmlinkage void sha256_block_data_order_neon(struct sha256_block_state *state,
asmlinkage void sha256_ce_transform(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
static void sha256_blocks(struct sha256_block_state *state,
const u8 *data, size_t nblocks)
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
static_branch_likely(&have_neon) && crypto_simd_usable()) {
static_branch_likely(&have_neon) && likely(may_use_simd())) {
kernel_neon_begin();
if (static_branch_likely(&have_ce))
sha256_ce_transform(state, data, nblocks);
@ -35,7 +35,7 @@ static void sha256_blocks(struct sha256_block_state *state,
#ifdef CONFIG_KERNEL_MODE_NEON
#define sha256_mod_init_arch sha256_mod_init_arch
static inline void sha256_mod_init_arch(void)
static void sha256_mod_init_arch(void)
{
if (elf_hwcap & HWCAP_NEON) {
static_branch_enable(&have_neon);


@ -4,9 +4,8 @@
*
* Copyright 2025 Google LLC
*/
#include <asm/neon.h>
#include <crypto/internal/simd.h>
#include <asm/simd.h>
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
@ -19,7 +18,7 @@ static void sha512_blocks(struct sha512_block_state *state,
const u8 *data, size_t nblocks)
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
static_branch_likely(&have_neon) && likely(crypto_simd_usable())) {
static_branch_likely(&have_neon) && likely(may_use_simd())) {
kernel_neon_begin();
sha512_block_data_order_neon(state, data, nblocks);
kernel_neon_end();
@ -30,7 +29,7 @@ static void sha512_blocks(struct sha512_block_state *state,
#ifdef CONFIG_KERNEL_MODE_NEON
#define sha512_mod_init_arch sha512_mod_init_arch
static inline void sha512_mod_init_arch(void)
static void sha512_mod_init_arch(void)
{
if (cpu_has_neon())
static_branch_enable(&have_neon);


@ -1,14 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
config CRYPTO_CHACHA20_NEON
tristate
depends on KERNEL_MODE_NEON
default CRYPTO_LIB_CHACHA
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
config CRYPTO_POLY1305_NEON
tristate
depends on KERNEL_MODE_NEON
default CRYPTO_LIB_POLY1305
select CRYPTO_ARCH_HAVE_LIB_POLY1305


@ -1,17 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o
obj-$(CONFIG_CRYPTO_POLY1305_NEON) += poly1305-neon.o
poly1305-neon-y := poly1305-core.o poly1305-glue.o
AFLAGS_poly1305-core.o += -Dpoly1305_init=poly1305_block_init_arch
AFLAGS_poly1305-core.o += -Dpoly1305_emit=poly1305_emit_arch
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $(<) void $(@)
$(obj)/%-core.S: $(src)/%-armv8.pl
$(call cmd,perlasm)
clean-files += poly1305-core.S


@ -18,11 +18,9 @@
* (at your option) any later version.
*/
#include <crypto/chacha.h>
#include <crypto/internal/simd.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/hwcap.h>
#include <asm/neon.h>
@ -61,8 +59,8 @@ static void chacha_doneon(struct chacha_state *state, u8 *dst, const u8 *src,
}
}
void hchacha_block_arch(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds)
static void hchacha_block_arch(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds)
{
if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) {
hchacha_block_generic(state, out, nrounds);
@ -72,10 +70,9 @@ void hchacha_block_arch(const struct chacha_state *state,
kernel_neon_end();
}
}
EXPORT_SYMBOL(hchacha_block_arch);
void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
static void chacha_crypt_arch(struct chacha_state *state, u8 *dst,
const u8 *src, unsigned int bytes, int nrounds)
{
if (!static_branch_likely(&have_neon) || bytes <= CHACHA_BLOCK_SIZE ||
!crypto_simd_usable())
@ -93,27 +90,10 @@ void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
dst += todo;
} while (bytes);
}
EXPORT_SYMBOL(chacha_crypt_arch);
bool chacha_is_arch_optimized(void)
{
return static_key_enabled(&have_neon);
}
EXPORT_SYMBOL(chacha_is_arch_optimized);
static int __init chacha_simd_mod_init(void)
#define chacha_mod_init_arch chacha_mod_init_arch
static void chacha_mod_init_arch(void)
{
if (cpu_have_named_feature(ASIMD))
static_branch_enable(&have_neon);
return 0;
}
subsys_initcall(chacha_simd_mod_init);
static void __exit chacha_simd_mod_exit(void)
{
}
module_exit(chacha_simd_mod_exit);
MODULE_DESCRIPTION("ChaCha and HChaCha functions (ARM64 optimized)");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");


@ -50,6 +50,9 @@ $code.=<<___;
#ifndef __KERNEL__
# include "arm_arch.h"
.extern OPENSSL_armcap_P
#else
# define poly1305_init poly1305_block_init
# define poly1305_blocks poly1305_blocks_arm64
#endif
.text


@ -1,74 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* OpenSSL/Cryptogams accelerated Poly1305 transform for arm64
*
* Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/poly1305.h>
#include <linux/cpufeature.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/unaligned.h>
asmlinkage void poly1305_block_init_arch(
struct poly1305_block_state *state,
const u8 raw_key[POLY1305_BLOCK_SIZE]);
EXPORT_SYMBOL_GPL(poly1305_block_init_arch);
asmlinkage void poly1305_blocks(struct poly1305_block_state *state,
const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_blocks_neon(struct poly1305_block_state *state,
const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_emit_arch(const struct poly1305_state *state,
u8 digest[POLY1305_DIGEST_SIZE],
const u32 nonce[4]);
EXPORT_SYMBOL_GPL(poly1305_emit_arch);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
unsigned int len, u32 padbit)
{
len = round_down(len, POLY1305_BLOCK_SIZE);
if (static_branch_likely(&have_neon) && likely(may_use_simd())) {
do {
unsigned int todo = min_t(unsigned int, len, SZ_4K);
kernel_neon_begin();
poly1305_blocks_neon(state, src, todo, padbit);
kernel_neon_end();
len -= todo;
src += todo;
} while (len);
} else
poly1305_blocks(state, src, len, padbit);
}
EXPORT_SYMBOL_GPL(poly1305_blocks_arch);
bool poly1305_is_arch_optimized(void)
{
/* We always can use at least the ARM64 scalar implementation. */
return true;
}
EXPORT_SYMBOL(poly1305_is_arch_optimized);
static int __init neon_poly1305_mod_init(void)
{
if (cpu_have_named_feature(ASIMD))
static_branch_enable(&have_neon);
return 0;
}
subsys_initcall(neon_poly1305_mod_init);
static void __exit neon_poly1305_mod_exit(void)
{
}
module_exit(neon_poly1305_mod_exit);
MODULE_DESCRIPTION("Poly1305 authenticator (ARM64 optimized)");
MODULE_LICENSE("GPL v2");


@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* OpenSSL/Cryptogams accelerated Poly1305 transform for arm64
*
* Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <linux/cpufeature.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
asmlinkage void poly1305_block_init(struct poly1305_block_state *state,
const u8 raw_key[POLY1305_BLOCK_SIZE]);
asmlinkage void poly1305_blocks_arm64(struct poly1305_block_state *state,
const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_blocks_neon(struct poly1305_block_state *state,
const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_emit(const struct poly1305_state *state,
u8 digest[POLY1305_DIGEST_SIZE],
const u32 nonce[4]);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static void poly1305_blocks(struct poly1305_block_state *state, const u8 *src,
unsigned int len, u32 padbit)
{
if (static_branch_likely(&have_neon) && likely(may_use_simd())) {
do {
unsigned int todo = min_t(unsigned int, len, SZ_4K);
kernel_neon_begin();
poly1305_blocks_neon(state, src, todo, padbit);
kernel_neon_end();
len -= todo;
src += todo;
} while (len);
} else
poly1305_blocks_arm64(state, src, len, padbit);
}
#define poly1305_mod_init_arch poly1305_mod_init_arch
static void poly1305_mod_init_arch(void)
{
if (cpu_have_named_feature(ASIMD))
static_branch_enable(&have_neon);
}


@ -32,7 +32,7 @@ static void sha1_blocks(struct sha1_block_state *state,
}
#define sha1_mod_init_arch sha1_mod_init_arch
static inline void sha1_mod_init_arch(void)
static void sha1_mod_init_arch(void)
{
if (cpu_have_named_feature(SHA1))
static_branch_enable(&have_ce);


@ -5,9 +5,12 @@
* Copyright 2025 Google LLC
*/
#include <asm/neon.h>
#include <crypto/internal/simd.h>
#include <asm/simd.h>
#include <linux/cpufeature.h>
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
asmlinkage void sha256_block_data_order(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
asmlinkage void sha256_block_neon(struct sha256_block_state *state,
@@ -15,14 +18,11 @@ asmlinkage size_t __sha256_ce_transform(struct sha256_block_state *state,
asmlinkage size_t __sha256_ce_transform(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
static void sha256_blocks(struct sha256_block_state *state,
const u8 *data, size_t nblocks)
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
static_branch_likely(&have_neon) && crypto_simd_usable()) {
static_branch_likely(&have_neon) && likely(may_use_simd())) {
if (static_branch_likely(&have_ce)) {
do {
size_t rem;
@@ -46,7 +46,7 @@ static void sha256_blocks(struct sha256_block_state *state,
#ifdef CONFIG_KERNEL_MODE_NEON
#define sha256_mod_init_arch sha256_mod_init_arch
static inline void sha256_mod_init_arch(void)
static void sha256_mod_init_arch(void)
{
if (cpu_have_named_feature(ASIMD)) {
static_branch_enable(&have_neon);


@@ -4,9 +4,8 @@
*
* Copyright 2025 Google LLC
*/
#include <asm/neon.h>
#include <crypto/internal/simd.h>
#include <asm/simd.h>
#include <linux/cpufeature.h>
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha512_insns);
@@ -21,7 +20,7 @@ static void sha512_blocks(struct sha512_block_state *state,
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
static_branch_likely(&have_sha512_insns) &&
likely(crypto_simd_usable())) {
likely(may_use_simd())) {
do {
size_t rem;
@@ -38,7 +37,7 @@ static void sha512_blocks(struct sha512_block_state *state,
#ifdef CONFIG_KERNEL_MODE_NEON
#define sha512_mod_init_arch sha512_mod_init_arch
static inline void sha512_mod_init_arch(void)
static void sha512_mod_init_arch(void)
{
if (cpu_have_named_feature(SHA512))
static_branch_enable(&have_sha512_insns);


@@ -1,111 +0,0 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* This is an implementation of the BLAKE2s hash and PRF functions.
*
* Information: https://blake2.net/
*
*/
#include <crypto/internal/blake2s.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/unaligned.h>
static const u8 blake2s_sigma[10][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
};
static inline void blake2s_increment_counter(struct blake2s_state *state,
const u32 inc)
{
state->t[0] += inc;
state->t[1] += (state->t[0] < inc);
}
void blake2s_compress(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc)
__weak __alias(blake2s_compress_generic);
void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc)
{
u32 m[16];
u32 v[16];
int i;
WARN_ON(IS_ENABLED(DEBUG) &&
(nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE));
while (nblocks > 0) {
blake2s_increment_counter(state, inc);
memcpy(m, block, BLAKE2S_BLOCK_SIZE);
le32_to_cpu_array(m, ARRAY_SIZE(m));
memcpy(v, state->h, 32);
v[ 8] = BLAKE2S_IV0;
v[ 9] = BLAKE2S_IV1;
v[10] = BLAKE2S_IV2;
v[11] = BLAKE2S_IV3;
v[12] = BLAKE2S_IV4 ^ state->t[0];
v[13] = BLAKE2S_IV5 ^ state->t[1];
v[14] = BLAKE2S_IV6 ^ state->f[0];
v[15] = BLAKE2S_IV7 ^ state->f[1];
#define G(r, i, a, b, c, d) do { \
a += b + m[blake2s_sigma[r][2 * i + 0]]; \
d = ror32(d ^ a, 16); \
c += d; \
b = ror32(b ^ c, 12); \
a += b + m[blake2s_sigma[r][2 * i + 1]]; \
d = ror32(d ^ a, 8); \
c += d; \
b = ror32(b ^ c, 7); \
} while (0)
#define ROUND(r) do { \
G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
G(r, 2, v[2], v[ 6], v[10], v[14]); \
G(r, 3, v[3], v[ 7], v[11], v[15]); \
G(r, 4, v[0], v[ 5], v[10], v[15]); \
G(r, 5, v[1], v[ 6], v[11], v[12]); \
G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
} while (0)
ROUND(0);
ROUND(1);
ROUND(2);
ROUND(3);
ROUND(4);
ROUND(5);
ROUND(6);
ROUND(7);
ROUND(8);
ROUND(9);
#undef G
#undef ROUND
for (i = 0; i < 8; ++i)
state->h[i] ^= v[i] ^ v[i + 8];
block += BLAKE2S_BLOCK_SIZE;
--nblocks;
}
}
EXPORT_SYMBOL(blake2s_compress_generic);


@@ -1,651 +0,0 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
#include <crypto/internal/blake2s.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/string.h>
/*
* blake2s_testvecs[] generated with the program below (using libb2-dev and
* libssl-dev [OpenSSL])
*
* #include <blake2.h>
* #include <stdint.h>
* #include <stdio.h>
*
* #include <openssl/evp.h>
*
* #define BLAKE2S_TESTVEC_COUNT 256
*
* static void print_vec(const uint8_t vec[], int len)
* {
* int i;
*
* printf(" { ");
* for (i = 0; i < len; i++) {
* if (i && (i % 12) == 0)
* printf("\n ");
* printf("0x%02x, ", vec[i]);
* }
* printf("},\n");
* }
*
* int main(void)
* {
* uint8_t key[BLAKE2S_KEYBYTES];
* uint8_t buf[BLAKE2S_TESTVEC_COUNT];
* uint8_t hash[BLAKE2S_OUTBYTES];
* int i, j;
*
* key[0] = key[1] = 1;
* for (i = 2; i < BLAKE2S_KEYBYTES; ++i)
* key[i] = key[i - 2] + key[i - 1];
*
* for (i = 0; i < BLAKE2S_TESTVEC_COUNT; ++i)
* buf[i] = (uint8_t)i;
*
* printf("static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n");
*
* for (i = 0; i < BLAKE2S_TESTVEC_COUNT; ++i) {
* int outlen = 1 + i % BLAKE2S_OUTBYTES;
* int keylen = (13 * i) % (BLAKE2S_KEYBYTES + 1);
*
* blake2s(hash, buf, key + BLAKE2S_KEYBYTES - keylen, outlen, i,
* keylen);
* print_vec(hash, outlen);
* }
* printf("};\n\n");
*
* return 0;
*}
*/
static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
{ 0xa1, },
{ 0x7c, 0x89, },
{ 0x74, 0x0e, 0xd4, },
{ 0x47, 0x0c, 0x21, 0x15, },
{ 0x18, 0xd6, 0x9c, 0xa6, 0xc4, },
{ 0x13, 0x5d, 0x16, 0x63, 0x2e, 0xf9, },
{ 0x2c, 0xb5, 0x04, 0xb7, 0x99, 0xe2, 0x73, },
{ 0x9a, 0x0f, 0xd2, 0x39, 0xd6, 0x68, 0x1b, 0x92, },
{ 0xc8, 0xde, 0x7a, 0xea, 0x2f, 0xf4, 0xd2, 0xe3, 0x2b, },
{ 0x5b, 0xf9, 0x43, 0x52, 0x0c, 0x12, 0xba, 0xb5, 0x93, 0x9f, },
{ 0xc6, 0x2c, 0x4e, 0x80, 0xfc, 0x32, 0x5b, 0x33, 0xb8, 0xb8, 0x0a, },
{ 0xa7, 0x5c, 0xfd, 0x3a, 0xcc, 0xbf, 0x90, 0xca, 0xb7, 0x97, 0xde, 0xd8, },
{ 0x66, 0xca, 0x3c, 0xc4, 0x19, 0xef, 0x92, 0x66, 0x3f, 0x21, 0x8f, 0xda,
0xb7, },
{ 0xba, 0xe5, 0xbb, 0x30, 0x25, 0x94, 0x6d, 0xc3, 0x89, 0x09, 0xc4, 0x25,
0x52, 0x3e, },
{ 0xa2, 0xef, 0x0e, 0x52, 0x0b, 0x5f, 0xa2, 0x01, 0x6d, 0x0a, 0x25, 0xbc,
0x57, 0xe2, 0x27, },
{ 0x4f, 0xe0, 0xf9, 0x52, 0x12, 0xda, 0x84, 0xb7, 0xab, 0xae, 0xb0, 0xa6,
0x47, 0x2a, 0xc7, 0xf5, },
{ 0x56, 0xe7, 0xa8, 0x1c, 0x4c, 0xca, 0xed, 0x90, 0x31, 0xec, 0x87, 0x43,
0xe7, 0x72, 0x08, 0xec, 0xbe, },
{ 0x7e, 0xdf, 0x80, 0x1c, 0x93, 0x33, 0xfd, 0x53, 0x44, 0xba, 0xfd, 0x96,
0xe1, 0xbb, 0xb5, 0x65, 0xa5, 0x00, },
{ 0xec, 0x6b, 0xed, 0xf7, 0x7b, 0x62, 0x1d, 0x7d, 0xf4, 0x82, 0xf3, 0x1e,
0x18, 0xff, 0x2b, 0xc4, 0x06, 0x20, 0x2a, },
{ 0x74, 0x98, 0xd7, 0x68, 0x63, 0xed, 0x87, 0xe4, 0x5d, 0x8d, 0x9e, 0x1d,
0xfd, 0x2a, 0xbb, 0x86, 0xac, 0xe9, 0x2a, 0x89, },
{ 0x89, 0xc3, 0x88, 0xce, 0x2b, 0x33, 0x1e, 0x10, 0xd1, 0x37, 0x20, 0x86,
0x28, 0x43, 0x70, 0xd9, 0xfb, 0x96, 0xd9, 0xb5, 0xd3, },
{ 0xcb, 0x56, 0x74, 0x41, 0x8d, 0x80, 0x01, 0x9a, 0x6b, 0x38, 0xe1, 0x41,
0xad, 0x9c, 0x62, 0x74, 0xce, 0x35, 0xd5, 0x6c, 0x89, 0x6e, },
{ 0x79, 0xaf, 0x94, 0x59, 0x99, 0x26, 0xe1, 0xc9, 0x34, 0xfe, 0x7c, 0x22,
0xf7, 0x43, 0xd7, 0x65, 0xd4, 0x48, 0x18, 0xac, 0x3d, 0xfd, 0x93, },
{ 0x85, 0x0d, 0xff, 0xb8, 0x3e, 0x87, 0x41, 0xb0, 0x95, 0xd3, 0x3d, 0x00,
0x47, 0x55, 0x9e, 0xd2, 0x69, 0xea, 0xbf, 0xe9, 0x7a, 0x2d, 0x61, 0x45, },
{ 0x03, 0xe0, 0x85, 0xec, 0x54, 0xb5, 0x16, 0x53, 0xa8, 0xc4, 0x71, 0xe9,
0x6a, 0xe7, 0xcb, 0xc4, 0x15, 0x02, 0xfc, 0x34, 0xa4, 0xa4, 0x28, 0x13,
0xd1, },
{ 0xe3, 0x34, 0x4b, 0xe1, 0xd0, 0x4b, 0x55, 0x61, 0x8f, 0xc0, 0x24, 0x05,
0xe6, 0xe0, 0x3d, 0x70, 0x24, 0x4d, 0xda, 0xb8, 0x91, 0x05, 0x29, 0x07,
0x01, 0x3e, },
{ 0x61, 0xff, 0x01, 0x72, 0xb1, 0x4d, 0xf6, 0xfe, 0xd1, 0xd1, 0x08, 0x74,
0xe6, 0x91, 0x44, 0xeb, 0x61, 0xda, 0x40, 0xaf, 0xfc, 0x8c, 0x91, 0x6b,
0xec, 0x13, 0xed, },
{ 0xd4, 0x40, 0xd2, 0xa0, 0x7f, 0xc1, 0x58, 0x0c, 0x85, 0xa0, 0x86, 0xc7,
0x86, 0xb9, 0x61, 0xc9, 0xea, 0x19, 0x86, 0x1f, 0xab, 0x07, 0xce, 0x37,
0x72, 0x67, 0x09, 0xfc, },
{ 0x9e, 0xf8, 0x18, 0x67, 0x93, 0x10, 0x9b, 0x39, 0x75, 0xe8, 0x8b, 0x38,
0x82, 0x7d, 0xb8, 0xb7, 0xa5, 0xaf, 0xe6, 0x6a, 0x22, 0x5e, 0x1f, 0x9c,
0x95, 0x29, 0x19, 0xf2, 0x4b, },
{ 0xc8, 0x62, 0x25, 0xf5, 0x98, 0xc9, 0xea, 0xe5, 0x29, 0x3a, 0xd3, 0x22,
0xeb, 0xeb, 0x07, 0x7c, 0x15, 0x07, 0xee, 0x15, 0x61, 0xbb, 0x05, 0x30,
0x99, 0x7f, 0x11, 0xf6, 0x0a, 0x1d, },
{ 0x68, 0x70, 0xf7, 0x90, 0xa1, 0x8b, 0x1f, 0x0f, 0xbb, 0xce, 0xd2, 0x0e,
0x33, 0x1f, 0x7f, 0xa9, 0x78, 0xa8, 0xa6, 0x81, 0x66, 0xab, 0x8d, 0xcd,
0x58, 0x55, 0x3a, 0x0b, 0x7a, 0xdb, 0xb5, },
{ 0xdd, 0x35, 0xd2, 0xb4, 0xf6, 0xc7, 0xea, 0xab, 0x64, 0x24, 0x4e, 0xfe,
0xe5, 0x3d, 0x4e, 0x95, 0x8b, 0x6d, 0x6c, 0xbc, 0xb0, 0xf8, 0x88, 0x61,
0x09, 0xb7, 0x78, 0xa3, 0x31, 0xfe, 0xd9, 0x2f, },
{ 0x0a, },
{ 0x6e, 0xd4, },
{ 0x64, 0xe9, 0xd1, },
{ 0x30, 0xdd, 0x71, 0xef, },
{ 0x11, 0xb5, 0x0c, 0x87, 0xc9, },
{ 0x06, 0x1c, 0x6d, 0x04, 0x82, 0xd0, },
{ 0x5c, 0x42, 0x0b, 0xee, 0xc5, 0x9c, 0xb2, },
{ 0xe8, 0x29, 0xd6, 0xb4, 0x5d, 0xf7, 0x2b, 0x93, },
{ 0x18, 0xca, 0x27, 0x72, 0x43, 0x39, 0x16, 0xbc, 0x6a, },
{ 0x39, 0x8f, 0xfd, 0x64, 0xf5, 0x57, 0x23, 0xb0, 0x45, 0xf8, },
{ 0xbb, 0x3a, 0x78, 0x6b, 0x02, 0x1d, 0x0b, 0x16, 0xe3, 0xb2, 0x9a, },
{ 0xb8, 0xb4, 0x0b, 0xe5, 0xd4, 0x1d, 0x0d, 0x85, 0x49, 0x91, 0x35, 0xfa, },
{ 0x6d, 0x48, 0x2a, 0x0c, 0x42, 0x08, 0xbd, 0xa9, 0x78, 0x6f, 0x18, 0xaf,
0xe2, },
{ 0x10, 0x45, 0xd4, 0x58, 0x88, 0xec, 0x4e, 0x1e, 0xf6, 0x14, 0x92, 0x64,
0x7e, 0xb0, },
{ 0x8b, 0x0b, 0x95, 0xee, 0x92, 0xc6, 0x3b, 0x91, 0xf1, 0x1e, 0xeb, 0x51,
0x98, 0x0a, 0x8d, },
{ 0xa3, 0x50, 0x4d, 0xa5, 0x1d, 0x03, 0x68, 0xe9, 0x57, 0x78, 0xd6, 0x04,
0xf1, 0xc3, 0x94, 0xd8, },
{ 0xb8, 0x66, 0x6e, 0xdd, 0x46, 0x15, 0xae, 0x3d, 0x83, 0x7e, 0xcf, 0xe7,
0x2c, 0xe8, 0x8f, 0xc7, 0x34, },
{ 0x2e, 0xc0, 0x1f, 0x29, 0xea, 0xf6, 0xb9, 0xe2, 0xc2, 0x93, 0xeb, 0x41,
0x0d, 0xf0, 0x0a, 0x13, 0x0e, 0xa2, },
{ 0x71, 0xb8, 0x33, 0xa9, 0x1b, 0xac, 0xf1, 0xb5, 0x42, 0x8f, 0x5e, 0x81,
0x34, 0x43, 0xb7, 0xa4, 0x18, 0x5c, 0x47, },
{ 0xda, 0x45, 0xb8, 0x2e, 0x82, 0x1e, 0xc0, 0x59, 0x77, 0x9d, 0xfa, 0xb4,
0x1c, 0x5e, 0xa0, 0x2b, 0x33, 0x96, 0x5a, 0x58, },
{ 0xe3, 0x09, 0x05, 0xa9, 0xeb, 0x48, 0x13, 0xad, 0x71, 0x88, 0x81, 0x9a,
0x3e, 0x2c, 0xe1, 0x23, 0x99, 0x13, 0x35, 0x9f, 0xb5, },
{ 0xb7, 0x86, 0x2d, 0x16, 0xe1, 0x04, 0x00, 0x47, 0x47, 0x61, 0x31, 0xfb,
0x14, 0xac, 0xd8, 0xe9, 0xe3, 0x49, 0xbd, 0xf7, 0x9c, 0x3f, },
{ 0x7f, 0xd9, 0x95, 0xa8, 0xa7, 0xa0, 0xcc, 0xba, 0xef, 0xb1, 0x0a, 0xa9,
0x21, 0x62, 0x08, 0x0f, 0x1b, 0xff, 0x7b, 0x9d, 0xae, 0xb2, 0x95, },
{ 0x85, 0x99, 0xea, 0x33, 0xe0, 0x56, 0xff, 0x13, 0xc6, 0x61, 0x8c, 0xf9,
0x57, 0x05, 0x03, 0x11, 0xf9, 0xfb, 0x3a, 0xf7, 0xce, 0xbb, 0x52, 0x30, },
{ 0xb2, 0x72, 0x9c, 0xf8, 0x77, 0x4e, 0x8f, 0x6b, 0x01, 0x6c, 0xff, 0x4e,
0x4f, 0x02, 0xd2, 0xbc, 0xeb, 0x51, 0x28, 0x99, 0x50, 0xab, 0xc4, 0x42,
0xe3, },
{ 0x8b, 0x0a, 0xb5, 0x90, 0x8f, 0xf5, 0x7b, 0xdd, 0xba, 0x47, 0x37, 0xc9,
0x2a, 0xd5, 0x4b, 0x25, 0x08, 0x8b, 0x02, 0x17, 0xa7, 0x9e, 0x6b, 0x6e,
0xe3, 0x90, },
{ 0x90, 0xdd, 0xf7, 0x75, 0xa7, 0xa3, 0x99, 0x5e, 0x5b, 0x7d, 0x75, 0xc3,
0x39, 0x6b, 0xa0, 0xe2, 0x44, 0x53, 0xb1, 0x9e, 0xc8, 0xf1, 0x77, 0x10,
0x58, 0x06, 0x9a, },
{ 0x99, 0x52, 0xf0, 0x49, 0xa8, 0x8c, 0xec, 0xa6, 0x97, 0x32, 0x13, 0xb5,
0xf7, 0xa3, 0x8e, 0xfb, 0x4b, 0x59, 0x31, 0x3d, 0x01, 0x59, 0x98, 0x5d,
0x53, 0x03, 0x1a, 0x39, },
{ 0x9f, 0xe0, 0xc2, 0xe5, 0x5d, 0x93, 0xd6, 0x9b, 0x47, 0x8f, 0x9b, 0xe0,
0x26, 0x35, 0x84, 0x20, 0x1d, 0xc5, 0x53, 0x10, 0x0f, 0x22, 0xb9, 0xb5,
0xd4, 0x36, 0xb1, 0xac, 0x73, },
{ 0x30, 0x32, 0x20, 0x3b, 0x10, 0x28, 0xec, 0x1f, 0x4f, 0x9b, 0x47, 0x59,
0xeb, 0x7b, 0xee, 0x45, 0xfb, 0x0c, 0x49, 0xd8, 0x3d, 0x69, 0xbd, 0x90,
0x2c, 0xf0, 0x9e, 0x8d, 0xbf, 0xd5, },
{ 0x2a, 0x37, 0x73, 0x7f, 0xf9, 0x96, 0x19, 0xaa, 0x25, 0xd8, 0x13, 0x28,
0x01, 0x29, 0x89, 0xdf, 0x6e, 0x0c, 0x9b, 0x43, 0x44, 0x51, 0xe9, 0x75,
0x26, 0x0c, 0xb7, 0x87, 0x66, 0x0b, 0x5f, },
{ 0x23, 0xdf, 0x96, 0x68, 0x91, 0x86, 0xd0, 0x93, 0x55, 0x33, 0x24, 0xf6,
0xba, 0x08, 0x75, 0x5b, 0x59, 0x11, 0x69, 0xb8, 0xb9, 0xe5, 0x2c, 0x77,
0x02, 0xf6, 0x47, 0xee, 0x81, 0xdd, 0xb9, 0x06, },
{ 0x9d, },
{ 0x9d, 0x7d, },
{ 0xfd, 0xc3, 0xda, },
{ 0xe8, 0x82, 0xcd, 0x21, },
{ 0xc3, 0x1d, 0x42, 0x4c, 0x74, },
{ 0xe9, 0xda, 0xf1, 0xa2, 0xe5, 0x7c, },
{ 0x52, 0xb8, 0x6f, 0x81, 0x5c, 0x3a, 0x4c, },
{ 0x5b, 0x39, 0x26, 0xfc, 0x92, 0x5e, 0xe0, 0x49, },
{ 0x59, 0xe4, 0x7c, 0x93, 0x1c, 0xf9, 0x28, 0x93, 0xde, },
{ 0xde, 0xdf, 0xb2, 0x43, 0x61, 0x0b, 0x86, 0x16, 0x4c, 0x2e, },
{ 0x14, 0x8f, 0x75, 0x51, 0xaf, 0xb9, 0xee, 0x51, 0x5a, 0xae, 0x23, },
{ 0x43, 0x5f, 0x50, 0xd5, 0x70, 0xb0, 0x5b, 0x87, 0xf5, 0xd9, 0xb3, 0x6d, },
{ 0x66, 0x0a, 0x64, 0x93, 0x79, 0x71, 0x94, 0x40, 0xb7, 0x68, 0x2d, 0xd3,
0x63, },
{ 0x15, 0x00, 0xc4, 0x0c, 0x7d, 0x1b, 0x10, 0xa9, 0x73, 0x1b, 0x90, 0x6f,
0xe6, 0xa9, },
{ 0x34, 0x75, 0xf3, 0x86, 0x8f, 0x56, 0xcf, 0x2a, 0x0a, 0xf2, 0x62, 0x0a,
0xf6, 0x0e, 0x20, },
{ 0xb1, 0xde, 0xc9, 0xf5, 0xdb, 0xf3, 0x2f, 0x4c, 0xd6, 0x41, 0x7d, 0x39,
0x18, 0x3e, 0xc7, 0xc3, },
{ 0xc5, 0x89, 0xb2, 0xf8, 0xb8, 0xc0, 0xa3, 0xb9, 0x3b, 0x10, 0x6d, 0x7c,
0x92, 0xfc, 0x7f, 0x34, 0x41, },
{ 0xc4, 0xd8, 0xef, 0xba, 0xef, 0xd2, 0xaa, 0xc5, 0x6c, 0x8e, 0x3e, 0xbb,
0x12, 0xfc, 0x0f, 0x72, 0xbf, 0x0f, },
{ 0xdd, 0x91, 0xd1, 0x15, 0x9e, 0x7d, 0xf8, 0xc1, 0xb9, 0x14, 0x63, 0x96,
0xb5, 0xcb, 0x83, 0x1d, 0x35, 0x1c, 0xec, },
{ 0xa9, 0xf8, 0x52, 0xc9, 0x67, 0x76, 0x2b, 0xad, 0xfb, 0xd8, 0x3a, 0xa6,
0x74, 0x02, 0xae, 0xb8, 0x25, 0x2c, 0x63, 0x49, },
{ 0x77, 0x1f, 0x66, 0x70, 0xfd, 0x50, 0x29, 0xaa, 0xeb, 0xdc, 0xee, 0xba,
0x75, 0x98, 0xdc, 0x93, 0x12, 0x3f, 0xdc, 0x7c, 0x38, },
{ 0xe2, 0xe1, 0x89, 0x5c, 0x37, 0x38, 0x6a, 0xa3, 0x40, 0xac, 0x3f, 0xb0,
0xca, 0xfc, 0xa7, 0xf3, 0xea, 0xf9, 0x0f, 0x5d, 0x8e, 0x39, },
{ 0x0f, 0x67, 0xc8, 0x38, 0x01, 0xb1, 0xb7, 0xb8, 0xa2, 0xe7, 0x0a, 0x6d,
0xd2, 0x63, 0x69, 0x9e, 0xcc, 0xf0, 0xf2, 0xbe, 0x9b, 0x98, 0xdd, },
{ 0x13, 0xe1, 0x36, 0x30, 0xfe, 0xc6, 0x01, 0x8a, 0xa1, 0x63, 0x96, 0x59,
0xc2, 0xa9, 0x68, 0x3f, 0x58, 0xd4, 0x19, 0x0c, 0x40, 0xf3, 0xde, 0x02, },
{ 0xa3, 0x9e, 0xce, 0xda, 0x42, 0xee, 0x8c, 0x6c, 0x5a, 0x7d, 0xdc, 0x89,
0x02, 0x77, 0xdd, 0xe7, 0x95, 0xbb, 0xff, 0x0d, 0xa4, 0xb5, 0x38, 0x1e,
0xaf, },
{ 0x9a, 0xf6, 0xb5, 0x9a, 0x4f, 0xa9, 0x4f, 0x2c, 0x35, 0x3c, 0x24, 0xdc,
0x97, 0x6f, 0xd9, 0xa1, 0x7d, 0x1a, 0x85, 0x0b, 0xf5, 0xda, 0x2e, 0xe7,
0xb1, 0x1d, },
{ 0x84, 0x1e, 0x8e, 0x3d, 0x45, 0xa5, 0xf2, 0x27, 0xf3, 0x31, 0xfe, 0xb9,
0xfb, 0xc5, 0x45, 0x99, 0x99, 0xdd, 0x93, 0x43, 0x02, 0xee, 0x58, 0xaf,
0xee, 0x6a, 0xbe, },
{ 0x07, 0x2f, 0xc0, 0xa2, 0x04, 0xc4, 0xab, 0x7c, 0x26, 0xbb, 0xa8, 0xd8,
0xe3, 0x1c, 0x75, 0x15, 0x64, 0x5d, 0x02, 0x6a, 0xf0, 0x86, 0xe9, 0xcd,
0x5c, 0xef, 0xa3, 0x25, },
{ 0x2f, 0x3b, 0x1f, 0xb5, 0x91, 0x8f, 0x86, 0xe0, 0xdc, 0x31, 0x48, 0xb6,
0xa1, 0x8c, 0xfd, 0x75, 0xbb, 0x7d, 0x3d, 0xc1, 0xf0, 0x10, 0x9a, 0xd8,
0x4b, 0x0e, 0xe3, 0x94, 0x9f, },
{ 0x29, 0xbb, 0x8f, 0x6c, 0xd1, 0xf2, 0xb6, 0xaf, 0xe5, 0xe3, 0x2d, 0xdc,
0x6f, 0xa4, 0x53, 0x88, 0xd8, 0xcf, 0x4d, 0x45, 0x42, 0x62, 0xdb, 0xdf,
0xf8, 0x45, 0xc2, 0x13, 0xec, 0x35, },
{ 0x06, 0x3c, 0xe3, 0x2c, 0x15, 0xc6, 0x43, 0x03, 0x81, 0xfb, 0x08, 0x76,
0x33, 0xcb, 0x02, 0xc1, 0xba, 0x33, 0xe5, 0xe0, 0xd1, 0x92, 0xa8, 0x46,
0x28, 0x3f, 0x3e, 0x9d, 0x2c, 0x44, 0x54, },
{ 0xea, 0xbb, 0x96, 0xf8, 0xd1, 0x8b, 0x04, 0x11, 0x40, 0x78, 0x42, 0x02,
0x19, 0xd1, 0xbc, 0x65, 0x92, 0xd3, 0xc3, 0xd6, 0xd9, 0x19, 0xe7, 0xc3,
0x40, 0x97, 0xbd, 0xd4, 0xed, 0xfa, 0x5e, 0x28, },
{ 0x02, },
{ 0x52, 0xa8, },
{ 0x38, 0x25, 0x0d, },
{ 0xe3, 0x04, 0xd4, 0x92, },
{ 0x97, 0xdb, 0xf7, 0x81, 0xca, },
{ 0x8a, 0x56, 0x9d, 0x62, 0x56, 0xcc, },
{ 0xa1, 0x8e, 0x3c, 0x72, 0x8f, 0x63, 0x03, },
{ 0xf7, 0xf3, 0x39, 0x09, 0x0a, 0xa1, 0xbb, 0x23, },
{ 0x6b, 0x03, 0xc0, 0xe9, 0xd9, 0x83, 0x05, 0x22, 0x01, },
{ 0x1b, 0x4b, 0xf5, 0xd6, 0x4f, 0x05, 0x75, 0x91, 0x4c, 0x7f, },
{ 0x4c, 0x8c, 0x25, 0x20, 0x21, 0xcb, 0xc2, 0x4b, 0x3a, 0x5b, 0x8d, },
{ 0x56, 0xe2, 0x77, 0xa0, 0xb6, 0x9f, 0x81, 0xec, 0x83, 0x75, 0xc4, 0xf9, },
{ 0x71, 0x70, 0x0f, 0xad, 0x4d, 0x35, 0x81, 0x9d, 0x88, 0x69, 0xf9, 0xaa,
0xd3, },
{ 0x50, 0x6e, 0x86, 0x6e, 0x43, 0xc0, 0xc2, 0x44, 0xc2, 0xe2, 0xa0, 0x1c,
0xb7, 0x9a, },
{ 0xe4, 0x7e, 0x72, 0xc6, 0x12, 0x8e, 0x7c, 0xfc, 0xbd, 0xe2, 0x08, 0x31,
0x3d, 0x47, 0x3d, },
{ 0x08, 0x97, 0x5b, 0x80, 0xae, 0xc4, 0x1d, 0x50, 0x77, 0xdf, 0x1f, 0xd0,
0x24, 0xf0, 0x17, 0xc0, },
{ 0x01, 0xb6, 0x29, 0xf4, 0xaf, 0x78, 0x5f, 0xb6, 0x91, 0xdd, 0x76, 0x76,
0xd2, 0xfd, 0x0c, 0x47, 0x40, },
{ 0xa1, 0xd8, 0x09, 0x97, 0x7a, 0xa6, 0xc8, 0x94, 0xf6, 0x91, 0x7b, 0xae,
0x2b, 0x9f, 0x0d, 0x83, 0x48, 0xf7, },
{ 0x12, 0xd5, 0x53, 0x7d, 0x9a, 0xb0, 0xbe, 0xd9, 0xed, 0xe9, 0x9e, 0xee,
0x61, 0x5b, 0x42, 0xf2, 0xc0, 0x73, 0xc0, },
{ 0xd5, 0x77, 0xd6, 0x5c, 0x6e, 0xa5, 0x69, 0x2b, 0x3b, 0x8c, 0xd6, 0x7d,
0x1d, 0xbe, 0x2c, 0xa1, 0x02, 0x21, 0xcd, 0x29, },
{ 0xa4, 0x98, 0x80, 0xca, 0x22, 0xcf, 0x6a, 0xab, 0x5e, 0x40, 0x0d, 0x61,
0x08, 0x21, 0xef, 0xc0, 0x6c, 0x52, 0xb4, 0xb0, 0x53, },
{ 0xbf, 0xaf, 0x8f, 0x3b, 0x7a, 0x97, 0x33, 0xe5, 0xca, 0x07, 0x37, 0xfd,
0x15, 0xdf, 0xce, 0x26, 0x2a, 0xb1, 0xa7, 0x0b, 0xb3, 0xac, },
{ 0x16, 0x22, 0xe1, 0xbc, 0x99, 0x4e, 0x01, 0xf0, 0xfa, 0xff, 0x8f, 0xa5,
0x0c, 0x61, 0xb0, 0xad, 0xcc, 0xb1, 0xe1, 0x21, 0x46, 0xfa, 0x2e, },
{ 0x11, 0x5b, 0x0b, 0x2b, 0xe6, 0x14, 0xc1, 0xd5, 0x4d, 0x71, 0x5e, 0x17,
0xea, 0x23, 0xdd, 0x6c, 0xbd, 0x1d, 0xbe, 0x12, 0x1b, 0xee, 0x4c, 0x1a, },
{ 0x40, 0x88, 0x22, 0xf3, 0x20, 0x6c, 0xed, 0xe1, 0x36, 0x34, 0x62, 0x2c,
0x98, 0x83, 0x52, 0xe2, 0x25, 0xee, 0xe9, 0xf5, 0xe1, 0x17, 0xf0, 0x5c,
0xae, },
{ 0xc3, 0x76, 0x37, 0xde, 0x95, 0x8c, 0xca, 0x2b, 0x0c, 0x23, 0xe7, 0xb5,
0x38, 0x70, 0x61, 0xcc, 0xff, 0xd3, 0x95, 0x7b, 0xf3, 0xff, 0x1f, 0x9d,
0x59, 0x00, },
{ 0x0c, 0x19, 0x52, 0x05, 0x22, 0x53, 0xcb, 0x48, 0xd7, 0x10, 0x0e, 0x7e,
0x14, 0x69, 0xb5, 0xa2, 0x92, 0x43, 0xa3, 0x9e, 0x4b, 0x8f, 0x51, 0x2c,
0x5a, 0x2c, 0x3b, },
{ 0xe1, 0x9d, 0x70, 0x70, 0x28, 0xec, 0x86, 0x40, 0x55, 0x33, 0x56, 0xda,
0x88, 0xca, 0xee, 0xc8, 0x6a, 0x20, 0xb1, 0xe5, 0x3d, 0x57, 0xf8, 0x3c,
0x10, 0x07, 0x2a, 0xc4, },
{ 0x0b, 0xae, 0xf1, 0xc4, 0x79, 0xee, 0x1b, 0x3d, 0x27, 0x35, 0x8d, 0x14,
0xd6, 0xae, 0x4e, 0x3c, 0xe9, 0x53, 0x50, 0xb5, 0xcc, 0x0c, 0xf7, 0xdf,
0xee, 0xa1, 0x74, 0xd6, 0x71, },
{ 0xe6, 0xa4, 0xf4, 0x99, 0x98, 0xb9, 0x80, 0xea, 0x96, 0x7f, 0x4f, 0x33,
0xcf, 0x74, 0x25, 0x6f, 0x17, 0x6c, 0xbf, 0xf5, 0x5c, 0x38, 0xd0, 0xff,
0x96, 0xcb, 0x13, 0xf9, 0xdf, 0xfd, },
{ 0xbe, 0x92, 0xeb, 0xba, 0x44, 0x2c, 0x24, 0x74, 0xd4, 0x03, 0x27, 0x3c,
0x5d, 0x5b, 0x03, 0x30, 0x87, 0x63, 0x69, 0xe0, 0xb8, 0x94, 0xf4, 0x44,
0x7e, 0xad, 0xcd, 0x20, 0x12, 0x16, 0x79, },
{ 0x30, 0xf1, 0xc4, 0x8e, 0x05, 0x90, 0x2a, 0x97, 0x63, 0x94, 0x46, 0xff,
0xce, 0xd8, 0x67, 0xa7, 0xac, 0x33, 0x8c, 0x95, 0xb7, 0xcd, 0xa3, 0x23,
0x98, 0x9d, 0x76, 0x6c, 0x9d, 0xa8, 0xd6, 0x8a, },
{ 0xbe, },
{ 0x17, 0x6c, },
{ 0x1a, 0x42, 0x4f, },
{ 0xba, 0xaf, 0xb7, 0x65, },
{ 0xc2, 0x63, 0x43, 0x6a, 0xea, },
{ 0xe4, 0x4d, 0xad, 0xf2, 0x0b, 0x02, },
{ 0x04, 0xc7, 0xc4, 0x7f, 0xa9, 0x2b, 0xce, },
{ 0x66, 0xf6, 0x67, 0xcb, 0x03, 0x53, 0xc8, 0xf1, },
{ 0x56, 0xa3, 0x60, 0x78, 0xc9, 0x5f, 0x70, 0x1b, 0x5e, },
{ 0x99, 0xff, 0x81, 0x7c, 0x13, 0x3c, 0x29, 0x79, 0x4b, 0x65, },
{ 0x51, 0x10, 0x50, 0x93, 0x01, 0x93, 0xb7, 0x01, 0xc9, 0x18, 0xb7, },
{ 0x8e, 0x3c, 0x42, 0x1e, 0x5e, 0x7d, 0xc1, 0x50, 0x70, 0x1f, 0x00, 0x98, },
{ 0x5f, 0xd9, 0x9b, 0xc8, 0xd7, 0xb2, 0x72, 0x62, 0x1a, 0x1e, 0xba, 0x92,
0xe9, },
{ 0x70, 0x2b, 0xba, 0xfe, 0xad, 0x5d, 0x96, 0x3f, 0x27, 0xc2, 0x41, 0x6d,
0xc4, 0xb3, },
{ 0xae, 0xe0, 0xd5, 0xd4, 0xc7, 0xae, 0x15, 0x5e, 0xdc, 0xdd, 0x33, 0x60,
0xd7, 0xd3, 0x5e, },
{ 0x79, 0x8e, 0xbc, 0x9e, 0x20, 0xb9, 0x19, 0x4b, 0x63, 0x80, 0xf3, 0x16,
0xaf, 0x39, 0xbd, 0x92, },
{ 0xc2, 0x0e, 0x85, 0xa0, 0x0b, 0x9a, 0xb0, 0xec, 0xde, 0x38, 0xd3, 0x10,
0xd9, 0xa7, 0x66, 0x27, 0xcf, },
{ 0x0e, 0x3b, 0x75, 0x80, 0x67, 0x14, 0x0c, 0x02, 0x90, 0xd6, 0xb3, 0x02,
0x81, 0xf6, 0xa6, 0x87, 0xce, 0x58, },
{ 0x79, 0xb5, 0xe9, 0x5d, 0x52, 0x4d, 0xf7, 0x59, 0xf4, 0x2e, 0x27, 0xdd,
0xb3, 0xed, 0x57, 0x5b, 0x82, 0xea, 0x6f, },
{ 0xa2, 0x97, 0xf5, 0x80, 0x02, 0x3d, 0xde, 0xa3, 0xf9, 0xf6, 0xab, 0xe3,
0x57, 0x63, 0x7b, 0x9b, 0x10, 0x42, 0x6f, 0xf2, },
{ 0x12, 0x7a, 0xfc, 0xb7, 0x67, 0x06, 0x0c, 0x78, 0x1a, 0xfe, 0x88, 0x4f,
0xc6, 0xac, 0x52, 0x96, 0x64, 0x28, 0x97, 0x84, 0x06, },
{ 0xc5, 0x04, 0x44, 0x6b, 0xb2, 0xa5, 0xa4, 0x66, 0xe1, 0x76, 0xa2, 0x51,
0xf9, 0x59, 0x69, 0x97, 0x56, 0x0b, 0xbf, 0x50, 0xb3, 0x34, },
{ 0x21, 0x32, 0x6b, 0x42, 0xb5, 0xed, 0x71, 0x8d, 0xf7, 0x5a, 0x35, 0xe3,
0x90, 0xe2, 0xee, 0xaa, 0x89, 0xf6, 0xc9, 0x9c, 0x4d, 0x73, 0xf4, },
{ 0x4c, 0xa6, 0x09, 0xf4, 0x48, 0xe7, 0x46, 0xbc, 0x49, 0xfc, 0xe5, 0xda,
0xd1, 0x87, 0x13, 0x17, 0x4c, 0x59, 0x71, 0x26, 0x5b, 0x2c, 0x42, 0xb7, },
{ 0x13, 0x63, 0xf3, 0x40, 0x02, 0xe5, 0xa3, 0x3a, 0x5e, 0x8e, 0xf8, 0xb6,
0x8a, 0x49, 0x60, 0x76, 0x34, 0x72, 0x94, 0x73, 0xf6, 0xd9, 0x21, 0x6a,
0x26, },
{ 0xdf, 0x75, 0x16, 0x10, 0x1b, 0x5e, 0x81, 0xc3, 0xc8, 0xde, 0x34, 0x24,
0xb0, 0x98, 0xeb, 0x1b, 0x8f, 0xa1, 0x9b, 0x05, 0xee, 0xa5, 0xe9, 0x35,
0xf4, 0x1d, },
{ 0xcd, 0x21, 0x93, 0x6e, 0x5b, 0xa0, 0x26, 0x2b, 0x21, 0x0e, 0xa0, 0xb9,
0x1c, 0xb5, 0xbb, 0xb8, 0xf8, 0x1e, 0xff, 0x5c, 0xa8, 0xf9, 0x39, 0x46,
0x4e, 0x29, 0x26, },
{ 0x73, 0x7f, 0x0e, 0x3b, 0x0b, 0x5c, 0xf9, 0x60, 0xaa, 0x88, 0xa1, 0x09,
0xb1, 0x5d, 0x38, 0x7b, 0x86, 0x8f, 0x13, 0x7a, 0x8d, 0x72, 0x7a, 0x98,
0x1a, 0x5b, 0xff, 0xc9, },
{ 0xd3, 0x3c, 0x61, 0x71, 0x44, 0x7e, 0x31, 0x74, 0x98, 0x9d, 0x9a, 0xd2,
0x27, 0xf3, 0x46, 0x43, 0x42, 0x51, 0xd0, 0x5f, 0xe9, 0x1c, 0x5c, 0x69,
0xbf, 0xf6, 0xbe, 0x3c, 0x40, },
{ 0x31, 0x99, 0x31, 0x9f, 0xaa, 0x43, 0x2e, 0x77, 0x3e, 0x74, 0x26, 0x31,
0x5e, 0x61, 0xf1, 0x87, 0xe2, 0xeb, 0x9b, 0xcd, 0xd0, 0x3a, 0xee, 0x20,
0x7e, 0x10, 0x0a, 0x0b, 0x7e, 0xfa, },
{ 0xa4, 0x27, 0x80, 0x67, 0x81, 0x2a, 0xa7, 0x62, 0xf7, 0x6e, 0xda, 0xd4,
0x5c, 0x39, 0x74, 0xad, 0x7e, 0xbe, 0xad, 0xa5, 0x84, 0x7f, 0xa9, 0x30,
0x5d, 0xdb, 0xe2, 0x05, 0x43, 0xf7, 0x1b, },
{ 0x0b, 0x37, 0xd8, 0x02, 0xe1, 0x83, 0xd6, 0x80, 0xf2, 0x35, 0xc2, 0xb0,
0x37, 0xef, 0xef, 0x5e, 0x43, 0x93, 0xf0, 0x49, 0x45, 0x0a, 0xef, 0xb5,
0x76, 0x70, 0x12, 0x44, 0xc4, 0xdb, 0xf5, 0x7a, },
{ 0x1f, },
{ 0x82, 0x60, },
{ 0xcc, 0xe3, 0x08, },
{ 0x56, 0x17, 0xe4, 0x59, },
{ 0xe2, 0xd7, 0x9e, 0xc4, 0x4c, },
{ 0xb2, 0xad, 0xd3, 0x78, 0x58, 0x5a, },
{ 0xce, 0x43, 0xb4, 0x02, 0x96, 0xab, 0x3c, },
{ 0xe6, 0x05, 0x1a, 0x73, 0x22, 0x32, 0xbb, 0x77, },
{ 0x23, 0xe7, 0xda, 0xfe, 0x2c, 0xef, 0x8c, 0x22, 0xec, },
{ 0xe9, 0x8e, 0x55, 0x38, 0xd1, 0xd7, 0x35, 0x23, 0x98, 0xc7, },
{ 0xb5, 0x81, 0x1a, 0xe5, 0xb5, 0xa5, 0xd9, 0x4d, 0xca, 0x41, 0xe7, },
{ 0x41, 0x16, 0x16, 0x95, 0x8d, 0x9e, 0x0c, 0xea, 0x8c, 0x71, 0x9a, 0xc1, },
{ 0x7c, 0x33, 0xc0, 0xa4, 0x00, 0x62, 0xea, 0x60, 0x67, 0xe4, 0x20, 0xbc,
0x5b, },
{ 0xdb, 0xb1, 0xdc, 0xfd, 0x08, 0xc0, 0xde, 0x82, 0xd1, 0xde, 0x38, 0xc0,
0x90, 0x48, },
{ 0x37, 0x18, 0x2e, 0x0d, 0x61, 0xaa, 0x61, 0xd7, 0x86, 0x20, 0x16, 0x60,
0x04, 0xd9, 0xd5, },
{ 0xb0, 0xcf, 0x2c, 0x4c, 0x5e, 0x5b, 0x4f, 0x2a, 0x23, 0x25, 0x58, 0x47,
0xe5, 0x31, 0x06, 0x70, },
{ 0x91, 0xa0, 0xa3, 0x86, 0x4e, 0xe0, 0x72, 0x38, 0x06, 0x67, 0x59, 0x5c,
0x70, 0x25, 0xdb, 0x33, 0x27, },
{ 0x44, 0x58, 0x66, 0xb8, 0x58, 0xc7, 0x13, 0xed, 0x4c, 0xc0, 0xf4, 0x9a,
0x1e, 0x67, 0x75, 0x33, 0xb6, 0xb8, },
{ 0x7f, 0x98, 0x4a, 0x8e, 0x50, 0xa2, 0x5c, 0xcd, 0x59, 0xde, 0x72, 0xb3,
0x9d, 0xc3, 0x09, 0x8a, 0xab, 0x56, 0xf1, },
{ 0x80, 0x96, 0x49, 0x1a, 0x59, 0xa2, 0xc5, 0xd5, 0xa7, 0x20, 0x8a, 0xb7,
0x27, 0x62, 0x84, 0x43, 0xc6, 0xe1, 0x1b, 0x5d, },
{ 0x6b, 0xb7, 0x2b, 0x26, 0x62, 0x14, 0x70, 0x19, 0x3d, 0x4d, 0xac, 0xac,
0x63, 0x58, 0x5e, 0x94, 0xb5, 0xb7, 0xe8, 0xe8, 0xa2, },
{ 0x20, 0xa8, 0xc0, 0xfd, 0x63, 0x3d, 0x6e, 0x98, 0xcf, 0x0c, 0x49, 0x98,
0xe4, 0x5a, 0xfe, 0x8c, 0xaa, 0x70, 0x82, 0x1c, 0x7b, 0x74, },
{ 0xc8, 0xe8, 0xdd, 0xdf, 0x69, 0x30, 0x01, 0xc2, 0x0f, 0x7e, 0x2f, 0x11,
0xcc, 0x3e, 0x17, 0xa5, 0x69, 0x40, 0x3f, 0x0e, 0x79, 0x7f, 0xcf, },
{ 0xdb, 0x61, 0xc0, 0xe2, 0x2e, 0x49, 0x07, 0x31, 0x1d, 0x91, 0x42, 0x8a,
0xfc, 0x5e, 0xd3, 0xf8, 0x56, 0x1f, 0x2b, 0x73, 0xfd, 0x9f, 0xb2, 0x8e, },
{ 0x0c, 0x89, 0x55, 0x0c, 0x1f, 0x59, 0x2c, 0x9d, 0x1b, 0x29, 0x1d, 0x41,
0x1d, 0xe6, 0x47, 0x8f, 0x8c, 0x2b, 0xea, 0x8f, 0xf0, 0xff, 0x21, 0x70,
0x88, },
{ 0x12, 0x18, 0x95, 0xa6, 0x59, 0xb1, 0x31, 0x24, 0x45, 0x67, 0x55, 0xa4,
0x1a, 0x2d, 0x48, 0x67, 0x1b, 0x43, 0x88, 0x2d, 0x8e, 0xa0, 0x70, 0xb3,
0xc6, 0xbb, },
{ 0xe7, 0xb1, 0x1d, 0xb2, 0x76, 0x4d, 0x68, 0x68, 0x68, 0x23, 0x02, 0x55,
0x3a, 0xe2, 0xe5, 0xd5, 0x4b, 0x43, 0xf9, 0x34, 0x77, 0x5c, 0xa1, 0xf5,
0x55, 0xfd, 0x4f, },
{ 0x8c, 0x87, 0x5a, 0x08, 0x3a, 0x73, 0xad, 0x61, 0xe1, 0xe7, 0x99, 0x7e,
0xf0, 0x5d, 0xe9, 0x5d, 0x16, 0x43, 0x80, 0x2f, 0xd0, 0x66, 0x34, 0xe2,
0x42, 0x64, 0x3b, 0x1a, },
{ 0x39, 0xc1, 0x99, 0xcf, 0x22, 0xbf, 0x16, 0x8f, 0x9f, 0x80, 0x7f, 0x95,
0x0a, 0x05, 0x67, 0x27, 0xe7, 0x15, 0xdf, 0x9d, 0xb2, 0xfe, 0x1c, 0xb5,
0x1d, 0x60, 0x8f, 0x8a, 0x1d, },
{ 0x9b, 0x6e, 0x08, 0x09, 0x06, 0x73, 0xab, 0x68, 0x02, 0x62, 0x1a, 0xe4,
0xd4, 0xdf, 0xc7, 0x02, 0x4c, 0x6a, 0x5f, 0xfd, 0x23, 0xac, 0xae, 0x6d,
0x43, 0xa4, 0x7a, 0x50, 0x60, 0x3c, },
{ 0x1d, 0xb4, 0xc6, 0xe1, 0xb1, 0x4b, 0xe3, 0xf2, 0xe2, 0x1a, 0x73, 0x1b,
0xa0, 0x92, 0xa7, 0xf5, 0xff, 0x8f, 0x8b, 0x5d, 0xdf, 0xa8, 0x04, 0xb3,
0xb0, 0xf7, 0xcc, 0x12, 0xfa, 0x35, 0x46, },
{ 0x49, 0x45, 0x97, 0x11, 0x0f, 0x1c, 0x60, 0x8e, 0xe8, 0x47, 0x30, 0xcf,
0x60, 0xa8, 0x71, 0xc5, 0x1b, 0xe9, 0x39, 0x4d, 0x49, 0xb6, 0x12, 0x1f,
0x24, 0xab, 0x37, 0xff, 0x83, 0xc2, 0xe1, 0x3a, },
{ 0x60, },
{ 0x24, 0x26, },
{ 0x47, 0xeb, 0xc9, },
{ 0x4a, 0xd0, 0xbc, 0xf0, },
{ 0x8e, 0x2b, 0xc9, 0x85, 0x3c, },
{ 0xa2, 0x07, 0x15, 0xb8, 0x12, 0x74, },
{ 0x0f, 0xdb, 0x5b, 0x33, 0x69, 0xfe, 0x4b, },
{ 0xa2, 0x86, 0x54, 0xf4, 0xfd, 0xb2, 0xd4, 0xe6, },
{ 0xbb, 0x84, 0x78, 0x49, 0x27, 0x8e, 0x61, 0xda, 0x60, },
{ 0x04, 0xc3, 0xcd, 0xaa, 0x8f, 0xa7, 0x03, 0xc9, 0xf9, 0xb6, },
{ 0xf8, 0x27, 0x1d, 0x61, 0xdc, 0x21, 0x42, 0xdd, 0xad, 0x92, 0x40, },
{ 0x12, 0x87, 0xdf, 0xc2, 0x41, 0x45, 0x5a, 0x36, 0x48, 0x5b, 0x51, 0x2b, },
{ 0xbb, 0x37, 0x5d, 0x1f, 0xf1, 0x68, 0x7a, 0xc4, 0xa5, 0xd2, 0xa4, 0x91,
0x8d, },
{ 0x5b, 0x27, 0xd1, 0x04, 0x54, 0x52, 0x9f, 0xa3, 0x47, 0x86, 0x33, 0x33,
0xbf, 0xa0, },
{ 0xcf, 0x04, 0xea, 0xf8, 0x03, 0x2a, 0x43, 0xff, 0xa6, 0x68, 0x21, 0x4c,
0xd5, 0x4b, 0xed, },
{ 0xaf, 0xb8, 0xbc, 0x63, 0x0f, 0x18, 0x4d, 0xe2, 0x7a, 0xdd, 0x46, 0x44,
0xc8, 0x24, 0x0a, 0xb7, },
{ 0x3e, 0xdc, 0x36, 0xe4, 0x89, 0xb1, 0xfa, 0xc6, 0x40, 0x93, 0x2e, 0x75,
0xb2, 0x15, 0xd1, 0xb1, 0x10, },
{ 0x6c, 0xd8, 0x20, 0x3b, 0x82, 0x79, 0xf9, 0xc8, 0xbc, 0x9d, 0xe0, 0x35,
0xbe, 0x1b, 0x49, 0x1a, 0xbc, 0x3a, },
{ 0x78, 0x65, 0x2c, 0xbe, 0x35, 0x67, 0xdc, 0x78, 0xd4, 0x41, 0xf6, 0xc9,
0xde, 0xde, 0x1f, 0x18, 0x13, 0x31, 0x11, },
{ 0x8a, 0x7f, 0xb1, 0x33, 0x8f, 0x0c, 0x3c, 0x0a, 0x06, 0x61, 0xf0, 0x47,
0x29, 0x1b, 0x29, 0xbc, 0x1c, 0x47, 0xef, 0x7a, },
{ 0x65, 0x91, 0xf1, 0xe6, 0xb3, 0x96, 0xd3, 0x8c, 0xc2, 0x4a, 0x59, 0x35,
0x72, 0x8e, 0x0b, 0x9a, 0x87, 0xca, 0x34, 0x7b, 0x63, },
{ 0x5f, 0x08, 0x87, 0x80, 0x56, 0x25, 0x89, 0x77, 0x61, 0x8c, 0x64, 0xa1,
0x59, 0x6d, 0x59, 0x62, 0xe8, 0x4a, 0xc8, 0x58, 0x99, 0xd1, },
{ 0x23, 0x87, 0x1d, 0xed, 0x6f, 0xf2, 0x91, 0x90, 0xe2, 0xfe, 0x43, 0x21,
0xaf, 0x97, 0xc6, 0xbc, 0xd7, 0x15, 0xc7, 0x2d, 0x08, 0x77, 0x91, },
{ 0x90, 0x47, 0x9a, 0x9e, 0x3a, 0xdf, 0xf3, 0xc9, 0x4c, 0x1e, 0xa7, 0xd4,
0x6a, 0x32, 0x90, 0xfe, 0xb7, 0xb6, 0x7b, 0xfa, 0x96, 0x61, 0xfb, 0xa4, },
{ 0xb1, 0x67, 0x60, 0x45, 0xb0, 0x96, 0xc5, 0x15, 0x9f, 0x4d, 0x26, 0xd7,
0x9d, 0xf1, 0xf5, 0x6d, 0x21, 0x00, 0x94, 0x31, 0x64, 0x94, 0xd3, 0xa7,
0xd3, },
{ 0x02, 0x3e, 0xaf, 0xf3, 0x79, 0x73, 0xa5, 0xf5, 0xcc, 0x7a, 0x7f, 0xfb,
0x79, 0x2b, 0x85, 0x8c, 0x88, 0x72, 0x06, 0xbe, 0xfe, 0xaf, 0xc1, 0x16,
0xa6, 0xd6, },
{ 0x2a, 0xb0, 0x1a, 0xe5, 0xaa, 0x6e, 0xb3, 0xae, 0x53, 0x85, 0x33, 0x80,
0x75, 0xae, 0x30, 0xe6, 0xb8, 0x72, 0x42, 0xf6, 0x25, 0x4f, 0x38, 0x88,
0x55, 0xd1, 0xa9, },
{ 0x90, 0xd8, 0x0c, 0xc0, 0x93, 0x4b, 0x4f, 0x9e, 0x65, 0x6c, 0xa1, 0x54,
0xa6, 0xf6, 0x6e, 0xca, 0xd2, 0xbb, 0x7e, 0x6a, 0x1c, 0xd3, 0xce, 0x46,
0xef, 0xb0, 0x00, 0x8d, },
{ 0xed, 0x9c, 0x49, 0xcd, 0xc2, 0xde, 0x38, 0x0e, 0xe9, 0x98, 0x6c, 0xc8,
0x90, 0x9e, 0x3c, 0xd4, 0xd3, 0xeb, 0x88, 0x32, 0xc7, 0x28, 0xe3, 0x94,
0x1c, 0x9f, 0x8b, 0xf3, 0xcb, },
{ 0xac, 0xe7, 0x92, 0x16, 0xb4, 0x14, 0xa0, 0xe4, 0x04, 0x79, 0xa2, 0xf4,
0x31, 0xe6, 0x0c, 0x26, 0xdc, 0xbf, 0x2f, 0x69, 0x1b, 0x55, 0x94, 0x67,
0xda, 0x0c, 0xd7, 0x32, 0x1f, 0xef, },
{ 0x68, 0x63, 0x85, 0x57, 0x95, 0x9e, 0x42, 0x27, 0x41, 0x43, 0x42, 0x02,
0xa5, 0x78, 0xa7, 0xc6, 0x43, 0xc1, 0x6a, 0xba, 0x70, 0x80, 0xcd, 0x04,
0xb6, 0x78, 0x76, 0x29, 0xf3, 0xe8, 0xa0, },
{ 0xe6, 0xac, 0x8d, 0x9d, 0xf0, 0xc0, 0xf7, 0xf7, 0xe3, 0x3e, 0x4e, 0x28,
0x0f, 0x59, 0xb2, 0x67, 0x9e, 0x84, 0x34, 0x42, 0x96, 0x30, 0x2b, 0xca,
0x49, 0xb6, 0xc5, 0x9a, 0x84, 0x59, 0xa7, 0x81, },
{ 0x7e, },
{ 0x1e, 0x21, },
{ 0x26, 0xd3, 0xdd, },
{ 0x2c, 0xd4, 0xb3, 0x3d, },
{ 0x86, 0x7b, 0x76, 0x3c, 0xf0, },
{ 0x12, 0xc3, 0x70, 0x1d, 0x55, 0x18, },
{ 0x96, 0xc2, 0xbd, 0x61, 0x55, 0xf4, 0x24, },
{ 0x20, 0x51, 0xf7, 0x86, 0x58, 0x8f, 0x07, 0x2a, },
{ 0x93, 0x15, 0xa8, 0x1d, 0xda, 0x97, 0xee, 0x0e, 0x6c, },
{ 0x39, 0x93, 0xdf, 0xd5, 0x0e, 0xca, 0xdc, 0x7a, 0x92, 0xce, },
{ 0x60, 0xd5, 0xfd, 0xf5, 0x1b, 0x26, 0x82, 0x26, 0x73, 0x02, 0xbc, },
{ 0x98, 0xf2, 0x34, 0xe1, 0xf5, 0xfb, 0x00, 0xac, 0x10, 0x4a, 0x38, 0x9f, },
{ 0xda, 0x3a, 0x92, 0x8a, 0xd0, 0xcd, 0x12, 0xcd, 0x15, 0xbb, 0xab, 0x77,
0x66, },
{ 0xa2, 0x92, 0x1a, 0xe5, 0xca, 0x0c, 0x30, 0x75, 0xeb, 0xaf, 0x00, 0x31,
0x55, 0x66, },
{ 0x06, 0xea, 0xfd, 0x3e, 0x86, 0x38, 0x62, 0x4e, 0xa9, 0x12, 0xa4, 0x12,
0x43, 0xbf, 0xa1, },
{ 0xe4, 0x71, 0x7b, 0x94, 0xdb, 0xa0, 0xd2, 0xff, 0x9b, 0xeb, 0xad, 0x8e,
0x95, 0x8a, 0xc5, 0xed, },
{ 0x25, 0x5a, 0x77, 0x71, 0x41, 0x0e, 0x7a, 0xe9, 0xed, 0x0c, 0x10, 0xef,
0xf6, 0x2b, 0x3a, 0xba, 0x60, },
{ 0xee, 0xe2, 0xa3, 0x67, 0x64, 0x1d, 0xc6, 0x04, 0xc4, 0xe1, 0x68, 0xd2,
0x6e, 0xd2, 0x91, 0x75, 0x53, 0x07, },
{ 0xe0, 0xf6, 0x4d, 0x8f, 0x68, 0xfc, 0x06, 0x7e, 0x18, 0x79, 0x7f, 0x2b,
0x6d, 0xef, 0x46, 0x7f, 0xab, 0xb2, 0xad, },
{ 0x3d, 0x35, 0x88, 0x9f, 0x2e, 0xcf, 0x96, 0x45, 0x07, 0x60, 0x71, 0x94,
0x00, 0x8d, 0xbf, 0xf4, 0xef, 0x46, 0x2e, 0x3c, },
{ 0x43, 0xcf, 0x98, 0xf7, 0x2d, 0xf4, 0x17, 0xe7, 0x8c, 0x05, 0x2d, 0x9b,
0x24, 0xfb, 0x4d, 0xea, 0x4a, 0xec, 0x01, 0x25, 0x29, },
{ 0x8e, 0x73, 0x9a, 0x78, 0x11, 0xfe, 0x48, 0xa0, 0x3b, 0x1a, 0x26, 0xdf,
0x25, 0xe9, 0x59, 0x1c, 0x70, 0x07, 0x9f, 0xdc, 0xa0, 0xa6, },
{ 0xe8, 0x47, 0x71, 0xc7, 0x3e, 0xdf, 0xb5, 0x13, 0xb9, 0x85, 0x13, 0xa8,
0x54, 0x47, 0x6e, 0x59, 0x96, 0x09, 0x13, 0x5f, 0x82, 0x16, 0x0b, },
{ 0xfb, 0xc0, 0x8c, 0x03, 0x21, 0xb3, 0xc4, 0xb5, 0x43, 0x32, 0x6c, 0xea,
0x7f, 0xa8, 0x43, 0x91, 0xe8, 0x4e, 0x3f, 0xbf, 0x45, 0x58, 0x6a, 0xa3, },
{ 0x55, 0xf8, 0xf3, 0x00, 0x76, 0x09, 0xef, 0x69, 0x5d, 0xd2, 0x8a, 0xf2,
0x65, 0xc3, 0xcb, 0x9b, 0x43, 0xfd, 0xb1, 0x7e, 0x7f, 0xa1, 0x94, 0xb0,
0xd7, },
{ 0xaa, 0x13, 0xc1, 0x51, 0x40, 0x6d, 0x8d, 0x4c, 0x0a, 0x95, 0x64, 0x7b,
0xd1, 0x96, 0xb6, 0x56, 0xb4, 0x5b, 0xcf, 0xd6, 0xd9, 0x15, 0x97, 0xdd,
0xb6, 0xef, },
{ 0xaf, 0xb7, 0x36, 0xb0, 0x04, 0xdb, 0xd7, 0x9c, 0x9a, 0x44, 0xc4, 0xf6,
0x1f, 0x12, 0x21, 0x2d, 0x59, 0x30, 0x54, 0xab, 0x27, 0x61, 0xa3, 0x57,
0xef, 0xf8, 0x53, },
{ 0x97, 0x34, 0x45, 0x3e, 0xce, 0x7c, 0x35, 0xa2, 0xda, 0x9f, 0x4b, 0x46,
0x6c, 0x11, 0x67, 0xff, 0x2f, 0x76, 0x58, 0x15, 0x71, 0xfa, 0x44, 0x89,
0x89, 0xfd, 0xf7, 0x99, },
{ 0x1f, 0xb1, 0x62, 0xeb, 0x83, 0xc5, 0x9c, 0x89, 0xf9, 0x2c, 0xd2, 0x03,
0x61, 0xbc, 0xbb, 0xa5, 0x74, 0x0e, 0x9b, 0x7e, 0x82, 0x3e, 0x70, 0x0a,
0xa9, 0x8f, 0x2b, 0x59, 0xfb, },
{ 0xf8, 0xca, 0x5e, 0x3a, 0x4f, 0x9e, 0x10, 0x69, 0x10, 0xd5, 0x4c, 0xeb,
0x1a, 0x0f, 0x3c, 0x6a, 0x98, 0xf5, 0xb0, 0x97, 0x5b, 0x37, 0x2f, 0x0d,
0xbd, 0x42, 0x4b, 0x69, 0xa1, 0x82, },
{ 0x12, 0x8c, 0x6d, 0x52, 0x08, 0xef, 0x74, 0xb2, 0xe6, 0xaa, 0xd3, 0xb0,
0x26, 0xb0, 0xd9, 0x94, 0xb6, 0x11, 0x45, 0x0e, 0x36, 0x71, 0x14, 0x2d,
0x41, 0x8c, 0x21, 0x53, 0x31, 0xe9, 0x68, },
{ 0xee, 0xea, 0x0d, 0x89, 0x47, 0x7e, 0x72, 0xd1, 0xd8, 0xce, 0x58, 0x4c,
0x94, 0x1f, 0x0d, 0x51, 0x08, 0xa3, 0xb6, 0x3d, 0xe7, 0x82, 0x46, 0x92,
0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, },
};
static bool __init noinline_for_stack blake2s_digest_test(void)
{
u8 key[BLAKE2S_KEY_SIZE];
u8 buf[ARRAY_SIZE(blake2s_testvecs)];
u8 hash[BLAKE2S_HASH_SIZE];
struct blake2s_state state;
bool success = true;
int i, l;
key[0] = key[1] = 1;
for (i = 2; i < sizeof(key); ++i)
key[i] = key[i - 2] + key[i - 1];
for (i = 0; i < sizeof(buf); ++i)
buf[i] = (u8)i;
for (i = l = 0; i < ARRAY_SIZE(blake2s_testvecs); l = (l + 37) % ++i) {
int outlen = 1 + i % BLAKE2S_HASH_SIZE;
int keylen = (13 * i) % (BLAKE2S_KEY_SIZE + 1);
blake2s(hash, buf, key + BLAKE2S_KEY_SIZE - keylen, outlen, i,
keylen);
if (memcmp(hash, blake2s_testvecs[i], outlen)) {
pr_err("blake2s self-test %d: FAIL\n", i + 1);
success = false;
}
if (!keylen)
blake2s_init(&state, outlen);
else
blake2s_init_key(&state, outlen,
key + BLAKE2S_KEY_SIZE - keylen,
keylen);
blake2s_update(&state, buf, l);
blake2s_update(&state, buf + l, i - l);
blake2s_final(&state, hash);
if (memcmp(hash, blake2s_testvecs[i], outlen)) {
pr_err("blake2s init/update/final self-test %d: FAIL\n",
i + 1);
success = false;
}
}
return success;
}
static bool __init noinline_for_stack blake2s_random_test(void)
{
struct blake2s_state state;
bool success = true;
int i, l;
for (i = 0; i < 32; ++i) {
enum { TEST_ALIGNMENT = 16 };
u8 blocks[BLAKE2S_BLOCK_SIZE * 2 + TEST_ALIGNMENT - 1]
__aligned(TEST_ALIGNMENT);
u8 *unaligned_block = blocks + BLAKE2S_BLOCK_SIZE;
struct blake2s_state state1, state2;
get_random_bytes(blocks, sizeof(blocks));
get_random_bytes(&state, sizeof(state));
#if defined(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) && \
defined(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
memcpy(&state1, &state, sizeof(state1));
memcpy(&state2, &state, sizeof(state2));
blake2s_compress(&state1, blocks, 2, BLAKE2S_BLOCK_SIZE);
blake2s_compress_generic(&state2, blocks, 2, BLAKE2S_BLOCK_SIZE);
if (memcmp(&state1, &state2, sizeof(state1))) {
pr_err("blake2s random compress self-test %d: FAIL\n",
i + 1);
success = false;
}
#endif
memcpy(&state1, &state, sizeof(state1));
blake2s_compress(&state1, blocks, 1, BLAKE2S_BLOCK_SIZE);
for (l = 1; l < TEST_ALIGNMENT; ++l) {
memcpy(unaligned_block + l, blocks,
BLAKE2S_BLOCK_SIZE);
memcpy(&state2, &state, sizeof(state2));
blake2s_compress(&state2, unaligned_block + l, 1,
BLAKE2S_BLOCK_SIZE);
if (memcmp(&state1, &state2, sizeof(state1))) {
pr_err("blake2s random compress align %d self-test %d: FAIL\n",
l, i + 1);
success = false;
}
}
}
return success;
}
bool __init blake2s_selftest(void)
{
bool success;
success = blake2s_digest_test();
success &= blake2s_random_test();
return success;
}


@@ -8,15 +8,108 @@
*
*/
#include <crypto/internal/blake2s.h>
#include <crypto/blake2s.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
static const u8 blake2s_sigma[10][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
};
static inline void blake2s_increment_counter(struct blake2s_state *state,
const u32 inc)
{
state->t[0] += inc;
state->t[1] += (state->t[0] < inc);
}
static void __maybe_unused
blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc)
{
u32 m[16];
u32 v[16];
int i;
WARN_ON(IS_ENABLED(DEBUG) &&
(nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE));
while (nblocks > 0) {
blake2s_increment_counter(state, inc);
memcpy(m, block, BLAKE2S_BLOCK_SIZE);
le32_to_cpu_array(m, ARRAY_SIZE(m));
memcpy(v, state->h, 32);
v[ 8] = BLAKE2S_IV0;
v[ 9] = BLAKE2S_IV1;
v[10] = BLAKE2S_IV2;
v[11] = BLAKE2S_IV3;
v[12] = BLAKE2S_IV4 ^ state->t[0];
v[13] = BLAKE2S_IV5 ^ state->t[1];
v[14] = BLAKE2S_IV6 ^ state->f[0];
v[15] = BLAKE2S_IV7 ^ state->f[1];
#define G(r, i, a, b, c, d) do { \
a += b + m[blake2s_sigma[r][2 * i + 0]]; \
d = ror32(d ^ a, 16); \
c += d; \
b = ror32(b ^ c, 12); \
a += b + m[blake2s_sigma[r][2 * i + 1]]; \
d = ror32(d ^ a, 8); \
c += d; \
b = ror32(b ^ c, 7); \
} while (0)
#define ROUND(r) do { \
G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
G(r, 2, v[2], v[ 6], v[10], v[14]); \
G(r, 3, v[3], v[ 7], v[11], v[15]); \
G(r, 4, v[0], v[ 5], v[10], v[15]); \
G(r, 5, v[1], v[ 6], v[11], v[12]); \
G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
} while (0)
ROUND(0);
ROUND(1);
ROUND(2);
ROUND(3);
ROUND(4);
ROUND(5);
ROUND(6);
ROUND(7);
ROUND(8);
ROUND(9);
#undef G
#undef ROUND
for (i = 0; i < 8; ++i)
state->h[i] ^= v[i] ^ v[i + 8];
block += BLAKE2S_BLOCK_SIZE;
--nblocks;
}
}
#ifdef CONFIG_CRYPTO_LIB_BLAKE2S_ARCH
#include "blake2s.h" /* $(SRCARCH)/blake2s.h */
#else
#define blake2s_compress blake2s_compress_generic
#endif
static inline void blake2s_set_lastblock(struct blake2s_state *state)
{
state->f[0] = -1;
@@ -59,14 +152,14 @@ void blake2s_final(struct blake2s_state *state, u8 *out)
}
EXPORT_SYMBOL(blake2s_final);
#ifdef blake2s_mod_init_arch
static int __init blake2s_mod_init(void)
{
if (IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) &&
WARN_ON(!blake2s_selftest()))
return -ENODEV;
blake2s_mod_init_arch();
return 0;
}
subsys_initcall(blake2s_mod_init);
#endif
module_init(blake2s_mod_init);
MODULE_DESCRIPTION("BLAKE2s hash function");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");


@@ -0,0 +1,114 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The "hash function" used as the core of the ChaCha stream cipher (RFC7539)
*
* Copyright (C) 2015 Martin Willi
*/
#include <crypto/chacha.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/unaligned.h>
static void chacha_permute(struct chacha_state *state, int nrounds)
{
u32 *x = state->x;
int i;
/* whitelist the allowed round counts */
WARN_ON_ONCE(nrounds != 20 && nrounds != 12);
for (i = 0; i < nrounds; i += 2) {
x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16);
x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16);
x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16);
x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16);
x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12);
x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12);
x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12);
x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12);
x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8);
x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8);
x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8);
x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8);
x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7);
x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7);
x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7);
x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7);
x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16);
x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16);
x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16);
x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16);
x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12);
x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12);
x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 12);
x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12);
x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 8);
x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8);
x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8);
x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8);
x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7);
x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7);
x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7);
x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7);
}
}
/**
* chacha_block_generic - generate one keystream block and increment block counter
* @state: input state matrix
* @out: output keystream block
* @nrounds: number of rounds (20 or 12; 20 is recommended)
*
* This is the ChaCha core, a function from 64-byte strings to 64-byte strings.
* The caller has already converted the endianness of the input. This function
* also handles incrementing the block counter in the input matrix.
*/
void chacha_block_generic(struct chacha_state *state,
u8 out[CHACHA_BLOCK_SIZE], int nrounds)
{
struct chacha_state permuted_state = *state;
int i;
chacha_permute(&permuted_state, nrounds);
for (i = 0; i < ARRAY_SIZE(state->x); i++)
put_unaligned_le32(permuted_state.x[i] + state->x[i],
&out[i * sizeof(u32)]);
state->x[12]++;
}
EXPORT_SYMBOL(chacha_block_generic);
/**
* hchacha_block_generic - abbreviated ChaCha core, for XChaCha
* @state: input state matrix
* @out: the output words
* @nrounds: number of rounds (20 or 12; 20 is recommended)
*
* HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
* towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf). HChaCha
* skips the final addition of the initial state, and outputs only certain words
* of the state. It should not be used for streaming directly.
*/
void hchacha_block_generic(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds)
{
struct chacha_state permuted_state = *state;
chacha_permute(&permuted_state, nrounds);
memcpy(&out[0], &permuted_state.x[0], 16);
memcpy(&out[4], &permuted_state.x[12], 16);
}
EXPORT_SYMBOL(hchacha_block_generic);


@@ -1,114 +1,70 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The "hash function" used as the core of the ChaCha stream cipher (RFC7539)
* The ChaCha stream cipher (RFC7539)
*
* Copyright (C) 2015 Martin Willi
*/
#include <crypto/algapi.h> // for crypto_xor_cpy
#include <crypto/chacha.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/unaligned.h>
#include <linux/module.h>
static void chacha_permute(struct chacha_state *state, int nrounds)
static void __maybe_unused
chacha_crypt_generic(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
u32 *x = state->x;
int i;
/* aligned to potentially speed up crypto_xor() */
u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
/* whitelist the allowed round counts */
WARN_ON_ONCE(nrounds != 20 && nrounds != 12);
for (i = 0; i < nrounds; i += 2) {
x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16);
x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16);
x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16);
x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16);
x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12);
x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12);
x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12);
x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12);
x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8);
x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8);
x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8);
x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8);
x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7);
x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7);
x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7);
x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7);
x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16);
x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16);
x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16);
x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16);
x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12);
x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12);
x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 12);
x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12);
x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 8);
x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8);
x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8);
x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8);
x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7);
x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7);
x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7);
x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7);
while (bytes >= CHACHA_BLOCK_SIZE) {
chacha_block_generic(state, stream, nrounds);
crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
bytes -= CHACHA_BLOCK_SIZE;
dst += CHACHA_BLOCK_SIZE;
src += CHACHA_BLOCK_SIZE;
}
if (bytes) {
chacha_block_generic(state, stream, nrounds);
crypto_xor_cpy(dst, src, stream, bytes);
}
}
/**
* chacha_block_generic - generate one keystream block and increment block counter
* @state: input state matrix
* @out: output keystream block
* @nrounds: number of rounds (20 or 12; 20 is recommended)
*
* This is the ChaCha core, a function from 64-byte strings to 64-byte strings.
* The caller has already converted the endianness of the input. This function
* also handles incrementing the block counter in the input matrix.
*/
void chacha_block_generic(struct chacha_state *state,
u8 out[CHACHA_BLOCK_SIZE], int nrounds)
#ifdef CONFIG_CRYPTO_LIB_CHACHA_ARCH
#include "chacha.h" /* $(SRCARCH)/chacha.h */
#else
#define chacha_crypt_arch chacha_crypt_generic
#define hchacha_block_arch hchacha_block_generic
#endif
void chacha_crypt(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
struct chacha_state permuted_state = *state;
int i;
chacha_permute(&permuted_state, nrounds);
for (i = 0; i < ARRAY_SIZE(state->x); i++)
put_unaligned_le32(permuted_state.x[i] + state->x[i],
&out[i * sizeof(u32)]);
state->x[12]++;
chacha_crypt_arch(state, dst, src, bytes, nrounds);
}
EXPORT_SYMBOL(chacha_block_generic);
EXPORT_SYMBOL_GPL(chacha_crypt);
/**
* hchacha_block_generic - abbreviated ChaCha core, for XChaCha
* @state: input state matrix
* @out: the output words
* @nrounds: number of rounds (20 or 12; 20 is recommended)
*
* HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
* towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf). HChaCha
* skips the final addition of the initial state, and outputs only certain words
* of the state. It should not be used for streaming directly.
*/
void hchacha_block_generic(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds)
void hchacha_block(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds)
{
struct chacha_state permuted_state = *state;
chacha_permute(&permuted_state, nrounds);
memcpy(&out[0], &permuted_state.x[0], 16);
memcpy(&out[4], &permuted_state.x[12], 16);
hchacha_block_arch(state, out, nrounds);
}
EXPORT_SYMBOL(hchacha_block_generic);
EXPORT_SYMBOL_GPL(hchacha_block);
#ifdef chacha_mod_init_arch
static int __init chacha_mod_init(void)
{
chacha_mod_init_arch();
return 0;
}
subsys_initcall(chacha_mod_init);
static void __exit chacha_mod_exit(void)
{
}
module_exit(chacha_mod_exit);
#endif
MODULE_DESCRIPTION("ChaCha stream cipher (RFC7539)");
MODULE_LICENSE("GPL");


@@ -1,25 +0,0 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* This is an implementation of the Curve25519 ECDH algorithm, using either
* a 32-bit implementation or a 64-bit implementation with 128-bit integers,
* depending on what is supported by the target compiler.
*
* Information: https://cr.yp.to/ecdh.html
*/
#include <crypto/curve25519.h>
#include <linux/export.h>
#include <linux/module.h>
const u8 curve25519_null_point[CURVE25519_KEY_SIZE] __aligned(32) = { 0 };
const u8 curve25519_base_point[CURVE25519_KEY_SIZE] __aligned(32) = { 9 };
EXPORT_SYMBOL(curve25519_null_point);
EXPORT_SYMBOL(curve25519_base_point);
EXPORT_SYMBOL(curve25519_generic);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Curve25519 scalar multiplication");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");


@@ -2,32 +2,77 @@
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* This is an implementation of the Curve25519 ECDH algorithm, using either
* a 32-bit implementation or a 64-bit implementation with 128-bit integers,
* This is an implementation of the Curve25519 ECDH algorithm, using either an
* architecture-optimized implementation or a generic implementation. The
* generic implementation is either 32-bit, or 64-bit with 128-bit integers,
* depending on what is supported by the target compiler.
*
* Information: https://cr.yp.to/ecdh.html
*/
#include <crypto/curve25519.h>
#include <linux/module.h>
#include <crypto/utils.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/module.h>
static int __init curve25519_init(void)
static const u8 curve25519_null_point[CURVE25519_KEY_SIZE] __aligned(32) = { 0 };
static const u8 curve25519_base_point[CURVE25519_KEY_SIZE] __aligned(32) = { 9 };
#ifdef CONFIG_CRYPTO_LIB_CURVE25519_ARCH
#include "curve25519.h" /* $(SRCARCH)/curve25519.h */
#else
static void curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
const u8 basepoint[CURVE25519_KEY_SIZE])
{
if (IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) &&
WARN_ON(!curve25519_selftest()))
return -ENODEV;
curve25519_generic(mypublic, secret, basepoint);
}
static void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE])
{
curve25519_generic(pub, secret, curve25519_base_point);
}
#endif
bool __must_check
curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
const u8 basepoint[CURVE25519_KEY_SIZE])
{
curve25519_arch(mypublic, secret, basepoint);
return crypto_memneq(mypublic, curve25519_null_point,
CURVE25519_KEY_SIZE);
}
EXPORT_SYMBOL(curve25519);
bool __must_check
curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE])
{
if (unlikely(!crypto_memneq(secret, curve25519_null_point,
CURVE25519_KEY_SIZE)))
return false;
curve25519_base_arch(pub, secret);
return crypto_memneq(pub, curve25519_null_point, CURVE25519_KEY_SIZE);
}
EXPORT_SYMBOL(curve25519_generate_public);
#ifdef curve25519_mod_init_arch
static int __init curve25519_mod_init(void)
{
curve25519_mod_init_arch();
return 0;
}
subsys_initcall(curve25519_mod_init);
static void __exit curve25519_exit(void)
static void __exit curve25519_mod_exit(void)
{
}
module_init(curve25519_init);
module_exit(curve25519_exit);
module_exit(curve25519_mod_exit);
#endif
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Curve25519 scalar multiplication");
MODULE_DESCRIPTION("Curve25519 algorithm");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");


@@ -1,35 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The ChaCha stream cipher (RFC7539)
*
* Copyright (C) 2015 Martin Willi
*/
#include <crypto/algapi.h> // for crypto_xor_cpy
#include <crypto/chacha.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
void chacha_crypt_generic(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
/* aligned to potentially speed up crypto_xor() */
u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
while (bytes >= CHACHA_BLOCK_SIZE) {
chacha_block_generic(state, stream, nrounds);
crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
bytes -= CHACHA_BLOCK_SIZE;
dst += CHACHA_BLOCK_SIZE;
src += CHACHA_BLOCK_SIZE;
}
if (bytes) {
chacha_block_generic(state, stream, nrounds);
crypto_xor_cpy(dst, src, stream, bytes);
}
}
EXPORT_SYMBOL(chacha_crypt_generic);
MODULE_DESCRIPTION("ChaCha stream cipher (RFC7539)");
MODULE_LICENSE("GPL");

lib/crypto/md5.c (new file, 322 lines)

@@ -0,0 +1,322 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MD5 and HMAC-MD5 library functions
*
* md5_block_generic() is derived from the cryptoapi implementation, originally
* based on the public domain implementation written by Colin Plumb in 1993.
*
* Copyright (c) Cryptoapi developers.
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright 2025 Google LLC
*/
#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/unaligned.h>
#include <linux/wordpart.h>
static const struct md5_block_state md5_iv = {
.h = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 },
};
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
#define F4(x, y, z) (y ^ (x | ~z))
#define MD5STEP(f, w, x, y, z, in, s) \
(w += f(x, y, z) + in, w = (w << s | w >> (32 - s)) + x)
static void md5_block_generic(struct md5_block_state *state,
const u8 data[MD5_BLOCK_SIZE])
{
u32 in[MD5_BLOCK_WORDS];
u32 a, b, c, d;
memcpy(in, data, MD5_BLOCK_SIZE);
le32_to_cpu_array(in, ARRAY_SIZE(in));
a = state->h[0];
b = state->h[1];
c = state->h[2];
d = state->h[3];
MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
state->h[0] += a;
state->h[1] += b;
state->h[2] += c;
state->h[3] += d;
}
static void __maybe_unused md5_blocks_generic(struct md5_block_state *state,
const u8 *data, size_t nblocks)
{
do {
md5_block_generic(state, data);
data += MD5_BLOCK_SIZE;
} while (--nblocks);
}
#ifdef CONFIG_CRYPTO_LIB_MD5_ARCH
#include "md5.h" /* $(SRCARCH)/md5.h */
#else
#define md5_blocks md5_blocks_generic
#endif
void md5_init(struct md5_ctx *ctx)
{
ctx->state = md5_iv;
ctx->bytecount = 0;
}
EXPORT_SYMBOL_GPL(md5_init);
void md5_update(struct md5_ctx *ctx, const u8 *data, size_t len)
{
size_t partial = ctx->bytecount % MD5_BLOCK_SIZE;
ctx->bytecount += len;
if (partial + len >= MD5_BLOCK_SIZE) {
size_t nblocks;
if (partial) {
size_t l = MD5_BLOCK_SIZE - partial;
memcpy(&ctx->buf[partial], data, l);
data += l;
len -= l;
md5_blocks(&ctx->state, ctx->buf, 1);
}
nblocks = len / MD5_BLOCK_SIZE;
len %= MD5_BLOCK_SIZE;
if (nblocks) {
md5_blocks(&ctx->state, data, nblocks);
data += nblocks * MD5_BLOCK_SIZE;
}
partial = 0;
}
if (len)
memcpy(&ctx->buf[partial], data, len);
}
EXPORT_SYMBOL_GPL(md5_update);
static void __md5_final(struct md5_ctx *ctx, u8 out[MD5_DIGEST_SIZE])
{
u64 bitcount = ctx->bytecount << 3;
size_t partial = ctx->bytecount % MD5_BLOCK_SIZE;
ctx->buf[partial++] = 0x80;
if (partial > MD5_BLOCK_SIZE - 8) {
memset(&ctx->buf[partial], 0, MD5_BLOCK_SIZE - partial);
md5_blocks(&ctx->state, ctx->buf, 1);
partial = 0;
}
memset(&ctx->buf[partial], 0, MD5_BLOCK_SIZE - 8 - partial);
*(__le64 *)&ctx->buf[MD5_BLOCK_SIZE - 8] = cpu_to_le64(bitcount);
md5_blocks(&ctx->state, ctx->buf, 1);
cpu_to_le32_array(ctx->state.h, ARRAY_SIZE(ctx->state.h));
memcpy(out, ctx->state.h, MD5_DIGEST_SIZE);
}
void md5_final(struct md5_ctx *ctx, u8 out[MD5_DIGEST_SIZE])
{
__md5_final(ctx, out);
memzero_explicit(ctx, sizeof(*ctx));
}
EXPORT_SYMBOL_GPL(md5_final);
void md5(const u8 *data, size_t len, u8 out[MD5_DIGEST_SIZE])
{
struct md5_ctx ctx;
md5_init(&ctx);
md5_update(&ctx, data, len);
md5_final(&ctx, out);
}
EXPORT_SYMBOL_GPL(md5);
static void __hmac_md5_preparekey(struct md5_block_state *istate,
struct md5_block_state *ostate,
const u8 *raw_key, size_t raw_key_len)
{
union {
u8 b[MD5_BLOCK_SIZE];
unsigned long w[MD5_BLOCK_SIZE / sizeof(unsigned long)];
} derived_key = { 0 };
if (unlikely(raw_key_len > MD5_BLOCK_SIZE))
md5(raw_key, raw_key_len, derived_key.b);
else
memcpy(derived_key.b, raw_key, raw_key_len);
for (size_t i = 0; i < ARRAY_SIZE(derived_key.w); i++)
derived_key.w[i] ^= REPEAT_BYTE(HMAC_IPAD_VALUE);
*istate = md5_iv;
md5_blocks(istate, derived_key.b, 1);
for (size_t i = 0; i < ARRAY_SIZE(derived_key.w); i++)
derived_key.w[i] ^= REPEAT_BYTE(HMAC_OPAD_VALUE ^
HMAC_IPAD_VALUE);
*ostate = md5_iv;
md5_blocks(ostate, derived_key.b, 1);
memzero_explicit(&derived_key, sizeof(derived_key));
}
void hmac_md5_preparekey(struct hmac_md5_key *key,
const u8 *raw_key, size_t raw_key_len)
{
__hmac_md5_preparekey(&key->istate, &key->ostate, raw_key, raw_key_len);
}
EXPORT_SYMBOL_GPL(hmac_md5_preparekey);
void hmac_md5_init(struct hmac_md5_ctx *ctx, const struct hmac_md5_key *key)
{
ctx->hash_ctx.state = key->istate;
ctx->hash_ctx.bytecount = MD5_BLOCK_SIZE;
ctx->ostate = key->ostate;
}
EXPORT_SYMBOL_GPL(hmac_md5_init);
void hmac_md5_init_usingrawkey(struct hmac_md5_ctx *ctx,
const u8 *raw_key, size_t raw_key_len)
{
__hmac_md5_preparekey(&ctx->hash_ctx.state, &ctx->ostate,
raw_key, raw_key_len);
ctx->hash_ctx.bytecount = MD5_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(hmac_md5_init_usingrawkey);
void hmac_md5_final(struct hmac_md5_ctx *ctx, u8 out[MD5_DIGEST_SIZE])
{
/* Generate the padded input for the outer hash in ctx->hash_ctx.buf. */
__md5_final(&ctx->hash_ctx, ctx->hash_ctx.buf);
memset(&ctx->hash_ctx.buf[MD5_DIGEST_SIZE], 0,
MD5_BLOCK_SIZE - MD5_DIGEST_SIZE);
ctx->hash_ctx.buf[MD5_DIGEST_SIZE] = 0x80;
*(__le64 *)&ctx->hash_ctx.buf[MD5_BLOCK_SIZE - 8] =
cpu_to_le64(8 * (MD5_BLOCK_SIZE + MD5_DIGEST_SIZE));
/* Compute the outer hash, which gives the HMAC value. */
md5_blocks(&ctx->ostate, ctx->hash_ctx.buf, 1);
cpu_to_le32_array(ctx->ostate.h, ARRAY_SIZE(ctx->ostate.h));
memcpy(out, ctx->ostate.h, MD5_DIGEST_SIZE);
memzero_explicit(ctx, sizeof(*ctx));
}
EXPORT_SYMBOL_GPL(hmac_md5_final);
void hmac_md5(const struct hmac_md5_key *key,
const u8 *data, size_t data_len, u8 out[MD5_DIGEST_SIZE])
{
struct hmac_md5_ctx ctx;
hmac_md5_init(&ctx, key);
hmac_md5_update(&ctx, data, data_len);
hmac_md5_final(&ctx, out);
}
EXPORT_SYMBOL_GPL(hmac_md5);
void hmac_md5_usingrawkey(const u8 *raw_key, size_t raw_key_len,
const u8 *data, size_t data_len,
u8 out[MD5_DIGEST_SIZE])
{
struct hmac_md5_ctx ctx;
hmac_md5_init_usingrawkey(&ctx, raw_key, raw_key_len);
hmac_md5_update(&ctx, data, data_len);
hmac_md5_final(&ctx, out);
}
EXPORT_SYMBOL_GPL(hmac_md5_usingrawkey);
#ifdef md5_mod_init_arch
static int __init md5_mod_init(void)
{
md5_mod_init_arch();
return 0;
}
subsys_initcall(md5_mod_init);
static void __exit md5_mod_exit(void)
{
}
module_exit(md5_mod_exit);
#endif
MODULE_DESCRIPTION("MD5 and HMAC-MD5 library functions");
MODULE_LICENSE("GPL");

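For reference, a minimal usage sketch of the one-shot helpers exported by the new lib/crypto md5.c above. It assumes the declarations are exposed via <crypto/md5.h> (as with the other lib/crypto hashes); the function and buffer names in the example are illustrative only, not part of this series.

/*
 * Hypothetical callers of the MD5 library API shown above.  The helper
 * names and parameters here are illustrative; only md5(), hmac_md5(),
 * hmac_md5_preparekey() and hmac_md5_usingrawkey() come from the code above.
 */
#include <crypto/md5.h>		/* assumed location of the declarations */

static void example_md5_usage(const u8 *msg, size_t msg_len,
			      const u8 *key, size_t key_len,
			      u8 digest[MD5_DIGEST_SIZE],
			      u8 mac[MD5_DIGEST_SIZE])
{
	/* One-shot hash: wraps md5_init() + md5_update() + md5_final(). */
	md5(msg, msg_len, digest);

	/* One-shot HMAC with an unprepared ("raw") key of any length. */
	hmac_md5_usingrawkey(key, key_len, msg, msg_len, mac);
}

static void example_hmac_md5_prepared(const u8 *key, size_t key_len,
				      const u8 *msg, size_t msg_len,
				      u8 mac[MD5_DIGEST_SIZE])
{
	struct hmac_md5_key prepared;

	/* Prepare the key once, then reuse it for many messages. */
	hmac_md5_preparekey(&prepared, key, key_len);
	hmac_md5(&prepared, msg, msg_len, mac);
}
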
@@ -1,12 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
config CRYPTO_CHACHA_MIPS
tristate
depends on CPU_MIPS32_R2
default CRYPTO_LIB_CHACHA
select CRYPTO_ARCH_HAVE_LIB_CHACHA
config CRYPTO_POLY1305_MIPS
tristate
default CRYPTO_LIB_POLY1305
select CRYPTO_ARCH_HAVE_LIB_POLY1305

@@ -1,19 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o
chacha-mips-y := chacha-core.o chacha-glue.o
AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o
poly1305-mips-y := poly1305-core.o poly1305-glue.o
perlasm-flavour-$(CONFIG_32BIT) := o32
perlasm-flavour-$(CONFIG_64BIT) := 64
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@)
$(obj)/poly1305-core.S: $(src)/poly1305-mips.pl FORCE
$(call if_changed,perlasm)
targets += poly1305-core.S

@@ -1,29 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ChaCha and HChaCha functions (MIPS optimized)
*
* Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*/
#include <crypto/chacha.h>
#include <linux/kernel.h>
#include <linux/module.h>
asmlinkage void chacha_crypt_arch(struct chacha_state *state,
u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
EXPORT_SYMBOL(chacha_crypt_arch);
asmlinkage void hchacha_block_arch(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds);
EXPORT_SYMBOL(hchacha_block_arch);
bool chacha_is_arch_optimized(void)
{
return true;
}
EXPORT_SYMBOL(chacha_is_arch_optimized);
MODULE_DESCRIPTION("ChaCha and HChaCha functions (MIPS optimized)");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

lib/crypto/mips/chacha.h (new file)
@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* ChaCha and HChaCha functions (MIPS optimized)
*
* Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*/
#include <linux/kernel.h>
asmlinkage void chacha_crypt_arch(struct chacha_state *state,
u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
asmlinkage void hchacha_block_arch(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds);

lib/crypto/mips/md5.h (new file)
@@ -0,0 +1,65 @@
/*
* Cryptographic API.
*
* MD5 Message Digest Algorithm (RFC1321).
*
* Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
*
* Based on crypto/md5.c, which is:
*
* Derived from cryptoapi implementation, originally based on the
* public domain implementation written by Colin Plumb in 1993.
*
* Copyright (c) Cryptoapi developers.
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <asm/octeon/crypto.h>
#include <asm/octeon/octeon.h>
/*
* We pass everything as 64-bit. OCTEON can handle misaligned data.
*/
static void md5_blocks(struct md5_block_state *state,
const u8 *data, size_t nblocks)
{
struct octeon_cop2_state cop2_state;
u64 *state64 = (u64 *)state;
unsigned long flags;
if (!octeon_has_crypto())
return md5_blocks_generic(state, data, nblocks);
cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
flags = octeon_crypto_enable(&cop2_state);
write_octeon_64bit_hash_dword(state64[0], 0);
write_octeon_64bit_hash_dword(state64[1], 1);
do {
const u64 *block = (const u64 *)data;
write_octeon_64bit_block_dword(block[0], 0);
write_octeon_64bit_block_dword(block[1], 1);
write_octeon_64bit_block_dword(block[2], 2);
write_octeon_64bit_block_dword(block[3], 3);
write_octeon_64bit_block_dword(block[4], 4);
write_octeon_64bit_block_dword(block[5], 5);
write_octeon_64bit_block_dword(block[6], 6);
octeon_md5_start(block[7]);
data += MD5_BLOCK_SIZE;
} while (--nblocks);
state64[0] = read_octeon_64bit_hash_dword(0);
state64[1] = read_octeon_64bit_hash_dword(1);
octeon_crypto_disable(&cop2_state, flags);
le32_to_cpu_array(state->h, ARRAY_SIZE(state->h));
}

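The OCTEON header above illustrates the new per-architecture override hook: when CONFIG_CRYPTO_LIB_MD5_ARCH is set, md5.c pulls in $(SRCARCH)/md5.h, which supplies md5_blocks() and falls back to md5_blocks_generic() when the hardware path is unavailable. A skeletal sketch of such a header for a hypothetical architecture follows; my_arch_has_md5_unit() and my_arch_md5_transform() are placeholders, not existing kernel interfaces.

/*
 * Skeleton of a hypothetical $(SRCARCH)/md5.h, mirroring the OCTEON header
 * above.  my_arch_has_md5_unit() and my_arch_md5_transform() stand in for an
 * architecture's real feature check and assembly routine.
 */
bool my_arch_has_md5_unit(void);	/* placeholder CPU-feature check */

asmlinkage void my_arch_md5_transform(struct md5_block_state *state,
				      const u8 *data, size_t nblocks);

static void md5_blocks(struct md5_block_state *state,
		       const u8 *data, size_t nblocks)
{
	if (!my_arch_has_md5_unit())
		return md5_blocks_generic(state, data, nblocks);

	my_arch_md5_transform(state, data, nblocks);
}

An architecture that needs one-time setup (for example, probing CPU features) can additionally define md5_mod_init_arch(), which md5.c calls from its subsys_initcall when that macro is defined, as seen in the #ifdef md5_mod_init_arch block above.
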
@@ -1,33 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS
*
* Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
#include <crypto/internal/poly1305.h>
#include <linux/cpufeature.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/unaligned.h>
asmlinkage void poly1305_block_init_arch(
struct poly1305_block_state *state,
const u8 raw_key[POLY1305_BLOCK_SIZE]);
EXPORT_SYMBOL_GPL(poly1305_block_init_arch);
asmlinkage void poly1305_blocks_arch(struct poly1305_block_state *state,
const u8 *src, u32 len, u32 hibit);
EXPORT_SYMBOL_GPL(poly1305_blocks_arch);
asmlinkage void poly1305_emit_arch(const struct poly1305_state *state,
u8 digest[POLY1305_DIGEST_SIZE],
const u32 nonce[4]);
EXPORT_SYMBOL_GPL(poly1305_emit_arch);
bool poly1305_is_arch_optimized(void)
{
return true;
}
EXPORT_SYMBOL(poly1305_is_arch_optimized);
MODULE_DESCRIPTION("Poly1305 transform (MIPS accelerated");
MODULE_LICENSE("GPL v2");

@@ -93,9 +93,7 @@ $code.=<<___;
#endif
#ifdef __KERNEL__
# define poly1305_init poly1305_block_init_arch
# define poly1305_blocks poly1305_blocks_arch
# define poly1305_emit poly1305_emit_arch
# define poly1305_init poly1305_block_init
#endif
#if defined(__MIPSEB__) && !defined(MIPSEB)
@@ -565,9 +563,7 @@ $code.=<<___;
#endif
#ifdef __KERNEL__
# define poly1305_init poly1305_block_init_arch
# define poly1305_blocks poly1305_blocks_arch
# define poly1305_emit poly1305_emit_arch
# define poly1305_init poly1305_block_init
#endif
#if defined(__MIPSEB__) && !defined(MIPSEB)

@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS
*
* Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
asmlinkage void poly1305_block_init(struct poly1305_block_state *state,
const u8 raw_key[POLY1305_BLOCK_SIZE]);
asmlinkage void poly1305_blocks(struct poly1305_block_state *state,
const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_emit(const struct poly1305_state *state,
u8 digest[POLY1305_DIGEST_SIZE],
const u32 nonce[4]);

@@ -1,25 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Poly1305 authenticator algorithm, RFC7539
*
* Copyright (C) 2015 Martin Willi
*
* Based on public domain code by Andrew Moon and Daniel J. Bernstein.
*/
#include <crypto/internal/poly1305.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
void poly1305_block_init_generic(struct poly1305_block_state *desc,
const u8 raw_key[POLY1305_BLOCK_SIZE])
{
poly1305_core_init(&desc->h);
poly1305_core_setkey(&desc->core_r, raw_key);
}
EXPORT_SYMBOL_GPL(poly1305_block_init_generic);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
MODULE_DESCRIPTION("Poly1305 algorithm (generic implementation)");

@@ -7,7 +7,6 @@
* Based on public domain code by Andrew Moon and Daniel J. Bernstein.
*/
#include <crypto/internal/blockhash.h>
#include <crypto/internal/poly1305.h>
#include <linux/export.h>
#include <linux/kernel.h>
@@ -15,6 +14,14 @@
#include <linux/string.h>
#include <linux/unaligned.h>
#ifdef CONFIG_CRYPTO_LIB_POLY1305_ARCH
#include "poly1305.h" /* $(SRCARCH)/poly1305.h */
#else
#define poly1305_block_init poly1305_block_init_generic
#define poly1305_blocks poly1305_blocks_generic
#define poly1305_emit poly1305_emit_generic
#endif
void poly1305_init(struct poly1305_desc_ctx *desc,
const u8 key[POLY1305_KEY_SIZE])
{
@@ -23,28 +30,40 @@ void poly1305_init(struct poly1305_desc_ctx *desc,
desc->s[2] = get_unaligned_le32(key + 24);
desc->s[3] = get_unaligned_le32(key + 28);
desc->buflen = 0;
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
poly1305_block_init_arch(&desc->state, key);
else
poly1305_block_init_generic(&desc->state, key);
poly1305_block_init(&desc->state, key);
}
EXPORT_SYMBOL(poly1305_init);
static inline void poly1305_blocks(struct poly1305_block_state *state,
const u8 *src, unsigned int len)
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
poly1305_blocks_arch(state, src, len, 1);
else
poly1305_blocks_generic(state, src, len, 1);
}
void poly1305_update(struct poly1305_desc_ctx *desc,
const u8 *src, unsigned int nbytes)
{
desc->buflen = BLOCK_HASH_UPDATE(poly1305_blocks, &desc->state,
src, nbytes, POLY1305_BLOCK_SIZE,
desc->buf, desc->buflen);
if (desc->buflen + nbytes >= POLY1305_BLOCK_SIZE) {
unsigned int bulk_len;
if (desc->buflen) {
unsigned int l = POLY1305_BLOCK_SIZE - desc->buflen;
memcpy(&desc->buf[desc->buflen], src, l);
src += l;
nbytes -= l;
poly1305_blocks(&desc->state, desc->buf,
POLY1305_BLOCK_SIZE, 1);
desc->buflen = 0;
}
bulk_len = round_down(nbytes, POLY1305_BLOCK_SIZE);
nbytes %= POLY1305_BLOCK_SIZE;
if (bulk_len) {
poly1305_blocks(&desc->state, src, bulk_len, 1);
src += bulk_len;
}
}
if (nbytes) {
memcpy(&desc->buf[desc->buflen], src, nbytes);
desc->buflen += nbytes;
}
}
EXPORT_SYMBOL(poly1305_update);
@@ -54,22 +73,28 @@ void poly1305_final(struct poly1305_desc_ctx *desc, u8 *dst)
desc->buf[desc->buflen++] = 1;
memset(desc->buf + desc->buflen, 0,
POLY1305_BLOCK_SIZE - desc->buflen);
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
poly1305_blocks_arch(&desc->state, desc->buf,
POLY1305_BLOCK_SIZE, 0);
else
poly1305_blocks_generic(&desc->state, desc->buf,
POLY1305_BLOCK_SIZE, 0);
poly1305_blocks(&desc->state, desc->buf, POLY1305_BLOCK_SIZE,
0);
}
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
poly1305_emit_arch(&desc->state.h, dst, desc->s);
else
poly1305_emit_generic(&desc->state.h, dst, desc->s);
poly1305_emit(&desc->state.h, dst, desc->s);
*desc = (struct poly1305_desc_ctx){};
}
EXPORT_SYMBOL(poly1305_final);
#ifdef poly1305_mod_init_arch
static int __init poly1305_mod_init(void)
{
poly1305_mod_init_arch();
return 0;
}
subsys_initcall(poly1305_mod_init);
static void __exit poly1305_mod_exit(void)
{
}
module_exit(poly1305_mod_exit);
#endif
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
MODULE_DESCRIPTION("Poly1305 authenticator algorithm, RFC7539");

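For context, a minimal sketch of the caller-facing Poly1305 API whose internals are consolidated above (poly1305_init(), poly1305_update(), poly1305_final()). It assumes the declarations come from <crypto/poly1305.h>; the key and message names are illustrative.

/*
 * Hypothetical caller of the Poly1305 library API refactored above.  The
 * one-time key must be unique per message, per RFC 7539.
 */
#include <crypto/poly1305.h>	/* assumed location of the declarations */

static void example_poly1305_mac(const u8 key[POLY1305_KEY_SIZE],
				 const u8 *msg, unsigned int msg_len,
				 u8 mac[POLY1305_DIGEST_SIZE])
{
	struct poly1305_desc_ctx desc;

	poly1305_init(&desc, key);	/* splits the key into r and s parts */
	poly1305_update(&desc, msg, msg_len);
	poly1305_final(&desc, mac);	/* emits the tag and wipes the descriptor */
}
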
@@ -1,16 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
config CRYPTO_CHACHA20_P10
tristate
depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
default CRYPTO_LIB_CHACHA
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
config CRYPTO_POLY1305_P10
tristate
depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
depends on BROKEN # Needs to be fixed to work in softirq context
default CRYPTO_LIB_POLY1305
select CRYPTO_ARCH_HAVE_LIB_POLY1305
select CRYPTO_LIB_POLY1305_GENERIC

@@ -1,7 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o
chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o
obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o
poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o

Some files were not shown because too many files have changed in this diff.