mirror of https://github.com/torvalds/linux.git
lib/crc: arm64: add NEON accelerated CRC64-NVMe implementation
Implement an optimized CRC64 (NVMe) algorithm for ARM64 using NEON
Polynomial Multiply Long (PMULL) instructions. The generic
shift-and-XOR software implementation is slow, which creates a
bottleneck in NVMe and other storage subsystems. The acceleration is
implemented using C intrinsics (<arm_neon.h>) rather than raw assembly
for better readability and maintainability.

Key highlights of this implementation:

- Uses 4KB chunking inside scoped_ksimd() to avoid preemption latency
  spikes on large buffers.
- Pre-calculates and loads fold constants via vld1q_u64() to minimize
  register spilling.
- Benchmarks show the break-even point against the generic
  implementation is around 128 bytes, so the PMULL path is enabled only
  for len >= 128.

Performance results (kunit crc_benchmark on Cortex-A72):

- Generic (len=4096): ~268 MB/s
- PMULL (len=4096):  ~1556 MB/s (nearly 6x improvement)

Signed-off-by: Demian Shulhan <demyansh@gmail.com>
Link: https://lore.kernel.org/r/20260329074338.1053550-1-demyansh@gmail.com
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
parent 6e4d63e899
commit 63432fd625
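For context, the following stand-alone sketch illustrates the bit-at-a-time
(shift-and-XOR) style of CRC-64/NVMe computation that the PMULL path
outperforms on large buffers. It is illustrative only, not the kernel's
generic implementation: it assumes the standard reflected CRC-64/NVMe
generator constant 0x9a6c9329ac4bc9b5 and leaves the initial and final bit
inversions to the caller, matching the convention of the crc64_nvme_*()
helpers in the diff below.

#include <stddef.h>
#include <stdint.h>

/* Reflected (LSB-first) form of the CRC-64/NVMe generator polynomial. */
#define CRC64_NVME_POLY_REFLECTED 0x9a6c9329ac4bc9b5ULL

/* Bit-serial reference: the caller supplies the running CRC state and
 * applies any initial/final inversion itself. */
static uint64_t crc64_nvme_bitwise(uint64_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^
			      ((crc & 1) ? CRC64_NVME_POLY_REFLECTED : 0);
	}
	return crc;
}

Each byte costs a dependent chain of eight shift/XOR steps here, which is why
a carry-less-multiply fold that consumes 16 bytes per pair of PMULL
instructions is so much faster.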
@@ -82,6 +82,7 @@ config CRC64
config CRC64_ARCH
	bool
	depends on CRC64 && CRC_OPTIMIZATIONS
	default y if ARM64
	default y if RISCV && RISCV_ISA_ZBC && 64BIT
	default y if X86_64

@@ -38,9 +38,15 @@ obj-$(CONFIG_CRC64) += crc64.o
crc64-y := crc64-main.o
ifeq ($(CONFIG_CRC64_ARCH),y)
CFLAGS_crc64-main.o += -I$(src)/$(SRCARCH)

CFLAGS_REMOVE_arm64/crc64-neon-inner.o += -mgeneral-regs-only
CFLAGS_arm64/crc64-neon-inner.o += -ffreestanding -march=armv8-a+crypto
CFLAGS_arm64/crc64-neon-inner.o += -isystem $(shell $(CC) -print-file-name=include)
crc64-$(CONFIG_ARM64) += arm64/crc64-neon-inner.o

crc64-$(CONFIG_RISCV) += riscv/crc64_lsb.o riscv/crc64_msb.o
crc64-$(CONFIG_X86) += x86/crc64-pclmul.o
endif
endif # CONFIG_CRC64_ARCH

obj-y += tests/

@@ -0,0 +1,78 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Accelerated CRC64 (NVMe) using ARM NEON C intrinsics
 */

#include <linux/types.h>
#include <asm/neon-intrinsics.h>

u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len);

#define GET_P64_0(v) ((poly64_t)vgetq_lane_u64(vreinterpretq_u64_p64(v), 0))
#define GET_P64_1(v) ((poly64_t)vgetq_lane_u64(vreinterpretq_u64_p64(v), 1))

/* x^191 mod G, x^127 mod G */
static const u64 fold_consts_val[2] = { 0xeadc41fd2ba3d420ULL,
					0x21e9761e252621acULL };
/* floor(x^127 / G), (G - x^64) / x */
static const u64 bconsts_val[2] = { 0x27ecfa329aef9f77ULL,
				    0x34d926535897936aULL };

u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len)
{
	uint64x2_t v0_u64 = { crc, 0 };
	poly64x2_t v0 = vreinterpretq_p64_u64(v0_u64);
	poly64x2_t fold_consts =
		vreinterpretq_p64_u64(vld1q_u64(fold_consts_val));
	poly64x2_t v1 = vreinterpretq_p64_u8(vld1q_u8(p));

	v0 = vreinterpretq_p64_u8(veorq_u8(vreinterpretq_u8_p64(v0),
					   vreinterpretq_u8_p64(v1)));
	p += 16;
	len -= 16;

	do {
		v1 = vreinterpretq_p64_u8(vld1q_u8(p));

		poly128_t v2 = vmull_high_p64(fold_consts, v0);
		poly128_t v0_128 =
			vmull_p64(GET_P64_0(fold_consts), GET_P64_0(v0));

		uint8x16_t x0 = veorq_u8(vreinterpretq_u8_p128(v0_128),
					 vreinterpretq_u8_p128(v2));

		x0 = veorq_u8(x0, vreinterpretq_u8_p64(v1));
		v0 = vreinterpretq_p64_u8(x0);

		p += 16;
		len -= 16;
	} while (len >= 16);

	/* Multiply the 128-bit value by x^64 and reduce it back to 128 bits. */
	poly64x2_t v7 = vreinterpretq_p64_u64((uint64x2_t){ 0, 0 });
	poly128_t v1_128 = vmull_p64(GET_P64_1(fold_consts), GET_P64_0(v0));

	uint8x16_t ext_v0 =
		vextq_u8(vreinterpretq_u8_p64(v0), vreinterpretq_u8_p64(v7), 8);
	uint8x16_t x0 = veorq_u8(ext_v0, vreinterpretq_u8_p128(v1_128));

	v0 = vreinterpretq_p64_u8(x0);

	/* Final Barrett reduction */
	poly64x2_t bconsts = vreinterpretq_p64_u64(vld1q_u64(bconsts_val));

	v1_128 = vmull_p64(GET_P64_0(bconsts), GET_P64_0(v0));

	poly64x2_t v1_64 = vreinterpretq_p64_u8(vreinterpretq_u8_p128(v1_128));
	poly128_t v3_128 = vmull_p64(GET_P64_1(bconsts), GET_P64_0(v1_64));

	x0 = veorq_u8(vreinterpretq_u8_p64(v0), vreinterpretq_u8_p128(v3_128));

	uint8x16_t ext_v2 = vextq_u8(vreinterpretq_u8_p64(v7),
				     vreinterpretq_u8_p128(v1_128), 8);

	x0 = veorq_u8(x0, ext_v2);

	v0 = vreinterpretq_p64_u8(x0);
	return vgetq_lane_u64(vreinterpretq_u64_p64(v0), 1);
}

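As a reading aid (not part of the commit), the loop above is the standard
128-bit carry-less fold. Writing the 128-bit accumulator as
A(x) = A_hi(x)*x^64 + A_lo(x) and the next 16 message bytes as B(x),
appending a block shifts the accumulated polynomial up by 128 bits, and the
precomputed residues keep the result within 128 bits:

\[
A(x)\,x^{128} + B(x) \equiv A_{\mathrm{hi}}(x)\,(x^{192} \bmod G(x))
	+ A_{\mathrm{lo}}(x)\,(x^{128} \bmod G(x)) + B(x) \pmod{G(x)}
\]

In the bit-reflected representation used here, a 64x64 carry-less product of
reflected operands lands one bit low, so the constants actually stored are
x^191 mod G and x^127 mod G, exactly the two fold_consts_val entries. After
the loop, the remaining 128 bits are reduced to the 64-bit CRC by Barrett
reduction, which is why bconsts_val carries floor(x^127 / G) together with
the generator polynomial in the reduced form (G - x^64)/x noted in the
comment, so that each constant fits in a 64-bit lane.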
@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * CRC64 using ARM64 PMULL instructions
 */

#include <linux/cpufeature.h>
#include <asm/simd.h>
#include <linux/minmax.h>
#include <linux/sizes.h>

u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len);

#define crc64_be_arch crc64_be_generic

static inline u64 crc64_nvme_arch(u64 crc, const u8 *p, size_t len)
{
	if (len >= 128 && cpu_have_named_feature(PMULL) &&
	    likely(may_use_simd())) {
		do {
			size_t chunk = min_t(size_t, len & ~15, SZ_4K);

			scoped_ksimd()
				crc = crc64_nvme_arm64_c(crc, p, chunk);

			p += chunk;
			len -= chunk;
		} while (len >= 128);
	}
	return crc64_nvme_generic(crc, p, len);
}

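A small stand-alone illustration (not kernel code) of the chunking policy in
crc64_nvme_arch() above: SIMD work is handed over in multiple-of-16-byte
pieces capped at 4 KiB per scoped_ksimd() section, and the remaining short
tail goes to the generic implementation. The buffer length 10000 is an
arbitrary example, and the feature/may_use_simd() checks are omitted here.

#include <stddef.h>
#include <stdio.h>

#define SZ_4K 4096

int main(void)
{
	size_t len = 10000;	/* arbitrary example length (assumption) */

	/* Mirrors the loop in crc64_nvme_arch(): each iteration would run
	 * one scoped_ksimd() section of PMULL folding over 'chunk' bytes. */
	while (len >= 128) {
		size_t chunk = len & ~(size_t)15;	/* round down to 16 */

		if (chunk > SZ_4K)			/* cap at 4 KiB */
			chunk = SZ_4K;
		printf("PMULL chunk: %zu bytes\n", chunk);
		len -= chunk;
	}
	printf("generic tail: %zu bytes\n", len);
	return 0;
}

For len = 10000 this prints chunks of 4096, 4096 and 1808 bytes with a 0-byte
tail; the 4 KiB cap bounds how long any single scoped_ksimd() section keeps
the CPU out of the scheduler, which is the preemption-latency point made in
the commit message.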