lib/crc: Use underlying functions instead of crypto_simd_usable()
Since crc_kunit now tests the fallback code paths without using
crypto_simd_disabled_for_test, make the CRC code just use the underlying
may_use_simd() and irq_fpu_usable() functions directly instead of
crypto_simd_usable(). This eliminates an unnecessary layer. Take the
opportunity to add likely() and unlikely() annotations as well.

Link: https://lore.kernel.org/r/20250811182631.376302-4-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
parent 842ec21357
commit c2a0c5156a
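For context, crypto_simd_usable() was only a thin wrapper over the
per-architecture check; roughly (a simplified sketch, not the verbatim
definition in include/crypto/internal/simd.h):

/*
 * Simplified sketch of the layer this patch removes. Outside of the
 * crypto self-tests, crypto_simd_disabled_for_test is never set, so
 * the wrapper reduces to may_use_simd() (irq_fpu_usable() on x86).
 */
static inline bool crypto_simd_usable(void)
{
	return may_use_simd() && !crypto_simd_disabled_for_test;
}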
diff --git a/lib/crc/arm/crc-t10dif.h b/lib/crc/arm/crc-t10dif.h
@@ -5,8 +5,6 @@
  * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
  */
 
-#include <crypto/internal/simd.h>
-
 #include <asm/neon.h>
 #include <asm/simd.h>
 
@@ -23,7 +21,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
 {
 	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
 		if (static_branch_likely(&have_pmull)) {
-			if (crypto_simd_usable()) {
+			if (likely(may_use_simd())) {
 				kernel_neon_begin();
 				crc = crc_t10dif_pmull64(crc, data, length);
 				kernel_neon_end();
@@ -31,7 +29,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
 		}
 	} else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
 		   static_branch_likely(&have_neon) &&
-		   crypto_simd_usable()) {
+		   likely(may_use_simd())) {
 		u8 buf[16] __aligned(16);
 
 		kernel_neon_begin();
diff --git a/lib/crc/arm/crc32.h b/lib/crc/arm/crc32.h
@@ -7,8 +7,6 @@
 
 #include <linux/cpufeature.h>
 
-#include <crypto/internal/simd.h>
-
 #include <asm/hwcap.h>
 #include <asm/neon.h>
 #include <asm/simd.h>
@@ -34,7 +32,7 @@ static inline u32 crc32_le_scalar(u32 crc, const u8 *p, size_t len)
 static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
 {
 	if (len >= PMULL_MIN_LEN + 15 &&
-	    static_branch_likely(&have_pmull) && crypto_simd_usable()) {
+	    static_branch_likely(&have_pmull) && likely(may_use_simd())) {
 		size_t n = -(uintptr_t)p & 15;
 
 		/* align p to 16-byte boundary */
@@ -63,7 +61,7 @@ static inline u32 crc32c_scalar(u32 crc, const u8 *p, size_t len)
 static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 {
 	if (len >= PMULL_MIN_LEN + 15 &&
-	    static_branch_likely(&have_pmull) && crypto_simd_usable()) {
+	    static_branch_likely(&have_pmull) && likely(may_use_simd())) {
 		size_t n = -(uintptr_t)p & 15;
 
 		/* align p to 16-byte boundary */
diff --git a/lib/crc/arm64/crc-t10dif.h b/lib/crc/arm64/crc-t10dif.h
@@ -7,8 +7,6 @@
 
 #include <linux/cpufeature.h>
 
-#include <crypto/internal/simd.h>
-
 #include <asm/neon.h>
 #include <asm/simd.h>
 
@@ -25,7 +23,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
 {
 	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
 		if (static_branch_likely(&have_pmull)) {
-			if (crypto_simd_usable()) {
+			if (likely(may_use_simd())) {
 				kernel_neon_begin();
 				crc = crc_t10dif_pmull_p64(crc, data, length);
 				kernel_neon_end();
@@ -33,7 +31,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
 		}
 	} else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
 		   static_branch_likely(&have_asimd) &&
-		   crypto_simd_usable()) {
+		   likely(may_use_simd())) {
 		u8 buf[16];
 
 		kernel_neon_begin();
diff --git a/lib/crc/arm64/crc32.h b/lib/crc/arm64/crc32.h
@@ -5,8 +5,6 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 
-#include <crypto/internal/simd.h>
-
 // The minimum input length to consider the 4-way interleaved code path
 static const size_t min_len = 1024;
 
@@ -23,7 +21,8 @@ static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
 	if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
 		return crc32_le_base(crc, p, len);
 
-	if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+	if (len >= min_len && cpu_have_named_feature(PMULL) &&
+	    likely(may_use_simd())) {
 		kernel_neon_begin();
 		crc = crc32_le_arm64_4way(crc, p, len);
 		kernel_neon_end();
@@ -43,7 +42,8 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 	if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
 		return crc32c_base(crc, p, len);
 
-	if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+	if (len >= min_len && cpu_have_named_feature(PMULL) &&
+	    likely(may_use_simd())) {
 		kernel_neon_begin();
 		crc = crc32c_le_arm64_4way(crc, p, len);
 		kernel_neon_end();
@@ -63,7 +63,8 @@ static inline u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
 	if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
 		return crc32_be_base(crc, p, len);
 
-	if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+	if (len >= min_len && cpu_have_named_feature(PMULL) &&
+	    likely(may_use_simd())) {
 		kernel_neon_begin();
 		crc = crc32_be_arm64_4way(crc, p, len);
 		kernel_neon_end();
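The arm and arm64 hunks above all share the same guard shape. A minimal
standalone sketch of that pattern (my_crc_neon(), my_crc_scalar(), and
MY_MIN_LEN are hypothetical placeholders; may_use_simd(),
kernel_neon_begin(), and kernel_neon_end() are the real kernel APIs):

/*
 * Minimal sketch of the guarded-SIMD pattern used in the diffs above:
 * take the vector path only when the input is long enough to amortize
 * the NEON save/restore cost and the NEON unit is currently usable.
 */
static u32 my_crc_update(u32 crc, const u8 *p, size_t len)
{
	if (len >= MY_MIN_LEN && likely(may_use_simd())) {
		kernel_neon_begin();		/* claim the NEON unit */
		crc = my_crc_neon(crc, p, len);	/* vectorized main loop */
		kernel_neon_end();
		return crc;
	}
	return my_crc_scalar(crc, p, len);	/* portable fallback */
}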
diff --git a/lib/crc/powerpc/crc-t10dif.h b/lib/crc/powerpc/crc-t10dif.h
@@ -6,8 +6,8 @@
  * [based on crc32c-vpmsum_glue.c]
  */
 
+#include <asm/simd.h>
 #include <asm/switch_to.h>
-#include <crypto/internal/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/jump_label.h>
 #include <linux/preempt.h>
@@ -29,7 +29,8 @@ static inline u16 crc_t10dif_arch(u16 crci, const u8 *p, size_t len)
 	u32 crc = crci;
 
 	if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
-	    !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
+	    !static_branch_likely(&have_vec_crypto) ||
+	    unlikely(!may_use_simd()))
 		return crc_t10dif_generic(crc, p, len);
 
 	if ((unsigned long)p & VMX_ALIGN_MASK) {
diff --git a/lib/crc/powerpc/crc32.h b/lib/crc/powerpc/crc32.h
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
+#include <asm/simd.h>
 #include <asm/switch_to.h>
-#include <crypto/internal/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/jump_label.h>
 #include <linux/preempt.h>
@@ -24,7 +24,8 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 	unsigned int tail;
 
 	if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
-	    !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
+	    !static_branch_likely(&have_vec_crypto) ||
+	    unlikely(!may_use_simd()))
 		return crc32c_base(crc, p, len);
 
 	if ((unsigned long)p & VMX_ALIGN_MASK) {
diff --git a/lib/crc/x86/crc-pclmul-template.h b/lib/crc/x86/crc-pclmul-template.h
@@ -12,7 +12,6 @@
 
 #include <asm/cpufeatures.h>
 #include <asm/simd.h>
-#include <crypto/internal/simd.h>
 #include <linux/static_call.h>
 #include "crc-pclmul-consts.h"
 
@@ -57,7 +56,7 @@ static inline bool have_avx512(void)
 #define CRC_PCLMUL(crc, p, len, prefix, consts, have_pclmulqdq)	\
 do {									\
 	if ((len) >= 16 && static_branch_likely(&(have_pclmulqdq)) &&	\
-	    crypto_simd_usable()) {					\
+	    likely(irq_fpu_usable())) {					\
 		const void *consts_ptr;					\
 									\
 		consts_ptr = (consts).fold_across_128_bits_consts;	\
diff --git a/lib/crc/x86/crc32.h b/lib/crc/x86/crc32.h
@@ -44,7 +44,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 		return crc32c_base(crc, p, len);
 
 	if (IS_ENABLED(CONFIG_X86_64) && len >= CRC32C_PCLMUL_BREAKEVEN &&
-	    static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) {
+	    static_branch_likely(&have_pclmulqdq) && likely(irq_fpu_usable())) {
 		/*
 		 * Long length, the vector registers are usable, and the CPU is
 		 * 64-bit and supports both CRC32 and PCLMULQDQ instructions.
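On x86 the corresponding check is irq_fpu_usable(), paired with
kernel_fpu_begin()/kernel_fpu_end() around the PCLMULQDQ body; a minimal
sketch with placeholder helpers (my_crc_pclmul() and my_crc_scalar() are
illustrative, not from the patch):

/*
 * Sketch of the x86 variant of the same pattern. irq_fpu_usable(),
 * kernel_fpu_begin(), and kernel_fpu_end() are the real x86 APIs.
 */
static u32 my_crc32c(u32 crc, const u8 *p, size_t len)
{
	if (len >= 16 && likely(irq_fpu_usable())) {
		kernel_fpu_begin();	/* save user FPU/SIMD state */
		crc = my_crc_pclmul(crc, p, len);
		kernel_fpu_end();
		return crc;
	}
	return my_crc_scalar(crc, p, len);
}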