Diffstat (limited to 'lib/crc')
-rw-r--r--  lib/crc/arm/crc-t10dif.h            8
-rw-r--r--  lib/crc/arm/crc32.h                 8
-rw-r--r--  lib/crc/arm64/crc-t10dif.h          8
-rw-r--r--  lib/crc/arm64/crc32.h              11
-rw-r--r--  lib/crc/loongarch/crc32.h           2
-rw-r--r--  lib/crc/mips/crc32.h                2
-rw-r--r--  lib/crc/powerpc/crc-t10dif.h        7
-rw-r--r--  lib/crc/powerpc/crc32.h             7
-rw-r--r--  lib/crc/sparc/crc32.h               2
-rw-r--r--  lib/crc/tests/crc_kunit.c          62
-rw-r--r--  lib/crc/x86/crc-pclmul-template.h   3
-rw-r--r--  lib/crc/x86/crc-t10dif.h            2
-rw-r--r--  lib/crc/x86/crc32.h                 4
-rw-r--r--  lib/crc/x86/crc64.h                 2
14 files changed, 84 insertions(+), 44 deletions(-)
diff --git a/lib/crc/arm/crc-t10dif.h b/lib/crc/arm/crc-t10dif.h
index 2edf7e9681d0..63441de5e3f1 100644
--- a/lib/crc/arm/crc-t10dif.h
+++ b/lib/crc/arm/crc-t10dif.h
@@ -5,8 +5,6 @@
* Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
-#include <crypto/internal/simd.h>
-
#include <asm/neon.h>
#include <asm/simd.h>
@@ -23,7 +21,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
{
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
if (static_branch_likely(&have_pmull)) {
- if (crypto_simd_usable()) {
+ if (likely(may_use_simd())) {
kernel_neon_begin();
crc = crc_t10dif_pmull64(crc, data, length);
kernel_neon_end();
@@ -31,7 +29,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
}
} else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
static_branch_likely(&have_neon) &&
- crypto_simd_usable()) {
+ likely(may_use_simd())) {
u8 buf[16] __aligned(16);
kernel_neon_begin();
@@ -45,7 +43,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
}
#define crc_t10dif_mod_init_arch crc_t10dif_mod_init_arch
-static inline void crc_t10dif_mod_init_arch(void)
+static void crc_t10dif_mod_init_arch(void)
{
if (elf_hwcap & HWCAP_NEON) {
static_branch_enable(&have_neon);
diff --git a/lib/crc/arm/crc32.h b/lib/crc/arm/crc32.h
index 018007e162a2..7b76f52f6907 100644
--- a/lib/crc/arm/crc32.h
+++ b/lib/crc/arm/crc32.h
@@ -7,8 +7,6 @@
#include <linux/cpufeature.h>
-#include <crypto/internal/simd.h>
-
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
@@ -34,7 +32,7 @@ static inline u32 crc32_le_scalar(u32 crc, const u8 *p, size_t len)
static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
if (len >= PMULL_MIN_LEN + 15 &&
- static_branch_likely(&have_pmull) && crypto_simd_usable()) {
+ static_branch_likely(&have_pmull) && likely(may_use_simd())) {
size_t n = -(uintptr_t)p & 15;
/* align p to 16-byte boundary */
@@ -63,7 +61,7 @@ static inline u32 crc32c_scalar(u32 crc, const u8 *p, size_t len)
static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
if (len >= PMULL_MIN_LEN + 15 &&
- static_branch_likely(&have_pmull) && crypto_simd_usable()) {
+ static_branch_likely(&have_pmull) && likely(may_use_simd())) {
size_t n = -(uintptr_t)p & 15;
/* align p to 16-byte boundary */
@@ -85,7 +83,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
#define crc32_be_arch crc32_be_base /* not implemented on this arch */
#define crc32_mod_init_arch crc32_mod_init_arch
-static inline void crc32_mod_init_arch(void)
+static void crc32_mod_init_arch(void)
{
if (elf_hwcap2 & HWCAP2_CRC32)
static_branch_enable(&have_crc32);
diff --git a/lib/crc/arm64/crc-t10dif.h b/lib/crc/arm64/crc-t10dif.h
index c4521a7f1ee9..f88db2971805 100644
--- a/lib/crc/arm64/crc-t10dif.h
+++ b/lib/crc/arm64/crc-t10dif.h
@@ -7,8 +7,6 @@
#include <linux/cpufeature.h>
-#include <crypto/internal/simd.h>
-
#include <asm/neon.h>
#include <asm/simd.h>
@@ -25,7 +23,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
{
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
if (static_branch_likely(&have_pmull)) {
- if (crypto_simd_usable()) {
+ if (likely(may_use_simd())) {
kernel_neon_begin();
crc = crc_t10dif_pmull_p64(crc, data, length);
kernel_neon_end();
@@ -33,7 +31,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
}
} else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
static_branch_likely(&have_asimd) &&
- crypto_simd_usable()) {
+ likely(may_use_simd())) {
u8 buf[16];
kernel_neon_begin();
@@ -47,7 +45,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
}
#define crc_t10dif_mod_init_arch crc_t10dif_mod_init_arch
-static inline void crc_t10dif_mod_init_arch(void)
+static void crc_t10dif_mod_init_arch(void)
{
if (cpu_have_named_feature(ASIMD)) {
static_branch_enable(&have_asimd);
diff --git a/lib/crc/arm64/crc32.h b/lib/crc/arm64/crc32.h
index 6e5dec45f05d..31e649cd40a2 100644
--- a/lib/crc/arm64/crc32.h
+++ b/lib/crc/arm64/crc32.h
@@ -5,8 +5,6 @@
#include <asm/neon.h>
#include <asm/simd.h>
-#include <crypto/internal/simd.h>
-
// The minimum input length to consider the 4-way interleaved code path
static const size_t min_len = 1024;
@@ -23,7 +21,8 @@ static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32_le_base(crc, p, len);
- if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+ if (len >= min_len && cpu_have_named_feature(PMULL) &&
+ likely(may_use_simd())) {
kernel_neon_begin();
crc = crc32_le_arm64_4way(crc, p, len);
kernel_neon_end();
@@ -43,7 +42,8 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32c_base(crc, p, len);
- if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+ if (len >= min_len && cpu_have_named_feature(PMULL) &&
+ likely(may_use_simd())) {
kernel_neon_begin();
crc = crc32c_le_arm64_4way(crc, p, len);
kernel_neon_end();
@@ -63,7 +63,8 @@ static inline u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32_be_base(crc, p, len);
- if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+ if (len >= min_len && cpu_have_named_feature(PMULL) &&
+ likely(may_use_simd())) {
kernel_neon_begin();
crc = crc32_be_arm64_4way(crc, p, len);
kernel_neon_end();
diff --git a/lib/crc/loongarch/crc32.h b/lib/crc/loongarch/crc32.h
index 6de5c96594af..d34fa4c68632 100644
--- a/lib/crc/loongarch/crc32.h
+++ b/lib/crc/loongarch/crc32.h
@@ -101,7 +101,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
#define crc32_be_arch crc32_be_base /* not implemented on this arch */
#define crc32_mod_init_arch crc32_mod_init_arch
-static inline void crc32_mod_init_arch(void)
+static void crc32_mod_init_arch(void)
{
if (cpu_has_crc32)
static_branch_enable(&have_crc32);
diff --git a/lib/crc/mips/crc32.h b/lib/crc/mips/crc32.h
index 11cb272c63a6..3100354a049e 100644
--- a/lib/crc/mips/crc32.h
+++ b/lib/crc/mips/crc32.h
@@ -148,7 +148,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
#define crc32_be_arch crc32_be_base /* not implemented on this arch */
#define crc32_mod_init_arch crc32_mod_init_arch
-static inline void crc32_mod_init_arch(void)
+static void crc32_mod_init_arch(void)
{
if (cpu_have_feature(cpu_feature(MIPS_CRC32)))
static_branch_enable(&have_crc32);
diff --git a/lib/crc/powerpc/crc-t10dif.h b/lib/crc/powerpc/crc-t10dif.h
index 59e16804a6ea..8f4592a5323d 100644
--- a/lib/crc/powerpc/crc-t10dif.h
+++ b/lib/crc/powerpc/crc-t10dif.h
@@ -6,8 +6,8 @@
* [based on crc32c-vpmsum_glue.c]
*/
+#include <asm/simd.h>
#include <asm/switch_to.h>
-#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
#include <linux/jump_label.h>
#include <linux/preempt.h>
@@ -29,7 +29,8 @@ static inline u16 crc_t10dif_arch(u16 crci, const u8 *p, size_t len)
u32 crc = crci;
if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
- !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
+ !static_branch_likely(&have_vec_crypto) ||
+ unlikely(!may_use_simd()))
return crc_t10dif_generic(crc, p, len);
if ((unsigned long)p & VMX_ALIGN_MASK) {
@@ -61,7 +62,7 @@ static inline u16 crc_t10dif_arch(u16 crci, const u8 *p, size_t len)
}
#define crc_t10dif_mod_init_arch crc_t10dif_mod_init_arch
-static inline void crc_t10dif_mod_init_arch(void)
+static void crc_t10dif_mod_init_arch(void)
{
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))
diff --git a/lib/crc/powerpc/crc32.h b/lib/crc/powerpc/crc32.h
index 811cc2e6ed24..0c852272a382 100644
--- a/lib/crc/powerpc/crc32.h
+++ b/lib/crc/powerpc/crc32.h
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <asm/simd.h>
#include <asm/switch_to.h>
-#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
#include <linux/jump_label.h>
#include <linux/preempt.h>
@@ -24,7 +24,8 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
unsigned int tail;
if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
- !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
+ !static_branch_likely(&have_vec_crypto) ||
+ unlikely(!may_use_simd()))
return crc32c_base(crc, p, len);
if ((unsigned long)p & VMX_ALIGN_MASK) {
@@ -54,7 +55,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
}
#define crc32_mod_init_arch crc32_mod_init_arch
-static inline void crc32_mod_init_arch(void)
+static void crc32_mod_init_arch(void)
{
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))
diff --git a/lib/crc/sparc/crc32.h b/lib/crc/sparc/crc32.h
index 60f2765ac015..df7c350acd7b 100644
--- a/lib/crc/sparc/crc32.h
+++ b/lib/crc/sparc/crc32.h
@@ -44,7 +44,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *data, size_t len)
}
#define crc32_mod_init_arch crc32_mod_init_arch
-static inline void crc32_mod_init_arch(void)
+static void crc32_mod_init_arch(void)
{
unsigned long cfr;
diff --git a/lib/crc/tests/crc_kunit.c b/lib/crc/tests/crc_kunit.c
index f08d985d8860..9a450e25ac81 100644
--- a/lib/crc/tests/crc_kunit.c
+++ b/lib/crc/tests/crc_kunit.c
@@ -6,6 +6,7 @@
*
* Author: Eric Biggers <ebiggers@google.com>
*/
+#include <kunit/run-in-irq-context.h>
#include <kunit/test.h>
#include <linux/crc7.h>
#include <linux/crc16.h>
@@ -141,6 +142,54 @@ static size_t generate_random_length(size_t max_length)
return len % (max_length + 1);
}
+#define IRQ_TEST_DATA_LEN 512
+#define IRQ_TEST_NUM_BUFFERS 3 /* matches max concurrency level */
+
+struct crc_irq_test_state {
+ const struct crc_variant *v;
+ u64 initial_crc;
+ u64 expected_crcs[IRQ_TEST_NUM_BUFFERS];
+ atomic_t seqno;
+};
+
+/*
+ * Compute the CRC of one of the test messages and verify that it matches the
+ * expected CRC from @state->expected_crcs. To increase the chance of detecting
+ * problems, cycle through multiple messages.
+ */
+static bool crc_irq_test_func(void *state_)
+{
+ struct crc_irq_test_state *state = state_;
+ const struct crc_variant *v = state->v;
+ u32 i = (u32)atomic_inc_return(&state->seqno) % IRQ_TEST_NUM_BUFFERS;
+ u64 actual_crc = v->func(state->initial_crc,
+ &test_buffer[i * IRQ_TEST_DATA_LEN],
+ IRQ_TEST_DATA_LEN);
+
+ return actual_crc == state->expected_crcs[i];
+}
+
+/*
+ * Test that if CRCs are computed in task, softirq, and hardirq context
+ * concurrently, then all results are as expected.
+ */
+static void crc_interrupt_context_test(struct kunit *test,
+ const struct crc_variant *v)
+{
+ struct crc_irq_test_state state = {
+ .v = v,
+ .initial_crc = generate_random_initial_crc(v),
+ };
+
+ for (int i = 0; i < IRQ_TEST_NUM_BUFFERS; i++) {
+ state.expected_crcs[i] = crc_ref(
+ v, state.initial_crc,
+ &test_buffer[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN);
+ }
+
+ kunit_run_irq_test(test, crc_irq_test_func, 100000, &state);
+}
+
/* Test that v->func gives the same CRCs as a reference implementation. */
static void crc_test(struct kunit *test, const struct crc_variant *v)
{
@@ -149,7 +198,6 @@ static void crc_test(struct kunit *test, const struct crc_variant *v)
for (i = 0; i < CRC_KUNIT_NUM_TEST_ITERS; i++) {
u64 init_crc, expected_crc, actual_crc;
size_t len, offset;
- bool nosimd;
init_crc = generate_random_initial_crc(v);
len = generate_random_length(CRC_KUNIT_MAX_LEN);
@@ -168,22 +216,18 @@ static void crc_test(struct kunit *test, const struct crc_variant *v)
/* Refresh the data occasionally. */
prandom_bytes_state(&rng, &test_buffer[offset], len);
- nosimd = rand32() % 8 == 0;
-
/*
* Compute the CRC, and verify that it equals the CRC computed
* by a simple bit-at-a-time reference implementation.
*/
expected_crc = crc_ref(v, init_crc, &test_buffer[offset], len);
- if (nosimd)
- local_irq_disable();
actual_crc = v->func(init_crc, &test_buffer[offset], len);
- if (nosimd)
- local_irq_enable();
KUNIT_EXPECT_EQ_MSG(test, expected_crc, actual_crc,
- "Wrong result with len=%zu offset=%zu nosimd=%d",
- len, offset, nosimd);
+ "Wrong result with len=%zu offset=%zu",
+ len, offset);
}
+
+ crc_interrupt_context_test(test, v);
}
static __always_inline void
diff --git a/lib/crc/x86/crc-pclmul-template.h b/lib/crc/x86/crc-pclmul-template.h
index 35c950d7010c..02744831c6fa 100644
--- a/lib/crc/x86/crc-pclmul-template.h
+++ b/lib/crc/x86/crc-pclmul-template.h
@@ -12,7 +12,6 @@
#include <asm/cpufeatures.h>
#include <asm/simd.h>
-#include <crypto/internal/simd.h>
#include <linux/static_call.h>
#include "crc-pclmul-consts.h"
@@ -57,7 +56,7 @@ static inline bool have_avx512(void)
#define CRC_PCLMUL(crc, p, len, prefix, consts, have_pclmulqdq) \
do { \
if ((len) >= 16 && static_branch_likely(&(have_pclmulqdq)) && \
- crypto_simd_usable()) { \
+ likely(irq_fpu_usable())) { \
const void *consts_ptr; \
\
consts_ptr = (consts).fold_across_128_bits_consts; \
diff --git a/lib/crc/x86/crc-t10dif.h b/lib/crc/x86/crc-t10dif.h
index 2a02a3026f3f..8ee8824da551 100644
--- a/lib/crc/x86/crc-t10dif.h
+++ b/lib/crc/x86/crc-t10dif.h
@@ -19,7 +19,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *p, size_t len)
}
#define crc_t10dif_mod_init_arch crc_t10dif_mod_init_arch
-static inline void crc_t10dif_mod_init_arch(void)
+static void crc_t10dif_mod_init_arch(void)
{
if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
static_branch_enable(&have_pclmulqdq);
diff --git a/lib/crc/x86/crc32.h b/lib/crc/x86/crc32.h
index cea2c96d08d0..19a5e3c6c73b 100644
--- a/lib/crc/x86/crc32.h
+++ b/lib/crc/x86/crc32.h
@@ -44,7 +44,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
return crc32c_base(crc, p, len);
if (IS_ENABLED(CONFIG_X86_64) && len >= CRC32C_PCLMUL_BREAKEVEN &&
- static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) {
+ static_branch_likely(&have_pclmulqdq) && likely(irq_fpu_usable())) {
/*
* Long length, the vector registers are usable, and the CPU is
* 64-bit and supports both CRC32 and PCLMULQDQ instructions.
@@ -106,7 +106,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
#define crc32_be_arch crc32_be_base /* not implemented on this arch */
#define crc32_mod_init_arch crc32_mod_init_arch
-static inline void crc32_mod_init_arch(void)
+static void crc32_mod_init_arch(void)
{
if (boot_cpu_has(X86_FEATURE_XMM4_2))
static_branch_enable(&have_crc32);
diff --git a/lib/crc/x86/crc64.h b/lib/crc/x86/crc64.h
index fde1222c4c58..7d4599319343 100644
--- a/lib/crc/x86/crc64.h
+++ b/lib/crc/x86/crc64.h
@@ -27,7 +27,7 @@ static inline u64 crc64_nvme_arch(u64 crc, const u8 *p, size_t len)
}
#define crc64_mod_init_arch crc64_mod_init_arch
-static inline void crc64_mod_init_arch(void)
+static void crc64_mod_init_arch(void)
{
if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
static_branch_enable(&have_pclmulqdq);