Diffstat (limited to 'lib/crc')
-rw-r--r--   lib/crc/arm/crc-t10dif.h     19
-rw-r--r--   lib/crc/arm/crc32.h          11
-rw-r--r--   lib/crc/arm64/crc-t10dif.h   19
-rw-r--r--   lib/crc/arm64/crc32.h        16
4 files changed, 22 insertions, 43 deletions
diff --git a/lib/crc/arm/crc-t10dif.h b/lib/crc/arm/crc-t10dif.h
index 63441de5e3f1..afc0ebf97f19 100644
--- a/lib/crc/arm/crc-t10dif.h
+++ b/lib/crc/arm/crc-t10dif.h
@@ -5,7 +5,6 @@
  * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
  */
 
-#include <asm/neon.h>
 #include <asm/simd.h>
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
@@ -19,22 +18,16 @@ asmlinkage void crc_t10dif_pmull8(u16 init_crc, const u8 *buf, size_t len,
 
 static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
 {
-	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && likely(may_use_simd())) {
 		if (static_branch_likely(&have_pmull)) {
-			if (likely(may_use_simd())) {
-				kernel_neon_begin();
-				crc = crc_t10dif_pmull64(crc, data, length);
-				kernel_neon_end();
-				return crc;
-			}
+			scoped_ksimd()
+				return crc_t10dif_pmull64(crc, data, length);
 		} else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
-			   static_branch_likely(&have_neon) &&
-			   likely(may_use_simd())) {
+			   static_branch_likely(&have_neon)) {
 			u8 buf[16] __aligned(16);
 
-			kernel_neon_begin();
-			crc_t10dif_pmull8(crc, data, length, buf);
-			kernel_neon_end();
+			scoped_ksimd()
+				crc_t10dif_pmull8(crc, data, length, buf);
 
 			return crc_t10dif_generic(0, buf, sizeof(buf));
 		}
diff --git a/lib/crc/arm/crc32.h b/lib/crc/arm/crc32.h
index 7b76f52f6907..f33de6b22cd4 100644
--- a/lib/crc/arm/crc32.h
+++ b/lib/crc/arm/crc32.h
@@ -8,7 +8,6 @@
 #include <linux/cpufeature.h>
 
 #include <asm/hwcap.h>
-#include <asm/neon.h>
 #include <asm/simd.h>
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32);
@@ -42,9 +41,8 @@ static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
 			len -= n;
 		}
 		n = round_down(len, 16);
-		kernel_neon_begin();
-		crc = crc32_pmull_le(p, n, crc);
-		kernel_neon_end();
+		scoped_ksimd()
+			crc = crc32_pmull_le(p, n, crc);
 		p += n;
 		len -= n;
 	}
@@ -71,9 +69,8 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 			len -= n;
 		}
 		n = round_down(len, 16);
-		kernel_neon_begin();
-		crc = crc32c_pmull_le(p, n, crc);
-		kernel_neon_end();
+		scoped_ksimd()
+			crc = crc32c_pmull_le(p, n, crc);
 		p += n;
 		len -= n;
 	}
diff --git a/lib/crc/arm64/crc-t10dif.h b/lib/crc/arm64/crc-t10dif.h
index f88db2971805..b8338139ed77 100644
--- a/lib/crc/arm64/crc-t10dif.h
+++ b/lib/crc/arm64/crc-t10dif.h
@@ -7,7 +7,6 @@
 
 #include <linux/cpufeature.h>
 
-#include <asm/neon.h>
 #include <asm/simd.h>
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_asimd);
@@ -21,22 +20,16 @@ asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
 
 static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
 {
-	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && likely(may_use_simd())) {
 		if (static_branch_likely(&have_pmull)) {
-			if (likely(may_use_simd())) {
-				kernel_neon_begin();
-				crc = crc_t10dif_pmull_p64(crc, data, length);
-				kernel_neon_end();
-				return crc;
-			}
+			scoped_ksimd()
+				return crc_t10dif_pmull_p64(crc, data, length);
 		} else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
-			   static_branch_likely(&have_asimd) &&
-			   likely(may_use_simd())) {
+			   static_branch_likely(&have_asimd)) {
 			u8 buf[16];
 
-			kernel_neon_begin();
-			crc_t10dif_pmull_p8(crc, data, length, buf);
-			kernel_neon_end();
+			scoped_ksimd()
+				crc_t10dif_pmull_p8(crc, data, length, buf);
 
 			return crc_t10dif_generic(0, buf, sizeof(buf));
 		}
diff --git a/lib/crc/arm64/crc32.h b/lib/crc/arm64/crc32.h
index 31e649cd40a2..1939a5dee477 100644
--- a/lib/crc/arm64/crc32.h
+++ b/lib/crc/arm64/crc32.h
@@ -2,7 +2,6 @@
 
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
-#include <asm/neon.h>
 #include <asm/simd.h>
 
 // The minimum input length to consider the 4-way interleaved code path
@@ -23,9 +22,8 @@ static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
 
 	if (len >= min_len && cpu_have_named_feature(PMULL) &&
 	    likely(may_use_simd())) {
-		kernel_neon_begin();
-		crc = crc32_le_arm64_4way(crc, p, len);
-		kernel_neon_end();
+		scoped_ksimd()
+			crc = crc32_le_arm64_4way(crc, p, len);
 
 		p += round_down(len, 64);
 		len %= 64;
@@ -44,9 +42,8 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 
 	if (len >= min_len && cpu_have_named_feature(PMULL) &&
 	    likely(may_use_simd())) {
-		kernel_neon_begin();
-		crc = crc32c_le_arm64_4way(crc, p, len);
-		kernel_neon_end();
+		scoped_ksimd()
+			crc = crc32c_le_arm64_4way(crc, p, len);
 
 		p += round_down(len, 64);
 		len %= 64;
@@ -65,9 +62,8 @@ static inline u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
 
 	if (len >= min_len && cpu_have_named_feature(PMULL) &&
 	    likely(may_use_simd())) {
-		kernel_neon_begin();
-		crc = crc32_be_arm64_4way(crc, p, len);
-		kernel_neon_end();
+		scoped_ksimd()
+			crc = crc32_be_arm64_4way(crc, p, len);
 
 		p += round_down(len, 64);
 		len %= 64;
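
For reference, the conversion pattern applied in every hunk above is sketched below. This is a minimal illustration, not code taken from the tree: it assumes scoped_ksimd() acts as a scoped guard that performs the same begin/end bookkeeping around the statement (or block) that follows it, which the old code spelled out with an explicit kernel_neon_begin()/kernel_neon_end() pair. The example function names are hypothetical; the helper it calls is reused from the arm crc32 hunks.

/* Before: explicit begin/end bracketing the SIMD call (hypothetical wrapper). */
static u32 crc32_example_before(u32 crc, const u8 *p, size_t n)
{
	kernel_neon_begin();
	crc = crc32_pmull_le(p, n, crc);
	kernel_neon_end();
	return crc;
}

/* After: the scope of the guarded statement bounds the SIMD region. */
static u32 crc32_example_after(u32 crc, const u8 *p, size_t n)
{
	scoped_ksimd()
		crc = crc32_pmull_le(p, n, crc);
	return crc;
}

Note that in the crc-t10dif.h hunks the may_use_simd() check also moves out of the inner branches and into the enclosing length test, so it is evaluated once before either SIMD path is selected.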
