summaryrefslogtreecommitdiff
path: root/lib/crypto
diff options
context:
space:
mode:
authorEric Biggers <ebiggers@kernel.org>2025-10-17 21:30:58 -0700
committerEric Biggers <ebiggers@kernel.org>2025-10-29 22:04:24 -0700
commit5e0ec8e46d4d6488242bb39a4ce5c0276afa5f32 (patch)
treec867511d4d827a19d4f3333789f09de260ebd352 /lib/crypto
parent50b8e36994a042103ea92b6d9f6d7de725f9ac5f (diff)
lib/crypto: blake2s: Rename blake2s_state to blake2s_ctx
For consistency with the SHA-1, SHA-2, SHA-3 (in development), and MD5 library APIs, rename blake2s_state to blake2s_ctx.

As a refresher, the ctx name:

- Is a bit shorter.
- Avoids confusion with the compression function state, which is also often called the state (but is just part of the full context).
- Is consistent with OpenSSL.

Not a big deal, of course. But consistency is nice. With a BLAKE2b library API about to be added, this is a convenient time to update this.

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20251018043106.375964-3-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
Diffstat (limited to 'lib/crypto')
-rw-r--r--lib/crypto/arm/blake2s-core.S10
-rw-r--r--lib/crypto/arm/blake2s.h4
-rw-r--r--lib/crypto/blake2s.c58
-rw-r--r--lib/crypto/tests/blake2s_kunit.c23
-rw-r--r--lib/crypto/x86/blake2s.h12
5 files changed, 53 insertions, 54 deletions
diff --git a/lib/crypto/arm/blake2s-core.S b/lib/crypto/arm/blake2s-core.S
index 293f44fa8f31..78e758a7cb3e 100644
--- a/lib/crypto/arm/blake2s-core.S
+++ b/lib/crypto/arm/blake2s-core.S
@@ -170,10 +170,10 @@
.endm
//
-// void blake2s_compress(struct blake2s_state *state,
+// void blake2s_compress(struct blake2s_ctx *ctx,
// const u8 *block, size_t nblocks, u32 inc);
//
-// Only the first three fields of struct blake2s_state are used:
+// Only the first three fields of struct blake2s_ctx are used:
// u32 h[8]; (inout)
// u32 t[2]; (inout)
// u32 f[2]; (in)
@@ -183,7 +183,7 @@ ENTRY(blake2s_compress)
push {r0-r2,r4-r11,lr} // keep this an even number
.Lnext_block:
- // r0 is 'state'
+ // r0 is 'ctx'
// r1 is 'block'
// r3 is 'inc'
@@ -211,7 +211,7 @@ ENTRY(blake2s_compress)
// Calculate v[8..15]. Push v[9..15] onto the stack, and leave space
// for spilling v[8..9]. Leave v[8..9] in r8-r9.
- mov r14, r0 // r14 = state
+ mov r14, r0 // r14 = ctx
adr r12, .Lblake2s_IV
ldmia r12!, {r8-r9} // load IV[0..1]
__ldrd r0, r1, r14, 40 // load f[0..1]
@@ -275,7 +275,7 @@ ENTRY(blake2s_compress)
// Advance to the next block, if there is one. Note that if there are
// multiple blocks, then 'inc' (the counter increment amount) must be
// 64. So we can simply set it to 64 without re-loading it.
- ldm sp, {r0, r1, r2} // load (state, block, nblocks)
+ ldm sp, {r0, r1, r2} // load (ctx, block, nblocks)
mov r3, #64 // set 'inc'
subs r2, r2, #1 // nblocks--
str r2, [sp, #8]
diff --git a/lib/crypto/arm/blake2s.h b/lib/crypto/arm/blake2s.h
index aa7a97139ea7..ce009cd98de9 100644
--- a/lib/crypto/arm/blake2s.h
+++ b/lib/crypto/arm/blake2s.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* defined in blake2s-core.S */
-void blake2s_compress(struct blake2s_state *state, const u8 *block,
- size_t nblocks, u32 inc);
+void blake2s_compress(struct blake2s_ctx *ctx,
+ const u8 *block, size_t nblocks, u32 inc);
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index 5638ed9d882d..1ad36cb29835 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -29,15 +29,15 @@ static const u8 blake2s_sigma[10][16] = {
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
};
-static inline void blake2s_increment_counter(struct blake2s_state *state,
+static inline void blake2s_increment_counter(struct blake2s_ctx *ctx,
const u32 inc)
{
- state->t[0] += inc;
- state->t[1] += (state->t[0] < inc);
+ ctx->t[0] += inc;
+ ctx->t[1] += (ctx->t[0] < inc);
}
static void __maybe_unused
-blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
+blake2s_compress_generic(struct blake2s_ctx *ctx, const u8 *block,
size_t nblocks, const u32 inc)
{
u32 m[16];
@@ -48,18 +48,18 @@ blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
(nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE));
while (nblocks > 0) {
- blake2s_increment_counter(state, inc);
+ blake2s_increment_counter(ctx, inc);
memcpy(m, block, BLAKE2S_BLOCK_SIZE);
le32_to_cpu_array(m, ARRAY_SIZE(m));
- memcpy(v, state->h, 32);
+ memcpy(v, ctx->h, 32);
v[ 8] = BLAKE2S_IV0;
v[ 9] = BLAKE2S_IV1;
v[10] = BLAKE2S_IV2;
v[11] = BLAKE2S_IV3;
- v[12] = BLAKE2S_IV4 ^ state->t[0];
- v[13] = BLAKE2S_IV5 ^ state->t[1];
- v[14] = BLAKE2S_IV6 ^ state->f[0];
- v[15] = BLAKE2S_IV7 ^ state->f[1];
+ v[12] = BLAKE2S_IV4 ^ ctx->t[0];
+ v[13] = BLAKE2S_IV5 ^ ctx->t[1];
+ v[14] = BLAKE2S_IV6 ^ ctx->f[0];
+ v[15] = BLAKE2S_IV7 ^ ctx->f[1];
#define G(r, i, a, b, c, d) do { \
a += b + m[blake2s_sigma[r][2 * i + 0]]; \
@@ -97,7 +97,7 @@ blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
#undef ROUND
for (i = 0; i < 8; ++i)
- state->h[i] ^= v[i] ^ v[i + 8];
+ ctx->h[i] ^= v[i] ^ v[i + 8];
block += BLAKE2S_BLOCK_SIZE;
--nblocks;
@@ -110,45 +110,45 @@ blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
#define blake2s_compress blake2s_compress_generic
#endif
-static inline void blake2s_set_lastblock(struct blake2s_state *state)
+static inline void blake2s_set_lastblock(struct blake2s_ctx *ctx)
{
- state->f[0] = -1;
+ ctx->f[0] = -1;
}
-void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
+void blake2s_update(struct blake2s_ctx *ctx, const u8 *in, size_t inlen)
{
- const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
+ const size_t fill = BLAKE2S_BLOCK_SIZE - ctx->buflen;
if (unlikely(!inlen))
return;
if (inlen > fill) {
- memcpy(state->buf + state->buflen, in, fill);
- blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
- state->buflen = 0;
+ memcpy(ctx->buf + ctx->buflen, in, fill);
+ blake2s_compress(ctx, ctx->buf, 1, BLAKE2S_BLOCK_SIZE);
+ ctx->buflen = 0;
in += fill;
inlen -= fill;
}
if (inlen > BLAKE2S_BLOCK_SIZE) {
const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
- blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+ blake2s_compress(ctx, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
}
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
+ memcpy(ctx->buf + ctx->buflen, in, inlen);
+ ctx->buflen += inlen;
}
EXPORT_SYMBOL(blake2s_update);
-void blake2s_final(struct blake2s_state *state, u8 *out)
+void blake2s_final(struct blake2s_ctx *ctx, u8 *out)
{
WARN_ON(IS_ENABLED(DEBUG) && !out);
- blake2s_set_lastblock(state);
- memset(state->buf + state->buflen, 0,
- BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
- blake2s_compress(state, state->buf, 1, state->buflen);
- cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
- memcpy(out, state->h, state->outlen);
- memzero_explicit(state, sizeof(*state));
+ blake2s_set_lastblock(ctx);
+ memset(ctx->buf + ctx->buflen, 0,
+ BLAKE2S_BLOCK_SIZE - ctx->buflen); /* Padding */
+ blake2s_compress(ctx, ctx->buf, 1, ctx->buflen);
+ cpu_to_le32_array(ctx->h, ARRAY_SIZE(ctx->h));
+ memcpy(out, ctx->h, ctx->outlen);
+ memzero_explicit(ctx, sizeof(*ctx));
}
EXPORT_SYMBOL(blake2s_final);
diff --git a/lib/crypto/tests/blake2s_kunit.c b/lib/crypto/tests/blake2s_kunit.c
index 247bbdf7dc86..6832d9aa7b82 100644
--- a/lib/crypto/tests/blake2s_kunit.c
+++ b/lib/crypto/tests/blake2s_kunit.c
@@ -17,9 +17,9 @@ static void blake2s_default(const u8 *data, size_t len,
blake2s(NULL, 0, data, len, out, BLAKE2S_HASH_SIZE);
}
-static void blake2s_init_default(struct blake2s_state *state)
+static void blake2s_init_default(struct blake2s_ctx *ctx)
{
- blake2s_init(state, BLAKE2S_HASH_SIZE);
+ blake2s_init(ctx, BLAKE2S_HASH_SIZE);
}
/*
@@ -27,7 +27,7 @@ static void blake2s_init_default(struct blake2s_state *state)
* with a key length of 0 and a hash length of BLAKE2S_HASH_SIZE.
*/
#define HASH blake2s_default
-#define HASH_CTX blake2s_state
+#define HASH_CTX blake2s_ctx
#define HASH_SIZE BLAKE2S_HASH_SIZE
#define HASH_INIT blake2s_init_default
#define HASH_UPDATE blake2s_update
@@ -44,19 +44,19 @@ static void test_blake2s_all_key_and_hash_lens(struct kunit *test)
u8 *data = &test_buf[0];
u8 *key = data + data_len;
u8 *hash = key + BLAKE2S_KEY_SIZE;
- struct blake2s_state main_state;
+ struct blake2s_ctx main_ctx;
u8 main_hash[BLAKE2S_HASH_SIZE];
rand_bytes_seeded_from_len(data, data_len);
- blake2s_init(&main_state, BLAKE2S_HASH_SIZE);
+ blake2s_init(&main_ctx, BLAKE2S_HASH_SIZE);
for (int key_len = 0; key_len <= BLAKE2S_KEY_SIZE; key_len++) {
rand_bytes_seeded_from_len(key, key_len);
for (int out_len = 1; out_len <= BLAKE2S_HASH_SIZE; out_len++) {
blake2s(key, key_len, data, data_len, hash, out_len);
- blake2s_update(&main_state, hash, out_len);
+ blake2s_update(&main_ctx, hash, out_len);
}
}
- blake2s_final(&main_state, main_hash);
+ blake2s_final(&main_ctx, main_hash);
KUNIT_ASSERT_MEMEQ(test, main_hash, blake2s_keyed_testvec_consolidated,
BLAKE2S_HASH_SIZE);
}
@@ -75,7 +75,7 @@ static void test_blake2s_with_guarded_key_buf(struct kunit *test)
u8 *guarded_key = &test_buf[TEST_BUF_LEN - key_len];
u8 hash1[BLAKE2S_HASH_SIZE];
u8 hash2[BLAKE2S_HASH_SIZE];
- struct blake2s_state state;
+ struct blake2s_ctx ctx;
rand_bytes(key, key_len);
memcpy(guarded_key, key, key_len);
@@ -86,10 +86,9 @@ static void test_blake2s_with_guarded_key_buf(struct kunit *test)
hash2, BLAKE2S_HASH_SIZE);
KUNIT_ASSERT_MEMEQ(test, hash1, hash2, BLAKE2S_HASH_SIZE);
- blake2s_init_key(&state, BLAKE2S_HASH_SIZE,
- guarded_key, key_len);
- blake2s_update(&state, test_buf, data_len);
- blake2s_final(&state, hash2);
+ blake2s_init_key(&ctx, BLAKE2S_HASH_SIZE, guarded_key, key_len);
+ blake2s_update(&ctx, test_buf, data_len);
+ blake2s_final(&ctx, hash2);
KUNIT_ASSERT_MEMEQ(test, hash1, hash2, BLAKE2S_HASH_SIZE);
}
}
diff --git a/lib/crypto/x86/blake2s.h b/lib/crypto/x86/blake2s.h
index b6d30d2fa045..de360935b820 100644
--- a/lib/crypto/x86/blake2s.h
+++ b/lib/crypto/x86/blake2s.h
@@ -11,24 +11,24 @@
#include <linux/kernel.h>
#include <linux/sizes.h>
-asmlinkage void blake2s_compress_ssse3(struct blake2s_state *state,
+asmlinkage void blake2s_compress_ssse3(struct blake2s_ctx *ctx,
const u8 *block, const size_t nblocks,
const u32 inc);
-asmlinkage void blake2s_compress_avx512(struct blake2s_state *state,
+asmlinkage void blake2s_compress_avx512(struct blake2s_ctx *ctx,
const u8 *block, const size_t nblocks,
const u32 inc);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512);
-static void blake2s_compress(struct blake2s_state *state, const u8 *block,
+static void blake2s_compress(struct blake2s_ctx *ctx, const u8 *block,
size_t nblocks, const u32 inc)
{
/* SIMD disables preemption, so relax after processing each page. */
BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8);
if (!static_branch_likely(&blake2s_use_ssse3) || !may_use_simd()) {
- blake2s_compress_generic(state, block, nblocks, inc);
+ blake2s_compress_generic(ctx, block, nblocks, inc);
return;
}
@@ -38,9 +38,9 @@ static void blake2s_compress(struct blake2s_state *state, const u8 *block,
kernel_fpu_begin();
if (static_branch_likely(&blake2s_use_avx512))
- blake2s_compress_avx512(state, block, blocks, inc);
+ blake2s_compress_avx512(ctx, block, blocks, inc);
else
- blake2s_compress_ssse3(state, block, blocks, inc);
+ blake2s_compress_ssse3(ctx, block, blocks, inc);
kernel_fpu_end();
nblocks -= blocks;