Diffstat (limited to 'include')
40 files changed, 1156 insertions, 301 deletions
diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h index dd7694477e50..3bc37fd103a7 100644 --- a/include/crypto/blake2b.h +++ b/include/crypto/blake2b.h @@ -7,20 +7,10 @@ #include <linux/types.h> #include <linux/string.h> -struct blake2b_state { - /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */ - u64 h[8]; - u64 t[2]; - /* The true state ends here. The rest is temporary storage. */ - u64 f[2]; -}; - enum blake2b_lengths { BLAKE2B_BLOCK_SIZE = 128, BLAKE2B_HASH_SIZE = 64, BLAKE2B_KEY_SIZE = 64, - BLAKE2B_STATE_SIZE = offsetof(struct blake2b_state, f), - BLAKE2B_DESC_SIZE = sizeof(struct blake2b_state), BLAKE2B_160_HASH_SIZE = 20, BLAKE2B_256_HASH_SIZE = 32, @@ -28,6 +18,25 @@ enum blake2b_lengths { BLAKE2B_512_HASH_SIZE = 64, }; +/** + * struct blake2b_ctx - Context for hashing a message with BLAKE2b + * @h: compression function state + * @t: block counter + * @f: finalization indicator + * @buf: partial block buffer; 'buflen' bytes are valid + * @buflen: number of bytes buffered in @buf + * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE + */ +struct blake2b_ctx { + /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */ + u64 h[8]; + u64 t[2]; + u64 f[2]; + u8 buf[BLAKE2B_BLOCK_SIZE]; + unsigned int buflen; + unsigned int outlen; +}; + enum blake2b_iv { BLAKE2B_IV0 = 0x6A09E667F3BCC908ULL, BLAKE2B_IV1 = 0xBB67AE8584CAA73BULL, @@ -39,19 +48,109 @@ enum blake2b_iv { BLAKE2B_IV7 = 0x5BE0CD19137E2179ULL, }; -static inline void __blake2b_init(struct blake2b_state *state, size_t outlen, - size_t keylen) +static inline void __blake2b_init(struct blake2b_ctx *ctx, size_t outlen, + const void *key, size_t keylen) +{ + ctx->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen); + ctx->h[1] = BLAKE2B_IV1; + ctx->h[2] = BLAKE2B_IV2; + ctx->h[3] = BLAKE2B_IV3; + ctx->h[4] = BLAKE2B_IV4; + ctx->h[5] = BLAKE2B_IV5; + ctx->h[6] = BLAKE2B_IV6; + ctx->h[7] = BLAKE2B_IV7; + ctx->t[0] = 0; + ctx->t[1] = 0; + ctx->f[0] = 0; + ctx->f[1] = 0; + ctx->buflen = 0; + ctx->outlen = outlen; + if (keylen) { + memcpy(ctx->buf, key, keylen); + memset(&ctx->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen); + ctx->buflen = BLAKE2B_BLOCK_SIZE; + } +} + +/** + * blake2b_init() - Initialize a BLAKE2b context for a new message (unkeyed) + * @ctx: the context to initialize + * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE + * + * Context: Any context. + */ +static inline void blake2b_init(struct blake2b_ctx *ctx, size_t outlen) { - state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen); - state->h[1] = BLAKE2B_IV1; - state->h[2] = BLAKE2B_IV2; - state->h[3] = BLAKE2B_IV3; - state->h[4] = BLAKE2B_IV4; - state->h[5] = BLAKE2B_IV5; - state->h[6] = BLAKE2B_IV6; - state->h[7] = BLAKE2B_IV7; - state->t[0] = 0; - state->t[1] = 0; + __blake2b_init(ctx, outlen, NULL, 0); +} + +/** + * blake2b_init_key() - Initialize a BLAKE2b context for a new message (keyed) + * @ctx: the context to initialize + * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE + * @key: the key + * @keylen: the key length in bytes, at most BLAKE2B_KEY_SIZE + * + * Context: Any context. 
+ */ +static inline void blake2b_init_key(struct blake2b_ctx *ctx, size_t outlen, + const void *key, size_t keylen) +{ + WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2B_HASH_SIZE || + !key || !keylen || keylen > BLAKE2B_KEY_SIZE)); + + __blake2b_init(ctx, outlen, key, keylen); +} + +/** + * blake2b_update() - Update a BLAKE2b context with message data + * @ctx: the context to update; must have been initialized + * @in: the message data + * @inlen: the data length in bytes + * + * This can be called any number of times. + * + * Context: Any context. + */ +void blake2b_update(struct blake2b_ctx *ctx, const u8 *in, size_t inlen); + +/** + * blake2b_final() - Finish computing a BLAKE2b hash + * @ctx: the context to finalize; must have been initialized + * @out: (output) the resulting BLAKE2b hash. Its length will be equal to the + * @outlen that was passed to blake2b_init() or blake2b_init_key(). + * + * After finishing, this zeroizes @ctx. So the caller does not need to do it. + * + * Context: Any context. + */ +void blake2b_final(struct blake2b_ctx *ctx, u8 *out); + +/** + * blake2b() - Compute BLAKE2b hash in one shot + * @key: the key, or NULL for an unkeyed hash + * @keylen: the key length in bytes (at most BLAKE2B_KEY_SIZE), or 0 for an + * unkeyed hash + * @in: the message data + * @inlen: the data length in bytes + * @out: (output) the resulting BLAKE2b hash, with length @outlen + * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE + * + * Context: Any context. + */ +static inline void blake2b(const u8 *key, size_t keylen, + const u8 *in, size_t inlen, + u8 *out, size_t outlen) +{ + struct blake2b_ctx ctx; + + WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen || + outlen > BLAKE2B_HASH_SIZE || keylen > BLAKE2B_KEY_SIZE || + (!key && keylen))); + + __blake2b_init(&ctx, outlen, key, keylen); + blake2b_update(&ctx, in, inlen); + blake2b_final(&ctx, out); } #endif /* _CRYPTO_BLAKE2B_H */ diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h index f9ffd39194eb..648cb7824358 100644 --- a/include/crypto/blake2s.h +++ b/include/crypto/blake2s.h @@ -22,7 +22,16 @@ enum blake2s_lengths { BLAKE2S_256_HASH_SIZE = 32, }; -struct blake2s_state { +/** + * struct blake2s_ctx - Context for hashing a message with BLAKE2s + * @h: compression function state + * @t: block counter + * @f: finalization indicator + * @buf: partial block buffer; 'buflen' bytes are valid + * @buflen: number of bytes buffered in @buf + * @outlen: length of output hash value in bytes, at most BLAKE2S_HASH_SIZE + */ +struct blake2s_ctx { /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. 
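A minimal usage sketch for the new BLAKE2b library API added above. The function blake2b_demo(), its message arguments, and the choice of a 256-bit output are illustrative, not part of the patch:

#include <crypto/blake2b.h>

static void blake2b_demo(const u8 *msg, size_t msg_len,
			 u8 digest[BLAKE2B_256_HASH_SIZE])
{
	struct blake2b_ctx ctx;

	/* Incremental API: init, any number of updates, then final. */
	blake2b_init(&ctx, BLAKE2B_256_HASH_SIZE);
	blake2b_update(&ctx, msg, msg_len);
	blake2b_final(&ctx, digest);	/* final also zeroizes the context */

	/* Equivalent unkeyed one-shot form. */
	blake2b(NULL, 0, msg, msg_len, digest, BLAKE2B_256_HASH_SIZE);
}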
*/ u32 h[8]; u32 t[2]; @@ -43,62 +52,109 @@ enum blake2s_iv { BLAKE2S_IV7 = 0x5BE0CD19UL, }; -static inline void __blake2s_init(struct blake2s_state *state, size_t outlen, +static inline void __blake2s_init(struct blake2s_ctx *ctx, size_t outlen, const void *key, size_t keylen) { - state->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen); - state->h[1] = BLAKE2S_IV1; - state->h[2] = BLAKE2S_IV2; - state->h[3] = BLAKE2S_IV3; - state->h[4] = BLAKE2S_IV4; - state->h[5] = BLAKE2S_IV5; - state->h[6] = BLAKE2S_IV6; - state->h[7] = BLAKE2S_IV7; - state->t[0] = 0; - state->t[1] = 0; - state->f[0] = 0; - state->f[1] = 0; - state->buflen = 0; - state->outlen = outlen; + ctx->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen); + ctx->h[1] = BLAKE2S_IV1; + ctx->h[2] = BLAKE2S_IV2; + ctx->h[3] = BLAKE2S_IV3; + ctx->h[4] = BLAKE2S_IV4; + ctx->h[5] = BLAKE2S_IV5; + ctx->h[6] = BLAKE2S_IV6; + ctx->h[7] = BLAKE2S_IV7; + ctx->t[0] = 0; + ctx->t[1] = 0; + ctx->f[0] = 0; + ctx->f[1] = 0; + ctx->buflen = 0; + ctx->outlen = outlen; if (keylen) { - memcpy(state->buf, key, keylen); - memset(&state->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen); - state->buflen = BLAKE2S_BLOCK_SIZE; + memcpy(ctx->buf, key, keylen); + memset(&ctx->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen); + ctx->buflen = BLAKE2S_BLOCK_SIZE; } } -static inline void blake2s_init(struct blake2s_state *state, - const size_t outlen) +/** + * blake2s_init() - Initialize a BLAKE2s context for a new message (unkeyed) + * @ctx: the context to initialize + * @outlen: length of output hash value in bytes, at most BLAKE2S_HASH_SIZE + * + * Context: Any context. + */ +static inline void blake2s_init(struct blake2s_ctx *ctx, size_t outlen) { - __blake2s_init(state, outlen, NULL, 0); + __blake2s_init(ctx, outlen, NULL, 0); } -static inline void blake2s_init_key(struct blake2s_state *state, - const size_t outlen, const void *key, - const size_t keylen) +/** + * blake2s_init_key() - Initialize a BLAKE2s context for a new message (keyed) + * @ctx: the context to initialize + * @outlen: length of output hash value in bytes, at most BLAKE2S_HASH_SIZE + * @key: the key + * @keylen: the key length in bytes, at most BLAKE2S_KEY_SIZE + * + * Context: Any context. + */ +static inline void blake2s_init_key(struct blake2s_ctx *ctx, size_t outlen, + const void *key, size_t keylen) { WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE || !key || !keylen || keylen > BLAKE2S_KEY_SIZE)); - __blake2s_init(state, outlen, key, keylen); + __blake2s_init(ctx, outlen, key, keylen); } -void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen); -void blake2s_final(struct blake2s_state *state, u8 *out); +/** + * blake2s_update() - Update a BLAKE2s context with message data + * @ctx: the context to update; must have been initialized + * @in: the message data + * @inlen: the data length in bytes + * + * This can be called any number of times. + * + * Context: Any context. + */ +void blake2s_update(struct blake2s_ctx *ctx, const u8 *in, size_t inlen); -static inline void blake2s(u8 *out, const u8 *in, const u8 *key, - const size_t outlen, const size_t inlen, - const size_t keylen) +/** + * blake2s_final() - Finish computing a BLAKE2s hash + * @ctx: the context to finalize; must have been initialized + * @out: (output) the resulting BLAKE2s hash. Its length will be equal to the + * @outlen that was passed to blake2s_init() or blake2s_init_key(). + * + * After finishing, this zeroizes @ctx. So the caller does not need to do it. 
+ * + * Context: Any context. + */ +void blake2s_final(struct blake2s_ctx *ctx, u8 *out); + +/** + * blake2s() - Compute BLAKE2s hash in one shot + * @key: the key, or NULL for an unkeyed hash + * @keylen: the key length in bytes (at most BLAKE2S_KEY_SIZE), or 0 for an + * unkeyed hash + * @in: the message data + * @inlen: the data length in bytes + * @out: (output) the resulting BLAKE2s hash, with length @outlen + * @outlen: length of output hash value in bytes, at most BLAKE2S_HASH_SIZE + * + * Context: Any context. + */ +static inline void blake2s(const u8 *key, size_t keylen, + const u8 *in, size_t inlen, + u8 *out, size_t outlen) { - struct blake2s_state state; + struct blake2s_ctx ctx; WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen || outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE || (!key && keylen))); - __blake2s_init(&state, outlen, key, keylen); - blake2s_update(&state, in, inlen); - blake2s_final(&state, out); + __blake2s_init(&ctx, outlen, key, keylen); + blake2s_update(&ctx, in, inlen); + blake2s_final(&ctx, out); } #endif /* _CRYPTO_BLAKE2S_H */ diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h index 38e26dff27b0..1cc301a48469 100644 --- a/include/crypto/chacha.h +++ b/include/crypto/chacha.h @@ -38,18 +38,18 @@ struct chacha_state { }; void chacha_block_generic(struct chacha_state *state, - u8 out[CHACHA_BLOCK_SIZE], int nrounds); + u8 out[at_least CHACHA_BLOCK_SIZE], int nrounds); static inline void chacha20_block(struct chacha_state *state, - u8 out[CHACHA_BLOCK_SIZE]) + u8 out[at_least CHACHA_BLOCK_SIZE]) { chacha_block_generic(state, out, 20); } void hchacha_block_generic(const struct chacha_state *state, - u32 out[HCHACHA_OUT_WORDS], int nrounds); + u32 out[at_least HCHACHA_OUT_WORDS], int nrounds); void hchacha_block(const struct chacha_state *state, - u32 out[HCHACHA_OUT_WORDS], int nrounds); + u32 out[at_least HCHACHA_OUT_WORDS], int nrounds); enum chacha_constants { /* expand 32-byte k */ CHACHA_CONSTANT_EXPA = 0x61707865U, @@ -67,8 +67,8 @@ static inline void chacha_init_consts(struct chacha_state *state) } static inline void chacha_init(struct chacha_state *state, - const u32 key[CHACHA_KEY_WORDS], - const u8 iv[CHACHA_IV_SIZE]) + const u32 key[at_least CHACHA_KEY_WORDS], + const u8 iv[at_least CHACHA_IV_SIZE]) { chacha_init_consts(state); state->x[4] = key[0]; diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h index d2ac3ff7dc1e..0f71b037702d 100644 --- a/include/crypto/chacha20poly1305.h +++ b/include/crypto/chacha20poly1305.h @@ -18,32 +18,33 @@ enum chacha20poly1305_lengths { void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, const size_t ad_len, const u64 nonce, - const u8 key[CHACHA20POLY1305_KEY_SIZE]); + const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]); bool __must_check chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, const size_t ad_len, const u64 nonce, - const u8 key[CHACHA20POLY1305_KEY_SIZE]); + const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]); void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, const size_t ad_len, - const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], - const u8 key[CHACHA20POLY1305_KEY_SIZE]); + const u8 nonce[at_least XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]); bool __must_check xchacha20poly1305_decrypt( - u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, - const size_t ad_len, const u8 
nonce[XCHACHA20POLY1305_NONCE_SIZE], - const u8 key[CHACHA20POLY1305_KEY_SIZE]); + u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u8 nonce[at_least XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]); bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len, const u8 *ad, const size_t ad_len, const u64 nonce, - const u8 key[CHACHA20POLY1305_KEY_SIZE]); + const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]); bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len, const u8 *ad, const size_t ad_len, const u64 nonce, - const u8 key[CHACHA20POLY1305_KEY_SIZE]); + const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]); bool chacha20poly1305_selftest(void); diff --git a/include/crypto/curve25519.h b/include/crypto/curve25519.h index db63a5577c00..2362b48f8741 100644 --- a/include/crypto/curve25519.h +++ b/include/crypto/curve25519.h @@ -13,24 +13,28 @@ enum curve25519_lengths { CURVE25519_KEY_SIZE = 32 }; -void curve25519_generic(u8 out[CURVE25519_KEY_SIZE], - const u8 scalar[CURVE25519_KEY_SIZE], - const u8 point[CURVE25519_KEY_SIZE]); +void curve25519_generic(u8 out[at_least CURVE25519_KEY_SIZE], + const u8 scalar[at_least CURVE25519_KEY_SIZE], + const u8 point[at_least CURVE25519_KEY_SIZE]); -bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE], - const u8 secret[CURVE25519_KEY_SIZE], - const u8 basepoint[CURVE25519_KEY_SIZE]); +bool __must_check +curve25519(u8 mypublic[at_least CURVE25519_KEY_SIZE], + const u8 secret[at_least CURVE25519_KEY_SIZE], + const u8 basepoint[at_least CURVE25519_KEY_SIZE]); -bool __must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE], - const u8 secret[CURVE25519_KEY_SIZE]); +bool __must_check +curve25519_generate_public(u8 pub[at_least CURVE25519_KEY_SIZE], + const u8 secret[at_least CURVE25519_KEY_SIZE]); -static inline void curve25519_clamp_secret(u8 secret[CURVE25519_KEY_SIZE]) +static inline void +curve25519_clamp_secret(u8 secret[at_least CURVE25519_KEY_SIZE]) { secret[0] &= 248; secret[31] = (secret[31] & 127) | 64; } -static inline void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE]) +static inline void +curve25519_generate_secret(u8 secret[at_least CURVE25519_KEY_SIZE]) { get_random_bytes_wait(secret, CURVE25519_KEY_SIZE); curve25519_clamp_secret(secret); diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h deleted file mode 100644 index 3e09e2485306..000000000000 --- a/include/crypto/internal/blake2b.h +++ /dev/null @@ -1,101 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -/* - * Helper functions for BLAKE2b implementations. - * Keep this in sync with the corresponding BLAKE2s header. 
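Referring back to the BLAKE2s changes above: the one-shot blake2s() now takes its parameters in key/input/output order. A minimal keyed-hash sketch; blake2s_mac_demo() and its arguments are illustrative:

#include <crypto/blake2s.h>

static void blake2s_mac_demo(const u8 key[BLAKE2S_KEY_SIZE],
			     const u8 *msg, size_t msg_len,
			     u8 mac[BLAKE2S_256_HASH_SIZE])
{
	/* New argument order: key/keylen, in/inlen, out/outlen. */
	blake2s(key, BLAKE2S_KEY_SIZE, msg, msg_len,
		mac, BLAKE2S_256_HASH_SIZE);
}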
- */ - -#ifndef _CRYPTO_INTERNAL_BLAKE2B_H -#define _CRYPTO_INTERNAL_BLAKE2B_H - -#include <asm/byteorder.h> -#include <crypto/blake2b.h> -#include <crypto/internal/hash.h> -#include <linux/array_size.h> -#include <linux/compiler.h> -#include <linux/build_bug.h> -#include <linux/errno.h> -#include <linux/math.h> -#include <linux/string.h> -#include <linux/types.h> - -static inline void blake2b_set_lastblock(struct blake2b_state *state) -{ - state->f[0] = -1; - state->f[1] = 0; -} - -static inline void blake2b_set_nonlast(struct blake2b_state *state) -{ - state->f[0] = 0; - state->f[1] = 0; -} - -typedef void (*blake2b_compress_t)(struct blake2b_state *state, - const u8 *block, size_t nblocks, u32 inc); - -/* Helper functions for shash implementations of BLAKE2b */ - -struct blake2b_tfm_ctx { - u8 key[BLAKE2B_BLOCK_SIZE]; - unsigned int keylen; -}; - -static inline int crypto_blake2b_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen) -{ - struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm); - - if (keylen > BLAKE2B_KEY_SIZE) - return -EINVAL; - - BUILD_BUG_ON(BLAKE2B_KEY_SIZE > BLAKE2B_BLOCK_SIZE); - - memcpy(tctx->key, key, keylen); - memset(tctx->key + keylen, 0, BLAKE2B_BLOCK_SIZE - keylen); - tctx->keylen = keylen; - - return 0; -} - -static inline int crypto_blake2b_init(struct shash_desc *desc) -{ - const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct blake2b_state *state = shash_desc_ctx(desc); - unsigned int outlen = crypto_shash_digestsize(desc->tfm); - - __blake2b_init(state, outlen, tctx->keylen); - return tctx->keylen ? - crypto_shash_update(desc, tctx->key, BLAKE2B_BLOCK_SIZE) : 0; -} - -static inline int crypto_blake2b_update_bo(struct shash_desc *desc, - const u8 *in, unsigned int inlen, - blake2b_compress_t compress) -{ - struct blake2b_state *state = shash_desc_ctx(desc); - - blake2b_set_nonlast(state); - compress(state, in, inlen / BLAKE2B_BLOCK_SIZE, BLAKE2B_BLOCK_SIZE); - return inlen - round_down(inlen, BLAKE2B_BLOCK_SIZE); -} - -static inline int crypto_blake2b_finup(struct shash_desc *desc, const u8 *in, - unsigned int inlen, u8 *out, - blake2b_compress_t compress) -{ - struct blake2b_state *state = shash_desc_ctx(desc); - u8 buf[BLAKE2B_BLOCK_SIZE]; - int i; - - memcpy(buf, in, inlen); - memset(buf + inlen, 0, BLAKE2B_BLOCK_SIZE - inlen); - blake2b_set_lastblock(state); - compress(state, buf, 1, inlen); - for (i = 0; i < ARRAY_SIZE(state->h); i++) - __cpu_to_le64s(&state->h[i]); - memcpy(out, state->h, crypto_shash_digestsize(desc->tfm)); - memzero_explicit(buf, sizeof(buf)); - return 0; -} - -#endif /* _CRYPTO_INTERNAL_BLAKE2B_H */ diff --git a/include/crypto/md5.h b/include/crypto/md5.h index c9aa5c3abc53..c47aedfe67ec 100644 --- a/include/crypto/md5.h +++ b/include/crypto/md5.h @@ -76,7 +76,7 @@ void md5_update(struct md5_ctx *ctx, const u8 *data, size_t len); * * Context: Any context. */ -void md5_final(struct md5_ctx *ctx, u8 out[MD5_DIGEST_SIZE]); +void md5_final(struct md5_ctx *ctx, u8 out[at_least MD5_DIGEST_SIZE]); /** * md5() - Compute MD5 message digest in one shot @@ -86,7 +86,7 @@ void md5_final(struct md5_ctx *ctx, u8 out[MD5_DIGEST_SIZE]); * * Context: Any context. */ -void md5(const u8 *data, size_t len, u8 out[MD5_DIGEST_SIZE]); +void md5(const u8 *data, size_t len, u8 out[at_least MD5_DIGEST_SIZE]); /** * struct hmac_md5_key - Prepared key for HMAC-MD5 @@ -173,7 +173,7 @@ static inline void hmac_md5_update(struct hmac_md5_ctx *ctx, * * Context: Any context. 
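Referring back to the curve25519 declarations annotated above, a sketch of an X25519 key agreement; x25519_demo() and its error handling are illustrative:

#include <crypto/curve25519.h>

static int x25519_demo(u8 shared[CURVE25519_KEY_SIZE],
		       const u8 peer_public[CURVE25519_KEY_SIZE])
{
	u8 secret[CURVE25519_KEY_SIZE];
	u8 my_public[CURVE25519_KEY_SIZE];

	curve25519_generate_secret(secret);	/* random bytes, then clamped */
	if (!curve25519_generate_public(my_public, secret))
		return -EIO;
	/* ... transmit my_public to the peer ... */
	if (!curve25519(shared, secret, peer_public))
		return -EIO;	/* reject degenerate shared secrets */
	/* A real caller would also wipe 'secret' when done with it. */
	return 0;
}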
*/ -void hmac_md5_final(struct hmac_md5_ctx *ctx, u8 out[MD5_DIGEST_SIZE]); +void hmac_md5_final(struct hmac_md5_ctx *ctx, u8 out[at_least MD5_DIGEST_SIZE]); /** * hmac_md5() - Compute HMAC-MD5 in one shot, using a prepared key @@ -187,7 +187,8 @@ void hmac_md5_final(struct hmac_md5_ctx *ctx, u8 out[MD5_DIGEST_SIZE]); * Context: Any context. */ void hmac_md5(const struct hmac_md5_key *key, - const u8 *data, size_t data_len, u8 out[MD5_DIGEST_SIZE]); + const u8 *data, size_t data_len, + u8 out[at_least MD5_DIGEST_SIZE]); /** * hmac_md5_usingrawkey() - Compute HMAC-MD5 in one shot, using a raw key @@ -204,6 +205,6 @@ void hmac_md5(const struct hmac_md5_key *key, */ void hmac_md5_usingrawkey(const u8 *raw_key, size_t raw_key_len, const u8 *data, size_t data_len, - u8 out[MD5_DIGEST_SIZE]); + u8 out[at_least MD5_DIGEST_SIZE]); #endif /* _CRYPTO_MD5_H */ diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h index d4daeec8da19..190beb427c6d 100644 --- a/include/crypto/poly1305.h +++ b/include/crypto/poly1305.h @@ -59,7 +59,7 @@ struct poly1305_desc_ctx { }; void poly1305_init(struct poly1305_desc_ctx *desc, - const u8 key[POLY1305_KEY_SIZE]); + const u8 key[at_least POLY1305_KEY_SIZE]); void poly1305_update(struct poly1305_desc_ctx *desc, const u8 *src, unsigned int nbytes); void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest); diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h index d2e63743e592..b28b8ef11353 100644 --- a/include/crypto/polyval.h +++ b/include/crypto/polyval.h @@ -1,14 +1,190 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Common values for the Polyval hash algorithm + * POLYVAL library API * - * Copyright 2021 Google LLC + * Copyright 2025 Google LLC */ #ifndef _CRYPTO_POLYVAL_H #define _CRYPTO_POLYVAL_H +#include <linux/string.h> +#include <linux/types.h> + #define POLYVAL_BLOCK_SIZE 16 #define POLYVAL_DIGEST_SIZE 16 +/** + * struct polyval_elem - An element of the POLYVAL finite field + * @bytes: View of the element as a byte array (unioned with @lo and @hi) + * @lo: The low 64 terms of the element's polynomial + * @hi: The high 64 terms of the element's polynomial + * + * This represents an element of the finite field GF(2^128), using the POLYVAL + * convention: little-endian byte order and natural bit order. + */ +struct polyval_elem { + union { + u8 bytes[POLYVAL_BLOCK_SIZE]; + struct { + __le64 lo; + __le64 hi; + }; + }; +}; + +/** + * struct polyval_key - Prepared key for POLYVAL + * + * This may contain just the raw key H, or it may contain precomputed key + * powers, depending on the platform's POLYVAL implementation. Use + * polyval_preparekey() to initialize this. + * + * By H^i we mean H^(i-1) * H * x^-128, with base case H^1 = H. I.e. the + * exponentiation repeats the POLYVAL dot operation, with its "extra" x^-128. + */ +struct polyval_key { +#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH +#ifdef CONFIG_ARM64 + /** @h_powers: Powers of the hash key H^8 through H^1 */ + struct polyval_elem h_powers[8]; +#elif defined(CONFIG_X86) + /** @h_powers: Powers of the hash key H^8 through H^1 */ + struct polyval_elem h_powers[8]; +#else +#error "Unhandled arch" #endif +#else /* CONFIG_CRYPTO_LIB_POLYVAL_ARCH */ + /** @h: The hash key H */ + struct polyval_elem h; +#endif /* !CONFIG_CRYPTO_LIB_POLYVAL_ARCH */ +}; + +/** + * struct polyval_ctx - Context for computing a POLYVAL value + * @key: Pointer to the prepared POLYVAL key. 
The user of the API is + * responsible for ensuring that the key lives as long as the context. + * @acc: The accumulator + * @partial: Number of data bytes processed so far modulo POLYVAL_BLOCK_SIZE + */ +struct polyval_ctx { + const struct polyval_key *key; + struct polyval_elem acc; + size_t partial; +}; + +/** + * polyval_preparekey() - Prepare a POLYVAL key + * @key: (output) The key structure to initialize + * @raw_key: The raw hash key + * + * Initialize a POLYVAL key structure from a raw key. This may be a simple + * copy, or it may involve precomputing powers of the key, depending on the + * platform's POLYVAL implementation. + * + * Context: Any context. + */ +#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH +void polyval_preparekey(struct polyval_key *key, + const u8 raw_key[POLYVAL_BLOCK_SIZE]); + +#else +static inline void polyval_preparekey(struct polyval_key *key, + const u8 raw_key[POLYVAL_BLOCK_SIZE]) +{ + /* Just a simple copy, so inline it. */ + memcpy(key->h.bytes, raw_key, POLYVAL_BLOCK_SIZE); +} +#endif + +/** + * polyval_init() - Initialize a POLYVAL context for a new message + * @ctx: The context to initialize + * @key: The key to use. Note that a pointer to the key is saved in the + * context, so the key must live at least as long as the context. + */ +static inline void polyval_init(struct polyval_ctx *ctx, + const struct polyval_key *key) +{ + *ctx = (struct polyval_ctx){ .key = key }; +} + +/** + * polyval_import_blkaligned() - Import a POLYVAL accumulator value + * @ctx: The context to initialize + * @key: The key to import. Note that a pointer to the key is saved in the + * context, so the key must live at least as long as the context. + * @acc: The accumulator value to import. + * + * This imports an accumulator that was saved by polyval_export_blkaligned(). + * The same key must be used. + */ +static inline void +polyval_import_blkaligned(struct polyval_ctx *ctx, + const struct polyval_key *key, + const struct polyval_elem *acc) +{ + *ctx = (struct polyval_ctx){ .key = key, .acc = *acc }; +} + +/** + * polyval_export_blkaligned() - Export a POLYVAL accumulator value + * @ctx: The context to export the accumulator value from + * @acc: (output) The exported accumulator value + * + * This exports the accumulator from a POLYVAL context. The number of data + * bytes processed so far must be a multiple of POLYVAL_BLOCK_SIZE. + */ +static inline void polyval_export_blkaligned(const struct polyval_ctx *ctx, + struct polyval_elem *acc) +{ + *acc = ctx->acc; +} + +/** + * polyval_update() - Update a POLYVAL context with message data + * @ctx: The context to update; must have been initialized + * @data: The message data + * @len: The data length in bytes. Doesn't need to be block-aligned. + * + * This can be called any number of times. + * + * Context: Any context. + */ +void polyval_update(struct polyval_ctx *ctx, const u8 *data, size_t len); + +/** + * polyval_final() - Finish computing a POLYVAL value + * @ctx: The context to finalize + * @out: The output value + * + * If the total data length isn't a multiple of POLYVAL_BLOCK_SIZE, then the + * final block is automatically zero-padded. + * + * After finishing, this zeroizes @ctx. So the caller does not need to do it. + * + * Context: Any context. + */ +void polyval_final(struct polyval_ctx *ctx, u8 out[POLYVAL_BLOCK_SIZE]); + +/** + * polyval() - Compute a POLYVAL value + * @key: The prepared key + * @data: The message data + * @len: The data length in bytes. Doesn't need to be block-aligned. 
+ * @out: The output value + * + * Context: Any context. + */ +static inline void polyval(const struct polyval_key *key, + const u8 *data, size_t len, + u8 out[POLYVAL_BLOCK_SIZE]) +{ + struct polyval_ctx ctx; + + polyval_init(&ctx, key); + polyval_update(&ctx, data, len); + polyval_final(&ctx, out); +} + +#endif /* _CRYPTO_POLYVAL_H */ diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h index 162a529ec841..27f08b972931 100644 --- a/include/crypto/sha1.h +++ b/include/crypto/sha1.h @@ -84,7 +84,7 @@ void sha1_update(struct sha1_ctx *ctx, const u8 *data, size_t len); * * Context: Any context. */ -void sha1_final(struct sha1_ctx *ctx, u8 out[SHA1_DIGEST_SIZE]); +void sha1_final(struct sha1_ctx *ctx, u8 out[at_least SHA1_DIGEST_SIZE]); /** * sha1() - Compute SHA-1 message digest in one shot @@ -94,7 +94,7 @@ void sha1_final(struct sha1_ctx *ctx, u8 out[SHA1_DIGEST_SIZE]); * * Context: Any context. */ -void sha1(const u8 *data, size_t len, u8 out[SHA1_DIGEST_SIZE]); +void sha1(const u8 *data, size_t len, u8 out[at_least SHA1_DIGEST_SIZE]); /** * struct hmac_sha1_key - Prepared key for HMAC-SHA1 @@ -181,7 +181,8 @@ static inline void hmac_sha1_update(struct hmac_sha1_ctx *ctx, * * Context: Any context. */ -void hmac_sha1_final(struct hmac_sha1_ctx *ctx, u8 out[SHA1_DIGEST_SIZE]); +void hmac_sha1_final(struct hmac_sha1_ctx *ctx, + u8 out[at_least SHA1_DIGEST_SIZE]); /** * hmac_sha1() - Compute HMAC-SHA1 in one shot, using a prepared key @@ -195,7 +196,8 @@ void hmac_sha1_final(struct hmac_sha1_ctx *ctx, u8 out[SHA1_DIGEST_SIZE]); * Context: Any context. */ void hmac_sha1(const struct hmac_sha1_key *key, - const u8 *data, size_t data_len, u8 out[SHA1_DIGEST_SIZE]); + const u8 *data, size_t data_len, + u8 out[at_least SHA1_DIGEST_SIZE]); /** * hmac_sha1_usingrawkey() - Compute HMAC-SHA1 in one shot, using a raw key @@ -212,6 +214,6 @@ void hmac_sha1(const struct hmac_sha1_key *key, */ void hmac_sha1_usingrawkey(const u8 *raw_key, size_t raw_key_len, const u8 *data, size_t data_len, - u8 out[SHA1_DIGEST_SIZE]); + u8 out[at_least SHA1_DIGEST_SIZE]); #endif /* _CRYPTO_SHA1_H */ diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h index e5dafb935cc8..7bb8fe169daf 100644 --- a/include/crypto/sha2.h +++ b/include/crypto/sha2.h @@ -190,7 +190,7 @@ static inline void sha224_update(struct sha224_ctx *ctx, * * Context: Any context. */ -void sha224_final(struct sha224_ctx *ctx, u8 out[SHA224_DIGEST_SIZE]); +void sha224_final(struct sha224_ctx *ctx, u8 out[at_least SHA224_DIGEST_SIZE]); /** * sha224() - Compute SHA-224 message digest in one shot @@ -200,7 +200,7 @@ void sha224_final(struct sha224_ctx *ctx, u8 out[SHA224_DIGEST_SIZE]); * * Context: Any context. */ -void sha224(const u8 *data, size_t len, u8 out[SHA224_DIGEST_SIZE]); +void sha224(const u8 *data, size_t len, u8 out[at_least SHA224_DIGEST_SIZE]); /** * struct hmac_sha224_key - Prepared key for HMAC-SHA224 @@ -287,7 +287,8 @@ static inline void hmac_sha224_update(struct hmac_sha224_ctx *ctx, * * Context: Any context. */ -void hmac_sha224_final(struct hmac_sha224_ctx *ctx, u8 out[SHA224_DIGEST_SIZE]); +void hmac_sha224_final(struct hmac_sha224_ctx *ctx, + u8 out[at_least SHA224_DIGEST_SIZE]); /** * hmac_sha224() - Compute HMAC-SHA224 in one shot, using a prepared key @@ -301,7 +302,8 @@ void hmac_sha224_final(struct hmac_sha224_ctx *ctx, u8 out[SHA224_DIGEST_SIZE]); * Context: Any context. 
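A usage sketch for the new POLYVAL library API added in <crypto/polyval.h> above. polyval_demo() and its parameters are illustrative; note that the context keeps only a pointer to the prepared key, so the key must outlive the context:

#include <crypto/polyval.h>

static void polyval_demo(const u8 raw_key[POLYVAL_BLOCK_SIZE],
			 const u8 *data, size_t len,
			 u8 out[POLYVAL_DIGEST_SIZE])
{
	struct polyval_key key;
	struct polyval_ctx ctx;

	polyval_preparekey(&key, raw_key);	/* may precompute key powers */

	/* Incremental form. */
	polyval_init(&ctx, &key);
	polyval_update(&ctx, data, len);	/* any number of times */
	polyval_final(&ctx, out);		/* zero-pads a partial final block */

	/* Equivalent one-shot form. */
	polyval(&key, data, len, out);
}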
*/ void hmac_sha224(const struct hmac_sha224_key *key, - const u8 *data, size_t data_len, u8 out[SHA224_DIGEST_SIZE]); + const u8 *data, size_t data_len, + u8 out[at_least SHA224_DIGEST_SIZE]); /** * hmac_sha224_usingrawkey() - Compute HMAC-SHA224 in one shot, using a raw key @@ -318,7 +320,7 @@ void hmac_sha224(const struct hmac_sha224_key *key, */ void hmac_sha224_usingrawkey(const u8 *raw_key, size_t raw_key_len, const u8 *data, size_t data_len, - u8 out[SHA224_DIGEST_SIZE]); + u8 out[at_least SHA224_DIGEST_SIZE]); /** * struct sha256_ctx - Context for hashing a message with SHA-256 @@ -363,7 +365,7 @@ static inline void sha256_update(struct sha256_ctx *ctx, * * Context: Any context. */ -void sha256_final(struct sha256_ctx *ctx, u8 out[SHA256_DIGEST_SIZE]); +void sha256_final(struct sha256_ctx *ctx, u8 out[at_least SHA256_DIGEST_SIZE]); /** * sha256() - Compute SHA-256 message digest in one shot @@ -373,7 +375,7 @@ void sha256_final(struct sha256_ctx *ctx, u8 out[SHA256_DIGEST_SIZE]); * * Context: Any context. */ -void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE]); +void sha256(const u8 *data, size_t len, u8 out[at_least SHA256_DIGEST_SIZE]); /** * sha256_finup_2x() - Compute two SHA-256 digests from a common initial @@ -390,8 +392,9 @@ void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE]); * Context: Any context. */ void sha256_finup_2x(const struct sha256_ctx *ctx, const u8 *data1, - const u8 *data2, size_t len, u8 out1[SHA256_DIGEST_SIZE], - u8 out2[SHA256_DIGEST_SIZE]); + const u8 *data2, size_t len, + u8 out1[at_least SHA256_DIGEST_SIZE], + u8 out2[at_least SHA256_DIGEST_SIZE]); /** * sha256_finup_2x_is_optimized() - Check if sha256_finup_2x() is using a real @@ -488,7 +491,8 @@ static inline void hmac_sha256_update(struct hmac_sha256_ctx *ctx, * * Context: Any context. */ -void hmac_sha256_final(struct hmac_sha256_ctx *ctx, u8 out[SHA256_DIGEST_SIZE]); +void hmac_sha256_final(struct hmac_sha256_ctx *ctx, + u8 out[at_least SHA256_DIGEST_SIZE]); /** * hmac_sha256() - Compute HMAC-SHA256 in one shot, using a prepared key @@ -502,7 +506,8 @@ void hmac_sha256_final(struct hmac_sha256_ctx *ctx, u8 out[SHA256_DIGEST_SIZE]); * Context: Any context. */ void hmac_sha256(const struct hmac_sha256_key *key, - const u8 *data, size_t data_len, u8 out[SHA256_DIGEST_SIZE]); + const u8 *data, size_t data_len, + u8 out[at_least SHA256_DIGEST_SIZE]); /** * hmac_sha256_usingrawkey() - Compute HMAC-SHA256 in one shot, using a raw key @@ -519,7 +524,7 @@ void hmac_sha256(const struct hmac_sha256_key *key, */ void hmac_sha256_usingrawkey(const u8 *raw_key, size_t raw_key_len, const u8 *data, size_t data_len, - u8 out[SHA256_DIGEST_SIZE]); + u8 out[at_least SHA256_DIGEST_SIZE]); /* State for the SHA-512 (and SHA-384) compression function */ struct sha512_block_state { @@ -598,7 +603,7 @@ static inline void sha384_update(struct sha384_ctx *ctx, * * Context: Any context. */ -void sha384_final(struct sha384_ctx *ctx, u8 out[SHA384_DIGEST_SIZE]); +void sha384_final(struct sha384_ctx *ctx, u8 out[at_least SHA384_DIGEST_SIZE]); /** * sha384() - Compute SHA-384 message digest in one shot @@ -608,7 +613,7 @@ void sha384_final(struct sha384_ctx *ctx, u8 out[SHA384_DIGEST_SIZE]); * * Context: Any context. 
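The sha2.h changes above only add 'at_least' bounds to existing prototypes; for reference, a minimal call to one of the annotated functions (hmac_demo() and its arguments are illustrative):

#include <crypto/sha2.h>

static void hmac_demo(const u8 *key, size_t key_len,
		      const u8 *msg, size_t msg_len)
{
	u8 mac[SHA256_DIGEST_SIZE];

	/* One-shot HMAC-SHA256 with an unprepared (raw) key. */
	hmac_sha256_usingrawkey(key, key_len, msg, msg_len, mac);
}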
*/ -void sha384(const u8 *data, size_t len, u8 out[SHA384_DIGEST_SIZE]); +void sha384(const u8 *data, size_t len, u8 out[at_least SHA384_DIGEST_SIZE]); /** * struct hmac_sha384_key - Prepared key for HMAC-SHA384 @@ -695,7 +700,8 @@ static inline void hmac_sha384_update(struct hmac_sha384_ctx *ctx, * * Context: Any context. */ -void hmac_sha384_final(struct hmac_sha384_ctx *ctx, u8 out[SHA384_DIGEST_SIZE]); +void hmac_sha384_final(struct hmac_sha384_ctx *ctx, + u8 out[at_least SHA384_DIGEST_SIZE]); /** * hmac_sha384() - Compute HMAC-SHA384 in one shot, using a prepared key @@ -709,7 +715,8 @@ void hmac_sha384_final(struct hmac_sha384_ctx *ctx, u8 out[SHA384_DIGEST_SIZE]); * Context: Any context. */ void hmac_sha384(const struct hmac_sha384_key *key, - const u8 *data, size_t data_len, u8 out[SHA384_DIGEST_SIZE]); + const u8 *data, size_t data_len, + u8 out[at_least SHA384_DIGEST_SIZE]); /** * hmac_sha384_usingrawkey() - Compute HMAC-SHA384 in one shot, using a raw key @@ -726,7 +733,7 @@ void hmac_sha384(const struct hmac_sha384_key *key, */ void hmac_sha384_usingrawkey(const u8 *raw_key, size_t raw_key_len, const u8 *data, size_t data_len, - u8 out[SHA384_DIGEST_SIZE]); + u8 out[at_least SHA384_DIGEST_SIZE]); /** * struct sha512_ctx - Context for hashing a message with SHA-512 @@ -771,7 +778,7 @@ static inline void sha512_update(struct sha512_ctx *ctx, * * Context: Any context. */ -void sha512_final(struct sha512_ctx *ctx, u8 out[SHA512_DIGEST_SIZE]); +void sha512_final(struct sha512_ctx *ctx, u8 out[at_least SHA512_DIGEST_SIZE]); /** * sha512() - Compute SHA-512 message digest in one shot @@ -781,7 +788,7 @@ void sha512_final(struct sha512_ctx *ctx, u8 out[SHA512_DIGEST_SIZE]); * * Context: Any context. */ -void sha512(const u8 *data, size_t len, u8 out[SHA512_DIGEST_SIZE]); +void sha512(const u8 *data, size_t len, u8 out[at_least SHA512_DIGEST_SIZE]); /** * struct hmac_sha512_key - Prepared key for HMAC-SHA512 @@ -868,7 +875,8 @@ static inline void hmac_sha512_update(struct hmac_sha512_ctx *ctx, * * Context: Any context. */ -void hmac_sha512_final(struct hmac_sha512_ctx *ctx, u8 out[SHA512_DIGEST_SIZE]); +void hmac_sha512_final(struct hmac_sha512_ctx *ctx, + u8 out[at_least SHA512_DIGEST_SIZE]); /** * hmac_sha512() - Compute HMAC-SHA512 in one shot, using a prepared key @@ -882,7 +890,8 @@ void hmac_sha512_final(struct hmac_sha512_ctx *ctx, u8 out[SHA512_DIGEST_SIZE]); * Context: Any context. 
*/ void hmac_sha512(const struct hmac_sha512_key *key, - const u8 *data, size_t data_len, u8 out[SHA512_DIGEST_SIZE]); + const u8 *data, size_t data_len, + u8 out[at_least SHA512_DIGEST_SIZE]); /** * hmac_sha512_usingrawkey() - Compute HMAC-SHA512 in one shot, using a raw key @@ -899,6 +908,6 @@ void hmac_sha512(const struct hmac_sha512_key *key, */ void hmac_sha512_usingrawkey(const u8 *raw_key, size_t raw_key_len, const u8 *data, size_t data_len, - u8 out[SHA512_DIGEST_SIZE]); + u8 out[at_least SHA512_DIGEST_SIZE]); #endif /* _CRYPTO_SHA2_H */ diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h index 41e1b83a6d91..c9e4182ff74f 100644 --- a/include/crypto/sha3.h +++ b/include/crypto/sha3.h @@ -1,11 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Common values for SHA-3 algorithms + * + * See also Documentation/crypto/sha3.rst */ #ifndef __CRYPTO_SHA3_H__ #define __CRYPTO_SHA3_H__ #include <linux/types.h> +#include <linux/string.h> #define SHA3_224_DIGEST_SIZE (224 / 8) #define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE) @@ -23,14 +26,321 @@ #define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE) #define SHA3_512_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_512_BLOCK_SIZE + 1 -#define SHA3_STATE_SIZE 200 +/* + * SHAKE128 and SHAKE256 actually have variable output size, but this is used to + * calculate the block size (rate) analogously to the above. + */ +#define SHAKE128_DEFAULT_SIZE (128 / 8) +#define SHAKE128_BLOCK_SIZE (200 - 2 * SHAKE128_DEFAULT_SIZE) +#define SHAKE256_DEFAULT_SIZE (256 / 8) +#define SHAKE256_BLOCK_SIZE (200 - 2 * SHAKE256_DEFAULT_SIZE) -struct shash_desc; +#define SHA3_STATE_SIZE 200 +/* + * State for the Keccak-f[1600] permutation: 25 64-bit words. + * + * We usually keep the state words as little-endian, to make absorbing and + * squeezing easier. (It means that absorbing and squeezing can just treat the + * state as a byte array.) The state words are converted to native-endian only + * temporarily by implementations of the permutation that need native-endian + * words. Of course, that conversion is a no-op on little-endian machines. + */ struct sha3_state { - u64 st[SHA3_STATE_SIZE / 8]; + union { + __le64 words[SHA3_STATE_SIZE / 8]; + u8 bytes[SHA3_STATE_SIZE]; + + u64 native_words[SHA3_STATE_SIZE / 8]; /* see comment above */ + }; +}; + +/* Internal context, shared by the digests (SHA3-*) and the XOFs (SHAKE*) */ +struct __sha3_ctx { + struct sha3_state state; + u8 digest_size; /* Digests only: the digest size in bytes */ + u8 block_size; /* Block size in bytes */ + u8 absorb_offset; /* Index of next state byte to absorb into */ + u8 squeeze_offset; /* XOFs only: index of next state byte to extract */ +}; + +void __sha3_update(struct __sha3_ctx *ctx, const u8 *in, size_t in_len); + +/** + * struct sha3_ctx - Context for SHA3-224, SHA3-256, SHA3-384, or SHA3-512 + * @ctx: private + */ +struct sha3_ctx { + struct __sha3_ctx ctx; }; -int crypto_sha3_init(struct shash_desc *desc); +/** + * sha3_zeroize_ctx() - Zeroize a SHA-3 context + * @ctx: The context to zeroize + * + * This is already called by sha3_final(). Call this explicitly when abandoning + * a context without calling sha3_final(). 
+ */ +static inline void sha3_zeroize_ctx(struct sha3_ctx *ctx) +{ + memzero_explicit(ctx, sizeof(*ctx)); +} + +/** + * struct shake_ctx - Context for SHAKE128 or SHAKE256 + * @ctx: private + */ +struct shake_ctx { + struct __sha3_ctx ctx; +}; + +/** + * shake_zeroize_ctx() - Zeroize a SHAKE context + * @ctx: The context to zeroize + * + * Call this after the last squeeze. + */ +static inline void shake_zeroize_ctx(struct shake_ctx *ctx) +{ + memzero_explicit(ctx, sizeof(*ctx)); +} + +/** + * sha3_224_init() - Initialize a context for SHA3-224 + * @ctx: The context to initialize + * + * This begins a new SHA3-224 message digest computation. + * + * Context: Any context. + */ +static inline void sha3_224_init(struct sha3_ctx *ctx) +{ + *ctx = (struct sha3_ctx){ + .ctx.digest_size = SHA3_224_DIGEST_SIZE, + .ctx.block_size = SHA3_224_BLOCK_SIZE, + }; +} + +/** + * sha3_256_init() - Initialize a context for SHA3-256 + * @ctx: The context to initialize + * + * This begins a new SHA3-256 message digest computation. + * + * Context: Any context. + */ +static inline void sha3_256_init(struct sha3_ctx *ctx) +{ + *ctx = (struct sha3_ctx){ + .ctx.digest_size = SHA3_256_DIGEST_SIZE, + .ctx.block_size = SHA3_256_BLOCK_SIZE, + }; +} + +/** + * sha3_384_init() - Initialize a context for SHA3-384 + * @ctx: The context to initialize + * + * This begins a new SHA3-384 message digest computation. + * + * Context: Any context. + */ +static inline void sha3_384_init(struct sha3_ctx *ctx) +{ + *ctx = (struct sha3_ctx){ + .ctx.digest_size = SHA3_384_DIGEST_SIZE, + .ctx.block_size = SHA3_384_BLOCK_SIZE, + }; +} + +/** + * sha3_512_init() - Initialize a context for SHA3-512 + * @ctx: The context to initialize + * + * This begins a new SHA3-512 message digest computation. + * + * Context: Any context. + */ +static inline void sha3_512_init(struct sha3_ctx *ctx) +{ + *ctx = (struct sha3_ctx){ + .ctx.digest_size = SHA3_512_DIGEST_SIZE, + .ctx.block_size = SHA3_512_BLOCK_SIZE, + }; +} + +/** + * sha3_update() - Update a SHA-3 digest context with input data + * @ctx: The context to update; must have been initialized + * @in: The input data + * @in_len: Length of the input data in bytes + * + * This can be called any number of times to add data to a SHA3-224, SHA3-256, + * SHA3-384, or SHA3-512 digest (depending on which init function was called). + * + * Context: Any context. + */ +static inline void sha3_update(struct sha3_ctx *ctx, + const u8 *in, size_t in_len) +{ + __sha3_update(&ctx->ctx, in, in_len); +} + +/** + * sha3_final() - Finish computing a SHA-3 message digest + * @ctx: The context to finalize; must have been initialized + * @out: (output) The resulting SHA3-224, SHA3-256, SHA3-384, or SHA3-512 + * message digest, matching the init function that was called. Note that + * the size differs for each one; see SHA3_*_DIGEST_SIZE. + * + * After finishing, this zeroizes @ctx. So the caller does not need to do it. + * + * Context: Any context. + */ +void sha3_final(struct sha3_ctx *ctx, u8 *out); + +/** + * shake128_init() - Initialize a context for SHAKE128 + * @ctx: The context to initialize + * + * This begins a new SHAKE128 extendable-output function (XOF) computation. + * + * Context: Any context. 
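A sketch of the new SHA-3 incremental API added above, computing SHA3-256 over two buffers; sha3_demo() and its arguments are illustrative:

#include <crypto/sha3.h>

static void sha3_demo(const u8 *part1, size_t len1,
		      const u8 *part2, size_t len2,
		      u8 digest[SHA3_256_DIGEST_SIZE])
{
	struct sha3_ctx ctx;

	sha3_256_init(&ctx);
	sha3_update(&ctx, part1, len1);
	sha3_update(&ctx, part2, len2);
	sha3_final(&ctx, digest);	/* final also zeroizes the context */
}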
+ */ +static inline void shake128_init(struct shake_ctx *ctx) +{ + *ctx = (struct shake_ctx){ + .ctx.block_size = SHAKE128_BLOCK_SIZE, + }; +} + +/** + * shake256_init() - Initialize a context for SHAKE256 + * @ctx: The context to initialize + * + * This begins a new SHAKE256 extendable-output function (XOF) computation. + * + * Context: Any context. + */ +static inline void shake256_init(struct shake_ctx *ctx) +{ + *ctx = (struct shake_ctx){ + .ctx.block_size = SHAKE256_BLOCK_SIZE, + }; +} + +/** + * shake_update() - Update a SHAKE context with input data + * @ctx: The context to update; must have been initialized + * @in: The input data + * @in_len: Length of the input data in bytes + * + * This can be called any number of times to add more input data to SHAKE128 or + * SHAKE256. This cannot be called after squeezing has begun. + * + * Context: Any context. + */ +static inline void shake_update(struct shake_ctx *ctx, + const u8 *in, size_t in_len) +{ + __sha3_update(&ctx->ctx, in, in_len); +} + +/** + * shake_squeeze() - Generate output from SHAKE128 or SHAKE256 + * @ctx: The context to squeeze; must have been initialized + * @out: Where to write the resulting output data + * @out_len: The amount of data to extract to @out in bytes + * + * This may be called multiple times. A number of consecutive squeezes laid + * end-to-end will yield the same output as one big squeeze generating the same + * total amount of output. More input cannot be provided after squeezing has + * begun. After the last squeeze, call shake_zeroize_ctx(). + * + * Context: Any context. + */ +void shake_squeeze(struct shake_ctx *ctx, u8 *out, size_t out_len); + +/** + * sha3_224() - Compute SHA3-224 digest in one shot + * @in: The input data to be digested + * @in_len: Length of the input data in bytes + * @out: The buffer into which the digest will be stored + * + * Convenience function that computes a SHA3-224 digest. Use this instead of + * the incremental API if you're able to provide all the input at once. + * + * Context: Any context. + */ +void sha3_224(const u8 *in, size_t in_len, u8 out[SHA3_224_DIGEST_SIZE]); + +/** + * sha3_256() - Compute SHA3-256 digest in one shot + * @in: The input data to be digested + * @in_len: Length of the input data in bytes + * @out: The buffer into which the digest will be stored + * + * Convenience function that computes a SHA3-256 digest. Use this instead of + * the incremental API if you're able to provide all the input at once. + * + * Context: Any context. + */ +void sha3_256(const u8 *in, size_t in_len, u8 out[SHA3_256_DIGEST_SIZE]); + +/** + * sha3_384() - Compute SHA3-384 digest in one shot + * @in: The input data to be digested + * @in_len: Length of the input data in bytes + * @out: The buffer into which the digest will be stored + * + * Convenience function that computes a SHA3-384 digest. Use this instead of + * the incremental API if you're able to provide all the input at once. + * + * Context: Any context. + */ +void sha3_384(const u8 *in, size_t in_len, u8 out[SHA3_384_DIGEST_SIZE]); + +/** + * sha3_512() - Compute SHA3-512 digest in one shot + * @in: The input data to be digested + * @in_len: Length of the input data in bytes + * @out: The buffer into which the digest will be stored + * + * Convenience function that computes a SHA3-512 digest. Use this instead of + * the incremental API if you're able to provide all the input at once. + * + * Context: Any context. 
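A sketch of the SHAKE API added above, absorbing input and then squeezing output in pieces; shake_demo() and the 64-byte output split are illustrative:

#include <crypto/sha3.h>

static void shake_demo(const u8 *in, size_t in_len, u8 out[64])
{
	struct shake_ctx ctx;

	shake256_init(&ctx);
	shake_update(&ctx, in, in_len);		/* no more updates once squeezing starts */
	shake_squeeze(&ctx, out, 32);		/* first 32 bytes of output */
	shake_squeeze(&ctx, out + 32, 32);	/* next 32 bytes */
	shake_zeroize_ctx(&ctx);		/* caller zeroizes after the last squeeze */
}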
+ */ +void sha3_512(const u8 *in, size_t in_len, u8 out[SHA3_512_DIGEST_SIZE]); + +/** + * shake128() - Compute SHAKE128 in one shot + * @in: The input data to be used + * @in_len: Length of the input data in bytes + * @out: The buffer into which the output will be stored + * @out_len: Length of the output to produce in bytes + * + * Convenience function that computes SHAKE128 in one shot. Use this instead of + * the incremental API if you're able to provide all the input at once as well + * as receive all the output at once. All output lengths are supported. + * + * Context: Any context. + */ +void shake128(const u8 *in, size_t in_len, u8 *out, size_t out_len); + +/** + * shake256() - Compute SHAKE256 in one shot + * @in: The input data to be used + * @in_len: Length of the input data in bytes + * @out: The buffer into which the output will be stored + * @out_len: Length of the output to produce in bytes + * + * Convenience function that computes SHAKE256 in one shot. Use this instead of + * the incremental API if you're able to provide all the input at once as well + * as receive all the output at once. All output lengths are supported. + * + * Context: Any context. + */ +void shake256(const u8 *in, size_t in_len, u8 *out, size_t out_len); -#endif +#endif /* __CRYPTO_SHA3_H__ */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 607db773b672..fbf0c3a65f59 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -8,6 +8,7 @@ #ifndef _LINUX_ACPI_H #define _LINUX_ACPI_H +#include <linux/cleanup.h> #include <linux/errno.h> #include <linux/ioport.h> /* for struct resource */ #include <linux/resource_ext.h> @@ -221,6 +222,17 @@ void acpi_reserve_initial_tables (void); void acpi_table_init_complete (void); int acpi_table_init (void); +static inline struct acpi_table_header *acpi_get_table_pointer(char *signature, u32 instance) +{ + struct acpi_table_header *table; + int status = acpi_get_table(signature, instance, &table); + + if (ACPI_FAILURE(status)) + return ERR_PTR(-ENOENT); + return table; +} +DEFINE_FREE(acpi_put_table, struct acpi_table_header *, if (!IS_ERR_OR_NULL(_T)) acpi_put_table(_T)) + int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init_or_acpilib acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, @@ -755,7 +767,6 @@ int acpi_reconfig_notifier_unregister(struct notifier_block *nb); int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count); int acpi_gtdt_map_ppi(int type); bool acpi_gtdt_c3stop(int type); -int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count); #endif #ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER @@ -1146,12 +1157,7 @@ struct acpi_s2idle_dev_ops { #if defined(CONFIG_SUSPEND) && defined(CONFIG_X86) int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg); void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg); -int acpi_get_lps0_constraint(struct acpi_device *adev); #else /* CONFIG_SUSPEND && CONFIG_X86 */ -static inline int acpi_get_lps0_constraint(struct device *dev) -{ - return ACPI_STATE_UNKNOWN; -} static inline int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg) { return -ENODEV; @@ -1349,9 +1355,6 @@ acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid, int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, void **valptr); -struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, - struct fwnode_handle *child); - struct acpi_probe_entry; typedef bool 
(*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, struct acpi_probe_entry *); @@ -1451,13 +1454,6 @@ static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode, } static inline struct fwnode_handle * -acpi_get_next_subnode(const struct fwnode_handle *fwnode, - struct fwnode_handle *child) -{ - return NULL; -} - -static inline struct fwnode_handle * acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle *prev) { @@ -1548,6 +1544,9 @@ int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_cluster(unsigned int cpu); int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); +void acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus); +int find_acpi_cache_level_from_id(u32 cache_id); +int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { @@ -1569,6 +1568,17 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) { return -EINVAL; } +static inline void acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, + cpumask_t *cpus) { } +static inline int find_acpi_cache_level_from_id(u32 cache_id) +{ + return -ENOENT; +} +static inline int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, + cpumask_t *cpus) +{ + return -ENOENT; +} #endif void acpi_arch_init(void); diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index d72d6e5aa200..0c2a8b846c20 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -89,6 +89,21 @@ void remove_cpu_topology(unsigned int cpuid); void reset_cpu_topology(void); int parse_acpi_topology(void); void freq_inv_set_max_ratio(int cpu, u64 max_rate); -#endif + +/* + * Architectures like ARM64 don't have reliable architectural way to get SMT + * information and depend on the firmware (ACPI/OF) report. Non-SMT core won't + * initialize thread_id so we can use this to detect the SMT implementation. + */ +static inline bool topology_core_has_smt(int cpu) +{ + return cpu_topology[cpu].thread_id != -1; +} + +#else + +static inline bool topology_core_has_smt(int cpu) { return false; } + +#endif /* CONFIG_GENERIC_ARCH_TOPOLOGY */ #endif /* _LINUX_ARCH_TOPOLOGY_H_ */ diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h new file mode 100644 index 000000000000..7f00c5285a32 --- /dev/null +++ b/include/linux/arm_mpam.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2025 Arm Ltd. */ + +#ifndef __LINUX_ARM_MPAM_H +#define __LINUX_ARM_MPAM_H + +#include <linux/acpi.h> +#include <linux/types.h> + +struct mpam_msc; + +enum mpam_msc_iface { + MPAM_IFACE_MMIO, /* a real MPAM MSC */ + MPAM_IFACE_PCC, /* a fake MPAM MSC */ +}; + +enum mpam_class_types { + MPAM_CLASS_CACHE, /* Caches, e.g. L2, L3 */ + MPAM_CLASS_MEMORY, /* Main memory */ + MPAM_CLASS_UNKNOWN, /* Everything else, e.g. 
SMMU */ +}; + +#define MPAM_CLASS_ID_DEFAULT 255 + +#ifdef CONFIG_ACPI_MPAM +int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc); + +int acpi_mpam_count_msc(void); +#else +static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc) +{ + return -EINVAL; +} + +static inline int acpi_mpam_count_msc(void) { return -EINVAL; } +#endif + +#ifdef CONFIG_ARM64_MPAM_DRIVER +int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, int component_id); +#else +static inline int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, + int component_id) +{ + return -EINVAL; +} +#endif + +/** + * mpam_register_requestor() - Register a requestor with the MPAM driver + * @partid_max: The maximum PARTID value the requestor can generate. + * @pmg_max: The maximum PMG value the requestor can generate. + * + * Registers a requestor with the MPAM driver to ensure the chosen system-wide + * minimum PARTID and PMG values will allow the requestors features to be used. + * + * Returns an error if the registration is too late, and a larger PARTID/PMG + * value has been advertised to user-space. In this case the requestor should + * not use its MPAM features. Returns 0 on success. + */ +int mpam_register_requestor(u16 partid_max, u8 pmg_max); + +#endif /* __LINUX_ARM_MPAM_H */ diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h index b3705e8bbe2b..55a44199de87 100644 --- a/include/linux/byteorder/generic.h +++ b/include/linux/byteorder/generic.h @@ -173,6 +173,22 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words) } } +static inline void le64_to_cpu_array(u64 *buf, unsigned int words) +{ + while (words--) { + __le64_to_cpus(buf); + buf++; + } +} + +static inline void cpu_to_le64_array(u64 *buf, unsigned int words) +{ + while (words--) { + __cpu_to_le64s(buf); + buf++; + } +} + static inline void memcpy_from_le32(u32 *dst, const __le32 *src, size_t words) { size_t i; diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 0a1b9598940d..3eac51d68426 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -393,6 +393,21 @@ struct ftrace_likely_data { #define __counted_by_be(member) __counted_by(member) #endif +/* + * This designates the minimum number of elements a passed array parameter must + * have. For example: + * + * void some_function(u8 param[at_least 7]); + * + * If a caller passes an array with fewer than 7 elements, the compiler will + * emit a warning. + */ +#ifndef __CHECKER__ +#define at_least static +#else +#define at_least +#endif + /* Do not trap wrapping arithmetic within an annotated function. 
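A sketch of what the new 'at_least' annotation from <linux/compiler_types.h> above expresses (it expands to C's 'static' array-parameter qualifier outside sparse); fill_nonce() and the sizes are illustrative:

#include <linux/compiler_types.h>
#include <linux/string.h>
#include <linux/types.h>

/* The caller must pass an array with at least 12 elements. */
static void fill_nonce(u8 out[at_least 12])
{
	memset(out, 0, 12);
}

static void at_least_demo(void)
{
	u8 big[16];
	u8 small[8];

	fill_nonce(big);	/* fine: 16 >= 12 */
	fill_nonce(small);	/* compiler can warn: only 8 elements */
}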
*/ #ifdef CONFIG_UBSAN_INTEGER_WRAP # define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow"))) diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index a9ee4fe55dcf..4073690504a7 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -248,7 +248,8 @@ extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev, u64 latency_limit_ns); extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv, - struct cpuidle_device *dev); + struct cpuidle_device *dev, + u64 latency_limit_ns); extern void cpuidle_use_deepest_state(u64 latency_limit_ns); #else static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, @@ -256,7 +257,8 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, u64 latency_limit_ns) {return -ENODEV; } static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv, - struct cpuidle_device *dev) + struct cpuidle_device *dev, + u64 latency_limit_ns) {return -ENODEV; } static inline void cpuidle_use_deepest_state(u64 latency_limit_ns) { diff --git a/include/linux/devfreq-governor.h b/include/linux/devfreq-governor.h new file mode 100644 index 000000000000..dfdd0160a29f --- /dev/null +++ b/include/linux/devfreq-governor.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * governor.h - internal header for devfreq governors. + * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham <myungjoo.ham@samsung.com> + * + * This header is for devfreq governors + */ + +#ifndef __LINUX_DEVFREQ_DEVFREQ_H__ +#define __LINUX_DEVFREQ_DEVFREQ_H__ + +#include <linux/devfreq.h> + +#define DEVFREQ_NAME_LEN 16 + +#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev) + +/* Devfreq events */ +#define DEVFREQ_GOV_START 0x1 +#define DEVFREQ_GOV_STOP 0x2 +#define DEVFREQ_GOV_UPDATE_INTERVAL 0x3 +#define DEVFREQ_GOV_SUSPEND 0x4 +#define DEVFREQ_GOV_RESUME 0x5 + +#define DEVFREQ_MIN_FREQ 0 +#define DEVFREQ_MAX_FREQ ULONG_MAX + +/* + * Definition of the governor feature flags + * - DEVFREQ_GOV_FLAG_IMMUTABLE + * : This governor is never changeable to other governors. + * - DEVFREQ_GOV_FLAG_IRQ_DRIVEN + * : The devfreq won't schedule the work for this governor. + */ +#define DEVFREQ_GOV_FLAG_IMMUTABLE BIT(0) +#define DEVFREQ_GOV_FLAG_IRQ_DRIVEN BIT(1) + +/* + * Definition of governor attribute flags except for common sysfs attributes + * - DEVFREQ_GOV_ATTR_POLLING_INTERVAL + * : Indicate polling_interval sysfs attribute + * - DEVFREQ_GOV_ATTR_TIMER + * : Indicate timer sysfs attribute + */ +#define DEVFREQ_GOV_ATTR_POLLING_INTERVAL BIT(0) +#define DEVFREQ_GOV_ATTR_TIMER BIT(1) + +/** + * struct devfreq_governor - Devfreq policy governor + * @node: list node - contains registered devfreq governors + * @name: Governor's name + * @attrs: Governor's sysfs attribute flags + * @flags: Governor's feature flags + * @get_target_freq: Returns desired operating frequency for the device. + * Basically, get_target_freq will run + * devfreq_dev_profile.get_dev_status() to get the + * status of the device (load = busy_time / total_time). + * @event_handler: Callback for devfreq core framework to notify events + * to governors. Events include per device governor + * init and exit, opp changes out of devfreq, suspend + * and resume of per device devfreq during device idle. + * + * Note that the callbacks are called with devfreq->lock locked by devfreq. 
+ */ +struct devfreq_governor { + struct list_head node; + + const char name[DEVFREQ_NAME_LEN]; + const u64 attrs; + const u64 flags; + int (*get_target_freq)(struct devfreq *this, unsigned long *freq); + int (*event_handler)(struct devfreq *devfreq, + unsigned int event, void *data); +}; + +void devfreq_monitor_start(struct devfreq *devfreq); +void devfreq_monitor_stop(struct devfreq *devfreq); +void devfreq_monitor_suspend(struct devfreq *devfreq); +void devfreq_monitor_resume(struct devfreq *devfreq); +void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay); + +int devfreq_add_governor(struct devfreq_governor *governor); +int devfreq_remove_governor(struct devfreq_governor *governor); + +int devm_devfreq_add_governor(struct device *dev, + struct devfreq_governor *governor); + +int devfreq_update_status(struct devfreq *devfreq, unsigned long freq); +int devfreq_update_target(struct devfreq *devfreq, unsigned long freq); +void devfreq_get_freq_range(struct devfreq *devfreq, unsigned long *min_freq, + unsigned long *max_freq); + +static inline int devfreq_update_stats(struct devfreq *df) +{ + if (!df->profile->get_dev_status) + return -EINVAL; + + return df->profile->get_dev_status(df->dev.parent, &df->last_status); +} +#endif /* __LINUX_DEVFREQ_DEVFREQ_H__ */ diff --git a/include/linux/efi.h b/include/linux/efi.h index a98cc39e7aaa..b23ff8b83219 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1126,6 +1126,8 @@ static inline bool efi_runtime_disabled(void) { return true; } extern void efi_call_virt_check_flags(unsigned long flags, const void *caller); extern unsigned long efi_call_virt_save_flags(void); +void efi_runtime_assert_lock_held(void); + enum efi_secureboot_mode { efi_secureboot_mode_unset, efi_secureboot_mode_unknown, diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 61d50571ad88..43aa6153dc57 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -54,6 +54,8 @@ struct em_perf_table { /** * struct em_perf_domain - Performance domain * @em_table: Pointer to the runtime modifiable em_perf_table + * @node: node in em_pd_list (in energy_model.c) + * @id: A unique ID number for each performance domain * @nr_perf_states: Number of performance states * @min_perf_state: Minimum allowed Performance State index * @max_perf_state: Maximum allowed Performance State index @@ -71,6 +73,8 @@ struct em_perf_table { */ struct em_perf_domain { struct em_perf_table __rcu *em_table; + struct list_head node; + int id; int nr_perf_states; int min_perf_state; int max_perf_state; diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 32884c9721e5..0a8c6c4d1a82 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -22,14 +22,18 @@ extern bool pm_nosig_freezing; /* PM nosig freezing in effect */ extern unsigned int freeze_timeout_msecs; /* - * Check if a process has been frozen + * Check if a process has been frozen for PM or cgroup1 freezer. Note that + * cgroup2 freezer uses the job control mechanism and does not interact with + * the PM freezer. */ extern bool frozen(struct task_struct *p); extern bool freezing_slow_path(struct task_struct *p); /* - * Check if there is a request to freeze a process + * Check if there is a request to freeze a task from PM or cgroup1 freezer. + * Note that cgroup2 freezer uses the job control mechanism and does not + * interact with the PM freezer. 
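Returning to the devfreq governor interface above, here is a hedged sketch of a trivial governor built on this header, loosely modelled on the in-tree performance governor; the "demo_max" name and module boilerplate are illustrative only, and the header is assumed to be included as <linux/devfreq-governor.h> as added by this patch:

#include <linux/devfreq.h>
#include <linux/devfreq-governor.h>
#include <linux/module.h>

/* Always request the maximum frequency; devfreq clamps the value to the
 * current [min_freq, max_freq] range before changing the rate.
 */
static int demo_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	*freq = DEVFREQ_MAX_FREQ;
	return 0;
}

static int demo_event_handler(struct devfreq *devfreq,
			      unsigned int event, void *data)
{
	/* A static policy needs no periodic sampling; a real governor would
	 * typically kick a re-evaluation on DEVFREQ_GOV_START.
	 */
	return 0;
}

static struct devfreq_governor demo_governor = {
	.name = "demo_max",
	.get_target_freq = demo_get_target_freq,
	.event_handler = demo_event_handler,
};

static int __init demo_governor_init(void)
{
	return devfreq_add_governor(&demo_governor);
}
module_init(demo_governor_init);

static void __exit demo_governor_exit(void)
{
	devfreq_remove_governor(&demo_governor);
}
module_exit(demo_governor_exit);
MODULE_LICENSE("GPL");
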
*/ static inline bool freezing(struct task_struct *p) { @@ -63,9 +67,9 @@ extern bool freeze_task(struct task_struct *p); extern bool set_freezable(void); #ifdef CONFIG_CGROUP_FREEZER -extern bool cgroup_freezing(struct task_struct *task); +extern bool cgroup1_freezing(struct task_struct *task); #else /* !CONFIG_CGROUP_FREEZER */ -static inline bool cgroup_freezing(struct task_struct *task) +static inline bool cgroup1_freezing(struct task_struct *task) { return false; } diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 71ac78b9f834..11cab07f322a 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -11,7 +11,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); -void huge_pmd_set_accessed(struct vm_fault *vmf); +bool huge_pmd_set_accessed(struct vm_fault *vmf); int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, struct vm_area_struct *vma); diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index c0397423d3a8..e9ade2ff4af6 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -152,7 +152,7 @@ struct rapl_if_priv { union rapl_reg reg_unit; union rapl_reg regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX]; int limits[RAPL_DOMAIN_MAX]; - int (*read_raw)(int id, struct reg_action *ra); + int (*read_raw)(int id, struct reg_action *ra, bool atomic); int (*write_raw)(int id, struct reg_action *ra); void *defaults; void *rpi; diff --git a/include/linux/memory.h b/include/linux/memory.h index 0c214256216f..ba1515160894 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -96,17 +96,8 @@ int set_memory_block_size_order(unsigned int order); #define MEM_GOING_ONLINE (1<<3) #define MEM_CANCEL_ONLINE (1<<4) #define MEM_CANCEL_OFFLINE (1<<5) -#define MEM_PREPARE_ONLINE (1<<6) -#define MEM_FINISH_OFFLINE (1<<7) struct memory_notify { - /* - * The altmap_start_pfn and altmap_nr_pages fields are designated for - * specifying the altmap range and are exclusively intended for use in - * MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers. - */ - unsigned long altmap_start_pfn; - unsigned long altmap_nr_pages; unsigned long start_pfn; unsigned long nr_pages; }; diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 23f038a16231..f2f16cdd73ee 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -58,22 +58,6 @@ typedef int __bitwise mhp_t; * implies the node id (nid). */ #define MHP_NID_IS_MGID ((__force mhp_t)BIT(2)) -/* - * The hotplugged memory is completely inaccessible while the memory is - * offline. The memory provider will handle MEM_PREPARE_ONLINE / - * MEM_FINISH_OFFLINE notifications and make the memory accessible. - * - * This flag is only relevant when used along with MHP_MEMMAP_ON_MEMORY, - * because the altmap cannot be written (e.g., poisoned) when adding - * memory -- before it is set online. - * - * This allows for adding memory with an altmap that is not currently - * made available by a hypervisor. When onlining that memory, the - * hypervisor can be instructed to make that memory available, and - * the onlining phase will not require any memory allocations, which is - * helpful in low-memory situations. 
- */ -#define MHP_OFFLINE_INACCESSIBLE ((__force mhp_t)BIT(3)) /* * Extended parameters for memory hotplug: @@ -123,7 +107,7 @@ extern void adjust_present_page_count(struct page *page, long nr_pages); /* VM interface that may be used by firmware interface */ extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, - struct zone *zone, bool mhp_off_inaccessible); + struct zone *zone); extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); extern int online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone, struct memory_group *group); diff --git a/include/linux/memremap.h b/include/linux/memremap.h index e5951ba12a28..30c7aecbd245 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -25,7 +25,6 @@ struct vmem_altmap { unsigned long free; unsigned long align; unsigned long alloc; - bool inaccessible; }; /* diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 12d90360f6db..43c854a273c3 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -52,7 +52,7 @@ __section(".discard") __attribute__((unused)) /* - * s390 and alpha modules require percpu variables to be defined as + * alpha modules require percpu variables to be defined as * weak to force the compiler to generate GOT based external * references for them. This is necessary because percpu sections * will be located outside of the usually addressable area. diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index bab26a7d79f4..52b37f7bdbf9 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -119,6 +119,7 @@ struct arm_pmu { /* PMUv3 only */ int pmuver; + bool has_smt; u64 reg_pmmir; u64 reg_brbidr; #define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 32e8457ad535..ee3148ef87f6 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1232,6 +1232,10 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) #define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address) #endif +#ifndef flush_tlb_fix_spurious_fault_pmd +#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp) do { } while (0) +#endif + /* * When walking page tables, get the address of the next boundary, * or the end address of the range if that comes earlier. 
Although no diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index ad66333ce85c..93c945331f39 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -234,6 +234,7 @@ extern int platform_device_add_data(struct platform_device *pdev, extern int platform_device_add(struct platform_device *pdev); extern void platform_device_del(struct platform_device *pdev); extern void platform_device_put(struct platform_device *pdev); +DEFINE_FREE(platform_device_put, struct platform_device *, if (_T) platform_device_put(_T)) struct platform_driver { int (*probe)(struct platform_device *); diff --git a/include/linux/pm.h b/include/linux/pm.h index cc7b2dc28574..7f69f739f613 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -25,11 +25,12 @@ extern void (*pm_power_off)(void); struct device; /* we have a circular dep with device.h */ #ifdef CONFIG_VT_CONSOLE_SLEEP -extern void pm_vt_switch_required(struct device *dev, bool required); +extern int pm_vt_switch_required(struct device *dev, bool required); extern void pm_vt_switch_unregister(struct device *dev); #else -static inline void pm_vt_switch_required(struct device *dev, bool required) +static inline int pm_vt_switch_required(struct device *dev, bool required) { + return 0; } static inline void pm_vt_switch_unregister(struct device *dev) { @@ -507,6 +508,7 @@ const struct dev_pm_ops name = { \ * RECOVER Creation of a hibernation image or restoration of the main * memory contents from a hibernation image has failed, call * ->thaw() and ->complete() for all devices. + * POWEROFF System will poweroff, call ->poweroff() for all devices. * * The following PM_EVENT_ messages are defined for internal use by * kernel subsystems. They are never issued by the PM core. 
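Looping back to the DEFINE_FREE() hook registered for platform_device_put() in platform_device.h above, a small hedged sketch of the scope-based cleanup it enables; the helper name is hypothetical, and the lookup via of_find_device_by_node() is just one plausible source of a platform_device reference:

#include <linux/cleanup.h>
#include <linux/ioport.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static int demo_check_companion(struct device_node *np)
{
	/* The reference returned by of_find_device_by_node() is dropped
	 * automatically on every return path below.
	 */
	struct platform_device *pdev __free(platform_device_put) =
			of_find_device_by_node(np);

	if (!pdev)
		return -ENODEV;

	if (!platform_get_resource(pdev, IORESOURCE_MEM, 0))
		return -ENXIO;

	return 0;
}
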
@@ -537,6 +539,7 @@ const struct dev_pm_ops name = { \ #define PM_EVENT_USER 0x0100 #define PM_EVENT_REMOTE 0x0200 #define PM_EVENT_AUTO 0x0400 +#define PM_EVENT_POWEROFF 0x0800 #define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) #define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND) @@ -551,6 +554,7 @@ const struct dev_pm_ops name = { \ #define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) #define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) #define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) +#define PMSG_POWEROFF ((struct pm_message){ .event = PM_EVENT_POWEROFF, }) #define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, }) #define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) #define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index f67a2cb7d781..93ba0143ca47 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -153,6 +153,7 @@ enum genpd_sync_state { }; struct dev_power_governor { + bool (*system_power_down_ok)(struct dev_pm_domain *domain); bool (*power_down_ok)(struct dev_pm_domain *domain); bool (*suspend_ok)(struct device *dev); }; diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 4a69d4af3ff8..6cea4455f867 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -162,6 +162,15 @@ static inline void cpu_latency_qos_update_request(struct pm_qos_request *req, static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) {} #endif +#ifdef CONFIG_PM_QOS_CPU_SYSTEM_WAKEUP +s32 cpu_wakeup_latency_qos_limit(void); +#else +static inline s32 cpu_wakeup_latency_qos_limit(void) +{ + return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; +} +#endif + #ifdef CONFIG_PM enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 0b436e15f4cd..911d7a4d32c1 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -637,6 +637,30 @@ DEFINE_GUARD_COND(pm_runtime_active_auto, _try, DEFINE_GUARD_COND(pm_runtime_active_auto, _try_enabled, pm_runtime_resume_and_get(_T), _RET == 0) +/* ACQUIRE() wrapper macros for the guards defined above. */ + +#define PM_RUNTIME_ACQUIRE(_dev, _var) \ + ACQUIRE(pm_runtime_active_try, _var)(_dev) + +#define PM_RUNTIME_ACQUIRE_AUTOSUSPEND(_dev, _var) \ + ACQUIRE(pm_runtime_active_auto_try, _var)(_dev) + +#define PM_RUNTIME_ACQUIRE_IF_ENABLED(_dev, _var) \ + ACQUIRE(pm_runtime_active_try_enabled, _var)(_dev) + +#define PM_RUNTIME_ACQUIRE_IF_ENABLED_AUTOSUSPEND(_dev, _var) \ + ACQUIRE(pm_runtime_active_auto_try_enabled, _var)(_dev) + +/* + * ACQUIRE_ERR() wrapper macro for guard pm_runtime_active. + * + * Always check PM_RUNTIME_ACQUIRE_ERR() after using one of the + * PM_RUNTIME_ACQUIRE*() macros defined above (yes, it can be used with + * any of them) and if it is nonzero, avoid accessing the given device. + */ +#define PM_RUNTIME_ACQUIRE_ERR(_var_ptr) \ + ACQUIRE_ERR(pm_runtime_active, _var_ptr) + /** * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0. * @dev: Target device. 
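To make the PM_RUNTIME_ACQUIRE()/PM_RUNTIME_ACQUIRE_ERR() pairing described above concrete, a minimal sketch; the register accessor is hypothetical:

#include <linux/pm_runtime.h>

static int demo_read_status(struct device *dev, u32 *val)
{
	PM_RUNTIME_ACQUIRE(dev, pm_guard);
	int ret = PM_RUNTIME_ACQUIRE_ERR(&pm_guard);

	if (ret)
		return ret;	/* device was not resumed, do not touch it */

	*val = demo_hw_read_status(dev);	/* hypothetical MMIO accessor */
	return 0;	/* usage counter dropped when pm_guard leaves scope */
}
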
diff --git a/include/linux/prandom.h b/include/linux/prandom.h index f2ed5b72b3d6..ff7dcc3fa105 100644 --- a/include/linux/prandom.h +++ b/include/linux/prandom.h @@ -47,10 +47,4 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed) state->s4 = __seed(i, 128U); } -/* Pseudo random number generator from numerical recipes. */ -static inline u32 next_pseudo_random32(u32 seed) -{ - return seed * 1664525 + 1013904223; -} - #endif diff --git a/include/linux/random.h b/include/linux/random.h index 333cecfca93f..8a8064dc3970 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -130,21 +130,6 @@ static inline int get_random_bytes_wait(void *buf, size_t nbytes) return ret; } -#define declare_get_random_var_wait(name, ret_type) \ - static inline int get_random_ ## name ## _wait(ret_type *out) { \ - int ret = wait_for_random_bytes(); \ - if (unlikely(ret)) \ - return ret; \ - *out = get_random_ ## name(); \ - return 0; \ - } -declare_get_random_var_wait(u8, u8) -declare_get_random_var_wait(u16, u16) -declare_get_random_var_wait(u32, u32) -declare_get_random_var_wait(u64, u32) -declare_get_random_var_wait(long, unsigned long) -#undef declare_get_random_var - #ifdef CONFIG_SMP int random_prepare_cpu(unsigned int cpu); int random_online_cpu(unsigned int cpu); diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 82904291c2b8..370f8df2fdb4 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -179,7 +179,8 @@ TRACE_EVENT(pstate_sample, { PM_EVENT_HIBERNATE, "hibernate" }, \ { PM_EVENT_THAW, "thaw" }, \ { PM_EVENT_RESTORE, "restore" }, \ - { PM_EVENT_RECOVER, "recover" }) + { PM_EVENT_RECOVER, "recover" }, \ + { PM_EVENT_POWEROFF, "poweroff" }) DEFINE_EVENT(cpu, cpu_frequency, diff --git a/include/uapi/linux/energy_model.h b/include/uapi/linux/energy_model.h new file mode 100644 index 000000000000..4ec4c0eabbbb --- /dev/null +++ b/include/uapi/linux/energy_model.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/em.yaml */ +/* YNL-GEN uapi header */ + +#ifndef _UAPI_LINUX_ENERGY_MODEL_H +#define _UAPI_LINUX_ENERGY_MODEL_H + +#define EM_FAMILY_NAME "em" +#define EM_FAMILY_VERSION 1 + +enum { + EM_A_PDS_PD = 1, + + __EM_A_PDS_MAX, + EM_A_PDS_MAX = (__EM_A_PDS_MAX - 1) +}; + +enum { + EM_A_PD_PAD = 1, + EM_A_PD_PD_ID, + EM_A_PD_FLAGS, + EM_A_PD_CPUS, + + __EM_A_PD_MAX, + EM_A_PD_MAX = (__EM_A_PD_MAX - 1) +}; + +enum { + EM_A_PD_TABLE_PD_ID = 1, + EM_A_PD_TABLE_PS, + + __EM_A_PD_TABLE_MAX, + EM_A_PD_TABLE_MAX = (__EM_A_PD_TABLE_MAX - 1) +}; + +enum { + EM_A_PS_PAD = 1, + EM_A_PS_PERFORMANCE, + EM_A_PS_FREQUENCY, + EM_A_PS_POWER, + EM_A_PS_COST, + EM_A_PS_FLAGS, + + __EM_A_PS_MAX, + EM_A_PS_MAX = (__EM_A_PS_MAX - 1) +}; + +enum { + EM_CMD_GET_PDS = 1, + EM_CMD_GET_PD_TABLE, + EM_CMD_PD_CREATED, + EM_CMD_PD_UPDATED, + EM_CMD_PD_DELETED, + + __EM_CMD_MAX, + EM_CMD_MAX = (__EM_CMD_MAX - 1) +}; + +#define EM_MCGRP_EVENT "event" + +#endif /* _UAPI_LINUX_ENERGY_MODEL_H */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index d292f96bc06f..c44a8fb3e418 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -382,6 +382,7 @@ enum perf_event_read_format { #define PERF_ATTR_SIZE_VER6 120 /* Add: aux_sample_size */ #define PERF_ATTR_SIZE_VER7 128 /* Add: sig_data */ #define PERF_ATTR_SIZE_VER8 136 /* Add: config3 */ +#define PERF_ATTR_SIZE_VER9 
144 /* add: config4 */ /* * 'struct perf_event_attr' contains various attributes that define @@ -545,6 +546,7 @@ struct perf_event_attr { __u64 sig_data; __u64 config3; /* extension of config2 */ + __u64 config4; /* extension of config3 */ }; /* |
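Finally, a hedged user-space sketch showing where the new config4 field and PERF_ATTR_SIZE_VER9 slot into perf_event_open(); config4's meaning is PMU-specific, so the event encodings below are placeholders:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_raw_event(__u64 config, __u64 config4)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = PERF_ATTR_SIZE_VER9;	/* 144: struct now ends at config4 */
	attr.config = config;
	attr.config4 = config4;			/* PMU-specific extension of config3 */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* Placeholder target: current task, any CPU, no group leader, no flags. */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}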
