Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                               35
-rw-r--r--  crypto/Makefile                               9
-rw-r--r--  crypto/aead.c                                20
-rw-r--r--  crypto/aegis128-neon.c                       33
-rw-r--r--  crypto/af_alg.c                               5
-rw-r--r--  crypto/ahash.c                               18
-rw-r--r--  crypto/algif_hash.c                           3
-rw-r--r--  crypto/algif_rng.c                            3
-rw-r--r--  crypto/ansi_cprng.c                         474
-rw-r--r--  crypto/asymmetric_keys/asymmetric_type.c     14
-rw-r--r--  crypto/asymmetric_keys/restrict.c             7
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c     2
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c      2
-rw-r--r--  crypto/authenc.c                             75
-rw-r--r--  crypto/blake2b.c                            111
-rw-r--r--  crypto/blake2b_generic.c                    192
-rw-r--r--  crypto/deflate.c                              3
-rw-r--r--  crypto/df_sp80090a.c                        232
-rw-r--r--  crypto/drbg.c                               266
-rw-r--r--  crypto/fips.c                                 5
-rw-r--r--  crypto/hctr2.c                              226
-rw-r--r--  crypto/jitterentropy-kcapi.c                 12
-rw-r--r--  crypto/polyval-generic.c                    205
-rw-r--r--  crypto/scatterwalk.c                        345
-rw-r--r--  crypto/sha3.c                               166
-rw-r--r--  crypto/sha3_generic.c                       290
-rw-r--r--  crypto/skcipher.c                           261
-rw-r--r--  crypto/tcrypt.c                              12
-rw-r--r--  crypto/tcrypt.h                              18
-rw-r--r--  crypto/testmgr.c                            118
-rw-r--r--  crypto/testmgr.h                            397
-rw-r--r--  crypto/zstd.c                                17
32 files changed, 1205 insertions, 2371 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index a04595f9d0ca..2e5b195b1b06 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -25,7 +25,7 @@ menu "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
- depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && CRYPTO_SELFTESTS
+ depends on CRYPTO_DRBG && CRYPTO_SELFTESTS
depends on (MODULE_SIG || !MODULES)
help
This option enables the fips boot option which is
@@ -696,7 +696,7 @@ config CRYPTO_ECB
config CRYPTO_HCTR2
tristate "HCTR2"
select CRYPTO_XCTR
- select CRYPTO_POLYVAL
+ select CRYPTO_LIB_POLYVAL
select CRYPTO_MANAGER
help
HCTR2 length-preserving encryption mode
@@ -881,6 +881,7 @@ menu "Hashes, digests, and MACs"
config CRYPTO_BLAKE2B
tristate "BLAKE2b"
select CRYPTO_HASH
+ select CRYPTO_LIB_BLAKE2B
help
BLAKE2b cryptographic hash function (RFC 7693)
@@ -947,16 +948,6 @@ config CRYPTO_MICHAEL_MIC
This algorithm is required for TKIP, but it should not be used for
other purposes because of the weakness of the algorithm.
-config CRYPTO_POLYVAL
- tristate
- select CRYPTO_HASH
- select CRYPTO_LIB_GF128MUL
- help
- POLYVAL hash function for HCTR2
-
- This is used in HCTR2. It is not a general-purpose
- cryptographic hash function.
-
config CRYPTO_RMD160
tristate "RIPEMD-160"
select CRYPTO_HASH
@@ -1005,6 +996,7 @@ config CRYPTO_SHA512
config CRYPTO_SHA3
tristate "SHA-3"
select CRYPTO_HASH
+ select CRYPTO_LIB_SHA3
help
SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3)
@@ -1169,17 +1161,6 @@ endmenu
menu "Random number generation"
-config CRYPTO_ANSI_CPRNG
- tristate "ANSI PRNG (Pseudo Random Number Generator)"
- select CRYPTO_AES
- select CRYPTO_RNG
- help
- Pseudo RNG (random number generator) (ANSI X9.31 Appendix A.2.4)
-
- This uses the AES cipher algorithm.
-
- Note that this option must be enabled if CRYPTO_FIPS is selected
-
menuconfig CRYPTO_DRBG_MENU
tristate "NIST SP800-90A DRBG (Deterministic Random Bit Generator)"
help
@@ -1205,8 +1186,7 @@ config CRYPTO_DRBG_HASH
config CRYPTO_DRBG_CTR
bool "CTR_DRBG"
- select CRYPTO_AES
- select CRYPTO_CTR
+ select CRYPTO_DF80090A
help
CTR_DRBG variant as defined in NIST SP800-90A.
@@ -1342,6 +1322,11 @@ config CRYPTO_KDF800108_CTR
select CRYPTO_HMAC
select CRYPTO_SHA256
+config CRYPTO_DF80090A
+ tristate
+ select CRYPTO_AES
+ select CRYPTO_CTR
+
endmenu
menu "Userspace interface"
diff --git a/crypto/Makefile b/crypto/Makefile
index e430e6e99b6a..16a35649dd91 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -78,13 +78,12 @@ obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512.o
-obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
+obj-$(CONFIG_CRYPTO_SHA3) += sha3.o
obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o
obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
-obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o
-CFLAGS_blake2b_generic.o := -Wframe-larger-than=4096 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105930
+obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
@@ -163,7 +162,6 @@ obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
obj-$(CONFIG_CRYPTO_XXHASH) += xxhash_generic.o
obj-$(CONFIG_CRYPTO_842) += 842.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
-obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
obj-$(CONFIG_CRYPTO_DRBG) += drbg.o
obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy_rng.o
CFLAGS_jitterentropy.o = -O0
@@ -173,7 +171,6 @@ jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
obj-$(CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE) += jitterentropy-testing.o
obj-$(CONFIG_CRYPTO_BENCHMARK) += tcrypt.o
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
-obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o
obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
@@ -209,4 +206,6 @@ obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
#
obj-$(CONFIG_CRYPTO_KDF800108_CTR) += kdf_sp800108.o
+obj-$(CONFIG_CRYPTO_DF80090A) += df_sp80090a.o
+
obj-$(CONFIG_CRYPTO_KRB5) += krb5/
diff --git a/crypto/aead.c b/crypto/aead.c
index 5d14b775036e..08d44c5e5c33 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -120,6 +120,7 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
struct aead_alg *alg = crypto_aead_alg(aead);
crypto_aead_set_flags(aead, CRYPTO_TFM_NEED_KEY);
+ crypto_aead_set_reqsize(aead, crypto_tfm_alg_reqsize(tfm));
aead->authsize = alg->maxauthsize;
@@ -204,6 +205,25 @@ struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alloc_aead);
+struct crypto_sync_aead *crypto_alloc_sync_aead(const char *alg_name, u32 type, u32 mask)
+{
+ struct crypto_aead *tfm;
+
+ /* Only sync algorithms are allowed. */
+ mask |= CRYPTO_ALG_ASYNC;
+ type &= ~(CRYPTO_ALG_ASYNC);
+
+ tfm = crypto_alloc_tfm(alg_name, &crypto_aead_type, type, mask);
+
+ if (!IS_ERR(tfm) && WARN_ON(crypto_aead_reqsize(tfm) > MAX_SYNC_AEAD_REQSIZE)) {
+ crypto_free_aead(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return (struct crypto_sync_aead *)tfm;
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_sync_aead);
+
int crypto_has_aead(const char *alg_name, u32 type, u32 mask)
{
return crypto_type_has_alg(alg_name, &crypto_aead_type, type, mask);
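
Note on the new allocator: OR-ing CRYPTO_ALG_ASYNC into the mask while clearing it from the type restricts the lookup to synchronous implementations, and the WARN_ON() bounds the request size so callers can reserve request storage up front. A hypothetical caller sketch -- crypto_free_sync_aead() and the "gcm(aes)" choice are assumptions, not part of this diff:

static int sync_aead_alloc_demo(void)
{
	struct crypto_sync_aead *tfm;

	tfm = crypto_alloc_sync_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/*
	 * crypto_aead_reqsize() is now guaranteed to be at most
	 * MAX_SYNC_AEAD_REQSIZE, so request storage can be sized at
	 * compile time (e.g. on the stack) instead of per transform.
	 */
	crypto_free_sync_aead(tfm);	/* assumed counterpart of the allocator */
	return 0;
}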
diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c
index 9ee50549e823..b41807e63bd3 100644
--- a/crypto/aegis128-neon.c
+++ b/crypto/aegis128-neon.c
@@ -4,7 +4,7 @@
*/
#include <asm/cpufeature.h>
-#include <asm/neon.h>
+#include <asm/simd.h>
#include "aegis.h"
#include "aegis-neon.h"
@@ -24,32 +24,28 @@ void crypto_aegis128_init_simd(struct aegis_state *state,
const union aegis_block *key,
const u8 *iv)
{
- kernel_neon_begin();
- crypto_aegis128_init_neon(state, key, iv);
- kernel_neon_end();
+ scoped_ksimd()
+ crypto_aegis128_init_neon(state, key, iv);
}
void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg)
{
- kernel_neon_begin();
- crypto_aegis128_update_neon(state, msg);
- kernel_neon_end();
+ scoped_ksimd()
+ crypto_aegis128_update_neon(state, msg);
}
void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
- kernel_neon_begin();
- crypto_aegis128_encrypt_chunk_neon(state, dst, src, size);
- kernel_neon_end();
+ scoped_ksimd()
+ crypto_aegis128_encrypt_chunk_neon(state, dst, src, size);
}
void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
- kernel_neon_begin();
- crypto_aegis128_decrypt_chunk_neon(state, dst, src, size);
- kernel_neon_end();
+ scoped_ksimd()
+ crypto_aegis128_decrypt_chunk_neon(state, dst, src, size);
}
int crypto_aegis128_final_simd(struct aegis_state *state,
@@ -58,12 +54,7 @@ int crypto_aegis128_final_simd(struct aegis_state *state,
unsigned int cryptlen,
unsigned int authsize)
{
- int ret;
-
- kernel_neon_begin();
- ret = crypto_aegis128_final_neon(state, tag_xor, assoclen, cryptlen,
- authsize);
- kernel_neon_end();
-
- return ret;
+ scoped_ksimd()
+ return crypto_aegis128_final_neon(state, tag_xor, assoclen,
+ cryptlen, authsize);
}
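
The rewrite of crypto_aegis128_final_simd() returns from inside the scope, which is only correct if scoped_ksimd() ends the SIMD section on every exit path. A sketch of how such a guard can be built on <linux/cleanup.h> (the real definition belongs with <asm/simd.h> and may differ):

#include <linux/cleanup.h>
#include <asm/neon.h>

/* Sketch only: a cleanup-based scope, so kernel_neon_end() also runs
 * when the body executes a return statement. */
DEFINE_LOCK_GUARD_0(ksimd_sketch, kernel_neon_begin(), kernel_neon_end())
#define scoped_ksimd_sketch() scoped_guard(ksimd_sketch)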
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 5e760ab62618..e468714f539d 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1212,15 +1212,14 @@ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
if (unlikely(!areq))
return ERR_PTR(-ENOMEM);
+ memset(areq, 0, areqlen);
+
ctx->inflight = true;
areq->areqlen = areqlen;
areq->sk = sk;
areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
- areq->last_rsgl = NULL;
INIT_LIST_HEAD(&areq->rsgl_list);
- areq->tsgl = NULL;
- areq->tsgl_entries = 0;
return areq;
}
diff --git a/crypto/ahash.c b/crypto/ahash.c
index dfb4f5476428..66492ae75fcf 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -423,7 +423,11 @@ static int ahash_update_finish(struct ahash_request *req, int err)
req->nbytes += nonzero - blen;
- blen = err < 0 ? 0 : err + nonzero;
+ blen = 0;
+ if (err >= 0) {
+ blen = err + nonzero;
+ err = 0;
+ }
if (ahash_request_isvirt(req))
memcpy(buf, req->svirt + req->nbytes - blen, blen);
else
@@ -661,6 +665,12 @@ int crypto_ahash_import_core(struct ahash_request *req, const void *in)
in);
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
+ if (crypto_ahash_block_only(tfm)) {
+ unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ u8 *buf = ahash_request_ctx(req);
+
+ buf[reqsize - 1] = 0;
+ }
return crypto_ahash_alg(tfm)->import_core(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import_core);
@@ -674,10 +684,14 @@ int crypto_ahash_import(struct ahash_request *req, const void *in)
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
if (crypto_ahash_block_only(tfm)) {
+ unsigned int plen = crypto_ahash_blocksize(tfm) + 1;
unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ unsigned int ss = crypto_ahash_statesize(tfm);
u8 *buf = ahash_request_ctx(req);
- buf[reqsize - 1] = 0;
+ memcpy(buf + reqsize - plen, in + ss - plen, plen);
+ if (buf[reqsize - 1] >= plen)
+ return -EOVERFLOW;
}
return crypto_ahash_alg(tfm)->import(req, in);
}
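
Context for the two hunks above: for block-only hashes, the request context ends with a partial-block buffer whose final byte records how many bytes are buffered, and the exported state carries the same plen = blocksize + 1 tail. Import now restores that tail and rejects states whose count byte is impossible. The invariant, reduced to a sketch (layout inferred from the hunk, not spelled out in this diff):

/* Sketch: the count byte restored by import must describe at most one
 * buffered block; anything larger cannot come from a valid export. */
static bool partial_count_valid(const u8 *reqctx, unsigned int reqsize,
				unsigned int blocksize)
{
	return reqctx[reqsize - 1] < blocksize + 1;	/* else -EOVERFLOW */
}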
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index e3f1a4852737..4d3dfc60a16a 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -416,9 +416,8 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
if (!ctx)
return -ENOMEM;
- ctx->result = NULL;
+ memset(ctx, 0, len);
ctx->len = len;
- ctx->more = false;
crypto_init_wait(&ctx->wait);
ask->private = ctx;
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
index 10c41adac3b1..1a86e40c8372 100644
--- a/crypto/algif_rng.c
+++ b/crypto/algif_rng.c
@@ -248,9 +248,8 @@ static int rng_accept_parent(void *private, struct sock *sk)
if (!ctx)
return -ENOMEM;
+ memset(ctx, 0, len);
ctx->len = len;
- ctx->addtl = NULL;
- ctx->addtl_len = 0;
/*
* No seeding done at that point -- if multiple accepts are
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
deleted file mode 100644
index 153523ce6076..000000000000
--- a/crypto/ansi_cprng.c
+++ /dev/null
@@ -1,474 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * PRNG: Pseudo Random Number Generator
- * Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
- * AES 128 cipher
- *
- * (C) Neil Horman <nhorman@tuxdriver.com>
- */
-
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/rng.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-
-#define DEFAULT_PRNG_KEY "0123456789abcdef"
-#define DEFAULT_PRNG_KSZ 16
-#define DEFAULT_BLK_SZ 16
-#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
-
-/*
- * Flags for the prng_context flags field
- */
-
-#define PRNG_FIXED_SIZE 0x1
-#define PRNG_NEED_RESET 0x2
-
-/*
- * Note: DT is our counter value
- * I is our intermediate value
- * V is our seed vector
- * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
- * for implementation details
- */
-
-
-struct prng_context {
- spinlock_t prng_lock;
- unsigned char rand_data[DEFAULT_BLK_SZ];
- unsigned char last_rand_data[DEFAULT_BLK_SZ];
- unsigned char DT[DEFAULT_BLK_SZ];
- unsigned char I[DEFAULT_BLK_SZ];
- unsigned char V[DEFAULT_BLK_SZ];
- u32 rand_data_valid;
- struct crypto_cipher *tfm;
- u32 flags;
-};
-
-static int dbg;
-
-static void hexdump(char *note, unsigned char *buf, unsigned int len)
-{
- if (dbg) {
- printk(KERN_CRIT "%s", note);
- print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
- 16, 1,
- buf, len, false);
- }
-}
-
-#define dbgprint(format, args...) do {\
-if (dbg)\
- printk(format, ##args);\
-} while (0)
-
-static void xor_vectors(unsigned char *in1, unsigned char *in2,
- unsigned char *out, unsigned int size)
-{
- int i;
-
- for (i = 0; i < size; i++)
- out[i] = in1[i] ^ in2[i];
-
-}
-/*
- * Returns DEFAULT_BLK_SZ bytes of random data per call
- * returns 0 if generation succeeded, <0 if something went wrong
- */
-static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
-{
- int i;
- unsigned char tmp[DEFAULT_BLK_SZ];
- unsigned char *output = NULL;
-
-
- dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",
- ctx);
-
- hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
- hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
- hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);
-
- /*
- * This algorithm is a 3 stage state machine
- */
- for (i = 0; i < 3; i++) {
-
- switch (i) {
- case 0:
- /*
- * Start by encrypting the counter value
- * This gives us an intermediate value I
- */
- memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
- output = ctx->I;
- hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
- break;
- case 1:
-
- /*
- * Next xor I with our secret vector V
- * encrypt that result to obtain our
- * pseudo random data which we output
- */
- xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
- hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
- output = ctx->rand_data;
- break;
- case 2:
- /*
- * First check that we didn't produce the same
- * random data that we did last time around through this
- */
- if (!memcmp(ctx->rand_data, ctx->last_rand_data,
- DEFAULT_BLK_SZ)) {
- if (cont_test) {
- panic("cprng %p Failed repetition check!\n",
- ctx);
- }
-
- printk(KERN_ERR
- "ctx %p Failed repetition check!\n",
- ctx);
-
- ctx->flags |= PRNG_NEED_RESET;
- return -EINVAL;
- }
- memcpy(ctx->last_rand_data, ctx->rand_data,
- DEFAULT_BLK_SZ);
-
- /*
- * Lastly xor the random data with I
- * and encrypt that to obtain a new secret vector V
- */
- xor_vectors(ctx->rand_data, ctx->I, tmp,
- DEFAULT_BLK_SZ);
- output = ctx->V;
- hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
- break;
- }
-
-
- /* do the encryption */
- crypto_cipher_encrypt_one(ctx->tfm, output, tmp);
-
- }
-
- /*
- * Now update our DT value
- */
- for (i = DEFAULT_BLK_SZ - 1; i >= 0; i--) {
- ctx->DT[i] += 1;
- if (ctx->DT[i] != 0)
- break;
- }
-
- dbgprint("Returning new block for context %p\n", ctx);
- ctx->rand_data_valid = 0;
-
- hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
- hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
- hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
- hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);
-
- return 0;
-}
-
-/* Our exported functions */
-static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
- int do_cont_test)
-{
- unsigned char *ptr = buf;
- unsigned int byte_count = (unsigned int)nbytes;
- int err;
-
-
- spin_lock_bh(&ctx->prng_lock);
-
- err = -EINVAL;
- if (ctx->flags & PRNG_NEED_RESET)
- goto done;
-
- /*
- * If the FIXED_SIZE flag is on, only return whole blocks of
- * pseudo random data
- */
- err = -EINVAL;
- if (ctx->flags & PRNG_FIXED_SIZE) {
- if (nbytes < DEFAULT_BLK_SZ)
- goto done;
- byte_count = DEFAULT_BLK_SZ;
- }
-
- /*
- * Return 0 in case of success as mandated by the kernel
- * crypto API interface definition.
- */
- err = 0;
-
- dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",
- byte_count, ctx);
-
-
-remainder:
- if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
- if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
- memset(buf, 0, nbytes);
- err = -EINVAL;
- goto done;
- }
- }
-
- /*
- * Copy any data less than an entire block
- */
- if (byte_count < DEFAULT_BLK_SZ) {
-empty_rbuf:
- while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
- *ptr = ctx->rand_data[ctx->rand_data_valid];
- ptr++;
- byte_count--;
- ctx->rand_data_valid++;
- if (byte_count == 0)
- goto done;
- }
- }
-
- /*
- * Now copy whole blocks
- */
- for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
- if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
- if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
- memset(buf, 0, nbytes);
- err = -EINVAL;
- goto done;
- }
- }
- if (ctx->rand_data_valid > 0)
- goto empty_rbuf;
- memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
- ctx->rand_data_valid += DEFAULT_BLK_SZ;
- ptr += DEFAULT_BLK_SZ;
- }
-
- /*
- * Now go back and get any remaining partial block
- */
- if (byte_count)
- goto remainder;
-
-done:
- spin_unlock_bh(&ctx->prng_lock);
- dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
- err, ctx);
- return err;
-}
-
-static void free_prng_context(struct prng_context *ctx)
-{
- crypto_free_cipher(ctx->tfm);
-}
-
-static int reset_prng_context(struct prng_context *ctx,
- const unsigned char *key, size_t klen,
- const unsigned char *V, const unsigned char *DT)
-{
- int ret;
- const unsigned char *prng_key;
-
- spin_lock_bh(&ctx->prng_lock);
- ctx->flags |= PRNG_NEED_RESET;
-
- prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;
-
- if (!key)
- klen = DEFAULT_PRNG_KSZ;
-
- if (V)
- memcpy(ctx->V, V, DEFAULT_BLK_SZ);
- else
- memcpy(ctx->V, DEFAULT_V_SEED, DEFAULT_BLK_SZ);
-
- if (DT)
- memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
- else
- memset(ctx->DT, 0, DEFAULT_BLK_SZ);
-
- memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
- memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);
-
- ctx->rand_data_valid = DEFAULT_BLK_SZ;
-
- ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
- if (ret) {
- dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
- crypto_cipher_get_flags(ctx->tfm));
- goto out;
- }
-
- ret = 0;
- ctx->flags &= ~PRNG_NEED_RESET;
-out:
- spin_unlock_bh(&ctx->prng_lock);
- return ret;
-}
-
-static int cprng_init(struct crypto_tfm *tfm)
-{
- struct prng_context *ctx = crypto_tfm_ctx(tfm);
-
- spin_lock_init(&ctx->prng_lock);
- ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->tfm)) {
- dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
- ctx);
- return PTR_ERR(ctx->tfm);
- }
-
- if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
- return -EINVAL;
-
- /*
- * after allocation, we should always force the user to reset
- * so they don't inadvertently use the insecure default values
- * without specifying them intentially
- */
- ctx->flags |= PRNG_NEED_RESET;
- return 0;
-}
-
-static void cprng_exit(struct crypto_tfm *tfm)
-{
- free_prng_context(crypto_tfm_ctx(tfm));
-}
-
-static int cprng_get_random(struct crypto_rng *tfm,
- const u8 *src, unsigned int slen,
- u8 *rdata, unsigned int dlen)
-{
- struct prng_context *prng = crypto_rng_ctx(tfm);
-
- return get_prng_bytes(rdata, dlen, prng, 0);
-}
-
-/*
- * This is the cprng_registered reset method the seed value is
- * interpreted as the tuple { V KEY DT}
- * V and KEY are required during reset, and DT is optional, detected
- * as being present by testing the length of the seed
- */
-static int cprng_reset(struct crypto_rng *tfm,
- const u8 *seed, unsigned int slen)
-{
- struct prng_context *prng = crypto_rng_ctx(tfm);
- const u8 *key = seed + DEFAULT_BLK_SZ;
- const u8 *dt = NULL;
-
- if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
- return -EINVAL;
-
- if (slen >= (2 * DEFAULT_BLK_SZ + DEFAULT_PRNG_KSZ))
- dt = key + DEFAULT_PRNG_KSZ;
-
- reset_prng_context(prng, key, DEFAULT_PRNG_KSZ, seed, dt);
-
- if (prng->flags & PRNG_NEED_RESET)
- return -EINVAL;
- return 0;
-}
-
-#ifdef CONFIG_CRYPTO_FIPS
-static int fips_cprng_get_random(struct crypto_rng *tfm,
- const u8 *src, unsigned int slen,
- u8 *rdata, unsigned int dlen)
-{
- struct prng_context *prng = crypto_rng_ctx(tfm);
-
- return get_prng_bytes(rdata, dlen, prng, 1);
-}
-
-static int fips_cprng_reset(struct crypto_rng *tfm,
- const u8 *seed, unsigned int slen)
-{
- u8 rdata[DEFAULT_BLK_SZ];
- const u8 *key = seed + DEFAULT_BLK_SZ;
- int rc;
-
- struct prng_context *prng = crypto_rng_ctx(tfm);
-
- if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
- return -EINVAL;
-
- /* fips strictly requires seed != key */
- if (!memcmp(seed, key, DEFAULT_PRNG_KSZ))
- return -EINVAL;
-
- rc = cprng_reset(tfm, seed, slen);
-
- if (!rc)
- goto out;
-
- /* this primes our continuity test */
- rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0);
- prng->rand_data_valid = DEFAULT_BLK_SZ;
-
-out:
- return rc;
-}
-#endif
-
-static struct rng_alg rng_algs[] = { {
- .generate = cprng_get_random,
- .seed = cprng_reset,
- .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ,
- .base = {
- .cra_name = "stdrng",
- .cra_driver_name = "ansi_cprng",
- .cra_priority = 100,
- .cra_ctxsize = sizeof(struct prng_context),
- .cra_module = THIS_MODULE,
- .cra_init = cprng_init,
- .cra_exit = cprng_exit,
- }
-#ifdef CONFIG_CRYPTO_FIPS
-}, {
- .generate = fips_cprng_get_random,
- .seed = fips_cprng_reset,
- .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ,
- .base = {
- .cra_name = "fips(ansi_cprng)",
- .cra_driver_name = "fips_ansi_cprng",
- .cra_priority = 300,
- .cra_ctxsize = sizeof(struct prng_context),
- .cra_module = THIS_MODULE,
- .cra_init = cprng_init,
- .cra_exit = cprng_exit,
- }
-#endif
-} };
-
-/* Module initalization */
-static int __init prng_mod_init(void)
-{
- return crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
-}
-
-static void __exit prng_mod_fini(void)
-{
- crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
-}
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
-MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
-module_param(dbg, int, 0);
-MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
-module_init(prng_mod_init);
-module_exit(prng_mod_fini);
-MODULE_ALIAS_CRYPTO("stdrng");
-MODULE_ALIAS_CRYPTO("ansi_cprng");
-MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index ba2d9d1ea235..348966ea2175 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -11,6 +11,7 @@
#include <crypto/public_key.h>
#include <linux/seq_file.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <keys/system_keyring.h>
@@ -141,12 +142,17 @@ struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1,
size_t len_2)
{
struct asymmetric_key_id *kid;
-
- kid = kmalloc(sizeof(struct asymmetric_key_id) + len_1 + len_2,
- GFP_KERNEL);
+ size_t kid_sz;
+ size_t len;
+
+ if (check_add_overflow(len_1, len_2, &len))
+ return ERR_PTR(-EOVERFLOW);
+ if (check_add_overflow(sizeof(struct asymmetric_key_id), len, &kid_sz))
+ return ERR_PTR(-EOVERFLOW);
+ kid = kmalloc(kid_sz, GFP_KERNEL);
if (!kid)
return ERR_PTR(-ENOMEM);
- kid->len = len_1 + len_2;
+ kid->len = len;
memcpy(kid->data, val_1, len_1);
memcpy(kid->data + len_1, val_2, len_2);
return kid;
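
check_add_overflow() from <linux/overflow.h> stores a + b into its third argument and returns true only if the addition wrapped. Before this hunk, a very large len_1 + len_2 could wrap the kmalloc() size and the following memcpy() calls would run past the short allocation; now both additions are validated first. The semantics in isolation:

#include <linux/overflow.h>

/* Illustration: returns false when a + b wraps, e.g. (SIZE_MAX, 1). */
static bool sums_safely(size_t a, size_t b, size_t *out)
{
	return !check_add_overflow(a, b, out);
}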
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
index afcd4d101ac5..86292965f493 100644
--- a/crypto/asymmetric_keys/restrict.c
+++ b/crypto/asymmetric_keys/restrict.c
@@ -17,9 +17,12 @@ static struct asymmetric_key_id *ca_keyid;
#ifndef MODULE
static struct {
- struct asymmetric_key_id id;
- unsigned char data[10];
+ /* Must be last as it ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct asymmetric_key_id, id, data,
+ unsigned char data[10];
+ );
} cakey;
+static_assert(offsetof(typeof(cakey), id.data) == offsetof(typeof(cakey), data));
static int __init ca_keys_setup(char *str)
{
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 8df3fa60a44f..b37cae914987 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL_GPL(x509_free_certificate);
*/
struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
{
- struct x509_certificate *cert __free(x509_free_certificate);
+ struct x509_certificate *cert __free(x509_free_certificate) = NULL;
struct x509_parse_context *ctx __free(kfree) = NULL;
struct asymmetric_key_id *kid;
long ret;
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 8409d7d36cb4..12e3341e806b 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -148,7 +148,7 @@ not_self_signed:
*/
static int x509_key_preparse(struct key_preparsed_payload *prep)
{
- struct x509_certificate *cert __free(x509_free_certificate);
+ struct x509_certificate *cert __free(x509_free_certificate) = NULL;
struct asymmetric_key_ids *kids __free(kfree) = NULL;
char *p, *desc __free(kfree) = NULL;
const char *q;
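
Both x509 hunks close the same hazard: __free() arms a cleanup function at the point of declaration, so a return taken before the variable's first assignment would pass uninitialized stack contents to that function. With the NULL initializer the early-exit cleanup becomes a no-op. Reduced:

/* Sketch of the hazard the '= NULL' closes. */
static int parse_demo(const void *data, size_t datalen)
{
	struct x509_certificate *cert __free(x509_free_certificate) = NULL;

	if (!datalen)
		return -EINVAL;	/* cleanup runs here: NULL is safe, garbage was not */

	/* ...cert is assigned on the normal path... */
	return 0;
}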
diff --git a/crypto/authenc.c b/crypto/authenc.c
index a723769c8777..ac679ce2cb95 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -37,7 +37,7 @@ struct authenc_request_ctx {
static void authenc_request_complete(struct aead_request *req, int err)
{
- if (err != -EINPROGRESS)
+ if (err != -EINPROGRESS && err != -EBUSY)
aead_request_complete(req, err);
}
@@ -107,27 +107,42 @@ out:
return err;
}
-static void authenc_geniv_ahash_done(void *data, int err)
+static void authenc_geniv_ahash_finish(struct aead_request *req)
{
- struct aead_request *req = data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
- if (err)
- goto out;
-
scatterwalk_map_and_copy(ahreq->result, req->dst,
req->assoclen + req->cryptlen,
crypto_aead_authsize(authenc), 1);
+}
-out:
+static void authenc_geniv_ahash_done(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ if (!err)
+ authenc_geniv_ahash_finish(req);
aead_request_complete(req, err);
}
-static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
+/*
+ * Used when the ahash request was invoked in the async callback context
+ * of the previous skcipher request. Eat any EINPROGRESS notifications.
+ */
+static void authenc_geniv_ahash_done2(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ if (!err)
+ authenc_geniv_ahash_finish(req);
+ authenc_request_complete(req, err);
+}
+
+static int crypto_authenc_genicv(struct aead_request *req, unsigned int mask)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
@@ -136,6 +151,7 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
struct crypto_ahash *auth = ctx->auth;
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+ unsigned int flags = aead_request_flags(req) & ~mask;
u8 *hash = areq_ctx->tail;
int err;
@@ -143,7 +159,8 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
ahash_request_set_crypt(ahreq, req->dst, hash,
req->assoclen + req->cryptlen);
ahash_request_set_callback(ahreq, flags,
- authenc_geniv_ahash_done, req);
+ mask ? authenc_geniv_ahash_done2 :
+ authenc_geniv_ahash_done, req);
err = crypto_ahash_digest(ahreq);
if (err)
@@ -159,12 +176,11 @@ static void crypto_authenc_encrypt_done(void *data, int err)
{
struct aead_request *areq = data;
- if (err)
- goto out;
-
- err = crypto_authenc_genicv(areq, 0);
-
-out:
+ if (err) {
+ aead_request_complete(areq, err);
+ return;
+ }
+ err = crypto_authenc_genicv(areq, CRYPTO_TFM_REQ_MAY_SLEEP);
authenc_request_complete(areq, err);
}
@@ -199,11 +215,18 @@ static int crypto_authenc_encrypt(struct aead_request *req)
if (err)
return err;
- return crypto_authenc_genicv(req, aead_request_flags(req));
+ return crypto_authenc_genicv(req, 0);
+}
+
+static void authenc_decrypt_tail_done(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ authenc_request_complete(req, err);
}
static int crypto_authenc_decrypt_tail(struct aead_request *req,
- unsigned int flags)
+ unsigned int mask)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
@@ -214,6 +237,7 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
struct skcipher_request *skreq = (void *)(areq_ctx->tail +
ictx->reqoff);
unsigned int authsize = crypto_aead_authsize(authenc);
+ unsigned int flags = aead_request_flags(req) & ~mask;
u8 *ihash = ahreq->result + authsize;
struct scatterlist *src, *dst;
@@ -230,7 +254,9 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
skcipher_request_set_tfm(skreq, ctx->enc);
skcipher_request_set_callback(skreq, flags,
- req->base.complete, req->base.data);
+ mask ? authenc_decrypt_tail_done :
+ req->base.complete,
+ mask ? req : req->base.data);
skcipher_request_set_crypt(skreq, src, dst,
req->cryptlen - authsize, req->iv);
@@ -241,12 +267,11 @@ static void authenc_verify_ahash_done(void *data, int err)
{
struct aead_request *req = data;
- if (err)
- goto out;
-
- err = crypto_authenc_decrypt_tail(req, 0);
-
-out:
+ if (err) {
+ aead_request_complete(req, err);
+ return;
+ }
+ err = crypto_authenc_decrypt_tail(req, CRYPTO_TFM_REQ_MAY_SLEEP);
authenc_request_complete(req, err);
}
@@ -273,7 +298,7 @@ static int crypto_authenc_decrypt(struct aead_request *req)
if (err)
return err;
- return crypto_authenc_decrypt_tail(req, aead_request_flags(req));
+ return crypto_authenc_decrypt_tail(req, 0);
}
static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
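
The thread running through these authenc changes: when a sub-request is issued from inside the completion callback of a previous async operation, it must run with CRYPTO_TFM_REQ_MAY_SLEEP masked off (callbacks may run in atomic context), and its intermediate -EINPROGRESS/-EBUSY notifications must be swallowed, because the original requester has already been notified once. The *_done2 and tail-done callbacks all reduce to:

/* Distilled pattern behind authenc_request_complete() and the done2
 * callbacks above. */
static void chained_op_done(void *data, int err)
{
	struct aead_request *req = data;

	if (err == -EINPROGRESS || err == -EBUSY)
		return;			/* not a final completion yet */
	aead_request_complete(req, err);
}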
diff --git a/crypto/blake2b.c b/crypto/blake2b.c
new file mode 100644
index 000000000000..67a6dae43a54
--- /dev/null
+++ b/crypto/blake2b.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API support for BLAKE2b
+ *
+ * Copyright 2025 Google LLC
+ */
+#include <crypto/blake2b.h>
+#include <crypto/internal/hash.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+struct blake2b_tfm_ctx {
+ unsigned int keylen;
+ u8 key[BLAKE2B_KEY_SIZE];
+};
+
+static int crypto_blake2b_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+
+ if (keylen > BLAKE2B_KEY_SIZE)
+ return -EINVAL;
+ memcpy(tctx->key, key, keylen);
+ tctx->keylen = keylen;
+ return 0;
+}
+
+#define BLAKE2B_CTX(desc) ((struct blake2b_ctx *)shash_desc_ctx(desc))
+
+static int crypto_blake2b_init(struct shash_desc *desc)
+{
+ const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ unsigned int digestsize = crypto_shash_digestsize(desc->tfm);
+
+ blake2b_init_key(BLAKE2B_CTX(desc), digestsize,
+ tctx->key, tctx->keylen);
+ return 0;
+}
+
+static int crypto_blake2b_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ blake2b_update(BLAKE2B_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_blake2b_final(struct shash_desc *desc, u8 *out)
+{
+ blake2b_final(BLAKE2B_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_blake2b_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ unsigned int digestsize = crypto_shash_digestsize(desc->tfm);
+
+ blake2b(tctx->key, tctx->keylen, data, len, out, digestsize);
+ return 0;
+}
+
+#define BLAKE2B_ALG(name, digest_size) \
+ { \
+ .base.cra_name = name, \
+ .base.cra_driver_name = name "-lib", \
+ .base.cra_priority = 300, \
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+ .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \
+ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \
+ .base.cra_module = THIS_MODULE, \
+ .digestsize = digest_size, \
+ .setkey = crypto_blake2b_setkey, \
+ .init = crypto_blake2b_init, \
+ .update = crypto_blake2b_update, \
+ .final = crypto_blake2b_final, \
+ .digest = crypto_blake2b_digest, \
+ .descsize = sizeof(struct blake2b_ctx), \
+ }
+
+static struct shash_alg algs[] = {
+ BLAKE2B_ALG("blake2b-160", BLAKE2B_160_HASH_SIZE),
+ BLAKE2B_ALG("blake2b-256", BLAKE2B_256_HASH_SIZE),
+ BLAKE2B_ALG("blake2b-384", BLAKE2B_384_HASH_SIZE),
+ BLAKE2B_ALG("blake2b-512", BLAKE2B_512_HASH_SIZE),
+};
+
+static int __init crypto_blake2b_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+module_init(crypto_blake2b_mod_init);
+
+static void __exit crypto_blake2b_mod_exit(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+module_exit(crypto_blake2b_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto API support for BLAKE2b");
+
+MODULE_ALIAS_CRYPTO("blake2b-160");
+MODULE_ALIAS_CRYPTO("blake2b-160-lib");
+MODULE_ALIAS_CRYPTO("blake2b-256");
+MODULE_ALIAS_CRYPTO("blake2b-256-lib");
+MODULE_ALIAS_CRYPTO("blake2b-384");
+MODULE_ALIAS_CRYPTO("blake2b-384-lib");
+MODULE_ALIAS_CRYPTO("blake2b-512");
+MODULE_ALIAS_CRYPTO("blake2b-512-lib");
diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c
deleted file mode 100644
index 60f056217510..000000000000
--- a/crypto/blake2b_generic.c
+++ /dev/null
@@ -1,192 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0)
-/*
- * Generic implementation of the BLAKE2b digest algorithm. Based on the BLAKE2b
- * reference implementation, but it has been heavily modified for use in the
- * kernel. The reference implementation was:
- *
- * Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under
- * the terms of the CC0, the OpenSSL Licence, or the Apache Public License
- * 2.0, at your option. The terms of these licenses can be found at:
- *
- * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- * - OpenSSL license : https://www.openssl.org/source/license.html
- * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
- *
- * More information about BLAKE2 can be found at https://blake2.net.
- */
-
-#include <crypto/internal/blake2b.h>
-#include <crypto/internal/hash.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/unaligned.h>
-
-static const u8 blake2b_sigma[12][16] = {
- { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
- { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
- { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
- { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
- { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
- { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
- { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
- { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
- { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
- { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
- { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
- { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
-};
-
-static void blake2b_increment_counter(struct blake2b_state *S, const u64 inc)
-{
- S->t[0] += inc;
- S->t[1] += (S->t[0] < inc);
-}
-
-#define G(r,i,a,b,c,d) \
- do { \
- a = a + b + m[blake2b_sigma[r][2*i+0]]; \
- d = ror64(d ^ a, 32); \
- c = c + d; \
- b = ror64(b ^ c, 24); \
- a = a + b + m[blake2b_sigma[r][2*i+1]]; \
- d = ror64(d ^ a, 16); \
- c = c + d; \
- b = ror64(b ^ c, 63); \
- } while (0)
-
-#define ROUND(r) \
- do { \
- G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
- G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
- G(r,2,v[ 2],v[ 6],v[10],v[14]); \
- G(r,3,v[ 3],v[ 7],v[11],v[15]); \
- G(r,4,v[ 0],v[ 5],v[10],v[15]); \
- G(r,5,v[ 1],v[ 6],v[11],v[12]); \
- G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
- G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
- } while (0)
-
-static void blake2b_compress_one_generic(struct blake2b_state *S,
- const u8 block[BLAKE2B_BLOCK_SIZE])
-{
- u64 m[16];
- u64 v[16];
- size_t i;
-
- for (i = 0; i < 16; ++i)
- m[i] = get_unaligned_le64(block + i * sizeof(m[i]));
-
- for (i = 0; i < 8; ++i)
- v[i] = S->h[i];
-
- v[ 8] = BLAKE2B_IV0;
- v[ 9] = BLAKE2B_IV1;
- v[10] = BLAKE2B_IV2;
- v[11] = BLAKE2B_IV3;
- v[12] = BLAKE2B_IV4 ^ S->t[0];
- v[13] = BLAKE2B_IV5 ^ S->t[1];
- v[14] = BLAKE2B_IV6 ^ S->f[0];
- v[15] = BLAKE2B_IV7 ^ S->f[1];
-
- ROUND(0);
- ROUND(1);
- ROUND(2);
- ROUND(3);
- ROUND(4);
- ROUND(5);
- ROUND(6);
- ROUND(7);
- ROUND(8);
- ROUND(9);
- ROUND(10);
- ROUND(11);
-#ifdef CONFIG_CC_IS_CLANG
-#pragma nounroll /* https://llvm.org/pr45803 */
-#endif
- for (i = 0; i < 8; ++i)
- S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
-}
-
-#undef G
-#undef ROUND
-
-static void blake2b_compress_generic(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc)
-{
- do {
- blake2b_increment_counter(state, inc);
- blake2b_compress_one_generic(state, block);
- block += BLAKE2B_BLOCK_SIZE;
- } while (--nblocks);
-}
-
-static int crypto_blake2b_update_generic(struct shash_desc *desc,
- const u8 *in, unsigned int inlen)
-{
- return crypto_blake2b_update_bo(desc, in, inlen,
- blake2b_compress_generic);
-}
-
-static int crypto_blake2b_finup_generic(struct shash_desc *desc, const u8 *in,
- unsigned int inlen, u8 *out)
-{
- return crypto_blake2b_finup(desc, in, inlen, out,
- blake2b_compress_generic);
-}
-
-#define BLAKE2B_ALG(name, driver_name, digest_size) \
- { \
- .base.cra_name = name, \
- .base.cra_driver_name = driver_name, \
- .base.cra_priority = 100, \
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY | \
- CRYPTO_AHASH_ALG_BLOCK_ONLY | \
- CRYPTO_AHASH_ALG_FINAL_NONZERO, \
- .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \
- .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \
- .base.cra_module = THIS_MODULE, \
- .digestsize = digest_size, \
- .setkey = crypto_blake2b_setkey, \
- .init = crypto_blake2b_init, \
- .update = crypto_blake2b_update_generic, \
- .finup = crypto_blake2b_finup_generic, \
- .descsize = BLAKE2B_DESC_SIZE, \
- .statesize = BLAKE2B_STATE_SIZE, \
- }
-
-static struct shash_alg blake2b_algs[] = {
- BLAKE2B_ALG("blake2b-160", "blake2b-160-generic",
- BLAKE2B_160_HASH_SIZE),
- BLAKE2B_ALG("blake2b-256", "blake2b-256-generic",
- BLAKE2B_256_HASH_SIZE),
- BLAKE2B_ALG("blake2b-384", "blake2b-384-generic",
- BLAKE2B_384_HASH_SIZE),
- BLAKE2B_ALG("blake2b-512", "blake2b-512-generic",
- BLAKE2B_512_HASH_SIZE),
-};
-
-static int __init blake2b_mod_init(void)
-{
- return crypto_register_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
-}
-
-static void __exit blake2b_mod_fini(void)
-{
- crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
-}
-
-module_init(blake2b_mod_init);
-module_exit(blake2b_mod_fini);
-
-MODULE_AUTHOR("David Sterba <kdave@kernel.org>");
-MODULE_DESCRIPTION("BLAKE2b generic implementation");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("blake2b-160");
-MODULE_ALIAS_CRYPTO("blake2b-160-generic");
-MODULE_ALIAS_CRYPTO("blake2b-256");
-MODULE_ALIAS_CRYPTO("blake2b-256-generic");
-MODULE_ALIAS_CRYPTO("blake2b-384");
-MODULE_ALIAS_CRYPTO("blake2b-384-generic");
-MODULE_ALIAS_CRYPTO("blake2b-512");
-MODULE_ALIAS_CRYPTO("blake2b-512-generic");
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 21404515dc77..a3e1fff55661 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/overflow.h>
#include <linux/percpu.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
@@ -39,7 +40,7 @@ static void *deflate_alloc_stream(void)
DEFLATE_DEF_MEMLEVEL));
struct deflate_stream *ctx;
- ctx = kvmalloc(sizeof(*ctx) + size, GFP_KERNEL);
+ ctx = kvmalloc(struct_size(ctx, workspace, size), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
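
struct_size(ctx, workspace, size) evaluates to sizeof(*ctx) plus size elements of the trailing flexible array and, unlike the open-coded sizeof(*ctx) + size, saturates to SIZE_MAX on overflow, so a wrapped size makes kvmalloc() fail cleanly instead of returning an undersized buffer. The shape the call assumes (member names per the hunk; the full struct is not shown in this diff):

#include <linux/zlib.h>

/* Assumed shape of struct deflate_stream -- only the flexible tail
 * matters for struct_size(). */
struct deflate_stream_sketch {
	z_stream stream;
	u8 workspace[];		/* runtime-sized via struct_size() */
};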
diff --git a/crypto/df_sp80090a.c b/crypto/df_sp80090a.c
new file mode 100644
index 000000000000..dc63b31a93fc
--- /dev/null
+++ b/crypto/df_sp80090a.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * NIST SP800-90A DRBG derivation function
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <crypto/aes.h>
+#include <crypto/df_sp80090a.h>
+#include <crypto/internal/drbg.h>
+
+static void drbg_kcapi_symsetkey(struct crypto_aes_ctx *aesctx,
+ const unsigned char *key,
+ u8 keylen);
+static void drbg_kcapi_symsetkey(struct crypto_aes_ctx *aesctx,
+ const unsigned char *key, u8 keylen)
+{
+ aes_expandkey(aesctx, key, keylen);
+}
+
+static void drbg_kcapi_sym(struct crypto_aes_ctx *aesctx,
+ unsigned char *outval,
+ const struct drbg_string *in, u8 blocklen_bytes)
+{
+ /* there is only one component in *in */
+ BUG_ON(in->len < blocklen_bytes);
+ aes_encrypt(aesctx, outval, in->buf);
+}
+
+/* BCC function for CTR DRBG as defined in 10.4.3 */
+
+static void drbg_ctr_bcc(struct crypto_aes_ctx *aesctx,
+ unsigned char *out, const unsigned char *key,
+ struct list_head *in,
+ u8 blocklen_bytes,
+ u8 keylen)
+{
+ struct drbg_string *curr = NULL;
+ struct drbg_string data;
+ short cnt = 0;
+
+ drbg_string_fill(&data, out, blocklen_bytes);
+
+ /* 10.4.3 step 2 / 4 */
+ drbg_kcapi_symsetkey(aesctx, key, keylen);
+ list_for_each_entry(curr, in, list) {
+ const unsigned char *pos = curr->buf;
+ size_t len = curr->len;
+ /* 10.4.3 step 4.1 */
+ while (len) {
+ /* 10.4.3 step 4.2 */
+ if (blocklen_bytes == cnt) {
+ cnt = 0;
+ drbg_kcapi_sym(aesctx, out, &data, blocklen_bytes);
+ }
+ out[cnt] ^= *pos;
+ pos++;
+ cnt++;
+ len--;
+ }
+ }
+ /* 10.4.3 step 4.2 for last block */
+ if (cnt)
+ drbg_kcapi_sym(aesctx, out, &data, blocklen_bytes);
+}
+
+/*
+ * scratchpad usage: drbg_ctr_update is interlinked with crypto_drbg_ctr_df
+ * (and drbg_ctr_bcc, but this function does not need any temporary buffers),
+ * the scratchpad is used as follows:
+ * drbg_ctr_update:
+ * temp
+ * start: drbg->scratchpad
+ * length: drbg_statelen(drbg) + drbg_blocklen(drbg)
+ * note: the cipher writing into this variable works
+ * blocklen-wise. Now, when the statelen is not a multiple
+ * of blocklen, the generateion loop below "spills over"
+ * by at most blocklen. Thus, we need to give sufficient
+ * memory.
+ * df_data
+ * start: drbg->scratchpad +
+ * drbg_statelen(drbg) + drbg_blocklen(drbg)
+ * length: drbg_statelen(drbg)
+ *
+ * crypto_drbg_ctr_df:
+ * pad
+ * start: df_data + drbg_statelen(drbg)
+ * length: drbg_blocklen(drbg)
+ * iv
+ * start: pad + drbg_blocklen(drbg)
+ * length: drbg_blocklen(drbg)
+ * temp
+ * start: iv + drbg_blocklen(drbg)
+ * length: drbg_statelen(drbg) + drbg_blocklen(drbg)
+ * note: temp is the buffer that the BCC function operates
+ * on. BCC operates blockwise. drbg_statelen(drbg)
+ * is sufficient when the DRBG state length is a multiple
+ * of the block size. For AES192 (and maybe other ciphers)
+ * this is not correct and the length for temp is
+ * insufficient (yes, that also means for such ciphers,
+ * the final output of all BCC rounds is truncated).
+ * Therefore, add drbg_blocklen(drbg) to cover all
+ * possibilities.
+ * refer to crypto_drbg_ctr_df_datalen() to get required length
+ */
+
+/* Derivation Function for CTR DRBG as defined in 10.4.2 */
+int crypto_drbg_ctr_df(struct crypto_aes_ctx *aesctx,
+ unsigned char *df_data, size_t bytes_to_return,
+ struct list_head *seedlist,
+ u8 blocklen_bytes,
+ u8 statelen)
+{
+ unsigned char L_N[8];
+ /* S3 is input */
+ struct drbg_string S1, S2, S4, cipherin;
+ LIST_HEAD(bcc_list);
+ unsigned char *pad = df_data + statelen;
+ unsigned char *iv = pad + blocklen_bytes;
+ unsigned char *temp = iv + blocklen_bytes;
+ size_t padlen = 0;
+ unsigned int templen = 0;
+ /* 10.4.2 step 7 */
+ unsigned int i = 0;
+ /* 10.4.2 step 8 */
+ const unsigned char *K = (unsigned char *)
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f";
+ unsigned char *X;
+ size_t generated_len = 0;
+ size_t inputlen = 0;
+ struct drbg_string *seed = NULL;
+ u8 keylen;
+
+ memset(pad, 0, blocklen_bytes);
+ memset(iv, 0, blocklen_bytes);
+ keylen = statelen - blocklen_bytes;
+ /* 10.4.2 step 1 is implicit as we work byte-wise */
+
+ /* 10.4.2 step 2 */
+ if ((512 / 8) < bytes_to_return)
+ return -EINVAL;
+
+ /* 10.4.2 step 2 -- calculate the entire length of all input data */
+ list_for_each_entry(seed, seedlist, list)
+ inputlen += seed->len;
+ drbg_cpu_to_be32(inputlen, &L_N[0]);
+
+ /* 10.4.2 step 3 */
+ drbg_cpu_to_be32(bytes_to_return, &L_N[4]);
+
+ /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
+ padlen = (inputlen + sizeof(L_N) + 1) % (blocklen_bytes);
+ /* wrap the padlen appropriately */
+ if (padlen)
+ padlen = blocklen_bytes - padlen;
+ /*
+ * pad / padlen contains the 0x80 byte and the following zero bytes.
+ * As the calculated padlen value only covers the number of zero
+ * bytes, this value has to be incremented by one for the 0x80 byte.
+ */
+ padlen++;
+ pad[0] = 0x80;
+
+ /* 10.4.2 step 4 -- first fill the linked list and then order it */
+ drbg_string_fill(&S1, iv, blocklen_bytes);
+ list_add_tail(&S1.list, &bcc_list);
+ drbg_string_fill(&S2, L_N, sizeof(L_N));
+ list_add_tail(&S2.list, &bcc_list);
+ list_splice_tail(seedlist, &bcc_list);
+ drbg_string_fill(&S4, pad, padlen);
+ list_add_tail(&S4.list, &bcc_list);
+
+ /* 10.4.2 step 9 */
+ while (templen < (keylen + (blocklen_bytes))) {
+ /*
+ * 10.4.2 step 9.1 - the padding is implicit as the buffer
+ * holds zeros after allocation -- even the increment of i
+ * is irrelevant as the increment remains within length of i
+ */
+ drbg_cpu_to_be32(i, iv);
+ /* 10.4.2 step 9.2 -- BCC and concatenation with temp */
+ drbg_ctr_bcc(aesctx, temp + templen, K, &bcc_list,
+ blocklen_bytes, keylen);
+ /* 10.4.2 step 9.3 */
+ i++;
+ templen += blocklen_bytes;
+ }
+
+ /* 10.4.2 step 11 */
+ X = temp + (keylen);
+ drbg_string_fill(&cipherin, X, blocklen_bytes);
+
+ /* 10.4.2 step 12: overwriting of outval is implemented in next step */
+
+ /* 10.4.2 step 13 */
+ drbg_kcapi_symsetkey(aesctx, temp, keylen);
+ while (generated_len < bytes_to_return) {
+ short blocklen = 0;
+ /*
+ * 10.4.2 step 13.1: the truncation of the key length is
+ * implicit as the key is only drbg_blocklen in size based on
+ * the implementation of the cipher function callback
+ */
+ drbg_kcapi_sym(aesctx, X, &cipherin, blocklen_bytes);
+ blocklen = (blocklen_bytes <
+ (bytes_to_return - generated_len)) ?
+ blocklen_bytes :
+ (bytes_to_return - generated_len);
+ /* 10.4.2 step 13.2 and 14 */
+ memcpy(df_data + generated_len, X, blocklen);
+ generated_len += blocklen;
+ }
+
+ memset(iv, 0, blocklen_bytes);
+ memset(temp, 0, statelen + blocklen_bytes);
+ memset(pad, 0, blocklen_bytes);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_drbg_ctr_df);
+
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("Derivation Function conformant to SP800-90A");
diff --git a/crypto/drbg.c b/crypto/drbg.c
index dbe4c8bb5ceb..1d433dae9955 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -98,6 +98,7 @@
*/
#include <crypto/drbg.h>
+#include <crypto/df_sp80090a.h>
#include <crypto/internal/cipher.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
@@ -261,26 +262,6 @@ static int drbg_fips_continuous_test(struct drbg_state *drbg,
return 0;
}
-/*
- * Convert an integer into a byte representation of this integer.
- * The byte representation is big-endian
- *
- * @val value to be converted
- * @buf buffer holding the converted integer -- caller must ensure that
- * buffer size is at least 32 bit
- */
-#if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR))
-static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
-{
- struct s {
- __be32 conv;
- };
- struct s *conversion = (struct s *) buf;
-
- conversion->conv = cpu_to_be32(val);
-}
-#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */
-
/******************************************************************
* CTR DRBG callback functions
******************************************************************/
@@ -294,10 +275,6 @@ MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");
-static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
- const unsigned char *key);
-static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
- const struct drbg_string *in);
static int drbg_init_sym_kernel(struct drbg_state *drbg);
static int drbg_fini_sym_kernel(struct drbg_state *drbg);
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
@@ -305,202 +282,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *outbuf, u32 outlen);
#define DRBG_OUTSCRATCHLEN 256
-/* BCC function for CTR DRBG as defined in 10.4.3 */
-static int drbg_ctr_bcc(struct drbg_state *drbg,
- unsigned char *out, const unsigned char *key,
- struct list_head *in)
-{
- int ret = 0;
- struct drbg_string *curr = NULL;
- struct drbg_string data;
- short cnt = 0;
-
- drbg_string_fill(&data, out, drbg_blocklen(drbg));
-
- /* 10.4.3 step 2 / 4 */
- drbg_kcapi_symsetkey(drbg, key);
- list_for_each_entry(curr, in, list) {
- const unsigned char *pos = curr->buf;
- size_t len = curr->len;
- /* 10.4.3 step 4.1 */
- while (len) {
- /* 10.4.3 step 4.2 */
- if (drbg_blocklen(drbg) == cnt) {
- cnt = 0;
- ret = drbg_kcapi_sym(drbg, out, &data);
- if (ret)
- return ret;
- }
- out[cnt] ^= *pos;
- pos++;
- cnt++;
- len--;
- }
- }
- /* 10.4.3 step 4.2 for last block */
- if (cnt)
- ret = drbg_kcapi_sym(drbg, out, &data);
-
- return ret;
-}
-
-/*
- * scratchpad usage: drbg_ctr_update is interlinked with drbg_ctr_df
- * (and drbg_ctr_bcc, but this function does not need any temporary buffers),
- * the scratchpad is used as follows:
- * drbg_ctr_update:
- * temp
- * start: drbg->scratchpad
- * length: drbg_statelen(drbg) + drbg_blocklen(drbg)
- * note: the cipher writing into this variable works
- * blocklen-wise. Now, when the statelen is not a multiple
- * of blocklen, the generateion loop below "spills over"
- * by at most blocklen. Thus, we need to give sufficient
- * memory.
- * df_data
- * start: drbg->scratchpad +
- * drbg_statelen(drbg) + drbg_blocklen(drbg)
- * length: drbg_statelen(drbg)
- *
- * drbg_ctr_df:
- * pad
- * start: df_data + drbg_statelen(drbg)
- * length: drbg_blocklen(drbg)
- * iv
- * start: pad + drbg_blocklen(drbg)
- * length: drbg_blocklen(drbg)
- * temp
- * start: iv + drbg_blocklen(drbg)
- * length: drbg_satelen(drbg) + drbg_blocklen(drbg)
- * note: temp is the buffer that the BCC function operates
- * on. BCC operates blockwise. drbg_statelen(drbg)
- * is sufficient when the DRBG state length is a multiple
- * of the block size. For AES192 (and maybe other ciphers)
- * this is not correct and the length for temp is
- * insufficient (yes, that also means for such ciphers,
- * the final output of all BCC rounds are truncated).
- * Therefore, add drbg_blocklen(drbg) to cover all
- * possibilities.
- */
-
-/* Derivation Function for CTR DRBG as defined in 10.4.2 */
static int drbg_ctr_df(struct drbg_state *drbg,
unsigned char *df_data, size_t bytes_to_return,
struct list_head *seedlist)
{
- int ret = -EFAULT;
- unsigned char L_N[8];
- /* S3 is input */
- struct drbg_string S1, S2, S4, cipherin;
- LIST_HEAD(bcc_list);
- unsigned char *pad = df_data + drbg_statelen(drbg);
- unsigned char *iv = pad + drbg_blocklen(drbg);
- unsigned char *temp = iv + drbg_blocklen(drbg);
- size_t padlen = 0;
- unsigned int templen = 0;
- /* 10.4.2 step 7 */
- unsigned int i = 0;
- /* 10.4.2 step 8 */
- const unsigned char *K = (unsigned char *)
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f";
- unsigned char *X;
- size_t generated_len = 0;
- size_t inputlen = 0;
- struct drbg_string *seed = NULL;
-
- memset(pad, 0, drbg_blocklen(drbg));
- memset(iv, 0, drbg_blocklen(drbg));
-
- /* 10.4.2 step 1 is implicit as we work byte-wise */
-
- /* 10.4.2 step 2 */
- if ((512/8) < bytes_to_return)
- return -EINVAL;
-
- /* 10.4.2 step 2 -- calculate the entire length of all input data */
- list_for_each_entry(seed, seedlist, list)
- inputlen += seed->len;
- drbg_cpu_to_be32(inputlen, &L_N[0]);
-
- /* 10.4.2 step 3 */
- drbg_cpu_to_be32(bytes_to_return, &L_N[4]);
-
- /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
- padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg));
- /* wrap the padlen appropriately */
- if (padlen)
- padlen = drbg_blocklen(drbg) - padlen;
- /*
- * pad / padlen contains the 0x80 byte and the following zero bytes.
- * As the calculated padlen value only covers the number of zero
- * bytes, this value has to be incremented by one for the 0x80 byte.
- */
- padlen++;
- pad[0] = 0x80;
-
- /* 10.4.2 step 4 -- first fill the linked list and then order it */
- drbg_string_fill(&S1, iv, drbg_blocklen(drbg));
- list_add_tail(&S1.list, &bcc_list);
- drbg_string_fill(&S2, L_N, sizeof(L_N));
- list_add_tail(&S2.list, &bcc_list);
- list_splice_tail(seedlist, &bcc_list);
- drbg_string_fill(&S4, pad, padlen);
- list_add_tail(&S4.list, &bcc_list);
-
- /* 10.4.2 step 9 */
- while (templen < (drbg_keylen(drbg) + (drbg_blocklen(drbg)))) {
- /*
- * 10.4.2 step 9.1 - the padding is implicit as the buffer
- * holds zeros after allocation -- even the increment of i
- * is irrelevant as the increment remains within length of i
- */
- drbg_cpu_to_be32(i, iv);
- /* 10.4.2 step 9.2 -- BCC and concatenation with temp */
- ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list);
- if (ret)
- goto out;
- /* 10.4.2 step 9.3 */
- i++;
- templen += drbg_blocklen(drbg);
- }
-
- /* 10.4.2 step 11 */
- X = temp + (drbg_keylen(drbg));
- drbg_string_fill(&cipherin, X, drbg_blocklen(drbg));
-
- /* 10.4.2 step 12: overwriting of outval is implemented in next step */
-
- /* 10.4.2 step 13 */
- drbg_kcapi_symsetkey(drbg, temp);
- while (generated_len < bytes_to_return) {
- short blocklen = 0;
- /*
- * 10.4.2 step 13.1: the truncation of the key length is
- * implicit as the key is only drbg_blocklen in size based on
- * the implementation of the cipher function callback
- */
- ret = drbg_kcapi_sym(drbg, X, &cipherin);
- if (ret)
- goto out;
- blocklen = (drbg_blocklen(drbg) <
- (bytes_to_return - generated_len)) ?
- drbg_blocklen(drbg) :
- (bytes_to_return - generated_len);
- /* 10.4.2 step 13.2 and 14 */
- memcpy(df_data + generated_len, X, blocklen);
- generated_len += blocklen;
- }
-
- ret = 0;
-
-out:
- memset(iv, 0, drbg_blocklen(drbg));
- memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
- memset(pad, 0, drbg_blocklen(drbg));
- return ret;
+ return crypto_drbg_ctr_df(drbg->priv_data, df_data, drbg_statelen(drbg),
+ seedlist, drbg_blocklen(drbg), drbg_statelen(drbg));
}
/*
@@ -1310,10 +1097,8 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
sb_size = 0;
else if (drbg->core->flags & DRBG_CTR)
sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg) + /* temp */
- drbg_statelen(drbg) + /* df_data */
- drbg_blocklen(drbg) + /* pad */
- drbg_blocklen(drbg) + /* iv */
- drbg_statelen(drbg) + drbg_blocklen(drbg); /* temp */
+ crypto_drbg_ctr_df_datalen(drbg_statelen(drbg),
+ drbg_blocklen(drbg));
else
sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg);
@@ -1658,7 +1443,6 @@ static void drbg_kcapi_set_entropy(struct crypto_rng *tfm,
#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
struct sdesc {
struct shash_desc shash;
- char ctx[];
};
static int drbg_init_hash_kernel(struct drbg_state *drbg)
@@ -1721,10 +1505,9 @@ static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
#ifdef CONFIG_CRYPTO_DRBG_CTR
static int drbg_fini_sym_kernel(struct drbg_state *drbg)
{
- struct crypto_cipher *tfm =
- (struct crypto_cipher *)drbg->priv_data;
- if (tfm)
- crypto_free_cipher(tfm);
+ struct crypto_aes_ctx *aesctx = (struct crypto_aes_ctx *)drbg->priv_data;
+
+ kfree(aesctx);
drbg->priv_data = NULL;
if (drbg->ctr_handle)
@@ -1743,20 +1526,16 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
- struct crypto_cipher *tfm;
+ struct crypto_aes_ctx *aesctx;
struct crypto_skcipher *sk_tfm;
struct skcipher_request *req;
unsigned int alignmask;
char ctr_name[CRYPTO_MAX_ALG_NAME];
- tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
- if (IS_ERR(tfm)) {
- pr_info("DRBG: could not allocate cipher TFM handle: %s\n",
- drbg->core->backend_cra_name);
- return PTR_ERR(tfm);
- }
- BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
- drbg->priv_data = tfm;
+ aesctx = kzalloc(sizeof(*aesctx), GFP_KERNEL);
+ if (!aesctx)
+ return -ENOMEM;
+ drbg->priv_data = aesctx;
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) {
@@ -1800,25 +1579,6 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
return alignmask;
}
-static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
- const unsigned char *key)
-{
- struct crypto_cipher *tfm = drbg->priv_data;
-
- crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
-}
-
-static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
- const struct drbg_string *in)
-{
- struct crypto_cipher *tfm = drbg->priv_data;
-
- /* there is only component in *in */
- BUG_ON(in->len < drbg_blocklen(drbg));
- crypto_cipher_encrypt_one(tfm, outval, in->buf);
- return 0;
-}
-
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inlen,
u8 *outbuf, u32 outlen)
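The roughly two hundred deleted lines at the top of this file's diff were the open-coded SP800-90A block-cipher derivation function; crypto_drbg_ctr_df() now owns that logic. Conceptually it is a BCC (block-chaining) pass over IV || L_N || seed || pad, followed by CTR-style expansion. A minimal sketch of the BCC step under those definitions, assuming a flat input buffer and the lib/crypto AES interface (the real helper walks a drbg_string list instead):

	/* BCC: CBC-MAC-like chaining, as used by the deleted drbg_ctr_bcc(). */
	static void bcc_sketch(const struct crypto_aes_ctx *key, u8 out[16],
			       const u8 *data, size_t len)
	{
		u8 chain[16] = {};

		for (size_t i = 0; i < len; i += 16) {
			crypto_xor(chain, data + i, 16);  /* chain ^= block */
			aes_encrypt(key, chain, chain);   /* chain = E(K, chain) */
		}
		memcpy(out, chain, 16);
	}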
diff --git a/crypto/fips.c b/crypto/fips.c
index e88a604cb42b..65d2bc070a26 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -24,7 +24,10 @@ EXPORT_SYMBOL_GPL(fips_fail_notif_chain);
/* Process kernel command-line parameter at boot time. fips=0 or fips=1 */
static int fips_enable(char *str)
{
- fips_enabled = !!simple_strtol(str, NULL, 0);
+ if (kstrtoint(str, 0, &fips_enabled))
+ return 0;
+
+ fips_enabled = !!fips_enabled;
pr_info("fips mode: %s\n", str_enabled_disabled(fips_enabled));
return 1;
}
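Besides the checkpatch-friendly API, the practical difference is strictness: kstrtoint() rejects trailing garbage with -EINVAL, whereas simple_strtol() silently parsed it as 0. The resulting behavior, sketched for a few inputs:

	/* fips=1    -> kstrtoint() == 0,       fips_enabled = 1
	 * fips=0    -> kstrtoint() == 0,       fips_enabled = 0
	 * fips=2    -> kstrtoint() == 0,       fips_enabled = !!2 = 1
	 * fips=oops -> kstrtoint() == -EINVAL, fips_enabled left unchanged
	 */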
diff --git a/crypto/hctr2.c b/crypto/hctr2.c
index c8932777bba8..f4cd6c29b4d3 100644
--- a/crypto/hctr2.c
+++ b/crypto/hctr2.c
@@ -17,7 +17,6 @@
*/
#include <crypto/internal/cipher.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/polyval.h>
#include <crypto/scatterwalk.h>
@@ -37,23 +36,14 @@
struct hctr2_instance_ctx {
struct crypto_cipher_spawn blockcipher_spawn;
struct crypto_skcipher_spawn xctr_spawn;
- struct crypto_shash_spawn polyval_spawn;
};
struct hctr2_tfm_ctx {
struct crypto_cipher *blockcipher;
struct crypto_skcipher *xctr;
- struct crypto_shash *polyval;
+ struct polyval_key poly_key;
+ struct polyval_elem hashed_tweaklens[2];
u8 L[BLOCKCIPHER_BLOCK_SIZE];
- int hashed_tweak_offset;
- /*
- * This struct is allocated with extra space for two exported hash
- * states. Since the hash state size is not known at compile-time, we
- * can't add these to the struct directly.
- *
- * hashed_tweaklen_divisible;
- * hashed_tweaklen_remainder;
- */
};
struct hctr2_request_ctx {
@@ -63,39 +53,17 @@ struct hctr2_request_ctx {
struct scatterlist *bulk_part_src;
struct scatterlist sg_src[2];
struct scatterlist sg_dst[2];
+ struct polyval_elem hashed_tweak;
/*
- * Sub-request sizes are unknown at compile-time, so they need to go
- * after the members with known sizes.
+ * skcipher sub-request size is unknown at compile-time, so it needs to
+ * go after the members with known sizes.
*/
union {
- struct shash_desc hash_desc;
+ struct polyval_ctx poly_ctx;
struct skcipher_request xctr_req;
} u;
- /*
- * This struct is allocated with extra space for one exported hash
- * state. Since the hash state size is not known at compile-time, we
- * can't add it to the struct directly.
- *
- * hashed_tweak;
- */
};
-static inline u8 *hctr2_hashed_tweaklen(const struct hctr2_tfm_ctx *tctx,
- bool has_remainder)
-{
- u8 *p = (u8 *)tctx + sizeof(*tctx);
-
- if (has_remainder) /* For messages not a multiple of block length */
- p += crypto_shash_statesize(tctx->polyval);
- return p;
-}
-
-static inline u8 *hctr2_hashed_tweak(const struct hctr2_tfm_ctx *tctx,
- struct hctr2_request_ctx *rctx)
-{
- return (u8 *)rctx + tctx->hashed_tweak_offset;
-}
-
/*
* The input data for each HCTR2 hash step begins with a 16-byte block that
* contains the tweak length and a flag that indicates whether the input is evenly
@@ -106,24 +74,23 @@ static inline u8 *hctr2_hashed_tweak(const struct hctr2_tfm_ctx *tctx,
*
* These precomputed hashes are stored in hctr2_tfm_ctx.
*/
-static int hctr2_hash_tweaklen(struct hctr2_tfm_ctx *tctx, bool has_remainder)
+static void hctr2_hash_tweaklens(struct hctr2_tfm_ctx *tctx)
{
- SHASH_DESC_ON_STACK(shash, tfm->polyval);
- __le64 tweak_length_block[2];
- int err;
-
- shash->tfm = tctx->polyval;
- memset(tweak_length_block, 0, sizeof(tweak_length_block));
-
- tweak_length_block[0] = cpu_to_le64(TWEAK_SIZE * 8 * 2 + 2 + has_remainder);
- err = crypto_shash_init(shash);
- if (err)
- return err;
- err = crypto_shash_update(shash, (u8 *)tweak_length_block,
- POLYVAL_BLOCK_SIZE);
- if (err)
- return err;
- return crypto_shash_export(shash, hctr2_hashed_tweaklen(tctx, has_remainder));
+ struct polyval_ctx ctx;
+
+ for (int has_remainder = 0; has_remainder < 2; has_remainder++) {
+ const __le64 tweak_length_block[2] = {
+ cpu_to_le64(TWEAK_SIZE * 8 * 2 + 2 + has_remainder),
+ };
+
+ polyval_init(&ctx, &tctx->poly_key);
+ polyval_update(&ctx, (const u8 *)&tweak_length_block,
+ sizeof(tweak_length_block));
+ static_assert(sizeof(tweak_length_block) == POLYVAL_BLOCK_SIZE);
+ polyval_export_blkaligned(
+ &ctx, &tctx->hashed_tweaklens[has_remainder]);
+ }
+ memzero_explicit(&ctx, sizeof(ctx));
}
static int hctr2_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -156,51 +123,42 @@ static int hctr2_setkey(struct crypto_skcipher *tfm, const u8 *key,
tctx->L[0] = 0x01;
crypto_cipher_encrypt_one(tctx->blockcipher, tctx->L, tctx->L);
- crypto_shash_clear_flags(tctx->polyval, CRYPTO_TFM_REQ_MASK);
- crypto_shash_set_flags(tctx->polyval, crypto_skcipher_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_shash_setkey(tctx->polyval, hbar, BLOCKCIPHER_BLOCK_SIZE);
- if (err)
- return err;
+ static_assert(sizeof(hbar) == POLYVAL_BLOCK_SIZE);
+ polyval_preparekey(&tctx->poly_key, hbar);
memzero_explicit(hbar, sizeof(hbar));
- return hctr2_hash_tweaklen(tctx, true) ?: hctr2_hash_tweaklen(tctx, false);
+ hctr2_hash_tweaklens(tctx);
+ return 0;
}
-static int hctr2_hash_tweak(struct skcipher_request *req)
+static void hctr2_hash_tweak(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
- struct shash_desc *hash_desc = &rctx->u.hash_desc;
- int err;
+ struct polyval_ctx *poly_ctx = &rctx->u.poly_ctx;
bool has_remainder = req->cryptlen % POLYVAL_BLOCK_SIZE;
- hash_desc->tfm = tctx->polyval;
- err = crypto_shash_import(hash_desc, hctr2_hashed_tweaklen(tctx, has_remainder));
- if (err)
- return err;
- err = crypto_shash_update(hash_desc, req->iv, TWEAK_SIZE);
- if (err)
- return err;
+ polyval_import_blkaligned(poly_ctx, &tctx->poly_key,
+ &tctx->hashed_tweaklens[has_remainder]);
+ polyval_update(poly_ctx, req->iv, TWEAK_SIZE);
// Store the hashed tweak, since we need it when computing both
// H(T || N) and H(T || V).
- return crypto_shash_export(hash_desc, hctr2_hashed_tweak(tctx, rctx));
+ static_assert(TWEAK_SIZE % POLYVAL_BLOCK_SIZE == 0);
+ polyval_export_blkaligned(poly_ctx, &rctx->hashed_tweak);
}
-static int hctr2_hash_message(struct skcipher_request *req,
- struct scatterlist *sgl,
- u8 digest[POLYVAL_DIGEST_SIZE])
+static void hctr2_hash_message(struct skcipher_request *req,
+ struct scatterlist *sgl,
+ u8 digest[POLYVAL_DIGEST_SIZE])
{
- static const u8 padding[BLOCKCIPHER_BLOCK_SIZE] = { 0x1 };
+ static const u8 padding = 0x1;
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
- struct shash_desc *hash_desc = &rctx->u.hash_desc;
+ struct polyval_ctx *poly_ctx = &rctx->u.poly_ctx;
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct sg_mapping_iter miter;
- unsigned int remainder = bulk_len % BLOCKCIPHER_BLOCK_SIZE;
int i;
- int err = 0;
int n = 0;
sg_miter_start(&miter, sgl, sg_nents(sgl),
@@ -208,22 +166,13 @@ static int hctr2_hash_message(struct skcipher_request *req,
for (i = 0; i < bulk_len; i += n) {
sg_miter_next(&miter);
n = min_t(unsigned int, miter.length, bulk_len - i);
- err = crypto_shash_update(hash_desc, miter.addr, n);
- if (err)
- break;
+ polyval_update(poly_ctx, miter.addr, n);
}
sg_miter_stop(&miter);
- if (err)
- return err;
-
- if (remainder) {
- err = crypto_shash_update(hash_desc, padding,
- BLOCKCIPHER_BLOCK_SIZE - remainder);
- if (err)
- return err;
- }
- return crypto_shash_final(hash_desc, digest);
+ if (req->cryptlen % BLOCKCIPHER_BLOCK_SIZE)
+ polyval_update(poly_ctx, &padding, 1);
+ polyval_final(poly_ctx, digest);
}
static int hctr2_finish(struct skcipher_request *req)
@@ -231,19 +180,14 @@ static int hctr2_finish(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
+ struct polyval_ctx *poly_ctx = &rctx->u.poly_ctx;
u8 digest[POLYVAL_DIGEST_SIZE];
- struct shash_desc *hash_desc = &rctx->u.hash_desc;
- int err;
// U = UU ^ H(T || V)
// or M = MM ^ H(T || N)
- hash_desc->tfm = tctx->polyval;
- err = crypto_shash_import(hash_desc, hctr2_hashed_tweak(tctx, rctx));
- if (err)
- return err;
- err = hctr2_hash_message(req, rctx->bulk_part_dst, digest);
- if (err)
- return err;
+ polyval_import_blkaligned(poly_ctx, &tctx->poly_key,
+ &rctx->hashed_tweak);
+ hctr2_hash_message(req, rctx->bulk_part_dst, digest);
crypto_xor(rctx->first_block, digest, BLOCKCIPHER_BLOCK_SIZE);
// Copy U (or M) into dst scatterlist
@@ -269,7 +213,6 @@ static int hctr2_crypt(struct skcipher_request *req, bool enc)
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
u8 digest[POLYVAL_DIGEST_SIZE];
int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
- int err;
// Requests must be at least one block
if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
@@ -287,12 +230,8 @@ static int hctr2_crypt(struct skcipher_request *req, bool enc)
// MM = M ^ H(T || N)
// or UU = U ^ H(T || V)
- err = hctr2_hash_tweak(req);
- if (err)
- return err;
- err = hctr2_hash_message(req, rctx->bulk_part_src, digest);
- if (err)
- return err;
+ hctr2_hash_tweak(req);
+ hctr2_hash_message(req, rctx->bulk_part_src, digest);
crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE);
// UU = E(MM)
@@ -338,8 +277,6 @@ static int hctr2_init_tfm(struct crypto_skcipher *tfm)
struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *xctr;
struct crypto_cipher *blockcipher;
- struct crypto_shash *polyval;
- unsigned int subreq_size;
int err;
xctr = crypto_spawn_skcipher(&ictx->xctr_spawn);
@@ -352,31 +289,17 @@ static int hctr2_init_tfm(struct crypto_skcipher *tfm)
goto err_free_xctr;
}
- polyval = crypto_spawn_shash(&ictx->polyval_spawn);
- if (IS_ERR(polyval)) {
- err = PTR_ERR(polyval);
- goto err_free_blockcipher;
- }
-
tctx->xctr = xctr;
tctx->blockcipher = blockcipher;
- tctx->polyval = polyval;
BUILD_BUG_ON(offsetofend(struct hctr2_request_ctx, u) !=
sizeof(struct hctr2_request_ctx));
- subreq_size = max(sizeof_field(struct hctr2_request_ctx, u.hash_desc) +
- crypto_shash_descsize(polyval),
- sizeof_field(struct hctr2_request_ctx, u.xctr_req) +
- crypto_skcipher_reqsize(xctr));
-
- tctx->hashed_tweak_offset = offsetof(struct hctr2_request_ctx, u) +
- subreq_size;
- crypto_skcipher_set_reqsize(tfm, tctx->hashed_tweak_offset +
- crypto_shash_statesize(polyval));
+ crypto_skcipher_set_reqsize(
+ tfm, max(sizeof(struct hctr2_request_ctx),
+ offsetofend(struct hctr2_request_ctx, u.xctr_req) +
+ crypto_skcipher_reqsize(xctr)));
return 0;
-err_free_blockcipher:
- crypto_free_cipher(blockcipher);
err_free_xctr:
crypto_free_skcipher(xctr);
return err;
@@ -388,7 +311,6 @@ static void hctr2_exit_tfm(struct crypto_skcipher *tfm)
crypto_free_cipher(tctx->blockcipher);
crypto_free_skcipher(tctx->xctr);
- crypto_free_shash(tctx->polyval);
}
static void hctr2_free_instance(struct skcipher_instance *inst)
@@ -397,21 +319,17 @@ static void hctr2_free_instance(struct skcipher_instance *inst)
crypto_drop_cipher(&ictx->blockcipher_spawn);
crypto_drop_skcipher(&ictx->xctr_spawn);
- crypto_drop_shash(&ictx->polyval_spawn);
kfree(inst);
}
-static int hctr2_create_common(struct crypto_template *tmpl,
- struct rtattr **tb,
- const char *xctr_name,
- const char *polyval_name)
+static int hctr2_create_common(struct crypto_template *tmpl, struct rtattr **tb,
+ const char *xctr_name)
{
struct skcipher_alg_common *xctr_alg;
u32 mask;
struct skcipher_instance *inst;
struct hctr2_instance_ctx *ictx;
struct crypto_alg *blockcipher_alg;
- struct shash_alg *polyval_alg;
char blockcipher_name[CRYPTO_MAX_ALG_NAME];
int len;
int err;
@@ -457,19 +375,6 @@ static int hctr2_create_common(struct crypto_template *tmpl,
if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
goto err_free_inst;
- /* Polyval ε-∆U hash function */
- err = crypto_grab_shash(&ictx->polyval_spawn,
- skcipher_crypto_instance(inst),
- polyval_name, 0, mask);
- if (err)
- goto err_free_inst;
- polyval_alg = crypto_spawn_shash_alg(&ictx->polyval_spawn);
-
- /* Ensure Polyval is being used */
- err = -EINVAL;
- if (strcmp(polyval_alg->base.cra_name, "polyval") != 0)
- goto err_free_inst;
-
/* Instance fields */
err = -ENAMETOOLONG;
@@ -477,22 +382,16 @@ static int hctr2_create_common(struct crypto_template *tmpl,
blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "hctr2_base(%s,%s)",
- xctr_alg->base.cra_driver_name,
- polyval_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ "hctr2_base(%s,polyval-lib)",
+ xctr_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
- inst->alg.base.cra_ctxsize = sizeof(struct hctr2_tfm_ctx) +
- polyval_alg->statesize * 2;
+ inst->alg.base.cra_ctxsize = sizeof(struct hctr2_tfm_ctx);
inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask;
- /*
- * The hash function is called twice, so it is weighted higher than the
- * xctr and blockcipher.
- */
inst->alg.base.cra_priority = (2 * xctr_alg->base.cra_priority +
- 4 * polyval_alg->base.cra_priority +
- blockcipher_alg->cra_priority) / 7;
+ blockcipher_alg->cra_priority) /
+ 3;
inst->alg.setkey = hctr2_setkey;
inst->alg.encrypt = hctr2_encrypt;
@@ -525,8 +424,11 @@ static int hctr2_create_base(struct crypto_template *tmpl, struct rtattr **tb)
polyval_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(polyval_name))
return PTR_ERR(polyval_name);
+ if (strcmp(polyval_name, "polyval") != 0 &&
+ strcmp(polyval_name, "polyval-lib") != 0)
+ return -ENOENT;
- return hctr2_create_common(tmpl, tb, xctr_name, polyval_name);
+ return hctr2_create_common(tmpl, tb, xctr_name);
}
static int hctr2_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -542,7 +444,7 @@ static int hctr2_create(struct crypto_template *tmpl, struct rtattr **tb)
blockcipher_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
- return hctr2_create_common(tmpl, tb, xctr_name, "polyval");
+ return hctr2_create_common(tmpl, tb, xctr_name);
}
static struct crypto_template hctr2_tmpls[] = {
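Taken together, the hunks above reduce HCTR2's hashing path to direct library calls with no error plumbing. A condensed sketch of the per-request flow using only the polyval_* API introduced here (msg and msg_len stand in for the scatterlist walk that hctr2_hash_message() performs):

	struct polyval_ctx ctx;
	u8 digest[POLYVAL_DIGEST_SIZE];

	/* Resume from the precomputed H(lengths) state, fold in the tweak... */
	polyval_import_blkaligned(&ctx, &tctx->poly_key,
				  &tctx->hashed_tweaklens[has_remainder]);
	polyval_update(&ctx, req->iv, TWEAK_SIZE);
	polyval_export_blkaligned(&ctx, &rctx->hashed_tweak);

	/* ...then hash the message and finalize; none of these calls can fail. */
	polyval_update(&ctx, msg, msg_len);
	polyval_final(&ctx, digest);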
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index a53de7affe8d..7c880cf34c52 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -48,7 +48,7 @@
#include "jitterentropy.h"
-#define JENT_CONDITIONING_HASH "sha3-256-generic"
+#define JENT_CONDITIONING_HASH "sha3-256"
/***************************************************************************
* Helper function
@@ -230,15 +230,7 @@ static int jent_kcapi_init(struct crypto_tfm *tfm)
spin_lock_init(&rng->jent_lock);
- /*
- * Use SHA3-256 as conditioner. We allocate only the generic
- * implementation as we are not interested in high-performance. The
- * execution time of the SHA3 operation is measured and adds to the
- * Jitter RNG's unpredictable behavior. If we have a slower hash
- * implementation, the execution timing variations are larger. When
- * using a fast implementation, we would need to call it more often
- * as its variations are lower.
- */
+ /* Use SHA3-256 as conditioner */
hash = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0);
if (IS_ERR(hash)) {
pr_err("Cannot allocate conditioning digest\n");
diff --git a/crypto/polyval-generic.c b/crypto/polyval-generic.c
deleted file mode 100644
index db8adb56e4ca..000000000000
--- a/crypto/polyval-generic.c
+++ /dev/null
@@ -1,205 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * POLYVAL: hash function for HCTR2.
- *
- * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
- * Copyright (c) 2009 Intel Corp.
- * Author: Huang Ying <ying.huang@intel.com>
- * Copyright 2021 Google LLC
- */
-
-/*
- * Code based on crypto/ghash-generic.c
- *
- * POLYVAL is a keyed hash function similar to GHASH. POLYVAL uses a different
- * modulus for finite field multiplication which makes hardware accelerated
- * implementations on little-endian machines faster. POLYVAL is used in the
- * kernel to implement HCTR2, but was originally specified for AES-GCM-SIV
- * (RFC 8452).
- *
- * For more information see:
- * Length-preserving encryption with HCTR2:
- * https://eprint.iacr.org/2021/1441.pdf
- * AES-GCM-SIV: Nonce Misuse-Resistant Authenticated Encryption:
- * https://datatracker.ietf.org/doc/html/rfc8452
- *
- * Like GHASH, POLYVAL is not a cryptographic hash function and should
- * not be used outside of crypto modes explicitly designed to use POLYVAL.
- *
- * This implementation uses a convenient trick involving the GHASH and POLYVAL
- * fields. This trick allows multiplication in the POLYVAL field to be
- * implemented by using multiplication in the GHASH field as a subroutine. An
- * element of the POLYVAL field can be converted to an element of the GHASH
- * field by computing x*REVERSE(a), where REVERSE reverses the byte-ordering of
- * a. Similarly, an element of the GHASH field can be converted back to the
- * POLYVAL field by computing REVERSE(x^{-1}*a). For more information, see:
- * https://datatracker.ietf.org/doc/html/rfc8452#appendix-A
- *
- * By using this trick, we do not need to implement the POLYVAL field for the
- * generic implementation.
- *
- * Warning: this generic implementation is not intended to be used in practice
- * and is not constant time. For practical use, a hardware accelerated
- * implementation of POLYVAL should be used instead.
- *
- */
-
-#include <crypto/gf128mul.h>
-#include <crypto/internal/hash.h>
-#include <crypto/polyval.h>
-#include <crypto/utils.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/unaligned.h>
-
-struct polyval_tfm_ctx {
- struct gf128mul_4k *gf128;
-};
-
-struct polyval_desc_ctx {
- union {
- u8 buffer[POLYVAL_BLOCK_SIZE];
- be128 buffer128;
- };
-};
-
-static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE],
- const u8 src[POLYVAL_BLOCK_SIZE])
-{
- u64 a = get_unaligned((const u64 *)&src[0]);
- u64 b = get_unaligned((const u64 *)&src[8]);
-
- put_unaligned(swab64(a), (u64 *)&dst[8]);
- put_unaligned(swab64(b), (u64 *)&dst[0]);
-}
-
-static int polyval_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct polyval_tfm_ctx *ctx = crypto_shash_ctx(tfm);
- be128 k;
-
- if (keylen != POLYVAL_BLOCK_SIZE)
- return -EINVAL;
-
- gf128mul_free_4k(ctx->gf128);
-
- BUILD_BUG_ON(sizeof(k) != POLYVAL_BLOCK_SIZE);
- copy_and_reverse((u8 *)&k, key);
- gf128mul_x_lle(&k, &k);
-
- ctx->gf128 = gf128mul_init_4k_lle(&k);
- memzero_explicit(&k, POLYVAL_BLOCK_SIZE);
-
- if (!ctx->gf128)
- return -ENOMEM;
-
- return 0;
-}
-
-static int polyval_init(struct shash_desc *desc)
-{
- struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
-
- memset(dctx, 0, sizeof(*dctx));
-
- return 0;
-}
-
-static int polyval_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
- const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
- u8 tmp[POLYVAL_BLOCK_SIZE];
-
- do {
- copy_and_reverse(tmp, src);
- crypto_xor(dctx->buffer, tmp, POLYVAL_BLOCK_SIZE);
- gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
- src += POLYVAL_BLOCK_SIZE;
- srclen -= POLYVAL_BLOCK_SIZE;
- } while (srclen >= POLYVAL_BLOCK_SIZE);
-
- return srclen;
-}
-
-static int polyval_finup(struct shash_desc *desc, const u8 *src,
- unsigned int len, u8 *dst)
-{
- struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (len) {
- u8 tmp[POLYVAL_BLOCK_SIZE] = {};
-
- memcpy(tmp, src, len);
- polyval_update(desc, tmp, POLYVAL_BLOCK_SIZE);
- }
- copy_and_reverse(dst, dctx->buffer);
- return 0;
-}
-
-static int polyval_export(struct shash_desc *desc, void *out)
-{
- struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
-
- copy_and_reverse(out, dctx->buffer);
- return 0;
-}
-
-static int polyval_import(struct shash_desc *desc, const void *in)
-{
- struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
-
- copy_and_reverse(dctx->buffer, in);
- return 0;
-}
-
-static void polyval_exit_tfm(struct crypto_shash *tfm)
-{
- struct polyval_tfm_ctx *ctx = crypto_shash_ctx(tfm);
-
- gf128mul_free_4k(ctx->gf128);
-}
-
-static struct shash_alg polyval_alg = {
- .digestsize = POLYVAL_DIGEST_SIZE,
- .init = polyval_init,
- .update = polyval_update,
- .finup = polyval_finup,
- .setkey = polyval_setkey,
- .export = polyval_export,
- .import = polyval_import,
- .exit_tfm = polyval_exit_tfm,
- .statesize = sizeof(struct polyval_desc_ctx),
- .descsize = sizeof(struct polyval_desc_ctx),
- .base = {
- .cra_name = "polyval",
- .cra_driver_name = "polyval-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
- .cra_blocksize = POLYVAL_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct polyval_tfm_ctx),
- .cra_module = THIS_MODULE,
- },
-};
-
-static int __init polyval_mod_init(void)
-{
- return crypto_register_shash(&polyval_alg);
-}
-
-static void __exit polyval_mod_exit(void)
-{
- crypto_unregister_shash(&polyval_alg);
-}
-
-module_init(polyval_mod_init);
-module_exit(polyval_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("POLYVAL hash function");
-MODULE_ALIAS_CRYPTO("polyval");
-MODULE_ALIAS_CRYPTO("polyval-generic");
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 1d010e2a1b1a..be0e24843806 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -10,25 +10,10 @@
*/
#include <crypto/scatterwalk.h>
-#include <linux/crypto.h>
-#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
-#include <linux/slab.h>
-
-enum {
- SKCIPHER_WALK_SLOW = 1 << 0,
- SKCIPHER_WALK_COPY = 1 << 1,
- SKCIPHER_WALK_DIFF = 1 << 2,
- SKCIPHER_WALK_SLEEP = 1 << 3,
-};
-
-static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
-{
- return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-}
void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes)
{
@@ -101,26 +86,97 @@ void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
}
EXPORT_SYMBOL_GPL(memcpy_to_sglist);
+/**
+ * memcpy_sglist() - Copy data from one scatterlist to another
+ * @dst: The destination scatterlist. Can be NULL if @nbytes == 0.
+ * @src: The source scatterlist. Can be NULL if @nbytes == 0.
+ * @nbytes: Number of bytes to copy
+ *
+ * The scatterlists can describe exactly the same memory, in which case this
+ * function is a no-op. No other overlaps are supported.
+ *
+ * Context: Any context
+ */
void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct skcipher_walk walk = {};
+ unsigned int src_offset, dst_offset;
- if (unlikely(nbytes == 0)) /* in case sg == NULL */
+ if (unlikely(nbytes == 0)) /* in case src and/or dst is NULL */
return;
- walk.total = nbytes;
-
- scatterwalk_start(&walk.in, src);
- scatterwalk_start(&walk.out, dst);
+ src_offset = src->offset;
+ dst_offset = dst->offset;
+ for (;;) {
+ /* Compute the length to copy this step. */
+ unsigned int len = min3(src->offset + src->length - src_offset,
+ dst->offset + dst->length - dst_offset,
+ nbytes);
+ struct page *src_page = sg_page(src);
+ struct page *dst_page = sg_page(dst);
+ const void *src_virt;
+ void *dst_virt;
+
+ if (IS_ENABLED(CONFIG_HIGHMEM)) {
+ /* HIGHMEM: we may have to actually map the pages. */
+ const unsigned int src_oip = offset_in_page(src_offset);
+ const unsigned int dst_oip = offset_in_page(dst_offset);
+ const unsigned int limit = PAGE_SIZE;
+
+ /* Further limit len to not cross a page boundary. */
+ len = min3(len, limit - src_oip, limit - dst_oip);
+
+ /* Compute the source and destination pages. */
+ src_page += src_offset / PAGE_SIZE;
+ dst_page += dst_offset / PAGE_SIZE;
+
+ if (src_page != dst_page) {
+ /* Copy between different pages. */
+ memcpy_page(dst_page, dst_oip,
+ src_page, src_oip, len);
+ flush_dcache_page(dst_page);
+ } else if (src_oip != dst_oip) {
+ /* Copy between different parts of same page. */
+ dst_virt = kmap_local_page(dst_page);
+ memcpy(dst_virt + dst_oip, dst_virt + src_oip,
+ len);
+ kunmap_local(dst_virt);
+ flush_dcache_page(dst_page);
+ } /* Else, it's the same memory. No action needed. */
+ } else {
+ /*
+ * !HIGHMEM: no mapping needed. Just work in the linear
+ * buffer of each sg entry. Note that we can cross page
+ * boundaries, as they are not significant in this case.
+ */
+ src_virt = page_address(src_page) + src_offset;
+ dst_virt = page_address(dst_page) + dst_offset;
+ if (src_virt != dst_virt) {
+ memcpy(dst_virt, src_virt, len);
+ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
+ __scatterwalk_flush_dcache_pages(
+ dst_page, dst_offset, len);
+ } /* Else, it's the same memory. No action needed. */
+ }
+ nbytes -= len;
+ if (nbytes == 0) /* No more to copy? */
+ break;
- skcipher_walk_first(&walk, true);
- do {
- if (walk.src.virt.addr != walk.dst.virt.addr)
- memcpy(walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes);
- skcipher_walk_done(&walk, 0);
- } while (walk.nbytes);
+ /*
+ * There's more to copy. Advance the offsets by the length
+ * copied this step, and advance the sg entries as needed.
+ */
+ src_offset += len;
+ if (src_offset >= src->offset + src->length) {
+ src = sg_next(src);
+ src_offset = src->offset;
+ }
+ dst_offset += len;
+ if (dst_offset >= dst->offset + dst->length) {
+ dst = sg_next(dst);
+ dst_offset = dst->offset;
+ }
+ }
}
EXPORT_SYMBOL_GPL(memcpy_sglist);
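A minimal usage sketch for the rewritten helper, assuming two single-entry scatterlists; the implementation above handles multi-entry lists, page-crossing offsets, and HIGHMEM mapping on its own:

	struct scatterlist src_sg, dst_sg;

	sg_init_one(&src_sg, src_buf, len);
	sg_init_one(&dst_sg, dst_buf, len);

	/* Copies len bytes; a no-op if both describe the same memory. */
	memcpy_sglist(&dst_sg, &src_sg, len);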
@@ -146,236 +202,3 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
return dst;
}
EXPORT_SYMBOL_GPL(scatterwalk_ffwd);
-
-static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
- unsigned alignmask = walk->alignmask;
- unsigned n;
- void *buffer;
-
- if (!walk->buffer)
- walk->buffer = walk->page;
- buffer = walk->buffer;
- if (!buffer) {
- /* Min size for a buffer of bsize bytes aligned to alignmask */
- n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-
- buffer = kzalloc(n, skcipher_walk_gfp(walk));
- if (!buffer)
- return skcipher_walk_done(walk, -ENOMEM);
- walk->buffer = buffer;
- }
-
- buffer = PTR_ALIGN(buffer, alignmask + 1);
- memcpy_from_scatterwalk(buffer, &walk->in, bsize);
- walk->out.__addr = buffer;
- walk->in.__addr = walk->out.addr;
-
- walk->nbytes = bsize;
- walk->flags |= SKCIPHER_WALK_SLOW;
-
- return 0;
-}
-
-static int skcipher_next_copy(struct skcipher_walk *walk)
-{
- void *tmp = walk->page;
-
- scatterwalk_map(&walk->in);
- memcpy(tmp, walk->in.addr, walk->nbytes);
- scatterwalk_unmap(&walk->in);
- /*
- * walk->in is advanced later when the number of bytes actually
- * processed (which might be less than walk->nbytes) is known.
- */
-
- walk->in.__addr = tmp;
- walk->out.__addr = tmp;
- return 0;
-}
-
-static int skcipher_next_fast(struct skcipher_walk *walk)
-{
- unsigned long diff;
-
- diff = offset_in_page(walk->in.offset) -
- offset_in_page(walk->out.offset);
- diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
- (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
-
- scatterwalk_map(&walk->out);
- walk->in.__addr = walk->out.__addr;
-
- if (diff) {
- walk->flags |= SKCIPHER_WALK_DIFF;
- scatterwalk_map(&walk->in);
- }
-
- return 0;
-}
-
-static int skcipher_walk_next(struct skcipher_walk *walk)
-{
- unsigned int bsize;
- unsigned int n;
-
- n = walk->total;
- bsize = min(walk->stride, max(n, walk->blocksize));
- n = scatterwalk_clamp(&walk->in, n);
- n = scatterwalk_clamp(&walk->out, n);
-
- if (unlikely(n < bsize)) {
- if (unlikely(walk->total < walk->blocksize))
- return skcipher_walk_done(walk, -EINVAL);
-
-slow_path:
- return skcipher_next_slow(walk, bsize);
- }
- walk->nbytes = n;
-
- if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
- if (!walk->page) {
- gfp_t gfp = skcipher_walk_gfp(walk);
-
- walk->page = (void *)__get_free_page(gfp);
- if (!walk->page)
- goto slow_path;
- }
- walk->flags |= SKCIPHER_WALK_COPY;
- return skcipher_next_copy(walk);
- }
-
- return skcipher_next_fast(walk);
-}
-
-static int skcipher_copy_iv(struct skcipher_walk *walk)
-{
- unsigned alignmask = walk->alignmask;
- unsigned ivsize = walk->ivsize;
- unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
- unsigned size;
- u8 *iv;
-
- /* Min size for a buffer of stride + ivsize, aligned to alignmask */
- size = aligned_stride + ivsize +
- (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-
- walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
- if (!walk->buffer)
- return -ENOMEM;
-
- iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
-
- walk->iv = memcpy(iv, walk->iv, walk->ivsize);
- return 0;
-}
-
-int skcipher_walk_first(struct skcipher_walk *walk, bool atomic)
-{
- if (WARN_ON_ONCE(in_hardirq()))
- return -EDEADLK;
-
- walk->flags = atomic ? 0 : SKCIPHER_WALK_SLEEP;
-
- walk->buffer = NULL;
- if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
- int err = skcipher_copy_iv(walk);
- if (err)
- return err;
- }
-
- walk->page = NULL;
-
- return skcipher_walk_next(walk);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_first);
-
-/**
- * skcipher_walk_done() - finish one step of a skcipher_walk
- * @walk: the skcipher_walk
- * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
- * or a -errno value to terminate the walk due to an error
- *
- * This function cleans up after one step of walking through the source and
- * destination scatterlists, and advances to the next step if applicable.
- * walk->nbytes is set to the number of bytes available in the next step,
- * walk->total is set to the new total number of bytes remaining, and
- * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
- * is no more data, or if an error occurred (i.e. -errno return), then
- * walk->nbytes and walk->total are set to 0 and all resources owned by the
- * skcipher_walk are freed.
- *
- * Return: 0 or a -errno value. If @res was a -errno value then it will be
- * returned, but other errors may occur too.
- */
-int skcipher_walk_done(struct skcipher_walk *walk, int res)
-{
- unsigned int n = walk->nbytes; /* num bytes processed this step */
- unsigned int total = 0; /* new total remaining */
-
- if (!n)
- goto finish;
-
- if (likely(res >= 0)) {
- n -= res; /* subtract num bytes *not* processed */
- total = walk->total - n;
- }
-
- if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
- SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF)))) {
- scatterwalk_advance(&walk->in, n);
- } else if (walk->flags & SKCIPHER_WALK_DIFF) {
- scatterwalk_done_src(&walk->in, n);
- } else if (walk->flags & SKCIPHER_WALK_COPY) {
- scatterwalk_advance(&walk->in, n);
- scatterwalk_map(&walk->out);
- memcpy(walk->out.addr, walk->page, n);
- } else { /* SKCIPHER_WALK_SLOW */
- if (res > 0) {
- /*
- * Didn't process all bytes. Either the algorithm is
- * broken, or this was the last step and it turned out
- * the message wasn't evenly divisible into blocks but
- * the algorithm requires it.
- */
- res = -EINVAL;
- total = 0;
- } else
- memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
- goto dst_done;
- }
-
- scatterwalk_done_dst(&walk->out, n);
-dst_done:
-
- if (res > 0)
- res = 0;
-
- walk->total = total;
- walk->nbytes = 0;
-
- if (total) {
- if (walk->flags & SKCIPHER_WALK_SLEEP)
- cond_resched();
- walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF);
- return skcipher_walk_next(walk);
- }
-
-finish:
- /* Short-circuit for the common/fast path. */
- if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
- goto out;
-
- if (walk->iv != walk->oiv)
- memcpy(walk->oiv, walk->iv, walk->ivsize);
- if (walk->buffer != walk->page)
- kfree(walk->buffer);
- if (walk->page)
- free_page((unsigned long)walk->page);
-
-out:
- return res;
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_done);
diff --git a/crypto/sha3.c b/crypto/sha3.c
new file mode 100644
index 000000000000..8f364979ec89
--- /dev/null
+++ b/crypto/sha3.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API support for SHA-3
+ * (https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf)
+ */
+#include <crypto/internal/hash.h>
+#include <crypto/sha3.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define SHA3_CTX(desc) ((struct sha3_ctx *)shash_desc_ctx(desc))
+
+static int crypto_sha3_224_init(struct shash_desc *desc)
+{
+ sha3_224_init(SHA3_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha3_256_init(struct shash_desc *desc)
+{
+ sha3_256_init(SHA3_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha3_384_init(struct shash_desc *desc)
+{
+ sha3_384_init(SHA3_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha3_512_init(struct shash_desc *desc)
+{
+ sha3_512_init(SHA3_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ sha3_update(SHA3_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_sha3_final(struct shash_desc *desc, u8 *out)
+{
+ sha3_final(SHA3_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_sha3_224_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha3_224(data, len, out);
+ return 0;
+}
+
+static int crypto_sha3_256_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha3_256(data, len, out);
+ return 0;
+}
+
+static int crypto_sha3_384_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha3_384(data, len, out);
+ return 0;
+}
+
+static int crypto_sha3_512_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha3_512(data, len, out);
+ return 0;
+}
+
+static int crypto_sha3_export_core(struct shash_desc *desc, void *out)
+{
+ memcpy(out, SHA3_CTX(desc), sizeof(struct sha3_ctx));
+ return 0;
+}
+
+static int crypto_sha3_import_core(struct shash_desc *desc, const void *in)
+{
+ memcpy(SHA3_CTX(desc), in, sizeof(struct sha3_ctx));
+ return 0;
+}
+
+static struct shash_alg algs[] = { {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .init = crypto_sha3_224_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .digest = crypto_sha3_224_digest,
+ .export_core = crypto_sha3_export_core,
+ .import_core = crypto_sha3_import_core,
+ .descsize = sizeof(struct sha3_ctx),
+ .base.cra_name = "sha3-224",
+ .base.cra_driver_name = "sha3-224-lib",
+ .base.cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .init = crypto_sha3_256_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .digest = crypto_sha3_256_digest,
+ .export_core = crypto_sha3_export_core,
+ .import_core = crypto_sha3_import_core,
+ .descsize = sizeof(struct sha3_ctx),
+ .base.cra_name = "sha3-256",
+ .base.cra_driver_name = "sha3-256-lib",
+ .base.cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .init = crypto_sha3_384_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .digest = crypto_sha3_384_digest,
+ .export_core = crypto_sha3_export_core,
+ .import_core = crypto_sha3_import_core,
+ .descsize = sizeof(struct sha3_ctx),
+ .base.cra_name = "sha3-384",
+ .base.cra_driver_name = "sha3-384-lib",
+ .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .init = crypto_sha3_512_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .digest = crypto_sha3_512_digest,
+ .export_core = crypto_sha3_export_core,
+ .import_core = crypto_sha3_import_core,
+ .descsize = sizeof(struct sha3_ctx),
+ .base.cra_name = "sha3-512",
+ .base.cra_driver_name = "sha3-512-lib",
+ .base.cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+} };
+
+static int __init crypto_sha3_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+module_init(crypto_sha3_mod_init);
+
+static void __exit crypto_sha3_mod_exit(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+module_exit(crypto_sha3_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto API support for SHA-3");
+
+MODULE_ALIAS_CRYPTO("sha3-224");
+MODULE_ALIAS_CRYPTO("sha3-224-lib");
+MODULE_ALIAS_CRYPTO("sha3-256");
+MODULE_ALIAS_CRYPTO("sha3-256-lib");
+MODULE_ALIAS_CRYPTO("sha3-384");
+MODULE_ALIAS_CRYPTO("sha3-384-lib");
+MODULE_ALIAS_CRYPTO("sha3-512");
+MODULE_ALIAS_CRYPTO("sha3-512-lib");
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
deleted file mode 100644
index 41d1e506e6de..000000000000
--- a/crypto/sha3_generic.c
+++ /dev/null
@@ -1,290 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cryptographic API.
- *
- * SHA-3, as specified in
- * https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
- *
- * SHA-3 code by Jeff Garzik <jeff@garzik.org>
- * Ard Biesheuvel <ard.biesheuvel@linaro.org>
- */
-#include <crypto/internal/hash.h>
-#include <crypto/sha3.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/unaligned.h>
-
-/*
- * On some 32-bit architectures (h8300), GCC ends up using
- * over 1 KB of stack if we inline the round calculation into the loop
- * in keccakf(). On the other hand, on 64-bit architectures with plenty
- * of [64-bit wide] general purpose registers, not inlining it severely
- * hurts performance. So let's use 64-bitness as a heuristic to decide
- * whether to inline or not.
- */
-#ifdef CONFIG_64BIT
-#define SHA3_INLINE inline
-#else
-#define SHA3_INLINE noinline
-#endif
-
-#define KECCAK_ROUNDS 24
-
-static const u64 keccakf_rndc[24] = {
- 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
- 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
- 0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL,
- 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
- 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL,
- 0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
- 0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL,
- 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
-};
-
-/* update the state with given number of rounds */
-
-static SHA3_INLINE void keccakf_round(u64 st[25])
-{
- u64 t[5], tt, bc[5];
-
- /* Theta */
- bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
- bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
- bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
- bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
- bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
-
- t[0] = bc[4] ^ rol64(bc[1], 1);
- t[1] = bc[0] ^ rol64(bc[2], 1);
- t[2] = bc[1] ^ rol64(bc[3], 1);
- t[3] = bc[2] ^ rol64(bc[4], 1);
- t[4] = bc[3] ^ rol64(bc[0], 1);
-
- st[0] ^= t[0];
-
- /* Rho Pi */
- tt = st[1];
- st[ 1] = rol64(st[ 6] ^ t[1], 44);
- st[ 6] = rol64(st[ 9] ^ t[4], 20);
- st[ 9] = rol64(st[22] ^ t[2], 61);
- st[22] = rol64(st[14] ^ t[4], 39);
- st[14] = rol64(st[20] ^ t[0], 18);
- st[20] = rol64(st[ 2] ^ t[2], 62);
- st[ 2] = rol64(st[12] ^ t[2], 43);
- st[12] = rol64(st[13] ^ t[3], 25);
- st[13] = rol64(st[19] ^ t[4], 8);
- st[19] = rol64(st[23] ^ t[3], 56);
- st[23] = rol64(st[15] ^ t[0], 41);
- st[15] = rol64(st[ 4] ^ t[4], 27);
- st[ 4] = rol64(st[24] ^ t[4], 14);
- st[24] = rol64(st[21] ^ t[1], 2);
- st[21] = rol64(st[ 8] ^ t[3], 55);
- st[ 8] = rol64(st[16] ^ t[1], 45);
- st[16] = rol64(st[ 5] ^ t[0], 36);
- st[ 5] = rol64(st[ 3] ^ t[3], 28);
- st[ 3] = rol64(st[18] ^ t[3], 21);
- st[18] = rol64(st[17] ^ t[2], 15);
- st[17] = rol64(st[11] ^ t[1], 10);
- st[11] = rol64(st[ 7] ^ t[2], 6);
- st[ 7] = rol64(st[10] ^ t[0], 3);
- st[10] = rol64( tt ^ t[1], 1);
-
- /* Chi */
- bc[ 0] = ~st[ 1] & st[ 2];
- bc[ 1] = ~st[ 2] & st[ 3];
- bc[ 2] = ~st[ 3] & st[ 4];
- bc[ 3] = ~st[ 4] & st[ 0];
- bc[ 4] = ~st[ 0] & st[ 1];
- st[ 0] ^= bc[ 0];
- st[ 1] ^= bc[ 1];
- st[ 2] ^= bc[ 2];
- st[ 3] ^= bc[ 3];
- st[ 4] ^= bc[ 4];
-
- bc[ 0] = ~st[ 6] & st[ 7];
- bc[ 1] = ~st[ 7] & st[ 8];
- bc[ 2] = ~st[ 8] & st[ 9];
- bc[ 3] = ~st[ 9] & st[ 5];
- bc[ 4] = ~st[ 5] & st[ 6];
- st[ 5] ^= bc[ 0];
- st[ 6] ^= bc[ 1];
- st[ 7] ^= bc[ 2];
- st[ 8] ^= bc[ 3];
- st[ 9] ^= bc[ 4];
-
- bc[ 0] = ~st[11] & st[12];
- bc[ 1] = ~st[12] & st[13];
- bc[ 2] = ~st[13] & st[14];
- bc[ 3] = ~st[14] & st[10];
- bc[ 4] = ~st[10] & st[11];
- st[10] ^= bc[ 0];
- st[11] ^= bc[ 1];
- st[12] ^= bc[ 2];
- st[13] ^= bc[ 3];
- st[14] ^= bc[ 4];
-
- bc[ 0] = ~st[16] & st[17];
- bc[ 1] = ~st[17] & st[18];
- bc[ 2] = ~st[18] & st[19];
- bc[ 3] = ~st[19] & st[15];
- bc[ 4] = ~st[15] & st[16];
- st[15] ^= bc[ 0];
- st[16] ^= bc[ 1];
- st[17] ^= bc[ 2];
- st[18] ^= bc[ 3];
- st[19] ^= bc[ 4];
-
- bc[ 0] = ~st[21] & st[22];
- bc[ 1] = ~st[22] & st[23];
- bc[ 2] = ~st[23] & st[24];
- bc[ 3] = ~st[24] & st[20];
- bc[ 4] = ~st[20] & st[21];
- st[20] ^= bc[ 0];
- st[21] ^= bc[ 1];
- st[22] ^= bc[ 2];
- st[23] ^= bc[ 3];
- st[24] ^= bc[ 4];
-}
-
-static void keccakf(u64 st[25])
-{
- int round;
-
- for (round = 0; round < KECCAK_ROUNDS; round++) {
- keccakf_round(st);
- /* Iota */
- st[0] ^= keccakf_rndc[round];
- }
-}
-
-int crypto_sha3_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
-
- memset(sctx->st, 0, sizeof(sctx->st));
- return 0;
-}
-EXPORT_SYMBOL(crypto_sha3_init);
-
-static int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- unsigned int rsiz = crypto_shash_blocksize(desc->tfm);
- struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int rsizw = rsiz / 8;
-
- do {
- int i;
-
- for (i = 0; i < rsizw; i++)
- sctx->st[i] ^= get_unaligned_le64(data + 8 * i);
- keccakf(sctx->st);
-
- data += rsiz;
- len -= rsiz;
- } while (len >= rsiz);
- return len;
-}
-
-static int crypto_sha3_finup(struct shash_desc *desc, const u8 *src,
- unsigned int len, u8 *out)
-{
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- unsigned int rsiz = crypto_shash_blocksize(desc->tfm);
- struct sha3_state *sctx = shash_desc_ctx(desc);
- __le64 block[SHA3_224_BLOCK_SIZE / 8] = {};
- __le64 *digest = (__le64 *)out;
- unsigned int rsizw = rsiz / 8;
- u8 *p;
- int i;
-
- p = memcpy(block, src, len);
- p[len++] = 0x06;
- p[rsiz - 1] |= 0x80;
-
- for (i = 0; i < rsizw; i++)
- sctx->st[i] ^= le64_to_cpu(block[i]);
- memzero_explicit(block, sizeof(block));
-
- keccakf(sctx->st);
-
- for (i = 0; i < digest_size / 8; i++)
- put_unaligned_le64(sctx->st[i], digest++);
-
- if (digest_size & 4)
- put_unaligned_le32(sctx->st[i], (__le32 *)digest);
-
- return 0;
-}
-
-static struct shash_alg algs[] = { {
- .digestsize = SHA3_224_DIGEST_SIZE,
- .init = crypto_sha3_init,
- .update = crypto_sha3_update,
- .finup = crypto_sha3_finup,
- .descsize = SHA3_STATE_SIZE,
- .base.cra_name = "sha3-224",
- .base.cra_driver_name = "sha3-224-generic",
- .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
- .base.cra_blocksize = SHA3_224_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = SHA3_256_DIGEST_SIZE,
- .init = crypto_sha3_init,
- .update = crypto_sha3_update,
- .finup = crypto_sha3_finup,
- .descsize = SHA3_STATE_SIZE,
- .base.cra_name = "sha3-256",
- .base.cra_driver_name = "sha3-256-generic",
- .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
- .base.cra_blocksize = SHA3_256_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = SHA3_384_DIGEST_SIZE,
- .init = crypto_sha3_init,
- .update = crypto_sha3_update,
- .finup = crypto_sha3_finup,
- .descsize = SHA3_STATE_SIZE,
- .base.cra_name = "sha3-384",
- .base.cra_driver_name = "sha3-384-generic",
- .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
- .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = SHA3_512_DIGEST_SIZE,
- .init = crypto_sha3_init,
- .update = crypto_sha3_update,
- .finup = crypto_sha3_finup,
- .descsize = SHA3_STATE_SIZE,
- .base.cra_name = "sha3-512",
- .base.cra_driver_name = "sha3-512-generic",
- .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
- .base.cra_blocksize = SHA3_512_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-} };
-
-static int __init sha3_generic_mod_init(void)
-{
- return crypto_register_shashes(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit sha3_generic_mod_fini(void)
-{
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-}
-
-module_init(sha3_generic_mod_init);
-module_exit(sha3_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-3 Secure Hash Algorithm");
-
-MODULE_ALIAS_CRYPTO("sha3-224");
-MODULE_ALIAS_CRYPTO("sha3-224-generic");
-MODULE_ALIAS_CRYPTO("sha3-256");
-MODULE_ALIAS_CRYPTO("sha3-256-generic");
-MODULE_ALIAS_CRYPTO("sha3-384");
-MODULE_ALIAS_CRYPTO("sha3-384-generic");
-MODULE_ALIAS_CRYPTO("sha3-512");
-MODULE_ALIAS_CRYPTO("sha3-512-generic");
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 8fa5d9686d08..14a820cb06c7 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -17,6 +17,7 @@
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -27,14 +28,258 @@
#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
+enum {
+ SKCIPHER_WALK_SLOW = 1 << 0,
+ SKCIPHER_WALK_COPY = 1 << 1,
+ SKCIPHER_WALK_DIFF = 1 << 2,
+ SKCIPHER_WALK_SLEEP = 1 << 3,
+};
+
static const struct crypto_type crypto_skcipher_type;
+static int skcipher_walk_next(struct skcipher_walk *walk);
+
+static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
+{
+ return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
static inline struct skcipher_alg *__crypto_skcipher_alg(
struct crypto_alg *alg)
{
return container_of(alg, struct skcipher_alg, base);
}
+/**
+ * skcipher_walk_done() - finish one step of a skcipher_walk
+ * @walk: the skcipher_walk
+ * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
+ * or a -errno value to terminate the walk due to an error
+ *
+ * This function cleans up after one step of walking through the source and
+ * destination scatterlists, and advances to the next step if applicable.
+ * walk->nbytes is set to the number of bytes available in the next step,
+ * walk->total is set to the new total number of bytes remaining, and
+ * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
+ * is no more data, or if an error occurred (i.e. -errno return), then
+ * walk->nbytes and walk->total are set to 0 and all resources owned by the
+ * skcipher_walk are freed.
+ *
+ * Return: 0 or a -errno value. If @res was a -errno value then it will be
+ * returned, but other errors may occur too.
+ */
+int skcipher_walk_done(struct skcipher_walk *walk, int res)
+{
+ unsigned int n = walk->nbytes; /* num bytes processed this step */
+ unsigned int total = 0; /* new total remaining */
+
+ if (!n)
+ goto finish;
+
+ if (likely(res >= 0)) {
+ n -= res; /* subtract num bytes *not* processed */
+ total = walk->total - n;
+ }
+
+ if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
+ SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF)))) {
+ scatterwalk_advance(&walk->in, n);
+ } else if (walk->flags & SKCIPHER_WALK_DIFF) {
+ scatterwalk_done_src(&walk->in, n);
+ } else if (walk->flags & SKCIPHER_WALK_COPY) {
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_map(&walk->out);
+ memcpy(walk->out.addr, walk->page, n);
+ } else { /* SKCIPHER_WALK_SLOW */
+ if (res > 0) {
+ /*
+ * Didn't process all bytes. Either the algorithm is
+ * broken, or this was the last step and it turned out
+ * the message wasn't evenly divisible into blocks but
+ * the algorithm requires it.
+ */
+ res = -EINVAL;
+ total = 0;
+ } else
+ memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
+ goto dst_done;
+ }
+
+ scatterwalk_done_dst(&walk->out, n);
+dst_done:
+
+ if (res > 0)
+ res = 0;
+
+ walk->total = total;
+ walk->nbytes = 0;
+
+ if (total) {
+ if (walk->flags & SKCIPHER_WALK_SLEEP)
+ cond_resched();
+ walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF);
+ return skcipher_walk_next(walk);
+ }
+
+finish:
+ /* Short-circuit for the common/fast path. */
+ if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+ goto out;
+
+ if (walk->iv != walk->oiv)
+ memcpy(walk->oiv, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+ free_page((unsigned long)walk->page);
+
+out:
+ return res;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_done);
+
+static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+ unsigned alignmask = walk->alignmask;
+ unsigned n;
+ void *buffer;
+
+ if (!walk->buffer)
+ walk->buffer = walk->page;
+ buffer = walk->buffer;
+ if (!buffer) {
+ /* Min size for a buffer of bsize bytes aligned to alignmask */
+ n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ buffer = kzalloc(n, skcipher_walk_gfp(walk));
+ if (!buffer)
+ return skcipher_walk_done(walk, -ENOMEM);
+ walk->buffer = buffer;
+ }
+
+ buffer = PTR_ALIGN(buffer, alignmask + 1);
+ memcpy_from_scatterwalk(buffer, &walk->in, bsize);
+ walk->out.__addr = buffer;
+ walk->in.__addr = walk->out.addr;
+
+ walk->nbytes = bsize;
+ walk->flags |= SKCIPHER_WALK_SLOW;
+
+ return 0;
+}
+
+static int skcipher_next_copy(struct skcipher_walk *walk)
+{
+ void *tmp = walk->page;
+
+ scatterwalk_map(&walk->in);
+ memcpy(tmp, walk->in.addr, walk->nbytes);
+ scatterwalk_unmap(&walk->in);
+ /*
+ * walk->in is advanced later when the number of bytes actually
+ * processed (which might be less than walk->nbytes) is known.
+ */
+
+ walk->in.__addr = tmp;
+ walk->out.__addr = tmp;
+ return 0;
+}
+
+static int skcipher_next_fast(struct skcipher_walk *walk)
+{
+ unsigned long diff;
+
+ diff = offset_in_page(walk->in.offset) -
+ offset_in_page(walk->out.offset);
+ diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
+ (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
+
+ scatterwalk_map(&walk->out);
+ walk->in.__addr = walk->out.__addr;
+
+ if (diff) {
+ walk->flags |= SKCIPHER_WALK_DIFF;
+ scatterwalk_map(&walk->in);
+ }
+
+ return 0;
+}
+
+static int skcipher_walk_next(struct skcipher_walk *walk)
+{
+ unsigned int bsize;
+ unsigned int n;
+
+ n = walk->total;
+ bsize = min(walk->stride, max(n, walk->blocksize));
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (unlikely(n < bsize)) {
+ if (unlikely(walk->total < walk->blocksize))
+ return skcipher_walk_done(walk, -EINVAL);
+
+slow_path:
+ return skcipher_next_slow(walk, bsize);
+ }
+ walk->nbytes = n;
+
+ if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
+ if (!walk->page) {
+ gfp_t gfp = skcipher_walk_gfp(walk);
+
+ walk->page = (void *)__get_free_page(gfp);
+ if (!walk->page)
+ goto slow_path;
+ }
+ walk->flags |= SKCIPHER_WALK_COPY;
+ return skcipher_next_copy(walk);
+ }
+
+ return skcipher_next_fast(walk);
+}
+
+static int skcipher_copy_iv(struct skcipher_walk *walk)
+{
+ unsigned alignmask = walk->alignmask;
+ unsigned ivsize = walk->ivsize;
+ unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
+ unsigned size;
+ u8 *iv;
+
+ /* Min size for a buffer of stride + ivsize, aligned to alignmask */
+ size = aligned_stride + ivsize +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
+ if (!walk->buffer)
+ return -ENOMEM;
+
+ iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
+
+ walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+ return 0;
+}
+
+static int skcipher_walk_first(struct skcipher_walk *walk)
+{
+ if (WARN_ON_ONCE(in_hardirq()))
+ return -EDEADLK;
+
+ walk->buffer = NULL;
+ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+ int err = skcipher_copy_iv(walk);
+ if (err)
+ return err;
+ }
+
+ walk->page = NULL;
+
+ return skcipher_walk_next(walk);
+}
+
int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
struct skcipher_request *__restrict req, bool atomic)
{
@@ -49,8 +294,10 @@ int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
- atomic = true;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags = SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags = 0;
if (unlikely(!walk->total))
return 0;
@@ -67,7 +314,7 @@ int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
else
walk->stride = alg->walksize;
- return skcipher_walk_first(walk, atomic);
+ return skcipher_walk_first(walk);
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
@@ -80,8 +327,10 @@ static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
- atomic = true;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags = SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags = 0;
if (unlikely(!walk->total))
return 0;
@@ -94,7 +343,7 @@ static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
walk->ivsize = crypto_aead_ivsize(tfm);
walk->alignmask = crypto_aead_alignmask(tfm);
- return skcipher_walk_first(walk, atomic);
+ return skcipher_walk_first(walk);
}
int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
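With the walker now private to this file, the caller-side contract is worth restating. A minimal sketch of the canonical loop in an skcipher implementation, where bsize and crypt_blocks() are hypothetical stand-ins for the algorithm's block size and cipher kernel:

	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		unsigned int n = walk.nbytes - (walk.nbytes % bsize);

		/* Process full blocks via the mapped src/dst addresses. */
		crypt_blocks(ctx, walk.dst.virt.addr, walk.src.virt.addr, n);

		/* Report how many bytes were *not* consumed this step. */
		err = skcipher_walk_done(&walk, walk.nbytes - n);
	}
	return err;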
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d1d88debbd71..62fef100e599 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1690,10 +1690,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret = min(ret, tcrypt_test("ccm(sm4)"));
break;
- case 57:
- ret = min(ret, tcrypt_test("polyval"));
- break;
-
case 58:
ret = min(ret, tcrypt_test("gcm(aria)"));
break;
@@ -1758,10 +1754,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret = min(ret, tcrypt_test("hmac(streebog512)"));
break;
- case 150:
- ret = min(ret, tcrypt_test("ansi_cprng"));
- break;
-
case 151:
ret = min(ret, tcrypt_test("rfc4106(gcm(aes))"));
break;
@@ -2268,10 +2260,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
- case 321:
- test_hash_speed("poly1305", sec, poly1305_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
case 322:
test_hash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 7f938ac93e58..85c3f77bcfb4 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -96,22 +96,4 @@ static struct hash_speed generic_hash_speed_template[] = {
{ .blen = 0, .plen = 0, }
};
-static struct hash_speed poly1305_speed_template[] = {
- { .blen = 96, .plen = 16, },
- { .blen = 96, .plen = 32, },
- { .blen = 96, .plen = 96, },
- { .blen = 288, .plen = 16, },
- { .blen = 288, .plen = 32, },
- { .blen = 288, .plen = 288, },
- { .blen = 1056, .plen = 32, },
- { .blen = 1056, .plen = 1056, },
- { .blen = 2080, .plen = 32, },
- { .blen = 2080, .plen = 2080, },
- { .blen = 4128, .plen = 4128, },
- { .blen = 8224, .plen = 8224, },
-
- /* End marker */
- { .blen = 0, .plen = 0, }
-};
-
#endif /* _CRYPTO_TCRYPT_H */
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 6a490aaa71b9..a302be53896d 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -117,11 +117,6 @@ struct hash_test_suite {
unsigned int count;
};
-struct cprng_test_suite {
- const struct cprng_testvec *vecs;
- unsigned int count;
-};
-
struct drbg_test_suite {
const struct drbg_testvec *vecs;
unsigned int count;
@@ -154,7 +149,6 @@ struct alg_test_desc {
struct cipher_test_suite cipher;
struct comp_test_suite comp;
struct hash_test_suite hash;
- struct cprng_test_suite cprng;
struct drbg_test_suite drbg;
struct akcipher_test_suite akcipher;
struct sig_test_suite sig;
@@ -3442,68 +3436,6 @@ out:
return ret;
}
-static int test_cprng(struct crypto_rng *tfm,
- const struct cprng_testvec *template,
- unsigned int tcount)
-{
- const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
- int err = 0, i, j, seedsize;
- u8 *seed;
- char result[32];
-
- seedsize = crypto_rng_seedsize(tfm);
-
- seed = kmalloc(seedsize, GFP_KERNEL);
- if (!seed) {
- printk(KERN_ERR "alg: cprng: Failed to allocate seed space "
- "for %s\n", algo);
- return -ENOMEM;
- }
-
- for (i = 0; i < tcount; i++) {
- memset(result, 0, 32);
-
- memcpy(seed, template[i].v, template[i].vlen);
- memcpy(seed + template[i].vlen, template[i].key,
- template[i].klen);
- memcpy(seed + template[i].vlen + template[i].klen,
- template[i].dt, template[i].dtlen);
-
- err = crypto_rng_reset(tfm, seed, seedsize);
- if (err) {
- printk(KERN_ERR "alg: cprng: Failed to reset rng "
- "for %s\n", algo);
- goto out;
- }
-
- for (j = 0; j < template[i].loops; j++) {
- err = crypto_rng_get_bytes(tfm, result,
- template[i].rlen);
- if (err < 0) {
- printk(KERN_ERR "alg: cprng: Failed to obtain "
- "the correct amount of random data for "
- "%s (requested %d)\n", algo,
- template[i].rlen);
- goto out;
- }
- }
-
- err = memcmp(result, template[i].result,
- template[i].rlen);
- if (err) {
- printk(KERN_ERR "alg: cprng: Test %d failed for %s\n",
- i, algo);
- hexdump(result, template[i].rlen);
- err = -EINVAL;
- goto out;
- }
- }
-
-out:
- kfree(seed);
- return err;
-}
-
static int alg_test_cipher(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
@@ -3550,29 +3482,6 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
return err;
}
-static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
- u32 type, u32 mask)
-{
- struct crypto_rng *rng;
- int err;
-
- rng = crypto_alloc_rng(driver, type, mask);
- if (IS_ERR(rng)) {
- if (PTR_ERR(rng) == -ENOENT)
- return 0;
- printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
- "%ld\n", driver, PTR_ERR(rng));
- return PTR_ERR(rng);
- }
-
- err = test_cprng(rng, desc->suite.cprng.vecs, desc->suite.cprng.count);
-
- crypto_free_rng(rng);
-
- return err;
-}
-
-
static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
const char *driver, u32 type, u32 mask)
{
@@ -4171,12 +4080,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.aead = __VECS(aegis128_tv_template)
}
}, {
- .alg = "ansi_cprng",
- .test = alg_test_cprng,
- .suite = {
- .cprng = __VECS(ansi_cprng_aes_tv_template)
- }
- }, {
.alg = "authenc(hmac(md5),ecb(cipher_null))",
.generic_driver = "authenc(hmac-md5-lib,ecb-cipher_null)",
.test = alg_test_aead,
@@ -4332,6 +4235,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "blake2b-160",
+ .generic_driver = "blake2b-160-lib",
.test = alg_test_hash,
.fips_allowed = 0,
.suite = {
@@ -4339,6 +4243,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "blake2b-256",
+ .generic_driver = "blake2b-256-lib",
.test = alg_test_hash,
.fips_allowed = 0,
.suite = {
@@ -4346,6 +4251,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "blake2b-384",
+ .generic_driver = "blake2b-384-lib",
.test = alg_test_hash,
.fips_allowed = 0,
.suite = {
@@ -4353,6 +4259,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "blake2b-512",
+ .generic_driver = "blake2b-512-lib",
.test = alg_test_hash,
.fips_allowed = 0,
.suite = {
@@ -5055,8 +4962,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hctr2(aes)",
- .generic_driver =
- "hctr2_base(xctr(aes-generic),polyval-generic)",
+ .generic_driver = "hctr2_base(xctr(aes-generic),polyval-lib)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_hctr2_tv_template)
@@ -5100,6 +5006,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha3-224)",
+ .generic_driver = "hmac(sha3-224-lib)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5107,6 +5014,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha3-256)",
+ .generic_driver = "hmac(sha3-256-lib)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5114,6 +5022,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha3-384)",
+ .generic_driver = "hmac(sha3-384-lib)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5121,6 +5030,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha3-512)",
+ .generic_driver = "hmac(sha3-512-lib)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5364,12 +5274,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
- .alg = "polyval",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(polyval_tv_template)
- }
- }, {
.alg = "rfc3686(ctr(aes))",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -5474,6 +5378,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha3-224",
+ .generic_driver = "sha3-224-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5481,6 +5386,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha3-256",
+ .generic_driver = "sha3-256-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5488,6 +5394,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha3-384",
+ .generic_driver = "sha3-384-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5495,6 +5402,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha3-512",
+ .generic_driver = "sha3-512-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
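
The new .generic_driver strings point the self-tests at the library-backed
drivers ("sha3-256-lib" and friends) in place of the removed sha3-generic
shash; testmgr uses this name to cross-check the driver under test.
Assuming those driver names are registered as the diffstat's new sha3.c
suggests, a caller could reach one explicitly like this (illustrative
sketch, with buf/len as assumed inputs):

        #include <crypto/hash.h>

        /* Illustrative only: hash buf[0..len) with the lib-backed SHA3-256
         * driver the self-tests now use as their generic reference. */
        static int sha3_256_lib_digest(const u8 *buf, unsigned int len,
                                       u8 digest[32])
        {
                struct crypto_shash *tfm;
                int err;

                tfm = crypto_alloc_shash("sha3-256-lib", 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);
                {
                        SHASH_DESC_ON_STACK(desc, tfm);

                        desc->tfm = tfm;
                        err = crypto_shash_digest(desc, buf, len, digest);
                }
                crypto_free_shash(tfm);
                return err;
        }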
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 268231227282..80bf5f1b67a6 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -119,18 +119,6 @@ struct aead_testvec {
int crypt_error;
};
-struct cprng_testvec {
- const char *key;
- const char *dt;
- const char *v;
- const char *result;
- unsigned char klen;
- unsigned short dtlen;
- unsigned short vlen;
- unsigned short rlen;
- unsigned short loops;
-};
-
struct drbg_testvec {
const unsigned char *entropy;
size_t entropylen;
@@ -9023,6 +9011,126 @@ static const struct cipher_testvec des_tv_template[] = {
.ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
.ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
.len = 8,
+ }, { /* Weak key */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xe0\xe0\xe0\xe0\xf1\xf1\xf1\xf1",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Weak key */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x1f\x1f\x1f\x1f\x0e\x0e\x0e\x0e",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Weak key */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 1a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x01\xfe\x01\xfe\x01\xfe\x01\xfe",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 1b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xfe\x01\xfe\x01\xfe\x01\xfe\x01",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 2a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x1f\xe0\x1f\xe0\x0e\xf1\x0e\xf1",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 2b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xe0\x1f\xe0\x1f\xf1\x0e\xf1\x0e",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 3a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x01\xe0\x01\xe0\x01\xf1\x01\xf1",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 3b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xe0\x01\xe0\x01\xf1\x01\xf1\x01",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 4a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x1f\xfe\x1f\xfe\x0e\xfe\x0e\xfe",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 4b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xfe\x1f\xfe\x1f\xfe\x0e\xfe\x0e",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 5a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x01\x1f\x01\x1f\x01\x0e\x01\x0e",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 5b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x1f\x01\x1f\x01\x0e\x01\x0e\x01",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 6a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xe0\xfe\xe0\xfe\xf1\xfe\xf1\xfe",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 6b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xfe\xe0\xfe\xe0\xfe\xf1\xfe\xf1",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
}, { /* Two blocks -- for testing encryption across pages */
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
.klen = 8,
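
The vectors added above exercise the classic DES weak and semi-weak key
table: each entry sets .wk = 1 to mark the key as weak and records the
expected failure in .setkey_error. In rough outline, and glossing over
testmgr's bookkeeping (tfm and vec are assumed locals here), the check
these vectors encode is:

        /* Rough sketch: with weak-key rejection requested, setkey on a
         * (semi-)weak DES key must fail with the recorded error. */
        crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
        err = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
        if (err != vec->setkey_error)   /* -EINVAL for these vectors */
                pr_err("weak key unexpectedly accepted\n");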
@@ -22377,100 +22485,6 @@ static const struct aead_testvec aegis128_tv_template[] = {
};
/*
- * ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode)
- * test vectors, taken from Appendix B.2.9 and B.2.10:
- * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
- * Only AES-128 is supported at this time.
- */
-static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
- {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xf9",
- .dtlen = 16,
- .v = "\x80\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x59\x53\x1e\xd1\x3b\xb0\xc0\x55"
- "\x84\x79\x66\x85\xc1\x2f\x76\x41",
- .rlen = 16,
- .loops = 1,
- }, {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfa",
- .dtlen = 16,
- .v = "\xc0\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x7c\x22\x2c\xf4\xca\x8f\xa2\x4c"
- "\x1c\x9c\xb6\x41\xa9\xf3\x22\x0d",
- .rlen = 16,
- .loops = 1,
- }, {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfb",
- .dtlen = 16,
- .v = "\xe0\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x8a\xaa\x00\x39\x66\x67\x5b\xe5"
- "\x29\x14\x28\x81\xa9\x4d\x4e\xc7",
- .rlen = 16,
- .loops = 1,
- }, {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfc",
- .dtlen = 16,
- .v = "\xf0\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x88\xdd\xa4\x56\x30\x24\x23\xe5"
- "\xf6\x9d\xa5\x7e\x7b\x95\xc7\x3a",
- .rlen = 16,
- .loops = 1,
- }, {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfd",
- .dtlen = 16,
- .v = "\xf8\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x05\x25\x92\x46\x61\x79\xd2\xcb"
- "\x78\xc4\x0b\x14\x0a\x5a\x9a\xc8",
- .rlen = 16,
- .loops = 1,
- }, { /* Monte Carlo Test */
- .key = "\x9f\x5b\x51\x20\x0b\xf3\x34\xb5"
- "\xd8\x2b\xe8\xc3\x72\x55\xc8\x48",
- .klen = 16,
- .dt = "\x63\x76\xbb\xe5\x29\x02\xba\x3b"
- "\x67\xc9\x25\xfa\x70\x1f\x11\xac",
- .dtlen = 16,
- .v = "\x57\x2c\x8e\x76\x87\x26\x47\x97"
- "\x7e\x74\xfb\xdd\xc4\x95\x01\xd1",
- .vlen = 16,
- .result = "\x48\xe9\xbd\x0d\x06\xee\x18\xfb"
- "\xe4\x57\x90\xd5\xc3\xfc\x9b\x73",
- .rlen = 16,
- .loops = 10000,
- },
-};
-
-/*
* SP800-90A DRBG Test vectors from
* http://csrc.nist.gov/groups/STM/cavp/documents/drbg/drbgtestvectors.zip
*
@@ -36237,177 +36251,6 @@ static const struct cipher_testvec aes_xctr_tv_template[] = {
/*
* Test vectors generated using https://github.com/google/hctr2
- *
- * To ensure compatibility with RFC 8452, some tests were sourced from
- * https://datatracker.ietf.org/doc/html/rfc8452
- */
-static const struct hash_testvec polyval_tv_template[] = {
- { // From RFC 8452
- .key = "\x31\x07\x28\xd9\x91\x1f\x1f\x38"
- "\x37\xb2\x43\x16\xc3\xfa\xb9\xa0",
- .plaintext = "\x65\x78\x61\x6d\x70\x6c\x65\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x48\x65\x6c\x6c\x6f\x20\x77\x6f"
- "\x72\x6c\x64\x00\x00\x00\x00\x00"
- "\x38\x00\x00\x00\x00\x00\x00\x00"
- "\x58\x00\x00\x00\x00\x00\x00\x00",
- .digest = "\xad\x7f\xcf\x0b\x51\x69\x85\x16"
- "\x62\x67\x2f\x3c\x5f\x95\x13\x8f",
- .psize = 48,
- .ksize = 16,
- },
- { // From RFC 8452
- .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a"
- "\xc5\xdb\xc6\x98\x7a\xda\x73\x77",
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 16,
- .ksize = 16,
- },
- { // From RFC 8452
- .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a"
- "\xc5\xdb\xc6\x98\x7a\xda\x73\x77",
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x40\x00\x00\x00\x00\x00\x00\x00",
- .digest = "\xeb\x93\xb7\x74\x09\x62\xc5\xe4"
- "\x9d\x2a\x90\xa7\xdc\x5c\xec\x74",
- .psize = 32,
- .ksize = 16,
- },
- { // From RFC 8452
- .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a"
- "\xc5\xdb\xc6\x98\x7a\xda\x73\x77",
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x80\x01\x00\x00\x00\x00\x00\x00",
- .digest = "\x81\x38\x87\x46\xbc\x22\xd2\x6b"
- "\x2a\xbc\x3d\xcb\x15\x75\x42\x22",
- .psize = 64,
- .ksize = 16,
- },
- { // From RFC 8452
- .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a"
- "\xc5\xdb\xc6\x98\x7a\xda\x73\x77",
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x04\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x02\x00\x00\x00\x00\x00\x00",
- .digest = "\x1e\x39\xb6\xd3\x34\x4d\x34\x8f"
- "\x60\x44\xf8\x99\x35\xd1\xcf\x78",
- .psize = 80,
- .ksize = 16,
- },
- { // From RFC 8452
- .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a"
- "\xc5\xdb\xc6\x98\x7a\xda\x73\x77",
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x04\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x05\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x08\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x02\x00\x00\x00\x00\x00\x00",
- .digest = "\xff\xcd\x05\xd5\x77\x0f\x34\xad"
- "\x92\x67\xf0\xa5\x99\x94\xb1\x5a",
- .psize = 96,
- .ksize = 16,
- },
- { // Random ( 1)
- .key = "\x90\xcc\xac\xee\xba\xd7\xd4\x68"
- "\x98\xa6\x79\x70\xdf\x66\x15\x6c",
- .plaintext = "",
- .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 0,
- .ksize = 16,
- },
- { // Random ( 1)
- .key = "\xc1\x45\x71\xf0\x30\x07\x94\xe7"
- "\x3a\xdd\xe4\xc6\x19\x2d\x02\xa2",
- .plaintext = "\xc1\x5d\x47\xc7\x4c\x7c\x5e\x07"
- "\x85\x14\x8f\x79\xcc\x73\x83\xf7"
- "\x35\xb8\xcb\x73\x61\xf0\x53\x31"
- "\xbf\x84\xde\xb6\xde\xaf\xb0\xb8"
- "\xb7\xd9\x11\x91\x89\xfd\x1e\x4c"
- "\x84\x4a\x1f\x2a\x87\xa4\xaf\x62"
- "\x8d\x7d\x58\xf6\x43\x35\xfc\x53"
- "\x8f\x1a\xf6\x12\xe1\x13\x3f\x66"
- "\x91\x4b\x13\xd6\x45\xfb\xb0\x7a"
- "\xe0\x8b\x8e\x99\xf7\x86\x46\x37"
- "\xd1\x22\x9e\x52\xf3\x3f\xd9\x75"
- "\x2c\x2c\xc6\xbb\x0e\x08\x14\x29"
- "\xe8\x50\x2f\xd8\xbe\xf4\xe9\x69"
- "\x4a\xee\xf7\xae\x15\x65\x35\x1e",
- .digest = "\x00\x4f\x5d\xe9\x3b\xc0\xd6\x50"
- "\x3e\x38\x73\x86\xc6\xda\xca\x7f",
- .psize = 112,
- .ksize = 16,
- },
- { // Random ( 1)
- .key = "\x37\xbe\x68\x16\x50\xb9\x4e\xb0"
- "\x47\xde\xe2\xbd\xde\xe4\x48\x09",
- .plaintext = "\x87\xfc\x68\x9f\xff\xf2\x4a\x1e"
- "\x82\x3b\x73\x8f\xc1\xb2\x1b\x7a"
- "\x6c\x4f\x81\xbc\x88\x9b\x6c\xa3"
- "\x9c\xc2\xa5\xbc\x14\x70\x4c\x9b"
- "\x0c\x9f\x59\x92\x16\x4b\x91\x3d"
- "\x18\x55\x22\x68\x12\x8c\x63\xb2"
- "\x51\xcb\x85\x4b\xd2\xae\x0b\x1c"
- "\x5d\x28\x9d\x1d\xb1\xc8\xf0\x77"
- "\xe9\xb5\x07\x4e\x06\xc8\xee\xf8"
- "\x1b\xed\x72\x2a\x55\x7d\x16\xc9"
- "\xf2\x54\xe7\xe9\xe0\x44\x5b\x33"
- "\xb1\x49\xee\xff\x43\xfb\x82\xcd"
- "\x4a\x70\x78\x81\xa4\x34\x36\xe8"
- "\x4c\x28\x54\xa6\x6c\xc3\x6b\x78"
- "\xe7\xc0\x5d\xc6\x5d\x81\xab\x70"
- "\x08\x86\xa1\xfd\xf4\x77\x55\xfd"
- "\xa3\xe9\xe2\x1b\xdf\x99\xb7\x80"
- "\xf9\x0a\x4f\x72\x4a\xd3\xaf\xbb"
- "\xb3\x3b\xeb\x08\x58\x0f\x79\xce"
- "\xa5\x99\x05\x12\x34\xd4\xf4\x86"
- "\x37\x23\x1d\xc8\x49\xc0\x92\xae"
- "\xa6\xac\x9b\x31\x55\xed\x15\xc6"
- "\x05\x17\x37\x8d\x90\x42\xe4\x87"
- "\x89\x62\x88\x69\x1c\x6a\xfd\xe3"
- "\x00\x2b\x47\x1a\x73\xc1\x51\xc2"
- "\xc0\x62\x74\x6a\x9e\xb2\xe5\x21"
- "\xbe\x90\xb5\xb0\x50\xca\x88\x68"
- "\xe1\x9d\x7a\xdf\x6c\xb7\xb9\x98"
- "\xee\x28\x62\x61\x8b\xd1\x47\xf9"
- "\x04\x7a\x0b\x5d\xcd\x2b\x65\xf5"
- "\x12\xa3\xfe\x1a\xaa\x2c\x78\x42"
- "\xb8\xbe\x7d\x74\xeb\x59\xba\xba",
- .digest = "\xae\x11\xd4\x60\x2a\x5f\x9e\x42"
- "\x89\x04\xc2\x34\x8d\x55\x94\x0a",
- .psize = 256,
- .ksize = 16,
- },
-
-};
-
-/*
- * Test vectors generated using https://github.com/google/hctr2
*/
static const struct cipher_testvec aes_hctr2_tv_template[] = {
{
diff --git a/crypto/zstd.c b/crypto/zstd.c
index ac318d333b68..cbbd0413751a 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
+#include <linux/overflow.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>
#include <crypto/internal/acompress.h>
@@ -25,7 +26,7 @@ struct zstd_ctx {
zstd_dctx *dctx;
size_t wksp_size;
zstd_parameters params;
- u8 wksp[] __aligned(8);
+ u8 wksp[] __aligned(8) __counted_by(wksp_size);
};
static DEFINE_MUTEX(zstd_stream_lock);
@@ -38,13 +39,12 @@ static void *zstd_alloc_stream(void)
params = zstd_get_params(ZSTD_DEF_LEVEL, ZSTD_MAX_SIZE);
- wksp_size = max_t(size_t,
- zstd_cstream_workspace_bound(&params.cParams),
- zstd_dstream_workspace_bound(ZSTD_MAX_SIZE));
+ wksp_size = max(zstd_cstream_workspace_bound(&params.cParams),
+ zstd_dstream_workspace_bound(ZSTD_MAX_SIZE));
if (!wksp_size)
return ERR_PTR(-EINVAL);
- ctx = kvmalloc(sizeof(*ctx) + wksp_size, GFP_KERNEL);
+ ctx = kvmalloc(struct_size(ctx, wksp, wksp_size), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
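
Two hardening idioms land here: struct_size(ctx, wksp, wksp_size) replaces
the open-coded sizeof(*ctx) + wksp_size and saturates rather than wrapping
on overflow, while __counted_by(wksp_size) ties the flexible array to its
length field for compiler and fortify bounds checking. The same pattern in
miniature, on a hypothetical struct for illustration:

        #include <linux/overflow.h>
        #include <linux/slab.h>

        struct blob {
                size_t n;
                u8 data[] __counted_by(n);
        };

        static struct blob *blob_alloc(size_t n)
        {
                /* struct_size() saturates to SIZE_MAX on overflow, so a huge
                 * n makes kvmalloc() fail instead of under-allocating. */
                struct blob *b = kvmalloc(struct_size(b, data, n), GFP_KERNEL);

                if (b)
                        b->n = n; /* counter must match before data[] is used */
                return b;
        }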
@@ -75,11 +75,6 @@ static int zstd_init(struct crypto_acomp *acomp_tfm)
return ret;
}
-static void zstd_exit(struct crypto_acomp *acomp_tfm)
-{
- crypto_acomp_free_streams(&zstd_streams);
-}
-
static int zstd_compress_one(struct acomp_req *req, struct zstd_ctx *ctx,
const void *src, void *dst, unsigned int *dlen)
{
@@ -297,7 +292,6 @@ static struct acomp_alg zstd_acomp = {
.cra_module = THIS_MODULE,
},
.init = zstd_init,
- .exit = zstd_exit,
.compress = zstd_compress,
.decompress = zstd_decompress,
};
@@ -310,6 +304,7 @@ static int __init zstd_mod_init(void)
static void __exit zstd_mod_fini(void)
{
crypto_unregister_acomp(&zstd_acomp);
+ crypto_acomp_free_streams(&zstd_streams);
}
module_init(zstd_mod_init);
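
Note the teardown move: the per-tfm ->exit hook is gone and
crypto_acomp_free_streams() now runs once at module exit. That matches the
pool being module-global (zstd_streams, guarded by zstd_stream_lock above):
freeing it whenever any one tfm died would have yanked state still shared by
the others. Roughly the hazard avoided, shown with a hypothetical caller:

        /* Hypothetical caller: both tfms share the module-global pool. */
        struct crypto_acomp *a = crypto_alloc_acomp("zstd", 0, 0);
        struct crypto_acomp *b = crypto_alloc_acomp("zstd", 0, 0);

        crypto_free_acomp(a);   /* old ->exit freed the shared pool here... */
        /* ...leaving b to operate on freed streams; after this change the
         * pool lives until zstd_mod_fini(). */
        crypto_free_acomp(b);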