Diffstat (limited to 'arch/arm64/crypto/aes-glue.c')
-rw-r--r--   arch/arm64/crypto/aes-glue.c   139
1 file changed, 63 insertions(+), 76 deletions(-)
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 5e207ff34482..b087b900d279 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -5,8 +5,6 @@
  * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
  */
 
-#include <asm/hwcap.h>
-#include <asm/neon.h>
 #include <crypto/aes.h>
 #include <crypto/ctr.h>
 #include <crypto/internal/hash.h>
@@ -20,6 +18,9 @@
 #include <linux/module.h>
 #include <linux/string.h>
 
+#include <asm/hwcap.h>
+#include <asm/simd.h>
+
 #include "aes-ce-setkey.h"
 
 #ifdef USE_V8_CRYPTO_EXTENSIONS
@@ -186,10 +187,9 @@ static int __maybe_unused ecb_encrypt(struct skcipher_request *req)
         err = skcipher_walk_virt(&walk, req, false);
 
         while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
-                kernel_neon_begin();
-                aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                                ctx->key_enc, rounds, blocks);
-                kernel_neon_end();
+                scoped_ksimd()
+                        aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+                                        ctx->key_enc, rounds, blocks);
                 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
         }
         return err;
@@ -206,10 +206,9 @@ static int __maybe_unused ecb_decrypt(struct skcipher_request *req)
         err = skcipher_walk_virt(&walk, req, false);
 
         while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
-                kernel_neon_begin();
-                aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                                ctx->key_dec, rounds, blocks);
-                kernel_neon_end();
+                scoped_ksimd()
+                        aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+                                        ctx->key_dec, rounds, blocks);
                 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
         }
         return err;
@@ -224,10 +223,9 @@ static int cbc_encrypt_walk(struct skcipher_request *req,
         unsigned int blocks;
 
         while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
-                kernel_neon_begin();
-                aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
-                                ctx->key_enc, rounds, blocks, walk->iv);
-                kernel_neon_end();
+                scoped_ksimd()
+                        aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
+                                        ctx->key_enc, rounds, blocks, walk->iv);
                 err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
         }
         return err;
@@ -253,10 +251,9 @@ static int cbc_decrypt_walk(struct skcipher_request *req,
         unsigned int blocks;
 
         while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
-                kernel_neon_begin();
-                aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
-                                ctx->key_dec, rounds, blocks, walk->iv);
-                kernel_neon_end();
+                scoped_ksimd()
+                        aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
+                                        ctx->key_dec, rounds, blocks, walk->iv);
                 err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
         }
         return err;
@@ -322,10 +319,9 @@ static int cts_cbc_encrypt(struct skcipher_request *req)
         if (err)
                 return err;
 
-        kernel_neon_begin();
-        aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                            ctx->key_enc, rounds, walk.nbytes, walk.iv);
-        kernel_neon_end();
+        scoped_ksimd()
+                aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+                                    ctx->key_enc, rounds, walk.nbytes, walk.iv);
 
         return skcipher_walk_done(&walk, 0);
 }
@@ -379,10 +375,9 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
         if (err)
                 return err;
 
-        kernel_neon_begin();
-        aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                            ctx->key_dec, rounds, walk.nbytes, walk.iv);
-        kernel_neon_end();
+        scoped_ksimd()
+                aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+                                    ctx->key_dec, rounds, walk.nbytes, walk.iv);
 
         return skcipher_walk_done(&walk, 0);
 }
@@ -399,11 +394,11 @@ static int __maybe_unused essiv_cbc_encrypt(struct skcipher_request *req)
 
         blocks = walk.nbytes / AES_BLOCK_SIZE;
         if (blocks) {
-                kernel_neon_begin();
-                aes_essiv_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                                      ctx->key1.key_enc, rounds, blocks,
-                                      req->iv, ctx->key2.key_enc);
-                kernel_neon_end();
+                scoped_ksimd()
+                        aes_essiv_cbc_encrypt(walk.dst.virt.addr,
+                                              walk.src.virt.addr,
+                                              ctx->key1.key_enc, rounds, blocks,
+                                              req->iv, ctx->key2.key_enc);
                 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
         }
         return err ?: cbc_encrypt_walk(req, &walk);
@@ -421,11 +416,11 @@ static int __maybe_unused essiv_cbc_decrypt(struct skcipher_request *req)
 
         blocks = walk.nbytes / AES_BLOCK_SIZE;
         if (blocks) {
-                kernel_neon_begin();
-                aes_essiv_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                                      ctx->key1.key_dec, rounds, blocks,
-                                      req->iv, ctx->key2.key_enc);
-                kernel_neon_end();
+                scoped_ksimd()
+                        aes_essiv_cbc_decrypt(walk.dst.virt.addr,
+                                              walk.src.virt.addr,
+                                              ctx->key1.key_dec, rounds, blocks,
+                                              req->iv, ctx->key2.key_enc);
                 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
         }
         return err ?: cbc_decrypt_walk(req, &walk);
@@ -461,10 +456,9 @@ static int __maybe_unused xctr_encrypt(struct skcipher_request *req)
                 else if (nbytes < walk.total)
                         nbytes &= ~(AES_BLOCK_SIZE - 1);
 
-                kernel_neon_begin();
-                aes_xctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
-                                 walk.iv, byte_ctr);
-                kernel_neon_end();
+                scoped_ksimd()
+                        aes_xctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
+                                         walk.iv, byte_ctr);
 
                 if (unlikely(nbytes < AES_BLOCK_SIZE))
                         memcpy(walk.dst.virt.addr,
@@ -506,10 +500,9 @@ static int __maybe_unused ctr_encrypt(struct skcipher_request *req)
                 else if (nbytes < walk.total)
                         nbytes &= ~(AES_BLOCK_SIZE - 1);
 
-                kernel_neon_begin();
-                aes_ctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
-                                walk.iv);
-                kernel_neon_end();
+                scoped_ksimd()
+                        aes_ctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
+                                        walk.iv);
 
                 if (unlikely(nbytes < AES_BLOCK_SIZE))
                         memcpy(walk.dst.virt.addr,
@@ -562,11 +555,10 @@ static int __maybe_unused xts_encrypt(struct skcipher_request *req)
                 if (walk.nbytes < walk.total)
                         nbytes &= ~(AES_BLOCK_SIZE - 1);
 
-                kernel_neon_begin();
-                aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                                ctx->key1.key_enc, rounds, nbytes,
-                                ctx->key2.key_enc, walk.iv, first);
-                kernel_neon_end();
+                scoped_ksimd()
+                        aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+                                        ctx->key1.key_enc, rounds, nbytes,
+                                        ctx->key2.key_enc, walk.iv, first);
                 err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
         }
 
@@ -584,11 +576,10 @@ static int __maybe_unused xts_encrypt(struct skcipher_request *req)
         if (err)
                 return err;
 
-        kernel_neon_begin();
-        aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                        ctx->key1.key_enc, rounds, walk.nbytes,
-                        ctx->key2.key_enc, walk.iv, first);
-        kernel_neon_end();
+        scoped_ksimd()
+                aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+                                ctx->key1.key_enc, rounds, walk.nbytes,
+                                ctx->key2.key_enc, walk.iv, first);
 
         return skcipher_walk_done(&walk, 0);
 }
@@ -634,11 +625,10 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
                 if (walk.nbytes < walk.total)
                         nbytes &= ~(AES_BLOCK_SIZE - 1);
 
-                kernel_neon_begin();
-                aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                                ctx->key1.key_dec, rounds, nbytes,
-                                ctx->key2.key_enc, walk.iv, first);
-                kernel_neon_end();
+                scoped_ksimd()
+                        aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+                                        ctx->key1.key_dec, rounds, nbytes,
+                                        ctx->key2.key_enc, walk.iv, first);
                 err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
         }
 
@@ -657,11 +647,10 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
         if (err)
                 return err;
 
-        kernel_neon_begin();
-        aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                        ctx->key1.key_dec, rounds, walk.nbytes,
-                        ctx->key2.key_enc, walk.iv, first);
-        kernel_neon_end();
+        scoped_ksimd()
+                aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+                                ctx->key1.key_dec, rounds, walk.nbytes,
+                                ctx->key2.key_enc, walk.iv, first);
 
         return skcipher_walk_done(&walk, 0);
 }
@@ -808,10 +797,9 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
                 return err;
 
         /* encrypt the zero vector */
-        kernel_neon_begin();
-        aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, ctx->key.key_enc,
-                        rounds, 1);
-        kernel_neon_end();
+        scoped_ksimd()
+                aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){},
+                                ctx->key.key_enc, rounds, 1);
 
         cmac_gf128_mul_by_x(consts, consts);
         cmac_gf128_mul_by_x(consts + 1, consts);
@@ -837,10 +825,10 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
         if (err)
                 return err;
 
-        kernel_neon_begin();
-        aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1);
-        aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2);
-        kernel_neon_end();
+        scoped_ksimd() {
+                aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1);
+                aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2);
+        }
 
         return cbcmac_setkey(tfm, key, sizeof(key));
 }
@@ -860,10 +848,9 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
                 int rem;
 
                 do {
-                        kernel_neon_begin();
-                        rem = aes_mac_update(in, ctx->key_enc, rounds, blocks,
-                                             dg, enc_before, !enc_before);
-                        kernel_neon_end();
+                        scoped_ksimd()
+                                rem = aes_mac_update(in, ctx->key_enc, rounds, blocks,
+                                                     dg, enc_before, !enc_before);
                         in += (blocks - rem) * AES_BLOCK_SIZE;
                         blocks = rem;
                 } while (blocks);
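
Note: the conversion above is mechanical. Every open-coded kernel_neon_begin()/kernel_neon_end() pair becomes a scoped_ksimd() statement (or a braced block, as in xcbc_setkey()), with <asm/simd.h> now pulled in instead of <asm/neon.h>. As a rough, hypothetical sketch of the idea only — not the kernel's actual scoped_ksimd() definition, which this patch gets from <asm/simd.h> — such a guard can tie the "end" call to scope exit; the names ksimd_scope_exit and __scope below are made up for illustration:

        /*
         * Hypothetical sketch only -- NOT the kernel's real scoped_ksimd()
         * definition.  It shows the general shape of a scoped guard:
         * kernel_neon_begin() runs when the scope is entered and
         * kernel_neon_end() runs automatically when it is left, so the
         * guarded statement or { } block cannot miss the matching "end".
         */
        #include <asm/neon.h>   /* kernel_neon_begin() / kernel_neon_end() */

        static inline void ksimd_scope_exit(int *unused)
        {
                kernel_neon_end();
        }

        #define scoped_ksimd()                                                  \
                for (int __scope                                                \
                             __attribute__((__cleanup__(ksimd_scope_exit))) =  \
                             (kernel_neon_begin(), 0);                          \
                     !__scope; __scope = 1)

With a guard of this shape, the statement (or block) following scoped_ksimd() runs exactly once with the SIMD unit enabled, and the end call happens on every exit path, which is the property each converted call site in this diff relies on.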
