| author | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
| commit | 7731b8bc94e599c9a79e428f3359ff2c34b7576a (patch) | |
| tree | 879f18ccbe274122f2d4f095b43cbc7f953e0ada /drivers/crypto/inside-secure/safexcel_cipher.c | |
| parent | 48e315618dc4dc8904182cd221e3d395d5d97005 (diff) | |
| parent | 9ffc59d57228d74809700be6f7ecb1db10292f05 (diff) | |
Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.
Diffstat (limited to 'drivers/crypto/inside-secure/safexcel_cipher.c')
| -rw-r--r-- | drivers/crypto/inside-secure/safexcel_cipher.c | 642 | 
1 file changed, 522 insertions, 120 deletions
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index bafb60505fab..6bb60fda2043 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -12,8 +12,12 @@  #include <linux/dma-mapping.h>  #include <linux/dmapool.h> +#include <crypto/aead.h>  #include <crypto/aes.h> +#include <crypto/authenc.h> +#include <crypto/sha.h>  #include <crypto/skcipher.h> +#include <crypto/internal/aead.h>  #include <crypto/internal/skcipher.h>  #include "safexcel.h" @@ -28,9 +32,16 @@ struct safexcel_cipher_ctx {  	struct safexcel_crypto_priv *priv;  	u32 mode; +	bool aead;  	__le32 key[8];  	unsigned int key_len; + +	/* All the below is AEAD specific */ +	u32 alg; +	u32 state_sz; +	u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)]; +	u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];  };  struct safexcel_cipher_req { @@ -38,18 +49,16 @@ struct safexcel_cipher_req {  	bool needs_inv;  }; -static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, -				  struct crypto_async_request *async, -				  struct safexcel_command_desc *cdesc, -				  u32 length) +static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, +				    struct safexcel_command_desc *cdesc, +				    u32 length)  { -	struct skcipher_request *req = skcipher_request_cast(async);  	struct safexcel_token *token;  	unsigned offset = 0;  	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {  		offset = AES_BLOCK_SIZE / sizeof(u32); -		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE); +		memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);  		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;  	} @@ -65,8 +74,64 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,  				EIP197_TOKEN_INS_TYPE_OUTPUT;  } -static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key, -			       unsigned int len) +static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, +				struct safexcel_command_desc *cdesc, +				enum safexcel_cipher_direction direction, +				u32 cryptlen, u32 assoclen, u32 digestsize) +{ +	struct safexcel_token *token; +	unsigned offset = 0; + +	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { +		offset = AES_BLOCK_SIZE / sizeof(u32); +		memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE); + +		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; +	} + +	token = (struct safexcel_token *)(cdesc->control_data.token + offset); + +	if (direction == SAFEXCEL_DECRYPT) +		cryptlen -= digestsize; + +	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; +	token[0].packet_length = assoclen; +	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH | +				EIP197_TOKEN_INS_TYPE_OUTPUT; + +	token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; +	token[1].packet_length = cryptlen; +	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; +	token[1].instructions = EIP197_TOKEN_INS_LAST | +				EIP197_TOKEN_INS_TYPE_CRYTO | +				EIP197_TOKEN_INS_TYPE_HASH | +				EIP197_TOKEN_INS_TYPE_OUTPUT; + +	if (direction == SAFEXCEL_ENCRYPT) { +		token[2].opcode = EIP197_TOKEN_OPCODE_INSERT; +		token[2].packet_length = digestsize; +		token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | +				EIP197_TOKEN_STAT_LAST_PACKET; +		token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | +					EIP197_TOKEN_INS_INSERT_HASH_DIGEST; +	} else { +		token[2].opcode = EIP197_TOKEN_OPCODE_RETRIEVE; +		token[2].packet_length = digestsize; +		token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | +				EIP197_TOKEN_STAT_LAST_PACKET; +		
token[2].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; + +		token[3].opcode = EIP197_TOKEN_OPCODE_VERIFY; +		token[3].packet_length = digestsize | +					 EIP197_TOKEN_HASH_RESULT_VERIFY; +		token[3].stat = EIP197_TOKEN_STAT_LAST_HASH | +				EIP197_TOKEN_STAT_LAST_PACKET; +		token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; +	} +} + +static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, +					const u8 *key, unsigned int len)  {  	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);  	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); @@ -98,41 +163,123 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,  	return 0;  } +static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key, +				    unsigned int len) +{ +	struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); +	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); +	struct safexcel_ahash_export_state istate, ostate; +	struct safexcel_crypto_priv *priv = ctx->priv; +	struct crypto_authenc_keys keys; + +	if (crypto_authenc_extractkeys(&keys, key, len) != 0) +		goto badkey; + +	if (keys.enckeylen > sizeof(ctx->key)) +		goto badkey; + +	/* Encryption key */ +	if (priv->version == EIP197 && ctx->base.ctxr_dma && +	    memcmp(ctx->key, keys.enckey, keys.enckeylen)) +		ctx->base.needs_inv = true; + +	/* Auth key */ +	switch (ctx->alg) { +	case CONTEXT_CONTROL_CRYPTO_ALG_SHA1: +		if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey, +					 keys.authkeylen, &istate, &ostate)) +			goto badkey; +		break; +	case CONTEXT_CONTROL_CRYPTO_ALG_SHA224: +		if (safexcel_hmac_setkey("safexcel-sha224", keys.authkey, +					 keys.authkeylen, &istate, &ostate)) +			goto badkey; +		break; +	case CONTEXT_CONTROL_CRYPTO_ALG_SHA256: +		if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey, +					 keys.authkeylen, &istate, &ostate)) +			goto badkey; +		break; +	default: +		dev_err(priv->dev, "aead: unsupported hash algorithm\n"); +		goto badkey; +	} + +	crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) & +				    CRYPTO_TFM_RES_MASK); + +	if (priv->version == EIP197 && ctx->base.ctxr_dma && +	    (memcmp(ctx->ipad, istate.state, ctx->state_sz) || +	     memcmp(ctx->opad, ostate.state, ctx->state_sz))) +		ctx->base.needs_inv = true; + +	/* Now copy the keys into the context */ +	memcpy(ctx->key, keys.enckey, keys.enckeylen); +	ctx->key_len = keys.enckeylen; + +	memcpy(ctx->ipad, &istate.state, ctx->state_sz); +	memcpy(ctx->opad, &ostate.state, ctx->state_sz); + +	memzero_explicit(&keys, sizeof(keys)); +	return 0; + +badkey: +	crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); +	memzero_explicit(&keys, sizeof(keys)); +	return -EINVAL; +} +  static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,  				    struct crypto_async_request *async, +				    struct safexcel_cipher_req *sreq,  				    struct safexcel_command_desc *cdesc)  {  	struct safexcel_crypto_priv *priv = ctx->priv; -	struct skcipher_request *req = skcipher_request_cast(async); -	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);  	int ctrl_size; -	if (sreq->direction == SAFEXCEL_ENCRYPT) +	if (ctx->aead) { +		if (sreq->direction == SAFEXCEL_ENCRYPT) +			cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT; +		else +			cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN; +	} else {  		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; -	else -		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN; + +		/* The decryption control type is a 
combination of the +		 * encryption type and CONTEXT_CONTROL_TYPE_NULL_IN, for all +		 * types. +		 */ +		if (sreq->direction == SAFEXCEL_DECRYPT) +			cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN; +	}  	cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;  	cdesc->control_data.control1 |= ctx->mode; +	if (ctx->aead) +		cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC | +						ctx->alg; +  	switch (ctx->key_len) {  	case AES_KEYSIZE_128:  		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128; -		ctrl_size = 4;  		break;  	case AES_KEYSIZE_192:  		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192; -		ctrl_size = 6;  		break;  	case AES_KEYSIZE_256:  		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256; -		ctrl_size = 8;  		break;  	default:  		dev_err(priv->dev, "aes keysize not supported: %u\n",  			ctx->key_len);  		return -EINVAL;  	} + +	ctrl_size = ctx->key_len / sizeof(u32); +	if (ctx->aead) +		/* Take in account the ipad+opad digests */ +		ctrl_size += ctx->state_sz / sizeof(u32) * 2;  	cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);  	return 0; @@ -140,9 +287,12 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,  static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,  				      struct crypto_async_request *async, +				      struct scatterlist *src, +				      struct scatterlist *dst, +				      unsigned int cryptlen, +				      struct safexcel_cipher_req *sreq,  				      bool *should_complete, int *ret)  { -	struct skcipher_request *req = skcipher_request_cast(async);  	struct safexcel_result_desc *rdesc;  	int ndesc = 0; @@ -158,12 +308,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin  			break;  		} -		if (rdesc->result_data.error_code) { -			dev_err(priv->dev, -				"cipher: result: result descriptor error (%d)\n", -				rdesc->result_data.error_code); -			*ret = -EIO; -		} +		if (likely(!*ret)) +			*ret = safexcel_rdesc_check_errors(priv, rdesc);  		ndesc++;  	} while (!rdesc->last_seg); @@ -171,16 +317,16 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin  	safexcel_complete(priv, ring);  	spin_unlock_bh(&priv->ring[ring].egress_lock); -	if (req->src == req->dst) { -		dma_unmap_sg(priv->dev, req->src, -			     sg_nents_for_len(req->src, req->cryptlen), +	if (src == dst) { +		dma_unmap_sg(priv->dev, src, +			     sg_nents_for_len(src, cryptlen),  			     DMA_BIDIRECTIONAL);  	} else { -		dma_unmap_sg(priv->dev, req->src, -			     sg_nents_for_len(req->src, req->cryptlen), +		dma_unmap_sg(priv->dev, src, +			     sg_nents_for_len(src, cryptlen),  			     DMA_TO_DEVICE); -		dma_unmap_sg(priv->dev, req->dst, -			     sg_nents_for_len(req->dst, req->cryptlen), +		dma_unmap_sg(priv->dev, dst, +			     sg_nents_for_len(dst, cryptlen),  			     DMA_FROM_DEVICE);  	} @@ -189,39 +335,43 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin  	return ndesc;  } -static int safexcel_aes_send(struct crypto_async_request *async, -			     int ring, struct safexcel_request *request, -			     int *commands, int *results) +static int safexcel_aes_send(struct crypto_async_request *base, int ring, +			     struct safexcel_request *request, +			     struct safexcel_cipher_req *sreq, +			     struct scatterlist *src, struct scatterlist *dst, +			     unsigned int cryptlen, unsigned int assoclen, +			     unsigned int digestsize, u8 *iv, int *commands, +			     int 
*results)  { -	struct skcipher_request *req = skcipher_request_cast(async); -	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); +	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);  	struct safexcel_crypto_priv *priv = ctx->priv;  	struct safexcel_command_desc *cdesc;  	struct safexcel_result_desc *rdesc;  	struct scatterlist *sg; -	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen; +	unsigned int totlen = cryptlen + assoclen; +	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;  	int i, ret = 0; -	if (req->src == req->dst) { -		nr_src = dma_map_sg(priv->dev, req->src, -				    sg_nents_for_len(req->src, req->cryptlen), +	if (src == dst) { +		nr_src = dma_map_sg(priv->dev, src, +				    sg_nents_for_len(src, totlen),  				    DMA_BIDIRECTIONAL);  		nr_dst = nr_src;  		if (!nr_src)  			return -EINVAL;  	} else { -		nr_src = dma_map_sg(priv->dev, req->src, -				    sg_nents_for_len(req->src, req->cryptlen), +		nr_src = dma_map_sg(priv->dev, src, +				    sg_nents_for_len(src, totlen),  				    DMA_TO_DEVICE);  		if (!nr_src)  			return -EINVAL; -		nr_dst = dma_map_sg(priv->dev, req->dst, -				    sg_nents_for_len(req->dst, req->cryptlen), +		nr_dst = dma_map_sg(priv->dev, dst, +				    sg_nents_for_len(dst, totlen),  				    DMA_FROM_DEVICE);  		if (!nr_dst) { -			dma_unmap_sg(priv->dev, req->src, -				     sg_nents_for_len(req->src, req->cryptlen), +			dma_unmap_sg(priv->dev, src, +				     sg_nents_for_len(src, totlen),  				     DMA_TO_DEVICE);  			return -EINVAL;  		} @@ -229,10 +379,17 @@ static int safexcel_aes_send(struct crypto_async_request *async,  	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len); +	if (ctx->aead) { +		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32), +		       ctx->ipad, ctx->state_sz); +		memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32), +		       ctx->opad, ctx->state_sz); +	} +  	spin_lock_bh(&priv->ring[ring].egress_lock);  	/* command descriptors */ -	for_each_sg(req->src, sg, nr_src, i) { +	for_each_sg(src, sg, nr_src, i) {  		int len = sg_dma_len(sg);  		/* Do not overflow the request */ @@ -240,7 +397,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,  			len = queued;  		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len), -					   sg_dma_address(sg), len, req->cryptlen, +					   sg_dma_address(sg), len, totlen,  					   ctx->base.ctxr_dma);  		if (IS_ERR(cdesc)) {  			/* No space left in the command descriptor ring */ @@ -250,8 +407,14 @@ static int safexcel_aes_send(struct crypto_async_request *async,  		n_cdesc++;  		if (n_cdesc == 1) { -			safexcel_context_control(ctx, async, cdesc); -			safexcel_cipher_token(ctx, async, cdesc, req->cryptlen); +			safexcel_context_control(ctx, base, sreq, cdesc); +			if (ctx->aead) +				safexcel_aead_token(ctx, iv, cdesc, +						    sreq->direction, cryptlen, +						    assoclen, digestsize); +			else +				safexcel_skcipher_token(ctx, iv, cdesc, +							cryptlen);  		}  		queued -= len; @@ -260,7 +423,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,  	}  	/* result descriptors */ -	for_each_sg(req->dst, sg, nr_dst, i) { +	for_each_sg(dst, sg, nr_dst, i) {  		bool first = !i, last = (i == nr_dst - 1);  		u32 len = sg_dma_len(sg); @@ -276,7 +439,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,  	spin_unlock_bh(&priv->ring[ring].egress_lock); -	request->req = &req->base; +	request->req = base;  	*commands = n_cdesc;  	*results = n_rdesc; @@ -291,16 
+454,16 @@ cdesc_rollback:  	spin_unlock_bh(&priv->ring[ring].egress_lock); -	if (req->src == req->dst) { -		dma_unmap_sg(priv->dev, req->src, -			     sg_nents_for_len(req->src, req->cryptlen), +	if (src == dst) { +		dma_unmap_sg(priv->dev, src, +			     sg_nents_for_len(src, totlen),  			     DMA_BIDIRECTIONAL);  	} else { -		dma_unmap_sg(priv->dev, req->src, -			     sg_nents_for_len(req->src, req->cryptlen), +		dma_unmap_sg(priv->dev, src, +			     sg_nents_for_len(src, totlen),  			     DMA_TO_DEVICE); -		dma_unmap_sg(priv->dev, req->dst, -			     sg_nents_for_len(req->dst, req->cryptlen), +		dma_unmap_sg(priv->dev, dst, +			     sg_nents_for_len(dst, totlen),  			     DMA_FROM_DEVICE);  	} @@ -309,11 +472,10 @@ cdesc_rollback:  static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,  				      int ring, -				      struct crypto_async_request *async, +				      struct crypto_async_request *base,  				      bool *should_complete, int *ret)  { -	struct skcipher_request *req = skcipher_request_cast(async); -	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); +	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);  	struct safexcel_result_desc *rdesc;  	int ndesc = 0, enq_ret; @@ -354,7 +516,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,  	ctx->base.ring = ring;  	spin_lock_bh(&priv->ring[ring].queue_lock); -	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); +	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, base);  	spin_unlock_bh(&priv->ring[ring].queue_lock);  	if (enq_ret != -EINPROGRESS) @@ -368,9 +530,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,  	return ndesc;  } -static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, -				  struct crypto_async_request *async, -				  bool *should_complete, int *ret) +static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv, +					   int ring, +					   struct crypto_async_request *async, +					   bool *should_complete, int *ret)  {  	struct skcipher_request *req = skcipher_request_cast(async);  	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); @@ -381,24 +544,48 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,  		err = safexcel_handle_inv_result(priv, ring, async,  						 should_complete, ret);  	} else { -		err = safexcel_handle_req_result(priv, ring, async, +		err = safexcel_handle_req_result(priv, ring, async, req->src, +						 req->dst, req->cryptlen, sreq,  						 should_complete, ret);  	}  	return err;  } -static int safexcel_cipher_send_inv(struct crypto_async_request *async, +static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv, +				       int ring, +				       struct crypto_async_request *async, +				       bool *should_complete, int *ret) +{ +	struct aead_request *req = aead_request_cast(async); +	struct crypto_aead *tfm = crypto_aead_reqtfm(req); +	struct safexcel_cipher_req *sreq = aead_request_ctx(req); +	int err; + +	if (sreq->needs_inv) { +		sreq->needs_inv = false; +		err = safexcel_handle_inv_result(priv, ring, async, +						 should_complete, ret); +	} else { +		err = safexcel_handle_req_result(priv, ring, async, req->src, +						 req->dst, +						 req->cryptlen + crypto_aead_authsize(tfm), +						 sreq, should_complete, ret); +	} + +	return err; +} + +static int safexcel_cipher_send_inv(struct crypto_async_request *base,  				    int ring, struct safexcel_request *request,  				    int *commands, 
int *results)  { -	struct skcipher_request *req = skcipher_request_cast(async); -	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); +	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);  	struct safexcel_crypto_priv *priv = ctx->priv;  	int ret; -	ret = safexcel_invalidate_cache(async, priv, -					ctx->base.ctxr_dma, ring, request); +	ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring, +					request);  	if (unlikely(ret))  		return ret; @@ -408,9 +595,9 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,  	return 0;  } -static int safexcel_send(struct crypto_async_request *async, -			 int ring, struct safexcel_request *request, -			 int *commands, int *results) +static int safexcel_skcipher_send(struct crypto_async_request *async, int ring, +				  struct safexcel_request *request, +				  int *commands, int *results)  {  	struct skcipher_request *req = skcipher_request_cast(async);  	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); @@ -421,59 +608,108 @@ static int safexcel_send(struct crypto_async_request *async,  	BUG_ON(priv->version == EIP97 && sreq->needs_inv);  	if (sreq->needs_inv) -		ret = safexcel_cipher_send_inv(async, ring, request, -					       commands, results); +		ret = safexcel_cipher_send_inv(async, ring, request, commands, +					       results); +	else +		ret = safexcel_aes_send(async, ring, request, sreq, req->src, +					req->dst, req->cryptlen, 0, 0, req->iv, +					commands, results); +	return ret; +} + +static int safexcel_aead_send(struct crypto_async_request *async, int ring, +			      struct safexcel_request *request, int *commands, +			      int *results) +{ +	struct aead_request *req = aead_request_cast(async); +	struct crypto_aead *tfm = crypto_aead_reqtfm(req); +	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); +	struct safexcel_cipher_req *sreq = aead_request_ctx(req); +	struct safexcel_crypto_priv *priv = ctx->priv; +	int ret; + +	BUG_ON(priv->version == EIP97 && sreq->needs_inv); + +	if (sreq->needs_inv) +		ret = safexcel_cipher_send_inv(async, ring, request, commands, +					       results);  	else -		ret = safexcel_aes_send(async, ring, request, +		ret = safexcel_aes_send(async, ring, request, sreq, req->src, +					req->dst, req->cryptlen, req->assoclen, +					crypto_aead_authsize(tfm), req->iv,  					commands, results);  	return ret;  } -static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) +static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm, +				    struct crypto_async_request *base, +				    struct safexcel_cipher_req *sreq, +				    struct safexcel_inv_result *result)  {  	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);  	struct safexcel_crypto_priv *priv = ctx->priv; -	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm)); -	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); -	struct safexcel_inv_result result = {};  	int ring = ctx->base.ring; -	memset(req, 0, sizeof(struct skcipher_request)); +	init_completion(&result->completion); -	/* create invalidation request */ -	init_completion(&result.completion); -	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, -				      safexcel_inv_complete, &result); - -	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm)); -	ctx = crypto_tfm_ctx(req->base.tfm); +	ctx = crypto_tfm_ctx(base->tfm);  	ctx->base.exit_inv = true;  	sreq->needs_inv = true;  	spin_lock_bh(&priv->ring[ring].queue_lock); -	crypto_enqueue_request(&priv->ring[ring].queue, &req->base); +	
crypto_enqueue_request(&priv->ring[ring].queue, base);  	spin_unlock_bh(&priv->ring[ring].queue_lock);  	queue_work(priv->ring[ring].workqueue,  		   &priv->ring[ring].work_data.work); -	wait_for_completion(&result.completion); +	wait_for_completion(&result->completion); -	if (result.error) { +	if (result->error) {  		dev_warn(priv->dev,  			"cipher: sync: invalidate: completion error %d\n", -			 result.error); -		return result.error; +			 result->error); +		return result->error;  	}  	return 0;  } -static int safexcel_aes(struct skcipher_request *req, -			enum safexcel_cipher_direction dir, u32 mode) +static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm)  { -	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); +	EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);  	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); +	struct safexcel_inv_result result = {}; + +	memset(req, 0, sizeof(struct skcipher_request)); + +	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, +				      safexcel_inv_complete, &result); +	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm)); + +	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result); +} + +static int safexcel_aead_exit_inv(struct crypto_tfm *tfm) +{ +	EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE); +	struct safexcel_cipher_req *sreq = aead_request_ctx(req); +	struct safexcel_inv_result result = {}; + +	memset(req, 0, sizeof(struct aead_request)); + +	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, +				  safexcel_inv_complete, &result); +	aead_request_set_tfm(req, __crypto_aead_cast(tfm)); + +	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result); +} + +static int safexcel_aes(struct crypto_async_request *base, +			struct safexcel_cipher_req *sreq, +			enum safexcel_cipher_direction dir, u32 mode) +{ +	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);  	struct safexcel_crypto_priv *priv = ctx->priv;  	int ret, ring; @@ -489,7 +725,7 @@ static int safexcel_aes(struct skcipher_request *req,  	} else {  		ctx->base.ring = safexcel_select_ring(priv);  		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, -						 EIP197_GFP_FLAGS(req->base), +						 EIP197_GFP_FLAGS(*base),  						 &ctx->base.ctxr_dma);  		if (!ctx->base.ctxr)  			return -ENOMEM; @@ -498,7 +734,7 @@ static int safexcel_aes(struct skcipher_request *req,  	ring = ctx->base.ring;  	spin_lock_bh(&priv->ring[ring].queue_lock); -	ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base); +	ret = crypto_enqueue_request(&priv->ring[ring].queue, base);  	spin_unlock_bh(&priv->ring[ring].queue_lock);  	queue_work(priv->ring[ring].workqueue, @@ -509,14 +745,14 @@ static int safexcel_aes(struct skcipher_request *req,  static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)  { -	return safexcel_aes(req, SAFEXCEL_ENCRYPT, -			    CONTEXT_CONTROL_CRYPTO_MODE_ECB); +	return safexcel_aes(&req->base, skcipher_request_ctx(req), +			    SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);  }  static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)  { -	return safexcel_aes(req, SAFEXCEL_DECRYPT, -			    CONTEXT_CONTROL_CRYPTO_MODE_ECB); +	return safexcel_aes(&req->base, skcipher_request_ctx(req), +			    SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);  }  static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) @@ -526,34 +762,64 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)  		container_of(tfm->__crt_alg, struct safexcel_alg_template,  			     
alg.skcipher.base); -	ctx->priv = tmpl->priv; -	ctx->base.send = safexcel_send; -	ctx->base.handle_result = safexcel_handle_result; -  	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),  				    sizeof(struct safexcel_cipher_req)); +	ctx->priv = tmpl->priv; + +	ctx->base.send = safexcel_skcipher_send; +	ctx->base.handle_result = safexcel_skcipher_handle_result;  	return 0;  } -static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm) +static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm)  {  	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); -	struct safexcel_crypto_priv *priv = ctx->priv; -	int ret; -	memzero_explicit(ctx->key, 8 * sizeof(u32)); +	memzero_explicit(ctx->key, sizeof(ctx->key));  	/* context not allocated, skip invalidation */  	if (!ctx->base.ctxr) +		return -ENOMEM; + +	memzero_explicit(ctx->base.ctxr->data, sizeof(ctx->base.ctxr->data)); +	return 0; +} + +static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm) +{ +	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); +	struct safexcel_crypto_priv *priv = ctx->priv; +	int ret; + +	if (safexcel_cipher_cra_exit(tfm))  		return; -	memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32)); +	if (priv->version == EIP197) { +		ret = safexcel_skcipher_exit_inv(tfm); +		if (ret) +			dev_warn(priv->dev, "skcipher: invalidation error %d\n", +				 ret); +	} else { +		dma_pool_free(priv->context_pool, ctx->base.ctxr, +			      ctx->base.ctxr_dma); +	} +} + +static void safexcel_aead_cra_exit(struct crypto_tfm *tfm) +{ +	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); +	struct safexcel_crypto_priv *priv = ctx->priv; +	int ret; + +	if (safexcel_cipher_cra_exit(tfm)) +		return;  	if (priv->version == EIP197) { -		ret = safexcel_cipher_exit_inv(tfm); +		ret = safexcel_aead_exit_inv(tfm);  		if (ret) -			dev_warn(priv->dev, "cipher: invalidation error %d\n", ret); +			dev_warn(priv->dev, "aead: invalidation error %d\n", +				 ret);  	} else {  		dma_pool_free(priv->context_pool, ctx->base.ctxr,  			      ctx->base.ctxr_dma); @@ -563,7 +829,7 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)  struct safexcel_alg_template safexcel_alg_ecb_aes = {  	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,  	.alg.skcipher = { -		.setkey = safexcel_aes_setkey, +		.setkey = safexcel_skcipher_aes_setkey,  		.encrypt = safexcel_ecb_aes_encrypt,  		.decrypt = safexcel_ecb_aes_decrypt,  		.min_keysize = AES_MIN_KEY_SIZE, @@ -586,20 +852,20 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = {  static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)  { -	return safexcel_aes(req, SAFEXCEL_ENCRYPT, -			    CONTEXT_CONTROL_CRYPTO_MODE_CBC); +	return safexcel_aes(&req->base, skcipher_request_ctx(req), +			    SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);  }  static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)  { -	return safexcel_aes(req, SAFEXCEL_DECRYPT, -			    CONTEXT_CONTROL_CRYPTO_MODE_CBC); +	return safexcel_aes(&req->base, skcipher_request_ctx(req), +			    SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);  }  struct safexcel_alg_template safexcel_alg_cbc_aes = {  	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,  	.alg.skcipher = { -		.setkey = safexcel_aes_setkey, +		.setkey = safexcel_skcipher_aes_setkey,  		.encrypt = safexcel_cbc_aes_encrypt,  		.decrypt = safexcel_cbc_aes_decrypt,  		.min_keysize = AES_MIN_KEY_SIZE, @@ -620,3 +886,139 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {  		},  	},  }; + +static int safexcel_aead_encrypt(struct aead_request *req) +{ +	struct 
safexcel_cipher_req *creq = aead_request_ctx(req); + +	return safexcel_aes(&req->base, creq, SAFEXCEL_ENCRYPT, +			    CONTEXT_CONTROL_CRYPTO_MODE_CBC); +} + +static int safexcel_aead_decrypt(struct aead_request *req) +{ +	struct safexcel_cipher_req *creq = aead_request_ctx(req); + +	return safexcel_aes(&req->base, creq, SAFEXCEL_DECRYPT, +			    CONTEXT_CONTROL_CRYPTO_MODE_CBC); +} + +static int safexcel_aead_cra_init(struct crypto_tfm *tfm) +{ +	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); +	struct safexcel_alg_template *tmpl = +		container_of(tfm->__crt_alg, struct safexcel_alg_template, +			     alg.aead.base); + +	crypto_aead_set_reqsize(__crypto_aead_cast(tfm), +				sizeof(struct safexcel_cipher_req)); + +	ctx->priv = tmpl->priv; + +	ctx->aead = true; +	ctx->base.send = safexcel_aead_send; +	ctx->base.handle_result = safexcel_aead_handle_result; +	return 0; +} + +static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm) +{ +	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + +	safexcel_aead_cra_init(tfm); +	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; +	ctx->state_sz = SHA1_DIGEST_SIZE; +	return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { +	.type = SAFEXCEL_ALG_TYPE_AEAD, +	.alg.aead = { +		.setkey = safexcel_aead_aes_setkey, +		.encrypt = safexcel_aead_encrypt, +		.decrypt = safexcel_aead_decrypt, +		.ivsize = AES_BLOCK_SIZE, +		.maxauthsize = SHA1_DIGEST_SIZE, +		.base = { +			.cra_name = "authenc(hmac(sha1),cbc(aes))", +			.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes", +			.cra_priority = 300, +			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | +				     CRYPTO_ALG_KERN_DRIVER_ONLY, +			.cra_blocksize = AES_BLOCK_SIZE, +			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx), +			.cra_alignmask = 0, +			.cra_init = safexcel_aead_sha1_cra_init, +			.cra_exit = safexcel_aead_cra_exit, +			.cra_module = THIS_MODULE, +		}, +	}, +}; + +static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm) +{ +	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + +	safexcel_aead_cra_init(tfm); +	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; +	ctx->state_sz = SHA256_DIGEST_SIZE; +	return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { +	.type = SAFEXCEL_ALG_TYPE_AEAD, +	.alg.aead = { +		.setkey = safexcel_aead_aes_setkey, +		.encrypt = safexcel_aead_encrypt, +		.decrypt = safexcel_aead_decrypt, +		.ivsize = AES_BLOCK_SIZE, +		.maxauthsize = SHA256_DIGEST_SIZE, +		.base = { +			.cra_name = "authenc(hmac(sha256),cbc(aes))", +			.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes", +			.cra_priority = 300, +			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | +				     CRYPTO_ALG_KERN_DRIVER_ONLY, +			.cra_blocksize = AES_BLOCK_SIZE, +			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx), +			.cra_alignmask = 0, +			.cra_init = safexcel_aead_sha256_cra_init, +			.cra_exit = safexcel_aead_cra_exit, +			.cra_module = THIS_MODULE, +		}, +	}, +}; + +static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm) +{ +	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + +	safexcel_aead_cra_init(tfm); +	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; +	ctx->state_sz = SHA256_DIGEST_SIZE; +	return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { +	.type = SAFEXCEL_ALG_TYPE_AEAD, +	.alg.aead = { +		.setkey = safexcel_aead_aes_setkey, +		.encrypt = safexcel_aead_encrypt, +		.decrypt = safexcel_aead_decrypt, +		.ivsize = AES_BLOCK_SIZE, +		.maxauthsize 
= SHA224_DIGEST_SIZE, +		.base = { +			.cra_name = "authenc(hmac(sha224),cbc(aes))", +			.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes", +			.cra_priority = 300, +			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | +				     CRYPTO_ALG_KERN_DRIVER_ONLY, +			.cra_blocksize = AES_BLOCK_SIZE, +			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx), +			.cra_alignmask = 0, +			.cra_init = safexcel_aead_sha224_cra_init, +			.cra_exit = safexcel_aead_cra_exit, +			.cra_module = THIS_MODULE, +		}, +	}, +};
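The `authenc(hmac(sha1),cbc(aes))`, `authenc(hmac(sha224),cbc(aes))` and `authenc(hmac(sha256),cbc(aes))` templates registered above are driven through the generic kernel AEAD API. Below is a minimal sketch of such a caller, not taken from this patch: the function name `example_authenc_encrypt` and its parameters are illustrative only, and the key blob is assumed to already be in the packed authenc layout that crypto_authenc_extractkeys() parses (an rtattr-encoded enckeylen parameter, followed by the auth key, followed by the enc key).

```c
/*
 * Illustrative sketch (not part of this patch): synchronously encrypting
 * with an authenc AEAD transform via the generic kernel crypto API.
 */
#include <crypto/aead.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_authenc_encrypt(const u8 *key, unsigned int keylen,
				   u8 *iv, struct scatterlist *src,
				   struct scatterlist *dst,
				   unsigned int assoclen,
				   unsigned int cryptlen)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	struct aead_request *req;
	int ret;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Key is expected in the packed authenc format (see above). */
	ret = crypto_aead_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	ret = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	/* src/dst cover assoclen bytes of AAD followed by cryptlen bytes of
	 * plaintext; dst must also have room for the appended digest. */
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);

	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}
```

Decryption follows the same flow, except that cryptlen then includes the authentication tag and crypto_aead_decrypt() returns -EBADMSG when verification fails.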