author		David S. Miller <davem@kernel.bkbits.net>	2003-02-03 09:30:25 -0800
committer	David S. Miller <davem@kernel.bkbits.net>	2003-02-03 09:30:25 -0800
commit		9290777fff057846f0e73f7718f21cdc73236e2e (patch)
tree		f7cb0764eabe12d0db082570bb5badfaf0ede490
parent		c98a2447b0f9fe758db94ffb5349bbf31e020b93 (diff)
parent		de14887638ec6f54f12fe353da1e7663bea74b16 (diff)
Merge davem@nuts.ninka.net:/home/davem/src/BK/net-2.5
into kernel.bkbits.net:/home/davem/net-2.5
-rw-r--r--	crypto/cipher.c				296
-rw-r--r--	crypto/digest.c				8
-rw-r--r--	crypto/internal.h			24
-rw-r--r--	crypto/tcrypt.c				54
-rw-r--r--	include/asm-alpha/kmap_types.h		4
-rw-r--r--	include/asm-i386/kmap_types.h		4
-rw-r--r--	include/asm-ia64/kmap_types.h		4
-rw-r--r--	include/asm-ppc/kmap_types.h		4
-rw-r--r--	include/asm-ppc64/kmap_types.h		4
-rw-r--r--	include/asm-s390/kmap_types.h		4
-rw-r--r--	include/asm-sparc/kmap_types.h		4
-rw-r--r--	include/asm-sparc64/kmap_types.h	4
-rw-r--r--	include/asm-x86_64/kmap_types.h		4
-rw-r--r--	include/linux/crypto.h			22
-rw-r--r--	include/linux/xfrm.h			1
-rw-r--r--	include/net/route.h			2
-rw-r--r--	include/net/xfrm.h			1
-rw-r--r--	net/ipv4/ah.c				2
-rw-r--r--	net/ipv4/devinet.c			40
-rw-r--r--	net/ipv4/esp.c				151
-rw-r--r--	net/ipv6/tcp_ipv6.c			2
-rw-r--r--	net/sched/sch_htb.c			64
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_crypto.c	4
23 files changed, 405 insertions, 302 deletions
diff --git a/crypto/cipher.c b/crypto/cipher.c
index fb6292ad2aa1..1f2fab6eade9 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -4,6 +4,7 @@
* Cipher operations.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -16,12 +17,22 @@
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include "internal.h"
typedef void (cryptfn_t)(void *, u8 *, const u8 *);
-typedef void (procfn_t)(struct crypto_tfm *, u8 *, cryptfn_t, int enc);
+typedef void (procfn_t)(struct crypto_tfm *, u8 *, u8*, cryptfn_t, int enc);
+
+struct scatter_walk {
+ struct scatterlist *sg;
+ struct page *page;
+ void *data;
+ unsigned int len_this_page;
+ unsigned int len_this_segment;
+ unsigned int offset;
+};
static inline void xor_64(u8 *a, const u8 *b)
{
@@ -37,165 +48,191 @@ static inline void xor_128(u8 *a, const u8 *b)
((u32 *)a)[3] ^= ((u32 *)b)[3];
}
-static inline unsigned int sglen(struct scatterlist *sg, unsigned int nsg)
+
+/* sg_next is defined as an inline routine now in case we want to change
+   scatterlist to a linked list later. */
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
{
- unsigned int i, n;
-
- for (i = 0, n = 0; i < nsg; i++)
- n += sg[i].length;
-
- return n;
+ return sg + 1;
}
-/*
- * Do not call this unless the total length of all of the fragments
- * has been verified as multiple of the block size.
- */
-static unsigned int copy_chunks(struct crypto_tfm *tfm, u8 *buf,
- struct scatterlist *sg, unsigned int sgidx,
- unsigned int rlen, unsigned int *last, int in)
+void *which_buf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
{
- unsigned int i, copied, coff, j, aligned;
- unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
-
- for (i = sgidx, j = copied = 0, aligned = 0 ; copied < bsize; i++) {
- unsigned int len = sg[i].length;
- unsigned int clen;
- char *p;
-
- if (copied) {
- coff = 0;
- clen = min(len, bsize - copied);
-
- if (len == bsize - copied)
- aligned = 1; /* last + right aligned */
-
- } else {
- coff = len - rlen;
- clen = rlen;
- }
+ if (nbytes <= walk->len_this_page &&
+ (((unsigned long)walk->data) & (PAGE_CACHE_SIZE - 1)) + nbytes <=
+ PAGE_CACHE_SIZE)
+ return walk->data;
+ else
+ return scratch;
+}
- p = crypto_kmap(sg[i].page) + sg[i].offset + coff;
-
- if (in)
- memcpy(&buf[copied], p, clen);
+static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
+{
+ if (out)
+ memcpy(sgdata, buf, nbytes);
+ else
+ memcpy(buf, sgdata, nbytes);
+}
+
+static void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
+{
+ unsigned int rest_of_page;
+
+ walk->sg = sg;
+
+ walk->page = sg->page;
+ walk->len_this_segment = sg->length;
+
+ rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1));
+ walk->len_this_page = min(sg->length, rest_of_page);
+ walk->offset = sg->offset;
+}
+
+static void scatterwalk_map(struct scatter_walk *walk, int out)
+{
+ walk->data = crypto_kmap(walk->page, out) + walk->offset;
+}
+
+static void scatter_page_done(struct scatter_walk *walk, int out,
+ unsigned int more)
+{
+	/* walk->data may be pointing at the first byte of the next page;
+	   however, we know we transferred at least one byte.  So,
+	   walk->data - 1 will be a virtual address in the mapped page. */
+
+ if (out)
+ flush_dcache_page(walk->page);
+
+ if (more) {
+ walk->len_this_segment -= walk->len_this_page;
+
+ if (walk->len_this_segment) {
+ walk->page++;
+ walk->len_this_page = min(walk->len_this_segment,
+ (unsigned)PAGE_CACHE_SIZE);
+ walk->offset = 0;
+ }
else
- memcpy(p, &buf[copied], clen);
-
- crypto_kunmap(p);
- *last = aligned ? 0 : clen;
- copied += clen;
+ scatterwalk_start(walk, sg_next(walk->sg));
}
-
- return i - sgidx - 2 + aligned;
}
-static inline unsigned int gather_chunks(struct crypto_tfm *tfm, u8 *buf,
- struct scatterlist *sg,
- unsigned int sgidx, unsigned int rlen,
- unsigned int *last)
+static void scatter_done(struct scatter_walk *walk, int out, int more)
{
- return copy_chunks(tfm, buf, sg, sgidx, rlen, last, 1);
+ crypto_kunmap(walk->data, out);
+ if (walk->len_this_page == 0 || !more)
+ scatter_page_done(walk, out, more);
}
-static inline unsigned int scatter_chunks(struct crypto_tfm *tfm, u8 *buf,
- struct scatterlist *sg,
- unsigned int sgidx, unsigned int rlen,
- unsigned int *last)
+/*
+ * Do not call this unless the total length of all of the fragments
+ * has been verified as a multiple of the block size.
+ */
+static int copy_chunks(void *buf, struct scatter_walk *walk,
+ size_t nbytes, int out)
{
- return copy_chunks(tfm, buf, sg, sgidx, rlen, last, 0);
+ if (buf != walk->data) {
+ while (nbytes > walk->len_this_page) {
+ memcpy_dir(buf, walk->data, walk->len_this_page, out);
+ buf += walk->len_this_page;
+ nbytes -= walk->len_this_page;
+
+ crypto_kunmap(walk->data, out);
+ scatter_page_done(walk, out, 1);
+ scatterwalk_map(walk, out);
+ }
+
+ memcpy_dir(buf, walk->data, nbytes, out);
+ }
+
+ walk->offset += nbytes;
+ walk->len_this_page -= nbytes;
+ walk->len_this_segment -= nbytes;
+ return 0;
}
/*
- * Generic encrypt/decrypt wrapper for ciphers.
- *
- * If we find a a remnant at the end of a frag, we have to encrypt or
- * decrypt across possibly multiple page boundaries via a temporary
- * block, then continue processing with a chunk offset until the end
- * of a frag is block aligned.
- *
- * The code is further complicated by having to remap a page after
- * processing a block then yielding. The data will be offset from the
- * start of page at the scatterlist offset, the chunking offset (coff)
- * and the block offset (boff).
+ * Generic encrypt/decrypt wrapper for ciphers; handles operations across
+ * multiple page boundaries by using temporary blocks. In user context,
+ * the kernel is given a chance to schedule us once per block.
*/
-static int crypt(struct crypto_tfm *tfm, struct scatterlist *sg,
- unsigned int nsg, cryptfn_t crfn, procfn_t prfn, int enc)
+static int crypt(struct crypto_tfm *tfm,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes, cryptfn_t crfn, procfn_t prfn, int enc)
{
- unsigned int i, coff;
- unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
- u8 tmp[bsize];
+ struct scatter_walk walk_in, walk_out;
+ const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
+ u8 tmp_src[nbytes > src->length ? bsize : 0];
+ u8 tmp_dst[nbytes > dst->length ? bsize : 0];
- if (sglen(sg, nsg) % bsize) {
+ if (!nbytes)
+ return 0;
+
+ if (nbytes % bsize) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
return -EINVAL;
}
- for (i = 0, coff = 0; i < nsg; i++) {
- unsigned int n = 0, boff = 0;
- unsigned int len = sg[i].length - coff;
- char *p = crypto_kmap(sg[i].page) + sg[i].offset + coff;
-
- while (len) {
- if (len < bsize) {
- crypto_kunmap(p);
- n = gather_chunks(tfm, tmp, sg, i, len, &coff);
- prfn(tfm, tmp, crfn, enc);
- scatter_chunks(tfm, tmp, sg, i, len, &coff);
- crypto_yield(tfm);
- goto unmapped;
- } else {
- prfn(tfm, p, crfn, enc);
- crypto_kunmap(p);
- crypto_yield(tfm);
-
- /* remap and point to recalculated offset */
- boff += bsize;
- p = crypto_kmap(sg[i].page)
- + sg[i].offset + coff + boff;
-
- len -= bsize;
-
- /* End of frag with no remnant? */
- if (coff && len == 0)
- coff = 0;
- }
- }
- crypto_kunmap(p);
-unmapped:
- i += n;
+ scatterwalk_start(&walk_in, src);
+ scatterwalk_start(&walk_out, dst);
+
+ for(;;) {
+ u8 *src_p, *dst_p;
+
+ scatterwalk_map(&walk_in, 0);
+ scatterwalk_map(&walk_out, 1);
+ src_p = which_buf(&walk_in, bsize, tmp_src);
+ dst_p = which_buf(&walk_out, bsize, tmp_dst);
+
+ nbytes -= bsize;
+ copy_chunks(src_p, &walk_in, bsize, 0);
+
+ prfn(tfm, dst_p, src_p, crfn, enc);
+
+ scatter_done(&walk_in, 0, nbytes);
+
+ copy_chunks(dst_p, &walk_out, bsize, 1);
+ scatter_done(&walk_out, 1, nbytes);
+
+ if (!nbytes)
+ return 0;
+
+ crypto_yield(tfm);
}
- return 0;
}
static void cbc_process(struct crypto_tfm *tfm,
- u8 *block, cryptfn_t fn, int enc)
+ u8 *dst, u8 *src, cryptfn_t fn, int enc)
{
/* Null encryption */
if (!tfm->crt_cipher.cit_iv)
return;
if (enc) {
- tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv, block);
- fn(tfm->crt_ctx, block, tfm->crt_cipher.cit_iv);
- memcpy(tfm->crt_cipher.cit_iv, block,
+ tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv, src);
+ fn(tfm->crt_ctx, dst, tfm->crt_cipher.cit_iv);
+ memcpy(tfm->crt_cipher.cit_iv, dst,
crypto_tfm_alg_blocksize(tfm));
} else {
- u8 buf[crypto_tfm_alg_blocksize(tfm)];
+ const int need_stack = (src == dst);
+ u8 stack[need_stack ? crypto_tfm_alg_blocksize(tfm) : 0];
+ u8 *buf = need_stack ? stack : dst;
- fn(tfm->crt_ctx, buf, block);
+ fn(tfm->crt_ctx, buf, src);
tfm->crt_u.cipher.cit_xor_block(buf, tfm->crt_cipher.cit_iv);
- memcpy(tfm->crt_cipher.cit_iv, block,
+ memcpy(tfm->crt_cipher.cit_iv, src,
crypto_tfm_alg_blocksize(tfm));
- memcpy(block, buf, crypto_tfm_alg_blocksize(tfm));
+ if (buf != dst)
+ memcpy(dst, buf, crypto_tfm_alg_blocksize(tfm));
}
}
-static void ecb_process(struct crypto_tfm *tfm, u8 *block,
+static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
cryptfn_t fn, int enc)
{
- fn(tfm->crt_ctx, block, block);
+ fn(tfm->crt_ctx, dst, src);
}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
@@ -211,35 +248,44 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
}
static int ecb_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg)
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
{
- return crypt(tfm, sg, nsg,
+ return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_encrypt, ecb_process, 1);
}
static int ecb_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg)
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
{
- return crypt(tfm, sg, nsg,
+ return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_decrypt, ecb_process, 1);
}
static int cbc_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg)
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
{
- return crypt(tfm, sg, nsg,
+ return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_encrypt, cbc_process, 1);
}
static int cbc_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg)
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
{
- return crypt(tfm, sg, nsg,
+ return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_decrypt, cbc_process, 0);
}
static int nocrypt(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg)
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
{
return -ENOSYS;
}
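
Before the caller-side conversions below, it may help to see the new calling
convention in one place. A minimal sketch of a single-fragment, in-place
encryption in the style of the tcrypt.c changes that follow; encrypt_in_place(),
buf and len are illustrative, and len must be a multiple of the cipher block
size or crypt() returns -EINVAL:

	static int encrypt_in_place(struct crypto_tfm *tfm, u8 *buf,
				    unsigned int len)
	{
		struct scatterlist sg[1];

		sg[0].page   = virt_to_page(buf);
		sg[0].offset = ((long) buf & ~PAGE_MASK);
		sg[0].length = len;

		/* dst == src: the scatter walk maps one page at a time and
		 * encrypts in place, falling back to the tmp_src/tmp_dst
		 * bounce blocks only when a block straddles a boundary. */
		return crypto_cipher_encrypt(tfm, sg, sg, len);
	}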
diff --git a/crypto/digest.c b/crypto/digest.c
index 4db5f88ef38d..9e7c4bb417ab 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -28,10 +28,10 @@ static void update(struct crypto_tfm *tfm,
unsigned int i;
for (i = 0; i < nsg; i++) {
- char *p = crypto_kmap(sg[i].page) + sg[i].offset;
+ char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx,
p, sg[i].length);
- crypto_kunmap(p);
+ crypto_kunmap(p, 0);
crypto_yield(tfm);
}
}
@@ -49,10 +49,10 @@ static void digest(struct crypto_tfm *tfm,
tfm->crt_digest.dit_init(tfm);
for (i = 0; i < nsg; i++) {
- char *p = crypto_kmap(sg[i].page) + sg[i].offset;
+ char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx,
p, sg[i].length);
- crypto_kunmap(p);
+ crypto_kunmap(p, 0);
crypto_yield(tfm);
}
crypto_digest_final(tfm, out);
diff --git a/crypto/internal.h b/crypto/internal.h
index eb4c92d3b47f..df34df2e6acc 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -16,17 +16,29 @@
#include <linux/init.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
+#include <asm/kmap_types.h>
-static inline void *crypto_kmap(struct page *page)
+static enum km_type km_types[] = {
+ KM_USER0,
+ KM_USER1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+};
+
+static inline enum km_type crypto_kmap_type(int out)
+{
+ return km_types[(in_softirq() ? 2 : 0) + out];
+}
+
+
+static inline void *crypto_kmap(struct page *page, int out)
{
- return kmap_atomic(page, in_softirq() ?
- KM_CRYPTO_SOFTIRQ : KM_CRYPTO_USER);
+ return kmap_atomic(page, crypto_kmap_type(out));
}
-static inline void crypto_kunmap(void *vaddr)
+static inline void crypto_kunmap(void *vaddr, int out)
{
- kunmap_atomic(vaddr, in_softirq() ?
- KM_CRYPTO_SOFTIRQ : KM_CRYPTO_USER);
+ kunmap_atomic(vaddr, crypto_kmap_type(out));
}
static inline void crypto_yield(struct crypto_tfm *tfm)
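
The reason each context gets a pair of kmap slots is that crypt() in cipher.c
holds the source and destination mappings at the same time. A sketch of the
pairing, assuming hypothetical src_page, dst_page and len:

	void *src_va, *dst_va;

	src_va = crypto_kmap(src_page, 0);	/* KM_USER0 or KM_SOFTIRQ0 */
	dst_va = crypto_kmap(dst_page, 1);	/* KM_USER1 or KM_SOFTIRQ1 */
	memcpy(dst_va, src_va, len);		/* both mappings live at once */
	crypto_kunmap(dst_va, 1);
	crypto_kunmap(src_va, 0);

The out flag, not just in_softirq(), distinguishes the two concurrent mappings
within a context, so the atomic kmap types can never collide.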
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index bcce9065242a..a45e574528f7 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -703,7 +703,7 @@ test_des(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = len;
- ret = crypto_cipher_encrypt(tfm, sg, 1);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, len);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -746,7 +746,7 @@ test_des(void)
sg[1].offset = ((long) p & ~PAGE_MASK);
sg[1].length = 8;
- ret = crypto_cipher_encrypt(tfm, sg, 2);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, 16);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -814,7 +814,7 @@ test_des(void)
sg[2].offset = ((long) p & ~PAGE_MASK);
sg[2].length = 8;
- ret = crypto_cipher_encrypt(tfm, sg, 3);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, 32);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
@@ -890,7 +890,7 @@ test_des(void)
sg[3].offset = ((long) p & ~PAGE_MASK);
sg[3].length = 18;
- ret = crypto_cipher_encrypt(tfm, sg, 4);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, 24);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
@@ -979,7 +979,7 @@ test_des(void)
sg[4].offset = ((long) p & ~PAGE_MASK);
sg[4].length = 8;
- ret = crypto_cipher_encrypt(tfm, sg, 5);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, 16);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
@@ -1078,7 +1078,7 @@ test_des(void)
sg[7].offset = ((long) p & ~PAGE_MASK);
sg[7].length = 1;
- ret = crypto_cipher_encrypt(tfm, sg, 8);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, 8);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -1120,7 +1120,7 @@ test_des(void)
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = len;
- ret = crypto_cipher_decrypt(tfm, sg, 1);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("des_decrypt() failed flags=%x\n",
tfm->crt_flags);
@@ -1163,7 +1163,7 @@ test_des(void)
sg[1].offset = ((long) p & ~PAGE_MASK);
sg[1].length = 8;
- ret = crypto_cipher_decrypt(tfm, sg, 2);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, 16);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -1220,7 +1220,7 @@ test_des(void)
sg[2].offset = ((long) p & ~PAGE_MASK);
sg[2].length = 1;
- ret = crypto_cipher_decrypt(tfm, sg, 3);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, 16);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
@@ -1290,7 +1290,7 @@ test_des(void)
crypto_cipher_set_iv(tfm, des_tv[i].iv,
crypto_tfm_alg_ivsize(tfm));
- ret = crypto_cipher_encrypt(tfm, sg, 1);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, len);
if (ret) {
printk("des_cbc_encrypt() failed flags=%x\n",
tfm->crt_flags);
@@ -1349,7 +1349,7 @@ test_des(void)
crypto_cipher_set_iv(tfm, des_tv[i].iv, crypto_tfm_alg_ivsize(tfm));
- ret = crypto_cipher_encrypt(tfm, sg, 2);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, 24);
if (ret) {
printk("des_cbc_decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -1398,7 +1398,7 @@ test_des(void)
crypto_cipher_set_iv(tfm, des_tv[i].iv,
crypto_tfm_alg_blocksize(tfm));
- ret = crypto_cipher_decrypt(tfm, sg, 1);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, len);
if (ret) {
printk("des_cbc_decrypt() failed flags=%x\n",
tfm->crt_flags);
@@ -1450,7 +1450,7 @@ test_des(void)
crypto_cipher_set_iv(tfm, des_tv[i].iv, crypto_tfm_alg_ivsize(tfm));
- ret = crypto_cipher_decrypt(tfm, sg, 2);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, 8);
if (ret) {
printk("des_cbc_decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -1518,7 +1518,7 @@ test_des3_ede(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = len;
- ret = crypto_cipher_encrypt(tfm, sg, 1);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, len);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -1561,7 +1561,7 @@ test_des3_ede(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = len;
- ret = crypto_cipher_decrypt(tfm, sg, 1);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, len);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -1624,7 +1624,7 @@ test_blowfish(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = bf_tv[i].plen;
- ret = crypto_cipher_encrypt(tfm, sg, 1);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -1666,7 +1666,7 @@ test_blowfish(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = bf_tv[i].plen;
- ret = crypto_cipher_decrypt(tfm, sg, 1);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -1719,7 +1719,7 @@ test_blowfish(void)
crypto_cipher_set_iv(tfm, bf_tv[i].iv,
crypto_tfm_alg_ivsize(tfm));
- ret = crypto_cipher_encrypt(tfm, sg, 1);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("blowfish_cbc_encrypt() failed flags=%x\n",
tfm->crt_flags);
@@ -1764,7 +1764,7 @@ test_blowfish(void)
crypto_cipher_set_iv(tfm, bf_tv[i].iv,
crypto_tfm_alg_ivsize(tfm));
- ret = crypto_cipher_decrypt(tfm, sg, 1);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("blowfish_cbc_decrypt() failed flags=%x\n",
tfm->crt_flags);
@@ -1829,7 +1829,7 @@ test_twofish(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = tf_tv[i].plen;
- ret = crypto_cipher_encrypt(tfm, sg, 1);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -1871,7 +1871,7 @@ test_twofish(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = tf_tv[i].plen;
- ret = crypto_cipher_decrypt(tfm, sg, 1);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -1924,7 +1924,7 @@ test_twofish(void)
crypto_cipher_set_iv(tfm, tf_tv[i].iv,
crypto_tfm_alg_ivsize(tfm));
- ret = crypto_cipher_encrypt(tfm, sg, 1);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("blowfish_cbc_encrypt() failed flags=%x\n",
tfm->crt_flags);
@@ -1970,7 +1970,7 @@ test_twofish(void)
crypto_cipher_set_iv(tfm, tf_tv[i].iv,
crypto_tfm_alg_ivsize(tfm));
- ret = crypto_cipher_decrypt(tfm, sg, 1);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("blowfish_cbc_decrypt() failed flags=%x\n",
tfm->crt_flags);
@@ -2030,7 +2030,7 @@ test_serpent(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = sizeof(serp_tv[i].plaintext);
- ret = crypto_cipher_encrypt(tfm, sg, 1);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -2070,7 +2070,7 @@ test_serpent(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = sizeof(serp_tv[i].plaintext);
- ret = crypto_cipher_decrypt(tfm, sg, 1);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -2133,7 +2133,7 @@ test_aes(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = aes_tv[i].plen;
- ret = crypto_cipher_encrypt(tfm, sg, 1);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
@@ -2175,7 +2175,7 @@ test_aes(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = aes_tv[i].plen;
- ret = crypto_cipher_decrypt(tfm, sg, 1);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
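
Several of the tests above now exercise exactly the case the walker exists
for: one cipher block split across scatterlist fragments. A hedged sketch of
that shape, with p and q standing in for two page-resident buffers:

	struct scatterlist sg[2];
	int ret;

	/* An 8-byte DES block split 4+12 across two fragments: crypt()
	 * gathers the straddling block into tmp_src, encrypts it, and
	 * scatters the result back out through tmp_dst. */
	sg[0].page   = virt_to_page(p);
	sg[0].offset = ((long) p & ~PAGE_MASK);
	sg[0].length = 4;			/* first half of block 1 */
	sg[1].page   = virt_to_page(q);
	sg[1].offset = ((long) q & ~PAGE_MASK);
	sg[1].length = 12;			/* rest of block 1, all of block 2 */

	ret = crypto_cipher_encrypt(tfm, sg, sg, 16);	/* two 8-byte blocks */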
diff --git a/include/asm-alpha/kmap_types.h b/include/asm-alpha/kmap_types.h
index 10061ab1c9ad..e7e1f7c8d148 100644
--- a/include/asm-alpha/kmap_types.h
+++ b/include/asm-alpha/kmap_types.h
@@ -23,8 +23,8 @@ D(7) KM_PTE0,
D(8) KM_PTE1,
D(9) KM_IRQ0,
D(10) KM_IRQ1,
-D(11) KM_CRYPTO_USER,
-D(12) KM_CRYPTO_SOFTIRQ,
+D(11) KM_SOFTIRQ0,
+D(12) KM_SOFTIRQ1,
D(13) KM_TYPE_NR
};
diff --git a/include/asm-i386/kmap_types.h b/include/asm-i386/kmap_types.h
index 9ee55b1dea1f..dda910c4f1ba 100644
--- a/include/asm-i386/kmap_types.h
+++ b/include/asm-i386/kmap_types.h
@@ -22,8 +22,8 @@ D(8) KM_PTE1,
D(9) KM_PTE2,
D(10) KM_IRQ0,
D(11) KM_IRQ1,
-D(12) KM_CRYPTO_USER,
-D(13) KM_CRYPTO_SOFTIRQ,
+D(12) KM_SOFTIRQ0,
+D(13) KM_SOFTIRQ1,
D(14) KM_TYPE_NR
};
diff --git a/include/asm-ia64/kmap_types.h b/include/asm-ia64/kmap_types.h
index 77187f614170..d54e19055e6c 100644
--- a/include/asm-ia64/kmap_types.h
+++ b/include/asm-ia64/kmap_types.h
@@ -21,8 +21,8 @@ D(7) KM_PTE0,
D(8) KM_PTE1,
D(9) KM_IRQ0,
D(10) KM_IRQ1,
-D(11) KM_CRYPTO_USER,
-D(12) KM_CRYPTO_SOFTIRQ,
+D(11) KM_SOFTIRQ0,
+D(12) KM_SOFTIRQ1,
D(13) KM_TYPE_NR
};
diff --git a/include/asm-ppc/kmap_types.h b/include/asm-ppc/kmap_types.h
index 1a4813527e74..5ffc886ddabb 100644
--- a/include/asm-ppc/kmap_types.h
+++ b/include/asm-ppc/kmap_types.h
@@ -14,8 +14,8 @@ enum km_type {
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
- KM_CRYPTO_USER,
- KM_CRYPTO_SOFTIRQ,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
KM_TYPE_NR
};
diff --git a/include/asm-ppc64/kmap_types.h b/include/asm-ppc64/kmap_types.h
index f8422429758a..fd1574648223 100644
--- a/include/asm-ppc64/kmap_types.h
+++ b/include/asm-ppc64/kmap_types.h
@@ -14,8 +14,8 @@ enum km_type {
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
- KM_CRYPTO_USER,
- KM_CRYPTO_SOFTIRQ,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
KM_TYPE_NR
};
diff --git a/include/asm-s390/kmap_types.h b/include/asm-s390/kmap_types.h
index f8422429758a..fd1574648223 100644
--- a/include/asm-s390/kmap_types.h
+++ b/include/asm-s390/kmap_types.h
@@ -14,8 +14,8 @@ enum km_type {
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
- KM_CRYPTO_USER,
- KM_CRYPTO_SOFTIRQ,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
KM_TYPE_NR
};
diff --git a/include/asm-sparc/kmap_types.h b/include/asm-sparc/kmap_types.h
index 05eeec6d17f3..e215f7104974 100644
--- a/include/asm-sparc/kmap_types.h
+++ b/include/asm-sparc/kmap_types.h
@@ -13,8 +13,8 @@ enum km_type {
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
- KM_CRYPTO_USER,
- KM_CRYPTO_SOFTIRQ,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
KM_TYPE_NR
};
diff --git a/include/asm-sparc64/kmap_types.h b/include/asm-sparc64/kmap_types.h
index 26c28fb7c8b7..34c1d3d9a3b0 100644
--- a/include/asm-sparc64/kmap_types.h
+++ b/include/asm-sparc64/kmap_types.h
@@ -17,8 +17,8 @@ enum km_type {
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
- KM_CRYPTO_USER,
- KM_CRYPTO_SOFTIRQ,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
KM_TYPE_NR
};
diff --git a/include/asm-x86_64/kmap_types.h b/include/asm-x86_64/kmap_types.h
index e87ef2e0f74d..7486338c6cea 100644
--- a/include/asm-x86_64/kmap_types.h
+++ b/include/asm-x86_64/kmap_types.h
@@ -11,8 +11,8 @@ enum km_type {
KM_BIO_DST_IRQ,
KM_IRQ0,
KM_IRQ1,
- KM_CRYPTO_USER,
- KM_CRYPTO_SOFTIRQ,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
KM_TYPE_NR
};
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 5c29e2b58011..5178882ec4d1 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -131,9 +131,13 @@ struct cipher_tfm {
int (*cit_setkey)(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen);
int (*cit_encrypt)(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg);
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes);
int (*cit_decrypt)(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg);
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes);
void (*cit_xor_block)(u8 *dst, const u8 *src);
};
@@ -274,19 +278,21 @@ static inline int crypto_cipher_setkey(struct crypto_tfm *tfm,
}
static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *sg,
- unsigned int nsg)
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_encrypt(tfm, sg, nsg);
+ return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
}
static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *sg,
- unsigned int nsg)
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_decrypt(tfm, sg, nsg);
+ return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
}
static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index bac8b2e3e666..7458d1f49472 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -163,6 +163,7 @@ struct xfrm_usersa_info {
struct xfrm_usersa_id {
xfrm_address_t saddr;
__u32 spi;
+ __u16 family;
__u8 proto;
};
diff --git a/include/net/route.h b/include/net/route.h
index ae62dc4e5683..ba15b5140798 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -167,7 +167,7 @@ static inline int ip_route_connect(struct rtable **rp, u32 dst,
ip_rt_put(*rp);
*rp = NULL;
}
- return ip_route_output_flow(rp, &fl, sk, 1);
+ return ip_route_output_flow(rp, &fl, sk, 0);
}
static inline int ip_route_newports(struct rtable **rp, u16 sport, u16 dport,
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 82d2187eeb2a..0f331126faa4 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -105,7 +105,6 @@ struct xfrm_state
u16 family;
xfrm_address_t saddr;
int header_len;
- int trailer_len;
} props;
struct xfrm_lifetime_cfg lft;
diff --git a/net/ipv4/ah.c b/net/ipv4/ah.c
index b18a8aaf459b..383d77a03c17 100644
--- a/net/ipv4/ah.c
+++ b/net/ipv4/ah.c
@@ -361,7 +361,7 @@ static int ah_init_state(struct xfrm_state *x, void *args)
ahp->icv = ah_hmac_digest;
/*
- * Lookup the algorithm description maintained by pfkey,
+ * Lookup the algorithm description maintained by xfrm_algo,
* verify crypto transform properties, and store information
* we need for AH processing. This lookup cannot fail here
* after a successful crypto_alloc_tfm().
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 4d676060696f..a85769466ca9 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -823,6 +823,34 @@ int unregister_inetaddr_notifier(struct notifier_block *nb)
return notifier_chain_unregister(&inetaddr_chain, nb);
}
+/* Rename ifa_labels for a device name change. Make some effort to preserve
+ * existing alias numbering and to create unique labels if possible.
+ */
+static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
+{
+ struct in_ifaddr *ifa;
+ int named = 0;
+
+ for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+ char old[IFNAMSIZ], *dot;
+
+ memcpy(old, ifa->ifa_label, IFNAMSIZ);
+ memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
+ if (named++ == 0)
+ continue;
+ dot = strchr(old, ':');
+ if (dot == NULL) {
+ sprintf(old, ":%d", named);
+ dot = old;
+ }
+ if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) {
+ strcat(ifa->ifa_label, dot);
+ } else {
+ strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
+ }
+ }
+}
+
/* Called only under RTNL semaphore */
static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -873,14 +901,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
inetdev_destroy(in_dev);
break;
case NETDEV_CHANGENAME:
- if (in_dev->ifa_list) {
- struct in_ifaddr *ifa;
- for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
- memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
- /* Do not notify about label change, this event is
- not interesting to applications using netlink.
- */
- }
+ /* Do not notify about label change, this event is
+ * not interesting to applications using netlink.
+ */
+ inetdev_changename(dev, in_dev);
break;
}
out:
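
A minimal userspace sketch of the relabeling rule; relabel(), its callers and
the sample names are illustrative only, not kernel API. The primary address
takes the bare new device name; secondaries keep an existing ":N" alias suffix
or have one invented, truncating the name part if the result would overflow
IFNAMSIZ:

	#include <stdio.h>
	#include <string.h>

	#define IFNAMSIZ 16

	static void relabel(char label[IFNAMSIZ], const char *newname, int named)
	{
		char old[IFNAMSIZ], *dot;

		memcpy(old, label, IFNAMSIZ);	/* remember the old label */
		memset(label, 0, IFNAMSIZ);
		strncpy(label, newname, IFNAMSIZ - 1);
		if (named == 0)
			return;			/* primary: bare new name */
		dot = strchr(old, ':');		/* keep an existing suffix... */
		if (dot == NULL) {
			sprintf(old, ":%d", named);	/* ...or invent one */
			dot = old;
		}
		if (strlen(label) + strlen(dot) < IFNAMSIZ)
			strcat(label, dot);
		else				/* no room: overwrite the tail */
			strcpy(label + IFNAMSIZ - strlen(dot) - 1, dot);
	}

	int main(void)
	{
		char primary[IFNAMSIZ] = "eth0";
		char alias[IFNAMSIZ]   = "eth0:1";

		relabel(primary, "eth1", 0);
		relabel(alias, "eth1", 1);
		printf("%s, %s\n", primary, alias);	/* eth1, eth1:1 */
		return 0;
	}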
diff --git a/net/ipv4/esp.c b/net/ipv4/esp.c
index 466517dd0482..09c8abd6aa8c 100644
--- a/net/ipv4/esp.c
+++ b/net/ipv4/esp.c
@@ -10,6 +10,9 @@
#define MAX_SG_ONSTACK 4
+typedef void (icv_update_fn_t)(struct crypto_tfm *,
+ struct scatterlist *, unsigned int);
+
/* BUGS:
* - we assume replay seqno is always present.
*/
@@ -30,37 +33,40 @@ struct esp_data
struct crypto_tfm *tfm; /* crypto handle */
} conf;
- /* Integrity. It is active when authlen != 0 */
+ /* Integrity. It is active when icv_full_len != 0 */
struct {
u8 *key; /* Key */
int key_len; /* Length of the key */
- u8 *work_digest;
- /* authlen is length of trailer containing auth token.
- * If it is not zero it is assumed to be
- * >= crypto_tfm_alg_digestsize(atfm) */
- int authlen;
- void (*digest)(struct esp_data*,
- struct sk_buff *skb,
- int offset,
- int len,
- u8 *digest);
+ u8 *work_icv;
+ int icv_full_len;
+ int icv_trunc_len;
+ void (*icv)(struct esp_data*,
+ struct sk_buff *skb,
+ int offset, int len, u8 *icv);
struct crypto_tfm *tfm;
} auth;
};
/* Move to common area: it is shared with AH. */
-void skb_digest_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
- int offset, int len)
+void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
+ int offset, int len, icv_update_fn_t icv_update)
{
int start = skb->len - skb->data_len;
int i, copy = start - offset;
+ struct scatterlist sg;
/* Checksum header. */
if (copy > 0) {
if (copy > len)
copy = len;
- tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx, skb->data+offset, copy);
+
+ sg.page = virt_to_page(skb->data + offset);
+ sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
+ sg.length = copy;
+
+ icv_update(tfm, &sg, 1);
+
if ((len -= copy) == 0)
return;
offset += copy;
@@ -73,14 +79,17 @@ void skb_digest_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
- u8 *vaddr;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
- vaddr = kmap_skb_frag(frag);
- tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx, vaddr+frag->page_offset+offset-start, copy);
- kunmap_skb_frag(vaddr);
+
+ sg.page = frag->page;
+ sg.offset = frag->page_offset + offset-start;
+ sg.length = copy;
+
+ icv_update(tfm, &sg, 1);
+
if (!(len -= copy))
return;
offset += copy;
@@ -100,7 +109,7 @@ void skb_digest_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- skb_digest_walk(list, tfm, offset-start, copy);
+ skb_icv_walk(list, tfm, offset-start, copy, icv_update);
if ((len -= copy) == 0)
return;
offset += copy;
@@ -188,12 +197,13 @@ esp_hmac_digest(struct esp_data *esp, struct sk_buff *skb, int offset,
int len, u8 *auth_data)
{
struct crypto_tfm *tfm = esp->auth.tfm;
- char *digest = esp->auth.work_digest;
+ char *icv = esp->auth.work_icv;
+ memset(auth_data, 0, esp->auth.icv_trunc_len);
crypto_hmac_init(tfm, esp->auth.key, &esp->auth.key_len);
- skb_digest_walk(skb, tfm, offset, len);
- crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, digest);
- memcpy(auth_data, digest, esp->auth.authlen);
+ skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update);
+ crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, icv);
+ memcpy(auth_data, icv, esp->auth.icv_trunc_len);
}
/* Check that skb data bits are writable. If they are not, copy data
@@ -317,6 +327,7 @@ int esp_output(struct sk_buff *skb)
struct sk_buff *trailer;
int blksize;
int clen;
+ int alen;
int nfrags;
union {
struct iphdr iph;
@@ -347,13 +358,14 @@ int esp_output(struct sk_buff *skb)
clen = skb->len;
esp = x->data;
+ alen = esp->auth.icv_trunc_len;
tfm = esp->conf.tfm;
blksize = crypto_tfm_alg_blocksize(tfm);
clen = (clen + 2 + blksize-1)&~(blksize-1);
if (esp->conf.padlen)
clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);
- if ((nfrags = skb_cow_data(skb, clen-skb->len+esp->auth.authlen, &trailer)) < 0)
+ if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
goto error;
/* Fill padding... */
@@ -373,7 +385,7 @@ int esp_output(struct sk_buff *skb)
top_iph->ihl = 5;
top_iph->version = 4;
top_iph->tos = iph->tos; /* DS disclosed */
- top_iph->tot_len = htons(skb->len + esp->auth.authlen);
+ top_iph->tot_len = htons(skb->len + alen);
top_iph->frag_off = iph->frag_off&htons(IP_DF);
if (!(top_iph->frag_off))
ip_select_ident(top_iph, dst, 0);
@@ -388,7 +400,7 @@ int esp_output(struct sk_buff *skb)
top_iph = (struct iphdr*)skb_push(skb, iph->ihl*4);
memcpy(top_iph, &tmp_iph, iph->ihl*4);
iph = &tmp_iph.iph;
- top_iph->tot_len = htons(skb->len + esp->auth.authlen);
+ top_iph->tot_len = htons(skb->len + alen);
top_iph->protocol = IPPROTO_ESP;
top_iph->check = 0;
top_iph->frag_off = iph->frag_off;
@@ -411,7 +423,7 @@ int esp_output(struct sk_buff *skb)
goto error;
}
skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
- crypto_cipher_encrypt(tfm, sg, nfrags);
+ crypto_cipher_encrypt(tfm, sg, sg, clen);
if (unlikely(sg != sgbuf))
kfree(sg);
} while (0);
@@ -421,10 +433,10 @@ int esp_output(struct sk_buff *skb)
crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
}
- if (esp->auth.authlen) {
- esp->auth.digest(esp, skb, (u8*)esph-skb->data,
- 8+esp->conf.ivlen+clen, trailer->tail);
- pskb_put(skb, trailer, esp->auth.authlen);
+ if (esp->auth.icv_full_len) {
+ esp->auth.icv(esp, skb, (u8*)esph-skb->data,
+ 8+esp->conf.ivlen+clen, trailer->tail);
+ pskb_put(skb, trailer, alen);
}
ip_send_check(top_iph);
@@ -445,6 +457,11 @@ error_nolock:
return err;
}
+/*
+ * Note: detecting truncated vs. non-truncated authentication data is very
+ * expensive, so we only support truncated data, which is the recommended
+ * and common case.
+ */
int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
struct iphdr *iph;
@@ -452,7 +469,8 @@ int esp_input(struct xfrm_state *x, struct sk_buff *skb)
struct esp_data *esp = x->data;
struct sk_buff *trailer;
int blksize = crypto_tfm_alg_blocksize(esp->conf.tfm);
- int elen = skb->len - 8 - esp->conf.ivlen - esp->auth.authlen;
+ int alen = esp->auth.icv_trunc_len;
+ int elen = skb->len - 8 - esp->conf.ivlen - alen;
int nfrags;
if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
@@ -462,17 +480,16 @@ int esp_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
/* If integrity check is required, do this. */
- if (esp->auth.authlen) {
- u8 sum[esp->auth.authlen];
- u8 sum1[esp->auth.authlen];
+ if (esp->auth.icv_full_len) {
+ u8 sum[esp->auth.icv_full_len];
+ u8 sum1[alen];
+
+ esp->auth.icv(esp, skb, 0, skb->len-alen, sum);
- esp->auth.digest(esp, skb, 0, skb->len-esp->auth.authlen, sum);
-
- if (skb_copy_bits(skb, skb->len-esp->auth.authlen, sum1,
- esp->auth.authlen))
+ if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
BUG();
- if (unlikely(memcmp(sum, sum1, esp->auth.authlen))) {
+ if (unlikely(memcmp(sum, sum1, alen))) {
x->stats.integrity_failed++;
goto out;
}
@@ -503,12 +520,11 @@ int esp_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
}
skb_to_sgvec(skb, sg, 8+esp->conf.ivlen, elen);
- crypto_cipher_decrypt(esp->conf.tfm, sg, nfrags);
+ crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
if (unlikely(sg != sgbuf))
kfree(sg);
- if (skb_copy_bits(skb, skb->len-esp->auth.authlen-2,
- nexthdr, 2))
+ if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
BUG();
padlen = nexthdr[0];
@@ -518,7 +534,7 @@ int esp_input(struct xfrm_state *x, struct sk_buff *skb)
/* ... check padding bits here. Silly. :-) */
iph->protocol = nexthdr[1];
- pskb_trim(skb, skb->len - esp->auth.authlen - padlen - 2);
+ pskb_trim(skb, skb->len - alen - padlen - 2);
memcpy(workbuf, skb->nh.raw, iph->ihl*4);
skb->h.raw = skb_pull(skb, 8 + esp->conf.ivlen);
skb->nh.raw += 8 + esp->conf.ivlen;
@@ -546,7 +562,7 @@ static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
if (esp->conf.padlen)
mtu = (mtu + esp->conf.padlen-1)&~(esp->conf.padlen-1);
- return mtu + x->props.header_len + esp->auth.authlen;
+ return mtu + x->props.header_len + esp->auth.icv_full_len;
}
void esp4_err(struct sk_buff *skb, u32 info)
@@ -583,9 +599,9 @@ void esp_destroy(struct xfrm_state *x)
crypto_free_tfm(esp->auth.tfm);
esp->auth.tfm = NULL;
}
- if (esp->auth.work_digest) {
- kfree(esp->auth.work_digest);
- esp->auth.work_digest = NULL;
+ if (esp->auth.work_icv) {
+ kfree(esp->auth.work_icv);
+ esp->auth.work_icv = NULL;
}
}
@@ -593,11 +609,12 @@ int esp_init_state(struct xfrm_state *x, void *args)
{
struct esp_data *esp = NULL;
+ /* null auth and encryption can have zero length keys */
if (x->aalg) {
- if (x->aalg->alg_key_len == 0 || x->aalg->alg_key_len > 512)
+ if (x->aalg->alg_key_len > 512)
goto error;
}
- if (x->ealg == NULL || x->ealg->alg_key_len == 0)
+ if (x->ealg == NULL)
goto error;
esp = kmalloc(sizeof(*esp), GFP_KERNEL);
@@ -607,21 +624,32 @@ int esp_init_state(struct xfrm_state *x, void *args)
memset(esp, 0, sizeof(*esp));
if (x->aalg) {
- int digestsize;
+ struct xfrm_algo_desc *aalg_desc;
esp->auth.key = x->aalg->alg_key;
esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
if (esp->auth.tfm == NULL)
goto error;
- esp->auth.digest = esp_hmac_digest;
- digestsize = crypto_tfm_alg_digestsize(esp->auth.tfm);
- /* XXX RFC2403 and RFC 2404 truncate auth to 96 bit */
- esp->auth.authlen = 12;
- if (esp->auth.authlen > digestsize) /* XXX */
- BUG();
- esp->auth.work_digest = kmalloc(digestsize, GFP_KERNEL);
- if (!esp->auth.work_digest)
+ esp->auth.icv = esp_hmac_digest;
+
+ aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name);
+ BUG_ON(!aalg_desc);
+
+ if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
+ crypto_tfm_alg_digestsize(esp->auth.tfm)) {
+ printk(KERN_INFO "ESP: %s digestsize %u != %hu\n",
+ x->aalg->alg_name,
+ crypto_tfm_alg_digestsize(esp->auth.tfm),
+ aalg_desc->uinfo.auth.icv_fullbits/8);
+ goto error;
+ }
+
+ esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
+ esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
+
+ esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
+ if (!esp->auth.work_icv)
goto error;
}
esp->conf.key = x->ealg->alg_key;
@@ -639,7 +667,6 @@ int esp_init_state(struct xfrm_state *x, void *args)
x->props.header_len = 8 + esp->conf.ivlen;
if (x->props.mode)
x->props.header_len += 20;
- x->props.trailer_len = esp->auth.authlen + crypto_tfm_alg_blocksize(esp->conf.tfm);
x->data = esp;
return 0;
@@ -647,8 +674,8 @@ error:
if (esp) {
if (esp->auth.tfm)
crypto_free_tfm(esp->auth.tfm);
- if (esp->auth.work_digest)
- kfree(esp->auth.work_digest);
+ if (esp->auth.work_icv)
+ kfree(esp->auth.work_icv);
if (esp->conf.tfm)
crypto_free_tfm(esp->conf.tfm);
kfree(esp);
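
As a concrete instance of the full/truncated split that replaces the
hard-coded authlen = 12: for HMAC-SHA1 the RFC 2404 numbers are a 160-bit
digest truncated to a 96-bit ICV, so the algorithm description is expected to
yield (a sketch, not the xfrm_algo table itself):

	/* HMAC-SHA1 per RFC 2404: full 160-bit digest, 96-bit wire ICV */
	esp->auth.icv_full_len  = 160 / 8;	/* 20 bytes: work_icv size  */
	esp->auth.icv_trunc_len =  96 / 8;	/* 12 bytes: packet trailer */

esp_hmac_digest() computes all icv_full_len bytes into work_icv and copies
only the first icv_trunc_len bytes into the packet, and esp_input() compares
just those truncated bytes.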
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b9ee209d62b1..18424ac3fe07 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -60,7 +60,7 @@ static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
struct sk_buff *skb);
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
-static int tcp_v6_xmit(struct sk_buff *skb);
+static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok);
static struct tcp_func ipv6_mapped;
static struct tcp_func ipv6_specific;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index f4bbf7a2596a..a99047e1792d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -19,7 +19,7 @@
* created test case so that I was able to fix nasty bug
* and many others. thanks.
*
- * $Id: sch_htb.c,v 1.14 2002/09/28 12:55:22 devik Exp devik $
+ * $Id: sch_htb.c,v 1.17 2003/01/29 09:22:18 devik Exp devik $
*/
#include <linux/config.h>
#include <linux/module.h>
@@ -71,16 +71,12 @@
#define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
-#define HTB_VER 0x30007 /* major must be matched with number suplied by TC as version */
+#define HTB_VER 0x3000a /* major must be matched with number supplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
-/* temporary debug defines to be removed after beta stage */
-#define DEVIK_MEND(N)
-#define DEVIK_MSTART(N)
-
/* debugging support; S is subsystem, these are defined:
0 - netlink messages
1 - enqueue
@@ -421,7 +417,6 @@ static void htb_add_to_wait_tree (struct htb_sched *q,
if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
#endif
- DEVIK_MSTART(9);
cl->pq_key = jiffies + PSCHED_US2JIFFIE(delay);
if (cl->pq_key == jiffies)
cl->pq_key++;
@@ -440,7 +435,6 @@ static void htb_add_to_wait_tree (struct htb_sched *q,
}
rb_link_node(&cl->pq_node, parent, p);
rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
- DEVIK_MEND(9);
}
/**
@@ -678,7 +672,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = htb_classify(skb,sch);
- DEVIK_MSTART(0);
if (cl == HTB_DIRECT || !cl) {
/* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen && cl) {
@@ -687,25 +680,20 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
} else {
kfree_skb (skb);
sch->stats.drops++;
- DEVIK_MEND(0);
return NET_XMIT_DROP;
}
} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
sch->stats.drops++;
cl->stats.drops++;
- DEVIK_MEND(0);
return NET_XMIT_DROP;
} else {
cl->stats.packets++; cl->stats.bytes += skb->len;
- DEVIK_MSTART(1);
htb_activate (q,cl);
- DEVIK_MEND(1);
}
sch->q.qlen++;
sch->stats.packets++; sch->stats.bytes += skb->len;
HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
- DEVIK_MEND(0);
return NET_XMIT_SUCCESS;
}
@@ -941,7 +929,6 @@ htb_dequeue_tree(struct htb_sched *q,int prio,int level)
//struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl,*start;
/* look initial class up in the row */
- DEVIK_MSTART(6);
start = cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
do {
@@ -960,8 +947,6 @@ htb_dequeue_tree(struct htb_sched *q,int prio,int level)
cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
} while (cl != start);
- DEVIK_MEND(6);
- DEVIK_MSTART(7);
if (likely(skb != NULL)) {
if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
@@ -973,11 +958,8 @@ htb_dequeue_tree(struct htb_sched *q,int prio,int level)
gives us slightly better performance */
if (!cl->un.leaf.q->q.qlen)
htb_deactivate (q,cl);
- DEVIK_MSTART(8);
htb_charge_class (q,cl,level,skb->len);
- DEVIK_MEND(8);
}
- DEVIK_MEND(7);
return skb;
}
@@ -1005,6 +987,9 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
struct htb_sched *q = (struct htb_sched *)sch->data;
int level;
long min_delay;
+#ifdef HTB_DEBUG
+ int evs_used = 0;
+#endif
HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
sch->q.qlen);
@@ -1016,27 +1001,26 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
return skb;
}
- DEVIK_MSTART(2);
if (!sch->q.qlen) goto fin;
PSCHED_GET_TIME(q->now);
- min_delay = HZ*5;
+ min_delay = LONG_MAX;
q->nwc_hit = 0;
for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
/* common case optimization - skip event handler quickly */
int m;
long delay;
- DEVIK_MSTART(3);
if (jiffies - q->near_ev_cache[level] < 0x80000000 || 0) {
delay = htb_do_events(q,level);
q->near_ev_cache[level] += delay ? delay : HZ;
+#ifdef HTB_DEBUG
+ evs_used++;
+#endif
} else
delay = q->near_ev_cache[level] - jiffies;
if (delay && min_delay > delay)
min_delay = delay;
- DEVIK_MEND(3);
- DEVIK_MSTART(5);
m = ~q->row_mask[level];
while (m != (int)(-1)) {
int prio = ffz (m);
@@ -1045,24 +1029,24 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
if (likely(skb != NULL)) {
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
- DEVIK_MEND(5);
goto fin;
}
}
- DEVIK_MEND(5);
}
- DEVIK_MSTART(4);
#ifdef HTB_DEBUG
- if (!q->nwc_hit && min_delay >= 5*HZ && net_ratelimit()) {
- printk(KERN_ERR "HTB: mindelay=%ld, report it please !\n",min_delay);
- htb_debug_dump(q);
+ if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
+ if (min_delay == LONG_MAX) {
+ printk(KERN_ERR "HTB: dequeue bug (%d), report it please !\n",
+ evs_used);
+ htb_debug_dump(q);
+ } else
+ printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
+ "too small rate\n",min_delay);
}
#endif
- htb_delay_by (sch,min_delay);
- DEVIK_MEND(4);
+ htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
fin:
HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,jiffies,skb);
- DEVIK_MEND(2);
return skb;
}
@@ -1433,6 +1417,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (!rtab || !ctab) goto failure;
if (!cl) { /* new class */
+ struct Qdisc *new_q;
/* check for valid classid */
if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
goto failure;
@@ -1456,6 +1441,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
cl->magic = HTB_CMAGIC;
#endif
+ /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL),
+    which can't be used inside of sch_tree_lock
+    -- thanks to Karlis Peisenieks */
+ new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
sch_tree_lock(sch);
if (parent && !parent->level) {
/* turn parent into inner node */
@@ -1474,8 +1463,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
memset (&parent->un.inner,0,sizeof(parent->un.inner));
}
/* leaf (we) needs elementary qdisc */
- if (!(cl->un.leaf.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
- cl->un.leaf.q = &noop_qdisc;
+ cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
cl->classid = classid; cl->parent = parent;
@@ -1503,11 +1491,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (!cl->level) {
cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
- printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.", cl->classid);
+ printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
cl->un.leaf.quantum = 1000;
}
if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
- printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.", cl->classid);
+ printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
cl->un.leaf.quantum = 200000;
}
if (hopt->quantum)
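
The class-creation change above follows a general locking pattern: hoist
sleeping allocations out of the BH-protected section. A sketch of the shape,
assuming the usual sch_tree_unlock() pairing and eliding the surrounding
setup:

	struct Qdisc *new_q;

	/* qdisc_create_dflt() allocates with GFP_KERNEL and may sleep,
	 * so it must run before sch_tree_lock(), which takes the
	 * BH-protected queue lock. */
	new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
	sch_tree_lock(sch);
	cl->un.leaf.q = new_q ? new_q : &noop_qdisc;	/* fall back if it failed */
	sch_tree_unlock(sch);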
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 512a4d686e5f..84f879595cff 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -78,7 +78,7 @@ krb5_encrypt(
sg[0].offset = ((long)out & ~PAGE_MASK);
sg[0].length = length;
- ret = crypto_cipher_encrypt(tfm, sg, 1);
+ ret = crypto_cipher_encrypt(tfm, sg, sg, length);
out:
dprintk("gss_k5encrypt returns %d\n",ret);
@@ -117,7 +117,7 @@ krb5_decrypt(
sg[0].offset = ((long)out & ~PAGE_MASK);
sg[0].length = length;
- ret = crypto_cipher_decrypt(tfm, sg, 1);
+ ret = crypto_cipher_decrypt(tfm, sg, sg, length);
out:
dprintk("gss_k5decrypt returns %d\n",ret);