| field | value | date |
|---|---|---|
| author | Ian Pratt <ian.pratt@cl.cam.ac.uk> | 2004-11-17 22:03:59 -0800 |
| committer | David S. Miller <davem@nuts.davemloft.net> | 2004-11-17 22:03:59 -0800 |
| commit | 8f78d753284f7bee7cf7bcd27e230eb0a68aa751 | |
| tree | 046cff98428b12d71b2426954aab9ba4c6dcc0cc | |
| parent | fcf4a4369976cb355092c049e30916d9706fbae0 | |
[NET]: Add alloc_skb_from_cache.
This serves two purposes. Firstly, we like to allocate page-sized skbs,
since that allows zero-copy transfer of network buffers between guest
operating systems. Secondly, it enables us to keep a cache of pages that
have already been used for network buffers; we can be more lax about
scrubbing those pages when they change VM ownership, since their
contents could have been sniffed on the wire anyway.
Signed-off-by: David S. Miller <davem@davemloft.net>
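For illustration, here is a minimal sketch of how a guest network driver could use the new helper. Everything in it (the cache name `net_page_cachep`, the init and alloc functions) is a hypothetical caller, not code from this patch; it assumes a 2.6-era kernel, where gfp masks are plain `int`s and slab caches are `kmem_cache_t`:

```c
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

/* Hypothetical cache: one page per object, page-aligned, so the data
 * area of every skb allocated from it is a whole page that can be
 * handed between guest domains without copying.
 */
static kmem_cache_t *net_page_cachep;

static int __init net_page_cache_init(void)
{
        net_page_cachep = kmem_cache_create("net_page_cache",
                                            PAGE_SIZE, PAGE_SIZE,
                                            0, NULL, NULL);
        return net_page_cachep ? 0 : -ENOMEM;
}

static struct sk_buff *rx_skb_alloc(void)
{
        /* SKB_MAX_HEAD(0) is the largest @size whose aligned data area
         * plus struct skb_shared_info still fits in one page, i.e. the
         * "@size bytes + skb overheads" constraint in the kerneldoc.
         */
        return alloc_skb_from_cache(net_page_cachep, SKB_MAX_HEAD(0),
                                    GFP_ATOMIC);
}
```

Note that `alloc_skb_from_cache()` rounds `@size` up with `SKB_DATA_ALIGN()` and places `struct skb_shared_info` at `skb->end`, so a cache object must cover the aligned size plus that struct; sizing requests with `SKB_MAX_HEAD(0)` keeps the whole allocation inside one page.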
| mode | file | insertions |
|---|---|---|
| -rw-r--r-- | include/linux/skbuff.h | 6 |
| -rw-r--r-- | net/core/skbuff.c | 53 |

2 files changed, 59 insertions, 0 deletions
```diff
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 71fc2bdc0cd6..57a2843faa21 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -292,6 +292,8 @@ struct sk_buff {
 
 extern void __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *alloc_skb(unsigned int size, int priority);
+extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+                                            unsigned int size, int priority);
 extern void kfree_skbmem(struct sk_buff *skb);
 extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
@@ -935,6 +937,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
  *
  *      %NULL is returned in there is no free memory.
  */
+#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
                                               int gfp_mask)
 {
@@ -943,6 +946,9 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
         skb_reserve(skb, 16);
         return skb;
 }
+#else
+extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
+#endif
 
 /**
  *      dev_alloc_skb - allocate an skbuff for sending
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0bc35a015555..a6d1d698230e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -163,6 +163,59 @@ nodata:
         goto out;
 }
 
+/**
+ *      alloc_skb_from_cache - allocate a network buffer
+ *      @cp: kmem_cache from which to allocate the data area
+ *           (object size must be big enough for @size bytes + skb overheads)
+ *      @size: size to allocate
+ *      @gfp_mask: allocation mask
+ *
+ *      Allocate a new &sk_buff. The returned buffer has no headroom and
+ *      tail room of size bytes. The object has a reference count of one.
+ *      The return is the buffer. On a failure the return is %NULL.
+ *
+ *      Buffers may only be allocated from interrupts using a @gfp_mask of
+ *      %GFP_ATOMIC.
+ */
+struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+                                     unsigned int size, int gfp_mask)
+{
+        struct sk_buff *skb;
+        u8 *data;
+
+        /* Get the HEAD */
+        skb = kmem_cache_alloc(skbuff_head_cache,
+                               gfp_mask & ~__GFP_DMA);
+        if (!skb)
+                goto out;
+
+        /* Get the DATA. */
+        size = SKB_DATA_ALIGN(size);
+        data = kmem_cache_alloc(cp, gfp_mask);
+        if (!data)
+                goto nodata;
+
+        memset(skb, 0, offsetof(struct sk_buff, truesize));
+        skb->truesize = size + sizeof(struct sk_buff);
+        atomic_set(&skb->users, 1);
+        skb->head = data;
+        skb->data = data;
+        skb->tail = data;
+        skb->end  = data + size;
+
+        atomic_set(&(skb_shinfo(skb)->dataref), 1);
+        skb_shinfo(skb)->nr_frags = 0;
+        skb_shinfo(skb)->tso_size = 0;
+        skb_shinfo(skb)->tso_segs = 0;
+        skb_shinfo(skb)->frag_list = NULL;
+out:
+        return skb;
+nodata:
+        kmem_cache_free(skbuff_head_cache, skb);
+        skb = NULL;
+        goto out;
+}
+
 static void skb_drop_fraglist(struct sk_buff *skb)
 {
```
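The `CONFIG_HAVE_ARCH_DEV_ALLOC_SKB` guard exists so that an architecture (or a port such as Xen) can replace the generic inline `__dev_alloc_skb()` with an out-of-line version; in that configuration only the `extern` declaration remains visible. A sketch of what such an override might look like, reusing the hypothetical `net_page_cachep` cache from the example above; this is not code from the patch:

```c
#include <linux/skbuff.h>
#include <linux/slab.h>

/* Hypothetical out-of-line __dev_alloc_skb, compiled only when the
 * architecture selects CONFIG_HAVE_ARCH_DEV_ALLOC_SKB.  It routes all
 * device skb allocations through the page-sized cache.
 */
extern kmem_cache_t *net_page_cachep;  /* assumed set up at driver init */

struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
{
        struct sk_buff *skb;

        /* Mirror the generic inline: 16 bytes of headroom for
         * link-layer headers, carved out of the data area.
         */
        skb = alloc_skb_from_cache(net_page_cachep, length + 16, gfp_mask);
        if (skb)
                skb_reserve(skb, 16);
        return skb;
}
```

Keeping the override out of line leaves the inline fast path untouched for every other architecture, and the matching `skb_reserve(skb, 16)` means callers see identical headroom whichever variant is compiled in.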
