author		Arnaldo Carvalho de Melo <acme@conectiva.com.br>	2002-06-07 16:42:57 -0300
committer	Arnaldo Carvalho de Melo <acme@conectiva.com.br>	2002-06-07 16:42:57 -0300
commit		574b2d875eec4882f8a03171d163754cc6f35fc3 (patch)
tree		5d3a2c3540b6a4ab34b1eeee8790c12043df0490
parent		d0f0cde199764cb083b3617c3739f45b1a73052d (diff)
net/core/skbuff.c
include/linux/skbuff.h

- remove spurious spaces and tabs at end of lines
- make sure if, while, for, switch has a space before the opening '('
- make sure no line has more than 80 chars
- move initializations to the declaration line where possible
- bitwise, logical and arithmetic operators have spaces before and after,
  improving readability of complex expressions
- remove unneeded () in returns
- use kdoc comments
- other minor cleanups

Sizes:

Before:
   text    data     bss     dec     hex filename
   7088       8    2080    9176    23d8 net/core/skbuff.o

After:
   text    data     bss     dec     hex filename
   7056       4    2080    9140    23b4 net/core/skbuff.o
-rw-r--r--	include/linux/skbuff.h	499
-rw-r--r--	net/core/skbuff.c	469
2 files changed, 473 insertions, 495 deletions
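
To make the rules above concrete before reading the diff, here is a minimal,
hypothetical before/after sketch of the kind of transformation the patch
applies throughout; the function and variable names are illustrative only and
do not appear in the patch itself.

/* Before: no space after "if", cramped operators, unneeded () in return. */
static inline int skb_example_room(struct sk_buff *skb)
{
	int room;
	room = skb->end-skb->tail;
	if(room<0)
		return (0);
	return (room);
}

/* After: space before '(', spaces around binary operators, initialization
 * moved to the declaration line, and the if/return pair collapsed to a
 * ternary, mirroring what the patch does to skb_pull() and pskb_trim().
 */
static inline int skb_example_room(struct sk_buff *skb)
{
	int room = skb->end - skb->tail;

	return (room < 0) ? 0 : room;
}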
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ddfd7b3b1c75..1e7feee38df6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -10,7 +10,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-
+
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H
@@ -35,10 +35,13 @@
#define CHECKSUM_HW 1
#define CHECKSUM_UNNECESSARY 2
-#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
-#define SKB_MAX_ORDER(X,ORDER) (((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
-#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X),0))
-#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0,2))
+#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \
+ ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_ORDER(X, ORDER) (((PAGE_SIZE << (ORDER)) - (X) - \
+ sizeof(struct skb_shared_info)) & \
+ ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
+#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
/* A. Checksumming of received packets by device.
*
@@ -79,7 +82,7 @@
*/
#ifdef __i386__
-#define NET_CALLER(arg) (*(((void**)&arg)-1))
+#define NET_CALLER(arg) (*(((void **)&arg) - 1))
#else
#define NET_CALLER(arg) __builtin_return_address(0)
#endif
@@ -97,8 +100,8 @@ struct nf_ct_info {
struct sk_buff_head {
/* These two members must be first. */
- struct sk_buff * next;
- struct sk_buff * prev;
+ struct sk_buff *next;
+ struct sk_buff *prev;
__u32 qlen;
spinlock_t lock;
@@ -110,8 +113,7 @@ struct sk_buff;
typedef struct skb_frag_struct skb_frag_t;
-struct skb_frag_struct
-{
+struct skb_frag_struct {
struct page *page;
__u16 page_offset;
__u16 size;
@@ -127,19 +129,54 @@ struct skb_shared_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
+/**
+ * struct sk_buff - socket buffer
+ * @next: Next buffer in list
+ * @prev: Previous buffer in list
+ * @list: List we are on
+ * @sk: Socket we are owned by
+ * @stamp: Time we arrived
+ * @dev: Device we arrived on/are leaving by
+ * @h: Transport layer header
+ * @nh: Network layer header
+ * @mac: Link layer header
+ * @dst: FIXME: Describe this field
+ * @cb: Control buffer. Free for use by every layer. Put private vars here
+ * @len: Length of actual data
+ * @data_len: Data length
+ * @csum: Checksum
+ * @__unused: Dead field, may be reused
+ * @cloned: Head may be cloned (check refcnt to be sure)
+ * @pkt_type: Packet class
+ * @ip_summed: Driver fed us an IP checksum
+ * @priority: Packet queueing priority
+ * @users: User count - see {datagram,tcp}.c
+ * @protocol: Packet protocol from driver
+ * @security: Security level of packet
+ * @truesize: Buffer size
+ * @head: Head of buffer
+ * @data: Data head pointer
+ * @tail: Tail pointer
+ * @end: End pointer
+ * @destructor: Destruct function
+ * @nfmark: Can be used for communication between hooks
+ * @nfcache: Cache info
+ * @nfct: Associated connection, if any
+ * @nf_debug: Netfilter debugging
+ * @tc_index: Traffic control index
+ */
+
struct sk_buff {
/* These two members must be first. */
- struct sk_buff * next; /* Next buffer in list */
- struct sk_buff * prev; /* Previous buffer in list */
+ struct sk_buff *next;
+ struct sk_buff *prev;
- struct sk_buff_head * list; /* List we are on */
- struct sock *sk; /* Socket we are owned by */
- struct timeval stamp; /* Time we arrived */
- struct net_device *dev; /* Device we arrived on/are leaving by */
+ struct sk_buff_head *list;
+ struct sock *sk;
+ struct timeval stamp;
+ struct net_device *dev;
- /* Transport layer header */
- union
- {
+ union {
struct tcphdr *th;
struct udphdr *uh;
struct icmphdr *icmph;
@@ -149,72 +186,63 @@ struct sk_buff {
unsigned char *raw;
} h;
- /* Network layer header */
- union
- {
+ union {
struct iphdr *iph;
struct ipv6hdr *ipv6h;
struct arphdr *arph;
struct ipxhdr *ipxh;
unsigned char *raw;
} nh;
-
- /* Link layer header */
- union
- {
+
+ union {
struct ethhdr *ethernet;
unsigned char *raw;
} mac;
- struct dst_entry *dst;
+ struct dst_entry *dst;
- /*
+ /*
* This is the control buffer. It is free to use for every
* layer. Please put your private variables there. If you
* want to keep them across layers you have to do a skb_clone()
* first. This is owned by whoever has the skb queued ATM.
- */
- char cb[48];
-
- unsigned int len; /* Length of actual data */
- unsigned int data_len;
- unsigned int csum; /* Checksum */
- unsigned char __unused, /* Dead field, may be reused */
- cloned, /* head may be cloned (check refcnt to be sure). */
- pkt_type, /* Packet class */
- ip_summed; /* Driver fed us an IP checksum */
- __u32 priority; /* Packet queueing priority */
- atomic_t users; /* User count - see datagram.c,tcp.c */
- unsigned short protocol; /* Packet protocol from driver. */
- unsigned short security; /* Security level of packet */
- unsigned int truesize; /* Buffer size */
-
- unsigned char *head; /* Head of buffer */
- unsigned char *data; /* Data head pointer */
- unsigned char *tail; /* Tail pointer */
- unsigned char *end; /* End pointer */
-
- void (*destructor)(struct sk_buff *); /* Destruct function */
+ */
+ char cb[48];
+
+ unsigned int len,
+ data_len,
+ csum;
+ unsigned char __unused,
+ cloned,
+ pkt_type,
+ ip_summed;
+ __u32 priority;
+ atomic_t users;
+ unsigned short protocol,
+ security;
+ unsigned int truesize;
+
+ unsigned char *head,
+ *data,
+ *tail,
+ *end;
+
+ void (*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
- /* Can be used for communication between hooks. */
- unsigned long nfmark;
- /* Cache info */
- __u32 nfcache;
- /* Associated connection, if any */
- struct nf_ct_info *nfct;
+ unsigned long nfmark;
+ __u32 nfcache;
+ struct nf_ct_info *nfct;
#ifdef CONFIG_NETFILTER_DEBUG
- unsigned int nf_debug;
+ unsigned int nf_debug;
#endif
-#endif /*CONFIG_NETFILTER*/
-
+#endif /* CONFIG_NETFILTER */
#if defined(CONFIG_HIPPI)
- union{
- __u32 ifield;
+ union {
+ __u32 ifield;
} private;
#endif
-
#ifdef CONFIG_NET_SCHED
- __u32 tc_index; /* traffic control index */
+ __u32 tc_index; /* traffic control index */
#endif
};
@@ -229,21 +257,24 @@ struct sk_buff {
#include <asm/system.h>
-extern void __kfree_skb(struct sk_buff *skb);
-extern struct sk_buff * alloc_skb(unsigned int size, int priority);
-extern void kfree_skbmem(struct sk_buff *skb);
-extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
-extern struct sk_buff * skb_copy(const struct sk_buff *skb, int priority);
-extern struct sk_buff * pskb_copy(struct sk_buff *skb, int gfp_mask);
-extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
-extern struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
-extern struct sk_buff * skb_copy_expand(const struct sk_buff *skb,
- int newheadroom,
- int newtailroom,
- int priority);
+extern void __kfree_skb(struct sk_buff *skb);
+extern struct sk_buff *alloc_skb(unsigned int size, int priority);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
+extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask);
+extern int pskb_expand_head(struct sk_buff *skb,
+ int nhead, int ntail, int gfp_mask);
+extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+ unsigned int headroom);
+extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+ int newheadroom, int newtailroom,
+ int priority);
#define dev_kfree_skb(a) kfree_skb(a)
-extern void skb_over_panic(struct sk_buff *skb, int len, void *here);
-extern void skb_under_panic(struct sk_buff *skb, int len, void *here);
+extern void skb_over_panic(struct sk_buff *skb, int len,
+ void *here);
+extern void skb_under_panic(struct sk_buff *skb, int len,
+ void *here);
/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end))
@@ -254,10 +285,9 @@ extern void skb_under_panic(struct sk_buff *skb, int len, void *here);
*
* Returns true if the queue is empty, false otherwise.
*/
-
static inline int skb_queue_empty(struct sk_buff_head *list)
{
- return (list->next == (struct sk_buff *) list);
+ return list->next == (struct sk_buff *)list;
}
/**
@@ -267,7 +297,6 @@ static inline int skb_queue_empty(struct sk_buff_head *list)
* Makes another reference to a socket buffer and returns a pointer
* to the buffer.
*/
-
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
atomic_inc(&skb->users);
@@ -275,10 +304,10 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
}
/*
- * If users==1, we are the only owner and are can avoid redundant
+ * If users == 1, we are the only owner and are can avoid redundant
* atomic change.
*/
-
+
/**
* kfree_skb - free an sk_buff
* @skb: buffer to free
@@ -286,7 +315,6 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
* Drop a reference to the buffer and free it if the usage count has
* hit zero.
*/
-
static inline void kfree_skb(struct sk_buff *skb)
{
if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
@@ -297,7 +325,7 @@ static inline void kfree_skb(struct sk_buff *skb)
static inline void kfree_skb_fast(struct sk_buff *skb)
{
if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
- kfree_skbmem(skb);
+ kfree_skbmem(skb);
}
/**
@@ -308,7 +336,6 @@ static inline void kfree_skb_fast(struct sk_buff *skb)
* one of multiple shared copies of the buffer. Cloned buffers are
* shared data so must not be written to under normal circumstances.
*/
-
static inline int skb_cloned(struct sk_buff *skb)
{
return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
@@ -321,17 +348,16 @@ static inline int skb_cloned(struct sk_buff *skb)
* Returns true if more than one person has a reference to this
* buffer.
*/
-
static inline int skb_shared(struct sk_buff *skb)
{
- return (atomic_read(&skb->users) != 1);
+ return atomic_read(&skb->users) != 1;
}
-/**
+/**
* skb_share_check - check if buffer is shared and if so clone it
* @skb: buffer to check
* @pri: priority for memory allocation
- *
+ *
* If the buffer is shared the buffer is cloned and the old copy
* drops a reference. A new clone with a single reference is returned.
* If the buffer is not shared the original buffer is returned. When
@@ -340,26 +366,23 @@ static inline int skb_shared(struct sk_buff *skb)
*
* NULL is returned on a memory allocation failure.
*/
-
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{
if (skb_shared(skb)) {
- struct sk_buff *nskb;
- nskb = skb_clone(skb, pri);
+ struct sk_buff *nskb = skb_clone(skb, pri);
kfree_skb(skb);
- return nskb;
+ skb = nskb;
}
return skb;
}
-
/*
* Copy shared buffers into a new sk_buff. We effectively do COW on
* packets to handle cases where we have a local reader and forward
* and a couple of other messy ones. The normal one is tcpdumping
* a packet thats being forwarded.
*/
-
+
/**
* skb_unshare - make a copy of a shared buffer
* @skb: buffer to check
@@ -373,15 +396,14 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
*
* %NULL is returned on a memory allocation failure.
*/
-
static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{
- struct sk_buff *nskb;
- if(!skb_cloned(skb))
- return skb;
- nskb=skb_copy(skb, pri);
- kfree_skb(skb); /* Free our shared copy */
- return nskb;
+ if (skb_cloned(skb)) {
+ struct sk_buff *nskb = skb_copy(skb, pri);
+ kfree_skb(skb); /* Free our shared copy */
+ skb = nskb;
+ }
+ return skb;
}
/**
@@ -397,7 +419,6 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
-
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
struct sk_buff *list = ((struct sk_buff *)list_)->next;
@@ -419,7 +440,6 @@ static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
-
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
struct sk_buff *list = ((struct sk_buff *)list_)->prev;
@@ -432,19 +452,17 @@ static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
* skb_queue_len - get queue length
* @list_: list to measure
*
- * Return the length of an &sk_buff queue.
+ * Return the length of an &sk_buff queue.
*/
-
static inline __u32 skb_queue_len(struct sk_buff_head *list_)
{
- return(list_->qlen);
+ return list_->qlen;
}
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
spin_lock_init(&list->lock);
- list->prev = (struct sk_buff *)list;
- list->next = (struct sk_buff *)list;
+ list->prev = list->next = (struct sk_buff *)list;
list->qlen = 0;
}
@@ -464,9 +482,9 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
* and you must therefore hold required locks before calling it.
*
* A buffer cannot be placed on two lists at the same time.
- */
-
-static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+ */
+static inline void __skb_queue_head(struct sk_buff_head *list,
+ struct sk_buff *newsk)
{
struct sk_buff *prev, *next;
@@ -476,8 +494,7 @@ static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *n
next = prev->next;
newsk->next = next;
newsk->prev = prev;
- next->prev = newsk;
- prev->next = newsk;
+ next->prev = prev->next = newsk;
}
@@ -491,9 +508,9 @@ static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *n
* safely.
*
* A buffer cannot be placed on two lists at the same time.
- */
-
-static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+ */
+static inline void skb_queue_head(struct sk_buff_head *list,
+ struct sk_buff *newsk)
{
unsigned long flags;
@@ -511,10 +528,9 @@ static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *new
* and you must therefore hold required locks before calling it.
*
* A buffer cannot be placed on two lists at the same time.
- */
-
-
-static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+ */
+static inline void __skb_queue_tail(struct sk_buff_head *list,
+ struct sk_buff *newsk)
{
struct sk_buff *prev, *next;
@@ -524,8 +540,7 @@ static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *n
prev = next->prev;
newsk->next = next;
newsk->prev = prev;
- next->prev = newsk;
- prev->next = newsk;
+ next->prev = prev->next = newsk;
}
/**
@@ -538,9 +553,9 @@ static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *n
* safely.
*
* A buffer cannot be placed on two lists at the same time.
- */
-
-static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+ */
+static inline void skb_queue_tail(struct sk_buff_head *list,
+ struct sk_buff *newsk)
{
unsigned long flags;
@@ -557,7 +572,6 @@ static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *new
* so must be used with appropriate locks held only. The head item is
* returned or %NULL if the list is empty.
*/
-
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
struct sk_buff *next, *prev, *result;
@@ -566,13 +580,12 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
next = prev->next;
result = NULL;
if (next != prev) {
- result = next;
- next = next->next;
+ result = next;
+ next = next->next;
list->qlen--;
- next->prev = prev;
- prev->next = next;
- result->next = NULL;
- result->prev = NULL;
+ next->prev = prev;
+ prev->next = next;
+ result->next = result->prev = NULL;
result->list = NULL;
}
return result;
@@ -603,13 +616,12 @@ static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
*/
static inline void __skb_insert(struct sk_buff *newsk,
- struct sk_buff * prev, struct sk_buff *next,
- struct sk_buff_head * list)
+ struct sk_buff *prev, struct sk_buff *next,
+ struct sk_buff_head *list)
{
newsk->next = next;
newsk->prev = prev;
- next->prev = newsk;
- prev->next = newsk;
+ next->prev = prev->next = newsk;
newsk->list = list;
list->qlen++;
}
@@ -666,17 +678,15 @@ static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
* remove sk_buff from list. _Must_ be called atomically, and with
* the list known..
*/
-
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
- struct sk_buff * next, * prev;
+ struct sk_buff *next, *prev;
list->qlen--;
- next = skb->next;
- prev = skb->prev;
- skb->next = NULL;
- skb->prev = NULL;
- skb->list = NULL;
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = skb->prev = NULL;
+ skb->list = NULL;
next->prev = prev;
prev->next = next;
}
@@ -687,22 +697,21 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
*
* Place a packet after a given packet in a list. The list locks are taken
* and this function is atomic with respect to other list locked calls
- *
- * Works even without knowing the list it is sitting on, which can be
- * handy at times. It also means that THE LIST MUST EXIST when you
+ *
+ * Works even without knowing the list it is sitting on, which can be
+ * handy at times. It also means that THE LIST MUST EXIST when you
* unlink. Thus a list must have its contents unlinked before it is
* destroyed.
*/
-
static inline void skb_unlink(struct sk_buff *skb)
{
struct sk_buff_head *list = skb->list;
- if(list) {
+ if (list) {
unsigned long flags;
spin_lock_irqsave(&list->lock, flags);
- if(skb->list == list)
+ if (skb->list == list)
__skb_unlink(skb, skb->list);
spin_unlock_irqrestore(&list->lock, flags);
}
@@ -718,10 +727,9 @@ static inline void skb_unlink(struct sk_buff *skb)
* so must be used with appropriate locks held only. The tail item is
* returned or %NULL if the list is empty.
*/
-
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
- struct sk_buff *skb = skb_peek_tail(list);
+ struct sk_buff *skb = skb_peek_tail(list);
if (skb)
__skb_unlink(skb, list);
return skb;
@@ -735,7 +743,6 @@ static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
* may be used safely with other locking list functions. The tail item is
* returned or %NULL if the list is empty.
*/
-
static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
unsigned long flags;
@@ -757,83 +764,81 @@ static inline int skb_headlen(const struct sk_buff *skb)
return skb->len - skb->data_len;
}
-#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) BUG(); } while (0)
-#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) BUG(); } while (0)
-#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) BUG(); } while (0)
+#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) \
+ BUG(); } while (0)
+#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) \
+ BUG(); } while (0)
+#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) \
+ BUG(); } while (0)
/*
* Add data to an sk_buff
*/
-
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
- unsigned char *tmp=skb->tail;
+ unsigned char *tmp = skb->tail;
SKB_LINEAR_ASSERT(skb);
- skb->tail+=len;
- skb->len+=len;
+ skb->tail += len;
+ skb->len += len;
return tmp;
}
/**
* skb_put - add data to a buffer
- * @skb: buffer to use
+ * @skb: buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the buffer. If this would
* exceed the total buffer size the kernel will panic. A pointer to the
* first byte of the extra data is returned.
*/
-
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
- unsigned char *tmp=skb->tail;
+ unsigned char *tmp = skb->tail;
SKB_LINEAR_ASSERT(skb);
- skb->tail+=len;
- skb->len+=len;
- if(skb->tail>skb->end) {
+ skb->tail += len;
+ skb->len += len;
+ if (skb->tail>skb->end)
skb_over_panic(skb, len, current_text_addr());
- }
return tmp;
}
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
- skb->data-=len;
- skb->len+=len;
+ skb->data -= len;
+ skb->len += len;
return skb->data;
}
/**
* skb_push - add data to the start of a buffer
- * @skb: buffer to use
+ * @skb: buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the buffer at the buffer
* start. If this would exceed the total buffer headroom the kernel will
* panic. A pointer to the first byte of the extra data is returned.
*/
-
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
- skb->data-=len;
- skb->len+=len;
- if(skb->data<skb->head) {
+ skb->data -= len;
+ skb->len += len;
+ if (skb->data<skb->head)
skb_under_panic(skb, len, current_text_addr());
- }
return skb->data;
}
static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
- skb->len-=len;
+ skb->len -= len;
if (skb->len < skb->data_len)
BUG();
- return skb->data+=len;
+ return skb->data += len;
}
/**
* skb_pull - remove data from the start of a buffer
- * @skb: buffer to use
+ * @skb: buffer to use
* @len: amount of data to remove
*
* This function removes data from the start of a buffer, returning
@@ -841,30 +846,25 @@ static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
* is returned. Once the data has been pulled future pushes will overwrite
* the old data.
*/
-
-static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
-{
- if (len > skb->len)
- return NULL;
- return __skb_pull(skb,len);
+static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
+{
+ return (len > skb->len) ? NULL : __skb_pull(skb, len);
}
-extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
+extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb_headlen(skb) &&
- __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
+ !__pskb_pull_tail(skb, len-skb_headlen(skb)))
return NULL;
skb->len -= len;
- return skb->data += len;
+ return skb->data += len;
}
-static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
-{
- if (len > skb->len)
- return NULL;
- return __pskb_pull(skb,len);
+static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+ return (len > skb->len) ? NULL : __pskb_pull(skb, len);
}
static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
@@ -873,7 +873,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
return 1;
if (len > skb->len)
return 0;
- return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
+ return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
}
/**
@@ -882,10 +882,9 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
*
* Return the number of bytes of free space at the head of an &sk_buff.
*/
-
static inline int skb_headroom(const struct sk_buff *skb)
{
- return skb->data-skb->head;
+ return skb->data - skb->head;
}
/**
@@ -894,10 +893,9 @@ static inline int skb_headroom(const struct sk_buff *skb)
*
* Return the number of bytes of free space at the tail of an sk_buff
*/
-
static inline int skb_tailroom(const struct sk_buff *skb)
{
- return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
+ return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}
/**
@@ -908,11 +906,10 @@ static inline int skb_tailroom(const struct sk_buff *skb)
* Increase the headroom of an empty &sk_buff by reducing the tail
* room. This is only allowed for an empty buffer.
*/
-
static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
{
- skb->data+=len;
- skb->tail+=len;
+ skb->data += len;
+ skb->tail += len;
}
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
@@ -920,11 +917,10 @@ extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
if (!skb->data_len) {
- skb->len = len;
- skb->tail = skb->data+len;
- } else {
+ skb->len = len;
+ skb->tail = skb->data + len;
+ } else
___pskb_trim(skb, len, 0);
- }
}
/**
@@ -935,31 +931,26 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
* Cut the length of a buffer down by removing data from the tail. If
* the buffer is already under the length specified it is not modified.
*/
-
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
- if (skb->len > len) {
+ if (skb->len > len)
__skb_trim(skb, len);
- }
}
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
if (!skb->data_len) {
- skb->len = len;
+ skb->len = len;
skb->tail = skb->data+len;
return 0;
- } else {
- return ___pskb_trim(skb, len, 1);
}
+ return ___pskb_trim(skb, len, 1);
}
static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
- if (len < skb->len)
- return __pskb_trim(skb, len);
- return 0;
+ return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
/**
@@ -970,47 +961,41 @@ static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
* destructor function and make the @skb unowned. The buffer continues
* to exist but is no longer charged to its former owner.
*/
-
-
static inline void skb_orphan(struct sk_buff *skb)
{
if (skb->destructor)
skb->destructor(skb);
skb->destructor = NULL;
- skb->sk = NULL;
+ skb->sk = NULL;
}
/**
- * skb_purge - empty a list
+ * skb_queue_purge - empty a list
* @list: list to empty
*
* Delete all buffers on an &sk_buff list. Each buffer is removed from
* the list and one reference dropped. This function takes the list
* lock and is atomic with respect to other list locking functions.
*/
-
-
static inline void skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
- while ((skb=skb_dequeue(list))!=NULL)
+ while ((skb = skb_dequeue(list)) != NULL)
kfree_skb(skb);
}
/**
- * __skb_purge - empty a list
+ * __skb_queue_purge - empty a list
* @list: list to empty
*
* Delete all buffers on an &sk_buff list. Each buffer is removed from
* the list and one reference dropped. This function does not take the
* list lock and the caller must hold the relevant locks to use it.
*/
-
-
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
- while ((skb=__skb_dequeue(list))!=NULL)
+ while ((skb = __skb_dequeue(list)) != NULL)
kfree_skb(skb);
}
@@ -1026,15 +1011,12 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
*
* %NULL is returned in there is no free memory.
*/
-
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
int gfp_mask)
{
- struct sk_buff *skb;
-
- skb = alloc_skb(length+16, gfp_mask);
+ struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
if (skb)
- skb_reserve(skb,16);
+ skb_reserve(skb, 16);
return skb;
}
@@ -1050,7 +1032,6 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
* %NULL is returned in there is no free memory. Although this function
* allocates memory it can be called from an interrupt.
*/
-
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
return __dev_alloc_skb(length, GFP_ATOMIC);
@@ -1068,9 +1049,7 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length)
* The result is skb with writable area skb->head...skb->tail
* and at least @headroom of space at head.
*/
-
-static inline int
-skb_cow(struct sk_buff *skb, unsigned int headroom)
+static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
@@ -1078,7 +1057,7 @@ skb_cow(struct sk_buff *skb, unsigned int headroom)
delta = 0;
if (delta || skb_cloned(skb))
- return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
+ return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
return 0;
}
@@ -1088,7 +1067,8 @@ skb_cow(struct sk_buff *skb, unsigned int headroom)
* @gfp: allocation mode
*
* If there is no free memory -ENOMEM is returned, otherwise zero
- * is returned and the old skb data released. */
+ * is returned and the old skb data released.
+ */
int skb_linearize(struct sk_buff *skb, int gfp);
static inline void *kmap_skb_frag(const skb_frag_t *frag)
@@ -1113,34 +1093,45 @@ static inline void kunmap_skb_frag(void *vaddr)
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
(skb != (struct sk_buff *)(queue)); \
- skb=skb->next)
-
-
-extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
-extern unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
-extern int skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
-extern int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to,int size);
-extern int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
-extern int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
-extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
-
-extern unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
-extern int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
-extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
-extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+ skb = skb->next)
+
+
+extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+ int noblock, int *err);
+extern unsigned int datagram_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+extern int skb_copy_datagram(const struct sk_buff *from,
+ int offset, char *to, int size);
+extern int skb_copy_datagram_iovec(const struct sk_buff *from,
+ int offset, struct iovec *to,
+ int size);
+extern int skb_copy_and_csum_datagram(const struct sk_buff *skb,
+ int offset, u8 *to, int len,
+ unsigned int *csump);
+extern int skb_copy_and_csum_datagram_iovec(const
+ struct sk_buff *skb,
+ int hlen,
+ struct iovec *iov);
+extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+extern unsigned int skb_checksum(const struct sk_buff *skb, int offset,
+ int len, unsigned int csum);
+extern int skb_copy_bits(const struct sk_buff *skb, int offset,
+ void *to, int len);
+extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb,
+ int offset, u8 *to, int len,
+ unsigned int csum);
+extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_init(void);
extern void skb_add_mtu(int mtu);
#ifdef CONFIG_NETFILTER
-static inline void
-nf_conntrack_put(struct nf_ct_info *nfct)
+static inline void nf_conntrack_put(struct nf_ct_info *nfct)
{
if (nfct && atomic_dec_and_test(&nfct->master->use))
nfct->master->destroy(nfct->master);
}
-static inline void
-nf_conntrack_get(struct nf_ct_info *nfct)
+static inline void nf_conntrack_get(struct nf_ct_info *nfct)
{
if (nfct)
atomic_inc(&nfct->master->use);
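
Before moving on to net/core/skbuff.c, a short usage note on the queue helpers
whose prototypes were re-wrapped above: a minimal sketch assuming a
hypothetical deferred-processing queue (the example_* names are not part of
the patch).

static struct sk_buff_head example_queue;

static void example_init(void)
{
	skb_queue_head_init(&example_queue);
}

static void example_enqueue(struct sk_buff *skb)
{
	skb_queue_tail(&example_queue, skb);	/* takes the queue lock */
}

static void example_drain(void)
{
	struct sk_buff *skb;

	/* skb_dequeue() returns NULL once the queue is empty. */
	while ((skb = skb_dequeue(&example_queue)) != NULL)
		kfree_skb(skb);
}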
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9c2bc5375880..eedb9f71425c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6,8 +6,9 @@
*
* Version: $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
*
- * Fixes:
- * Alan Cox : Fixed the worst of the load balancer bugs.
+ * Fixes:
+ * Alan Cox : Fixed the worst of the load
+ * balancer bugs.
* Dave Platt : Interrupt stacking fix.
* Richard Kooijman : Timestamp fixes.
* Alan Cox : Changed buffer format.
@@ -21,8 +22,8 @@
* Andi Kleen : slabified it.
*
* NOTE:
- * The __skb_ routines should be called with interrupts
- * disabled, or you better be *real* sure that the operation is atomic
+ * The __skb_ routines should be called with interrupts
+ * disabled, or you better be *real* sure that the operation is atomic
* with respect to whatever list is being frobbed (e.g. via lock_sock()
* or via disabling bottom half handlers, etc).
*
@@ -73,7 +74,7 @@ static union {
/*
* Keep out-of-line to prevent kernel bloat.
* __builtin_return_address is not used because it is not always
- * reliable.
+ * reliable.
*/
/**
@@ -84,10 +85,9 @@ static union {
*
* Out of line support code for skb_put(). Not user callable.
*/
-
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
- printk("skput:over: %p:%d put:%d dev:%s",
+ printk(KERN_INFO "skput:over: %p:%d put:%d dev:%s",
here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
@@ -100,29 +100,27 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
*
* Out of line support code for skb_push(). Not user callable.
*/
-
void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
- printk("skput:under: %p:%d put:%d dev:%s",
- here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
+ printk(KERN_INFO "skput:under: %p:%d put:%d dev:%s",
+ here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
static __inline__ struct sk_buff *skb_head_from_pool(void)
{
struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
+ struct sk_buff *skb = NULL;
if (skb_queue_len(list)) {
- struct sk_buff *skb;
unsigned long flags;
local_irq_save(flags);
skb = __skb_dequeue(list);
local_irq_restore(flags);
- return skb;
}
- return NULL;
+ return skb;
}
static __inline__ void skb_head_to_pool(struct sk_buff *skb)
@@ -135,17 +133,15 @@ static __inline__ void skb_head_to_pool(struct sk_buff *skb)
local_irq_save(flags);
__skb_queue_head(list, skb);
local_irq_restore(flags);
-
- return;
- }
- kmem_cache_free(skbuff_head_cache, skb);
+ } else
+ kmem_cache_free(skbuff_head_cache, skb);
}
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
* 'private' fields and also do memory statistics to find all the
* [BEEP] leaks.
- *
+ *
*/
/**
@@ -160,14 +156,13 @@ static __inline__ void skb_head_to_pool(struct sk_buff *skb)
* Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC.
*/
-
-struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
+struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
{
struct sk_buff *skb;
u8 *data;
if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
- static int count = 0;
+ static int count;
if (++count < 5) {
printk(KERN_ERR "alloc_skb called nonatomically "
"from interrupt %p\n", NET_CALLER(size));
@@ -178,76 +173,74 @@ struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
/* Get the HEAD */
skb = skb_head_from_pool();
- if (skb == NULL) {
- skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA);
- if (skb == NULL)
- goto nohead;
+ if (!skb) {
+ skb = kmem_cache_alloc(skbuff_head_cache,
+ gfp_mask & ~__GFP_DMA);
+ if (!skb)
+ goto out;
}
/* Get the DATA. Size must match skb_add_mtu(). */
size = SKB_DATA_ALIGN(size);
data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
- if (data == NULL)
+ if (!data)
goto nodata;
- /* XXX: does not include slab overhead */
+ /* XXX: does not include slab overhead */
skb->truesize = size + sizeof(struct sk_buff);
/* Load the data pointers. */
- skb->head = data;
- skb->data = data;
- skb->tail = data;
- skb->end = data + size;
+ skb->head = skb->data = skb->tail = data;
+ skb->end = data + size;
/* Set up other state */
- skb->len = 0;
- skb->cloned = 0;
+ skb->len = 0;
+ skb->cloned = 0;
skb->data_len = 0;
- atomic_set(&skb->users, 1);
+ atomic_set(&skb->users, 1);
atomic_set(&(skb_shinfo(skb)->dataref), 1);
- skb_shinfo(skb)->nr_frags = 0;
+ skb_shinfo(skb)->nr_frags = 0;
skb_shinfo(skb)->frag_list = NULL;
+out:
return skb;
-
nodata:
skb_head_to_pool(skb);
-nohead:
- return NULL;
+ skb = NULL;
+ goto out;
}
/*
- * Slab constructor for a skb head.
- */
-static inline void skb_headerinit(void *p, kmem_cache_t *cache,
+ * Slab constructor for a skb head.
+ */
+static inline void skb_headerinit(void *p, kmem_cache_t *cache,
unsigned long flags)
{
struct sk_buff *skb = p;
- skb->next = NULL;
- skb->prev = NULL;
- skb->list = NULL;
- skb->sk = NULL;
- skb->stamp.tv_sec=0; /* No idea about time */
- skb->dev = NULL;
- skb->dst = NULL;
+ skb->next = skb->prev = NULL;
+ skb->list = NULL;
+ skb->sk = NULL;
+ skb->stamp.tv_sec = 0; /* No idea about time */
+ skb->dev = NULL;
+ skb->dst = NULL;
memset(skb->cb, 0, sizeof(skb->cb));
- skb->pkt_type = PACKET_HOST; /* Default type */
- skb->ip_summed = 0;
- skb->priority = 0;
- skb->security = 0; /* By default packets are insecure */
- skb->destructor = NULL;
+ skb->pkt_type = PACKET_HOST; /* Default type */
+ skb->ip_summed = 0;
+ skb->priority = 0;
+ skb->security = 0; /* By default packets are insecure */
+ skb->destructor = NULL;
#ifdef CONFIG_NETFILTER
- skb->nfmark = skb->nfcache = 0;
- skb->nfct = NULL;
+ skb->nfmark = skb->nfcache = 0;
+ skb->nfct = NULL;
#ifdef CONFIG_NETFILTER_DEBUG
- skb->nf_debug = 0;
+ skb->nf_debug = 0;
#endif
#endif
#ifdef CONFIG_NET_SCHED
- skb->tc_index = 0;
+ skb->tc_index = 0;
#endif
}
@@ -268,7 +261,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
{
struct sk_buff *list;
- for (list = skb_shinfo(skb)->frag_list; list; list=list->next)
+ for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
skb_get(list);
}
@@ -290,7 +283,7 @@ static void skb_release_data(struct sk_buff *skb)
}
/*
- * Free an skbuff by memory without cleaning the state.
+ * Free an skbuff by memory without cleaning the state.
*/
void kfree_skbmem(struct sk_buff *skb)
{
@@ -299,10 +292,10 @@ void kfree_skbmem(struct sk_buff *skb)
}
/**
- * __kfree_skb - private function
+ * __kfree_skb - private function
* @skb: buffer
*
- * Free an sk_buff. Release anything attached to the buffer.
+ * Free an sk_buff. Release anything attached to the buffer.
* Clean the state. This is an internal helper function. Users should
* always call kfree_skb
*/
@@ -317,10 +310,9 @@ void __kfree_skb(struct sk_buff *skb)
dst_release(skb->dst);
if(skb->destructor) {
- if (in_irq()) {
- printk(KERN_WARNING "Warning: kfree_skb on hard IRQ %p\n",
- NET_CALLER(skb));
- }
+ if (in_irq())
+ printk(KERN_WARNING "Warning: kfree_skb on "
+ "hard IRQ %p\n", NET_CALLER(skb));
skb->destructor(skb);
}
#ifdef CONFIG_NETFILTER
@@ -337,18 +329,17 @@ void __kfree_skb(struct sk_buff *skb)
*
* Duplicate an &sk_buff. The new one is not owned by a socket. Both
* copies share the same packet data but not structure. The new
- * buffer has a reference count of 1. If the allocation fails the
+ * buffer has a reference count of 1. If the allocation fails the
* function returns %NULL otherwise the new buffer is returned.
- *
+ *
* If this function is called from an interrupt gfp_mask() must be
* %GFP_ATOMIC.
*/
struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
{
- struct sk_buff *n;
+ struct sk_buff *n = skb_head_from_pool();
- n = skb_head_from_pool();
if (!n) {
n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
if (!n)
@@ -414,32 +405,32 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
*/
unsigned long offset = new->data - old->data;
- new->list=NULL;
- new->sk=NULL;
- new->dev=old->dev;
- new->priority=old->priority;
- new->protocol=old->protocol;
- new->dst=dst_clone(old->dst);
- new->h.raw=old->h.raw+offset;
- new->nh.raw=old->nh.raw+offset;
- new->mac.raw=old->mac.raw+offset;
+ new->list = NULL;
+ new->sk = NULL;
+ new->dev = old->dev;
+ new->priority = old->priority;
+ new->protocol = old->protocol;
+ new->dst = dst_clone(old->dst);
+ new->h.raw = old->h.raw + offset;
+ new->nh.raw = old->nh.raw + offset;
+ new->mac.raw = old->mac.raw + offset;
memcpy(new->cb, old->cb, sizeof(old->cb));
atomic_set(&new->users, 1);
- new->pkt_type=old->pkt_type;
- new->stamp=old->stamp;
+ new->pkt_type = old->pkt_type;
+ new->stamp = old->stamp;
new->destructor = NULL;
- new->security=old->security;
+ new->security = old->security;
#ifdef CONFIG_NETFILTER
- new->nfmark=old->nfmark;
- new->nfcache=old->nfcache;
- new->nfct=old->nfct;
+ new->nfmark = old->nfmark;
+ new->nfcache = old->nfcache;
+ new->nfct = old->nfct;
nf_conntrack_get(new->nfct);
#ifdef CONFIG_NETFILTER_DEBUG
- new->nf_debug=old->nf_debug;
+ new->nf_debug = old->nf_debug;
#endif
#endif
#ifdef CONFIG_NET_SCHED
- new->tc_index = old->tc_index;
+ new->tc_index = old->tc_index;
#endif
}
@@ -449,7 +440,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
* @gfp_mask: allocation priority
*
* Make a copy of both an &sk_buff and its data. This is used when the
- * caller wishes to modify the data and needs a private copy of the
+ * caller wishes to modify the data and needs a private copy of the
* data to alter. Returns %NULL on failure or the pointer to the buffer
* on success. The returned buffer has a reference count of 1.
*
@@ -459,31 +450,29 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
* function is not recommended for use in circumstances when only
* header is going to be modified. Use pskb_copy() instead.
*/
-
+
struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
{
- struct sk_buff *n;
- int headerlen = skb->data-skb->head;
-
+ int headerlen = skb->data - skb->head;
/*
* Allocate the copy buffer
*/
- n=alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
- if(n==NULL)
+ struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
+ gfp_mask);
+ if (!n)
return NULL;
/* Set the data pointer */
- skb_reserve(n,headerlen);
+ skb_reserve(n, headerlen);
/* Set the tail pointer and length */
- skb_put(n,skb->len);
- n->csum = skb->csum;
+ skb_put(n, skb->len);
+ n->csum = skb->csum;
n->ip_summed = skb->ip_summed;
- if (skb_copy_bits(skb, -headerlen, n->head, headerlen+skb->len))
+ if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
BUG();
copy_skb_header(n, skb);
-
return n;
}
@@ -494,7 +483,7 @@ int skb_linearize(struct sk_buff *skb, int gfp_mask)
u8 *data;
long offset;
int headerlen = skb->data - skb->head;
- int expand = (skb->tail+skb->data_len) - skb->end;
+ int expand = (skb->tail + skb->data_len) - skb->end;
if (skb_shared(skb))
BUG();
@@ -502,14 +491,14 @@ int skb_linearize(struct sk_buff *skb, int gfp_mask)
if (expand <= 0)
expand = 0;
- size = (skb->end - skb->head + expand);
+ size = skb->end - skb->head + expand;
size = SKB_DATA_ALIGN(size);
data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
- if (data == NULL)
+ if (!data)
return -ENOMEM;
/* Copy entire thing */
- if (skb_copy_bits(skb, -headerlen, data, headerlen+skb->len))
+ if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
BUG();
/* Offset between the two in bytes */
@@ -522,22 +511,22 @@ int skb_linearize(struct sk_buff *skb, int gfp_mask)
skb->end = data + size;
/* Set up new pointers */
- skb->h.raw += offset;
- skb->nh.raw += offset;
+ skb->h.raw += offset;
+ skb->nh.raw += offset;
skb->mac.raw += offset;
- skb->tail += offset;
- skb->data += offset;
+ skb->tail += offset;
+ skb->data += offset;
/* Set up shinfo */
atomic_set(&(skb_shinfo(skb)->dataref), 1);
- skb_shinfo(skb)->nr_frags = 0;
+ skb_shinfo(skb)->nr_frags = 0;
skb_shinfo(skb)->frag_list = NULL;
/* We are no longer a clone, even if we were. */
- skb->cloned = 0;
+ skb->cloned = 0;
- skb->tail += skb->data_len;
- skb->data_len = 0;
+ skb->tail += skb->data_len;
+ skb->data_len = 0;
return 0;
}
@@ -557,26 +546,25 @@ int skb_linearize(struct sk_buff *skb, int gfp_mask)
struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
{
- struct sk_buff *n;
-
/*
* Allocate the copy buffer
*/
- n=alloc_skb(skb->end - skb->head, gfp_mask);
- if(n==NULL)
- return NULL;
+ struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
+
+ if (!n)
+ goto out;
/* Set the data pointer */
- skb_reserve(n,skb->data-skb->head);
+ skb_reserve(n, skb->data - skb->head);
/* Set the tail pointer and length */
- skb_put(n,skb_headlen(skb));
+ skb_put(n, skb_headlen(skb));
/* Copy the bytes */
memcpy(n->data, skb->data, n->len);
- n->csum = skb->csum;
+ n->csum = skb->csum;
n->ip_summed = skb->ip_summed;
- n->data_len = skb->data_len;
- n->len = skb->len;
+ n->data_len = skb->data_len;
+ n->len = skb->len;
if (skb_shinfo(skb)->nr_frags) {
int i;
@@ -594,7 +582,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
}
copy_skb_header(n, skb);
-
+out:
return n;
}
@@ -627,15 +615,15 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
size = SKB_DATA_ALIGN(size);
data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
- if (data == NULL)
+ if (!data)
goto nodata;
/* Copy only real data... and, alas, header. This should be
* optimized for the cases when header is void. */
- memcpy(data+nhead, skb->head, skb->tail-skb->head);
- memcpy(data+size, skb->end, sizeof(struct skb_shared_info));
+ memcpy(data + nhead, skb->head, skb->tail - skb->head);
+ memcpy(data + size, skb->end, sizeof(struct skb_shared_info));
- for (i=0; i<skb_shinfo(skb)->nr_frags; i++)
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
get_page(skb_shinfo(skb)->frags[i].page);
if (skb_shinfo(skb)->frag_list)
@@ -643,17 +631,16 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
skb_release_data(skb);
- off = (data+nhead) - skb->head;
+ off = (data + nhead) - skb->head;
- skb->head = data;
- skb->end = data+size;
-
- skb->data += off;
- skb->tail += off;
+ skb->head = data;
+ skb->end = data + size;
+ skb->data += off;
+ skb->tail += off;
skb->mac.raw += off;
- skb->h.raw += off;
- skb->nh.raw += off;
- skb->cloned = 0;
+ skb->h.raw += off;
+ skb->nh.raw += off;
+ skb->cloned = 0;
atomic_set(&skb_shinfo(skb)->dataref, 1);
return 0;
@@ -663,22 +650,22 @@ nodata:
/* Make private copy of skb with writable head and some headroom */
-struct sk_buff *
-skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
struct sk_buff *skb2;
int delta = headroom - skb_headroom(skb);
if (delta <= 0)
- return pskb_copy(skb, GFP_ATOMIC);
-
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2 == NULL ||
- !pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
- return skb2;
-
- kfree_skb(skb2);
- return NULL;
+ skb2 = pskb_copy(skb, GFP_ATOMIC);
+ else {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
+ GFP_ATOMIC)) {
+ kfree_skb(skb2);
+ skb2 = NULL;
+ }
+ }
+ return skb2;
}
@@ -689,10 +676,10 @@ skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
* @newtailroom: new free bytes at tail
* @gfp_mask: allocation priority
*
- * Make a copy of both an &sk_buff and its data and while doing so
+ * Make a copy of both an &sk_buff and its data and while doing so
* allocate additional space.
*
- * This is used when the caller wishes to modify the data and needs a
+ * This is used when the caller wishes to modify the data and needs a
* private copy of the data to alter as well as more space for new fields.
* Returns %NULL on failure or the pointer to the buffer
* on success. The returned buffer has a reference count of 1.
@@ -700,34 +687,28 @@ skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
* You must pass %GFP_ATOMIC as the allocation priority if this function
* is called from an interrupt.
*/
-
-
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
- int newheadroom,
- int newtailroom,
- int gfp_mask)
+ int newheadroom, int newtailroom, int gfp_mask)
{
- struct sk_buff *n;
-
/*
* Allocate the copy buffer
*/
-
- n=alloc_skb(newheadroom + skb->len + newtailroom,
- gfp_mask);
- if(n==NULL)
+ struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
+ gfp_mask);
+ if (!n)
return NULL;
- skb_reserve(n,newheadroom);
+ skb_reserve(n, newheadroom);
/* Set the tail pointer and length */
- skb_put(n,skb->len);
+ skb_put(n, skb->len);
/* Copy the data only. */
if (skb_copy_bits(skb, 0, n->data, skb->len))
BUG();
copy_skb_header(n, skb);
+
return n;
}
@@ -742,7 +723,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
int nfrags = skb_shinfo(skb)->nr_frags;
int i;
- for (i=0; i<nfrags; i++) {
+ for (i = 0; i < nfrags; i++) {
int end = offset + skb_shinfo(skb)->frags[i].size;
if (end > len) {
if (skb_cloned(skb)) {
@@ -755,7 +736,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
put_page(skb_shinfo(skb)->frags[i].page);
skb_shinfo(skb)->nr_frags--;
} else {
- skb_shinfo(skb)->frags[i].size = len-offset;
+ skb_shinfo(skb)->frags[i].size = len - offset;
}
}
offset = end;
@@ -763,17 +744,17 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
if (offset < len) {
skb->data_len -= skb->len - len;
- skb->len = len;
+ skb->len = len;
} else {
if (len <= skb_headlen(skb)) {
- skb->len = len;
+ skb->len = len;
skb->data_len = 0;
- skb->tail = skb->data + len;
+ skb->tail = skb->data + len;
if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
skb_drop_fraglist(skb);
} else {
skb->data_len -= skb->len - len;
- skb->len = len;
+ skb->len = len;
}
}
@@ -781,7 +762,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
}
/**
- * __pskb_pull_tail - advance tail of skb header
+ * __pskb_pull_tail - advance tail of skb header
* @skb: buffer to reallocate
* @delta: number of bytes to advance tail
*
@@ -805,18 +786,17 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
*
* It is pretty complicated. Luckily, it is called only in exceptional cases.
*/
-unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
- int i, k, eat;
-
/* If skb has not enough free space at tail, get new one
* plus 128 bytes for future expansions. If we have enough
* room at tail, reallocate without expansion only if skb is cloned.
*/
- eat = (skb->tail+delta) - skb->end;
+ int i, k, eat = (skb->tail + delta) - skb->end;
if (eat > 0 || skb_cloned(skb)) {
- if (pskb_expand_head(skb, 0, eat>0 ? eat+128 : 0, GFP_ATOMIC))
+ if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
+ GFP_ATOMIC))
return NULL;
}
@@ -826,12 +806,12 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
/* Optimization: no fragments, no reasons to preestimate
* size of pulled pages. Superb.
*/
- if (skb_shinfo(skb)->frag_list == NULL)
+ if (!skb_shinfo(skb)->frag_list)
goto pull_pages;
/* Estimate size of pulled pages. */
eat = delta;
- for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
if (skb_shinfo(skb)->frags[i].size >= eat)
goto pull_pages;
eat -= skb_shinfo(skb)->frags[i].size;
@@ -850,7 +830,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
struct sk_buff *insp = NULL;
do {
- if (list == NULL)
+ if (!list)
BUG();
if (list->len <= eat) {
@@ -864,7 +844,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
if (skb_shared(list)) {
/* Sucks! We need to fork list. :-( */
clone = skb_clone(list, GFP_ATOMIC);
- if (clone == NULL)
+ if (!clone)
return NULL;
insp = list->next;
list = clone;
@@ -873,7 +853,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
* problems. */
insp = list;
}
- if (pskb_pull(list, eat) == NULL) {
+ if (!pskb_pull(list, eat)) {
if (clone)
kfree_skb(clone);
return NULL;
@@ -898,7 +878,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
pull_pages:
eat = delta;
k = 0;
- for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
if (skb_shinfo(skb)->frags[i].size <= eat) {
put_page(skb_shinfo(skb)->frags[i].page);
eat -= skb_shinfo(skb)->frags[i].size;
@@ -914,7 +894,7 @@ pull_pages:
}
skb_shinfo(skb)->nr_frags = k;
- skb->tail += delta;
+ skb->tail += delta;
skb->data_len -= delta;
return skb->tail;
@@ -927,68 +907,70 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
int i, copy;
int start = skb->len - skb->data_len;
- if (offset > (int)skb->len-len)
+ if (offset > (int)skb->len - len)
goto fault;
/* Copy header. */
- if ((copy = start-offset) > 0) {
+ if ((copy = start - offset) > 0) {
if (copy > len)
copy = len;
memcpy(to, skb->data + offset, copy);
if ((len -= copy) == 0)
return 0;
offset += copy;
- to += copy;
+ to += copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset+len);
+ BUG_TRAP(start <= offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
- if ((copy = end-offset) > 0) {
+ if ((copy = end - offset) > 0) {
u8 *vaddr;
if (copy > len)
copy = len;
vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
- memcpy(to, vaddr+skb_shinfo(skb)->frags[i].page_offset+
- offset-start, copy);
+ memcpy(to,
+ vaddr + skb_shinfo(skb)->frags[i].page_offset+
+ offset - start, copy);
kunmap_skb_frag(vaddr);
if ((len -= copy) == 0)
return 0;
offset += copy;
- to += copy;
+ to += copy;
}
start = end;
}
if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *list;
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
- for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+ for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset+len);
+ BUG_TRAP(start <= offset + len);
end = start + list->len;
- if ((copy = end-offset) > 0) {
+ if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- if (skb_copy_bits(list, offset-start, to, copy))
+ if (skb_copy_bits(list, offset - start,
+ to, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
- to += copy;
+ to += copy;
}
start = end;
}
}
- if (len == 0)
+ if (!len)
return 0;
fault:
@@ -997,30 +979,31 @@ fault:
/* Checksum skb data. */
-unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum)
+unsigned int skb_checksum(const struct sk_buff *skb, int offset,
+ int len, unsigned int csum)
{
- int i, copy;
int start = skb->len - skb->data_len;
+ int i, copy = start - offset;
int pos = 0;
/* Checksum header. */
- if ((copy = start-offset) > 0) {
+ if (copy > 0) {
if (copy > len)
copy = len;
- csum = csum_partial(skb->data+offset, copy, csum);
+ csum = csum_partial(skb->data + offset, copy, csum);
if ((len -= copy) == 0)
return csum;
offset += copy;
- pos = copy;
+ pos = copy;
}
- for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset+len);
+ BUG_TRAP(start <= offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
- if ((copy = end-offset) > 0) {
+ if ((copy = end - offset) > 0) {
unsigned int csum2;
u8 *vaddr;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1029,74 +1012,76 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsign
copy = len;
vaddr = kmap_skb_frag(frag);
csum2 = csum_partial(vaddr + frag->page_offset +
- offset-start, copy, 0);
+ offset - start, copy, 0);
kunmap_skb_frag(vaddr);
csum = csum_block_add(csum, csum2, pos);
if (!(len -= copy))
return csum;
offset += copy;
- pos += copy;
+ pos += copy;
}
start = end;
}
if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *list;
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
- for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+ for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset+len);
+ BUG_TRAP(start <= offset + len);
end = start + list->len;
- if ((copy = end-offset) > 0) {
+ if ((copy = end - offset) > 0) {
unsigned int csum2;
if (copy > len)
copy = len;
- csum2 = skb_checksum(list, offset-start, copy, 0);
+ csum2 = skb_checksum(list, offset - start,
+ copy, 0);
csum = csum_block_add(csum, csum2, pos);
if ((len -= copy) == 0)
return csum;
offset += copy;
- pos += copy;
+ pos += copy;
}
start = end;
}
}
- if (len == 0)
- return csum;
+ if (len)
+ BUG();
- BUG();
return csum;
}
/* Both of above in one bottle. */
-unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum)
+unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
+ u8 *to, int len, unsigned int csum)
{
- int i, copy;
int start = skb->len - skb->data_len;
+ int i, copy = start - offset;
int pos = 0;
/* Copy header. */
- if ((copy = start-offset) > 0) {
+ if (copy > 0) {
if (copy > len)
copy = len;
- csum = csum_partial_copy_nocheck(skb->data+offset, to, copy, csum);
+ csum = csum_partial_copy_nocheck(skb->data + offset, to,
+ copy, csum);
if ((len -= copy) == 0)
return csum;
offset += copy;
- to += copy;
- pos = copy;
+ to += copy;
+ pos = copy;
}
- for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset+len);
+ BUG_TRAP(start <= offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
- if ((copy = end-offset) > 0) {
+ if ((copy = end - offset) > 0) {
unsigned int csum2;
u8 *vaddr;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1104,47 +1089,49 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *t
if (copy > len)
copy = len;
vaddr = kmap_skb_frag(frag);
- csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset +
- offset-start, to, copy, 0);
+ csum2 = csum_partial_copy_nocheck(vaddr +
+ frag->page_offset +
+ offset - start, to,
+ copy, 0);
kunmap_skb_frag(vaddr);
csum = csum_block_add(csum, csum2, pos);
if (!(len -= copy))
return csum;
offset += copy;
- to += copy;
- pos += copy;
+ to += copy;
+ pos += copy;
}
start = end;
}
if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *list;
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
- for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+ for (; list; list = list->next) {
unsigned int csum2;
int end;
- BUG_TRAP(start <= offset+len);
+ BUG_TRAP(start <= offset + len);
end = start + list->len;
- if ((copy = end-offset) > 0) {
+ if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- csum2 = skb_copy_and_csum_bits(list, offset-start, to, copy, 0);
+ csum2 = skb_copy_and_csum_bits(list,
+ offset - start,
+ to, copy, 0);
csum = csum_block_add(csum, csum2, pos);
if ((len -= copy) == 0)
return csum;
offset += copy;
- to += copy;
- pos += copy;
+ to += copy;
+ pos += copy;
}
start = end;
}
}
- if (len == 0)
- return csum;
-
- BUG();
+ if (len)
+ BUG();
return csum;
}
@@ -1165,8 +1152,8 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
csum = 0;
if (csstart != skb->len)
- csum = skb_copy_and_csum_bits(skb, csstart, to+csstart,
- skb->len-csstart, 0);
+ csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
+ skb->len - csstart, 0);
if (skb->ip_summed == CHECKSUM_HW) {
long csstuff = csstart + skb->csum;
@@ -1176,7 +1163,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
}
#if 0
-/*
+/*
* Tune the memory allocator for a new MTU size.
*/
void skb_add_mtu(int mtu)
@@ -1200,6 +1187,6 @@ void __init skb_init(void)
if (!skbuff_head_cache)
panic("cannot create skbuff cache");
- for (i=0; i<NR_CPUS; i++)
+ for (i = 0; i < NR_CPUS; i++)
skb_queue_head_init(&skb_head_pool[i].list);
}
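
As a closing illustration, the allocation helpers cleaned up in this patch are
typically combined as below in a driver receive path; a minimal sketch for the
2.4/2.5-era API, with a hypothetical example_rx() wrapper (eth_type_trans()
and netif_rx() come from elsewhere in the networking core).

static void example_rx(struct net_device *dev, u8 *buf, unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (!skb)
		return;			/* drop the frame on allocation failure */
	skb_reserve(skb, 2);		/* 14-byte Ethernet header => aligned IP header */
	memcpy(skb_put(skb, len), buf, len);
	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);			/* hand the buffer to the network stack */
}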