| author | Arnaldo Carvalho de Melo <acme@conectiva.com.br> | 2003-06-04 16:00:32 -0300 |
|---|---|---|
| committer | Arnaldo Carvalho de Melo <acme@conectiva.com.br> | 2003-06-04 16:00:32 -0300 |
| commit | 63413da5d31d99ba8d89cf9d9ff997f93b49443d | |
| tree | 4add196b24e554f140663a7bd92260a7fd9ebdc4 /net/unix/garbage.c | |
| parent | fbf47415732b00ff5578d55c6be779a109a01e1f | |
o net: create struct sock_common and use in struct sock & tcp_tw_bucket
With this, the data dependency is reduced to just making sure that the first
member of both struct sock and struct tcp_tw_bucket is a struct sock_common.
This also makes it easier to grep for struct sock and struct tcp_tw_bucket usage
in the tree, as all the members of those structs are now prefixed with sk_ and
tw_ respectively, like struct inode (i_), struct block_device (bd_), etc.
Checked the namespace with make tags/ctags; there was just one collision between
the macros for the struct sock members and a wanrouter struct, fixed with
s/sk_state/state_sk/g in the wanrouter struct.
Also checked whether the names of the members in both structs collided with any
macro; none were found.
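To make the layout concrete, here is a minimal sketch of the pattern described
above. The member names and the exact set of #define aliases are illustrative
assumptions, not the definitions added by this patch: a shared struct sock_common
sits as the mandatory first member of both structs, and per-struct macros keep the
prefixed names (sk_*, tw_*) readable at the call sites.

```c
/*
 * Minimal sketch of the sock_common pattern; field names are
 * illustrative, not the kernel's exact set.
 */
struct sock_common {
	unsigned short		skc_family;		/* address family */
	volatile unsigned char	skc_state;		/* connection state */
	int			skc_bound_dev_if;	/* bound device index, if any */
};

struct sock {
	/* MUST be the first member: code relies on the shared layout */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
	struct socket		*sk_socket;	/* full-socket members, sk_ prefixed */
	/* ... */
};

struct tcp_tw_bucket {
	/* MUST be the first member: same layout as the start of struct sock */
	struct sock_common	__tw_common;
#define tw_family		__tw_common.skc_family
#define tw_state		__tw_common.skc_state
#define tw_bound_dev_if		__tw_common.skc_bound_dev_if
	int			tw_timeout;	/* timewait-only members, tw_ prefixed */
	/* ... */
};
```

With the aliases in place, call sites such as the garbage.c hunks below can keep
writing s->sk_state or x->sk_receive_queue regardless of whether the member lives
in the common block or in the socket-specific part.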
Diffstat (limited to 'net/unix/garbage.c')
| -rw-r--r-- | net/unix/garbage.c | 27 |
1 file changed, 13 insertions, 14 deletions
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 8dd0d6f2e12b..86c6998d35ae 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -219,8 +219,8 @@ void unix_gc(void)
 		 *	negative inflight counter to close race window.
 		 *	It is trick of course and dirty one.
 		 */
-		if(s->socket && s->socket->file)
-			open_count = file_count(s->socket->file);
+		if (s->sk_socket && s->sk_socket->file)
+			open_count = file_count(s->sk_socket->file);
 		if (open_count > atomic_read(&unix_sk(s)->inflight))
 			maybe_unmark_and_push(s);
 	}
@@ -234,15 +234,14 @@ void unix_gc(void)
 		unix_socket *x = pop_stack();
 		unix_socket *sk;
 
-		spin_lock(&x->receive_queue.lock);
-		skb=skb_peek(&x->receive_queue);
+		spin_lock(&x->sk_receive_queue.lock);
+		skb = skb_peek(&x->sk_receive_queue);
 
 		/*
 		 *	Loop through all but first born
 		 */
 
-		while(skb && skb != (struct sk_buff *)&x->receive_queue)
-		{
+		while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) {
 			/*
 			 *	Do we have file descriptors ?
 			 */
@@ -266,12 +265,11 @@ void unix_gc(void)
 				}
 			}
 			/* We have to scan not-yet-accepted ones too */
-			if (x->state == TCP_LISTEN) {
+			if (x->sk_state == TCP_LISTEN)
 				maybe_unmark_and_push(skb->sk);
-			}
 			skb=skb->next;
 		}
-		spin_unlock(&x->receive_queue.lock);
+		spin_unlock(&x->sk_receive_queue.lock);
 		sock_put(x);
 	}
 
@@ -283,10 +281,11 @@ void unix_gc(void)
 
 		if (u->gc_tree == GC_ORPHAN) {
 			struct sk_buff *nextsk;
-			spin_lock(&s->receive_queue.lock);
-			skb=skb_peek(&s->receive_queue);
-			while(skb && skb != (struct sk_buff *)&s->receive_queue)
-			{
+
+			spin_lock(&s->sk_receive_queue.lock);
+			skb = skb_peek(&s->sk_receive_queue);
+			while (skb &&
+			       skb != (struct sk_buff *)&s->sk_receive_queue) {
 				nextsk=skb->next;
 				/*
 				 *	Do we have file descriptors ?
@@ -298,7 +297,7 @@ void unix_gc(void)
 				}
 				skb=nextsk;
 			}
-			spin_unlock(&s->receive_queue.lock);
+			spin_unlock(&s->sk_receive_queue.lock);
 		}
 		u->gc_tree = GC_ORPHAN;
 	}
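As a sketch of the reduced data dependency mentioned in the commit message, the
helper below is hypothetical and builds on the illustrative definitions above; it
is not code from this patch. Because the common block is the first member of both
structs, code that only needs the shared fields can view either object through the
same struct sock_common pointer.

```c
/*
 * Hypothetical helper: accepts either a struct sock * or a
 * struct tcp_tw_bucket *.  The cast is valid only because both
 * structs begin with a struct sock_common, which is exactly the
 * layout rule the commit message relies on.
 */
static inline unsigned char sock_or_tw_state(const void *obj)
{
	const struct sock_common *common = obj;

	return common->skc_state;
}
```

A lookup path could then compare sock_or_tw_state(sk) and sock_or_tw_state(tw)
without caring which of the two object types it is holding.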
