-rw-r--r--   include/net/sock.h       | 13
-rw-r--r--   include/net/tcp.h        |  2
-rw-r--r--   net/core/sock.c          |  2
-rw-r--r--   net/decnet/dn_nsp_in.c   |  4
-rw-r--r--   net/decnet/dn_timer.c    |  4
-rw-r--r--   net/ipv4/tcp.c           |  4
-rw-r--r--   net/ipv4/tcp_input.c     |  8
-rw-r--r--   net/ipv4/tcp_ipv4.c      | 14
-rw-r--r--   net/ipv4/tcp_minisocks.c |  2
-rw-r--r--   net/ipv4/tcp_timer.c     |  6
-rw-r--r--   net/ipv6/tcp_ipv6.c      | 12
-rw-r--r--   net/llc/llc_c_ac.c       |  2
-rw-r--r--   net/llc/llc_mac.c        |  2
-rw-r--r--   net/llc/llc_proc.c       |  2
-rw-r--r--   net/x25/x25_dev.c        |  2
-rw-r--r--   net/x25/x25_timer.c      |  4
16 files changed, 43 insertions, 40 deletions
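
The change is almost entirely mechanical: socket_lock_t drops its "users" count in favour of an "owner" pointer (so the sock_iocb/async path can record who holds the socket), and every open-coded test of sk->lock.users becomes the new sock_owned_by_user() helper. The protocol timers touched below (dn_timer.c, tcp_timer.c, x25_timer.c) all share the same deferral idiom; here is a minimal sketch of that idiom, where my_proto_timer and my_do_timeout are illustrative names and not functions from this patch:

#include <linux/timer.h>
#include <net/sock.h>

static void my_do_timeout(struct sock *sk);    /* hypothetical protocol work */

/* Sketch only: a bottom-half timer that backs off while a process owns the
 * socket.  bh_lock_sock() takes just the spinlock and does not set
 * lock.owner, so sock_owned_by_user() still reports the lock_sock() holder. */
static void my_proto_timer(unsigned long data)
{
    struct sock *sk = (struct sock *)data;

    bh_lock_sock(sk);
    if (sock_owned_by_user(sk)) {
        /* A user context holds the socket via lock_sock(); retry
         * shortly rather than changing protocol state under it. */
        sk->timer.expires = jiffies + HZ / 20;
        add_timer(&sk->timer);
        goto out;
    }

    my_do_timeout(sk);
out:
    bh_unlock_sock(sk);
    sock_put(sk);
}
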
diff --git a/include/net/sock.h b/include/net/sock.h
index 51ce0de50c43..5bc26816bcd5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -70,15 +70,16 @@
  * between user contexts and software interrupt processing, whereas the
  * mini-semaphore synchronizes multiple users amongst themselves.
  */
+struct sock_iocb;
 typedef struct {
     spinlock_t        slock;
-    unsigned int      users;
+    struct sock_iocb  *owner;
     wait_queue_head_t wq;
 } socket_lock_t;

 #define sock_lock_init(__sk) \
 do { spin_lock_init(&((__sk)->lock.slock)); \
-     (__sk)->lock.users = 0; \
+     (__sk)->lock.owner = NULL; \
      init_waitqueue_head(&((__sk)->lock.wq)); \
 } while(0)

@@ -306,14 +307,16 @@ static __inline__ void sock_prot_dec_use(struct proto *prot)
  * Since ~2.3.5 it is also exclusive sleep lock serializing
  * accesses from user process context.
  */
+extern int __async_lock_sock(struct sock_iocb *, struct sock *, struct list_head *);
 extern void __lock_sock(struct sock *sk);
 extern void __release_sock(struct sock *sk);
+#define sock_owned_by_user(sk) (NULL != (sk)->lock.owner)

 #define lock_sock(__sk) \
 do { might_sleep(); \
      spin_lock_bh(&((__sk)->lock.slock)); \
-     if ((__sk)->lock.users != 0) \
+     if ((__sk)->lock.owner != NULL) \
          __lock_sock(__sk); \
-     (__sk)->lock.users = 1; \
+     (__sk)->lock.owner = (void *)1; \
      spin_unlock_bh(&((__sk)->lock.slock)); \
 } while(0)
@@ -321,7 +324,7 @@ do { might_sleep(); \
 do { spin_lock_bh(&((__sk)->lock.slock)); \
      if ((__sk)->backlog.tail != NULL) \
          __release_sock(__sk); \
-     (__sk)->lock.users = 0; \
+     (__sk)->lock.owner = NULL; \
      if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
      spin_unlock_bh(&((__sk)->lock.slock)); \
 } while(0)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0dd375b7fd4b..0a58d80e7794 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1348,7 +1348,7 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
     if (tp->ucopy.memory > sk->rcvbuf) {
         struct sk_buff *skb1;

-        if (sk->lock.users) BUG();
+        if (sock_owned_by_user(sk)) BUG();

         while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
             sk->backlog_rcv(sk, skb1);
diff --git a/net/core/sock.c b/net/core/sock.c
index 07abf8bf43f4..5da5bf210cba 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -861,7 +861,7 @@ void __lock_sock(struct sock *sk)
         spin_unlock_bh(&sk->lock.slock);
         schedule();
         spin_lock_bh(&sk->lock.slock);
-        if(!sk->lock.users)
+        if(!sock_owned_by_user(sk))
             break;
     }
     current->state = TASK_RUNNING;
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 115febdf2486..611f3952689d 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -800,8 +800,8 @@ got_it:
             printk(KERN_DEBUG "NSP: 0x%02x 0x%02x 0x%04x 0x%04x %d\n",
                    (int)cb->rt_flags, (int)cb->nsp_flags,
                    (int)cb->src_port, (int)cb->dst_port,
-                   (int)sk->lock.users);
-        if (sk->lock.users == 0)
+                   (int)sock_owned_by_user(sk));
+        if (!sock_owned_by_user(sk))
             ret = dn_nsp_backlog_rcv(sk, skb);
         else
             sk_add_backlog(sk, skb);
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index 41a4aa6025e2..7a7a0ad22bd6 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -57,7 +57,7 @@ static void dn_slow_timer(unsigned long arg)
     sock_hold(sk);
     bh_lock_sock(sk);

-    if (sk->lock.users != 0) {
+    if (sock_owned_by_user(sk)) {
         sk->timer.expires = jiffies + HZ / 10;
         add_timer(&sk->timer);
         goto out;
@@ -115,7 +115,7 @@ static void dn_fast_timer(unsigned long arg)
     struct dn_scp *scp = DN_SK(sk);

     bh_lock_sock(sk);
-    if (sk->lock.users != 0) {
+    if (sock_owned_by_user(sk)) {
         scp->delack_timer.expires = jiffies + HZ / 20;
         add_timer(&scp->delack_timer);
         goto out;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 61f7c2d949a2..9473c588dd86 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -623,7 +623,7 @@ static void tcp_listen_stop (struct sock *sk)

         local_bh_disable();
         bh_lock_sock(child);
-        BUG_TRAP(!child->lock.users);
+        BUG_TRAP(!sock_owned_by_user(child));
         sock_hold(child);

         tcp_disconnect(child, O_NONBLOCK);
@@ -2019,7 +2019,7 @@ adjudge_to_death:
      */
     local_bh_disable();
     bh_lock_sock(sk);
-    BUG_TRAP(!sk->lock.users);
+    BUG_TRAP(!sock_owned_by_user(sk));

     sock_hold(sk);
     sock_orphan(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3f58b7ce0a54..56f0e451909b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2570,7 +2570,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
         /*  Ok. In sequence. In window. */
         if (tp->ucopy.task == current &&
             tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
-            sk->lock.users && !tp->urg_data) {
+            sock_owned_by_user(sk) && !tp->urg_data) {
             int chunk = min_t(unsigned int, skb->len,
                               tp->ucopy.len);

@@ -3190,7 +3190,7 @@ static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
 {
     int result;

-    if (sk->lock.users) {
+    if (sock_owned_by_user(sk)) {
         local_bh_enable();
         result = __tcp_checksum_complete(skb);
         local_bh_disable();
@@ -3324,7 +3324,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
             if (tp->ucopy.task == current &&
                 tp->copied_seq == tp->rcv_nxt &&
                 len - tcp_header_len <= tp->ucopy.len &&
-                sk->lock.users) {
+                sock_owned_by_user(sk)) {
                 __set_current_state(TASK_RUNNING);

                 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
@@ -3864,7 +3864,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                 tmo = tcp_fin_time(tp);
                 if (tmo > TCP_TIMEWAIT_LEN) {
                     tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
-                } else if (th->fin || sk->lock.users) {
+                } else if (th->fin || sock_owned_by_user(sk)) {
                     /* Bad case. We could lose such FIN otherwise.
                      * It is not a big problem, but it looks confusing
                      * and not so rare event. We still can lose it now,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 994ed2410d1a..754d615c1aa5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1003,7 +1003,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
     /* If too many ICMPs get dropped on busy
      * servers this needs to be solved differently.
      */
-    if (sk->lock.users)
+    if (sock_owned_by_user(sk))
         NET_INC_STATS_BH(LockDroppedIcmps);

     if (sk->state == TCP_CLOSE)
@@ -1022,7 +1022,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
         /* This is deprecated, but if someone generated it,
          * we have no reasons to ignore it.
          */
-        if (!sk->lock.users)
+        if (!sock_owned_by_user(sk))
             tcp_enter_cwr(tp);
         goto out;
     case ICMP_PARAMETERPROB:
@@ -1033,7 +1033,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
             goto out;

         if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
-            if (!sk->lock.users)
+            if (!sock_owned_by_user(sk))
                 do_pmtu_discovery(sk, iph, info);
             goto out;
         }
@@ -1050,7 +1050,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
     switch (sk->state) {
         struct open_request *req, **prev;
     case TCP_LISTEN:
-        if (sk->lock.users)
+        if (sock_owned_by_user(sk))
             goto out;

         req = tcp_v4_search_req(tp, &prev, th->dest,
@@ -1081,7 +1081,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
     case TCP_SYN_RECV:  /* Cannot happen.
                            It can f.e. if SYNs crossed.
                          */
-        if (!sk->lock.users) {
+        if (!sock_owned_by_user(sk)) {
             TCP_INC_STATS_BH(TcpAttemptFails);
             sk->err = err;

@@ -1111,7 +1111,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
      */
     inet = inet_sk(sk);
-    if (!sk->lock.users && inet->recverr) {
+    if (!sock_owned_by_user(sk) && inet->recverr) {
         sk->err = err;
         sk->error_report(sk);
     } else { /* Only an error on timeout */
@@ -1778,7 +1778,7 @@ process:
     bh_lock_sock(sk);
     ret = 0;
-    if (!sk->lock.users) {
+    if (!sock_owned_by_user(sk)) {
         if (!tcp_prequeue(sk, skb))
             ret = tcp_v4_do_rcv(sk, skb);
     } else
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index a582e9cb80c8..5c6e42952d46 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -989,7 +989,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
     int ret = 0;
     int state = child->state;

-    if (child->lock.users == 0) {
+    if (!sock_owned_by_user(child)) {
         ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

         /* Wakeup parent, send SIGIO */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 3901d11cac15..d9a4e91003ca 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -213,7 +213,7 @@ static void tcp_delack_timer(unsigned long data)
     struct tcp_opt *tp = tcp_sk(sk);

     bh_lock_sock(sk);
-    if (sk->lock.users) {
+    if (sock_owned_by_user(sk)) {
         /* Try again later. */
         tp->ack.blocked = 1;
         NET_INC_STATS_BH(DelayedACKLocked);
@@ -421,7 +421,7 @@ static void tcp_write_timer(unsigned long data)
     int event;

     bh_lock_sock(sk);
-    if (sk->lock.users) {
+    if (sock_owned_by_user(sk)) {
         /* Try again later */
         if (!mod_timer(&tp->retransmit_timer, jiffies + (HZ/20)))
             sock_hold(sk);
@@ -581,7 +581,7 @@ static void tcp_keepalive_timer (unsigned long data)

     /* Only process if socket is not in use. */
     bh_lock_sock(sk);
-    if (sk->lock.users) {
+    if (sock_owned_by_user(sk)) {
         /* Try again later. */
         tcp_reset_keepalive_timer (sk, HZ/20);
         goto out;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e84ea79e5f3c..7ce196ec2509 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -731,7 +731,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
     }

     bh_lock_sock(sk);
-    if (sk->lock.users)
+    if (sock_owned_by_user(sk))
         NET_INC_STATS_BH(LockDroppedIcmps);

     if (sk->state == TCP_CLOSE)
@@ -749,7 +749,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
     if (type == ICMPV6_PKT_TOOBIG) {
         struct dst_entry *dst = NULL;

-        if (sk->lock.users)
+        if (sock_owned_by_user(sk))
             goto out;
         if ((1<<sk->state)&(TCPF_LISTEN|TCPF_CLOSE))
             goto out;
@@ -792,7 +792,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
     switch (sk->state) {
         struct open_request *req, **prev;
     case TCP_LISTEN:
-        if (sk->lock.users)
+        if (sock_owned_by_user(sk))
             goto out;

         req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr,
@@ -816,7 +816,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
     case TCP_SYN_SENT:
     case TCP_SYN_RECV:  /* Cannot happen.
                            It can, it SYNs are crossed. --ANK */
-        if (sk->lock.users == 0) {
+        if (!sock_owned_by_user(sk)) {
             TCP_INC_STATS_BH(TcpAttemptFails);
             sk->err = err;
             sk->error_report(sk);  /* Wake people up to see the error (see connect in sock.c) */
@@ -828,7 +828,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
         goto out;
     }

-    if (sk->lock.users == 0 && np->recverr) {
+    if (!sock_owned_by_user(sk) && np->recverr) {
         sk->err = err;
         sk->error_report(sk);
     } else {
@@ -1622,7 +1622,7 @@ process:
     bh_lock_sock(sk);
     ret = 0;
-    if (!sk->lock.users) {
+    if (!sock_owned_by_user(sk)) {
         if (!tcp_prequeue(sk, skb))
             ret = tcp_v6_do_rcv(sk, skb);
     } else
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 2a96a6a6178d..634bf598da72 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1489,7 +1489,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
                __FUNCTION__);
         kfree_skb(skb);
     } else {
-        if (!sk->lock.users)
+        if (!sock_owned_by_user(sk))
             llc_conn_state_process(sk, skb);
         else {
             llc_set_backlog_type(skb, LLC_EVENT);
diff --git a/net/llc/llc_mac.c b/net/llc/llc_mac.c
index 3f53410df740..74cadcee49d0 100644
--- a/net/llc/llc_mac.c
+++ b/net/llc/llc_mac.c
@@ -140,7 +140,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
     } else
         skb->sk = sk;
     bh_lock_sock(sk);
-    if (!sk->lock.users) {
+    if (!sock_owned_by_user(sk)) {
         /* rc = */ llc_conn_rcv(sk, skb);
         rc = 0;
     } else {
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index d4f1f73f2245..8f30da83b52f 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -182,7 +182,7 @@ static int llc_seq_core_show(struct seq_file *seq, void *v)
            timer_pending(&llc->pf_cycle_timer.timer),
            timer_pending(&llc->rej_sent_timer.timer),
            timer_pending(&llc->busy_state_timer.timer),
-           !!sk->backlog.tail, sk->lock.users);
+           !!sk->backlog.tail, sock_owned_by_user(sk));
 out:
     return 0;
 }
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index aa5137f9a5ed..17ef400a4b98 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -70,7 +70,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
         skb->h.raw = skb->data;

         bh_lock_sock(sk);
-        if (!sk->lock.users) {
+        if (!sock_owned_by_user(sk)) {
             queued = x25_process_rx_frame(sk, skb);
         } else {
             sk_add_backlog(sk, skb);
diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c
index 71f67c0a94e3..957f7c7b8b0c 100644
--- a/net/x25/x25_timer.c
+++ b/net/x25/x25_timer.c
@@ -131,7 +131,7 @@ static void x25_heartbeat_expiry(unsigned long param)
     struct sock *sk = (struct sock *)param;

     bh_lock_sock(sk);
-    if (sk->lock.users) /* can currently only occur in state 3 */
+    if (sock_owned_by_user(sk)) /* can currently only occur in state 3 */
         goto restart_heartbeat;

     switch (x25_sk(sk)->state) {
@@ -193,7 +193,7 @@ static void x25_timer_expiry(unsigned long param)
     struct sock *sk = (struct sock *)param;

     bh_lock_sock(sk);
-    if (sk->lock.users) { /* can currently only occur in state 3 */
+    if (sock_owned_by_user(sk)) { /* can currently only occur in state 3 */
         if (x25_sk(sk)->state == X25_STATE_3)
             x25_start_t2timer(sk);
     } else
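
The receive-side conversions (dn_nsp_in.c, llc_mac.c, x25_dev.c and the TCP rcv paths) keep the existing backlog contract: a softirq may only touch the socket when no user context owns it, otherwise the skb is queued and release_sock(), which clears lock.owner and drains the backlog via __release_sock(), processes it later. A sketch of that pattern under the new helper, with my_proto_rcv and my_backlog_rcv as stand-ins for a protocol's handlers rather than code from this patch:

#include <linux/skbuff.h>
#include <net/sock.h>

static int my_backlog_rcv(struct sock *sk, struct sk_buff *skb);  /* hypothetical */

/* Sketch only: deliver now if the socket is free, otherwise park the skb on
 * sk->backlog so the lock_sock() owner runs it when it calls release_sock(). */
static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
    int rc = 0;

    bh_lock_sock(sk);
    if (!sock_owned_by_user(sk))
        rc = my_backlog_rcv(sk, skb);   /* process in softirq context */
    else
        sk_add_backlog(sk, skb);        /* deferred to the socket owner */
    bh_unlock_sock(sk);

    return rc;
}

This mirrors what x25_receive_data() and llc_rcv() already do; the patch only changes how the ownership test is spelled, not the locking protocol itself.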
