| author | Benjamin LaHaise <bcrl@redhat.com> | 2002-10-08 03:47:13 -0700 |
|---|---|---|
| committer | David S. Miller <davem@nuts.ninka.net> | 2002-10-08 03:47:13 -0700 |
| commit | 3525bea4ce3bcd70f8072bf67e6ac9f76e201c06 (patch) | |
| tree | ba4f73991a9c44095930b72464dfb6628734172b /include | |
| parent | 90e464a4f8660a1de4be9c4f73ba6296625219e4 (diff) | |
[AIO]: First stage of AIO infrastructure for networking.
- Change the socket lock's users count to an owner pointer (a userspace sketch of the new scheme follows below).
- Add sock_owned_by_user().
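
The core idea of the patch: the per-socket lock's `users` count becomes an `owner` pointer, so an in-flight asynchronous request (a `struct sock_iocb`) can itself be recorded as the lock holder, while a synchronous `lock_sock()` caller is marked with the sentinel `(void *)1`. Below is a minimal userspace model of that ownership scheme, assuming pthread mutex/cond variables stand in for the kernel's spinlock and waitqueue; `sk_lock_model`, `model_lock_sock()`, and `model_release_sock()` are names invented for this sketch, not kernel API.

```c
/*
 * Minimal userspace model of the owner-pointer scheme -- NOT kernel code.
 * pthread primitives stand in for the kernel's spinlock and waitqueue;
 * all sk_lock_model/model_* names are illustrative only.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct sock_iocb;                        /* opaque async-request handle */

struct sk_lock_model {
	pthread_mutex_t slock;           /* ~ socket_lock_t.slock */
	pthread_cond_t wq;               /* ~ socket_lock_t.wq */
	struct sock_iocb *owner;         /* NULL = unlocked,
	                                    (void *)1 = synchronous user,
	                                    otherwise the owning iocb */
};

/* Mirrors the new sock_owned_by_user(): any non-NULL owner counts. */
static int model_sock_owned_by_user(struct sk_lock_model *lk)
{
	return lk->owner != NULL;
}

/* Mirrors lock_sock(): sleep while someone owns the socket, then mark
 * ownership with the (void *)1 sentinel used for synchronous callers. */
static void model_lock_sock(struct sk_lock_model *lk)
{
	pthread_mutex_lock(&lk->slock);
	while (lk->owner != NULL)        /* ~ __lock_sock() sleep loop */
		pthread_cond_wait(&lk->wq, &lk->slock);
	lk->owner = (struct sock_iocb *)1;
	pthread_mutex_unlock(&lk->slock);
}

/* Mirrors release_sock(): clear the owner and wake any waiter. */
static void model_release_sock(struct sk_lock_model *lk)
{
	pthread_mutex_lock(&lk->slock);
	lk->owner = NULL;
	pthread_cond_broadcast(&lk->wq); /* ~ wake_up(&lock.wq) */
	pthread_mutex_unlock(&lk->slock);
}

int main(void)
{
	struct sk_lock_model lk = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, NULL
	};

	model_lock_sock(&lk);
	printf("owned: %d\n", model_sock_owned_by_user(&lk)); /* owned: 1 */
	model_release_sock(&lk);
	printf("owned: %d\n", model_sock_owned_by_user(&lk)); /* owned: 0 */
	return 0;
}
```

A pointer also subsumes the old count: a boolean "is anyone in here?" test still works (hence `sock_owned_by_user()`), while additionally identifying *which* async request holds the lock, which later stages of the AIO work can use to resume it.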
Diffstat (limited to 'include')
| -rw-r--r-- | include/net/sock.h | 13 |
| -rw-r--r-- | include/net/tcp.h | 2 |

2 files changed, 9 insertions(+), 6 deletions(-)
```diff
diff --git a/include/net/sock.h b/include/net/sock.h
index 51ce0de50c43..5bc26816bcd5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -70,15 +70,16 @@
  * between user contexts and software interrupt processing, whereas the
  * mini-semaphore synchronizes multiple users amongst themselves.
  */
+struct sock_iocb;
 typedef struct {
 	spinlock_t		slock;
-	unsigned int		users;
+	struct sock_iocb	*owner;
 	wait_queue_head_t	wq;
 } socket_lock_t;
 
 #define sock_lock_init(__sk) \
 do {	spin_lock_init(&((__sk)->lock.slock)); \
-	(__sk)->lock.users = 0; \
+	(__sk)->lock.owner = NULL; \
 	init_waitqueue_head(&((__sk)->lock.wq)); \
 } while(0)
 
@@ -306,14 +307,16 @@ static __inline__ void sock_prot_dec_use(struct proto *prot)
  * Since ~2.3.5 it is also exclusive sleep lock serializing
  * accesses from user process context.
  */
+extern int __async_lock_sock(struct sock_iocb *, struct sock *, struct list_head *);
 extern void __lock_sock(struct sock *sk);
 extern void __release_sock(struct sock *sk);
 
+#define sock_owned_by_user(sk)	(NULL != (sk)->lock.owner)
 #define lock_sock(__sk) \
 do {	might_sleep(); \
 	spin_lock_bh(&((__sk)->lock.slock)); \
-	if ((__sk)->lock.users != 0) \
+	if ((__sk)->lock.owner != NULL) \
 		__lock_sock(__sk); \
-	(__sk)->lock.users = 1; \
+	(__sk)->lock.owner = (void *)1; \
 	spin_unlock_bh(&((__sk)->lock.slock)); \
 } while(0)
@@ -321,7 +324,7 @@ do {	might_sleep(); \
 do {	spin_lock_bh(&((__sk)->lock.slock)); \
 	if ((__sk)->backlog.tail != NULL) \
 		__release_sock(__sk); \
-	(__sk)->lock.users = 0; \
+	(__sk)->lock.owner = NULL; \
 	if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
 	spin_unlock_bh(&((__sk)->lock.slock)); \
 } while(0)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0dd375b7fd4b..0a58d80e7794 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1348,7 +1348,7 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 		if (tp->ucopy.memory > sk->rcvbuf) {
 			struct sk_buff *skb1;
 
-			if (sk->lock.users) BUG();
+			if (sock_owned_by_user(sk)) BUG();
 
 			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
 				sk->backlog_rcv(sk, skb1);
```
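
A note on the tcp.h hunk: `tcp_prequeue()` runs in softirq context and drains the prequeue only when no user context owns the socket, so the old `sk->lock.users` check translates directly into `sock_owned_by_user(sk)`. The toy program below illustrates the invariant the `BUG()` enforces; `deliver()`, `sock_model`, and the backlog/fast-path split are stand-ins invented for this sketch, not kernel code.

```c
/*
 * Toy model of the invariant behind the tcp_prequeue() change -- NOT
 * kernel code. deliver() and sock_model are illustrative only.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct sock_model {
	bool owned_by_user;      /* models sock_owned_by_user(sk) */
};

/* Softirq-side delivery: while a user context owns the socket, packets
 * must be deferred to the backlog; the prequeue fast path may only run
 * on an unowned socket, which is what the patch's BUG() asserts. */
static void deliver(struct sock_model *sk, int pkt)
{
	if (sk->owned_by_user) {
		printf("pkt %d -> backlog (socket owned)\n", pkt);
		return;
	}
	assert(!sk->owned_by_user); /* the BUG() condition, inverted */
	printf("pkt %d -> prequeue fast path\n", pkt);
}

int main(void)
{
	struct sock_model sk = { .owned_by_user = true };

	deliver(&sk, 1);            /* deferred: a user holds the lock */
	sk.owned_by_user = false;   /* as if release_sock() ran */
	deliver(&sk, 2);            /* safe to process directly */
	return 0;
}
```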
