| author | Matt Mackall <mpm@selenic.com> | 2005-03-22 02:30:38 -0800 |
|---|---|---|
| committer | David S. Miller <davem@sunset.davemloft.net> | 2005-03-22 02:30:38 -0800 |
| commit | dc86a62eb6a9067719c19b0fee1b4f4aca759b4a | |
| tree | eb3e710c01fdfae06c99c8ced7864c376e894eb0 /include/linux/netpoll.h | |
| parent | f06a39cc010a8c87bb6fa8fe8bcfb951fde44074 | |
[NETPOLL]: Fix ->poll() locking
Introduce a per-client poll lock and flag. The lock ensures we never
have more than one caller inside dev->poll(). The flag provides
recursion avoidance on UP, where the spinlock compiles away to nothing
and so cannot act as a guard.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
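The flag matters most for code that can run both from normal context and from inside dev->poll(): such code must not call back into the poll routine on the same CPU. A minimal sketch of that guard, assuming a hypothetical helper (`example_service_device` is illustrative and not part of this patch):

```c
#include <linux/netpoll.h>
#include <linux/smp.h>

/*
 * Hypothetical helper, for illustration only: code that may run both
 * from normal context and from within dev->poll() can use the
 * poll_owner flag added by this patch to avoid re-entering the poll
 * routine.
 */
static void example_service_device(struct netpoll *np)
{
	/*
	 * poll_owner is set, under poll_lock, to the CPU currently
	 * executing dev->poll().  On UP the spinlock compiles away to
	 * nothing, so this comparison is the recursion guard.
	 */
	if (np->poll_owner == __smp_processor_id())
		return;			/* already inside dev->poll() here */

	netpoll_poll(np);		/* safe: this CPU is not polling */
}
```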
Diffstat (limited to 'include/linux/netpoll.h')
| -rw-r--r-- | include/linux/netpoll.h | 21 |
|---|---|---|

1 file changed, 21 insertions, 0 deletions
```diff
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 93572c94b6a2..c7a3898c3596 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -21,6 +21,8 @@ struct netpoll {
 	u32 local_ip, remote_ip;
 	u16 local_port, remote_port;
 	unsigned char local_mac[6], remote_mac[6];
+	spinlock_t poll_lock;
+	int poll_owner;
 };
 
 void netpoll_poll(struct netpoll *np);
@@ -37,8 +39,27 @@ static inline int netpoll_rx(struct sk_buff *skb)
 {
 	return skb->dev->np && skb->dev->np->rx_flags && __netpoll_rx(skb);
 }
+
+static inline void netpoll_poll_lock(struct net_device *dev)
+{
+	if (dev->np) {
+		spin_lock(&dev->np->poll_lock);
+		dev->np->poll_owner = __smp_processor_id();
+	}
+}
+
+static inline void netpoll_poll_unlock(struct net_device *dev)
+{
+	if (dev->np) {
+		dev->np->poll_owner = -1;
+		spin_unlock(&dev->np->poll_lock);
+	}
+}
+
 #else
 #define netpoll_rx(a) 0
+#define netpoll_poll_lock(a)
+#define netpoll_poll_unlock(a)
 #endif
 
 #endif
```
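These helpers are meant to bracket the single dev->poll() call site in the softirq receive path. A hedged sketch of that call site (simplified; the real net_rx_action() also manages the poll_list and quota accounting, and `poll_one_device` is an illustrative name):

```c
#include <linux/netdevice.h>
#include <linux/netpoll.h>

/*
 * Illustrative wrapper, not from this patch: how the softirq receive
 * path brackets its one call into dev->poll() with the new helpers.
 * Quota and poll_list bookkeeping from net_rx_action() are omitted.
 */
static void poll_one_device(struct net_device *dev)
{
	int budget = 64;		/* illustrative quota */

	netpoll_poll_lock(dev);		/* never more than one caller in ->poll() */
	dev->poll(dev, &budget);	/* 2.6-era signature: int (*poll)(struct net_device *, int *) */
	netpoll_poll_unlock(dev);
}
```

Note that netpoll_poll_unlock() clears poll_owner before releasing the lock; clearing it afterwards would leave a window in which another CPU could take the lock, set poll_owner, and then have it clobbered back to -1.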
