| author | Robert Love <rml@tech9.net> | 2002-09-24 01:37:38 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-09-24 01:37:38 -0700 |
| commit | c6e70088033a7c01d2a156f5ac740b716d52b1bb (patch) | |
| tree | b8f386c319ec8be5d235fa2df8b706419c36029e /include | |
| parent | 7f644d00d25a416a4e583b3d190ab6b89167fd85 (diff) | |
[PATCH] per-cpu data preempt-safing
This fixes unsafe access to per-CPU data via reordering of instructions or use
of "get_cpu()".
Before anyone balks at the brlock.h fix, note this was in the
alternative version of the code which is not used by default.
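For context on why that alternative path needs the change: the non-atomic brlock variant tracks readers in a per-CPU counter slot of `__brlock_array`, so the lock and unlock halves must execute on the same CPU. A simplified sketch of the idea (omitting the kernel's write-lock retry logic):

```c
/*
 * Simplified shape of the non-atomic brlock read side after the patch.
 * Preemption stays disabled across the critical section, so the
 * decrement in unlock hits the same per-CPU counter as the increment.
 */
static void br_read_lock_sketch(int idx)
{
	preempt_disable();			/* pin the task to this CPU */
	__brlock_array[smp_processor_id()][idx]++;
}

static void br_read_unlock_sketch(int idx)
{
	__brlock_array[smp_processor_id()][idx]--;	/* same CPU as lock */
	preempt_enable();
}
```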
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/brlock.h | 5 |
|---|---|---|
| -rw-r--r-- | include/linux/netdevice.h | 12 |
| -rw-r--r-- | include/linux/page-flags.h | 6 |
3 files changed, 15 insertions, 8 deletions
```diff
diff --git a/include/linux/brlock.h b/include/linux/brlock.h
index db27bc1830d0..59880a3f38c6 100644
--- a/include/linux/brlock.h
+++ b/include/linux/brlock.h
@@ -85,7 +85,8 @@ static inline void br_read_lock (enum brlock_indices idx)
 	if (idx >= __BR_END)
 		__br_lock_usage_bug();
 
-	read_lock(&__brlock_array[smp_processor_id()][idx]);
+	preempt_disable();
+	_raw_read_lock(&__brlock_array[smp_processor_id()][idx]);
 }
 
 static inline void br_read_unlock (enum brlock_indices idx)
@@ -109,6 +110,7 @@ static inline void br_read_lock (enum brlock_indices idx)
 	if (idx >= __BR_END)
 		__br_lock_usage_bug();
 
+	preempt_disable();
 	ctr = &__brlock_array[smp_processor_id()][idx];
 	lock = &__br_write_locks[idx].lock;
 again:
@@ -147,6 +149,7 @@ static inline void br_read_unlock (enum brlock_indices idx)
 
 	wmb();
 	(*ctr)--;
+	preempt_enable();
 }
 
 #endif /* __BRLOCK_USE_ATOMICS */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c73a00744bc8..93b4d5f1e64d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -514,9 +514,10 @@ static inline void __netif_schedule(struct net_device *dev)
 {
 	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
 		unsigned long flags;
-		int cpu = smp_processor_id();
+		int cpu;
 
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		dev->next_sched = softnet_data[cpu].output_queue;
 		softnet_data[cpu].output_queue = dev;
 		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
@@ -563,10 +564,11 @@ static inline int netif_running(struct net_device *dev)
 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 {
 	if (atomic_dec_and_test(&skb->users)) {
-		int cpu =smp_processor_id();
+		int cpu;
 		unsigned long flags;
 
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		skb->next = softnet_data[cpu].completion_queue;
 		softnet_data[cpu].completion_queue = skb;
 		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
@@ -726,9 +728,10 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
 static inline void __netif_rx_schedule(struct net_device *dev)
 {
 	unsigned long flags;
-	int cpu = smp_processor_id();
+	int cpu;
 
 	local_irq_save(flags);
+	cpu = smp_processor_id();
 	dev_hold(dev);
 	list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
 	if (dev->quota < 0)
@@ -754,11 +757,12 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 {
 	if (netif_rx_schedule_prep(dev)) {
 		unsigned long flags;
-		int cpu = smp_processor_id();
+		int cpu;
 
 		dev->quota += undo;
 
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
 		__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
 		local_irq_restore(flags);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5a4208b4651f..0970c101c197 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -86,9 +86,9 @@ extern void get_page_state(struct page_state *ret);
 
 #define mod_page_state(member, delta)					\
 	do {								\
-		preempt_disable();					\
-		page_states[smp_processor_id()].member += (delta);	\
-		preempt_enable();					\
+		int cpu = get_cpu();					\
+		page_states[cpu].member += (delta);			\
+		put_cpu();						\
 	} while (0)
 
 #define inc_page_state(member)	mod_page_state(member, 1UL)
```
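Note that the page-flags.h hunk is behavior-preserving rather than a bug fix: the old code already bracketed the access with preempt_disable()/preempt_enable(). The new form just expresses the idiom directly, since get_cpu() and put_cpu() are essentially (paraphrasing the linux/smp.h definitions of this era):

```c
/* Approximate expansions; see linux/smp.h for the exact definitions. */
#define get_cpu()	({ preempt_disable(); smp_processor_id(); })
#define put_cpu()	preempt_enable()
```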
