author     Ravikiran G. Thirumalai <kiran@in.ibm.com>  2002-07-24 19:15:13 -0700
committer  Linus Torvalds <torvalds@home.transmeta.com>  2002-07-24 19:15:13 -0700
commit     a94a3303d99ff6ec0c07b5670fd8e1f5251163fe (patch)
tree       124999679d72b8a402efa629780b76425d7921fc /kernel
parent     4966a3c5f700221a54a35f09803b7aeef5fdafc2 (diff)
[PATCH] Ensure xtime_lock and timerlist_lock are on different cachelines
I've noticed that xtime_lock and timerlist_lock end up on the same cacheline all the time (at least on x86). Not a good thing for loads with high xxx_timer and do_gettimeofday counts, I guess (networking etc.). Here's a trivial fix.
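To illustrate the effect, here is a minimal userspace sketch (not the kernel code): it declares two stand-in locks with and without a GCC aligned attribute, which is roughly what ____cacheline_aligned_in_smp provides on SMP builds, and checks whether they land on the same cacheline. The 64-byte line size, the fake_lock type, and the variable names are assumptions made for the demo.

#include <stdio.h>
#include <stdint.h>

#define CACHELINE_SIZE 64	/* assumed line size; SMP_CACHE_BYTES in the kernel */

struct fake_lock { volatile int slock; };	/* stand-in for spinlock_t/rwlock_t */

/* Adjacent globals with no alignment constraint can share a cacheline. */
static struct fake_lock timerlist_lock_plain;
static struct fake_lock xtime_lock_plain;

/* Forcing cacheline alignment (roughly what the kernel annotation does on
 * SMP builds) puts each lock at the start of its own cacheline. */
static struct fake_lock timerlist_lock_aligned __attribute__((aligned(CACHELINE_SIZE)));
static struct fake_lock xtime_lock_aligned __attribute__((aligned(CACHELINE_SIZE)));

static int same_line(const void *a, const void *b)
{
	return (uintptr_t)a / CACHELINE_SIZE == (uintptr_t)b / CACHELINE_SIZE;
}

int main(void)
{
	printf("plain locks share a line:   %s\n",
	       same_line(&timerlist_lock_plain, &xtime_lock_plain) ? "yes" : "no");
	printf("aligned locks share a line: %s\n",
	       same_line(&timerlist_lock_aligned, &xtime_lock_aligned) ? "yes" : "no");
	return 0;
}

On typical builds the plain pair will usually report "yes" (both small structs fit in one line) while the aligned pair reports "no"; the former is the false-sharing situation the patch avoids for xtime_lock and timerlist_lock.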
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/timer.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index 64d154ba1255..3c7cbeaf957a 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -169,7 +169,7 @@ static inline void internal_add_timer(struct timer_list *timer)
}
/* Initialize both explicitly - let's try to have them in the same cache line */
-spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t timerlist_lock ____cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_SMP
volatile struct timer_list * volatile running_timer;
@@ -327,7 +327,7 @@ repeat:
spin_unlock_irq(&timerlist_lock);
}
-spinlock_t tqueue_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t tqueue_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
void tqueue_bh(void)
{
@@ -633,7 +633,7 @@ unsigned long wall_jiffies;
* This read-write spinlock protects us from races in SMP while
* playing with xtime and avenrun.
*/
-rwlock_t xtime_lock = RW_LOCK_UNLOCKED;
+rwlock_t xtime_lock __cacheline_aligned_in_smp = RW_LOCK_UNLOCKED;
unsigned long last_time_offset;
static inline void update_times(void)
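For context, here is a rough sketch of how the alignment helpers used in this patch are shaped, modeled on the kernel's include/linux/cache.h; the exact definitions depend on the architecture's cacheline size and the kernel version, so treat this as an approximation rather than the real header.

/* Approximate shape of the helpers (see include/linux/cache.h). */
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))

#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#define __cacheline_aligned_in_smp   __cacheline_aligned /* variant also placed in a cacheline-aligned data section */
#else
#define ____cacheline_aligned_in_smp
#define __cacheline_aligned_in_smp
#endif

/* On a UP kernel the annotations expand to nothing and the locks pack
 * normally; on SMP each annotated lock starts on its own cacheline, so
 * writers of one lock no longer bounce the line holding the other. */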