| author | Dipankar Sarma <dipankar@in.ibm.com> | 2004-08-22 22:57:30 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-08-22 22:57:30 -0700 |
| commit | f0f4d6e41008746f51db2c795469e1707e516672 (patch) | |
| tree | 3a2e58897a1bdfc8bb4bb19027108d5e6d9de490 /kernel | |
| parent | bcce63134d465edc461f81ef23627cde4227e05a (diff) | |
[PATCH] RCU - cpu offline fix
This fixes the RCU cpu offline code, which was broken by the singly-linked RCU
changes. Nathan pointed out the problems and submitted a patch for this.
The fix is optimal: there is no need to iterate through the list of callbacks;
just use the tail pointers and attach the whole list from the dead cpu.
Signed-off-by: Nathan Lynch <nathanl@austin.ibm.com>
Signed-off-by: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
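
The idea behind the fix, shown in isolation: a callback queue kept as a singly-linked list with a pointer-to-pointer tail can adopt another queue's entire contents in constant time, by writing the donor's head through the receiver's tail and then taking over the donor's tail pointer. The sketch below only illustrates that pattern with made-up names (`struct cb`, `struct cb_queue`, `move_batch_slow`, `move_batch_fast`); it is not the kernel's `rcu_head`/`RCU_nxttail` code and it leaves out the interrupt disabling that the real `rcu_move_batch()` relies on.

```c
#include <stddef.h>

/* Illustrative stand-ins only; not the kernel's rcu_head or the per-cpu
 * RCU_nxtlist/RCU_nxttail macros. */
struct cb {
	struct cb *next;
};

struct cb_queue {
	struct cb *head;	/* first pending callback, or NULL */
	struct cb **tail;	/* &head while empty, else &last->next */
};

static void queue_init(struct cb_queue *q)
{
	q->head = NULL;
	q->tail = &q->head;	/* empty-queue invariant: tail points at head */
}

/* O(n) approach the patch removes: walk the donor list and re-append
 * each node to the receiving queue, one at a time. */
static void move_batch_slow(struct cb_queue *to, struct cb *list)
{
	while (list != NULL) {
		*to->tail = list;
		to->tail = &list->next;
		list = list->next;
	}
}

/* O(1) approach the patch introduces: splice the whole donor list onto
 * the receiver's tail, then adopt the donor's tail pointer (only if the
 * donor list is non-empty). */
static void move_batch_fast(struct cb_queue *to, struct cb *list,
			    struct cb **tail)
{
	*to->tail = list;
	if (list)
		to->tail = tail;
}
```

The `if (list)` guard mirrors the one added in the patch: for an empty donor list, adopting its tail would leave the receiver's tail pointing at the donor's list head, so later appends would land on the wrong (dead) CPU's queue instead of the surviving one.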
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/rcupdate.c | 19 |
1 file changed, 9 insertions(+), 10 deletions(-)
```diff
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 97cb4eaa1019..5a8d9856610b 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -210,19 +210,15 @@ static void rcu_check_quiescent_state(void)
  * locking requirements, the list it's pulling from has to belong to a cpu
  * which is dead and hence not processing interrupts.
  */
-static void rcu_move_batch(struct rcu_head *list)
+static void rcu_move_batch(struct rcu_head *list, struct rcu_head **tail)
 {
 	int cpu;
 
 	local_irq_disable();
-
 	cpu = smp_processor_id();
-
-	while (list != NULL) {
-		*RCU_nxttail(cpu) = list;
-		RCU_nxttail(cpu) = &list->next;
-		list = list->next;
-	}
+	*RCU_nxttail(cpu) = list;
+	if (list)
+		RCU_nxttail(cpu) = tail;
 	local_irq_enable();
 }
 
@@ -237,8 +233,8 @@ static void rcu_offline_cpu(int cpu)
 		cpu_quiet(cpu);
 	spin_unlock_bh(&rcu_state.mutex);
 
-	rcu_move_batch(RCU_curlist(cpu));
-	rcu_move_batch(RCU_nxtlist(cpu));
+	rcu_move_batch(RCU_curlist(cpu), RCU_curtail(cpu));
+	rcu_move_batch(RCU_nxtlist(cpu), RCU_nxttail(cpu));
 
 	tasklet_kill_immediate(&RCU_tasklet(cpu), cpu);
 }
@@ -271,6 +267,7 @@ static void rcu_process_callbacks(unsigned long unused)
 	    !rcu_batch_before(rcu_ctrlblk.completed, RCU_batch(cpu))) {
 		rcu_list = RCU_curlist(cpu);
 		RCU_curlist(cpu) = NULL;
+		RCU_curtail(cpu) = &RCU_curlist(cpu);
 	}
 
 	local_irq_disable();
@@ -278,6 +275,7 @@ static void rcu_process_callbacks(unsigned long unused)
 		int next_pending, seq;
 
 		RCU_curlist(cpu) = RCU_nxtlist(cpu);
+		RCU_curtail(cpu) = RCU_nxttail(cpu);
 		RCU_nxtlist(cpu) = NULL;
 		RCU_nxttail(cpu) = &RCU_nxtlist(cpu);
 		local_irq_enable();
@@ -319,6 +317,7 @@ static void __devinit rcu_online_cpu(int cpu)
 {
 	memset(&per_cpu(rcu_data, cpu), 0, sizeof(struct rcu_data));
 	tasklet_init(&RCU_tasklet(cpu), rcu_process_callbacks, 0UL);
+	RCU_curtail(cpu) = &RCU_curlist(cpu);
 	RCU_nxttail(cpu) = &RCU_nxtlist(cpu);
 	RCU_quiescbatch(cpu) = rcu_ctrlblk.completed;
 	RCU_qs_pending(cpu) = 0;
```
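
The remaining hunks all maintain one invariant: every callback list carries a matching tail pointer, updated in the same step whenever the list is promoted, handed off, or cleared, so that `rcu_move_batch()` (and ordinary appends) can trust it. Below is a rough, self-contained sketch of that bookkeeping with hypothetical names (`struct cpu_cbs`, `start_new_batch`, `take_finished_batch`) rather than the kernel's `RCU_curlist`/`RCU_curtail` macros, and with all locking omitted.

```c
#include <stddef.h>

struct cb {
	struct cb *next;
};

/* Hypothetical per-CPU callback state: a "next" list that new callbacks
 * are appended to, and a "current" list waiting out a grace period.
 * Each list has its own tail pointer. */
struct cpu_cbs {
	struct cb *curlist;
	struct cb **curtail;
	struct cb *nxtlist;
	struct cb **nxttail;
};

static void cbs_init(struct cpu_cbs *c)
{
	c->curlist = NULL;
	c->curtail = &c->curlist;	/* mirrors the rcu_online_cpu() hunk */
	c->nxtlist = NULL;
	c->nxttail = &c->nxtlist;
}

/* Start a new batch: promote nxtlist to curlist.  The tail moves with the
 * head, and nxttail is reset to the now-empty nxtlist head (the second
 * rcu_process_callbacks() hunk). */
static void start_new_batch(struct cpu_cbs *c)
{
	c->curlist = c->nxtlist;
	c->curtail = c->nxttail;
	c->nxtlist = NULL;
	c->nxttail = &c->nxtlist;
}

/* Hand the finished batch off for invocation, clearing the list and
 * resetting its tail in the same step (the first rcu_process_callbacks()
 * hunk). */
static struct cb *take_finished_batch(struct cpu_cbs *c)
{
	struct cb *done = c->curlist;

	c->curlist = NULL;
	c->curtail = &c->curlist;
	return done;
}
```

With these invariants in place, `rcu_offline_cpu()` can pass each list together with its tail pointer and splice both queues of the dead CPU onto the current CPU in constant time.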
