summaryrefslogtreecommitdiff
path: root/kernel/rcupdate.c
blob: 49c3ab0a2a767879f14cd68d8537e21cc854c182 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 * 
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * 		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>

/*
 * Definition for rcupdate control block.
 *
 * Global grace-period state:
 *   curbatch     - batch currently waiting for its grace period to end
 *   maxbatch     - highest batch number requested by any CPU so far
 *   rcu_cpu_mask - CPUs that must still pass through a quiescent state
 *                  before curbatch can complete
 *   mutex        - protects the fields above
 */
struct rcu_ctrlblk rcu_ctrlblk = 
	{ .mutex = SPIN_LOCK_UNLOCKED, .curbatch = 1, 
	  .maxbatch = 1, .rcu_cpu_mask = 0 };
/* Per-cpu callback lists, batch numbers and quiescent-state counters. */
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };

/* Fake initialization required by compiler: real setup is tasklet_init()
 * in rcu_online_cpu(). */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
#define RCU_tasklet(cpu) (per_cpu(rcu_tasklet, cpu))

/**
 * call_rcu - Queue an RCU update request.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 * @arg: argument to be passed to the update function
 *
 * The update function will be invoked as soon as all CPUs have performed
 * a context switch or been seen in the idle loop or in a user process.
 * The read-side of critical sections that use call_rcu() for updates must
 * be protected by rcu_read_lock()/rcu_read_unlock().
 */
void call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
{
	int cpu;
	unsigned long flags;

	head->func = func;
	head->arg = arg;
	/*
	 * Disable interrupts so queueing onto this CPU's nxtlist cannot
	 * race with rcu_process_callbacks(), which splices nxtlist away
	 * under local_irq_disable() on the same CPU.  Also pins us to
	 * one CPU across the smp_processor_id()/list_add_tail pair.
	 */
	local_irq_save(flags);
	cpu = smp_processor_id();
	list_add_tail(&head->list, &RCU_nxtlist(cpu));
	local_irq_restore(flags);
}

/*
 * Invoke every completed RCU callback queued on @list (a per-cpu,
 * already-detached list).  Each entry is unlinked before its function
 * runs, since the callback may free or requeue the rcu_head.
 */
static void rcu_do_batch(struct list_head *list)
{
	struct rcu_head *rh;

	while (!list_empty(list)) {
		rh = list_entry(list->next, struct rcu_head, list);
		list_del(&rh->list);
		rh->func(rh->arg);
	}
}

/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold the rcu_ctrlblk lock.
 */
static void rcu_start_batch(long newbatch)
{
	/* Remember the highest batch number requested so far. */
	if (rcu_batch_before(rcu_ctrlblk.maxbatch, newbatch))
		rcu_ctrlblk.maxbatch = newbatch;

	/*
	 * Bail out when a grace period is still in flight (some CPU has
	 * not yet checked in) or when no batch beyond the current one
	 * has been requested.
	 */
	if (rcu_ctrlblk.rcu_cpu_mask != 0 ||
	    rcu_batch_before(rcu_ctrlblk.maxbatch, rcu_ctrlblk.curbatch))
		return;

	/* Start the grace period: every online CPU must now check in. */
	rcu_ctrlblk.rcu_cpu_mask = cpu_online_map;
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so and if it already hasn't done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(void)
{
	int cpu = smp_processor_id();

	/* This CPU already checked in for the current grace period,
	 * or no grace period is in progress. */
	if (!test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask))
		return;

	/* 
	 * Races with local timer interrupt - in the worst case
	 * we may miss one quiescent state of that CPU. That is
	 * tolerable. So no need to disable interrupts.
	 */
	/* First look in this grace period: snapshot the counter and
	 * wait for it to advance past the snapshot. */
	if (RCU_last_qsctr(cpu) == RCU_QSCTR_INVALID) {
		RCU_last_qsctr(cpu) = RCU_qsctr(cpu);
		return;
	}
	/* Counter unchanged: no quiescent state seen here yet. */
	if (RCU_qsctr(cpu) == RCU_last_qsctr(cpu))
		return;

	spin_lock(&rcu_ctrlblk.mutex);
	/* Re-check under the lock: the grace period may have been
	 * completed/restarted since the lockless test above. */
	if (!test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask))
		goto out_unlock;

	clear_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask);
	RCU_last_qsctr(cpu) = RCU_QSCTR_INVALID;
	/* Other CPUs still pending for this grace period. */
	if (rcu_ctrlblk.rcu_cpu_mask != 0)
		goto out_unlock;

	/* We were the last CPU: the grace period is over.  Advance to
	 * the next batch and start another period if one is wanted. */
	rcu_ctrlblk.curbatch++;
	rcu_start_batch(rcu_ctrlblk.maxbatch);

out_unlock:
	spin_unlock(&rcu_ctrlblk.mutex);
}


/*
 * This does the RCU processing work from tasklet context: retire
 * callbacks whose grace period has ended, promote newly queued
 * callbacks into the waiting position, and note quiescent states.
 */
static void rcu_process_callbacks(unsigned long unused)
{
	int cpu = smp_processor_id();
	LIST_HEAD(list);

	/* If the batch curlist was waiting on has completed, move those
	 * callbacks to a local list; they are invoked at the bottom. */
	if (!list_empty(&RCU_curlist(cpu)) &&
	    rcu_batch_after(rcu_ctrlblk.curbatch, RCU_batch(cpu))) {
		list_splice(&RCU_curlist(cpu), &list);
		INIT_LIST_HEAD(&RCU_curlist(cpu));
	}

	/* Irqs off: call_rcu() appends to nxtlist from this CPU with
	 * interrupts disabled, so the splice must not race with it. */
	local_irq_disable();
	if (!list_empty(&RCU_nxtlist(cpu)) && list_empty(&RCU_curlist(cpu))) {
		list_splice(&RCU_nxtlist(cpu), &RCU_curlist(cpu));
		INIT_LIST_HEAD(&RCU_nxtlist(cpu));
		local_irq_enable();

		/*
		 * start the next batch of callbacks for the promoted
		 * entries; they must wait for curbatch + 1 to complete
		 */
		spin_lock(&rcu_ctrlblk.mutex);
		RCU_batch(cpu) = rcu_ctrlblk.curbatch + 1;
		rcu_start_batch(RCU_batch(cpu));
		spin_unlock(&rcu_ctrlblk.mutex);
	} else {
		local_irq_enable();
	}
	rcu_check_quiescent_state();
	/* Finally run the callbacks whose grace period has elapsed. */
	if (!list_empty(&list))
		rcu_do_batch(&list);
}

/*
 * Per-tick hook (presumably called from the timer interrupt -- see
 * rcu_init()'s comment).  Count a quiescent state for @cpu when the
 * tick interrupted user mode, or interrupted the idle loop with no
 * softirq active and no nested hardirq (only the tick itself), then
 * kick the per-cpu tasklet to do the RCU bookkeeping.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user)
		RCU_qsctr(cpu)++;
	else if (idle_cpu(cpu) && !in_softirq() &&
		 hardirq_count() <= (1 << HARDIRQ_SHIFT))
		RCU_qsctr(cpu)++;
	tasklet_schedule(&RCU_tasklet(cpu));
}

/* Initialize @cpu's per-cpu RCU state: zeroed counters/batch number,
 * empty callback lists, and its processing tasklet. */
static void __devinit rcu_online_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	memset(rdp, 0, sizeof(*rdp));
	INIT_LIST_HEAD(&RCU_curlist(cpu));
	INIT_LIST_HEAD(&RCU_nxtlist(cpu));
	tasklet_init(&RCU_tasklet(cpu), rcu_process_callbacks, 0UL);
}

/*
 * CPU hotplug callback: set up per-cpu RCU state for a CPU that is
 * about to come online.  All other events are ignored for now.
 */
static int __devinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	if (action == CPU_UP_PREPARE)
		rcu_online_cpu(cpu);
	/* Space reserved for CPU_OFFLINE :) */
	return NOTIFY_OK;
}

/* Hotplug notifier that brings up per-cpu RCU state for new CPUs. */
static struct notifier_block __devinitdata rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};

/*
 * Initializes rcu mechanism.  Assumed to be called early.
 * That is before local timer(SMP) or jiffie timer (uniproc) is setup.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init rcu_init(void)
{
	/* Set up the boot CPU's state directly - it is already online. */
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}


/*
 * RCU callback used by synchronize_kernel(): signal the completion
 * handed over as the opaque argument.  Exists because complete() has a
 * FASTCALL declaration and so cannot be passed to call_rcu() directly.
 */
static void wakeme_after_rcu(void *completion)
{
	struct completion *done = completion;

	complete(done);
}

/**
 * synchronize_kernel - wait until all the CPUs have gone
 * through a "quiescent" state. It may sleep.
 */
void synchronize_kernel(void)
{
	/* Both live on this stack; safe because we block below until
	 * the callback has run and nothing references them afterwards. */
	struct rcu_head rcu;
	DECLARE_COMPLETION(completion);

	/* Will wake me after RCU finished */
	call_rcu(&rcu, wakeme_after_rcu, &completion);

	/* Wait for it */
	wait_for_completion(&completion);
}


/* Exported so modules can queue callbacks and wait for grace periods. */
EXPORT_SYMBOL(call_rcu);
EXPORT_SYMBOL(synchronize_kernel);