| author | Dipankar Sarma <dipankar@in.ibm.com> | 2002-10-15 05:40:46 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-10-15 05:40:46 -0700 |
| commit | 1477a825d7e6486a077608c7baf6abbb6f27ed95 | |
| tree | cd5a97bdfdf85383259bf9aacc3bea968d0f6188 /include/linux | |
| parent | e856ba4ac9cc2c405d062c496ec125b63c502e4d | |
[PATCH] Read-Copy Update infrastructure
This is the RCU core patch from akpm's tree. It has been in his
tree since about 2.5.37-mm1 along with dcache_rcu and so far it has
worked fine. For 2.5, I am hoping that we might get the following
RCU patches included -
1. rt_rcu - ipv4 routecache lookup. Davem agreed to include this patch
if and when you include RCU core in your tree.
2. dcache_rcu (by Maneesh Soni) - dcache lookup avoiding dcache_lock as
much as possible. This has been in akpm's tree - stable and it gives
us a good yield. I have been submitting it to Viro and I will publish
some more benchmark numbers later to help decide on this.
This RCU core implements the RCU APIs, call_rcu() and synchronize_kernel(),
by monitoring a per-CPU quiescent-state (idle/user-mode etc.) counter.
call_rcu() queues a callback to be invoked after all the CPUs have
gone through a quiescent state. Queuing is per-CPU, and each per-CPU
batch gets a batch number. As batches get their turn, a global
CPU mask is used to keep track of the CPUs still pending a quiescent
state. Checking for a quiescent cycle is done by saving the per-CPU
counter at the beginning of the batch and then monitoring it for change
from the local timer interrupt handler.
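
Since the patch itself only declares the interfaces, here is a minimal, hypothetical usage sketch of the API described above. The struct foo type and the foo_* helpers are invented for illustration; only struct rcu_head, call_rcu(), and rcu_read_lock()/rcu_read_unlock() come from the patch. The updater unlinks an element and lets call_rcu() defer its kfree() past a grace period, while lookups run with no lock at all:

```c
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct list_head list;
	long key;
	long data;
	struct rcu_head rcu;
};

/* Runs only after every CPU has passed through a quiescent state,
 * so no reader that saw the element before list_del() can still
 * be referencing it. */
static void foo_free_rcu(void *arg)
{
	kfree(arg);
}

/* Update side: unlink under whatever update-side lock the list
 * already uses, then defer the actual free to a grace period
 * instead of freeing immediately. */
static void foo_del(struct foo *fp)
{
	list_del(&fp->list);
	call_rcu(&fp->rcu, foo_free_rcu, fp);
}

/* Read side: no lock and no atomic operations; with this patch
 * rcu_read_lock() is just preempt_disable(), which keeps this CPU
 * out of a quiescent state for the duration of the traversal.
 * Note the data is consumed inside the read-side critical section. */
static long foo_find(struct list_head *head, long key)
{
	struct foo *fp;
	long data = -1;

	rcu_read_lock();
	list_for_each_entry(fp, head, list) {
		if (fp->key == key) {
			data = fp->data;
			break;
		}
	}
	rcu_read_unlock();
	return data;
}
```

The win for lookups like the route-cache and dcache cases mentioned above comes from that lock-free read side; the cost is shifted entirely onto the rarer update path.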
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/rcupdate.h | 134 |
|---|---|---|

1 file changed, 134 insertions, 0 deletions
```diff
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
new file mode 100644
index 000000000000..a5ffb7bb5743
--- /dev/null
+++ b/include/linux/rcupdate.h
@@ -0,0 +1,134 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) IBM Corporation, 2001
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ *
+ * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *		http://lse.sourceforge.net/locking/rcupdate.html
+ *
+ */
+
+#ifndef __LINUX_RCUPDATE_H
+#define __LINUX_RCUPDATE_H
+
+#ifdef __KERNEL__
+
+#include <linux/cache.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+
+/**
+ * struct rcu_head - callback structure for use with RCU
+ * @list: list_head to queue the update requests
+ * @func: actual update function to call after the grace period.
+ * @arg: argument to be passed to the actual update function.
+ */
+struct rcu_head {
+	struct list_head list;
+	void (*func)(void *obj);
+	void *arg;
+};
+
+#define RCU_HEAD_INIT(head) \
+		{ list: LIST_HEAD_INIT(head.list), func: NULL, arg: NULL }
+#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT(head)
+#define INIT_RCU_HEAD(ptr) do { \
+	INIT_LIST_HEAD(&(ptr)->list); (ptr)->func = NULL; (ptr)->arg = NULL; \
+} while (0)
+
+
+
+/* Control variables for rcupdate callback mechanism. */
+struct rcu_ctrlblk {
+	spinlock_t	mutex;		/* Guard this struct              */
+	long		curbatch;	/* Current batch number.          */
+	long		maxbatch;	/* Max requested batch number.    */
+	unsigned long	rcu_cpu_mask;	/* CPUs that need to switch in order */
+					/* for current batch to proceed.     */
+};
+
+/* Is batch a before batch b ? */
+static inline int rcu_batch_before(long a, long b)
+{
+	return (a - b) < 0;
+}
+
+/* Is batch a after batch b ? */
+static inline int rcu_batch_after(long a, long b)
+{
+	return (a - b) > 0;
+}
+
+/*
+ * Per-CPU data for Read-Copy UPdate.
+ * nxtlist - new callbacks are added here
+ * curlist - current batch for which quiescent cycle started if any
+ */
+struct rcu_data {
+	long		qsctr;		/* User-mode/idle loop etc. */
+	long		last_qsctr;	/* value of qsctr at beginning */
+					/* of rcu grace period */
+	long		batch;		/* Batch # for current RCU batch */
+	struct list_head nxtlist;
+	struct list_head curlist;
+} ____cacheline_aligned_in_smp;
+
+extern struct rcu_data rcu_data[NR_CPUS];
+extern struct rcu_ctrlblk rcu_ctrlblk;
+
+#define RCU_qsctr(cpu)		(rcu_data[(cpu)].qsctr)
+#define RCU_last_qsctr(cpu)	(rcu_data[(cpu)].last_qsctr)
+#define RCU_batch(cpu)		(rcu_data[(cpu)].batch)
+#define RCU_nxtlist(cpu)	(rcu_data[(cpu)].nxtlist)
+#define RCU_curlist(cpu)	(rcu_data[(cpu)].curlist)
+
+#define RCU_QSCTR_INVALID	0
+
+static inline int rcu_pending(int cpu)
+{
+	if ((!list_empty(&RCU_curlist(cpu)) &&
+	     rcu_batch_before(RCU_batch(cpu), rcu_ctrlblk.curbatch)) ||
+	    (list_empty(&RCU_curlist(cpu)) &&
+	     !list_empty(&RCU_nxtlist(cpu))) ||
+	    test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask))
+		return 1;
+	else
+		return 0;
+}
+
+#define rcu_read_lock()		preempt_disable()
+#define rcu_read_unlock()	preempt_enable()
+
+extern void rcu_init(void);
+extern void rcu_check_callbacks(int cpu, int user);
+
+/* Exported interfaces */
+extern void FASTCALL(call_rcu(struct rcu_head *head,
+		void (*func)(void *arg), void *arg));
+extern void synchronize_kernel(void);
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_RCUPDATE_H */
```
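
One subtlety in the header worth flagging: rcu_batch_before()/rcu_batch_after() compare batch numbers via signed subtraction rather than a plain relational operator, so the comparison stays correct when the batch counter wraps around (the same idiom as the kernel's time_before()/time_after()). Below is a standalone userspace sketch of the idea; note that signed overflow is technically undefined in ISO C, so a test like this should be built with two's-complement wrapping semantics (e.g. gcc -fwrapv), which is what the kernel effectively assumes:

```c
#include <assert.h>
#include <limits.h>

/* Same definition as in rcupdate.h above. */
static inline int rcu_batch_before(long a, long b)
{
	return (a - b) < 0;
}

int main(void)
{
	/* Ordinary case: batch 5 precedes batch 6. */
	assert(rcu_batch_before(5, 6));

	/* Across the wrap: LONG_MAX - LONG_MIN wraps to -1 on a
	 * two's-complement machine, so the batch numbered LONG_MAX
	 * is still correctly seen as "before" the next batch, which
	 * wraps to LONG_MIN. A plain "a < b" would get this wrong. */
	assert(rcu_batch_before(LONG_MAX, LONG_MIN));
	return 0;
}
```

The guarantee only holds while outstanding batch numbers stay within LONG_MAX/2 of each other, which is safe here since batches complete far faster than the counter can advance that far.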
